Bullet Collision Detection & Physics Library
btBatchedConstraints.cpp
1 /*
2 Bullet Continuous Collision Detection and Physics Library
3 Copyright (c) 2003-2006 Erwin Coumans http://continuousphysics.com/Bullet/
4 
5 This software is provided 'as-is', without any express or implied warranty.
6 In no event will the authors be held liable for any damages arising from the use of this software.
7 Permission is granted to anyone to use this software for any purpose,
8 including commercial applications, and to alter it and redistribute it freely,
9 subject to the following restrictions:
10 
11 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
12 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
13 3. This notice may not be removed or altered from any source distribution.
14 */
15 
16 
17 #include "btBatchedConstraints.h"
18 
19 #include "LinearMath/btIDebugDraw.h"
20 #include "LinearMath/btMinMax.h"
21 #include "LinearMath/btAlignedObjectArray.h"
22 #include "LinearMath/btQuickprof.h"
23 
24 #include <string.h> //for memset
25 
26 const int kNoMerge = -1;
27 
28 bool btBatchedConstraints::s_debugDrawBatches = false;
29 
30 
31 struct btBatchedConstraintInfo
32 {
33  int constraintIndex;
34  int numConstraintRows;
35  int bodyIds[2];
36 };
37 
38 
39 struct btBatchInfo
40 {
41  int numConstraints;
42  int mergeIndex;
43 
44  btBatchInfo() : numConstraints(0), mergeIndex(kNoMerge) {}
45 };
46 
47 
48 bool btBatchedConstraints::validate(btConstraintArray* constraints, const btAlignedObjectArray<btSolverBody>& bodies) const
49 {
50  //
51  // validate: for debugging only. Verify coloring of bodies, that no body is touched by more than one batch in any given phase
52  //
53  int errors = 0;
54  const int kUnassignedBatch = -1;
55 
56  btAlignedObjectArray<int> bodyBatchId;
57  for (int iPhase = 0; iPhase < m_phases.size(); ++iPhase)
58  {
59  bodyBatchId.resizeNoInitialize(0);
60  bodyBatchId.resize( bodies.size(), kUnassignedBatch );
61  const Range& phase = m_phases[iPhase];
62  for (int iBatch = phase.begin; iBatch < phase.end; ++iBatch)
63  {
64  const Range& batch = m_batches[iBatch];
65  for (int iiCons = batch.begin; iiCons < batch.end; ++iiCons)
66  {
67  int iCons = m_constraintIndices[iiCons];
68  const btSolverConstraint& cons = constraints->at(iCons);
69  const btSolverBody& bodyA = bodies[cons.m_solverBodyIdA];
70  const btSolverBody& bodyB = bodies[cons.m_solverBodyIdB];
71  if (! bodyA.internalGetInvMass().isZero())
72  {
73  int thisBodyBatchId = bodyBatchId[cons.m_solverBodyIdA];
74  if (thisBodyBatchId == kUnassignedBatch)
75  {
76  bodyBatchId[cons.m_solverBodyIdA] = iBatch;
77  }
78  else if (thisBodyBatchId != iBatch)
79  {
80  btAssert( !"dynamic body is used in 2 different batches in the same phase" );
81  errors++;
82  }
83  }
84  if (! bodyB.internalGetInvMass().isZero())
85  {
86  int thisBodyBatchId = bodyBatchId[cons.m_solverBodyIdB];
87  if (thisBodyBatchId == kUnassignedBatch)
88  {
89  bodyBatchId[cons.m_solverBodyIdB] = iBatch;
90  }
91  else if (thisBodyBatchId != iBatch)
92  {
93  btAssert( !"dynamic body is used in 2 different batches in the same phase" );
94  errors++;
95  }
96  }
97  }
98  }
99  }
100  return errors == 0;
101 }
102 
103 
104 static void debugDrawSingleBatch( const btBatchedConstraints* bc,
105  btConstraintArray* constraints,
106  const btAlignedObjectArray<btSolverBody>& bodies,
107  int iBatch,
108  const btVector3& color,
109  const btVector3& offset
110  )
111 {
112  if (bc && bc->m_debugDrawer && iBatch < bc->m_batches.size())
113  {
114  const btBatchedConstraints::Range& b = bc->m_batches[iBatch];
115  for (int iiCon = b.begin; iiCon < b.end; ++iiCon)
116  {
117  int iCon = bc->m_constraintIndices[iiCon];
118  const btSolverConstraint& con = constraints->at(iCon);
119  int iBody0 = con.m_solverBodyIdA;
120  int iBody1 = con.m_solverBodyIdB;
121  btVector3 pos0 = bodies[iBody0].getWorldTransform().getOrigin() + offset;
122  btVector3 pos1 = bodies[iBody1].getWorldTransform().getOrigin() + offset;
123  bc->m_debugDrawer->drawLine(pos0, pos1, color);
124  }
125  }
126 }
127 
128 
129 static void debugDrawPhase( const btBatchedConstraints* bc,
130  btConstraintArray* constraints,
131  const btAlignedObjectArray<btSolverBody>& bodies,
132  int iPhase,
133  const btVector3& color0,
134  const btVector3& color1,
135  const btVector3& offset
136  )
137 {
138  BT_PROFILE( "debugDrawPhase" );
139  if ( bc && bc->m_debugDrawer && iPhase < bc->m_phases.size() )
140  {
141  const btBatchedConstraints::Range& phase = bc->m_phases[iPhase];
142  for (int iBatch = phase.begin; iBatch < phase.end; ++iBatch)
143  {
144  float tt = float(iBatch - phase.begin) / float(btMax(1, phase.end - phase.begin - 1));
145  btVector3 col = lerp(color0, color1, tt);
146  debugDrawSingleBatch(bc, constraints, bodies, iBatch, col, offset);
147  }
148  }
149 }
150 
151 
152 static void debugDrawAllBatches( const btBatchedConstraints* bc,
153  btConstraintArray* constraints,
154  const btAlignedObjectArray<btSolverBody>& bodies
155  )
156 {
157  BT_PROFILE( "debugDrawAllBatches" );
158  if ( bc && bc->m_debugDrawer && bc->m_phases.size() > 0 )
159  {
159  {
160  btVector3 bboxMin(BT_LARGE_FLOAT, BT_LARGE_FLOAT, BT_LARGE_FLOAT);
161  btVector3 bboxMax = -bboxMin;
162  for (int iBody = 0; iBody < bodies.size(); ++iBody)
163  {
164  const btVector3& pos = bodies[iBody].getWorldTransform().getOrigin();
165  bboxMin.setMin(pos);
166  bboxMax.setMax(pos);
167  }
168  btVector3 bboxExtent = bboxMax - bboxMin;
169  btVector3 offsetBase = btVector3( 0, bboxExtent.y()*1.1f, 0 );
170  btVector3 offsetStep = btVector3( 0, 0, bboxExtent.z()*1.1f );
171  int numPhases = bc->m_phases.size();
172  for (int iPhase = 0; iPhase < numPhases; ++iPhase)
173  {
174  float b = float(iPhase)/float(numPhases-1);
175  btVector3 color0 = btVector3(1,0,b);
176  btVector3 color1 = btVector3(0,1,b);
177  btVector3 offset = offsetBase + offsetStep*(float(iPhase) - float(numPhases-1)*0.5);
178  debugDrawPhase(bc, constraints, bodies, iPhase, color0, color1, offset);
179  }
180  }
181 }
182 
183 
184 static void initBatchedBodyDynamicFlags(btAlignedObjectArray<bool>* outBodyDynamicFlags, const btAlignedObjectArray<btSolverBody>& bodies)
185 {
186  BT_PROFILE("initBatchedBodyDynamicFlags");
187  btAlignedObjectArray<bool>& bodyDynamicFlags = *outBodyDynamicFlags;
188  bodyDynamicFlags.resizeNoInitialize(bodies.size());
189  for (int i = 0; i < bodies.size(); ++i)
190  {
191  const btSolverBody& body = bodies[ i ];
192  bodyDynamicFlags[i] = ( body.internalGetInvMass().x() > btScalar( 0 ) );
193  }
194 }
195 
196 
197 static int runLengthEncodeConstraintInfo(btBatchedConstraintInfo* outConInfos, int numConstraints)
198 {
199  BT_PROFILE("runLengthEncodeConstraintInfo");
200  // detect and run-length encode constraint rows that repeat the same bodies
201  int iDest = 0;
202  int iSrc = 0;
203  while (iSrc < numConstraints)
204  {
205  const btBatchedConstraintInfo& srcConInfo = outConInfos[iSrc];
206  btBatchedConstraintInfo& conInfo = outConInfos[iDest];
207  conInfo.constraintIndex = iSrc;
208  conInfo.bodyIds[0] = srcConInfo.bodyIds[0];
209  conInfo.bodyIds[1] = srcConInfo.bodyIds[1];
210  while (iSrc < numConstraints && outConInfos[iSrc].bodyIds[0] == srcConInfo.bodyIds[0] && outConInfos[iSrc].bodyIds[1] == srcConInfo.bodyIds[1])
211  {
212  ++iSrc;
213  }
214  conInfo.numConstraintRows = iSrc - conInfo.constraintIndex;
215  ++iDest;
216  }
217  return iDest;
218 }
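// Editorial illustration (not part of the original source): btSolverConstraint rows that
// come from the same contact manifold share the same body pair and sit consecutively in
// the constraint array. If, for example, rows 4..9 all join bodies (3,7), the encoding
// above collapses those six source entries into a single btBatchedConstraintInfo with
//   bodyIds = {3,7}, constraintIndex = 4, numConstraintRows = 6
// so batch assignment below works per body pair; expandConstraintRows*() later copies the
// chosen batch id back onto each of the individual rows.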
219 
220 
221 struct ReadSolverConstraintsLoop : public btIParallelForBody
222 {
223  btBatchedConstraintInfo* m_outConInfos;
224  btConstraintArray* m_constraints;
225 
226  ReadSolverConstraintsLoop( btBatchedConstraintInfo* outConInfos, btConstraintArray* constraints )
227  {
228  m_outConInfos = outConInfos;
229  m_constraints = constraints;
230  }
231  void forLoop( int iBegin, int iEnd ) const BT_OVERRIDE
232  {
233  for (int i = iBegin; i < iEnd; ++i)
234  {
235  btBatchedConstraintInfo& conInfo = m_outConInfos[i];
236  const btSolverConstraint& con = m_constraints->at( i );
237  conInfo.bodyIds[0] = con.m_solverBodyIdA;
238  conInfo.bodyIds[1] = con.m_solverBodyIdB;
239  conInfo.constraintIndex = i;
240  conInfo.numConstraintRows = 1;
241  }
242  }
243 };
244 
245 
246 static int initBatchedConstraintInfo(btBatchedConstraintInfo* outConInfos, btConstraintArray* constraints)
247 {
248  BT_PROFILE("initBatchedConstraintInfo");
249  int numConstraints = constraints->size();
250  bool inParallel = true;
251  if (inParallel)
252  {
253  ReadSolverConstraintsLoop loop(outConInfos, constraints);
254  int grainSize = 1200;
255  btParallelFor(0, numConstraints, grainSize, loop);
256  }
257  else
258  {
259  for (int i = 0; i < numConstraints; ++i)
260  {
261  btBatchedConstraintInfo& conInfo = outConInfos[i];
262  const btSolverConstraint& con = constraints->at( i );
263  conInfo.bodyIds[0] = con.m_solverBodyIdA;
264  conInfo.bodyIds[1] = con.m_solverBodyIdB;
265  conInfo.constraintIndex = i;
266  conInfo.numConstraintRows = 1;
267  }
268  }
269  bool useRunLengthEncoding = true;
270  if (useRunLengthEncoding)
271  {
272  numConstraints = runLengthEncodeConstraintInfo(outConInfos, numConstraints);
273  }
274  return numConstraints;
275 }
276 
277 
278 static void expandConstraintRowsInPlace(int* constraintBatchIds, const btBatchedConstraintInfo* conInfos, int numConstraints, int numConstraintRows)
279 {
280  BT_PROFILE("expandConstraintRowsInPlace");
281  if (numConstraintRows > numConstraints)
282  {
283  // we walk the array in reverse to avoid overwriting
284  for (int iCon = numConstraints - 1; iCon >= 0; --iCon)
285  {
286  const btBatchedConstraintInfo& conInfo = conInfos[iCon];
287  int iBatch = constraintBatchIds[iCon];
288  for (int i = conInfo.numConstraintRows - 1; i >= 0; --i)
289  {
290  int iDest = conInfo.constraintIndex + i;
291  btAssert(iDest >= iCon);
292  btAssert(iDest >= 0 && iDest < numConstraintRows);
293  constraintBatchIds[iDest] = iBatch;
294  }
295  }
296  }
297 }
298 
299 
300 static void expandConstraintRows(int* destConstraintBatchIds, const int* srcConstraintBatchIds, const btBatchedConstraintInfo* conInfos, int numConstraints, int numConstraintRows)
301 {
302  BT_PROFILE("expandConstraintRows");
303  for ( int iCon = 0; iCon < numConstraints; ++iCon )
304  {
305  const btBatchedConstraintInfo& conInfo = conInfos[ iCon ];
306  int iBatch = srcConstraintBatchIds[ iCon ];
307  for ( int i = 0; i < conInfo.numConstraintRows; ++i )
308  {
309  int iDest = conInfo.constraintIndex + i;
310  btAssert( iDest >= iCon );
311  btAssert( iDest >= 0 && iDest < numConstraintRows );
312  destConstraintBatchIds[ iDest ] = iBatch;
313  }
314  }
315 }
316 
317 
318 struct ExpandConstraintRowsLoop : public btIParallelForBody
319 {
320  int* m_destConstraintBatchIds;
321  const int* m_srcConstraintBatchIds;
322  const btBatchedConstraintInfo* m_conInfos;
323  int m_numConstraintRows;
324 
325  ExpandConstraintRowsLoop( int* destConstraintBatchIds, const int* srcConstraintBatchIds, const btBatchedConstraintInfo* conInfos, int numConstraintRows)
326  {
327  m_destConstraintBatchIds = destConstraintBatchIds;
328  m_srcConstraintBatchIds = srcConstraintBatchIds;
329  m_conInfos = conInfos;
330  m_numConstraintRows = numConstraintRows;
331  }
332  void forLoop( int iBegin, int iEnd ) const BT_OVERRIDE
333  {
334  expandConstraintRows(m_destConstraintBatchIds, m_srcConstraintBatchIds + iBegin, m_conInfos + iBegin, iEnd - iBegin, m_numConstraintRows);
335  }
336 };
337 
338 
339 static void expandConstraintRowsMt(int* destConstraintBatchIds, const int* srcConstraintBatchIds, const btBatchedConstraintInfo* conInfos, int numConstraints, int numConstraintRows)
340 {
341  BT_PROFILE("expandConstraintRowsMt");
342  ExpandConstraintRowsLoop loop(destConstraintBatchIds, srcConstraintBatchIds, conInfos, numConstraintRows);
343  int grainSize = 600;
344  btParallelFor(0, numConstraints, grainSize, loop);
345 }
346 
347 
348 static void initBatchedConstraintInfoArray(btAlignedObjectArray<btBatchedConstraintInfo>* outConInfos, btConstraintArray* constraints)
349 {
350  BT_PROFILE("initBatchedConstraintInfoArray");
351  btAlignedObjectArray<btBatchedConstraintInfo>& conInfos = *outConInfos;
352  int numConstraints = constraints->size();
353  conInfos.resizeNoInitialize(numConstraints);
354 
355  int newSize = initBatchedConstraintInfo(&outConInfos->at(0), constraints);
356  conInfos.resizeNoInitialize(newSize);
357 }
358 
359 
360 static void mergeSmallBatches(btBatchInfo* batches, int iBeginBatch, int iEndBatch, int minBatchSize, int maxBatchSize)
361 {
362  BT_PROFILE("mergeSmallBatches");
363  for ( int iBatch = iEndBatch - 1; iBatch >= iBeginBatch; --iBatch )
364  {
365  btBatchInfo& batch = batches[ iBatch ];
366  if ( batch.mergeIndex == kNoMerge && batch.numConstraints > 0 && batch.numConstraints < minBatchSize )
367  {
368  for ( int iDestBatch = iBatch - 1; iDestBatch >= iBeginBatch; --iDestBatch )
369  {
370  btBatchInfo& destBatch = batches[ iDestBatch ];
371  if ( destBatch.mergeIndex == kNoMerge && ( destBatch.numConstraints + batch.numConstraints ) < maxBatchSize )
372  {
373  destBatch.numConstraints += batch.numConstraints;
374  batch.numConstraints = 0;
375  batch.mergeIndex = iDestBatch;
376  break;
377  }
378  }
379  }
380  }
381  // flatten mergeIndexes
382  // e.g. in case where A was merged into B and then B was merged into C, we need A to point to C instead of B
383  // Note: loop goes forward through batches because batches always merge from higher indexes to lower,
384  // so by going from low to high it reduces the amount of trail-following
385  for ( int iBatch = iBeginBatch; iBatch < iEndBatch; ++iBatch )
386  {
387  btBatchInfo& batch = batches[ iBatch ];
388  if ( batch.mergeIndex != kNoMerge )
389  {
390  int iMergeDest = batches[ batch.mergeIndex ].mergeIndex;
391  // follow trail of merges to the end
392  while ( iMergeDest != kNoMerge )
393  {
394  int iNext = batches[ iMergeDest ].mergeIndex;
395  if ( iNext == kNoMerge )
396  {
397  batch.mergeIndex = iMergeDest;
398  break;
399  }
400  iMergeDest = iNext;
401  }
402  }
403  }
404 }
405 
406 
407 static void updateConstraintBatchIdsForMerges(int* constraintBatchIds, int numConstraints, const btBatchInfo* batches, int numBatches)
408 {
409  BT_PROFILE("updateConstraintBatchIdsForMerges");
410  // update batchIds to account for merges
411  for (int i = 0; i < numConstraints; ++i)
412  {
413  int iBatch = constraintBatchIds[i];
414  btAssert(iBatch < numBatches);
415  // if this constraint references a batch that was merged into another batch
416  if (batches[iBatch].mergeIndex != kNoMerge)
417  {
418  // update batchId
419  constraintBatchIds[i] = batches[iBatch].mergeIndex;
420  }
421  }
422 }
423 
424 
425 struct UpdateConstraintBatchIdsForMergesLoop : public btIParallelForBody
426 {
427  int* m_constraintBatchIds;
428  const btBatchInfo* m_batches;
429  int m_numBatches;
430 
431  UpdateConstraintBatchIdsForMergesLoop( int* constraintBatchIds, const btBatchInfo* batches, int numBatches )
432  {
433  m_constraintBatchIds = constraintBatchIds;
434  m_batches = batches;
435  m_numBatches = numBatches;
436  }
437  void forLoop( int iBegin, int iEnd ) const BT_OVERRIDE
438  {
439  BT_PROFILE( "UpdateConstraintBatchIdsForMergesLoop" );
440  updateConstraintBatchIdsForMerges( m_constraintBatchIds + iBegin, iEnd - iBegin, m_batches, m_numBatches );
441  }
442 };
443 
444 
445 static void updateConstraintBatchIdsForMergesMt(int* constraintBatchIds, int numConstraints, const btBatchInfo* batches, int numBatches)
446 {
447  BT_PROFILE( "updateConstraintBatchIdsForMergesMt" );
448  UpdateConstraintBatchIdsForMergesLoop loop(constraintBatchIds, batches, numBatches);
449  int grainSize = 800;
450  btParallelFor(0, numConstraints, grainSize, loop);
451 }
452 
453 
454 bool BatchCompare(const btBatchedConstraints::Range& a, const btBatchedConstraints::Range& b)
455 {
456  int lenA = a.end - a.begin;
457  int lenB = b.end - b.begin;
458  return lenA > lenB;
459 }
460 
461 
462 static void writeOutConstraintIndicesForRangeOfBatches(btBatchedConstraints* bc,
463  const int* constraintBatchIds,
464  int numConstraints,
465  int* constraintIdPerBatch,
466  int batchBegin,
467  int batchEnd
468  )
469 {
470  BT_PROFILE("writeOutConstraintIndicesForRangeOfBatches");
471  for ( int iCon = 0; iCon < numConstraints; ++iCon )
472  {
473  int iBatch = constraintBatchIds[ iCon ];
474  if (iBatch >= batchBegin && iBatch < batchEnd)
475  {
476  int iDestCon = constraintIdPerBatch[ iBatch ];
477  constraintIdPerBatch[ iBatch ] = iDestCon + 1;
478  bc->m_constraintIndices[ iDestCon ] = iCon;
479  }
480  }
481 }
482 
483 
484 struct WriteOutConstraintIndicesLoop : public btIParallelForBody
485 {
486  btBatchedConstraints* m_batchedConstraints;
487  const int* m_constraintBatchIds;
488  int m_numConstraints;
489  int* m_constraintIdPerBatch;
490  int m_maxNumBatchesPerPhase;
491 
492  WriteOutConstraintIndicesLoop( btBatchedConstraints* bc, const int* constraintBatchIds, int numConstraints, int* constraintIdPerBatch, int maxNumBatchesPerPhase )
493  {
494  m_batchedConstraints = bc;
495  m_constraintBatchIds = constraintBatchIds;
496  m_numConstraints = numConstraints;
497  m_constraintIdPerBatch = constraintIdPerBatch;
498  m_maxNumBatchesPerPhase = maxNumBatchesPerPhase;
499  }
500  void forLoop( int iBegin, int iEnd ) const BT_OVERRIDE
501  {
502  BT_PROFILE( "WriteOutConstraintIndicesLoop" );
503  int batchBegin = iBegin * m_maxNumBatchesPerPhase;
504  int batchEnd = iEnd * m_maxNumBatchesPerPhase;
505  writeOutConstraintIndicesForRangeOfBatches(m_batchedConstraints,
506  m_constraintBatchIds,
507  m_numConstraints,
508  m_constraintIdPerBatch,
509  batchBegin,
510  batchEnd
511  );
512  }
513 };
514 
515 
516 static void writeOutConstraintIndicesMt( btBatchedConstraints* bc,
517  const int* constraintBatchIds,
518  int numConstraints,
519  int* constraintIdPerBatch,
520  int maxNumBatchesPerPhase,
521  int numPhases
522  )
523 {
524  BT_PROFILE("writeOutConstraintIndicesMt");
525  bool inParallel = true;
526  if (inParallel)
527  {
528  WriteOutConstraintIndicesLoop loop( bc, constraintBatchIds, numConstraints, constraintIdPerBatch, maxNumBatchesPerPhase );
529  btParallelFor( 0, numPhases, 1, loop );
530  }
531  else
532  {
533  for ( int iCon = 0; iCon < numConstraints; ++iCon )
534  {
535  int iBatch = constraintBatchIds[ iCon ];
536  int iDestCon = constraintIdPerBatch[ iBatch ];
537  constraintIdPerBatch[ iBatch ] = iDestCon + 1;
538  bc->m_constraintIndices[ iDestCon ] = iCon;
539  }
540  }
541 }
542 
543 
544 static void writeGrainSizes(btBatchedConstraints* bc)
545 {
546  typedef btBatchedConstraints::Range Range;
547  int numPhases = bc->m_phases.size();
548  bc->m_phaseGrainSize.resizeNoInitialize(numPhases);
549  int numThreads = btGetTaskScheduler()->getNumThreads();
550  for (int iPhase = 0; iPhase < numPhases; ++iPhase)
551  {
552  const Range& phase = bc->m_phases[ iPhase ];
553  int numBatches = phase.end - phase.begin;
554  float grainSize = floor((0.25f*numBatches / float(numThreads)) + 0.0f);
555  bc->m_phaseGrainSize[ iPhase ] = btMax(1, int(grainSize));
556  }
557 }
558 
559 
560 static void writeOutBatches( btBatchedConstraints* bc,
561  const int* constraintBatchIds,
562  int numConstraints,
563  const btBatchInfo* batches,
564  int* batchWork,
565  int maxNumBatchesPerPhase,
566  int numPhases
567 )
568 {
569  BT_PROFILE("writeOutBatches");
570  typedef btBatchedConstraints::Range Range;
571  bc->m_constraintIndices.reserve( numConstraints );
572  bc->m_batches.resizeNoInitialize( 0 );
573  bc->m_phases.resizeNoInitialize( 0 );
574 
575  //int maxNumBatches = numPhases * maxNumBatchesPerPhase;
576  {
577  int* constraintIdPerBatch = batchWork; // for each batch, keep an index into the next available slot in the m_constraintIndices array
578  int iConstraint = 0;
579  for (int iPhase = 0; iPhase < numPhases; ++iPhase)
580  {
581  int curPhaseBegin = bc->m_batches.size();
582  int iBegin = iPhase * maxNumBatchesPerPhase;
583  int iEnd = iBegin + maxNumBatchesPerPhase;
584  for ( int i = iBegin; i < iEnd; ++i )
585  {
586  const btBatchInfo& batch = batches[ i ];
587  int curBatchBegin = iConstraint;
588  constraintIdPerBatch[ i ] = curBatchBegin; // record the start of each batch in m_constraintIndices array
589  int numConstraints = batch.numConstraints;
590  iConstraint += numConstraints;
591  if ( numConstraints > 0 )
592  {
593  bc->m_batches.push_back( Range( curBatchBegin, iConstraint ) );
594  }
595  }
596  // if any batches were emitted this phase,
597  if ( bc->m_batches.size() > curPhaseBegin )
598  {
599  // output phase
600  bc->m_phases.push_back( Range( curPhaseBegin, bc->m_batches.size() ) );
601  }
602  }
603 
604  btAssert(iConstraint == numConstraints);
605  bc->m_constraintIndices.resizeNoInitialize( numConstraints );
606  writeOutConstraintIndicesMt( bc, constraintBatchIds, numConstraints, constraintIdPerBatch, maxNumBatchesPerPhase, numPhases );
607  }
608  // for each phase
609  for (int iPhase = 0; iPhase < bc->m_phases.size(); ++iPhase)
610  {
611  // sort the batches from largest to smallest (can be helpful to some task schedulers)
612  const Range& curBatches = bc->m_phases[iPhase];
613  bc->m_batches.quickSortInternal(BatchCompare, curBatches.begin, curBatches.end-1);
614  }
615  bc->m_phaseOrder.resize(bc->m_phases.size());
616  for (int i = 0; i < bc->m_phases.size(); ++i)
617  {
618  bc->m_phaseOrder[i] = i;
619  }
620  writeGrainSizes(bc);
621 }
622 
623 
624 //
625 // PreallocatedMemoryHelper -- helper object for allocating a number of chunks of memory in a single contiguous block.
626 // It is generally more efficient to do a single larger allocation than many smaller allocations.
627 //
628 // Example Usage:
629 //
630 // btVector3* bodyPositions = NULL;
631 // btBatchedConstraintInfo* conInfos = NULL;
632 // {
633 // PreallocatedMemoryHelper<8> memHelper;
634 // memHelper.addChunk( (void**) &bodyPositions, sizeof( btVector3 ) * bodies.size() );
635 // memHelper.addChunk( (void**) &conInfos, sizeof( btBatchedConstraintInfo ) * numConstraints );
636 // void* memPtr = malloc( memHelper.getSizeToAllocate() ); // allocate the memory
637 // memHelper.setChunkPointers( memPtr ); // update pointers to chunks
638 // }
639 template <int N>
640 class PreallocatedMemoryHelper
641 {
642  struct Chunk
643  {
644  void** ptr;
645  size_t size;
646  };
647  Chunk m_chunks[N];
648  int m_numChunks;
649 public:
650  PreallocatedMemoryHelper() {m_numChunks=0;}
651  void addChunk( void** ptr, size_t sz )
652  {
653  btAssert( m_numChunks < N );
654  if ( m_numChunks < N )
655  {
656  Chunk& chunk = m_chunks[ m_numChunks ];
657  chunk.ptr = ptr;
658  chunk.size = sz;
659  m_numChunks++;
660  }
661  }
662  size_t getSizeToAllocate() const
663  {
664  size_t totalSize = 0;
665  for (int i = 0; i < m_numChunks; ++i)
666  {
667  totalSize += m_chunks[i].size;
668  }
669  return totalSize;
670  }
671  void setChunkPointers(void* mem) const
672  {
673  size_t totalSize = 0;
674  for (int i = 0; i < m_numChunks; ++i)
675  {
676  const Chunk& chunk = m_chunks[ i ];
677  char* chunkPtr = static_cast<char*>(mem) + totalSize;
678  *chunk.ptr = chunkPtr;
679  totalSize += chunk.size;
680  }
681  }
682 };
683 
684 
685 
686 static btVector3 findMaxDynamicConstraintExtent(
687  btVector3* bodyPositions,
688  bool* bodyDynamicFlags,
689  btBatchedConstraintInfo* conInfos,
690  int numConstraints,
691  int numBodies
692  )
693 {
694  BT_PROFILE("findMaxDynamicConstraintExtent");
695  btVector3 consExtent = btVector3(1,1,1) * 0.001;
696  for (int iCon = 0; iCon < numConstraints; ++iCon)
697  {
698  const btBatchedConstraintInfo& con = conInfos[ iCon ];
699  int iBody0 = con.bodyIds[0];
700  int iBody1 = con.bodyIds[1];
701  btAssert(iBody0 >= 0 && iBody0 < numBodies);
702  btAssert(iBody1 >= 0 && iBody1 < numBodies);
703  // is it a dynamic constraint?
704  if (bodyDynamicFlags[iBody0] && bodyDynamicFlags[iBody1])
705  {
706  btVector3 delta = bodyPositions[iBody1] - bodyPositions[iBody0];
707  consExtent.setMax(delta.absolute());
708  }
709  }
710  return consExtent;
711 }
712 
713 
714 struct btIntVec3
715 {
716  int m_ints[ 3 ];
717 
718  SIMD_FORCE_INLINE const int& operator[](int i) const {return m_ints[i];}
719  SIMD_FORCE_INLINE int& operator[](int i) {return m_ints[i];}
720 };
721 
722 
723 struct AssignConstraintsToGridBatchesParams
724 {
725  const bool* bodyDynamicFlags;
726  const btIntVec3* bodyGridCoords;
727  int numBodies;
728  const btBatchedConstraintInfo* conInfos;
729  int* constraintBatchIds;
730  btIntVec3 gridChunkDim;
731  int maxNumBatchesPerPhase;
732  int numPhases;
733  int phaseMask;
734 
735  AssignConstraintsToGridBatchesParams()
736  {
737  memset(this, 0, sizeof(*this));
738  }
739 };
740 
741 
742 static void assignConstraintsToGridBatches(const AssignConstraintsToGridBatchesParams& params, int iConBegin, int iConEnd)
743 {
744  BT_PROFILE("assignConstraintsToGridBatches");
745  // (can be done in parallel)
746  for ( int iCon = iConBegin; iCon < iConEnd; ++iCon )
747  {
748  const btBatchedConstraintInfo& con = params.conInfos[ iCon ];
749  int iBody0 = con.bodyIds[ 0 ];
750  int iBody1 = con.bodyIds[ 1 ];
751  int iPhase = iCon; //iBody0; // pseudorandom choice to distribute evenly amongst phases
752  iPhase &= params.phaseMask;
753  int gridCoord[ 3 ];
754  // is it a dynamic constraint?
755  if ( params.bodyDynamicFlags[ iBody0 ] && params.bodyDynamicFlags[ iBody1 ] )
756  {
757  const btIntVec3& body0Coords = params.bodyGridCoords[iBody0];
758  const btIntVec3& body1Coords = params.bodyGridCoords[iBody1];
759  // for each dimension x,y,z,
760  for (int i = 0; i < 3; ++i)
761  {
762  int coordMin = btMin(body0Coords.m_ints[i], body1Coords.m_ints[i]);
763  int coordMax = btMax(body0Coords.m_ints[i], body1Coords.m_ints[i]);
764  if (coordMin != coordMax)
765  {
766  btAssert( coordMax == coordMin + 1 );
767  if ((coordMin&1) == 0)
768  {
769  iPhase &= ~(1 << i); // force bit off
770  }
771  else
772  {
773  iPhase |= (1 << i); // force bit on
774  iPhase &= params.phaseMask;
775  }
776  }
777  gridCoord[ i ] = coordMin;
778  }
779  }
780  else
781  {
782  if ( !params.bodyDynamicFlags[ iBody0 ] )
783  {
784  iBody0 = con.bodyIds[ 1 ];
785  }
786  btAssert(params.bodyDynamicFlags[ iBody0 ]);
787  const btIntVec3& body0Coords = params.bodyGridCoords[iBody0];
788  // for each dimension x,y,z,
789  for ( int i = 0; i < 3; ++i )
790  {
791  gridCoord[ i ] = body0Coords.m_ints[ i ];
792  }
793  }
794  // calculate chunk coordinates
795  int chunkCoord[ 3 ];
796  btIntVec3 gridChunkDim = params.gridChunkDim;
797  // for each dimension x,y,z,
798  for ( int i = 0; i < 3; ++i )
799  {
800  int coordOffset = ( iPhase >> i ) & 1;
801  chunkCoord[ i ] = (gridCoord[ i ] - coordOffset)/2;
802  btClamp( chunkCoord[ i ], 0, gridChunkDim[ i ] - 1);
803  btAssert( chunkCoord[ i ] < gridChunkDim[ i ] );
804  }
805  int iBatch = iPhase * params.maxNumBatchesPerPhase + chunkCoord[ 0 ] + chunkCoord[ 1 ] * gridChunkDim[ 0 ] + chunkCoord[ 2 ] * gridChunkDim[ 0 ] * gridChunkDim[ 1 ];
806  btAssert(iBatch >= 0 && iBatch < params.maxNumBatchesPerPhase*params.numPhases);
807  params.constraintBatchIds[ iCon ] = iBatch;
808  }
809 }
810 
811 
812 struct AssignConstraintsToGridBatchesLoop : public btIParallelForBody
813 {
814  const AssignConstraintsToGridBatchesParams* m_params;
815 
816  AssignConstraintsToGridBatchesLoop( const AssignConstraintsToGridBatchesParams& params )
817  {
818  m_params = &params;
819  }
820  void forLoop( int iBegin, int iEnd ) const BT_OVERRIDE
821  {
822  assignConstraintsToGridBatches(*m_params, iBegin, iEnd);
823  }
824 };
825 
826 
827 //
828 // setupSpatialGridBatchesMt -- generate batches using a uniform 3D grid
829 //
830 /*
831 
832 Bodies are treated as 3D points at their center of mass. We only consider dynamic bodies at this stage,
833 because only dynamic bodies are mutated when a constraint is solved, thus subject to race conditions.
834 
835 1. Compute a bounding box around all dynamic bodies
836 2. Compute the maximum extent of all dynamic constraints. Each dynamic constraint is treated as a line segment, and we need the size of
837  box that will fully enclose any single dynamic constraint
838 
839 3. Establish the cell size of our grid, the cell size in each dimension must be at least as large as the dynamic constraints max-extent,
840  so that no dynamic constraint can span more than 2 cells of our grid on any axis of the grid. The cell size should be adjusted
841  larger in order to keep the total number of cells from being excessively high
842 
843 Key idea: Given that each constraint spans 1 or 2 grid cells in each dimension, we can handle all constraints by processing
844  in chunks of 2x2x2 cells with 8 different 1-cell offsets ((0,0,0),(0,0,1),(0,1,0),(0,1,1),(1,0,0)...).
845  For each of the 8 offsets, we create a phase, and each 2x2x2 chunk that contains dynamic constraints becomes a batch in that phase.
846 
847 4. Once the grid is established, we can calculate for each constraint which phase and batch it belongs in.
848 
849 5. Merge small batches within each phase separately, to try to even out the sizes of batches
850 
851 Optionally, we can "collapse" one dimension of our 3D grid to turn it into a 2D grid, which reduces the number of phases
852 to 4. With fewer phases, there are more constraints per phase and this makes it easier to create batches of a useful size.
853 */
854 //
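// Editorial worked example (not part of the original source), using the cell/phase mapping
// implemented in assignConstraintsToGridBatches() below: suppose a constraint joins two
// dynamic bodies whose grid cells are (4,2,7) and (5,2,7). The cells differ only on x with
// coordMin = 4 (even), so phase bit 0 is forced off, which makes the 2-cell chunk along x
// cover cells 4..5; the y and z phase bits keep the pseudorandom value taken from the
// constraint index (masked by phaseMask). If the resulting phase is 0b100, the per-axis
// chunk offsets are (0,0,1) and
//   chunkCoord = ( (4-0)/2, (2-0)/2, (7-1)/2 ) = (2, 1, 3)
//   iBatch = 4*maxNumBatchesPerPhase + 2 + 1*gridChunkDim[0] + 3*gridChunkDim[0]*gridChunkDim[1]
// Since each cell belongs to exactly one 2x2x2 chunk for a given phase offset, any two
// constraints that share a dynamic body and land in the same phase land in the same batch,
// which is what keeps the batches of a phase free of data races.
//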
855 static void setupSpatialGridBatchesMt(
856  btBatchedConstraints* batchedConstraints,
857  btAlignedObjectArray<char>* scratchMemory,
858  btConstraintArray* constraints,
859  const btAlignedObjectArray<btSolverBody>& bodies,
860  int minBatchSize,
861  int maxBatchSize,
862  bool use2DGrid
863 )
864 {
865  BT_PROFILE("setupSpatialGridBatchesMt");
866  const int numPhases = 8;
867  int numConstraints = constraints->size();
868  int numConstraintRows = constraints->size();
869 
870  const int maxGridChunkCount = 128;
871  int allocNumBatchesPerPhase = maxGridChunkCount;
872  int minNumBatchesPerPhase = 16;
873  int allocNumBatches = allocNumBatchesPerPhase * numPhases;
874 
875  btVector3* bodyPositions = NULL;
876  bool* bodyDynamicFlags = NULL;
877  btIntVec3* bodyGridCoords = NULL;
878  btBatchInfo* batches = NULL;
879  int* batchWork = NULL;
880  btBatchedConstraintInfo* conInfos = NULL;
881  int* constraintBatchIds = NULL;
882  int* constraintRowBatchIds = NULL;
883  {
884  PreallocatedMemoryHelper<10> memHelper;
885  memHelper.addChunk( (void**) &bodyPositions, sizeof( btVector3 ) * bodies.size() );
886  memHelper.addChunk( (void**) &bodyDynamicFlags, sizeof( bool ) * bodies.size() );
887  memHelper.addChunk( (void**) &bodyGridCoords, sizeof( btIntVec3 ) * bodies.size() );
888  memHelper.addChunk( (void**) &batches, sizeof( btBatchInfo )* allocNumBatches );
889  memHelper.addChunk( (void**) &batchWork, sizeof( int )* allocNumBatches );
890  memHelper.addChunk( (void**) &conInfos, sizeof( btBatchedConstraintInfo ) * numConstraints );
891  memHelper.addChunk( (void**) &constraintBatchIds, sizeof( int ) * numConstraints );
892  memHelper.addChunk( (void**) &constraintRowBatchIds, sizeof( int ) * numConstraintRows );
893  size_t scratchSize = memHelper.getSizeToAllocate();
894  // if we need to reallocate
895  if (scratchMemory->capacity() < scratchSize)
896  {
897  // allocate 6.25% extra to avoid repeated reallocs
898  scratchMemory->reserve( scratchSize + scratchSize/16 );
899  }
900  scratchMemory->resizeNoInitialize( scratchSize );
901  char* memPtr = &scratchMemory->at(0);
902  memHelper.setChunkPointers( memPtr );
903  }
904 
905  numConstraints = initBatchedConstraintInfo(conInfos, constraints);
906 
907  // compute bounding box around all dynamic bodies
908  // (could be done in parallel)
909  btVector3 bboxMin(BT_LARGE_FLOAT, BT_LARGE_FLOAT, BT_LARGE_FLOAT);
910  btVector3 bboxMax = -bboxMin;
911  //int dynamicBodyCount = 0;
912  for (int i = 0; i < bodies.size(); ++i)
913  {
914  const btSolverBody& body = bodies[i];
915  btVector3 bodyPos = body.getWorldTransform().getOrigin();
916  bool isDynamic = ( body.internalGetInvMass().x() > btScalar( 0 ) );
917  bodyPositions[i] = bodyPos;
918  bodyDynamicFlags[i] = isDynamic;
919  if (isDynamic)
920  {
921  //dynamicBodyCount++;
922  bboxMin.setMin(bodyPos);
923  bboxMax.setMax(bodyPos);
924  }
925  }
926 
927  // find max extent of all dynamic constraints
928  // (could be done in parallel)
929  btVector3 consExtent = findMaxDynamicConstraintExtent(bodyPositions, bodyDynamicFlags, conInfos, numConstraints, bodies.size());
930 
931  btVector3 gridExtent = bboxMax - bboxMin;
932 
933  btVector3 gridCellSize = consExtent;
934  int gridDim[3];
935  gridDim[ 0 ] = int( 1.0 + gridExtent.x() / gridCellSize.x() );
936  gridDim[ 1 ] = int( 1.0 + gridExtent.y() / gridCellSize.y() );
937  gridDim[ 2 ] = int( 1.0 + gridExtent.z() / gridCellSize.z() );
938 
939  // if we can collapse an axis, it will cut our number of phases in half which could be more efficient
940  int phaseMask = 7;
941  bool collapseAxis = use2DGrid;
942  if ( collapseAxis )
943  {
944  // pick the smallest axis to collapse, leaving us with the greatest number of cells in our grid
945  int iAxisToCollapse = 0;
946  int axisDim = gridDim[iAxisToCollapse];
947  //for each dimension
948  for ( int i = 0; i < 3; ++i )
949  {
950  if (gridDim[i] < axisDim)
951  {
952  iAxisToCollapse = i;
953  axisDim = gridDim[i];
954  }
955  }
956  // collapse it
957  gridCellSize[iAxisToCollapse] = gridExtent[iAxisToCollapse] * 2.0f;
958  phaseMask &= ~(1 << iAxisToCollapse);
959  }
960 
961  int numGridChunks = 0;
962  btIntVec3 gridChunkDim; // each chunk is 2x2x2 group of cells
963  while (true)
964  {
965  gridDim[0] = int( 1.0 + gridExtent.x() / gridCellSize.x() );
966  gridDim[1] = int( 1.0 + gridExtent.y() / gridCellSize.y() );
967  gridDim[2] = int( 1.0 + gridExtent.z() / gridCellSize.z() );
968  gridChunkDim[ 0 ] = btMax( 1, ( gridDim[ 0 ] + 0 ) / 2 );
969  gridChunkDim[ 1 ] = btMax( 1, ( gridDim[ 1 ] + 0 ) / 2 );
970  gridChunkDim[ 2 ] = btMax( 1, ( gridDim[ 2 ] + 0 ) / 2 );
971  numGridChunks = gridChunkDim[ 0 ] * gridChunkDim[ 1 ] * gridChunkDim[ 2 ];
972  float nChunks = float(gridChunkDim[0]) * float(gridChunkDim[1]) * float(gridChunkDim[2]); // susceptible to integer overflow
973  if ( numGridChunks <= maxGridChunkCount && nChunks <= maxGridChunkCount )
974  {
975  break;
976  }
977  gridCellSize *= 1.25; // should roughly cut numCells in half
978  }
979  btAssert(numGridChunks <= maxGridChunkCount );
980  int maxNumBatchesPerPhase = numGridChunks;
981 
982  // for each dynamic body, compute grid coords
983  btVector3 invGridCellSize = btVector3(1,1,1)/gridCellSize;
984  // (can be done in parallel)
985  for (int iBody = 0; iBody < bodies.size(); ++iBody)
986  {
987  btIntVec3& coords = bodyGridCoords[iBody];
988  if (bodyDynamicFlags[iBody])
989  {
990  btVector3 v = ( bodyPositions[ iBody ] - bboxMin )*invGridCellSize;
991  coords.m_ints[0] = int(v.x());
992  coords.m_ints[1] = int(v.y());
993  coords.m_ints[2] = int(v.z());
994  btAssert(coords.m_ints[0] >= 0 && coords.m_ints[0] < gridDim[0]);
995  btAssert(coords.m_ints[1] >= 0 && coords.m_ints[1] < gridDim[1]);
996  btAssert(coords.m_ints[2] >= 0 && coords.m_ints[2] < gridDim[2]);
997  }
998  else
999  {
1000  coords.m_ints[0] = -1;
1001  coords.m_ints[1] = -1;
1002  coords.m_ints[2] = -1;
1003  }
1004  }
1005 
1006  for (int iPhase = 0; iPhase < numPhases; ++iPhase)
1007  {
1008  int batchBegin = iPhase * maxNumBatchesPerPhase;
1009  int batchEnd = batchBegin + maxNumBatchesPerPhase;
1010  for ( int iBatch = batchBegin; iBatch < batchEnd; ++iBatch )
1011  {
1012  btBatchInfo& batch = batches[ iBatch ];
1013  batch = btBatchInfo();
1014  }
1015  }
1016 
1017  {
1018  AssignConstraintsToGridBatchesParams params;
1019  params.bodyDynamicFlags = bodyDynamicFlags;
1020  params.bodyGridCoords = bodyGridCoords;
1021  params.numBodies = bodies.size();
1022  params.conInfos = conInfos;
1023  params.constraintBatchIds = constraintBatchIds;
1024  params.gridChunkDim = gridChunkDim;
1025  params.maxNumBatchesPerPhase = maxNumBatchesPerPhase;
1026  params.numPhases = numPhases;
1027  params.phaseMask = phaseMask;
1028  bool inParallel = true;
1029  if (inParallel)
1030  {
1031  AssignConstraintsToGridBatchesLoop loop(params);
1032  int grainSize = 250;
1033  btParallelFor(0, numConstraints, grainSize, loop);
1034  }
1035  else
1036  {
1037  assignConstraintsToGridBatches( params, 0, numConstraints );
1038  }
1039  }
1040  for ( int iCon = 0; iCon < numConstraints; ++iCon )
1041  {
1042  const btBatchedConstraintInfo& con = conInfos[ iCon ];
1043  int iBatch = constraintBatchIds[ iCon ];
1044  btBatchInfo& batch = batches[iBatch];
1045  batch.numConstraints += con.numConstraintRows;
1046  }
1047 
1048  for (int iPhase = 0; iPhase < numPhases; ++iPhase)
1049  {
1050  // if phase is legit,
1051  if (iPhase == (iPhase&phaseMask))
1052  {
1053  int iBeginBatch = iPhase * maxNumBatchesPerPhase;
1054  int iEndBatch = iBeginBatch + maxNumBatchesPerPhase;
1055  mergeSmallBatches( batches, iBeginBatch, iEndBatch, minBatchSize, maxBatchSize );
1056  }
1057  }
1058  // all constraints have been assigned a batchId
1059  updateConstraintBatchIdsForMergesMt(constraintBatchIds, numConstraints, batches, maxNumBatchesPerPhase*numPhases);
1060 
1061  if (numConstraintRows > numConstraints)
1062  {
1063  expandConstraintRowsMt(&constraintRowBatchIds[0], &constraintBatchIds[0], &conInfos[0], numConstraints, numConstraintRows);
1064  }
1065  else
1066  {
1067  constraintRowBatchIds = constraintBatchIds;
1068  }
1069 
1070  writeOutBatches(batchedConstraints, constraintRowBatchIds, numConstraintRows, batches, batchWork, maxNumBatchesPerPhase, numPhases);
1071  btAssert(batchedConstraints->validate(constraints, bodies));
1072 }
1073 
1074 
1075 static void setupSingleBatch(
1076  btBatchedConstraints* bc,
1077  int numConstraints
1078 )
1079 {
1080  BT_PROFILE("setupSingleBatch");
1081  typedef btBatchedConstraints::Range Range;
1082 
1083  bc->m_constraintIndices.resize( numConstraints );
1084  for ( int i = 0; i < numConstraints; ++i )
1085  {
1086  bc->m_constraintIndices[ i ] = i;
1087  }
1088 
1089  bc->m_batches.resizeNoInitialize( 0 );
1090  bc->m_phases.resizeNoInitialize( 0 );
1091  bc->m_phaseOrder.resizeNoInitialize( 0 );
1092  bc->m_phaseGrainSize.resizeNoInitialize( 0 );
1093 
1094  if (numConstraints > 0)
1095  {
1096  bc->m_batches.push_back( Range( 0, numConstraints ) );
1097  bc->m_phases.push_back( Range( 0, 1 ) );
1098  bc->m_phaseOrder.push_back(0);
1099  bc->m_phaseGrainSize.push_back(1);
1100  }
1101 }
1102 
1103 
1104 void btBatchedConstraints::setup(
1105  btConstraintArray* constraints,
1106  const btAlignedObjectArray<btSolverBody>& bodies,
1107  BatchingMethod batchingMethod,
1108  int minBatchSize,
1109  int maxBatchSize,
1110  btAlignedObjectArray<char>* scratchMemory
1111  )
1112 {
1113  if (constraints->size() >= minBatchSize*4)
1114  {
1115  bool use2DGrid = batchingMethod == BATCHING_METHOD_SPATIAL_GRID_2D;
1116  setupSpatialGridBatchesMt( this, scratchMemory, constraints, bodies, minBatchSize, maxBatchSize, use2DGrid );
1117  if (s_debugDrawBatches)
1118  {
1119  debugDrawAllBatches( this, constraints, bodies );
1120  }
1121  }
1122  else
1123  {
1124  setupSingleBatch( this, constraints->size() );
1125  }
1126 }
1127 
1128 
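//
// Editorial usage sketch (not part of the original file; the caller-side variable names and
// batch-size values are hypothetical): a multithreaded solver would typically keep one
// btBatchedConstraints per constraint group plus a persistent scratch buffer, rebuild the
// batches before running solver iterations, and then walk phases/batches in parallel:
//
//   btBatchedConstraints batched;
//   btAlignedObjectArray<char> scratch; // reused between frames to avoid reallocation
//   batched.setup( &contactConstraints, solverBodies,
//       btBatchedConstraints::BATCHING_METHOD_SPATIAL_GRID_2D,
//       /*minBatchSize=*/ 32, /*maxBatchSize=*/ 250, &scratch );
//   // For each phase (visited in m_phaseOrder), the batches of that phase can be handed to
//   // worker threads with btParallelFor, using m_phaseGrainSize[iPhase] as the grain size;
//   // batches within one phase never share a dynamic body, so they can run concurrently.
//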