
source: code/branches/physics/src/bullet/BulletCollision/BroadphaseCollision/btQuantizedBvh.cpp @ 2305

Last change on this file since 2305 was 2192, checked in by rgrieder, 16 years ago

Reverted all changes of attempt to update physics branch.

  • Property svn:eol-style set to native
File size: 36.3 KB
/*
Bullet Continuous Collision Detection and Physics Library
Copyright (c) 2003-2006 Erwin Coumans  http://continuousphysics.com/Bullet/

This software is provided 'as-is', without any express or implied warranty.
In no event will the authors be held liable for any damages arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it freely,
subject to the following restrictions:

1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
*/

#include "btQuantizedBvh.h"

#include "LinearMath/btAabbUtil2.h"
#include "LinearMath/btIDebugDraw.h"


btQuantizedBvh::btQuantizedBvh() : m_useQuantization(false),
                                        //m_traversalMode(TRAVERSAL_STACKLESS_CACHE_FRIENDLY)
                                        m_traversalMode(TRAVERSAL_STACKLESS)
                                        //m_traversalMode(TRAVERSAL_RECURSIVE)
                                        ,m_subtreeHeaderCount(0) //PCK: add this line
{

}


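///Layout note: the tree is stored as a flat, depth-first array of nodes
///(m_quantizedContiguousNodes when quantization is enabled, m_contiguousNodes otherwise).
///Leaf nodes carry a part id and a triangle index; internal nodes carry an "escape index",
///the total number of nodes in their subtree, so a traversal that does not want to visit
///a subtree can simply skip forward by that amount. This is what makes the stackless
///walks further down possible.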
void btQuantizedBvh::buildInternal()
{
        ///assumes that caller filled in the m_quantizedLeafNodes
        m_useQuantization = true;
        int numLeafNodes = 0;

        if (m_useQuantization)
        {
                //now we have an array of leafnodes in m_leafNodes
                numLeafNodes = m_quantizedLeafNodes.size();

                m_quantizedContiguousNodes.resize(2*numLeafNodes);

        }

        m_curNodeIndex = 0;

        buildTree(0,numLeafNodes);

        ///if the entire tree is smaller than the subtree size, we need to create a header info for the tree
        if(m_useQuantization && !m_SubtreeHeaders.size())
        {
                btBvhSubtreeInfo& subtree = m_SubtreeHeaders.expand();
                subtree.setAabbFromQuantizeNode(m_quantizedContiguousNodes[0]);
                subtree.m_rootNodeIndex = 0;
                subtree.m_subtreeSize = m_quantizedContiguousNodes[0].isLeafNode() ? 1 : m_quantizedContiguousNodes[0].getEscapeIndex();
        }

        //PCK: update the copy of the size
        m_subtreeHeaderCount = m_SubtreeHeaders.size();

        //PCK: clear m_quantizedLeafNodes and m_leafNodes, they are temporary
        m_quantizedLeafNodes.clear();
        m_leafNodes.clear();
}


///just for debugging, to visualize the individual patches/subtrees
#ifdef DEBUG_PATCH_COLORS
btVector3 color[4]=
{
        btVector3(255,0,0),
        btVector3(0,255,0),
        btVector3(0,0,255),
        btVector3(0,255,255)
};
#endif //DEBUG_PATCH_COLORS


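///Quantization overview: after setQuantizationValues() every point inside the (slightly
///enlarged) BVH AABB can be mapped to three 16-bit integers, roughly
///    quantized = (unsigned short)((point - m_bvhAabbMin) * m_bvhQuantization)
///with m_bvhQuantization = 65533 / aabbSize, so the full extent fits into an unsigned short
///with a little headroom. The actual clamping/rounding is done by quantizeWithClamp() and
///unQuantize(), which are not in this file (they live in btQuantizedBvh.h); the formula
///above is a sketch of the mapping, not a copy of that code.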
void    btQuantizedBvh::setQuantizationValues(const btVector3& bvhAabbMin,const btVector3& bvhAabbMax,btScalar quantizationMargin)
{
        //enlarge the AABB to avoid division by zero when initializing the quantization values
        btVector3 clampValue(quantizationMargin,quantizationMargin,quantizationMargin);
        m_bvhAabbMin = bvhAabbMin - clampValue;
        m_bvhAabbMax = bvhAabbMax + clampValue;
        btVector3 aabbSize = m_bvhAabbMax - m_bvhAabbMin;
        m_bvhQuantization = btVector3(btScalar(65533.0),btScalar(65533.0),btScalar(65533.0)) / aabbSize;
        m_useQuantization = true;
}


btQuantizedBvh::~btQuantizedBvh()
{
}

#ifdef DEBUG_TREE_BUILDING
int gStackDepth = 0;
int gMaxStackDepth = 0;
#endif //DEBUG_TREE_BUILDING

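///buildTree() is a classic top-down build over the leaf range [startIndex, endIndex):
///  1. pick the axis along which the leaf AABB centers have the greatest variance,
///  2. partition the leaves around the mean on that axis (falling back to the median
///     when the split is too lopsided, see sortAndCalcSplittingIndex),
///  3. emit an internal node whose AABB is the union of the leaf AABBs in the range,
///  4. recurse into both halves and finally store the escape index (subtree node count).
///Subtrees whose quantized nodes grow beyond MAX_SUBTREE_SIZE_IN_BYTES get their own
///btBvhSubtreeInfo header via updateSubtreeHeaders(), used by the cache-friendly traversal.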
void    btQuantizedBvh::buildTree       (int startIndex,int endIndex)
{
#ifdef DEBUG_TREE_BUILDING
        gStackDepth++;
        if (gStackDepth > gMaxStackDepth)
                gMaxStackDepth = gStackDepth;
#endif //DEBUG_TREE_BUILDING


        int splitAxis, splitIndex, i;
        int numIndices =endIndex-startIndex;
        int curIndex = m_curNodeIndex;

        assert(numIndices>0);

        if (numIndices==1)
        {
#ifdef DEBUG_TREE_BUILDING
                gStackDepth--;
#endif //DEBUG_TREE_BUILDING

                assignInternalNodeFromLeafNode(m_curNodeIndex,startIndex);

                m_curNodeIndex++;
                return;
        }
        //calculate Best Splitting Axis and where to split it. Sort the incoming 'leafNodes' array within range 'startIndex/endIndex'.

        splitAxis = calcSplittingAxis(startIndex,endIndex);

        splitIndex = sortAndCalcSplittingIndex(startIndex,endIndex,splitAxis);

        int internalNodeIndex = m_curNodeIndex;

        setInternalNodeAabbMax(m_curNodeIndex,m_bvhAabbMin);
        setInternalNodeAabbMin(m_curNodeIndex,m_bvhAabbMax);

        for (i=startIndex;i<endIndex;i++)
        {
                mergeInternalNodeAabb(m_curNodeIndex,getAabbMin(i),getAabbMax(i));
        }

        m_curNodeIndex++;


        //internalNode->m_escapeIndex;

        int leftChildNodexIndex = m_curNodeIndex;

        //build left child tree
        buildTree(startIndex,splitIndex);

        int rightChildNodexIndex = m_curNodeIndex;
        //build right child tree
        buildTree(splitIndex,endIndex);

#ifdef DEBUG_TREE_BUILDING
        gStackDepth--;
#endif //DEBUG_TREE_BUILDING

        int escapeIndex = m_curNodeIndex - curIndex;

        if (m_useQuantization)
        {
                //escapeIndex is the number of nodes of this subtree
                const int sizeQuantizedNode =sizeof(btQuantizedBvhNode);
                const int treeSizeInBytes = escapeIndex * sizeQuantizedNode;
                if (treeSizeInBytes > MAX_SUBTREE_SIZE_IN_BYTES)
                {
                        updateSubtreeHeaders(leftChildNodexIndex,rightChildNodexIndex);
                }
        }

        setInternalNodeEscapeIndex(internalNodeIndex,escapeIndex);

}
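///updateSubtreeHeaders() records a btBvhSubtreeInfo (root node index, node count and a
///quantized AABB) for each child subtree that still fits within MAX_SUBTREE_SIZE_IN_BYTES.
///These headers let walkStacklessQuantizedTreeCacheFriendly() cull whole subtrees with a
///single AABB test and keep each walked node range small enough to stay cache resident
///(or to fit in SPU local store).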

void    btQuantizedBvh::updateSubtreeHeaders(int leftChildNodexIndex,int rightChildNodexIndex)
{
        btAssert(m_useQuantization);

        btQuantizedBvhNode& leftChildNode = m_quantizedContiguousNodes[leftChildNodexIndex];
        int leftSubTreeSize = leftChildNode.isLeafNode() ? 1 : leftChildNode.getEscapeIndex();
        int leftSubTreeSizeInBytes =  leftSubTreeSize * static_cast<int>(sizeof(btQuantizedBvhNode));

        btQuantizedBvhNode& rightChildNode = m_quantizedContiguousNodes[rightChildNodexIndex];
        int rightSubTreeSize = rightChildNode.isLeafNode() ? 1 : rightChildNode.getEscapeIndex();
        int rightSubTreeSizeInBytes =  rightSubTreeSize *  static_cast<int>(sizeof(btQuantizedBvhNode));

        if(leftSubTreeSizeInBytes <= MAX_SUBTREE_SIZE_IN_BYTES)
        {
                btBvhSubtreeInfo& subtree = m_SubtreeHeaders.expand();
                subtree.setAabbFromQuantizeNode(leftChildNode);
                subtree.m_rootNodeIndex = leftChildNodexIndex;
                subtree.m_subtreeSize = leftSubTreeSize;
        }

        if(rightSubTreeSizeInBytes <= MAX_SUBTREE_SIZE_IN_BYTES)
        {
                btBvhSubtreeInfo& subtree = m_SubtreeHeaders.expand();
                subtree.setAabbFromQuantizeNode(rightChildNode);
                subtree.m_rootNodeIndex = rightChildNodexIndex;
                subtree.m_subtreeSize = rightSubTreeSize;
        }

        //PCK: update the copy of the size
        m_subtreeHeaderCount = m_SubtreeHeaders.size();
}

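///sortAndCalcSplittingIndex() partitions the leaf range around the mean AABB center on the
///chosen axis with a single pass of swaps. If the resulting split would leave less than
///roughly a third of the leaves on either side, it falls back to the median so the recursion
///depth stays bounded and cannot blow the stack on degenerate input.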
int     btQuantizedBvh::sortAndCalcSplittingIndex(int startIndex,int endIndex,int splitAxis)
{
        int i;
        int splitIndex =startIndex;
        int numIndices = endIndex - startIndex;
        btScalar splitValue;

        btVector3 means(btScalar(0.),btScalar(0.),btScalar(0.));
        for (i=startIndex;i<endIndex;i++)
        {
                btVector3 center = btScalar(0.5)*(getAabbMax(i)+getAabbMin(i));
                means+=center;
        }
        means *= (btScalar(1.)/(btScalar)numIndices);

        splitValue = means[splitAxis];

        //sort leafNodes so all values larger than splitValue come first, and smaller values start from 'splitIndex'.
        for (i=startIndex;i<endIndex;i++)
        {
                btVector3 center = btScalar(0.5)*(getAabbMax(i)+getAabbMin(i));
                if (center[splitAxis] > splitValue)
                {
                        //swap
                        swapLeafNodes(i,splitIndex);
                        splitIndex++;
                }
        }

        //if the splitIndex causes unbalanced trees, fix this by using the center in between startIndex and endIndex
        //otherwise the tree-building might fail due to stack-overflows in certain cases.
        //unbalanced1 is unsafe: it can cause stack overflows
        //bool unbalanced1 = ((splitIndex==startIndex) || (splitIndex == (endIndex-1)));

        //unbalanced2 should work too: always use center (perfect balanced trees)
        //bool unbalanced2 = true;

        //this should be safe too:
        int rangeBalancedIndices = numIndices/3;
        bool unbalanced = ((splitIndex<=(startIndex+rangeBalancedIndices)) || (splitIndex >=(endIndex-1-rangeBalancedIndices)));

        if (unbalanced)
        {
                splitIndex = startIndex+ (numIndices>>1);
        }

        bool unbal = (splitIndex==startIndex) || (splitIndex == (endIndex));
        (void)unbal;
        btAssert(!unbal);

        return splitIndex;
}


int     btQuantizedBvh::calcSplittingAxis(int startIndex,int endIndex)
{
        int i;

        btVector3 means(btScalar(0.),btScalar(0.),btScalar(0.));
        btVector3 variance(btScalar(0.),btScalar(0.),btScalar(0.));
        int numIndices = endIndex-startIndex;

        for (i=startIndex;i<endIndex;i++)
        {
                btVector3 center = btScalar(0.5)*(getAabbMax(i)+getAabbMin(i));
                means+=center;
        }
        means *= (btScalar(1.)/(btScalar)numIndices);

        for (i=startIndex;i<endIndex;i++)
        {
                btVector3 center = btScalar(0.5)*(getAabbMax(i)+getAabbMin(i));
                btVector3 diff2 = center-means;
                diff2 = diff2 * diff2;
                variance += diff2;
        }
        variance *= (btScalar(1.)/      ((btScalar)numIndices-1)        );

        return variance.maxAxis();
}


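///reportAabbOverlappingNodex() is the main query entry point: for a quantized tree the query
///AABB is quantized once up front and all node tests happen in 16-bit integer space;
///m_traversalMode selects stackless, cache-friendly (per subtree header) or recursive
///traversal. Matching leaves are handed to the caller's btNodeOverlapCallback.
///
///Typical caller-side sketch (not part of this file, names are illustrative only):
///
///     struct CollectTriangles : public btNodeOverlapCallback
///     {
///             btAlignedObjectArray<int> m_hits;       //hypothetical result storage
///             virtual void processNode(int subPart, int triangleIndex)
///             {
///                     (void)subPart;
///                     m_hits.push_back(triangleIndex);
///             }
///     };
///     //CollectTriangles callback;
///     //bvh->reportAabbOverlappingNodex(&callback, queryAabbMin, queryAabbMax);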
void    btQuantizedBvh::reportAabbOverlappingNodex(btNodeOverlapCallback* nodeCallback,const btVector3& aabbMin,const btVector3& aabbMax) const
{
        //either choose recursive traversal (walkTree) or stackless (walkStacklessTree)

        if (m_useQuantization)
        {
                ///quantize query AABB
                unsigned short int quantizedQueryAabbMin[3];
                unsigned short int quantizedQueryAabbMax[3];
                quantizeWithClamp(quantizedQueryAabbMin,aabbMin,0);
                quantizeWithClamp(quantizedQueryAabbMax,aabbMax,1);

                switch (m_traversalMode)
                {
                case TRAVERSAL_STACKLESS:
                                walkStacklessQuantizedTree(nodeCallback,quantizedQueryAabbMin,quantizedQueryAabbMax,0,m_curNodeIndex);
                        break;
                case TRAVERSAL_STACKLESS_CACHE_FRIENDLY:
                                walkStacklessQuantizedTreeCacheFriendly(nodeCallback,quantizedQueryAabbMin,quantizedQueryAabbMax);
                        break;
                case TRAVERSAL_RECURSIVE:
                        {
                                const btQuantizedBvhNode* rootNode = &m_quantizedContiguousNodes[0];
                                walkRecursiveQuantizedTreeAgainstQueryAabb(rootNode,nodeCallback,quantizedQueryAabbMin,quantizedQueryAabbMax);
                        }
                        break;
                default:
                        //unsupported
                        btAssert(0);
                }
        } else
        {
                walkStacklessTree(nodeCallback,aabbMin,aabbMax);
        }
}


int maxIterations = 0;

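///Stackless walk over the depth-first node array: if the current node overlaps the query
///(or is a leaf) we advance by one node, descending into its children; otherwise we jump
///forward by the node's escape index and skip its entire subtree. Overlapping leaves are
///reported through the callback. maxIterations above is only a global statistic.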
void    btQuantizedBvh::walkStacklessTree(btNodeOverlapCallback* nodeCallback,const btVector3& aabbMin,const btVector3& aabbMax) const
{
        btAssert(!m_useQuantization);

        const btOptimizedBvhNode* rootNode = &m_contiguousNodes[0];
        int escapeIndex, curIndex = 0;
        int walkIterations = 0;
        bool isLeafNode;
        //PCK: unsigned instead of bool
        unsigned aabbOverlap;

        while (curIndex < m_curNodeIndex)
        {
                //catch bugs in tree data
                assert (walkIterations < m_curNodeIndex);

                walkIterations++;
                aabbOverlap = TestAabbAgainstAabb2(aabbMin,aabbMax,rootNode->m_aabbMinOrg,rootNode->m_aabbMaxOrg);
                isLeafNode = rootNode->m_escapeIndex == -1;

                //PCK: unsigned instead of bool
                if (isLeafNode && (aabbOverlap != 0))
                {
                        nodeCallback->processNode(rootNode->m_subPart,rootNode->m_triangleIndex);
                }

                //PCK: unsigned instead of bool
                if ((aabbOverlap != 0) || isLeafNode)
                {
                        rootNode++;
                        curIndex++;
                } else
                {
                        escapeIndex = rootNode->m_escapeIndex;
                        rootNode += escapeIndex;
                        curIndex += escapeIndex;
                }
        }
        if (maxIterations < walkIterations)
                maxIterations = walkIterations;

}

/*
///this was the original recursive traversal, before we optimized towards stackless traversal
void    btQuantizedBvh::walkTree(btOptimizedBvhNode* rootNode,btNodeOverlapCallback* nodeCallback,const btVector3& aabbMin,const btVector3& aabbMax) const
{
        bool isLeafNode, aabbOverlap = TestAabbAgainstAabb2(aabbMin,aabbMax,rootNode->m_aabbMin,rootNode->m_aabbMax);
        if (aabbOverlap)
        {
                isLeafNode = (!rootNode->m_leftChild && !rootNode->m_rightChild);
                if (isLeafNode)
                {
                        nodeCallback->processNode(rootNode);
                } else
                {
                        walkTree(rootNode->m_leftChild,nodeCallback,aabbMin,aabbMax);
                        walkTree(rootNode->m_rightChild,nodeCallback,aabbMin,aabbMax);
                }
        }

}
*/

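///Recursive variant for the quantized tree. Because the nodes are stored depth-first, the
///left child is simply the next node in the array, and the right child starts right after
///the left child's subtree: one node further if the left child is a leaf, otherwise
///leftChild + leftChild->getEscapeIndex().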
void btQuantizedBvh::walkRecursiveQuantizedTreeAgainstQueryAabb(const btQuantizedBvhNode* currentNode,btNodeOverlapCallback* nodeCallback,unsigned short int* quantizedQueryAabbMin,unsigned short int* quantizedQueryAabbMax) const
{
        btAssert(m_useQuantization);

        bool isLeafNode;
        //PCK: unsigned instead of bool
        unsigned aabbOverlap;

        //PCK: unsigned instead of bool
        aabbOverlap = testQuantizedAabbAgainstQuantizedAabb(quantizedQueryAabbMin,quantizedQueryAabbMax,currentNode->m_quantizedAabbMin,currentNode->m_quantizedAabbMax);
        isLeafNode = currentNode->isLeafNode();

        //PCK: unsigned instead of bool
        if (aabbOverlap != 0)
        {
                if (isLeafNode)
                {
                        nodeCallback->processNode(currentNode->getPartId(),currentNode->getTriangleIndex());
                } else
                {
                        //process left and right children
                        const btQuantizedBvhNode* leftChildNode = currentNode+1;
                        walkRecursiveQuantizedTreeAgainstQueryAabb(leftChildNode,nodeCallback,quantizedQueryAabbMin,quantizedQueryAabbMax);

                        const btQuantizedBvhNode* rightChildNode = leftChildNode->isLeafNode() ? leftChildNode+1:leftChildNode+leftChildNode->getEscapeIndex();
                        walkRecursiveQuantizedTreeAgainstQueryAabb(rightChildNode,nodeCallback,quantizedQueryAabbMin,quantizedQueryAabbMax);
                }
        }
}


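///Ray / box-cast traversal. Nodes are first culled with a cheap quantized AABB test against
///the swept volume (ray AABB grown by the cast box extents); only candidates that pass get
///the precise btRayAabb2 slab test, which uses the precomputed inverse ray direction and
///per-axis sign bits set up under RAYAABB2 below. The walk itself is the same escape-index
///scheme as the other stackless traversals.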
void    btQuantizedBvh::walkStacklessQuantizedTreeAgainstRay(btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget, const btVector3& aabbMin, const btVector3& aabbMax, int startNodeIndex,int endNodeIndex) const
{
        btAssert(m_useQuantization);

        int curIndex = startNodeIndex;
        int walkIterations = 0;
        int subTreeSize = endNodeIndex - startNodeIndex;
        (void)subTreeSize;

        const btQuantizedBvhNode* rootNode = &m_quantizedContiguousNodes[startNodeIndex];
        int escapeIndex;

        bool isLeafNode;
        //PCK: unsigned instead of bool
        unsigned boxBoxOverlap = 0;
        unsigned rayBoxOverlap = 0;

        btScalar lambda_max = 1.0;
#define RAYAABB2
#ifdef RAYAABB2
        btVector3 rayFrom = raySource;
        btVector3 rayDirection = (rayTarget-raySource);
        rayDirection.normalize ();
        lambda_max = rayDirection.dot(rayTarget-raySource);
        ///avoid division by zero: axes with zero direction get a huge inverse (1e30) instead of 1/0
        rayDirection[0] = rayDirection[0] == btScalar(0.0) ? btScalar(1e30) : btScalar(1.0) / rayDirection[0];
        rayDirection[1] = rayDirection[1] == btScalar(0.0) ? btScalar(1e30) : btScalar(1.0) / rayDirection[1];
        rayDirection[2] = rayDirection[2] == btScalar(0.0) ? btScalar(1e30) : btScalar(1.0) / rayDirection[2];
        unsigned int sign[3] = { rayDirection[0] < 0.0, rayDirection[1] < 0.0, rayDirection[2] < 0.0};
#endif

        /* Quick pruning by quantized box */
        btVector3 rayAabbMin = raySource;
        btVector3 rayAabbMax = raySource;
        rayAabbMin.setMin(rayTarget);
        rayAabbMax.setMax(rayTarget);

        /* Add box cast extents to bounding box */
        rayAabbMin += aabbMin;
        rayAabbMax += aabbMax;

        unsigned short int quantizedQueryAabbMin[3];
        unsigned short int quantizedQueryAabbMax[3];
        quantizeWithClamp(quantizedQueryAabbMin,rayAabbMin,0);
        quantizeWithClamp(quantizedQueryAabbMax,rayAabbMax,1);

        while (curIndex < endNodeIndex)
        {

//#define VISUALLY_ANALYZE_BVH 1
#ifdef VISUALLY_ANALYZE_BVH
                //some code snippet to debugDraw aabb, to visually analyze bvh structure
                static int drawPatch = 0;
                //need some global access to a debugDrawer
                extern btIDebugDraw* debugDrawerPtr;
                if (curIndex==drawPatch)
                {
                        btVector3 aabbMin,aabbMax;
                        aabbMin = unQuantize(rootNode->m_quantizedAabbMin);
                        aabbMax = unQuantize(rootNode->m_quantizedAabbMax);
                        btVector3       color(1,0,0);
                        debugDrawerPtr->drawAabb(aabbMin,aabbMax,color);
                }
#endif//VISUALLY_ANALYZE_BVH

                //catch bugs in tree data
                assert (walkIterations < subTreeSize);

                walkIterations++;
                //PCK: unsigned instead of bool
                // only interested if this is closer than any previous hit
                btScalar param = 1.0;
                rayBoxOverlap = 0;
                boxBoxOverlap = testQuantizedAabbAgainstQuantizedAabb(quantizedQueryAabbMin,quantizedQueryAabbMax,rootNode->m_quantizedAabbMin,rootNode->m_quantizedAabbMax);
                isLeafNode = rootNode->isLeafNode();
                if (boxBoxOverlap)
                {
                        btVector3 bounds[2];
                        bounds[0] = unQuantize(rootNode->m_quantizedAabbMin);
                        bounds[1] = unQuantize(rootNode->m_quantizedAabbMax);
                        /* Add box cast extents */
                        bounds[0] += aabbMin;
                        bounds[1] += aabbMax;
                        btVector3 normal;
#if 0
                        bool ra2 = btRayAabb2 (raySource, rayDirection, sign, bounds, param, 0.0, lambda_max);
                        bool ra = btRayAabb (raySource, rayTarget, bounds[0], bounds[1], param, normal);
                        if (ra2 != ra)
                        {
                                printf("functions don't match\n");
                        }
#endif
#ifdef RAYAABB2
                        ///careful with this check: need to check division by zero (above) and fix the unQuantize method
                        ///thanks Joerg/hiker for the reproduction case!
                        ///http://www.bulletphysics.com/Bullet/phpBB3/viewtopic.php?f=9&t=1858

                        rayBoxOverlap = btRayAabb2 (raySource, rayDirection, sign, bounds, param, 0.0f, lambda_max);
#else
                        rayBoxOverlap = true;//btRayAabb(raySource, rayTarget, bounds[0], bounds[1], param, normal);
#endif
                }

                if (isLeafNode && rayBoxOverlap)
                {
                        nodeCallback->processNode(rootNode->getPartId(),rootNode->getTriangleIndex());
                }

                //PCK: unsigned instead of bool
                if ((rayBoxOverlap != 0) || isLeafNode)
                {
                        rootNode++;
                        curIndex++;
                } else
                {
                        escapeIndex = rootNode->getEscapeIndex();
                        rootNode += escapeIndex;
                        curIndex += escapeIndex;
                }
        }
        if (maxIterations < walkIterations)
                maxIterations = walkIterations;

}

void    btQuantizedBvh::walkStacklessQuantizedTree(btNodeOverlapCallback* nodeCallback,unsigned short int* quantizedQueryAabbMin,unsigned short int* quantizedQueryAabbMax,int startNodeIndex,int endNodeIndex) const
{
        btAssert(m_useQuantization);

        int curIndex = startNodeIndex;
        int walkIterations = 0;
        int subTreeSize = endNodeIndex - startNodeIndex;
        (void)subTreeSize;

        const btQuantizedBvhNode* rootNode = &m_quantizedContiguousNodes[startNodeIndex];
        int escapeIndex;

        bool isLeafNode;
        //PCK: unsigned instead of bool
        unsigned aabbOverlap;

        while (curIndex < endNodeIndex)
        {

//#define VISUALLY_ANALYZE_BVH 1
#ifdef VISUALLY_ANALYZE_BVH
                //some code snippet to debugDraw aabb, to visually analyze bvh structure
                static int drawPatch = 0;
                //need some global access to a debugDrawer
                extern btIDebugDraw* debugDrawerPtr;
                if (curIndex==drawPatch)
                {
                        btVector3 aabbMin,aabbMax;
                        aabbMin = unQuantize(rootNode->m_quantizedAabbMin);
                        aabbMax = unQuantize(rootNode->m_quantizedAabbMax);
                        btVector3       color(1,0,0);
                        debugDrawerPtr->drawAabb(aabbMin,aabbMax,color);
                }
#endif//VISUALLY_ANALYZE_BVH

                //catch bugs in tree data
                assert (walkIterations < subTreeSize);

                walkIterations++;
                //PCK: unsigned instead of bool
                aabbOverlap = testQuantizedAabbAgainstQuantizedAabb(quantizedQueryAabbMin,quantizedQueryAabbMax,rootNode->m_quantizedAabbMin,rootNode->m_quantizedAabbMax);
                isLeafNode = rootNode->isLeafNode();

                if (isLeafNode && aabbOverlap)
                {
                        nodeCallback->processNode(rootNode->getPartId(),rootNode->getTriangleIndex());
                }

                //PCK: unsigned instead of bool
                if ((aabbOverlap != 0) || isLeafNode)
                {
                        rootNode++;
                        curIndex++;
                } else
                {
                        escapeIndex = rootNode->getEscapeIndex();
                        rootNode += escapeIndex;
                        curIndex += escapeIndex;
                }
        }
        if (maxIterations < walkIterations)
                maxIterations = walkIterations;

}

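///The cache-friendly variant does not walk the full node array up front: it loops over the
///precomputed subtree headers, tests each subtree's quantized AABB against the query, and
///only walks the node range of subtrees that overlap. Each such range is bounded by
///MAX_SUBTREE_SIZE_IN_BYTES, which is what makes it suitable for SPU local store and small caches.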
//This traversal can be called from Playstation 3 SPU
void    btQuantizedBvh::walkStacklessQuantizedTreeCacheFriendly(btNodeOverlapCallback* nodeCallback,unsigned short int* quantizedQueryAabbMin,unsigned short int* quantizedQueryAabbMax) const
{
        btAssert(m_useQuantization);

        int i;


        for (i=0;i<this->m_SubtreeHeaders.size();i++)
        {
                const btBvhSubtreeInfo& subtree = m_SubtreeHeaders[i];

                //PCK: unsigned instead of bool
                unsigned overlap = testQuantizedAabbAgainstQuantizedAabb(quantizedQueryAabbMin,quantizedQueryAabbMax,subtree.m_quantizedAabbMin,subtree.m_quantizedAabbMax);
                if (overlap != 0)
                {
                        walkStacklessQuantizedTree(nodeCallback,quantizedQueryAabbMin,quantizedQueryAabbMax,
                                subtree.m_rootNodeIndex,
                                subtree.m_rootNodeIndex+subtree.m_subtreeSize);
                }
        }
}

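///The two report*OverlappingNodex variants below take the fast quantized ray walk when the
///tree is quantized and in TRAVERSAL_STACKLESS mode; a plain ray query is treated as a box
///cast with zero extents. Otherwise they fall back to an ordinary AABB query over the
///bounding box of the whole sweep.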
void    btQuantizedBvh::reportRayOverlappingNodex (btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget) const
{
        bool fast_path = m_useQuantization && m_traversalMode == TRAVERSAL_STACKLESS;
        if (fast_path)
        {
                walkStacklessQuantizedTreeAgainstRay(nodeCallback, raySource, rayTarget, btVector3(0, 0, 0), btVector3(0, 0, 0), 0, m_curNodeIndex);
        } else {
                /* Otherwise fall back to the AABB overlap test */
                btVector3 aabbMin = raySource;
                btVector3 aabbMax = raySource;
                aabbMin.setMin(rayTarget);
                aabbMax.setMax(rayTarget);
                reportAabbOverlappingNodex(nodeCallback,aabbMin,aabbMax);
        }
}


void    btQuantizedBvh::reportBoxCastOverlappingNodex(btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget, const btVector3& aabbMin,const btVector3& aabbMax) const
{
        bool fast_path = m_useQuantization && m_traversalMode == TRAVERSAL_STACKLESS;
        if (fast_path)
        {
                walkStacklessQuantizedTreeAgainstRay(nodeCallback, raySource, rayTarget, aabbMin, aabbMax, 0, m_curNodeIndex);
        } else {
                /* Slow path:
                   Construct the bounding box for the entire box cast and send that down the tree */
                btVector3 qaabbMin = raySource;
                btVector3 qaabbMax = raySource;
                qaabbMin.setMin(rayTarget);
                qaabbMax.setMax(rayTarget);
                qaabbMin += aabbMin;
                qaabbMax += aabbMax;
                reportAabbOverlappingNodex(nodeCallback,qaabbMin,qaabbMax);
        }
}

void    btQuantizedBvh::swapLeafNodes(int i,int splitIndex)
{
        if (m_useQuantization)
        {
                btQuantizedBvhNode tmp = m_quantizedLeafNodes[i];
                m_quantizedLeafNodes[i] = m_quantizedLeafNodes[splitIndex];
                m_quantizedLeafNodes[splitIndex] = tmp;
        } else
        {
                btOptimizedBvhNode tmp = m_leafNodes[i];
                m_leafNodes[i] = m_leafNodes[splitIndex];
                m_leafNodes[splitIndex] = tmp;
        }
}

void    btQuantizedBvh::assignInternalNodeFromLeafNode(int internalNode,int leafNodeIndex)
{
        if (m_useQuantization)
        {
                m_quantizedContiguousNodes[internalNode] = m_quantizedLeafNodes[leafNodeIndex];
        } else
        {
                m_contiguousNodes[internalNode] = m_leafNodes[leafNodeIndex];
        }
}

//PCK: include
#include <new>

//PCK: consts
static const unsigned BVH_ALIGNMENT = 16;
static const unsigned BVH_ALIGNMENT_MASK = BVH_ALIGNMENT-1;

static const unsigned BVH_ALIGNMENT_BLOCKS = 2;

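///In-place serialization layout, as produced by serialize() into a caller-provided buffer:
///  [btQuantizedBvh object copy][node array][btBvhSubtreeInfo array]
///The node array is either btQuantizedBvhNode or btOptimizedBvhNode depending on
///m_useQuantization. The aligned arrays are pointed into the buffer via initializeFromBuffer(),
///endianness can be swapped on the way out, and the vtable pointer at the start of the buffer
///is nulled so the blob carries no process-specific pointers. deSerializeInPlace() later
///restores the vtable with a placement new.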

unsigned int btQuantizedBvh::getAlignmentSerializationPadding()
{
        // I changed this to 0 since the extra padding is not needed or used.
        return 0;//BVH_ALIGNMENT_BLOCKS * BVH_ALIGNMENT;
}

unsigned btQuantizedBvh::calculateSerializeBufferSize()
{
        unsigned baseSize = sizeof(btQuantizedBvh) + getAlignmentSerializationPadding();
        baseSize += sizeof(btBvhSubtreeInfo) * m_subtreeHeaderCount;
        if (m_useQuantization)
        {
                return baseSize + m_curNodeIndex * sizeof(btQuantizedBvhNode);
        }
        return baseSize + m_curNodeIndex * sizeof(btOptimizedBvhNode);
}

bool btQuantizedBvh::serialize(void *o_alignedDataBuffer, unsigned /*i_dataBufferSize */, bool i_swapEndian)
{
        assert(m_subtreeHeaderCount == m_SubtreeHeaders.size());
        m_subtreeHeaderCount = m_SubtreeHeaders.size();

/*      if (i_dataBufferSize < calculateSerializeBufferSize() || o_alignedDataBuffer == NULL || (((unsigned)o_alignedDataBuffer & BVH_ALIGNMENT_MASK) != 0))
        {
                ///check alignment of the buffer?
                btAssert(0);
                return false;
        }
*/

        btQuantizedBvh *targetBvh = (btQuantizedBvh *)o_alignedDataBuffer;

        // construct the class so the virtual function table, etc will be set up
        // Also, m_leafNodes and m_quantizedLeafNodes will be initialized to default values by the constructor
        new (targetBvh) btQuantizedBvh;

        if (i_swapEndian)
        {
                targetBvh->m_curNodeIndex = static_cast<int>(btSwapEndian(m_curNodeIndex));

                btSwapVector3Endian(m_bvhAabbMin,targetBvh->m_bvhAabbMin);
                btSwapVector3Endian(m_bvhAabbMax,targetBvh->m_bvhAabbMax);
                btSwapVector3Endian(m_bvhQuantization,targetBvh->m_bvhQuantization);

                targetBvh->m_traversalMode = (btTraversalMode)btSwapEndian(m_traversalMode);
                targetBvh->m_subtreeHeaderCount = static_cast<int>(btSwapEndian(m_subtreeHeaderCount));
        }
        else
        {
                targetBvh->m_curNodeIndex = m_curNodeIndex;
                targetBvh->m_bvhAabbMin = m_bvhAabbMin;
                targetBvh->m_bvhAabbMax = m_bvhAabbMax;
                targetBvh->m_bvhQuantization = m_bvhQuantization;
                targetBvh->m_traversalMode = m_traversalMode;
                targetBvh->m_subtreeHeaderCount = m_subtreeHeaderCount;
        }

        targetBvh->m_useQuantization = m_useQuantization;

        unsigned char *nodeData = (unsigned char *)targetBvh;
        nodeData += sizeof(btQuantizedBvh);

        unsigned sizeToAdd = 0;//(BVH_ALIGNMENT-((unsigned)nodeData & BVH_ALIGNMENT_MASK))&BVH_ALIGNMENT_MASK;
        nodeData += sizeToAdd;

        int nodeCount = m_curNodeIndex;

        if (m_useQuantization)
        {
                targetBvh->m_quantizedContiguousNodes.initializeFromBuffer(nodeData, nodeCount, nodeCount);

                if (i_swapEndian)
                {
                        for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
                        {
                                targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0] = btSwapEndian(m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0]);
                                targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1] = btSwapEndian(m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1]);
                                targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2] = btSwapEndian(m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2]);

                                targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0] = btSwapEndian(m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0]);
                                targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1] = btSwapEndian(m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1]);
                                targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2] = btSwapEndian(m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2]);

                                targetBvh->m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex = static_cast<int>(btSwapEndian(m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex));
                        }
                }
                else
                {
                        for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
                        {
                                targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0];
                                targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1];
                                targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2];

                                targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0];
                                targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1];
                                targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2];

                                targetBvh->m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex = m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex;
                        }
                }
                nodeData += sizeof(btQuantizedBvhNode) * nodeCount;

                // this clears the pointer in the member variable; it doesn't really do anything to the data
                // it does call the destructor on the contained objects, but they are all classes with no destructor defined
                // so the memory (which is not freed) is left alone
                targetBvh->m_quantizedContiguousNodes.initializeFromBuffer(NULL, 0, 0);
        }
        else
        {
                targetBvh->m_contiguousNodes.initializeFromBuffer(nodeData, nodeCount, nodeCount);

                if (i_swapEndian)
                {
                        for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
                        {
                                btSwapVector3Endian(m_contiguousNodes[nodeIndex].m_aabbMinOrg, targetBvh->m_contiguousNodes[nodeIndex].m_aabbMinOrg);
                                btSwapVector3Endian(m_contiguousNodes[nodeIndex].m_aabbMaxOrg, targetBvh->m_contiguousNodes[nodeIndex].m_aabbMaxOrg);

                                targetBvh->m_contiguousNodes[nodeIndex].m_escapeIndex = static_cast<int>(btSwapEndian(m_contiguousNodes[nodeIndex].m_escapeIndex));
                                targetBvh->m_contiguousNodes[nodeIndex].m_subPart = static_cast<int>(btSwapEndian(m_contiguousNodes[nodeIndex].m_subPart));
                                targetBvh->m_contiguousNodes[nodeIndex].m_triangleIndex = static_cast<int>(btSwapEndian(m_contiguousNodes[nodeIndex].m_triangleIndex));
                        }
                }
                else
                {
                        for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
                        {
                                targetBvh->m_contiguousNodes[nodeIndex].m_aabbMinOrg = m_contiguousNodes[nodeIndex].m_aabbMinOrg;
                                targetBvh->m_contiguousNodes[nodeIndex].m_aabbMaxOrg = m_contiguousNodes[nodeIndex].m_aabbMaxOrg;

                                targetBvh->m_contiguousNodes[nodeIndex].m_escapeIndex = m_contiguousNodes[nodeIndex].m_escapeIndex;
                                targetBvh->m_contiguousNodes[nodeIndex].m_subPart = m_contiguousNodes[nodeIndex].m_subPart;
                                targetBvh->m_contiguousNodes[nodeIndex].m_triangleIndex = m_contiguousNodes[nodeIndex].m_triangleIndex;
                        }
                }
                nodeData += sizeof(btOptimizedBvhNode) * nodeCount;

                // this clears the pointer in the member variable; it doesn't really do anything to the data
                // it does call the destructor on the contained objects, but they are all classes with no destructor defined
                // so the memory (which is not freed) is left alone
                targetBvh->m_contiguousNodes.initializeFromBuffer(NULL, 0, 0);
        }

        sizeToAdd = 0;//(BVH_ALIGNMENT-((unsigned)nodeData & BVH_ALIGNMENT_MASK))&BVH_ALIGNMENT_MASK;
        nodeData += sizeToAdd;

        // Now serialize the subtree headers
        targetBvh->m_SubtreeHeaders.initializeFromBuffer(nodeData, m_subtreeHeaderCount, m_subtreeHeaderCount);
        if (i_swapEndian)
        {
                for (int i = 0; i < m_subtreeHeaderCount; i++)
                {
                        targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMin[0] = btSwapEndian(m_SubtreeHeaders[i].m_quantizedAabbMin[0]);
                        targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMin[1] = btSwapEndian(m_SubtreeHeaders[i].m_quantizedAabbMin[1]);
                        targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMin[2] = btSwapEndian(m_SubtreeHeaders[i].m_quantizedAabbMin[2]);

                        targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMax[0] = btSwapEndian(m_SubtreeHeaders[i].m_quantizedAabbMax[0]);
                        targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMax[1] = btSwapEndian(m_SubtreeHeaders[i].m_quantizedAabbMax[1]);
                        targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMax[2] = btSwapEndian(m_SubtreeHeaders[i].m_quantizedAabbMax[2]);

                        targetBvh->m_SubtreeHeaders[i].m_rootNodeIndex = static_cast<int>(btSwapEndian(m_SubtreeHeaders[i].m_rootNodeIndex));
                        targetBvh->m_SubtreeHeaders[i].m_subtreeSize = static_cast<int>(btSwapEndian(m_SubtreeHeaders[i].m_subtreeSize));
                }
        }
        else
        {
                for (int i = 0; i < m_subtreeHeaderCount; i++)
                {
                        targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMin[0] = (m_SubtreeHeaders[i].m_quantizedAabbMin[0]);
                        targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMin[1] = (m_SubtreeHeaders[i].m_quantizedAabbMin[1]);
                        targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMin[2] = (m_SubtreeHeaders[i].m_quantizedAabbMin[2]);

                        targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMax[0] = (m_SubtreeHeaders[i].m_quantizedAabbMax[0]);
                        targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMax[1] = (m_SubtreeHeaders[i].m_quantizedAabbMax[1]);
                        targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMax[2] = (m_SubtreeHeaders[i].m_quantizedAabbMax[2]);

                        targetBvh->m_SubtreeHeaders[i].m_rootNodeIndex = (m_SubtreeHeaders[i].m_rootNodeIndex);
                        targetBvh->m_SubtreeHeaders[i].m_subtreeSize = (m_SubtreeHeaders[i].m_subtreeSize);

                        // need to clear padding in destination buffer
                        targetBvh->m_SubtreeHeaders[i].m_padding[0] = 0;
                        targetBvh->m_SubtreeHeaders[i].m_padding[1] = 0;
                        targetBvh->m_SubtreeHeaders[i].m_padding[2] = 0;
                }
        }
        nodeData += sizeof(btBvhSubtreeInfo) * m_subtreeHeaderCount;

        // this clears the pointer in the member variable; it doesn't really do anything to the data
        // it does call the destructor on the contained objects, but they are all classes with no destructor defined
        // so the memory (which is not freed) is left alone
        targetBvh->m_SubtreeHeaders.initializeFromBuffer(NULL, 0, 0);

        // this wipes the virtual function table pointer at the start of the buffer for the class
        *((void**)o_alignedDataBuffer) = NULL;

        return true;
}

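///deSerializeInPlace() is the mirror image: given a buffer written by serialize() (possibly
///with the opposite endianness), it fixes up the scalar members, uses a placement new with
///the special constructor below to restore the vtable without touching the data, and points
///the aligned arrays back into the node and subtree-header regions of the same buffer.
///No memory is copied or allocated; the returned object aliases the caller's buffer.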
btQuantizedBvh *btQuantizedBvh::deSerializeInPlace(void *i_alignedDataBuffer, unsigned int i_dataBufferSize, bool i_swapEndian)
{

        if (i_alignedDataBuffer == NULL)// || (((unsigned)i_alignedDataBuffer & BVH_ALIGNMENT_MASK) != 0))
        {
                return NULL;
        }
        btQuantizedBvh *bvh = (btQuantizedBvh *)i_alignedDataBuffer;

        if (i_swapEndian)
        {
                bvh->m_curNodeIndex = static_cast<int>(btSwapEndian(bvh->m_curNodeIndex));

                btUnSwapVector3Endian(bvh->m_bvhAabbMin);
                btUnSwapVector3Endian(bvh->m_bvhAabbMax);
                btUnSwapVector3Endian(bvh->m_bvhQuantization);

                bvh->m_traversalMode = (btTraversalMode)btSwapEndian(bvh->m_traversalMode);
                bvh->m_subtreeHeaderCount = static_cast<int>(btSwapEndian(bvh->m_subtreeHeaderCount));
        }

        unsigned int calculatedBufSize = bvh->calculateSerializeBufferSize();
        btAssert(calculatedBufSize <= i_dataBufferSize);

        if (calculatedBufSize > i_dataBufferSize)
        {
                return NULL;
        }

        unsigned char *nodeData = (unsigned char *)bvh;
        nodeData += sizeof(btQuantizedBvh);

        unsigned sizeToAdd = 0;//(BVH_ALIGNMENT-((unsigned)nodeData & BVH_ALIGNMENT_MASK))&BVH_ALIGNMENT_MASK;
        nodeData += sizeToAdd;

        int nodeCount = bvh->m_curNodeIndex;

        // Must call placement new to fill in virtual function table, etc, but we don't want to overwrite most data, so call a special version of the constructor
        // Also, m_leafNodes and m_quantizedLeafNodes will be initialized to default values by the constructor
        new (bvh) btQuantizedBvh(*bvh, false);

        if (bvh->m_useQuantization)
        {
                bvh->m_quantizedContiguousNodes.initializeFromBuffer(nodeData, nodeCount, nodeCount);

                if (i_swapEndian)
                {
                        for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
                        {
                                bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0] = btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0]);
                                bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1] = btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1]);
                                bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2] = btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2]);

                                bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0] = btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0]);
                                bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1] = btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1]);
                                bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2] = btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2]);

                                bvh->m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex = static_cast<int>(btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex));
                        }
                }
                nodeData += sizeof(btQuantizedBvhNode) * nodeCount;
        }
        else
        {
                bvh->m_contiguousNodes.initializeFromBuffer(nodeData, nodeCount, nodeCount);

                if (i_swapEndian)
                {
                        for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
                        {
                                btUnSwapVector3Endian(bvh->m_contiguousNodes[nodeIndex].m_aabbMinOrg);
                                btUnSwapVector3Endian(bvh->m_contiguousNodes[nodeIndex].m_aabbMaxOrg);

                                bvh->m_contiguousNodes[nodeIndex].m_escapeIndex = static_cast<int>(btSwapEndian(bvh->m_contiguousNodes[nodeIndex].m_escapeIndex));
                                bvh->m_contiguousNodes[nodeIndex].m_subPart = static_cast<int>(btSwapEndian(bvh->m_contiguousNodes[nodeIndex].m_subPart));
                                bvh->m_contiguousNodes[nodeIndex].m_triangleIndex = static_cast<int>(btSwapEndian(bvh->m_contiguousNodes[nodeIndex].m_triangleIndex));
                        }
                }
                nodeData += sizeof(btOptimizedBvhNode) * nodeCount;
        }

        sizeToAdd = 0;//(BVH_ALIGNMENT-((unsigned)nodeData & BVH_ALIGNMENT_MASK))&BVH_ALIGNMENT_MASK;
        nodeData += sizeToAdd;

        // Now fix up the subtree headers in place
        bvh->m_SubtreeHeaders.initializeFromBuffer(nodeData, bvh->m_subtreeHeaderCount, bvh->m_subtreeHeaderCount);
        if (i_swapEndian)
        {
                for (int i = 0; i < bvh->m_subtreeHeaderCount; i++)
                {
                        bvh->m_SubtreeHeaders[i].m_quantizedAabbMin[0] = btSwapEndian(bvh->m_SubtreeHeaders[i].m_quantizedAabbMin[0]);
                        bvh->m_SubtreeHeaders[i].m_quantizedAabbMin[1] = btSwapEndian(bvh->m_SubtreeHeaders[i].m_quantizedAabbMin[1]);
                        bvh->m_SubtreeHeaders[i].m_quantizedAabbMin[2] = btSwapEndian(bvh->m_SubtreeHeaders[i].m_quantizedAabbMin[2]);

                        bvh->m_SubtreeHeaders[i].m_quantizedAabbMax[0] = btSwapEndian(bvh->m_SubtreeHeaders[i].m_quantizedAabbMax[0]);
                        bvh->m_SubtreeHeaders[i].m_quantizedAabbMax[1] = btSwapEndian(bvh->m_SubtreeHeaders[i].m_quantizedAabbMax[1]);
                        bvh->m_SubtreeHeaders[i].m_quantizedAabbMax[2] = btSwapEndian(bvh->m_SubtreeHeaders[i].m_quantizedAabbMax[2]);

                        bvh->m_SubtreeHeaders[i].m_rootNodeIndex = static_cast<int>(btSwapEndian(bvh->m_SubtreeHeaders[i].m_rootNodeIndex));
                        bvh->m_SubtreeHeaders[i].m_subtreeSize = static_cast<int>(btSwapEndian(bvh->m_SubtreeHeaders[i].m_subtreeSize));
                }
        }

        return bvh;
}

// Constructor that prevents btVector3's default constructor from being called
btQuantizedBvh::btQuantizedBvh(btQuantizedBvh &self, bool /* ownsMemory */) :
m_bvhAabbMin(self.m_bvhAabbMin),
m_bvhAabbMax(self.m_bvhAabbMax),
m_bvhQuantization(self.m_bvhQuantization)
{

}