
source: code/branches/presentation/src/bullet/BulletCollision/BroadphaseCollision/btQuantizedBvh.cpp @ 2662

Last change on this file since 2662 was 2459, checked in by rgrieder, 16 years ago

Merged physics_merge back to presentation branch.

  • Property svn:eol-style set to native
File size: 39.4 KB
1/*
2Bullet Continuous Collision Detection and Physics Library
3Copyright (c) 2003-2006 Erwin Coumans  http://continuousphysics.com/Bullet/
4
5This software is provided 'as-is', without any express or implied warranty.
6In no event will the authors be held liable for any damages arising from the use of this software.
7Permission is granted to anyone to use this software for any purpose,
8including commercial applications, and to alter it and redistribute it freely,
9subject to the following restrictions:
10
111. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
122. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
133. This notice may not be removed or altered from any source distribution.
14*/
15
16#include "btQuantizedBvh.h"
17
18#include "LinearMath/btAabbUtil2.h"
19#include "LinearMath/btIDebugDraw.h"
20
21#define RAYAABB2
22
23btQuantizedBvh::btQuantizedBvh() : 
24                                        m_bulletVersion(BT_BULLET_VERSION),
25                                        m_useQuantization(false), 
26                                        //m_traversalMode(TRAVERSAL_STACKLESS_CACHE_FRIENDLY)
27                                        m_traversalMode(TRAVERSAL_STACKLESS)
28                                        //m_traversalMode(TRAVERSAL_RECURSIVE)
29                                        ,m_subtreeHeaderCount(0) //PCK: add this line
30{
31        m_bvhAabbMin.setValue(-SIMD_INFINITY,-SIMD_INFINITY,-SIMD_INFINITY);
32        m_bvhAabbMax.setValue(SIMD_INFINITY,SIMD_INFINITY,SIMD_INFINITY);
33}
34
35
36
37
38
39void btQuantizedBvh::buildInternal()
40{
41        ///assumes that the caller has already filled in m_quantizedLeafNodes
42        m_useQuantization = true;
43        int numLeafNodes = 0;
44       
45        if (m_useQuantization)
46        {
47                //now we have an array of leafnodes in m_leafNodes
48                numLeafNodes = m_quantizedLeafNodes.size();
49
50                m_quantizedContiguousNodes.resize(2*numLeafNodes);
51
52        }
53
54        m_curNodeIndex = 0;
55
56        buildTree(0,numLeafNodes);
57
58        ///if the entire tree is smaller than the subtree size, we need to create a header info for the whole tree
59        if(m_useQuantization && !m_SubtreeHeaders.size())
60        {
61                btBvhSubtreeInfo& subtree = m_SubtreeHeaders.expand();
62                subtree.setAabbFromQuantizeNode(m_quantizedContiguousNodes[0]);
63                subtree.m_rootNodeIndex = 0;
64                subtree.m_subtreeSize = m_quantizedContiguousNodes[0].isLeafNode() ? 1 : m_quantizedContiguousNodes[0].getEscapeIndex();
65        }
66
67        //PCK: update the copy of the size
68        m_subtreeHeaderCount = m_SubtreeHeaders.size();
69
70        //PCK: clear m_quantizedLeafNodes and m_leafNodes, they are temporary
71        m_quantizedLeafNodes.clear();
72        m_leafNodes.clear();
73}
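// Editor's note (not part of the original Bullet source): buildInternal() expects the
// caller (typically the optimized-BVH / triangle-mesh shape code that owns this object)
// to have filled m_quantizedLeafNodes already. The tree is then written into the
// contiguous array m_quantizedContiguousNodes (at most 2*numLeafNodes entries for a
// binary tree), and a single subtree header is emitted when the whole tree is small
// enough to count as one subtree.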
74
75
76
77///just for debugging, to visualize the individual patches/subtrees
78#ifdef DEBUG_PATCH_COLORS
79btVector3 color[4]=
80{
81        btVector3(255,0,0),
82        btVector3(0,255,0),
83        btVector3(0,0,255),
84        btVector3(0,255,255)
85};
86#endif //DEBUG_PATCH_COLORS
87
88
89
90void    btQuantizedBvh::setQuantizationValues(const btVector3& bvhAabbMin,const btVector3& bvhAabbMax,btScalar quantizationMargin)
91{
92        //enlarge the AABB to avoid division by zero when initializing the quantization values
93        btVector3 clampValue(quantizationMargin,quantizationMargin,quantizationMargin);
94        m_bvhAabbMin = bvhAabbMin - clampValue;
95        m_bvhAabbMax = bvhAabbMax + clampValue;
96        btVector3 aabbSize = m_bvhAabbMax - m_bvhAabbMin;
97        m_bvhQuantization = btVector3(btScalar(65533.0),btScalar(65533.0),btScalar(65533.0)) / aabbSize;
98        m_useQuantization = true;
99}
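// Editor's note (not part of the original Bullet source): with the values above, a point p
// inside the enlarged AABB quantizes roughly as
//     q[i] = (unsigned short)((p[i] - m_bvhAabbMin[i]) * m_bvhQuantization[i])
// i.e. each axis is mapped onto ~0..65533. Using 65533 instead of 65535 leaves headroom so
// the conservative rounding done in quantizeWithClamp() cannot overflow 16 bits; the exact
// clamping/rounding rules live in btQuantizedBvh.h.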
100
101
102
103
104btQuantizedBvh::~btQuantizedBvh()
105{
106}
107
108#ifdef DEBUG_TREE_BUILDING
109int gStackDepth = 0;
110int gMaxStackDepth = 0;
111#endif //DEBUG_TREE_BUILDING
112
113void    btQuantizedBvh::buildTree       (int startIndex,int endIndex)
114{
115#ifdef DEBUG_TREE_BUILDING
116        gStackDepth++;
117        if (gStackDepth > gMaxStackDepth)
118                gMaxStackDepth = gStackDepth;
119#endif //DEBUG_TREE_BUILDING
120
121
122        int splitAxis, splitIndex, i;
123        int numIndices =endIndex-startIndex;
124        int curIndex = m_curNodeIndex;
125
126        btAssert(numIndices>0);
127
128        if (numIndices==1)
129        {
130#ifdef DEBUG_TREE_BUILDING
131                gStackDepth--;
132#endif //DEBUG_TREE_BUILDING
133               
134                assignInternalNodeFromLeafNode(m_curNodeIndex,startIndex);
135
136                m_curNodeIndex++;
137                return; 
138        }
139        //calculate the best splitting axis and where to split. Sorts the incoming 'leafNodes' array within the range [startIndex, endIndex).
140       
141        splitAxis = calcSplittingAxis(startIndex,endIndex);
142
143        splitIndex = sortAndCalcSplittingIndex(startIndex,endIndex,splitAxis);
144
145        int internalNodeIndex = m_curNodeIndex;
146       
147        //set the min aabb to 'inf' or a max value, and set the max aabb to a -inf/minimum value.
148        //the aabb will be expanded during buildTree/mergeInternalNodeAabb with actual node values
149        setInternalNodeAabbMin(m_curNodeIndex,m_bvhAabbMax);//can't use btVector3(SIMD_INFINITY,SIMD_INFINITY,SIMD_INFINITY)) because of quantization
150        setInternalNodeAabbMax(m_curNodeIndex,m_bvhAabbMin);//can't use btVector3(-SIMD_INFINITY,-SIMD_INFINITY,-SIMD_INFINITY)) because of quantization
151       
152       
153        for (i=startIndex;i<endIndex;i++)
154        {
155                mergeInternalNodeAabb(m_curNodeIndex,getAabbMin(i),getAabbMax(i));
156        }
157
158        m_curNodeIndex++;
159       
160
161        //internalNode->m_escapeIndex;
162       
163        int leftChildNodexIndex = m_curNodeIndex;
164
165        //build left child tree
166        buildTree(startIndex,splitIndex);
167
168        int rightChildNodexIndex = m_curNodeIndex;
169        //build right child tree
170        buildTree(splitIndex,endIndex);
171
172#ifdef DEBUG_TREE_BUILDING
173        gStackDepth--;
174#endif //DEBUG_TREE_BUILDING
175
176        int escapeIndex = m_curNodeIndex - curIndex;
177
178        if (m_useQuantization)
179        {
180                //escapeIndex is the number of nodes of this subtree
181                const int sizeQuantizedNode =sizeof(btQuantizedBvhNode);
182                const int treeSizeInBytes = escapeIndex * sizeQuantizedNode;
183                if (treeSizeInBytes > MAX_SUBTREE_SIZE_IN_BYTES)
184                {
185                        updateSubtreeHeaders(leftChildNodexIndex,rightChildNodexIndex);
186                }
187        } else
188        {
189
190        }
191
192        setInternalNodeEscapeIndex(internalNodeIndex,escapeIndex);
193
194}
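// Editor's note (not part of the original Bullet source): the escape index stored for an
// internal node is the total number of nodes in its subtree (itself included). The
// stackless walkers below use it to skip a whole subtree in one step when the node's AABB
// does not overlap the query: advance rootNode/curIndex by escapeIndex instead of by 1.
// Leaf nodes reuse that slot for the part id / triangle index instead.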
195
196void    btQuantizedBvh::updateSubtreeHeaders(int leftChildNodexIndex,int rightChildNodexIndex)
197{
198        btAssert(m_useQuantization);
199
200        btQuantizedBvhNode& leftChildNode = m_quantizedContiguousNodes[leftChildNodexIndex];
201        int leftSubTreeSize = leftChildNode.isLeafNode() ? 1 : leftChildNode.getEscapeIndex();
202        int leftSubTreeSizeInBytes =  leftSubTreeSize * static_cast<int>(sizeof(btQuantizedBvhNode));
203       
204        btQuantizedBvhNode& rightChildNode = m_quantizedContiguousNodes[rightChildNodexIndex];
205        int rightSubTreeSize = rightChildNode.isLeafNode() ? 1 : rightChildNode.getEscapeIndex();
206        int rightSubTreeSizeInBytes =  rightSubTreeSize *  static_cast<int>(sizeof(btQuantizedBvhNode));
207
208        if(leftSubTreeSizeInBytes <= MAX_SUBTREE_SIZE_IN_BYTES)
209        {
210                btBvhSubtreeInfo& subtree = m_SubtreeHeaders.expand();
211                subtree.setAabbFromQuantizeNode(leftChildNode);
212                subtree.m_rootNodeIndex = leftChildNodexIndex;
213                subtree.m_subtreeSize = leftSubTreeSize;
214        }
215
216        if(rightSubTreeSizeInBytes <= MAX_SUBTREE_SIZE_IN_BYTES)
217        {
218                btBvhSubtreeInfo& subtree = m_SubtreeHeaders.expand();
219                subtree.setAabbFromQuantizeNode(rightChildNode);
220                subtree.m_rootNodeIndex = rightChildNodexIndex;
221                subtree.m_subtreeSize = rightSubTreeSize;
222        }
223
224        //PCK: update the copy of the size
225        m_subtreeHeaderCount = m_SubtreeHeaders.size();
226}
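// Editor's note (not part of the original Bullet source): subtree headers are only created
// for subtrees whose node data fits within MAX_SUBTREE_SIZE_IN_BYTES (defined in
// btQuantizedBvh.h). Each header caches the subtree's quantized AABB, so the cache-friendly
// traversal can cull an entire subtree with one AABB test and then walk a compact,
// contiguous block of nodes.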
227
228
229int     btQuantizedBvh::sortAndCalcSplittingIndex(int startIndex,int endIndex,int splitAxis)
230{
231        int i;
232        int splitIndex =startIndex;
233        int numIndices = endIndex - startIndex;
234        btScalar splitValue;
235
236        btVector3 means(btScalar(0.),btScalar(0.),btScalar(0.));
237        for (i=startIndex;i<endIndex;i++)
238        {
239                btVector3 center = btScalar(0.5)*(getAabbMax(i)+getAabbMin(i));
240                means+=center;
241        }
242        means *= (btScalar(1.)/(btScalar)numIndices);
243       
244        splitValue = means[splitAxis];
245       
246        //sort leafNodes so all values larger than splitValue come first, and smaller values start from 'splitIndex'.
247        for (i=startIndex;i<endIndex;i++)
248        {
249                btVector3 center = btScalar(0.5)*(getAabbMax(i)+getAabbMin(i));
250                if (center[splitAxis] > splitValue)
251                {
252                        //swap
253                        swapLeafNodes(i,splitIndex);
254                        splitIndex++;
255                }
256        }
257
258        //if the splitIndex causes unbalanced trees, fix this by using the index halfway between startIndex and endIndex
259        //otherwise the tree-building might fail due to stack-overflows in certain cases.
260        //unbalanced1 is unsafe: it can cause stack overflows
261        //bool unbalanced1 = ((splitIndex==startIndex) || (splitIndex == (endIndex-1)));
262
263        //unbalanced2 should work too: always use center (perfect balanced trees)       
264        //bool unbalanced2 = true;
265
266        //this should be safe too:
267        int rangeBalancedIndices = numIndices/3;
268        bool unbalanced = ((splitIndex<=(startIndex+rangeBalancedIndices)) || (splitIndex >=(endIndex-1-rangeBalancedIndices)));
269       
270        if (unbalanced)
271        {
272                splitIndex = startIndex+ (numIndices>>1);
273        }
274
275        bool unbal = (splitIndex==startIndex) || (splitIndex == (endIndex));
276        (void)unbal;
277        btAssert(!unbal);
278
279        return splitIndex;
280}
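// Editor's note (not part of the original Bullet source): the partition above splits around
// the mean of the leaf AABB centers along the chosen axis. If that split lands in the outer
// third of the range, the code falls back to the median index, which keeps the recursion
// depth of buildTree roughly logarithmic in the number of leaves.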
281
282
283int     btQuantizedBvh::calcSplittingAxis(int startIndex,int endIndex)
284{
285        int i;
286
287        btVector3 means(btScalar(0.),btScalar(0.),btScalar(0.));
288        btVector3 variance(btScalar(0.),btScalar(0.),btScalar(0.));
289        int numIndices = endIndex-startIndex;
290
291        for (i=startIndex;i<endIndex;i++)
292        {
293                btVector3 center = btScalar(0.5)*(getAabbMax(i)+getAabbMin(i));
294                means+=center;
295        }
296        means *= (btScalar(1.)/(btScalar)numIndices);
297               
298        for (i=startIndex;i<endIndex;i++)
299        {
300                btVector3 center = btScalar(0.5)*(getAabbMax(i)+getAabbMin(i));
301                btVector3 diff2 = center-means;
302                diff2 = diff2 * diff2;
303                variance += diff2;
304        }
305        variance *= (btScalar(1.)/      ((btScalar)numIndices-1)        );
306       
307        return variance.maxAxis();
308}
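// Editor's note (not part of the original Bullet source): the splitting axis is simply the
// axis along which the leaf AABB centers have the largest variance, i.e. the direction in
// which the geometry is most spread out.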
309
310
311
312void    btQuantizedBvh::reportAabbOverlappingNodex(btNodeOverlapCallback* nodeCallback,const btVector3& aabbMin,const btVector3& aabbMax) const
313{
314        //either choose recursive traversal (walkTree) or stackless (walkStacklessTree)
315
316        if (m_useQuantization)
317        {
318                ///quantize query AABB
319                unsigned short int quantizedQueryAabbMin[3];
320                unsigned short int quantizedQueryAabbMax[3];
321                quantizeWithClamp(quantizedQueryAabbMin,aabbMin,0);
322                quantizeWithClamp(quantizedQueryAabbMax,aabbMax,1);
323
324                switch (m_traversalMode)
325                {
326                case TRAVERSAL_STACKLESS:
327                                walkStacklessQuantizedTree(nodeCallback,quantizedQueryAabbMin,quantizedQueryAabbMax,0,m_curNodeIndex);
328                        break;
329                case TRAVERSAL_STACKLESS_CACHE_FRIENDLY:
330                                walkStacklessQuantizedTreeCacheFriendly(nodeCallback,quantizedQueryAabbMin,quantizedQueryAabbMax);
331                        break;
332                case TRAVERSAL_RECURSIVE:
333                        {
334                                const btQuantizedBvhNode* rootNode = &m_quantizedContiguousNodes[0];
335                                walkRecursiveQuantizedTreeAgainstQueryAabb(rootNode,nodeCallback,quantizedQueryAabbMin,quantizedQueryAabbMax);
336                        }
337                        break;
338                default:
339                        //unsupported
340                        btAssert(0);
341                }
342        } else
343        {
344                walkStacklessTree(nodeCallback,aabbMin,aabbMax);
345        }
346}
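// Editor's note (not part of the original Bullet source): a minimal sketch of a node
// overlap callback, assuming the btNodeOverlapCallback interface declared in
// btQuantizedBvh.h (pure virtual processNode(int subPart, int triangleIndex)):
//
//     struct CountingNodeCallback : public btNodeOverlapCallback
//     {
//         int m_hitCount;
//         CountingNodeCallback() : m_hitCount(0) {}
//         virtual void processNode(int subPart, int triangleIndex)
//         {
//             (void)subPart; (void)triangleIndex;
//             ++m_hitCount;   // a real callback would fetch and test the triangle here
//         }
//     };
//
//     // usage: bvh->reportAabbOverlappingNodex(&callback, queryAabbMin, queryAabbMax);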
347
348
349int maxIterations = 0;
350
351
352void    btQuantizedBvh::walkStacklessTree(btNodeOverlapCallback* nodeCallback,const btVector3& aabbMin,const btVector3& aabbMax) const
353{
354        btAssert(!m_useQuantization);
355
356        const btOptimizedBvhNode* rootNode = &m_contiguousNodes[0];
357        int escapeIndex, curIndex = 0;
358        int walkIterations = 0;
359        bool isLeafNode;
360        //PCK: unsigned instead of bool
361        unsigned aabbOverlap;
362
363        while (curIndex < m_curNodeIndex)
364        {
365                //catch bugs in tree data
366                btAssert (walkIterations < m_curNodeIndex);
367
368                walkIterations++;
369                aabbOverlap = TestAabbAgainstAabb2(aabbMin,aabbMax,rootNode->m_aabbMinOrg,rootNode->m_aabbMaxOrg);
370                isLeafNode = rootNode->m_escapeIndex == -1;
371               
372                //PCK: unsigned instead of bool
373                if (isLeafNode && (aabbOverlap != 0))
374                {
375                        nodeCallback->processNode(rootNode->m_subPart,rootNode->m_triangleIndex);
376                } 
377               
378                //PCK: unsigned instead of bool
379                if ((aabbOverlap != 0) || isLeafNode)
380                {
381                        rootNode++;
382                        curIndex++;
383                } else
384                {
385                        escapeIndex = rootNode->m_escapeIndex;
386                        rootNode += escapeIndex;
387                        curIndex += escapeIndex;
388                }
389        }
390        if (maxIterations < walkIterations)
391                maxIterations = walkIterations;
392
393}
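// Editor's note (not part of the original Bullet source): the stackless walk visits the
// nodes in their depth-first array order. The rule is: if the node overlaps the query (or
// is a leaf), step to the next array element; otherwise jump forward by the node's escape
// index, which lands exactly on the first node after the rejected subtree.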
394
395/*
396///this was the original recursive traversal, before we optimized towards stackless traversal
397void    btQuantizedBvh::walkTree(btOptimizedBvhNode* rootNode,btNodeOverlapCallback* nodeCallback,const btVector3& aabbMin,const btVector3& aabbMax) const
398{
399        bool isLeafNode, aabbOverlap = TestAabbAgainstAabb2(aabbMin,aabbMax,rootNode->m_aabbMin,rootNode->m_aabbMax);
400        if (aabbOverlap)
401        {
402                isLeafNode = (!rootNode->m_leftChild && !rootNode->m_rightChild);
403                if (isLeafNode)
404                {
405                        nodeCallback->processNode(rootNode);
406                } else
407                {
408                        walkTree(rootNode->m_leftChild,nodeCallback,aabbMin,aabbMax);
409                        walkTree(rootNode->m_rightChild,nodeCallback,aabbMin,aabbMax);
410                }
411        }
412
413}
414*/
415
416void btQuantizedBvh::walkRecursiveQuantizedTreeAgainstQueryAabb(const btQuantizedBvhNode* currentNode,btNodeOverlapCallback* nodeCallback,unsigned short int* quantizedQueryAabbMin,unsigned short int* quantizedQueryAabbMax) const
417{
418        btAssert(m_useQuantization);
419       
420        bool isLeafNode;
421        //PCK: unsigned instead of bool
422        unsigned aabbOverlap;
423
424        //PCK: unsigned instead of bool
425        aabbOverlap = testQuantizedAabbAgainstQuantizedAabb(quantizedQueryAabbMin,quantizedQueryAabbMax,currentNode->m_quantizedAabbMin,currentNode->m_quantizedAabbMax);
426        isLeafNode = currentNode->isLeafNode();
427               
428        //PCK: unsigned instead of bool
429        if (aabbOverlap != 0)
430        {
431                if (isLeafNode)
432                {
433                        nodeCallback->processNode(currentNode->getPartId(),currentNode->getTriangleIndex());
434                } else
435                {
436                        //process left and right children
437                        const btQuantizedBvhNode* leftChildNode = currentNode+1;
438                        walkRecursiveQuantizedTreeAgainstQueryAabb(leftChildNode,nodeCallback,quantizedQueryAabbMin,quantizedQueryAabbMax);
439
440                        const btQuantizedBvhNode* rightChildNode = leftChildNode->isLeafNode() ? leftChildNode+1:leftChildNode+leftChildNode->getEscapeIndex();
441                        walkRecursiveQuantizedTreeAgainstQueryAabb(rightChildNode,nodeCallback,quantizedQueryAabbMin,quantizedQueryAabbMax);
442                }
443        }               
444}
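// Editor's note (not part of the original Bullet source): the recursive variant relies on
// the implicit layout of the contiguous node array: the left child of a node is always the
// next element (currentNode+1), and the right child starts right after the left subtree,
// i.e. at leftChild+1 for a leaf or leftChild+leftChild->getEscapeIndex() otherwise.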
445
446
447
448void    btQuantizedBvh::walkStacklessTreeAgainstRay(btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget, const btVector3& aabbMin, const btVector3& aabbMax, int startNodeIndex,int endNodeIndex) const
449{
450        btAssert(!m_useQuantization);
451
452        const btOptimizedBvhNode* rootNode = &m_contiguousNodes[0];
453        int escapeIndex, curIndex = 0;
454        int walkIterations = 0;
455        bool isLeafNode;
456        //PCK: unsigned instead of bool
457        unsigned aabbOverlap=0;
458        unsigned rayBoxOverlap=0;
459        btScalar lambda_max = 1.0;
460       
461                /* Quick pruning by quantized box */
462        btVector3 rayAabbMin = raySource;
463        btVector3 rayAabbMax = raySource;
464        rayAabbMin.setMin(rayTarget);
465        rayAabbMax.setMax(rayTarget);
466
467        /* Add box cast extents to bounding box */
468        rayAabbMin += aabbMin;
469        rayAabbMax += aabbMax;
470
471#ifdef RAYAABB2
472        btVector3 rayFrom = raySource;
473        btVector3 rayDir = (rayTarget-raySource);
474        rayDir.normalize ();
475        lambda_max = rayDir.dot(rayTarget-raySource);
476        ///guard against division by zero: a zero direction component gets a huge inverse (1e30) instead
477        btVector3 rayDirectionInverse;
478        rayDirectionInverse[0] = rayDir[0] == btScalar(0.0) ? btScalar(1e30) : btScalar(1.0) / rayDir[0];
479        rayDirectionInverse[1] = rayDir[1] == btScalar(0.0) ? btScalar(1e30) : btScalar(1.0) / rayDir[1];
480        rayDirectionInverse[2] = rayDir[2] == btScalar(0.0) ? btScalar(1e30) : btScalar(1.0) / rayDir[2];
481        unsigned int sign[3] = { rayDirectionInverse[0] < 0.0, rayDirectionInverse[1] < 0.0, rayDirectionInverse[2] < 0.0};
482#endif
483
484        btVector3 bounds[2];
485
486        while (curIndex < m_curNodeIndex)
487        {
488                btScalar param = 1.0;
489                //catch bugs in tree data
490                btAssert (walkIterations < m_curNodeIndex);
491
492                walkIterations++;
493
494                bounds[0] = rootNode->m_aabbMinOrg;
495                bounds[1] = rootNode->m_aabbMaxOrg;
496                /* Add box cast extents */
497                bounds[0] += aabbMin;
498                bounds[1] += aabbMax;
499
500                aabbOverlap = TestAabbAgainstAabb2(rayAabbMin,rayAabbMax,rootNode->m_aabbMinOrg,rootNode->m_aabbMaxOrg);
501                //perhaps profile if it is worth doing the aabbOverlap test first
502
503#ifdef RAYAABB2
504                        ///careful with this check: need to check division by zero (above) and fix the unQuantize method
505                        ///thanks Joerg/hiker for the reproduction case!
506                        ///http://www.bulletphysics.com/Bullet/phpBB3/viewtopic.php?f=9&t=1858
507                rayBoxOverlap = aabbOverlap ? btRayAabb2 (raySource, rayDirectionInverse, sign, bounds, param, 0.0f, lambda_max) : false;
508
509#else
510                btVector3 normal;
511                rayBoxOverlap = btRayAabb(raySource, rayTarget,bounds[0],bounds[1],param, normal);
512#endif
513
514                isLeafNode = rootNode->m_escapeIndex == -1;
515               
516                //PCK: unsigned instead of bool
517                if (isLeafNode && (rayBoxOverlap != 0))
518                {
519                        nodeCallback->processNode(rootNode->m_subPart,rootNode->m_triangleIndex);
520                } 
521               
522                //PCK: unsigned instead of bool
523                if ((rayBoxOverlap != 0) || isLeafNode)
524                {
525                        rootNode++;
526                        curIndex++;
527                } else
528                {
529                        escapeIndex = rootNode->m_escapeIndex;
530                        rootNode += escapeIndex;
531                        curIndex += escapeIndex;
532                }
533        }
534        if (maxIterations < walkIterations)
535                maxIterations = walkIterations;
536
537}
538
539
540
541void    btQuantizedBvh::walkStacklessQuantizedTreeAgainstRay(btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget, const btVector3& aabbMin, const btVector3& aabbMax, int startNodeIndex,int endNodeIndex) const
542{
543        btAssert(m_useQuantization);
544       
545        int curIndex = startNodeIndex;
546        int walkIterations = 0;
547        int subTreeSize = endNodeIndex - startNodeIndex;
548        (void)subTreeSize;
549
550        const btQuantizedBvhNode* rootNode = &m_quantizedContiguousNodes[startNodeIndex];
551        int escapeIndex;
552       
553        bool isLeafNode;
554        //PCK: unsigned instead of bool
555        unsigned boxBoxOverlap = 0;
556        unsigned rayBoxOverlap = 0;
557
558        btScalar lambda_max = 1.0;
559
560#ifdef RAYAABB2
561        btVector3 rayFrom = raySource;
562        btVector3 rayDirection = (rayTarget-raySource);
563        rayDirection.normalize ();
564        lambda_max = rayDirection.dot(rayTarget-raySource);
565        ///guard against division by zero: a zero direction component gets a huge inverse (1e30) instead
566        rayDirection[0] = rayDirection[0] == btScalar(0.0) ? btScalar(1e30) : btScalar(1.0) / rayDirection[0];
567        rayDirection[1] = rayDirection[1] == btScalar(0.0) ? btScalar(1e30) : btScalar(1.0) / rayDirection[1];
568        rayDirection[2] = rayDirection[2] == btScalar(0.0) ? btScalar(1e30) : btScalar(1.0) / rayDirection[2];
569        unsigned int sign[3] = { rayDirection[0] < 0.0, rayDirection[1] < 0.0, rayDirection[2] < 0.0};
570#endif
571
572        /* Quick pruning by quantized box */
573        btVector3 rayAabbMin = raySource;
574        btVector3 rayAabbMax = raySource;
575        rayAabbMin.setMin(rayTarget);
576        rayAabbMax.setMax(rayTarget);
577
578        /* Add box cast extents to bounding box */
579        rayAabbMin += aabbMin;
580        rayAabbMax += aabbMax;
581
582        unsigned short int quantizedQueryAabbMin[3];
583        unsigned short int quantizedQueryAabbMax[3];
584        quantizeWithClamp(quantizedQueryAabbMin,rayAabbMin,0);
585        quantizeWithClamp(quantizedQueryAabbMax,rayAabbMax,1);
586
587        while (curIndex < endNodeIndex)
588        {
589
590//#define VISUALLY_ANALYZE_BVH 1
591#ifdef VISUALLY_ANALYZE_BVH
592                //some code snippet to debugDraw aabb, to visually analyze bvh structure
593                static int drawPatch = 0;
594                //need some global access to a debugDrawer
595                extern btIDebugDraw* debugDrawerPtr;
596                if (curIndex==drawPatch)
597                {
598                        btVector3 aabbMin,aabbMax;
599                        aabbMin = unQuantize(rootNode->m_quantizedAabbMin);
600                        aabbMax = unQuantize(rootNode->m_quantizedAabbMax);
601                        btVector3       color(1,0,0);
602                        debugDrawerPtr->drawAabb(aabbMin,aabbMax,color);
603                }
604#endif//VISUALLY_ANALYZE_BVH
605
606                //catch bugs in tree data
607                btAssert (walkIterations < subTreeSize);
608
609                walkIterations++;
610                //PCK: unsigned instead of bool
611                // only interested if this is closer than any previous hit
612                btScalar param = 1.0;
613                rayBoxOverlap = 0;
614                boxBoxOverlap = testQuantizedAabbAgainstQuantizedAabb(quantizedQueryAabbMin,quantizedQueryAabbMax,rootNode->m_quantizedAabbMin,rootNode->m_quantizedAabbMax);
615                isLeafNode = rootNode->isLeafNode();
616                if (boxBoxOverlap)
617                {
618                        btVector3 bounds[2];
619                        bounds[0] = unQuantize(rootNode->m_quantizedAabbMin);
620                        bounds[1] = unQuantize(rootNode->m_quantizedAabbMax);
621                        /* Add box cast extents */
622                        bounds[0] += aabbMin;
623                        bounds[1] += aabbMax;
624                        btVector3 normal;
625#if 0
626                        bool ra2 = btRayAabb2 (raySource, rayDirection, sign, bounds, param, 0.0, lambda_max);
627                        bool ra = btRayAabb (raySource, rayTarget, bounds[0], bounds[1], param, normal);
628                        if (ra2 != ra)
629                        {
630                                printf("functions don't match\n");
631                        }
632#endif
633#ifdef RAYAABB2
634                        ///careful with this check: need to check division by zero (above) and fix the unQuantize method
635                        ///thanks Joerg/hiker for the reproduction case!
636                        ///http://www.bulletphysics.com/Bullet/phpBB3/viewtopic.php?f=9&t=1858
637
638                        //BT_PROFILE("btRayAabb2");
639                        rayBoxOverlap = btRayAabb2 (raySource, rayDirection, sign, bounds, param, 0.0f, lambda_max);
640                       
641#else
642                        rayBoxOverlap = true;//btRayAabb(raySource, rayTarget, bounds[0], bounds[1], param, normal);
643#endif
644                }
645               
646                if (isLeafNode && rayBoxOverlap)
647                {
648                        nodeCallback->processNode(rootNode->getPartId(),rootNode->getTriangleIndex());
649                }
650               
651                //PCK: unsigned instead of bool
652                if ((rayBoxOverlap != 0) || isLeafNode)
653                {
654                        rootNode++;
655                        curIndex++;
656                } else
657                {
658                        escapeIndex = rootNode->getEscapeIndex();
659                        rootNode += escapeIndex;
660                        curIndex += escapeIndex;
661                }
662        }
663        if (maxIterations < walkIterations)
664                maxIterations = walkIterations;
665
666}
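// Editor's note (not part of the original Bullet source): with RAYAABB2 enabled, the ray
// test uses the "slab" method of btRayAabb2() from btAabbUtil2.h: the component-wise
// inverse of the ray direction and its sign bits are precomputed once per query, and zero
// direction components are replaced by a huge inverse (1e30) so no division by zero occurs
// inside the per-node test.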
667
668void    btQuantizedBvh::walkStacklessQuantizedTree(btNodeOverlapCallback* nodeCallback,unsigned short int* quantizedQueryAabbMin,unsigned short int* quantizedQueryAabbMax,int startNodeIndex,int endNodeIndex) const
669{
670        btAssert(m_useQuantization);
671       
672        int curIndex = startNodeIndex;
673        int walkIterations = 0;
674        int subTreeSize = endNodeIndex - startNodeIndex;
675        (void)subTreeSize;
676
677        const btQuantizedBvhNode* rootNode = &m_quantizedContiguousNodes[startNodeIndex];
678        int escapeIndex;
679       
680        bool isLeafNode;
681        //PCK: unsigned instead of bool
682        unsigned aabbOverlap;
683
684        while (curIndex < endNodeIndex)
685        {
686
687//#define VISUALLY_ANALYZE_BVH 1
688#ifdef VISUALLY_ANALYZE_BVH
689                //some code snippet to debugDraw aabb, to visually analyze bvh structure
690                static int drawPatch = 0;
691                //need some global access to a debugDrawer
692                extern btIDebugDraw* debugDrawerPtr;
693                if (curIndex==drawPatch)
694                {
695                        btVector3 aabbMin,aabbMax;
696                        aabbMin = unQuantize(rootNode->m_quantizedAabbMin);
697                        aabbMax = unQuantize(rootNode->m_quantizedAabbMax);
698                        btVector3       color(1,0,0);
699                        debugDrawerPtr->drawAabb(aabbMin,aabbMax,color);
700                }
701#endif//VISUALLY_ANALYZE_BVH
702
703                //catch bugs in tree data
704                btAssert (walkIterations < subTreeSize);
705
706                walkIterations++;
707                //PCK: unsigned instead of bool
708                aabbOverlap = testQuantizedAabbAgainstQuantizedAabb(quantizedQueryAabbMin,quantizedQueryAabbMax,rootNode->m_quantizedAabbMin,rootNode->m_quantizedAabbMax);
709                isLeafNode = rootNode->isLeafNode();
710               
711                if (isLeafNode && aabbOverlap)
712                {
713                        nodeCallback->processNode(rootNode->getPartId(),rootNode->getTriangleIndex());
714                } 
715               
716                //PCK: unsigned instead of bool
717                if ((aabbOverlap != 0) || isLeafNode)
718                {
719                        rootNode++;
720                        curIndex++;
721                } else
722                {
723                        escapeIndex = rootNode->getEscapeIndex();
724                        rootNode += escapeIndex;
725                        curIndex += escapeIndex;
726                }
727        }
728        if (maxIterations < walkIterations)
729                maxIterations = walkIterations;
730
731}
732
733//This traversal can be called from Playstation 3 SPU
734void    btQuantizedBvh::walkStacklessQuantizedTreeCacheFriendly(btNodeOverlapCallback* nodeCallback,unsigned short int* quantizedQueryAabbMin,unsigned short int* quantizedQueryAabbMax) const
735{
736        btAssert(m_useQuantization);
737
738        int i;
739
740
741        for (i=0;i<this->m_SubtreeHeaders.size();i++)
742        {
743                const btBvhSubtreeInfo& subtree = m_SubtreeHeaders[i];
744
745                //PCK: unsigned instead of bool
746                unsigned overlap = testQuantizedAabbAgainstQuantizedAabb(quantizedQueryAabbMin,quantizedQueryAabbMax,subtree.m_quantizedAabbMin,subtree.m_quantizedAabbMax);
747                if (overlap != 0)
748                {
749                        walkStacklessQuantizedTree(nodeCallback,quantizedQueryAabbMin,quantizedQueryAabbMax,
750                                subtree.m_rootNodeIndex,
751                                subtree.m_rootNodeIndex+subtree.m_subtreeSize);
752                }
753        }
754}
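// Editor's note (not part of the original Bullet source): the cache-friendly variant is a
// two-level traversal: first cull whole subtrees by testing the query AABB against each
// subtree header's quantized AABB, then run the regular stackless walk only over the small,
// contiguous node range of each surviving subtree. This is what makes the data usable from
// the limited local store of a Playstation 3 SPU.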
755
756
757void    btQuantizedBvh::reportRayOverlappingNodex (btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget) const
758{
759        reportBoxCastOverlappingNodex(nodeCallback,raySource,rayTarget,btVector3(0,0,0),btVector3(0,0,0));
760}
761
762
763void    btQuantizedBvh::reportBoxCastOverlappingNodex(btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget, const btVector3& aabbMin,const btVector3& aabbMax) const
764{
765        //always use stackless
766
767        if (m_useQuantization)
768        {
769                walkStacklessQuantizedTreeAgainstRay(nodeCallback, raySource, rayTarget, aabbMin, aabbMax, 0, m_curNodeIndex);
770        }
771        else
772        {
773                walkStacklessTreeAgainstRay(nodeCallback, raySource, rayTarget, aabbMin, aabbMax, 0, m_curNodeIndex);
774        }
775        /*
776        {
777                //recursive traversal
778                btVector3 qaabbMin = raySource;
779                btVector3 qaabbMax = raySource;
780                qaabbMin.setMin(rayTarget);
781                qaabbMax.setMax(rayTarget);
782                qaabbMin += aabbMin;
783                qaabbMax += aabbMax;
784                reportAabbOverlappingNodex(nodeCallback,qaabbMin,qaabbMax);
785        }
786        */
787
788}
789
790
791void    btQuantizedBvh::swapLeafNodes(int i,int splitIndex)
792{
793        if (m_useQuantization)
794        {
795                        btQuantizedBvhNode tmp = m_quantizedLeafNodes[i];
796                        m_quantizedLeafNodes[i] = m_quantizedLeafNodes[splitIndex];
797                        m_quantizedLeafNodes[splitIndex] = tmp;
798        } else
799        {
800                        btOptimizedBvhNode tmp = m_leafNodes[i];
801                        m_leafNodes[i] = m_leafNodes[splitIndex];
802                        m_leafNodes[splitIndex] = tmp;
803        }
804}
805
806void    btQuantizedBvh::assignInternalNodeFromLeafNode(int internalNode,int leafNodeIndex)
807{
808        if (m_useQuantization)
809        {
810                m_quantizedContiguousNodes[internalNode] = m_quantizedLeafNodes[leafNodeIndex];
811        } else
812        {
813                m_contiguousNodes[internalNode] = m_leafNodes[leafNodeIndex];
814        }
815}
816
817//PCK: include
818#include <new>
819
820//PCK: consts
821static const unsigned BVH_ALIGNMENT = 16;
822static const unsigned BVH_ALIGNMENT_MASK = BVH_ALIGNMENT-1;
823
824static const unsigned BVH_ALIGNMENT_BLOCKS = 2;
825
826
827
828unsigned int btQuantizedBvh::getAlignmentSerializationPadding()
829{
830        // I changed this to 0 since the extra padding is not needed or used.
831        return 0;//BVH_ALIGNMENT_BLOCKS * BVH_ALIGNMENT;
832}
833
834unsigned btQuantizedBvh::calculateSerializeBufferSize()
835{
836        unsigned baseSize = sizeof(btQuantizedBvh) + getAlignmentSerializationPadding();
837        baseSize += sizeof(btBvhSubtreeInfo) * m_subtreeHeaderCount;
838        if (m_useQuantization)
839        {
840                return baseSize + m_curNodeIndex * sizeof(btQuantizedBvhNode);
841        }
842        return baseSize + m_curNodeIndex * sizeof(btOptimizedBvhNode);
843}
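// Editor's note (not part of the original Bullet source): the serialized image is laid out
// as [btQuantizedBvh object][node array][subtree header array] inside one caller-provided,
// 16-byte-aligned buffer. A hedged usage sketch, assuming btAlignedAlloc/btAlignedFree from
// LinearMath/btAlignedAllocator.h:
//
//     unsigned bufferSize = bvh->calculateSerializeBufferSize();
//     void* buffer = btAlignedAlloc(bufferSize, 16);
//     bvh->serialize(buffer, bufferSize, false /*i_swapEndian*/);
//     // ...store the buffer, then later reuse it in place:
//     btQuantizedBvh* restored =
//         btQuantizedBvh::deSerializeInPlace(buffer, bufferSize, false /*i_swapEndian*/);
//     // 'restored' aliases 'buffer'; free it with btAlignedFree(buffer) when done.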
844
845bool btQuantizedBvh::serialize(void *o_alignedDataBuffer, unsigned /*i_dataBufferSize */, bool i_swapEndian)
846{
847        btAssert(m_subtreeHeaderCount == m_SubtreeHeaders.size());
848        m_subtreeHeaderCount = m_SubtreeHeaders.size();
849
850/*      if (i_dataBufferSize < calculateSerializeBufferSize() || o_alignedDataBuffer == NULL || (((unsigned)o_alignedDataBuffer & BVH_ALIGNMENT_MASK) != 0))
851        {
852                ///check alignment of the buffer?
853                btAssert(0);
854                return false;
855        }
856*/
857
858        btQuantizedBvh *targetBvh = (btQuantizedBvh *)o_alignedDataBuffer;
859
860        // construct the class so the virtual function table, etc will be set up
861        // Also, m_leafNodes and m_quantizedLeafNodes will be initialized to default values by the constructor
862        new (targetBvh) btQuantizedBvh;
863
864        if (i_swapEndian)
865        {
866                targetBvh->m_curNodeIndex = static_cast<int>(btSwapEndian(m_curNodeIndex));
867
868
869                btSwapVector3Endian(m_bvhAabbMin,targetBvh->m_bvhAabbMin);
870                btSwapVector3Endian(m_bvhAabbMax,targetBvh->m_bvhAabbMax);
871                btSwapVector3Endian(m_bvhQuantization,targetBvh->m_bvhQuantization);
872
873                targetBvh->m_traversalMode = (btTraversalMode)btSwapEndian(m_traversalMode);
874                targetBvh->m_subtreeHeaderCount = static_cast<int>(btSwapEndian(m_subtreeHeaderCount));
875        }
876        else
877        {
878                targetBvh->m_curNodeIndex = m_curNodeIndex;
879                targetBvh->m_bvhAabbMin = m_bvhAabbMin;
880                targetBvh->m_bvhAabbMax = m_bvhAabbMax;
881                targetBvh->m_bvhQuantization = m_bvhQuantization;
882                targetBvh->m_traversalMode = m_traversalMode;
883                targetBvh->m_subtreeHeaderCount = m_subtreeHeaderCount;
884        }
885
886        targetBvh->m_useQuantization = m_useQuantization;
887
888        unsigned char *nodeData = (unsigned char *)targetBvh;
889        nodeData += sizeof(btQuantizedBvh);
890       
891        unsigned sizeToAdd = 0;//(BVH_ALIGNMENT-((unsigned)nodeData & BVH_ALIGNMENT_MASK))&BVH_ALIGNMENT_MASK;
892        nodeData += sizeToAdd;
893       
894        int nodeCount = m_curNodeIndex;
895
896        if (m_useQuantization)
897        {
898                targetBvh->m_quantizedContiguousNodes.initializeFromBuffer(nodeData, nodeCount, nodeCount);
899
900                if (i_swapEndian)
901                {
902                        for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
903                        {
904                                targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0] = btSwapEndian(m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0]);
905                                targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1] = btSwapEndian(m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1]);
906                                targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2] = btSwapEndian(m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2]);
907
908                                targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0] = btSwapEndian(m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0]);
909                                targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1] = btSwapEndian(m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1]);
910                                targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2] = btSwapEndian(m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2]);
911
912                                targetBvh->m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex = static_cast<int>(btSwapEndian(m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex));
913                        }
914                }
915                else
916                {
917                        for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
918                        {
919       
920                                targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0];
921                                targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1];
922                                targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2];
923
924                                targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0];
925                                targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1];
926                                targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2];
927
928                                targetBvh->m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex = m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex;
929
930
931                        }
932                }
933                nodeData += sizeof(btQuantizedBvhNode) * nodeCount;
934
935        // this clears the pointer in the member variable; it doesn't really do anything to the data
936                // it does call the destructor on the contained objects, but they are all classes with no destructor defined
937                // so the memory (which is not freed) is left alone
938                targetBvh->m_quantizedContiguousNodes.initializeFromBuffer(NULL, 0, 0);
939        }
940        else
941        {
942                targetBvh->m_contiguousNodes.initializeFromBuffer(nodeData, nodeCount, nodeCount);
943
944                if (i_swapEndian)
945                {
946                        for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
947                        {
948                                btSwapVector3Endian(m_contiguousNodes[nodeIndex].m_aabbMinOrg, targetBvh->m_contiguousNodes[nodeIndex].m_aabbMinOrg);
949                                btSwapVector3Endian(m_contiguousNodes[nodeIndex].m_aabbMaxOrg, targetBvh->m_contiguousNodes[nodeIndex].m_aabbMaxOrg);
950
951                                targetBvh->m_contiguousNodes[nodeIndex].m_escapeIndex = static_cast<int>(btSwapEndian(m_contiguousNodes[nodeIndex].m_escapeIndex));
952                                targetBvh->m_contiguousNodes[nodeIndex].m_subPart = static_cast<int>(btSwapEndian(m_contiguousNodes[nodeIndex].m_subPart));
953                                targetBvh->m_contiguousNodes[nodeIndex].m_triangleIndex = static_cast<int>(btSwapEndian(m_contiguousNodes[nodeIndex].m_triangleIndex));
954                        }
955                }
956                else
957                {
958                        for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
959                        {
960                                targetBvh->m_contiguousNodes[nodeIndex].m_aabbMinOrg = m_contiguousNodes[nodeIndex].m_aabbMinOrg;
961                                targetBvh->m_contiguousNodes[nodeIndex].m_aabbMaxOrg = m_contiguousNodes[nodeIndex].m_aabbMaxOrg;
962
963                                targetBvh->m_contiguousNodes[nodeIndex].m_escapeIndex = m_contiguousNodes[nodeIndex].m_escapeIndex;
964                                targetBvh->m_contiguousNodes[nodeIndex].m_subPart = m_contiguousNodes[nodeIndex].m_subPart;
965                                targetBvh->m_contiguousNodes[nodeIndex].m_triangleIndex = m_contiguousNodes[nodeIndex].m_triangleIndex;
966                        }
967                }
968                nodeData += sizeof(btOptimizedBvhNode) * nodeCount;
969
970        // this clears the pointer in the member variable; it doesn't really do anything to the data
971                // it does call the destructor on the contained objects, but they are all classes with no destructor defined
972                // so the memory (which is not freed) is left alone
973                targetBvh->m_contiguousNodes.initializeFromBuffer(NULL, 0, 0);
974        }
975
976        sizeToAdd = 0;//(BVH_ALIGNMENT-((unsigned)nodeData & BVH_ALIGNMENT_MASK))&BVH_ALIGNMENT_MASK;
977        nodeData += sizeToAdd;
978
979        // Now serialize the subtree headers
980        targetBvh->m_SubtreeHeaders.initializeFromBuffer(nodeData, m_subtreeHeaderCount, m_subtreeHeaderCount);
981        if (i_swapEndian)
982        {
983                for (int i = 0; i < m_subtreeHeaderCount; i++)
984                {
985                        targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMin[0] = btSwapEndian(m_SubtreeHeaders[i].m_quantizedAabbMin[0]);
986                        targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMin[1] = btSwapEndian(m_SubtreeHeaders[i].m_quantizedAabbMin[1]);
987                        targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMin[2] = btSwapEndian(m_SubtreeHeaders[i].m_quantizedAabbMin[2]);
988
989                        targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMax[0] = btSwapEndian(m_SubtreeHeaders[i].m_quantizedAabbMax[0]);
990                        targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMax[1] = btSwapEndian(m_SubtreeHeaders[i].m_quantizedAabbMax[1]);
991                        targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMax[2] = btSwapEndian(m_SubtreeHeaders[i].m_quantizedAabbMax[2]);
992
993                        targetBvh->m_SubtreeHeaders[i].m_rootNodeIndex = static_cast<int>(btSwapEndian(m_SubtreeHeaders[i].m_rootNodeIndex));
994                        targetBvh->m_SubtreeHeaders[i].m_subtreeSize = static_cast<int>(btSwapEndian(m_SubtreeHeaders[i].m_subtreeSize));
995                }
996        }
997        else
998        {
999                for (int i = 0; i < m_subtreeHeaderCount; i++)
1000                {
1001                        targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMin[0] = (m_SubtreeHeaders[i].m_quantizedAabbMin[0]);
1002                        targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMin[1] = (m_SubtreeHeaders[i].m_quantizedAabbMin[1]);
1003                        targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMin[2] = (m_SubtreeHeaders[i].m_quantizedAabbMin[2]);
1004
1005                        targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMax[0] = (m_SubtreeHeaders[i].m_quantizedAabbMax[0]);
1006                        targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMax[1] = (m_SubtreeHeaders[i].m_quantizedAabbMax[1]);
1007                        targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMax[2] = (m_SubtreeHeaders[i].m_quantizedAabbMax[2]);
1008
1009                        targetBvh->m_SubtreeHeaders[i].m_rootNodeIndex = (m_SubtreeHeaders[i].m_rootNodeIndex);
1010                        targetBvh->m_SubtreeHeaders[i].m_subtreeSize = (m_SubtreeHeaders[i].m_subtreeSize);
1011
1012                        // need to clear padding in destination buffer
1013                        targetBvh->m_SubtreeHeaders[i].m_padding[0] = 0;
1014                        targetBvh->m_SubtreeHeaders[i].m_padding[1] = 0;
1015                        targetBvh->m_SubtreeHeaders[i].m_padding[2] = 0;
1016                }
1017        }
1018        nodeData += sizeof(btBvhSubtreeInfo) * m_subtreeHeaderCount;
1019
1020        // this clears the pointer in the member variable; it doesn't really do anything to the data
1021        // it does call the destructor on the contained objects, but they are all classes with no destructor defined
1022        // so the memory (which is not freed) is left alone
1023        targetBvh->m_SubtreeHeaders.initializeFromBuffer(NULL, 0, 0);
1024
1025        // this wipes the virtual function table pointer at the start of the buffer for the class
1026        *((void**)o_alignedDataBuffer) = NULL;
1027
1028        return true;
1029}
1030
1031btQuantizedBvh *btQuantizedBvh::deSerializeInPlace(void *i_alignedDataBuffer, unsigned int i_dataBufferSize, bool i_swapEndian)
1032{
1033
1034        if (i_alignedDataBuffer == NULL)// || (((unsigned)i_alignedDataBuffer & BVH_ALIGNMENT_MASK) != 0))
1035        {
1036                return NULL;
1037        }
1038        btQuantizedBvh *bvh = (btQuantizedBvh *)i_alignedDataBuffer;
1039
1040        if (i_swapEndian)
1041        {
1042                bvh->m_curNodeIndex = static_cast<int>(btSwapEndian(bvh->m_curNodeIndex));
1043
1044                btUnSwapVector3Endian(bvh->m_bvhAabbMin);
1045                btUnSwapVector3Endian(bvh->m_bvhAabbMax);
1046                btUnSwapVector3Endian(bvh->m_bvhQuantization);
1047
1048                bvh->m_traversalMode = (btTraversalMode)btSwapEndian(bvh->m_traversalMode);
1049                bvh->m_subtreeHeaderCount = static_cast<int>(btSwapEndian(bvh->m_subtreeHeaderCount));
1050        }
1051
1052        unsigned int calculatedBufSize = bvh->calculateSerializeBufferSize();
1053        btAssert(calculatedBufSize <= i_dataBufferSize);
1054
1055        if (calculatedBufSize > i_dataBufferSize)
1056        {
1057                return NULL;
1058        }
1059
1060        unsigned char *nodeData = (unsigned char *)bvh;
1061        nodeData += sizeof(btQuantizedBvh);
1062       
1063        unsigned sizeToAdd = 0;//(BVH_ALIGNMENT-((unsigned)nodeData & BVH_ALIGNMENT_MASK))&BVH_ALIGNMENT_MASK;
1064        nodeData += sizeToAdd;
1065       
1066        int nodeCount = bvh->m_curNodeIndex;
1067
1068        // Must call placement new to fill in virtual function table, etc, but we don't want to overwrite most data, so call a special version of the constructor
1069        // Also, m_leafNodes and m_quantizedLeafNodes will be initialized to default values by the constructor
1070        new (bvh) btQuantizedBvh(*bvh, false);
1071
1072        if (bvh->m_useQuantization)
1073        {
1074                bvh->m_quantizedContiguousNodes.initializeFromBuffer(nodeData, nodeCount, nodeCount);
1075
1076                if (i_swapEndian)
1077                {
1078                        for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
1079                        {
1080                                bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0] = btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0]);
1081                                bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1] = btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1]);
1082                                bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2] = btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2]);
1083
1084                                bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0] = btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0]);
1085                                bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1] = btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1]);
1086                                bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2] = btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2]);
1087
1088                                bvh->m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex = static_cast<int>(btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex));
1089                        }
1090                }
1091                nodeData += sizeof(btQuantizedBvhNode) * nodeCount;
1092        }
1093        else
1094        {
1095                bvh->m_contiguousNodes.initializeFromBuffer(nodeData, nodeCount, nodeCount);
1096
1097                if (i_swapEndian)
1098                {
1099                        for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
1100                        {
1101                                btUnSwapVector3Endian(bvh->m_contiguousNodes[nodeIndex].m_aabbMinOrg);
1102                                btUnSwapVector3Endian(bvh->m_contiguousNodes[nodeIndex].m_aabbMaxOrg);
1103                               
1104                                bvh->m_contiguousNodes[nodeIndex].m_escapeIndex = static_cast<int>(btSwapEndian(bvh->m_contiguousNodes[nodeIndex].m_escapeIndex));
1105                                bvh->m_contiguousNodes[nodeIndex].m_subPart = static_cast<int>(btSwapEndian(bvh->m_contiguousNodes[nodeIndex].m_subPart));
1106                                bvh->m_contiguousNodes[nodeIndex].m_triangleIndex = static_cast<int>(btSwapEndian(bvh->m_contiguousNodes[nodeIndex].m_triangleIndex));
1107                        }
1108                }
1109                nodeData += sizeof(btOptimizedBvhNode) * nodeCount;
1110        }
1111
1112        sizeToAdd = 0;//(BVH_ALIGNMENT-((unsigned)nodeData & BVH_ALIGNMENT_MASK))&BVH_ALIGNMENT_MASK;
1113        nodeData += sizeToAdd;
1114
1115        // Now de-serialize the subtree headers
1116        bvh->m_SubtreeHeaders.initializeFromBuffer(nodeData, bvh->m_subtreeHeaderCount, bvh->m_subtreeHeaderCount);
1117        if (i_swapEndian)
1118        {
1119                for (int i = 0; i < bvh->m_subtreeHeaderCount; i++)
1120                {
1121                        bvh->m_SubtreeHeaders[i].m_quantizedAabbMin[0] = btSwapEndian(bvh->m_SubtreeHeaders[i].m_quantizedAabbMin[0]);
1122                        bvh->m_SubtreeHeaders[i].m_quantizedAabbMin[1] = btSwapEndian(bvh->m_SubtreeHeaders[i].m_quantizedAabbMin[1]);
1123                        bvh->m_SubtreeHeaders[i].m_quantizedAabbMin[2] = btSwapEndian(bvh->m_SubtreeHeaders[i].m_quantizedAabbMin[2]);
1124
1125                        bvh->m_SubtreeHeaders[i].m_quantizedAabbMax[0] = btSwapEndian(bvh->m_SubtreeHeaders[i].m_quantizedAabbMax[0]);
1126                        bvh->m_SubtreeHeaders[i].m_quantizedAabbMax[1] = btSwapEndian(bvh->m_SubtreeHeaders[i].m_quantizedAabbMax[1]);
1127                        bvh->m_SubtreeHeaders[i].m_quantizedAabbMax[2] = btSwapEndian(bvh->m_SubtreeHeaders[i].m_quantizedAabbMax[2]);
1128
1129                        bvh->m_SubtreeHeaders[i].m_rootNodeIndex = static_cast<int>(btSwapEndian(bvh->m_SubtreeHeaders[i].m_rootNodeIndex));
1130                        bvh->m_SubtreeHeaders[i].m_subtreeSize = static_cast<int>(btSwapEndian(bvh->m_SubtreeHeaders[i].m_subtreeSize));
1131                }
1132        }
1133
1134        return bvh;
1135}
1136
1137// Constructor that prevents btVector3's default constructor from being called
1138btQuantizedBvh::btQuantizedBvh(btQuantizedBvh &self, bool /* ownsMemory */) :
1139m_bvhAabbMin(self.m_bvhAabbMin),
1140m_bvhAabbMax(self.m_bvhAabbMax),
1141m_bvhQuantization(self.m_bvhQuantization),
1142m_bulletVersion(BT_BULLET_VERSION)
1143{
1144
1145}
1146
1147
1148