
source: code/trunk/src/external/bullet/BulletCollision/BroadphaseCollision/btQuantizedBvh.cpp @ 6333

Last change on this file since 6333 was 5781, checked in by rgrieder, 15 years ago

Reverted trunk again. We might want to find a way to delete these revisions again (x3n's changes are still available as diff in the commit mails).

  • Property svn:eol-style set to native
File size: 39.4 KB
/*
Bullet Continuous Collision Detection and Physics Library
Copyright (c) 2003-2006 Erwin Coumans  http://continuousphysics.com/Bullet/

This software is provided 'as-is', without any express or implied warranty.
In no event will the authors be held liable for any damages arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it freely,
subject to the following restrictions:

1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
*/

#include "btQuantizedBvh.h"

#include "LinearMath/btAabbUtil2.h"
#include "LinearMath/btIDebugDraw.h"

#define RAYAABB2

btQuantizedBvh::btQuantizedBvh() :
					m_bulletVersion(BT_BULLET_VERSION),
					m_useQuantization(false),
					//m_traversalMode(TRAVERSAL_STACKLESS_CACHE_FRIENDLY)
					m_traversalMode(TRAVERSAL_STACKLESS)
					//m_traversalMode(TRAVERSAL_RECURSIVE)
					,m_subtreeHeaderCount(0) //PCK: add this line
{
	m_bvhAabbMin.setValue(-SIMD_INFINITY,-SIMD_INFINITY,-SIMD_INFINITY);
	m_bvhAabbMax.setValue(SIMD_INFINITY,SIMD_INFINITY,SIMD_INFINITY);
}



void btQuantizedBvh::buildInternal()
{
	///assumes that the caller has filled in m_quantizedLeafNodes
	m_useQuantization = true;
	int numLeafNodes = 0;

	if (m_useQuantization)
	{
		//now we have an array of leafnodes in m_leafNodes
		numLeafNodes = m_quantizedLeafNodes.size();

		m_quantizedContiguousNodes.resize(2*numLeafNodes);

	}

	m_curNodeIndex = 0;

	buildTree(0,numLeafNodes);

	///if the entire tree is smaller than the subtree size, we need to create a header info for the tree
	if(m_useQuantization && !m_SubtreeHeaders.size())
	{
		btBvhSubtreeInfo& subtree = m_SubtreeHeaders.expand();
		subtree.setAabbFromQuantizeNode(m_quantizedContiguousNodes[0]);
		subtree.m_rootNodeIndex = 0;
		subtree.m_subtreeSize = m_quantizedContiguousNodes[0].isLeafNode() ? 1 : m_quantizedContiguousNodes[0].getEscapeIndex();
	}

	//PCK: update the copy of the size
	m_subtreeHeaderCount = m_SubtreeHeaders.size();

	//PCK: clear m_quantizedLeafNodes and m_leafNodes, they are temporary
	m_quantizedLeafNodes.clear();
	m_leafNodes.clear();
}
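
///Illustrative usage sketch (not part of the original file): the call sequence buildInternal() expects,
///using the "expert/internal use only" accessors declared in btQuantizedBvh.h. The helper name and the
///way the leaf array is filled are assumptions for this sketch.
#if 0
static void buildExampleBvh(btQuantizedBvh& bvh, const btVector3& sceneAabbMin, const btVector3& sceneAabbMax)
{
	//1) define the quantization frame first, so leaf AABBs can be quantized consistently
	bvh.setQuantizationValues(sceneAabbMin, sceneAabbMax);
	//2) the caller fills one quantized leaf node per primitive (AABB plus part/triangle id),
	//   e.g. via bvh.getLeafNodeArray().push_back(leafNode);
	//3) build the contiguous node array and the subtree headers from those leaves
	bvh.buildInternal();
}
#endif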



///just for debugging, to visualize the individual patches/subtrees
#ifdef DEBUG_PATCH_COLORS
btVector3 color[4]=
{
	btVector3(255,0,0),
	btVector3(0,255,0),
	btVector3(0,0,255),
	btVector3(0,255,255)
};
#endif //DEBUG_PATCH_COLORS



void btQuantizedBvh::setQuantizationValues(const btVector3& bvhAabbMin,const btVector3& bvhAabbMax,btScalar quantizationMargin)
{
	//enlarge the AABB to avoid division by zero when initializing the quantization values
	btVector3 clampValue(quantizationMargin,quantizationMargin,quantizationMargin);
	m_bvhAabbMin = bvhAabbMin - clampValue;
	m_bvhAabbMax = bvhAabbMax + clampValue;
	btVector3 aabbSize = m_bvhAabbMax - m_bvhAabbMin;
	m_bvhQuantization = btVector3(btScalar(65533.0),btScalar(65533.0),btScalar(65533.0)) / aabbSize;
	m_useQuantization = true;
}
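
///Illustrative sketch (not part of the original file): with the values set above, a point p inside
///[m_bvhAabbMin, m_bvhAabbMax] maps to 16-bit coordinates roughly as
///  q[i] = (unsigned short)((p[i] - m_bvhAabbMin[i]) * m_bvhQuantization[i])
///i.e. the enlarged AABB is divided into up to 65533 steps per axis. The library's quantizeWithClamp()
///additionally clamps the point into the AABB and rounds min/max corners conservatively outward.
#if 0
static void quantizeExample(const btQuantizedBvh& bvh, const btVector3& point)
{
	unsigned short quantized[3];
	//isMax = 0 rounds towards the AABB min corner, isMax = 1 towards the max corner
	bvh.quantizeWithClamp(quantized, point, 0);
}
#endif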



btQuantizedBvh::~btQuantizedBvh()
{
}

#ifdef DEBUG_TREE_BUILDING
int gStackDepth = 0;
int gMaxStackDepth = 0;
#endif //DEBUG_TREE_BUILDING

void btQuantizedBvh::buildTree(int startIndex,int endIndex)
{
#ifdef DEBUG_TREE_BUILDING
	gStackDepth++;
	if (gStackDepth > gMaxStackDepth)
		gMaxStackDepth = gStackDepth;
#endif //DEBUG_TREE_BUILDING


	int splitAxis, splitIndex, i;
	int numIndices = endIndex-startIndex;
	int curIndex = m_curNodeIndex;

	btAssert(numIndices>0);

	if (numIndices==1)
	{
#ifdef DEBUG_TREE_BUILDING
		gStackDepth--;
#endif //DEBUG_TREE_BUILDING

		assignInternalNodeFromLeafNode(m_curNodeIndex,startIndex);

		m_curNodeIndex++;
		return;
	}
	//calculate Best Splitting Axis and where to split it. Sort the incoming 'leafNodes' array within range 'startIndex/endIndex'.

	splitAxis = calcSplittingAxis(startIndex,endIndex);

	splitIndex = sortAndCalcSplittingIndex(startIndex,endIndex,splitAxis);

	int internalNodeIndex = m_curNodeIndex;

	//set the min aabb to 'inf' or a max value, and set the max aabb to a -inf/minimum value.
	//the aabb will be expanded during buildTree/mergeInternalNodeAabb with actual node values
	setInternalNodeAabbMin(m_curNodeIndex,m_bvhAabbMax);//can't use btVector3(SIMD_INFINITY,SIMD_INFINITY,SIMD_INFINITY)) because of quantization
	setInternalNodeAabbMax(m_curNodeIndex,m_bvhAabbMin);//can't use btVector3(-SIMD_INFINITY,-SIMD_INFINITY,-SIMD_INFINITY)) because of quantization


	for (i=startIndex;i<endIndex;i++)
	{
		mergeInternalNodeAabb(m_curNodeIndex,getAabbMin(i),getAabbMax(i));
	}

	m_curNodeIndex++;


	//internalNode->m_escapeIndex;

	int leftChildNodexIndex = m_curNodeIndex;

	//build left child tree
	buildTree(startIndex,splitIndex);

	int rightChildNodexIndex = m_curNodeIndex;
	//build right child tree
	buildTree(splitIndex,endIndex);

#ifdef DEBUG_TREE_BUILDING
	gStackDepth--;
#endif //DEBUG_TREE_BUILDING

	int escapeIndex = m_curNodeIndex - curIndex;

	if (m_useQuantization)
	{
		//escapeIndex is the number of nodes of this subtree
		const int sizeQuantizedNode = sizeof(btQuantizedBvhNode);
		const int treeSizeInBytes = escapeIndex * sizeQuantizedNode;
		if (treeSizeInBytes > MAX_SUBTREE_SIZE_IN_BYTES)
		{
			updateSubtreeHeaders(leftChildNodexIndex,rightChildNodexIndex);
		}
	} else
	{

	}

	setInternalNodeEscapeIndex(internalNodeIndex,escapeIndex);

}

void btQuantizedBvh::updateSubtreeHeaders(int leftChildNodexIndex,int rightChildNodexIndex)
{
	btAssert(m_useQuantization);

	btQuantizedBvhNode& leftChildNode = m_quantizedContiguousNodes[leftChildNodexIndex];
	int leftSubTreeSize = leftChildNode.isLeafNode() ? 1 : leftChildNode.getEscapeIndex();
	int leftSubTreeSizeInBytes = leftSubTreeSize * static_cast<int>(sizeof(btQuantizedBvhNode));

	btQuantizedBvhNode& rightChildNode = m_quantizedContiguousNodes[rightChildNodexIndex];
	int rightSubTreeSize = rightChildNode.isLeafNode() ? 1 : rightChildNode.getEscapeIndex();
	int rightSubTreeSizeInBytes = rightSubTreeSize * static_cast<int>(sizeof(btQuantizedBvhNode));

	if(leftSubTreeSizeInBytes <= MAX_SUBTREE_SIZE_IN_BYTES)
	{
		btBvhSubtreeInfo& subtree = m_SubtreeHeaders.expand();
		subtree.setAabbFromQuantizeNode(leftChildNode);
		subtree.m_rootNodeIndex = leftChildNodexIndex;
		subtree.m_subtreeSize = leftSubTreeSize;
	}

	if(rightSubTreeSizeInBytes <= MAX_SUBTREE_SIZE_IN_BYTES)
	{
		btBvhSubtreeInfo& subtree = m_SubtreeHeaders.expand();
		subtree.setAabbFromQuantizeNode(rightChildNode);
		subtree.m_rootNodeIndex = rightChildNodexIndex;
		subtree.m_subtreeSize = rightSubTreeSize;
	}

	//PCK: update the copy of the size
	m_subtreeHeaderCount = m_SubtreeHeaders.size();
}


int btQuantizedBvh::sortAndCalcSplittingIndex(int startIndex,int endIndex,int splitAxis)
{
	int i;
	int splitIndex = startIndex;
	int numIndices = endIndex - startIndex;
	btScalar splitValue;

	btVector3 means(btScalar(0.),btScalar(0.),btScalar(0.));
	for (i=startIndex;i<endIndex;i++)
	{
		btVector3 center = btScalar(0.5)*(getAabbMax(i)+getAabbMin(i));
		means+=center;
	}
	means *= (btScalar(1.)/(btScalar)numIndices);

	splitValue = means[splitAxis];

	//sort leafNodes so all values larger than splitValue come first, and smaller values start from 'splitIndex'.
	for (i=startIndex;i<endIndex;i++)
	{
		btVector3 center = btScalar(0.5)*(getAabbMax(i)+getAabbMin(i));
		if (center[splitAxis] > splitValue)
		{
			//swap
			swapLeafNodes(i,splitIndex);
			splitIndex++;
		}
	}

	//if the splitIndex causes unbalanced trees, fix this by using the center in between startIndex and endIndex
	//otherwise the tree-building might fail due to stack-overflows in certain cases.
	//unbalanced1 is unsafe: it can cause stack overflows
	//bool unbalanced1 = ((splitIndex==startIndex) || (splitIndex == (endIndex-1)));

	//unbalanced2 should work too: always use center (perfect balanced trees)
	//bool unbalanced2 = true;

	//this should be safe too:
	int rangeBalancedIndices = numIndices/3;
	bool unbalanced = ((splitIndex<=(startIndex+rangeBalancedIndices)) || (splitIndex >=(endIndex-1-rangeBalancedIndices)));

	if (unbalanced)
	{
		splitIndex = startIndex+ (numIndices>>1);
	}

	bool unbal = (splitIndex==startIndex) || (splitIndex == (endIndex));
	(void)unbal;
	btAssert(!unbal);

	return splitIndex;
}


int btQuantizedBvh::calcSplittingAxis(int startIndex,int endIndex)
{
	int i;

	btVector3 means(btScalar(0.),btScalar(0.),btScalar(0.));
	btVector3 variance(btScalar(0.),btScalar(0.),btScalar(0.));
	int numIndices = endIndex-startIndex;

	for (i=startIndex;i<endIndex;i++)
	{
		btVector3 center = btScalar(0.5)*(getAabbMax(i)+getAabbMin(i));
		means+=center;
	}
	means *= (btScalar(1.)/(btScalar)numIndices);

	for (i=startIndex;i<endIndex;i++)
	{
		btVector3 center = btScalar(0.5)*(getAabbMax(i)+getAabbMin(i));
		btVector3 diff2 = center-means;
		diff2 = diff2 * diff2;
		variance += diff2;
	}
	variance *= (btScalar(1.)/((btScalar)numIndices-1));

	return variance.maxAxis();
}
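
///Illustrative sketch (not part of the original file): calcSplittingAxis() simply returns the axis with
///the largest variance of the leaf AABB centers, which tends to give the most useful split. A standalone
///version over plain points (names hypothetical) would look like this:
#if 0
static int largestVarianceAxis(const btAlignedObjectArray<btVector3>& centers)
{
	btVector3 means(0,0,0), variance(0,0,0);
	for (int i=0;i<centers.size();i++) means += centers[i];
	means *= btScalar(1.)/centers.size();
	for (int i=0;i<centers.size();i++)
	{
		btVector3 diff = centers[i]-means;
		variance += diff*diff;	//component-wise square
	}
	return variance.maxAxis();	//0 = x, 1 = y, 2 = z
}
#endif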



void btQuantizedBvh::reportAabbOverlappingNodex(btNodeOverlapCallback* nodeCallback,const btVector3& aabbMin,const btVector3& aabbMax) const
{
	//either choose recursive traversal (walkTree) or stackless (walkStacklessTree)

	if (m_useQuantization)
	{
		///quantize query AABB
		unsigned short int quantizedQueryAabbMin[3];
		unsigned short int quantizedQueryAabbMax[3];
		quantizeWithClamp(quantizedQueryAabbMin,aabbMin,0);
		quantizeWithClamp(quantizedQueryAabbMax,aabbMax,1);

		switch (m_traversalMode)
		{
		case TRAVERSAL_STACKLESS:
			walkStacklessQuantizedTree(nodeCallback,quantizedQueryAabbMin,quantizedQueryAabbMax,0,m_curNodeIndex);
			break;
		case TRAVERSAL_STACKLESS_CACHE_FRIENDLY:
			walkStacklessQuantizedTreeCacheFriendly(nodeCallback,quantizedQueryAabbMin,quantizedQueryAabbMax);
			break;
		case TRAVERSAL_RECURSIVE:
			{
				const btQuantizedBvhNode* rootNode = &m_quantizedContiguousNodes[0];
				walkRecursiveQuantizedTreeAgainstQueryAabb(rootNode,nodeCallback,quantizedQueryAabbMin,quantizedQueryAabbMax);
			}
			break;
		default:
			//unsupported
			btAssert(0);
		}
	} else
	{
		walkStacklessTree(nodeCallback,aabbMin,aabbMax);
	}
}
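
///Illustrative usage sketch (not part of the original file): a caller derives from btNodeOverlapCallback
///and receives a (subPart, triangleIndex) pair for every leaf whose AABB overlaps the query AABB.
///MyOverlapCallback and queryExample are hypothetical names.
#if 0
struct MyOverlapCallback : public btNodeOverlapCallback
{
	virtual void processNode(int subPart, int triangleIndex)
	{
		//look up triangle (subPart, triangleIndex) in the owning mesh interface and handle the overlap
	}
};

static void queryExample(const btQuantizedBvh& bvh, const btVector3& aabbMin, const btVector3& aabbMax)
{
	MyOverlapCallback callback;
	bvh.reportAabbOverlappingNodex(&callback, aabbMin, aabbMax);
}
#endif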


int maxIterations = 0;


void btQuantizedBvh::walkStacklessTree(btNodeOverlapCallback* nodeCallback,const btVector3& aabbMin,const btVector3& aabbMax) const
{
	btAssert(!m_useQuantization);

	const btOptimizedBvhNode* rootNode = &m_contiguousNodes[0];
	int escapeIndex, curIndex = 0;
	int walkIterations = 0;
	bool isLeafNode;
	//PCK: unsigned instead of bool
	unsigned aabbOverlap;

	while (curIndex < m_curNodeIndex)
	{
		//catch bugs in tree data
		btAssert (walkIterations < m_curNodeIndex);

		walkIterations++;
		aabbOverlap = TestAabbAgainstAabb2(aabbMin,aabbMax,rootNode->m_aabbMinOrg,rootNode->m_aabbMaxOrg);
		isLeafNode = rootNode->m_escapeIndex == -1;

		//PCK: unsigned instead of bool
		if (isLeafNode && (aabbOverlap != 0))
		{
			nodeCallback->processNode(rootNode->m_subPart,rootNode->m_triangleIndex);
		}

		//PCK: unsigned instead of bool
		if ((aabbOverlap != 0) || isLeafNode)
		{
			rootNode++;
			curIndex++;
		} else
		{
			escapeIndex = rootNode->m_escapeIndex;
			rootNode += escapeIndex;
			curIndex += escapeIndex;
		}
	}
	if (maxIterations < walkIterations)
		maxIterations = walkIterations;

}
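
///Illustrative note (not part of the original file): the stackless walk above relies on the depth-first
///node layout plus per-node escape indices. For a tree over leaves A,B,C the contiguous array looks
///roughly like
///   [internal, escape=5] [internal, escape=3] [leaf A] [leaf B] [leaf C]
///so when an internal node's AABB does not overlap the query, "rootNode += escapeIndex" skips its entire
///subtree in one step; otherwise the walk advances one node at a time.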

/*
///this was the original recursive traversal, before we optimized towards stackless traversal
void	btQuantizedBvh::walkTree(btOptimizedBvhNode* rootNode,btNodeOverlapCallback* nodeCallback,const btVector3& aabbMin,const btVector3& aabbMax) const
{
	bool isLeafNode, aabbOverlap = TestAabbAgainstAabb2(aabbMin,aabbMax,rootNode->m_aabbMin,rootNode->m_aabbMax);
	if (aabbOverlap)
	{
		isLeafNode = (!rootNode->m_leftChild && !rootNode->m_rightChild);
		if (isLeafNode)
		{
			nodeCallback->processNode(rootNode);
		} else
		{
			walkTree(rootNode->m_leftChild,nodeCallback,aabbMin,aabbMax);
			walkTree(rootNode->m_rightChild,nodeCallback,aabbMin,aabbMax);
		}
	}

}
*/

void btQuantizedBvh::walkRecursiveQuantizedTreeAgainstQueryAabb(const btQuantizedBvhNode* currentNode,btNodeOverlapCallback* nodeCallback,unsigned short int* quantizedQueryAabbMin,unsigned short int* quantizedQueryAabbMax) const
{
	btAssert(m_useQuantization);

	bool isLeafNode;
	//PCK: unsigned instead of bool
	unsigned aabbOverlap;

	//PCK: unsigned instead of bool
	aabbOverlap = testQuantizedAabbAgainstQuantizedAabb(quantizedQueryAabbMin,quantizedQueryAabbMax,currentNode->m_quantizedAabbMin,currentNode->m_quantizedAabbMax);
	isLeafNode = currentNode->isLeafNode();

	//PCK: unsigned instead of bool
	if (aabbOverlap != 0)
	{
		if (isLeafNode)
		{
			nodeCallback->processNode(currentNode->getPartId(),currentNode->getTriangleIndex());
		} else
		{
			//process left and right children
			const btQuantizedBvhNode* leftChildNode = currentNode+1;
			walkRecursiveQuantizedTreeAgainstQueryAabb(leftChildNode,nodeCallback,quantizedQueryAabbMin,quantizedQueryAabbMax);

			const btQuantizedBvhNode* rightChildNode = leftChildNode->isLeafNode() ? leftChildNode+1 : leftChildNode+leftChildNode->getEscapeIndex();
			walkRecursiveQuantizedTreeAgainstQueryAabb(rightChildNode,nodeCallback,quantizedQueryAabbMin,quantizedQueryAabbMax);
		}
	}
}
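
///Illustrative note (not part of the original file): no child pointers are stored. In the contiguous
///depth-first array the left child always directly follows its parent, and the right child follows the
///left child's subtree (leftChildNode+1 for a leaf, leftChildNode+getEscapeIndex() otherwise), which is
///exactly how the recursive walk above locates both children.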



void btQuantizedBvh::walkStacklessTreeAgainstRay(btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget, const btVector3& aabbMin, const btVector3& aabbMax, int startNodeIndex,int endNodeIndex) const
{
	btAssert(!m_useQuantization);

	const btOptimizedBvhNode* rootNode = &m_contiguousNodes[0];
	int escapeIndex, curIndex = 0;
	int walkIterations = 0;
	bool isLeafNode;
	//PCK: unsigned instead of bool
	unsigned aabbOverlap=0;
	unsigned rayBoxOverlap=0;
	btScalar lambda_max = 1.0;

	/* Quick pruning by quantized box */
	btVector3 rayAabbMin = raySource;
	btVector3 rayAabbMax = raySource;
	rayAabbMin.setMin(rayTarget);
	rayAabbMax.setMax(rayTarget);

	/* Add box cast extents to bounding box */
	rayAabbMin += aabbMin;
	rayAabbMax += aabbMax;

#ifdef RAYAABB2
	btVector3 rayDir = (rayTarget-raySource);
	rayDir.normalize ();
	lambda_max = rayDir.dot(rayTarget-raySource);
	///what about division by zero? --> just set rayDirection[i] to 1.0
	btVector3 rayDirectionInverse;
	rayDirectionInverse[0] = rayDir[0] == btScalar(0.0) ? btScalar(1e30) : btScalar(1.0) / rayDir[0];
	rayDirectionInverse[1] = rayDir[1] == btScalar(0.0) ? btScalar(1e30) : btScalar(1.0) / rayDir[1];
	rayDirectionInverse[2] = rayDir[2] == btScalar(0.0) ? btScalar(1e30) : btScalar(1.0) / rayDir[2];
	unsigned int sign[3] = { rayDirectionInverse[0] < 0.0, rayDirectionInverse[1] < 0.0, rayDirectionInverse[2] < 0.0};
#endif

	btVector3 bounds[2];

	while (curIndex < m_curNodeIndex)
	{
		btScalar param = 1.0;
		//catch bugs in tree data
		btAssert (walkIterations < m_curNodeIndex);

		walkIterations++;

		bounds[0] = rootNode->m_aabbMinOrg;
		bounds[1] = rootNode->m_aabbMaxOrg;
		/* Add box cast extents */
		bounds[0] += aabbMin;
		bounds[1] += aabbMax;

		aabbOverlap = TestAabbAgainstAabb2(rayAabbMin,rayAabbMax,rootNode->m_aabbMinOrg,rootNode->m_aabbMaxOrg);
		//perhaps profile if it is worth doing the aabbOverlap test first

#ifdef RAYAABB2
		///careful with this check: need to check division by zero (above) and fix the unQuantize method
		///thanks Joerg/hiker for the reproduction case!
		///http://www.bulletphysics.com/Bullet/phpBB3/viewtopic.php?f=9&t=1858
		rayBoxOverlap = aabbOverlap ? btRayAabb2 (raySource, rayDirectionInverse, sign, bounds, param, 0.0f, lambda_max) : false;

#else
		btVector3 normal;
		rayBoxOverlap = btRayAabb(raySource, rayTarget,bounds[0],bounds[1],param, normal);
#endif

		isLeafNode = rootNode->m_escapeIndex == -1;

		//PCK: unsigned instead of bool
		if (isLeafNode && (rayBoxOverlap != 0))
		{
			nodeCallback->processNode(rootNode->m_subPart,rootNode->m_triangleIndex);
		}

		//PCK: unsigned instead of bool
		if ((rayBoxOverlap != 0) || isLeafNode)
		{
			rootNode++;
			curIndex++;
		} else
		{
			escapeIndex = rootNode->m_escapeIndex;
			rootNode += escapeIndex;
			curIndex += escapeIndex;
		}
	}
	if (maxIterations < walkIterations)
		maxIterations = walkIterations;

}
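
///Illustrative note (not part of the original file): the RAYAABB2 path precomputes 1/direction per axis
///(substituting a huge value such as 1e30 when a component is zero, as noted above) plus per-axis sign
///bits, so btRayAabb2() can run the classic slab test with multiplications only: per axis,
///  tNear = (bounds[sign[i]][i] - raySource[i]) * rayDirectionInverse[i]
///  tFar  = (bounds[1-sign[i]][i] - raySource[i]) * rayDirectionInverse[i]
///and the ray hits the box when the intersection of all [tNear, tFar] intervals is non-empty and
///overlaps [0, lambda_max].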



void btQuantizedBvh::walkStacklessQuantizedTreeAgainstRay(btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget, const btVector3& aabbMin, const btVector3& aabbMax, int startNodeIndex,int endNodeIndex) const
{
	btAssert(m_useQuantization);

	int curIndex = startNodeIndex;
	int walkIterations = 0;
	int subTreeSize = endNodeIndex - startNodeIndex;
	(void)subTreeSize;

	const btQuantizedBvhNode* rootNode = &m_quantizedContiguousNodes[startNodeIndex];
	int escapeIndex;

	bool isLeafNode;
	//PCK: unsigned instead of bool
	unsigned boxBoxOverlap = 0;
	unsigned rayBoxOverlap = 0;

	btScalar lambda_max = 1.0;

#ifdef RAYAABB2
	btVector3 rayDirection = (rayTarget-raySource);
	rayDirection.normalize ();
	lambda_max = rayDirection.dot(rayTarget-raySource);
	///what about division by zero? --> just set rayDirection[i] to 1.0
	rayDirection[0] = rayDirection[0] == btScalar(0.0) ? btScalar(1e30) : btScalar(1.0) / rayDirection[0];
	rayDirection[1] = rayDirection[1] == btScalar(0.0) ? btScalar(1e30) : btScalar(1.0) / rayDirection[1];
	rayDirection[2] = rayDirection[2] == btScalar(0.0) ? btScalar(1e30) : btScalar(1.0) / rayDirection[2];
	unsigned int sign[3] = { rayDirection[0] < 0.0, rayDirection[1] < 0.0, rayDirection[2] < 0.0};
#endif

	/* Quick pruning by quantized box */
	btVector3 rayAabbMin = raySource;
	btVector3 rayAabbMax = raySource;
	rayAabbMin.setMin(rayTarget);
	rayAabbMax.setMax(rayTarget);

	/* Add box cast extents to bounding box */
	rayAabbMin += aabbMin;
	rayAabbMax += aabbMax;

	unsigned short int quantizedQueryAabbMin[3];
	unsigned short int quantizedQueryAabbMax[3];
	quantizeWithClamp(quantizedQueryAabbMin,rayAabbMin,0);
	quantizeWithClamp(quantizedQueryAabbMax,rayAabbMax,1);

	while (curIndex < endNodeIndex)
	{

//#define VISUALLY_ANALYZE_BVH 1
#ifdef VISUALLY_ANALYZE_BVH
		//some code snippet to debugDraw aabb, to visually analyze bvh structure
		static int drawPatch = 0;
		//need some global access to a debugDrawer
		extern btIDebugDraw* debugDrawerPtr;
		if (curIndex==drawPatch)
		{
			btVector3 aabbMin,aabbMax;
			aabbMin = unQuantize(rootNode->m_quantizedAabbMin);
			aabbMax = unQuantize(rootNode->m_quantizedAabbMax);
			btVector3	color(1,0,0);
			debugDrawerPtr->drawAabb(aabbMin,aabbMax,color);
		}
#endif//VISUALLY_ANALYZE_BVH

		//catch bugs in tree data
		btAssert (walkIterations < subTreeSize);

		walkIterations++;
		//PCK: unsigned instead of bool
		// only interested if this is closer than any previous hit
		btScalar param = 1.0;
		rayBoxOverlap = 0;
		boxBoxOverlap = testQuantizedAabbAgainstQuantizedAabb(quantizedQueryAabbMin,quantizedQueryAabbMax,rootNode->m_quantizedAabbMin,rootNode->m_quantizedAabbMax);
		isLeafNode = rootNode->isLeafNode();
		if (boxBoxOverlap)
		{
			btVector3 bounds[2];
			bounds[0] = unQuantize(rootNode->m_quantizedAabbMin);
			bounds[1] = unQuantize(rootNode->m_quantizedAabbMax);
			/* Add box cast extents */
			bounds[0] += aabbMin;
			bounds[1] += aabbMax;
			btVector3 normal;
#if 0
			bool ra2 = btRayAabb2 (raySource, rayDirection, sign, bounds, param, 0.0, lambda_max);
			bool ra = btRayAabb (raySource, rayTarget, bounds[0], bounds[1], param, normal);
			if (ra2 != ra)
			{
				printf("functions don't match\n");
			}
#endif
#ifdef RAYAABB2
			///careful with this check: need to check division by zero (above) and fix the unQuantize method
			///thanks Joerg/hiker for the reproduction case!
			///http://www.bulletphysics.com/Bullet/phpBB3/viewtopic.php?f=9&t=1858

			//BT_PROFILE("btRayAabb2");
			rayBoxOverlap = btRayAabb2 (raySource, rayDirection, sign, bounds, param, 0.0f, lambda_max);

#else
			rayBoxOverlap = true;//btRayAabb(raySource, rayTarget, bounds[0], bounds[1], param, normal);
#endif
		}

		if (isLeafNode && rayBoxOverlap)
		{
			nodeCallback->processNode(rootNode->getPartId(),rootNode->getTriangleIndex());
		}

		//PCK: unsigned instead of bool
		if ((rayBoxOverlap != 0) || isLeafNode)
		{
			rootNode++;
			curIndex++;
		} else
		{
			escapeIndex = rootNode->getEscapeIndex();
			rootNode += escapeIndex;
			curIndex += escapeIndex;
		}
	}
	if (maxIterations < walkIterations)
		maxIterations = walkIterations;

}

void btQuantizedBvh::walkStacklessQuantizedTree(btNodeOverlapCallback* nodeCallback,unsigned short int* quantizedQueryAabbMin,unsigned short int* quantizedQueryAabbMax,int startNodeIndex,int endNodeIndex) const
{
	btAssert(m_useQuantization);

	int curIndex = startNodeIndex;
	int walkIterations = 0;
	int subTreeSize = endNodeIndex - startNodeIndex;
	(void)subTreeSize;

	const btQuantizedBvhNode* rootNode = &m_quantizedContiguousNodes[startNodeIndex];
	int escapeIndex;

	bool isLeafNode;
	//PCK: unsigned instead of bool
	unsigned aabbOverlap;

	while (curIndex < endNodeIndex)
	{

//#define VISUALLY_ANALYZE_BVH 1
#ifdef VISUALLY_ANALYZE_BVH
		//some code snippet to debugDraw aabb, to visually analyze bvh structure
		static int drawPatch = 0;
		//need some global access to a debugDrawer
		extern btIDebugDraw* debugDrawerPtr;
		if (curIndex==drawPatch)
		{
			btVector3 aabbMin,aabbMax;
			aabbMin = unQuantize(rootNode->m_quantizedAabbMin);
			aabbMax = unQuantize(rootNode->m_quantizedAabbMax);
			btVector3	color(1,0,0);
			debugDrawerPtr->drawAabb(aabbMin,aabbMax,color);
		}
#endif//VISUALLY_ANALYZE_BVH

		//catch bugs in tree data
		btAssert (walkIterations < subTreeSize);

		walkIterations++;
		//PCK: unsigned instead of bool
		aabbOverlap = testQuantizedAabbAgainstQuantizedAabb(quantizedQueryAabbMin,quantizedQueryAabbMax,rootNode->m_quantizedAabbMin,rootNode->m_quantizedAabbMax);
		isLeafNode = rootNode->isLeafNode();

		if (isLeafNode && aabbOverlap)
		{
			nodeCallback->processNode(rootNode->getPartId(),rootNode->getTriangleIndex());
		}

		//PCK: unsigned instead of bool
		if ((aabbOverlap != 0) || isLeafNode)
		{
			rootNode++;
			curIndex++;
		} else
		{
			escapeIndex = rootNode->getEscapeIndex();
			rootNode += escapeIndex;
			curIndex += escapeIndex;
		}
	}
	if (maxIterations < walkIterations)
		maxIterations = walkIterations;

}

//This traversal can be called from Playstation 3 SPU
void btQuantizedBvh::walkStacklessQuantizedTreeCacheFriendly(btNodeOverlapCallback* nodeCallback,unsigned short int* quantizedQueryAabbMin,unsigned short int* quantizedQueryAabbMax) const
{
	btAssert(m_useQuantization);

	int i;


	for (i=0;i<this->m_SubtreeHeaders.size();i++)
	{
		const btBvhSubtreeInfo& subtree = m_SubtreeHeaders[i];

		//PCK: unsigned instead of bool
		unsigned overlap = testQuantizedAabbAgainstQuantizedAabb(quantizedQueryAabbMin,quantizedQueryAabbMax,subtree.m_quantizedAabbMin,subtree.m_quantizedAabbMax);
		if (overlap != 0)
		{
			walkStacklessQuantizedTree(nodeCallback,quantizedQueryAabbMin,quantizedQueryAabbMax,
				subtree.m_rootNodeIndex,
				subtree.m_rootNodeIndex+subtree.m_subtreeSize);
		}
	}
}
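
///Illustrative note (not part of the original file): this variant first tests the query against each
///btBvhSubtreeInfo header and only then walks the matching subtree, whose size was capped at
///MAX_SUBTREE_SIZE_IN_BYTES by updateSubtreeHeaders() during the build. Each inner walk therefore stays
///inside a small contiguous block of nodes, small enough to DMA to an SPU local store or keep in cache.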


void btQuantizedBvh::reportRayOverlappingNodex (btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget) const
{
	reportBoxCastOverlappingNodex(nodeCallback,raySource,rayTarget,btVector3(0,0,0),btVector3(0,0,0));
}


void btQuantizedBvh::reportBoxCastOverlappingNodex(btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget, const btVector3& aabbMin,const btVector3& aabbMax) const
{
	//always use stackless

	if (m_useQuantization)
	{
		walkStacklessQuantizedTreeAgainstRay(nodeCallback, raySource, rayTarget, aabbMin, aabbMax, 0, m_curNodeIndex);
	}
	else
	{
		walkStacklessTreeAgainstRay(nodeCallback, raySource, rayTarget, aabbMin, aabbMax, 0, m_curNodeIndex);
	}
	/*
	{
		//recursive traversal
		btVector3 qaabbMin = raySource;
		btVector3 qaabbMax = raySource;
		qaabbMin.setMin(rayTarget);
		qaabbMax.setMax(rayTarget);
		qaabbMin += aabbMin;
		qaabbMax += aabbMax;
		reportAabbOverlappingNodex(nodeCallback,qaabbMin,qaabbMax);
	}
	*/

}


void btQuantizedBvh::swapLeafNodes(int i,int splitIndex)
{
	if (m_useQuantization)
	{
		btQuantizedBvhNode tmp = m_quantizedLeafNodes[i];
		m_quantizedLeafNodes[i] = m_quantizedLeafNodes[splitIndex];
		m_quantizedLeafNodes[splitIndex] = tmp;
	} else
	{
		btOptimizedBvhNode tmp = m_leafNodes[i];
		m_leafNodes[i] = m_leafNodes[splitIndex];
		m_leafNodes[splitIndex] = tmp;
	}
}

void btQuantizedBvh::assignInternalNodeFromLeafNode(int internalNode,int leafNodeIndex)
{
	if (m_useQuantization)
	{
		m_quantizedContiguousNodes[internalNode] = m_quantizedLeafNodes[leafNodeIndex];
	} else
	{
		m_contiguousNodes[internalNode] = m_leafNodes[leafNodeIndex];
	}
}

//PCK: include
#include <new>

#if 0
//PCK: consts
static const unsigned BVH_ALIGNMENT = 16;
static const unsigned BVH_ALIGNMENT_MASK = BVH_ALIGNMENT-1;

static const unsigned BVH_ALIGNMENT_BLOCKS = 2;
#endif


unsigned int btQuantizedBvh::getAlignmentSerializationPadding()
{
	// I changed this to 0 since the extra padding is not needed or used.
	return 0;//BVH_ALIGNMENT_BLOCKS * BVH_ALIGNMENT;
}

unsigned btQuantizedBvh::calculateSerializeBufferSize()
{
	unsigned baseSize = sizeof(btQuantizedBvh) + getAlignmentSerializationPadding();
	baseSize += sizeof(btBvhSubtreeInfo) * m_subtreeHeaderCount;
	if (m_useQuantization)
	{
		return baseSize + m_curNodeIndex * sizeof(btQuantizedBvhNode);
	}
	return baseSize + m_curNodeIndex * sizeof(btOptimizedBvhNode);
}

bool btQuantizedBvh::serialize(void *o_alignedDataBuffer, unsigned /*i_dataBufferSize */, bool i_swapEndian)
{
	btAssert(m_subtreeHeaderCount == m_SubtreeHeaders.size());
	m_subtreeHeaderCount = m_SubtreeHeaders.size();

/*	if (i_dataBufferSize < calculateSerializeBufferSize() || o_alignedDataBuffer == NULL || (((unsigned)o_alignedDataBuffer & BVH_ALIGNMENT_MASK) != 0))
	{
		///check alignment for buffer?
		btAssert(0);
		return false;
	}
*/

	btQuantizedBvh *targetBvh = (btQuantizedBvh *)o_alignedDataBuffer;

	// construct the class so the virtual function table, etc will be set up
	// Also, m_leafNodes and m_quantizedLeafNodes will be initialized to default values by the constructor
	new (targetBvh) btQuantizedBvh;

	if (i_swapEndian)
	{
		targetBvh->m_curNodeIndex = static_cast<int>(btSwapEndian(m_curNodeIndex));


		btSwapVector3Endian(m_bvhAabbMin,targetBvh->m_bvhAabbMin);
		btSwapVector3Endian(m_bvhAabbMax,targetBvh->m_bvhAabbMax);
		btSwapVector3Endian(m_bvhQuantization,targetBvh->m_bvhQuantization);

		targetBvh->m_traversalMode = (btTraversalMode)btSwapEndian(m_traversalMode);
		targetBvh->m_subtreeHeaderCount = static_cast<int>(btSwapEndian(m_subtreeHeaderCount));
	}
	else
	{
		targetBvh->m_curNodeIndex = m_curNodeIndex;
		targetBvh->m_bvhAabbMin = m_bvhAabbMin;
		targetBvh->m_bvhAabbMax = m_bvhAabbMax;
		targetBvh->m_bvhQuantization = m_bvhQuantization;
		targetBvh->m_traversalMode = m_traversalMode;
		targetBvh->m_subtreeHeaderCount = m_subtreeHeaderCount;
	}

	targetBvh->m_useQuantization = m_useQuantization;

	unsigned char *nodeData = (unsigned char *)targetBvh;
	nodeData += sizeof(btQuantizedBvh);

	unsigned sizeToAdd = 0;//(BVH_ALIGNMENT-((unsigned)nodeData & BVH_ALIGNMENT_MASK))&BVH_ALIGNMENT_MASK;
	nodeData += sizeToAdd;

	int nodeCount = m_curNodeIndex;

	if (m_useQuantization)
	{
		targetBvh->m_quantizedContiguousNodes.initializeFromBuffer(nodeData, nodeCount, nodeCount);

		if (i_swapEndian)
		{
			for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
			{
				targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0] = btSwapEndian(m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0]);
				targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1] = btSwapEndian(m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1]);
				targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2] = btSwapEndian(m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2]);

				targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0] = btSwapEndian(m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0]);
				targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1] = btSwapEndian(m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1]);
				targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2] = btSwapEndian(m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2]);

				targetBvh->m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex = static_cast<int>(btSwapEndian(m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex));
			}
		}
		else
		{
			for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
			{
				targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0];
				targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1];
				targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2];

				targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0];
				targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1];
				targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2];

				targetBvh->m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex = m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex;
			}
		}
		nodeData += sizeof(btQuantizedBvhNode) * nodeCount;

		// this clears the pointer in the member variable; it doesn't really do anything to the data.
		// It does call the destructor on the contained objects, but they are all classes with no destructor defined,
		// so the memory (which is not freed) is left alone
		targetBvh->m_quantizedContiguousNodes.initializeFromBuffer(NULL, 0, 0);
	}
	else
	{
		targetBvh->m_contiguousNodes.initializeFromBuffer(nodeData, nodeCount, nodeCount);

		if (i_swapEndian)
		{
			for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
			{
				btSwapVector3Endian(m_contiguousNodes[nodeIndex].m_aabbMinOrg, targetBvh->m_contiguousNodes[nodeIndex].m_aabbMinOrg);
				btSwapVector3Endian(m_contiguousNodes[nodeIndex].m_aabbMaxOrg, targetBvh->m_contiguousNodes[nodeIndex].m_aabbMaxOrg);

				targetBvh->m_contiguousNodes[nodeIndex].m_escapeIndex = static_cast<int>(btSwapEndian(m_contiguousNodes[nodeIndex].m_escapeIndex));
				targetBvh->m_contiguousNodes[nodeIndex].m_subPart = static_cast<int>(btSwapEndian(m_contiguousNodes[nodeIndex].m_subPart));
				targetBvh->m_contiguousNodes[nodeIndex].m_triangleIndex = static_cast<int>(btSwapEndian(m_contiguousNodes[nodeIndex].m_triangleIndex));
			}
		}
		else
		{
			for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
			{
				targetBvh->m_contiguousNodes[nodeIndex].m_aabbMinOrg = m_contiguousNodes[nodeIndex].m_aabbMinOrg;
				targetBvh->m_contiguousNodes[nodeIndex].m_aabbMaxOrg = m_contiguousNodes[nodeIndex].m_aabbMaxOrg;

				targetBvh->m_contiguousNodes[nodeIndex].m_escapeIndex = m_contiguousNodes[nodeIndex].m_escapeIndex;
				targetBvh->m_contiguousNodes[nodeIndex].m_subPart = m_contiguousNodes[nodeIndex].m_subPart;
				targetBvh->m_contiguousNodes[nodeIndex].m_triangleIndex = m_contiguousNodes[nodeIndex].m_triangleIndex;
			}
		}
		nodeData += sizeof(btOptimizedBvhNode) * nodeCount;

		// this clears the pointer in the member variable; it doesn't really do anything to the data.
		// It does call the destructor on the contained objects, but they are all classes with no destructor defined,
		// so the memory (which is not freed) is left alone
		targetBvh->m_contiguousNodes.initializeFromBuffer(NULL, 0, 0);
	}

	sizeToAdd = 0;//(BVH_ALIGNMENT-((unsigned)nodeData & BVH_ALIGNMENT_MASK))&BVH_ALIGNMENT_MASK;
	nodeData += sizeToAdd;

	// Now serialize the subtree headers
	targetBvh->m_SubtreeHeaders.initializeFromBuffer(nodeData, m_subtreeHeaderCount, m_subtreeHeaderCount);
	if (i_swapEndian)
	{
		for (int i = 0; i < m_subtreeHeaderCount; i++)
		{
			targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMin[0] = btSwapEndian(m_SubtreeHeaders[i].m_quantizedAabbMin[0]);
			targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMin[1] = btSwapEndian(m_SubtreeHeaders[i].m_quantizedAabbMin[1]);
			targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMin[2] = btSwapEndian(m_SubtreeHeaders[i].m_quantizedAabbMin[2]);

			targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMax[0] = btSwapEndian(m_SubtreeHeaders[i].m_quantizedAabbMax[0]);
			targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMax[1] = btSwapEndian(m_SubtreeHeaders[i].m_quantizedAabbMax[1]);
			targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMax[2] = btSwapEndian(m_SubtreeHeaders[i].m_quantizedAabbMax[2]);

			targetBvh->m_SubtreeHeaders[i].m_rootNodeIndex = static_cast<int>(btSwapEndian(m_SubtreeHeaders[i].m_rootNodeIndex));
			targetBvh->m_SubtreeHeaders[i].m_subtreeSize = static_cast<int>(btSwapEndian(m_SubtreeHeaders[i].m_subtreeSize));
		}
	}
	else
	{
		for (int i = 0; i < m_subtreeHeaderCount; i++)
		{
			targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMin[0] = (m_SubtreeHeaders[i].m_quantizedAabbMin[0]);
			targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMin[1] = (m_SubtreeHeaders[i].m_quantizedAabbMin[1]);
			targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMin[2] = (m_SubtreeHeaders[i].m_quantizedAabbMin[2]);

			targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMax[0] = (m_SubtreeHeaders[i].m_quantizedAabbMax[0]);
			targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMax[1] = (m_SubtreeHeaders[i].m_quantizedAabbMax[1]);
			targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMax[2] = (m_SubtreeHeaders[i].m_quantizedAabbMax[2]);

			targetBvh->m_SubtreeHeaders[i].m_rootNodeIndex = (m_SubtreeHeaders[i].m_rootNodeIndex);
			targetBvh->m_SubtreeHeaders[i].m_subtreeSize = (m_SubtreeHeaders[i].m_subtreeSize);

			// need to clear padding in destination buffer
			targetBvh->m_SubtreeHeaders[i].m_padding[0] = 0;
			targetBvh->m_SubtreeHeaders[i].m_padding[1] = 0;
			targetBvh->m_SubtreeHeaders[i].m_padding[2] = 0;
		}
	}
	nodeData += sizeof(btBvhSubtreeInfo) * m_subtreeHeaderCount;

	// this clears the pointer in the member variable; it doesn't really do anything to the data.
	// It does call the destructor on the contained objects, but they are all classes with no destructor defined,
	// so the memory (which is not freed) is left alone
	targetBvh->m_SubtreeHeaders.initializeFromBuffer(NULL, 0, 0);

	// this wipes the virtual function table pointer at the start of the buffer for the class
	*((void**)o_alignedDataBuffer) = NULL;

	return true;
}
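
///Illustrative usage sketch (not part of the original file): serializing a BVH into a caller-owned,
///aligned buffer and re-pointing a btQuantizedBvh at it in place. btAlignedAlloc/btAlignedFree come from
///LinearMath/btAlignedAllocator.h; the 16-byte alignment and all local names are assumptions here.
#if 0
static void serializeRoundTripExample(btQuantizedBvh& bvh)
{
	unsigned bufferSize = bvh.calculateSerializeBufferSize();
	void* buffer = btAlignedAlloc(bufferSize, 16);
	bool swapEndian = false;	//keep the host byte order
	bvh.serialize(buffer, bufferSize, swapEndian);
	//later, possibly in another process or on another device, map the buffer back in place:
	btQuantizedBvh* mapped = btQuantizedBvh::deSerializeInPlace(buffer, bufferSize, swapEndian);
	//...use 'mapped' for queries; the buffer must stay alive for as long as 'mapped' is used...
	btAlignedFree(buffer);
}
#endif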

btQuantizedBvh *btQuantizedBvh::deSerializeInPlace(void *i_alignedDataBuffer, unsigned int i_dataBufferSize, bool i_swapEndian)
{

	if (i_alignedDataBuffer == NULL)// || (((unsigned)i_alignedDataBuffer & BVH_ALIGNMENT_MASK) != 0))
	{
		return NULL;
	}
	btQuantizedBvh *bvh = (btQuantizedBvh *)i_alignedDataBuffer;

	if (i_swapEndian)
	{
		bvh->m_curNodeIndex = static_cast<int>(btSwapEndian(bvh->m_curNodeIndex));

		btUnSwapVector3Endian(bvh->m_bvhAabbMin);
		btUnSwapVector3Endian(bvh->m_bvhAabbMax);
		btUnSwapVector3Endian(bvh->m_bvhQuantization);

		bvh->m_traversalMode = (btTraversalMode)btSwapEndian(bvh->m_traversalMode);
		bvh->m_subtreeHeaderCount = static_cast<int>(btSwapEndian(bvh->m_subtreeHeaderCount));
	}

	unsigned int calculatedBufSize = bvh->calculateSerializeBufferSize();
	btAssert(calculatedBufSize <= i_dataBufferSize);

	if (calculatedBufSize > i_dataBufferSize)
	{
		return NULL;
	}

	unsigned char *nodeData = (unsigned char *)bvh;
	nodeData += sizeof(btQuantizedBvh);

	unsigned sizeToAdd = 0;//(BVH_ALIGNMENT-((unsigned)nodeData & BVH_ALIGNMENT_MASK))&BVH_ALIGNMENT_MASK;
	nodeData += sizeToAdd;

	int nodeCount = bvh->m_curNodeIndex;

	// Must call placement new to fill in virtual function table, etc, but we don't want to overwrite most data, so call a special version of the constructor
	// Also, m_leafNodes and m_quantizedLeafNodes will be initialized to default values by the constructor
	new (bvh) btQuantizedBvh(*bvh, false);

	if (bvh->m_useQuantization)
	{
		bvh->m_quantizedContiguousNodes.initializeFromBuffer(nodeData, nodeCount, nodeCount);

		if (i_swapEndian)
		{
			for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
			{
				bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0] = btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0]);
				bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1] = btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1]);
				bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2] = btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2]);

				bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0] = btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0]);
				bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1] = btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1]);
				bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2] = btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2]);

				bvh->m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex = static_cast<int>(btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex));
			}
		}
		nodeData += sizeof(btQuantizedBvhNode) * nodeCount;
	}
	else
	{
		bvh->m_contiguousNodes.initializeFromBuffer(nodeData, nodeCount, nodeCount);

		if (i_swapEndian)
		{
			for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
			{
				btUnSwapVector3Endian(bvh->m_contiguousNodes[nodeIndex].m_aabbMinOrg);
				btUnSwapVector3Endian(bvh->m_contiguousNodes[nodeIndex].m_aabbMaxOrg);

				bvh->m_contiguousNodes[nodeIndex].m_escapeIndex = static_cast<int>(btSwapEndian(bvh->m_contiguousNodes[nodeIndex].m_escapeIndex));
				bvh->m_contiguousNodes[nodeIndex].m_subPart = static_cast<int>(btSwapEndian(bvh->m_contiguousNodes[nodeIndex].m_subPart));
				bvh->m_contiguousNodes[nodeIndex].m_triangleIndex = static_cast<int>(btSwapEndian(bvh->m_contiguousNodes[nodeIndex].m_triangleIndex));
			}
		}
		nodeData += sizeof(btOptimizedBvhNode) * nodeCount;
	}

	sizeToAdd = 0;//(BVH_ALIGNMENT-((unsigned)nodeData & BVH_ALIGNMENT_MASK))&BVH_ALIGNMENT_MASK;
	nodeData += sizeToAdd;

	// Now map the subtree headers back in place
	bvh->m_SubtreeHeaders.initializeFromBuffer(nodeData, bvh->m_subtreeHeaderCount, bvh->m_subtreeHeaderCount);
	if (i_swapEndian)
	{
		for (int i = 0; i < bvh->m_subtreeHeaderCount; i++)
		{
			bvh->m_SubtreeHeaders[i].m_quantizedAabbMin[0] = btSwapEndian(bvh->m_SubtreeHeaders[i].m_quantizedAabbMin[0]);
			bvh->m_SubtreeHeaders[i].m_quantizedAabbMin[1] = btSwapEndian(bvh->m_SubtreeHeaders[i].m_quantizedAabbMin[1]);
			bvh->m_SubtreeHeaders[i].m_quantizedAabbMin[2] = btSwapEndian(bvh->m_SubtreeHeaders[i].m_quantizedAabbMin[2]);

			bvh->m_SubtreeHeaders[i].m_quantizedAabbMax[0] = btSwapEndian(bvh->m_SubtreeHeaders[i].m_quantizedAabbMax[0]);
			bvh->m_SubtreeHeaders[i].m_quantizedAabbMax[1] = btSwapEndian(bvh->m_SubtreeHeaders[i].m_quantizedAabbMax[1]);
			bvh->m_SubtreeHeaders[i].m_quantizedAabbMax[2] = btSwapEndian(bvh->m_SubtreeHeaders[i].m_quantizedAabbMax[2]);

			bvh->m_SubtreeHeaders[i].m_rootNodeIndex = static_cast<int>(btSwapEndian(bvh->m_SubtreeHeaders[i].m_rootNodeIndex));
			bvh->m_SubtreeHeaders[i].m_subtreeSize = static_cast<int>(btSwapEndian(bvh->m_SubtreeHeaders[i].m_subtreeSize));
		}
	}

	return bvh;
}

// Constructor that prevents btVector3's default constructor from being called
btQuantizedBvh::btQuantizedBvh(btQuantizedBvh &self, bool /* ownsMemory */) :
m_bvhAabbMin(self.m_bvhAabbMin),
m_bvhAabbMax(self.m_bvhAabbMax),
m_bvhQuantization(self.m_bvhQuantization),
m_bulletVersion(BT_BULLET_VERSION)
{

}
