
source: code/branches/png2/src/external/bullet/BulletCollision/BroadphaseCollision/btQuantizedBvh.h @ 7324

Last change on this file since 7324 was 5781, checked in by rgrieder, 15 years ago

Reverted trunk again. We might want to find a way to delete these revisions again (x3n's changes are still available as diff in the commit mails).

  • Property svn:eol-style set to native
File size: 15.1 KB
/*
Bullet Continuous Collision Detection and Physics Library
Copyright (c) 2003-2006 Erwin Coumans  http://continuousphysics.com/Bullet/

This software is provided 'as-is', without any express or implied warranty.
In no event will the authors be held liable for any damages arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it freely,
subject to the following restrictions:

1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
*/

#ifndef QUANTIZED_BVH_H
#define QUANTIZED_BVH_H

//#define DEBUG_CHECK_DEQUANTIZATION 1
#ifdef DEBUG_CHECK_DEQUANTIZATION
#ifdef __SPU__
#define printf spu_printf
#endif //__SPU__

#include <stdio.h>
#include <stdlib.h>
#endif //DEBUG_CHECK_DEQUANTIZATION

#include "LinearMath/btVector3.h"
#include "LinearMath/btAlignedAllocator.h"


//http://msdn.microsoft.com/library/default.asp?url=/library/en-us/vclang/html/vclrf__m128.asp


//Note: currently we have 16 bytes per quantized node
#define MAX_SUBTREE_SIZE_IN_BYTES  2048
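// With 16 bytes per quantized node, this allows at most 2048 / 16 = 128 quantized nodes per subtree.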

// 10 gives the potential for 1024 parts, with at most 2^21 (2097152) triangles each
// (minus one, actually, since the sign bit is reserved)
#define MAX_NUM_PARTS_IN_BITS 10
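// For a leaf node, m_escapeIndexOrTriangleIndex is laid out as:
//   bit 31      : sign bit, 0 for leaf nodes (internal nodes store a negative escape index)
//   bits 21..30 : part id (MAX_NUM_PARTS_IN_BITS = 10 bits)
//   bits 0..20  : triangle index (31 - MAX_NUM_PARTS_IN_BITS = 21 bits)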

///btQuantizedBvhNode is a compressed AABB node, 16 bytes.
///A node can be used as a leaf node or an internal node. Leaf nodes can point to a 32-bit triangle index (non-negative range).
ATTRIBUTE_ALIGNED16 (struct) btQuantizedBvhNode
{
    BT_DECLARE_ALIGNED_ALLOCATOR();

    //12 bytes
    unsigned short int m_quantizedAabbMin[3];
    unsigned short int m_quantizedAabbMax[3];
    //4 bytes
    int m_escapeIndexOrTriangleIndex;

    bool isLeafNode() const
    {
        //the escape index is negative (internal node), the triangle index is >= 0 (leaf node)
        return (m_escapeIndexOrTriangleIndex >= 0);
    }
    int getEscapeIndex() const
    {
        btAssert(!isLeafNode());
        return -m_escapeIndexOrTriangleIndex;
    }
    int getTriangleIndex() const
    {
        btAssert(isLeafNode());
        // Keep only the lower bits, where the triangle index is stored
        return (m_escapeIndexOrTriangleIndex & ~((~0) << (31 - MAX_NUM_PARTS_IN_BITS)));
    }
    int getPartId() const
    {
        btAssert(isLeafNode());
        // Keep only the highest bits, where the part index is stored
        return (m_escapeIndexOrTriangleIndex >> (31 - MAX_NUM_PARTS_IN_BITS));
    }
};

/// btOptimizedBvhNode contains both internal and leaf node information.
/// Total node size is 44 bytes per node; you can use the compressed 16-byte version instead.
ATTRIBUTE_ALIGNED16 (struct) btOptimizedBvhNode
{
    BT_DECLARE_ALIGNED_ALLOCATOR();

    //32 bytes
    btVector3 m_aabbMinOrg;
    btVector3 m_aabbMaxOrg;

    //4 bytes
    int m_escapeIndex;

    //8 bytes
    //for child nodes
    int m_subPart;
    int m_triangleIndex;
    int m_padding[5]; //bad, due to alignment
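    // The padding brings the 44-byte payload up to 64 bytes, a multiple of the 16-byte alignment.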


};


///btBvhSubtreeInfo provides info to gather a subtree of limited size
ATTRIBUTE_ALIGNED16(class) btBvhSubtreeInfo
{
public:
    BT_DECLARE_ALIGNED_ALLOCATOR();

    //12 bytes
    unsigned short int m_quantizedAabbMin[3];
    unsigned short int m_quantizedAabbMax[3];
    //4 bytes, points to the root of the subtree
    int m_rootNodeIndex;
    //4 bytes
    int m_subtreeSize;
    int m_padding[3];

    btBvhSubtreeInfo()
    {
        //memset(&m_padding[0], 0, sizeof(m_padding));
    }


    void setAabbFromQuantizeNode(const btQuantizedBvhNode& quantizedNode)
    {
        m_quantizedAabbMin[0] = quantizedNode.m_quantizedAabbMin[0];
        m_quantizedAabbMin[1] = quantizedNode.m_quantizedAabbMin[1];
        m_quantizedAabbMin[2] = quantizedNode.m_quantizedAabbMin[2];
        m_quantizedAabbMax[0] = quantizedNode.m_quantizedAabbMax[0];
        m_quantizedAabbMax[1] = quantizedNode.m_quantizedAabbMax[1];
        m_quantizedAabbMax[2] = quantizedNode.m_quantizedAabbMax[2];
    }
};


class btNodeOverlapCallback
{
public:
    virtual ~btNodeOverlapCallback() {};

    virtual void processNode(int subPart, int triangleIndex) = 0;
};

#include "LinearMath/btAlignedAllocator.h"
#include "LinearMath/btAlignedObjectArray.h"



///for code readability:
typedef btAlignedObjectArray<btOptimizedBvhNode>   NodeArray;
typedef btAlignedObjectArray<btQuantizedBvhNode>   QuantizedNodeArray;
typedef btAlignedObjectArray<btBvhSubtreeInfo>     BvhSubtreeInfoArray;


///The btQuantizedBvh class stores an AABB tree that can be quickly traversed on the CPU and Cell SPU.
///It is used by btBvhTriangleMeshShape as the midphase, and by btMultiSapBroadphase.
///It is recommended to use quantization for better performance and lower memory requirements.
ATTRIBUTE_ALIGNED16(class) btQuantizedBvh
{
public:
    enum btTraversalMode
    {
        TRAVERSAL_STACKLESS = 0,
        TRAVERSAL_STACKLESS_CACHE_FRIENDLY,
        TRAVERSAL_RECURSIVE
    };
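    // TRAVERSAL_STACKLESS walks the contiguous node array using the escape (skip) index,
    // TRAVERSAL_STACKLESS_CACHE_FRIENDLY additionally uses the subtree headers
    // (see walkStacklessQuantizedTreeCacheFriendly), and TRAVERSAL_RECURSIVE uses
    // walkRecursiveQuantizedTreeAgainstQueryAabb. Per setTraversalMode below, the
    // mode selection is only implemented for quantized trees.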

protected:

    btVector3 m_bvhAabbMin;
    btVector3 m_bvhAabbMax;
    btVector3 m_bvhQuantization;

    int m_bulletVersion; //for serialization versioning. It could also be used to detect endianness.

    int m_curNodeIndex;
    //quantization data
    bool m_useQuantization;

    NodeArray m_leafNodes;
    NodeArray m_contiguousNodes;
    QuantizedNodeArray m_quantizedLeafNodes;
    QuantizedNodeArray m_quantizedContiguousNodes;

    btTraversalMode m_traversalMode;
    BvhSubtreeInfoArray m_SubtreeHeaders;

    //This is only used for serialization, so we don't have to add serialization directly to btAlignedObjectArray
    int m_subtreeHeaderCount;


    ///two versions, one for quantized and one for normal nodes. This allows code reuse while maintaining readability (no template/macro!)
    ///this might be refactored into a virtual; it is usually not calculated at run-time
    void setInternalNodeAabbMin(int nodeIndex, const btVector3& aabbMin)
    {
        if (m_useQuantization)
        {
            quantize(&m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0], aabbMin, 0);
        }
        else
        {
            m_contiguousNodes[nodeIndex].m_aabbMinOrg = aabbMin;
        }
    }
    void setInternalNodeAabbMax(int nodeIndex, const btVector3& aabbMax)
    {
        if (m_useQuantization)
        {
            quantize(&m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0], aabbMax, 1);
        }
        else
        {
            m_contiguousNodes[nodeIndex].m_aabbMaxOrg = aabbMax;
        }
    }

    btVector3 getAabbMin(int nodeIndex) const
    {
        if (m_useQuantization)
        {
            return unQuantize(&m_quantizedLeafNodes[nodeIndex].m_quantizedAabbMin[0]);
        }
        //non-quantized
        return m_leafNodes[nodeIndex].m_aabbMinOrg;
    }
    btVector3 getAabbMax(int nodeIndex) const
    {
        if (m_useQuantization)
        {
            return unQuantize(&m_quantizedLeafNodes[nodeIndex].m_quantizedAabbMax[0]);
        }
        //non-quantized
        return m_leafNodes[nodeIndex].m_aabbMaxOrg;
    }


    void setInternalNodeEscapeIndex(int nodeIndex, int escapeIndex)
    {
        if (m_useQuantization)
        {
            m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex = -escapeIndex;
        }
        else
        {
            m_contiguousNodes[nodeIndex].m_escapeIndex = escapeIndex;
        }
    }

    void mergeInternalNodeAabb(int nodeIndex, const btVector3& newAabbMin, const btVector3& newAabbMax)
    {
        if (m_useQuantization)
        {
            unsigned short int quantizedAabbMin[3];
            unsigned short int quantizedAabbMax[3];
            quantize(quantizedAabbMin, newAabbMin, 0);
            quantize(quantizedAabbMax, newAabbMax, 1);
            for (int i = 0; i < 3; i++)
            {
                if (m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[i] > quantizedAabbMin[i])
                    m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[i] = quantizedAabbMin[i];

                if (m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[i] < quantizedAabbMax[i])
                    m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[i] = quantizedAabbMax[i];
            }
        }
        else
        {
            //non-quantized
            m_contiguousNodes[nodeIndex].m_aabbMinOrg.setMin(newAabbMin);
            m_contiguousNodes[nodeIndex].m_aabbMaxOrg.setMax(newAabbMax);
        }
    }

    void swapLeafNodes(int firstIndex, int secondIndex);

    void assignInternalNodeFromLeafNode(int internalNode, int leafNodeIndex);

protected:

    void buildTree(int startIndex, int endIndex);

    int calcSplittingAxis(int startIndex, int endIndex);

    int sortAndCalcSplittingIndex(int startIndex, int endIndex, int splitAxis);

    void walkStacklessTree(btNodeOverlapCallback* nodeCallback, const btVector3& aabbMin, const btVector3& aabbMax) const;

    void walkStacklessQuantizedTreeAgainstRay(btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget, const btVector3& aabbMin, const btVector3& aabbMax, int startNodeIndex, int endNodeIndex) const;
    void walkStacklessQuantizedTree(btNodeOverlapCallback* nodeCallback, unsigned short int* quantizedQueryAabbMin, unsigned short int* quantizedQueryAabbMax, int startNodeIndex, int endNodeIndex) const;
    void walkStacklessTreeAgainstRay(btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget, const btVector3& aabbMin, const btVector3& aabbMax, int startNodeIndex, int endNodeIndex) const;

    ///tree traversal designed for small-memory processors like the PS3 SPU
    void walkStacklessQuantizedTreeCacheFriendly(btNodeOverlapCallback* nodeCallback, unsigned short int* quantizedQueryAabbMin, unsigned short int* quantizedQueryAabbMax) const;

    ///use the 16-byte stackless 'skipindex' node tree to do a recursive traversal
    void walkRecursiveQuantizedTreeAgainstQueryAabb(const btQuantizedBvhNode* currentNode, btNodeOverlapCallback* nodeCallback, unsigned short int* quantizedQueryAabbMin, unsigned short int* quantizedQueryAabbMax) const;

    ///use the 16-byte stackless 'skipindex' node tree to do a recursive traversal
    void walkRecursiveQuantizedTreeAgainstQuantizedTree(const btQuantizedBvhNode* treeNodeA, const btQuantizedBvhNode* treeNodeB, btNodeOverlapCallback* nodeCallback) const;


    void updateSubtreeHeaders(int leftChildNodexIndex, int rightChildNodexIndex);

public:

    BT_DECLARE_ALIGNED_ALLOCATOR();

    btQuantizedBvh();

    virtual ~btQuantizedBvh();


    ///***************************************** expert/internal use only *************************
    void setQuantizationValues(const btVector3& bvhAabbMin, const btVector3& bvhAabbMax, btScalar quantizationMargin = btScalar(1.0));
    QuantizedNodeArray& getLeafNodeArray() { return m_quantizedLeafNodes; }
    ///buildInternal is expert use only: it assumes that setQuantizationValues has been called and the LeafNodeArray is initialized
    void buildInternal();
    ///***************************************** expert/internal use only *************************

    void reportAabbOverlappingNodex(btNodeOverlapCallback* nodeCallback, const btVector3& aabbMin, const btVector3& aabbMax) const;
    void reportRayOverlappingNodex(btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget) const;
    void reportBoxCastOverlappingNodex(btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget, const btVector3& aabbMin, const btVector3& aabbMax) const;

    SIMD_FORCE_INLINE void quantize(unsigned short* out, const btVector3& point, int isMax) const
    {
        btAssert(m_useQuantization);

        btAssert(point.getX() <= m_bvhAabbMax.getX());
        btAssert(point.getY() <= m_bvhAabbMax.getY());
        btAssert(point.getZ() <= m_bvhAabbMax.getZ());

        btAssert(point.getX() >= m_bvhAabbMin.getX());
        btAssert(point.getY() >= m_bvhAabbMin.getY());
        btAssert(point.getZ() >= m_bvhAabbMin.getZ());

        btVector3 v = (point - m_bvhAabbMin) * m_bvhQuantization;
        ///Make sure rounding is done in a way that unQuantize(quantizeWithClamp(...)) is conservative
        ///end-points always set the first bit, so that they are sorted properly (so that neighbouring AABBs overlap properly)
        ///@todo: double-check this
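        // Max end-points are rounded up and get their lowest bit set (odd grid values);
        // min end-points are rounded down with the lowest bit cleared (even grid values),
        // so a dequantized AABB always conservatively encloses the original box.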
        if (isMax)
        {
            out[0] = (unsigned short) (((unsigned short)(v.getX() + btScalar(1.)) | 1));
            out[1] = (unsigned short) (((unsigned short)(v.getY() + btScalar(1.)) | 1));
            out[2] = (unsigned short) (((unsigned short)(v.getZ() + btScalar(1.)) | 1));
        }
        else
        {
            out[0] = (unsigned short) (((unsigned short)(v.getX()) & 0xfffe));
            out[1] = (unsigned short) (((unsigned short)(v.getY()) & 0xfffe));
            out[2] = (unsigned short) (((unsigned short)(v.getZ()) & 0xfffe));
        }

#ifdef DEBUG_CHECK_DEQUANTIZATION
        btVector3 newPoint = unQuantize(out);
        if (isMax)
        {
            if (newPoint.getX() < point.getX())
            {
                printf("unconservative X, diffX = %f, oldX=%f,newX=%f\n", newPoint.getX() - point.getX(), newPoint.getX(), point.getX());
            }
            if (newPoint.getY() < point.getY())
            {
                printf("unconservative Y, diffY = %f, oldY=%f,newY=%f\n", newPoint.getY() - point.getY(), newPoint.getY(), point.getY());
            }
            if (newPoint.getZ() < point.getZ())
            {
                printf("unconservative Z, diffZ = %f, oldZ=%f,newZ=%f\n", newPoint.getZ() - point.getZ(), newPoint.getZ(), point.getZ());
            }
        }
        else
        {
            if (newPoint.getX() > point.getX())
            {
                printf("unconservative X, diffX = %f, oldX=%f,newX=%f\n", newPoint.getX() - point.getX(), newPoint.getX(), point.getX());
            }
            if (newPoint.getY() > point.getY())
            {
                printf("unconservative Y, diffY = %f, oldY=%f,newY=%f\n", newPoint.getY() - point.getY(), newPoint.getY(), point.getY());
            }
            if (newPoint.getZ() > point.getZ())
            {
                printf("unconservative Z, diffZ = %f, oldZ=%f,newZ=%f\n", newPoint.getZ() - point.getZ(), newPoint.getZ(), point.getZ());
            }
        }
#endif //DEBUG_CHECK_DEQUANTIZATION

    }


    SIMD_FORCE_INLINE void quantizeWithClamp(unsigned short* out, const btVector3& point2, int isMax) const
    {
        btAssert(m_useQuantization);

        btVector3 clampedPoint(point2);
        clampedPoint.setMax(m_bvhAabbMin);
        clampedPoint.setMin(m_bvhAabbMax);

        quantize(out, clampedPoint, isMax);
    }

    SIMD_FORCE_INLINE btVector3 unQuantize(const unsigned short* vecIn) const
    {
        btVector3 vecOut;
        vecOut.setValue(
            (btScalar)(vecIn[0]) / (m_bvhQuantization.getX()),
            (btScalar)(vecIn[1]) / (m_bvhQuantization.getY()),
            (btScalar)(vecIn[2]) / (m_bvhQuantization.getZ()));
        vecOut += m_bvhAabbMin;
        return vecOut;
    }

    ///setTraversalMode lets you choose between stackless, recursive, or stackless cache-friendly tree traversal. Note that this is only implemented for quantized trees.
    void setTraversalMode(btTraversalMode traversalMode)
    {
        m_traversalMode = traversalMode;
    }


    SIMD_FORCE_INLINE QuantizedNodeArray& getQuantizedNodeArray()
    {
        return m_quantizedContiguousNodes;
    }


    SIMD_FORCE_INLINE BvhSubtreeInfoArray& getSubtreeInfoArray()
    {
        return m_SubtreeHeaders;
    }

    ///Calculate the space needed to store the BVH for serialization
    unsigned calculateSerializeBufferSize();

    ///Data buffer MUST be 16-byte aligned
    virtual bool serialize(void* o_alignedDataBuffer, unsigned i_dataBufferSize, bool i_swapEndian);

    ///deSerializeInPlace loads and initializes a BVH from a buffer in memory 'in place'
    static btQuantizedBvh* deSerializeInPlace(void* i_alignedDataBuffer, unsigned int i_dataBufferSize, bool i_swapEndian);

    static unsigned int getAlignmentSerializationPadding();

    SIMD_FORCE_INLINE bool isQuantized()
    {
        return m_useQuantization;
    }

private:
    // Special "copy" constructor that allows for in-place deserialization
    // Prevents btVector3's default constructor from being called, but doesn't initialize much else
    // ownsMemory should most likely be false when deserializing, and if you are not deserializing, don't call this (it also changes the function signature, which we need)
    btQuantizedBvh(btQuantizedBvh& other, bool ownsMemory);

};


#endif //QUANTIZED_BVH_H