
source: downloads/tcl8.5.2/generic/tclAlloc.c @ 35

Last change on this file since revision 35 was revision 25, checked in by landauf, 17 years ago

added tcl to libs

File size: 17.5 KB
/*
 * tclAlloc.c --
 *
 *      This is a very fast storage allocator. It allocates blocks of a small
 *      number of different sizes, and keeps free lists of each size. Blocks
 *      that don't exactly fit are passed up to the next larger size. Blocks
 *      over a certain size are directly allocated from the system.
 *
 * Copyright (c) 1983 Regents of the University of California.
 * Copyright (c) 1996-1997 Sun Microsystems, Inc.
 * Copyright (c) 1998-1999 by Scriptics Corporation.
 *
 * Portions contributed by Chris Kingsley, Jack Jansen and Ray Johnson.
 *
 * See the file "license.terms" for information on usage and redistribution of
 * this file, and for a DISCLAIMER OF ALL WARRANTIES.
 *
 * RCS: @(#) $Id: tclAlloc.c,v 1.27 2007/12/17 15:28:27 msofer Exp $
 */

/*
 * Windows and Unix use an alternative allocator when building with threads
 * that has significantly reduced lock contention.
 */

#include "tclInt.h"
#if !defined(TCL_THREADS) || !defined(USE_THREAD_ALLOC)

#if USE_TCLALLOC

#ifdef TCL_DEBUG
#   define DEBUG
/* #define MSTATS */
#   define RCHECK
#endif

/*
 * We should really make use of AC_CHECK_TYPE(caddr_t) here, but it can wait
 * until Tcl uses config.h properly.
 */

#if defined(_MSC_VER) || defined(__MINGW32__) || defined(__BORLANDC__)
typedef unsigned long caddr_t;
#endif

/*
 * The overhead on a block is at least 8 bytes. When free, this space contains
 * a pointer to the next free block, and the bottom two bits must be zero.
 * When in use, the first byte is set to MAGIC, and the second byte is the
 * size index. The remaining bytes are for alignment. If range checking is
 * enabled then a second word holds the size of the requested block, less 1,
 * rounded up to a multiple of sizeof(RMAGIC). The order of elements is
 * critical: ov.magic must overlay the low order bits of ov.next, and ov.magic
 * can not be a valid ov.next bit pattern.
 */

union overhead {
    union overhead *next;               /* when free */
    unsigned char padding[TCL_ALLOCALIGN];      /* align struct to TCL_ALLOCALIGN bytes */
    struct {
        unsigned char magic0;           /* magic number */
        unsigned char index;            /* bucket # */
        unsigned char unused;           /* unused */
        unsigned char magic1;           /* other magic number */
#ifdef RCHECK
        unsigned short rmagic;          /* range magic number */
        unsigned long size;             /* actual block size */
        unsigned short unused2;         /* padding to 8-byte align */
#endif
    } ovu;
#define overMagic0      ovu.magic0
#define overMagic1      ovu.magic1
#define bucketIndex     ovu.index
#define rangeCheckMagic ovu.rmagic
#define realBlockSize   ovu.size
};


#define MAGIC           0xef    /* magic # on accounting info */
#define RMAGIC          0x5555  /* magic # on range info */

#ifdef RCHECK
#define RSLOP           sizeof (unsigned short)
#else
#define RSLOP           0
#endif

#define OVERHEAD (sizeof(union overhead) + RSLOP)

/*
 * Macro to make it easier to refer to the end-of-block guard magic.
 */

#define BLOCK_END(overPtr) \
    (*(unsigned short *)((caddr_t)((overPtr) + 1) + (overPtr)->realBlockSize))

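/*
 * Editor's note: the sketch below is an illustration added for this listing,
 * not part of the original file, which is why it is fenced off with #if 0.
 * The hypothetical helper ExampleInspectBlock() shows how the layout
 * described above is used in practice: the bookkeeping header sits
 * immediately before the bytes handed to the caller, and (with RCHECK) a
 * second magic word sits just past the rounded-up request size.
 */

#if 0   /* illustrative only, deliberately excluded from compilation */
static void
ExampleInspectBlock(
    char *userPtr)              /* Pointer previously returned by TclpAlloc. */
{
    union overhead *overPtr = (union overhead *)
            ((caddr_t) userPtr - sizeof(union overhead));

    /*
     * A live block carries MAGIC in both magic bytes; TclpFree() and
     * TclpRealloc() below perform the same check before trusting the header.
     */

    if (overPtr->overMagic0 != MAGIC || overPtr->overMagic1 != MAGIC) {
        Tcl_Panic("ExampleInspectBlock: not a live block from this allocator");
    }

#ifdef RCHECK
    /*
     * The end-of-block guard is reachable through BLOCK_END, using the
     * rounded-up size recorded in the header.
     */

    if (overPtr->rangeCheckMagic != RMAGIC || BLOCK_END(overPtr) != RMAGIC) {
        Tcl_Panic("ExampleInspectBlock: block overrun detected");
    }
#endif
}
#endif
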
/*
 * nextf[i] is the pointer to the next free block of size 2^(i+3). The
 * smallest allocatable block is MINBLOCK bytes. The overhead information
 * precedes the data area returned to the user.
 */

#define MINBLOCK        ((sizeof(union overhead) + (TCL_ALLOCALIGN-1)) & ~(TCL_ALLOCALIGN-1))
#define NBUCKETS        (13 - (MINBLOCK >> 4))
#define MAXMALLOC       (1<<(NBUCKETS+2))
static union overhead *nextf[NBUCKETS];
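
/*
 * Editor's note: a worked example, added for this listing and not part of
 * the original file. It assumes TCL_ALLOCALIGN is 8 and RCHECK is not
 * defined, so that sizeof(union overhead) == OVERHEAD == 8. Under those
 * assumptions the macros above evaluate to:
 *
 *     MINBLOCK  == 8          first bucket holds 8-byte blocks
 *     NBUCKETS  == 13         buckets 0..12 hold blocks of 2^(i+3) bytes
 *     MAXMALLOC == 32768      requests needing this much or more (payload
 *                             plus OVERHEAD) bypass the buckets entirely
 *
 * A 100-byte request therefore needs 100 + OVERHEAD == 108 bytes and is
 * rounded up to the 128-byte bucket (index 4). The hypothetical helper
 * below mirrors the search loop used by TclpAlloc (minus its integer
 * overflow bail-out):
 */

#if 0   /* illustrative only, deliberately excluded from compilation */
static int
ExampleBucketFor(
    unsigned int numBytes)      /* Requested payload size in bytes. */
{
    unsigned amount = MINBLOCK;
    int bucket = MINBLOCK >> 4;

    while (numBytes + OVERHEAD > amount) {
        amount <<= 1;
        bucket++;
    }
    return bucket;              /* e.g. ExampleBucketFor(100) == 4 */
}
#endif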

/*
 * The following structure is used to keep track of all system memory
 * currently owned by Tcl. When finalizing, all this memory will be returned
 * to the system.
 */

struct block {
    struct block *nextPtr;      /* Linked list. */
    struct block *prevPtr;      /* Linked list for big blocks, ensures 8-byte
                                 * alignment for suballocated blocks. */
};

static struct block *blockList; /* Tracks the suballocated blocks. */
static struct block bigBlocks={ /* Big blocks aren't suballocated. */
    &bigBlocks, &bigBlocks
};

/*
 * The allocator is protected by a special mutex that must be explicitly
 * initialized. Furthermore, because Tcl_Alloc may be used before anything
 * else in Tcl, the module is also made self-initializing via the allocInit
 * variable.
 */

#ifdef TCL_THREADS
static Tcl_Mutex *allocMutexPtr;
#endif
static int allocInit = 0;

#ifdef MSTATS

/*
 * numMallocs[i] is the difference between the number of mallocs and frees for
 * a given block size.
 */

static  unsigned int numMallocs[NBUCKETS+1];
#include <stdio.h>
#endif

#if defined(DEBUG) || defined(RCHECK)
#define ASSERT(p)       if (!(p)) Tcl_Panic(# p)
#define RANGE_ASSERT(p) if (!(p)) Tcl_Panic(# p)
#else
#define ASSERT(p)
#define RANGE_ASSERT(p)
#endif

/*
 * Prototypes for functions used only in this file.
 */

static void             MoreCore(int bucket);

/*
 *-------------------------------------------------------------------------
 *
 * TclInitAlloc --
 *
 *      Initialize the memory system.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      Initialize the mutex used to serialize allocations.
 *
 *-------------------------------------------------------------------------
 */

void
TclInitAlloc(void)
{
    if (!allocInit) {
        allocInit = 1;
#ifdef TCL_THREADS
        allocMutexPtr = Tcl_GetAllocMutex();
#endif
    }
}

/*
 *-------------------------------------------------------------------------
 *
 * TclFinalizeAllocSubsystem --
 *
 *      Release all resources being used by this subsystem, including
 *      aggressively freeing all memory allocated by TclpAlloc() that has not
 *      yet been released with TclpFree().
 *
 *      After this function is called, all memory allocated with TclpAlloc()
 *      should be considered unusable.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      This subsystem is self-initializing, since memory can be allocated
 *      before Tcl is formally initialized. After this call, this subsystem
 *      has been reset to its initial state and is usable again.
 *
 *-------------------------------------------------------------------------
 */

void
TclFinalizeAllocSubsystem(void)
{
    unsigned int i;
    struct block *blockPtr, *nextPtr;

    Tcl_MutexLock(allocMutexPtr);
    for (blockPtr = blockList; blockPtr != NULL; blockPtr = nextPtr) {
        nextPtr = blockPtr->nextPtr;
        TclpSysFree(blockPtr);
    }
    blockList = NULL;

    for (blockPtr = bigBlocks.nextPtr; blockPtr != &bigBlocks; ) {
        nextPtr = blockPtr->nextPtr;
        TclpSysFree(blockPtr);
        blockPtr = nextPtr;
    }
    bigBlocks.nextPtr = &bigBlocks;
    bigBlocks.prevPtr = &bigBlocks;

    for (i=0 ; i<NBUCKETS ; i++) {
        nextf[i] = NULL;
#ifdef MSTATS
        numMallocs[i] = 0;
#endif
    }
#ifdef MSTATS
    numMallocs[i] = 0;
#endif
    Tcl_MutexUnlock(allocMutexPtr);
}

/*
 *----------------------------------------------------------------------
 *
 * TclpAlloc --
 *
 *      Allocate more memory.
 *
 * Results:
 *      A pointer to the allocated block, or NULL if the allocation fails.
 *
 * Side effects:
 *      None.
 *
 *----------------------------------------------------------------------
 */

char *
TclpAlloc(
    unsigned int numBytes)      /* Number of bytes to allocate. */
{
    register union overhead *overPtr;
    register long bucket;
    register unsigned amount;
    struct block *bigBlockPtr;

    if (!allocInit) {
        /*
         * This module has to be self-initializing because Tcl_Alloc may be
         * used before any other part of Tcl. E.g., see main() for tclsh!
         */

        TclInitAlloc();
    }
    Tcl_MutexLock(allocMutexPtr);

    /*
     * First the simple case: we simply allocate big blocks directly.
     */

    if (numBytes + OVERHEAD >= MAXMALLOC) {
        bigBlockPtr = (struct block *) TclpSysAlloc((unsigned)
                (sizeof(struct block) + OVERHEAD + numBytes), 0);
        if (bigBlockPtr == NULL) {
            Tcl_MutexUnlock(allocMutexPtr);
            return NULL;
        }
        bigBlockPtr->nextPtr = bigBlocks.nextPtr;
        bigBlocks.nextPtr = bigBlockPtr;
        bigBlockPtr->prevPtr = &bigBlocks;
        bigBlockPtr->nextPtr->prevPtr = bigBlockPtr;

        overPtr = (union overhead *) (bigBlockPtr + 1);
        overPtr->overMagic0 = overPtr->overMagic1 = MAGIC;
        overPtr->bucketIndex = 0xff;
#ifdef MSTATS
        numMallocs[NBUCKETS]++;
#endif

#ifdef RCHECK
        /*
         * Record allocated size of block and bound space with magic numbers.
         */

        overPtr->realBlockSize = (numBytes + RSLOP - 1) & ~(RSLOP - 1);
        overPtr->rangeCheckMagic = RMAGIC;
        BLOCK_END(overPtr) = RMAGIC;
#endif

        Tcl_MutexUnlock(allocMutexPtr);
        return (void *)(overPtr+1);
    }

    /*
     * Convert amount of memory requested into closest block size stored in
     * hash buckets which satisfies request. Account for space used per block
     * for accounting.
     */

    amount = MINBLOCK;          /* size of first bucket */
    bucket = MINBLOCK >> 4;

    while (numBytes + OVERHEAD > amount) {
        amount <<= 1;
        if (amount == 0) {
            Tcl_MutexUnlock(allocMutexPtr);
            return NULL;
        }
        bucket++;
    }
    ASSERT(bucket < NBUCKETS);

    /*
     * If nothing in hash bucket right now, request more memory from the
     * system.
     */

    if ((overPtr = nextf[bucket]) == NULL) {
        MoreCore(bucket);
        if ((overPtr = nextf[bucket]) == NULL) {
            Tcl_MutexUnlock(allocMutexPtr);
            return NULL;
        }
    }

    /*
     * Remove from linked list
     */

    nextf[bucket] = overPtr->next;
    overPtr->overMagic0 = overPtr->overMagic1 = MAGIC;
    overPtr->bucketIndex = (unsigned char) bucket;

#ifdef MSTATS
    numMallocs[bucket]++;
#endif

#ifdef RCHECK
    /*
     * Record allocated size of block and bound space with magic numbers.
     */

    overPtr->realBlockSize = (numBytes + RSLOP - 1) & ~(RSLOP - 1);
    overPtr->rangeCheckMagic = RMAGIC;
    BLOCK_END(overPtr) = RMAGIC;
#endif

    Tcl_MutexUnlock(allocMutexPtr);
    return ((char *)(overPtr + 1));
}
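
/*
 * Editor's note: a usage sketch added for this listing, not part of the
 * original file. ExampleUseTclpAlloc() is a hypothetical helper showing the
 * two paths of TclpAlloc(): small requests come from a free-list bucket,
 * while a request of MAXMALLOC bytes or more (counting OVERHEAD) is handed
 * straight to TclpSysAlloc() and tagged with bucket index 0xff.
 */

#if 0   /* illustrative only, deliberately excluded from compilation */
static void
ExampleUseTclpAlloc(void)
{
    char *small = TclpAlloc(100);       /* served from a bucket */
    char *big = TclpAlloc(MAXMALLOC);   /* big-block path */

    if (small != NULL) {
        TclpFree(small);
    }
    if (big != NULL) {
        TclpFree(big);
    }
}
#endif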

/*
 *----------------------------------------------------------------------
 *
 * MoreCore --
 *
 *      Allocate more memory to the indicated bucket.
 *
 *      Assumes Mutex is already held.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      Attempts to get more memory from the system.
 *
 *----------------------------------------------------------------------
 */

static void
MoreCore(
    int bucket)                 /* What bucket to allocate to. */
{
    register union overhead *overPtr;
    register long size;         /* size of desired block */
    long amount;                /* amount to allocate */
    int numBlocks;              /* how many blocks we get */
    struct block *blockPtr;

    /*
     * sbrk_size <= 0 only for big, FLUFFY, requests (about 2^30 bytes on a
     * VAX, I think) or for a negative arg.
     */

    size = 1 << (bucket + 3);
    ASSERT(size > 0);

    amount = MAXMALLOC;
    numBlocks = amount / size;
    ASSERT(numBlocks*size == amount);

    blockPtr = (struct block *) TclpSysAlloc((unsigned)
            (sizeof(struct block) + amount), 1);
    /* no more room! */
    if (blockPtr == NULL) {
        return;
    }
    blockPtr->nextPtr = blockList;
    blockList = blockPtr;

    overPtr = (union overhead *) (blockPtr + 1);

    /*
     * Add new memory allocated to that on free list for this hash bucket.
     */

    nextf[bucket] = overPtr;
    while (--numBlocks > 0) {
        overPtr->next = (union overhead *)((caddr_t)overPtr + size);
        overPtr = (union overhead *)((caddr_t)overPtr + size);
    }
    overPtr->next = NULL;
}
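
/*
 * Editor's note: a worked example added for this listing, not part of the
 * original file. Under the same assumptions as above (MAXMALLOC == 32768),
 * a single MoreCore(4) call obtains one 32768-byte chunk from TclpSysAlloc
 * (plus a struct block header for tracking) and carves it into
 * 32768 / (1 << (4 + 3)) == 256 blocks of 128 bytes, all threaded onto
 * nextf[4]. The hypothetical helper below simply counts them back; a real
 * caller would do this while holding the allocator mutex.
 */

#if 0   /* illustrative only, deliberately excluded from compilation */
static int
ExampleCountFreeBlocks(
    int bucket)                 /* Bucket whose free list to measure. */
{
    union overhead *overPtr;
    int count = 0;

    for (overPtr = nextf[bucket]; overPtr != NULL; overPtr = overPtr->next) {
        count++;
    }
    return count;               /* 256 immediately after MoreCore(4) */
}
#endif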

/*
 *----------------------------------------------------------------------
 *
 * TclpFree --
 *
 *      Free memory.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      None.
 *
 *----------------------------------------------------------------------
 */

void
TclpFree(
    char *oldPtr)               /* Pointer to memory to free. */
{
    register long size;
    register union overhead *overPtr;
    struct block *bigBlockPtr;

    if (oldPtr == NULL) {
        return;
    }

    Tcl_MutexLock(allocMutexPtr);
    overPtr = (union overhead *)((caddr_t)oldPtr - sizeof (union overhead));

    ASSERT(overPtr->overMagic0 == MAGIC);       /* make sure it was in use */
    ASSERT(overPtr->overMagic1 == MAGIC);
    if (overPtr->overMagic0 != MAGIC || overPtr->overMagic1 != MAGIC) {
        Tcl_MutexUnlock(allocMutexPtr);
        return;
    }

    RANGE_ASSERT(overPtr->rangeCheckMagic == RMAGIC);
    RANGE_ASSERT(BLOCK_END(overPtr) == RMAGIC);
    size = overPtr->bucketIndex;
    if (size == 0xff) {
#ifdef MSTATS
        numMallocs[NBUCKETS]--;
#endif

        bigBlockPtr = (struct block *) overPtr - 1;
        bigBlockPtr->prevPtr->nextPtr = bigBlockPtr->nextPtr;
        bigBlockPtr->nextPtr->prevPtr = bigBlockPtr->prevPtr;
        TclpSysFree(bigBlockPtr);

        Tcl_MutexUnlock(allocMutexPtr);
        return;
    }
    ASSERT(size < NBUCKETS);
    overPtr->next = nextf[size];        /* also clobbers overMagic */
    nextf[size] = overPtr;

#ifdef MSTATS
    numMallocs[size]--;
#endif

    Tcl_MutexUnlock(allocMutexPtr);
}

/*
 *----------------------------------------------------------------------
 *
 * TclpRealloc --
 *
 *      Reallocate memory.
 *
 * Results:
 *      A pointer to the resized block (which may have moved), or NULL if
 *      the reallocation fails.
 *
 * Side effects:
 *      None.
 *
 *----------------------------------------------------------------------
 */

char *
TclpRealloc(
    char *oldPtr,               /* Pointer to alloced block. */
    unsigned int numBytes)      /* New size of memory. */
{
    int i;
    union overhead *overPtr;
    struct block *bigBlockPtr;
    int expensive;
    unsigned long maxSize;

    if (oldPtr == NULL) {
        return TclpAlloc(numBytes);
    }

    Tcl_MutexLock(allocMutexPtr);

    overPtr = (union overhead *)((caddr_t)oldPtr - sizeof (union overhead));

    ASSERT(overPtr->overMagic0 == MAGIC);       /* make sure it was in use */
    ASSERT(overPtr->overMagic1 == MAGIC);
    if (overPtr->overMagic0 != MAGIC || overPtr->overMagic1 != MAGIC) {
        Tcl_MutexUnlock(allocMutexPtr);
        return NULL;
    }

    RANGE_ASSERT(overPtr->rangeCheckMagic == RMAGIC);
    RANGE_ASSERT(BLOCK_END(overPtr) == RMAGIC);
    i = overPtr->bucketIndex;

    /*
     * If the block isn't in a bin, just realloc it.
     */

    if (i == 0xff) {
        struct block *prevPtr, *nextPtr;
        bigBlockPtr = (struct block *) overPtr - 1;
        prevPtr = bigBlockPtr->prevPtr;
        nextPtr = bigBlockPtr->nextPtr;
        bigBlockPtr = (struct block *) TclpSysRealloc(bigBlockPtr,
                sizeof(struct block) + OVERHEAD + numBytes);
        if (bigBlockPtr == NULL) {
            Tcl_MutexUnlock(allocMutexPtr);
            return NULL;
        }

        if (prevPtr->nextPtr != bigBlockPtr) {
            /*
             * If the block has moved, splice the new block into the list
             * where the old block used to be.
             */

            prevPtr->nextPtr = bigBlockPtr;
            nextPtr->prevPtr = bigBlockPtr;
        }

        overPtr = (union overhead *) (bigBlockPtr + 1);

#ifdef MSTATS
        numMallocs[NBUCKETS]++;
#endif

#ifdef RCHECK
        /*
         * Record allocated size of block and update magic number bounds.
         */

        overPtr->realBlockSize = (numBytes + RSLOP - 1) & ~(RSLOP - 1);
        BLOCK_END(overPtr) = RMAGIC;
#endif

        Tcl_MutexUnlock(allocMutexPtr);
        return (char *)(overPtr+1);
    }
    maxSize = 1 << (i+3);
    expensive = 0;
    if (numBytes+OVERHEAD > maxSize) {
        expensive = 1;
    } else if (i>0 && numBytes+OVERHEAD < maxSize/2) {
        expensive = 1;
    }

    if (expensive) {
        void *newPtr;

        Tcl_MutexUnlock(allocMutexPtr);

        newPtr = TclpAlloc(numBytes);
        if (newPtr == NULL) {
            return NULL;
        }
        maxSize -= OVERHEAD;
        if (maxSize < numBytes) {
            numBytes = maxSize;
        }
        memcpy(newPtr, oldPtr, (size_t) numBytes);
        TclpFree(oldPtr);
        return newPtr;
    }

    /*
     * Ok, we don't have to copy, it fits as-is
     */

#ifdef RCHECK
    overPtr->realBlockSize = (numBytes + RSLOP - 1) & ~(RSLOP - 1);
    BLOCK_END(overPtr) = RMAGIC;
#endif

    Tcl_MutexUnlock(allocMutexPtr);
    return(oldPtr);
}
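
/*
 * Editor's note: a usage sketch added for this listing, not part of the
 * original file (and resting on the same TCL_ALLOCALIGN == 8, no-RCHECK
 * assumptions as the earlier examples). ExampleReallocBehaviour() is a
 * hypothetical helper illustrating the cheap and expensive paths above:
 * resizing within the block's current bucket returns the same pointer,
 * while growing past the bucket, or shrinking below half of it, goes
 * through TclpAlloc + memcpy + TclpFree and may move the block.
 */

#if 0   /* illustrative only, deliberately excluded from compilation */
static void
ExampleReallocBehaviour(void)
{
    char *ptr = TclpAlloc(100);         /* lands in the 128-byte bucket */
    char *newPtr;

    if (ptr == NULL) {
        return;
    }

    /*
     * 110 + OVERHEAD still fits the 128-byte bucket, so this is the cheap
     * path: no copy, the same pointer comes back.
     */

    ptr = TclpRealloc(ptr, 110);

    /*
     * Growing to 4000 bytes overflows the bucket, so this takes the
     * expensive path and the block may move.
     */

    newPtr = TclpRealloc(ptr, 4000);
    if (newPtr != NULL) {
        ptr = newPtr;
    }
    TclpFree(ptr);
}
#endif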

/*
 *----------------------------------------------------------------------
 *
 * mstats --
 *
 *      Prints two lines of numbers, one showing the length of the free list
 *      for each size category, the second showing the number of mallocs -
 *      frees for each size category.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      None.
 *
 *----------------------------------------------------------------------
 */

#ifdef MSTATS
void
mstats(
    char *s)                    /* Label to print with the info. */
{
    register int i, j;
    register union overhead *overPtr;
    int totalFree = 0, totalUsed = 0;

    Tcl_MutexLock(allocMutexPtr);

    fprintf(stderr, "Memory allocation statistics %s\nTclpFree:\t", s);
    for (i = 0; i < NBUCKETS; i++) {
        for (j=0, overPtr=nextf[i]; overPtr; overPtr=overPtr->next, j++) {
            fprintf(stderr, " %d", j);
        }
        totalFree += j * (1 << (i + 3));
    }

    fprintf(stderr, "\nused:\t");
    for (i = 0; i < NBUCKETS; i++) {
        fprintf(stderr, " %d", numMallocs[i]);
        totalUsed += numMallocs[i] * (1 << (i + 3));
    }

    fprintf(stderr, "\n\tTotal small in use: %d, total free: %d\n",
            totalUsed, totalFree);
    fprintf(stderr, "\n\tNumber of big (>%d) blocks in use: %d\n",
            MAXMALLOC, numMallocs[NBUCKETS]);

    Tcl_MutexUnlock(allocMutexPtr);
}
#endif
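
/*
 * Editor's note: a usage sketch added for this listing, not part of the
 * original file. mstats() exists only when MSTATS is defined (see the
 * commented-out definition near the top of the file); it takes a label and
 * writes the statistics to stderr.
 */

#if 0   /* illustrative only, deliberately excluded from compilation */
static void
ExampleDumpAllocStats(void)
{
    mstats("after script evaluation");
}
#endif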

#else   /* !USE_TCLALLOC */

/*
 *----------------------------------------------------------------------
 *
 * TclpAlloc --
 *
 *      Allocate more memory.
 *
 * Results:
 *      A pointer to the allocated block, or NULL if the allocation fails.
 *
 * Side effects:
 *      None.
 *
 *----------------------------------------------------------------------
 */

char *
TclpAlloc(
    unsigned int numBytes)      /* Number of bytes to allocate. */
{
    return (char*) malloc(numBytes);
}

/*
 *----------------------------------------------------------------------
 *
 * TclpFree --
 *
 *      Free memory.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      None.
 *
 *----------------------------------------------------------------------
 */

void
TclpFree(
    char *oldPtr)               /* Pointer to memory to free. */
{
    free(oldPtr);
    return;
}

/*
 *----------------------------------------------------------------------
 *
 * TclpRealloc --
 *
 *      Reallocate memory.
 *
 * Results:
 *      A pointer to the resized block (which may have moved), or NULL if
 *      the reallocation fails.
 *
 * Side effects:
 *      None.
 *
 *----------------------------------------------------------------------
 */

char *
TclpRealloc(
    char *oldPtr,               /* Pointer to alloced block. */
    unsigned int numBytes)      /* New size of memory. */
{
    return (char*) realloc(oldPtr, numBytes);
}

#endif /* !USE_TCLALLOC */
#endif /* !TCL_THREADS */

/*
 * Local Variables:
 * mode: c
 * c-basic-offset: 4
 * fill-column: 78
 * End:
 */