/*
 * tclThreadAlloc.c --
 *
 *	This is a very fast storage allocator for use with threads (designed
 *	to avoid lock contention). The basic strategy is to allocate memory in
 *	fixed size blocks from per-thread block caches.
 *
 * The Initial Developer of the Original Code is America Online, Inc.
 * Portions created by AOL are Copyright (C) 1999 America Online, Inc.
 *
 * See the file "license.terms" for information on usage and redistribution of
 * this file, and for a DISCLAIMER OF ALL WARRANTIES.
 *
 * RCS: @(#) $Id: tclThreadAlloc.c,v 1.25 2007/12/17 15:28:28 msofer Exp $
 */

#include "tclInt.h"
#if defined(TCL_THREADS) && defined(USE_THREAD_ALLOC)

/*
 * If range checking is enabled, an additional byte will be allocated to store
 * the magic number at the end of the requested memory.
 */

#ifndef RCHECK
#ifdef NDEBUG
#define RCHECK 0
#else
#define RCHECK 1
#endif
#endif

/*
 * The following define the number of Tcl_Obj's to allocate/move at a time and
 * the high water mark to prune a per-thread cache. On a 32 bit system,
 * sizeof(Tcl_Obj) = 24 so 800 * 24 = ~16k.
 */

#define NOBJALLOC	800
#define NOBJHIGH	1200

/*
 * The following union stores accounting information for each block including
 * two small magic numbers and a bucket number when in use or a next pointer
 * when free. The original requested size (not including the Block overhead)
 * is also maintained. The union is padded to TCL_ALLOCALIGN bytes so that
 * the user pointer (which points just past the Block) stays aligned.
 */

typedef union Block {
    struct {
	union {
	    union Block *next;		/* Next in free list. */
	    struct {
		unsigned char magic1;	/* First magic number. */
		unsigned char bucket;	/* Bucket block allocated from. */
		unsigned char unused;	/* Padding. */
		unsigned char magic2;	/* Second magic number. */
	    } s;
	} u;
	size_t reqSize;			/* Requested allocation size. */
    } b;
    unsigned char padding[TCL_ALLOCALIGN];
} Block;

/*
 * Accessor shorthands for the nested Block fields above.
 */

#define nextBlock	b.u.next
#define sourceBucket	b.u.s.bucket
#define magicNum1	b.u.s.magic1
#define magicNum2	b.u.s.magic2
#define MAGIC		0xEF
#define blockReqSize	b.reqSize

/*
 * The following defines the minimum and maximum block sizes and the number
 * of buckets in the bucket cache.
 */

#define MINALLOC	((sizeof(Block) + 8 + (TCL_ALLOCALIGN-1)) & ~(TCL_ALLOCALIGN-1))
#define NBUCKETS	(11 - (MINALLOC >> 5))
#define MAXALLOC	(MINALLOC << (NBUCKETS - 1))

/*
 * The following structure defines a bucket of blocks with various accounting
 * and statistics information.
 */

typedef struct Bucket {
    Block *firstPtr;		/* First block available */
    long numFree;		/* Number of blocks available */

    /* All fields below for accounting only */

    long numRemoves;		/* Number of removes from bucket */
    long numInserts;		/* Number of inserts into bucket */
    long numWaits;		/* Number of waits to acquire a lock */
    long numLocks;		/* Number of locks acquired */
    long totalAssigned;		/* Total space assigned to bucket */
} Bucket;

/*
 * The following structure defines a cache of buckets and objs, of which there
 * will be (at most) one per thread.
 */

typedef struct Cache {
    struct Cache *nextPtr;	/* Linked list of cache entries */
    Tcl_ThreadId owner;		/* Which thread's cache is this? */
    Tcl_Obj *firstObjPtr;	/* List of free objects for thread */
    int numObjects;		/* Number of objects for thread */
    int totalAssigned;		/* Total space assigned to thread */
    Bucket buckets[NBUCKETS];	/* The buckets for this thread */
} Cache;

/*
 * The following array specifies various per-bucket limits and locks. The
 * values are statically initialized to avoid calculating them repeatedly.
 */

static struct {
    size_t blockSize;		/* Bucket blocksize. */
    int maxBlocks;		/* Max blocks before move to share. */
    int numMove;		/* Num blocks to move to share. */
    Tcl_Mutex *lockPtr;		/* Share bucket lock. */
} bucketInfo[NBUCKETS];

/*
 * Static functions defined in this file.
 */

static Cache *	GetCache(void);
static void	LockBucket(Cache *cachePtr, int bucket);
static void	UnlockBucket(Cache *cachePtr, int bucket);
static void	PutBlocks(Cache *cachePtr, int bucket, int numMove);
static int	GetBlocks(Cache *cachePtr, int bucket);
static Block *	Ptr2Block(char *ptr);
static char *	Block2Ptr(Block *blockPtr, int bucket, unsigned int reqSize);
static void	MoveObjs(Cache *fromPtr, Cache *toPtr, int numMove);

/*
 * Local variables defined in this file and initialized at startup.
 */

static Tcl_Mutex *listLockPtr;		/* Guards the firstCachePtr list. */
static Tcl_Mutex *objLockPtr;		/* Guards the shared Tcl_Obj free list. */
static Cache sharedCache;		/* Cache shared by all threads. */
static Cache *sharedPtr = &sharedCache;
static Cache *firstCachePtr = &sharedCache;
00159 * 00160 *---------------------------------------------------------------------- 00161 */ 00162 00163 static Cache * 00164 GetCache(void) 00165 { 00166 Cache *cachePtr; 00167 00168 /* 00169 * Check for first-time initialization. 00170 */ 00171 00172 if (listLockPtr == NULL) { 00173 Tcl_Mutex *initLockPtr; 00174 unsigned int i; 00175 00176 initLockPtr = Tcl_GetAllocMutex(); 00177 Tcl_MutexLock(initLockPtr); 00178 if (listLockPtr == NULL) { 00179 listLockPtr = TclpNewAllocMutex(); 00180 objLockPtr = TclpNewAllocMutex(); 00181 for (i = 0; i < NBUCKETS; ++i) { 00182 bucketInfo[i].blockSize = MINALLOC << i; 00183 bucketInfo[i].maxBlocks = 1 << (NBUCKETS - 1 - i); 00184 bucketInfo[i].numMove = i < NBUCKETS - 1 ? 00185 1 << (NBUCKETS - 2 - i) : 1; 00186 bucketInfo[i].lockPtr = TclpNewAllocMutex(); 00187 } 00188 } 00189 Tcl_MutexUnlock(initLockPtr); 00190 } 00191 00192 /* 00193 * Get this thread's cache, allocating if necessary. 00194 */ 00195 00196 cachePtr = TclpGetAllocCache(); 00197 if (cachePtr == NULL) { 00198 cachePtr = calloc(1, sizeof(Cache)); 00199 if (cachePtr == NULL) { 00200 Tcl_Panic("alloc: could not allocate new cache"); 00201 } 00202 Tcl_MutexLock(listLockPtr); 00203 cachePtr->nextPtr = firstCachePtr; 00204 firstCachePtr = cachePtr; 00205 Tcl_MutexUnlock(listLockPtr); 00206 cachePtr->owner = Tcl_GetCurrentThread(); 00207 TclpSetAllocCache(cachePtr); 00208 } 00209 return cachePtr; 00210 } 00211 00212 /* 00213 *---------------------------------------------------------------------- 00214 * 00215 * TclFreeAllocCache -- 00216 * 00217 * Flush and delete a cache, removing from list of caches. 00218 * 00219 * Results: 00220 * None. 00221 * 00222 * Side effects: 00223 * None. 00224 * 00225 *---------------------------------------------------------------------- 00226 */ 00227 00228 void 00229 TclFreeAllocCache( 00230 void *arg) 00231 { 00232 Cache *cachePtr = arg; 00233 Cache **nextPtrPtr; 00234 register unsigned int bucket; 00235 00236 /* 00237 * Flush blocks. 
00238 */ 00239 00240 for (bucket = 0; bucket < NBUCKETS; ++bucket) { 00241 if (cachePtr->buckets[bucket].numFree > 0) { 00242 PutBlocks(cachePtr, bucket, cachePtr->buckets[bucket].numFree); 00243 } 00244 } 00245 00246 /* 00247 * Flush objs. 00248 */ 00249 00250 if (cachePtr->numObjects > 0) { 00251 Tcl_MutexLock(objLockPtr); 00252 MoveObjs(cachePtr, sharedPtr, cachePtr->numObjects); 00253 Tcl_MutexUnlock(objLockPtr); 00254 } 00255 00256 /* 00257 * Remove from pool list. 00258 */ 00259 00260 Tcl_MutexLock(listLockPtr); 00261 nextPtrPtr = &firstCachePtr; 00262 while (*nextPtrPtr != cachePtr) { 00263 nextPtrPtr = &(*nextPtrPtr)->nextPtr; 00264 } 00265 *nextPtrPtr = cachePtr->nextPtr; 00266 cachePtr->nextPtr = NULL; 00267 Tcl_MutexUnlock(listLockPtr); 00268 free(cachePtr); 00269 } 00270 00271 /* 00272 *---------------------------------------------------------------------- 00273 * 00274 * TclpAlloc -- 00275 * 00276 * Allocate memory. 00277 * 00278 * Results: 00279 * Pointer to memory just beyond Block pointer. 00280 * 00281 * Side effects: 00282 * May allocate more blocks for a bucket. 00283 * 00284 *---------------------------------------------------------------------- 00285 */ 00286 00287 char * 00288 TclpAlloc( 00289 unsigned int reqSize) 00290 { 00291 Cache *cachePtr = TclpGetAllocCache(); 00292 Block *blockPtr; 00293 register int bucket; 00294 size_t size; 00295 00296 if (cachePtr == NULL) { 00297 cachePtr = GetCache(); 00298 } 00299 00300 /* 00301 * Increment the requested size to include room for the Block structure. 00302 * Call malloc() directly if the required amount is greater than the 00303 * largest block, otherwise pop the smallest block large enough, 00304 * allocating more blocks if necessary. 
00305 */ 00306 00307 blockPtr = NULL; 00308 size = reqSize + sizeof(Block); 00309 #if RCHECK 00310 ++size; 00311 #endif 00312 if (size > MAXALLOC) { 00313 bucket = NBUCKETS; 00314 blockPtr = malloc(size); 00315 if (blockPtr != NULL) { 00316 cachePtr->totalAssigned += reqSize; 00317 } 00318 } else { 00319 bucket = 0; 00320 while (bucketInfo[bucket].blockSize < size) { 00321 ++bucket; 00322 } 00323 if (cachePtr->buckets[bucket].numFree || GetBlocks(cachePtr, bucket)) { 00324 blockPtr = cachePtr->buckets[bucket].firstPtr; 00325 cachePtr->buckets[bucket].firstPtr = blockPtr->nextBlock; 00326 --cachePtr->buckets[bucket].numFree; 00327 ++cachePtr->buckets[bucket].numRemoves; 00328 cachePtr->buckets[bucket].totalAssigned += reqSize; 00329 } 00330 } 00331 if (blockPtr == NULL) { 00332 return NULL; 00333 } 00334 return Block2Ptr(blockPtr, bucket, reqSize); 00335 } 00336 00337 /* 00338 *---------------------------------------------------------------------- 00339 * 00340 * TclpFree -- 00341 * 00342 * Return blocks to the thread block cache. 00343 * 00344 * Results: 00345 * None. 00346 * 00347 * Side effects: 00348 * May move blocks to shared cache. 00349 * 00350 *---------------------------------------------------------------------- 00351 */ 00352 00353 void 00354 TclpFree( 00355 char *ptr) 00356 { 00357 Cache *cachePtr; 00358 Block *blockPtr; 00359 int bucket; 00360 00361 if (ptr == NULL) { 00362 return; 00363 } 00364 00365 cachePtr = TclpGetAllocCache(); 00366 if (cachePtr == NULL) { 00367 cachePtr = GetCache(); 00368 } 00369 00370 /* 00371 * Get the block back from the user pointer and call system free directly 00372 * for large blocks. Otherwise, push the block back on the bucket and move 00373 * blocks to the shared cache if there are now too many free. 
00374 */ 00375 00376 blockPtr = Ptr2Block(ptr); 00377 bucket = blockPtr->sourceBucket; 00378 if (bucket == NBUCKETS) { 00379 cachePtr->totalAssigned -= blockPtr->blockReqSize; 00380 free(blockPtr); 00381 return; 00382 } 00383 00384 cachePtr->buckets[bucket].totalAssigned -= blockPtr->blockReqSize; 00385 blockPtr->nextBlock = cachePtr->buckets[bucket].firstPtr; 00386 cachePtr->buckets[bucket].firstPtr = blockPtr; 00387 ++cachePtr->buckets[bucket].numFree; 00388 ++cachePtr->buckets[bucket].numInserts; 00389 00390 if (cachePtr != sharedPtr && 00391 cachePtr->buckets[bucket].numFree > bucketInfo[bucket].maxBlocks) { 00392 PutBlocks(cachePtr, bucket, bucketInfo[bucket].numMove); 00393 } 00394 } 00395 00396 /* 00397 *---------------------------------------------------------------------- 00398 * 00399 * TclpRealloc -- 00400 * 00401 * Re-allocate memory to a larger or smaller size. 00402 * 00403 * Results: 00404 * Pointer to memory just beyond Block pointer. 00405 * 00406 * Side effects: 00407 * Previous memory, if any, may be freed. 00408 * 00409 *---------------------------------------------------------------------- 00410 */ 00411 00412 char * 00413 TclpRealloc( 00414 char *ptr, 00415 unsigned int reqSize) 00416 { 00417 Cache *cachePtr = TclpGetAllocCache(); 00418 Block *blockPtr; 00419 void *newPtr; 00420 size_t size, min; 00421 int bucket; 00422 00423 if (ptr == NULL) { 00424 return TclpAlloc(reqSize); 00425 } 00426 00427 if (cachePtr == NULL) { 00428 cachePtr = GetCache(); 00429 } 00430 00431 /* 00432 * If the block is not a system block and fits in place, simply return the 00433 * existing pointer. Otherwise, if the block is a system block and the new 00434 * size would also require a system block, call realloc() directly. 
00435 */ 00436 00437 blockPtr = Ptr2Block(ptr); 00438 size = reqSize + sizeof(Block); 00439 #if RCHECK 00440 ++size; 00441 #endif 00442 bucket = blockPtr->sourceBucket; 00443 if (bucket != NBUCKETS) { 00444 if (bucket > 0) { 00445 min = bucketInfo[bucket-1].blockSize; 00446 } else { 00447 min = 0; 00448 } 00449 if (size > min && size <= bucketInfo[bucket].blockSize) { 00450 cachePtr->buckets[bucket].totalAssigned -= blockPtr->blockReqSize; 00451 cachePtr->buckets[bucket].totalAssigned += reqSize; 00452 return Block2Ptr(blockPtr, bucket, reqSize); 00453 } 00454 } else if (size > MAXALLOC) { 00455 cachePtr->totalAssigned -= blockPtr->blockReqSize; 00456 cachePtr->totalAssigned += reqSize; 00457 blockPtr = realloc(blockPtr, size); 00458 if (blockPtr == NULL) { 00459 return NULL; 00460 } 00461 return Block2Ptr(blockPtr, NBUCKETS, reqSize); 00462 } 00463 00464 /* 00465 * Finally, perform an expensive malloc/copy/free. 00466 */ 00467 00468 newPtr = TclpAlloc(reqSize); 00469 if (newPtr != NULL) { 00470 if (reqSize > blockPtr->blockReqSize) { 00471 reqSize = blockPtr->blockReqSize; 00472 } 00473 memcpy(newPtr, ptr, reqSize); 00474 TclpFree(ptr); 00475 } 00476 return newPtr; 00477 } 00478 00479 /* 00480 *---------------------------------------------------------------------- 00481 * 00482 * TclThreadAllocObj -- 00483 * 00484 * Allocate a Tcl_Obj from the per-thread cache. 00485 * 00486 * Results: 00487 * Pointer to uninitialized Tcl_Obj. 00488 * 00489 * Side effects: 00490 * May move Tcl_Obj's from shared cached or allocate new Tcl_Obj's if 00491 * list is empty. 
00492 * 00493 *---------------------------------------------------------------------- 00494 */ 00495 00496 Tcl_Obj * 00497 TclThreadAllocObj(void) 00498 { 00499 register Cache *cachePtr = TclpGetAllocCache(); 00500 register Tcl_Obj *objPtr; 00501 00502 if (cachePtr == NULL) { 00503 cachePtr = GetCache(); 00504 } 00505 00506 /* 00507 * Get this thread's obj list structure and move or allocate new objs if 00508 * necessary. 00509 */ 00510 00511 if (cachePtr->numObjects == 0) { 00512 register int numMove; 00513 00514 Tcl_MutexLock(objLockPtr); 00515 numMove = sharedPtr->numObjects; 00516 if (numMove > 0) { 00517 if (numMove > NOBJALLOC) { 00518 numMove = NOBJALLOC; 00519 } 00520 MoveObjs(sharedPtr, cachePtr, numMove); 00521 } 00522 Tcl_MutexUnlock(objLockPtr); 00523 if (cachePtr->numObjects == 0) { 00524 Tcl_Obj *newObjsPtr; 00525 00526 cachePtr->numObjects = numMove = NOBJALLOC; 00527 newObjsPtr = malloc(sizeof(Tcl_Obj) * numMove); 00528 if (newObjsPtr == NULL) { 00529 Tcl_Panic("alloc: could not allocate %d new objects", numMove); 00530 } 00531 while (--numMove >= 0) { 00532 objPtr = &newObjsPtr[numMove]; 00533 objPtr->internalRep.otherValuePtr = cachePtr->firstObjPtr; 00534 cachePtr->firstObjPtr = objPtr; 00535 } 00536 } 00537 } 00538 00539 /* 00540 * Pop the first object. 00541 */ 00542 00543 objPtr = cachePtr->firstObjPtr; 00544 cachePtr->firstObjPtr = objPtr->internalRep.otherValuePtr; 00545 --cachePtr->numObjects; 00546 return objPtr; 00547 } 00548 00549 /* 00550 *---------------------------------------------------------------------- 00551 * 00552 * TclThreadFreeObj -- 00553 * 00554 * Return a free Tcl_Obj to the per-thread cache. 00555 * 00556 * Results: 00557 * None. 00558 * 00559 * Side effects: 00560 * May move free Tcl_Obj's to shared list upon hitting high water mark. 
00561 * 00562 *---------------------------------------------------------------------- 00563 */ 00564 00565 void 00566 TclThreadFreeObj( 00567 Tcl_Obj *objPtr) 00568 { 00569 Cache *cachePtr = TclpGetAllocCache(); 00570 00571 if (cachePtr == NULL) { 00572 cachePtr = GetCache(); 00573 } 00574 00575 /* 00576 * Get this thread's list and push on the free Tcl_Obj. 00577 */ 00578 00579 objPtr->internalRep.otherValuePtr = cachePtr->firstObjPtr; 00580 cachePtr->firstObjPtr = objPtr; 00581 ++cachePtr->numObjects; 00582 00583 /* 00584 * If the number of free objects has exceeded the high water mark, move 00585 * some blocks to the shared list. 00586 */ 00587 00588 if (cachePtr->numObjects > NOBJHIGH) { 00589 Tcl_MutexLock(objLockPtr); 00590 MoveObjs(cachePtr, sharedPtr, NOBJALLOC); 00591 Tcl_MutexUnlock(objLockPtr); 00592 } 00593 } 00594 00595 /* 00596 *---------------------------------------------------------------------- 00597 * 00598 * Tcl_GetMemoryInfo -- 00599 * 00600 * Return a list-of-lists of memory stats. 00601 * 00602 * Results: 00603 * None. 00604 * 00605 * Side effects: 00606 * List appended to given dstring. 
00607 * 00608 *---------------------------------------------------------------------- 00609 */ 00610 00611 MODULE_SCOPE void 00612 Tcl_GetMemoryInfo( 00613 Tcl_DString *dsPtr) 00614 { 00615 Cache *cachePtr; 00616 char buf[200]; 00617 unsigned int n; 00618 00619 Tcl_MutexLock(listLockPtr); 00620 cachePtr = firstCachePtr; 00621 while (cachePtr != NULL) { 00622 Tcl_DStringStartSublist(dsPtr); 00623 if (cachePtr == sharedPtr) { 00624 Tcl_DStringAppendElement(dsPtr, "shared"); 00625 } else { 00626 sprintf(buf, "thread%p", cachePtr->owner); 00627 Tcl_DStringAppendElement(dsPtr, buf); 00628 } 00629 for (n = 0; n < NBUCKETS; ++n) { 00630 sprintf(buf, "%lu %ld %ld %ld %ld %ld %ld", 00631 (unsigned long) bucketInfo[n].blockSize, 00632 cachePtr->buckets[n].numFree, 00633 cachePtr->buckets[n].numRemoves, 00634 cachePtr->buckets[n].numInserts, 00635 cachePtr->buckets[n].totalAssigned, 00636 cachePtr->buckets[n].numLocks, 00637 cachePtr->buckets[n].numWaits); 00638 Tcl_DStringAppendElement(dsPtr, buf); 00639 } 00640 Tcl_DStringEndSublist(dsPtr); 00641 cachePtr = cachePtr->nextPtr; 00642 } 00643 Tcl_MutexUnlock(listLockPtr); 00644 } 00645 00646 /* 00647 *---------------------------------------------------------------------- 00648 * 00649 * MoveObjs -- 00650 * 00651 * Move Tcl_Obj's between caches. 00652 * 00653 * Results: 00654 * None. 00655 * 00656 * Side effects: 00657 * None. 00658 * 00659 *---------------------------------------------------------------------- 00660 */ 00661 00662 static void 00663 MoveObjs( 00664 Cache *fromPtr, 00665 Cache *toPtr, 00666 int numMove) 00667 { 00668 register Tcl_Obj *objPtr = fromPtr->firstObjPtr; 00669 Tcl_Obj *fromFirstObjPtr = objPtr; 00670 00671 toPtr->numObjects += numMove; 00672 fromPtr->numObjects -= numMove; 00673 00674 /* 00675 * Find the last object to be moved; set the next one (the first one not 00676 * to be moved) as the first object in the 'from' cache. 
00677 */ 00678 00679 while (--numMove) { 00680 objPtr = objPtr->internalRep.otherValuePtr; 00681 } 00682 fromPtr->firstObjPtr = objPtr->internalRep.otherValuePtr; 00683 00684 /* 00685 * Move all objects as a block - they are already linked to each other, we 00686 * just have to update the first and last. 00687 */ 00688 00689 objPtr->internalRep.otherValuePtr = toPtr->firstObjPtr; 00690 toPtr->firstObjPtr = fromFirstObjPtr; 00691 } 00692 00693 /* 00694 *---------------------------------------------------------------------- 00695 * 00696 * Block2Ptr, Ptr2Block -- 00697 * 00698 * Convert between internal blocks and user pointers. 00699 * 00700 * Results: 00701 * User pointer or internal block. 00702 * 00703 * Side effects: 00704 * Invalid blocks will abort the server. 00705 * 00706 *---------------------------------------------------------------------- 00707 */ 00708 00709 static char * 00710 Block2Ptr( 00711 Block *blockPtr, 00712 int bucket, 00713 unsigned int reqSize) 00714 { 00715 register void *ptr; 00716 00717 blockPtr->magicNum1 = blockPtr->magicNum2 = MAGIC; 00718 blockPtr->sourceBucket = bucket; 00719 blockPtr->blockReqSize = reqSize; 00720 ptr = ((void *) (blockPtr + 1)); 00721 #if RCHECK 00722 ((unsigned char *)(ptr))[reqSize] = MAGIC; 00723 #endif 00724 return (char *) ptr; 00725 } 00726 00727 static Block * 00728 Ptr2Block( 00729 char *ptr) 00730 { 00731 register Block *blockPtr; 00732 00733 blockPtr = (((Block *) ptr) - 1); 00734 if (blockPtr->magicNum1 != MAGIC || blockPtr->magicNum2 != MAGIC) { 00735 Tcl_Panic("alloc: invalid block: %p: %x %x", 00736 blockPtr, blockPtr->magicNum1, blockPtr->magicNum2); 00737 } 00738 #if RCHECK 00739 if (((unsigned char *) ptr)[blockPtr->blockReqSize] != MAGIC) { 00740 Tcl_Panic("alloc: invalid block: %p: %x %x %x", 00741 blockPtr, blockPtr->magicNum1, blockPtr->magicNum2, 00742 ((unsigned char *) ptr)[blockPtr->blockReqSize]); 00743 } 00744 #endif 00745 return blockPtr; 00746 } 00747 00748 /* 00749 
/*
 *----------------------------------------------------------------------
 *
 * LockBucket, UnlockBucket --
 *
 *	Set/unset the lock to access a bucket in the shared cache.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	Lock activity and contention are monitored globally and on a per-cache
 *	basis.
 *
 *----------------------------------------------------------------------
 */

static void
LockBucket(
    Cache *cachePtr,
    int bucket)
{
#if 0
    /*
     * Contention-tracking variant: count a wait whenever the try-lock
     * fails. Disabled in this build; the plain lock below is used.
     */

    if (Tcl_MutexTryLock(bucketInfo[bucket].lockPtr) != TCL_OK) {
	Tcl_MutexLock(bucketInfo[bucket].lockPtr);
	++cachePtr->buckets[bucket].numWaits;
	++sharedPtr->buckets[bucket].numWaits;
    }
#else
    Tcl_MutexLock(bucketInfo[bucket].lockPtr);
#endif
    ++cachePtr->buckets[bucket].numLocks;
    ++sharedPtr->buckets[bucket].numLocks;
}

static void
UnlockBucket(
    Cache *cachePtr,
    int bucket)
{
    Tcl_MutexUnlock(bucketInfo[bucket].lockPtr);
}

/*
 *----------------------------------------------------------------------
 *
 * PutBlocks --
 *
 *	Return unused blocks to the shared cache.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	None.
 *
 *----------------------------------------------------------------------
 */

static void
PutBlocks(
    Cache *cachePtr,
    int bucket,
    int numMove)		/* Callers pass 1 <= numMove <= numFree. */
{
    register Block *lastPtr, *firstPtr;
    register int n = numMove;

    /*
     * Before acquiring the lock, walk the block list to find the last block
     * to be moved.
     */

    firstPtr = lastPtr = cachePtr->buckets[bucket].firstPtr;
    while (--n > 0) {
	lastPtr = lastPtr->nextBlock;
    }
    cachePtr->buckets[bucket].firstPtr = lastPtr->nextBlock;
    cachePtr->buckets[bucket].numFree -= numMove;

    /*
     * Acquire the lock and place the list of blocks at the front of the
     * shared cache bucket.
     */

    LockBucket(cachePtr, bucket);
    lastPtr->nextBlock = sharedPtr->buckets[bucket].firstPtr;
    sharedPtr->buckets[bucket].firstPtr = firstPtr;
    sharedPtr->buckets[bucket].numFree += numMove;
    UnlockBucket(cachePtr, bucket);
}

/*
 *----------------------------------------------------------------------
 *
 * GetBlocks --
 *
 *	Get more blocks for a bucket.
 *
 * Results:
 *	1 if blocks were allocated, 0 otherwise.
 *
 * Side effects:
 *	Cache may be filled with available blocks.
 *
 *----------------------------------------------------------------------
 */

static int
GetBlocks(
    Cache *cachePtr,
    int bucket)
{
    register Block *blockPtr;
    register int n;

    /*
     * First, attempt to move blocks from the shared cache. Note the
     * potentially dirty read of numFree before acquiring the lock which is a
     * slight performance enhancement. The value is verified after the lock is
     * actually acquired.
     */

    if (cachePtr != sharedPtr && sharedPtr->buckets[bucket].numFree > 0) {
	LockBucket(cachePtr, bucket);
	if (sharedPtr->buckets[bucket].numFree > 0) {

	    /*
	     * Either move the entire list or walk the list to find the last
	     * block to move.
	     */

	    n = bucketInfo[bucket].numMove;
	    if (n >= sharedPtr->buckets[bucket].numFree) {
		cachePtr->buckets[bucket].firstPtr =
			sharedPtr->buckets[bucket].firstPtr;
		cachePtr->buckets[bucket].numFree =
			sharedPtr->buckets[bucket].numFree;
		sharedPtr->buckets[bucket].firstPtr = NULL;
		sharedPtr->buckets[bucket].numFree = 0;
	    } else {
		blockPtr = sharedPtr->buckets[bucket].firstPtr;
		cachePtr->buckets[bucket].firstPtr = blockPtr;
		sharedPtr->buckets[bucket].numFree -= n;
		cachePtr->buckets[bucket].numFree = n;
		while (--n > 0) {
		    blockPtr = blockPtr->nextBlock;
		}
		sharedPtr->buckets[bucket].firstPtr = blockPtr->nextBlock;
		blockPtr->nextBlock = NULL;
	    }
	}
	UnlockBucket(cachePtr, bucket);
    }

    if (cachePtr->buckets[bucket].numFree == 0) {
	register size_t size;

	/*
	 * If no blocks could be moved from shared, first look for a larger
	 * block in this cache to split up.
	 */

	blockPtr = NULL;
	n = NBUCKETS;
	size = 0;	/* lint */
	while (--n > bucket) {
	    if (cachePtr->buckets[n].numFree > 0) {
		size = bucketInfo[n].blockSize;
		blockPtr = cachePtr->buckets[n].firstPtr;
		cachePtr->buckets[n].firstPtr = blockPtr->nextBlock;
		--cachePtr->buckets[n].numFree;
		break;
	    }
	}

	/*
	 * Otherwise, allocate a big new block directly.
	 */

	if (blockPtr == NULL) {
	    size = MAXALLOC;
	    blockPtr = malloc(size);
	    if (blockPtr == NULL) {
		return 0;
	    }
	}

	/*
	 * Split the larger block into smaller blocks for this bucket.
	 * Bucket sizes are powers of two of each other, so the split
	 * is always exact.
	 */

	n = size / bucketInfo[bucket].blockSize;
	cachePtr->buckets[bucket].numFree = n;
	cachePtr->buckets[bucket].firstPtr = blockPtr;
	while (--n > 0) {
	    blockPtr->nextBlock = (Block *)
		    ((char *) blockPtr + bucketInfo[bucket].blockSize);
	    blockPtr = blockPtr->nextBlock;
	}
	blockPtr->nextBlock = NULL;
    }
    return 1;
}

/*
 *----------------------------------------------------------------------
 *
 * TclFinalizeThreadAlloc --
 *
 *	This procedure is used to destroy all private resources used in this
 *	file.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	All allocator mutexes are freed and cleared.
 *
 *----------------------------------------------------------------------
 */

void
TclFinalizeThreadAlloc(void)
{
    unsigned int i;

    for (i = 0; i < NBUCKETS; ++i) {
	TclpFreeAllocMutex(bucketInfo[i].lockPtr);
	bucketInfo[i].lockPtr = NULL;
    }

    TclpFreeAllocMutex(objLockPtr);
    objLockPtr = NULL;

    TclpFreeAllocMutex(listLockPtr);
    listLockPtr = NULL;

    TclpFreeAllocCache(NULL);
}

#else
/*
 *----------------------------------------------------------------------
 *
 * TclFinalizeThreadAlloc --
 *
 *	This procedure is used to destroy all private resources used in this
 *	file.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	None.
 *
 *----------------------------------------------------------------------
 */

void
TclFinalizeThreadAlloc(void)
{
    Tcl_Panic("TclFinalizeThreadAlloc called when threaded memory allocator not in use");
}
#endif /* TCL_THREADS */

/*
 * Local Variables:
 * mode: c
 * c-basic-offset: 4
 * fill-column: 78
 * End:
 */
Generated on Wed Mar 12 12:18:22 2008 by 1.5.1 |