/*-------------------------------------------------------------------------
 *
 * aset.c
 *	  Allocation set definitions.
 *
 * AllocSet is our standard implementation of the abstract MemoryContext
 * type.
 *
 *
 * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *	  src/backend/utils/mmgr/aset.c
 *
 * NOTE:
 *	This is a new (Feb. 05, 1999) implementation of the allocation set
 *	routines. AllocSet...() does not use OrderedSet...() any more.
 *	Instead it manages allocations in a block pool by itself, combining
 *	many small allocations in a few bigger blocks. AllocSetFree() normally
 *	doesn't free() memory really. It just add's the free'd area to some
 *	list for later reuse by AllocSetAlloc(). All memory blocks are free()'d
 *	at once on AllocSetReset(), which happens when the memory context gets
 *	destroyed.
 *
 *	Performance improvement from Tom Lane, 8/99: for extremely large request
 *	sizes, we do want to be able to give the memory back to free() as soon
 *	as it is pfree()'d.  Otherwise we risk tying up a lot of memory in
 *	freelist entries that might never be usable.  This is specially needed
 *	when the caller is repeatedly repalloc()'ing a block bigger and bigger;
 *	the previous instances of the block were guaranteed to be wasted until
 *	AllocSetReset() under the old way.
 *
 *	Further improvement 12/00: as the code stood, request sizes in the
 *	midrange between "small" and "large" were handled very inefficiently,
 *	because any sufficiently large free chunk would be used to satisfy a
 *	request, even if it was much larger than necessary.  This led to more
 *	and more wasted space in allocated chunks over time.  To fix, get rid
 *	of the midrange behavior: we now handle only "small" power-of-2-size
 *	chunks as chunks.  Anything "large" is passed off to malloc().  Change
 *	the number of freelists to change the small/large boundary.
 *
 *
 *	About CLOBBER_FREED_MEMORY:
 *
 *	If this symbol is defined, all freed memory is overwritten with 0x7F's.
 *	This is useful for catching places that reference already-freed memory.
 *
 *	About MEMORY_CONTEXT_CHECKING:
 *
 *	Since we usually round request sizes up to the next power of 2, there
 *	is often some unused space immediately after a requested data area.
 *	Thus, if someone makes the common error of writing past what they've
 *	requested, the problem is likely to go unnoticed ... until the day when
 *	there *isn't* any wasted space, perhaps because of different memory
 *	alignment on a new platform, or some other effect.  To catch this sort
 *	of problem, the MEMORY_CONTEXT_CHECKING option stores 0x7E just beyond
 *	the requested space whenever the request is less than the actual chunk
 *	size, and verifies that the byte is undamaged when the chunk is freed.
 *-------------------------------------------------------------------------
 */
#include "utils/memutils.h"
69
/* Define this to detail debug alloc information */
70
/* #define HAVE_ALLOCINFO */
/*--------------------
 * Chunk freelist k holds chunks of size 1 << (k + ALLOC_MINBITS),
 * for k = 0 .. ALLOCSET_NUM_FREELISTS-1.
 *
 * Note that all chunks in the freelists have power-of-2 sizes.  This
 * improves recyclability: we may waste some space, but the wasted space
 * should stay pretty constant as requests are made and released.
 *
 * A request too large for the last freelist is handled by allocating a
 * dedicated block from malloc().  The block still has a block header and
 * chunk header, but when the chunk is freed we'll return the whole block
 * to malloc(), not put it on our freelists.
 *
 * CAUTION: ALLOC_MINBITS must be large enough so that
 * 1<<ALLOC_MINBITS is at least MAXALIGN,
 * or we may fail to align the smallest chunks adequately.
 * 8-byte alignment is enough on all currently known machines.
 *
 * With the current parameters, request sizes up to 8K are treated as chunks,
 * larger requests go into dedicated blocks.  Change ALLOCSET_NUM_FREELISTS
 * to adjust the boundary point.
 *--------------------
 */

#define ALLOC_MINBITS		3	/* smallest chunk size is 8 bytes */
#define ALLOCSET_NUM_FREELISTS	11
#define ALLOC_CHUNK_LIMIT	(1 << (ALLOCSET_NUM_FREELISTS-1+ALLOC_MINBITS))
/* Size of largest chunk that we use a fixed size for */
/*--------------------
 * The first block allocated for an allocset has size initBlockSize.
 * Each time we have to allocate another block, we double the block size
 * (if possible, and without exceeding maxBlockSize), so as to reduce
 * the bookkeeping load on malloc().
 *
 * Blocks allocated to hold oversize chunks do not follow this rule, however;
 * they are just however big they need to be to hold that single chunk.
 *--------------------
 */

#define ALLOC_BLOCKHDRSZ	MAXALIGN(sizeof(AllocBlockData))
#define ALLOC_CHUNKHDRSZ	MAXALIGN(sizeof(AllocChunkData))
typedef struct AllocBlockData *AllocBlock;		/* forward reference */
typedef struct AllocChunkData *AllocChunk;

/*
 * AllocPointer
 *		Aligned pointer which may be a member of an allocation set.
 */
typedef void *AllocPointer;
125
* AllocSetContext is our standard implementation of MemoryContext.
127
* Note: isReset means there is nothing for AllocSetReset to do. This is
128
* different from the aset being physically empty (empty blocks list) because
129
* we may still have a keeper block. It's also different from the set being
130
* logically empty, because we don't attempt to detect pfree'ing the last
133
typedef struct AllocSetContext
135
MemoryContextData header; /* Standard memory-context fields */
136
/* Info about storage allocated in this context: */
137
AllocBlock blocks; /* head of list of blocks in this set */
138
AllocChunk freelist[ALLOCSET_NUM_FREELISTS]; /* free chunk lists */
139
bool isReset; /* T = no space alloced since last reset */
140
/* Allocation parameters for this context: */
141
Size initBlockSize; /* initial block size */
142
Size maxBlockSize; /* maximum block size */
143
Size nextBlockSize; /* next block size to allocate */
144
Size allocChunkLimit; /* effective chunk size limit */
145
AllocBlock keeper; /* if not NULL, keep this block over resets */
148
typedef AllocSetContext *AllocSet;
152
* An AllocBlock is the unit of memory that is obtained by aset.c
153
* from malloc(). It contains one or more AllocChunks, which are
154
* the units requested by palloc() and freed by pfree(). AllocChunks
155
* cannot be returned to malloc() individually, instead they are put
156
* on freelists by pfree() and re-used by the next palloc() that has
157
* a matching request size.
159
* AllocBlockData is the header data for a block --- the usable space
160
* within the block begins at the next alignment boundary.
162
typedef struct AllocBlockData
164
AllocSet aset; /* aset that owns this block */
165
AllocBlock next; /* next block in aset's blocks list */
166
char *freeptr; /* start of free space in this block */
167
char *endptr; /* end of space in this block */
172
* The prefix of each piece of memory in an AllocBlock
174
* NB: this MUST match StandardChunkHeader as defined by utils/memutils.h.
176
typedef struct AllocChunkData
178
/* aset is the owning aset if allocated, or the freelist link if free */
180
/* size is always the size of the usable space in the chunk */
182
#ifdef MEMORY_CONTEXT_CHECKING
183
/* when debugging memory usage, also store actual requested size */
184
/* this is zero in a free chunk */
/*
 * AllocPointerIsValid
 *		True iff pointer is valid allocation pointer.
 */
#define AllocPointerIsValid(pointer) PointerIsValid(pointer)

/*
 * AllocSetIsValid
 *		True iff set is valid allocation set.
 */
#define AllocSetIsValid(set) PointerIsValid(set)

/* Convert between a chunk's user-data pointer and its header pointer */
#define AllocPointerGetChunk(ptr)	\
					((AllocChunk)(((char *)(ptr)) - ALLOC_CHUNKHDRSZ))
#define AllocChunkGetPointer(chk)	\
					((AllocPointer)(((char *)(chk)) + ALLOC_CHUNKHDRSZ))
207
* These functions implement the MemoryContext API for AllocSet contexts.
209
static void *AllocSetAlloc(MemoryContext context, Size size);
210
static void AllocSetFree(MemoryContext context, void *pointer);
211
static void *AllocSetRealloc(MemoryContext context, void *pointer, Size size);
212
static void AllocSetInit(MemoryContext context);
213
static void AllocSetReset(MemoryContext context);
214
static void AllocSetDelete(MemoryContext context);
215
static Size AllocSetGetChunkSpace(MemoryContext context, void *pointer);
216
static bool AllocSetIsEmpty(MemoryContext context);
217
static void AllocSetStats(MemoryContext context, int level);
219
#ifdef MEMORY_CONTEXT_CHECKING
220
static void AllocSetCheck(MemoryContext context);
224
* This is the virtual function table for AllocSet contexts.
226
static MemoryContextMethods AllocSetMethods = {
233
AllocSetGetChunkSpace,
236
#ifdef MEMORY_CONTEXT_CHECKING
/*
 * Table for AllocSetFreeIndex: LogTable256[i] = ceil(log2(i+1)),
 * built from 16-entry runs via the LT16 helper macro.
 */
#define LT16(n) n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n

static const unsigned char LogTable256[256] =
{
	0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
	LT16(5), LT16(6), LT16(6), LT16(7), LT16(7), LT16(7), LT16(7),
	LT16(8), LT16(8), LT16(8), LT16(8), LT16(8), LT16(8), LT16(8), LT16(8)
};
/* Optional per-chunk tracing of alloc/free, enabled by HAVE_ALLOCINFO */
#ifdef HAVE_ALLOCINFO
#define AllocFreeInfo(_cxt, _chunk) \
			fprintf(stderr, "AllocFree: %s: %p, %d\n", \
				(_cxt)->header.name, (_chunk), (_chunk)->size)
#define AllocAllocInfo(_cxt, _chunk) \
			fprintf(stderr, "AllocAlloc: %s: %p, %d\n", \
				(_cxt)->header.name, (_chunk), (_chunk)->size)
#else
#define AllocFreeInfo(_cxt, _chunk)
#define AllocAllocInfo(_cxt, _chunk)
#endif
270
* AllocSetFreeIndex -
272
* Depending on the size of an allocation compute which freechunk
273
* list of the alloc set it belongs to. Caller must have verified
274
* that size <= ALLOC_CHUNK_LIMIT.
278
AllocSetFreeIndex(Size size)
284
if (size > (1 << ALLOC_MINBITS))
286
tsize = (size - 1) >> ALLOC_MINBITS;
289
* At this point we need to obtain log2(tsize)+1, ie, the number of
290
* not-all-zero bits at the right. We used to do this with a
291
* shift-and-count loop, but this function is enough of a hotspot to
292
* justify micro-optimization effort. The best approach seems to be
293
* to use a lookup table. Note that this code assumes that
294
* ALLOCSET_NUM_FREELISTS <= 17, since we only cope with two bytes of
298
idx = t ? LogTable256[t] + 8 : LogTable256[tsize];
300
Assert(idx < ALLOCSET_NUM_FREELISTS);
#ifdef RANDOMIZE_ALLOCATED_MEMORY

/*
 * Fill a just-allocated piece of memory with "random" data.  It's not really
 * very random, just a repeating sequence with a length that's prime.  What
 * we mainly want out of it is to have a good probability that two palloc's
 * of the same number of bytes start out containing different data.
 */
static void
randomize_mem(char *ptr, size_t size)
{
	static int	save_ctr = 1;
	int			ctr;

	ctr = save_ctr;
	while (size-- > 0)
	{
		*ptr++ = ctr;
		/* 251 is prime, so the fill pattern has period 251 */
		if (++ctr > 251)
			ctr = 1;
	}
	save_ctr = ctr;
}
#endif   /* RANDOMIZE_ALLOCATED_MEMORY */
340
* AllocSetContextCreate
341
* Create a new AllocSet context.
343
* parent: parent context, or NULL if top-level context
344
* name: name of context (for debugging --- string will be copied)
345
* minContextSize: minimum context size
346
* initBlockSize: initial allocation block size
347
* maxBlockSize: maximum allocation block size
350
AllocSetContextCreate(MemoryContext parent,
358
/* Do the type-independent part of context creation */
359
context = (AllocSet) MemoryContextCreate(T_AllocSetContext,
360
sizeof(AllocSetContext),
366
* Make sure alloc parameters are reasonable, and save them.
368
* We somewhat arbitrarily enforce a minimum 1K block size.
370
initBlockSize = MAXALIGN(initBlockSize);
371
if (initBlockSize < 1024)
372
initBlockSize = 1024;
373
maxBlockSize = MAXALIGN(maxBlockSize);
374
if (maxBlockSize < initBlockSize)
375
maxBlockSize = initBlockSize;
376
context->initBlockSize = initBlockSize;
377
context->maxBlockSize = maxBlockSize;
378
context->nextBlockSize = initBlockSize;
381
* Compute the allocation chunk size limit for this context. It can't be
382
* more than ALLOC_CHUNK_LIMIT because of the fixed number of freelists.
383
* If maxBlockSize is small then requests exceeding the maxBlockSize
384
* should be treated as large chunks, too. We have to have
385
* allocChunkLimit a power of two, because the requested and
386
* actually-allocated sizes of any chunk must be on the same side of the
387
* limit, else we get confused about whether the chunk is "big".
389
context->allocChunkLimit = ALLOC_CHUNK_LIMIT;
390
while (context->allocChunkLimit >
391
(Size) (maxBlockSize - ALLOC_BLOCKHDRSZ - ALLOC_CHUNKHDRSZ))
392
context->allocChunkLimit >>= 1;
395
* Grab always-allocated space, if requested
397
if (minContextSize > ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ)
399
Size blksize = MAXALIGN(minContextSize);
402
block = (AllocBlock) malloc(blksize);
405
MemoryContextStats(TopMemoryContext);
407
(errcode(ERRCODE_OUT_OF_MEMORY),
408
errmsg("out of memory"),
409
errdetail("Failed while creating memory context \"%s\".",
412
block->aset = context;
413
block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
414
block->endptr = ((char *) block) + blksize;
415
block->next = context->blocks;
416
context->blocks = block;
417
/* Mark block as not to be released at reset time */
418
context->keeper = block;
421
context->isReset = true;
423
return (MemoryContext) context;
428
* Context-type-specific initialization routine.
430
* This is called by MemoryContextCreate() after setting up the
431
* generic MemoryContext fields and before linking the new context
432
* into the context tree. We must do whatever is needed to make the
433
* new context minimally valid for deletion. We must *not* risk
434
* failure --- thus, for example, allocating more memory is not cool.
435
* (AllocSetContextCreate can allocate memory when it gets control
439
AllocSetInit(MemoryContext context)
442
* Since MemoryContextCreate already zeroed the context node, we don't
443
* have to do anything here: it's already OK.
449
* Frees all memory which is allocated in the given set.
451
* Actually, this routine has some discretion about what to do.
452
* It should mark all allocated chunks freed, but it need not necessarily
453
* give back all the resources the set owns. Our actual implementation is
454
* that we hang onto any "keeper" block specified for the set. In this way,
455
* we don't thrash malloc() when a context is repeatedly reset after small
456
* allocations, which is typical behavior for per-tuple contexts.
459
AllocSetReset(MemoryContext context)
461
AllocSet set = (AllocSet) context;
464
AssertArg(AllocSetIsValid(set));
466
/* Nothing to do if no pallocs since startup or last reset */
470
#ifdef MEMORY_CONTEXT_CHECKING
471
/* Check for corruption and leaks before freeing */
472
AllocSetCheck(context);
475
/* Clear chunk freelists */
476
MemSetAligned(set->freelist, 0, sizeof(set->freelist));
480
/* New blocks list is either empty or just the keeper block */
481
set->blocks = set->keeper;
483
while (block != NULL)
485
AllocBlock next = block->next;
487
if (block == set->keeper)
489
/* Reset the block, but don't return it to malloc */
490
char *datastart = ((char *) block) + ALLOC_BLOCKHDRSZ;
492
#ifdef CLOBBER_FREED_MEMORY
493
/* Wipe freed memory for debugging purposes */
494
memset(datastart, 0x7F, block->freeptr - datastart);
496
block->freeptr = datastart;
501
/* Normal case, release the block */
502
#ifdef CLOBBER_FREED_MEMORY
503
/* Wipe freed memory for debugging purposes */
504
memset(block, 0x7F, block->freeptr - ((char *) block));
511
/* Reset block size allocation sequence, too */
512
set->nextBlockSize = set->initBlockSize;
519
* Frees all memory which is allocated in the given set,
520
* in preparation for deletion of the set.
522
* Unlike AllocSetReset, this *must* free all resources of the set.
523
* But note we are not responsible for deleting the context node itself.
526
AllocSetDelete(MemoryContext context)
528
AllocSet set = (AllocSet) context;
529
AllocBlock block = set->blocks;
531
AssertArg(AllocSetIsValid(set));
533
#ifdef MEMORY_CONTEXT_CHECKING
534
/* Check for corruption and leaks before freeing */
535
AllocSetCheck(context);
538
/* Make it look empty, just in case... */
539
MemSetAligned(set->freelist, 0, sizeof(set->freelist));
543
while (block != NULL)
545
AllocBlock next = block->next;
547
#ifdef CLOBBER_FREED_MEMORY
548
/* Wipe freed memory for debugging purposes */
549
memset(block, 0x7F, block->freeptr - ((char *) block));
558
* Returns pointer to allocated memory of given size; memory is added
562
AllocSetAlloc(MemoryContext context, Size size)
564
AllocSet set = (AllocSet) context;
571
AssertArg(AllocSetIsValid(set));
574
* If requested size exceeds maximum for chunks, allocate an entire block
577
if (size > set->allocChunkLimit)
579
chunk_size = MAXALIGN(size);
580
blksize = chunk_size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
581
block = (AllocBlock) malloc(blksize);
584
MemoryContextStats(TopMemoryContext);
586
(errcode(ERRCODE_OUT_OF_MEMORY),
587
errmsg("out of memory"),
588
errdetail("Failed on request of size %lu.",
589
(unsigned long) size)));
592
block->freeptr = block->endptr = ((char *) block) + blksize;
594
chunk = (AllocChunk) (((char *) block) + ALLOC_BLOCKHDRSZ);
596
chunk->size = chunk_size;
597
#ifdef MEMORY_CONTEXT_CHECKING
598
chunk->requested_size = size;
599
/* set mark to catch clobber of "unused" space */
600
if (size < chunk_size)
601
((char *) AllocChunkGetPointer(chunk))[size] = 0x7E;
603
#ifdef RANDOMIZE_ALLOCATED_MEMORY
604
/* fill the allocated space with junk */
605
randomize_mem((char *) AllocChunkGetPointer(chunk), size);
609
* Stick the new block underneath the active allocation block, so that
610
* we don't lose the use of the space remaining therein.
612
if (set->blocks != NULL)
614
block->next = set->blocks->next;
615
set->blocks->next = block;
623
set->isReset = false;
625
AllocAllocInfo(set, chunk);
626
return AllocChunkGetPointer(chunk);
630
* Request is small enough to be treated as a chunk. Look in the
631
* corresponding free list to see if there is a free chunk we could reuse.
632
* If one is found, remove it from the free list, make it again a member
633
* of the alloc set and return its data address.
635
fidx = AllocSetFreeIndex(size);
636
chunk = set->freelist[fidx];
639
Assert(chunk->size >= size);
641
set->freelist[fidx] = (AllocChunk) chunk->aset;
643
chunk->aset = (void *) set;
645
#ifdef MEMORY_CONTEXT_CHECKING
646
chunk->requested_size = size;
647
/* set mark to catch clobber of "unused" space */
648
if (size < chunk->size)
649
((char *) AllocChunkGetPointer(chunk))[size] = 0x7E;
651
#ifdef RANDOMIZE_ALLOCATED_MEMORY
652
/* fill the allocated space with junk */
653
randomize_mem((char *) AllocChunkGetPointer(chunk), size);
656
/* isReset must be false already */
657
Assert(!set->isReset);
659
AllocAllocInfo(set, chunk);
660
return AllocChunkGetPointer(chunk);
664
* Choose the actual chunk size to allocate.
666
chunk_size = (1 << ALLOC_MINBITS) << fidx;
667
Assert(chunk_size >= size);
670
* If there is enough room in the active allocation block, we will put the
671
* chunk into that block. Else must start a new one.
673
if ((block = set->blocks) != NULL)
675
Size availspace = block->endptr - block->freeptr;
677
if (availspace < (chunk_size + ALLOC_CHUNKHDRSZ))
680
* The existing active (top) block does not have enough room for
681
* the requested allocation, but it might still have a useful
682
* amount of space in it. Once we push it down in the block list,
683
* we'll never try to allocate more space from it. So, before we
684
* do that, carve up its free space into chunks that we can put on
685
* the set's freelists.
687
* Because we can only get here when there's less than
688
* ALLOC_CHUNK_LIMIT left in the block, this loop cannot iterate
689
* more than ALLOCSET_NUM_FREELISTS-1 times.
691
while (availspace >= ((1 << ALLOC_MINBITS) + ALLOC_CHUNKHDRSZ))
693
Size availchunk = availspace - ALLOC_CHUNKHDRSZ;
694
int a_fidx = AllocSetFreeIndex(availchunk);
697
* In most cases, we'll get back the index of the next larger
698
* freelist than the one we need to put this chunk on. The
699
* exception is when availchunk is exactly a power of 2.
701
if (availchunk != ((Size) 1 << (a_fidx + ALLOC_MINBITS)))
705
availchunk = ((Size) 1 << (a_fidx + ALLOC_MINBITS));
708
chunk = (AllocChunk) (block->freeptr);
710
block->freeptr += (availchunk + ALLOC_CHUNKHDRSZ);
711
availspace -= (availchunk + ALLOC_CHUNKHDRSZ);
713
chunk->size = availchunk;
714
#ifdef MEMORY_CONTEXT_CHECKING
715
chunk->requested_size = 0; /* mark it free */
717
chunk->aset = (void *) set->freelist[a_fidx];
718
set->freelist[a_fidx] = chunk;
721
/* Mark that we need to create a new block */
727
* Time to create a new regular (multi-chunk) block?
734
* The first such block has size initBlockSize, and we double the
735
* space in each succeeding block, but not more than maxBlockSize.
737
blksize = set->nextBlockSize;
738
set->nextBlockSize <<= 1;
739
if (set->nextBlockSize > set->maxBlockSize)
740
set->nextBlockSize = set->maxBlockSize;
743
* If initBlockSize is less than ALLOC_CHUNK_LIMIT, we could need more
744
* space... but try to keep it a power of 2.
746
required_size = chunk_size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
747
while (blksize < required_size)
750
/* Try to allocate it */
751
block = (AllocBlock) malloc(blksize);
754
* We could be asking for pretty big blocks here, so cope if malloc
755
* fails. But give up if there's less than a meg or so available...
757
while (block == NULL && blksize > 1024 * 1024)
760
if (blksize < required_size)
762
block = (AllocBlock) malloc(blksize);
767
MemoryContextStats(TopMemoryContext);
769
(errcode(ERRCODE_OUT_OF_MEMORY),
770
errmsg("out of memory"),
771
errdetail("Failed on request of size %lu.",
772
(unsigned long) size)));
776
block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
777
block->endptr = ((char *) block) + blksize;
780
* If this is the first block of the set, make it the "keeper" block.
781
* Formerly, a keeper block could only be created during context
782
* creation, but allowing it to happen here lets us have fast reset
783
* cycling even for contexts created with minContextSize = 0; that way
784
* we don't have to force space to be allocated in contexts that might
785
* never need any space. Don't mark an oversize block as a keeper,
788
if (set->keeper == NULL && blksize == set->initBlockSize)
791
block->next = set->blocks;
796
* OK, do the allocation
798
chunk = (AllocChunk) (block->freeptr);
800
block->freeptr += (chunk_size + ALLOC_CHUNKHDRSZ);
801
Assert(block->freeptr <= block->endptr);
803
chunk->aset = (void *) set;
804
chunk->size = chunk_size;
805
#ifdef MEMORY_CONTEXT_CHECKING
806
chunk->requested_size = size;
807
/* set mark to catch clobber of "unused" space */
808
if (size < chunk->size)
809
((char *) AllocChunkGetPointer(chunk))[size] = 0x7E;
811
#ifdef RANDOMIZE_ALLOCATED_MEMORY
812
/* fill the allocated space with junk */
813
randomize_mem((char *) AllocChunkGetPointer(chunk), size);
816
set->isReset = false;
818
AllocAllocInfo(set, chunk);
819
return AllocChunkGetPointer(chunk);
824
* Frees allocated memory; memory is removed from the set.
827
AllocSetFree(MemoryContext context, void *pointer)
829
AllocSet set = (AllocSet) context;
830
AllocChunk chunk = AllocPointerGetChunk(pointer);
832
AllocFreeInfo(set, chunk);
834
#ifdef MEMORY_CONTEXT_CHECKING
835
/* Test for someone scribbling on unused space in chunk */
836
if (chunk->requested_size < chunk->size)
837
if (((char *) pointer)[chunk->requested_size] != 0x7E)
838
elog(WARNING, "detected write past chunk end in %s %p",
839
set->header.name, chunk);
842
if (chunk->size > set->allocChunkLimit)
845
* Big chunks are certain to have been allocated as single-chunk
846
* blocks. Find the containing block and return it to malloc().
848
AllocBlock block = set->blocks;
849
AllocBlock prevblock = NULL;
851
while (block != NULL)
853
if (chunk == (AllocChunk) (((char *) block) + ALLOC_BLOCKHDRSZ))
859
elog(ERROR, "could not find block containing chunk %p", chunk);
860
/* let's just make sure chunk is the only one in the block */
861
Assert(block->freeptr == ((char *) block) +
862
(chunk->size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ));
864
/* OK, remove block from aset's list and free it */
865
if (prevblock == NULL)
866
set->blocks = block->next;
868
prevblock->next = block->next;
869
#ifdef CLOBBER_FREED_MEMORY
870
/* Wipe freed memory for debugging purposes */
871
memset(block, 0x7F, block->freeptr - ((char *) block));
877
/* Normal case, put the chunk into appropriate freelist */
878
int fidx = AllocSetFreeIndex(chunk->size);
880
chunk->aset = (void *) set->freelist[fidx];
882
#ifdef CLOBBER_FREED_MEMORY
883
/* Wipe freed memory for debugging purposes */
884
memset(pointer, 0x7F, chunk->size);
887
#ifdef MEMORY_CONTEXT_CHECKING
888
/* Reset requested_size to 0 in chunks that are on freelist */
889
chunk->requested_size = 0;
891
set->freelist[fidx] = chunk;
897
* Returns new pointer to allocated memory of given size; this memory
898
* is added to the set. Memory associated with given pointer is copied
899
* into the new memory, and the old memory is freed.
902
AllocSetRealloc(MemoryContext context, void *pointer, Size size)
904
AllocSet set = (AllocSet) context;
905
AllocChunk chunk = AllocPointerGetChunk(pointer);
906
Size oldsize = chunk->size;
908
#ifdef MEMORY_CONTEXT_CHECKING
909
/* Test for someone scribbling on unused space in chunk */
910
if (chunk->requested_size < oldsize)
911
if (((char *) pointer)[chunk->requested_size] != 0x7E)
912
elog(WARNING, "detected write past chunk end in %s %p",
913
set->header.name, chunk);
916
/* isReset must be false already */
917
Assert(!set->isReset);
920
* Chunk sizes are aligned to power of 2 in AllocSetAlloc(). Maybe the
921
* allocated area already is >= the new size. (In particular, we always
922
* fall out here if the requested size is a decrease.)
926
#ifdef MEMORY_CONTEXT_CHECKING
927
#ifdef RANDOMIZE_ALLOCATED_MEMORY
928
/* We can only fill the extra space if we know the prior request */
929
if (size > chunk->requested_size)
930
randomize_mem((char *) AllocChunkGetPointer(chunk) + chunk->requested_size,
931
size - chunk->requested_size);
934
chunk->requested_size = size;
935
/* set mark to catch clobber of "unused" space */
937
((char *) pointer)[size] = 0x7E;
942
if (oldsize > set->allocChunkLimit)
945
* The chunk must have been allocated as a single-chunk block. Find
946
* the containing block and use realloc() to make it bigger with
947
* minimum space wastage.
949
AllocBlock block = set->blocks;
950
AllocBlock prevblock = NULL;
954
while (block != NULL)
956
if (chunk == (AllocChunk) (((char *) block) + ALLOC_BLOCKHDRSZ))
962
elog(ERROR, "could not find block containing chunk %p", chunk);
963
/* let's just make sure chunk is the only one in the block */
964
Assert(block->freeptr == ((char *) block) +
965
(chunk->size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ));
968
chksize = MAXALIGN(size);
969
blksize = chksize + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
970
block = (AllocBlock) realloc(block, blksize);
973
MemoryContextStats(TopMemoryContext);
975
(errcode(ERRCODE_OUT_OF_MEMORY),
976
errmsg("out of memory"),
977
errdetail("Failed on request of size %lu.",
978
(unsigned long) size)));
980
block->freeptr = block->endptr = ((char *) block) + blksize;
982
/* Update pointers since block has likely been moved */
983
chunk = (AllocChunk) (((char *) block) + ALLOC_BLOCKHDRSZ);
984
if (prevblock == NULL)
987
prevblock->next = block;
988
chunk->size = chksize;
990
#ifdef MEMORY_CONTEXT_CHECKING
991
#ifdef RANDOMIZE_ALLOCATED_MEMORY
992
/* We can only fill the extra space if we know the prior request */
993
randomize_mem((char *) AllocChunkGetPointer(chunk) + chunk->requested_size,
994
size - chunk->requested_size);
997
chunk->requested_size = size;
998
/* set mark to catch clobber of "unused" space */
999
if (size < chunk->size)
1000
((char *) AllocChunkGetPointer(chunk))[size] = 0x7E;
1003
return AllocChunkGetPointer(chunk);
1008
* Small-chunk case. We just do this by brute force, ie, allocate a
1009
* new chunk and copy the data. Since we know the existing data isn't
1010
* huge, this won't involve any great memcpy expense, so it's not
1011
* worth being smarter. (At one time we tried to avoid memcpy when it
1012
* was possible to enlarge the chunk in-place, but that turns out to
1013
* misbehave unpleasantly for repeated cycles of
1014
* palloc/repalloc/pfree: the eventually freed chunks go into the
1015
* wrong freelist for the next initial palloc request, and so we leak
1016
* memory indefinitely. See pgsql-hackers archives for 2007-08-11.)
1018
AllocPointer newPointer;
1020
/* allocate new chunk */
1021
newPointer = AllocSetAlloc((MemoryContext) set, size);
1023
/* transfer existing data (certain to fit) */
1024
memcpy(newPointer, pointer, oldsize);
1026
/* free old chunk */
1027
AllocSetFree((MemoryContext) set, pointer);
1034
* AllocSetGetChunkSpace
1035
* Given a currently-allocated chunk, determine the total space
1036
* it occupies (including all memory-allocation overhead).
1039
AllocSetGetChunkSpace(MemoryContext context, void *pointer)
1041
AllocChunk chunk = AllocPointerGetChunk(pointer);
1043
return chunk->size + ALLOC_CHUNKHDRSZ;
1048
* Is an allocset empty of any allocated space?
1051
AllocSetIsEmpty(MemoryContext context)
1053
AllocSet set = (AllocSet) context;
1056
* For now, we say "empty" only if the context is new or just reset. We
1057
* could examine the freelists to determine if all space has been freed,
1058
* but it's not really worth the trouble for present uses of this
1068
* Displays stats about memory consumption of an allocset.
1071
AllocSetStats(MemoryContext context, int level)
1073
AllocSet set = (AllocSet) context;
1076
long totalspace = 0;
1083
for (block = set->blocks; block != NULL; block = block->next)
1086
totalspace += block->endptr - ((char *) block);
1087
freespace += block->endptr - block->freeptr;
1089
for (fidx = 0; fidx < ALLOCSET_NUM_FREELISTS; fidx++)
1091
for (chunk = set->freelist[fidx]; chunk != NULL;
1092
chunk = (AllocChunk) chunk->aset)
1095
freespace += chunk->size + ALLOC_CHUNKHDRSZ;
1099
for (i = 0; i < level; i++)
1100
fprintf(stderr, " ");
1103
"%s: %lu total in %ld blocks; %lu free (%ld chunks); %lu used\n",
1104
set->header.name, totalspace, nblocks, freespace, nchunks,
1105
totalspace - freespace);
#ifdef MEMORY_CONTEXT_CHECKING

/*
 * AllocSetCheck
 *		Walk through chunks and check consistency of memory.
 *
 * NOTE: report errors as WARNING, *not* ERROR or FATAL.  Otherwise you'll
 * find yourself in an infinite loop when trouble occurs, because this
 * routine will be entered again when elog cleanup tries to release memory!
 */
static void
AllocSetCheck(MemoryContext context)
{
	AllocSet	set = (AllocSet) context;
	char	   *name = set->header.name;
	AllocBlock	block;

	for (block = set->blocks; block != NULL; block = block->next)
	{
		char	   *bpoz = ((char *) block) + ALLOC_BLOCKHDRSZ;
		long		blk_used = block->freeptr - bpoz;
		long		blk_data = 0;
		long		nchunks = 0;

		/*
		 * Empty block - empty can be keeper-block only
		 */
		if (!blk_used)
		{
			if (set->keeper != block)
				elog(WARNING, "problem in alloc set %s: empty block %p",
					 name, block);
		}

		/*
		 * Chunk walker
		 */
		while (bpoz < block->freeptr)
		{
			AllocChunk	chunk = (AllocChunk) bpoz;
			Size		chsize,
						dsize;
			char	   *chdata_end;

			chsize = chunk->size;		/* aligned chunk size */
			dsize = chunk->requested_size;		/* real data */
			chdata_end = ((char *) chunk) + (ALLOC_CHUNKHDRSZ + dsize);

			/*
			 * Check chunk size
			 */
			if (dsize > chsize)
				elog(WARNING, "problem in alloc set %s: req size > alloc size for chunk %p in block %p",
					 name, chunk, block);
			if (chsize < (1 << ALLOC_MINBITS))
				elog(WARNING, "problem in alloc set %s: bad size %lu for chunk %p in block %p",
					 name, (unsigned long) chsize, chunk, block);

			/* single-chunk block? */
			if (chsize > set->allocChunkLimit &&
				chsize + ALLOC_CHUNKHDRSZ != blk_used)
				elog(WARNING, "problem in alloc set %s: bad single-chunk %p in block %p",
					 name, chunk, block);

			/*
			 * If chunk is allocated, check for correct aset pointer. (If it's
			 * free, the aset is the freelist pointer, which we can't check as
			 * easily...)
			 */
			if (dsize > 0 && chunk->aset != (void *) set)
				elog(WARNING, "problem in alloc set %s: bogus aset link in block %p, chunk %p",
					 name, block, chunk);

			/*
			 * Check for overwrite of "unallocated" space in chunk
			 */
			if (dsize > 0 && dsize < chsize && *chdata_end != 0x7E)
				elog(WARNING, "problem in alloc set %s: detected write past chunk end in block %p, chunk %p",
					 name, block, chunk);

			blk_data += chsize;
			nchunks++;

			bpoz += ALLOC_CHUNKHDRSZ + chsize;
		}

		if ((blk_data + (nchunks * ALLOC_CHUNKHDRSZ)) != blk_used)
			elog(WARNING, "problem in alloc set %s: found inconsistent memory block %p",
				 name, block);
	}
}

#endif   /* MEMORY_CONTEXT_CHECKING */