2
//Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
5
//Redistribution and use in source and binary forms, with or without
6
//modification, are permitted provided that the following conditions
9
// Redistributions of source code must retain the above copyright
10
// notice, this list of conditions and the following disclaimer.
12
// Redistributions in binary form must reproduce the above
13
// copyright notice, this list of conditions and the following
14
// disclaimer in the documentation and/or other materials provided
15
// with the distribution.
17
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
18
// contributors may be used to endorse or promote products derived
19
// from this software without specific prior written permission.
21
//THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22
//"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23
//LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24
//FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25
//COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26
//INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
27
//BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28
//LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
29
//CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30
//LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
31
//ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32
//POSSIBILITY OF SUCH DAMAGE.
35
#include "../Include/PoolAlloc.h"
36
#include "../Include/Common.h"
38
#include "Include/InitializeGlobals.h"
39
#include "osinclude.h"
41
// Process-wide TLS index; each thread stores its TThreadGlobalPools record here.
OS_TLSIndex PoolIndex;
43
void InitializeGlobalPools()
45
TThreadGlobalPools* globalPools= static_cast<TThreadGlobalPools*>(OS_GetTLSValue(PoolIndex));
49
TPoolAllocator *globalPoolAllocator = new TPoolAllocator(true);
51
TThreadGlobalPools* threadData = new TThreadGlobalPools();
53
threadData->globalPoolAllocator = globalPoolAllocator;
55
OS_SetTLSValue(PoolIndex, threadData);
56
globalPoolAllocator->push();
59
void FreeGlobalPools()
61
// Release the allocated memory for this thread.
62
TThreadGlobalPools* globalPools= static_cast<TThreadGlobalPools*>(OS_GetTLSValue(PoolIndex));
66
GlobalPoolAllocator.popAll();
67
delete &GlobalPoolAllocator;
71
bool InitializePoolIndex()
73
// Allocate a TLS index.
74
if ((PoolIndex = OS_AllocTLSIndex()) == OS_INVALID_TLS_INDEX)
82
// Release the TLS index.
83
OS_FreeTLSIndex(PoolIndex);
86
//
// Return this thread's pool allocator.
// Precondition: InitializeGlobalPools() has run on this thread; otherwise
// the TLS value is null and the dereference below is undefined behavior.
//
TPoolAllocator& GetGlobalPoolAllocator()
{
    TThreadGlobalPools* threadData = static_cast<TThreadGlobalPools*>(OS_GetTLSValue(PoolIndex));

    return *threadData->globalPoolAllocator;
}
93
//
// Swap in a different allocator for this thread (e.g. a compile-scoped pool).
// Does not free the previously installed allocator -- the caller owns both.
// Precondition: InitializeGlobalPools() has run on this thread.
//
void SetGlobalPoolAllocatorPtr(TPoolAllocator* poolAllocator)
{
    TThreadGlobalPools* threadData = static_cast<TThreadGlobalPools*>(OS_GetTLSValue(PoolIndex));

    threadData->globalPoolAllocator = poolAllocator;
}
101
// Implement the functionality of the TPoolAllocator class, which
102
// is documented in PoolAlloc.h.
104
//
// Implement the functionality of the TPoolAllocator class, which
// is documented in PoolAlloc.h.
//
// NOTE(review): the member-initializer list was partially lost in the
// garbled source; the extra initializers below (global, freeList,
// inUseList, numCalls, totalBytes) are reconstructed from how the
// destructor and allocate() use those members -- confirm against PoolAlloc.h.
//
TPoolAllocator::TPoolAllocator(bool g, int growthIncrement, int allocationAlignment) :
    global(g),
    pageSize(growthIncrement),
    alignment(allocationAlignment),
    freeList(0),
    inUseList(0),
    numCalls(0),
    totalBytes(0)
{
    //
    // Don't allow page sizes we know are smaller than all common
    // OS page sizes.
    //
    if (pageSize < 4*1024)
        pageSize = 4*1024;

    //
    // A large currentPageOffset indicates a new page needs to
    // be obtained to allocate memory.
    //
    currentPageOffset = pageSize;

    //
    // Adjust alignment to be at least pointer aligned and
    // power of 2.
    //
    size_t minAlign = sizeof(void*);
    alignment &= ~(minAlign - 1);
    if (alignment < minAlign)
        alignment = minAlign;
    // Round alignment up to the next power of two and derive the mask
    // used for rounding offsets.
    size_t a = 1;
    while (a < alignment)
        a <<= 1;
    alignment = a;
    alignmentMask = a - 1;

    //
    // Align header skip: the page header lives at the front of each
    // page; user data starts at the first aligned offset after it.
    //
    headerSkip = minAlign;
    if (headerSkip < sizeof(tHeader)) {
        headerSkip = (sizeof(tHeader) + alignmentMask) & ~alignmentMask;
    }

    //
    // Put a marker at the beginning of the stack. We won't
    // pop() past it, so there is always a bottom state to restore to.
    //
    tAllocState start = { currentPageOffset, 0 };
    stack.push_back(start);
}
155
//
// Release every page this allocator still owns. In-use pages are only
// reclaimed for a "global" allocator; free-list pages are always reclaimed.
//
TPoolAllocator::~TPoolAllocator()
{
    if (global) {
        //
        // Then we know that this object is not being
        // allocated after other, globally scoped objects
        // that depend on it. So we can delete the "in use" memory.
        //
        while (inUseList) {
            tHeader* next = inUseList->nextPage;
            inUseList->~tHeader();      // run header dtor (checks/frees allocation list)
            delete [] reinterpret_cast<char*>(inUseList);
            inUseList = next;
        }
    }

    //
    // Always delete the free list memory - it can't be being
    // (correctly) referenced, whether the pool allocator was
    // global or not. We should not check the guard blocks
    // here, because we did it already when the block was
    // placed into the free list.
    //
    while (freeList) {
        tHeader* next = freeList->nextPage;
        delete [] reinterpret_cast<char*>(freeList);
        freeList = next;
    }
}
185
// Support MSVC++ 6.0
186
const unsigned char TAllocation::guardBlockBeginVal = 0xfb;
187
const unsigned char TAllocation::guardBlockEndVal = 0xfe;
188
const unsigned char TAllocation::userDataFill = 0xcd;
191
const size_t TAllocation::guardBlockSize = 16;
193
const size_t TAllocation::guardBlockSize = 0;
197
// Check a single guard block for damage
199
void TAllocation::checkGuardBlock(unsigned char* blockMem, unsigned char val, char* locText) const
201
for (int x = 0; x < guardBlockSize; x++) {
202
if (blockMem[x] != val) {
205
// We don't print the assert message. It's here just to be helpful.
206
sprintf(assertMsg, "PoolAlloc: Damage %s %u byte allocation at 0x%p\n",
207
locText, size, data());
208
assert(0 && "PoolAlloc: Damage in guard block");
214
void TPoolAllocator::push()
216
tAllocState state = { currentPageOffset, inUseList };
218
stack.push_back(state);
221
// Indicate there is no current page to allocate from.
223
currentPageOffset = pageSize;
227
// Do a mass-deallocation of all the individual allocations
228
// that have occurred since the last push(), or since the
229
// last pop(), or since the object's creation.
231
// The deallocated pages are saved for future allocations.
233
void TPoolAllocator::pop()
235
if (stack.size() < 1)
238
tHeader* page = stack.back().page;
239
currentPageOffset = stack.back().offset;
241
while (inUseList != page) {
242
// invoke destructor to free allocation list
243
inUseList->~tHeader();
245
tHeader* nextInUse = inUseList->nextPage;
246
if (inUseList->pageCount > 1)
247
delete [] reinterpret_cast<char*>(inUseList);
249
inUseList->nextPage = freeList;
250
freeList = inUseList;
252
inUseList = nextInUse;
259
// Do a mass-deallocation of all the individual allocations
260
// that have occurred.
262
void TPoolAllocator::popAll()
264
while (stack.size() > 0)
268
void* TPoolAllocator::allocate(size_t numBytes)
270
// If we are using guard blocks, all allocations are bracketed by
271
// them: [guardblock][allocation][guardblock]. numBytes is how
272
// much memory the caller asked for. allocationSize is the total
273
// size including guard blocks. In release build,
274
// guardBlockSize=0 and this all gets optimized away.
275
size_t allocationSize = TAllocation::allocationSize(numBytes);
278
// Just keep some interesting statistics.
281
totalBytes += numBytes;
284
// Do the allocation, most likely case first, for efficiency.
285
// This step could be moved to be inline sometime.
287
if (currentPageOffset + allocationSize <= pageSize) {
289
// Safe to allocate from currentPageOffset.
291
unsigned char* memory = reinterpret_cast<unsigned char *>(inUseList) + currentPageOffset;
292
currentPageOffset += allocationSize;
293
currentPageOffset = (currentPageOffset + alignmentMask) & ~alignmentMask;
295
return initializeAllocation(inUseList, memory, numBytes);
298
if (allocationSize + headerSkip > pageSize) {
300
// Do a multi-page allocation. Don't mix these with the others.
301
// The OS is efficient and allocating and free-ing multiple pages.
303
size_t numBytesToAlloc = allocationSize + headerSkip;
304
tHeader* memory = reinterpret_cast<tHeader*>(::new char[numBytesToAlloc]);
308
// Use placement-new to initialize header
309
new(memory) tHeader(inUseList, (numBytesToAlloc + pageSize - 1) / pageSize);
312
currentPageOffset = pageSize; // make next allocation come from a new page
314
// No guard blocks for multi-page allocations (yet)
315
return reinterpret_cast<void*>(reinterpret_cast<UINT_PTR>(memory) + headerSkip);
319
// Need a simple page to allocate from.
324
freeList = freeList->nextPage;
326
memory = reinterpret_cast<tHeader*>(::new char[pageSize]);
331
// Use placement-new to initialize header
332
new(memory) tHeader(inUseList, 1);
335
unsigned char* ret = reinterpret_cast<unsigned char *>(inUseList) + headerSkip;
336
currentPageOffset = (headerSkip + allocationSize + alignmentMask) & ~alignmentMask;
338
return initializeAllocation(inUseList, ret, numBytes);
343
// Check all allocations in a list for damage by calling check on each.
345
void TAllocation::checkAllocList() const
347
for (const TAllocation* alloc = this; alloc != 0; alloc = alloc->prevAlloc)