 * PROGRAM: Client/Server Common Code
 * DESCRIPTION: Memory Pool Manager (based on B+ tree)
 *
 * The contents of this file are subject to the Initial
 * Developer's Public License Version 1.0 (the "License");
 * you may not use this file except in compliance with the
 * License. You may obtain a copy of the License at
 * http://www.ibphoenix.com/main.nfs?a=ibphoenix&page=ibp_idpl.
 *
 * Software distributed under the License is distributed AS IS,
 * WITHOUT WARRANTY OF ANY KIND, either express or implied.
 * See the License for the specific language governing rights
 * and limitations under the License.
 *
 * The Original Code was created by Nickolay Samofatov
 * for the Firebird Open Source RDBMS project.
 *
 * STL allocator is based on one by Mike Nordell and John Bellardo
 *
 * Copyright (c) 2004 Nickolay Samofatov <nickolay@broadviewsoftware.com>
 * and all contributors signed below.
 *
 * All Rights Reserved.
 *
 * Alex Peshkoff <peshkoff@mail.ru>
 *   added PermanentStorage and AutoStorage classes.
35
#ifndef CLASSES_ALLOC_H
36
#define CLASSES_ALLOC_H
42
#include "../jrd/common.h"
43
#include "../common/classes/fb_atomic.h"
44
#include "../common/classes/tree.h"
45
#include "../common/classes/locks.h"
47
#include <stdlib.h> /* XPG: prototypes for malloc/free have to be in
52
#define THROW_BAD_ALLOC
54
#define THROW_BAD_ALLOC throw (Firebird::BadAlloc)
59
// Size of Valgrind red zone applied before and after memory block allocated for user
60
#define VALGRIND_REDZONE 8
62
// When memory block is deallocated by user from the pool it must pass queue of this
63
// length before it is actually deallocated and access protection from it removed.
64
#define DELAYED_FREE_COUNT 1024
66
// When memory extent is deallocated when pool is destroying it must pass through
67
// queue of this length before it is actually returned to system
68
#define DELAYED_EXTENT_COUNT 32
74
// Maximum number of B+ tree pages kept spare for tree allocation
75
// Since we store only unique fragment lengths in our tree there
76
// shouldn't be more than 16K elements in it. This is why MAX_TREE_DEPTH
77
// equal to 4 is more than enough
78
const int MAX_TREE_DEPTH = 4;
80
// Alignment for all memory blocks. Sizes of memory blocks in headers are measured in this units
81
const size_t ALLOC_ALIGNMENT = ALIGNMENT;
83
static inline size_t MEM_ALIGN(size_t value) {
84
return FB_ALIGN(value, ALLOC_ALIGNMENT);
87
// Flags for memory block
88
const USHORT MBK_LARGE = 1; // Block is large, allocated from OS directly
89
const USHORT MBK_PARENT = 2; // Block is allocated from parent pool
90
const USHORT MBK_USED = 4; // Block is used
91
const USHORT MBK_LAST = 8; // Block is last in the extent
92
const USHORT MBK_DELAYED = 16; // Block is pending in the delayed free queue
94
struct FreeMemoryBlock {
95
FreeMemoryBlock* fbk_next_fragment;
99
// Has size of 12 bytes for 32-bit targets and 16 bytes on 64-bit ones
105
// Length and offset are measured in bytes thus memory extent size is limited to 64k
106
// Larger extents are not needed now, but this may be icreased later via using allocation units
107
USHORT mbk_length; // Actual block size: header not included, redirection list is included if applicable
108
USHORT mbk_prev_length;
111
ULONG mbk_large_length;
113
#ifdef DEBUG_GDS_ALLOC
114
const char* mbk_file;
118
class MemoryPool* mbk_pool;
119
FreeMemoryBlock* mbk_prev_fragment;
121
#if defined(USE_VALGRIND) && (VALGRIND_REDZONE != 0)
122
const char mbk_valgrind_redzone[VALGRIND_REDZONE];
127
// This structure is appended to the end of block redirected to parent pool or operating system
128
// It is a doubly-linked list which we are going to use when our pool is going to be deleted
129
struct MemoryRedirectList {
130
MemoryBlock* mrl_prev;
131
MemoryBlock* mrl_next;
134
// Block type tags for the pool's own internal structures; negative values
// keep them distinct from user allocations, whose type defaults to 0
// (see the SSHORT type parameter of allocate()/calloc() below).
const SSHORT TYPE_POOL = -1;
135
const SSHORT TYPE_EXTENT = -2;
136
const SSHORT TYPE_LEAFPAGE = -3;
137
const SSHORT TYPE_TREEPAGE = -4;
139
// We store BlkInfo structures instead of BlkHeader pointers to get benefits from
140
// processor cache-hit optimizations
143
FreeMemoryBlock* bli_fragments;
144
inline static const size_t& generate(const void* sender, const BlockInfo& i) {
149
struct MemoryExtent {
150
MemoryExtent *mxt_next;
151
MemoryExtent *mxt_prev;
154
struct PendingFreeBlock {
155
PendingFreeBlock *next;
160
// All counters start at zero; the mst_max_* members record peak values.
MemoryStats() : mst_usage(0), mst_mapped(0), mst_max_usage(0), mst_max_mapped(0) {}
162
// Memory currently allocated for users (allocator overhead excluded).
size_t get_current_usage() const { return mst_usage.value(); }
163
// Peak value of get_current_usage().
size_t get_maximum_usage() const { return mst_max_usage; }
164
// Memory mapped from the OS, all overheads included.
size_t get_current_mapping() const { return mst_mapped.value(); }
165
// Peak value of get_current_mapping().
size_t get_maximum_mapping() const { return mst_max_mapped; }
167
// Forbid copy constructor (pre-C++11 idiom: declared, intentionally empty,
// never meant to be invoked)
168
MemoryStats(const MemoryStats& object) {}
170
// Currently allocated memory (without allocator overhead)
171
// Useful for monitoring engine memory leaks
172
AtomicCounter mst_usage;
173
// Amount of memory mapped (including all overheads)
174
// Useful for monitoring OS memory consumption
175
AtomicCounter mst_mapped;
177
// We don't particularily care about extreme precision of these max values,
178
// this is why we don't synchronize them on Windows
179
size_t mst_max_usage;
180
size_t mst_max_mapped;
182
friend class MemoryPool;
186
// Memory pool based on B+ tree of free memory blocks
188
// We are going to have two target architectures:
189
// 1. Multi-process server with customizable lock manager
190
// 2. Multi-threaded server with single process (SUPERSERVER)
192
// MemoryPool inheritance looks weird because we cannot use
193
// any pointers to functions in shared memory. VMT usage in
194
// MemoryPool and its descendants is prohibited
197
class InternalAllocator {
199
void* allocate(size_t size) {
200
return ((MemoryPool*)this)->tree_alloc(size);
202
void deallocate(void* block) {
203
((MemoryPool*)this)->tree_free(block);
206
typedef BePlusTree<BlockInfo, size_t, InternalAllocator, BlockInfo> FreeBlocksTree;
208
// We keep most of our structures uninitialized as long we redirect
209
// our allocations to parent pool
210
bool parent_redirect;
212
// B+ tree ordered by length
213
FreeBlocksTree freeBlocks;
215
MemoryExtent *extents; // Linked list of all memory extents
217
Vector<void*, 2> spareLeafs;
218
Vector<void*, MAX_TREE_DEPTH + 1> spareNodes;
220
PendingFreeBlock *pendingFree;
222
// Synchronization of this object is a little bit tricky. Allocations
223
// redirected to parent pool are not protected with our mutex and not
224
// accounted locally, i.e. redirect_amount and parent_redirected linked list
225
// are synchronized with parent pool mutex only. All other pool members are
226
// synchronized with this mutex.
229
// Current usage counters for pool. Used to move pool to different statistics group
230
AtomicCounter used_memory;
232
size_t mapped_memory;
234
MemoryPool *parent; // Parent pool. Used to redirect small allocations there
235
MemoryBlock *parent_redirected, *os_redirected;
236
size_t redirect_amount; // Amount of memory redirected to parent
237
// It is protected by parent pool mutex along with redirect list
238
// Statistics group for the pool
242
// Circular FIFO buffer of read/write protected blocks pending free operation
243
void* delayedFree[DELAYED_FREE_COUNT];
244
int delayedFreeHandles[DELAYED_FREE_COUNT];
245
size_t delayedFreeCount;
246
size_t delayedFreePos;
249
/* Returns NULL in case it cannot allocate requested chunk */
250
static void* external_alloc(size_t &size);
252
static void external_free(void* blk, size_t &size, bool pool_destroying);
254
void* tree_alloc(size_t size);
256
void tree_free(void* block);
260
inline void addFreeBlock(MemoryBlock* blk);
262
void removeFreeBlock(MemoryBlock* blk);
264
void free_blk_extent(MemoryBlock* blk);
266
// Allocates small block from this pool. Pool must be locked during call
267
void* internal_alloc(size_t size, SSHORT type = 0
268
#ifdef DEBUG_GDS_ALLOC
269
, const char* file = NULL, int line = 0
273
// Deallocates small block from this pool. Pool must be locked during this call
274
void internal_deallocate(void* block);
276
// Forbid copy constructor, should never be called
277
// NOTE(review): the initializer list only exists to hand freeBlocks its
// allocator argument — presumably FreeBlocksTree has no default ctor; confirm.
MemoryPool(const MemoryPool& pool) : freeBlocks((InternalAllocator*)this) { }
279
// Used by pools to track memory usage.
281
// These 2 methods are thread-safe due to usage of atomic counters only
282
inline void increment_usage(size_t size);
283
inline void decrement_usage(size_t size);
285
inline void increment_mapping(size_t size);
286
inline void decrement_mapping(size_t size);
289
// Do not allow to create and destroy pool directly from outside
290
MemoryPool(MemoryPool* _parent, MemoryStats &_stats, void* first_extent, void* root_page);
292
// This should never be called
296
// Used to create MemoryPool descendants
297
static MemoryPool* internal_create(size_t instance_size,
298
MemoryPool* parent = NULL, MemoryStats& stats = *default_stats_group);
301
// Default statistics group for process
302
static MemoryStats* default_stats_group;
304
// Pool created for process
305
static MemoryPool* processMemoryPool;
307
// Create memory pool instance
308
static MemoryPool* createPool(MemoryPool* parent = NULL, MemoryStats& stats = *default_stats_group) {
309
return internal_create(sizeof(MemoryPool), parent, stats);
312
// Set context pool for current thread of execution
313
static MemoryPool* setContextPool(MemoryPool* newPool);
315
// Get context pool for current thread of execution
316
static MemoryPool* getContextPool();
318
// Set statistics group for pool. Usage counters will be decremented from
319
// previously set group and added to new
320
void setStatsGroup(MemoryStats& stats);
322
// Deallocate pool and all its contents
323
static void deletePool(MemoryPool* pool);
325
// Allocate memory block. Result is not zero-initialized.
326
// It case of problems this method throws Firebird::BadAlloc
327
void* allocate(size_t size, SSHORT type = 0
328
#ifdef DEBUG_GDS_ALLOC
329
, const char* file = NULL, int line = 0
333
// Allocate memory block. In case of problems this method returns NULL
334
void* allocate_nothrow(size_t size, SSHORT type = 0
335
#ifdef DEBUG_GDS_ALLOC
336
, const char* file = NULL, int line = 0
340
void deallocate(void* block);
342
// Check pool for internal consistent. When enabled, call is very expensive
343
bool verify_pool(bool fast_checks_only = false);
345
// Print out pool contents. This is debugging routine
346
void print_contents(FILE*, bool = false, const char* filter_path = 0);
348
// The same routine, but more easily callable from the debugger
349
void print_contents(const char* filename, bool = false,
350
const char* filter_path = 0);
352
// Deallocate memory block. Pool is derived from block header
353
static void globalFree(void* block) {
355
((MemoryBlock*)((char*)block - MEM_ALIGN(sizeof(MemoryBlock))))->mbk_pool->deallocate(block);
358
// Allocate zero-initialized block of memory
359
void* calloc(size_t size, SSHORT type = 0
360
#ifdef DEBUG_GDS_ALLOC
361
, const char* file = NULL, int line = 0
364
void* result = allocate(size, type
365
#ifdef DEBUG_GDS_ALLOC
369
memset(result, 0, size);
373
/// Returns the type associated with the allocated memory.
374
static SSHORT blk_type(const void* mem) {
375
return ((MemoryBlock*)((char *)mem - MEM_ALIGN(sizeof(MemoryBlock))))->mbk_type;
378
/// Returns the pool the memory was allocated from.
379
//static MemoryPool* blk_pool(const void* mem) {
380
// return ((MemoryBlock*)((char *)mem - MEM_ALIGN(sizeof(MemoryBlock))))->mbk_pool;
383
friend class InternalAllocator;
386
// Class intended to manage execution context pool stack
387
// Declare instance of this class when you need to set new context pool and it
388
// will be restored automatically as soon holder variable gets out of scope
389
class ContextPoolHolder {
391
ContextPoolHolder(MemoryPool* newPool) {
392
savedPool = MemoryPool::setContextPool(newPool);
394
~ContextPoolHolder() {
395
MemoryPool::setContextPool(savedPool);
398
MemoryPool* savedPool;
401
// template enabling common use of old and new pools control code
402
// to be dropped when old-style code goes away
403
template <typename SubsystemThreadData, typename SubsystemPool>
404
class SubsystemContextPoolHolder
405
: public ContextPoolHolder
408
SubsystemContextPoolHolder <SubsystemThreadData, SubsystemPool>
410
SubsystemThreadData* subThreadData,
411
SubsystemPool* newPool
413
: ContextPoolHolder(newPool),
414
savedThreadData(subThreadData),
415
savedPool(savedThreadData->getDefaultPool())
417
savedThreadData->setDefaultPool(newPool);
419
~SubsystemContextPoolHolder() {
420
savedThreadData->setDefaultPool(savedPool);
423
SubsystemThreadData* savedThreadData;
424
SubsystemPool* savedPool;
427
} // namespace Firebird
429
using Firebird::MemoryPool;
431
// Accessor for the process-wide pool; the global operator new/delete
// overloads below route their allocations through it.
inline static MemoryPool* getDefaultMemoryPool()
{
	return Firebird::MemoryPool::processMemoryPool;
}
433
// Global versions of operators new and delete
434
inline void* operator new(size_t s) THROW_BAD_ALLOC
436
return getDefaultMemoryPool()->allocate(s, 0
437
#ifdef DEBUG_GDS_ALLOC
442
inline void* operator new[](size_t s) THROW_BAD_ALLOC
444
return getDefaultMemoryPool()->allocate(s, 0
445
#ifdef DEBUG_GDS_ALLOC
451
inline void* operator new(size_t, void* ptr) throw()
455
inline void* operator new[](size_t, void* ptr) throw()
460
inline void operator delete(void* mem) throw()
462
Firebird::MemoryPool::globalFree(mem);
464
inline void operator delete[](void* mem) throw()
466
Firebird::MemoryPool::globalFree(mem);
469
#ifdef DEBUG_GDS_ALLOC
470
inline void* operator new(size_t s, Firebird::MemoryPool& pool, const char* file, int line) {
471
return pool.allocate(s, 0, file, line);
473
inline void* operator new[](size_t s, Firebird::MemoryPool& pool, const char* file, int line) {
474
return pool.allocate(s, 0, file, line);
476
#define FB_NEW(pool) new(pool, __FILE__, __LINE__)
477
#define FB_NEW_RPT(pool, count) new(pool, count, __FILE__, __LINE__)
479
inline void* operator new(size_t s, Firebird::MemoryPool& pool) {
480
return pool.allocate(s);
482
inline void* operator new[](size_t s, Firebird::MemoryPool& pool) {
483
return pool.allocate(s);
485
#define FB_NEW(pool) new(pool)
486
#define FB_NEW_RPT(pool, count) new(pool, count)
492
// Permanent storage is used as base class for all objects,
493
// performing memory allocation in methods other than
494
// constructors of this objects. Permanent means that pool,
495
// which will be later used for such allocations, must
496
// be explicitly passed in all constructors of such object.
497
class PermanentStorage {
501
// Captures the pool that all later allocations by this object must use.
explicit PermanentStorage(MemoryPool& p) : pool(p) { }
502
// Returns the pool supplied at construction.
MemoryPool& getPool() const { return pool; }
505
// Automatic storage is used as base class for objects,
506
// that may have constructors without explicit MemoryPool
507
// parameter. In this case AutoStorage sends AutoMemoryPool
508
// to PermanentStorage. To ensure this operation to be safe
509
// such trick possible only for local (on stack) variables.
510
class AutoStorage : public PermanentStorage {
512
#if defined(DEV_BUILD)
513
void ProbeStack() const;
516
static MemoryPool& getAutoMemoryPool();
518
AutoStorage() : PermanentStorage(getAutoMemoryPool()) {
519
#if defined(DEV_BUILD)
523
// Explicit-pool constructor: simply forwards the pool to PermanentStorage.
explicit AutoStorage(MemoryPool& p) : PermanentStorage(p) { }
526
} // namespace Firebird
529
#endif // CLASSES_ALLOC_H