/*
 * Two Levels Segregated Fit memory allocator (TLSF)
 *
 * Written by Miguel Masmano Tello <mimastel@doctor.upv.es>
 *
 * Thanks to Ismael Ripoll for his suggestions and reviews
 *
 * Copyright (C) 2007, 2006, 2005, 2004
 *
 * This code is released using a dual license strategy: GPL/LGPL
 * You can choose the license that best fits your requirements.
 *
 * Released under the terms of the GNU General Public License Version 2.0
 * Released under the terms of the GNU Lesser General Public License
 * Version 2.1
 *
 * This is a kernel port of the TLSF allocator.
 * Original code can be found at: http://rtportal.upv.es/rtmalloc/
 * Adapted for Linux by Nitin Gupta (nitingupta910@gmail.com)
 * (http://code.google.com/p/compcache/source/browse/trunk/sub-projects
 *  /allocators/tlsf-kmod r229 dated Aug 27, 2008)
 * Adapted for Xen by Dan Magenheimer (dan.magenheimer@oracle.com)
 */
#include <xen/config.h>
#include <xen/list.h>
#include <xen/mm.h>
#include <xen/spinlock.h>

#define MAX_POOL_NAME_LEN       16
/* Some IMPORTANT TLSF parameters */
#define MEM_ALIGN           (sizeof(void *) * 2)
#define MEM_ALIGN_MASK      (~(MEM_ALIGN - 1))

#define MAX_FLI             (30)
#define MAX_LOG2_SLI        (5)
#define MAX_SLI             (1 << MAX_LOG2_SLI)

#define FLI_OFFSET          (6)
/* The TLSF structure only manages blocks bigger than 128 bytes */
#define SMALL_BLOCK         (128)
#define REAL_FLI            (MAX_FLI - FLI_OFFSET)
#define MIN_BLOCK_SIZE      (sizeof(struct free_ptr))
#define BHDR_OVERHEAD       (sizeof(struct bhdr) - MIN_BLOCK_SIZE)

#define PTR_MASK            (sizeof(void *) - 1)
#define BLOCK_SIZE_MASK     (0xFFFFFFFF - PTR_MASK)
#define GET_NEXT_BLOCK(addr, r) ((struct bhdr *) \
                                 ((char *)(addr) + (r)))

#define ROUNDUP_SIZE(r)     (((r) + MEM_ALIGN - 1) & MEM_ALIGN_MASK)
#define ROUNDDOWN_SIZE(r)   ((r) & MEM_ALIGN_MASK)
#define ROUNDUP_PAGE(r)     (((r) + PAGE_SIZE - 1) & PAGE_MASK)
#define BLOCK_STATE (0x1)
#define PREV_STATE  (0x2)

/* bit 0 of the block size */
#define FREE_BLOCK  (0x1)
#define USED_BLOCK  (0x0)

/* bit 1 of the block size */
#define PREV_FREE   (0x2)
#define PREV_USED   (0x0)
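
/*
 * Since MEM_ALIGN is at least 8 bytes, every block size is a multiple of
 * 8 and its two low bits are always zero; TLSF reuses them to store the
 * flags above.  For example, a 256-byte free block whose predecessor is
 * in use carries size = 256 | FREE_BLOCK | PREV_USED = 0x101, and
 * (size & BLOCK_SIZE_MASK) recovers the plain 256.
 */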
static spinlock_t pool_list_lock;
static struct list_head pool_list_head;
struct free_ptr {
    struct bhdr *prev;
    struct bhdr *next;
};

struct bhdr {
    /* All blocks in a region are linked in order of physical address */
    struct bhdr *prev_hdr;
    /*
     * The size is stored in bytes
     *  bit 0: block is free, if set
     *  bit 1: previous block is free, if set
     */
    u32 size;
    /* Free blocks in individual freelists are linked */
    union {
        struct free_ptr free_ptr;
        u8 buffer[sizeof(struct free_ptr)];
    } ptr;
};
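
/*
 * The free-list links live in a union with the first bytes of the user
 * data area: a free block needs prev/next pointers, while a used block
 * hands that space to the caller.  This is why BHDR_OVERHEAD is only
 * sizeof(struct bhdr) - MIN_BLOCK_SIZE, i.e. the prev_hdr and size
 * fields, and why MIN_BLOCK_SIZE is sizeof(struct free_ptr).
 */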
struct xmem_pool {
    /* First level bitmap (REAL_FLI bits) */
    u32 fl_bitmap;

    /* Second level bitmap */
    u32 sl_bitmap[REAL_FLI];

    /* Free lists */
    struct bhdr *matrix[REAL_FLI][MAX_SLI];

    spinlock_t lock;

    unsigned long init_size;
    unsigned long max_size;
    unsigned long grow_size;

    /* Basic stats */
    unsigned long used_size;
    unsigned long num_regions;

    /* User provided functions for expanding/shrinking pool */
    xmem_pool_get_memory *get_mem;
    xmem_pool_put_memory *put_mem;

    struct list_head list;

    void *init_region;
    char name[MAX_POOL_NAME_LEN];
};
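
/*
 * Free lists are indexed by a two-level scheme: fl_bitmap has one bit
 * per power-of-two size class (first level), and sl_bitmap[fl] splits
 * that class into MAX_SLI equal slices (second level).  matrix[fl][sl]
 * heads the doubly-linked list of free blocks in that slice, so both
 * lookup and insertion are O(1) bitmap operations.
 */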
/*
 * Returns indexes (fl, sl) used as the starting point to search for a
 * free block of size r.  It also rounds the requested size (*r) up to
 * the next list boundary.
 */
static inline void MAPPING_SEARCH(unsigned long *r, int *fl, int *sl)
{
    int t;

    if ( *r < SMALL_BLOCK )
    {
        *fl = 0;
        *sl = *r / (SMALL_BLOCK / MAX_SLI);
    }
    else
    {
        t = (1 << (fls(*r) - 1 - MAX_LOG2_SLI)) - 1;
        *r = *r + t;
        *fl = fls(*r) - 1;
        *sl = (*r >> (*fl - MAX_LOG2_SLI)) - MAX_SLI;
        *fl -= FLI_OFFSET; /* FL will always be > 0 */
        *r &= ~t;
    }
}
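
/*
 * Worked example: MAPPING_SEARCH(&r, ...) with *r = 460.  fls(460) = 9,
 * so t = (1 << (9 - 1 - 5)) - 1 = 7 and *r becomes 467.  Then
 * *fl = fls(467) - 1 = 8, *sl = (467 >> 3) - 32 = 26, and after the
 * FLI_OFFSET shift *fl = 2; finally *r &= ~7 rounds the request to 464.
 * Index (2, 26) covers sizes [464, 471], so any block found on that
 * list is guaranteed to fit without a further size check.
 */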
/*
 * Returns indexes (fl, sl) of the free list where a block of exactly
 * size r belongs; used when inserting a free block.
 */
static inline void MAPPING_INSERT(unsigned long r, int *fl, int *sl)
{
    if ( r < SMALL_BLOCK )
    {
        *fl = 0;
        *sl = r / (SMALL_BLOCK / MAX_SLI);
    }
    else
    {
        *fl = fls(r) - 1;
        *sl = (r >> (*fl - MAX_LOG2_SLI)) - MAX_SLI;
        *fl -= FLI_OFFSET;
    }
}
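
/*
 * Unlike MAPPING_SEARCH, this does not round r up: a freed block of,
 * say, 464 bytes maps directly to (fl, sl) = (2, 26), the same list a
 * rounded-up search for 460 bytes would start from.
 */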
/*
 * Returns the first block from a list that holds blocks larger than or
 * equal to the size indexed by (fl, sl); updates (fl, sl) to the list
 * the block was actually taken from.
 */
static inline struct bhdr *FIND_SUITABLE_BLOCK(struct xmem_pool *p, int *fl,
                                               int *sl)
{
    u32 tmp = p->sl_bitmap[*fl] & (~0 << *sl);
    struct bhdr *b = NULL;

    if ( tmp )
    {
        *sl = ffs(tmp) - 1;
        b = p->matrix[*fl][*sl];
    }
    else
    {
        *fl = ffs(p->fl_bitmap & (~0 << (*fl + 1))) - 1;
        if ( likely(*fl > 0) )
        {
            *sl = ffs(p->sl_bitmap[*fl]) - 1;
            b = p->matrix[*fl][*sl];
        }
    }
    return b;
}
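
/*
 * The (~0 << *sl) mask discards slices smaller than the request within
 * the current size class; if none remain, ffs() on fl_bitmap finds the
 * next non-empty class above, whose smallest slice is always big enough.
 * ffs() returns 0 when no such class exists, making *fl negative, which
 * the likely(*fl > 0) test catches so that NULL is returned.
 */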
/*
 * Removes the first free block(b) from the free list with indexes (fl, sl).
 */
static inline void EXTRACT_BLOCK_HDR(struct bhdr *b, struct xmem_pool *p, int fl,
                                     int sl)
{
    p->matrix[fl][sl] = b->ptr.free_ptr.next;
    if ( p->matrix[fl][sl] )
    {
        p->matrix[fl][sl]->ptr.free_ptr.prev = NULL;
    }
    else
    {
        clear_bit(sl, &p->sl_bitmap[fl]);
        if ( !p->sl_bitmap[fl] )
            clear_bit(fl, &p->fl_bitmap);
    }
    b->ptr.free_ptr = (struct free_ptr) {NULL, NULL};
}
/*
 * Removes block(b) from the free list with indexes (fl, sl)
 */
static inline void EXTRACT_BLOCK(struct bhdr *b, struct xmem_pool *p, int fl,
                                 int sl)
{
    if ( b->ptr.free_ptr.next )
        b->ptr.free_ptr.next->ptr.free_ptr.prev =
            b->ptr.free_ptr.prev;
    if ( b->ptr.free_ptr.prev )
        b->ptr.free_ptr.prev->ptr.free_ptr.next =
            b->ptr.free_ptr.next;
    if ( p->matrix[fl][sl] == b )
    {
        p->matrix[fl][sl] = b->ptr.free_ptr.next;
        if ( !p->matrix[fl][sl] )
        {
            clear_bit(sl, &p->sl_bitmap[fl]);
            if ( !p->sl_bitmap[fl] )
                clear_bit(fl, &p->fl_bitmap);
        }
    }
    b->ptr.free_ptr = (struct free_ptr) {NULL, NULL};
}
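
/*
 * EXTRACT_BLOCK_HDR unlinks the list head and so only rewrites
 * matrix[fl][sl]; EXTRACT_BLOCK can unlink from the middle of a list
 * and must patch its neighbours' prev/next first.  Both clear the
 * (sl, fl) bitmap bits once the affected list becomes empty, keeping
 * the bitmaps in sync with the matrix.
 */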
/*
 * Insert block(b) in the free list with indexes (fl, sl)
 */
static inline void INSERT_BLOCK(struct bhdr *b, struct xmem_pool *p, int fl, int sl)
{
    b->ptr.free_ptr = (struct free_ptr) {NULL, p->matrix[fl][sl]};
    if ( p->matrix[fl][sl] )
        p->matrix[fl][sl]->ptr.free_ptr.prev = b;
    p->matrix[fl][sl] = b;
    set_bit(sl, &p->sl_bitmap[fl]);
    set_bit(fl, &p->fl_bitmap);
}
/*
 * A Region is a virtually contiguous memory area and a Pool is a
 * collection of such Regions.
 */
static inline void ADD_REGION(void *region, unsigned long region_size,
                              struct xmem_pool *pool)
{
    int fl, sl;
    struct bhdr *b, *lb;

    b = (struct bhdr *)(region);
    b->prev_hdr = NULL;
    b->size = ROUNDDOWN_SIZE(region_size - 2 * BHDR_OVERHEAD)
        | FREE_BLOCK | PREV_USED;
    MAPPING_INSERT(b->size & BLOCK_SIZE_MASK, &fl, &sl);
    INSERT_BLOCK(b, pool, fl, sl);
    /* The sentinel block: allows us to know when we're in the last block */
    lb = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE_MASK);
    lb->prev_hdr = b;
    lb->size = 0 | USED_BLOCK | PREV_FREE;
    pool->used_size += BHDR_OVERHEAD; /* only sentinel block is "used" */
    pool->num_regions++;
}
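
/*
 * Region layout after ADD_REGION:
 *
 *   [bhdr | free space ...........................][sentinel bhdr]
 *
 * The sentinel is a zero-size "used" block at the very end, so
 * coalescing in xmem_pool_free() never runs past the region, and a
 * region is recognised as empty when its first block has no prev_hdr
 * and its successor is the zero-size sentinel.
 */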
/*
 * TLSF pool-based allocator start.
 */

struct xmem_pool *xmem_pool_create(
    const char *name,
    xmem_pool_get_memory get_mem,
    xmem_pool_put_memory put_mem,
    unsigned long init_size,
    unsigned long max_size,
    unsigned long grow_size)
{
    struct xmem_pool *pool;
    int pool_bytes, pool_order;

    BUG_ON(max_size && (max_size < init_size));

    pool_bytes = ROUNDUP_SIZE(sizeof(*pool));
    pool_order = get_order_from_bytes(pool_bytes);

    pool = (void *)alloc_xenheap_pages(pool_order, 0);
    if ( pool == NULL )
        return NULL;
    memset(pool, 0, pool_bytes);

    /* Round to next page boundary */
    init_size = ROUNDUP_PAGE(init_size);
    max_size = ROUNDUP_PAGE(max_size);
    grow_size = ROUNDUP_PAGE(grow_size);

    /* pool global overhead not included in used size */
    pool->used_size = 0;

    pool->init_size = init_size;
    pool->max_size = max_size;
    pool->grow_size = grow_size;
    pool->get_mem = get_mem;
    pool->put_mem = put_mem;
    strlcpy(pool->name, name, sizeof(pool->name));

    /* always obtain init_region lazily now to ensure it is get_mem'd
     * in the same "context" as all other regions */

    spin_lock_init(&pool->lock);

    spin_lock(&pool_list_lock);
    list_add_tail(&pool->list, &pool_list_head);
    spin_unlock(&pool_list_lock);

    return pool;
}
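
/*
 * Typical use (sketch; my_get_region/my_put_region are illustrative
 * caller-supplied callbacks, not part of this file):
 *
 *   struct xmem_pool *p = xmem_pool_create("mypool", my_get_region,
 *                                          my_put_region, PAGE_SIZE,
 *                                          0, PAGE_SIZE);
 *
 * A max_size of 0 leaves the pool unbounded; tlsf_init() below does
 * exactly this to back xmalloc() with single-page regions.
 */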
unsigned long xmem_pool_get_used_size(struct xmem_pool *pool)
{
    return pool->used_size;
}

unsigned long xmem_pool_get_total_size(struct xmem_pool *pool)
{
    unsigned long total;

    total = ROUNDUP_SIZE(sizeof(*pool)) + pool->init_size
        + (pool->num_regions - 1) * pool->grow_size;
    return total;
}
void xmem_pool_destroy(struct xmem_pool *pool)
{
    int pool_bytes, pool_order;

    if ( pool == NULL )
        return;

    /* User is destroying without ever allocating from this pool */
    if ( xmem_pool_get_used_size(pool) == BHDR_OVERHEAD )
    {
        ASSERT(!pool->init_region);
        pool->used_size -= BHDR_OVERHEAD;
    }

    /* Check for memory leaks in this pool */
    if ( xmem_pool_get_used_size(pool) )
        printk("memory leak in pool: %s (%p). %lu bytes still in use.\n",
               pool->name, pool, xmem_pool_get_used_size(pool));

    spin_lock(&pool_list_lock);
    list_del_init(&pool->list);
    spin_unlock(&pool_list_lock);

    pool_bytes = ROUNDUP_SIZE(sizeof(*pool));
    pool_order = get_order_from_bytes(pool_bytes);
    free_xenheap_pages(pool, pool_order);
}
void *xmem_pool_alloc(unsigned long size, struct xmem_pool *pool)
{
    struct bhdr *b, *b2, *next_b, *region;
    int fl, sl;
    unsigned long tmp_size;

    if ( pool->init_region == NULL )
    {
        if ( (region = pool->get_mem(pool->init_size)) == NULL )
            goto out;
        ADD_REGION(region, pool->init_size, pool);
        pool->init_region = region;
    }

    size = (size < MIN_BLOCK_SIZE) ? MIN_BLOCK_SIZE : ROUNDUP_SIZE(size);
    /* Rounding up the requested size and calculating fl and sl */

    spin_lock(&pool->lock);
 retry_find:
    MAPPING_SEARCH(&size, &fl, &sl);

    /* Searching a free block */
    if ( !(b = FIND_SUITABLE_BLOCK(pool, &fl, &sl)) )
    {
        /* Not found: grow the pool if allowed */
        if ( size > (pool->grow_size - 2 * BHDR_OVERHEAD) )
            goto out_locked;
        if ( pool->max_size && (pool->init_size +
                                pool->num_regions * pool->grow_size
                                > pool->max_size) )
            goto out_locked;
        spin_unlock(&pool->lock);
        if ( (region = pool->get_mem(pool->grow_size)) == NULL )
            goto out;
        spin_lock(&pool->lock);
        ADD_REGION(region, pool->grow_size, pool);
        goto retry_find;
    }
    EXTRACT_BLOCK_HDR(b, pool, fl, sl);

    /* Found */
    next_b = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE_MASK);
    /* Should the block be split? */
    tmp_size = (b->size & BLOCK_SIZE_MASK) - size;
    if ( tmp_size >= sizeof(struct bhdr) )
    {
        tmp_size -= BHDR_OVERHEAD;
        b2 = GET_NEXT_BLOCK(b->ptr.buffer, size);

        b2->size = tmp_size | FREE_BLOCK | PREV_USED;
        b2->prev_hdr = b;

        next_b->prev_hdr = b2;

        MAPPING_INSERT(tmp_size, &fl, &sl);
        INSERT_BLOCK(b2, pool, fl, sl);

        b->size = size | (b->size & PREV_STATE);
    }
    else
    {
        next_b->size &= (~PREV_FREE);
        b->size &= (~FREE_BLOCK); /* Now it's used */
    }

    pool->used_size += (b->size & BLOCK_SIZE_MASK) + BHDR_OVERHEAD;

    spin_unlock(&pool->lock);
    return (void *)b->ptr.buffer;

 out_locked:
    spin_unlock(&pool->lock);

 out:
    return NULL;
}
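
/*
 * Split arithmetic: on a 64-bit build (sizeof(struct bhdr) == 32,
 * BHDR_OVERHEAD == 16), allocating 464 bytes from a 512-byte free block
 * leaves tmp_size = 48 >= sizeof(struct bhdr), so the block is split:
 * the 464-byte head is returned and a 48 - 16 = 32-byte remainder is
 * re-inserted as a new free block.  Smaller remainders stay attached to
 * the allocation rather than create an unusably small free block.
 */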
void xmem_pool_free(void *ptr, struct xmem_pool *pool)
{
    struct bhdr *b, *tmp_b;
    int fl = 0, sl = 0;

    if ( unlikely(ptr == NULL) )
        return;

    b = (struct bhdr *)((char *)ptr - BHDR_OVERHEAD);

    spin_lock(&pool->lock);
    b->size |= FREE_BLOCK;
    pool->used_size -= (b->size & BLOCK_SIZE_MASK) + BHDR_OVERHEAD;
    b->ptr.free_ptr = (struct free_ptr) {NULL, NULL};
    tmp_b = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE_MASK);
    if ( tmp_b->size & FREE_BLOCK )
    {
        /* Coalesce with the following free block */
        MAPPING_INSERT(tmp_b->size & BLOCK_SIZE_MASK, &fl, &sl);
        EXTRACT_BLOCK(tmp_b, pool, fl, sl);
        b->size += (tmp_b->size & BLOCK_SIZE_MASK) + BHDR_OVERHEAD;
    }
    if ( b->size & PREV_FREE )
    {
        /* Coalesce with the preceding free block */
        tmp_b = b->prev_hdr;
        MAPPING_INSERT(tmp_b->size & BLOCK_SIZE_MASK, &fl, &sl);
        EXTRACT_BLOCK(tmp_b, pool, fl, sl);
        tmp_b->size += (b->size & BLOCK_SIZE_MASK) + BHDR_OVERHEAD;
        b = tmp_b;
    }
    tmp_b = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE_MASK);
    tmp_b->prev_hdr = b;

    MAPPING_INSERT(b->size & BLOCK_SIZE_MASK, &fl, &sl);

    /* Region completely free: return it to the provider */
    if ( (b->prev_hdr == NULL) && ((tmp_b->size & BLOCK_SIZE_MASK) == 0) )
    {
        pool->put_mem(b);
        pool->num_regions--;
        pool->used_size -= BHDR_OVERHEAD; /* sentinel block header */
        goto out;
    }

    INSERT_BLOCK(b, pool, fl, sl);

    tmp_b->size |= PREV_FREE;
    tmp_b->prev_hdr = b;
 out:
    spin_unlock(&pool->lock);
}
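
/*
 * Freeing coalesces eagerly: the block absorbs a free successor, then a
 * free predecessor absorbs it, so no two free blocks are ever adjacent.
 * When the merged block spans a whole region (no prev_hdr and the next
 * block is the zero-size sentinel), the region itself is handed back to
 * pool->put_mem rather than kept on a free list.
 */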
int xmem_pool_maxalloc(struct xmem_pool *pool)
{
    return pool->grow_size - (2 * BHDR_OVERHEAD);
}
/*
 * Glue for xmalloc().
 */

static struct xmem_pool *xenpool;

static void *xmalloc_pool_get(unsigned long size)
{
    ASSERT(size == PAGE_SIZE);
    return alloc_xenheap_page();
}

static void xmalloc_pool_put(void *p)
{
    free_xenheap_page(p);
}
static void *xmalloc_whole_pages(unsigned long size)
{
    struct bhdr *b;
    unsigned int pageorder = get_order_from_bytes(size + BHDR_OVERHEAD);

    b = alloc_xenheap_pages(pageorder, 0);
    if ( b == NULL )
        return NULL;

    b->size = (1 << (pageorder + PAGE_SHIFT));
    return (void *)b->ptr.buffer;
}
static void tlsf_init(void)
{
    INIT_LIST_HEAD(&pool_list_head);
    spin_lock_init(&pool_list_lock);
    xenpool = xmem_pool_create(
        "xmalloc", xmalloc_pool_get, xmalloc_pool_put,
        PAGE_SIZE, 0, PAGE_SIZE);
    BUG_ON(!xenpool);
}
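
/*
 * This wires the generic pool to the Xen page allocator: every region
 * is exactly one xenheap page (init and grow sizes of PAGE_SIZE), and a
 * max_size of 0 leaves growth unbounded, so xmalloc() scales with the
 * heap rather than a fixed arena.
 */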
void *_xmalloc(unsigned long size, unsigned long align)
{
    void *p = NULL;
    u32 pad;

    ASSERT((align & (align - 1)) == 0);
    if ( align < MEM_ALIGN )
        align = MEM_ALIGN;
    size += align - MEM_ALIGN;

    if ( !xenpool )
        tlsf_init();

    if ( size < PAGE_SIZE )
        p = xmem_pool_alloc(size, xenpool);
    if ( p == NULL )
        p = xmalloc_whole_pages(size);

    /* Add alignment padding. */
    if ( (pad = -(long)p & (align - 1)) != 0 )
    {
        char *q = (char *)p + pad;
        struct bhdr *b = (struct bhdr *)(q - BHDR_OVERHEAD);

        ASSERT(q > (char *)p);
        b->size = pad | 1;
        p = q;
    }

    ASSERT(((unsigned long)p & (align - 1)) == 0);
    return p;
}
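
/*
 * The padding trick: when alignment shifts the returned pointer, a fake
 * block header is written just before the aligned address with bit 0
 * set and the pad size in the upper bits.  Real block sizes always have
 * bit 0 clear at this point (the block is in use), so xfree() below can
 * tell a pad header from a real one and walk back to the true start of
 * the allocation.
 */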
void xfree(void *p)
{
    struct bhdr *b;

    if ( p == NULL )
        return;

    /* Strip alignment padding. */
    b = (struct bhdr *)((char *)p - BHDR_OVERHEAD);
    if ( b->size & 1 )
    {
        p = (char *)p - (b->size & ~1u);
        b = (struct bhdr *)((char *)p - BHDR_OVERHEAD);
        ASSERT(!(b->size & 1));
    }

    if ( b->size >= PAGE_SIZE )
        free_xenheap_pages((void *)b, get_order_from_bytes(b->size));
    else
        xmem_pool_free(p, xenpool);
}