/*
 * Basic general purpose allocator for managing special purpose
 * memory, for example, memory that is not managed by the regular
 * kmalloc/kfree interface.  Uses for this include on-device special
 * memory, uncached memory etc.
 *
 * It is safe to use the allocator in NMI handlers and other special
 * unblockable contexts that could otherwise deadlock on locks.  This
 * is implemented by using atomic operations and retries on any
 * conflicts.  The disadvantage is that there may be livelocks in
 * extreme cases.  For better scalability, one allocator can be used
 * for each CPU.
 *
 * The lockless operation only works if there is enough memory
 * available.  If new memory is added to the pool, a lock still has
 * to be taken.  So any user relying on locklessness has to ensure
 * that sufficient memory is preallocated.
 *
 * The basic atomic operation of this allocator is cmpxchg on long.
 * On architectures that don't have an NMI-safe cmpxchg implementation,
 * the allocator can NOT be used in NMI handlers.  So code that uses
 * the allocator in an NMI handler should depend on
 * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
 *
 * Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bitmap.h>
#include <linux/rculist.h>
#include <linux/interrupt.h>
#include <linux/genalloc.h>

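/*
 * Illustrative usage sketch, not part of the original file: a driver
 * managing on-device SRAM might set up a pool as below.  The
 * my_sram_init() name and the addresses/sizes are hypothetical and
 * exist only for this example.
 */
static struct gen_pool *my_sram_pool;

static int __maybe_unused my_sram_init(unsigned long virt, phys_addr_t phys)
{
	struct gen_pool *pool;
	int rc;

	pool = gen_pool_create(5, -1);	/* 32-byte allocation granules */
	if (!pool)
		return -ENOMEM;

	/*
	 * Adding memory takes the pool lock, so users relying on the
	 * lockless fast path must preallocate enough memory here.
	 */
	rc = gen_pool_add_virt(pool, virt, phys, 1024 * 1024, -1);
	if (rc) {
		gen_pool_destroy(pool);
		return rc;
	}
	my_sram_pool = pool;
	return 0;
}
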
static int set_bits_ll(unsigned long *addr, unsigned long mask_to_set)
{
	unsigned long val, nval;

	nval = *addr;
	do {
		val = nval;
		/* fail if any bit to be set is already set */
		if (val & mask_to_set)
			return -EBUSY;
		cpu_relax();
	} while ((nval = cmpxchg(addr, val, val | mask_to_set)) != val);

	return 0;
}

static int clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear)
{
	unsigned long val, nval;

	nval = *addr;
	do {
		val = nval;
		/* fail unless every bit to be cleared is currently set */
		if ((val & mask_to_clear) != mask_to_clear)
			return -EBUSY;
		cpu_relax();
	} while ((nval = cmpxchg(addr, val, val & ~mask_to_clear)) != val);

	return 0;
}

/*
 * bitmap_set_ll - set the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to set
 *
 * Set @nr bits starting from @start in @map lock-lessly.  Several users
 * can set/clear the same bitmap simultaneously without a lock.  If two
 * users set the same bit, one of them will fail and return the number
 * of bits still left to set; otherwise 0 is returned.
 */
static int bitmap_set_ll(unsigned long *map, int start, int nr)
{
	unsigned long *p = map + BIT_WORD(start);
	const int size = start + nr;
	int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
	unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);

	while (nr - bits_to_set >= 0) {
		if (set_bits_ll(p, mask_to_set))
			return nr;
		nr -= bits_to_set;
		bits_to_set = BITS_PER_LONG;
		mask_to_set = ~0UL;
		p++;
	}
	if (nr) {
		mask_to_set &= BITMAP_LAST_WORD_MASK(size);
		if (set_bits_ll(p, mask_to_set))
			return nr;
	}

	return 0;
}

/*
 * bitmap_clear_ll - clear the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to clear
 *
 * Clear @nr bits starting from @start in @map lock-lessly.  Several users
 * can set/clear the same bitmap simultaneously without a lock.  If two
 * users clear the same bit, one of them will fail and return the number
 * of bits still left to clear; otherwise 0 is returned.
 */
static int bitmap_clear_ll(unsigned long *map, int start, int nr)
{
	unsigned long *p = map + BIT_WORD(start);
	const int size = start + nr;
	int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
	unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);

	while (nr - bits_to_clear >= 0) {
		if (clear_bits_ll(p, mask_to_clear))
			return nr;
		nr -= bits_to_clear;
		bits_to_clear = BITS_PER_LONG;
		mask_to_clear = ~0UL;
		p++;
	}
	if (nr) {
		mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
		if (clear_bits_ll(p, mask_to_clear))
			return nr;
	}

	return 0;
}

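/*
 * Illustrative sketch, not part of the original file: on a conflict the
 * lockless set reports how many bits were left unset, and the caller is
 * expected to roll back its partial set and retry.  gen_pool_alloc()
 * below uses exactly this pattern.
 */
static int __maybe_unused example_reserve_bits(unsigned long *map,
					       int start, int nr)
{
	int remain;

	remain = bitmap_set_ll(map, start, nr);
	if (remain) {
		/* undo the nr - remain bits that were set before the conflict */
		bitmap_clear_ll(map, start, nr - remain);
		return -EBUSY;
	}
	return 0;
}
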
/**
 * gen_pool_create - create a new special memory pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node id of the node the pool structure should be allocated on, or -1
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface.
 */
struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
{
	struct gen_pool *pool;

	pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
	if (pool != NULL) {
		spin_lock_init(&pool->lock);
		INIT_LIST_HEAD(&pool->chunks);
		pool->min_alloc_order = min_alloc_order;
	}
	return pool;
}
EXPORT_SYMBOL(gen_pool_create);

/**
 * gen_pool_add_virt - add a new chunk of special memory to the pool
 * @pool: pool to add new memory chunk to
 * @virt: virtual starting address of memory chunk to add to pool
 * @phys: physical starting address of memory chunk to add to pool
 * @size: size in bytes of the memory chunk to add to pool
 * @nid: node id of the node the chunk structure and bitmap should be
 *       allocated on, or -1
 *
 * Add a new chunk of special memory to the specified pool.
 *
 * Returns 0 on success or a -ve errno on failure.
 */
int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phys,
		      size_t size, int nid)
{
	struct gen_pool_chunk *chunk;
	int nbits = size >> pool->min_alloc_order;
	int nbytes = sizeof(struct gen_pool_chunk) +
				(nbits + BITS_PER_BYTE - 1) / BITS_PER_BYTE;

	chunk = kmalloc_node(nbytes, GFP_KERNEL | __GFP_ZERO, nid);
	if (unlikely(chunk == NULL))
		return -ENOMEM;

	chunk->phys_addr = phys;
	chunk->start_addr = virt;
	chunk->end_addr = virt + size;
	atomic_set(&chunk->avail, size);

	spin_lock(&pool->lock);
	list_add_rcu(&chunk->next_chunk, &pool->chunks);
	spin_unlock(&pool->lock);

	return 0;
}
EXPORT_SYMBOL(gen_pool_add_virt);

/**
 * gen_pool_virt_to_phys - return the physical address of memory
 * @pool: pool to allocate from
 * @addr: starting address of memory
 *
 * Returns the physical address on success, or -1 on error.
 */
phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)
{
	struct gen_pool_chunk *chunk;
	phys_addr_t paddr = -1;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (addr >= chunk->start_addr && addr < chunk->end_addr) {
			paddr = chunk->phys_addr + (addr - chunk->start_addr);
			break;
		}
	}
	rcu_read_unlock();

	return paddr;
}
EXPORT_SYMBOL(gen_pool_virt_to_phys);

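/*
 * Illustrative sketch, not part of the original file: a driver that
 * hands a pool allocation to hardware needs the physical address.  The
 * function name and buffer size are hypothetical.
 */
static void __maybe_unused example_setup_dma(struct gen_pool *pool)
{
	unsigned long vaddr;
	phys_addr_t paddr;

	vaddr = gen_pool_alloc(pool, 256);
	if (!vaddr)
		return;
	paddr = gen_pool_virt_to_phys(pool, vaddr);
	/* ... program paddr into a device register here ... */
	pr_debug("DMA buffer at %llx\n", (unsigned long long)paddr);
	gen_pool_free(pool, vaddr, 256);
}
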
/**
 * gen_pool_alloc - allocate special memory from the pool
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses a first-fit algorithm.  Cannot be used in NMI handlers on
 * architectures without an NMI-safe cmpxchg implementation.
 */
unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
{
	struct gen_pool_chunk *chunk;
	unsigned long addr = 0;
	int order = pool->min_alloc_order;
	int nbits, start_bit = 0, end_bit, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
	BUG_ON(in_nmi());
#endif

	if (size == 0)
		return 0;

	/* round up to the allocation granularity: one bit per 1 << order bytes */
	nbits = (size + (1UL << order) - 1) >> order;
	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (size > atomic_read(&chunk->avail))
			continue;

		end_bit = (chunk->end_addr - chunk->start_addr) >> order;
retry:
		start_bit = bitmap_find_next_zero_area(chunk->bits, end_bit,
						       start_bit, nbits, 0);
		if (start_bit >= end_bit)
			continue;
		remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
		if (remain) {
			/* lost a race: undo the partial set and search again */
			remain = bitmap_clear_ll(chunk->bits, start_bit,
						 nbits - remain);
			BUG_ON(remain);
			goto retry;
		}

		addr = chunk->start_addr + ((unsigned long)start_bit << order);
		size = nbits << order;
		atomic_sub(size, &chunk->avail);
		break;
	}
	rcu_read_unlock();
	return addr;
}
EXPORT_SYMBOL(gen_pool_alloc);

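/*
 * Worked example of the size rounding above (illustrative): with
 * min_alloc_order = 5, each bitmap bit covers 1 << 5 = 32 bytes, so a
 * request for 100 bytes needs nbits = (100 + 31) >> 5 = 4 bits and
 * consumes 4 << 5 = 128 bytes of the pool.
 */
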
/**
 * gen_pool_free - free allocated special memory back to the pool
 * @pool: pool to free to
 * @addr: starting address of memory to free back to pool
 * @size: size in bytes of memory to free
 *
 * Free previously allocated special memory back to the specified
 * pool.  Cannot be used in NMI handlers on architectures without an
 * NMI-safe cmpxchg implementation.
 */
void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
{
	struct gen_pool_chunk *chunk;
	int order = pool->min_alloc_order;
	int start_bit, nbits, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
	BUG_ON(in_nmi());
#endif

	nbits = (size + (1UL << order) - 1) >> order;
	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (addr >= chunk->start_addr && addr < chunk->end_addr) {
			BUG_ON(addr + size > chunk->end_addr);
			start_bit = (addr - chunk->start_addr) >> order;
			remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);
			BUG_ON(remain);
			size = nbits << order;
			atomic_add(size, &chunk->avail);
			rcu_read_unlock();
			return;
		}
	}
	rcu_read_unlock();
	BUG();
}
EXPORT_SYMBOL(gen_pool_free);

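/*
 * Illustrative sketch, not part of the original file: the pool does not
 * record per-allocation sizes, so callers must pass the original size
 * back to gen_pool_free().  Freeing with a different size corrupts the
 * accounting (and may hit the BUG_ON above).
 */
static void __maybe_unused example_alloc_then_free(struct gen_pool *pool)
{
	unsigned long buf = gen_pool_alloc(pool, 512);

	if (buf)
		gen_pool_free(pool, buf, 512);	/* same size as allocated */
}
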
/**
 * gen_pool_for_each_chunk - call func for every chunk of generic memory pool
 * @pool: the generic memory pool
 * @func: func to call
 * @data: additional data used by @func
 *
 * Call @func for every chunk of generic memory pool.  The @func is
 * called with rcu_read_lock held.
 */
void gen_pool_for_each_chunk(struct gen_pool *pool,
	void (*func)(struct gen_pool *pool, struct gen_pool_chunk *chunk, void *data),
	void *data)
{
	struct gen_pool_chunk *chunk;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk)
		func(pool, chunk, data);
	rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_for_each_chunk);

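/*
 * Illustrative sketch, not part of the original file: a walker callback
 * that tallies the size of every chunk.  Because @func runs under
 * rcu_read_lock, it must not sleep.
 * Usage: gen_pool_for_each_chunk(pool, example_count_chunk, &total);
 */
static void __maybe_unused example_count_chunk(struct gen_pool *pool,
		struct gen_pool_chunk *chunk, void *data)
{
	size_t *total = data;

	*total += chunk->end_addr - chunk->start_addr;
}
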
/**
 * gen_pool_avail - get available free space of the pool
 * @pool: pool to get available free space
 *
 * Return available free space of the specified pool.
 */
size_t gen_pool_avail(struct gen_pool *pool)
{
	struct gen_pool_chunk *chunk;
	size_t avail = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
		avail += atomic_read(&chunk->avail);
	rcu_read_unlock();
	return avail;
}
EXPORT_SYMBOL_GPL(gen_pool_avail);

/**
 * gen_pool_size - get size in bytes of memory managed by the pool
 * @pool: pool to get size
 *
 * Return size in bytes of memory managed by the pool.
 */
size_t gen_pool_size(struct gen_pool *pool)
{
	struct gen_pool_chunk *chunk;
	size_t size = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
		size += chunk->end_addr - chunk->start_addr;
	rcu_read_unlock();
	return size;
}
EXPORT_SYMBOL_GPL(gen_pool_size);
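
/*
 * Illustrative sketch, not part of the original file: a user relying on
 * the lockless fast path can verify up front that enough memory was
 * preallocated, since growing the pool later requires taking a lock.
 */
static bool __maybe_unused example_pool_is_ready(struct gen_pool *pool,
						 size_t needed)
{
	return gen_pool_avail(pool) >= needed;
}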