		list_add_tail(&node->node_list, &mm->unused_nodes);
	}
	spin_unlock(&mm->unused_lock);
	return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);
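/*
 * Free space is tracked implicitly: a node with hole_follows set is directly
 * followed by a gap, which extends from the end of that node up to the start
 * of the next node on the address-ordered node_list. Every node with such a
 * trailing hole additionally sits on mm->hole_stack, which is what the
 * allocation paths scan. The two helpers below compute the bounds of the
 * hole behind a node.
 */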
static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
{
	return hole_node->start + hole_node->size;
}

static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
{
	struct drm_mm_node *next_node =
		list_entry(hole_node->node_list.next, struct drm_mm_node,
			   node_list);

	return next_node->start;
}

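/*
 * Fill @node into the hole spanned by @hole_node: place it at the (aligned)
 * hole start and update the hole bookkeeping on both nodes. Space left over
 * at the tail of the hole remains allocatable via node->hole_follows.
 */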
static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
				 struct drm_mm_node *node,
				 unsigned long size, unsigned alignment)
{
	struct drm_mm *mm = hole_node->mm;
	unsigned long tmp = 0, wasted = 0;
	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
	unsigned long hole_end = drm_mm_hole_node_end(hole_node);

	BUG_ON(!hole_node->hole_follows || node->allocated);

	if (alignment)
		tmp = hole_start % alignment;

	if (!tmp) {
		hole_node->hole_follows = 0;
		list_del_init(&hole_node->hole_stack);
	} else
		wasted = alignment - tmp;

	node->start = hole_start + wasted;
	node->size = size;
	node->mm = mm;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start + node->size > hole_end);

	if (node->start + node->size < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	} else {
		node->hole_follows = 0;
	}
}

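/*
 * Kmalloc-backed wrapper around drm_mm_insert_helper(): allocates the struct
 * drm_mm_node (drawing from the mm->unused_nodes cache when @atomic is set)
 * and fills it into the hole behind @hole_node.
 */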
struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
					     unsigned long size,
					     unsigned alignment,
					     int atomic)
{
	struct drm_mm_node *node;

	node = drm_mm_kmalloc(hole_node->mm, atomic);
	if (unlikely(node == NULL))
		return NULL;

	drm_mm_insert_helper(hole_node, node, size, alignment);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_generic);
/**
 * Search for free space and insert a preallocated memory node. Returns
 * -ENOSPC if no suitable free area is available. The preallocated memory node
 * must be cleared.
 */
int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
		       unsigned long size, unsigned alignment)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free(mm, size, alignment, 0);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper(hole_node, node, size, alignment);

	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node);
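/*
 * Example (illustrative sketch only, the names are hypothetical): a driver
 * typically embeds the node in its own buffer object and lets drm_mm find a
 * hole for it:
 *
 *	struct my_bo {
 *		struct drm_mm_node node;	// must be zeroed before use
 *		...
 *	};
 *
 *	int ret = drm_mm_insert_node(&dev_priv->mm, &bo->node, size, align);
 *	if (ret)
 *		return ret;	// -ENOSPC: no hole large enough
 */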
static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
				       struct drm_mm_node *node,
				       unsigned long size, unsigned alignment,
				       unsigned long start, unsigned long end)
{
	struct drm_mm *mm = hole_node->mm;
	unsigned long tmp = 0, wasted = 0;
	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
	unsigned long hole_end = drm_mm_hole_node_end(hole_node);

	BUG_ON(!hole_node->hole_follows || node->allocated);

	if (hole_start < start)
		wasted += start - hole_start;
	if (alignment)
		tmp = (hole_start + wasted) % alignment;

	if (tmp)
		wasted += alignment - tmp;

	if (!wasted) {
		hole_node->hole_follows = 0;
		list_del_init(&hole_node->hole_stack);
	}

	node->start = hole_start + wasted;
	node->size = size;
	node->mm = mm;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start + node->size > hole_end);
	BUG_ON(node->start + node->size > end);

	if (node->start + node->size < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	} else {
		node->hole_follows = 0;
	}
}

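/*
 * Range-restricted counterpart of drm_mm_get_block_generic(): the returned
 * block is guaranteed to lie entirely within [start, end].
 */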
struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
						unsigned long size,
						unsigned alignment,
						unsigned long start,
						unsigned long end,
						int atomic)
{
	struct drm_mm_node *node;

	node = drm_mm_kmalloc(hole_node->mm, atomic);
	if (unlikely(node == NULL))
		return NULL;

	drm_mm_insert_helper_range(hole_node, node, size, alignment,
				   start, end);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_range_generic);
/**
 * Search for free space and insert a preallocated memory node. Returns
 * -ENOSPC if no suitable free area is available. This is for range
 * restricted allocations. The preallocated memory node must be cleared.
 */
int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
				unsigned long size, unsigned alignment,
				unsigned long start, unsigned long end)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free_in_range(mm, size, alignment,
						start, end, 0);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper_range(hole_node, node, size, alignment,
				   start, end);

	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range);
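/*
 * Example (illustrative only, the mm/bo names are hypothetical): confining
 * an allocation to the first 256 MiB of an aperture, e.g. for hardware that
 * cannot address buffers beyond that boundary:
 *
 *	ret = drm_mm_insert_node_in_range(&dev_priv->mm, &bo->node,
 *					  size, align, 0, 256 << 20);
 */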
/**
 * Remove a memory node from the allocator.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	BUG_ON(node->scanned_block || node->scanned_prev_free
				   || node->scanned_next_free);

	prev_node =
	    list_entry(node->node_list.prev, struct drm_mm_node, node_list);

	if (node->hole_follows) {
		BUG_ON(drm_mm_hole_node_start(node)
				== drm_mm_hole_node_end(node));
		list_del(&node->hole_stack);
	} else
		BUG_ON(drm_mm_hole_node_start(node)
				!= drm_mm_hole_node_end(node));

	if (!prev_node->hole_follows) {
		prev_node->hole_follows = 1;
		list_add(&prev_node->hole_stack, &mm->hole_stack);
	} else
		list_move(&prev_node->hole_stack, &mm->hole_stack);

	list_del(&node->node_list);
	node->allocated = 0;
}
EXPORT_SYMBOL(drm_mm_remove_node);
/*
 * Remove a memory node from the allocator and free the allocated struct
 * drm_mm_node. Only to be used on a struct drm_mm_node obtained by one of the
 * drm_mm_get_block functions.
 */
void drm_mm_put_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;

	drm_mm_remove_node(node);

	spin_lock(&mm->unused_lock);
	if (mm->num_unused < MM_UNUSED_TARGET) {
		list_add(&node->node_list, &mm->unused_nodes);
		++mm->num_unused;
	} else
		kfree(node);
	spin_unlock(&mm->unused_lock);
}
EXPORT_SYMBOL(drm_mm_put_block);
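/*
 * Helper for the search and scan paths: checks whether a hole spanning
 * [start, end) can fit an allocation of @size with the given @alignment.
 */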
static int check_free_hole(unsigned long start, unsigned long end,
			   unsigned long size, unsigned alignment)
{
	unsigned wasted = 0;

	if (end - start < size)
		return 0;

	if (alignment) {
		unsigned tmp = start % alignment;
		if (tmp)
			wasted = alignment - tmp;
	}

	if (end >= start + size + wasted)
		return 1;

	return 0;
}

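/*
 * Eviction scan support: drm_mm_scan_add_block() tentatively unlinks a block
 * and merges it into the hole behind its predecessor, so a roster of
 * candidate evictees can be built up until the combined hole is big enough
 * for the pending allocation.
 */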
int drm_mm_scan_add_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;
	unsigned long hole_start, hole_end;
	unsigned long adj_start;
	unsigned long adj_end;

	mm->scanned_blocks++;

	BUG_ON(node->scanned_block);
	node->scanned_block = 1;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	node->scanned_preceeds_hole = prev_node->hole_follows;
	prev_node->hole_follows = 1;
	list_del(&node->node_list);
	node->node_list.prev = &prev_node->node_list;
	node->node_list.next = &mm->prev_scanned_node->node_list;
	mm->prev_scanned_node = node;

	hole_start = drm_mm_hole_node_start(prev_node);
	hole_end = drm_mm_hole_node_end(prev_node);
	if (mm->scan_check_range) {
		adj_start = hole_start < mm->scan_start ?
			mm->scan_start : hole_start;
		adj_end = hole_end > mm->scan_end ?
			mm->scan_end : hole_end;
	} else {
		adj_start = hole_start;
		adj_end = hole_end;
	}

	if (check_free_hole(adj_start, adj_end,
			    mm->scan_size, mm->scan_alignment)) {
		mm->scan_hit_start = hole_start;
		mm->scan_hit_size = hole_end;

		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);
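/*
 * Note on the protocol (as implied by the list surgery above): every block
 * added with drm_mm_scan_add_block() must later be removed again with
 * drm_mm_scan_remove_block(), in the reverse order of addition, before the
 * memory manager is used for anything else.
 */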
int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
{
	INIT_LIST_HEAD(&mm->hole_stack);
	INIT_LIST_HEAD(&mm->unused_nodes);
	mm->num_unused = 0;
	mm->scanned_blocks = 0;
	spin_lock_init(&mm->unused_lock);

	/* Clever trick to avoid a special case in the free hole tracking. */
	INIT_LIST_HEAD(&mm->head_node.node_list);
	INIT_LIST_HEAD(&mm->head_node.hole_stack);
	mm->head_node.hole_follows = 1;
	mm->head_node.scanned_block = 0;
	mm->head_node.scanned_prev_free = 0;
	mm->head_node.scanned_next_free = 0;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = start - mm->head_node.start;
	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);

	return 0;
}
EXPORT_SYMBOL(drm_mm_init);
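/*
 * A note on the sentinel arithmetic above: head_node.start is start + size
 * and head_node.size is negative (start - head_node.start), so
 * drm_mm_hole_node_start(&mm->head_node) wraps around to exactly 'start'.
 * With node_list circular, an empty manager thus appears as a single hole
 * covering [start, start + size) without any special-casing.
 */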
void drm_mm_takedown(struct drm_mm * mm)
{
	struct drm_mm_node *entry, *next;

	if (!list_empty(&mm->head_node.node_list)) {
		DRM_ERROR("Memory manager not clean. Delaying takedown\n");
		return;
	}

	spin_lock(&mm->unused_lock);
	list_for_each_entry_safe(entry, next, &mm->unused_nodes, node_list) {
		list_del(&entry->node_list);
		kfree(entry);
		--mm->num_unused;
	}
	spin_unlock(&mm->unused_lock);

	BUG_ON(mm->num_unused != 0);
}
EXPORT_SYMBOL(drm_mm_takedown);
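/*
 * The two dump helpers below print the hole behind the head_node sentinel
 * first (the free space at the very start of the managed range, if any) and
 * then walk the node list, reporting each allocated block together with any
 * hole that follows it.
 */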
void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;
	unsigned long hole_start, hole_end, hole_size;

	hole_start = drm_mm_hole_node_start(&mm->head_node);
	hole_end = drm_mm_hole_node_end(&mm->head_node);
	hole_size = hole_end - hole_start;
	if (hole_size)
		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
			prefix, hole_start, hole_end,
			hole_size);
	total_free += hole_size;

	drm_mm_for_each_node(entry, mm) {
		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: used\n",
			prefix, entry->start, entry->start + entry->size,
			entry->size);
		total_used += entry->size;

		if (entry->hole_follows) {
			hole_start = drm_mm_hole_node_start(entry);
			hole_end = drm_mm_hole_node_end(entry);
			hole_size = hole_end - hole_start;
			printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
				prefix, hole_start, hole_end,
				hole_size);
			total_free += hole_size;
		}
	}
	total = total_free + total_used;

	printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total,
		total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;
	unsigned long hole_start, hole_end, hole_size;

	hole_start = drm_mm_hole_node_start(&mm->head_node);
	hole_end = drm_mm_hole_node_end(&mm->head_node);
	hole_size = hole_end - hole_start;
	if (hole_size)
		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
				hole_start, hole_end, hole_size);
	total_free += hole_size;

	drm_mm_for_each_node(entry, mm) {
		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n",
				entry->start, entry->start + entry->size,
				entry->size);
		total_used += entry->size;
		if (entry->hole_follows) {
			hole_start = drm_mm_hole_node_start(entry);
			hole_end = drm_mm_hole_node_end(entry);
			hole_size = hole_end - hole_start;
			seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
					hole_start, hole_end, hole_size);
			total_free += hole_size;
		}
	}
	total = total_free + total_used;

	seq_printf(m, "total: %lu, used %lu free %lu\n", total, total_used,
			total_free);
	return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);