#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_double_list.h"

#include "nouveau_screen.h"
#include "nouveau_mm.h"

#include "nouveau/nouveau_bo.h"

#define MM_MIN_ORDER 7
#define MM_MAX_ORDER 20

#define MM_NUM_BUCKETS (MM_MAX_ORDER - MM_MIN_ORDER + 1)

#define MM_MIN_SIZE (1 << MM_MIN_ORDER)
#define MM_MAX_SIZE (1 << MM_MAX_ORDER)
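
/* Slabs of one chunk size (1 << order) are kept on three lists according
 * to occupancy: entirely free, partially used, and completely full.
 */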
struct mm_bucket {
   struct list_head free;
   struct list_head used;
   struct list_head full;
};
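
/* Per-domain suballocator state: one bucket per power-of-two chunk size
 * between MM_MIN_ORDER and MM_MAX_ORDER.
 */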
struct nouveau_mman {
   struct nouveau_device *dev;
   struct mm_bucket bucket[MM_NUM_BUCKETS];
   uint32_t storage_type;
   uint32_t domain;
   uint64_t allocated;
};
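
/* A slab is one big bo carved into equally sized chunks; the trailing
 * bits[] array holds one set bit per free chunk.
 */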
struct mm_slab {
   struct list_head head;
   struct nouveau_bo *bo;
   struct nouveau_mman *cache;
   int order;
   int count;
   int free;
   uint32_t bits[0];
};
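
/* Grab the lowest free chunk via ffs() on the bitmask and clear its bit.
 * Returns the chunk index, or -1 if the slab is full.
 */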
static int
mm_slab_alloc(struct mm_slab *slab)
{
   int i, n, b;

   if (slab->free == 0)
      return -1;

   for (i = 0; i < (slab->count + 31) / 32; ++i) {
      b = ffs(slab->bits[i]) - 1;
      if (b >= 0) {
         n = i * 32 + b;
         assert(n < slab->count);
         slab->free--;
         slab->bits[i] &= ~(1 << b);
         return n;
      }
   }
   return -1;
}
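
/* Return chunk i to the slab by setting its bit in the free mask again. */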
static INLINE void
mm_slab_free(struct mm_slab *slab, int i)
{
   assert(i < slab->count);
   slab->bits[i / 32] |= 1 << (i % 32);
   slab->free++;
   assert(slab->free <= slab->count);
}
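
/* log2 of size, rounded up: the order of the smallest chunk that fits. */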
static INLINE int
mm_get_order(uint32_t size)
{
   int s = __builtin_clz(size) ^ 31;
   if (size > (1 << s))
      s += 1;
   return s;
}
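
/* Sizes above MM_MAX_SIZE are not cached (NULL bucket); sizes below
 * MM_MIN_SIZE are rounded up to the smallest bucket.
 */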
static struct mm_bucket *
mm_bucket_by_order(struct nouveau_mman *cache, int order)
{
   if (order > MM_MAX_ORDER)
      return NULL;
   return &cache->bucket[MAX2(order, MM_MIN_ORDER) - MM_MIN_ORDER];
}

static struct mm_bucket *
mm_bucket_by_size(struct nouveau_mman *cache, unsigned size)
{
   return mm_bucket_by_order(cache, mm_get_order(size));
}

/* size of bo allocation for slab with chunks of (1 << chunk_order) bytes */
static INLINE uint32_t
mm_default_slab_size(unsigned chunk_order)
{
   static const int8_t slab_order[MM_MAX_ORDER - MM_MIN_ORDER + 1] =
   {
      12, 12, 13, 14, 14, 17, 17, 17, 17, 19, 19, 20, 21, 22
   };

   assert(chunk_order <= MM_MAX_ORDER && chunk_order >= MM_MIN_ORDER);

   return 1 << slab_order[chunk_order - MM_MIN_ORDER];
}
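
/* Allocate a new slab bo of mm_default_slab_size() bytes, mark every chunk
 * free in the bitmask and put the slab on its bucket's free list.
 */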
static int
mm_slab_new(struct nouveau_mman *cache, int chunk_order)
{
   struct mm_slab *slab;
   int words, ret;
   const uint32_t size = mm_default_slab_size(chunk_order);

   words = ((size >> chunk_order) + 31) / 32;

   slab = MALLOC(sizeof(struct mm_slab) + words * 4);
   if (!slab)
      return PIPE_ERROR_OUT_OF_MEMORY;

   memset(&slab->bits[0], ~0, words * 4);

   slab->bo = NULL;
   ret = nouveau_bo_new_tile(cache->dev, cache->domain, 0, size,
                             0, cache->storage_type, &slab->bo);
   if (ret) {
      FREE(slab);
      return PIPE_ERROR_OUT_OF_MEMORY;
   }

   LIST_INITHEAD(&slab->head);

   slab->cache = cache;
   slab->order = chunk_order;
   slab->count = slab->free = size >> chunk_order;

   LIST_ADD(&slab->head, &mm_bucket_by_order(cache, chunk_order)->free);

   cache->allocated += size;

   debug_printf("MM: new slab, total memory = %llu KiB\n",
                cache->allocated / 1024);

   return PIPE_OK;
}
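
/* Prefer a partially used slab, fall back to a free one, and create a new
 * slab only when the bucket is empty. Sizes without a bucket (too large)
 * bypass the cache and get a dedicated bo.
 */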
/* @return token to identify slab or NULL if we just allocated a new bo */
struct nouveau_mm_allocation *
nouveau_mm_allocate(struct nouveau_mman *cache,
                    uint32_t size, struct nouveau_bo **bo, uint32_t *offset)
{
   struct mm_bucket *bucket;
   struct mm_slab *slab;
   struct nouveau_mm_allocation *alloc;
   int ret;

   bucket = mm_bucket_by_size(cache, size);
   if (!bucket) {
      ret = nouveau_bo_new_tile(cache->dev, cache->domain, 0, size,
                                0, cache->storage_type, bo);
      if (ret)
         debug_printf("bo_new(%x, %x): %i\n", size, cache->storage_type, ret);

      *offset = 0;
      return NULL;
   }

   if (!LIST_IS_EMPTY(&bucket->used)) {
      slab = LIST_ENTRY(struct mm_slab, bucket->used.next, head);
   } else {
      if (LIST_IS_EMPTY(&bucket->free)) {
         mm_slab_new(cache, MAX2(mm_get_order(size), MM_MIN_ORDER));
      }
      slab = LIST_ENTRY(struct mm_slab, bucket->free.next, head);

      LIST_DEL(&slab->head);
      LIST_ADD(&slab->head, &bucket->used);
   }

   *offset = mm_slab_alloc(slab) << slab->order;

   alloc = MALLOC_STRUCT(nouveau_mm_allocation);
   if (!alloc)
      return NULL;

   nouveau_bo_ref(slab->bo, bo);

   if (slab->free == 0) {
      LIST_DEL(&slab->head);
      LIST_ADD(&slab->head, &bucket->full);
   }

   alloc->offset = *offset;
   alloc->priv = (void *)slab;

   return alloc;
}
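
/* Release one suballocation; when the first chunk of a full slab comes
 * back, move the slab off the full list so it can be allocated from again.
 */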
void
nouveau_mm_free(struct nouveau_mm_allocation *alloc)
{
   struct mm_slab *slab = (struct mm_slab *)alloc->priv;
   struct mm_bucket *bucket = mm_bucket_by_order(slab->cache, slab->order);

   mm_slab_free(slab, alloc->offset >> slab->order);

   if (slab->free == 1) {
      LIST_DEL(&slab->head);

      if (slab->count > 1)
         LIST_ADDTAIL(&slab->head, &bucket->used);
      else
         LIST_ADDTAIL(&slab->head, &bucket->free);
   }

   FREE(alloc);
}
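
/* Adapter so a free can be run as deferred work, e.g. from a fence
 * completion callback (assumed usage; the callers live outside this file).
 */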
void
nouveau_mm_free_work(void *data)
{
   nouveau_mm_free(data);
}
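
/* Create a suballocator for one memory domain and storage type; all
 * bucket lists start out empty.
 */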
struct nouveau_mman *
nouveau_mm_create(struct nouveau_device *dev, uint32_t domain,
                  uint32_t storage_type)
{
   struct nouveau_mman *cache = MALLOC_STRUCT(nouveau_mman);
   int i;

   if (!cache)
      return NULL;

   cache->dev = dev;
   cache->domain = domain;
   cache->storage_type = storage_type;
   cache->allocated = 0;

   for (i = 0; i < MM_NUM_BUCKETS; ++i) {
      LIST_INITHEAD(&cache->bucket[i].free);
      LIST_INITHEAD(&cache->bucket[i].used);
      LIST_INITHEAD(&cache->bucket[i].full);
   }

   return cache;
}
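
/* Unreference each slab's bo and free the slab bookkeeping itself. */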
static INLINE void
nouveau_mm_free_slabs(struct list_head *head)
{
   struct mm_slab *slab, *next;

   LIST_FOR_EACH_ENTRY_SAFE(slab, next, head, head) {
      LIST_DEL(&slab->head);
      nouveau_bo_ref(NULL, &slab->bo);
      FREE(slab);
   }
}
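
/* Tear down the cache; warn if used or full slabs remain, since their
 * bos are still referenced by outstanding allocations.
 */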
void
nouveau_mm_destroy(struct nouveau_mman *cache)
{
   int i;

   if (!cache)
      return;

   for (i = 0; i < MM_NUM_BUCKETS; ++i) {
      if (!LIST_IS_EMPTY(&cache->bucket[i].used) ||
          !LIST_IS_EMPTY(&cache->bucket[i].full))
         debug_printf("WARNING: destroying GPU memory cache "
                      "with some buffers still in use\n");

      nouveau_mm_free_slabs(&cache->bucket[i].free);
      nouveau_mm_free_slabs(&cache->bucket[i].used);
      nouveau_mm_free_slabs(&cache->bucket[i].full);
   }

   FREE(cache);
}