/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
/* Notes:
 *
 * We store a bo pointer in the drm_mm_node struct so we know which bo
 * owns a specific node. There is no protection on the pointer, thus to
 * make sure things don't go berserk you have to access this pointer
 * while holding the global lru lock, and make sure anytime you free a
 * node you reset the pointer to NULL.
 */

#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>

#define TTM_ASSERT_LOCKED(param)
#define TTM_DEBUG(fmt, arg...)
#define TTM_BO_HASH_ORDER 13

static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
static void ttm_bo_global_kobj_release(struct kobject *kobj);

static struct attribute ttm_bo_count = {
	.name = "bo_count",
	.mode = S_IRUGO
};

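/*
 * Map a placement flag mask to the index of the first memory type set
 * in it. Returns -EINVAL if no memory-type bit is set.
 */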
static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
{
	int i;

	for (i = 0; i <= TTM_PL_PRIV5; i++)
		if (flags & (1 << i)) {
			*mem_type = i;
			return 0;
		}
	return -EINVAL;
}

static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];

	printk(KERN_ERR TTM_PFX "    has_type: %d\n", man->has_type);
	printk(KERN_ERR TTM_PFX "    use_type: %d\n", man->use_type);
	printk(KERN_ERR TTM_PFX "    flags: 0x%08X\n", man->flags);
	printk(KERN_ERR TTM_PFX "    gpu_offset: 0x%08lX\n", man->gpu_offset);
	printk(KERN_ERR TTM_PFX "    io_offset: 0x%08lX\n", man->io_offset);
	printk(KERN_ERR TTM_PFX "    io_size: %ld\n", man->io_size);
	printk(KERN_ERR TTM_PFX "    size: %llu\n", man->size);
	printk(KERN_ERR TTM_PFX "    available_caching: 0x%08X\n",
		man->available_caching);
	printk(KERN_ERR TTM_PFX "    default_caching: 0x%08X\n",
		man->default_caching);
	if (mem_type != TTM_PL_SYSTEM) {
		spin_lock(&bdev->glob->lru_lock);
		drm_mm_debug_table(&man->manager, TTM_PFX);
		spin_unlock(&bdev->glob->lru_lock);
	}
}

static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
					struct ttm_placement *placement)
{
	int i, ret, mem_type;

	printk(KERN_ERR TTM_PFX "No space for %p (%lu pages, %luK, %luM)\n",
		bo, bo->mem.num_pages, bo->mem.size >> 10,
		bo->mem.size >> 20);
	for (i = 0; i < placement->num_placement; i++) {
		ret = ttm_mem_type_from_flags(placement->placement[i],
						&mem_type);
		if (ret)
			return;
		printk(KERN_ERR TTM_PFX "  placement[%d]=0x%08X (%d)\n",
			i, placement->placement[i], mem_type);
		ttm_mem_type_debug(bo->bdev, mem_type);
	}
}

static ssize_t ttm_bo_global_show(struct kobject *kobj,
				  struct attribute *attr,
				  char *buffer)
{
	struct ttm_bo_global *glob =
		container_of(kobj, struct ttm_bo_global, kobj);

	return snprintf(buffer, PAGE_SIZE, "%lu\n",
			(unsigned long) atomic_read(&glob->bo_count));
}

static struct attribute *ttm_bo_global_attrs[] = {
	&ttm_bo_count,
	NULL
};

static struct sysfs_ops ttm_bo_global_ops = {
	.show = &ttm_bo_global_show
};

static struct kobj_type ttm_bo_glob_kobj_type = {
	.release = &ttm_bo_global_kobj_release,
	.sysfs_ops = &ttm_bo_global_ops,
	.default_attrs = ttm_bo_global_attrs
};

static inline uint32_t ttm_bo_type_flags(unsigned type)
{
	return 1 << (type);
}

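/*
 * Final release, called when the last list_kref reference is dropped.
 * Tears down the ttm backing and either calls the driver's destroy
 * hook or frees the object itself.
 */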
static void ttm_bo_release_list(struct kref *list_kref)
{
	struct ttm_buffer_object *bo =
	    container_of(list_kref, struct ttm_buffer_object, list_kref);
	struct ttm_bo_device *bdev = bo->bdev;

	BUG_ON(atomic_read(&bo->list_kref.refcount));
	BUG_ON(atomic_read(&bo->kref.refcount));
	BUG_ON(atomic_read(&bo->cpu_writers));
	BUG_ON(bo->sync_obj != NULL);
	BUG_ON(bo->mem.mm_node != NULL);
	BUG_ON(!list_empty(&bo->lru));
	BUG_ON(!list_empty(&bo->ddestroy));

	if (bo->ttm)
		ttm_tt_destroy(bo->ttm);
	atomic_dec(&bo->glob->bo_count);
	if (bo->destroy)
		bo->destroy(bo);
	else {
		ttm_mem_global_free(bdev->glob->mem_glob, bo->acc_size);
		kfree(bo);
	}
}

int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
{
	if (interruptible) {
		int ret = 0;

		ret = wait_event_interruptible(bo->event_queue,
					       atomic_read(&bo->reserved) == 0);
		if (unlikely(ret != 0))
			return ret;
	} else {
		wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
	}
	return 0;
}
EXPORT_SYMBOL(ttm_bo_wait_unreserved);

static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;

	BUG_ON(!atomic_read(&bo->reserved));

	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {

		BUG_ON(!list_empty(&bo->lru));

		man = &bdev->man[bo->mem.mem_type];
		list_add_tail(&bo->lru, &man->lru);
		kref_get(&bo->list_kref);

		if (bo->ttm != NULL) {
			list_add_tail(&bo->swap, &bo->glob->swap_lru);
			kref_get(&bo->list_kref);
		}
	}
}

/**
 * Call with the lru_lock held.
 */

static int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
	int put_count = 0;

	if (!list_empty(&bo->swap)) {
		list_del_init(&bo->swap);
		++put_count;
	}
	if (!list_empty(&bo->lru)) {
		list_del_init(&bo->lru);
		++put_count;
	}

	/*
	 * TODO: Add a driver hook to delete from
	 * driver-specific LRU's here.
	 */

	return put_count;
}

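/*
 * Reserve @bo, optionally waiting for it to become unreserved.
 * When @use_sequence is set, -EAGAIN is returned instead of waiting
 * if the buffer is already reserved under a sequence that is not
 * newer than @sequence, letting multi-buffer reservers back off
 * rather than deadlock. Called with the global lru lock held.
 */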
int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
			  bool interruptible,
			  bool no_wait, bool use_sequence, uint32_t sequence)
{
	struct ttm_bo_global *glob = bo->glob;
	int ret;

	while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
		if (use_sequence && bo->seq_valid &&
			(sequence - bo->val_seq < (1 << 31))) {
			return -EAGAIN;
		}

		if (no_wait)
			return -EBUSY;

		spin_unlock(&glob->lru_lock);
		ret = ttm_bo_wait_unreserved(bo, interruptible);
		spin_lock(&glob->lru_lock);

		if (unlikely(ret))
			return ret;
	}

	if (use_sequence) {
		bo->val_seq = sequence;
		bo->seq_valid = true;
	} else {
		bo->seq_valid = false;
	}

	return 0;
}
EXPORT_SYMBOL(ttm_bo_reserve);

static void ttm_bo_ref_bug(struct kref *list_kref)
{
	BUG();
}

int ttm_bo_reserve(struct ttm_buffer_object *bo,
		   bool interruptible,
		   bool no_wait, bool use_sequence, uint32_t sequence)
{
	struct ttm_bo_global *glob = bo->glob;
	int put_count = 0;
	int ret;

	spin_lock(&glob->lru_lock);
	ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
				    sequence);
	if (likely(ret == 0))
		put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);

	while (put_count--)
		kref_put(&bo->list_kref, ttm_bo_ref_bug);

	return ret;
}

void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
	struct ttm_bo_global *glob = bo->glob;

	spin_lock(&glob->lru_lock);
	ttm_bo_add_to_lru(bo);
	atomic_set(&bo->reserved, 0);
	wake_up_all(&bo->event_queue);
	spin_unlock(&glob->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_unreserve);

/*
 * Call bo->mutex locked.
 */
static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	int ret = 0;
	uint32_t page_flags = 0;

	TTM_ASSERT_LOCKED(&bo->mutex);
	bo->ttm = NULL;

	if (bdev->need_dma32)
		page_flags |= TTM_PAGE_FLAG_DMA32;

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
		/* fall through */
	case ttm_bo_type_kernel:
		bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
					page_flags, glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL))
			ret = -ENOMEM;
		break;
	case ttm_bo_type_user:
		bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
					page_flags | TTM_PAGE_FLAG_USER,
					glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL)) {
			ret = -ENOMEM;
			break;
		}

		ret = ttm_tt_set_user(bo->ttm, current,
				      bo->buffer_start, bo->num_pages);
		if (unlikely(ret != 0))
			ttm_tt_destroy(bo->ttm);
		break;
	default:
		printk(KERN_ERR TTM_PFX "Illegal buffer object type\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}

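/*
 * Move the buffer to @mem: set up a ttm for the new placement if
 * needed, pick the cheapest move path (ttm rebind, driver move hook,
 * or memcpy), and update the GPU offset on success.
 */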
static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem,
				  bool evict, bool interruptible, bool no_wait)
{
	struct ttm_bo_device *bdev = bo->bdev;
	bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
	bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
	struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
	struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (old_is_pci || new_is_pci ||
	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0))
		ttm_bo_unmap_virtual(bo);

	/*
	 * Create and bind a ttm if required.
	 */

	if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm == NULL)) {
		ret = ttm_bo_add_ttm(bo, false);
		if (ret)
			goto out_err;

		ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
		if (ret)
			goto out_err;

		if (mem->mem_type != TTM_PL_SYSTEM) {
			ret = ttm_tt_bind(bo->ttm, mem);
			if (ret)
				goto out_err;
		}

		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
			bo->mem = *mem;
			mem->mm_node = NULL;
			goto moved;
		}
	}

	if (bdev->driver->move_notify)
		bdev->driver->move_notify(bo, mem);

	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
		ret = ttm_bo_move_ttm(bo, evict, no_wait, mem);
	else if (bdev->driver->move)
		ret = bdev->driver->move(bo, evict, interruptible,
					 no_wait, mem);
	else
		ret = ttm_bo_move_memcpy(bo, evict, no_wait, mem);

	if (ret)
		goto out_err;

moved:
	if (bo->evicted) {
		ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
		if (ret)
			printk(KERN_ERR TTM_PFX "Can not flush read caches\n");
		bo->evicted = false;
	}

	if (bo->mem.mm_node) {
		spin_lock(&bo->lock);
		bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
		    bdev->man[bo->mem.mem_type].gpu_offset;
		bo->cur_placement = bo->mem.placement;
		spin_unlock(&bo->lock);
	} else
		bo->offset = 0;

	return 0;

out_err:
	new_man = &bdev->man[bo->mem.mem_type];
	if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
		ttm_tt_unbind(bo->ttm);
		ttm_tt_destroy(bo->ttm);
		bo->ttm = NULL;
	}

	return ret;
}

/**
 * If bo idle, remove from delayed- and lru lists, and unref.
 * If not idle, and already on delayed list, do nothing.
 * If not idle, and not on delayed list, put on delayed list,
 * up the list_kref and schedule a delayed list check.
 */

static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	struct ttm_bo_driver *driver = bdev->driver;
	int ret;

	spin_lock(&bo->lock);
	(void) ttm_bo_wait(bo, false, false, !remove_all);

	if (!bo->sync_obj) {
		int put_count;

		spin_unlock(&bo->lock);

		spin_lock(&glob->lru_lock);
		put_count = ttm_bo_del_from_lru(bo);

		ret = ttm_bo_reserve_locked(bo, false, false, false, 0);
		BUG_ON(ret);
		if (bo->ttm)
			ttm_tt_unbind(bo->ttm);

		if (!list_empty(&bo->ddestroy)) {
			list_del_init(&bo->ddestroy);
			++put_count;
		}
		if (bo->mem.mm_node) {
			bo->mem.mm_node->private = NULL;
			drm_mm_put_block(bo->mem.mm_node);
			bo->mem.mm_node = NULL;
		}
		spin_unlock(&glob->lru_lock);

		atomic_set(&bo->reserved, 0);

		while (put_count--)
			kref_put(&bo->list_kref, ttm_bo_ref_bug);

		return 0;
	}

	spin_lock(&glob->lru_lock);
	if (list_empty(&bo->ddestroy)) {
		void *sync_obj = bo->sync_obj;
		void *sync_obj_arg = bo->sync_obj_arg;

		kref_get(&bo->list_kref);
		list_add_tail(&bo->ddestroy, &bdev->ddestroy);
		spin_unlock(&glob->lru_lock);
		spin_unlock(&bo->lock);

		if (sync_obj)
			driver->sync_obj_flush(sync_obj, sync_obj_arg);
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
		ret = 0;

	} else {
		spin_unlock(&glob->lru_lock);
		spin_unlock(&bo->lock);
		ret = -EBUSY;
	}

	return ret;
}

/**
 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
 * encountered buffers.
 */

static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_buffer_object *entry = NULL;
	int ret = 0;

	spin_lock(&glob->lru_lock);
	if (list_empty(&bdev->ddestroy))
		goto out_unlock;

	entry = list_first_entry(&bdev->ddestroy,
		struct ttm_buffer_object, ddestroy);
	kref_get(&entry->list_kref);

	for (;;) {
		struct ttm_buffer_object *nentry = NULL;

		if (entry->ddestroy.next != &bdev->ddestroy) {
			nentry = list_first_entry(&entry->ddestroy,
				struct ttm_buffer_object, ddestroy);
			kref_get(&nentry->list_kref);
		}

		spin_unlock(&glob->lru_lock);
		ret = ttm_bo_cleanup_refs(entry, remove_all);
		kref_put(&entry->list_kref, ttm_bo_release_list);
		entry = nentry;

		if (ret || !entry)
			goto out;

		spin_lock(&glob->lru_lock);
		if (list_empty(&entry->ddestroy))
			break;
	}

out_unlock:
	spin_unlock(&glob->lru_lock);
out:
	if (entry)
		kref_put(&entry->list_kref, ttm_bo_release_list);
	return ret;
}

static void ttm_bo_delayed_workqueue(struct work_struct *work)
{
	struct ttm_bo_device *bdev =
	    container_of(work, struct ttm_bo_device, wq.work);

	if (ttm_bo_delayed_delete(bdev, false)) {
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
	}
}

static void ttm_bo_release(struct kref *kref)
{
	struct ttm_buffer_object *bo =
	    container_of(kref, struct ttm_buffer_object, kref);
	struct ttm_bo_device *bdev = bo->bdev;

	if (likely(bo->vm_node != NULL)) {
		rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
		drm_mm_put_block(bo->vm_node);
		bo->vm_node = NULL;
	}
	write_unlock(&bdev->vm_lock);
	ttm_bo_cleanup_refs(bo, false);
	kref_put(&bo->list_kref, ttm_bo_release_list);
	write_lock(&bdev->vm_lock);
}

void ttm_bo_unref(struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo = *p_bo;
	struct ttm_bo_device *bdev = bo->bdev;

	*p_bo = NULL;
	write_lock(&bdev->vm_lock);
	kref_put(&bo->kref, ttm_bo_release);
	write_unlock(&bdev->vm_lock);
}
EXPORT_SYMBOL(ttm_bo_unref);

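/*
 * Evict @bo from its current placement: wait for the GPU, ask the
 * driver for acceptable eviction placements, find space there and
 * move the buffer. Called with the buffer reserved.
 */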
static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
			bool no_wait)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	struct ttm_mem_reg evict_mem;
	struct ttm_placement placement;
	int ret = 0;

	spin_lock(&bo->lock);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait);
	spin_unlock(&bo->lock);

	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS) {
			printk(KERN_ERR TTM_PFX
			       "Failed to expire sync object before "
			       "buffer eviction.\n");
		}
		goto out;
	}

	BUG_ON(!atomic_read(&bo->reserved));

	evict_mem = bo->mem;
	evict_mem.mm_node = NULL;

	placement.fpfn = 0;
	placement.lpfn = 0;
	placement.num_placement = 0;
	placement.num_busy_placement = 0;
	bdev->driver->evict_flags(bo, &placement);
	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
				no_wait);
	if (ret) {
		if (ret != -ERESTARTSYS) {
			printk(KERN_ERR TTM_PFX
			       "Failed to find memory space for "
			       "buffer 0x%p eviction.\n", bo);
			ttm_bo_mem_space_debug(bo, &placement);
		}
		goto out;
	}

	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
				     no_wait);
	if (ret) {
		if (ret != -ERESTARTSYS)
			printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
		spin_lock(&glob->lru_lock);
		if (evict_mem.mm_node) {
			evict_mem.mm_node->private = NULL;
			drm_mm_put_block(evict_mem.mm_node);
			evict_mem.mm_node = NULL;
		}
		spin_unlock(&glob->lru_lock);
		goto out;
	}
	bo->evicted = true;
out:
	return ret;
}

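/*
 * Reserve and evict the first (least recently used) buffer on the
 * LRU list for @mem_type, retrying if another thread holds the
 * reservation.
 */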
static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
				uint32_t mem_type,
				bool interruptible, bool no_wait)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct ttm_buffer_object *bo;
	int ret, put_count = 0;

retry:
	spin_lock(&glob->lru_lock);
	if (list_empty(&man->lru)) {
		spin_unlock(&glob->lru_lock);
		return -EBUSY;
	}

	bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
	kref_get(&bo->list_kref);

	ret = ttm_bo_reserve_locked(bo, false, true, false, 0);

	if (unlikely(ret == -EBUSY)) {
		spin_unlock(&glob->lru_lock);
		if (likely(!no_wait))
			ret = ttm_bo_wait_unreserved(bo, interruptible);

		kref_put(&bo->list_kref, ttm_bo_release_list);

		/**
		 * We *need* to retry after releasing the lru lock.
		 */

		if (unlikely(ret != 0))
			return ret;
		goto retry;
	}

	put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);

	BUG_ON(ret != 0);

	while (put_count--)
		kref_put(&bo->list_kref, ttm_bo_ref_bug);

	ret = ttm_bo_evict(bo, interruptible, no_wait);
	ttm_bo_unreserve(bo);

	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
}

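/*
 * Try to allocate a drm_mm node for @mem within the range allowed by
 * @placement. Returns 0 with *node == NULL when the manager simply
 * has no free space; the caller then decides whether to evict.
 */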
static int ttm_bo_man_get_node(struct ttm_buffer_object *bo,
					struct ttm_mem_type_manager *man,
					struct ttm_placement *placement,
					struct ttm_mem_reg *mem,
					struct drm_mm_node **node)
{
	struct ttm_bo_global *glob = bo->glob;
	unsigned long lpfn;
	int ret;

	lpfn = placement->lpfn;
	if (!lpfn)
		lpfn = man->size;
	*node = NULL;
	do {
		ret = drm_mm_pre_get(&man->manager);
		if (unlikely(ret))
			return ret;

		spin_lock(&glob->lru_lock);
		*node = drm_mm_search_free_in_range(&man->manager,
					mem->num_pages, mem->page_alignment,
					placement->fpfn, lpfn, 1);
		if (unlikely(*node == NULL)) {
			spin_unlock(&glob->lru_lock);
			return 0;
		}
		*node = drm_mm_get_block_atomic_range(*node, mem->num_pages,
							mem->page_alignment,
							placement->fpfn,
							lpfn);
		spin_unlock(&glob->lru_lock);
	} while (*node == NULL);
	return 0;
}

/**
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
 */
static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
					uint32_t mem_type,
					struct ttm_placement *placement,
					struct ttm_mem_reg *mem,
					bool interruptible, bool no_wait)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct drm_mm_node *node;
	int ret;

	do {
		ret = ttm_bo_man_get_node(bo, man, placement, mem, &node);
		if (unlikely(ret != 0))
			return ret;
		if (node)
			break;
		spin_lock(&glob->lru_lock);
		if (list_empty(&man->lru)) {
			spin_unlock(&glob->lru_lock);
			break;
		}
		spin_unlock(&glob->lru_lock);
		ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
						no_wait);
		if (unlikely(ret != 0))
			return ret;
	} while (1);
	if (node == NULL)
		return -ENOMEM;
	mem->mm_node = node;
	mem->mem_type = mem_type;
	return 0;
}

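/*
 * Pick caching flags for a new placement, preferring (in order) the
 * buffer's current caching, the manager's default caching, and then
 * cached, write-combined and uncached among the proposed flags.
 */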
static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
				      uint32_t cur_placement,
				      uint32_t proposed_placement)
{
	uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
	uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;

	/**
	 * Keep current caching if possible.
	 */

	if ((cur_placement & caching) != 0)
		result |= (cur_placement & caching);
	else if ((man->default_caching & caching) != 0)
		result |= man->default_caching;
	else if ((TTM_PL_FLAG_CACHED & caching) != 0)
		result |= TTM_PL_FLAG_CACHED;
	else if ((TTM_PL_FLAG_WC & caching) != 0)
		result |= TTM_PL_FLAG_WC;
	else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
		result |= TTM_PL_FLAG_UNCACHED;

	return result;
}

static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
				 bool disallow_fixed,
				 uint32_t mem_type,
				 uint32_t proposed_placement,
				 uint32_t *masked_placement)
{
	uint32_t cur_flags = ttm_bo_type_flags(mem_type);

	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed)
		return false;

	if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
		return false;

	if ((proposed_placement & man->available_caching) == 0)
		return false;

	cur_flags |= (proposed_placement & man->available_caching);

	*masked_placement = cur_flags;
	return true;
}

||
847 |
/**
|
|
848 |
* Creates space for memory region @mem according to its type.
|
|
849 |
*
|
|
850 |
* This function first searches for free space in compatible memory types in
|
|
851 |
* the priority order defined by the driver. If free space isn't found, then
|
|
852 |
* ttm_bo_mem_force_space is attempted in priority order to evict and find
|
|
853 |
* space.
|
|
854 |
*/
|
|
855 |
int ttm_bo_mem_space(struct ttm_buffer_object *bo, |
|
856 |
struct ttm_placement *placement, |
|
857 |
struct ttm_mem_reg *mem, |
|
858 |
bool interruptible, bool no_wait) |
|
859 |
{
|
|
860 |
struct ttm_bo_device *bdev = bo->bdev; |
|
861 |
struct ttm_mem_type_manager *man; |
|
862 |
uint32_t mem_type = TTM_PL_SYSTEM; |
|
863 |
uint32_t cur_flags = 0; |
|
864 |
bool type_found = false; |
|
865 |
bool type_ok = false; |
|
866 |
bool has_erestartsys = false; |
|
867 |
struct drm_mm_node *node = NULL; |
|
868 |
int i, ret; |
|
869 |
||
870 |
mem->mm_node = NULL; |
|
871 |
for (i = 0; i < placement->num_placement; ++i) { |
|
872 |
ret = ttm_mem_type_from_flags(placement->placement[i], |
|
873 |
&mem_type); |
|
874 |
if (ret) |
|
875 |
return ret; |
|
876 |
man = &bdev->man[mem_type]; |
|
877 |
||
878 |
type_ok = ttm_bo_mt_compatible(man, |
|
879 |
bo->type == ttm_bo_type_user, |
|
880 |
mem_type, |
|
881 |
placement->placement[i], |
|
882 |
&cur_flags); |
|
883 |
||
884 |
if (!type_ok) |
|
885 |
continue; |
|
886 |
||
887 |
cur_flags = ttm_bo_select_caching(man, bo->mem.placement, |
|
888 |
cur_flags); |
|
889 |
/*
|
|
890 |
* Use the access and other non-mapping-related flag bits from
|
|
891 |
* the memory placement flags to the current flags
|
|
892 |
*/
|
|
893 |
ttm_flag_masked(&cur_flags, placement->placement[i], |
|
894 |
~TTM_PL_MASK_MEMTYPE); |
|
895 |
||
896 |
if (mem_type == TTM_PL_SYSTEM) |
|
897 |
break; |
|
898 |
||
899 |
if (man->has_type && man->use_type) { |
|
900 |
type_found = true; |
|
901 |
ret = ttm_bo_man_get_node(bo, man, placement, mem, |
|
902 |
&node); |
|
903 |
if (unlikely(ret)) |
|
904 |
return ret; |
|
905 |
}
|
|
906 |
if (node) |
|
907 |
break; |
|
908 |
}
|
|
909 |
||
910 |
if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || node) { |
|
911 |
mem->mm_node = node; |
|
912 |
mem->mem_type = mem_type; |
|
913 |
mem->placement = cur_flags; |
|
914 |
if (node) |
|
915 |
node->private = bo; |
|
916 |
return 0; |
|
917 |
}
|
|
918 |
||
919 |
if (!type_found) |
|
920 |
return -EINVAL; |
|
921 |
||
922 |
for (i = 0; i < placement->num_busy_placement; ++i) { |
|
923 |
ret = ttm_mem_type_from_flags(placement->busy_placement[i], |
|
924 |
&mem_type); |
|
925 |
if (ret) |
|
926 |
return ret; |
|
927 |
man = &bdev->man[mem_type]; |
|
928 |
if (!man->has_type) |
|
929 |
continue; |
|
930 |
if (!ttm_bo_mt_compatible(man, |
|
931 |
bo->type == ttm_bo_type_user, |
|
932 |
mem_type, |
|
933 |
placement->busy_placement[i], |
|
934 |
&cur_flags)) |
|
935 |
continue; |
|
936 |
||
937 |
cur_flags = ttm_bo_select_caching(man, bo->mem.placement, |
|
938 |
cur_flags); |
|
939 |
/*
|
|
940 |
* Use the access and other non-mapping-related flag bits from
|
|
941 |
* the memory placement flags to the current flags
|
|
942 |
*/
|
|
943 |
ttm_flag_masked(&cur_flags, placement->busy_placement[i], |
|
944 |
~TTM_PL_MASK_MEMTYPE); |
|
945 |
||
946 |
||
947 |
if (mem_type == TTM_PL_SYSTEM) { |
|
948 |
mem->mem_type = mem_type; |
|
949 |
mem->placement = cur_flags; |
|
950 |
mem->mm_node = NULL; |
|
951 |
return 0; |
|
952 |
}
|
|
953 |
||
954 |
ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem, |
|
955 |
interruptible, no_wait); |
|
956 |
if (ret == 0 && mem->mm_node) { |
|
957 |
mem->placement = cur_flags; |
|
958 |
mem->mm_node->private = bo; |
|
959 |
return 0; |
|
960 |
}
|
|
961 |
if (ret == -ERESTARTSYS) |
|
962 |
has_erestartsys = true; |
|
963 |
}
|
|
964 |
ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM; |
|
965 |
return ret; |
|
966 |
}
|
|
967 |
EXPORT_SYMBOL(ttm_bo_mem_space); |
|
968 |
||
int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
{
	if ((atomic_read(&bo->cpu_writers) > 0) && no_wait)
		return -EBUSY;

	return wait_event_interruptible(bo->event_queue,
					atomic_read(&bo->cpu_writers) == 0);
}
EXPORT_SYMBOL(ttm_bo_wait_cpu);

int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
			struct ttm_placement *placement,
			bool interruptible, bool no_wait)
{
	struct ttm_bo_global *glob = bo->glob;
	int ret = 0;
	struct ttm_mem_reg mem;

	BUG_ON(!atomic_read(&bo->reserved));

	/*
	 * FIXME: It's possible to pipeline buffer moves.
	 * Have the driver move function wait for idle when necessary,
	 * instead of doing it here.
	 */
	spin_lock(&bo->lock);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait);
	spin_unlock(&bo->lock);
	if (ret)
		return ret;
	mem.num_pages = bo->num_pages;
	mem.size = mem.num_pages << PAGE_SHIFT;
	mem.page_alignment = bo->mem.page_alignment;
	/*
	 * Determine where to move the buffer.
	 */
	ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait);
	if (ret)
		goto out_unlock;
	ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait);
out_unlock:
	if (ret && mem.mm_node) {
		spin_lock(&glob->lru_lock);
		mem.mm_node->private = NULL;
		drm_mm_put_block(mem.mm_node);
		spin_unlock(&glob->lru_lock);
	}
	return ret;
}

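/*
 * Check whether the buffer's current memory region already satisfies
 * @placement. Returns the index of the first compatible placement,
 * or -1 if the buffer must be moved.
 */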
static int ttm_bo_mem_compat(struct ttm_placement *placement,
				struct ttm_mem_reg *mem)
{
	int i;
	struct drm_mm_node *node = mem->mm_node;

	if (node && placement->lpfn != 0 &&
	    (node->start < placement->fpfn ||
	     node->start + node->size > placement->lpfn))
		return -1;

	for (i = 0; i < placement->num_placement; i++) {
		if ((placement->placement[i] & mem->placement &
			TTM_PL_MASK_CACHING) &&
			(placement->placement[i] & mem->placement &
			TTM_PL_MASK_MEM))
			return i;
	}
	return -1;
}

int ttm_bo_validate(struct ttm_buffer_object *bo,
			struct ttm_placement *placement,
			bool interruptible, bool no_wait)
{
	int ret;

	BUG_ON(!atomic_read(&bo->reserved));
	/* Check that the range is valid */
	if (placement->lpfn || placement->fpfn)
		if (placement->fpfn > placement->lpfn ||
			(placement->lpfn - placement->fpfn) < bo->num_pages)
			return -EINVAL;
	/*
	 * Check whether we need to move the buffer.
	 */
	ret = ttm_bo_mem_compat(placement, &bo->mem);
	if (ret < 0) {
		ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait);
		if (ret)
			return ret;
	} else {
		/*
		 * Carry the access and other non-mapping-related flag bits
		 * from the compatible memory placement flags over to the
		 * active flags.
		 */
		ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
				~TTM_PL_MASK_MEMTYPE);
	}
	/*
	 * We might need to add a TTM.
	 */
	if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		ret = ttm_bo_add_ttm(bo, true);
		if (ret)
			return ret;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_bo_validate);

int ttm_bo_check_placement(struct ttm_buffer_object *bo,
				struct ttm_placement *placement)
{
	int i;

	if (placement->fpfn || placement->lpfn) {
		if (bo->mem.num_pages > (placement->lpfn - placement->fpfn)) {
			printk(KERN_ERR TTM_PFX "Page number range too small. "
				"Need %lu pages, range is [%u, %u]\n",
				bo->mem.num_pages, placement->fpfn,
				placement->lpfn);
			return -EINVAL;
		}
	}
	for (i = 0; i < placement->num_placement; i++) {
		if (!capable(CAP_SYS_ADMIN)) {
			if (placement->placement[i] & TTM_PL_FLAG_NO_EVICT) {
				printk(KERN_ERR TTM_PFX "Need to be root to "
					"modify NO_EVICT status.\n");
				return -EINVAL;
			}
		}
	}
	for (i = 0; i < placement->num_busy_placement; i++) {
		if (!capable(CAP_SYS_ADMIN)) {
			if (placement->busy_placement[i] & TTM_PL_FLAG_NO_EVICT) {
				printk(KERN_ERR TTM_PFX "Need to be root to "
					"modify NO_EVICT status.\n");
				return -EINVAL;
			}
		}
	}
	return 0;
}

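/*
 * Initialize a (typically embedded) buffer object, place it in system
 * memory and validate it into @placement. On success the buffer is
 * returned unreserved; on failure the caller's reference is dropped.
 */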
int ttm_bo_init(struct ttm_bo_device *bdev,
		struct ttm_buffer_object *bo,
		unsigned long size,
		enum ttm_bo_type type,
		struct ttm_placement *placement,
		uint32_t page_alignment,
		unsigned long buffer_start,
		bool interruptible,
		struct file *persistant_swap_storage,
		size_t acc_size,
		void (*destroy) (struct ttm_buffer_object *))
{
	int ret = 0;
	unsigned long num_pages;

	size += buffer_start & ~PAGE_MASK;
	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (num_pages == 0) {
		printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n");
		return -EINVAL;
	}
	bo->destroy = destroy;

	spin_lock_init(&bo->lock);
	kref_init(&bo->kref);
	kref_init(&bo->list_kref);
	atomic_set(&bo->cpu_writers, 0);
	atomic_set(&bo->reserved, 1);
	init_waitqueue_head(&bo->event_queue);
	INIT_LIST_HEAD(&bo->lru);
	INIT_LIST_HEAD(&bo->ddestroy);
	INIT_LIST_HEAD(&bo->swap);
	bo->bdev = bdev;
	bo->glob = bdev->glob;
	bo->type = type;
	bo->num_pages = num_pages;
	bo->mem.size = num_pages << PAGE_SHIFT;
	bo->mem.mem_type = TTM_PL_SYSTEM;
	bo->mem.num_pages = bo->num_pages;
	bo->mem.mm_node = NULL;
	bo->mem.page_alignment = page_alignment;
	bo->buffer_start = buffer_start & PAGE_MASK;
	bo->priv_flags = 0;
	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
	bo->seq_valid = false;
	bo->persistant_swap_storage = persistant_swap_storage;
	bo->acc_size = acc_size;
	atomic_inc(&bo->glob->bo_count);

	ret = ttm_bo_check_placement(bo, placement);
	if (unlikely(ret != 0))
		goto out_err;

	/*
	 * For ttm_bo_type_device buffers, allocate
	 * address space from the device.
	 */
	if (bo->type == ttm_bo_type_device) {
		ret = ttm_bo_setup_vm(bo);
		if (ret)
			goto out_err;
	}

	ret = ttm_bo_validate(bo, placement, interruptible, false);
	if (ret)
		goto out_err;

	ttm_bo_unreserve(bo);
	return 0;

out_err:
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_init);

static inline size_t ttm_bo_size(struct ttm_bo_global *glob,
				 unsigned long num_pages)
{
	size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) &
	    PAGE_MASK;

	return glob->ttm_bo_size + 2 * page_array_size;
}

int ttm_bo_create(struct ttm_bo_device *bdev,
			unsigned long size,
			enum ttm_bo_type type,
			struct ttm_placement *placement,
			uint32_t page_alignment,
			unsigned long buffer_start,
			bool interruptible,
			struct file *persistant_swap_storage,
			struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo;
	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
	int ret;

	size_t acc_size =
	    ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
	if (unlikely(ret != 0))
		return ret;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);

	if (unlikely(bo == NULL)) {
		ttm_mem_global_free(mem_glob, acc_size);
		return -ENOMEM;
	}

	ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
				buffer_start, interruptible,
				persistant_swap_storage, acc_size, NULL);
	if (likely(ret == 0))
		*p_bo = bo;

	return ret;
}

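/*
 * Evict everything on @mem_type's LRU list. With @allow_errors set,
 * the first eviction failure is returned to the caller; otherwise it
 * is only logged and the loop continues.
 */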
static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
					unsigned mem_type, bool allow_errors)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct ttm_bo_global *glob = bdev->glob;
	int ret;

	/*
	 * Can't use standard list traversal since we're unlocking.
	 */

	spin_lock(&glob->lru_lock);
	while (!list_empty(&man->lru)) {
		spin_unlock(&glob->lru_lock);
		ret = ttm_mem_evict_first(bdev, mem_type, false, false);
		if (ret) {
			if (allow_errors) {
				return ret;
			} else {
				printk(KERN_ERR TTM_PFX
					"Cleanup eviction failed\n");
			}
		}
		spin_lock(&glob->lru_lock);
	}
	spin_unlock(&glob->lru_lock);
	return 0;
}

int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_mem_type_manager *man;
	int ret = -EINVAL;

	if (mem_type >= TTM_NUM_MEM_TYPES) {
		printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", mem_type);
		return ret;
	}
	man = &bdev->man[mem_type];

	if (!man->has_type) {
		printk(KERN_ERR TTM_PFX "Trying to take down uninitialized "
			"memory manager type %u\n", mem_type);
		return ret;
	}

	man->use_type = false;
	man->has_type = false;

	ret = 0;
	if (mem_type > 0) {
		ttm_bo_force_list_clean(bdev, mem_type, false);

		spin_lock(&glob->lru_lock);
		if (drm_mm_clean(&man->manager))
			drm_mm_takedown(&man->manager);
		else
			ret = -EBUSY;

		spin_unlock(&glob->lru_lock);
	}

	return ret;
}
EXPORT_SYMBOL(ttm_bo_clean_mm);

int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];

	if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
		printk(KERN_ERR TTM_PFX
		       "Illegal memory manager memory type %u.\n",
		       mem_type);
		return -EINVAL;
	}

	if (!man->has_type) {
		printk(KERN_ERR TTM_PFX
		       "Memory type %u has not been initialized.\n",
		       mem_type);
		return 0;
	}

	return ttm_bo_force_list_clean(bdev, mem_type, true);
}
EXPORT_SYMBOL(ttm_bo_evict_mm);

int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
			unsigned long p_size)
{
	int ret = -EINVAL;
	struct ttm_mem_type_manager *man;

	if (type >= TTM_NUM_MEM_TYPES) {
		printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", type);
		return ret;
	}

	man = &bdev->man[type];
	if (man->has_type) {
		printk(KERN_ERR TTM_PFX
		       "Memory manager already initialized for type %d\n",
		       type);
		return ret;
	}

	ret = bdev->driver->init_mem_type(bdev, type, man);
	if (ret)
		return ret;

	ret = 0;
	if (type != TTM_PL_SYSTEM) {
		if (!p_size) {
			printk(KERN_ERR TTM_PFX
			       "Zero size memory manager type %d\n",
			       type);
			return ret;
		}
		ret = drm_mm_init(&man->manager, 0, p_size);
		if (ret)
			return ret;
	}
	man->has_type = true;
	man->use_type = true;
	man->size = p_size;

	INIT_LIST_HEAD(&man->lru);

	return 0;
}
EXPORT_SYMBOL(ttm_bo_init_mm);

static void ttm_bo_global_kobj_release(struct kobject *kobj)
{
	struct ttm_bo_global *glob =
		container_of(kobj, struct ttm_bo_global, kobj);

	ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
	__free_page(glob->dummy_read_page);
	kfree(glob);
}

void ttm_bo_global_release(struct ttm_global_reference *ref)
{
	struct ttm_bo_global *glob = ref->object;

	kobject_del(&glob->kobj);
	kobject_put(&glob->kobj);
}
EXPORT_SYMBOL(ttm_bo_global_release);

int ttm_bo_global_init(struct ttm_global_reference *ref)
{
	struct ttm_bo_global_ref *bo_ref =
		container_of(ref, struct ttm_bo_global_ref, ref);
	struct ttm_bo_global *glob = ref->object;
	int ret;

	mutex_init(&glob->device_list_mutex);
	spin_lock_init(&glob->lru_lock);
	glob->mem_glob = bo_ref->mem_glob;
	glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);

	if (unlikely(glob->dummy_read_page == NULL)) {
		ret = -ENOMEM;
		goto out_no_drp;
	}

	INIT_LIST_HEAD(&glob->swap_lru);
	INIT_LIST_HEAD(&glob->device_list);

	ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
	ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
	if (unlikely(ret != 0)) {
		printk(KERN_ERR TTM_PFX
		       "Could not register buffer object swapout.\n");
		goto out_no_shrink;
	}

	glob->ttm_bo_extra_size =
		ttm_round_pot(sizeof(struct ttm_tt)) +
		ttm_round_pot(sizeof(struct ttm_backend));

	glob->ttm_bo_size = glob->ttm_bo_extra_size +
		ttm_round_pot(sizeof(struct ttm_buffer_object));

	atomic_set(&glob->bo_count, 0);

	kobject_init(&glob->kobj, &ttm_bo_glob_kobj_type);
	ret = kobject_add(&glob->kobj, ttm_get_kobj(), "buffer_objects");
	if (unlikely(ret != 0))
		kobject_put(&glob->kobj);
	return ret;
out_no_shrink:
	__free_page(glob->dummy_read_page);
out_no_drp:
	kfree(glob);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_global_init);

int ttm_bo_device_release(struct ttm_bo_device *bdev)
{
	int ret = 0;
	unsigned i = TTM_NUM_MEM_TYPES;
	struct ttm_mem_type_manager *man;
	struct ttm_bo_global *glob = bdev->glob;

	while (i--) {
		man = &bdev->man[i];
		if (man->has_type) {
			man->use_type = false;
			if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
				ret = -EBUSY;
				printk(KERN_ERR TTM_PFX
				       "DRM memory manager type %d "
				       "is not clean.\n", i);
			}
			man->has_type = false;
		}
	}

	mutex_lock(&glob->device_list_mutex);
	list_del(&bdev->device_list);
	mutex_unlock(&glob->device_list_mutex);

	if (!cancel_delayed_work(&bdev->wq))
		flush_scheduled_work();

	while (ttm_bo_delayed_delete(bdev, true))
		;

	spin_lock(&glob->lru_lock);
	if (list_empty(&bdev->ddestroy))
		TTM_DEBUG("Delayed destroy list was clean\n");

	if (list_empty(&bdev->man[0].lru))
		TTM_DEBUG("Swap list was clean\n");
	spin_unlock(&glob->lru_lock);

	BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
	write_lock(&bdev->vm_lock);
	drm_mm_takedown(&bdev->addr_space_mm);
	write_unlock(&bdev->vm_lock);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_release);

int ttm_bo_device_init(struct ttm_bo_device *bdev,
		       struct ttm_bo_global *glob,
		       struct ttm_bo_driver *driver,
		       uint64_t file_page_offset,
		       bool need_dma32)
{
	int ret = -EINVAL;

	rwlock_init(&bdev->vm_lock);
	bdev->driver = driver;

	memset(bdev->man, 0, sizeof(bdev->man));

	/*
	 * Initialize the system memory buffer type.
	 * Other types need to be driver / IOCTL initialized.
	 */
	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
	if (unlikely(ret != 0))
		goto out_no_sys;

	bdev->addr_space_rb = RB_ROOT;
	ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
	if (unlikely(ret != 0))
		goto out_no_addr_mm;

	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
	bdev->nice_mode = true;
	INIT_LIST_HEAD(&bdev->ddestroy);
	bdev->dev_mapping = NULL;
	bdev->glob = glob;
	bdev->need_dma32 = need_dma32;

	mutex_lock(&glob->device_list_mutex);
	list_add_tail(&bdev->device_list, &glob->device_list);
	mutex_unlock(&glob->device_list_mutex);

	return 0;
out_no_addr_mm:
	ttm_bo_clean_mm(bdev, 0);
out_no_sys:
	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_init);

/*
 * buffer object vm functions.
 */

bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
		if (mem->mem_type == TTM_PL_SYSTEM)
			return false;

		if (man->flags & TTM_MEMTYPE_FLAG_CMA)
			return false;

		if (mem->placement & TTM_PL_FLAG_CACHED)
			return false;
	}
	return true;
}

int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
			struct ttm_mem_reg *mem,
			unsigned long *bus_base,
			unsigned long *bus_offset, unsigned long *bus_size)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	*bus_size = 0;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;

	if (ttm_mem_reg_is_pci(bdev, mem)) {
		*bus_offset = mem->mm_node->start << PAGE_SHIFT;
		*bus_size = mem->num_pages << PAGE_SHIFT;
		*bus_base = man->io_offset;
	}

	return 0;
}

void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	loff_t offset = (loff_t) bo->addr_space_offset;
	loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;

	if (!bdev->dev_mapping)
		return;

	unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
}
EXPORT_SYMBOL(ttm_bo_unmap_virtual);

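/*
 * Insert @bo into the device's address-space rb-tree, keyed by the
 * start of its vm node. Called with bdev->vm_lock held for writing.
 */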
static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct rb_node **cur = &bdev->addr_space_rb.rb_node;
	struct rb_node *parent = NULL;
	struct ttm_buffer_object *cur_bo;
	unsigned long offset = bo->vm_node->start;
	unsigned long cur_offset;

	while (*cur) {
		parent = *cur;
		cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb);
		cur_offset = cur_bo->vm_node->start;
		if (offset < cur_offset)
			cur = &parent->rb_left;
		else if (offset > cur_offset)
			cur = &parent->rb_right;
		else
			BUG();
	}

	rb_link_node(&bo->vm_rb, parent, cur);
	rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb);
}

/**
 * ttm_bo_setup_vm:
 *
 * @bo: the buffer to allocate address space for
 *
 * Allocate address space in the drm device so that applications
 * can mmap the buffer and access the contents. This only
 * applies to ttm_bo_type_device objects as others are not
 * placed in the drm device address space.
 */

static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int ret;

retry_pre_get:
	ret = drm_mm_pre_get(&bdev->addr_space_mm);
	if (unlikely(ret != 0))
		return ret;

	write_lock(&bdev->vm_lock);
	bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
					 bo->mem.num_pages, 0, 0);

	if (unlikely(bo->vm_node == NULL)) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
					      bo->mem.num_pages, 0);

	if (unlikely(bo->vm_node == NULL)) {
		write_unlock(&bdev->vm_lock);
		goto retry_pre_get;
	}

	ttm_bo_vm_insert_rb(bo);
	write_unlock(&bdev->vm_lock);
	bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;

	return 0;
out_unlock:
	write_unlock(&bdev->vm_lock);
	return ret;
}

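/*
 * Wait for the buffer's sync object to signal, clearing it when done.
 * Drops and retakes bo->lock around driver waits, so the sync object
 * is re-checked after every sleep. Called with bo->lock held.
 */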
int ttm_bo_wait(struct ttm_buffer_object *bo,
		bool lazy, bool interruptible, bool no_wait)
{
	struct ttm_bo_driver *driver = bo->bdev->driver;
	void *sync_obj;
	void *sync_obj_arg;
	int ret = 0;

	if (likely(bo->sync_obj == NULL))
		return 0;

	while (bo->sync_obj) {

		if (driver->sync_obj_signaled(bo->sync_obj, bo->sync_obj_arg)) {
			void *tmp_obj = bo->sync_obj;
			bo->sync_obj = NULL;
			clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
			spin_unlock(&bo->lock);
			driver->sync_obj_unref(&tmp_obj);
			spin_lock(&bo->lock);
			continue;
		}

		if (no_wait)
			return -EBUSY;

		sync_obj = driver->sync_obj_ref(bo->sync_obj);
		sync_obj_arg = bo->sync_obj_arg;
		spin_unlock(&bo->lock);
		ret = driver->sync_obj_wait(sync_obj, sync_obj_arg,
					    lazy, interruptible);
		if (unlikely(ret != 0)) {
			driver->sync_obj_unref(&sync_obj);
			spin_lock(&bo->lock);
			return ret;
		}
		spin_lock(&bo->lock);
		if (likely(bo->sync_obj == sync_obj &&
			   bo->sync_obj_arg == sync_obj_arg)) {
			void *tmp_obj = bo->sync_obj;
			bo->sync_obj = NULL;
			clear_bit(TTM_BO_PRIV_FLAG_MOVING,
				  &bo->priv_flags);
			spin_unlock(&bo->lock);
			driver->sync_obj_unref(&sync_obj);
			driver->sync_obj_unref(&tmp_obj);
			spin_lock(&bo->lock);
		} else {
			spin_unlock(&bo->lock);
			driver->sync_obj_unref(&sync_obj);
			spin_lock(&bo->lock);
		}
	}
	return 0;
}
EXPORT_SYMBOL(ttm_bo_wait);

void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo)
{
	atomic_set(&bo->reserved, 0);
	wake_up_all(&bo->event_queue);
}

int ttm_bo_block_reservation(struct ttm_buffer_object *bo, bool interruptible,
			     bool no_wait)
{
	int ret;

	while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
		if (no_wait)
			return -EBUSY;
		else if (interruptible) {
			ret = wait_event_interruptible
			    (bo->event_queue, atomic_read(&bo->reserved) == 0);
			if (unlikely(ret != 0))
				return ret;
		} else {
			wait_event(bo->event_queue,
				   atomic_read(&bo->reserved) == 0);
		}
	}
	return 0;
}

int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
{
	int ret = 0;

	/*
	 * Using ttm_bo_reserve instead of ttm_bo_block_reservation
	 * makes sure the lru lists are updated.
	 */

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
	if (unlikely(ret != 0))
		return ret;
	spin_lock(&bo->lock);
	ret = ttm_bo_wait(bo, false, true, no_wait);
	spin_unlock(&bo->lock);
	if (likely(ret == 0))
		atomic_inc(&bo->cpu_writers);
	ttm_bo_unreserve(bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);

void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
{
	if (atomic_dec_and_test(&bo->cpu_writers))
		wake_up_all(&bo->event_queue);
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_release);

/**
 * A buffer object shrink method that tries to swap out the first
 * buffer object on the bo_global::swap_lru list.
 */

static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
{
	struct ttm_bo_global *glob =
	    container_of(shrink, struct ttm_bo_global, shrink);
	struct ttm_buffer_object *bo;
	int ret = -EBUSY;
	int put_count;
	uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);

	spin_lock(&glob->lru_lock);
	while (ret == -EBUSY) {
		if (unlikely(list_empty(&glob->swap_lru))) {
			spin_unlock(&glob->lru_lock);
			return -EBUSY;
		}

		bo = list_first_entry(&glob->swap_lru,
				      struct ttm_buffer_object, swap);
		kref_get(&bo->list_kref);

		/**
		 * Reserve buffer. Since we unlock while sleeping, we need
		 * to re-check that nobody removed us from the swap-list while
		 * we slept.
		 */

		ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
		if (unlikely(ret == -EBUSY)) {
			spin_unlock(&glob->lru_lock);
			ttm_bo_wait_unreserved(bo, false);
			kref_put(&bo->list_kref, ttm_bo_release_list);
			spin_lock(&glob->lru_lock);
		}
	}

	BUG_ON(ret != 0);
	put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);

	while (put_count--)
		kref_put(&bo->list_kref, ttm_bo_ref_bug);

	/**
	 * Wait for GPU, then move to system cached.
	 */

	spin_lock(&bo->lock);
	ret = ttm_bo_wait(bo, false, false, false);
	spin_unlock(&bo->lock);

	if (unlikely(ret != 0))
		goto out;

	if ((bo->mem.placement & swap_placement) != swap_placement) {
		struct ttm_mem_reg evict_mem;

		evict_mem = bo->mem;
		evict_mem.mm_node = NULL;
		evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
		evict_mem.mem_type = TTM_PL_SYSTEM;

		ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
					     false, false);
		if (unlikely(ret != 0))
			goto out;
	}

	ttm_bo_unmap_virtual(bo);

	/**
	 * Swap out. Buffer will be swapped in again as soon as
	 * anyone tries to access a ttm page.
	 */

	if (bo->bdev->driver->swap_notify)
		bo->bdev->driver->swap_notify(bo);

	ret = ttm_tt_swapout(bo->ttm, bo->persistant_swap_storage);
out:

	/**
	 * Unreserve without putting on LRU to avoid swapping out an
	 * already swapped buffer.
	 */

	atomic_set(&bo->reserved, 0);
	wake_up_all(&bo->event_queue);
	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
}

void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
{
	while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
		;
}
EXPORT_SYMBOL(ttm_bo_swapout_all);