 	struct radeon_fence *fence;
 	struct radeon_ib *nib;
 	r = radeon_fence_create(rdev, &fence);
-		DRM_ERROR("failed to create fence for new IB\n");
+		dev_err(rdev->dev, "failed to create fence for new IB\n");
 	mutex_lock(&rdev->ib_pool.mutex);
-	i = find_first_zero_bit(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
-	if (i < RADEON_IB_POOL_SIZE) {
-		set_bit(i, rdev->ib_pool.alloc_bm);
-		rdev->ib_pool.ibs[i].length_dw = 0;
-		*ib = &rdev->ib_pool.ibs[i];
-		mutex_unlock(&rdev->ib_pool.mutex);
-	if (list_empty(&rdev->ib_pool.scheduled_ibs)) {
-		/* we go do nothings here */
-		mutex_unlock(&rdev->ib_pool.mutex);
-		DRM_ERROR("all IB allocated none scheduled.\n");
-	/* get the first ib on the scheduled list */
-	nib = list_entry(rdev->ib_pool.scheduled_ibs.next,
-			 struct radeon_ib, list);
-	if (nib->fence == NULL) {
-		/* we go do nothings here */
-		mutex_unlock(&rdev->ib_pool.mutex);
-		DRM_ERROR("IB %lu scheduled without a fence.\n", nib->idx);
-	mutex_unlock(&rdev->ib_pool.mutex);
-	r = radeon_fence_wait(nib->fence, false);
-		DRM_ERROR("radeon: IB(%lu:0x%016lX:%u)\n", nib->idx,
-			  (unsigned long)nib->gpu_addr, nib->length_dw);
-		DRM_ERROR("radeon: GPU lockup detected, fail to get a IB\n");
+	for (i = rdev->ib_pool.head_id, c = 0, nib = NULL; c < RADEON_IB_POOL_SIZE; c++, i++) {
+		i &= (RADEON_IB_POOL_SIZE - 1);
+		if (rdev->ib_pool.ibs[i].free) {
+			nib = &rdev->ib_pool.ibs[i];
+		/* This should never happen: it means we allocated all
+		 * IBs and haven't scheduled one yet; return EBUSY to
+		 * userspace hoping that on ioctl recall we get better
+		 * luck */
+		dev_err(rdev->dev, "no free indirect buffer!\n");
+		mutex_unlock(&rdev->ib_pool.mutex);
+		radeon_fence_unref(&fence);
+	rdev->ib_pool.head_id = (nib->idx + 1) & (RADEON_IB_POOL_SIZE - 1);
+	mutex_unlock(&rdev->ib_pool.mutex);
+	r = radeon_fence_wait(nib->fence, false);
+		dev_err(rdev->dev, "error waiting for fence of IB(%u:0x%016lX:%u)\n",
+			nib->idx, (unsigned long)nib->gpu_addr, nib->length_dw);
+		mutex_lock(&rdev->ib_pool.mutex);
+		mutex_unlock(&rdev->ib_pool.mutex);
+		radeon_fence_unref(&fence);
+	mutex_lock(&rdev->ib_pool.mutex);
 	radeon_fence_unref(&nib->fence);
 	nib->length_dw = 0;
-	/* scheduled list is accessed here */
-	mutex_lock(&rdev->ib_pool.mutex);
-	INIT_LIST_HEAD(&nib->list);
 	mutex_unlock(&rdev->ib_pool.mutex);
-	radeon_fence_unref(&fence);
-	(*ib)->fence = fence;
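The hunk above replaces the bitmap allocator (`find_first_zero_bit` over `alloc_bm`) and the `scheduled_ibs` list with a circular scan: starting at `head_id`, the loop takes the first IB whose `free` flag is set, and `head_id` is then parked one past the chosen slot, so successive allocations rotate through the pool and the oldest, most-likely-retired IBs are reused first. A minimal userspace sketch of that scan, with mock types and no locking (`radeon_ib_get` itself holds `ib_pool.mutex` around it); the pool size must stay a power of two for the mask to work:

#include <stdbool.h>
#include <stdio.h>

#define IB_POOL_SIZE 16			/* must be a power of two */

struct mock_ib { unsigned idx; bool free; };

struct mock_pool {
	struct mock_ib ibs[IB_POOL_SIZE];
	unsigned head_id;
};

/* Circular scan over the pool: start at head_id, take the first
 * free slot, then park head_id one past it so the next caller
 * continues the rotation. */
static struct mock_ib *pool_get(struct mock_pool *p)
{
	for (unsigned i = p->head_id, c = 0; c < IB_POOL_SIZE; c++, i++) {
		i &= IB_POOL_SIZE - 1;		/* wrap around */
		if (p->ibs[i].free) {
			p->ibs[i].free = false;
			p->head_id = (i + 1) & (IB_POOL_SIZE - 1);
			return &p->ibs[i];
		}
	}
	return NULL;	/* pool exhausted: the driver returns -EBUSY here */
}

int main(void)
{
	struct mock_pool pool = { .head_id = 0 };

	for (unsigned i = 0; i < IB_POOL_SIZE; i++)
		pool.ibs[i] = (struct mock_ib){ .idx = i, .free = true };

	printf("%u\n", pool_get(&pool)->idx);	/* 0 */
	printf("%u\n", pool_get(&pool)->idx);	/* 1 */
	return 0;
}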
 void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
 	if (tmp == NULL) {
-	mutex_lock(&rdev->ib_pool.mutex);
-	if (!list_empty(&tmp->list) && !radeon_fence_signaled(tmp->fence)) {
-		/* IB is scheduled & not signaled don't do anythings */
-		mutex_unlock(&rdev->ib_pool.mutex);
-	list_del(&tmp->list);
-	INIT_LIST_HEAD(&tmp->list);
+	if (!tmp->fence->emited)
 	radeon_fence_unref(&tmp->fence);
-	clear_bit(tmp->idx, rdev->ib_pool.alloc_bm);
+	mutex_lock(&rdev->ib_pool.mutex);
 	mutex_unlock(&rdev->ib_pool.mutex);
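Freeing is correspondingly simpler: no list manipulation, just fence bookkeeping. A fence that was never emitted to the ring is dropped immediately; an emitted fence stays attached to the IB so the allocator above can wait on it before recycling the slot. The hunk does not show the line between the final lock/unlock pair, but the allocator implies the slot's `free` flag is set there; that assumption is marked in this sketch with mock types ("emited" keeps the driver's field spelling):

#include <stdbool.h>
#include <stdlib.h>

struct mock_fence { bool emited; int refcount; };
struct mock_ib { struct mock_fence *fence; bool free; };

static void fence_unref(struct mock_fence **f)
{
	if (*f && --(*f)->refcount == 0)
		free(*f);
	*f = NULL;
}

/* Return an IB to the pool: a fence that was never emitted can be
 * dropped right away; an emitted one stays attached so the
 * allocator can wait on it before reusing this slot. */
static void ib_free(struct mock_ib *ib)
{
	if (ib->fence && !ib->fence->emited)
		fence_unref(&ib->fence);
	ib->free = true;	/* assumption: mirrors the update under ib_pool.mutex */
}

int main(void)
{
	struct mock_fence *f = calloc(1, sizeof(*f));
	struct mock_ib ib = { .fence = f, .free = false };

	if (!f)
		return 1;
	f->refcount = 1;	/* held by the IB, never emitted */
	ib_free(&ib);		/* fence freed, slot marked free */
	return ib.free && ib.fence == NULL ? 0 : 1;
}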
 	if (rdev->ib_pool.robj)
 	/* Allocate 1M object buffer */
-	INIT_LIST_HEAD(&rdev->ib_pool.scheduled_ibs);
-	r = radeon_object_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
-				 true, RADEON_GEM_DOMAIN_GTT,
-				 false, &rdev->ib_pool.robj);
+	r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
+			     true, RADEON_GEM_DOMAIN_GTT,
+			     &rdev->ib_pool.robj);
 		DRM_ERROR("radeon: failed to create ib pool (%d).\n", r);
-	r = radeon_object_pin(rdev->ib_pool.robj, RADEON_GEM_DOMAIN_GTT, &gpu_addr);
+	r = radeon_bo_reserve(rdev->ib_pool.robj, false);
+	if (unlikely(r != 0))
+	r = radeon_bo_pin(rdev->ib_pool.robj, RADEON_GEM_DOMAIN_GTT, &gpu_addr);
+		radeon_bo_unreserve(rdev->ib_pool.robj);
 		DRM_ERROR("radeon: failed to pin ib pool (%d).\n", r);
-	r = radeon_object_kmap(rdev->ib_pool.robj, &ptr);
+	r = radeon_bo_kmap(rdev->ib_pool.robj, &ptr);
+	radeon_bo_unreserve(rdev->ib_pool.robj);
 		DRM_ERROR("radeon: failed to map ib pool (%d).\n", r);
 		rdev->ib_pool.ibs[i].ptr = ptr + offset;
 		rdev->ib_pool.ibs[i].idx = i;
 		rdev->ib_pool.ibs[i].length_dw = 0;
-		INIT_LIST_HEAD(&rdev->ib_pool.ibs[i].list);
+		rdev->ib_pool.ibs[i].free = true;
-	bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
+	rdev->ib_pool.head_id = 0;
 	rdev->ib_pool.ready = true;
 	DRM_INFO("radeon: ib pool ready.\n");
 	if (radeon_debugfs_ib_init(rdev)) {
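The other change running through this hunk is mechanical but easy to get wrong: each `radeon_object_*` call becomes a `radeon_bo_*` call, and pin and kmap must now happen with the object reserved, with every failure path dropping the reservation before returning. The kmap result stays usable after the unreserve because the buffer remains pinned. A compilable sketch of that ladder; `mock_bo` and the `bo_*` stubs are placeholders, not the driver's real signatures:

#include <stdio.h>

struct mock_bo { char mem[64]; };

/* Stubs standing in for the reserve/pin/map calls. */
static int  bo_reserve(struct mock_bo *bo)		{ (void)bo; return 0; }
static void bo_unreserve(struct mock_bo *bo)		{ (void)bo; }
static int  bo_pin(struct mock_bo *bo, void **gpu)	{ *gpu = bo; return 0; }
static int  bo_kmap(struct mock_bo *bo, void **cpu)	{ *cpu = bo->mem; return 0; }

/* reserve -> pin -> kmap -> unreserve: each failure path drops the
 * reservation before returning (as in the hunk, a kmap failure
 * still unreserves; the mapping itself stays valid while pinned). */
static int bo_pin_and_map(struct mock_bo *bo, void **gpu, void **cpu)
{
	int r = bo_reserve(bo);

	if (r)
		return r;		/* nothing to undo yet */
	r = bo_pin(bo, gpu);
	if (r) {
		bo_unreserve(bo);
		return r;
	}
	r = bo_kmap(bo, cpu);
	bo_unreserve(bo);
	return r;
}

int main(void)
{
	struct mock_bo bo;
	void *gpu, *cpu;

	printf("pin+map: %d\n", bo_pin_and_map(&bo, &gpu, &cpu));
	return 0;
}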
 void radeon_ib_pool_fini(struct radeon_device *rdev)
 	if (!rdev->ib_pool.ready) {
 	mutex_lock(&rdev->ib_pool.mutex);
-	bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
 	if (rdev->ib_pool.robj) {
-		radeon_object_kunmap(rdev->ib_pool.robj);
-		radeon_object_unref(&rdev->ib_pool.robj);
+		r = radeon_bo_reserve(rdev->ib_pool.robj, false);
+		if (likely(r == 0)) {
+			radeon_bo_kunmap(rdev->ib_pool.robj);
+			radeon_bo_unpin(rdev->ib_pool.robj);
+			radeon_bo_unreserve(rdev->ib_pool.robj);
+		radeon_bo_unref(&rdev->ib_pool.robj);
 		rdev->ib_pool.robj = NULL;
 	mutex_unlock(&rdev->ib_pool.mutex);
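Teardown in `radeon_ib_pool_fini()` (and in `radeon_ring_fini()` below) mirrors the setup order: reserve, kunmap, unpin, unreserve, then drop the reference. Note the unref sits outside the `if (likely(r == 0))` block, so the object is released even when the reserve fails. The same pattern, again with stub functions rather than the real API:

#include <stddef.h>

struct mock_bo { int dummy; };

/* Stubs standing in for the radeon_bo_* teardown calls. */
static int  bo_reserve(struct mock_bo *bo)	{ (void)bo; return 0; }
static void bo_kunmap(struct mock_bo *bo)	{ (void)bo; }
static void bo_unpin(struct mock_bo *bo)	{ (void)bo; }
static void bo_unreserve(struct mock_bo *bo)	{ (void)bo; }
static void bo_unref(struct mock_bo **bo)	{ *bo = NULL; }

/* Mirror-image teardown: reserve, kunmap, unpin, unreserve, and
 * only then drop the reference.  The unref is unconditional, so
 * the object is not leaked even when the reserve fails. */
static void bo_teardown(struct mock_bo **bo)
{
	if (*bo == NULL)
		return;
	if (bo_reserve(*bo) == 0) {
		bo_kunmap(*bo);
		bo_unpin(*bo);
		bo_unreserve(*bo);
	}
	bo_unref(bo);
}

int main(void)
{
	static struct mock_bo obj;
	struct mock_bo *bo = &obj;

	bo_teardown(&bo);	/* bo is NULL afterwards */
	return bo == NULL ? 0 : 1;
}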
 	rdev->cp.ring_size = ring_size;
 	/* Allocate ring buffer */
 	if (rdev->cp.ring_obj == NULL) {
-		r = radeon_object_create(rdev, NULL, rdev->cp.ring_size,
-					 RADEON_GEM_DOMAIN_GTT,
-			DRM_ERROR("radeon: failed to create ring buffer (%d).\n", r);
-			mutex_unlock(&rdev->cp.mutex);
-		r = radeon_object_pin(rdev->cp.ring_obj,
-				      RADEON_GEM_DOMAIN_GTT,
-			DRM_ERROR("radeon: failed to pin ring buffer (%d).\n", r);
-			mutex_unlock(&rdev->cp.mutex);
-		r = radeon_object_kmap(rdev->cp.ring_obj,
+		r = radeon_bo_create(rdev, NULL, rdev->cp.ring_size, true,
+				     RADEON_GEM_DOMAIN_GTT,
+			dev_err(rdev->dev, "(%d) ring create failed\n", r);
+		r = radeon_bo_reserve(rdev->cp.ring_obj, false);
+		if (unlikely(r != 0))
+		r = radeon_bo_pin(rdev->cp.ring_obj, RADEON_GEM_DOMAIN_GTT,
+			radeon_bo_unreserve(rdev->cp.ring_obj);
+			dev_err(rdev->dev, "(%d) ring pin failed\n", r);
+		r = radeon_bo_kmap(rdev->cp.ring_obj,
 				   (void **)&rdev->cp.ring);
+		radeon_bo_unreserve(rdev->cp.ring_obj);
-			DRM_ERROR("radeon: failed to map ring buffer (%d).\n", r);
-			mutex_unlock(&rdev->cp.mutex);
+			dev_err(rdev->dev, "(%d) ring map failed\n", r);
 void radeon_ring_fini(struct radeon_device *rdev)
 	mutex_lock(&rdev->cp.mutex);
 	if (rdev->cp.ring_obj) {
-		radeon_object_kunmap(rdev->cp.ring_obj);
-		radeon_object_unpin(rdev->cp.ring_obj);
-		radeon_object_unref(&rdev->cp.ring_obj);
+		r = radeon_bo_reserve(rdev->cp.ring_obj, false);
+		if (likely(r == 0)) {
+			radeon_bo_kunmap(rdev->cp.ring_obj);
+			radeon_bo_unpin(rdev->cp.ring_obj);
+			radeon_bo_unreserve(rdev->cp.ring_obj);
+		radeon_bo_unref(&rdev->cp.ring_obj);
 		rdev->cp.ring = NULL;
 		rdev->cp.ring_obj = NULL;