static void
fenced_buffer_destroy_cpu_storage_locked(struct fenced_buffer *fenced_buf);

static enum pipe_error
fenced_buffer_create_cpu_storage_locked(struct fenced_manager *fenced_mgr,
                                        struct fenced_buffer *fenced_buf);

static void
fenced_buffer_destroy_gpu_storage_locked(struct fenced_buffer *fenced_buf);

static enum pipe_error
fenced_buffer_create_gpu_storage_locked(struct fenced_manager *fenced_mgr,
                                        struct fenced_buffer *fenced_buf,
                                        boolean wait);

static enum pipe_error
fenced_buffer_copy_storage_to_gpu_locked(struct fenced_buffer *fenced_buf);

static enum pipe_error
fenced_buffer_copy_storage_to_cpu_locked(struct fenced_buffer *fenced_buf);
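/*
 * Overview (editor's summary, inferred from the functions below): a fenced
 * buffer lives on exactly one of two lists. While it has no outstanding
 * fence it sits on fenced_mgr->unfenced; once a fence is attached it moves
 * to fenced_mgr->fenced until the fence expires. Its storage may live in
 * GPU memory (fenced_buf->buffer), in malloc'ed CPU memory
 * (fenced_buf->data), or be migrated between the two to relieve memory
 * pressure. All _locked() helpers assume the caller holds
 * fenced_mgr->mutex.
 */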
/**
 * Dump the fenced buffer list.
 *
 * Useful to understand failures to allocate buffers.
 */
static void
fenced_manager_dump_locked(struct fenced_manager *fenced_mgr)
{
   struct pb_fence_ops *ops = fenced_mgr->ops;
   struct list_head *curr, *next;
   struct fenced_buffer *fenced_buf;

   debug_printf("%10s %7s %8s %7s %10s %s\n",
                "buffer", "size", "refcount", "storage", "fence", "signalled");

   curr = fenced_mgr->unfenced.next;
   next = curr->next;
   while(curr != &fenced_mgr->unfenced) {
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
      assert(!fenced_buf->fence);
      debug_printf("%10p %7u %8u %7s\n",
                   (void *) fenced_buf,
                   fenced_buf->base.base.size,
                   p_atomic_read(&fenced_buf->base.base.reference.count),
                   fenced_buf->buffer ? "gpu" : (fenced_buf->data ? "cpu" : "none"));
      curr = next;
      next = curr->next;
   }

   curr = fenced_mgr->fenced.next;
   next = curr->next;
   while(curr != &fenced_mgr->fenced) {
      int signaled;
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
      assert(fenced_buf->buffer);
      signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
      debug_printf("%10p %7u %8u %7s %10p %s\n",
                   (void *) fenced_buf,
                   fenced_buf->base.base.size,
                   p_atomic_read(&fenced_buf->base.base.reference.count),
                   "gpu",
                   (void *) fenced_buf->fence,
                   signaled == 0 ? "y" : "n");
      curr = next;
      next = curr->next;
   }
}
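/*
 * Illustrative output (values invented for the sake of example):
 *
 *        buffer    size refcount storage      fence signalled
 *    0x7f3a9c10   16384        1     cpu
 *    0x7f3a9d80   65536        2     gpu   0x1c2d40 y
 */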
/**
 * Destroy the buffer, freeing both its storage and the structure itself.
 *
 * The caller must hold the manager's mutex.
 */
static INLINE void
fenced_buffer_destroy_locked(struct fenced_manager *fenced_mgr,
                             struct fenced_buffer *fenced_buf)
{
   assert(!pipe_is_referenced(&fenced_buf->base.base.reference));

   assert(!fenced_buf->fence);
   assert(fenced_buf->head.prev);
   assert(fenced_buf->head.next);
   LIST_DEL(&fenced_buf->head);
   assert(fenced_mgr->num_unfenced);
   --fenced_mgr->num_unfenced;

   fenced_buffer_destroy_gpu_storage_locked(fenced_buf);
   fenced_buffer_destroy_cpu_storage_locked(fenced_buf);

   FREE(fenced_buf);
}
/**
 * Add the buffer to the fenced list.
 *
 * Reference count should be incremented before calling this function.
 */
static INLINE void
fenced_buffer_add_locked(struct fenced_manager *fenced_mgr,
                         struct fenced_buffer *fenced_buf)
{
   assert(pipe_is_referenced(&fenced_buf->base.base.reference));
   assert(fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE);
   assert(fenced_buf->fence);

   p_atomic_inc(&fenced_buf->base.base.reference.count);

   LIST_DEL(&fenced_buf->head);
   assert(fenced_mgr->num_unfenced);
   --fenced_mgr->num_unfenced;
   LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->fenced);
   ++fenced_mgr->num_fenced;
}
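/*
 * Editor's note: the p_atomic_inc() above gives the fenced list its own
 * reference, so a buffer whose last user unreferences it while the GPU is
 * still using it stays alive until fenced_buffer_remove_locked() drops
 * this reference after the fence expires.
 */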
/**
 * Remove the buffer from the fenced list, and potentially destroy the buffer
 * if the reference count reaches zero.
 *
 * Returns TRUE if the buffer was destroyed.
 */
static INLINE boolean
fenced_buffer_remove_locked(struct fenced_manager *fenced_mgr,
                            struct fenced_buffer *fenced_buf)
{
   struct pb_fence_ops *ops = fenced_mgr->ops;

   assert(fenced_buf->fence);
   assert(fenced_buf->mgr == fenced_mgr);

   ops->fence_reference(ops, &fenced_buf->fence, NULL);
   fenced_buf->flags &= ~PIPE_BUFFER_USAGE_GPU_READ_WRITE;

   assert(fenced_buf->head.prev);
   assert(fenced_buf->head.next);

   LIST_DEL(&fenced_buf->head);
   assert(fenced_mgr->num_fenced);
   --fenced_mgr->num_fenced;

   LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->unfenced);
   ++fenced_mgr->num_unfenced;

   if (p_atomic_dec_zero(&fenced_buf->base.base.reference.count)) {
      fenced_buffer_destroy_locked(fenced_mgr, fenced_buf);
      return TRUE;
   }

   return FALSE;
}
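/*
 * Editor's note: a TRUE return means the buffer structure has been freed,
 * so callers iterating the fenced list must not dereference fenced_buf
 * afterwards; they should step through a pre-fetched next pointer instead,
 * as the loops in this file do.
 */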
/**
 * Wait for the fence to expire, and remove it from the fenced list.
 *
 * This function will release and re-acquire the mutex, so any copy of mutable
 * state must be discarded after calling it.
 */
static INLINE enum pipe_error
fenced_buffer_finish_locked(struct fenced_manager *fenced_mgr,
                            struct fenced_buffer *fenced_buf)
{
   struct pb_fence_ops *ops = fenced_mgr->ops;
   enum pipe_error ret = PIPE_ERROR;
   debug_warning("waiting for GPU");

   assert(pipe_is_referenced(&fenced_buf->base.base.reference));
   assert(fenced_buf->fence);

   if(fenced_buf->fence) {
      struct pipe_fence_handle *fence = NULL;
      int finished;
      boolean proceed;

      ops->fence_reference(ops, &fence, fenced_buf->fence);

      pipe_mutex_unlock(fenced_mgr->mutex);

      finished = ops->fence_finish(ops, fenced_buf->fence, 0);

      pipe_mutex_lock(fenced_mgr->mutex);

      assert(pipe_is_referenced(&fenced_buf->base.base.reference));

      /*
       * Only proceed if the fence object didn't change in the meanwhile.
       * Otherwise assume the work has been already carried out by another
       * thread that re-acquired the lock before us.
       */
      proceed = fence == fenced_buf->fence ? TRUE : FALSE;

      ops->fence_reference(ops, &fence, NULL);

      if(proceed && finished == 0) {
         /*
          * Remove from the fenced list.
          */
         boolean destroyed;

         destroyed = fenced_buffer_remove_locked(fenced_mgr, fenced_buf);

         /* TODO: remove subsequent buffers with the same fence? */

         assert(!destroyed);

         fenced_buf->flags &= ~PIPE_BUFFER_USAGE_GPU_READ_WRITE;

         ret = PIPE_OK;
      }
   }

   return ret;
}
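/*
 * Editor's sketch of the caller contract, since the mutex is dropped while
 * waiting (identifiers below mirror fenced_buffer_map()):
 *
 *    pipe_mutex_lock(fenced_mgr->mutex);
 *    while(buffer_is_busy(fenced_buf)) {
 *       fenced_buffer_finish_locked(fenced_mgr, fenced_buf);
 *       // mutex was released and re-acquired: re-read fenced_buf->fence,
 *       // fenced_buf->flags, etc. before deciding anything else
 *    }
 *    pipe_mutex_unlock(fenced_mgr->mutex);
 *
 * buffer_is_busy() is a placeholder for the caller's own re-checked
 * condition, not a function in this file.
 */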
/**
 * Remove as many fenced buffers from the fenced list as possible.
 *
 * Returns TRUE if at least one buffer was removed.
 */
static boolean
fenced_manager_check_signalled_locked(struct fenced_manager *fenced_mgr,
                                      boolean wait)
{
   struct pb_fence_ops *ops = fenced_mgr->ops;
   struct list_head *curr, *next;
   struct fenced_buffer *fenced_buf;
   struct pipe_fence_handle *prev_fence = NULL;
   boolean ret = FALSE;

   curr = fenced_mgr->fenced.next;
   next = curr->next;
   while(curr != &fenced_mgr->fenced) {
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);

      if(fenced_buf->fence != prev_fence) {
         int signaled;

         if(wait) {
            signaled = ops->fence_finish(ops, fenced_buf->fence, 0);

            /*
             * Don't return just now. Instead preemptively check if the
             * following buffers' fences already expired, without further waits.
             */
            wait = FALSE;
         }
         else {
            signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
         }

         if(signaled != 0)
            return ret;

         prev_fence = fenced_buf->fence;
      }
      else {
         /* This buffer's fence object is identical to the previous buffer's
          * fence object, so no need to check the fence again.
          */
         assert(ops->fence_signalled(ops, fenced_buf->fence, 0) == 0);
      }

      fenced_buffer_remove_locked(fenced_mgr, fenced_buf);
      ret = TRUE;

      curr = next;
      next = curr->next;
   }

   return ret;
}
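/*
 * Editor's note: buffers are appended to the fenced list in submission
 * order, so buffers sharing one fence sit next to each other; caching
 * prev_fence turns many fence queries per batch into one. The early
 * "return ret" likewise relies on fences expiring in submission order.
 */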
/**
 * Try to free some GPU memory by backing it up into CPU memory.
 *
 * Returns TRUE if at least one buffer was freed.
 */
static boolean
fenced_manager_free_gpu_storage_locked(struct fenced_manager *fenced_mgr)
{
   struct list_head *curr, *next;
   struct fenced_buffer *fenced_buf;

   curr = fenced_mgr->unfenced.next;
   next = curr->next;
   while(curr != &fenced_mgr->unfenced) {
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);

      /*
       * We can only move storage if the buffer is not mapped and not
       * currently in use by the GPU.
       */
      if(fenced_buf->buffer &&
         !fenced_buf->mapcount) {
         enum pipe_error ret;

         ret = fenced_buffer_create_cpu_storage_locked(fenced_mgr, fenced_buf);
         if(ret == PIPE_OK) {
            ret = fenced_buffer_copy_storage_to_cpu_locked(fenced_buf);
            if(ret == PIPE_OK) {
               fenced_buffer_destroy_gpu_storage_locked(fenced_buf);
               return TRUE;
            }
            fenced_buffer_destroy_cpu_storage_locked(fenced_buf);
         }
      }

      curr = next;
      next = curr->next;
   }

   return FALSE;
}
/**
 * Destroy CPU storage for this buffer.
 */
static void
fenced_buffer_destroy_cpu_storage_locked(struct fenced_buffer *fenced_buf)
{
   if(fenced_buf->data) {
      align_free(fenced_buf->data);
      fenced_buf->data = NULL;
      assert(fenced_buf->mgr->cpu_total_size >= fenced_buf->size);
      fenced_buf->mgr->cpu_total_size -= fenced_buf->size;
   }
}
/**
 * Create CPU storage for this buffer.
 */
static enum pipe_error
fenced_buffer_create_cpu_storage_locked(struct fenced_manager *fenced_mgr,
                                        struct fenced_buffer *fenced_buf)
{
   assert(!fenced_buf->data);

   if (fenced_mgr->cpu_total_size + fenced_buf->size > fenced_mgr->max_cpu_total_size)
      return PIPE_ERROR_OUT_OF_MEMORY;

   fenced_buf->data = align_malloc(fenced_buf->size, fenced_buf->desc.alignment);
   if(!fenced_buf->data)
      return PIPE_ERROR_OUT_OF_MEMORY;

   fenced_mgr->cpu_total_size += fenced_buf->size;

   return PIPE_OK;
}
/**
 * Destroy the GPU storage.
 */
static void
fenced_buffer_destroy_gpu_storage_locked(struct fenced_buffer *fenced_buf)
{
   if(fenced_buf->buffer) {
      pb_reference(&fenced_buf->buffer, NULL);
   }
}
/**
 * Try to create GPU storage for this buffer.
 *
 * This function is a shorthand around pb_manager::create_buffer for
 * fenced_buffer_create_gpu_storage_locked()'s benefit.
 */
static INLINE boolean
fenced_buffer_try_create_gpu_storage_locked(struct fenced_manager *fenced_mgr,
                                            struct fenced_buffer *fenced_buf)
{
   struct pb_manager *provider = fenced_mgr->provider;

   assert(!fenced_buf->buffer);

   fenced_buf->buffer = provider->create_buffer(fenced_mgr->provider,
                                                fenced_buf->size,
                                                &fenced_buf->desc);
   return fenced_buf->buffer ? TRUE : FALSE;
}
/**
 * Create GPU storage for this buffer.
 */
static enum pipe_error
fenced_buffer_create_gpu_storage_locked(struct fenced_manager *fenced_mgr,
                                        struct fenced_buffer *fenced_buf,
                                        boolean wait)
{
   assert(!fenced_buf->buffer);

   /*
    * Check for signaled buffers before trying to allocate.
    */
   fenced_manager_check_signalled_locked(fenced_mgr, FALSE);

   fenced_buffer_try_create_gpu_storage_locked(fenced_mgr, fenced_buf);

   /*
    * Keep trying while there is some sort of progress:
    * - fences are expiring,
    * - or buffers are being swapped out from GPU memory into CPU memory.
    */
   while(!fenced_buf->buffer &&
         (fenced_manager_check_signalled_locked(fenced_mgr, FALSE) ||
          fenced_manager_free_gpu_storage_locked(fenced_mgr))) {
      fenced_buffer_try_create_gpu_storage_locked(fenced_mgr, fenced_buf);
   }

   if(!fenced_buf->buffer && wait) {
      /*
       * Same as before, but this time around, wait to free buffers if
       * necessary.
       */
      while(!fenced_buf->buffer &&
            (fenced_manager_check_signalled_locked(fenced_mgr, TRUE) ||
             fenced_manager_free_gpu_storage_locked(fenced_mgr))) {
         fenced_buffer_try_create_gpu_storage_locked(fenced_mgr, fenced_buf);
      }
   }

   if(!fenced_buf->buffer) {
      fenced_manager_dump_locked(fenced_mgr);
      return PIPE_ERROR_OUT_OF_MEMORY;
   }

   return PIPE_OK;
}
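/*
 * Editor's note: allocation therefore degrades gracefully under pressure:
 * first reap already-signalled fences, then evict idle buffers to CPU
 * memory, and only when the caller allows it (wait == TRUE) stall on
 * outstanding fences before reporting out-of-memory.
 */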
/**
 * Copy the contents of the CPU storage into the GPU storage.
 */
static enum pipe_error
fenced_buffer_copy_storage_to_gpu_locked(struct fenced_buffer *fenced_buf)
{
   uint8_t *map;

   assert(fenced_buf->data);
   assert(fenced_buf->buffer);

   map = pb_map(fenced_buf->buffer, PIPE_BUFFER_USAGE_CPU_WRITE);
   if(!map)
      return PIPE_ERROR;

   memcpy(map, fenced_buf->data, fenced_buf->size);

   pb_unmap(fenced_buf->buffer);

   return PIPE_OK;
}
/**
 * Copy the contents of the GPU storage into the CPU storage.
 */
static enum pipe_error
fenced_buffer_copy_storage_to_cpu_locked(struct fenced_buffer *fenced_buf)
{
   const uint8_t *map;

   assert(fenced_buf->data);
   assert(fenced_buf->buffer);

   map = pb_map(fenced_buf->buffer, PIPE_BUFFER_USAGE_CPU_READ);
   if(!map)
      return PIPE_ERROR;

   memcpy(fenced_buf->data, map, fenced_buf->size);

   pb_unmap(fenced_buf->buffer);

   return PIPE_OK;
}
static void
fenced_buffer_destroy(struct pb_buffer *buf)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;

   assert(!pipe_is_referenced(&fenced_buf->base.base.reference));
   pipe_mutex_lock(fenced_mgr->mutex);

   fenced_buffer_destroy_locked(fenced_mgr, fenced_buf);

   pipe_mutex_unlock(fenced_mgr->mutex);
}
static void *
fenced_buffer_map(struct pb_buffer *buf,
                  unsigned flags)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;
   struct pb_fence_ops *ops = fenced_mgr->ops;
   void *map = NULL;

   pipe_mutex_lock(fenced_mgr->mutex);

   assert(!(flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE));
   /*
    * Serialize writes.
    */
   while((fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_WRITE) ||
         ((fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_READ) &&
          (flags & PIPE_BUFFER_USAGE_CPU_WRITE))) {

      /*
       * Don't wait for the GPU to finish accessing it, if blocking is forbidden.
       */
      if((flags & PIPE_BUFFER_USAGE_DONTBLOCK) &&
         ops->fence_signalled(ops, fenced_buf->fence, 0) != 0) {
         goto done;
      }

      if (flags & PIPE_BUFFER_USAGE_UNSYNCHRONIZED) {
         break;
      }

      /*
       * Wait for the GPU to finish accessing. This will release and re-acquire
       * the mutex, so all copies of mutable state must be discarded.
       */
      fenced_buffer_finish_locked(fenced_mgr, fenced_buf);
   }
   if(fenced_buf->buffer) {
      map = pb_map(fenced_buf->buffer, flags);
   }
   else {
      assert(fenced_buf->data);
      map = fenced_buf->data;
   }

   if(map) {
      ++fenced_buf->mapcount;
      fenced_buf->flags |= flags & PIPE_BUFFER_USAGE_CPU_READ_WRITE;
   }

done:
   pipe_mutex_unlock(fenced_mgr->mutex);

   return map;
}
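/*
 * Editor's sketch of how a caller might use the non-blocking map path
 * (mybuf and do_something_else() are illustrative, not part of this file):
 *
 *    void *ptr = pb_map(mybuf, PIPE_BUFFER_USAGE_CPU_WRITE |
 *                              PIPE_BUFFER_USAGE_DONTBLOCK);
 *    if(!ptr) {
 *       // GPU still busy with the buffer; try again later
 *       do_something_else();
 *    }
 */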
/**
 * Wrap a buffer in a fenced buffer.
 */
static struct pb_buffer *
fenced_bufmgr_create_buffer(struct pb_manager *mgr,
                            pb_size size,
                            const struct pb_desc *desc)
{
   struct fenced_manager *fenced_mgr = fenced_manager(mgr);
   struct fenced_buffer *fenced_buf;
   enum pipe_error ret;
   /*
    * Don't stall the GPU, waste time evicting buffers, or waste memory
    * trying to create a buffer that will most likely never fit into the
    * graphics aperture.
    */
   if(size > fenced_mgr->max_buffer_size) {
      goto no_buffer;
   }

   fenced_buf = CALLOC_STRUCT(fenced_buffer);
   if(!fenced_buf)
      goto no_buffer;

   pipe_reference_init(&fenced_buf->base.base.reference, 1);
   fenced_buf->base.base.alignment = desc->alignment;
   fenced_buf->base.base.usage = desc->usage;
   fenced_buf->base.base.size = size;
   fenced_buf->size = size;
   fenced_buf->desc = *desc;

   fenced_buf->base.vtbl = &fenced_buffer_vtbl;
   fenced_buf->mgr = fenced_mgr;

   pipe_mutex_lock(fenced_mgr->mutex);

   /*
    * Try to create GPU storage without stalling.
    */
   ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf, FALSE);

   /*
    * Attempt to use CPU memory to avoid stalling the GPU.
    */
   if(ret != PIPE_OK)
      ret = fenced_buffer_create_cpu_storage_locked(fenced_mgr, fenced_buf);

   /*
    * Create GPU storage, waiting for some to be available.
    */
   if(ret != PIPE_OK)
      ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf, TRUE);

   if(ret != PIPE_OK)
      goto no_storage;

   assert(fenced_buf->buffer || fenced_buf->data);

   LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->unfenced);
   ++fenced_mgr->num_unfenced;
   pipe_mutex_unlock(fenced_mgr->mutex);

   return &fenced_buf->base;

no_storage:
   pipe_mutex_unlock(fenced_mgr->mutex);
   FREE(fenced_buf);
no_buffer:
   return NULL;
}
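/*
 * Editor's note: the three-step fallback above (GPU storage without
 * stalling, then CPU storage, then GPU storage with stalls) keeps buffer
 * creation from blocking on the GPU except as a last resort before
 * failing the allocation.
 */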
static void
fenced_bufmgr_flush(struct pb_manager *mgr)
{
   struct fenced_manager *fenced_mgr = fenced_manager(mgr);

   pipe_mutex_lock(fenced_mgr->mutex);
   while(fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
      ;
   pipe_mutex_unlock(fenced_mgr->mutex);

   assert(fenced_mgr->provider->flush);
   if(fenced_mgr->provider->flush)
      fenced_mgr->provider->flush(fenced_mgr->provider);
}
static void
fenced_bufmgr_destroy(struct pb_manager *mgr)
{
   struct fenced_manager *fenced_mgr = fenced_manager(mgr);

   pipe_mutex_lock(fenced_mgr->mutex);

   /* Wait on outstanding fences */
   while (fenced_mgr->num_fenced) {
      pipe_mutex_unlock(fenced_mgr->mutex);
#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
      sched_yield();
#endif
      pipe_mutex_lock(fenced_mgr->mutex);
      while(fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
         ;
   }
   /*assert(!fenced_mgr->num_unfenced);*/
   pipe_mutex_unlock(fenced_mgr->mutex);
   pipe_mutex_destroy(fenced_mgr->mutex);

   if(fenced_mgr->provider)
      fenced_mgr->provider->destroy(fenced_mgr->provider);

   fenced_mgr->ops->destroy(fenced_mgr->ops);

   FREE(fenced_mgr);
}
struct pb_manager *
fenced_bufmgr_create(struct pb_manager *provider,
                     struct pb_fence_ops *ops,
                     pb_size max_buffer_size,
                     pb_size max_cpu_total_size)
{
   struct fenced_manager *fenced_mgr;

   fenced_mgr = CALLOC_STRUCT(fenced_manager);
   if(!fenced_mgr)
      return NULL;

   fenced_mgr->base.destroy = fenced_bufmgr_destroy;
   fenced_mgr->base.create_buffer = fenced_bufmgr_create_buffer;
   fenced_mgr->base.flush = fenced_bufmgr_flush;

   fenced_mgr->provider = provider;
   fenced_mgr->ops = ops;
   fenced_mgr->max_buffer_size = max_buffer_size;
   fenced_mgr->max_cpu_total_size = max_cpu_total_size;

   LIST_INITHEAD(&fenced_mgr->fenced);
   fenced_mgr->num_fenced = 0;

   LIST_INITHEAD(&fenced_mgr->unfenced);
   fenced_mgr->num_unfenced = 0;

   pipe_mutex_init(fenced_mgr->mutex);

   return &fenced_mgr->base;
}
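/*
 * Editor's sketch of typical usage. The provider and fence-ops values are
 * illustrative placeholders supplied by the winsys, not functions defined
 * in this file:
 *
 *    struct pb_manager *provider = winsys_create_buffer_manager();
 *    struct pb_fence_ops *ops = winsys_create_fence_ops();
 *    struct pb_manager *mgr =
 *       fenced_bufmgr_create(provider, ops,
 *                            16 * 1024 * 1024,    // max_buffer_size
 *                            256 * 1024 * 1024);  // max_cpu_total_size
 *
 *    struct pb_buffer *buf = mgr->create_buffer(mgr, size, &desc);
 *    ...
 *    mgr->flush(mgr);   // wait for all outstanding fences
 *    mgr->destroy(mgr); // also destroys the provider and fence ops
 */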