~ubuntu-branches/ubuntu/lucid/mesa/lucid


Viewing changes to src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c

  • Committer: Bazaar Package Importer
  • Author(s): Timo Aaltonen, Robert Hooker
  • Date: 2010-02-19 15:52:12 UTC
  • mfrom: (3.1.10 experimental)
  • Revision ID: james.westby@ubuntu.com-20100219155212-blqov938av7m6exj
Tags: 7.7-3ubuntu1
[ Timo Aaltonen ]
* Merge from Debian experimental.

[ Robert Hooker ]
* Add 100_no_abi_tag.patch: Removes the ABI tag in /usr/lib/libGL.so.1
  which prevented ldconfig from using a libGL from another directory
  at a higher priority than the one in /usr/lib.
* Install libGL alternatives with libgl1-mesa-swx11 as well. (LP: #522048)

The diff is reproduced below in unified form: lines starting with "-" were removed, lines starting with "+" were added, and "@@ -N +M @@" marks where the diff resumes at line N of the old file and line M of the new file.
@@ -1 +1 @@
 /**************************************************************************
  *
- * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * Copyright 2007-2010 VMware, Inc.
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -18 +18 @@
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
@@ -28 +28 @@
 /**
  * \file
  * Implementation of fenced buffers.
- *
- * \author Jose Fonseca <jrfonseca-at-tungstengraphics-dot-com>
- * \author Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ *
+ * \author Jose Fonseca <jfonseca-at-vmware-dot-com>
+ * \author Thomas Hellström <thellstrom-at-vmware-dot-com>
  */


@@ -50 +50 @@

 #include "pb_buffer.h"
 #include "pb_buffer_fenced.h"
+#include "pb_bufmgr.h"



@@ -59 +60 @@
 #define SUPER(__derived) (&(__derived)->base)


-struct fenced_buffer_list
+struct fenced_manager
 {
+   struct pb_manager base;
+   struct pb_manager *provider;
+   struct pb_fence_ops *ops;
+
+   /**
+    * Maximum buffer size that can be safely allocated.
+    */
+   pb_size max_buffer_size;
+
+   /**
+    * Maximum cpu memory we can allocate before we start waiting for the
+    * GPU to idle.
+    */
+   pb_size max_cpu_total_size;
+
+   /**
+    * Following members are mutable and protected by this mutex.
+    */
    pipe_mutex mutex;
-
-   struct pb_fence_ops *ops;
-
-   pb_size numDelayed;
-   struct list_head delayed;
-
-#ifdef DEBUG
-   pb_size numUnfenced;
+
+   /**
+    * Fenced buffer list.
+    *
+    * All fenced buffers are placed in this listed, ordered from the oldest
+    * fence to the newest fence.
+    */
+   struct list_head fenced;
+   pb_size num_fenced;
+
    struct list_head unfenced;
-#endif
+   pb_size num_unfenced;
+
+   /**
+    * How much temporary CPU memory is being used to hold unvalidated buffers.
+    */
+   pb_size cpu_total_size;
 };


 /**
+ * Fenced buffer.
+ *
  * Wrapper around a pipe buffer which adds fencing and reference counting.
  */
 struct fenced_buffer
 {
+   /*
+    * Immutable members.
+    */
+
    struct pb_buffer base;
-
+   struct fenced_manager *mgr;
+
+   /*
+    * Following members are mutable and protected by fenced_manager::mutex.
+    */
+
+   struct list_head head;
+
+   /**
+    * Buffer with storage.
+    */
    struct pb_buffer *buffer;
+   pb_size size;
+   struct pb_desc desc;

-   /* FIXME: protect access with mutex */
+   /**
+    * Temporary CPU storage data. Used when there isn't enough GPU memory to
+    * store the buffer.
+    */
+   void *data;

    /**
     * A bitmask of PIPE_BUFFER_USAGE_CPU/GPU_READ/WRITE describing the current
@@ -93 +141 @@
    unsigned flags;

    unsigned mapcount;
+
    struct pb_validate *vl;
    unsigned validation_flags;
+
    struct pipe_fence_handle *fence;
-
-   struct list_head head;
-   struct fenced_buffer_list *list;
 };


+static INLINE struct fenced_manager *
+fenced_manager(struct pb_manager *mgr)
+{
+   assert(mgr);
+   return (struct fenced_manager *)mgr;
+}
+
+
 static INLINE struct fenced_buffer *
 fenced_buffer(struct pb_buffer *buf)
 {
@@ -110 +165 @@
 }


-static INLINE void
-_fenced_buffer_add(struct fenced_buffer *fenced_buf)
-{
-   struct fenced_buffer_list *fenced_list = fenced_buf->list;
-
+static void
+fenced_buffer_destroy_cpu_storage_locked(struct fenced_buffer *fenced_buf);
+
+static enum pipe_error
+fenced_buffer_create_cpu_storage_locked(struct fenced_manager *fenced_mgr,
+                                        struct fenced_buffer *fenced_buf);
+
+static void
+fenced_buffer_destroy_gpu_storage_locked(struct fenced_buffer *fenced_buf);
+
+static enum pipe_error
+fenced_buffer_create_gpu_storage_locked(struct fenced_manager *fenced_mgr,
+                                        struct fenced_buffer *fenced_buf,
+                                        boolean wait);
+
+static enum pipe_error
+fenced_buffer_copy_storage_to_gpu_locked(struct fenced_buffer *fenced_buf);
+
+static enum pipe_error
+fenced_buffer_copy_storage_to_cpu_locked(struct fenced_buffer *fenced_buf);
+
+
+/**
+ * Dump the fenced buffer list.
+ *
+ * Useful to understand failures to allocate buffers.
+ */
+static void
+fenced_manager_dump_locked(struct fenced_manager *fenced_mgr)
+{
+#ifdef DEBUG
+   struct pb_fence_ops *ops = fenced_mgr->ops;
+   struct list_head *curr, *next;
+   struct fenced_buffer *fenced_buf;
+
+   debug_printf("%10s %7s %8s %7s %10s %s\n",
+                "buffer", "size", "refcount", "storage", "fence", "signalled");
+
+   curr = fenced_mgr->unfenced.next;
+   next = curr->next;
+   while(curr != &fenced_mgr->unfenced) {
+      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
+      assert(!fenced_buf->fence);
+      debug_printf("%10p %7u %8u %7s\n",
+                   (void *) fenced_buf,
+                   fenced_buf->base.base.size,
+                   p_atomic_read(&fenced_buf->base.base.reference.count),
+                   fenced_buf->buffer ? "gpu" : (fenced_buf->data ? "cpu" : "none"));
+      curr = next;
+      next = curr->next;
+   }
+
+   curr = fenced_mgr->fenced.next;
+   next = curr->next;
+   while(curr != &fenced_mgr->fenced) {
+      int signaled;
+      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
+      assert(fenced_buf->buffer);
+      signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
+      debug_printf("%10p %7u %8u %7s %10p %s\n",
+                   (void *) fenced_buf,
+                   fenced_buf->base.base.size,
+                   p_atomic_read(&fenced_buf->base.base.reference.count),
+                   "gpu",
+                   (void *) fenced_buf->fence,
+                   signaled == 0 ? "y" : "n");
+      curr = next;
+      next = curr->next;
+   }
+#else
+   (void)fenced_mgr;
+#endif
+}
+
+
+static INLINE void
+fenced_buffer_destroy_locked(struct fenced_manager *fenced_mgr,
+                             struct fenced_buffer *fenced_buf)
+{
+   assert(!pipe_is_referenced(&fenced_buf->base.base.reference));
+
+   assert(!fenced_buf->fence);
+   assert(fenced_buf->head.prev);
+   assert(fenced_buf->head.next);
+   LIST_DEL(&fenced_buf->head);
+   assert(fenced_mgr->num_unfenced);
+   --fenced_mgr->num_unfenced;
+
+   fenced_buffer_destroy_gpu_storage_locked(fenced_buf);
+   fenced_buffer_destroy_cpu_storage_locked(fenced_buf);
+
+   FREE(fenced_buf);
+}
+
+
+/**
+ * Add the buffer to the fenced list.
+ *
+ * Reference count should be incremented before calling this function.
+ */
+static INLINE void
+fenced_buffer_add_locked(struct fenced_manager *fenced_mgr,
+                         struct fenced_buffer *fenced_buf)
+{
    assert(pipe_is_referenced(&fenced_buf->base.base.reference));
    assert(fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE);
    assert(fenced_buf->fence);

-#ifdef DEBUG
+   p_atomic_inc(&fenced_buf->base.base.reference.count);
+
    LIST_DEL(&fenced_buf->head);
-   assert(fenced_list->numUnfenced);
-   --fenced_list->numUnfenced;
-#endif
-   LIST_ADDTAIL(&fenced_buf->head, &fenced_list->delayed);
-   ++fenced_list->numDelayed;
+   assert(fenced_mgr->num_unfenced);
+   --fenced_mgr->num_unfenced;
+   LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->fenced);
+   ++fenced_mgr->num_fenced;
 }


 /**
- * Actually destroy the buffer.
+ * Remove the buffer from the fenced list, and potentially destroy the buffer
+ * if the reference count reaches zero.
+ *
+ * Returns TRUE if the buffer was detroyed.
  */
-static INLINE void
-_fenced_buffer_destroy(struct fenced_buffer *fenced_buf)
-{
-   struct fenced_buffer_list *fenced_list = fenced_buf->list;
-
-   assert(!pipe_is_referenced(&fenced_buf->base.base.reference));
-   assert(!fenced_buf->fence);
-#ifdef DEBUG
-   assert(fenced_buf->head.prev);
-   assert(fenced_buf->head.next);
-   LIST_DEL(&fenced_buf->head);
-   assert(fenced_list->numUnfenced);
-   --fenced_list->numUnfenced;
-#else
-   (void)fenced_list;
-#endif
-   pb_reference(&fenced_buf->buffer, NULL);
-   FREE(fenced_buf);
-}
-
-
-static INLINE void
-_fenced_buffer_remove(struct fenced_buffer_list *fenced_list,
-                      struct fenced_buffer *fenced_buf)
-{
-   struct pb_fence_ops *ops = fenced_list->ops;
+static INLINE boolean
+fenced_buffer_remove_locked(struct fenced_manager *fenced_mgr,
+                            struct fenced_buffer *fenced_buf)
+{
+   struct pb_fence_ops *ops = fenced_mgr->ops;

    assert(fenced_buf->fence);
-   assert(fenced_buf->list == fenced_list);
-
+   assert(fenced_buf->mgr == fenced_mgr);
+
    ops->fence_reference(ops, &fenced_buf->fence, NULL);
    fenced_buf->flags &= ~PIPE_BUFFER_USAGE_GPU_READ_WRITE;
-
+
    assert(fenced_buf->head.prev);
    assert(fenced_buf->head.next);
-
+
    LIST_DEL(&fenced_buf->head);
-   assert(fenced_list->numDelayed);
-   --fenced_list->numDelayed;
-
-#ifdef DEBUG
-   LIST_ADDTAIL(&fenced_buf->head, &fenced_list->unfenced);
-   ++fenced_list->numUnfenced;
-#endif
-
-   /**
-    * FIXME!!!
-    */
-
-   if(!pipe_is_referenced(&fenced_buf->base.base.reference))
-      _fenced_buffer_destroy(fenced_buf);
+   assert(fenced_mgr->num_fenced);
+   --fenced_mgr->num_fenced;
+
+   LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->unfenced);
+   ++fenced_mgr->num_unfenced;
+
+   if (p_atomic_dec_zero(&fenced_buf->base.base.reference.count)) {
+      fenced_buffer_destroy_locked(fenced_mgr, fenced_buf);
+      return TRUE;
+   }
+
+   return FALSE;
 }


+/**
+ * Wait for the fence to expire, and remove it from the fenced list.
+ *
+ * This function will release and re-aquire the mutex, so any copy of mutable
+ * state must be discarded after calling it.
+ */
 static INLINE enum pipe_error
-_fenced_buffer_finish(struct fenced_buffer *fenced_buf)
+fenced_buffer_finish_locked(struct fenced_manager *fenced_mgr,
+                            struct fenced_buffer *fenced_buf)
 {
-   struct fenced_buffer_list *fenced_list = fenced_buf->list;
-   struct pb_fence_ops *ops = fenced_list->ops;
+   struct pb_fence_ops *ops = fenced_mgr->ops;
+   enum pipe_error ret = PIPE_ERROR;

 #if 0
    debug_warning("waiting for GPU");
 #endif

+   assert(pipe_is_referenced(&fenced_buf->base.base.reference));
    assert(fenced_buf->fence);
+
    if(fenced_buf->fence) {
-      if(ops->fence_finish(ops, fenced_buf->fence, 0) != 0) {
-         return PIPE_ERROR;
+      struct pipe_fence_handle *fence = NULL;
+      int finished;
+      boolean proceed;
+
+      ops->fence_reference(ops, &fence, fenced_buf->fence);
+
+      pipe_mutex_unlock(fenced_mgr->mutex);
+
+      finished = ops->fence_finish(ops, fenced_buf->fence, 0);
+
+      pipe_mutex_lock(fenced_mgr->mutex);
+
+      assert(pipe_is_referenced(&fenced_buf->base.base.reference));
+
+      /*
+       * Only proceed if the fence object didn't change in the meanwhile.
+       * Otherwise assume the work has been already carried out by another
+       * thread that re-aquired the lock before us.
+       */
+      proceed = fence == fenced_buf->fence ? TRUE : FALSE;
+
+      ops->fence_reference(ops, &fence, NULL);
+
+      if(proceed && finished == 0) {
+         /*
+          * Remove from the fenced list
+          */
+
+         boolean destroyed;
+
+         destroyed = fenced_buffer_remove_locked(fenced_mgr, fenced_buf);
+
+         /* TODO: remove consequents buffers with the same fence? */
+
+         assert(!destroyed);
+
+         fenced_buf->flags &= ~PIPE_BUFFER_USAGE_GPU_READ_WRITE;
+
+         ret = PIPE_OK;
       }
-      /* Remove from the fenced list */
-      /* TODO: remove consequents */
-      _fenced_buffer_remove(fenced_list, fenced_buf);
    }

-   fenced_buf->flags &= ~PIPE_BUFFER_USAGE_GPU_READ_WRITE;
-   return PIPE_OK;
+   return ret;
 }


 /**
- * Free as many fenced buffers from the list head as possible.
+ * Remove as many fenced buffers from the fenced list as possible.
+ *
+ * Returns TRUE if at least one buffer was removed.
  */
-static void
-_fenced_buffer_list_check_free(struct fenced_buffer_list *fenced_list,
-                               int wait)
+static boolean
+fenced_manager_check_signalled_locked(struct fenced_manager *fenced_mgr,
+                                      boolean wait)
 {
-   struct pb_fence_ops *ops = fenced_list->ops;
+   struct pb_fence_ops *ops = fenced_mgr->ops;
    struct list_head *curr, *next;
    struct fenced_buffer *fenced_buf;
    struct pipe_fence_handle *prev_fence = NULL;
+   boolean ret = FALSE;

-   curr = fenced_list->delayed.next;
+   curr = fenced_mgr->fenced.next;
    next = curr->next;
-   while(curr != &fenced_list->delayed) {
+   while(curr != &fenced_mgr->fenced) {
       fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);

       if(fenced_buf->fence != prev_fence) {
          int signaled;
-         if (wait)
+
+         if (wait) {
             signaled = ops->fence_finish(ops, fenced_buf->fence, 0);
-         else
+
+            /*
+             * Don't return just now. Instead preemptively check if the
+             * following buffers' fences already expired, without further waits.
+             */
+            wait = FALSE;
+         }
+         else {
             signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
-         if (signaled != 0)
-            break;
+         }
+
+         if (signaled != 0) {
+            return ret;
+         }
+
          prev_fence = fenced_buf->fence;
       }
       else {
+         /* This buffer's fence object is identical to the previous buffer's
+          * fence object, so no need to check the fence again.
+          */
          assert(ops->fence_signalled(ops, fenced_buf->fence, 0) == 0);
       }

-      _fenced_buffer_remove(fenced_list, fenced_buf);
-
-      curr = next;
-      next = curr->next;
-   }
+      fenced_buffer_remove_locked(fenced_mgr, fenced_buf);
+
+      ret = TRUE;
+
+      curr = next;
+      next = curr->next;
+   }
+
+   return ret;
+}
+
+
+/**
+ * Try to free some GPU memory by backing it up into CPU memory.
+ *
+ * Returns TRUE if at least one buffer was freed.
+ */
+static boolean
+fenced_manager_free_gpu_storage_locked(struct fenced_manager *fenced_mgr)
+{
+   struct list_head *curr, *next;
+   struct fenced_buffer *fenced_buf;
+
+   curr = fenced_mgr->unfenced.next;
+   next = curr->next;
+   while(curr != &fenced_mgr->unfenced) {
+      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
+
+      /*
+       * We can only move storage if the buffer is not mapped and not
+       * validated.
+       */
+      if(fenced_buf->buffer &&
+         !fenced_buf->mapcount &&
+         !fenced_buf->vl) {
+         enum pipe_error ret;
+
+         ret = fenced_buffer_create_cpu_storage_locked(fenced_mgr, fenced_buf);
+         if(ret == PIPE_OK) {
+            ret = fenced_buffer_copy_storage_to_cpu_locked(fenced_buf);
+            if(ret == PIPE_OK) {
+               fenced_buffer_destroy_gpu_storage_locked(fenced_buf);
+               return TRUE;
+            }
+            fenced_buffer_destroy_cpu_storage_locked(fenced_buf);
+         }
+      }
+
+      curr = next;
+      next = curr->next;
+   }
+
+   return FALSE;
+}
+
+
+/**
+ * Destroy CPU storage for this buffer.
+ */
+static void
+fenced_buffer_destroy_cpu_storage_locked(struct fenced_buffer *fenced_buf)
+{
+   if(fenced_buf->data) {
+      align_free(fenced_buf->data);
+      fenced_buf->data = NULL;
+      assert(fenced_buf->mgr->cpu_total_size >= fenced_buf->size);
+      fenced_buf->mgr->cpu_total_size -= fenced_buf->size;
+   }
+}
+
+
+/**
+ * Create CPU storage for this buffer.
+ */
+static enum pipe_error
+fenced_buffer_create_cpu_storage_locked(struct fenced_manager *fenced_mgr,
+                                        struct fenced_buffer *fenced_buf)
+{
+   assert(!fenced_buf->data);
+   if(fenced_buf->data)
+      return PIPE_OK;
+
+   if (fenced_mgr->cpu_total_size + fenced_buf->size > fenced_mgr->max_cpu_total_size)
+      return PIPE_ERROR_OUT_OF_MEMORY;
+
+   fenced_buf->data = align_malloc(fenced_buf->size, fenced_buf->desc.alignment);
+   if(!fenced_buf->data)
+      return PIPE_ERROR_OUT_OF_MEMORY;
+
+   fenced_mgr->cpu_total_size += fenced_buf->size;
+
+   return PIPE_OK;
+}
+
+
+/**
+ * Destroy the GPU storage.
+ */
+static void
+fenced_buffer_destroy_gpu_storage_locked(struct fenced_buffer *fenced_buf)
+{
+   if(fenced_buf->buffer) {
+      pb_reference(&fenced_buf->buffer, NULL);
+   }
+}
+
+
+/**
+ * Try to create GPU storage for this buffer.
+ *
+ * This function is a shorthand around pb_manager::create_buffer for
+ * fenced_buffer_create_gpu_storage_locked()'s benefit.
+ */
+static INLINE boolean
+fenced_buffer_try_create_gpu_storage_locked(struct fenced_manager *fenced_mgr,
+                                            struct fenced_buffer *fenced_buf)
+{
+   struct pb_manager *provider = fenced_mgr->provider;
+
+   assert(!fenced_buf->buffer);
+
+   fenced_buf->buffer = provider->create_buffer(fenced_mgr->provider,
+                                                fenced_buf->size,
+                                                &fenced_buf->desc);
+   return fenced_buf->buffer ? TRUE : FALSE;
+}
+
+
+/**
+ * Create GPU storage for this buffer.
+ */
+static enum pipe_error
+fenced_buffer_create_gpu_storage_locked(struct fenced_manager *fenced_mgr,
+                                        struct fenced_buffer *fenced_buf,
+                                        boolean wait)
+{
+   assert(!fenced_buf->buffer);
+
+   /*
+    * Check for signaled buffers before trying to allocate.
+    */
+   fenced_manager_check_signalled_locked(fenced_mgr, FALSE);
+
+   fenced_buffer_try_create_gpu_storage_locked(fenced_mgr, fenced_buf);
+
+   /*
+    * Keep trying while there is some sort of progress:
+    * - fences are expiring,
+    * - or buffers are being being swapped out from GPU memory into CPU memory.
+    */
+   while(!fenced_buf->buffer &&
+         (fenced_manager_check_signalled_locked(fenced_mgr, FALSE) ||
+          fenced_manager_free_gpu_storage_locked(fenced_mgr))) {
+      fenced_buffer_try_create_gpu_storage_locked(fenced_mgr, fenced_buf);
+   }
+
+   if(!fenced_buf->buffer && wait) {
+      /*
+       * Same as before, but this time around, wait to free buffers if
+       * necessary.
+       */
+      while(!fenced_buf->buffer &&
+            (fenced_manager_check_signalled_locked(fenced_mgr, TRUE) ||
+             fenced_manager_free_gpu_storage_locked(fenced_mgr))) {
+         fenced_buffer_try_create_gpu_storage_locked(fenced_mgr, fenced_buf);
+      }
+   }
+
+   if(!fenced_buf->buffer) {
+      if(0)
+         fenced_manager_dump_locked(fenced_mgr);
+
+      /* give up */
+      return PIPE_ERROR_OUT_OF_MEMORY;
+   }
+
+   return PIPE_OK;
+}
+
+
+static enum pipe_error
+fenced_buffer_copy_storage_to_gpu_locked(struct fenced_buffer *fenced_buf)
+{
+   uint8_t *map;
+
+   assert(fenced_buf->data);
+   assert(fenced_buf->buffer);
+
+   map = pb_map(fenced_buf->buffer, PIPE_BUFFER_USAGE_CPU_WRITE);
+   if(!map)
+      return PIPE_ERROR;
+
+   memcpy(map, fenced_buf->data, fenced_buf->size);
+
+   pb_unmap(fenced_buf->buffer);
+
+   return PIPE_OK;
+}
+
+
+static enum pipe_error
+fenced_buffer_copy_storage_to_cpu_locked(struct fenced_buffer *fenced_buf)
+{
+   const uint8_t *map;
+
+   assert(fenced_buf->data);
+   assert(fenced_buf->buffer);
+
+   map = pb_map(fenced_buf->buffer, PIPE_BUFFER_USAGE_CPU_READ);
+   if(!map)
+      return PIPE_ERROR;
+
+   memcpy(fenced_buf->data, map, fenced_buf->size);
+
+   pb_unmap(fenced_buf->buffer);
+
+   return PIPE_OK;
 }


 static void
 fenced_buffer_destroy(struct pb_buffer *buf)
 {
-   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
-   struct fenced_buffer_list *fenced_list = fenced_buf->list;
+   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
+   struct fenced_manager *fenced_mgr = fenced_buf->mgr;

-   pipe_mutex_lock(fenced_list->mutex);
    assert(!pipe_is_referenced(&fenced_buf->base.base.reference));
-   if (fenced_buf->fence) {
-      struct pb_fence_ops *ops = fenced_list->ops;
-      if(ops->fence_signalled(ops, fenced_buf->fence, 0) == 0) {
-         struct list_head *curr, *prev;
-         curr = &fenced_buf->head;
-         prev = curr->prev;
-         do {
-            fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
-            assert(ops->fence_signalled(ops, fenced_buf->fence, 0) == 0);
-            _fenced_buffer_remove(fenced_list, fenced_buf);
-            curr = prev;
-            prev = curr->prev;
-         } while (curr != &fenced_list->delayed);
-      }
-      else {
-         /* delay destruction */
-      }
-   }
-   else {
-      _fenced_buffer_destroy(fenced_buf);
-   }
-   pipe_mutex_unlock(fenced_list->mutex);
+
+   pipe_mutex_lock(fenced_mgr->mutex);
+
+   fenced_buffer_destroy_locked(fenced_mgr, fenced_buf);
+
+   pipe_mutex_unlock(fenced_mgr->mutex);
 }


 static void *
-fenced_buffer_map(struct pb_buffer *buf,
+fenced_buffer_map(struct pb_buffer *buf,
                   unsigned flags)
 {
    struct fenced_buffer *fenced_buf = fenced_buffer(buf);
-   struct fenced_buffer_list *fenced_list = fenced_buf->list;
-   struct pb_fence_ops *ops = fenced_list->ops;
-   void *map;
+   struct fenced_manager *fenced_mgr = fenced_buf->mgr;
+   struct pb_fence_ops *ops = fenced_mgr->ops;
+   void *map = NULL;
+
+   pipe_mutex_lock(fenced_mgr->mutex);

    assert(!(flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE));
-
-   /* Serialize writes */
-   if((fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_WRITE) ||
-      ((fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_READ) && (flags & PIPE_BUFFER_USAGE_CPU_WRITE))) {
-      if(flags & PIPE_BUFFER_USAGE_DONTBLOCK) {
-         /* Don't wait for the GPU to finish writing */
-         if(ops->fence_signalled(ops, fenced_buf->fence, 0) == 0)
-            _fenced_buffer_remove(fenced_list, fenced_buf);
-         else
-            return NULL;
-      }
-      else {
-         /* Wait for the GPU to finish writing */
-         _fenced_buffer_finish(fenced_buf);
-      }
-   }
-
-#if 0
-   /* Check for CPU write access (read is OK) */
-   if(fenced_buf->flags & PIPE_BUFFER_USAGE_CPU_READ_WRITE) {
-      /* this is legal -- just for debugging */
-      debug_warning("concurrent CPU writes");
-   }
-#endif
-
-   map = pb_map(fenced_buf->buffer, flags);
+
+   /*
+    * Serialize writes.
+    */
+   while((fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_WRITE) ||
+         ((fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_READ) &&
+          (flags & PIPE_BUFFER_USAGE_CPU_WRITE))) {
+
+      /*
+       * Don't wait for the GPU to finish accessing it, if blocking is forbidden.
+       */
+      if((flags & PIPE_BUFFER_USAGE_DONTBLOCK) &&
+          ops->fence_signalled(ops, fenced_buf->fence, 0) == 0) {
+         goto done;
+      }
+
+      if (flags & PIPE_BUFFER_USAGE_UNSYNCHRONIZED) {
+         break;
+      }
+
+      /*
+       * Wait for the GPU to finish accessing. This will release and re-acquire
+       * the mutex, so all copies of mutable state must be discarded.
+       */
+      fenced_buffer_finish_locked(fenced_mgr, fenced_buf);
+   }
+
+   if(fenced_buf->buffer) {
+      map = pb_map(fenced_buf->buffer, flags);
+   }
+   else {
+      assert(fenced_buf->data);
+      map = fenced_buf->data;
+   }
+
    if(map) {
       ++fenced_buf->mapcount;
       fenced_buf->flags |= flags & PIPE_BUFFER_USAGE_CPU_READ_WRITE;
    }

+done:
+   pipe_mutex_unlock(fenced_mgr->mutex);
+
    return map;
 }

@@ -332 +735 @@
 fenced_buffer_unmap(struct pb_buffer *buf)
 {
    struct fenced_buffer *fenced_buf = fenced_buffer(buf);
+   struct fenced_manager *fenced_mgr = fenced_buf->mgr;
+
+   pipe_mutex_lock(fenced_mgr->mutex);
+
    assert(fenced_buf->mapcount);
    if(fenced_buf->mapcount) {
-      pb_unmap(fenced_buf->buffer);
+      if (fenced_buf->buffer)
+         pb_unmap(fenced_buf->buffer);
       --fenced_buf->mapcount;
       if(!fenced_buf->mapcount)
          fenced_buf->flags &= ~PIPE_BUFFER_USAGE_CPU_READ_WRITE;
    }
+
+   pipe_mutex_unlock(fenced_mgr->mutex);
 }

@@ -348 +758 @@
                        unsigned flags)
 {
    struct fenced_buffer *fenced_buf = fenced_buffer(buf);
+   struct fenced_manager *fenced_mgr = fenced_buf->mgr;
    enum pipe_error ret;
-
+
+   pipe_mutex_lock(fenced_mgr->mutex);
+
    if(!vl) {
       /* invalidate */
       fenced_buf->vl = NULL;
       fenced_buf->validation_flags = 0;
-      return PIPE_OK;
+      ret = PIPE_OK;
+      goto done;
    }
-
+
    assert(flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE);
    assert(!(flags & ~PIPE_BUFFER_USAGE_GPU_READ_WRITE));
    flags &= PIPE_BUFFER_USAGE_GPU_READ_WRITE;

-   /* Buffer cannot be validated in two different lists */
-   if(fenced_buf->vl && fenced_buf->vl != vl)
-      return PIPE_ERROR_RETRY;
-
-#if 0
-   /* Do not validate if buffer is still mapped */
-   if(fenced_buf->flags & PIPE_BUFFER_USAGE_CPU_READ_WRITE) {
-      /* TODO: wait for the thread that mapped the buffer to unmap it */
-      return PIPE_ERROR_RETRY;
+   /* Buffer cannot be validated in two different lists */
+   if(fenced_buf->vl && fenced_buf->vl != vl) {
+      ret = PIPE_ERROR_RETRY;
+      goto done;
    }
-   /* Final sanity checking */
-   assert(!(fenced_buf->flags & PIPE_BUFFER_USAGE_CPU_READ_WRITE));
-   assert(!fenced_buf->mapcount);
-#endif

    if(fenced_buf->vl == vl &&
       (fenced_buf->validation_flags & flags) == flags) {
       /* Nothing to do -- buffer already validated */
-      return PIPE_OK;
-   }
-
+      ret = PIPE_OK;
+      goto done;
+   }
+
+   /*
+    * Create and update GPU storage.
+    */
+   if(!fenced_buf->buffer) {
+      assert(!fenced_buf->mapcount);
+
+      ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf, TRUE);
+      if(ret != PIPE_OK) {
+         goto done;
+      }
+
+      ret = fenced_buffer_copy_storage_to_gpu_locked(fenced_buf);
+      if(ret != PIPE_OK) {
+         fenced_buffer_destroy_gpu_storage_locked(fenced_buf);
+         goto done;
+      }
+
+      if(fenced_buf->mapcount) {
+         debug_printf("warning: validating a buffer while it is still mapped\n");
+      }
+      else {
+         fenced_buffer_destroy_cpu_storage_locked(fenced_buf);
+      }
+   }
+
    ret = pb_validate(fenced_buf->buffer, vl, flags);
    if (ret != PIPE_OK)
-      return ret;
-
+      goto done;
+
    fenced_buf->vl = vl;
    fenced_buf->validation_flags |= flags;
-
-   return PIPE_OK;
+
+done:
+   pipe_mutex_unlock(fenced_mgr->mutex);
+
+   return ret;
 }


@@ -397 +831 @@
 fenced_buffer_fence(struct pb_buffer *buf,
                     struct pipe_fence_handle *fence)
 {
-   struct fenced_buffer *fenced_buf;
-   struct fenced_buffer_list *fenced_list;
-   struct pb_fence_ops *ops;
-
-   fenced_buf = fenced_buffer(buf);
-   fenced_list = fenced_buf->list;
-   ops = fenced_list->ops;
-
-   if(fence == fenced_buf->fence) {
-      /* Nothing to do */
-      return;
-   }
-
-   assert(fenced_buf->vl);
-   assert(fenced_buf->validation_flags);
-
-   pipe_mutex_lock(fenced_list->mutex);
-   if (fenced_buf->fence)
-      _fenced_buffer_remove(fenced_list, fenced_buf);
-   if (fence) {
-      ops->fence_reference(ops, &fenced_buf->fence, fence);
-      fenced_buf->flags |= fenced_buf->validation_flags;
-      _fenced_buffer_add(fenced_buf);
-   }
-   pipe_mutex_unlock(fenced_list->mutex);
-
-   pb_fence(fenced_buf->buffer, fence);
-
-   fenced_buf->vl = NULL;
-   fenced_buf->validation_flags = 0;
+   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
+   struct fenced_manager *fenced_mgr = fenced_buf->mgr;
+   struct pb_fence_ops *ops = fenced_mgr->ops;
+
+   pipe_mutex_lock(fenced_mgr->mutex);
+
+   assert(pipe_is_referenced(&fenced_buf->base.base.reference));
+   assert(fenced_buf->buffer);
+
+   if(fence != fenced_buf->fence) {
+      assert(fenced_buf->vl);
+      assert(fenced_buf->validation_flags);
+
+      if (fenced_buf->fence) {
+         boolean destroyed;
+         destroyed = fenced_buffer_remove_locked(fenced_mgr, fenced_buf);
+         assert(!destroyed);
+      }
+      if (fence) {
+         ops->fence_reference(ops, &fenced_buf->fence, fence);
+         fenced_buf->flags |= fenced_buf->validation_flags;
+         fenced_buffer_add_locked(fenced_mgr, fenced_buf);
+      }
+
+      pb_fence(fenced_buf->buffer, fence);
+
+      fenced_buf->vl = NULL;
+      fenced_buf->validation_flags = 0;
+   }
+
+   pipe_mutex_unlock(fenced_mgr->mutex);
 }


@@ -436 +871 @@
                               pb_size *offset)
 {
    struct fenced_buffer *fenced_buf = fenced_buffer(buf);
-   pb_get_base_buffer(fenced_buf->buffer, base_buf, offset);
+   struct fenced_manager *fenced_mgr = fenced_buf->mgr;
+
+   pipe_mutex_lock(fenced_mgr->mutex);
+
+   /*
+    * This should only be called when the buffer is validated. Typically
+    * when processing relocations.
+    */
+   assert(fenced_buf->vl);
+   assert(fenced_buf->buffer);
+
+   if(fenced_buf->buffer)
+      pb_get_base_buffer(fenced_buf->buffer, base_buf, offset);
+   else {
+      *base_buf = buf;
+      *offset = 0;
+   }
+
+   pipe_mutex_unlock(fenced_mgr->mutex);
 }


-static const struct pb_vtbl
+static const struct pb_vtbl
 fenced_buffer_vtbl = {
       fenced_buffer_destroy,
       fenced_buffer_map,
@@ -451 +904 @@
 };


-struct pb_buffer *
-fenced_buffer_create(struct fenced_buffer_list *fenced_list,
-                     struct pb_buffer *buffer)
-{
-   struct fenced_buffer *buf;
-
-   if(!buffer)
-      return NULL;
-
-   buf = CALLOC_STRUCT(fenced_buffer);
-   if(!buf) {
-      pb_reference(&buffer, NULL);
-      return NULL;
-   }
-
-   pipe_reference_init(&buf->base.base.reference, 1);
-   buf->base.base.alignment = buffer->base.alignment;
-   buf->base.base.usage = buffer->base.usage;
-   buf->base.base.size = buffer->base.size;
-
-   buf->base.vtbl = &fenced_buffer_vtbl;
-   buf->buffer = buffer;
-   buf->list = fenced_list;
-
-#ifdef DEBUG
-   pipe_mutex_lock(fenced_list->mutex);
-   LIST_ADDTAIL(&buf->head, &fenced_list->unfenced);
-   ++fenced_list->numUnfenced;
-   pipe_mutex_unlock(fenced_list->mutex);
-#endif
-
-   return &buf->base;
-}
-
-
-struct fenced_buffer_list *
-fenced_buffer_list_create(struct pb_fence_ops *ops)
-{
-   struct fenced_buffer_list *fenced_list;
-
-   fenced_list = CALLOC_STRUCT(fenced_buffer_list);
-   if (!fenced_list)
-      return NULL;
-
-   fenced_list->ops = ops;
-
-   LIST_INITHEAD(&fenced_list->delayed);
-   fenced_list->numDelayed = 0;
-
-#ifdef DEBUG
-   LIST_INITHEAD(&fenced_list->unfenced);
-   fenced_list->numUnfenced = 0;
-#endif
-
-   pipe_mutex_init(fenced_list->mutex);
-
-   return fenced_list;
-}
-
-
-void
-fenced_buffer_list_check_free(struct fenced_buffer_list *fenced_list,
-                              int wait)
-{
-   pipe_mutex_lock(fenced_list->mutex);
-   _fenced_buffer_list_check_free(fenced_list, wait);
-   pipe_mutex_unlock(fenced_list->mutex);
-}
-
-
-#ifdef DEBUG
-void
-fenced_buffer_list_dump(struct fenced_buffer_list *fenced_list)
-{
-   struct pb_fence_ops *ops = fenced_list->ops;
-   struct list_head *curr, *next;
+/**
+ * Wrap a buffer in a fenced buffer.
+ */
+static struct pb_buffer *
+fenced_bufmgr_create_buffer(struct pb_manager *mgr,
+                            pb_size size,
+                            const struct pb_desc *desc)
+{
+   struct fenced_manager *fenced_mgr = fenced_manager(mgr);
    struct fenced_buffer *fenced_buf;
-
-   pipe_mutex_lock(fenced_list->mutex);
-
-   debug_printf("%10s %7s %7s %10s %s\n",
-                "buffer", "size", "refcount", "fence", "signalled");
-
-   curr = fenced_list->unfenced.next;
-   next = curr->next;
-   while(curr != &fenced_list->unfenced) {
-      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
-      assert(!fenced_buf->fence);
-      debug_printf("%10p %7u %7u\n",
-                   (void *) fenced_buf,
-                   fenced_buf->base.base.size,
-                   p_atomic_read(&fenced_buf->base.base.reference.count));
-      curr = next;
-      next = curr->next;
-   }
-
-   curr = fenced_list->delayed.next;
-   next = curr->next;
-   while(curr != &fenced_list->delayed) {
-      int signaled;
-      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
-      signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
-      debug_printf("%10p %7u %7u %10p %s\n",
-                   (void *) fenced_buf,
-                   fenced_buf->base.base.size,
-                   p_atomic_read(&fenced_buf->base.base.reference.count),
-                   (void *) fenced_buf->fence,
-                   signaled == 0 ? "y" : "n");
-      curr = next;
-      next = curr->next;
-   }
-
-   pipe_mutex_unlock(fenced_list->mutex);
-}
-#endif
-
-
-void
-fenced_buffer_list_destroy(struct fenced_buffer_list *fenced_list)
-{
-   pipe_mutex_lock(fenced_list->mutex);
+   enum pipe_error ret;
+
+   /*
+    * Don't stall the GPU, waste time evicting buffers, or waste memory
+    * trying to create a buffer that will most likely never fit into the
+    * graphics aperture.
+    */
+   if(size > fenced_mgr->max_buffer_size) {
+      goto no_buffer;
+   }
+
+   fenced_buf = CALLOC_STRUCT(fenced_buffer);
+   if(!fenced_buf)
+      goto no_buffer;
+
+   pipe_reference_init(&fenced_buf->base.base.reference, 1);
+   fenced_buf->base.base.alignment = desc->alignment;
+   fenced_buf->base.base.usage = desc->usage;
+   fenced_buf->base.base.size = size;
+   fenced_buf->size = size;
+   fenced_buf->desc = *desc;
+
+   fenced_buf->base.vtbl = &fenced_buffer_vtbl;
+   fenced_buf->mgr = fenced_mgr;
+
+   pipe_mutex_lock(fenced_mgr->mutex);
+
+   /*
+    * Try to create GPU storage without stalling,
+    */
+   ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf, FALSE);
+
+   /*
+    * Attempt to use CPU memory to avoid stalling the GPU.
+    */
+   if(ret != PIPE_OK) {
+      ret = fenced_buffer_create_cpu_storage_locked(fenced_mgr, fenced_buf);
+   }
+
+   /*
+    * Create GPU storage, waiting for some to be available.
+    */
+   if(ret != PIPE_OK) {
+      ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf, TRUE);
+   }
+
+   /*
+    * Give up.
+    */
+   if(ret != PIPE_OK) {
+      goto no_storage;
+   }
+
+   assert(fenced_buf->buffer || fenced_buf->data);
+
+   LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->unfenced);
+   ++fenced_mgr->num_unfenced;
+   pipe_mutex_unlock(fenced_mgr->mutex);
+
+   return &fenced_buf->base;
+
+no_storage:
+   pipe_mutex_unlock(fenced_mgr->mutex);
+   FREE(fenced_buf);
+no_buffer:
+   return NULL;
+}
+
+
+static void
+fenced_bufmgr_flush(struct pb_manager *mgr)
+{
+   struct fenced_manager *fenced_mgr = fenced_manager(mgr);
+
+   pipe_mutex_lock(fenced_mgr->mutex);
+   while(fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
+      ;
+   pipe_mutex_unlock(fenced_mgr->mutex);
+
+   assert(fenced_mgr->provider->flush);
+   if(fenced_mgr->provider->flush)
+      fenced_mgr->provider->flush(fenced_mgr->provider);
+}
+
+
+static void
+fenced_bufmgr_destroy(struct pb_manager *mgr)
+{
+   struct fenced_manager *fenced_mgr = fenced_manager(mgr);
+
+   pipe_mutex_lock(fenced_mgr->mutex);

    /* Wait on outstanding fences */
-   while (fenced_list->numDelayed) {
-      pipe_mutex_unlock(fenced_list->mutex);
+   while (fenced_mgr->num_fenced) {
+      pipe_mutex_unlock(fenced_mgr->mutex);
 #if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
       sched_yield();
 #endif
-      _fenced_buffer_list_check_free(fenced_list, 1);
-      pipe_mutex_lock(fenced_list->mutex);
+      pipe_mutex_lock(fenced_mgr->mutex);
+      while(fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
+         ;
    }

 #ifdef DEBUG
-   /*assert(!fenced_list->numUnfenced);*/
+   /*assert(!fenced_mgr->num_unfenced);*/
 #endif
-
-   pipe_mutex_unlock(fenced_list->mutex);
-
-   fenced_list->ops->destroy(fenced_list->ops);
-
-   FREE(fenced_list);
-}
-
-
+
+   pipe_mutex_unlock(fenced_mgr->mutex);
+   pipe_mutex_destroy(fenced_mgr->mutex);
+
+   if(fenced_mgr->provider)
+      fenced_mgr->provider->destroy(fenced_mgr->provider);
+
+   fenced_mgr->ops->destroy(fenced_mgr->ops);
+
+   FREE(fenced_mgr);
+}
+
+
+struct pb_manager *
+fenced_bufmgr_create(struct pb_manager *provider,
+                     struct pb_fence_ops *ops,
+                     pb_size max_buffer_size,
+                     pb_size max_cpu_total_size)
+{
+   struct fenced_manager *fenced_mgr;
+
+   if(!provider)
+      return NULL;
+
+   fenced_mgr = CALLOC_STRUCT(fenced_manager);
+   if (!fenced_mgr)
+      return NULL;
+
+   fenced_mgr->base.destroy = fenced_bufmgr_destroy;
+   fenced_mgr->base.create_buffer = fenced_bufmgr_create_buffer;
+   fenced_mgr->base.flush = fenced_bufmgr_flush;
+
+   fenced_mgr->provider = provider;
+   fenced_mgr->ops = ops;
+   fenced_mgr->max_buffer_size = max_buffer_size;
+   fenced_mgr->max_cpu_total_size = max_cpu_total_size;
+
+   LIST_INITHEAD(&fenced_mgr->fenced);
+   fenced_mgr->num_fenced = 0;
+
+   LIST_INITHEAD(&fenced_mgr->unfenced);
+   fenced_mgr->num_unfenced = 0;
+
+   pipe_mutex_init(fenced_mgr->mutex);
+
+   return &fenced_mgr->base;
+}
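
The new manager plugs into the existing pb_manager interface, so a driver wraps its provider once and then uses ordinary pb_buffer calls. The following is a minimal, hypothetical usage sketch, not part of this change: the winsys_provider and winsys_fence_ops objects and the size limits are placeholder assumptions that would come from the driver's winsys.

/*
 * Usage sketch (illustrative only): wrap a winsys buffer manager with the
 * fenced manager and allocate/upload one buffer through it.
 */
static void
example_fenced_usage(struct pb_manager *winsys_provider,
                     struct pb_fence_ops *winsys_fence_ops,
                     const void *data, pb_size size)
{
   struct pb_manager *mgr;
   struct pb_desc desc;
   struct pb_buffer *buf;
   void *map;

   /* Cap single allocations at 16 MB and temporary CPU backing storage at
    * 64 MB (illustrative limits, not values from the patch). */
   mgr = fenced_bufmgr_create(winsys_provider, winsys_fence_ops,
                              16 * 1024 * 1024, 64 * 1024 * 1024);
   if (!mgr)
      return;

   memset(&desc, 0, sizeof desc);
   desc.alignment = 64;
   desc.usage = PIPE_BUFFER_USAGE_GPU_READ;

   /* The buffer may initially get GPU storage or, under memory pressure,
    * malloc'ed CPU storage until it is validated. */
   buf = mgr->create_buffer(mgr, size, &desc);
   if (buf) {
      map = pb_map(buf, PIPE_BUFFER_USAGE_CPU_WRITE);
      if (map) {
         memcpy(map, data, size);
         pb_unmap(buf);
      }
      /* A real driver would call pb_validate()/pb_fence() around command
       * submission; here we simply drop the reference, and the buffer is
       * freed once any outstanding fence expires. */
      pb_reference(&buf, NULL);
   }

   mgr->destroy(mgr);
}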