@@ -163 +164 @@
    struct bufmgr *bm = intel->bm;
    struct pool *pool = &bm->pool[pool_nr];
    struct block *block = (struct block *)calloc(sizeof *block, 1);
+   GLuint sz, align = (1<<buf->alignment);

-   block->mem = mmAllocMem(pool->heap, buf->size, buf->alignment, 0);
+   sz = (buf->size + align-1) & ~(align-1);
+
+   block->mem = mmAllocMem(pool->heap,
+                           sz,
+                           buf->alignment, 0);

    if (!block->mem) {
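The allocation path above now rounds the request up to the buffer's alignment before calling mmAllocMem(); buf->alignment is stored as a log2, so (1<<buf->alignment) is the alignment in bytes. A minimal standalone sketch of that power-of-two round-up idiom (round_up() and lg_align are illustrative names, not from the patch):

    #include <assert.h>

    /* Round size up to the next multiple of 1 << lg_align.  Only valid for
     * power-of-two alignments, which is why the alignment is kept as a log2. */
    static unsigned round_up(unsigned size, unsigned lg_align)
    {
       unsigned align = 1u << lg_align;
       return (size + align - 1) & ~(align - 1);
    }

    int main(void)
    {
       assert(round_up(4096, 12) == 4096);  /* already page-aligned: unchanged */
       assert(round_up(4097, 12) == 8192);  /* one byte over: next 4 KiB page */
       assert(round_up(100, 6) == 128);     /* next 64-byte boundary */
       return 0;
    }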
@@ -265 +272 @@
-static int evict_lru( struct intel_context *intel, GLuint max_fence )
+static int evict_lru( struct intel_context *intel, GLuint max_fence, GLuint *pool )
 {
    struct bufmgr *bm = intel->bm;
    struct block *block, *tmp;
@@ -299 +307 @@
 #define foreach_s_rev(ptr, t, list)   \
    for(ptr=(list)->prev,t=(ptr)->prev; list != ptr; ptr=t, t=(t)->prev)

-static int evict_mru( struct intel_context *intel)
+static int evict_mru( struct intel_context *intel, GLuint *pool )
 {
    struct bufmgr *bm = intel->bm;
    struct block *block, *tmp;
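The foreach_s_rev() macro added above walks a circular doubly-linked list newest-first while stashing the previous node in t before the body runs, so evict_mru() can free the current block mid-walk. A self-contained demo of that deletion-safe pattern (the node type and push() helper are invented for the demo):

    #include <stdio.h>
    #include <stdlib.h>

    struct node { struct node *next, *prev; int val; };

    /* Deletion-safe reverse traversal: t is saved before the body runs,
     * so the body may free ptr.  Mirrors foreach_s_rev above. */
    #define foreach_s_rev(ptr, t, list) \
       for (ptr = (list)->prev, t = (ptr)->prev; (list) != ptr; ptr = t, t = (t)->prev)

    static void push(struct node *head, int val)
    {
       struct node *n = malloc(sizeof *n);
       n->val = val;
       n->prev = head->prev; n->next = head;
       head->prev->next = n; head->prev = n;
    }

    int main(void)
    {
       struct node head = { &head, &head, 0 };  /* empty circular list, sentinel */
       struct node *ptr, *t;
       for (int i = 1; i <= 3; i++)
          push(&head, i);
       foreach_s_rev(ptr, t, &head) {   /* visits 3, 2, 1: newest first */
          printf("evict %d\n", ptr->val);
          free(ptr);                    /* safe: t was saved before the body;
                                         * no unlink needed, the whole list dies */
       }
       return 0;
    }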
@@ -473 +485 @@
    /* Look for memory blocks not used for >1 frame:
     */
-   while (evict_lru(intel, intel->second_last_swap_fence))
-      if (alloc_block(intel, buf))
+   while (evict_lru(intel, intel->second_last_swap_fence, &pool))
+      if (alloc_from_pool(intel, pool, buf))
          return GL_TRUE;

    /* If we're not thrashing, allow lru eviction to dig deeper into
     * recently used textures.  We'll probably be thrashing soon:
     */
    if (!intel->thrashing) {
-      while (evict_lru(intel, 0))
-         if (alloc_block(intel, buf))
+      while (evict_lru(intel, 0, &pool))
+         if (alloc_from_pool(intel, pool, buf))
            return GL_TRUE;
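evict_lru() and evict_mru() now hand back, via the new GLuint *pool out-parameter, the pool they freed a block in, so the retry can call alloc_from_pool() on exactly that pool instead of rescanning all of them. A self-contained toy model of that retry shape (the two-pool bookkeeping and all names are invented for illustration):

    #include <stdio.h>

    struct pool { int free_blocks; int evictable; };
    static struct pool pools[2] = { { 0, 0 }, { 0, 2 } };

    static int try_alloc(unsigned p)          /* stands in for alloc_from_pool() */
    {
       if (pools[p].free_blocks > 0) {
          pools[p].free_blocks--;
          return 1;
       }
       return 0;
    }

    static int try_evict(unsigned *pool_out)  /* stands in for evict_lru() */
    {
       for (unsigned p = 0; p < 2; p++)
          if (pools[p].evictable > 0) {
             pools[p].evictable--;
             pools[p].free_blocks++;          /* eviction frees space...       */
             *pool_out = p;                   /* ...and reports where it freed */
             return 1;
          }
       return 0;
    }

    int main(void)
    {
       unsigned pool;
       while (try_evict(&pool))               /* while eviction makes progress */
          if (try_alloc(pool)) {              /* retry where space appeared    */
             printf("allocated from pool %u\n", pool);
             return 0;
          }
       printf("nothing left to evict: allocation failed\n");
       return 1;
    }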
@@ -508 +520 @@
    if (!is_empty_list(&bm->on_hardware)) {
       bmSetFence(intel);
    }

-   if (!is_empty_list(&bm->fenced)) {
+   while (!is_empty_list(&bm->fenced)) {
       GLuint fence = bm->fenced.next->fence;
       bmFinishFence(intel, fence);

@@ -525 +537 @@
-   while (evict_mru(intel))
-      if (alloc_block(intel, buf))
+   while (evict_mru(intel, &pool))
+      if (alloc_from_pool(intel, pool, buf))
          return GL_TRUE;

+   DBG("%s 0x%x bytes failed\n", __FUNCTION__, buf->size);
+
+   assert(is_empty_list(&bm->on_hardware));
+   assert(is_empty_list(&bm->fenced));
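In the first hunk above the `if` became a `while`: the fallback now retires every outstanding fence, not just the oldest one, before resorting to the MRU pass. Sketch of that drain pattern with a toy fence FIFO standing in for bm->fenced (all names invented):

    #include <stdio.h>

    static unsigned fences[] = { 10, 11, 12 };   /* oldest first */
    static unsigned head = 0, tail = 3;

    static int list_empty(void) { return head == tail; }

    static void finish_fence(unsigned fence)     /* stands in for bmFinishFence() */
    {
       printf("stall until fence %u retires\n", fence);
    }

    int main(void)
    {
       /* 'while', not 'if': drain the whole queue so every fenced block is
        * released back to its pool before trying anything more drastic. */
       while (!list_empty())
          finish_fence(fences[head++]);
       return 0;
    }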
@@ -562 +579 @@
    make_empty_list(&bm.referenced);
    make_empty_list(&bm.fenced);
    make_empty_list(&bm.on_hardware);

+   /* The context id of any of the share group.  This won't be used
+    * in communication with the kernel, so it doesn't matter if
+    * this context is eventually deleted.
+    */
+   bm.ctxId = intel->hHWContext;
@@ -624 +647 @@
-static struct buffer *do_GenBuffer(struct intel_context *intel, const char *name)
+static struct buffer *do_GenBuffer(struct intel_context *intel, const char *name, int align)
 {
    struct bufmgr *bm = intel->bm;
    struct buffer *buf = calloc(sizeof(*buf), 1);

    buf->id = ++bm->buf_nr;
    buf->name = name;
-   buf->alignment = 12;   /* page-alignment to fit in with AGP swapping */
+   buf->alignment = align;
    buf->flags = BM_MEM_AGP|BM_MEM_VRAM|BM_MEM_LOCAL;
@@ +661 @@
+void *bmFindVirtual( struct intel_context *intel,
+
+   struct bufmgr *bm = intel->bm;
+
+   for (i = 0; i < bm->nr_pools; i++)
+      if (offset >= bm->pool[i].low_offset &&
+          offset + sz <= bm->pool[i].low_offset + bm->pool[i].size)
+         return bm->pool[i].virtual + offset;
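The new bmFindVirtual() appears to translate an aperture offset range back into a CPU pointer by finding the pool whose [low_offset, low_offset+size) interval contains it; the patch adds the raw offset to pool->virtual, which suggests that pointer is pre-biased by -low_offset. The sketch below keeps the subtraction explicit instead; the pool table and names are invented:

    #include <stdio.h>
    #include <stddef.h>

    struct pool { unsigned low_offset; unsigned size; char *virt; };

    static char backing_a[0x1000], backing_b[0x2000];
    static struct pool pools[] = {
       { 0x0000, 0x1000, backing_a },
       { 0x1000, 0x2000, backing_b },
    };

    /* Return the CPU address for [offset, offset+sz) if some pool covers it. */
    static void *find_virtual(unsigned offset, unsigned sz)
    {
       for (size_t i = 0; i < sizeof(pools) / sizeof(pools[0]); i++)
          if (offset >= pools[i].low_offset &&
              offset + sz <= pools[i].low_offset + pools[i].size)
             return pools[i].virt + (offset - pools[i].low_offset);
       return NULL;   /* range not resident in any pool */
    }

    int main(void)
    {
       printf("%p\n", find_virtual(0x1800, 0x100)); /* inside the second pool */
       printf("%p\n", find_virtual(0x2f80, 0x100)); /* crosses the end: NULL  */
       return 0;
    }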
@@ -639 +677 @@
 void bmGenBuffers(struct intel_context *intel,
                   const char *name, unsigned n,
-                  struct buffer **buffers)
+                  struct buffer **buffers,

    struct bufmgr *bm = intel->bm;
@@ -694 +733 @@
    if (bm->pool[pool].static_buffer)
       buf = bm->pool[pool].static_buffer;
    else {
-      buf = do_GenBuffer(intel, "static");
+      buf = do_GenBuffer(intel, "static", 12);

       bm->pool[pool].static_buffer = buf;
       assert(!buf->block);
@@ -735 +774 @@
    /* If buffer size changes, free and reallocate.  Otherwise update in
     * place.
     */
-void bmBufferData(struct intel_context *intel,
+int bmBufferData(struct intel_context *intel,
                  struct buffer *buf,
                  unsigned size,
                  const void *data,
                  unsigned flags )
 {
    struct bufmgr *bm = intel->bm;

@@ -774 +814 @@
       buf->size = size;
       if (buf->block) {
-         assert (buf->block->mem->size == size);
+         assert (buf->block->mem->size >= size);

       if (buf->flags & (BM_NO_BACKING_STORE|BM_NO_EVICT)) {
-         if (!buf->block && !evict_and_alloc_block(intel, buf))
+         assert(intel->locked || data == NULL);
+
+         if (!buf->block && !evict_and_alloc_block(intel, buf)) {

          wait_quiescent(intel, buf->block);
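bmBufferData() (and, below, bmBufferSubData() and the AUB wrappers) changes from void to int so an allocation failure inside the buffer manager can propagate to the caller; the `retval == 0` tests later in the patch imply 0 means success. A hedged sketch of that convention, with invented names; have_space stands in for evict_and_alloc_block() succeeding:

    #include <stdio.h>

    /* Returns 0 on success, nonzero on failure -- the convention the
     * "retval == 0" checks in the following hunks rely on. */
    static int buffer_data(int have_space)
    {
       if (!have_space)
          return -1;     /* report the failure instead of asserting */
       /* ... copy the user's data into the block ... */
       return 0;
    }

    int main(void)
    {
       int retval = buffer_data(0);
       if (retval == 0)
          printf("upload done; safe to emit dependent commands\n");
       else
          printf("upload failed; caller can flush and retry\n");
       return 0;
    }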
@@ -810 +858 @@
    /* Update the buffer in place, in whatever space it is currently resident:
     */
-void bmBufferSubData(struct intel_context *intel,
+int bmBufferSubData(struct intel_context *intel,
                     struct buffer *buf,
                     unsigned offset,
                     unsigned size,
                     const void *data )
 {
    struct bufmgr *bm = intel->bm;

@@ -827 +876 @@
    assert(offset+size <= buf->size);

    if (buf->flags & (BM_NO_EVICT|BM_NO_BACKING_STORE)) {
-      if (!buf->block && !evict_and_alloc_block(intel, buf))
+      assert(intel->locked);
+
+      if (!buf->block && !evict_and_alloc_block(intel, buf)) {

       if (!(buf->flags & BM_NO_FENCE_SUBDATA))
          wait_quiescent(intel, buf->block);

@@ -847 +902 @@
       do_memcpy(buf->backing_store + offset, data, size);
@@ -855 +912 @@
-void bmBufferDataAUB(struct intel_context *intel,
+int bmBufferDataAUB(struct intel_context *intel,
                     struct buffer *buf,
                     unsigned size,
                     const void *data,
                     unsigned flags,
                     unsigned aubtype,
                     unsigned aubsubtype )
 {
-   bmBufferData(intel, buf, size, data, flags);
+   int retval = bmBufferData(intel, buf, size, data, flags);

    /* This only works because in this version of the buffer manager we
     * allocate all buffers statically in agp space and so can emit the
     * uploads to the aub file with the correct offsets as they happen.
     */
-   if (data && intel->aub_file) {
+   if (retval == 0 && data && intel->aub_file) {
       if (buf->block && !buf->dirty) {
          intel->vtbl.aub_gtt_data(intel,
@@ -890 +949 @@
                     unsigned aubtype,
                     unsigned aubsubtype )
 {
-   bmBufferSubData(intel, buf, offset, size, data);
+   int retval = bmBufferSubData(intel, buf, offset, size, data);

    /* This only works because in this version of the buffer manager we
     * allocate all buffers statically in agp space and so can emit the
     * uploads to the aub file with the correct offsets as they happen.
     */
    if (intel->aub_file) {
-      if (buf->block && !buf->dirty)
+      if (retval == 0 && buf->block && !buf->dirty)
          intel->vtbl.aub_gtt_data(intel,
                                   buf->block->mem->ofs + offset,
                                   ((const char *)buf->block->virtual) + offset,
@@ -1011 +1072 @@
    else if (buf->flags & (BM_NO_BACKING_STORE|BM_NO_EVICT)) {
+      assert(intel->locked);
+
       if (!buf->block && !evict_and_alloc_block(intel, buf)) {
-         _mesa_printf("%s: alloc failed\n", __FUNCTION__);
+         DBG("%s: alloc failed\n", __FUNCTION__);

@@ -1028 +1093 @@
       DBG("%s - set buf %d dirty\n", __FUNCTION__, buf->id);
       set_dirty(intel, buf);

       if (buf->backing_store == 0)
@@ -1104 +1169 @@
 int bmValidateBuffers( struct intel_context *intel )
 {
    struct bufmgr *bm = intel->bm;

    DBG("%s fail %d\n", __FUNCTION__, bm->fail);
+   assert(intel->locked);

    if (!bm->fail) {
       struct block *block, *tmp;
@@ -1241 +1307 @@
       GLuint dword[2];
       dword[0] = intel->vtbl.flush_cmd();

-      intel_cmd_ioctl(intel, (char *)&dword, sizeof(dword), GL_TRUE);
+      intel_cmd_ioctl(intel, (char *)&dword, sizeof(dword));

       intel->bm->last_fence = intelEmitIrqLocked( intel );
@@ -1303 +1369 @@
    assert(is_empty_list(&bm->referenced));

    bm->need_fence = 1;
    bmFinishFence(intel, bmSetFence(intel));

+   assert(is_empty_list(&bm->fenced));
+   assert(is_empty_list(&bm->on_hardware));
+
    for (i = 0; i < bm->nr_pools; i++) {
       if (!(bm->pool[i].flags & BM_NO_EVICT)) {
          foreach_s(block, tmp, &bm->pool[i].lru) {
             assert(bmTestFence(intel, block->fence));
             set_dirty(intel, block->buf);
@@ +1392 @@
+void bmEvictAll( struct intel_context *intel )
+{
+   struct bufmgr *bm = intel->bm;
+   struct block *block, *tmp;
+
+   DBG("%s\n", __FUNCTION__);
+
+   assert(is_empty_list(&bm->referenced));
+
+   bmFinishFence(intel, bmSetFence(intel));
+
+   assert(is_empty_list(&bm->fenced));
+   assert(is_empty_list(&bm->on_hardware));
+
+   for (i = 0; i < bm->nr_pools; i++) {
+      if (!(bm->pool[i].flags & BM_NO_EVICT)) {
+         foreach_s(block, tmp, &bm->pool[i].lru) {
+            assert(bmTestFence(intel, block->fence));
+            set_dirty(intel, block->buf);
+            block->buf->block = NULL;
+
+            free_block(intel, block);
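The new bmEvictAll() can discard every resident block because a buffer's contents survive in (or can be rebuilt into) backing store: each evicted block's buffer is marked dirty, so the next validate re-uploads it transparently. A toy model of that dirty/re-upload invariant (all types and helpers invented):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* A buffer keeps a CPU backing store, so evicting its GPU block only
     * needs to mark it dirty; the next validate re-allocates and re-copies. */
    struct buffer { char backing_store[16]; char *block; int dirty; };

    static void evict_all(struct buffer *buf)
    {
       buf->dirty = 1;       /* like set_dirty(): contents must be re-uploaded */
       free(buf->block);     /* like free_block(): release the pool memory     */
       buf->block = NULL;
    }

    static void validate(struct buffer *buf)
    {
       if (!buf->block) {
          buf->block = malloc(sizeof buf->backing_store);
          buf->dirty = 1;
       }
       if (buf->dirty) {
          memcpy(buf->block, buf->backing_store, sizeof buf->backing_store);
          buf->dirty = 0;
       }
    }

    int main(void)
    {
       struct buffer buf = { "hello", NULL, 0 };
       validate(&buf);       /* first upload */
       evict_all(&buf);      /* lose the GPU copy, keep the CPU copy */
       validate(&buf);       /* transparently re-uploaded */
       printf("%s\n", buf.block);
       free(buf.block);
       return 0;
    }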
@@ +1428 @@
+GLboolean bmError( struct intel_context *intel )
+{
+   struct bufmgr *bm = intel->bm;

@@ +1443 @@
+GLuint bmCtxId( struct intel_context *intel )
+{
+   return intel->bm->ctxId;