98
98
brw_clear_batch_cache(brw);
102
* Allocates a block of space in the batchbuffer for indirect state.
*
* We don't want to allocate separate BOs for every bit of indirect
* state in the driver.  It means overallocating by a significant
* margin (4096 bytes, even if the object is just a 20-byte surface
* state), and more buffers to walk and count for aperture size checking.
*
* However, due to the restrictions imposed by the aperture size
* checking performance hacks, we can't have the batch point at a
* separate indirect state buffer, because once the batch points at
* it, no more relocations can be added to it.  So, we sneak these
* buffers in at the top of the batchbuffer.
116
brw_state_batch(struct brw_context *brw,
119
drm_intel_bo **out_bo,
120
uint32_t *out_offset)
122
struct intel_batchbuffer *batch = brw->intel.batch;
125
assert(size < batch->buf->size);
126
offset = ROUND_DOWN_TO(batch->state_batch_offset - size, alignment);
128
/* If allocating from the top would wrap below the batchbuffer, or
129
* if the batch's used space (plus the reserved pad) collides with our
130
* space, then flush and try again.
132
if (batch->state_batch_offset < size ||
133
offset < batch->ptr - batch->map + batch->reserved_space) {
134
intel_batchbuffer_flush(batch);
135
offset = ROUND_DOWN_TO(batch->state_batch_offset - size, alignment);
138
batch->state_batch_offset = offset;
140
if (*out_bo != batch->buf) {
141
drm_intel_bo_unreference(*out_bo);
142
drm_intel_bo_reference(batch->buf);
143
*out_bo = batch->buf;
146
*out_offset = offset;
147
return batch->map + offset;