@@ -77 +77 @@
  * programs be immune to the active primitive (ie. cope with all
  * possibilities). That may not be realistic however.
  */
-static GLuint brw_set_prim(struct brw_context *brw, GLenum prim)
+static GLuint brw_set_prim(struct brw_context *brw,
+                           const struct _mesa_prim *prim)
 {
    GLcontext *ctx = &brw->intel.ctx;
+   GLenum mode = prim->mode;

    if (INTEL_DEBUG & DEBUG_PRIMS)
-      printf("PRIM: %s\n", _mesa_lookup_enum_by_nr(prim));
+      printf("PRIM: %s\n", _mesa_lookup_enum_by_nr(prim->mode));

    /* Slight optimization to avoid the GS program when not needed:
     */
-   if (prim == GL_QUAD_STRIP &&
+   if (mode == GL_QUAD_STRIP &&
        ctx->Light.ShadeModel != GL_FLAT &&
        ctx->Polygon.FrontMode == GL_FILL &&
        ctx->Polygon.BackMode == GL_FILL)
-      prim = GL_TRIANGLE_STRIP;
+      mode = GL_TRIANGLE_STRIP;

-   if (prim != brw->primitive) {
-      brw->primitive = prim;
+   if (prim->mode == GL_QUADS && prim->count == 4 &&
+       ctx->Light.ShadeModel != GL_FLAT &&
+       ctx->Polygon.FrontMode == GL_FILL &&
+       ctx->Polygon.BackMode == GL_FILL) {
+      mode = GL_TRIANGLE_FAN;
+   }
+
+   if (mode != brw->primitive) {
+      brw->primitive = mode;
       brw->state.dirty.brw |= BRW_NEW_PRIMITIVE;

-      if (reduced_prim[prim] != brw->intel.reduced_primitive) {
-         brw->intel.reduced_primitive = reduced_prim[prim];
+      if (reduced_prim[mode] != brw->intel.reduced_primitive) {
+         brw->intel.reduced_primitive = reduced_prim[mode];
          brw->state.dirty.brw |= BRW_NEW_REDUCED_PRIMITIVE;
       }
    }

-   return prim_to_hw_prim[prim];
+   return prim_to_hw_prim[mode];
 }
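The hunk above does two things: brw_set_prim() now receives the whole _mesa_prim instead of just its GLenum mode, and it gains a second primitive rewrite. A four-vertex GL_QUADS draw can be emitted as GL_TRIANGLE_FAN because a fan around the quad's first vertex is one valid decomposition of the quad with its winding preserved; like the existing GL_QUAD_STRIP case, this is only safe when shading is smooth and both polygon faces are GL_FILL, since flat shading changes the provoking vertex and line/point fill modes would expose the added diagonal. (reduced_prim[] maps each primitive to its base class, points, lines or triangles, so state that depends only on the class is not re-flagged.) A standalone sketch of the fan equivalence, as illustration rather than driver code:

/* Illustration only (not part of the patch): expanding one quad
 * (v0,v1,v2,v3) the way GL_TRIANGLE_FAN does yields the triangles
 * (v0,v1,v2) and (v0,v2,v3), i.e. the quad split along one diagonal
 * with the original vertex order preserved.
 */
#include <stdio.h>

int main(void)
{
   const int quad[4] = { 10, 11, 12, 13 };   /* arbitrary vertex indices */
   int i;

   /* A fan emits (first, i, i+1) for each successive pair of vertices. */
   for (i = 1; i + 1 < 4; i++)
      printf("triangle (%d, %d, %d)\n", quad[0], quad[i], quad[i + 1]);
   return 0;
}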
@@ -142 +151 @@
    prim_packet.start_instance_location = 0;
    prim_packet.base_vert_location = prim->basevertex;

    /* Can't wrap here, since we rely on the validated state. */
    intel->no_batch_wrap = GL_TRUE;

    /* If we're set to always flush, do it before and after the primitive emit.
     * We want to catch both missed flushes that hurt instruction/state cache
     * and missed flushes of the render cache as it heads to other parts of
[...]
    if (intel->always_flush_cache) {
       intel_batchbuffer_emit_mi_flush(intel->batch);
[...]
    intel->no_batch_wrap = GL_FALSE;
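The comment in this hunk describes a debugging aid: with intel->always_flush_cache set, the driver brackets every primitive with a full pipeline flush, so a missing-flush bug shows up as "fixed by over-flushing". The elided middle of the hunk follows the pattern sketched below, using only names that already appear in this file; the packet emission itself is summarized as a comment:

if (intel->always_flush_cache)
   intel_batchbuffer_emit_mi_flush(intel->batch);

/* ... emit the 3DPRIMITIVE packet built in prim_packet ... */

if (intel->always_flush_cache)
   intel_batchbuffer_emit_mi_flush(intel->batch);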
@@ -167 +171 @@
 static void brw_merge_inputs( struct brw_context *brw,
[...]
    for (i = 0; i < VERT_ATTRIB_MAX; i++)
-      dri_bo_unreference(brw->vb.inputs[i].bo);
+      drm_intel_bo_unreference(brw->vb.inputs[i].bo);

    memset(&brw->vb.inputs, 0, sizeof(brw->vb.inputs));
    memset(&brw->vb.info, 0, sizeof(brw->vb.info));
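From here down the patch is a mechanical rename: the deprecated dri_bo_* entry points are replaced with libdrm's drm_intel_bo_* names, which do the same reference-count manipulation. A minimal sketch of how the pairing is meant to balance, assuming libdrm's intel_bufmgr.h API (swap_bo is a hypothetical helper, not driver code):

#include <intel_bufmgr.h>

/* References taken and dropped must stay balanced, or the buffer
 * object leaks / is freed while still in use.
 */
static void
swap_bo(drm_intel_bo **slot, drm_intel_bo *bo)
{
   if (bo)
      drm_intel_bo_reference(bo);     /* take our own reference */
   drm_intel_bo_unreference(*slot);   /* NULL-safe in libdrm */
   *slot = bo;
}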
@@ -352 +356 @@
    intel_batchbuffer_require_space(intel->batch, intel->batch->size / 4);
[...]
-      hw_prim = brw_set_prim(brw, prim[i].mode);
+      hw_prim = brw_set_prim(brw, &prim[i]);

       if (first_time || (brw->state.dirty.brw & BRW_NEW_PRIMITIVE)) {
          first_time = GL_FALSE;
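Passing &prim[i] rather than prim[i].mode is what lets brw_set_prim() inspect the vertex count for the GL_QUADS rewrite above, and it matches base_vert_location reading prim->basevertex in the earlier hunk. Abridged, the _mesa_prim fields this patch relies on (a hypothetical simplification; the real struct lives in Mesa's vbo code and has more fields):

struct _mesa_prim_abridged {   /* illustration, not the real name */
   GLenum mode;                /* GL_POINTS .. GL_POLYGON */
   GLuint count;               /* vertex count; == 4 enables the fan rewrite */
   GLint basevertex;           /* copied to prim_packet.base_vert_location */
};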
@@ -468 +475 @@
    if (brw->vb.upload.bo != NULL) {
-      dri_bo_unreference(brw->vb.upload.bo);
+      drm_intel_bo_unreference(brw->vb.upload.bo);
       brw->vb.upload.bo = NULL;
[...]
    for (i = 0; i < VERT_ATTRIB_MAX; i++) {
-      dri_bo_unreference(brw->vb.inputs[i].bo);
+      drm_intel_bo_unreference(brw->vb.inputs[i].bo);
       brw->vb.inputs[i].bo = NULL;
[...]
-   dri_bo_unreference(brw->ib.bo);
+   drm_intel_bo_unreference(brw->ib.bo);
    brw->ib.bo = NULL;
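The destroy path keeps the driver's unreference-then-NULL idiom so a stale pointer can never be unreferenced twice. Condensed into a hypothetical helper (drm_intel_bo_unreference() already tolerates NULL, which is why the loop above does not test the pointer first):

static void
release_bo(drm_intel_bo **slot)
{
   drm_intel_bo_unreference(*slot);   /* no-op when *slot is NULL */
   *slot = NULL;
}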