	state->last_primitive = sna->kgem.nbatch;
}

static void gen5_vertex_flush(struct sna *sna)
{
	assert(sna->render_state.gen5.vertex_offset);
	assert(sna->render.vertex_index > sna->render.vertex_start);

	DBG(("%s[%x] = %d\n", __FUNCTION__,
	     4*sna->render_state.gen5.vertex_offset,
	     sna->render.vertex_index - sna->render.vertex_start));
	sna->kgem.batch[sna->render_state.gen5.vertex_offset] =
		sna->render.vertex_index - sna->render.vertex_start;
	sna->render_state.gen5.vertex_offset = 0;
}

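/* The vbo is exhausted: patch up the outstanding relocations against
 * the old buffer, release it, and map a fresh 256k linear bo in its
 * place. Returns the space (in floats) available for new vertices,
 * or 0 if no replacement buffer could be mapped.
 */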
static int gen5_vertex_finish(struct sna *sna)
{
	struct kgem_bo *bo;
	unsigned int i;

	assert(sna->render.vertex_used);
	assert(sna->render.nvertex_reloc);

	/* Note: we only need dword alignment (currently) */

	bo = sna->render.vbo;
	if (bo) {
		if (sna->render_state.gen5.vertex_offset)
			gen5_vertex_flush(sna);

		for (i = 0; i < sna->render.nvertex_reloc; i++) {
			DBG(("%s: reloc[%d] = %d\n", __FUNCTION__,
			     i, sna->render.vertex_reloc[i]));

			sna->kgem.batch[sna->render.vertex_reloc[i]] =
				kgem_add_reloc(&sna->kgem,
					       sna->render.vertex_reloc[i], bo,
					       I915_GEM_DOMAIN_VERTEX << 16,
					       0);
		}

		sna->render.nvertex_reloc = 0;
		sna->render.vertex_used = 0;
		sna->render.vertex_index = 0;
		sna->render.vbo = NULL;
		sna->render_state.gen5.vb_id = 0;

		kgem_bo_destroy(&sna->kgem, bo);
	}

	sna->render.vertices = NULL;
	sna->render.vbo = kgem_create_linear(&sna->kgem,
					     256*1024, CREATE_GTT_MAP);
	if (sna->render.vbo)
		sna->render.vertices = kgem_bo_map(&sna->kgem, sna->render.vbo);
	if (sna->render.vertices == NULL) {
		if (sna->render.vbo)
			kgem_bo_destroy(&sna->kgem, sna->render.vbo);
		sna->render.vbo = NULL;
		return 0;
	}

	if (sna->render.vertex_used)
		memcpy(sna->render.vertices,
		       sna->render.vertex_data,
		       sizeof(float)*sna->render.vertex_used);
	sna->render.vertex_size = 64 * 1024 - 1;
	return sna->render.vertex_size - sna->render.vertex_used;
}

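/* Called when closing the batch: decide where the accumulated vertex
 * data will finally live. A nearly-full vbo is discarded, a CPU-mapped
 * vbo is switched to a GTT mapping, and if no vbo was in use at all the
 * vertices are copied into spare space in the batch itself (or into a
 * small one-off bo), before the queued relocations are resolved.
 */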
static void gen5_vertex_close(struct sna *sna)
{
	struct kgem_bo *bo, *free_bo = NULL;
	unsigned int i, delta = 0;

	assert(sna->render_state.gen5.vertex_offset == 0);
	if (!sna->render_state.gen5.vb_id)
		return;

	DBG(("%s: used=%d, vbo active? %d\n",
	     __FUNCTION__, sna->render.vertex_used, sna->render.vbo != NULL));

	bo = sna->render.vbo;
	if (bo) {
		if (sna->render.vertex_size - sna->render.vertex_used < 64) {
			DBG(("%s: discarding full vbo\n", __FUNCTION__));
			sna->render.vbo = NULL;
			sna->render.vertices = sna->render.vertex_data;
			sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
			free_bo = bo;
		} else if (IS_CPU_MAP(bo->map)) {
			DBG(("%s: converting CPU map to GTT\n", __FUNCTION__));
			sna->render.vertices =
				kgem_bo_map__gtt(&sna->kgem, sna->render.vbo);
			if (sna->render.vertices == NULL) {
				sna->render.vbo = NULL;
				sna->render.vertices = sna->render.vertex_data;
				sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
				free_bo = bo;
			}
		}
	} else {
		if (sna->kgem.nbatch + sna->render.vertex_used <= sna->kgem.surface) {
			DBG(("%s: copy to batch: %d @ %d\n", __FUNCTION__,
			     sna->render.vertex_used, sna->kgem.nbatch));
			memcpy(sna->kgem.batch + sna->kgem.nbatch,
			       sna->render.vertex_data,
			       sna->render.vertex_used * 4);
			delta = sna->kgem.nbatch * 4;
			bo = NULL;
			sna->kgem.nbatch += sna->render.vertex_used;
		} else {
			bo = kgem_create_linear(&sna->kgem,
						4*sna->render.vertex_used, 0);
			if (bo && !kgem_bo_write(&sna->kgem, bo,
						 sna->render.vertex_data,
						 4*sna->render.vertex_used)) {
				kgem_bo_destroy(&sna->kgem, bo);
				bo = NULL;
			}
			DBG(("%s: new vbo: %d\n", __FUNCTION__,
			     sna->render.vertex_used));
			free_bo = bo;
		}
	}

	assert(sna->render.nvertex_reloc);
	for (i = 0; i < sna->render.nvertex_reloc; i++) {
		DBG(("%s: reloc[%d] = %d\n", __FUNCTION__,
		     i, sna->render.vertex_reloc[i]));

		sna->kgem.batch[sna->render.vertex_reloc[i]] =
			kgem_add_reloc(&sna->kgem,
				       sna->render.vertex_reloc[i], bo,
				       I915_GEM_DOMAIN_VERTEX << 16,
				       delta);
	}
	sna->render.nvertex_reloc = 0;

	if (sna->render.vbo == NULL) {
		sna->render.vertex_used = 0;
		sna->render.vertex_index = 0;
	}

	if (free_bo)
		kgem_bo_destroy(&sna->kgem, free_bo);
}

static uint32_t gen5_get_blend(int op,
			       bool has_component_alpha,
			       uint32_t dst_format);

	return offset * sizeof(uint32_t);
}

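/* Each composite rectangle is emitted as three "corner" vertices of a
 * RECTLIST primitive (the hardware derives the fourth corner): with
 * only a source channel that is 3 floats per vertex (9 per rectangle,
 * the packed x/y position counting as one float), and 5 floats per
 * vertex (15 per rectangle) when a mask channel is present.
 */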
fastcall static void
gen5_emit_composite_primitive_solid(struct sna *sna,
				    const struct sna_composite_op *op,
				    const struct sna_composite_rectangles *r)
{
	float *v;
	union {
		struct sna_coordinate p;
		float f;
	} dst;

	v = sna->render.vertices + sna->render.vertex_used;
	sna->render.vertex_used += 9;

	dst.p.x = r->dst.x + r->width;
	dst.p.y = r->dst.y + r->height;
	v[0] = dst.f;
	v[1] = 1.;
	v[2] = 1.;

	dst.p.x = r->dst.x;
	v[3] = dst.f;
	v[4] = 0.;
	v[5] = 1.;

	dst.p.y = r->dst.y;
	v[6] = dst.f;
	v[7] = 0.;
	v[8] = 0.;
}

fastcall static void
gen5_emit_composite_primitive_identity_source(struct sna *sna,
					      const struct sna_composite_op *op,
					      const struct sna_composite_rectangles *r)
{
	const float *sf = op->src.scale;
	float sx, sy, *v;
	union {
		struct sna_coordinate p;
		float f;
	} dst;

	v = sna->render.vertices + sna->render.vertex_used;
	sna->render.vertex_used += 9;

	sx = r->src.x + op->src.offset[0];
	sy = r->src.y + op->src.offset[1];

	dst.p.x = r->dst.x + r->width;
	dst.p.y = r->dst.y + r->height;
	v[0] = dst.f;
	v[1] = (sx + r->width) * sf[0];
	v[5] = v[2] = (sy + r->height) * sf[1];

	dst.p.x = r->dst.x;
	v[3] = dst.f;
	v[7] = v[4] = sx * sf[0];

	dst.p.y = r->dst.y;
	v[6] = dst.f;
	v[8] = sy * sf[1];
}

fastcall static void
gen5_emit_composite_primitive_affine_source(struct sna *sna,
					    const struct sna_composite_op *op,
					    const struct sna_composite_rectangles *r)
{
	union {
		struct sna_coordinate p;
		float f;
	} dst;
	float *v;

	v = sna->render.vertices + sna->render.vertex_used;
	sna->render.vertex_used += 9;

	dst.p.x = r->dst.x + r->width;
	dst.p.y = r->dst.y + r->height;
	v[0] = dst.f;
	_sna_get_transformed_coordinates(op->src.offset[0] + r->src.x + r->width,
					 op->src.offset[1] + r->src.y + r->height,
					 op->src.transform,
					 &v[1], &v[2]);
	v[1] *= op->src.scale[0];
	v[2] *= op->src.scale[1];

	dst.p.x = r->dst.x;
	v[3] = dst.f;
	_sna_get_transformed_coordinates(op->src.offset[0] + r->src.x,
					 op->src.offset[1] + r->src.y + r->height,
					 op->src.transform,
					 &v[4], &v[5]);
	v[4] *= op->src.scale[0];
	v[5] *= op->src.scale[1];

	dst.p.y = r->dst.y;
	v[6] = dst.f;
	_sna_get_transformed_coordinates(op->src.offset[0] + r->src.x,
					 op->src.offset[1] + r->src.y,
					 op->src.transform,
					 &v[7], &v[8]);
	v[7] *= op->src.scale[0];
	v[8] *= op->src.scale[1];
}

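/* Note the scale factors: the emitters above compute texture
 * coordinates in texel space and multiply by op->src.scale (the
 * reciprocal texture dimensions) to reach the normalised [0,1]
 * space the sampler expects.
 */
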
fastcall static void
gen5_emit_composite_primitive_identity_mask(struct sna *sna,
					    const struct sna_composite_op *op,
					    const struct sna_composite_rectangles *r)
{
	union {
		struct sna_coordinate p;
		float f;
	} dst;
	float msk_x, msk_y;
	float w, h;
	float *v;

	msk_x = r->mask.x + op->mask.offset[0];
	msk_y = r->mask.y + op->mask.offset[1];
	w = r->width;
	h = r->height;

	v = sna->render.vertices + sna->render.vertex_used;
	sna->render.vertex_used += 15;

	dst.p.x = r->dst.x + r->width;
	dst.p.y = r->dst.y + r->height;
	v[0] = dst.f;
	v[3] = (msk_x + w) * op->mask.scale[0];
	v[9] = v[4] = (msk_y + h) * op->mask.scale[1];

	dst.p.x = r->dst.x;
	v[5] = dst.f;
	v[13] = v[8] = msk_x * op->mask.scale[0];

	dst.p.y = r->dst.y;
	v[10] = dst.f;
	v[14] = msk_y * op->mask.scale[1];

	v[7] = v[2] = v[1] = 1;
	v[12] = v[11] = v[6] = 0;
}

fastcall static void
gen5_emit_composite_primitive_identity_source_mask(struct sna *sna,
						   const struct sna_composite_op *op,
						   const struct sna_composite_rectangles *r)
{
	union {
		struct sna_coordinate p;
		float f;
	} dst;
	float src_x, src_y;
	float msk_x, msk_y;
	float w, h;
	float *v;

	src_x = r->src.x + op->src.offset[0];
	src_y = r->src.y + op->src.offset[1];
	msk_x = r->mask.x + op->mask.offset[0];
	msk_y = r->mask.y + op->mask.offset[1];
	w = r->width;
	h = r->height;

	v = sna->render.vertices + sna->render.vertex_used;
	sna->render.vertex_used += 15;

	dst.p.x = r->dst.x + r->width;
	dst.p.y = r->dst.y + r->height;
	v[0] = dst.f;
	v[1] = (src_x + w) * op->src.scale[0];
	v[2] = (src_y + h) * op->src.scale[1];
	v[3] = (msk_x + w) * op->mask.scale[0];
	v[4] = (msk_y + h) * op->mask.scale[1];

	dst.p.x = r->dst.x;
	v[5] = dst.f;
	v[6] = src_x * op->src.scale[0];
	v[7] = v[2];
	v[8] = msk_x * op->mask.scale[0];
	v[9] = v[4];

	dst.p.y = r->dst.y;
	v[10] = dst.f;
	v[11] = v[6];
	v[12] = src_y * op->src.scale[1];
	v[13] = v[8];
	v[14] = msk_y * op->mask.scale[1];
}

fastcall static void
gen5_emit_composite_primitive(struct sna *sna,
			      const struct sna_composite_op *op,
			      const struct sna_composite_rectangles *r)
{
	float src_x[3], src_y[3], src_w[3], mask_x[3], mask_y[3], mask_w[3];
	bool is_affine = op->is_affine;
	const float *src_sf = op->src.scale;
	const float *mask_sf = op->mask.scale;

	if (op->src.is_solid) {
		/* constant 1x1 source; corner texcoords elided in this excerpt */
		src_w[0] = src_w[1] = src_w[2] = 1;
	} else if (is_affine) {
		sna_get_transformed_coordinates(r->src.x + op->src.offset[0],
						r->src.y + op->src.offset[1],
						op->src.transform,
						&src_x[0], &src_y[0]);
		sna_get_transformed_coordinates(r->src.x + op->src.offset[0],
						r->src.y + op->src.offset[1] + r->height,
						op->src.transform,
						&src_x[1], &src_y[1]);
		sna_get_transformed_coordinates(r->src.x + op->src.offset[0] + r->width,
						r->src.y + op->src.offset[1] + r->height,
						op->src.transform,
						&src_x[2], &src_y[2]);
	} else {
		sna_get_transformed_coordinates_3d(r->src.x + op->src.offset[0],
						   r->src.y + op->src.offset[1],
						   op->src.transform,
						   &src_x[0], &src_y[0], &src_w[0]);
		sna_get_transformed_coordinates_3d(r->src.x + op->src.offset[0],
						   r->src.y + op->src.offset[1] + r->height,
						   op->src.transform,
						   &src_x[1], &src_y[1], &src_w[1]);
		sna_get_transformed_coordinates_3d(r->src.x + op->src.offset[0] + r->width,
						   r->src.y + op->src.offset[1] + r->height,
						   op->src.transform,
						   &src_x[2], &src_y[2], &src_w[2]);
	}

	if (op->mask.bo) {
		if (op->mask.is_solid) {
			/* constant 1x1 mask; corner texcoords elided in this excerpt */
			mask_w[0] = mask_w[1] = mask_w[2] = 1;
		} else if (is_affine) {
			sna_get_transformed_coordinates(r->mask.x + op->mask.offset[0],
							r->mask.y + op->mask.offset[1],
							op->mask.transform,
							&mask_x[0], &mask_y[0]);
			sna_get_transformed_coordinates(r->mask.x + op->mask.offset[0],
							r->mask.y + op->mask.offset[1] + r->height,
							op->mask.transform,
							&mask_x[1], &mask_y[1]);
			sna_get_transformed_coordinates(r->mask.x + op->mask.offset[0] + r->width,
							r->mask.y + op->mask.offset[1] + r->height,
							op->mask.transform,
							&mask_x[2], &mask_y[2]);
		} else {
			sna_get_transformed_coordinates_3d(r->mask.x + op->mask.offset[0],
							   r->mask.y + op->mask.offset[1],
							   op->mask.transform,
							   &mask_x[0], &mask_y[0], &mask_w[0]);
			sna_get_transformed_coordinates_3d(r->mask.x + op->mask.offset[0],
							   r->mask.y + op->mask.offset[1] + r->height,
							   op->mask.transform,
							   &mask_x[1], &mask_y[1], &mask_w[1]);
			sna_get_transformed_coordinates_3d(r->mask.x + op->mask.offset[0] + r->width,
							   r->mask.y + op->mask.offset[1] + r->height,
							   op->mask.transform,
							   &mask_x[2], &mask_y[2], &mask_w[2]);
		}
	}

	OUT_VERTEX(r->dst.x + r->width, r->dst.y + r->height);
	OUT_VERTEX_F(src_x[2] * src_sf[0]);
	OUT_VERTEX_F(src_y[2] * src_sf[1]);
	if (!is_affine)
		OUT_VERTEX_F(src_w[2]);
	if (op->mask.bo) {
		OUT_VERTEX_F(mask_x[2] * mask_sf[0]);
		OUT_VERTEX_F(mask_y[2] * mask_sf[1]);
		if (!is_affine)
			OUT_VERTEX_F(mask_w[2]);
	}

	OUT_VERTEX(r->dst.x, r->dst.y + r->height);
	OUT_VERTEX_F(src_x[1] * src_sf[0]);
	OUT_VERTEX_F(src_y[1] * src_sf[1]);
	if (!is_affine)
		OUT_VERTEX_F(src_w[1]);
	if (op->mask.bo) {
		OUT_VERTEX_F(mask_x[1] * mask_sf[0]);
		OUT_VERTEX_F(mask_y[1] * mask_sf[1]);
		if (!is_affine)
			OUT_VERTEX_F(mask_w[1]);
	}

	OUT_VERTEX(r->dst.x, r->dst.y);
	OUT_VERTEX_F(src_x[0] * src_sf[0]);
	OUT_VERTEX_F(src_y[0] * src_sf[1]);
	if (!is_affine)
		OUT_VERTEX_F(src_w[0]);
	if (op->mask.bo) {
		OUT_VERTEX_F(mask_x[0] * mask_sf[0]);
		OUT_VERTEX_F(mask_y[0] * mask_sf[1]);
		if (!is_affine)
			OUT_VERTEX_F(mask_w[0]);
	}
}

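/* The generic path above recomputes the full (possibly projective)
 * transform for every corner; the specialised emitters earlier trade
 * that flexibility for straight-line stores into the vertex array.
 */
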
static void gen5_emit_vertex_buffer(struct sna *sna,
				    const struct sna_composite_op *op)
{
	int id = op->u.gen5.ve_id;

	assert((sna->render_state.gen5.vb_id & (1 << id)) == 0);

	OUT_BATCH(GEN5_3DSTATE_VERTEX_BUFFERS | 3);
	OUT_BATCH(id << VB0_BUFFER_INDEX_SHIFT | VB0_VERTEXDATA |
		  (4*op->floats_per_vertex << VB0_BUFFER_PITCH_SHIFT));
	assert(sna->render.nvertex_reloc < ARRAY_SIZE(sna->render.vertex_reloc));
	sna->render.vertex_reloc[sna->render.nvertex_reloc++] = sna->kgem.nbatch;
	OUT_BATCH(0);
	OUT_BATCH(~0); /* max address: disabled */
	OUT_BATCH(0);

	sna->render_state.gen5.vb_id |= 1 << id;
}

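/* The buffer address dword is emitted as a placeholder here: its batch
 * offset is queued in vertex_reloc[] and rewritten with the real bo
 * address by gen5_vertex_finish()/gen5_vertex_close() once the final
 * vbo is known.
 */
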
static void gen5_emit_primitive(struct sna *sna)
{
	if (sna->kgem.nbatch == sna->render_state.gen5.last_primitive) {
		sna->render_state.gen5.vertex_offset = sna->kgem.nbatch - 5;
		return;
	}
	/* remainder of gen5_emit_primitive elided in this excerpt */

static bool
gen5_emit_pipelined_pointers(struct sna *sna,
			     const struct sna_composite_op *op,
			     int blend, int kernel)
{
	uint16_t sp, bp;
	uint32_t key;

	DBG(("%s: has_mask=%d, src=(%d, %d), mask=(%d, %d), kernel=%d, blend=%d, ca=%d, format=%x\n",
	     __FUNCTION__, op->u.gen5.ve_id & 2,
	     op->src.filter, op->src.repeat,
	     op->mask.filter, op->mask.repeat,
	     kernel, blend, op->has_component_alpha, (int)op->dst.format));

	sp = SAMPLER_OFFSET(op->src.filter, op->src.repeat,
			    op->mask.filter, op->mask.repeat,
			    kernel);
	bp = gen5_get_blend(blend, op->has_component_alpha, op->dst.format);

	DBG(("%s: sp=%d, bp=%d\n", __FUNCTION__, sp, bp));
	key = sp | (uint32_t)bp << 16 | (op->mask.bo != NULL) << 31;
	if (key == sna->render_state.gen5.last_pipelined_pointers)
		return false;

	OUT_BATCH(GEN5_3DSTATE_PIPELINED_POINTERS | 5);
	OUT_BATCH(sna->render_state.gen5.vs);
	OUT_BATCH(GEN5_GS_DISABLE); /* passthrough */
	OUT_BATCH(GEN5_CLIP_DISABLE); /* passthrough */
	OUT_BATCH(sna->render_state.gen5.sf[op->mask.bo != NULL]);
	OUT_BATCH(sna->render_state.gen5.wm + sp);
	OUT_BATCH(sna->render_state.gen5.cc + bp);

	sna->render_state.gen5.last_pipelined_pointers = key;
	return true;
}

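/* Packing the sampler offset, blend offset and mask flag into a single
 * key makes the redundancy check a one-word compare, so re-emitting
 * unchanged state costs nothing beyond the comparison itself.
 */
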
static void
gen5_emit_vertex_elements(struct sna *sna,
			  const struct sna_composite_op *op)
{
	/*
	 * vertex data in vertex buffer
	 *    position: (x, y)
	 *    texture coordinate 0: (u0, v0) if (is_affine is true) else (u0, v0, w0)
	 *    texture coordinate 1 if (has_mask is true): same as above
	 */
	struct gen5_render_state *render = &sna->render_state.gen5;
	int id = op->u.gen5.ve_id;
	bool has_mask = id >> 2;
	uint32_t format, dw;
	int offset;

	if (!DBG_NO_STATE_CACHE && render->ve_id == id)
		return;

	DBG(("%s: changing %d -> %d\n", __FUNCTION__, render->ve_id, id));
	render->ve_id = id;

	if (id == VERTEX_2s2s) {
		DBG(("%s: setup COPY\n", __FUNCTION__));
		assert(op->floats_per_rect == 6);

		OUT_BATCH(GEN5_3DSTATE_VERTEX_ELEMENTS |
			  ((2 * (1 + 2)) + 1 - 2));

		/* pad */
		OUT_BATCH(id << VE0_VERTEX_BUFFER_INDEX_SHIFT | VE0_VALID |
			  GEN5_SURFACEFORMAT_R32G32B32A32_FLOAT << VE0_FORMAT_SHIFT |
			  0 << VE0_OFFSET_SHIFT);
		OUT_BATCH(VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_0_SHIFT |
			  VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_1_SHIFT |
			  VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_2_SHIFT |
			  VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_3_SHIFT);

		/* x,y */
		OUT_BATCH(id << VE0_VERTEX_BUFFER_INDEX_SHIFT | VE0_VALID |
			  GEN5_SURFACEFORMAT_R16G16_SSCALED << VE0_FORMAT_SHIFT |
			  0 << VE0_OFFSET_SHIFT);
		OUT_BATCH(VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT |
			  VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT |
			  VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_2_SHIFT |
			  VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT);

		/* u0, v0 */
		OUT_BATCH(id << VE0_VERTEX_BUFFER_INDEX_SHIFT | VE0_VALID |
			  GEN5_SURFACEFORMAT_R16G16_SSCALED << VE0_FORMAT_SHIFT |
			  4 << VE0_OFFSET_SHIFT);
		OUT_BATCH(VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT |
			  VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT |
			  VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_2_SHIFT |
			  VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT);
		return;
	}

	/* The VUE layout
	 *    dword 0-3: pad (0.0, 0.0, 0.0, 0.0)
	 *    dword 4-7: position (x, y, 1.0, 1.0),
	 *    dword 8-11: texture coordinate 0 (u0, v0, w0, 1.0)
	 *    dword 12-15: texture coordinate 1 (u1, v1, w1, 1.0)
	 *
	 * dword 4-15 are fetched from vertex buffer
	 */
	OUT_BATCH(GEN5_3DSTATE_VERTEX_ELEMENTS |
		  ((2 * (has_mask ? 4 : 3)) + 1 - 2));

	/* pad */
	OUT_BATCH(id << VE0_VERTEX_BUFFER_INDEX_SHIFT | VE0_VALID |
		  GEN5_SURFACEFORMAT_R32G32B32A32_FLOAT << VE0_FORMAT_SHIFT |
		  0 << VE0_OFFSET_SHIFT);
	OUT_BATCH(VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_0_SHIFT |
		  VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_1_SHIFT |
		  VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_2_SHIFT |
		  VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_3_SHIFT);

	/* x,y */
	OUT_BATCH(id << VE0_VERTEX_BUFFER_INDEX_SHIFT | VE0_VALID |
		  GEN5_SURFACEFORMAT_R16G16_SSCALED << VE0_FORMAT_SHIFT |
		  0 << VE0_OFFSET_SHIFT); /* offsets vb in bytes */
	OUT_BATCH(VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT |
		  VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT |
		  VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT |
		  VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT);

	/* u0, v0, w0 */
	offset = 4;
	DBG(("%s: id=%d, first channel %d floats, offset=%d\n", __FUNCTION__,
	     id, id & 3, offset));
	dw = VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT;
	switch (id & 3) {
	case 1:
		format = GEN5_SURFACEFORMAT_R32_FLOAT << VE0_FORMAT_SHIFT;
		dw |= VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT;
		dw |= VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_1_SHIFT;
		dw |= VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT;
		break;
	default:
		assert(0);
		/* fall through */
	case 2:
		format = GEN5_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT;
		dw |= VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT;
		dw |= VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT;
		dw |= VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT;
		break;
	case 3:
		format = GEN5_SURFACEFORMAT_R32G32B32_FLOAT << VE0_FORMAT_SHIFT;
		dw |= VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT;
		dw |= VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT;
		dw |= VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_2_SHIFT;
		break;
	}
	OUT_BATCH(id << VE0_VERTEX_BUFFER_INDEX_SHIFT | VE0_VALID |
		  format | offset << VE0_OFFSET_SHIFT);
	OUT_BATCH(dw);

	/* u1, v1, w1 */
	if (has_mask) {
		offset += (id & 3) * sizeof(float);
		DBG(("%s: id=%x, second channel %d floats, offset=%d\n", __FUNCTION__,
		     id, (id >> 2) & 3, offset));
		dw = VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT;
		switch ((id >> 2) & 3) {
		case 1:
			format = GEN5_SURFACEFORMAT_R32_FLOAT << VE0_FORMAT_SHIFT;
			dw |= VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT;
			dw |= VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_1_SHIFT;
			dw |= VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT;
			break;
		default:
			assert(0);
			/* fall through */
		case 2:
			format = GEN5_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT;
			dw |= VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT;
			dw |= VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT;
			dw |= VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT;
			break;
		case 3:
			format = GEN5_SURFACEFORMAT_R32G32B32_FLOAT << VE0_FORMAT_SHIFT;
			dw |= VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT;
			dw |= VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT;
			dw |= VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_2_SHIFT;
			break;
		}
		OUT_BATCH(id << VE0_VERTEX_BUFFER_INDEX_SHIFT | VE0_VALID |
			  format | offset << VE0_OFFSET_SHIFT);
		OUT_BATCH(dw);
	}
}

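/* ve_id encodes the layout directly: bits 0-1 give the float count of
 * the first (source) channel and bits 2-3 that of the second (mask)
 * channel, hence has_mask = id >> 2 above.
 */
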
#if !NO_COMPOSITE_SPANS
static void
gen5_emit_composite_texcoord(struct sna *sna,
			     const struct sna_composite_channel *channel,
			     int16_t x, int16_t y)
{
	float t[3];

	if (channel->is_affine) {
		sna_get_transformed_coordinates(x + channel->offset[0],
						y + channel->offset[1],
						channel->transform,
						&t[0], &t[1]);
		OUT_VERTEX_F(t[0] * channel->scale[0]);
		OUT_VERTEX_F(t[1] * channel->scale[1]);
	} else {
		t[0] = t[1] = 0; t[2] = 1;
		sna_get_transformed_coordinates_3d(x + channel->offset[0],
						   y + channel->offset[1],
						   channel->transform,
						   &t[0], &t[1], &t[2]);
		OUT_VERTEX_F(t[0] * channel->scale[0]);
		OUT_VERTEX_F(t[1] * channel->scale[1]);
		OUT_VERTEX_F(t[2]);
	}
}

static void
gen5_emit_composite_texcoord_affine(struct sna *sna,
				    const struct sna_composite_channel *channel,
				    int16_t x, int16_t y)
{
	float t[2];

	sna_get_transformed_coordinates(x + channel->offset[0],
					y + channel->offset[1],
					channel->transform,
					&t[0], &t[1]);
	OUT_VERTEX_F(t[0] * channel->scale[0]);
	OUT_VERTEX_F(t[1] * channel->scale[1]);
}

static void
gen5_emit_composite_spans_vertex(struct sna *sna,
				 const struct sna_composite_spans_op *op,
				 int16_t x, int16_t y)
{
	OUT_VERTEX(x, y);
	gen5_emit_composite_texcoord(sna, &op->base.src, x, y);
}

fastcall static void
gen5_emit_composite_spans_primitive(struct sna *sna,
				    const struct sna_composite_spans_op *op,
				    const BoxRec *box,
				    float opacity)
{
	gen5_emit_composite_spans_vertex(sna, op, box->x2, box->y2);
	OUT_VERTEX_F(opacity);
	OUT_VERTEX_F(1);
	if (!op->base.is_affine)
		OUT_VERTEX_F(1);

	gen5_emit_composite_spans_vertex(sna, op, box->x1, box->y2);
	OUT_VERTEX_F(opacity);
	OUT_VERTEX_F(1);
	if (!op->base.is_affine)
		OUT_VERTEX_F(1);

	gen5_emit_composite_spans_vertex(sna, op, box->x1, box->y1);
	OUT_VERTEX_F(opacity);
	OUT_VERTEX_F(0);
	if (!op->base.is_affine)
		OUT_VERTEX_F(1);
}

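/* Spans reuse the composite shader's mask channel: the per-span
 * opacity rides in what would otherwise be the mask texture
 * coordinate, so no separate mask surface is needed.
 */
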
fastcall static void
gen5_emit_composite_spans_solid(struct sna *sna,
				const struct sna_composite_spans_op *op,
				const BoxRec *box,
				float opacity)
{
	OUT_VERTEX(box->x2, box->y2);
	OUT_VERTEX_F(1); OUT_VERTEX_F(1);
	OUT_VERTEX_F(opacity); OUT_VERTEX_F(1);

	OUT_VERTEX(box->x1, box->y2);
	OUT_VERTEX_F(0); OUT_VERTEX_F(1);
	OUT_VERTEX_F(opacity); OUT_VERTEX_F(1);

	OUT_VERTEX(box->x1, box->y1);
	OUT_VERTEX_F(0); OUT_VERTEX_F(0);
	OUT_VERTEX_F(opacity); OUT_VERTEX_F(0);
}

fastcall static void
gen5_emit_composite_spans_affine(struct sna *sna,
				 const struct sna_composite_spans_op *op,
				 const BoxRec *box,
				 float opacity)
{
	OUT_VERTEX(box->x2, box->y2);
	gen5_emit_composite_texcoord_affine(sna, &op->base.src,
					    box->x2, box->y2);
	OUT_VERTEX_F(opacity); OUT_VERTEX_F(1);

	OUT_VERTEX(box->x1, box->y2);
	gen5_emit_composite_texcoord_affine(sna, &op->base.src,
					    box->x1, box->y2);
	OUT_VERTEX_F(opacity); OUT_VERTEX_F(1);

	OUT_VERTEX(box->x1, box->y1);
	gen5_emit_composite_texcoord_affine(sna, &op->base.src,
					    box->x1, box->y1);
	OUT_VERTEX_F(opacity); OUT_VERTEX_F(0);
}

fastcall static void
gen5_render_composite_spans_box(struct sna *sna,
				const struct sna_composite_spans_op *op,