/*
 * Copyright 2008 Ben Skeggs
 * Copyright 2010 Christoph Bumiller
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
24
#include <string.h>
#include <strings.h>

#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "pipe/p_state.h"
#include "util/u_inlines.h"

#include "nv50/nv50_context.h"
#include "nv50/nv50_query_hw.h"

#include "nv50/nv50_compute.xml.h"
35
nv50_constbufs_validate(struct nv50_context *nv50)
37
struct nouveau_pushbuf *push = nv50->base.pushbuf;
40
for (s = 0; s < NV50_MAX_3D_SHADER_STAGES; ++s) {
43
if (s == NV50_SHADER_STAGE_FRAGMENT)
44
p = NV50_3D_SET_PROGRAM_CB_PROGRAM_FRAGMENT;
46
if (s == NV50_SHADER_STAGE_GEOMETRY)
47
p = NV50_3D_SET_PROGRAM_CB_PROGRAM_GEOMETRY;
49
p = NV50_3D_SET_PROGRAM_CB_PROGRAM_VERTEX;
51
while (nv50->constbuf_dirty[s]) {
52
const unsigned i = (unsigned)ffs(nv50->constbuf_dirty[s]) - 1;
54
assert(i < NV50_MAX_PIPE_CONSTBUFS);
55
nv50->constbuf_dirty[s] &= ~(1 << i);
57
if (nv50->constbuf[s][i].user) {
58
const unsigned b = NV50_CB_PVP + s;
60
unsigned words = nv50->constbuf[s][0].size / 4;
62
NOUVEAU_ERR("user constbufs only supported in slot 0\n");
65
if (!nv50->state.uniform_buffer_bound[s]) {
66
nv50->state.uniform_buffer_bound[s] = true;
67
BEGIN_NV04(push, NV50_3D(SET_PROGRAM_CB), 1);
68
PUSH_DATA (push, (b << 12) | (i << 8) | p | 1);
71
unsigned nr = MIN2(words, NV04_PFIFO_MAX_PACKET_LEN);
73
PUSH_SPACE(push, nr + 3);
74
BEGIN_NV04(push, NV50_3D(CB_ADDR), 1);
75
PUSH_DATA (push, (start << 8) | b);
76
BEGIN_NI04(push, NV50_3D(CB_DATA(0)), nr);
77
PUSH_DATAp(push, &nv50->constbuf[s][0].u.data[start * 4], nr);
83
struct nv04_resource *res =
84
nv04_resource(nv50->constbuf[s][i].u.buf);
86
/* TODO: allocate persistent bindings */
87
const unsigned b = s * 16 + i;
89
assert(nouveau_resource_mapped_by_gpu(&res->base));
91
BEGIN_NV04(push, NV50_3D(CB_DEF_ADDRESS_HIGH), 3);
92
PUSH_DATAh(push, res->address + nv50->constbuf[s][i].offset);
93
PUSH_DATA (push, res->address + nv50->constbuf[s][i].offset);
94
PUSH_DATA (push, (b << 16) |
95
(nv50->constbuf[s][i].size & 0xffff));
96
BEGIN_NV04(push, NV50_3D(SET_PROGRAM_CB), 1);
97
PUSH_DATA (push, (b << 12) | (i << 8) | p | 1);
99
BCTX_REFN(nv50->bufctx_3d, 3D_CB(s, i), res, RD);
101
nv50->cb_dirty = 1; /* Force cache flush for UBO. */
102
res->cb_bindings[s] |= 1 << i;
104
BEGIN_NV04(push, NV50_3D(SET_PROGRAM_CB), 1);
105
PUSH_DATA (push, (i << 8) | p | 0);
108
nv50->state.uniform_buffer_bound[s] = false;
113
/* Invalidate all COMPUTE constbufs because they are aliased with 3D. */
114
nv50->dirty_cp |= NV50_NEW_CP_CONSTBUF;
115
nv50->constbuf_dirty[NV50_SHADER_STAGE_COMPUTE] |= nv50->constbuf_valid[NV50_SHADER_STAGE_COMPUTE];
116
nv50->state.uniform_buffer_bound[NV50_SHADER_STAGE_COMPUTE] = false;
120
nv50_program_validate(struct nv50_context *nv50, struct nv50_program *prog)
122
if (!prog->translated) {
123
prog->translated = nv50_program_translate(
124
prog, nv50->screen->base.device->chipset, &nv50->base.debug);
125
if (!prog->translated)
131
return nv50_program_upload_code(nv50, prog);
135
nv50_program_update_context_state(struct nv50_context *nv50,
136
struct nv50_program *prog, int stage)
138
const unsigned flags = NOUVEAU_BO_VRAM | NOUVEAU_BO_RDWR;
140
if (prog && prog->tls_space) {
141
if (nv50->state.new_tls_space)
142
nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_3D_TLS);
143
if (!nv50->state.tls_required || nv50->state.new_tls_space)
144
BCTX_REFN_bo(nv50->bufctx_3d, 3D_TLS, flags, nv50->screen->tls_bo);
145
nv50->state.new_tls_space = false;
146
nv50->state.tls_required |= 1 << stage;
148
if (nv50->state.tls_required == (1 << stage))
149
nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_3D_TLS);
150
nv50->state.tls_required &= ~(1 << stage);
155
nv50_vertprog_validate(struct nv50_context *nv50)
157
struct nouveau_pushbuf *push = nv50->base.pushbuf;
158
struct nv50_program *vp = nv50->vertprog;
160
if (!nv50_program_validate(nv50, vp))
162
nv50_program_update_context_state(nv50, vp, 0);
164
BEGIN_NV04(push, NV50_3D(VP_ATTR_EN(0)), 2);
165
PUSH_DATA (push, vp->vp.attrs[0]);
166
PUSH_DATA (push, vp->vp.attrs[1]);
167
BEGIN_NV04(push, NV50_3D(VP_REG_ALLOC_RESULT), 1);
168
PUSH_DATA (push, vp->max_out);
169
BEGIN_NV04(push, NV50_3D(VP_REG_ALLOC_TEMP), 1);
170
PUSH_DATA (push, vp->max_gpr);
171
BEGIN_NV04(push, NV50_3D(VP_START_ID), 1);
172
PUSH_DATA (push, vp->code_base);
176
nv50_fragprog_validate(struct nv50_context *nv50)
178
struct nouveau_pushbuf *push = nv50->base.pushbuf;
179
struct nv50_program *fp = nv50->fragprog;
180
struct pipe_rasterizer_state *rast = &nv50->rast->pipe;
185
if (nv50->zsa && nv50->zsa->pipe.alpha_enabled) {
186
struct pipe_framebuffer_state *fb = &nv50->framebuffer;
187
bool blendable = fb->nr_cbufs == 0 || !fb->cbufs[0] ||
188
nv50->screen->base.base.is_format_supported(
189
&nv50->screen->base.base,
190
fb->cbufs[0]->format,
191
fb->cbufs[0]->texture->target,
192
fb->cbufs[0]->texture->nr_samples,
193
fb->cbufs[0]->texture->nr_storage_samples,
194
PIPE_BIND_BLENDABLE);
195
/* If we already have alphatest code, we have to keep updating
196
* it. However we only have to have different code if the current RT0 is
197
* non-blendable. Otherwise we just set it to always pass and use the
198
* hardware alpha test.
200
if (fp->fp.alphatest || !blendable) {
201
uint8_t alphatest = PIPE_FUNC_ALWAYS + 1;
203
alphatest = nv50->zsa->pipe.alpha_func + 1;
204
if (!fp->fp.alphatest)
205
nv50_program_destroy(nv50, fp);
206
else if (fp->mem && fp->fp.alphatest != alphatest)
207
nouveau_heap_free(&fp->mem);
209
fp->fp.alphatest = alphatest;
211
} else if (fp->fp.alphatest && fp->fp.alphatest != PIPE_FUNC_ALWAYS + 1) {
212
/* Alpha test is disabled but we have a shader where it's filled
213
* in. Make sure to reset the function to 'always', otherwise it'll end
214
* up discarding fragments incorrectly.
217
nouveau_heap_free(&fp->mem);
219
fp->fp.alphatest = PIPE_FUNC_ALWAYS + 1;
222
if (fp->fp.force_persample_interp != rast->force_persample_interp) {
223
/* Force the program to be reuploaded, which will trigger interp fixups
227
nouveau_heap_free(&fp->mem);
229
fp->fp.force_persample_interp = rast->force_persample_interp;
232
if (fp->mem && !(nv50->dirty_3d & (NV50_NEW_3D_FRAGPROG | NV50_NEW_3D_MIN_SAMPLES)))
235
if (!nv50_program_validate(nv50, fp))
237
nv50_program_update_context_state(nv50, fp, 1);
239
BEGIN_NV04(push, NV50_3D(FP_REG_ALLOC_TEMP), 1);
240
PUSH_DATA (push, fp->max_gpr);
241
BEGIN_NV04(push, NV50_3D(FP_RESULT_COUNT), 1);
242
PUSH_DATA (push, fp->max_out);
243
BEGIN_NV04(push, NV50_3D(FP_CONTROL), 1);
244
PUSH_DATA (push, fp->fp.flags[0]);
245
BEGIN_NV04(push, NV50_3D(FP_CTRL_UNK196C), 1);
246
PUSH_DATA (push, fp->fp.flags[1]);
247
BEGIN_NV04(push, NV50_3D(FP_START_ID), 1);
248
PUSH_DATA (push, fp->code_base);
250
if (nv50->screen->tesla->oclass >= NVA3_3D_CLASS) {
251
BEGIN_NV04(push, SUBC_3D(NVA3_3D_FP_MULTISAMPLE), 1);
252
if (nv50->min_samples > 1 || fp->fp.has_samplemask)
254
NVA3_3D_FP_MULTISAMPLE_FORCE_PER_SAMPLE |
255
(NVA3_3D_FP_MULTISAMPLE_EXPORT_SAMPLE_MASK *
256
fp->fp.has_samplemask));
263
nv50_gmtyprog_validate(struct nv50_context *nv50)
265
struct nouveau_pushbuf *push = nv50->base.pushbuf;
266
struct nv50_program *gp = nv50->gmtyprog;
269
if (!nv50_program_validate(nv50, gp))
271
BEGIN_NV04(push, NV50_3D(GP_REG_ALLOC_TEMP), 1);
272
PUSH_DATA (push, gp->max_gpr);
273
BEGIN_NV04(push, NV50_3D(GP_REG_ALLOC_RESULT), 1);
274
PUSH_DATA (push, gp->max_out);
275
BEGIN_NV04(push, NV50_3D(GP_OUTPUT_PRIMITIVE_TYPE), 1);
276
PUSH_DATA (push, gp->gp.prim_type);
277
BEGIN_NV04(push, NV50_3D(GP_VERTEX_OUTPUT_COUNT), 1);
278
PUSH_DATA (push, gp->gp.vert_count);
279
BEGIN_NV04(push, NV50_3D(GP_START_ID), 1);
280
PUSH_DATA (push, gp->code_base);
282
nv50->state.prim_size = gp->gp.prim_type; /* enum matches vertex count */
284
nv50_program_update_context_state(nv50, gp, 2);
286
/* GP_ENABLE is updated in linkage validation */
290
nv50_compprog_validate(struct nv50_context *nv50)
292
struct nouveau_pushbuf *push = nv50->base.pushbuf;
293
struct nv50_program *cp = nv50->compprog;
295
if (cp && !nv50_program_validate(nv50, cp))
298
BEGIN_NV04(push, NV50_CP(CODE_CB_FLUSH), 1);
303
nv50_sprite_coords_validate(struct nv50_context *nv50)
305
struct nouveau_pushbuf *push = nv50->base.pushbuf;
306
uint32_t pntc[8], mode;
307
struct nv50_program *fp = nv50->fragprog;
309
unsigned m = (nv50->state.interpolant_ctrl >> 8) & 0xff;
311
if (!nv50->rast->pipe.point_quad_rasterization) {
312
if (nv50->state.point_sprite) {
313
BEGIN_NV04(push, NV50_3D(POINT_COORD_REPLACE_MAP(0)), 8);
314
for (i = 0; i < 8; ++i)
317
nv50->state.point_sprite = false;
321
nv50->state.point_sprite = true;
324
memset(pntc, 0, sizeof(pntc));
326
for (i = 0; i < fp->in_nr; i++) {
327
unsigned n = util_bitcount(fp->in[i].mask);
329
if (fp->in[i].sn != TGSI_SEMANTIC_GENERIC) {
333
if (!(nv50->rast->pipe.sprite_coord_enable & (1 << fp->in[i].si))) {
338
for (c = 0; c < 4; ++c) {
339
if (fp->in[i].mask & (1 << c)) {
340
pntc[m / 8] |= (c + 1) << ((m % 8) * 4);
346
if (nv50->rast->pipe.sprite_coord_mode == PIPE_SPRITE_COORD_LOWER_LEFT)
351
BEGIN_NV04(push, NV50_3D(POINT_SPRITE_CTRL), 1);
352
PUSH_DATA (push, mode);
354
BEGIN_NV04(push, NV50_3D(POINT_COORD_REPLACE_MAP(0)), 8);
355
PUSH_DATAp(push, pntc, 8);
358
/* Validate state derived from shaders and the rasterizer cso. */
360
nv50_validate_derived_rs(struct nv50_context *nv50)
362
struct nouveau_pushbuf *push = nv50->base.pushbuf;
363
uint32_t color, psize;
365
nv50_sprite_coords_validate(nv50);
367
if (nv50->state.rasterizer_discard != nv50->rast->pipe.rasterizer_discard) {
368
nv50->state.rasterizer_discard = nv50->rast->pipe.rasterizer_discard;
369
BEGIN_NV04(push, NV50_3D(RASTERIZE_ENABLE), 1);
370
PUSH_DATA (push, !nv50->rast->pipe.rasterizer_discard);
373
if (nv50->dirty_3d & NV50_NEW_3D_FRAGPROG)
375
psize = nv50->state.semantic_psize & ~NV50_3D_SEMANTIC_PTSZ_PTSZ_EN__MASK;
376
color = nv50->state.semantic_color & ~NV50_3D_SEMANTIC_COLOR_CLMP_EN;
378
if (nv50->rast->pipe.clamp_vertex_color)
379
color |= NV50_3D_SEMANTIC_COLOR_CLMP_EN;
381
if (color != nv50->state.semantic_color) {
382
nv50->state.semantic_color = color;
383
BEGIN_NV04(push, NV50_3D(SEMANTIC_COLOR), 1);
384
PUSH_DATA (push, color);
387
if (nv50->rast->pipe.point_size_per_vertex)
388
psize |= NV50_3D_SEMANTIC_PTSZ_PTSZ_EN__MASK;
390
if (psize != nv50->state.semantic_psize) {
391
nv50->state.semantic_psize = psize;
392
BEGIN_NV04(push, NV50_3D(SEMANTIC_PTSZ), 1);
393
PUSH_DATA (push, psize);
398
nv50_vec4_map(uint8_t *map, int mid, uint32_t lin[4],
399
struct nv50_varying *in, struct nv50_varying *out)
402
uint8_t mv = out->mask, mf = in->mask, oid = out->hw;
404
for (c = 0; c < 4; ++c) {
407
lin[mid / 32] |= 1 << (mid % 32);
425
nv50_fp_linkage_validate(struct nv50_context *nv50)
427
struct nouveau_pushbuf *push = nv50->base.pushbuf;
428
struct nv50_program *vp = nv50->gmtyprog ? nv50->gmtyprog : nv50->vertprog;
429
struct nv50_program *fp = nv50->fragprog;
430
struct nv50_varying dummy;
433
uint32_t layerid = 0;
434
uint32_t viewportid = 0;
435
uint32_t psiz = 0x000;
436
uint32_t interp = fp->fp.interp;
437
uint32_t colors = fp->fp.colors;
438
uint32_t clpd_nr = util_last_bit(vp->vp.clip_enable | vp->vp.cull_enable);
443
if (!(nv50->dirty_3d & (NV50_NEW_3D_VERTPROG |
444
NV50_NEW_3D_FRAGPROG |
445
NV50_NEW_3D_GMTYPROG))) {
447
ffc = (nv50->state.semantic_color & NV50_3D_SEMANTIC_COLOR_FFC0_ID__MASK);
448
bfc = (nv50->state.semantic_color & NV50_3D_SEMANTIC_COLOR_BFC0_ID__MASK)
450
if (nv50->rast->pipe.light_twoside == ((ffc == bfc) ? 0 : 1))
454
memset(lin, 0x00, sizeof(lin));
456
/* XXX: in buggy-endian mode, is the first element of map (u32)0x000000xx
457
* or is it the first byte ?
459
memset(map, nv50->gmtyprog ? 0x80 : 0x40, sizeof(map));
461
dummy.mask = 0xf; /* map all components of HPOS */
463
m = nv50_vec4_map(map, 0, lin, &dummy, &vp->out[0]);
465
for (c = 0; c < clpd_nr; ++c)
466
map[m++] = vp->vp.clpd[c / 4] + (c % 4);
468
colors |= m << 8; /* adjust BFC0 id */
472
/* if light_twoside is active, FFC0_ID == BFC0_ID is invalid */
473
if (nv50->rast->pipe.light_twoside) {
474
for (i = 0; i < 2; ++i) {
476
if (fp->vp.bfc[i] >= fp->in_nr)
478
m = nv50_vec4_map(map, m, lin, &fp->in[fp->vp.bfc[i]],
479
(n < vp->out_nr) ? &vp->out[n] : &dummy);
482
colors += m - 4; /* adjust FFC0 id */
483
interp |= m << 8; /* set map id where 'normal' FP inputs start */
485
for (i = 0; i < fp->in_nr; ++i) {
486
for (n = 0; n < vp->out_nr; ++n)
487
if (vp->out[n].sn == fp->in[i].sn &&
488
vp->out[n].si == fp->in[i].si)
490
switch (fp->in[i].sn) {
491
case TGSI_SEMANTIC_PRIMID:
494
case TGSI_SEMANTIC_LAYER:
497
case TGSI_SEMANTIC_VIEWPORT_INDEX:
501
m = nv50_vec4_map(map, m, lin,
502
&fp->in[i], (n < vp->out_nr) ? &vp->out[n] : &dummy);
505
if (vp->gp.has_layer && !layerid) {
507
map[m++] = vp->gp.layerid;
510
if (vp->gp.has_viewport && !viewportid) {
512
map[m++] = vp->gp.viewportid;
515
if (nv50->rast->pipe.point_size_per_vertex) {
517
map[m++] = vp->vp.psiz;
520
if (nv50->rast->pipe.clamp_vertex_color)
521
colors |= NV50_3D_SEMANTIC_COLOR_CLMP_EN;
523
if (unlikely(vp->so)) {
524
/* Slot i in STRMOUT_MAP specifies the offset where slot i in RESULT_MAP
528
* Inverting vp->so->map (output -> offset) would probably speed this up.
530
memset(so_map, 0, sizeof(so_map));
531
for (i = 0; i < vp->so->map_size; ++i) {
532
if (vp->so->map[i] == 0xff)
534
for (c = 0; c < m; ++c)
535
if (map[c] == vp->so->map[i] && !so_map[c])
539
map[m++] = vp->so->map[i];
541
so_map[c] = 0x80 | i;
543
for (c = m; c & 3; ++c)
550
if (unlikely(nv50->gmtyprog)) {
551
BEGIN_NV04(push, NV50_3D(GP_RESULT_MAP_SIZE), 1);
553
BEGIN_NV04(push, NV50_3D(GP_RESULT_MAP(0)), n);
554
PUSH_DATAp(push, map, n);
556
BEGIN_NV04(push, NV50_3D(VP_GP_BUILTIN_ATTR_EN), 1);
557
PUSH_DATA (push, vp->vp.attrs[2] | fp->vp.attrs[2]);
559
BEGIN_NV04(push, NV50_3D(SEMANTIC_PRIM_ID), 1);
560
PUSH_DATA (push, primid);
563
BEGIN_NV04(push, NV50_3D(VP_RESULT_MAP_SIZE), 1);
565
BEGIN_NV04(push, NV50_3D(VP_RESULT_MAP(0)), n);
566
PUSH_DATAp(push, map, n);
569
BEGIN_NV04(push, NV50_3D(GP_VIEWPORT_ID_ENABLE), 5);
570
PUSH_DATA (push, vp->gp.has_viewport);
571
PUSH_DATA (push, colors);
572
PUSH_DATA (push, (clpd_nr << 8) | 4);
573
PUSH_DATA (push, layerid);
574
PUSH_DATA (push, psiz);
576
BEGIN_NV04(push, NV50_3D(SEMANTIC_VIEWPORT), 1);
577
PUSH_DATA (push, viewportid);
579
BEGIN_NV04(push, NV50_3D(LAYER), 1);
580
PUSH_DATA (push, vp->gp.has_layer << 16);
582
BEGIN_NV04(push, NV50_3D(FP_INTERPOLANT_CTRL), 1);
583
PUSH_DATA (push, interp);
585
nv50->state.interpolant_ctrl = interp;
587
nv50->state.semantic_color = colors;
588
nv50->state.semantic_psize = psiz;
590
BEGIN_NV04(push, NV50_3D(NOPERSPECTIVE_BITMAP(0)), 4);
591
PUSH_DATAp(push, lin, 4);
593
BEGIN_NV04(push, NV50_3D(GP_ENABLE), 1);
594
PUSH_DATA (push, nv50->gmtyprog ? 1 : 0);
597
BEGIN_NV04(push, NV50_3D(STRMOUT_MAP(0)), n);
598
PUSH_DATAp(push, so_map, n);
603
nv50_vp_gp_mapping(uint8_t *map, int m,
604
struct nv50_program *vp, struct nv50_program *gp)
608
for (i = 0; i < gp->in_nr; ++i) {
609
uint8_t oid = 0, mv = 0, mg = gp->in[i].mask;
611
for (j = 0; j < vp->out_nr; ++j) {
612
if (vp->out[j].sn == gp->in[i].sn &&
613
vp->out[j].si == gp->in[i].si) {
614
mv = vp->out[j].mask;
620
for (c = 0; c < 4; ++c, mv >>= 1, mg >>= 1) {
625
map[m++] = (c == 3) ? 0x41 : 0x40;
635
nv50_gp_linkage_validate(struct nv50_context *nv50)
637
struct nouveau_pushbuf *push = nv50->base.pushbuf;
638
struct nv50_program *vp = nv50->vertprog;
639
struct nv50_program *gp = nv50->gmtyprog;
646
memset(map, 0, sizeof(map));
648
m = nv50_vp_gp_mapping(map, m, vp, gp);
652
BEGIN_NV04(push, NV50_3D(VP_GP_BUILTIN_ATTR_EN), 1);
653
PUSH_DATA (push, vp->vp.attrs[2] | gp->vp.attrs[2]);
656
BEGIN_NV04(push, NV50_3D(VP_RESULT_MAP_SIZE), 1);
658
BEGIN_NV04(push, NV50_3D(VP_RESULT_MAP(0)), n);
659
PUSH_DATAp(push, map, n);
663
nv50_stream_output_validate(struct nv50_context *nv50)
665
struct nouveau_pushbuf *push = nv50->base.pushbuf;
666
struct nv50_stream_output_state *so;
671
so = nv50->gmtyprog ? nv50->gmtyprog->so : nv50->vertprog->so;
673
BEGIN_NV04(push, NV50_3D(STRMOUT_ENABLE), 1);
675
if (!so || !nv50->num_so_targets) {
676
if (nv50->screen->base.class_3d < NVA0_3D_CLASS) {
677
BEGIN_NV04(push, NV50_3D(STRMOUT_PRIMITIVE_LIMIT), 1);
680
BEGIN_NV04(push, NV50_3D(STRMOUT_PARAMS_LATCH), 1);
685
/* previous TFB needs to complete */
686
if (nv50->screen->base.class_3d < NVA0_3D_CLASS) {
687
BEGIN_NV04(push, SUBC_3D(NV50_GRAPH_SERIALIZE), 1);
692
if (nv50->screen->base.class_3d >= NVA0_3D_CLASS)
693
ctrl |= NVA0_3D_STRMOUT_BUFFERS_CTRL_LIMIT_MODE_OFFSET;
695
BEGIN_NV04(push, NV50_3D(STRMOUT_BUFFERS_CTRL), 1);
696
PUSH_DATA (push, ctrl);
698
for (i = 0; i < nv50->num_so_targets; ++i) {
699
struct nv50_so_target *targ = nv50_so_target(nv50->so_target[i]);
700
struct nv04_resource *buf = nv04_resource(targ->pipe.buffer);
702
const unsigned n = nv50->screen->base.class_3d >= NVA0_3D_CLASS ? 4 : 3;
704
uint32_t so_used = 0;
708
nv84_hw_query_fifo_wait(push, nv50_query(targ->pq));
710
so_used = nv50->so_used[i];
712
BEGIN_NV04(push, NV50_3D(STRMOUT_ADDRESS_HIGH(i)), n);
713
PUSH_DATAh(push, buf->address + targ->pipe.buffer_offset + so_used);
714
PUSH_DATA (push, buf->address + targ->pipe.buffer_offset + so_used);
715
PUSH_DATA (push, so->num_attribs[i]);
717
PUSH_DATA(push, targ->pipe.buffer_size);
720
nv50_hw_query_pushbuf_submit(push, NVA0_3D_STRMOUT_OFFSET(i),
721
nv50_query(targ->pq), 0x4);
723
BEGIN_NV04(push, NVA0_3D(STRMOUT_OFFSET(i)), 1);
728
const unsigned limit = (targ->pipe.buffer_size - so_used) /
729
(so->stride[i] * nv50->state.prim_size);
730
prims = MIN2(prims, limit);
733
targ->stride = so->stride[i];
734
BCTX_REFN(nv50->bufctx_3d, 3D_SO, buf, WR);
737
BEGIN_NV04(push, NV50_3D(STRMOUT_PRIMITIVE_LIMIT), 1);
738
PUSH_DATA (push, prims);
740
BEGIN_NV04(push, NV50_3D(STRMOUT_PARAMS_LATCH), 1);
742
BEGIN_NV04(push, NV50_3D(STRMOUT_ENABLE), 1);