/**********************************************************
 * Copyright 2008-2009 VMware, Inc.  All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **********************************************************/
26
#include "util/u_inlines.h"
27
#include "pipe/p_defines.h"
28
#include "util/u_math.h"
29
#include "util/u_memory.h"
30
#include "util/u_bitmask.h"
31
#include "translate/translate.h"
32
#include "tgsi/tgsi_ureg.h"
34
#include "svga_context.h"
35
#include "svga_state.h"
37
#include "svga_shader.h"
38
#include "svga_tgsi.h"
40
#include "svga_hw_reg.h"
44
* If we fail to compile a vertex shader we'll use a dummy/fallback shader
45
* that simply emits a (0,0,0,1) vertex position.
47
static const struct tgsi_token *
48
get_dummy_vertex_shader(void)
50
static const float zero[4] = { 0.0, 0.0, 0.0, 1.0 };
51
struct ureg_program *ureg;
52
const struct tgsi_token *tokens;
56
ureg = ureg_create(PIPE_SHADER_VERTEX);
60
dst = ureg_DECL_output(ureg, TGSI_SEMANTIC_POSITION, 0);
61
src = ureg_DECL_immediate(ureg, zero, 4);
62
ureg_MOV(ureg, dst, src);
65
tokens = ureg_get_tokens(ureg, NULL);
73
static struct svga_shader_variant *
74
translate_vertex_program(struct svga_context *svga,
75
const struct svga_vertex_shader *vs,
76
const struct svga_compile_key *key)
78
if (svga_have_vgpu10(svga)) {
79
return svga_tgsi_vgpu10_translate(svga, &vs->base, key,
83
return svga_tgsi_vgpu9_translate(svga, &vs->base, key,
90
* Replace the given shader's instruction with a simple / dummy shader.
91
* We use this when normal shader translation fails.
93
static struct svga_shader_variant *
94
get_compiled_dummy_vertex_shader(struct svga_context *svga,
95
struct svga_vertex_shader *vs,
96
const struct svga_compile_key *key)
98
const struct tgsi_token *dummy = get_dummy_vertex_shader();
99
struct svga_shader_variant *variant;
105
FREE((void *) vs->base.tokens);
106
vs->base.tokens = dummy;
108
tgsi_scan_shader(vs->base.tokens, &vs->base.info);
109
vs->generic_outputs = svga_get_generic_outputs_mask(&vs->base.info);
111
variant = translate_vertex_program(svga, vs, key);
117
* Translate TGSI shader into an svga shader variant.
119
static enum pipe_error
120
compile_vs(struct svga_context *svga,
121
struct svga_vertex_shader *vs,
122
const struct svga_compile_key *key,
123
struct svga_shader_variant **out_variant)
125
struct svga_shader_variant *variant;
126
enum pipe_error ret = PIPE_ERROR;
128
variant = translate_vertex_program(svga, vs, key);
129
if (variant == NULL) {
130
debug_printf("Failed to compile vertex shader,"
131
" using dummy shader instead.\n");
132
variant = get_compiled_dummy_vertex_shader(svga, vs, key);
134
else if (svga_shader_too_large(svga, variant)) {
135
/* too big, use dummy shader */
136
debug_printf("Shader too large (%u bytes),"
137
" using dummy shader instead.\n",
138
(unsigned) (variant->nr_tokens
139
* sizeof(variant->tokens[0])));
140
/* Free the too-large variant */
141
svga_destroy_shader_variant(svga, variant);
142
/* Use simple pass-through shader instead */
143
variant = get_compiled_dummy_vertex_shader(svga, vs, key);
150
ret = svga_define_shader(svga, variant);
151
if (ret != PIPE_OK) {
152
svga_destroy_shader_variant(svga, variant);
156
*out_variant = variant;
162
/* SVGA_NEW_PRESCALE, SVGA_NEW_RAST, SVGA_NEW_FS
165
make_vs_key(struct svga_context *svga, struct svga_compile_key *key)
167
struct svga_vertex_shader *vs = svga->curr.vs;
169
memset(key, 0, sizeof *key);
171
if (svga->state.sw.need_swtnl && svga_have_vgpu10(svga)) {
172
/* Set both of these flags, to match compile_passthrough_vs() */
173
key->vs.passthrough = 1;
174
key->vs.undo_viewport = 1;
178
if (svga_have_vgpu10(svga)) {
179
key->vs.need_vertex_id_bias = 1;
182
/* SVGA_NEW_PRESCALE */
183
key->vs.need_prescale = svga->state.hw_clear.prescale[0].enabled &&
184
(svga->curr.tes == NULL) &&
185
(svga->curr.gs == NULL);
188
key->vs.allow_psiz = svga->curr.rast->templ.point_size_per_vertex;
191
key->vs.fs_generic_inputs = svga->curr.fs->generic_inputs;
193
svga_remap_generics(key->vs.fs_generic_inputs, key->generic_remap_table);
195
/* SVGA_NEW_VELEMENT */
196
key->vs.adjust_attrib_range = svga->curr.velems->adjust_attrib_range;
197
key->vs.adjust_attrib_w_1 = svga->curr.velems->adjust_attrib_w_1;
198
key->vs.attrib_is_pure_int = svga->curr.velems->attrib_is_pure_int;
199
key->vs.adjust_attrib_itof = svga->curr.velems->adjust_attrib_itof;
200
key->vs.adjust_attrib_utof = svga->curr.velems->adjust_attrib_utof;
201
key->vs.attrib_is_bgra = svga->curr.velems->attrib_is_bgra;
202
key->vs.attrib_puint_to_snorm = svga->curr.velems->attrib_puint_to_snorm;
203
key->vs.attrib_puint_to_uscaled = svga->curr.velems->attrib_puint_to_uscaled;
204
key->vs.attrib_puint_to_sscaled = svga->curr.velems->attrib_puint_to_sscaled;
206
/* SVGA_NEW_TEXTURE_BINDING | SVGA_NEW_SAMPLER */
207
svga_init_shader_key_common(svga, PIPE_SHADER_VERTEX, &vs->base, key);
210
key->clip_plane_enable = svga->curr.rast->templ.clip_plane_enable;
212
/* Determine if this shader is the last shader in the vertex
215
key->last_vertex_stage = !(svga->curr.gs ||
216
svga->curr.tcs || svga->curr.tes);
218
if (svga_have_gl43(svga))
219
key->image_size_used = vs->base.info.opcode_count[TGSI_OPCODE_RESQ] ? 1 : 0;
224
* svga_reemit_vs_bindings - Reemit the vertex shader bindings
227
svga_reemit_vs_bindings(struct svga_context *svga)
230
struct svga_winsys_gb_shader *gbshader = NULL;
231
SVGA3dShaderId shaderId = SVGA3D_INVALID_ID;
233
assert(svga->rebind.flags.vs);
234
assert(svga_have_gb_objects(svga));
236
if (svga->state.hw_draw.vs) {
237
gbshader = svga->state.hw_draw.vs->gb_shader;
238
shaderId = svga->state.hw_draw.vs->id;
241
if (!svga_need_to_rebind_resources(svga)) {
242
ret = svga->swc->resource_rebind(svga->swc, NULL, gbshader,
246
if (svga_have_vgpu10(svga))
247
ret = SVGA3D_vgpu10_SetShader(svga->swc, SVGA3D_SHADERTYPE_VS,
250
ret = SVGA3D_SetGBShader(svga->swc, SVGA3D_SHADERTYPE_VS, gbshader);
256
svga->rebind.flags.vs = FALSE;
262
* The current vertex shader is already executed by the 'draw'
263
* module, so we just need to generate a simple vertex shader
264
* to pass through all those VS outputs that will
265
* be consumed by the fragment shader.
266
* Used when we employ the 'draw' module.
268
static enum pipe_error
269
compile_passthrough_vs(struct svga_context *svga,
270
struct svga_vertex_shader *vs,
271
struct svga_fragment_shader *fs,
272
struct svga_shader_variant **out_variant)
274
struct svga_shader_variant *variant = NULL;
277
unsigned num_elements;
278
struct svga_vertex_shader new_vs;
279
struct ureg_src src[PIPE_MAX_SHADER_INPUTS];
280
struct ureg_dst dst[PIPE_MAX_SHADER_OUTPUTS];
281
struct ureg_program *ureg;
282
struct svga_compile_key key;
285
assert(svga_have_vgpu10(svga));
288
num_inputs = fs->base.info.num_inputs;
290
ureg = ureg_create(PIPE_SHADER_VERTEX);
292
return PIPE_ERROR_OUT_OF_MEMORY;
294
/* draw will always add position */
295
dst[0] = ureg_DECL_output(ureg, TGSI_SEMANTIC_POSITION, 0);
296
src[0] = ureg_DECL_vs_input(ureg, 0);
300
* swtnl backend redefines the input layout based on the
301
* fragment shader's inputs. So we only need to passthrough
302
* those inputs that will be consumed by the fragment shader.
303
* Note: DX10 requires the number of vertex elements
304
* specified in the input layout to be no less than the
305
* number of inputs to the vertex shader.
307
for (i = 0; i < num_inputs; i++) {
308
switch (fs->base.info.input_semantic_name[i]) {
309
case TGSI_SEMANTIC_COLOR:
310
case TGSI_SEMANTIC_GENERIC:
311
case TGSI_SEMANTIC_FOG:
312
dst[num_elements] = ureg_DECL_output(ureg,
313
fs->base.info.input_semantic_name[i],
314
fs->base.info.input_semantic_index[i]);
315
src[num_elements] = ureg_DECL_vs_input(ureg, num_elements);
323
for (i = 0; i < num_elements; i++) {
324
ureg_MOV(ureg, dst[i], src[i]);
329
memset(&new_vs, 0, sizeof(new_vs));
330
new_vs.base.tokens = ureg_get_tokens(ureg, NULL);
331
tgsi_scan_shader(new_vs.base.tokens, &new_vs.base.info);
333
memset(&key, 0, sizeof(key));
334
key.vs.undo_viewport = 1;
336
ret = compile_vs(svga, &new_vs, &key, &variant);
340
ureg_free_tokens(new_vs.base.tokens);
343
/* Overwrite the variant key to indicate it's a pass-through VS */
344
memset(&variant->key, 0, sizeof(variant->key));
345
variant->key.vs.passthrough = 1;
346
variant->key.vs.undo_viewport = 1;
348
*out_variant = variant;
354
static enum pipe_error
355
emit_hw_vs(struct svga_context *svga, uint64_t dirty)
357
struct svga_shader_variant *variant;
358
struct svga_vertex_shader *vs = svga->curr.vs;
359
struct svga_fragment_shader *fs = svga->curr.fs;
360
enum pipe_error ret = PIPE_OK;
361
struct svga_compile_key key;
363
SVGA_STATS_TIME_PUSH(svga_sws(svga), SVGA_STATS_TIME_EMITVS);
365
/* If there is an active geometry shader, and it has stream output
366
* defined, then we will skip the stream output from the vertex shader
368
if (!svga_have_gs_streamout(svga)) {
369
/* No GS stream out */
370
if (svga_have_vs_streamout(svga)) {
371
/* Set VS stream out */
372
ret = svga_set_stream_output(svga, vs->base.stream_output);
375
/* turn off stream out */
376
ret = svga_set_stream_output(svga, NULL);
378
if (ret != PIPE_OK) {
383
/* SVGA_NEW_NEED_SWTNL */
384
if (svga->state.sw.need_swtnl && !svga_have_vgpu10(svga)) {
385
/* No vertex shader is needed */
389
make_vs_key(svga, &key);
391
/* See if we already have a VS variant that matches the key */
392
variant = svga_search_shader_key(&vs->base, &key);
395
/* Create VS variant now */
396
if (key.vs.passthrough) {
397
ret = compile_passthrough_vs(svga, vs, fs, &variant);
400
ret = compile_vs(svga, vs, &key, &variant);
405
/* insert the new variant at head of linked list */
407
variant->next = vs->base.variants;
408
vs->base.variants = variant;
412
if (variant != svga->state.hw_draw.vs) {
413
/* Bind the new variant */
415
ret = svga_set_shader(svga, SVGA3D_SHADERTYPE_VS, variant);
418
svga->rebind.flags.vs = FALSE;
421
svga->dirty |= SVGA_NEW_VS_VARIANT;
422
svga->state.hw_draw.vs = variant;
426
SVGA_STATS_TIME_POP(svga_sws(svga));
430
struct svga_tracked_state svga_hw_vs =
432
"vertex shader (hwtnl)",
435
SVGA_NEW_TEXTURE_BINDING |
440
SVGA_NEW_NEED_SWTNL |
441
SVGA_NEW_VS_RAW_BUFFER),