/**************************************************************************
 *
 * Copyright 2009-2010 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE, INC AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
29
#include "pipe/p_screen.h"
30
#include "pipe/p_context.h"
31
#include "pipe/p_state.h"
32
#include "tgsi/tgsi_ureg.h"
33
#include "tgsi/tgsi_build.h"
34
#include "tgsi/tgsi_from_mesa.h"
35
#include "tgsi/tgsi_info.h"
36
#include "tgsi/tgsi_dump.h"
37
#include "tgsi/tgsi_sanity.h"
38
#include "util/u_debug.h"
39
#include "util/u_inlines.h"
40
#include "util/u_memory.h"
41
#include "util/u_math.h"
42
#include "util/u_prim.h"
43
#include "util/u_bitmask.h"
45
#include "compiler/shader_info.h"
47
union tgsi_any_token {
48
struct tgsi_header header;
49
struct tgsi_processor processor;
50
struct tgsi_token token;
51
struct tgsi_property prop;
52
struct tgsi_property_data prop_data;
53
struct tgsi_declaration decl;
54
struct tgsi_declaration_range decl_range;
55
struct tgsi_declaration_dimension decl_dim;
56
struct tgsi_declaration_interp decl_interp;
57
struct tgsi_declaration_image decl_image;
58
struct tgsi_declaration_semantic decl_semantic;
59
struct tgsi_declaration_sampler_view decl_sampler_view;
60
struct tgsi_declaration_array array;
61
struct tgsi_immediate imm;
62
union tgsi_immediate_data imm_data;
63
struct tgsi_instruction insn;
64
struct tgsi_instruction_label insn_label;
65
struct tgsi_instruction_texture insn_texture;
66
struct tgsi_instruction_memory insn_memory;
67
struct tgsi_texture_offset insn_texture_offset;
68
struct tgsi_src_register src;
69
struct tgsi_ind_register ind;
70
struct tgsi_dimension dim;
71
struct tgsi_dst_register dst;
77
union tgsi_any_token *tokens;
83
#define UREG_MAX_INPUT (4 * PIPE_MAX_SHADER_INPUTS)
84
#define UREG_MAX_SYSTEM_VALUE PIPE_MAX_ATTRIBS
85
#define UREG_MAX_OUTPUT (4 * PIPE_MAX_SHADER_OUTPUTS)
86
#define UREG_MAX_CONSTANT_RANGE 32
87
#define UREG_MAX_HW_ATOMIC_RANGE 32
88
#define UREG_MAX_IMMEDIATE 4096
89
#define UREG_MAX_ADDR 3
90
#define UREG_MAX_ARRAY_TEMPS 256
96
} constant_range[UREG_MAX_CONSTANT_RANGE];
97
unsigned nr_constant_ranges;
100
struct hw_atomic_decl {
105
} hw_atomic_range[UREG_MAX_HW_ATOMIC_RANGE];
106
unsigned nr_hw_atomic_ranges;
109
#define DOMAIN_DECL 0
110
#define DOMAIN_INSN 1
114
enum pipe_shader_type processor;
115
bool supports_any_inout_decl_range;
116
int next_shader_processor;
118
struct ureg_input_decl {
119
enum tgsi_semantic semantic_name;
120
unsigned semantic_index;
121
enum tgsi_interpolate_mode interp;
122
unsigned char usage_mask;
123
enum tgsi_interpolate_loc interp_location;
127
} input[UREG_MAX_INPUT];
128
unsigned nr_inputs, nr_input_regs;
130
unsigned vs_inputs[PIPE_MAX_ATTRIBS/32];
133
enum tgsi_semantic semantic_name;
134
unsigned semantic_index;
135
} system_value[UREG_MAX_SYSTEM_VALUE];
136
unsigned nr_system_values;
138
struct ureg_output_decl {
139
enum tgsi_semantic semantic_name;
140
unsigned semantic_index;
142
unsigned usage_mask; /* = TGSI_WRITEMASK_* */
147
} output[UREG_MAX_OUTPUT];
148
unsigned nr_outputs, nr_output_regs;
158
} immediate[UREG_MAX_IMMEDIATE];
159
unsigned nr_immediates;
161
struct ureg_src sampler[PIPE_MAX_SAMPLERS];
162
unsigned nr_samplers;
166
enum tgsi_texture_type target;
167
enum tgsi_return_type return_type_x;
168
enum tgsi_return_type return_type_y;
169
enum tgsi_return_type return_type_z;
170
enum tgsi_return_type return_type_w;
171
} sampler_view[PIPE_MAX_SHADER_SAMPLER_VIEWS];
172
unsigned nr_sampler_views;
176
enum tgsi_texture_type target;
177
enum pipe_format format;
180
} image[PIPE_MAX_SHADER_IMAGES];
186
} buffer[PIPE_MAX_SHADER_BUFFERS];
189
struct util_bitmask *free_temps;
190
struct util_bitmask *local_temps;
191
struct util_bitmask *decl_temps;
194
unsigned array_temps[UREG_MAX_ARRAY_TEMPS];
195
unsigned nr_array_temps;
197
struct const_decl const_decls[PIPE_MAX_CONSTANT_BUFFERS];
199
struct hw_atomic_decl hw_atomic_decls[PIPE_MAX_HW_ATOMIC_BUFFERS];
201
unsigned properties[TGSI_PROPERTY_COUNT];
204
unsigned nr_instructions;
206
struct ureg_tokens domain[2];
208
bool use_memory[TGSI_MEMORY_TYPE_COUNT];
213
static union tgsi_any_token error_tokens[32];
215
static void tokens_error( struct ureg_tokens *tokens )
217
if (tokens->tokens && tokens->tokens != error_tokens)
218
FREE(tokens->tokens);
220
tokens->tokens = error_tokens;
221
tokens->size = ARRAY_SIZE(error_tokens);
226
static void tokens_expand( struct ureg_tokens *tokens,
229
unsigned old_size = tokens->size * sizeof(unsigned);
231
if (tokens->tokens == error_tokens) {
235
while (tokens->count + count > tokens->size) {
236
tokens->size = (1 << ++tokens->order);
239
tokens->tokens = REALLOC(tokens->tokens,
241
tokens->size * sizeof(unsigned));
242
if (tokens->tokens == NULL) {
243
tokens_error(tokens);
247
static void set_bad( struct ureg_program *ureg )
249
tokens_error(&ureg->domain[0]);
254
static union tgsi_any_token *get_tokens( struct ureg_program *ureg,
258
struct ureg_tokens *tokens = &ureg->domain[domain];
259
union tgsi_any_token *result;
261
if (tokens->count + count > tokens->size)
262
tokens_expand(tokens, count);
264
result = &tokens->tokens[tokens->count];
265
tokens->count += count;
270
static union tgsi_any_token *retrieve_token( struct ureg_program *ureg,
274
if (ureg->domain[domain].tokens == error_tokens)
275
return &error_tokens[0];
277
return &ureg->domain[domain].tokens[nr];
282
ureg_property(struct ureg_program *ureg, unsigned name, unsigned value)
284
assert(name < ARRAY_SIZE(ureg->properties));
285
ureg->properties[name] = value;
289
ureg_DECL_fs_input_centroid_layout(struct ureg_program *ureg,
290
enum tgsi_semantic semantic_name,
291
unsigned semantic_index,
292
enum tgsi_interpolate_mode interp_mode,
293
enum tgsi_interpolate_loc interp_location,
301
assert(usage_mask != 0);
302
assert(usage_mask <= TGSI_WRITEMASK_XYZW);
304
for (i = 0; i < ureg->nr_inputs; i++) {
305
if (ureg->input[i].semantic_name == semantic_name &&
306
ureg->input[i].semantic_index == semantic_index) {
307
assert(ureg->input[i].interp == interp_mode);
308
assert(ureg->input[i].interp_location == interp_location);
309
if (ureg->input[i].array_id == array_id) {
310
ureg->input[i].usage_mask |= usage_mask;
313
assert((ureg->input[i].usage_mask & usage_mask) == 0);
317
if (ureg->nr_inputs < UREG_MAX_INPUT) {
318
assert(array_size >= 1);
319
ureg->input[i].semantic_name = semantic_name;
320
ureg->input[i].semantic_index = semantic_index;
321
ureg->input[i].interp = interp_mode;
322
ureg->input[i].interp_location = interp_location;
323
ureg->input[i].first = index;
324
ureg->input[i].last = index + array_size - 1;
325
ureg->input[i].array_id = array_id;
326
ureg->input[i].usage_mask = usage_mask;
327
ureg->nr_input_regs = MAX2(ureg->nr_input_regs, index + array_size);
334
return ureg_src_array_register(TGSI_FILE_INPUT, ureg->input[i].first,
339
ureg_DECL_fs_input_centroid(struct ureg_program *ureg,
340
enum tgsi_semantic semantic_name,
341
unsigned semantic_index,
342
enum tgsi_interpolate_mode interp_mode,
343
enum tgsi_interpolate_loc interp_location,
347
return ureg_DECL_fs_input_centroid_layout(ureg,
348
semantic_name, semantic_index, interp_mode,
350
ureg->nr_input_regs, TGSI_WRITEMASK_XYZW, array_id, array_size);
355
ureg_DECL_vs_input( struct ureg_program *ureg,
358
assert(ureg->processor == PIPE_SHADER_VERTEX);
359
assert(index / 32 < ARRAY_SIZE(ureg->vs_inputs));
361
ureg->vs_inputs[index/32] |= 1 << (index % 32);
362
return ureg_src_register( TGSI_FILE_INPUT, index );
367
ureg_DECL_input_layout(struct ureg_program *ureg,
368
enum tgsi_semantic semantic_name,
369
unsigned semantic_index,
375
return ureg_DECL_fs_input_centroid_layout(ureg,
376
semantic_name, semantic_index,
377
TGSI_INTERPOLATE_CONSTANT, TGSI_INTERPOLATE_LOC_CENTER,
378
index, usage_mask, array_id, array_size);
383
ureg_DECL_input(struct ureg_program *ureg,
384
enum tgsi_semantic semantic_name,
385
unsigned semantic_index,
389
return ureg_DECL_fs_input_centroid(ureg, semantic_name, semantic_index,
390
TGSI_INTERPOLATE_CONSTANT,
391
TGSI_INTERPOLATE_LOC_CENTER,
392
array_id, array_size);
397
ureg_DECL_system_value(struct ureg_program *ureg,
398
enum tgsi_semantic semantic_name,
399
unsigned semantic_index)
403
for (i = 0; i < ureg->nr_system_values; i++) {
404
if (ureg->system_value[i].semantic_name == semantic_name &&
405
ureg->system_value[i].semantic_index == semantic_index) {
410
if (ureg->nr_system_values < UREG_MAX_SYSTEM_VALUE) {
411
ureg->system_value[ureg->nr_system_values].semantic_name = semantic_name;
412
ureg->system_value[ureg->nr_system_values].semantic_index = semantic_index;
413
i = ureg->nr_system_values;
414
ureg->nr_system_values++;
420
return ureg_src_register(TGSI_FILE_SYSTEM_VALUE, i);
425
ureg_DECL_output_layout(struct ureg_program *ureg,
426
enum tgsi_semantic semantic_name,
427
unsigned semantic_index,
437
assert(usage_mask != 0);
438
assert(!(streams & 0x03) || (usage_mask & 1));
439
assert(!(streams & 0x0c) || (usage_mask & 2));
440
assert(!(streams & 0x30) || (usage_mask & 4));
441
assert(!(streams & 0xc0) || (usage_mask & 8));
443
for (i = 0; i < ureg->nr_outputs; i++) {
444
if (ureg->output[i].semantic_name == semantic_name &&
445
ureg->output[i].semantic_index == semantic_index) {
446
if (ureg->output[i].array_id == array_id) {
447
ureg->output[i].usage_mask |= usage_mask;
450
assert((ureg->output[i].usage_mask & usage_mask) == 0);
454
if (ureg->nr_outputs < UREG_MAX_OUTPUT) {
455
ureg->output[i].semantic_name = semantic_name;
456
ureg->output[i].semantic_index = semantic_index;
457
ureg->output[i].usage_mask = usage_mask;
458
ureg->output[i].first = index;
459
ureg->output[i].last = index + array_size - 1;
460
ureg->output[i].array_id = array_id;
461
ureg->output[i].invariant = invariant;
462
ureg->nr_output_regs = MAX2(ureg->nr_output_regs, index + array_size);
471
ureg->output[i].streams |= streams;
473
return ureg_dst_array_register(TGSI_FILE_OUTPUT, ureg->output[i].first,
479
ureg_DECL_output_masked(struct ureg_program *ureg,
486
return ureg_DECL_output_layout(ureg, name, index, 0,
487
ureg->nr_output_regs, usage_mask, array_id,
493
ureg_DECL_output(struct ureg_program *ureg,
494
enum tgsi_semantic name,
497
return ureg_DECL_output_masked(ureg, name, index, TGSI_WRITEMASK_XYZW,
502
ureg_DECL_output_array(struct ureg_program *ureg,
503
enum tgsi_semantic semantic_name,
504
unsigned semantic_index,
508
return ureg_DECL_output_masked(ureg, semantic_name, semantic_index,
510
array_id, array_size);
514
/* Returns a new constant register. Keep track of which have been
515
* referred to so that we can emit decls later.
517
* Constant operands declared with this function must be addressed
518
* with a two-dimensional index.
520
* There is nothing in this code to bind this constant to any tracked
521
* value or manage any constant_buffer contents -- that's the
522
* resposibility of the calling code.
525
ureg_DECL_constant2D(struct ureg_program *ureg,
530
struct const_decl *decl = &ureg->const_decls[index2D];
532
assert(index2D < PIPE_MAX_CONSTANT_BUFFERS);
534
if (decl->nr_constant_ranges < UREG_MAX_CONSTANT_RANGE) {
535
uint i = decl->nr_constant_ranges++;
537
decl->constant_range[i].first = first;
538
decl->constant_range[i].last = last;
543
/* A one-dimensional, deprecated version of ureg_DECL_constant2D().
545
* Constant operands declared with this function must be addressed
546
* with a one-dimensional index.
549
ureg_DECL_constant(struct ureg_program *ureg,
552
struct const_decl *decl = &ureg->const_decls[0];
553
unsigned minconst = index, maxconst = index;
556
/* Inside existing range?
558
for (i = 0; i < decl->nr_constant_ranges; i++) {
559
if (decl->constant_range[i].first <= index &&
560
decl->constant_range[i].last >= index) {
565
/* Extend existing range?
567
for (i = 0; i < decl->nr_constant_ranges; i++) {
568
if (decl->constant_range[i].last == index - 1) {
569
decl->constant_range[i].last = index;
573
if (decl->constant_range[i].first == index + 1) {
574
decl->constant_range[i].first = index;
578
minconst = MIN2(minconst, decl->constant_range[i].first);
579
maxconst = MAX2(maxconst, decl->constant_range[i].last);
584
if (decl->nr_constant_ranges < UREG_MAX_CONSTANT_RANGE) {
585
i = decl->nr_constant_ranges++;
586
decl->constant_range[i].first = index;
587
decl->constant_range[i].last = index;
591
/* Collapse all ranges down to one:
594
decl->constant_range[0].first = minconst;
595
decl->constant_range[0].last = maxconst;
596
decl->nr_constant_ranges = 1;
599
assert(i < decl->nr_constant_ranges);
600
assert(decl->constant_range[i].first <= index);
601
assert(decl->constant_range[i].last >= index);
603
struct ureg_src src = ureg_src_register(TGSI_FILE_CONSTANT, index);
604
return ureg_src_dimension(src, 0);
608
/* Returns a new hw atomic register. Keep track of which have been
609
* referred to so that we can emit decls later.
612
ureg_DECL_hw_atomic(struct ureg_program *ureg,
618
struct hw_atomic_decl *decl = &ureg->hw_atomic_decls[buffer_id];
620
if (decl->nr_hw_atomic_ranges < UREG_MAX_HW_ATOMIC_RANGE) {
621
uint i = decl->nr_hw_atomic_ranges++;
623
decl->hw_atomic_range[i].first = first;
624
decl->hw_atomic_range[i].last = last;
625
decl->hw_atomic_range[i].array_id = array_id;
631
static struct ureg_dst alloc_temporary( struct ureg_program *ureg,
636
/* Look for a released temporary.
638
for (i = util_bitmask_get_first_index(ureg->free_temps);
639
i != UTIL_BITMASK_INVALID_INDEX;
640
i = util_bitmask_get_next_index(ureg->free_temps, i + 1)) {
641
if (util_bitmask_get(ureg->local_temps, i) == local)
645
/* Or allocate a new one.
647
if (i == UTIL_BITMASK_INVALID_INDEX) {
648
i = ureg->nr_temps++;
651
util_bitmask_set(ureg->local_temps, i);
653
/* Start a new declaration when the local flag changes */
654
if (!i || util_bitmask_get(ureg->local_temps, i - 1) != local)
655
util_bitmask_set(ureg->decl_temps, i);
658
util_bitmask_clear(ureg->free_temps, i);
660
return ureg_dst_register( TGSI_FILE_TEMPORARY, i );
663
struct ureg_dst ureg_DECL_temporary( struct ureg_program *ureg )
665
return alloc_temporary(ureg, FALSE);
668
struct ureg_dst ureg_DECL_local_temporary( struct ureg_program *ureg )
670
return alloc_temporary(ureg, TRUE);
673
struct ureg_dst ureg_DECL_array_temporary( struct ureg_program *ureg,
677
unsigned i = ureg->nr_temps;
678
struct ureg_dst dst = ureg_dst_register( TGSI_FILE_TEMPORARY, i );
681
util_bitmask_set(ureg->local_temps, i);
683
/* Always start a new declaration at the start */
684
util_bitmask_set(ureg->decl_temps, i);
686
ureg->nr_temps += size;
688
/* and also at the end of the array */
689
util_bitmask_set(ureg->decl_temps, ureg->nr_temps);
691
if (ureg->nr_array_temps < UREG_MAX_ARRAY_TEMPS) {
692
ureg->array_temps[ureg->nr_array_temps++] = i;
693
dst.ArrayID = ureg->nr_array_temps;
699
void ureg_release_temporary( struct ureg_program *ureg,
700
struct ureg_dst tmp )
702
if(tmp.File == TGSI_FILE_TEMPORARY)
703
util_bitmask_set(ureg->free_temps, tmp.Index);
707
/* Allocate a new address register.
709
struct ureg_dst ureg_DECL_address( struct ureg_program *ureg )
711
if (ureg->nr_addrs < UREG_MAX_ADDR)
712
return ureg_dst_register( TGSI_FILE_ADDRESS, ureg->nr_addrs++ );
715
return ureg_dst_register( TGSI_FILE_ADDRESS, 0 );
718
/* Allocate a new sampler.
720
struct ureg_src ureg_DECL_sampler( struct ureg_program *ureg,
725
for (i = 0; i < ureg->nr_samplers; i++)
726
if (ureg->sampler[i].Index == (int)nr)
727
return ureg->sampler[i];
729
if (i < PIPE_MAX_SAMPLERS) {
730
ureg->sampler[i] = ureg_src_register( TGSI_FILE_SAMPLER, nr );
732
return ureg->sampler[i];
736
return ureg->sampler[0];
740
* Allocate a new shader sampler view.
743
ureg_DECL_sampler_view(struct ureg_program *ureg,
745
enum tgsi_texture_type target,
746
enum tgsi_return_type return_type_x,
747
enum tgsi_return_type return_type_y,
748
enum tgsi_return_type return_type_z,
749
enum tgsi_return_type return_type_w)
751
struct ureg_src reg = ureg_src_register(TGSI_FILE_SAMPLER_VIEW, index);
754
for (i = 0; i < ureg->nr_sampler_views; i++) {
755
if (ureg->sampler_view[i].index == index) {
760
if (i < PIPE_MAX_SHADER_SAMPLER_VIEWS) {
761
ureg->sampler_view[i].index = index;
762
ureg->sampler_view[i].target = target;
763
ureg->sampler_view[i].return_type_x = return_type_x;
764
ureg->sampler_view[i].return_type_y = return_type_y;
765
ureg->sampler_view[i].return_type_z = return_type_z;
766
ureg->sampler_view[i].return_type_w = return_type_w;
767
ureg->nr_sampler_views++;
775
/* Allocate a new image.
778
ureg_DECL_image(struct ureg_program *ureg,
780
enum tgsi_texture_type target,
781
enum pipe_format format,
785
struct ureg_src reg = ureg_src_register(TGSI_FILE_IMAGE, index);
788
for (i = 0; i < ureg->nr_images; i++)
789
if (ureg->image[i].index == index)
792
if (i < PIPE_MAX_SHADER_IMAGES) {
793
ureg->image[i].index = index;
794
ureg->image[i].target = target;
795
ureg->image[i].wr = wr;
796
ureg->image[i].raw = raw;
797
ureg->image[i].format = format;
806
/* Allocate a new buffer.
808
struct ureg_src ureg_DECL_buffer(struct ureg_program *ureg, unsigned nr,
811
struct ureg_src reg = ureg_src_register(TGSI_FILE_BUFFER, nr);
814
for (i = 0; i < ureg->nr_buffers; i++)
815
if (ureg->buffer[i].index == nr)
818
if (i < PIPE_MAX_SHADER_BUFFERS) {
819
ureg->buffer[i].index = nr;
820
ureg->buffer[i].atomic = atomic;
829
/* Allocate a memory area.
831
struct ureg_src ureg_DECL_memory(struct ureg_program *ureg,
832
unsigned memory_type)
834
struct ureg_src reg = ureg_src_register(TGSI_FILE_MEMORY, memory_type);
836
ureg->use_memory[memory_type] = true;
841
match_or_expand_immediate64( const unsigned *v,
847
unsigned nr2 = *pnr2;
851
for (i = 0; i < nr; i += 2) {
852
boolean found = FALSE;
854
for (j = 0; j < nr2 && !found; j += 2) {
855
if (v[i] == v2[j] && v[i + 1] == v2[j + 1]) {
856
*swizzle |= (j << (i * 2)) | ((j + 1) << ((i + 1) * 2));
866
v2[nr2 + 1] = v[i + 1];
868
*swizzle |= (nr2 << (i * 2)) | ((nr2 + 1) << ((i + 1) * 2));
873
/* Actually expand immediate only when fully succeeded.
880
match_or_expand_immediate( const unsigned *v,
887
unsigned nr2 = *pnr2;
890
if (type == TGSI_IMM_FLOAT64 ||
891
type == TGSI_IMM_UINT64 ||
892
type == TGSI_IMM_INT64)
893
return match_or_expand_immediate64(v, nr, v2, pnr2, swizzle);
897
for (i = 0; i < nr; i++) {
898
boolean found = FALSE;
900
for (j = 0; j < nr2 && !found; j++) {
902
*swizzle |= j << (i * 2);
913
*swizzle |= nr2 << (i * 2);
918
/* Actually expand immediate only when fully succeeded.
925
static struct ureg_src
926
decl_immediate( struct ureg_program *ureg,
932
unsigned swizzle = 0;
934
/* Could do a first pass where we examine all existing immediates
938
for (i = 0; i < ureg->nr_immediates; i++) {
939
if (ureg->immediate[i].type != type) {
942
if (match_or_expand_immediate(v,
945
ureg->immediate[i].value.u,
946
&ureg->immediate[i].nr,
952
if (ureg->nr_immediates < UREG_MAX_IMMEDIATE) {
953
i = ureg->nr_immediates++;
954
ureg->immediate[i].type = type;
955
if (match_or_expand_immediate(v,
958
ureg->immediate[i].value.u,
959
&ureg->immediate[i].nr,
968
/* Make sure that all referenced elements are from this immediate.
969
* Has the effect of making size-one immediates into scalars.
971
if (type == TGSI_IMM_FLOAT64 ||
972
type == TGSI_IMM_UINT64 ||
973
type == TGSI_IMM_INT64) {
974
for (j = nr; j < 4; j+=2) {
975
swizzle |= (swizzle & 0xf) << (j * 2);
978
for (j = nr; j < 4; j++) {
979
swizzle |= (swizzle & 0x3) << (j * 2);
982
return ureg_swizzle(ureg_src_register(TGSI_FILE_IMMEDIATE, i),
983
(swizzle >> 0) & 0x3,
984
(swizzle >> 2) & 0x3,
985
(swizzle >> 4) & 0x3,
986
(swizzle >> 6) & 0x3);
991
ureg_DECL_immediate( struct ureg_program *ureg,
1001
for (i = 0; i < nr; i++) {
1005
return decl_immediate(ureg, fu.u, nr, TGSI_IMM_FLOAT32);
1009
ureg_DECL_immediate_f64( struct ureg_program *ureg,
1019
assert((nr / 2) < 3);
1020
for (i = 0; i < nr / 2; i++) {
1024
return decl_immediate(ureg, fu.u, nr, TGSI_IMM_FLOAT64);
1028
ureg_DECL_immediate_uint( struct ureg_program *ureg,
1032
return decl_immediate(ureg, v, nr, TGSI_IMM_UINT32);
1037
ureg_DECL_immediate_block_uint( struct ureg_program *ureg,
1044
if (ureg->nr_immediates + (nr + 3) / 4 > UREG_MAX_IMMEDIATE) {
1046
return ureg_src_register(TGSI_FILE_IMMEDIATE, 0);
1049
index = ureg->nr_immediates;
1050
ureg->nr_immediates += (nr + 3) / 4;
1052
for (i = index; i < ureg->nr_immediates; i++) {
1053
ureg->immediate[i].type = TGSI_IMM_UINT32;
1054
ureg->immediate[i].nr = nr > 4 ? 4 : nr;
1055
memcpy(ureg->immediate[i].value.u,
1056
&v[(i - index) * 4],
1057
ureg->immediate[i].nr * sizeof(uint));
1061
return ureg_src_register(TGSI_FILE_IMMEDIATE, index);
1066
ureg_DECL_immediate_int( struct ureg_program *ureg,
1070
return decl_immediate(ureg, (const unsigned *)v, nr, TGSI_IMM_INT32);
1074
ureg_DECL_immediate_uint64( struct ureg_program *ureg,
1084
assert((nr / 2) < 3);
1085
for (i = 0; i < nr / 2; i++) {
1089
return decl_immediate(ureg, fu.u, nr, TGSI_IMM_UINT64);
1093
ureg_DECL_immediate_int64( struct ureg_program *ureg,
1103
assert((nr / 2) < 3);
1104
for (i = 0; i < nr / 2; i++) {
1108
return decl_immediate(ureg, fu.u, nr, TGSI_IMM_INT64);
1112
ureg_emit_src( struct ureg_program *ureg,
1113
struct ureg_src src )
1115
unsigned size = 1 + (src.Indirect ? 1 : 0) +
1116
(src.Dimension ? (src.DimIndirect ? 2 : 1) : 0);
1118
union tgsi_any_token *out = get_tokens( ureg, DOMAIN_INSN, size );
1121
assert(src.File != TGSI_FILE_NULL);
1122
assert(src.File < TGSI_FILE_COUNT);
1125
out[n].src.File = src.File;
1126
out[n].src.SwizzleX = src.SwizzleX;
1127
out[n].src.SwizzleY = src.SwizzleY;
1128
out[n].src.SwizzleZ = src.SwizzleZ;
1129
out[n].src.SwizzleW = src.SwizzleW;
1130
out[n].src.Index = src.Index;
1131
out[n].src.Negate = src.Negate;
1132
out[0].src.Absolute = src.Absolute;
1136
out[0].src.Indirect = 1;
1138
out[n].ind.File = src.IndirectFile;
1139
out[n].ind.Swizzle = src.IndirectSwizzle;
1140
out[n].ind.Index = src.IndirectIndex;
1141
if (!ureg->supports_any_inout_decl_range &&
1142
(src.File == TGSI_FILE_INPUT || src.File == TGSI_FILE_OUTPUT))
1143
out[n].ind.ArrayID = 0;
1145
out[n].ind.ArrayID = src.ArrayID;
1149
if (src.Dimension) {
1150
out[0].src.Dimension = 1;
1151
out[n].dim.Dimension = 0;
1152
out[n].dim.Padding = 0;
1153
if (src.DimIndirect) {
1154
out[n].dim.Indirect = 1;
1155
out[n].dim.Index = src.DimensionIndex;
1158
out[n].ind.File = src.DimIndFile;
1159
out[n].ind.Swizzle = src.DimIndSwizzle;
1160
out[n].ind.Index = src.DimIndIndex;
1161
if (!ureg->supports_any_inout_decl_range &&
1162
(src.File == TGSI_FILE_INPUT || src.File == TGSI_FILE_OUTPUT))
1163
out[n].ind.ArrayID = 0;
1165
out[n].ind.ArrayID = src.ArrayID;
1167
out[n].dim.Indirect = 0;
1168
out[n].dim.Index = src.DimensionIndex;
1178
ureg_emit_dst( struct ureg_program *ureg,
1179
struct ureg_dst dst )
1181
unsigned size = 1 + (dst.Indirect ? 1 : 0) +
1182
(dst.Dimension ? (dst.DimIndirect ? 2 : 1) : 0);
1184
union tgsi_any_token *out = get_tokens( ureg, DOMAIN_INSN, size );
1187
assert(dst.File != TGSI_FILE_NULL);
1188
assert(dst.File != TGSI_FILE_SAMPLER);
1189
assert(dst.File != TGSI_FILE_SAMPLER_VIEW);
1190
assert(dst.File != TGSI_FILE_IMMEDIATE);
1191
assert(dst.File < TGSI_FILE_COUNT);
1194
out[n].dst.File = dst.File;
1195
out[n].dst.WriteMask = dst.WriteMask;
1196
out[n].dst.Indirect = dst.Indirect;
1197
out[n].dst.Index = dst.Index;
1202
out[n].ind.File = dst.IndirectFile;
1203
out[n].ind.Swizzle = dst.IndirectSwizzle;
1204
out[n].ind.Index = dst.IndirectIndex;
1205
if (!ureg->supports_any_inout_decl_range &&
1206
(dst.File == TGSI_FILE_INPUT || dst.File == TGSI_FILE_OUTPUT))
1207
out[n].ind.ArrayID = 0;
1209
out[n].ind.ArrayID = dst.ArrayID;
1213
if (dst.Dimension) {
1214
out[0].dst.Dimension = 1;
1215
out[n].dim.Dimension = 0;
1216
out[n].dim.Padding = 0;
1217
if (dst.DimIndirect) {
1218
out[n].dim.Indirect = 1;
1219
out[n].dim.Index = dst.DimensionIndex;
1222
out[n].ind.File = dst.DimIndFile;
1223
out[n].ind.Swizzle = dst.DimIndSwizzle;
1224
out[n].ind.Index = dst.DimIndIndex;
1225
if (!ureg->supports_any_inout_decl_range &&
1226
(dst.File == TGSI_FILE_INPUT || dst.File == TGSI_FILE_OUTPUT))
1227
out[n].ind.ArrayID = 0;
1229
out[n].ind.ArrayID = dst.ArrayID;
1231
out[n].dim.Indirect = 0;
1232
out[n].dim.Index = dst.DimensionIndex;
1241
static void validate( enum tgsi_opcode opcode,
1246
const struct tgsi_opcode_info *info = tgsi_get_opcode_info( opcode );
1249
assert(nr_dst == info->num_dst);
1250
assert(nr_src == info->num_src);
1255
struct ureg_emit_insn_result
1256
ureg_emit_insn(struct ureg_program *ureg,
1257
enum tgsi_opcode opcode,
1263
union tgsi_any_token *out;
1265
struct ureg_emit_insn_result result;
1267
validate( opcode, num_dst, num_src );
1269
out = get_tokens( ureg, DOMAIN_INSN, count );
1270
out[0].insn = tgsi_default_instruction();
1271
out[0].insn.Opcode = opcode;
1272
out[0].insn.Saturate = saturate;
1273
out[0].insn.Precise = precise || ureg->precise;
1274
out[0].insn.NumDstRegs = num_dst;
1275
out[0].insn.NumSrcRegs = num_src;
1277
result.insn_token = ureg->domain[DOMAIN_INSN].count - count;
1278
result.extended_token = result.insn_token;
1280
ureg->nr_instructions++;
1287
* Emit a label token.
1288
* \param label_token returns a token number indicating where the label
1289
* needs to be patched later. Later, this value should be passed to the
1290
* ureg_fixup_label() function.
1293
ureg_emit_label(struct ureg_program *ureg,
1294
unsigned extended_token,
1295
unsigned *label_token )
1297
union tgsi_any_token *out, *insn;
1302
out = get_tokens( ureg, DOMAIN_INSN, 1 );
1305
insn = retrieve_token( ureg, DOMAIN_INSN, extended_token );
1306
insn->insn.Label = 1;
1308
*label_token = ureg->domain[DOMAIN_INSN].count - 1;
1311
/* Will return a number which can be used in a label to point to the
1312
* next instruction to be emitted.
1315
ureg_get_instruction_number( struct ureg_program *ureg )
1317
return ureg->nr_instructions;
1320
/* Patch a given label (expressed as a token number) to point to a
1321
* given instruction (expressed as an instruction number).
1324
ureg_fixup_label(struct ureg_program *ureg,
1325
unsigned label_token,
1326
unsigned instruction_number )
1328
union tgsi_any_token *out = retrieve_token( ureg, DOMAIN_INSN, label_token );
1330
out->insn_label.Label = instruction_number;
1335
ureg_emit_texture(struct ureg_program *ureg,
1336
unsigned extended_token,
1337
enum tgsi_texture_type target,
1338
enum tgsi_return_type return_type, unsigned num_offsets)
1340
union tgsi_any_token *out, *insn;
1342
out = get_tokens( ureg, DOMAIN_INSN, 1 );
1343
insn = retrieve_token( ureg, DOMAIN_INSN, extended_token );
1345
insn->insn.Texture = 1;
1348
out[0].insn_texture.Texture = target;
1349
out[0].insn_texture.NumOffsets = num_offsets;
1350
out[0].insn_texture.ReturnType = return_type;
1354
ureg_emit_texture_offset(struct ureg_program *ureg,
1355
const struct tgsi_texture_offset *offset)
1357
union tgsi_any_token *out;
1359
out = get_tokens( ureg, DOMAIN_INSN, 1);
1362
out[0].insn_texture_offset = *offset;
1366
ureg_emit_memory(struct ureg_program *ureg,
1367
unsigned extended_token,
1369
enum tgsi_texture_type texture,
1370
enum pipe_format format)
1372
union tgsi_any_token *out, *insn;
1374
out = get_tokens( ureg, DOMAIN_INSN, 1 );
1375
insn = retrieve_token( ureg, DOMAIN_INSN, extended_token );
1377
insn->insn.Memory = 1;
1380
out[0].insn_memory.Qualifier = qualifier;
1381
out[0].insn_memory.Texture = texture;
1382
out[0].insn_memory.Format = format;
1386
ureg_fixup_insn_size(struct ureg_program *ureg,
1389
union tgsi_any_token *out = retrieve_token( ureg, DOMAIN_INSN, insn );
1391
assert(out->insn.Type == TGSI_TOKEN_TYPE_INSTRUCTION);
1392
out->insn.NrTokens = ureg->domain[DOMAIN_INSN].count - insn - 1;
1397
ureg_insn(struct ureg_program *ureg,
1398
enum tgsi_opcode opcode,
1399
const struct ureg_dst *dst,
1401
const struct ureg_src *src,
1405
struct ureg_emit_insn_result insn;
1409
if (nr_dst && ureg_dst_is_empty(dst[0])) {
1413
saturate = nr_dst ? dst[0].Saturate : FALSE;
1415
insn = ureg_emit_insn(ureg,
1422
for (i = 0; i < nr_dst; i++)
1423
ureg_emit_dst( ureg, dst[i] );
1425
for (i = 0; i < nr_src; i++)
1426
ureg_emit_src( ureg, src[i] );
1428
ureg_fixup_insn_size( ureg, insn.insn_token );
1432
ureg_tex_insn(struct ureg_program *ureg,
1433
enum tgsi_opcode opcode,
1434
const struct ureg_dst *dst,
1436
enum tgsi_texture_type target,
1437
enum tgsi_return_type return_type,
1438
const struct tgsi_texture_offset *texoffsets,
1440
const struct ureg_src *src,
1443
struct ureg_emit_insn_result insn;
1447
if (nr_dst && ureg_dst_is_empty(dst[0])) {
1451
saturate = nr_dst ? dst[0].Saturate : FALSE;
1453
insn = ureg_emit_insn(ureg,
1460
ureg_emit_texture( ureg, insn.extended_token, target, return_type,
1463
for (i = 0; i < nr_offset; i++)
1464
ureg_emit_texture_offset( ureg, &texoffsets[i]);
1466
for (i = 0; i < nr_dst; i++)
1467
ureg_emit_dst( ureg, dst[i] );
1469
for (i = 0; i < nr_src; i++)
1470
ureg_emit_src( ureg, src[i] );
1472
ureg_fixup_insn_size( ureg, insn.insn_token );
1477
ureg_memory_insn(struct ureg_program *ureg,
1478
enum tgsi_opcode opcode,
1479
const struct ureg_dst *dst,
1481
const struct ureg_src *src,
1484
enum tgsi_texture_type texture,
1485
enum pipe_format format)
1487
struct ureg_emit_insn_result insn;
1490
insn = ureg_emit_insn(ureg,
1497
ureg_emit_memory(ureg, insn.extended_token, qualifier, texture, format);
1499
for (i = 0; i < nr_dst; i++)
1500
ureg_emit_dst(ureg, dst[i]);
1502
for (i = 0; i < nr_src; i++)
1503
ureg_emit_src(ureg, src[i]);
1505
ureg_fixup_insn_size(ureg, insn.insn_token);
1510
emit_decl_semantic(struct ureg_program *ureg,
1514
enum tgsi_semantic semantic_name,
1515
unsigned semantic_index,
1517
unsigned usage_mask,
1521
union tgsi_any_token *out = get_tokens(ureg, DOMAIN_DECL, array_id ? 4 : 3);
1524
out[0].decl.Type = TGSI_TOKEN_TYPE_DECLARATION;
1525
out[0].decl.NrTokens = 3;
1526
out[0].decl.File = file;
1527
out[0].decl.UsageMask = usage_mask;
1528
out[0].decl.Semantic = 1;
1529
out[0].decl.Array = array_id != 0;
1530
out[0].decl.Invariant = invariant;
1533
out[1].decl_range.First = first;
1534
out[1].decl_range.Last = last;
1537
out[2].decl_semantic.Name = semantic_name;
1538
out[2].decl_semantic.Index = semantic_index;
1539
out[2].decl_semantic.StreamX = streams & 3;
1540
out[2].decl_semantic.StreamY = (streams >> 2) & 3;
1541
out[2].decl_semantic.StreamZ = (streams >> 4) & 3;
1542
out[2].decl_semantic.StreamW = (streams >> 6) & 3;
1546
out[3].array.ArrayID = array_id;
1551
emit_decl_atomic_2d(struct ureg_program *ureg,
1557
union tgsi_any_token *out = get_tokens(ureg, DOMAIN_DECL, array_id ? 4 : 3);
1560
out[0].decl.Type = TGSI_TOKEN_TYPE_DECLARATION;
1561
out[0].decl.NrTokens = 3;
1562
out[0].decl.File = TGSI_FILE_HW_ATOMIC;
1563
out[0].decl.UsageMask = TGSI_WRITEMASK_XYZW;
1564
out[0].decl.Dimension = 1;
1565
out[0].decl.Array = array_id != 0;
1568
out[1].decl_range.First = first;
1569
out[1].decl_range.Last = last;
1572
out[2].decl_dim.Index2D = index2D;
1576
out[3].array.ArrayID = array_id;
1581
emit_decl_fs(struct ureg_program *ureg,
1585
enum tgsi_semantic semantic_name,
1586
unsigned semantic_index,
1587
enum tgsi_interpolate_mode interpolate,
1588
enum tgsi_interpolate_loc interpolate_location,
1590
unsigned usage_mask)
1592
union tgsi_any_token *out = get_tokens(ureg, DOMAIN_DECL,
1596
out[0].decl.Type = TGSI_TOKEN_TYPE_DECLARATION;
1597
out[0].decl.NrTokens = 4;
1598
out[0].decl.File = file;
1599
out[0].decl.UsageMask = usage_mask;
1600
out[0].decl.Interpolate = 1;
1601
out[0].decl.Semantic = 1;
1602
out[0].decl.Array = array_id != 0;
1605
out[1].decl_range.First = first;
1606
out[1].decl_range.Last = last;
1609
out[2].decl_interp.Interpolate = interpolate;
1610
out[2].decl_interp.Location = interpolate_location;
1613
out[3].decl_semantic.Name = semantic_name;
1614
out[3].decl_semantic.Index = semantic_index;
1618
out[4].array.ArrayID = array_id;
1623
emit_decl_temps( struct ureg_program *ureg,
1624
unsigned first, unsigned last,
1628
union tgsi_any_token *out = get_tokens( ureg, DOMAIN_DECL,
1632
out[0].decl.Type = TGSI_TOKEN_TYPE_DECLARATION;
1633
out[0].decl.NrTokens = 2;
1634
out[0].decl.File = TGSI_FILE_TEMPORARY;
1635
out[0].decl.UsageMask = TGSI_WRITEMASK_XYZW;
1636
out[0].decl.Local = local;
1639
out[1].decl_range.First = first;
1640
out[1].decl_range.Last = last;
1643
out[0].decl.Array = 1;
1645
out[2].array.ArrayID = arrayid;
1649
static void emit_decl_range( struct ureg_program *ureg,
1654
union tgsi_any_token *out = get_tokens( ureg, DOMAIN_DECL, 2 );
1657
out[0].decl.Type = TGSI_TOKEN_TYPE_DECLARATION;
1658
out[0].decl.NrTokens = 2;
1659
out[0].decl.File = file;
1660
out[0].decl.UsageMask = TGSI_WRITEMASK_XYZW;
1661
out[0].decl.Semantic = 0;
1664
out[1].decl_range.First = first;
1665
out[1].decl_range.Last = first + count - 1;
1669
emit_decl_range2D(struct ureg_program *ureg,
1675
union tgsi_any_token *out = get_tokens(ureg, DOMAIN_DECL, 3);
1678
out[0].decl.Type = TGSI_TOKEN_TYPE_DECLARATION;
1679
out[0].decl.NrTokens = 3;
1680
out[0].decl.File = file;
1681
out[0].decl.UsageMask = TGSI_WRITEMASK_XYZW;
1682
out[0].decl.Dimension = 1;
1685
out[1].decl_range.First = first;
1686
out[1].decl_range.Last = last;
1689
out[2].decl_dim.Index2D = index2D;
1693
emit_decl_sampler_view(struct ureg_program *ureg,
1695
enum tgsi_texture_type target,
1696
enum tgsi_return_type return_type_x,
1697
enum tgsi_return_type return_type_y,
1698
enum tgsi_return_type return_type_z,
1699
enum tgsi_return_type return_type_w )
1701
union tgsi_any_token *out = get_tokens(ureg, DOMAIN_DECL, 3);
1704
out[0].decl.Type = TGSI_TOKEN_TYPE_DECLARATION;
1705
out[0].decl.NrTokens = 3;
1706
out[0].decl.File = TGSI_FILE_SAMPLER_VIEW;
1707
out[0].decl.UsageMask = TGSI_WRITEMASK_XYZW;
1710
out[1].decl_range.First = index;
1711
out[1].decl_range.Last = index;
1714
out[2].decl_sampler_view.Resource = target;
1715
out[2].decl_sampler_view.ReturnTypeX = return_type_x;
1716
out[2].decl_sampler_view.ReturnTypeY = return_type_y;
1717
out[2].decl_sampler_view.ReturnTypeZ = return_type_z;
1718
out[2].decl_sampler_view.ReturnTypeW = return_type_w;
1722
emit_decl_image(struct ureg_program *ureg,
1724
enum tgsi_texture_type target,
1725
enum pipe_format format,
1729
union tgsi_any_token *out = get_tokens(ureg, DOMAIN_DECL, 3);
1732
out[0].decl.Type = TGSI_TOKEN_TYPE_DECLARATION;
1733
out[0].decl.NrTokens = 3;
1734
out[0].decl.File = TGSI_FILE_IMAGE;
1735
out[0].decl.UsageMask = TGSI_WRITEMASK_XYZW;
1738
out[1].decl_range.First = index;
1739
out[1].decl_range.Last = index;
1742
out[2].decl_image.Resource = target;
1743
out[2].decl_image.Writable = wr;
1744
out[2].decl_image.Raw = raw;
1745
out[2].decl_image.Format = format;
1749
emit_decl_buffer(struct ureg_program *ureg,
1753
union tgsi_any_token *out = get_tokens(ureg, DOMAIN_DECL, 2);
1756
out[0].decl.Type = TGSI_TOKEN_TYPE_DECLARATION;
1757
out[0].decl.NrTokens = 2;
1758
out[0].decl.File = TGSI_FILE_BUFFER;
1759
out[0].decl.UsageMask = TGSI_WRITEMASK_XYZW;
1760
out[0].decl.Atomic = atomic;
1763
out[1].decl_range.First = index;
1764
out[1].decl_range.Last = index;
1768
emit_decl_memory(struct ureg_program *ureg, unsigned memory_type)
1770
union tgsi_any_token *out = get_tokens(ureg, DOMAIN_DECL, 2);
1773
out[0].decl.Type = TGSI_TOKEN_TYPE_DECLARATION;
1774
out[0].decl.NrTokens = 2;
1775
out[0].decl.File = TGSI_FILE_MEMORY;
1776
out[0].decl.UsageMask = TGSI_WRITEMASK_XYZW;
1777
out[0].decl.MemType = memory_type;
1780
out[1].decl_range.First = memory_type;
1781
out[1].decl_range.Last = memory_type;
1785
emit_immediate( struct ureg_program *ureg,
1789
union tgsi_any_token *out = get_tokens( ureg, DOMAIN_DECL, 5 );
1792
out[0].imm.Type = TGSI_TOKEN_TYPE_IMMEDIATE;
1793
out[0].imm.NrTokens = 5;
1794
out[0].imm.DataType = type;
1795
out[0].imm.Padding = 0;
1797
out[1].imm_data.Uint = v[0];
1798
out[2].imm_data.Uint = v[1];
1799
out[3].imm_data.Uint = v[2];
1800
out[4].imm_data.Uint = v[3];
1804
emit_property(struct ureg_program *ureg,
1808
union tgsi_any_token *out = get_tokens(ureg, DOMAIN_DECL, 2);
1811
out[0].prop.Type = TGSI_TOKEN_TYPE_PROPERTY;
1812
out[0].prop.NrTokens = 2;
1813
out[0].prop.PropertyName = name;
1815
out[1].prop_data.Data = data;
1819
input_sort(const void *in_a, const void *in_b)
1821
const struct ureg_input_decl *a = in_a, *b = in_b;
1823
return a->first - b->first;
1827
output_sort(const void *in_a, const void *in_b)
1829
const struct ureg_output_decl *a = in_a, *b = in_b;
1831
return a->first - b->first;
1834
static void emit_decls( struct ureg_program *ureg )
1838
for (i = 0; i < ARRAY_SIZE(ureg->properties); i++)
1839
if (ureg->properties[i] != ~0u)
1840
emit_property(ureg, i, ureg->properties[i]);
1842
/* While not required by TGSI spec, virglrenderer has a dependency on the
1843
* inputs being sorted.
1845
qsort(ureg->input, ureg->nr_inputs, sizeof(ureg->input[0]), input_sort);
1847
if (ureg->processor == PIPE_SHADER_VERTEX) {
1848
for (i = 0; i < PIPE_MAX_ATTRIBS; i++) {
1849
if (ureg->vs_inputs[i/32] & (1u << (i%32))) {
1850
emit_decl_range( ureg, TGSI_FILE_INPUT, i, 1 );
1853
} else if (ureg->processor == PIPE_SHADER_FRAGMENT) {
1854
if (ureg->supports_any_inout_decl_range) {
1855
for (i = 0; i < ureg->nr_inputs; i++) {
1858
ureg->input[i].first,
1859
ureg->input[i].last,
1860
ureg->input[i].semantic_name,
1861
ureg->input[i].semantic_index,
1862
ureg->input[i].interp,
1863
ureg->input[i].interp_location,
1864
ureg->input[i].array_id,
1865
ureg->input[i].usage_mask);
1869
for (i = 0; i < ureg->nr_inputs; i++) {
1870
for (j = ureg->input[i].first; j <= ureg->input[i].last; j++) {
1874
ureg->input[i].semantic_name,
1875
ureg->input[i].semantic_index +
1876
(j - ureg->input[i].first),
1877
ureg->input[i].interp,
1878
ureg->input[i].interp_location, 0,
1879
ureg->input[i].usage_mask);
1884
if (ureg->supports_any_inout_decl_range) {
1885
for (i = 0; i < ureg->nr_inputs; i++) {
1886
emit_decl_semantic(ureg,
1888
ureg->input[i].first,
1889
ureg->input[i].last,
1890
ureg->input[i].semantic_name,
1891
ureg->input[i].semantic_index,
1893
TGSI_WRITEMASK_XYZW,
1894
ureg->input[i].array_id,
1899
for (i = 0; i < ureg->nr_inputs; i++) {
1900
for (j = ureg->input[i].first; j <= ureg->input[i].last; j++) {
1901
emit_decl_semantic(ureg,
1904
ureg->input[i].semantic_name,
1905
ureg->input[i].semantic_index +
1906
(j - ureg->input[i].first),
1908
TGSI_WRITEMASK_XYZW, 0, FALSE);
1914
for (i = 0; i < ureg->nr_system_values; i++) {
1915
emit_decl_semantic(ureg,
1916
TGSI_FILE_SYSTEM_VALUE,
1919
ureg->system_value[i].semantic_name,
1920
ureg->system_value[i].semantic_index,
1922
TGSI_WRITEMASK_XYZW, 0, FALSE);
1925
/* While not required by TGSI spec, virglrenderer has a dependency on the
1926
* outputs being sorted.
1928
qsort(ureg->output, ureg->nr_outputs, sizeof(ureg->output[0]), output_sort);
1930
if (ureg->supports_any_inout_decl_range) {
1931
for (i = 0; i < ureg->nr_outputs; i++) {
1932
emit_decl_semantic(ureg,
1934
ureg->output[i].first,
1935
ureg->output[i].last,
1936
ureg->output[i].semantic_name,
1937
ureg->output[i].semantic_index,
1938
ureg->output[i].streams,
1939
ureg->output[i].usage_mask,
1940
ureg->output[i].array_id,
1941
ureg->output[i].invariant);
1945
for (i = 0; i < ureg->nr_outputs; i++) {
1946
for (j = ureg->output[i].first; j <= ureg->output[i].last; j++) {
1947
emit_decl_semantic(ureg,
1950
ureg->output[i].semantic_name,
1951
ureg->output[i].semantic_index +
1952
(j - ureg->output[i].first),
1953
ureg->output[i].streams,
1954
ureg->output[i].usage_mask,
1956
ureg->output[i].invariant);
1961
for (i = 0; i < ureg->nr_samplers; i++) {
1962
emit_decl_range( ureg,
1964
ureg->sampler[i].Index, 1 );
1967
for (i = 0; i < ureg->nr_sampler_views; i++) {
1968
emit_decl_sampler_view(ureg,
1969
ureg->sampler_view[i].index,
1970
ureg->sampler_view[i].target,
1971
ureg->sampler_view[i].return_type_x,
1972
ureg->sampler_view[i].return_type_y,
1973
ureg->sampler_view[i].return_type_z,
1974
ureg->sampler_view[i].return_type_w);
1977
for (i = 0; i < ureg->nr_images; i++) {
1978
emit_decl_image(ureg,
1979
ureg->image[i].index,
1980
ureg->image[i].target,
1981
ureg->image[i].format,
1983
ureg->image[i].raw);
1986
for (i = 0; i < ureg->nr_buffers; i++) {
1987
emit_decl_buffer(ureg, ureg->buffer[i].index, ureg->buffer[i].atomic);
1990
for (i = 0; i < TGSI_MEMORY_TYPE_COUNT; i++) {
1991
if (ureg->use_memory[i])
1992
emit_decl_memory(ureg, i);
1995
for (i = 0; i < PIPE_MAX_CONSTANT_BUFFERS; i++) {
1996
struct const_decl *decl = &ureg->const_decls[i];
1998
if (decl->nr_constant_ranges) {
2001
for (j = 0; j < decl->nr_constant_ranges; j++) {
2002
emit_decl_range2D(ureg,
2004
decl->constant_range[j].first,
2005
decl->constant_range[j].last,
2011
for (i = 0; i < PIPE_MAX_HW_ATOMIC_BUFFERS; i++) {
2012
struct hw_atomic_decl *decl = &ureg->hw_atomic_decls[i];
2014
if (decl->nr_hw_atomic_ranges) {
2017
for (j = 0; j < decl->nr_hw_atomic_ranges; j++) {
2018
emit_decl_atomic_2d(ureg,
2019
decl->hw_atomic_range[j].first,
2020
decl->hw_atomic_range[j].last,
2022
decl->hw_atomic_range[j].array_id);
2027
if (ureg->nr_temps) {
2029
for (i = 0; i < ureg->nr_temps;) {
2030
boolean local = util_bitmask_get(ureg->local_temps, i);
2032
i = util_bitmask_get_next_index(ureg->decl_temps, i + 1);
2033
if (i == UTIL_BITMASK_INVALID_INDEX)
2036
if (array < ureg->nr_array_temps && ureg->array_temps[array] == first)
2037
emit_decl_temps( ureg, first, i - 1, local, ++array );
2039
emit_decl_temps( ureg, first, i - 1, local, 0 );
2043
if (ureg->nr_addrs) {
2044
emit_decl_range( ureg,
2046
0, ureg->nr_addrs );
2049
for (i = 0; i < ureg->nr_immediates; i++) {
2050
emit_immediate( ureg,
2051
ureg->immediate[i].value.u,
2052
ureg->immediate[i].type );
2056
/* Append the instruction tokens onto the declarations to build a
2057
* contiguous stream suitable to send to the driver.
2059
static void copy_instructions( struct ureg_program *ureg )
2061
unsigned nr_tokens = ureg->domain[DOMAIN_INSN].count;
2062
union tgsi_any_token *out = get_tokens( ureg,
2067
ureg->domain[DOMAIN_INSN].tokens,
2068
nr_tokens * sizeof out[0] );
2073
fixup_header_size(struct ureg_program *ureg)
2075
union tgsi_any_token *out = retrieve_token( ureg, DOMAIN_DECL, 0 );
2077
out->header.BodySize = ureg->domain[DOMAIN_DECL].count - 2;
2082
emit_header( struct ureg_program *ureg )
2084
union tgsi_any_token *out = get_tokens( ureg, DOMAIN_DECL, 2 );
2086
out[0].header.HeaderSize = 2;
2087
out[0].header.BodySize = 0;
2089
out[1].processor.Processor = ureg->processor;
2090
out[1].processor.Padding = 0;
2094
const struct tgsi_token *ureg_finalize( struct ureg_program *ureg )
2096
const struct tgsi_token *tokens;
2098
switch (ureg->processor) {
2099
case PIPE_SHADER_VERTEX:
2100
case PIPE_SHADER_TESS_EVAL:
2101
ureg_property(ureg, TGSI_PROPERTY_NEXT_SHADER,
2102
ureg->next_shader_processor == -1 ?
2103
PIPE_SHADER_FRAGMENT :
2104
ureg->next_shader_processor);
2110
emit_header( ureg );
2112
copy_instructions( ureg );
2113
fixup_header_size( ureg );
2115
if (ureg->domain[0].tokens == error_tokens ||
2116
ureg->domain[1].tokens == error_tokens) {
2117
debug_printf("%s: error in generated shader\n", __FUNCTION__);
2122
tokens = &ureg->domain[DOMAIN_DECL].tokens[0].token;
2125
debug_printf("%s: emitted shader %d tokens:\n", __FUNCTION__,
2126
ureg->domain[DOMAIN_DECL].count);
2127
tgsi_dump( tokens, 0 );
2131
/* tgsi_sanity doesn't seem to return if there are too many constants. */
2132
bool too_many_constants = false;
2133
for (unsigned i = 0; i < ARRAY_SIZE(ureg->const_decls); i++) {
2134
for (unsigned j = 0; j < ureg->const_decls[i].nr_constant_ranges; j++) {
2135
if (ureg->const_decls[i].constant_range[j].last > 4096) {
2136
too_many_constants = true;
2142
if (tokens && !too_many_constants && !tgsi_sanity_check(tokens)) {
2143
debug_printf("tgsi_ureg.c, sanity check failed on generated tokens:\n");
2144
tgsi_dump(tokens, 0);
2154
void *ureg_create_shader( struct ureg_program *ureg,
2155
struct pipe_context *pipe,
2156
const struct pipe_stream_output_info *so )
2158
struct pipe_shader_state state = {0};
2160
pipe_shader_state_from_tgsi(&state, ureg_finalize(ureg));
2165
state.stream_output = *so;
2167
switch (ureg->processor) {
2168
case PIPE_SHADER_VERTEX:
2169
return pipe->create_vs_state(pipe, &state);
2170
case PIPE_SHADER_TESS_CTRL:
2171
return pipe->create_tcs_state(pipe, &state);
2172
case PIPE_SHADER_TESS_EVAL:
2173
return pipe->create_tes_state(pipe, &state);
2174
case PIPE_SHADER_GEOMETRY:
2175
return pipe->create_gs_state(pipe, &state);
2176
case PIPE_SHADER_FRAGMENT:
2177
return pipe->create_fs_state(pipe, &state);
2184
const struct tgsi_token *ureg_get_tokens( struct ureg_program *ureg,
2185
unsigned *nr_tokens )
2187
const struct tgsi_token *tokens;
2189
ureg_finalize(ureg);
2191
tokens = &ureg->domain[DOMAIN_DECL].tokens[0].token;
2194
*nr_tokens = ureg->domain[DOMAIN_DECL].count;
2196
ureg->domain[DOMAIN_DECL].tokens = NULL;
2197
ureg->domain[DOMAIN_DECL].size = 0;
2198
ureg->domain[DOMAIN_DECL].order = 0;
2199
ureg->domain[DOMAIN_DECL].count = 0;
2205
/* Free a token stream previously detached by ureg_get_tokens(). */
void ureg_free_tokens( const struct tgsi_token *tokens )
{
   FREE((struct tgsi_token *)tokens);
}
struct ureg_program *
2212
ureg_create(enum pipe_shader_type processor)
2214
return ureg_create_with_screen(processor, NULL);
2218
struct ureg_program *
2219
ureg_create_with_screen(enum pipe_shader_type processor,
2220
struct pipe_screen *screen)
2223
struct ureg_program *ureg = CALLOC_STRUCT( ureg_program );
2227
ureg->processor = processor;
2228
ureg->supports_any_inout_decl_range =
2230
screen->get_shader_param(screen, processor,
2231
PIPE_SHADER_CAP_TGSI_ANY_INOUT_DECL_RANGE) != 0;
2232
ureg->next_shader_processor = -1;
2234
for (i = 0; i < ARRAY_SIZE(ureg->properties); i++)
2235
ureg->properties[i] = ~0;
2237
ureg->free_temps = util_bitmask_create();
2238
if (ureg->free_temps == NULL)
2241
ureg->local_temps = util_bitmask_create();
2242
if (ureg->local_temps == NULL)
2243
goto no_local_temps;
2245
ureg->decl_temps = util_bitmask_create();
2246
if (ureg->decl_temps == NULL)
2252
util_bitmask_destroy(ureg->local_temps);
2254
util_bitmask_destroy(ureg->free_temps);
2263
ureg_set_next_shader_processor(struct ureg_program *ureg, unsigned processor)
2265
ureg->next_shader_processor = processor;
2270
ureg_get_nr_outputs( const struct ureg_program *ureg )
2274
return ureg->nr_outputs;
2278
ureg_setup_clipdist_info(struct ureg_program *ureg,
2279
const struct shader_info *info)
2281
if (info->clip_distance_array_size)
2282
ureg_property(ureg, TGSI_PROPERTY_NUM_CLIPDIST_ENABLED,
2283
info->clip_distance_array_size);
2284
if (info->cull_distance_array_size)
2285
ureg_property(ureg, TGSI_PROPERTY_NUM_CULLDIST_ENABLED,
2286
info->cull_distance_array_size);
2290
ureg_setup_tess_ctrl_shader(struct ureg_program *ureg,
2291
const struct shader_info *info)
2293
ureg_property(ureg, TGSI_PROPERTY_TCS_VERTICES_OUT,
2294
info->tess.tcs_vertices_out);
2298
ureg_setup_tess_eval_shader(struct ureg_program *ureg,
2299
const struct shader_info *info)
2301
ureg_property(ureg, TGSI_PROPERTY_TES_PRIM_MODE, u_tess_prim_from_shader(info->tess._primitive_mode));
2303
STATIC_ASSERT((TESS_SPACING_EQUAL + 1) % 3 == PIPE_TESS_SPACING_EQUAL);
2304
STATIC_ASSERT((TESS_SPACING_FRACTIONAL_ODD + 1) % 3 ==
2305
PIPE_TESS_SPACING_FRACTIONAL_ODD);
2306
STATIC_ASSERT((TESS_SPACING_FRACTIONAL_EVEN + 1) % 3 ==
2307
PIPE_TESS_SPACING_FRACTIONAL_EVEN);
2309
ureg_property(ureg, TGSI_PROPERTY_TES_SPACING,
2310
(info->tess.spacing + 1) % 3);
2312
ureg_property(ureg, TGSI_PROPERTY_TES_VERTEX_ORDER_CW,
2314
ureg_property(ureg, TGSI_PROPERTY_TES_POINT_MODE,
2315
info->tess.point_mode);
2319
ureg_setup_geometry_shader(struct ureg_program *ureg,
2320
const struct shader_info *info)
2322
ureg_property(ureg, TGSI_PROPERTY_GS_INPUT_PRIM,
2323
info->gs.input_primitive);
2324
ureg_property(ureg, TGSI_PROPERTY_GS_OUTPUT_PRIM,
2325
info->gs.output_primitive);
2326
ureg_property(ureg, TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES,
2327
info->gs.vertices_out);
2328
ureg_property(ureg, TGSI_PROPERTY_GS_INVOCATIONS,
2329
info->gs.invocations);
2333
ureg_setup_fragment_shader(struct ureg_program *ureg,
2334
const struct shader_info *info)
2336
if (info->fs.early_fragment_tests || info->fs.post_depth_coverage) {
2337
ureg_property(ureg, TGSI_PROPERTY_FS_EARLY_DEPTH_STENCIL, 1);
2339
if (info->fs.post_depth_coverage)
2340
ureg_property(ureg, TGSI_PROPERTY_FS_POST_DEPTH_COVERAGE, 1);
2343
if (info->fs.depth_layout != FRAG_DEPTH_LAYOUT_NONE) {
2344
switch (info->fs.depth_layout) {
2345
case FRAG_DEPTH_LAYOUT_ANY:
2346
ureg_property(ureg, TGSI_PROPERTY_FS_DEPTH_LAYOUT,
2347
TGSI_FS_DEPTH_LAYOUT_ANY);
2349
case FRAG_DEPTH_LAYOUT_GREATER:
2350
ureg_property(ureg, TGSI_PROPERTY_FS_DEPTH_LAYOUT,
2351
TGSI_FS_DEPTH_LAYOUT_GREATER);
2353
case FRAG_DEPTH_LAYOUT_LESS:
2354
ureg_property(ureg, TGSI_PROPERTY_FS_DEPTH_LAYOUT,
2355
TGSI_FS_DEPTH_LAYOUT_LESS);
2357
case FRAG_DEPTH_LAYOUT_UNCHANGED:
2358
ureg_property(ureg, TGSI_PROPERTY_FS_DEPTH_LAYOUT,
2359
TGSI_FS_DEPTH_LAYOUT_UNCHANGED);
2366
if (info->fs.advanced_blend_modes) {
2367
ureg_property(ureg, TGSI_PROPERTY_FS_BLEND_EQUATION_ADVANCED,
2368
info->fs.advanced_blend_modes);
2373
ureg_setup_compute_shader(struct ureg_program *ureg,
2374
const struct shader_info *info)
2376
ureg_property(ureg, TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH,
2377
info->workgroup_size[0]);
2378
ureg_property(ureg, TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT,
2379
info->workgroup_size[1]);
2380
ureg_property(ureg, TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH,
2381
info->workgroup_size[2]);
2383
if (info->shared_size)
2384
ureg_DECL_memory(ureg, TGSI_MEMORY_TYPE_SHARED);
2388
ureg_setup_shader_info(struct ureg_program *ureg,
2389
const struct shader_info *info)
2391
if (info->layer_viewport_relative)
2392
ureg_property(ureg, TGSI_PROPERTY_LAYER_VIEWPORT_RELATIVE, 1);
2394
switch (info->stage) {
2395
case MESA_SHADER_VERTEX:
2396
ureg_setup_clipdist_info(ureg, info);
2397
ureg_set_next_shader_processor(ureg, pipe_shader_type_from_mesa(info->next_stage));
2399
case MESA_SHADER_TESS_CTRL:
2400
ureg_setup_tess_ctrl_shader(ureg, info);
2402
case MESA_SHADER_TESS_EVAL:
2403
ureg_setup_tess_eval_shader(ureg, info);
2404
ureg_setup_clipdist_info(ureg, info);
2405
ureg_set_next_shader_processor(ureg, pipe_shader_type_from_mesa(info->next_stage));
2407
case MESA_SHADER_GEOMETRY:
2408
ureg_setup_geometry_shader(ureg, info);
2409
ureg_setup_clipdist_info(ureg, info);
2411
case MESA_SHADER_FRAGMENT:
2412
ureg_setup_fragment_shader(ureg, info);
2414
case MESA_SHADER_COMPUTE:
2415
ureg_setup_compute_shader(ureg, info);
2423
void ureg_destroy( struct ureg_program *ureg )
2427
for (i = 0; i < ARRAY_SIZE(ureg->domain); i++) {
2428
if (ureg->domain[i].tokens &&
2429
ureg->domain[i].tokens != error_tokens)
2430
FREE(ureg->domain[i].tokens);
2433
util_bitmask_destroy(ureg->free_temps);
2434
util_bitmask_destroy(ureg->local_temps);
2435
util_bitmask_destroy(ureg->decl_temps);
2440
void ureg_set_precise( struct ureg_program *ureg, bool precise )
2442
ureg->precise = precise;