231
/* - create a gl_PointSizeMESA variable
232
* - find every gl_Position write
233
* - store 1.0 to gl_PointSizeMESA after every gl_Position write
236
st_nir_add_point_size(nir_shader *nir)
238
nir_variable *psiz = nir_variable_create(nir, nir_var_shader_out, glsl_float_type(), "gl_PointSizeMESA");
239
psiz->data.location = VARYING_SLOT_PSIZ;
240
psiz->data.how_declared = nir_var_hidden;
243
nir_function_impl *impl = nir_shader_get_entrypoint(nir);
244
nir_builder_init(&b, impl);
246
nir_foreach_block_safe(block, impl) {
247
nir_foreach_instr_safe(instr, block) {
248
if (instr->type == nir_instr_type_intrinsic) {
249
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
250
if (intr->intrinsic == nir_intrinsic_store_deref ||
251
intr->intrinsic == nir_intrinsic_copy_deref) {
252
nir_variable *var = nir_intrinsic_get_var(intr, 0);
253
if (var->data.location == VARYING_SLOT_POS) {
254
b.cursor = nir_after_instr(instr);
255
nir_deref_instr *deref = nir_build_deref_var(&b, psiz);
256
nir_store_deref(&b, deref, nir_imm_float(&b, 1.0), BITFIELD_BIT(0));
264
b.cursor = nir_before_cf_list(&impl->body);
265
nir_deref_instr *deref = nir_build_deref_var(&b, psiz);
266
nir_store_deref(&b, deref, nir_imm_float(&b, 1.0), BITFIELD_BIT(0));
271
shared_type_info(const struct glsl_type *type, unsigned *size, unsigned *align)
273
assert(glsl_type_is_vector_or_scalar(type));
275
uint32_t comp_size = glsl_type_is_boolean(type)
276
? 4 : glsl_get_bit_size(type) / 8;
277
unsigned length = glsl_get_vector_elements(type);
278
*size = comp_size * length,
279
*align = comp_size * (length == 3 ? 4 : length);
283
st_can_remove_varying_before_linking(nir_variable *var, void *data)
285
bool *is_sso = (bool *) data;
287
/* Allow the removal of unused builtins in SSO */
288
return var->data.location > -1 && var->data.location < VARYING_SLOT_VAR0;
294
zero_array_members(nir_builder *b, nir_variable *var)
296
nir_deref_instr *deref = nir_build_deref_var(b, var);
297
nir_ssa_def *zero = nir_imm_zero(b, 4, 32);
298
for (int i = 0; i < glsl_array_size(var->type); i++) {
299
nir_deref_instr *arr = nir_build_deref_array_imm(b, deref, i);
300
uint32_t mask = BITFIELD_MASK(glsl_get_vector_elements(arr->type));
301
nir_store_deref(b, arr, nir_channels(b, zero, mask), mask);
305
/* GL has an implicit default of 0 for unwritten gl_ClipDistance members;
306
* to achieve this, write 0 to all members at the start of the shader and
307
* let them be naturally overwritten later
310
st_nir_zero_initialize_clip_distance(nir_shader *nir)
312
nir_variable *clip_dist0 = nir_find_variable_with_location(nir, nir_var_shader_out, VARYING_SLOT_CLIP_DIST0);
313
nir_variable *clip_dist1 = nir_find_variable_with_location(nir, nir_var_shader_out, VARYING_SLOT_CLIP_DIST1);
314
if (!clip_dist0 && !clip_dist1)
317
nir_function_impl *impl = nir_shader_get_entrypoint(nir);
318
nir_builder_init(&b, impl);
319
b.cursor = nir_before_block(nir_start_block(impl));
321
zero_array_members(&b, clip_dist0);
323
zero_array_members(&b, clip_dist1);
327
/* First third of converting glsl_to_nir.. this leaves things in a pre-
328
* nir_lower_io state, so that shader variants can more easily insert/
329
* replace variables, etc.
332
st_nir_preprocess(struct st_context *st, struct gl_program *prog,
333
struct gl_shader_program *shader_program,
334
gl_shader_stage stage)
336
struct pipe_screen *screen = st->screen;
337
const nir_shader_compiler_options *options =
338
st->ctx->Const.ShaderCompilerOptions[prog->info.stage].NirOptions;
340
nir_shader *nir = prog->nir;
342
/* Set the next shader stage hint for VS and TES. */
343
if (!nir->info.separate_shader &&
344
(nir->info.stage == MESA_SHADER_VERTEX ||
345
nir->info.stage == MESA_SHADER_TESS_EVAL)) {
347
unsigned prev_stages = (1 << (prog->info.stage + 1)) - 1;
348
unsigned stages_mask =
349
~prev_stages & shader_program->data->linked_stages;
351
nir->info.next_stage = stages_mask ?
352
(gl_shader_stage) u_bit_scan(&stages_mask) : MESA_SHADER_FRAGMENT;
354
nir->info.next_stage = MESA_SHADER_FRAGMENT;
357
nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
358
if (!st->ctx->SoftFP64 && ((nir->info.bit_sizes_int | nir->info.bit_sizes_float) & 64) &&
359
(options->lower_doubles_options & nir_lower_fp64_full_software) != 0) {
361
/* It's not possible to use float64 on GLSL ES, so don't bother trying to
362
* build the support code. The support code depends on higher versions of
363
* desktop GLSL, so it will fail to compile (below) anyway.
365
if (_mesa_is_desktop_gl(st->ctx) && st->ctx->Const.GLSLVersion >= 400)
366
st->ctx->SoftFP64 = glsl_float64_funcs_to_nir(st->ctx, options);
369
prog->skip_pointsize_xfb = !(nir->info.outputs_written & VARYING_BIT_PSIZ);
370
if (st->lower_point_size && prog->skip_pointsize_xfb &&
371
stage < MESA_SHADER_FRAGMENT && stage != MESA_SHADER_TESS_CTRL &&
372
st_can_add_pointsize_to_program(st, prog)) {
373
NIR_PASS_V(nir, st_nir_add_point_size);
376
if (stage < MESA_SHADER_FRAGMENT && stage != MESA_SHADER_TESS_CTRL &&
377
(nir->info.outputs_written & (VARYING_BIT_CLIP_DIST0 | VARYING_BIT_CLIP_DIST1)))
378
NIR_PASS_V(nir, st_nir_zero_initialize_clip_distance);
380
struct nir_remove_dead_variables_options opts;
381
bool is_sso = nir->info.separate_shader;
382
opts.can_remove_var_data = &is_sso;
383
opts.can_remove_var = &st_can_remove_varying_before_linking;
384
nir_variable_mode mask = nir_var_shader_in | nir_var_shader_out;
385
nir_remove_dead_variables(nir, mask, &opts);
387
if (options->lower_all_io_to_temps ||
388
nir->info.stage == MESA_SHADER_VERTEX ||
389
nir->info.stage == MESA_SHADER_GEOMETRY) {
390
NIR_PASS_V(nir, nir_lower_io_to_temporaries,
391
nir_shader_get_entrypoint(nir),
393
} else if (nir->info.stage == MESA_SHADER_FRAGMENT ||
394
!screen->get_param(screen, PIPE_CAP_SHADER_CAN_READ_OUTPUTS)) {
395
NIR_PASS_V(nir, nir_lower_io_to_temporaries,
396
nir_shader_get_entrypoint(nir),
400
NIR_PASS_V(nir, nir_lower_global_vars_to_local);
401
NIR_PASS_V(nir, nir_split_var_copies);
402
NIR_PASS_V(nir, nir_lower_var_copies);
404
enum pipe_shader_type pstage = pipe_shader_type_from_mesa(stage);
405
if (st->screen->get_shader_param(st->screen, pstage, PIPE_SHADER_CAP_FP16) &&
406
st->screen->get_shader_param(st->screen, pstage, PIPE_SHADER_CAP_INT16)) {
407
NIR_PASS_V(nir, nir_lower_mediump_vars, nir_var_function_temp | nir_var_shader_temp | nir_var_mem_shared);
410
if (options->lower_to_scalar) {
411
NIR_PASS_V(nir, nir_remove_dead_variables,
412
nir_var_function_temp | nir_var_shader_temp |
413
nir_var_mem_shared, NULL);
414
NIR_PASS_V(nir, nir_opt_copy_prop_vars);
415
NIR_PASS_V(nir, nir_lower_alu_to_scalar,
416
options->lower_to_scalar_filter, NULL);
419
/* before buffers and vars_to_ssa */
420
NIR_PASS_V(nir, gl_nir_lower_images, true);
422
if (prog->nir->info.stage == MESA_SHADER_COMPUTE) {
423
NIR_PASS_V(prog->nir, nir_lower_vars_to_explicit_types,
424
nir_var_mem_shared, shared_type_info);
425
NIR_PASS_V(prog->nir, nir_lower_explicit_io,
426
nir_var_mem_shared, nir_address_format_32bit_offset);
429
/* Do a round of constant folding to clean up address calculations */
430
NIR_PASS_V(nir, nir_opt_constant_folding);
434
234
dest_is_64bit(nir_dest *dest, void *state)
/* NOTE(review): this span is a garbled interleave of TWO revisions of the
 * NIR linker entry point (st_link_nir, later renamed st_link_glsl_to_nir);
 * the bare integers are original line numbers from the two revisions, and
 * large interior stretches are missing.  Left byte-identical — recover the
 * true source from upstream Mesa before attempting any functional change.
 */
708
st_link_nir(struct gl_context *ctx,
709
struct gl_shader_program *shader_program)
487
st_link_glsl_to_nir(struct gl_context *ctx,
488
struct gl_shader_program *shader_program)
711
490
struct st_context *st = st_context(ctx);
491
struct pipe_screen *pscreen = st->screen;
712
492
struct gl_linked_shader *linked_shader[MESA_SHADER_STAGES];
713
493
unsigned num_shaders = 0;
495
/* Return early if we are loading the shader from on-disk cache */
496
if (st_load_nir_from_disk_cache(ctx, shader_program)) {
502
assert(shader_program->data->LinkStatus);
504
/* Skip the GLSL steps when using SPIR-V. */
505
if (!shader_program->data->spirv) {
/* NOTE(review): per-stage GLSL IR lowering loop; its closing braces and
 * several statements were lost in extraction.
 */
506
for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
507
if (shader_program->_LinkedShaders[i] == NULL)
510
struct gl_linked_shader *shader = shader_program->_LinkedShaders[i];
511
exec_list *ir = shader->ir;
512
gl_shader_stage stage = shader->Stage;
513
const struct gl_shader_compiler_options *options =
514
&ctx->Const.ShaderCompilerOptions[stage];
516
enum pipe_shader_type ptarget = pipe_shader_type_from_mesa(stage);
517
bool have_dround = pscreen->get_shader_param(pscreen, ptarget,
518
PIPE_SHADER_CAP_DROUND_SUPPORTED);
520
if (!pscreen->get_param(pscreen, PIPE_CAP_INT64_DIVMOD))
521
lower_64bit_integer_instructions(ir, DIV64 | MOD64);
523
lower_packing_builtins(ir, ctx->Extensions.ARB_shading_language_packing,
524
ctx->Extensions.ARB_gpu_shader5,
525
ctx->st->has_half_float_packing);
526
do_mat_op_to_vec(ir);
528
lower_instructions(ir, have_dround,
529
ctx->Extensions.ARB_gpu_shader5);
531
do_vec_index_to_cond_assign(ir);
532
if (options->MaxIfDepth == 0) {
536
validate_ir_tree(ir);
715
540
for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
716
541
if (shader_program->_LinkedShaders[i])
717
542
linked_shader[num_shaders++] = shader_program->_LinkedShaders[i];
/* NOTE(review): the loop converting each stage to NIR (which defines "prog"
 * and "shader" used below) is missing from this extraction.
 */
752
577
memcpy(prog->nir->info.source_sha1, shader->linked_source_sha1,
753
578
SHA1_DIGEST_LENGTH);
754
st_nir_preprocess(st, prog, shader_program, shader->Stage);
756
if (prog->nir->info.shared_size > ctx->Const.MaxComputeSharedMemorySize) {
757
linker_error(shader_program, "Too much shared memory used (%u/%u)\n",
758
prog->nir->info.shared_size,
759
ctx->Const.MaxComputeSharedMemorySize);
763
if (options->lower_to_scalar) {
764
NIR_PASS_V(shader->Program->nir, nir_lower_load_const_to_scalar);
768
st_lower_patch_vertices_in(shader_program);
770
/* Linking shaders also optimizes them. Separate shaders, compute shaders
771
* and shaders with a fixed-func VS or FS that don't need linking are
774
if (num_shaders == 1)
775
gl_nir_opts(linked_shader[0]->Program->nir);
777
/* nir_opt_access() needs to run before linking so that ImageAccess[]
778
* and BindlessImage[].access are filled out with the correct modes.
780
for (unsigned i = 0; i < num_shaders; i++) {
781
nir_shader *nir = linked_shader[i]->Program->nir;
783
nir_opt_access_options opt_access_options;
784
opt_access_options.is_vulkan = false;
785
NIR_PASS_V(nir, nir_opt_access, &opt_access_options);
787
/* Combine clip and cull outputs into one array and set:
788
* - shader_info::clip_distance_array_size
789
* - shader_info::cull_distance_array_size
791
if (!st->screen->get_param(st->screen, PIPE_CAP_CULL_DISTANCE_NOCOMBINE))
792
NIR_PASS_V(nir, nir_lower_clip_cull_distance_arrays);
580
nir_shader_gather_info(prog->nir, nir_shader_get_entrypoint(prog->nir));
581
if (!st->ctx->SoftFP64 && ((prog->nir->info.bit_sizes_int | prog->nir->info.bit_sizes_float) & 64) &&
582
(options->lower_doubles_options & nir_lower_fp64_full_software) != 0) {
584
/* It's not possible to use float64 on GLSL ES, so don't bother trying to
585
* build the support code. The support code depends on higher versions of
586
* desktop GLSL, so it will fail to compile (below) anyway.
588
if (_mesa_is_desktop_gl(st->ctx) && st->ctx->Const.GLSLVersion >= 400)
589
st->ctx->SoftFP64 = glsl_float64_funcs_to_nir(st->ctx, options);
/* NOTE(review): two different gl_nir_link_spirv() signatures appear below —
 * one per interleaved revision; only one can be correct for this file.
 */
795
593
if (shader_program->data->spirv) {
796
594
static const gl_nir_linker_options opts = {
797
595
true /*fill_parameters */
799
if (!gl_nir_link_spirv(&ctx->Const, shader_program, &opts))
597
if (!gl_nir_link_spirv(&ctx->Const, &ctx->Extensions, shader_program,
802
601
if (!gl_nir_link_glsl(&ctx->Const, &ctx->Extensions, ctx->API,
930
* Link a GLSL shader program. Called via glLinkProgram().
933
st_link_shader(struct gl_context *ctx, struct gl_shader_program *prog)
940
_mesa_clear_shader_program_data(ctx, prog);
942
prog->data = _mesa_create_shader_program_data();
944
prog->data->LinkStatus = LINKING_SUCCESS;
946
for (i = 0; i < prog->NumShaders; i++) {
947
if (!prog->Shaders[i]->CompileStatus) {
948
linker_error(prog, "linking with uncompiled/unspecialized shader");
952
spirv = (prog->Shaders[i]->spirv_data != NULL);
953
} else if (spirv && !prog->Shaders[i]->spirv_data) {
954
/* The GL_ARB_gl_spirv spec adds a new bullet point to the list of
955
* reasons LinkProgram can fail:
957
* "All the shader objects attached to <program> do not have the
958
* same value for the SPIR_V_BINARY_ARB state."
961
"not all attached shaders have the same "
962
"SPIR_V_BINARY_ARB state");
965
prog->data->spirv = spirv;
967
if (prog->data->LinkStatus) {
969
link_shaders(ctx, prog);
971
_mesa_spirv_link_shaders(ctx, prog);
974
/* If LinkStatus is LINKING_SUCCESS, then reset sampler validated to true.
975
* Validation happens via the LinkShader call below. If LinkStatus is
976
* LINKING_SKIPPED, then SamplersValidated will have been restored from the
979
if (prog->data->LinkStatus == LINKING_SUCCESS) {
980
prog->SamplersValidated = GL_TRUE;
983
if (prog->data->LinkStatus && !st_link_glsl_to_nir(ctx, prog)) {
984
prog->data->LinkStatus = LINKING_FAILURE;
987
if (prog->data->LinkStatus != LINKING_FAILURE)
988
_mesa_create_program_resource_hash(prog);
990
/* Return early if we are loading the shader from on-disk cache */
991
if (prog->data->LinkStatus == LINKING_SKIPPED)
994
if (ctx->_Shader->Flags & GLSL_DUMP) {
995
if (!prog->data->LinkStatus) {
996
fprintf(stderr, "GLSL shader program %d failed to link\n", prog->Name);
999
if (prog->data->InfoLog && prog->data->InfoLog[0] != 0) {
1000
fprintf(stderr, "GLSL shader program %d info log:\n", prog->Name);
1001
fprintf(stderr, "%s\n", prog->data->InfoLog);
1005
#ifdef ENABLE_SHADER_CACHE
1006
if (prog->data->LinkStatus)
1007
shader_cache_write_program_metadata(ctx, prog);
1106
1011
} /* extern "C" */