/*
 * Copyright 2008 Ben Skeggs
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
23
#include "nv50/nv50_context.h"
24
#include "nv50/nv50_resource.h"
25
#include "nv50/g80_texture.xml.h"
26
#include "nv50/g80_defs.xml.h"
28
#include "util/format/u_format.h"
30
static inline uint32_t
31
nv50_tic_swizzle(const struct nv50_format *fmt, unsigned swz, bool tex_int)
34
case PIPE_SWIZZLE_X : return fmt->tic.src_x;
35
case PIPE_SWIZZLE_Y: return fmt->tic.src_y;
36
case PIPE_SWIZZLE_Z : return fmt->tic.src_z;
37
case PIPE_SWIZZLE_W: return fmt->tic.src_w;
39
return tex_int ? G80_TIC_SOURCE_ONE_INT : G80_TIC_SOURCE_ONE_FLOAT;
42
return G80_TIC_SOURCE_ZERO;
46
struct pipe_sampler_view *
47
nv50_create_sampler_view(struct pipe_context *pipe,
48
struct pipe_resource *res,
49
const struct pipe_sampler_view *templ)
53
if (templ->target == PIPE_TEXTURE_RECT || templ->target == PIPE_BUFFER)
54
flags |= NV50_TEXVIEW_SCALED_COORDS;
56
return nv50_create_texture_view(pipe, res, templ, flags);
59
struct pipe_sampler_view *
60
nv50_create_texture_view(struct pipe_context *pipe,
61
struct pipe_resource *texture,
62
const struct pipe_sampler_view *templ,
65
const uint32_t class_3d = nouveau_context(pipe)->screen->class_3d;
66
const struct util_format_description *desc;
67
const struct nv50_format *fmt;
72
struct nv50_tic_entry *view;
73
struct nv50_miptree *mt = nv50_miptree(texture);
76
view = MALLOC_STRUCT(nv50_tic_entry);
81
view->pipe.reference.count = 1;
82
view->pipe.texture = NULL;
83
view->pipe.context = pipe;
87
pipe_resource_reference(&view->pipe.texture, texture);
91
desc = util_format_description(view->pipe.format);
95
fmt = &nv50_format_table[view->pipe.format];
97
tex_int = util_format_is_pure_integer(view->pipe.format);
99
swz[0] = nv50_tic_swizzle(fmt, view->pipe.swizzle_r, tex_int);
100
swz[1] = nv50_tic_swizzle(fmt, view->pipe.swizzle_g, tex_int);
101
swz[2] = nv50_tic_swizzle(fmt, view->pipe.swizzle_b, tex_int);
102
swz[3] = nv50_tic_swizzle(fmt, view->pipe.swizzle_a, tex_int);
103
tic[0] = (fmt->tic.format << G80_TIC_0_COMPONENTS_SIZES__SHIFT) |
104
(fmt->tic.type_r << G80_TIC_0_R_DATA_TYPE__SHIFT) |
105
(fmt->tic.type_g << G80_TIC_0_G_DATA_TYPE__SHIFT) |
106
(fmt->tic.type_b << G80_TIC_0_B_DATA_TYPE__SHIFT) |
107
(fmt->tic.type_a << G80_TIC_0_A_DATA_TYPE__SHIFT) |
108
(swz[0] << G80_TIC_0_X_SOURCE__SHIFT) |
109
(swz[1] << G80_TIC_0_Y_SOURCE__SHIFT) |
110
(swz[2] << G80_TIC_0_Z_SOURCE__SHIFT) |
111
(swz[3] << G80_TIC_0_W_SOURCE__SHIFT);
113
addr = mt->base.address;
115
depth = MAX2(mt->base.base.array_size, mt->base.base.depth0);
117
if (mt->base.base.array_size > 1) {
118
/* there doesn't seem to be a base layer field in TIC */
119
addr += view->pipe.u.tex.first_layer * mt->layer_stride;
120
depth = view->pipe.u.tex.last_layer - view->pipe.u.tex.first_layer + 1;
123
tic[2] = 0x10001000 | G80_TIC_2_BORDER_SOURCE_COLOR;
125
if (desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB)
126
tic[2] |= G80_TIC_2_SRGB_CONVERSION;
128
if (!(flags & NV50_TEXVIEW_SCALED_COORDS))
129
tic[2] |= G80_TIC_2_NORMALIZED_COORDS;
131
if (unlikely(!nouveau_bo_memtype(nv04_resource(texture)->bo))) {
132
if (templ->target == PIPE_BUFFER) {
133
addr += view->pipe.u.buf.offset;
134
tic[2] |= G80_TIC_2_LAYOUT_PITCH | G80_TIC_2_TEXTURE_TYPE_ONE_D_BUFFER;
137
view->pipe.u.buf.size / (desc->block.bits / 8);
140
tic[2] |= G80_TIC_2_LAYOUT_PITCH | G80_TIC_2_TEXTURE_TYPE_TWO_D_NO_MIPMAP;
141
tic[3] = mt->level[0].pitch;
142
tic[4] = mt->base.base.width0;
143
tic[5] = (1 << 16) | (mt->base.base.height0);
148
tic[2] |= addr >> 32;
153
tic[2] |= (addr >> 32) & 0xff;
156
((mt->level[0].tile_mode & 0x0f0) << (22 - 4)) |
157
((mt->level[0].tile_mode & 0xf00) << (25 - 8));
159
switch (templ->target) {
160
case PIPE_TEXTURE_1D:
161
tic[2] |= G80_TIC_2_TEXTURE_TYPE_ONE_D;
163
case PIPE_TEXTURE_2D:
165
tic[2] |= G80_TIC_2_TEXTURE_TYPE_TWO_D_NO_MIPMAP;
167
tic[2] |= G80_TIC_2_TEXTURE_TYPE_TWO_D;
169
case PIPE_TEXTURE_RECT:
170
tic[2] |= G80_TIC_2_TEXTURE_TYPE_TWO_D_NO_MIPMAP;
172
case PIPE_TEXTURE_3D:
173
tic[2] |= G80_TIC_2_TEXTURE_TYPE_THREE_D;
175
case PIPE_TEXTURE_CUBE:
177
tic[2] |= G80_TIC_2_TEXTURE_TYPE_CUBEMAP;
179
case PIPE_TEXTURE_1D_ARRAY:
180
tic[2] |= G80_TIC_2_TEXTURE_TYPE_ONE_D_ARRAY;
182
case PIPE_TEXTURE_2D_ARRAY:
183
tic[2] |= G80_TIC_2_TEXTURE_TYPE_TWO_D_ARRAY;
185
case PIPE_TEXTURE_CUBE_ARRAY:
187
tic[2] |= G80_TIC_2_TEXTURE_TYPE_CUBE_ARRAY;
190
assert(0); /* should be linear and handled above ! */
191
tic[2] |= G80_TIC_2_TEXTURE_TYPE_ONE_D_BUFFER | G80_TIC_2_LAYOUT_PITCH;
194
unreachable("unexpected/invalid texture target");
197
tic[3] = (flags & NV50_TEXVIEW_FILTER_MSAA8) ? 0x20000000 : 0x00300000;
199
tic[4] = (1 << 31) | (mt->base.base.width0 << mt->ms_x);
201
tic[5] = (mt->base.base.height0 << mt->ms_y) & 0xffff;
202
tic[5] |= depth << 16;
203
if (class_3d > NV50_3D_CLASS)
204
tic[5] |= mt->base.base.last_level << G80_TIC_5_MAP_MIP_LEVEL__SHIFT;
206
tic[5] |= view->pipe.u.tex.last_level << G80_TIC_5_MAP_MIP_LEVEL__SHIFT;
208
tic[6] = (mt->ms_x > 1) ? 0x88000000 : 0x03000000; /* sampling points */
210
if (class_3d > NV50_3D_CLASS)
211
tic[7] = (view->pipe.u.tex.last_level << 4) | view->pipe.u.tex.first_level;
215
if (unlikely(!(tic[2] & G80_TIC_2_NORMALIZED_COORDS)))
216
if (mt->base.base.last_level)
217
tic[5] &= ~G80_TIC_5_MAP_MIP_LEVEL__MASK;
223
nv50_update_tic(struct nv50_context *nv50, struct nv50_tic_entry *tic,
224
struct nv04_resource *res)
226
uint64_t address = res->address;
227
if (res->base.target != PIPE_BUFFER)
229
address += tic->pipe.u.buf.offset;
230
if (tic->tic[1] == (uint32_t)address &&
231
(tic->tic[2] & 0xff) == address >> 32)
234
nv50_screen_tic_unlock(nv50->screen, tic);
236
tic->tic[1] = address;
237
tic->tic[2] &= 0xffffff00;
238
tic->tic[2] |= address >> 32;
242
nv50_validate_tic(struct nv50_context *nv50, int s)
244
struct nouveau_pushbuf *push = nv50->base.pushbuf;
245
struct nouveau_bo *txc = nv50->screen->txc;
247
bool need_flush = false;
248
const bool is_compute_stage = s == NV50_SHADER_STAGE_COMPUTE;
250
assert(nv50->num_textures[s] <= PIPE_MAX_SAMPLERS);
251
for (i = 0; i < nv50->num_textures[s]; ++i) {
252
struct nv50_tic_entry *tic = nv50_tic_entry(nv50->textures[s][i]);
253
struct nv04_resource *res;
256
if (unlikely(is_compute_stage))
257
BEGIN_NV04(push, NV50_CP(BIND_TIC), 1);
259
BEGIN_NV04(push, NV50_3D(BIND_TIC(s)), 1);
260
PUSH_DATA (push, (i << 1) | 0);
263
res = &nv50_miptree(tic->pipe.texture)->base;
264
nv50_update_tic(nv50, tic, res);
267
tic->id = nv50_screen_tic_alloc(nv50->screen, tic);
269
BEGIN_NV04(push, NV50_2D(DST_FORMAT), 2);
270
PUSH_DATA (push, G80_SURFACE_FORMAT_R8_UNORM);
272
BEGIN_NV04(push, NV50_2D(DST_PITCH), 5);
273
PUSH_DATA (push, 262144);
274
PUSH_DATA (push, 65536);
276
PUSH_DATAh(push, txc->offset);
277
PUSH_DATA (push, txc->offset);
278
BEGIN_NV04(push, NV50_2D(SIFC_BITMAP_ENABLE), 2);
280
PUSH_DATA (push, G80_SURFACE_FORMAT_R8_UNORM);
281
BEGIN_NV04(push, NV50_2D(SIFC_WIDTH), 10);
282
PUSH_DATA (push, 32);
289
PUSH_DATA (push, tic->id * 32);
292
BEGIN_NI04(push, NV50_2D(SIFC_DATA), 8);
293
PUSH_DATAp(push, &tic->tic[0], 8);
297
if (res->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING) {
298
if (unlikely(is_compute_stage))
299
BEGIN_NV04(push, NV50_CP(TEX_CACHE_CTL), 1);
301
BEGIN_NV04(push, NV50_3D(TEX_CACHE_CTL), 1);
302
PUSH_DATA (push, 0x20);
305
nv50->screen->tic.lock[tic->id / 32] |= 1 << (tic->id % 32);
307
res->status &= ~NOUVEAU_BUFFER_STATUS_GPU_WRITING;
308
res->status |= NOUVEAU_BUFFER_STATUS_GPU_READING;
310
if (unlikely(is_compute_stage)) {
311
BCTX_REFN(nv50->bufctx_cp, CP_TEXTURES, res, RD);
312
BEGIN_NV04(push, NV50_CP(BIND_TIC), 1);
314
BCTX_REFN(nv50->bufctx_3d, 3D_TEXTURES, res, RD);
315
BEGIN_NV04(push, NV50_3D(BIND_TIC(s)), 1);
317
PUSH_DATA (push, (tic->id << 9) | (i << 1) | 1);
319
for (; i < nv50->state.num_textures[s]; ++i) {
320
if (unlikely(is_compute_stage))
321
BEGIN_NV04(push, NV50_CP(BIND_TIC), 1);
323
BEGIN_NV04(push, NV50_3D(BIND_TIC(s)), 1);
324
PUSH_DATA (push, (i << 1) | 0);
326
if (nv50->num_textures[s]) {
327
if (unlikely(is_compute_stage))
328
BEGIN_NV04(push, NV50_CP(CB_ADDR), 1);
330
BEGIN_NV04(push, NV50_3D(CB_ADDR), 1);
331
PUSH_DATA (push, ((NV50_CB_AUX_TEX_MS_OFFSET + 16 * s * 2 * 4) << (8 - 2)) | NV50_CB_AUX);
332
if (unlikely(is_compute_stage))
333
BEGIN_NV04(push, NV50_CP(CB_DATA(0)), nv50->num_textures[s] * 2);
335
BEGIN_NI04(push, NV50_3D(CB_DATA(0)), nv50->num_textures[s] * 2);
336
for (i = 0; i < nv50->num_textures[s]; i++) {
337
struct nv50_tic_entry *tic = nv50_tic_entry(nv50->textures[s][i]);
338
struct nv50_miptree *res;
340
if (!tic || tic->pipe.target == PIPE_BUFFER) {
345
res = nv50_miptree(tic->pipe.texture);
346
PUSH_DATA (push, res->ms_x);
347
PUSH_DATA (push, res->ms_y);
350
nv50->state.num_textures[s] = nv50->num_textures[s];
355
void nv50_validate_textures(struct nv50_context *nv50)
358
bool need_flush = false;
360
for (s = 0; s < NV50_MAX_3D_SHADER_STAGES; ++s)
361
need_flush |= nv50_validate_tic(nv50, s);
364
BEGIN_NV04(nv50->base.pushbuf, NV50_3D(TIC_FLUSH), 1);
365
PUSH_DATA (nv50->base.pushbuf, 0);
368
/* Invalidate all CP textures because they are aliased. */
369
nouveau_bufctx_reset(nv50->bufctx_cp, NV50_BIND_CP_TEXTURES);
370
nv50->dirty_cp |= NV50_NEW_CP_TEXTURES;
374
nv50_validate_tsc(struct nv50_context *nv50, int s)
376
struct nouveau_pushbuf *push = nv50->base.pushbuf;
378
bool need_flush = false;
379
const bool is_compute_stage = s == NV50_SHADER_STAGE_COMPUTE;
381
assert(nv50->num_samplers[s] <= PIPE_MAX_SAMPLERS);
382
for (i = 0; i < nv50->num_samplers[s]; ++i) {
383
struct nv50_tsc_entry *tsc = nv50_tsc_entry(nv50->samplers[s][i]);
386
if (is_compute_stage)
387
BEGIN_NV04(push, NV50_CP(BIND_TSC), 1);
389
BEGIN_NV04(push, NV50_3D(BIND_TSC(s)), 1);
390
PUSH_DATA (push, (i << 4) | 0);
393
nv50->seamless_cube_map = tsc->seamless_cube_map;
395
tsc->id = nv50_screen_tsc_alloc(nv50->screen, tsc);
397
nv50_sifc_linear_u8(&nv50->base, nv50->screen->txc,
398
65536 + tsc->id * 32,
399
NOUVEAU_BO_VRAM, 32, tsc->tsc);
402
nv50->screen->tsc.lock[tsc->id / 32] |= 1 << (tsc->id % 32);
404
if (is_compute_stage)
405
BEGIN_NV04(push, NV50_CP(BIND_TSC), 1);
407
BEGIN_NV04(push, NV50_3D(BIND_TSC(s)), 1);
408
PUSH_DATA (push, (tsc->id << 12) | (i << 4) | 1);
410
for (; i < nv50->state.num_samplers[s]; ++i) {
411
if (is_compute_stage)
412
BEGIN_NV04(push, NV50_CP(BIND_TSC), 1);
414
BEGIN_NV04(push, NV50_3D(BIND_TSC(s)), 1);
415
PUSH_DATA (push, (i << 4) | 0);
417
nv50->state.num_samplers[s] = nv50->num_samplers[s];
419
// TXF, in unlinked tsc mode, will always use sampler 0. So we have to
420
// ensure that it remains bound. Its contents don't matter, all samplers we
421
// ever create have the SRGB_CONVERSION bit set, so as long as the first
422
// entry is initialized, we're good to go. This is the only bit that has
423
// any effect on what TXF does.
424
if (!nv50->samplers[s][0]) {
425
if (is_compute_stage)
426
BEGIN_NV04(push, NV50_CP(BIND_TSC), 1);
428
BEGIN_NV04(push, NV50_3D(BIND_TSC(s)), 1);
435
void nv50_validate_samplers(struct nv50_context *nv50)
438
bool need_flush = false;
440
for (s = 0; s < NV50_MAX_3D_SHADER_STAGES; ++s)
441
need_flush |= nv50_validate_tsc(nv50, s);
444
if (unlikely(s == NV50_SHADER_STAGE_COMPUTE))
445
// TODO(pmoreau): Is this needed? Not done on nvc0
446
BEGIN_NV04(nv50->base.pushbuf, NV50_CP(TSC_FLUSH), 1);
448
BEGIN_NV04(nv50->base.pushbuf, NV50_3D(TSC_FLUSH), 1);
449
PUSH_DATA (nv50->base.pushbuf, 0);
452
/* Invalidate all CP samplers because they are aliased. */
453
nv50->dirty_cp |= NV50_NEW_CP_SAMPLERS;
456
/* There can be up to 4 different MS levels (1, 2, 4, 8). To simplify the
457
* shader logic, allow each one to take up 8 offsets.
459
#define COMBINE(x, y) x, y
461
static const uint32_t msaa_sample_xy_offsets[] = {
503
void nv50_upload_ms_info(struct nouveau_pushbuf *push)
505
BEGIN_NV04(push, NV50_3D(CB_ADDR), 1);
506
PUSH_DATA (push, (NV50_CB_AUX_MS_OFFSET << (8 - 2)) | NV50_CB_AUX);
507
BEGIN_NI04(push, NV50_3D(CB_DATA(0)), ARRAY_SIZE(msaa_sample_xy_offsets));
508
PUSH_DATAp(push, msaa_sample_xy_offsets, ARRAY_SIZE(msaa_sample_xy_offsets));
511
void nv50_upload_tsc0(struct nv50_context *nv50)
513
struct nouveau_pushbuf *push = nv50->base.pushbuf;
514
u32 data[8] = { G80_TSC_0_SRGB_CONVERSION };
515
nv50_sifc_linear_u8(&nv50->base, nv50->screen->txc,
516
65536 /* + tsc->id * 32 */,
517
NOUVEAU_BO_VRAM, 32, data);
518
BEGIN_NV04(push, NV50_3D(TSC_FLUSH), 1);