~mmach/netext73/mesa-haswell

« back to all changes in this revision

Viewing changes to src/gallium/drivers/freedreno/a5xx/fd5_gmem.c

  • Committer: mmach
  • Date: 2022-09-22 19:56:13 UTC
  • Revision ID: netbit73@gmail.com-20220922195613-wtik9mmy20tmor0i
2022-09-22 21:17:09

Show diffs side-by-side

added added

removed removed

Lines of Context:
1
 
/*
2
 
 * Copyright (C) 2016 Rob Clark <robclark@freedesktop.org>
3
 
 *
4
 
 * Permission is hereby granted, free of charge, to any person obtaining a
5
 
 * copy of this software and associated documentation files (the "Software"),
6
 
 * to deal in the Software without restriction, including without limitation
7
 
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8
 
 * and/or sell copies of the Software, and to permit persons to whom the
9
 
 * Software is furnished to do so, subject to the following conditions:
10
 
 *
11
 
 * The above copyright notice and this permission notice (including the next
12
 
 * paragraph) shall be included in all copies or substantial portions of the
13
 
 * Software.
14
 
 *
15
 
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
 
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
 
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18
 
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
 
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
 
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
 
 * SOFTWARE.
22
 
 *
23
 
 * Authors:
24
 
 *    Rob Clark <robclark@freedesktop.org>
25
 
 */
26
 
 
27
 
#include "pipe/p_state.h"
28
 
#include "util/format/u_format.h"
29
 
#include "util/u_inlines.h"
30
 
#include "util/u_memory.h"
31
 
#include "util/u_string.h"
32
 
 
33
 
#include "freedreno_draw.h"
34
 
#include "freedreno_resource.h"
35
 
#include "freedreno_state.h"
36
 
 
37
 
#include "fd5_context.h"
38
 
#include "fd5_draw.h"
39
 
#include "fd5_emit.h"
40
 
#include "fd5_format.h"
41
 
#include "fd5_gmem.h"
42
 
#include "fd5_program.h"
43
 
#include "fd5_zsa.h"
44
 
 
45
 
/* Emit color render-target (MRT) state for every hardware MRT slot.
 *
 * If 'gmem' is non-NULL the color buffers are set up in tile (GMEM)
 * memory: base offsets come from gmem->cbuf_base[] and the stride is
 * derived from the bin dimensions.  If 'gmem' is NULL the buffers point
 * at their backing resources in system memory.  Slots beyond nr_bufs
 * (or with a NULL surface) are programmed with a zeroed/disabled state.
 */
static void
emit_mrt(struct fd_ringbuffer *ring, unsigned nr_bufs,
         struct pipe_surface **bufs, const struct fd_gmem_stateobj *gmem)
{
   enum a5xx_tile_mode tile_mode;
   unsigned i;

   for (i = 0; i < A5XX_MAX_RENDER_TARGETS; i++) {
      enum a5xx_color_fmt format = 0;
      enum a3xx_color_swap swap = WZYX;
      bool srgb = false, sint = false, uint = false;
      struct fd_resource *rsc = NULL;
      uint32_t stride = 0;
      uint32_t size = 0;
      uint32_t base = 0;
      uint32_t offset = 0;

      /* GMEM rendering always uses the tiled layout; sysmem defaults to
       * linear until the resource's actual tile mode is known below.
       */
      if (gmem) {
         tile_mode = TILE5_2;
      } else {
         tile_mode = TILE5_LINEAR;
      }

      if ((i < nr_bufs) && bufs[i]) {
         struct pipe_surface *psurf = bufs[i];
         enum pipe_format pformat = psurf->format;

         rsc = fd_resource(psurf->texture);

         format = fd5_pipe2color(pformat);
         swap = fd5_pipe2swap(pformat);
         srgb = util_format_is_srgb(pformat);
         sint = util_format_is_pure_sint(pformat);
         uint = util_format_is_pure_uint(pformat);

         /* rendering to a layer range is not supported here: */
         debug_assert(psurf->u.tex.first_layer == psurf->u.tex.last_layer);

         offset = fd_resource_offset(rsc, psurf->u.tex.level,
                                     psurf->u.tex.first_layer);

         if (gmem) {
            stride = gmem->bin_w * gmem->cbuf_cpp[i];
            size = stride * gmem->bin_h;
            base = gmem->cbuf_base[i];
         } else {
            stride = fd_resource_pitch(rsc, psurf->u.tex.level);
            size = fd_resource_layer_stride(rsc, psurf->u.tex.level);

            tile_mode =
               fd_resource_tile_mode(psurf->texture, psurf->u.tex.level);
         }
      }

      OUT_PKT4(ring, REG_A5XX_RB_MRT_BUF_INFO(i), 5);
      OUT_RING(
         ring,
         A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT(format) |
            A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(tile_mode) |
            A5XX_RB_MRT_BUF_INFO_COLOR_SWAP(swap) |
            COND(gmem,
                 0x800) | /* XXX 0x1000 for RECTLIST clear, 0x0 for BLIT.. */
            COND(srgb, A5XX_RB_MRT_BUF_INFO_COLOR_SRGB));
      OUT_RING(ring, A5XX_RB_MRT_PITCH(stride));
      OUT_RING(ring, A5XX_RB_MRT_ARRAY_PITCH(size));
      /* GMEM mode (and unused slots) take a plain offset as the base;
       * sysmem needs a relocation against the resource's BO:
       */
      if (gmem || (i >= nr_bufs) || !bufs[i]) {
         OUT_RING(ring, base);       /* RB_MRT[i].BASE_LO */
         OUT_RING(ring, 0x00000000); /* RB_MRT[i].BASE_HI */
      } else {
         OUT_RELOC(ring, rsc->bo, offset, 0, 0); /* BASE_LO/HI */
      }

      OUT_PKT4(ring, REG_A5XX_SP_FS_MRT_REG(i), 1);
      OUT_RING(ring, A5XX_SP_FS_MRT_REG_COLOR_FORMAT(format) |
                        COND(sint, A5XX_SP_FS_MRT_REG_COLOR_SINT) |
                        COND(uint, A5XX_SP_FS_MRT_REG_COLOR_UINT) |
                        COND(srgb, A5XX_SP_FS_MRT_REG_COLOR_SRGB));

      /* when we support UBWC, these would be the system memory
       * addr/pitch/etc:
       */
      OUT_PKT4(ring, REG_A5XX_RB_MRT_FLAG_BUFFER(i), 4);
      OUT_RING(ring, 0x00000000); /* RB_MRT_FLAG_BUFFER[i].ADDR_LO */
      OUT_RING(ring, 0x00000000); /* RB_MRT_FLAG_BUFFER[i].ADDR_HI */
      OUT_RING(ring, A5XX_RB_MRT_FLAG_BUFFER_PITCH(0));
      OUT_RING(ring, A5XX_RB_MRT_FLAG_BUFFER_ARRAY_PITCH(0));
   }
}
132
 
 
133
 
static void
134
 
emit_zs(struct fd_ringbuffer *ring, struct pipe_surface *zsbuf,
135
 
        const struct fd_gmem_stateobj *gmem)
136
 
{
137
 
   if (zsbuf) {
138
 
      struct fd_resource *rsc = fd_resource(zsbuf->texture);
139
 
      enum a5xx_depth_format fmt = fd5_pipe2depth(zsbuf->format);
140
 
      uint32_t cpp = rsc->layout.cpp;
141
 
      uint32_t stride = 0;
142
 
      uint32_t size = 0;
143
 
 
144
 
      if (gmem) {
145
 
         stride = cpp * gmem->bin_w;
146
 
         size = stride * gmem->bin_h;
147
 
      } else {
148
 
         stride = fd_resource_pitch(rsc, zsbuf->u.tex.level);
149
 
         size = fd_resource_layer_stride(rsc, zsbuf->u.tex.level);
150
 
      }
151
 
 
152
 
      OUT_PKT4(ring, REG_A5XX_RB_DEPTH_BUFFER_INFO, 5);
153
 
      OUT_RING(ring, A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT(fmt));
154
 
      if (gmem) {
155
 
         OUT_RING(ring, gmem->zsbuf_base[0]); /* RB_DEPTH_BUFFER_BASE_LO */
156
 
         OUT_RING(ring, 0x00000000);          /* RB_DEPTH_BUFFER_BASE_HI */
157
 
      } else {
158
 
         OUT_RELOC(ring, rsc->bo,
159
 
            fd_resource_offset(rsc, zsbuf->u.tex.level, zsbuf->u.tex.first_layer),
160
 
            0, 0); /* RB_DEPTH_BUFFER_BASE_LO/HI */
161
 
      }
162
 
      OUT_RING(ring, A5XX_RB_DEPTH_BUFFER_PITCH(stride));
163
 
      OUT_RING(ring, A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH(size));
164
 
 
165
 
      OUT_PKT4(ring, REG_A5XX_GRAS_SU_DEPTH_BUFFER_INFO, 1);
166
 
      OUT_RING(ring, A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT(fmt));
167
 
 
168
 
      OUT_PKT4(ring, REG_A5XX_RB_DEPTH_FLAG_BUFFER_BASE_LO, 3);
169
 
      OUT_RING(ring, 0x00000000); /* RB_DEPTH_FLAG_BUFFER_BASE_LO */
170
 
      OUT_RING(ring, 0x00000000); /* RB_DEPTH_FLAG_BUFFER_BASE_HI */
171
 
      OUT_RING(ring, 0x00000000); /* RB_DEPTH_FLAG_BUFFER_PITCH */
172
 
 
173
 
      if (rsc->lrz) {
174
 
         OUT_PKT4(ring, REG_A5XX_GRAS_LRZ_BUFFER_BASE_LO, 3);
175
 
         OUT_RELOC(ring, rsc->lrz, 0x1000, 0, 0);
176
 
         OUT_RING(ring, A5XX_GRAS_LRZ_BUFFER_PITCH(rsc->lrz_pitch));
177
 
 
178
 
         OUT_PKT4(ring, REG_A5XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_LO, 2);
179
 
         OUT_RELOC(ring, rsc->lrz, 0, 0, 0);
180
 
      } else {
181
 
         OUT_PKT4(ring, REG_A5XX_GRAS_LRZ_BUFFER_BASE_LO, 3);
182
 
         OUT_RING(ring, 0x00000000);
183
 
         OUT_RING(ring, 0x00000000);
184
 
         OUT_RING(ring, 0x00000000); /* GRAS_LRZ_BUFFER_PITCH */
185
 
 
186
 
         OUT_PKT4(ring, REG_A5XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_LO, 2);
187
 
         OUT_RING(ring, 0x00000000);
188
 
         OUT_RING(ring, 0x00000000);
189
 
      }
190
 
 
191
 
      if (rsc->stencil) {
192
 
         if (gmem) {
193
 
            stride = 1 * gmem->bin_w;
194
 
            size = stride * gmem->bin_h;
195
 
         } else {
196
 
            stride = fd_resource_pitch(rsc->stencil, zsbuf->u.tex.level);
197
 
            size = fd_resource_layer_stride(rsc, zsbuf->u.tex.level);
198
 
         }
199
 
 
200
 
         OUT_PKT4(ring, REG_A5XX_RB_STENCIL_INFO, 5);
201
 
         OUT_RING(ring, A5XX_RB_STENCIL_INFO_SEPARATE_STENCIL);
202
 
         if (gmem) {
203
 
            OUT_RING(ring, gmem->zsbuf_base[1]); /* RB_STENCIL_BASE_LO */
204
 
            OUT_RING(ring, 0x00000000);          /* RB_STENCIL_BASE_HI */
205
 
         } else {
206
 
            OUT_RELOC(ring, rsc->stencil->bo,
207
 
               fd_resource_offset(rsc->stencil, zsbuf->u.tex.level, zsbuf->u.tex.first_layer),
208
 
                      0, 0); /* RB_STENCIL_BASE_LO/HI */
209
 
         }
210
 
         OUT_RING(ring, A5XX_RB_STENCIL_PITCH(stride));
211
 
         OUT_RING(ring, A5XX_RB_STENCIL_ARRAY_PITCH(size));
212
 
      } else {
213
 
         OUT_PKT4(ring, REG_A5XX_RB_STENCIL_INFO, 1);
214
 
         OUT_RING(ring, 0x00000000); /* RB_STENCIL_INFO */
215
 
      }
216
 
   } else {
217
 
      OUT_PKT4(ring, REG_A5XX_RB_DEPTH_BUFFER_INFO, 5);
218
 
      OUT_RING(ring, A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT(DEPTH5_NONE));
219
 
      OUT_RING(ring, 0x00000000); /* RB_DEPTH_BUFFER_BASE_LO */
220
 
      OUT_RING(ring, 0x00000000); /* RB_DEPTH_BUFFER_BASE_HI */
221
 
      OUT_RING(ring, 0x00000000); /* RB_DEPTH_BUFFER_PITCH */
222
 
      OUT_RING(ring, 0x00000000); /* RB_DEPTH_BUFFER_ARRAY_PITCH */
223
 
 
224
 
      OUT_PKT4(ring, REG_A5XX_GRAS_SU_DEPTH_BUFFER_INFO, 1);
225
 
      OUT_RING(ring, A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT(DEPTH5_NONE));
226
 
 
227
 
      OUT_PKT4(ring, REG_A5XX_RB_DEPTH_FLAG_BUFFER_BASE_LO, 3);
228
 
      OUT_RING(ring, 0x00000000); /* RB_DEPTH_FLAG_BUFFER_BASE_LO */
229
 
      OUT_RING(ring, 0x00000000); /* RB_DEPTH_FLAG_BUFFER_BASE_HI */
230
 
      OUT_RING(ring, 0x00000000); /* RB_DEPTH_FLAG_BUFFER_PITCH */
231
 
 
232
 
      OUT_PKT4(ring, REG_A5XX_RB_STENCIL_INFO, 1);
233
 
      OUT_RING(ring, 0x00000000); /* RB_STENCIL_INFO */
234
 
   }
235
 
}
236
 
 
237
 
/* Emit the MSAA sample-count state to the TPL1, RB and GRAS blocks.
 * A sample count of one additionally sets the per-block MSAA_DISABLE bit.
 */
static void
emit_msaa(struct fd_ringbuffer *ring, uint32_t nr_samples)
{
   enum a3xx_msaa_samples samples = fd_msaa_samples(nr_samples);

   OUT_PKT4(ring, REG_A5XX_TPL1_TP_RAS_MSAA_CNTL, 2);
   OUT_RING(ring, A5XX_TPL1_TP_RAS_MSAA_CNTL_SAMPLES(samples));
   OUT_RING(ring, A5XX_TPL1_TP_DEST_MSAA_CNTL_SAMPLES(samples) |
                     COND(samples == MSAA_ONE,
                          A5XX_TPL1_TP_DEST_MSAA_CNTL_MSAA_DISABLE));

   OUT_PKT4(ring, REG_A5XX_RB_RAS_MSAA_CNTL, 2);
   OUT_RING(ring, A5XX_RB_RAS_MSAA_CNTL_SAMPLES(samples));
   OUT_RING(ring,
            A5XX_RB_DEST_MSAA_CNTL_SAMPLES(samples) |
               COND(samples == MSAA_ONE, A5XX_RB_DEST_MSAA_CNTL_MSAA_DISABLE));

   OUT_PKT4(ring, REG_A5XX_GRAS_SC_RAS_MSAA_CNTL, 2);
   OUT_RING(ring, A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES(samples));
   OUT_RING(ring, A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES(samples) |
                     COND(samples == MSAA_ONE,
                          A5XX_GRAS_SC_DEST_MSAA_CNTL_MSAA_DISABLE));
}
260
 
 
261
 
static bool
262
 
use_hw_binning(struct fd_batch *batch)
263
 
{
264
 
   const struct fd_gmem_stateobj *gmem = batch->gmem_state;
265
 
 
266
 
   if ((gmem->maxpw * gmem->maxph) > 32)
267
 
      return false;
268
 
 
269
 
   if ((gmem->maxpw > 15) || (gmem->maxph > 15))
270
 
      return false;
271
 
 
272
 
   return fd_binning_enabled && ((gmem->nbins_x * gmem->nbins_y) > 2) &&
273
 
          (batch->num_draws > 0);
274
 
}
275
 
 
276
 
static void
277
 
patch_draws(struct fd_batch *batch, enum pc_di_vis_cull_mode vismode)
278
 
{
279
 
   unsigned i;
280
 
   for (i = 0; i < fd_patch_num_elements(&batch->draw_patches); i++) {
281
 
      struct fd_cs_patch *patch = fd_patch_element(&batch->draw_patches, i);
282
 
      *patch->cs = patch->val | DRAW4(0, 0, 0, vismode);
283
 
   }
284
 
   util_dynarray_clear(&batch->draw_patches);
285
 
}
286
 
 
287
 
/* Program the Visibility Stream Compressor (VSC): bin size, the screen
 * region covered by each of the 16 visibility pipes, and the BOs that
 * receive the per-pipe visibility stream data.  Pipe BOs are allocated
 * lazily and kept on the context for reuse across batches.
 */
static void
update_vsc_pipe(struct fd_batch *batch) assert_dt
{
   struct fd_context *ctx = batch->ctx;
   struct fd5_context *fd5_ctx = fd5_context(ctx);
   const struct fd_gmem_stateobj *gmem = batch->gmem_state;
   struct fd_ringbuffer *ring = batch->gmem;
   int i;

   OUT_PKT4(ring, REG_A5XX_VSC_BIN_SIZE, 3);
   OUT_RING(ring, A5XX_VSC_BIN_SIZE_WIDTH(gmem->bin_w) |
                     A5XX_VSC_BIN_SIZE_HEIGHT(gmem->bin_h));
   OUT_RELOC(ring, fd5_ctx->vsc_size_mem, 0, 0, 0); /* VSC_SIZE_ADDRESS_LO/HI */

   OUT_PKT4(ring, REG_A5XX_UNKNOWN_0BC5, 2);
   OUT_RING(ring, 0x00000000); /* UNKNOWN_0BC5 */
   OUT_RING(ring, 0x00000000); /* UNKNOWN_0BC6 */

   /* x/y position and w/h extent (in bins) of each visibility pipe: */
   OUT_PKT4(ring, REG_A5XX_VSC_PIPE_CONFIG_REG(0), 16);
   for (i = 0; i < 16; i++) {
      const struct fd_vsc_pipe *pipe = &gmem->vsc_pipe[i];
      OUT_RING(ring, A5XX_VSC_PIPE_CONFIG_REG_X(pipe->x) |
                        A5XX_VSC_PIPE_CONFIG_REG_Y(pipe->y) |
                        A5XX_VSC_PIPE_CONFIG_REG_W(pipe->w) |
                        A5XX_VSC_PIPE_CONFIG_REG_H(pipe->h));
   }

   OUT_PKT4(ring, REG_A5XX_VSC_PIPE_DATA_ADDRESS_LO(0), 32);
   for (i = 0; i < 16; i++) {
      /* lazily allocate the per-pipe visibility stream buffer: */
      if (!ctx->vsc_pipe_bo[i]) {
         ctx->vsc_pipe_bo[i] = fd_bo_new(
            ctx->dev, 0x20000, 0, "vsc_pipe[%u]", i);
      }
      OUT_RELOC(ring, ctx->vsc_pipe_bo[i], 0, 0,
                0); /* VSC_PIPE_DATA_ADDRESS[i].LO/HI */
   }

   OUT_PKT4(ring, REG_A5XX_VSC_PIPE_DATA_LENGTH_REG(0), 16);
   for (i = 0; i < 16; i++) {
      /* NOTE(review): length is the BO size minus 32 — presumably some
       * trailing bytes are reserved; confirm against hw documentation.
       */
      OUT_RING(ring, fd_bo_size(ctx->vsc_pipe_bo[i]) -
                        32); /* VSC_PIPE_DATA_LENGTH[i] */
   }
}
330
 
 
331
 
/* Run the binning pass: replay the batch's draws in BINNING mode so the
 * VSC produces per-bin visibility streams consumed later by the tile
 * rendering passes.
 */
static void
emit_binning_pass(struct fd_batch *batch) assert_dt
{
   struct fd_ringbuffer *ring = batch->gmem;
   const struct fd_gmem_stateobj *gmem = batch->gmem_state;

   /* full window-space bounds of the gmem region, inclusive: */
   uint32_t x1 = gmem->minx;
   uint32_t y1 = gmem->miny;
   uint32_t x2 = gmem->minx + gmem->width - 1;
   uint32_t y2 = gmem->miny + gmem->height - 1;

   fd5_set_render_mode(batch->ctx, ring, BINNING);

   OUT_PKT4(ring, REG_A5XX_RB_CNTL, 1);
   OUT_RING(ring,
            A5XX_RB_CNTL_WIDTH(gmem->bin_w) | A5XX_RB_CNTL_HEIGHT(gmem->bin_h));

   OUT_PKT4(ring, REG_A5XX_GRAS_SC_WINDOW_SCISSOR_TL, 2);
   OUT_RING(ring, A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X(x1) |
                     A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(y1));
   OUT_RING(ring, A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X(x2) |
                     A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(y2));

   OUT_PKT4(ring, REG_A5XX_RB_RESOLVE_CNTL_1, 2);
   OUT_RING(ring, A5XX_RB_RESOLVE_CNTL_1_X(x1) | A5XX_RB_RESOLVE_CNTL_1_Y(y1));
   OUT_RING(ring, A5XX_RB_RESOLVE_CNTL_2_X(x2) | A5XX_RB_RESOLVE_CNTL_2_Y(y2));

   update_vsc_pipe(batch);

   OUT_PKT4(ring, REG_A5XX_VPC_MODE_CNTL, 1);
   OUT_RING(ring, A5XX_VPC_MODE_CNTL_BINNING_PASS);

   fd5_event_write(batch, ring, UNK_2C, false);

   OUT_PKT4(ring, REG_A5XX_RB_WINDOW_OFFSET, 1);
   OUT_RING(ring, A5XX_RB_WINDOW_OFFSET_X(0) | A5XX_RB_WINDOW_OFFSET_Y(0));

   /* emit IB to binning drawcmds: */
   fd5_emit_ib(ring, batch->binning);

   fd_reset_wfi(batch);

   fd5_event_write(batch, ring, UNK_2D, false);

   /* wait for the visibility stream writes to land before using them: */
   fd5_event_write(batch, ring, CACHE_FLUSH_TS, true);

   // TODO CP_COND_WRITE's for all the vsc buffers (check for overflow??)

   fd_wfi(batch, ring);

   /* leave binning mode: */
   OUT_PKT4(ring, REG_A5XX_VPC_MODE_CNTL, 1);
   OUT_RING(ring, 0x0);
}
384
 
 
385
 
/* before first tile */
386
 
/* before first tile */
/* One-time per-batch GMEM setup: restore GPU state, run the optional
 * prologue and (if profitable) the HW binning pass, then switch the
 * hardware into GMEM rendering mode.
 */
static void
fd5_emit_tile_init(struct fd_batch *batch) assert_dt
{
   struct fd_ringbuffer *ring = batch->gmem;
   struct pipe_framebuffer_state *pfb = &batch->framebuffer;

   fd5_emit_restore(batch, ring);

   if (batch->prologue)
      fd5_emit_ib(ring, batch->prologue);

   fd5_emit_lrz_flush(batch, ring);

   OUT_PKT4(ring, REG_A5XX_GRAS_CL_CNTL, 1);
   OUT_RING(ring, 0x00000080); /* GRAS_CL_CNTL */

   OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
   OUT_RING(ring, 0x0);

   OUT_PKT4(ring, REG_A5XX_PC_POWER_CNTL, 1);
   OUT_RING(ring, 0x00000003); /* PC_POWER_CNTL */

   OUT_PKT4(ring, REG_A5XX_VFD_POWER_CNTL, 1);
   OUT_RING(ring, 0x00000003); /* VFD_POWER_CNTL */

   /* 0x10000000 for BYPASS.. 0x7c13c080 for GMEM: */
   fd_wfi(batch, ring);
   OUT_PKT4(ring, REG_A5XX_RB_CCU_CNTL, 1);
   OUT_RING(ring, 0x7c13c080); /* RB_CCU_CNTL */

   emit_zs(ring, pfb->zsbuf, batch->gmem_state);
   emit_mrt(ring, pfb->nr_cbufs, pfb->cbufs, batch->gmem_state);

   /* Enable stream output for the first pass (likely the binning). */
   OUT_PKT4(ring, REG_A5XX_VPC_SO_OVERRIDE, 1);
   OUT_RING(ring, 0);

   if (use_hw_binning(batch)) {
      emit_binning_pass(batch);

      /* Disable stream output after binning, since each VS output should get
       * streamed out once.
       */
      OUT_PKT4(ring, REG_A5XX_VPC_SO_OVERRIDE, 1);
      OUT_RING(ring, A5XX_VPC_SO_OVERRIDE_SO_DISABLE);

      fd5_emit_lrz_flush(batch, ring);
      patch_draws(batch, USE_VISIBILITY);
   } else {
      patch_draws(batch, IGNORE_VISIBILITY);
   }

   fd5_set_render_mode(batch->ctx, ring, GMEM);

   /* XXX If we're in gmem mode but not doing HW binning, then after the first
    * tile we should disable stream output (fd6_gmem.c doesn't do that either).
    */
}
444
 
 
445
 
/* before mem2gmem */
446
 
/* before mem2gmem */
/* Per-tile setup: program the scissor/resolve window for this tile and,
 * when HW binning is in use, point the CP at this tile's visibility
 * stream (CP_SET_BIN_DATA5); otherwise force all draws visible.
 */
static void
fd5_emit_tile_prep(struct fd_batch *batch, const struct fd_tile *tile) assert_dt
{
   struct fd_context *ctx = batch->ctx;
   const struct fd_gmem_stateobj *gmem = batch->gmem_state;
   struct fd5_context *fd5_ctx = fd5_context(ctx);
   struct fd_ringbuffer *ring = batch->gmem;

   /* inclusive window-space bounds of this tile: */
   uint32_t x1 = tile->xoff;
   uint32_t y1 = tile->yoff;
   uint32_t x2 = tile->xoff + tile->bin_w - 1;
   uint32_t y2 = tile->yoff + tile->bin_h - 1;

   OUT_PKT4(ring, REG_A5XX_GRAS_SC_WINDOW_SCISSOR_TL, 2);
   OUT_RING(ring, A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X(x1) |
                     A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(y1));
   OUT_RING(ring, A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X(x2) |
                     A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(y2));

   OUT_PKT4(ring, REG_A5XX_RB_RESOLVE_CNTL_1, 2);
   OUT_RING(ring, A5XX_RB_RESOLVE_CNTL_1_X(x1) | A5XX_RB_RESOLVE_CNTL_1_Y(y1));
   OUT_RING(ring, A5XX_RB_RESOLVE_CNTL_2_X(x2) | A5XX_RB_RESOLVE_CNTL_2_Y(y2));

   if (use_hw_binning(batch)) {
      const struct fd_vsc_pipe *pipe = &gmem->vsc_pipe[tile->p];
      struct fd_bo *pipe_bo = ctx->vsc_pipe_bo[tile->p];

      OUT_PKT7(ring, CP_WAIT_FOR_ME, 0);

      OUT_PKT7(ring, CP_SET_VISIBILITY_OVERRIDE, 1);
      OUT_RING(ring, 0x0);

      /* consume this tile's slice of the visibility stream: */
      OUT_PKT7(ring, CP_SET_BIN_DATA5, 5);
      OUT_RING(ring, CP_SET_BIN_DATA5_0_VSC_SIZE(pipe->w * pipe->h) |
                        CP_SET_BIN_DATA5_0_VSC_N(tile->n));
      OUT_RELOC(ring, pipe_bo, 0, 0, 0);     /* VSC_PIPE[p].DATA_ADDRESS */
      OUT_RELOC(ring, fd5_ctx->vsc_size_mem, /* VSC_SIZE_ADDRESS + (p * 4) */
                (tile->p * 4), 0, 0);
   } else {
      /* no binning data — treat everything as visible: */
      OUT_PKT7(ring, CP_SET_VISIBILITY_OVERRIDE, 1);
      OUT_RING(ring, 0x1);
   }

   OUT_PKT4(ring, REG_A5XX_RB_WINDOW_OFFSET, 1);
   OUT_RING(ring, A5XX_RB_WINDOW_OFFSET_X(x1) | A5XX_RB_WINDOW_OFFSET_Y(y1));
}
492
 
 
493
 
/*
494
 
 * transfer from system memory to gmem
495
 
 */
496
 
 
497
 
/* Blit one surface from system memory into GMEM at offset 'base'.
 * Used during tile restore; 'buf' selects which GMEM buffer (MRTn/ZS/S)
 * the blit targets.
 */
static void
emit_mem2gmem_surf(struct fd_batch *batch, uint32_t base,
                   struct pipe_surface *psurf, enum a5xx_blit_buf buf)
{
   struct fd_ringbuffer *ring = batch->gmem;
   const struct fd_gmem_stateobj *gmem = batch->gmem_state;
   struct fd_resource *rsc = fd_resource(psurf->texture);
   uint32_t stride, size;

   /* layered restore not supported: */
   debug_assert(psurf->u.tex.first_layer == psurf->u.tex.last_layer);

   if (buf == BLIT_S)
      rsc = rsc->stencil;

   if ((buf == BLIT_ZS) || (buf == BLIT_S)) {
      // XXX hack import via BLIT_MRT0 instead of BLIT_ZS, since I don't
      // know otherwise how to go from linear in sysmem to tiled in gmem.
      // possibly we want to flip this around gmem2mem and keep depth
      // tiled in sysmem (and fixup sampler state to assume tiled).. this
      // might be required for doing depth/stencil in bypass mode?
      enum a5xx_color_fmt format =
         fd5_pipe2color(fd_gmem_restore_format(rsc->b.b.format));

      OUT_PKT4(ring, REG_A5XX_RB_MRT_BUF_INFO(0), 5);
      OUT_RING(ring,
               A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT(format) |
                  A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(rsc->layout.tile_mode) |
                  A5XX_RB_MRT_BUF_INFO_COLOR_SWAP(WZYX));
      OUT_RING(ring, A5XX_RB_MRT_PITCH(fd_resource_pitch(rsc, psurf->u.tex.level)));
      OUT_RING(ring, A5XX_RB_MRT_ARRAY_PITCH(fd_resource_layer_stride(rsc, psurf->u.tex.level)));
      OUT_RELOC(ring, rsc->bo,
         fd_resource_offset(rsc, psurf->u.tex.level, psurf->u.tex.first_layer),
         0, 0); /* BASE_LO/HI */

      buf = BLIT_MRT0;
   }

   /* destination layout inside gmem is bin-sized: */
   stride = gmem->bin_w << fdl_cpp_shift(&rsc->layout);
   size = stride * gmem->bin_h;

   OUT_PKT4(ring, REG_A5XX_RB_BLIT_FLAG_DST_LO, 4);
   OUT_RING(ring, 0x00000000); /* RB_BLIT_FLAG_DST_LO */
   OUT_RING(ring, 0x00000000); /* RB_BLIT_FLAG_DST_HI */
   OUT_RING(ring, 0x00000000); /* RB_BLIT_FLAG_DST_PITCH */
   OUT_RING(ring, 0x00000000); /* RB_BLIT_FLAG_DST_ARRAY_PITCH */

   OUT_PKT4(ring, REG_A5XX_RB_RESOLVE_CNTL_3, 5);
   OUT_RING(ring, 0x00000000); /* RB_RESOLVE_CNTL_3 */
   OUT_RING(ring, base);       /* RB_BLIT_DST_LO */
   OUT_RING(ring, 0x00000000); /* RB_BLIT_DST_HI */
   OUT_RING(ring, A5XX_RB_BLIT_DST_PITCH(stride));
   OUT_RING(ring, A5XX_RB_BLIT_DST_ARRAY_PITCH(size));

   OUT_PKT4(ring, REG_A5XX_RB_BLIT_CNTL, 1);
   OUT_RING(ring, A5XX_RB_BLIT_CNTL_BUF(buf));

   fd5_emit_blit(batch, ring);
}
555
 
 
556
 
/* Restore (load) this tile's contents from system memory into GMEM for
 * any buffers the batch needs preserved (batch->restore bits).
 */
static void
fd5_emit_tile_mem2gmem(struct fd_batch *batch, const struct fd_tile *tile)
{
   struct fd_ringbuffer *ring = batch->gmem;
   const struct fd_gmem_stateobj *gmem = batch->gmem_state;
   struct pipe_framebuffer_state *pfb = &batch->framebuffer;

   /*
    * setup mrt and zs with system memory base addresses:
    */

   emit_mrt(ring, pfb->nr_cbufs, pfb->cbufs, NULL);
   //   emit_zs(ring, pfb->zsbuf, NULL);

   OUT_PKT4(ring, REG_A5XX_RB_CNTL, 1);
   OUT_RING(ring, A5XX_RB_CNTL_WIDTH(gmem->bin_w) |
                     A5XX_RB_CNTL_HEIGHT(gmem->bin_h) | A5XX_RB_CNTL_BYPASS);

   if (fd_gmem_needs_restore(batch, tile, FD_BUFFER_COLOR)) {
      unsigned i;
      for (i = 0; i < pfb->nr_cbufs; i++) {
         if (!pfb->cbufs[i])
            continue;
         /* only restore color buffers whose restore bit is set: */
         if (!(batch->restore & (PIPE_CLEAR_COLOR0 << i)))
            continue;
         emit_mem2gmem_surf(batch, gmem->cbuf_base[i], pfb->cbufs[i],
                            BLIT_MRT0 + i);
      }
   }

   if (fd_gmem_needs_restore(batch, tile,
                             FD_BUFFER_DEPTH | FD_BUFFER_STENCIL)) {
      struct fd_resource *rsc = fd_resource(pfb->zsbuf->texture);

      /* combined (or depth-only) surface, then separate stencil: */
      if (!rsc->stencil || fd_gmem_needs_restore(batch, tile, FD_BUFFER_DEPTH))
         emit_mem2gmem_surf(batch, gmem->zsbuf_base[0], pfb->zsbuf, BLIT_ZS);
      if (rsc->stencil && fd_gmem_needs_restore(batch, tile, FD_BUFFER_STENCIL))
         emit_mem2gmem_surf(batch, gmem->zsbuf_base[1], pfb->zsbuf, BLIT_S);
   }
}
596
 
 
597
 
/* before IB to rendering cmds: */
598
 
/* before IB to rendering cmds: */
/* Re-point the render targets and depth/stencil at their GMEM locations
 * (tile restore programmed them for sysmem) and set MSAA state before
 * the tile's draw commands execute.
 */
static void
fd5_emit_tile_renderprep(struct fd_batch *batch, const struct fd_tile *tile)
{
   struct fd_ringbuffer *ring = batch->gmem;
   const struct fd_gmem_stateobj *gmem = batch->gmem_state;
   struct pipe_framebuffer_state *pfb = &batch->framebuffer;

   OUT_PKT4(ring, REG_A5XX_RB_CNTL, 1);
   OUT_RING(ring,
            A5XX_RB_CNTL_WIDTH(gmem->bin_w) | A5XX_RB_CNTL_HEIGHT(gmem->bin_h));

   emit_zs(ring, pfb->zsbuf, gmem);
   emit_mrt(ring, pfb->nr_cbufs, pfb->cbufs, gmem);
   emit_msaa(ring, pfb->samples);
}
613
 
 
614
 
/*
615
 
 * transfer from gmem to system memory (ie. normal RAM)
616
 
 */
617
 
 
618
 
/* Blit one surface from GMEM (at offset 'base') back out to its backing
 * resource in system memory.  Used during tile resolve; skipped entirely
 * when the resource has no valid contents.
 */
static void
emit_gmem2mem_surf(struct fd_batch *batch, uint32_t base,
                   struct pipe_surface *psurf, enum a5xx_blit_buf buf)
{
   struct fd_ringbuffer *ring = batch->gmem;
   struct fd_resource *rsc = fd_resource(psurf->texture);
   bool tiled;
   uint32_t offset, pitch;

   if (!rsc->valid)
      return;

   if (buf == BLIT_S)
      rsc = rsc->stencil;

   offset =
      fd_resource_offset(rsc, psurf->u.tex.level, psurf->u.tex.first_layer);
   pitch = fd_resource_pitch(rsc, psurf->u.tex.level);

   /* layered resolve not supported: */
   debug_assert(psurf->u.tex.first_layer == psurf->u.tex.last_layer);

   OUT_PKT4(ring, REG_A5XX_RB_BLIT_FLAG_DST_LO, 4);
   OUT_RING(ring, 0x00000000); /* RB_BLIT_FLAG_DST_LO */
   OUT_RING(ring, 0x00000000); /* RB_BLIT_FLAG_DST_HI */
   OUT_RING(ring, 0x00000000); /* RB_BLIT_FLAG_DST_PITCH */
   OUT_RING(ring, 0x00000000); /* RB_BLIT_FLAG_DST_ARRAY_PITCH */

   tiled = fd_resource_tile_mode(psurf->texture, psurf->u.tex.level);

   OUT_PKT4(ring, REG_A5XX_RB_RESOLVE_CNTL_3, 5);
   OUT_RING(ring, 0x00000004 | /* XXX RB_RESOLVE_CNTL_3 */
                     COND(tiled, A5XX_RB_RESOLVE_CNTL_3_TILED));
   OUT_RELOC(ring, rsc->bo, offset, 0, 0); /* RB_BLIT_DST_LO/HI */
   OUT_RING(ring, A5XX_RB_BLIT_DST_PITCH(pitch));
   OUT_RING(ring, A5XX_RB_BLIT_DST_ARRAY_PITCH(fd_resource_layer_stride(rsc, psurf->u.tex.level)));

   OUT_PKT4(ring, REG_A5XX_RB_BLIT_CNTL, 1);
   OUT_RING(ring, A5XX_RB_BLIT_CNTL_BUF(buf));

   /* MSAA resolve-on-store is not wired up yet: */
   //   bool msaa_resolve = pfb->samples > 1;
   bool msaa_resolve = false;
   OUT_PKT4(ring, REG_A5XX_RB_CLEAR_CNTL, 1);
   OUT_RING(ring, COND(msaa_resolve, A5XX_RB_CLEAR_CNTL_MSAA_RESOLVE));

   fd5_emit_blit(batch, ring);
}
664
 
 
665
 
static void
666
 
fd5_emit_tile_gmem2mem(struct fd_batch *batch, const struct fd_tile *tile)
667
 
{
668
 
   const struct fd_gmem_stateobj *gmem = batch->gmem_state;
669
 
   struct pipe_framebuffer_state *pfb = &batch->framebuffer;
670
 
 
671
 
   if (batch->resolve & (FD_BUFFER_DEPTH | FD_BUFFER_STENCIL)) {
672
 
      struct fd_resource *rsc = fd_resource(pfb->zsbuf->texture);
673
 
 
674
 
      if (!rsc->stencil || (batch->resolve & FD_BUFFER_DEPTH))
675
 
         emit_gmem2mem_surf(batch, gmem->zsbuf_base[0], pfb->zsbuf, BLIT_ZS);
676
 
      if (rsc->stencil && (batch->resolve & FD_BUFFER_STENCIL))
677
 
         emit_gmem2mem_surf(batch, gmem->zsbuf_base[1], pfb->zsbuf, BLIT_S);
678
 
   }
679
 
 
680
 
   if (batch->resolve & FD_BUFFER_COLOR) {
681
 
      unsigned i;
682
 
      for (i = 0; i < pfb->nr_cbufs; i++) {
683
 
         if (!pfb->cbufs[i])
684
 
            continue;
685
 
         if (!(batch->resolve & (PIPE_CLEAR_COLOR0 << i)))
686
 
            continue;
687
 
         emit_gmem2mem_surf(batch, gmem->cbuf_base[i], pfb->cbufs[i],
688
 
                            BLIT_MRT0 + i);
689
 
      }
690
 
   }
691
 
}
692
 
 
693
 
/* Per-batch GMEM teardown: flush LRZ and caches, then return the
 * hardware to BYPASS (direct sysmem) rendering mode.
 */
static void
fd5_emit_tile_fini(struct fd_batch *batch) assert_dt
{
   struct fd_ringbuffer *ring = batch->gmem;

   OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
   OUT_RING(ring, 0x0);

   fd5_emit_lrz_flush(batch, ring);

   fd5_cache_flush(batch, ring);
   fd5_set_render_mode(batch->ctx, ring, BYPASS);
}
706
 
 
707
 
/* Set up direct-to-sysmem (bypass) rendering: restore state, run the
 * optional prologue, program full-framebuffer scissor/window state and
 * point the render targets at system memory.  Blit/compute (nondraw)
 * batches only need the early portion.
 */
static void
fd5_emit_sysmem_prep(struct fd_batch *batch) assert_dt
{
   struct fd_ringbuffer *ring = batch->gmem;

   fd5_emit_restore(batch, ring);

   fd5_emit_lrz_flush(batch, ring);

   if (batch->prologue)
      fd5_emit_ib(ring, batch->prologue);

   OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
   OUT_RING(ring, 0x0);

   fd5_event_write(batch, ring, PC_CCU_INVALIDATE_COLOR, false);

   OUT_PKT4(ring, REG_A5XX_PC_POWER_CNTL, 1);
   OUT_RING(ring, 0x00000003); /* PC_POWER_CNTL */

   OUT_PKT4(ring, REG_A5XX_VFD_POWER_CNTL, 1);
   OUT_RING(ring, 0x00000003); /* VFD_POWER_CNTL */

   /* 0x10000000 for BYPASS.. 0x7c13c080 for GMEM: */
   fd_wfi(batch, ring);
   OUT_PKT4(ring, REG_A5XX_RB_CCU_CNTL, 1);
   OUT_RING(ring, 0x10000000); /* RB_CCU_CNTL */

   OUT_PKT4(ring, REG_A5XX_RB_CNTL, 1);
   OUT_RING(ring, A5XX_RB_CNTL_WIDTH(0) | A5XX_RB_CNTL_HEIGHT(0) |
                     A5XX_RB_CNTL_BYPASS);

   /* remaining setup below here does not apply to blit/compute: */
   if (batch->nondraw)
      return;

   struct pipe_framebuffer_state *pfb = &batch->framebuffer;

   /* scissor/resolve window covers the whole framebuffer: */
   OUT_PKT4(ring, REG_A5XX_GRAS_SC_WINDOW_SCISSOR_TL, 2);
   OUT_RING(ring, A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X(0) |
                     A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(0));
   OUT_RING(ring, A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X(pfb->width - 1) |
                     A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(pfb->height - 1));

   OUT_PKT4(ring, REG_A5XX_RB_RESOLVE_CNTL_1, 2);
   OUT_RING(ring, A5XX_RB_RESOLVE_CNTL_1_X(0) | A5XX_RB_RESOLVE_CNTL_1_Y(0));
   OUT_RING(ring, A5XX_RB_RESOLVE_CNTL_2_X(pfb->width - 1) |
                     A5XX_RB_RESOLVE_CNTL_2_Y(pfb->height - 1));

   OUT_PKT4(ring, REG_A5XX_RB_WINDOW_OFFSET, 1);
   OUT_RING(ring, A5XX_RB_WINDOW_OFFSET_X(0) | A5XX_RB_WINDOW_OFFSET_Y(0));

   /* Enable stream output, since there's no binning pass to put it in. */
   OUT_PKT4(ring, REG_A5XX_VPC_SO_OVERRIDE, 1);
   OUT_RING(ring, 0);

   /* no visibility stream in bypass mode — everything visible: */
   OUT_PKT7(ring, CP_SET_VISIBILITY_OVERRIDE, 1);
   OUT_RING(ring, 0x1);

   patch_draws(batch, IGNORE_VISIBILITY);

   emit_zs(ring, pfb->zsbuf, NULL);
   emit_mrt(ring, pfb->nr_cbufs, pfb->cbufs, NULL);
   emit_msaa(ring, pfb->samples);
}
772
 
 
773
 
/* Finish a sysmem (bypass) batch: flush LRZ and flush the color/depth
 * CCU caches out to memory.
 */
static void
fd5_emit_sysmem_fini(struct fd_batch *batch)
{
   struct fd_ringbuffer *ring = batch->gmem;

   OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
   OUT_RING(ring, 0x0);

   fd5_emit_lrz_flush(batch, ring);

   fd5_event_write(batch, ring, PC_CCU_FLUSH_COLOR_TS, true);
   fd5_event_write(batch, ring, PC_CCU_FLUSH_DEPTH_TS, true);
}
786
 
 
787
 
void
788
 
fd5_gmem_init(struct pipe_context *pctx) disable_thread_safety_analysis
789
 
{
790
 
   struct fd_context *ctx = fd_context(pctx);
791
 
 
792
 
   ctx->emit_tile_init = fd5_emit_tile_init;
793
 
   ctx->emit_tile_prep = fd5_emit_tile_prep;
794
 
   ctx->emit_tile_mem2gmem = fd5_emit_tile_mem2gmem;
795
 
   ctx->emit_tile_renderprep = fd5_emit_tile_renderprep;
796
 
   ctx->emit_tile_gmem2mem = fd5_emit_tile_gmem2mem;
797
 
   ctx->emit_tile_fini = fd5_emit_tile_fini;
798
 
   ctx->emit_sysmem_prep = fd5_emit_sysmem_prep;
799
 
   ctx->emit_sysmem_fini = fd5_emit_sysmem_fini;
800
 
}