
Viewing changes to src/gallium/drivers/freedreno/freedreno_draw.c

  • Committer: mmach
  • Date: 2021-04-17 06:19:36 UTC
  • Revision ID: netbit73@gmail.com-20210417061936-peb5vc5ysl5zeoad
 
/*
 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "pipe/p_state.h"
#include "util/u_draw.h"
#include "util/u_string.h"
#include "util/u_memory.h"
#include "util/u_prim.h"
#include "util/format/u_format.h"
#include "util/u_helpers.h"

#include "freedreno_blitter.h"
#include "freedreno_draw.h"
#include "freedreno_context.h"
#include "freedreno_fence.h"
#include "freedreno_state.h"
#include "freedreno_resource.h"
#include "freedreno_query_acc.h"
#include "freedreno_query_hw.h"
#include "freedreno_util.h"

static void
resource_read(struct fd_batch *batch, struct pipe_resource *prsc)
{
        if (!prsc)
                return;
        fd_batch_resource_read(batch, fd_resource(prsc));
}

static void
resource_written(struct fd_batch *batch, struct pipe_resource *prsc)
{
        if (!prsc)
                return;
        fd_batch_resource_write(batch, fd_resource(prsc));
}
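
/*
 * Note: these helpers feed the batch cache's dependency tracking;
 * recording which resources a batch reads and writes is what allows
 * batches that touch the same resource to be ordered (or flushed)
 * correctly relative to each other.
 */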
 
static void
batch_draw_tracking(struct fd_batch *batch, const struct pipe_draw_info *info,
                    const struct pipe_draw_indirect_info *indirect)
{
        struct fd_context *ctx = batch->ctx;
        struct pipe_framebuffer_state *pfb = &batch->framebuffer;
        unsigned buffers = 0, restore_buffers = 0;

        /* NOTE: needs to be before resource_written(batch->query_buf), otherwise
         * query_buf may not be created yet.
         */
        fd_batch_set_stage(batch, FD_STAGE_DRAW);

        /*
         * Figure out the buffers/features we need:
         */

        fd_screen_lock(ctx->screen);

        if (ctx->dirty & (FD_DIRTY_FRAMEBUFFER | FD_DIRTY_ZSA)) {
                if (fd_depth_enabled(ctx)) {
                        if (fd_resource(pfb->zsbuf->texture)->valid) {
                                restore_buffers |= FD_BUFFER_DEPTH;
                        } else {
                                batch->invalidated |= FD_BUFFER_DEPTH;
                        }
                        batch->gmem_reason |= FD_GMEM_DEPTH_ENABLED;
                        if (fd_depth_write_enabled(ctx)) {
                                buffers |= FD_BUFFER_DEPTH;
                                resource_written(batch, pfb->zsbuf->texture);
                        } else {
                                resource_read(batch, pfb->zsbuf->texture);
                        }
                }

                if (fd_stencil_enabled(ctx)) {
                        if (fd_resource(pfb->zsbuf->texture)->valid) {
                                restore_buffers |= FD_BUFFER_STENCIL;
                        } else {
                                batch->invalidated |= FD_BUFFER_STENCIL;
                        }
                        batch->gmem_reason |= FD_GMEM_STENCIL_ENABLED;
                        buffers |= FD_BUFFER_STENCIL;
                        resource_written(batch, pfb->zsbuf->texture);
                }
        }

        if (fd_logicop_enabled(ctx))
                batch->gmem_reason |= FD_GMEM_LOGICOP_ENABLED;

        for (unsigned i = 0; i < pfb->nr_cbufs; i++) {
                struct pipe_resource *surf;

                if (!pfb->cbufs[i])
                        continue;

                surf = pfb->cbufs[i]->texture;

                if (fd_resource(surf)->valid) {
                        restore_buffers |= PIPE_CLEAR_COLOR0 << i;
                } else {
                        batch->invalidated |= PIPE_CLEAR_COLOR0 << i;
                }

                buffers |= PIPE_CLEAR_COLOR0 << i;

                if (fd_blend_enabled(ctx, i))
                        batch->gmem_reason |= FD_GMEM_BLEND_ENABLED;

                if (ctx->dirty & FD_DIRTY_FRAMEBUFFER)
                        resource_written(batch, pfb->cbufs[i]->texture);
        }

        /* Mark SSBOs */
        if (ctx->dirty_shader[PIPE_SHADER_FRAGMENT] & FD_DIRTY_SHADER_SSBO) {
                const struct fd_shaderbuf_stateobj *so = &ctx->shaderbuf[PIPE_SHADER_FRAGMENT];

                foreach_bit (i, so->enabled_mask & so->writable_mask)
                        resource_written(batch, so->sb[i].buffer);

                foreach_bit (i, so->enabled_mask & ~so->writable_mask)
                        resource_read(batch, so->sb[i].buffer);
        }

        if (ctx->dirty_shader[PIPE_SHADER_FRAGMENT] & FD_DIRTY_SHADER_IMAGE) {
                foreach_bit (i, ctx->shaderimg[PIPE_SHADER_FRAGMENT].enabled_mask) {
                        struct pipe_image_view *img =
                                        &ctx->shaderimg[PIPE_SHADER_FRAGMENT].si[i];
                        if (img->access & PIPE_IMAGE_ACCESS_WRITE)
                                resource_written(batch, img->resource);
                        else
                                resource_read(batch, img->resource);
                }
        }

        if (ctx->dirty_shader[PIPE_SHADER_VERTEX] & FD_DIRTY_SHADER_CONST) {
                foreach_bit (i, ctx->constbuf[PIPE_SHADER_VERTEX].enabled_mask)
                        resource_read(batch, ctx->constbuf[PIPE_SHADER_VERTEX].cb[i].buffer);
        }

        if (ctx->dirty_shader[PIPE_SHADER_FRAGMENT] & FD_DIRTY_SHADER_CONST) {
                foreach_bit (i, ctx->constbuf[PIPE_SHADER_FRAGMENT].enabled_mask)
                        resource_read(batch, ctx->constbuf[PIPE_SHADER_FRAGMENT].cb[i].buffer);
        }

        /* Mark VBOs as being read */
        if (ctx->dirty & FD_DIRTY_VTXBUF) {
                foreach_bit (i, ctx->vtx.vertexbuf.enabled_mask) {
                        assert(!ctx->vtx.vertexbuf.vb[i].is_user_buffer);
                        resource_read(batch, ctx->vtx.vertexbuf.vb[i].buffer.resource);
                }
        }

        /* Mark index buffer as being read */
        if (info->index_size)
                resource_read(batch, info->index.resource);

        /* Mark indirect draw buffer as being read */
        if (indirect && indirect->buffer)
                resource_read(batch, indirect->buffer);

        /* Mark textures as being read */
        if (ctx->dirty_shader[PIPE_SHADER_VERTEX] & FD_DIRTY_SHADER_TEX) {
                foreach_bit (i, ctx->tex[PIPE_SHADER_VERTEX].valid_textures)
                        resource_read(batch, ctx->tex[PIPE_SHADER_VERTEX].textures[i]->texture);
        }

        if (ctx->dirty_shader[PIPE_SHADER_FRAGMENT] & FD_DIRTY_SHADER_TEX) {
                foreach_bit (i, ctx->tex[PIPE_SHADER_FRAGMENT].valid_textures)
                        resource_read(batch, ctx->tex[PIPE_SHADER_FRAGMENT].textures[i]->texture);
        }

        /* Mark streamout buffers as being written.. */
        if (ctx->dirty & FD_DIRTY_STREAMOUT) {
                for (unsigned i = 0; i < ctx->streamout.num_targets; i++)
                        if (ctx->streamout.targets[i])
                                resource_written(batch, ctx->streamout.targets[i]->buffer);
        }

        resource_written(batch, batch->query_buf);

        list_for_each_entry(struct fd_acc_query, aq, &ctx->acc_active_queries, node)
                resource_written(batch, aq->prsc);

        fd_screen_unlock(ctx->screen);

        /* any buffers that haven't been cleared yet, we need to restore: */
        batch->restore |= restore_buffers & (FD_BUFFER_ALL & ~batch->invalidated);
        /* and any buffers used, need to be resolved: */
        batch->resolve |= buffers;
}
 
static void
fd_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info,
            const struct pipe_draw_indirect_info *indirect,
            const struct pipe_draw_start_count *draws,
            unsigned num_draws)
{
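        /* Gallium can hand us several draws at once; this path handles a
         * single draw, so unroll multi-draws into individual draws, bumping
         * the draw-id between them when the state tracker asks for it:
         */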
 
        if (num_draws > 1) {
           struct pipe_draw_info tmp_info = *info;

           for (unsigned i = 0; i < num_draws; i++) {
              fd_draw_vbo(pctx, &tmp_info, indirect, &draws[i], 1);
              if (tmp_info.increment_draw_id)
                 tmp_info.drawid++;
           }
           return;
        }

        if (!indirect && (!draws[0].count || !info->instance_count))
           return;

        struct fd_context *ctx = fd_context(pctx);

        /* for debugging problems with indirect draws, it is convenient to
         * be able to emulate them, to determine if the game is feeding us
         * bogus data:
         */
        if (indirect && indirect->buffer && (fd_mesa_debug & FD_DBG_NOINDR)) {
                util_draw_indirect(pctx, info, indirect);
                return;
        }

        if (info->mode != PIPE_PRIM_MAX &&
            !indirect &&
            !info->primitive_restart &&
            !u_trim_pipe_prim(info->mode, (unsigned*)&draws[0].count))
                return;

        /* TODO: push down the region versions into the tiles */
        if (!fd_render_condition_check(pctx))
                return;

        /* emulate unsupported primitives: */
        if (!fd_supported_prim(ctx, info->mode)) {
                if (ctx->streamout.num_targets > 0)
                        mesa_loge("stream-out with emulated prims");
                util_primconvert_save_rasterizer_state(ctx->primconvert, ctx->rasterizer);
                util_primconvert_draw_vbo(ctx->primconvert, info, &draws[0]);
                return;
        }

        /* Upload a user index buffer. */
        struct pipe_resource *indexbuf = NULL;
        unsigned index_offset = 0;
        struct pipe_draw_info new_info;
        if (info->index_size) {
                if (info->has_user_indices) {
                        if (!util_upload_index_buffer(pctx, info, &draws[0],
                                                      &indexbuf, &index_offset, 4))
                                return;
                        new_info = *info;
                        new_info.index.resource = indexbuf;
                        new_info.has_user_indices = false;
                        info = &new_info;
                } else {
                        indexbuf = info->index.resource;
                }
        }

        struct fd_batch *batch = fd_context_batch(ctx);
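
        /* A discard blit means the previous contents of the destination are
         * not needed, so rather than carrying accumulated batch state
         * forward, start over with a clean batch:
         */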
 
        if (ctx->in_discard_blit) {
                fd_batch_reset(batch);
                fd_context_all_dirty(ctx);
        }

        batch_draw_tracking(batch, info, indirect);

        while (unlikely(!fd_batch_lock_submit(batch))) {
                /* The current batch was flushed in batch_draw_tracking()
                 * so start anew.  We know this won't happen a second time
                 * since we are dealing with a fresh batch:
                 */
                fd_batch_reference(&batch, NULL);
                batch = fd_context_batch(ctx);
                batch_draw_tracking(batch, info, indirect);
                assert(ctx->batch == batch);
        }
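
        /* Record whether this draw came in via the discard-blit or
         * resource-shadowing paths, so later batch handling can tell
         * driver-internal blits apart from regular application draws:
         */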
 
        batch->blit = ctx->in_discard_blit;
        batch->back_blit = ctx->in_shadow;
        batch->num_draws++;

        /* Counting prims in sw doesn't work for GS and tessellation. For older
         * gens we don't have those stages and don't have the hw counters enabled,
         * so keep the count accurate for non-patch geometry.
         */
        unsigned prims;
        if ((info->mode != PIPE_PRIM_PATCHES) &&
                        (info->mode != PIPE_PRIM_MAX))
                prims = u_reduced_prims_for_vertices(info->mode, draws[0].count);
        else
                prims = 0;

        ctx->stats.draw_calls++;

        /* TODO prims_emitted should be clipped when the stream-out buffer is
         * not large enough.  See max_tf_vtx().. probably need to move that
         * into common code.  Although a bit more annoying since a2xx doesn't
         * use ir3 so no common way to get at the pipe_stream_output_info
         * which is needed for this calculation.
         */
        if (ctx->streamout.num_targets > 0)
                ctx->stats.prims_emitted += prims;
        ctx->stats.prims_generated += prims;

        /* Clearing last_fence must come after the batch dependency tracking
         * (resource_read()/resource_written()), as that can trigger a flush,
         * re-populating last_fence
         */
        fd_fence_ref(&ctx->last_fence, NULL);

        struct pipe_framebuffer_state *pfb = &batch->framebuffer;
        DBG("%p: %ux%u num_draws=%u (%s/%s)", batch,
                pfb->width, pfb->height, batch->num_draws,
                util_format_short_name(pipe_surface_format(pfb->cbufs[0])),
                util_format_short_name(pipe_surface_format(pfb->zsbuf)));

        if (ctx->draw_vbo(ctx, info, indirect, &draws[0], index_offset))
                batch->needs_flush = true;

        batch->num_vertices += draws[0].count * info->instance_count;

        for (unsigned i = 0; i < ctx->streamout.num_targets; i++)
                ctx->streamout.offsets[i] += draws[0].count;

        if (fd_mesa_debug & FD_DBG_DDRAW)
                fd_context_all_dirty(ctx);

        fd_batch_unlock_submit(batch);
        fd_batch_check_size(batch);
        fd_batch_reference(&batch, NULL);

        if (info == &new_info)
                pipe_resource_reference(&indexbuf, NULL);
}
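
/*
 * Dependency and buffer-state tracking for clears, analogous to
 * batch_draw_tracking() above:
 */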
 
static void
batch_clear_tracking(struct fd_batch *batch, unsigned buffers)
{
        struct fd_context *ctx = batch->ctx;
        struct pipe_framebuffer_state *pfb = &batch->framebuffer;
        unsigned cleared_buffers;

        /* pctx->clear() is only for full-surface clears, so scissor is
         * equivalent to having GL_SCISSOR_TEST disabled:
         */
        batch->max_scissor.minx = 0;
        batch->max_scissor.miny = 0;
        batch->max_scissor.maxx = pfb->width;
        batch->max_scissor.maxy = pfb->height;

        /* for bookkeeping about which buffers have been cleared (and thus
         * can fully or partially skip mem2gmem) we need to ignore buffers
         * that have already had a draw, in case apps do silly things like
         * clear after draw (i.e. if you only clear the color buffer, but
         * something like alpha-test causes side effects from the draw in
         * the depth buffer, etc)
         */
        cleared_buffers = buffers & (FD_BUFFER_ALL & ~batch->restore);
        batch->cleared |= buffers;
        batch->invalidated |= cleared_buffers;

        batch->resolve |= buffers;
        batch->needs_flush = true;

        fd_screen_lock(ctx->screen);

        if (buffers & PIPE_CLEAR_COLOR)
                for (unsigned i = 0; i < pfb->nr_cbufs; i++)
                        if (buffers & (PIPE_CLEAR_COLOR0 << i))
                                resource_written(batch, pfb->cbufs[i]->texture);

        if (buffers & (PIPE_CLEAR_DEPTH | PIPE_CLEAR_STENCIL)) {
                resource_written(batch, pfb->zsbuf->texture);
                batch->gmem_reason |= FD_GMEM_CLEARS_DEPTH_STENCIL;
        }

        resource_written(batch, batch->query_buf);

        list_for_each_entry(struct fd_acc_query, aq, &ctx->acc_active_queries, node)
                resource_written(batch, aq->prsc);

        fd_screen_unlock(ctx->screen);
}

static void
fd_clear(struct pipe_context *pctx, unsigned buffers,
                const struct pipe_scissor_state *scissor_state,
                const union pipe_color_union *color, double depth,
                unsigned stencil)
{
        struct fd_context *ctx = fd_context(pctx);

        /* TODO: push down the region versions into the tiles */
        if (!fd_render_condition_check(pctx))
                return;

        struct fd_batch *batch = fd_context_batch(ctx);

        if (ctx->in_discard_blit) {
                fd_batch_reset(batch);
                fd_context_all_dirty(ctx);
        }

        batch_clear_tracking(batch, buffers);

        while (unlikely(!fd_batch_lock_submit(batch))) {
                /* The current batch was flushed in batch_clear_tracking()
                 * so start anew.  We know this won't happen a second time
                 * since we are dealing with a fresh batch:
                 */
                fd_batch_reference(&batch, NULL);
                batch = fd_context_batch(ctx);
                batch_clear_tracking(batch, buffers);
                assert(ctx->batch == batch);
        }

        /* Clearing last_fence must come after the batch dependency tracking
         * (resource_read()/resource_written()), as that can trigger a flush,
         * re-populating last_fence
         */
        fd_fence_ref(&ctx->last_fence, NULL);

        struct pipe_framebuffer_state *pfb = &batch->framebuffer;
        DBG("%p: %x %ux%u depth=%f, stencil=%u (%s/%s)", batch, buffers,
                pfb->width, pfb->height, depth, stencil,
                util_format_short_name(pipe_surface_format(pfb->cbufs[0])),
                util_format_short_name(pipe_surface_format(pfb->zsbuf)));

        /* If the per-gen backend doesn't implement ctx->clear(), fall back
         * to the generic blitter clear:
         */
        bool fallback = true;

        if (ctx->clear) {
                fd_batch_set_stage(batch, FD_STAGE_CLEAR);

                if (ctx->clear(ctx, buffers, color, depth, stencil)) {
                        if (fd_mesa_debug & FD_DBG_DCLEAR)
                                fd_context_all_dirty(ctx);

                        fallback = false;
                }
        }

        fd_batch_unlock_submit(batch);
        fd_batch_check_size(batch);

        if (fallback) {
                fd_blitter_clear(pctx, buffers, color, depth, stencil);
        }

        fd_batch_reference(&batch, NULL);
}
 
static void
fd_clear_render_target(struct pipe_context *pctx, struct pipe_surface *ps,
                const union pipe_color_union *color,
                unsigned x, unsigned y, unsigned w, unsigned h,
                bool render_condition_enabled)
{
        DBG("TODO: x=%u, y=%u, w=%u, h=%u", x, y, w, h);
}

static void
fd_clear_depth_stencil(struct pipe_context *pctx, struct pipe_surface *ps,
                unsigned buffers, double depth, unsigned stencil,
                unsigned x, unsigned y, unsigned w, unsigned h,
                bool render_condition_enabled)
{
        DBG("TODO: buffers=%u, depth=%f, stencil=%u, x=%u, y=%u, w=%u, h=%u",
                        buffers, depth, stencil, x, y, w, h);
}

static void
fd_launch_grid(struct pipe_context *pctx, const struct pipe_grid_info *info)
{
        struct fd_context *ctx = fd_context(pctx);
        const struct fd_shaderbuf_stateobj *so = &ctx->shaderbuf[PIPE_SHADER_COMPUTE];
        struct fd_batch *batch, *save_batch = NULL;
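
        /* Compute gets its own batch: allocate a fresh one, make it current
         * for the duration of the grid launch, and restore the previous
         * batch afterwards:
         */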
 
        batch = fd_bc_alloc_batch(&ctx->screen->batch_cache, ctx, true);
        fd_batch_reference(&save_batch, ctx->batch);
        fd_batch_reference(&ctx->batch, batch);
        fd_context_all_dirty(ctx);

        fd_screen_lock(ctx->screen);

        /* Mark SSBOs */
        foreach_bit (i, so->enabled_mask & so->writable_mask)
                resource_written(batch, so->sb[i].buffer);

        foreach_bit (i, so->enabled_mask & ~so->writable_mask)
                resource_read(batch, so->sb[i].buffer);

        foreach_bit (i, ctx->shaderimg[PIPE_SHADER_COMPUTE].enabled_mask) {
                struct pipe_image_view *img =
                        &ctx->shaderimg[PIPE_SHADER_COMPUTE].si[i];
                if (img->access & PIPE_IMAGE_ACCESS_WRITE)
                        resource_written(batch, img->resource);
                else
                        resource_read(batch, img->resource);
        }

        /* UBOs are read */
        foreach_bit (i, ctx->constbuf[PIPE_SHADER_COMPUTE].enabled_mask)
                resource_read(batch, ctx->constbuf[PIPE_SHADER_COMPUTE].cb[i].buffer);

        /* Mark textures as being read */
        foreach_bit (i, ctx->tex[PIPE_SHADER_COMPUTE].valid_textures)
                resource_read(batch, ctx->tex[PIPE_SHADER_COMPUTE].textures[i]->texture);

        /* For global buffers, we don't really know if read or written, so assume
         * the worst:
         */
        foreach_bit (i, ctx->global_bindings.enabled_mask)
                resource_written(batch, ctx->global_bindings.buf[i]);

        if (info->indirect)
                resource_read(batch, info->indirect);

        fd_screen_unlock(ctx->screen);

        batch->needs_flush = true;
        ctx->launch_grid(ctx, info);

        fd_batch_flush(batch);

        fd_batch_reference(&ctx->batch, save_batch);
        fd_context_all_dirty(ctx);
        fd_batch_reference(&save_batch, NULL);
        fd_batch_reference(&batch, NULL);
}

void
fd_draw_init(struct pipe_context *pctx)
{
        pctx->draw_vbo = fd_draw_vbo;
        pctx->clear = fd_clear;
        pctx->clear_render_target = fd_clear_render_target;
        pctx->clear_depth_stencil = fd_clear_depth_stencil;

        if (has_compute(fd_screen(pctx->screen))) {
                pctx->launch_grid = fd_launch_grid;
        }
}