/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors: Marek Olšák <maraeo@gmail.com>
 */

#include "r600_pipe_common.h"
#include "r600_cs.h"
#include "evergreen_compute.h"
#include "tgsi/tgsi_parse.h"
#include "util/list.h"
#include "util/u_draw_quad.h"
#include "util/u_memory.h"
#include "util/format/u_format_s3tc.h"
#include "util/u_upload_mgr.h"
#include "util/os_time.h"
#include "vl/vl_decoder.h"
#include "vl/vl_video_buffer.h"
#include "radeon_video.h"

#include <sys/utsname.h>

#ifdef LLVM_AVAILABLE
#include <llvm-c/TargetMachine.h>
#endif

struct r600_multi_fence {
	struct pipe_reference reference;
	struct pipe_fence_handle *gfx;
	struct pipe_fence_handle *sdma;

	/* If the context wasn't flushed at fence creation, this is non-NULL. */
	struct {
		struct r600_common_context *ctx;
		unsigned ib_index;
	} gfx_unflushed;
};
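
/*
 * A multi fence bundles one fence per ring. When the frontend asks for a
 * deferred flush, r600_flush_from_st fills in gfx_unflushed instead of
 * submitting; r600_fence_finish later flushes for real if the IB the
 * fence belongs to still hasn't been submitted (the ib_index check).
 */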

/**
 * Write an EOP event.
 *
 * \param event		EVENT_TYPE_*
 * \param event_flags	Optional cache flush flags (TC)
 * \param data_sel	1 = fence, 3 = timestamp
 * \param buf		Buffer
 * \param va		GPU address
 * \param old_value	Previous fence value (for a bug workaround)
 * \param new_value	Fence value to write for this event.
 */
void r600_gfx_write_event_eop(struct r600_common_context *ctx,
			      unsigned event, unsigned event_flags,
			      unsigned data_sel,
			      struct r600_resource *buf, uint64_t va,
			      uint32_t new_fence, unsigned query_type)
{
	struct radeon_cmdbuf *cs = &ctx->gfx.cs;
	unsigned op = EVENT_TYPE(event) |
		      EVENT_INDEX(5) |
		      event_flags;
	unsigned sel = EOP_DATA_SEL(data_sel);

	radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
	radeon_emit(cs, op);
	radeon_emit(cs, va);
	radeon_emit(cs, ((va >> 32) & 0xffff) | sel);
	radeon_emit(cs, new_fence); /* immediate data */
	radeon_emit(cs, 0); /* unused */

	if (buf)
		r600_emit_reloc(ctx, &ctx->gfx, buf, RADEON_USAGE_WRITE |
				RADEON_PRIO_QUERY);
}

unsigned r600_gfx_write_fence_dwords(struct r600_common_screen *screen)
{
	unsigned dwords = 6;

	if (!screen->info.r600_has_virtual_memory)
		dwords += 2;

	return dwords;
}

void r600_gfx_wait_fence(struct r600_common_context *ctx,
			 struct r600_resource *buf,
			 uint64_t va, uint32_t ref, uint32_t mask)
{
	struct radeon_cmdbuf *cs = &ctx->gfx.cs;

	radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
	radeon_emit(cs, WAIT_REG_MEM_EQUAL | WAIT_REG_MEM_MEM_SPACE(1));
	radeon_emit(cs, va);
	radeon_emit(cs, va >> 32);
	radeon_emit(cs, ref); /* reference value */
	radeon_emit(cs, mask); /* mask */
	radeon_emit(cs, 4); /* poll interval */

	if (buf)
		r600_emit_reloc(ctx, &ctx->gfx, buf, RADEON_USAGE_READ |
				RADEON_PRIO_QUERY);
}
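
/*
 * Illustrative pairing of the two helpers above (a sketch, not a real
 * caller; "fence_buf" and "fence_value" are hypothetical):
 *
 *	uint64_t va = fence_buf->gpu_address;
 *	r600_gfx_write_event_eop(ctx, EVENT_TYPE_BOTTOM_OF_PIPE_TS, 0,
 *				 1, fence_buf, va, fence_value, 0);
 *	r600_gfx_wait_fence(ctx, fence_buf, va, fence_value, 0xffffffff);
 *
 * data_sel=1 selects a 32-bit fence write, matching the wait's ref/mask.
 */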

void r600_draw_rectangle(struct blitter_context *blitter,
			 void *vertex_elements_cso,
			 blitter_get_vs_func get_vs,
			 int x1, int y1, int x2, int y2,
			 float depth, unsigned num_instances,
			 enum blitter_attrib_type type,
			 const union blitter_attrib *attrib)
{
	struct r600_common_context *rctx =
		(struct r600_common_context*)util_blitter_get_pipe(blitter);
	struct pipe_viewport_state viewport;
	struct pipe_resource *buf = NULL;
	unsigned offset = 0;
	float *vb;

	rctx->b.bind_vertex_elements_state(&rctx->b, vertex_elements_cso);
	rctx->b.bind_vs_state(&rctx->b, get_vs(blitter));

	/* Some operations (like color resolve on r6xx) don't work
	 * with the conventional primitive types.
	 * One that works is PT_RECTLIST, which we use here. */

	/* setup viewport */
	viewport.scale[0] = 1.0f;
	viewport.scale[1] = 1.0f;
	viewport.scale[2] = 1.0f;
	viewport.translate[0] = 0.0f;
	viewport.translate[1] = 0.0f;
	viewport.translate[2] = 0.0f;
	rctx->b.set_viewport_states(&rctx->b, 0, 1, &viewport);

	/* Upload vertices. The hw rectangle has only 3 vertices,
	 * the 4th one is derived from the first 3.
	 * The vertex specification should match u_blitter's vertex element state. */
	u_upload_alloc(rctx->b.stream_uploader, 0, sizeof(float) * 24,
		       rctx->screen->info.tcc_cache_line_size,
		       &offset, &buf, (void**)&vb);
	if (!buf)
		return;

	vb[0] = x1;
	vb[1] = y1;
	vb[2] = depth;
	vb[3] = 1;

	vb[8] = x1;
	vb[9] = y2;
	vb[10] = depth;
	vb[11] = 1;

	vb[16] = x2;
	vb[17] = y1;
	vb[18] = depth;
	vb[19] = 1;

	switch (type) {
	case UTIL_BLITTER_ATTRIB_COLOR:
		memcpy(vb+4, attrib->color, sizeof(float)*4);
		memcpy(vb+12, attrib->color, sizeof(float)*4);
		memcpy(vb+20, attrib->color, sizeof(float)*4);
		break;
	case UTIL_BLITTER_ATTRIB_TEXCOORD_XYZW:
	case UTIL_BLITTER_ATTRIB_TEXCOORD_XY:
		vb[6] = vb[14] = vb[22] = attrib->texcoord.z;
		vb[7] = vb[15] = vb[23] = attrib->texcoord.w;

		vb[4] = attrib->texcoord.x1;
		vb[5] = attrib->texcoord.y1;
		vb[12] = attrib->texcoord.x1;
		vb[13] = attrib->texcoord.y2;
		vb[20] = attrib->texcoord.x2;
		vb[21] = attrib->texcoord.y1;
		break;
	default:; /* Nothing to do. */
	}

	/* draw */
	struct pipe_vertex_buffer vbuffer = {};
	vbuffer.buffer.resource = buf;
	vbuffer.stride = 2 * 4 * sizeof(float); /* vertex size */
	vbuffer.buffer_offset = offset;

	rctx->b.set_vertex_buffers(&rctx->b, blitter->vb_slot, 1, 0, false, &vbuffer);
	util_draw_arrays_instanced(&rctx->b, R600_PRIM_RECTANGLE_LIST, 0, 3,
				   num_instances);
	pipe_resource_reference(&buf, NULL);
}
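
/*
 * Vertex layout used above (for reference): 3 vertices of 8 floats each,
 * position in floats 0-3 (x, y, depth, 1) and one attribute in floats
 * 4-7, which is why the stride is 2 * 4 * sizeof(float).
 */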

static void r600_dma_emit_wait_idle(struct r600_common_context *rctx)
{
	struct radeon_cmdbuf *cs = &rctx->dma.cs;

	if (rctx->chip_class >= EVERGREEN)
		radeon_emit(cs, 0xf0000000); /* NOP */
	else {
		/* TODO: R600-R700 should use the FENCE packet.
		 * CS checker support is required. */
	}
}

void r600_need_dma_space(struct r600_common_context *ctx, unsigned num_dw,
			 struct r600_resource *dst, struct r600_resource *src)
{
	uint64_t vram = (uint64_t)ctx->dma.cs.used_vram_kb * 1024;
	uint64_t gtt = (uint64_t)ctx->dma.cs.used_gart_kb * 1024;

	if (dst) {
		vram += dst->vram_usage;
		gtt += dst->gart_usage;
	}
	if (src) {
		vram += src->vram_usage;
		gtt += src->gart_usage;
	}

	/* Flush the GFX IB if DMA depends on it. */
	if (radeon_emitted(&ctx->gfx.cs, ctx->initial_gfx_cs_size) &&
	    ((dst &&
	      ctx->ws->cs_is_buffer_referenced(&ctx->gfx.cs, dst->buf,
					       RADEON_USAGE_READWRITE)) ||
	     (src &&
	      ctx->ws->cs_is_buffer_referenced(&ctx->gfx.cs, src->buf,
					       RADEON_USAGE_WRITE))))
		ctx->gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);

	/* Flush if there's not enough space, or if the memory usage per IB
	 * is too large.
	 *
	 * IBs using too little memory are limited by the IB submission overhead.
	 * IBs using too much memory are limited by the kernel/TTM overhead.
	 * Too long IBs create CPU-GPU pipeline bubbles and add latency.
	 *
	 * This heuristic makes sure that DMA requests are executed
	 * very soon after the call is made and lowers memory usage.
	 * It improves texture upload performance by keeping the DMA
	 * engine busy while uploads are being submitted.
	 */
	num_dw++; /* for emit_wait_idle below */
	if (!ctx->ws->cs_check_space(&ctx->dma.cs, num_dw) ||
	    ctx->dma.cs.used_vram_kb + ctx->dma.cs.used_gart_kb > 64 * 1024 ||
	    !radeon_cs_memory_below_limit(ctx->screen, &ctx->dma.cs, vram, gtt)) {
		ctx->dma.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
		assert((num_dw + ctx->dma.cs.current.cdw) <= ctx->dma.cs.current.max_dw);
	}

	/* Wait for idle if either buffer has been used in the IB before to
	 * prevent read-after-write hazards.
	 */
	if ((dst &&
	     ctx->ws->cs_is_buffer_referenced(&ctx->dma.cs, dst->buf,
					      RADEON_USAGE_READWRITE)) ||
	    (src &&
	     ctx->ws->cs_is_buffer_referenced(&ctx->dma.cs, src->buf,
					      RADEON_USAGE_WRITE)))
		r600_dma_emit_wait_idle(ctx);

	/* If GPUVM is not supported, the CS checker needs 2 entries
	 * in the buffer list per packet, which has to be done manually.
	 */
	if (ctx->screen->info.r600_has_virtual_memory) {
		if (dst)
			radeon_add_to_buffer_list(ctx, &ctx->dma, dst,
						  RADEON_USAGE_WRITE);
		if (src)
			radeon_add_to_buffer_list(ctx, &ctx->dma, src,
						  RADEON_USAGE_READ);
	}

	/* this function is called before all DMA calls, so increment this. */
	ctx->num_dma_calls++;
}
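
/*
 * Typical call pattern (sketch; the dword count is illustrative): reserve
 * space before emitting any SDMA packet so the flush and wait-idle logic
 * above runs first.
 *
 *	r600_need_dma_space(ctx, 5, rdst, rsrc);
 *	... emit the copy packet dwords into ctx->dma.cs ...
 */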

void r600_preflush_suspend_features(struct r600_common_context *ctx)
{
	/* suspend queries */
	if (!list_is_empty(&ctx->active_queries))
		r600_suspend_queries(ctx);

	ctx->streamout.suspended = false;
	if (ctx->streamout.begin_emitted) {
		r600_emit_streamout_end(ctx);
		ctx->streamout.suspended = true;
	}
}

void r600_postflush_resume_features(struct r600_common_context *ctx)
{
	if (ctx->streamout.suspended) {
		ctx->streamout.append_bitmask = ctx->streamout.enabled_mask;
		r600_streamout_buffers_dirty(ctx);
	}

	/* resume queries */
	if (!list_is_empty(&ctx->active_queries))
		r600_resume_queries(ctx);
}

static void r600_fence_server_sync(struct pipe_context *ctx,
				   struct pipe_fence_handle *fence)
{
	/* radeon synchronizes all rings by default and will not implement
	 * fence imports.
	 */
}

static void r600_flush_from_st(struct pipe_context *ctx,
			       struct pipe_fence_handle **fence,
			       unsigned flags)
{
	struct pipe_screen *screen = ctx->screen;
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct radeon_winsys *ws = rctx->ws;
	struct pipe_fence_handle *gfx_fence = NULL;
	struct pipe_fence_handle *sdma_fence = NULL;
	bool deferred_fence = false;
	unsigned rflags = PIPE_FLUSH_ASYNC;

	if (flags & PIPE_FLUSH_END_OF_FRAME)
		rflags |= PIPE_FLUSH_END_OF_FRAME;

	/* DMA IBs are preambles to gfx IBs, therefore must be flushed first. */
	if (rctx->dma.cs.priv)
		rctx->dma.flush(rctx, rflags, fence ? &sdma_fence : NULL);

	if (!radeon_emitted(&rctx->gfx.cs, rctx->initial_gfx_cs_size)) {
		if (fence)
			ws->fence_reference(&gfx_fence, rctx->last_gfx_fence);
		if (!(flags & PIPE_FLUSH_DEFERRED))
			ws->cs_sync_flush(&rctx->gfx.cs);
	} else {
		/* Instead of flushing, create a deferred fence. Constraints:
		 * - the gallium frontend must allow a deferred flush.
		 * - the gallium frontend must request a fence.
		 * Thread safety in fence_finish must be ensured by the gallium frontend.
		 */
		if (flags & PIPE_FLUSH_DEFERRED && fence) {
			gfx_fence = rctx->ws->cs_get_next_fence(&rctx->gfx.cs);
			deferred_fence = true;
		} else {
			rctx->gfx.flush(rctx, rflags, fence ? &gfx_fence : NULL);
		}
	}

	/* Both engines can signal out of order, so we need to keep both fences. */
	if (fence) {
		struct r600_multi_fence *multi_fence =
			CALLOC_STRUCT(r600_multi_fence);
		if (!multi_fence) {
			ws->fence_reference(&sdma_fence, NULL);
			ws->fence_reference(&gfx_fence, NULL);
			goto finish;
		}

		multi_fence->reference.count = 1;
		/* If both fences are NULL, fence_finish will always return true. */
		multi_fence->gfx = gfx_fence;
		multi_fence->sdma = sdma_fence;

		if (deferred_fence) {
			multi_fence->gfx_unflushed.ctx = rctx;
			multi_fence->gfx_unflushed.ib_index = rctx->num_gfx_cs_flushes;
		}

		screen->fence_reference(screen, fence, NULL);
		*fence = (struct pipe_fence_handle*)multi_fence;
	}
finish:
	if (!(flags & PIPE_FLUSH_DEFERRED)) {
		if (rctx->dma.cs.priv)
			ws->cs_sync_flush(&rctx->dma.cs);
		ws->cs_sync_flush(&rctx->gfx.cs);
	}
}
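
/*
 * Frontend-side view (hypothetical caller): with PIPE_FLUSH_DEFERRED only
 * cs_get_next_fence() is recorded above; the real submission happens in
 * r600_fence_finish when somebody waits on the returned fence.
 *
 *	struct pipe_fence_handle *f = NULL;
 *	pipe->flush(pipe, &f, PIPE_FLUSH_DEFERRED);
 *	...
 *	screen->fence_finish(screen, NULL, f, PIPE_TIMEOUT_INFINITE);
 */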

static void r600_flush_dma_ring(void *ctx, unsigned flags,
				struct pipe_fence_handle **fence)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct radeon_cmdbuf *cs = &rctx->dma.cs;
	struct radeon_saved_cs saved;
	bool check_vm =
		(rctx->screen->debug_flags & DBG_CHECK_VM) &&
		rctx->check_vm_faults;

	if (!radeon_emitted(cs, 0)) {
		if (fence)
			rctx->ws->fence_reference(fence, rctx->last_sdma_fence);
		return;
	}

	if (check_vm)
		radeon_save_cs(rctx->ws, cs, &saved, true);

	rctx->ws->cs_flush(cs, flags, &rctx->last_sdma_fence);
	if (fence)
		rctx->ws->fence_reference(fence, rctx->last_sdma_fence);

	if (check_vm) {
		/* Use conservative timeout 800ms, after which we won't wait any
		 * longer and assume the GPU is hung.
		 */
		rctx->ws->fence_wait(rctx->ws, rctx->last_sdma_fence, 800*1000*1000);

		rctx->check_vm_faults(rctx, &saved, RING_DMA);
		radeon_clear_saved_cs(&saved);
	}
}

/**
 * Store a linearized copy of all chunks of \p cs together with the buffer
 * list as fallback.
 */
void radeon_save_cs(struct radeon_winsys *ws, struct radeon_cmdbuf *cs,
		    struct radeon_saved_cs *saved, bool get_buffer_list)
{
	uint32_t *buf;
	unsigned i;

	/* Save the IB chunks. */
	saved->num_dw = cs->prev_dw + cs->current.cdw;
	saved->ib = MALLOC(4 * saved->num_dw);
	if (!saved->ib)
		goto oom;

	buf = saved->ib;
	for (i = 0; i < cs->num_prev; ++i) {
		memcpy(buf, cs->prev[i].buf, cs->prev[i].cdw * 4);
		buf += cs->prev[i].cdw;
	}
	memcpy(buf, cs->current.buf, cs->current.cdw * 4);

	if (!get_buffer_list)
		return;

	/* Save the buffer list. */
	saved->bo_count = ws->cs_get_buffer_list(cs, NULL);
	saved->bo_list = CALLOC(saved->bo_count,
				sizeof(saved->bo_list[0]));
	if (!saved->bo_list) {
		FREE(saved->ib);
		goto oom;
	}
	ws->cs_get_buffer_list(cs, saved->bo_list);

	return;

oom:
	fprintf(stderr, "%s: out of memory\n", __func__);
	memset(saved, 0, sizeof(*saved));
}
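
/*
 * Pairing sketch: a snapshot taken with radeon_save_cs must be released
 * with radeon_clear_saved_cs, as the DBG_CHECK_VM path in
 * r600_flush_dma_ring above does.
 *
 *	struct radeon_saved_cs saved;
 *	radeon_save_cs(rctx->ws, cs, &saved, true);
 *	... submit, then inspect saved.ib / saved.bo_list on a fault ...
 *	radeon_clear_saved_cs(&saved);
 */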

void radeon_clear_saved_cs(struct radeon_saved_cs *saved)
{
	FREE(saved->ib);
	FREE(saved->bo_list);

	memset(saved, 0, sizeof(*saved));
}

static enum pipe_reset_status r600_get_reset_status(struct pipe_context *ctx)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;

	return rctx->ws->ctx_query_reset_status(rctx->ctx, false, NULL);
}

static void r600_set_debug_callback(struct pipe_context *ctx,
				    const struct util_debug_callback *cb)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;

	if (cb)
		rctx->debug = *cb;
	else
		memset(&rctx->debug, 0, sizeof(rctx->debug));
}

static void r600_set_device_reset_callback(struct pipe_context *ctx,
					   const struct pipe_device_reset_callback *cb)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;

	if (cb)
		rctx->device_reset_callback = *cb;
	else
		memset(&rctx->device_reset_callback, 0,
		       sizeof(rctx->device_reset_callback));
}

bool r600_check_device_reset(struct r600_common_context *rctx)
{
	enum pipe_reset_status status;

	if (!rctx->device_reset_callback.reset)
		return false;

	if (!rctx->b.get_device_reset_status)
		return false;

	status = rctx->b.get_device_reset_status(&rctx->b);
	if (status == PIPE_NO_RESET)
		return false;

	rctx->device_reset_callback.reset(rctx->device_reset_callback.data, status);
	return true;
}

static void r600_dma_clear_buffer_fallback(struct pipe_context *ctx,
					   struct pipe_resource *dst,
					   uint64_t offset, uint64_t size,
					   unsigned value)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;

	rctx->clear_buffer(ctx, dst, offset, size, value, R600_COHERENCY_NONE);
}

static bool r600_resource_commit(struct pipe_context *pctx,
				 struct pipe_resource *resource,
				 unsigned level, struct pipe_box *box,
				 bool commit)
{
	struct r600_common_context *ctx = (struct r600_common_context *)pctx;
	struct r600_resource *res = r600_resource(resource);

	/*
	 * Since buffer commitment changes cannot be pipelined, we need to
	 * (a) flush any pending commands that refer to the buffer we're about
	 *     to change, and
	 * (b) wait for threaded submit to finish, including those that were
	 *     triggered by some other, earlier operation.
	 */
	if (radeon_emitted(&ctx->gfx.cs, ctx->initial_gfx_cs_size) &&
	    ctx->ws->cs_is_buffer_referenced(&ctx->gfx.cs,
					     res->buf, RADEON_USAGE_READWRITE)) {
		ctx->gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
	}
	if (radeon_emitted(&ctx->dma.cs, 0) &&
	    ctx->ws->cs_is_buffer_referenced(&ctx->dma.cs,
					     res->buf, RADEON_USAGE_READWRITE)) {
		ctx->dma.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
	}

	ctx->ws->cs_sync_flush(&ctx->dma.cs);
	ctx->ws->cs_sync_flush(&ctx->gfx.cs);

	assert(resource->target == PIPE_BUFFER);

	return ctx->ws->buffer_commit(ctx->ws, res->buf, box->x, box->width, commit);
}

bool r600_common_context_init(struct r600_common_context *rctx,
			      struct r600_common_screen *rscreen,
			      unsigned context_flags)
{
	slab_create_child(&rctx->pool_transfers, &rscreen->pool_transfers);
	slab_create_child(&rctx->pool_transfers_unsync, &rscreen->pool_transfers);

	rctx->screen = rscreen;
	rctx->ws = rscreen->ws;
	rctx->family = rscreen->family;
	rctx->chip_class = rscreen->chip_class;

	rctx->b.invalidate_resource = r600_invalidate_resource;
	rctx->b.resource_commit = r600_resource_commit;
	rctx->b.buffer_map = r600_buffer_transfer_map;
	rctx->b.texture_map = r600_texture_transfer_map;
	rctx->b.transfer_flush_region = r600_buffer_flush_region;
	rctx->b.buffer_unmap = r600_buffer_transfer_unmap;
	rctx->b.texture_unmap = r600_texture_transfer_unmap;
	rctx->b.texture_subdata = u_default_texture_subdata;
	rctx->b.flush = r600_flush_from_st;
	rctx->b.set_debug_callback = r600_set_debug_callback;
	rctx->b.fence_server_sync = r600_fence_server_sync;
	rctx->dma_clear_buffer = r600_dma_clear_buffer_fallback;

	/* evergreen_compute.c has a special codepath for global buffers.
	 * Everything else can use the direct path.
	 */
	if ((rscreen->chip_class == EVERGREEN || rscreen->chip_class == CAYMAN) &&
	    (context_flags & PIPE_CONTEXT_COMPUTE_ONLY))
		rctx->b.buffer_subdata = u_default_buffer_subdata;
	else
		rctx->b.buffer_subdata = r600_buffer_subdata;

	rctx->b.get_device_reset_status = r600_get_reset_status;
	rctx->b.set_device_reset_callback = r600_set_device_reset_callback;

	r600_init_context_texture_functions(rctx);
	r600_init_viewport_functions(rctx);
	r600_streamout_init(rctx);
	r600_query_init(rctx);
	cayman_init_msaa(&rctx->b);

	u_suballocator_init(&rctx->allocator_zeroed_memory, &rctx->b, rscreen->info.gart_page_size,
			    0, PIPE_USAGE_DEFAULT, 0, true);

	rctx->b.stream_uploader = u_upload_create(&rctx->b, 1024 * 1024,
						  0, PIPE_USAGE_STREAM, 0);
	if (!rctx->b.stream_uploader)
		return false;

	rctx->b.const_uploader = u_upload_create(&rctx->b, 128 * 1024,
						 0, PIPE_USAGE_DEFAULT, 0);
	if (!rctx->b.const_uploader)
		return false;

	rctx->ctx = rctx->ws->ctx_create(rctx->ws);
	if (!rctx->ctx)
		return false;

	if (rscreen->info.num_rings[RING_DMA] && !(rscreen->debug_flags & DBG_NO_ASYNC_DMA)) {
		rctx->ws->cs_create(&rctx->dma.cs, rctx->ctx, RING_DMA,
				    r600_flush_dma_ring, rctx, false);
		rctx->dma.flush = r600_flush_dma_ring;
	}

	return true;
}

void r600_common_context_cleanup(struct r600_common_context *rctx)
{
	if (rctx->query_result_shader)
		rctx->b.delete_compute_state(&rctx->b, rctx->query_result_shader);

	rctx->ws->cs_destroy(&rctx->gfx.cs);
	rctx->ws->cs_destroy(&rctx->dma.cs);
	if (rctx->ctx)
		rctx->ws->ctx_destroy(rctx->ctx);

	if (rctx->b.stream_uploader)
		u_upload_destroy(rctx->b.stream_uploader);
	if (rctx->b.const_uploader)
		u_upload_destroy(rctx->b.const_uploader);

	slab_destroy_child(&rctx->pool_transfers);
	slab_destroy_child(&rctx->pool_transfers_unsync);

	u_suballocator_destroy(&rctx->allocator_zeroed_memory);
	rctx->ws->fence_reference(&rctx->last_gfx_fence, NULL);
	rctx->ws->fence_reference(&rctx->last_sdma_fence, NULL);
	r600_resource_reference(&rctx->eop_bug_scratch, NULL);
}

/*
 * pipe_screen
 */

static const struct debug_named_value common_debug_options[] = {
	/* logging */
	{ "tex", DBG_TEX, "Print texture info" },
	{ "nir", DBG_NIR, "Enable experimental NIR shaders" },
	{ "compute", DBG_COMPUTE, "Print compute info" },
	{ "vm", DBG_VM, "Print virtual addresses when creating resources" },
	{ "info", DBG_INFO, "Print driver information" },

	/* shaders */
	{ "fs", DBG_FS, "Print fetch shaders" },
	{ "vs", DBG_VS, "Print vertex shaders" },
	{ "gs", DBG_GS, "Print geometry shaders" },
	{ "ps", DBG_PS, "Print pixel shaders" },
	{ "cs", DBG_CS, "Print compute shaders" },
	{ "tcs", DBG_TCS, "Print tessellation control shaders" },
	{ "tes", DBG_TES, "Print tessellation evaluation shaders" },
	{ "preoptir", DBG_PREOPT_IR, "Print the LLVM IR before initial optimizations" },
	{ "checkir", DBG_CHECK_IR, "Enable additional sanity checks on shader IR" },

	{ "testdma", DBG_TEST_DMA, "Invoke SDMA tests and exit." },
	{ "testvmfaultcp", DBG_TEST_VMFAULT_CP, "Invoke a CP VM fault test and exit." },
	{ "testvmfaultsdma", DBG_TEST_VMFAULT_SDMA, "Invoke a SDMA VM fault test and exit." },
	{ "testvmfaultshader", DBG_TEST_VMFAULT_SHADER, "Invoke a shader VM fault test and exit." },

	/* features */
	{ "nodma", DBG_NO_ASYNC_DMA, "Disable asynchronous DMA" },
	{ "nohyperz", DBG_NO_HYPERZ, "Disable Hyper-Z" },
	/* GL uses the word INVALIDATE, gallium uses the word DISCARD */
	{ "noinvalrange", DBG_NO_DISCARD_RANGE, "Disable handling of INVALIDATE_RANGE map flags" },
	{ "no2d", DBG_NO_2D_TILING, "Disable 2D tiling" },
	{ "notiling", DBG_NO_TILING, "Disable tiling" },
	{ "switch_on_eop", DBG_SWITCH_ON_EOP, "Program WD/IA to switch on end-of-packet." },
	{ "forcedma", DBG_FORCE_DMA, "Use asynchronous DMA for all operations when possible." },
	{ "nowc", DBG_NO_WC, "Disable GTT write combining" },
	{ "check_vm", DBG_CHECK_VM, "Check VM faults and dump debug info." },

	DEBUG_NAMED_VALUE_END /* must be last */
};

static const char* r600_get_vendor(struct pipe_screen* pscreen)
{
	return "X.Org";
}

static const char* r600_get_device_vendor(struct pipe_screen* pscreen)
{
	return "AMD";
}

static const char *r600_get_family_name(const struct r600_common_screen *rscreen)
{
	switch (rscreen->info.family) {
	case CHIP_R600: return "AMD R600";
	case CHIP_RV610: return "AMD RV610";
	case CHIP_RV630: return "AMD RV630";
	case CHIP_RV670: return "AMD RV670";
	case CHIP_RV620: return "AMD RV620";
	case CHIP_RV635: return "AMD RV635";
	case CHIP_RS780: return "AMD RS780";
	case CHIP_RS880: return "AMD RS880";
	case CHIP_RV770: return "AMD RV770";
	case CHIP_RV730: return "AMD RV730";
	case CHIP_RV710: return "AMD RV710";
	case CHIP_RV740: return "AMD RV740";
	case CHIP_CEDAR: return "AMD CEDAR";
	case CHIP_REDWOOD: return "AMD REDWOOD";
	case CHIP_JUNIPER: return "AMD JUNIPER";
	case CHIP_CYPRESS: return "AMD CYPRESS";
	case CHIP_HEMLOCK: return "AMD HEMLOCK";
	case CHIP_PALM: return "AMD PALM";
	case CHIP_SUMO: return "AMD SUMO";
	case CHIP_SUMO2: return "AMD SUMO2";
	case CHIP_BARTS: return "AMD BARTS";
	case CHIP_TURKS: return "AMD TURKS";
	case CHIP_CAICOS: return "AMD CAICOS";
	case CHIP_CAYMAN: return "AMD CAYMAN";
	case CHIP_ARUBA: return "AMD ARUBA";
	default: return "AMD unknown";
	}
}

static void r600_disk_cache_create(struct r600_common_screen *rscreen)
{
	/* Don't use the cache if shader dumping is enabled. */
	if (rscreen->debug_flags & DBG_ALL_SHADERS)
		return;

	struct mesa_sha1 ctx;
	unsigned char sha1[20];
	char cache_id[20 * 2 + 1];

	_mesa_sha1_init(&ctx);
	if (!disk_cache_get_function_identifier(r600_disk_cache_create,
						&ctx))
		return;

	_mesa_sha1_final(&ctx, sha1);
	disk_cache_format_hex_id(cache_id, sha1, 20 * 2);

	/* These flags affect shader compilation. */
	uint64_t shader_debug_flags =
		rscreen->debug_flags &
		(DBG_FS_CORRECT_DERIVS_AFTER_KILL |
		 DBG_UNSAFE_MATH);

	rscreen->disk_shader_cache =
		disk_cache_create(r600_get_family_name(rscreen),
				  cache_id,
				  shader_debug_flags);
}

static struct disk_cache *r600_get_disk_shader_cache(struct pipe_screen *pscreen)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)pscreen;

	return rscreen->disk_shader_cache;
}

static const char* r600_get_name(struct pipe_screen* pscreen)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)pscreen;

	return rscreen->renderer_string;
}

static float r600_get_paramf(struct pipe_screen* pscreen,
			     enum pipe_capf param)
{
	switch (param) {
	case PIPE_CAPF_MIN_LINE_WIDTH:
	case PIPE_CAPF_MIN_LINE_WIDTH_AA:
	case PIPE_CAPF_MIN_POINT_SIZE:
	case PIPE_CAPF_MIN_POINT_SIZE_AA:
		return 1;

	case PIPE_CAPF_POINT_SIZE_GRANULARITY:
	case PIPE_CAPF_LINE_WIDTH_GRANULARITY:
		return 0.1;

	case PIPE_CAPF_MAX_LINE_WIDTH:
	case PIPE_CAPF_MAX_LINE_WIDTH_AA:
	case PIPE_CAPF_MAX_POINT_SIZE:
	case PIPE_CAPF_MAX_POINT_SIZE_AA:
		return 8191.0f;
	case PIPE_CAPF_MAX_TEXTURE_ANISOTROPY:
		return 16.0f;
	case PIPE_CAPF_MAX_TEXTURE_LOD_BIAS:
		return 16.0f;

	case PIPE_CAPF_MIN_CONSERVATIVE_RASTER_DILATE:
	case PIPE_CAPF_MAX_CONSERVATIVE_RASTER_DILATE:
	case PIPE_CAPF_CONSERVATIVE_RASTER_DILATE_GRANULARITY:
		return 0.0f;
	}
	return 0.0f;
}

static int r600_get_video_param(struct pipe_screen *screen,
				enum pipe_video_profile profile,
				enum pipe_video_entrypoint entrypoint,
				enum pipe_video_cap param)
{
	switch (param) {
	case PIPE_VIDEO_CAP_SUPPORTED:
		return vl_profile_supported(screen, profile, entrypoint);
	case PIPE_VIDEO_CAP_NPOT_TEXTURES:
		return 1;
	case PIPE_VIDEO_CAP_MAX_WIDTH:
	case PIPE_VIDEO_CAP_MAX_HEIGHT:
		return vl_video_buffer_max_size(screen);
	case PIPE_VIDEO_CAP_PREFERED_FORMAT:
		return PIPE_FORMAT_NV12;
	case PIPE_VIDEO_CAP_PREFERS_INTERLACED:
		return false;
	case PIPE_VIDEO_CAP_SUPPORTS_INTERLACED:
		return false;
	case PIPE_VIDEO_CAP_SUPPORTS_PROGRESSIVE:
		return true;
	case PIPE_VIDEO_CAP_MAX_LEVEL:
		return vl_level_supported(screen, profile);
	default:
		return 0;
	}
}

const char *r600_get_llvm_processor_name(enum radeon_family family)

static unsigned get_max_threads_per_block(struct r600_common_screen *screen,
					  enum pipe_shader_ir ir_type)
{
	if (ir_type != PIPE_SHADER_IR_TGSI &&
	    ir_type != PIPE_SHADER_IR_NIR)
		return 256;

	if (screen->chip_class >= EVERGREEN)
		return 1024;

	return 256;
}

static int r600_get_compute_param(struct pipe_screen *screen,
				  enum pipe_shader_ir ir_type,
				  enum pipe_compute_cap param,
				  void *ret)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen *)screen;

	//TODO: select these params by asic
	switch (param) {
	case PIPE_COMPUTE_CAP_IR_TARGET: {
		const char *gpu;
		const char *triple = "r600--";
		gpu = r600_get_llvm_processor_name(rscreen->family);
		if (ret) {
			sprintf(ret, "%s-%s", gpu, triple);
		}
		/* +2 for dash and terminating NUL byte */
		return (strlen(triple) + strlen(gpu) + 2) * sizeof(char);
	}
	case PIPE_COMPUTE_CAP_GRID_DIMENSION:
		if (ret) {
			uint64_t *grid_dimension = ret;
			grid_dimension[0] = 3;
		}
		return 1 * sizeof(uint64_t);

	case PIPE_COMPUTE_CAP_MAX_GRID_SIZE:
		if (ret) {
			uint64_t *grid_size = ret;
			grid_size[0] = 65535;
			grid_size[1] = 65535;
			grid_size[2] = 65535;
		}
		return 3 * sizeof(uint64_t);

	case PIPE_COMPUTE_CAP_MAX_BLOCK_SIZE:
		if (ret) {
			uint64_t *block_size = ret;
			unsigned threads_per_block = get_max_threads_per_block(rscreen, ir_type);
			block_size[0] = threads_per_block;
			block_size[1] = threads_per_block;
			block_size[2] = threads_per_block;
		}
		return 3 * sizeof(uint64_t);

	case PIPE_COMPUTE_CAP_MAX_THREADS_PER_BLOCK:
		if (ret) {
			uint64_t *max_threads_per_block = ret;
			*max_threads_per_block = get_max_threads_per_block(rscreen, ir_type);
		}
		return sizeof(uint64_t);

	case PIPE_COMPUTE_CAP_ADDRESS_BITS:
		if (ret) {
			uint32_t *address_bits = ret;
			address_bits[0] = 32;
		}
		return 1 * sizeof(uint32_t);

	case PIPE_COMPUTE_CAP_MAX_GLOBAL_SIZE:
		if (ret) {
			uint64_t *max_global_size = ret;
			uint64_t max_mem_alloc_size;

			r600_get_compute_param(screen, ir_type,
					       PIPE_COMPUTE_CAP_MAX_MEM_ALLOC_SIZE,
					       &max_mem_alloc_size);

			/* In OpenCL, the MAX_MEM_ALLOC_SIZE must be at least
			 * 1/4 of the MAX_GLOBAL_SIZE. Since the
			 * MAX_MEM_ALLOC_SIZE is fixed for older kernels,
			 * make sure we never report more than
			 * 4 * MAX_MEM_ALLOC_SIZE.
			 */
			*max_global_size = MIN2(4 * max_mem_alloc_size,
						MAX2(rscreen->info.gart_size,
						     rscreen->info.vram_size));
		}
		return sizeof(uint64_t);

	case PIPE_COMPUTE_CAP_MAX_LOCAL_SIZE:
		if (ret) {
			uint64_t *max_local_size = ret;
			/* Value reported by the closed source driver. */
			*max_local_size = 32768;
		}
		return sizeof(uint64_t);

	case PIPE_COMPUTE_CAP_MAX_INPUT_SIZE:
		if (ret) {
			uint64_t *max_input_size = ret;
			/* Value reported by the closed source driver. */
			*max_input_size = 1024;
		}
		return sizeof(uint64_t);

	case PIPE_COMPUTE_CAP_MAX_MEM_ALLOC_SIZE:
		if (ret) {
			uint64_t *max_mem_alloc_size = ret;

			*max_mem_alloc_size = rscreen->info.max_alloc_size;
		}
		return sizeof(uint64_t);

	case PIPE_COMPUTE_CAP_MAX_CLOCK_FREQUENCY:
		if (ret) {
			uint32_t *max_clock_frequency = ret;
			*max_clock_frequency = rscreen->info.max_shader_clock;
		}
		return sizeof(uint32_t);

	case PIPE_COMPUTE_CAP_MAX_COMPUTE_UNITS:
		if (ret) {
			uint32_t *max_compute_units = ret;
			*max_compute_units = rscreen->info.num_good_compute_units;
		}
		return sizeof(uint32_t);

	case PIPE_COMPUTE_CAP_IMAGES_SUPPORTED:
		if (ret) {
			uint32_t *images_supported = ret;
			*images_supported = 0;
		}
		return sizeof(uint32_t);
	case PIPE_COMPUTE_CAP_MAX_PRIVATE_SIZE:
		break; /* unused */
	case PIPE_COMPUTE_CAP_SUBGROUP_SIZE:
		if (ret) {
			uint32_t *subgroup_size = ret;
			*subgroup_size = r600_wavefront_size(rscreen->family);
		}
		return sizeof(uint32_t);
	case PIPE_COMPUTE_CAP_MAX_VARIABLE_THREADS_PER_BLOCK:
		if (ret) {
			uint64_t *max_variable_threads_per_block = ret;
			*max_variable_threads_per_block = 0;
		}
		return sizeof(uint64_t);
	}

	fprintf(stderr, "unknown PIPE_COMPUTE_CAP %d\n", param);
	return 0;
}
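
/*
 * Worked example of the MAX_GLOBAL_SIZE clamp above (hypothetical
 * numbers): with max_alloc_size = 256 MB, gart_size = 2 GB and
 * vram_size = 1 GB, the reported value is MIN2(4 * 256 MB, MAX2(2 GB,
 * 1 GB)) = 1 GB, keeping MAX_MEM_ALLOC_SIZE at least 1/4 of it.
 */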

static uint64_t r600_get_timestamp(struct pipe_screen *screen)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;

	return 1000000 * rscreen->ws->query_value(rscreen->ws, RADEON_TIMESTAMP) /
			rscreen->info.clock_crystal_freq;
}
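
/*
 * Unit sketch for the conversion above (assuming clock_crystal_freq is
 * reported in kHz): ticks * 1000000 / freq_kHz yields nanoseconds, the
 * unit pipe_screen::get_timestamp is expected to return.
 */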

static void r600_fence_reference(struct pipe_screen *screen,
				 struct pipe_fence_handle **dst,
				 struct pipe_fence_handle *src)
{
	struct radeon_winsys *ws = ((struct r600_common_screen*)screen)->ws;
	struct r600_multi_fence **rdst = (struct r600_multi_fence **)dst;
	struct r600_multi_fence *rsrc = (struct r600_multi_fence *)src;

	if (pipe_reference(&(*rdst)->reference, &rsrc->reference)) {
		ws->fence_reference(&(*rdst)->gfx, NULL);
		ws->fence_reference(&(*rdst)->sdma, NULL);
		FREE(*rdst);
	}
	*rdst = rsrc;
}

static bool r600_fence_finish(struct pipe_screen *screen,
			      struct pipe_context *ctx,
			      struct pipe_fence_handle *fence,
			      uint64_t timeout)
{
	struct radeon_winsys *rws = ((struct r600_common_screen*)screen)->ws;
	struct r600_multi_fence *rfence = (struct r600_multi_fence *)fence;
	struct r600_common_context *rctx;
	int64_t abs_timeout = os_time_get_absolute_timeout(timeout);

	ctx = threaded_context_unwrap_sync(ctx);
	rctx = ctx ? (struct r600_common_context*)ctx : NULL;

	if (rfence->sdma) {
		if (!rws->fence_wait(rws, rfence->sdma, timeout))
			return false;

		/* Recompute the timeout after waiting. */
		if (timeout && timeout != PIPE_TIMEOUT_INFINITE) {
			int64_t time = os_time_get_nano();
			timeout = abs_timeout > time ? abs_timeout - time : 0;
		}
	}

	if (!rfence->gfx)
		return true;

	/* Flush the gfx IB if it hasn't been flushed yet. */
	if (rctx &&
	    rfence->gfx_unflushed.ctx == rctx &&
	    rfence->gfx_unflushed.ib_index == rctx->num_gfx_cs_flushes) {
		rctx->gfx.flush(rctx, timeout ? 0 : PIPE_FLUSH_ASYNC, NULL);
		rfence->gfx_unflushed.ctx = NULL;

		if (!timeout)
			return false;

		/* Recompute the timeout after all that. */
		if (timeout && timeout != PIPE_TIMEOUT_INFINITE) {
			int64_t time = os_time_get_nano();
			timeout = abs_timeout > time ? abs_timeout - time : 0;
		}
	}

	return rws->fence_wait(rws, rfence->gfx, timeout);
}

static void r600_query_memory_info(struct pipe_screen *screen,
				   struct pipe_memory_info *info)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	struct radeon_winsys *ws = rscreen->ws;
	unsigned vram_usage, gtt_usage;

	info->total_device_memory = rscreen->info.vram_size / 1024;
	info->total_staging_memory = rscreen->info.gart_size / 1024;

	/* The real TTM memory usage is somewhat random, because:
	 *
	 * 1) TTM delays freeing memory, because it can only free it after
	 *    fences expire.
	 *
	 * 2) The memory usage can be really low if big VRAM evictions are
	 *    taking place, but the real usage is well above the size of VRAM.
	 *
	 * Instead, return statistics of this process.
	 */
	vram_usage = ws->query_value(ws, RADEON_REQUESTED_VRAM_MEMORY) / 1024;
	gtt_usage = ws->query_value(ws, RADEON_REQUESTED_GTT_MEMORY) / 1024;

	info->avail_device_memory =
		vram_usage <= info->total_device_memory ?
			info->total_device_memory - vram_usage : 0;
	info->avail_staging_memory =
		gtt_usage <= info->total_staging_memory ?
			info->total_staging_memory - gtt_usage : 0;

	info->device_memory_evicted =
		ws->query_value(ws, RADEON_NUM_BYTES_MOVED) / 1024;

	/* Just return the number of evicted 64KB pages. */
	info->nr_device_memory_evictions = info->device_memory_evicted / 64;
}

struct pipe_resource *r600_resource_create_common(struct pipe_screen *screen,
						  const struct pipe_resource *templ)
{
	if (templ->target == PIPE_BUFFER) {
		return r600_buffer_create(screen, templ, 256);
	} else {
		return r600_texture_create(screen, templ);
	}
}

static const void *
r600_get_compiler_options(struct pipe_screen *screen,
			  enum pipe_shader_ir ir,
			  enum pipe_shader_type shader)
{
	assert(ir == PIPE_SHADER_IR_NIR);

	struct r600_common_screen *rscreen = (struct r600_common_screen *)screen;

	return &rscreen->nir_options;
}

extern bool r600_lower_to_scalar_instr_filter(const nir_instr *instr, const void *);

static void r600_resource_destroy(struct pipe_screen *screen,
				  struct pipe_resource *res)
{
	if (res->target == PIPE_BUFFER) {
		if (r600_resource(res)->compute_global_bo)
			r600_compute_global_buffer_destroy(screen, res);
		else
			r600_buffer_destroy(screen, res);
	} else {
		r600_texture_destroy(screen, res);
	}
}

bool r600_common_screen_init(struct r600_common_screen *rscreen,
			     struct radeon_winsys *ws)
{
	char family_name[32] = {}, kernel_version[128] = {};
	struct utsname uname_data;
	const char *chip_name;

	ws->query_info(ws, &rscreen->info, false, false);
	rscreen->ws = ws;

	chip_name = r600_get_family_name(rscreen);

	if (uname(&uname_data) == 0)
		snprintf(kernel_version, sizeof(kernel_version),
			 " / %s", uname_data.release);

	snprintf(rscreen->renderer_string, sizeof(rscreen->renderer_string),
		 "%s (%sDRM %i.%i.%i%s"
#ifdef LLVM_AVAILABLE
		 ", LLVM " MESA_LLVM_VERSION_STRING
#endif
		 ")",
		 chip_name, family_name, rscreen->info.drm_major,
		 rscreen->info.drm_minor, rscreen->info.drm_patchlevel,
		 kernel_version);

	rscreen->b.get_name = r600_get_name;
	rscreen->b.get_vendor = r600_get_vendor;
	rscreen->b.get_device_vendor = r600_get_device_vendor;
	rscreen->b.get_disk_shader_cache = r600_get_disk_shader_cache;
	rscreen->b.get_compute_param = r600_get_compute_param;
	rscreen->b.get_paramf = r600_get_paramf;
	rscreen->b.get_timestamp = r600_get_timestamp;
	rscreen->b.get_compiler_options = r600_get_compiler_options;
	rscreen->b.fence_finish = r600_fence_finish;
	rscreen->b.fence_reference = r600_fence_reference;
	rscreen->b.resource_destroy = r600_resource_destroy;
	rscreen->b.resource_from_user_memory = r600_buffer_from_user_memory;
	rscreen->b.query_memory_info = r600_query_memory_info;

	if (rscreen->info.has_video_hw.uvd_decode) {
		rscreen->b.get_video_param = rvid_get_video_param;
		rscreen->b.is_video_format_supported = rvid_is_format_supported;
	} else {
		rscreen->b.get_video_param = r600_get_video_param;
		rscreen->b.is_video_format_supported = vl_video_buffer_is_format_supported;
	}

	r600_init_screen_texture_functions(rscreen);
	r600_init_screen_query_functions(rscreen);

	rscreen->family = rscreen->info.family;
	rscreen->chip_class = rscreen->info.chip_class;
	rscreen->debug_flags |= debug_get_flags_option("R600_DEBUG", common_debug_options, 0);

	r600_disk_cache_create(rscreen);

	slab_create_parent(&rscreen->pool_transfers, sizeof(struct r600_transfer), 64);

	rscreen->force_aniso = MIN2(16, debug_get_num_option("R600_TEX_ANISO", -1));
	if (rscreen->force_aniso >= 0) {
		printf("radeon: Forcing anisotropy filter to %ix\n",
		       /* round down to a power of two */
		       1 << util_logbase2(rscreen->force_aniso));
	}

	(void) mtx_init(&rscreen->aux_context_lock, mtx_plain);
	(void) mtx_init(&rscreen->gpu_load_mutex, mtx_plain);

	if (rscreen->debug_flags & DBG_INFO) {
		printf("pci (domain:bus:dev.func): %04x:%02x:%02x.%x\n",
		       rscreen->info.pci_domain, rscreen->info.pci_bus,
		       rscreen->info.pci_dev, rscreen->info.pci_func);
		printf("pci_id = 0x%x\n", rscreen->info.pci_id);
		printf("family = %i (%s)\n", rscreen->info.family,
		       r600_get_family_name(rscreen));
		printf("chip_class = %i\n", rscreen->info.chip_class);
		printf("pte_fragment_size = %u\n", rscreen->info.pte_fragment_size);
		printf("gart_page_size = %u\n", rscreen->info.gart_page_size);
		printf("gart_size = %i MB\n", (int)DIV_ROUND_UP(rscreen->info.gart_size, 1024*1024));
		printf("vram_size = %i MB\n", (int)DIV_ROUND_UP(rscreen->info.vram_size, 1024*1024));
		printf("vram_vis_size = %i MB\n", (int)DIV_ROUND_UP(rscreen->info.vram_vis_size, 1024*1024));
		printf("max_alloc_size = %i MB\n",
		       (int)DIV_ROUND_UP(rscreen->info.max_alloc_size, 1024*1024));
		printf("min_alloc_size = %u\n", rscreen->info.min_alloc_size);
		printf("has_dedicated_vram = %u\n", rscreen->info.has_dedicated_vram);
		printf("r600_has_virtual_memory = %i\n", rscreen->info.r600_has_virtual_memory);
		printf("gfx_ib_pad_with_type2 = %i\n", rscreen->info.gfx_ib_pad_with_type2);
		printf("uvd_decode = %u\n", rscreen->info.has_video_hw.uvd_decode);
		printf("num_rings[RING_DMA] = %i\n", rscreen->info.num_rings[RING_DMA]);
		printf("num_rings[RING_COMPUTE] = %u\n", rscreen->info.num_rings[RING_COMPUTE]);
		printf("uvd_fw_version = %u\n", rscreen->info.uvd_fw_version);
		printf("vce_fw_version = %u\n", rscreen->info.vce_fw_version);
		printf("me_fw_version = %i\n", rscreen->info.me_fw_version);
		printf("pfp_fw_version = %i\n", rscreen->info.pfp_fw_version);
		printf("ce_fw_version = %i\n", rscreen->info.ce_fw_version);
		printf("vce_harvest_config = %i\n", rscreen->info.vce_harvest_config);
		printf("clock_crystal_freq = %i\n", rscreen->info.clock_crystal_freq);
		printf("tcc_cache_line_size = %u\n", rscreen->info.tcc_cache_line_size);
		printf("drm = %i.%i.%i\n", rscreen->info.drm_major,
		       rscreen->info.drm_minor, rscreen->info.drm_patchlevel);
		printf("has_userptr = %i\n", rscreen->info.has_userptr);
		printf("has_syncobj = %u\n", rscreen->info.has_syncobj);

		printf("r600_max_quad_pipes = %i\n", rscreen->info.r600_max_quad_pipes);
		printf("max_shader_clock = %i\n", rscreen->info.max_shader_clock);
		printf("num_good_compute_units = %i\n", rscreen->info.num_good_compute_units);
		printf("max_se = %i\n", rscreen->info.max_se);
		printf("max_sh_per_se = %i\n", rscreen->info.max_sa_per_se);

		printf("r600_gb_backend_map = %i\n", rscreen->info.r600_gb_backend_map);
		printf("r600_gb_backend_map_valid = %i\n", rscreen->info.r600_gb_backend_map_valid);
		printf("r600_num_banks = %i\n", rscreen->info.r600_num_banks);
		printf("num_render_backends = %i\n", rscreen->info.max_render_backends);
		printf("num_tile_pipes = %i\n", rscreen->info.num_tile_pipes);
		printf("pipe_interleave_bytes = %i\n", rscreen->info.pipe_interleave_bytes);
		printf("enabled_rb_mask = 0x%x\n", rscreen->info.enabled_rb_mask);
		printf("max_alignment = %u\n", (unsigned)rscreen->info.max_alignment);
	}

	const struct nir_shader_compiler_options nir_options = {
		.fuse_ffma16 = true,
		.fuse_ffma32 = true,
		.fuse_ffma64 = true,
		.lower_flrp32 = true,
		.lower_flrp64 = true,
		.lower_isign = true,
		.lower_fsign = true,
		.lower_doubles_options = nir_lower_fp64_full_software,
		.lower_int64_options = ~0,
		.lower_extract_byte = true,
		.lower_extract_word = true,
		.lower_insert_byte = true,
		.lower_insert_word = true,
		.lower_rotate = true,
		.max_unroll_iterations = 32,
		.lower_interpolate_at = true,
		.vectorize_io = true,
		.use_interpolated_input_intrinsics = true,
		.lower_bitfield_extract = true,
		.lower_bitfield_insert_to_bitfield_select = true,
		.has_fused_comp_and_csel = true,
		.lower_find_msb_to_reverse = true,
		.lower_to_scalar = true,
		.lower_to_scalar_filter = r600_lower_to_scalar_instr_filter,
		.linker_ignore_precision = true,
	};

	rscreen->nir_options = nir_options;

	return true;
}

void r600_destroy_common_screen(struct r600_common_screen *rscreen)
{
	r600_perfcounters_destroy(rscreen);
	r600_gpu_load_kill_thread(rscreen);

	mtx_destroy(&rscreen->gpu_load_mutex);
	mtx_destroy(&rscreen->aux_context_lock);
	rscreen->aux_context->destroy(rscreen->aux_context);

	slab_destroy_parent(&rscreen->pool_transfers);

	disk_cache_destroy(rscreen->disk_shader_cache);
	rscreen->ws->destroy(rscreen->ws);
	FREE(rscreen);
}

bool r600_can_dump_shader(struct r600_common_screen *rscreen,
			  unsigned processor)
{
	return rscreen->debug_flags & (1 << processor);
}

bool r600_extra_shader_checks(struct r600_common_screen *rscreen, unsigned processor)
{
	return (rscreen->debug_flags & DBG_CHECK_IR) ||
	       r600_can_dump_shader(rscreen, processor);
}

void r600_screen_clear_buffer(struct r600_common_screen *rscreen, struct pipe_resource *dst,
			      uint64_t offset, uint64_t size, unsigned value)
{
	struct r600_common_context *rctx = (struct r600_common_context*)rscreen->aux_context;

	mtx_lock(&rscreen->aux_context_lock);
	rctx->dma_clear_buffer(&rctx->b, dst, offset, size, value);
	rscreen->aux_context->flush(rscreen->aux_context, NULL, 0);
	mtx_unlock(&rscreen->aux_context_lock);
}