~mmach/netext73/mesa-haswell

« back to all changes in this revision

Viewing changes to src/intel/vulkan/anv_measure.c

  • Committer: mmach
  • Date: 2022-09-22 19:56:13 UTC
  • Revision ID: netbit73@gmail.com-20220922195613-wtik9mmy20tmor0i
2022-09-22 21:17:09

Show diffs side-by-side

added added

removed removed

Lines of Context:
1
 
/*
2
 
 * Copyright © 2020 Intel Corporation
3
 
 *
4
 
 * Permission is hereby granted, free of charge, to any person obtaining a
5
 
 * copy of this software and associated documentation files (the "Software"),
6
 
 * to deal in the Software without restriction, including without limitation
7
 
 * on the rights to use, copy, modify, merge, publish, distribute, sub
8
 
 * license, and/or sell copies of the Software, and to permit persons to whom
9
 
 * the Software is furnished to do so, subject to the following conditions:
10
 
 *
11
 
 * The above copyright notice and this permission notice (including the next
12
 
 * paragraph) shall be included in all copies or substantial portions of the
13
 
 * Software.
14
 
 *
15
 
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
 
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
 
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18
 
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19
 
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20
 
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21
 
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22
 
 */
23
 
 
24
 
#include "anv_measure.h"
25
 
 
26
 
#include <fcntl.h>
27
 
#include <sys/stat.h>
28
 
#include <sys/types.h>
29
 
 
30
 
#include "common/intel_measure.h"
31
 
#include "util/debug.h"
32
 
 
33
 
/* Per-command-buffer measurement state: a GPU buffer receiving timestamp
 * writes plus the driver-independent intel_measure bookkeeping.
 */
struct anv_measure_batch {
   struct anv_bo *bo;               /* mapped BO the GPU writes timestamps into */
   struct intel_measure_batch base; /* common intel_measure state; ends in a
                                     * flexible array of snapshots */
};
37
 
 
38
 
void
39
 
anv_measure_device_init(struct anv_physical_device *device)
40
 
{
41
 
   switch (device->info.verx10) {
42
 
   case 125:
43
 
      device->cmd_emit_timestamp = &gfx125_cmd_emit_timestamp;
44
 
      break;
45
 
   case 120:
46
 
      device->cmd_emit_timestamp = &gfx12_cmd_emit_timestamp;
47
 
      break;
48
 
   case 110:
49
 
      device->cmd_emit_timestamp = &gfx11_cmd_emit_timestamp;
50
 
      break;
51
 
   case 90:
52
 
      device->cmd_emit_timestamp = &gfx9_cmd_emit_timestamp;
53
 
      break;
54
 
   case 80:
55
 
      device->cmd_emit_timestamp = &gfx8_cmd_emit_timestamp;
56
 
      break;
57
 
   case 75:
58
 
      device->cmd_emit_timestamp = &gfx75_cmd_emit_timestamp;
59
 
      break;
60
 
   case 70:
61
 
      device->cmd_emit_timestamp = &gfx7_cmd_emit_timestamp;
62
 
      break;
63
 
   default:
64
 
      assert(false);
65
 
   }
66
 
 
67
 
   /* initialise list of measure structures that await rendering */
68
 
   struct intel_measure_device *measure_device = &device->measure_device;
69
 
   intel_measure_init(measure_device);
70
 
   struct intel_measure_config *config = measure_device->config;
71
 
   if (config == NULL)
72
 
      return;
73
 
 
74
 
   /* the final member of intel_measure_ringbuffer is a zero-length array of
75
 
    * intel_measure_buffered_result objects.  Allocate additional space for
76
 
    * the buffered objects based on the run-time configurable buffer_size
77
 
    */
78
 
   const size_t rb_bytes = sizeof(struct intel_measure_ringbuffer) +
79
 
      config->buffer_size * sizeof(struct intel_measure_buffered_result);
80
 
   struct intel_measure_ringbuffer * rb =
81
 
      vk_zalloc(&device->instance->vk.alloc,
82
 
                rb_bytes, 8,
83
 
                VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
84
 
   measure_device->ringbuffer = rb;
85
 
}
86
 
 
87
 
static struct intel_measure_config*
88
 
config_from_command_buffer(struct anv_cmd_buffer *cmd_buffer)
89
 
{
90
 
   return cmd_buffer->device->physical->measure_device.config;
91
 
}
92
 
 
93
 
void
94
 
anv_measure_init(struct anv_cmd_buffer *cmd_buffer)
95
 
{
96
 
   struct intel_measure_config *config = config_from_command_buffer(cmd_buffer);
97
 
   struct anv_device *device = cmd_buffer->device;
98
 
 
99
 
   if (!config || !config->enabled) {
100
 
      cmd_buffer->measure = NULL;
101
 
      return;
102
 
   }
103
 
 
104
 
   /* the final member of anv_measure is a zero-length array of
105
 
    * intel_measure_snapshot objects.  Create additional space for the
106
 
    * snapshot objects based on the run-time configurable batch_size
107
 
    */
108
 
   const size_t batch_bytes = sizeof(struct anv_measure_batch) +
109
 
      config->batch_size * sizeof(struct intel_measure_snapshot);
110
 
   struct anv_measure_batch * measure =
111
 
      vk_alloc(&cmd_buffer->vk.pool->alloc,
112
 
               batch_bytes, 8,
113
 
               VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
114
 
 
115
 
   memset(measure, 0, batch_bytes);
116
 
   ASSERTED VkResult result =
117
 
      anv_device_alloc_bo(device, "measure data",
118
 
                          config->batch_size * sizeof(uint64_t),
119
 
                          ANV_BO_ALLOC_MAPPED,
120
 
                          0,
121
 
                          (struct anv_bo**)&measure->bo);
122
 
   measure->base.timestamps = measure->bo->map;
123
 
   assert(result == VK_SUCCESS);
124
 
 
125
 
   cmd_buffer->measure = measure;
126
 
}
127
 
 
128
 
/* Emit a timestamp write marking the start of a measurement interval, and
 * record which shader programs were bound at that point.
 *
 * type:        kind of work being measured (draw, compute, ...)
 * event_name:  label used in reports; NULL selects the default string for type
 * count:       event-specific count stored verbatim in the snapshot
 */
static void
anv_measure_start_snapshot(struct anv_cmd_buffer *cmd_buffer,
                           enum intel_measure_snapshot_type type,
                           const char *event_name,
                           uint32_t count)
{
   struct anv_batch *batch = &cmd_buffer->batch;
   struct anv_measure_batch *measure = cmd_buffer->measure;
   struct anv_physical_device *device = cmd_buffer->device->physical;
   struct intel_measure_device *measure_device = &device->measure_device;

   const unsigned device_frame = measure_device->frame;

   /* if the command buffer is not associated with a frame, associate it with
    * the most recent acquired frame
    */
   if (measure->base.frame == 0)
      measure->base.frame = device_frame;

//   uintptr_t framebuffer = (uintptr_t)cmd_buffer->state.framebuffer;
//
//   if (!measure->base.framebuffer &&
//       cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY)
//      /* secondary command buffer inherited the framebuffer from the primary */
//      measure->base.framebuffer = framebuffer;
//
//   /* verify framebuffer has been properly tracked */
//   assert(type == INTEL_SNAPSHOT_END ||
//          framebuffer == measure->base.framebuffer ||
//          framebuffer == 0 ); /* compute has no framebuffer */

   /* claim the next snapshot slot; the GPU writes a 64-bit timestamp at the
    * matching offset in measure->bo
    */
   unsigned index = measure->base.index++;

   (*device->cmd_emit_timestamp)(batch, cmd_buffer->device,
                                 (struct anv_address) {
                                    .bo = measure->bo,
                                    .offset = index * sizeof(uint64_t) },
                                 true /* end_of_pipe */);

   if (event_name == NULL)
      event_name = intel_measure_snapshot_string(type);

   struct intel_measure_snapshot *snapshot = &(measure->base.snapshots[index]);
   memset(snapshot, 0, sizeof(*snapshot));
   snapshot->type = type;
   snapshot->count = (unsigned) count;
   snapshot->event_count = measure->base.event_count;
   snapshot->event_name = event_name;
//   snapshot->framebuffer = framebuffer;

   /* record the bound pipelines so reports can attribute the interval to
    * specific shader programs
    */
   if (type == INTEL_SNAPSHOT_COMPUTE && cmd_buffer->state.compute.pipeline) {
      snapshot->cs = (uintptr_t) cmd_buffer->state.compute.pipeline->cs;
   } else if (cmd_buffer->state.gfx.pipeline) {
      const struct anv_graphics_pipeline *pipeline =
         cmd_buffer->state.gfx.pipeline;
      snapshot->vs = (uintptr_t) pipeline->shaders[MESA_SHADER_VERTEX];
      snapshot->tcs = (uintptr_t) pipeline->shaders[MESA_SHADER_TESS_CTRL];
      snapshot->tes = (uintptr_t) pipeline->shaders[MESA_SHADER_TESS_EVAL];
      snapshot->gs = (uintptr_t) pipeline->shaders[MESA_SHADER_GEOMETRY];
      snapshot->fs = (uintptr_t) pipeline->shaders[MESA_SHADER_FRAGMENT];
   }
}
190
 
 
191
 
static void
192
 
anv_measure_end_snapshot(struct anv_cmd_buffer *cmd_buffer,
193
 
                         uint32_t event_count)
194
 
{
195
 
   struct anv_batch *batch = &cmd_buffer->batch;
196
 
   struct anv_measure_batch *measure = cmd_buffer->measure;
197
 
   struct anv_physical_device *device = cmd_buffer->device->physical;
198
 
 
199
 
   unsigned index = measure->base.index++;
200
 
   assert(index % 2 == 1);
201
 
 
202
 
   (*device->cmd_emit_timestamp)(batch, cmd_buffer->device,
203
 
                                 (struct anv_address) {
204
 
                                    .bo = measure->bo,
205
 
                                    .offset = index * sizeof(uint64_t) },
206
 
                                 true /* end_of_pipe */);
207
 
 
208
 
   struct intel_measure_snapshot *snapshot = &(measure->base.snapshots[index]);
209
 
   memset(snapshot, 0, sizeof(*snapshot));
210
 
   snapshot->type = INTEL_SNAPSHOT_END;
211
 
   snapshot->event_count = event_count;
212
 
}
213
 
 
214
 
static bool
215
 
state_changed(struct anv_cmd_buffer *cmd_buffer,
216
 
              enum intel_measure_snapshot_type type)
217
 
{
218
 
   uintptr_t vs=0, tcs=0, tes=0, gs=0, fs=0, cs=0;
219
 
 
220
 
   if (cmd_buffer->usage_flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)
221
 
      /* can't record timestamps in this mode */
222
 
      return false;
223
 
 
224
 
   if (type == INTEL_SNAPSHOT_COMPUTE) {
225
 
      const struct anv_compute_pipeline *cs_pipe =
226
 
         cmd_buffer->state.compute.pipeline;
227
 
      assert(cs_pipe);
228
 
      cs = (uintptr_t)cs_pipe->cs;
229
 
   } else if (type == INTEL_SNAPSHOT_DRAW) {
230
 
      const struct anv_graphics_pipeline *gfx = cmd_buffer->state.gfx.pipeline;
231
 
      assert(gfx);
232
 
      vs = (uintptr_t) gfx->shaders[MESA_SHADER_VERTEX];
233
 
      tcs = (uintptr_t) gfx->shaders[MESA_SHADER_TESS_CTRL];
234
 
      tes = (uintptr_t) gfx->shaders[MESA_SHADER_TESS_EVAL];
235
 
      gs = (uintptr_t) gfx->shaders[MESA_SHADER_GEOMETRY];
236
 
      fs = (uintptr_t) gfx->shaders[MESA_SHADER_FRAGMENT];
237
 
   }
238
 
   /* else blorp, all programs NULL */
239
 
 
240
 
   return intel_measure_state_changed(&cmd_buffer->measure->base,
241
 
                                      vs, tcs, tes, gs, fs, cs);
242
 
}
243
 
 
244
 
/* Per-event measurement hook.  Filters events that don't change relevant
 * state, then opens a new snapshot interval when this event starts a new
 * reporting interval (every config->event_interval events).
 */
void
_anv_measure_snapshot(struct anv_cmd_buffer *cmd_buffer,
                     enum intel_measure_snapshot_type type,
                     const char *event_name,
                     uint32_t count)
{
   struct intel_measure_config *config = config_from_command_buffer(cmd_buffer);
   struct anv_measure_batch *measure = cmd_buffer->measure;

   assert(config);
   if (measure == NULL)
      return;

   /* END snapshots are produced by anv_measure_end_snapshot only */
   assert(type != INTEL_SNAPSHOT_END);
   if (!state_changed(cmd_buffer, type)) {
      /* filter out this event */
      return;
   }

   /* increment event count */
   ++measure->base.event_count;
   if (measure->base.event_count == 1 ||
       measure->base.event_count == config->event_interval + 1) {
      /* the first event of an interval */

      /* odd index means a start snapshot is open without a matching end */
      if (measure->base.index % 2) {
         /* end the previous event */
         anv_measure_end_snapshot(cmd_buffer, measure->base.event_count - 1);
      }
      measure->base.event_count = 1;

      if (measure->base.index == config->batch_size) {
         /* Snapshot buffer is full.  The batch must be flushed before
          * additional snapshots can be taken.
          */
         static bool warned = false;
         if (unlikely(!warned)) {
            fprintf(config->file,
                    "WARNING: batch size exceeds INTEL_MEASURE limit: %d. "
                    "Data has been dropped. "
                    "Increase setting with INTEL_MEASURE=batch_size={count}\n",
                    config->batch_size);
         }

         warned = true;
         return;
      }

      anv_measure_start_snapshot(cmd_buffer, type, event_name, count);
   }
}
295
 
 
296
 
/**
 * Called when a command buffer is reset.  Re-initializes existing anv_measure
 * data structures.
 */
void
anv_measure_reset(struct anv_cmd_buffer *cmd_buffer)
{
   struct intel_measure_config *config = config_from_command_buffer(cmd_buffer);
   struct anv_device *device = cmd_buffer->device;
   struct anv_measure_batch *measure = cmd_buffer->measure;

   if (!config)
      return;

   if (!config->enabled) {
      cmd_buffer->measure = NULL;
      return;
   }

   if (!measure) {
      /* Capture has recently been enabled. Instead of resetting, a new data
       * structure must be allocated and initialized.
       */
      return anv_measure_init(cmd_buffer);
   }

   /* it is possible that the command buffer contains snapshots that have not
    * yet been processed
    */
   intel_measure_gather(&device->physical->measure_device,
                        &device->info);

   assert(cmd_buffer->device != NULL);

   /* zero the bookkeeping so a fresh set of snapshots can be recorded */
   measure->base.index = 0;
//   measure->base.framebuffer = 0;
   measure->base.frame = 0;
   measure->base.event_count = 0;
   list_inithead(&measure->base.link);

   /* release and re-allocate the timestamp BO — presumably to drop any
    * lingering GPU references to the old buffer; verify against
    * anv_device_release_bo semantics
    */
   anv_device_release_bo(device, measure->bo);
   ASSERTED VkResult result =
      anv_device_alloc_bo(device, "measure data",
                          config->batch_size * sizeof(uint64_t),
                          ANV_BO_ALLOC_MAPPED,
                          0,
                          (struct anv_bo**)&measure->bo);
   /* NOTE(review): allocation failure is only caught by the assert below,
    * which is compiled out in release builds; measure->bo->map would then
    * fault — consider real error handling as in anv_measure_init.
    */
   measure->base.timestamps = measure->bo->map;
   assert(result == VK_SUCCESS);
}
346
 
 
347
 
void
348
 
anv_measure_destroy(struct anv_cmd_buffer *cmd_buffer)
349
 
{
350
 
   struct intel_measure_config *config = config_from_command_buffer(cmd_buffer);
351
 
   struct anv_measure_batch *measure = cmd_buffer->measure;
352
 
   struct anv_device *device = cmd_buffer->device;
353
 
   struct anv_physical_device *physical = device->physical;
354
 
 
355
 
   if (!config)
356
 
      return;
357
 
   if (measure == NULL)
358
 
      return;
359
 
 
360
 
   /* it is possible that the command buffer contains snapshots that have not
361
 
    * yet been processed
362
 
    */
363
 
   intel_measure_gather(&physical->measure_device, &physical->info);
364
 
 
365
 
   anv_device_release_bo(device, measure->bo);
366
 
   vk_free(&cmd_buffer->vk.pool->alloc, measure);
367
 
   cmd_buffer->measure = NULL;
368
 
}
369
 
 
370
 
static struct intel_measure_config*
371
 
config_from_device(struct anv_device *device)
372
 
{
373
 
   return device->physical->measure_device.config;
374
 
}
375
 
 
376
 
void
377
 
anv_measure_device_destroy(struct anv_physical_device *device)
378
 
{
379
 
   struct intel_measure_device *measure_device = &device->measure_device;
380
 
   struct intel_measure_config *config = measure_device->config;
381
 
 
382
 
   if (!config)
383
 
      return;
384
 
 
385
 
   if (measure_device->ringbuffer != NULL) {
386
 
      vk_free(&device->instance->vk.alloc, measure_device->ringbuffer);
387
 
      measure_device->ringbuffer = NULL;
388
 
   }
389
 
}
390
 
 
391
 
/**
392
 
 *  Hook for command buffer submission.
393
 
 */
394
 
void
395
 
_anv_measure_submit(struct anv_cmd_buffer *cmd_buffer)
396
 
{
397
 
   struct intel_measure_config *config = config_from_command_buffer(cmd_buffer);
398
 
   struct anv_measure_batch *measure = cmd_buffer->measure;
399
 
   struct intel_measure_device *measure_device = &cmd_buffer->device->physical->measure_device;
400
 
 
401
 
   if (!config)
402
 
      return;
403
 
   if (measure == NULL)
404
 
      return;
405
 
 
406
 
   if (measure->base.index == 0)
407
 
      /* no snapshots were started */
408
 
      return;
409
 
 
410
 
   /* finalize snapshots and enqueue them */
411
 
   static unsigned cmd_buffer_count = 0;
412
 
   measure->base.batch_count = p_atomic_inc_return(&cmd_buffer_count);
413
 
 
414
 
   if (measure->base.index %2 == 1) {
415
 
      anv_measure_end_snapshot(cmd_buffer, measure->base.event_count);
416
 
      measure->base.event_count = 0;
417
 
   }
418
 
 
419
 
   /* add to the list of submitted snapshots */
420
 
   pthread_mutex_lock(&measure_device->mutex);
421
 
   list_addtail(&measure->base.link, &measure_device->queued_snapshots);
422
 
   pthread_mutex_unlock(&measure_device->mutex);
423
 
}
424
 
 
425
 
/**
426
 
 *  Hook for the start of a frame.
427
 
 */
428
 
void
429
 
_anv_measure_acquire(struct anv_device *device)
430
 
{
431
 
   struct intel_measure_config *config = config_from_device(device);
432
 
   struct intel_measure_device *measure_device = &device->physical->measure_device;
433
 
 
434
 
   if (!config)
435
 
      return;
436
 
   if (measure_device == NULL)
437
 
      return;
438
 
 
439
 
   intel_measure_frame_transition(p_atomic_inc_return(&measure_device->frame));
440
 
 
441
 
   /* iterate the queued snapshots and publish those that finished */
442
 
   intel_measure_gather(measure_device, &device->physical->info);
443
 
}
444
 
 
445
 
void
446
 
_anv_measure_endcommandbuffer(struct anv_cmd_buffer *cmd_buffer)
447
 
{
448
 
   struct intel_measure_config *config = config_from_command_buffer(cmd_buffer);
449
 
   struct anv_measure_batch *measure = cmd_buffer->measure;
450
 
 
451
 
   if (!config)
452
 
      return;
453
 
   if (measure == NULL)
454
 
      return;
455
 
   if (measure->base.index % 2 == 0)
456
 
      return;
457
 
 
458
 
   anv_measure_end_snapshot(cmd_buffer, measure->base.event_count);
459
 
   measure->base.event_count = 0;
460
 
}
461
 
 
462
 
/* Hook for the start of a render pass: when filtering per-renderpass or
 * per-shader, close any snapshot interval left open by the previous pass.
 */
void
_anv_measure_beginrenderpass(struct anv_cmd_buffer *cmd_buffer)
{
   struct intel_measure_config *config = config_from_command_buffer(cmd_buffer);
   struct anv_measure_batch *measure = cmd_buffer->measure;

   if (!config)
      return;
   if (measure == NULL)
      return;

//   if (measure->base.framebuffer == (uintptr_t) cmd_buffer->state.framebuffer)
//      /* no change */
//      return;

   bool filtering = (config->flags & (INTEL_MEASURE_RENDERPASS |
                                      INTEL_MEASURE_SHADER));
   if (filtering && measure->base.index % 2 == 1) {
      /* snapshot for previous renderpass was not ended */
      anv_measure_end_snapshot(cmd_buffer,
                               measure->base.event_count);
      measure->base.event_count = 0;
   }

//   measure->base.framebuffer = (uintptr_t) cmd_buffer->state.framebuffer;
}
488
 
 
489
 
void
490
 
_anv_measure_add_secondary(struct anv_cmd_buffer *primary,
491
 
                           struct anv_cmd_buffer *secondary)
492
 
{
493
 
   struct intel_measure_config *config = config_from_command_buffer(primary);
494
 
   struct anv_measure_batch *measure = primary->measure;
495
 
   if (!config)
496
 
      return;
497
 
   if (measure == NULL)
498
 
      return;
499
 
   if (config->flags & (INTEL_MEASURE_BATCH | INTEL_MEASURE_FRAME))
500
 
      /* secondary timing will be contained within the primary */
501
 
      return;
502
 
   if (secondary->usage_flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) {
503
 
         static bool warned = false;
504
 
         if (unlikely(!warned)) {
505
 
            fprintf(config->file,
506
 
                    "WARNING: INTEL_MEASURE cannot capture timings of commands "
507
 
                    "in secondary command buffers with "
508
 
                    "VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set.\n");
509
 
         }
510
 
      return;
511
 
   }
512
 
 
513
 
   if (measure->base.index % 2 == 1)
514
 
      anv_measure_end_snapshot(primary, measure->base.event_count);
515
 
 
516
 
   struct intel_measure_snapshot *snapshot = &(measure->base.snapshots[measure->base.index]);
517
 
   _anv_measure_snapshot(primary, INTEL_SNAPSHOT_SECONDARY_BATCH, NULL, 0);
518
 
 
519
 
   snapshot->secondary = &secondary->measure->base;
520
 
}