@@ -364 +364 @@
-   pvr_transfer_job_submit(device,
+   pvr_transfer_job_submit(queue->transfer_ctx,
@@ -367 +366 @@
       queue->next_job_wait_sync[PVR_JOB_TYPE_TRANSFER],
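This hunk drops the `struct pvr_device *` argument from `pvr_transfer_job_submit()` and passes the queue's `transfer_ctx` as the leading parameter instead, presumably because the context can reach its device on its own. A sketch of the implied prototype change; this is reconstructed from the hunk alone, not copied from the driver's headers, and the trailing parameters are unknown here and left elided:

```c
/* Hypothetical reconstruction of the prototype change implied above. */

/* Before: device passed explicitly, the transfer context as a later argument. */
VkResult pvr_transfer_job_submit(struct pvr_device *device,
                                 struct pvr_transfer_ctx *ctx
                                 /* trailing parameters elided */);

/* After: the per-queue transfer context leads, and the device is presumably
 * reachable through it (an assumption, not shown in this diff). */
VkResult pvr_transfer_job_submit(struct pvr_transfer_ctx *ctx
                                 /* trailing parameters elided */);
```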
@@ -416,3 +415,4 @@
-static VkResult pvr_process_event_cmd_barrier(struct pvr_device *device,
-                                              struct pvr_queue *queue,
-                                              struct pvr_sub_cmd_event *sub_cmd)
+static VkResult
+pvr_process_event_cmd_barrier(struct pvr_device *device,
+                              struct pvr_queue *queue,
+                              struct pvr_sub_cmd_event_barrier *sub_cmd)
@@ -420,4 +420,4 @@
-   const uint32_t src_mask = sub_cmd->barrier.wait_for_stage_mask;
-   const uint32_t dst_mask = sub_cmd->barrier.wait_at_stage_mask;
+   const uint32_t src_mask = sub_cmd->wait_for_stage_mask;
+   const uint32_t dst_mask = sub_cmd->wait_at_stage_mask;
    struct vk_sync_wait wait_syncs[PVR_JOB_TYPE_MAX + 1];
    uint32_t src_wait_count = 0;
@@ -426,4 +426,2 @@
-   assert(sub_cmd->type == PVR_EVENT_TYPE_BARRIER);
-
    assert(!(src_mask & ~PVR_PIPELINE_STAGE_ALL_BITS));
    assert(!(dst_mask & ~PVR_PIPELINE_STAGE_ALL_BITS));
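The driving change here, and in the handlers below, is that each handler now receives the concrete per-type sub-command rather than the generic `struct pvr_sub_cmd_event`, which is why the `sub_cmd->type == PVR_EVENT_TYPE_BARRIER` assert and the `sub_cmd->barrier.` indirection disappear. The real definitions live elsewhere in the driver, but their shape can be inferred from the accesses visible in this diff; a sketch, with field types guessed where the diff does not show them:

```c
#include <stdint.h>

/* Stand-in for the driver's enum; the values are taken from the dispatch
 * switch further down, their order here is a guess. */
enum pvr_event_type {
   PVR_EVENT_TYPE_SET,
   PVR_EVENT_TYPE_RESET,
   PVR_EVENT_TYPE_WAIT,
   PVR_EVENT_TYPE_BARRIER,
};

struct pvr_event; /* opaque here */

/* Inferred from the accesses in this patch; the real definitions (likely in
 * pvr_private.h) may differ in field order, types, and unused members. */
struct pvr_sub_cmd_event_barrier {
   uint32_t wait_for_stage_mask;
   uint32_t wait_at_stage_mask;
};

struct pvr_sub_cmd_event_set_reset {
   struct pvr_event *event;
   uint32_t wait_for_stage_mask;
};

struct pvr_sub_cmd_event_wait {
   uint32_t count;
   struct pvr_event **events;
   uint32_t *wait_at_stage_masks;
};

/* The generic event sub-command becomes a tagged union over the above,
 * matching the &sub_cmd->barrier / &sub_cmd->set_reset / &sub_cmd->wait
 * accesses in the dispatch switch below. */
struct pvr_sub_cmd_event {
   enum pvr_event_type type;
   union {
      struct pvr_sub_cmd_event_barrier barrier;
      struct pvr_sub_cmd_event_set_reset set_reset;
      struct pvr_sub_cmd_event_wait wait;
   };
};
```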
@@ -491,3 +489,4 @@
 pvr_process_event_cmd_set_or_reset(struct pvr_device *device,
                                    struct pvr_queue *queue,
-                                   struct pvr_sub_cmd_event *sub_cmd)
+                                   struct pvr_sub_cmd_event_set_reset *sub_cmd,
+                                   const enum pvr_event_state new_event_state)
@@ -495,2 +494,2 @@
    /* Not PVR_JOB_TYPE_MAX since that also includes
     * PVR_JOB_TYPE_OCCLUSION_QUERY so no stage in the src mask.
@@ -499,2 +498,2 @@
    struct vk_sync_signal signal;
    struct vk_sync *signal_sync;
@@ -502,2 +501,1 @@
-   uint32_t wait_for_stage_mask;
    uint32_t wait_count = 0;
@@ -506,12 +504,4 @@
-   assert(sub_cmd->type == PVR_EVENT_TYPE_SET ||
-          sub_cmd->type == PVR_EVENT_TYPE_RESET);
-
-   if (sub_cmd->type == PVR_EVENT_TYPE_SET)
-      wait_for_stage_mask = sub_cmd->set.wait_for_stage_mask;
-   else
-      wait_for_stage_mask = sub_cmd->reset.wait_for_stage_mask;
-
-   assert(!(wait_for_stage_mask & ~PVR_PIPELINE_STAGE_ALL_BITS));
-
-   u_foreach_bit (stage, wait_for_stage_mask) {
+   assert(!(sub_cmd->wait_for_stage_mask & ~PVR_PIPELINE_STAGE_ALL_BITS));
+
+   u_foreach_bit (stage, sub_cmd->wait_for_stage_mask) {
       if (!queue->last_job_signal_sync[stage])
@@ -548,16 +538,30 @@
-   if (sub_cmd->type == PVR_EVENT_TYPE_SET) {
-      if (sub_cmd->set.event->sync)
-         vk_sync_destroy(&device->vk, sub_cmd->set.event->sync);
-
-      sub_cmd->set.event->sync = signal_sync;
-      sub_cmd->set.event->state = PVR_EVENT_STATE_SET_BY_DEVICE;
-   } else {
-      if (sub_cmd->reset.event->sync)
-         vk_sync_destroy(&device->vk, sub_cmd->reset.event->sync);
-
-      sub_cmd->reset.event->sync = signal_sync;
-      sub_cmd->reset.event->state = PVR_EVENT_STATE_RESET_BY_DEVICE;
-   }
+   if (sub_cmd->event->sync)
+      vk_sync_destroy(&device->vk, sub_cmd->event->sync);
+
+   sub_cmd->event->sync = signal_sync;
+   sub_cmd->event->state = new_event_state;

    return VK_SUCCESS;
 }
+
+static inline VkResult
+pvr_process_event_cmd_set(struct pvr_device *device,
+                          struct pvr_queue *queue,
+                          struct pvr_sub_cmd_event_set_reset *sub_cmd)
+{
+   return pvr_process_event_cmd_set_or_reset(device,
+                                             queue,
+                                             sub_cmd,
+                                             PVR_EVENT_STATE_SET_BY_DEVICE);
+}
+
+static inline VkResult
+pvr_process_event_cmd_reset(struct pvr_device *device,
+                            struct pvr_queue *queue,
+                            struct pvr_sub_cmd_event_set_reset *sub_cmd)
+{
+   return pvr_process_event_cmd_set_or_reset(device,
+                                             queue,
+                                             sub_cmd,
+                                             PVR_EVENT_STATE_RESET_BY_DEVICE);
+}
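The shape of this refactor is worth calling out: the duplicated SET/RESET branches collapse into one worker that takes the varying `enum pvr_event_state` value as a parameter, and `static inline` wrappers restore self-documenting names for the dispatch switch at no runtime cost. A minimal, generic illustration of the same pattern; all names below are illustrative, not the driver's:

```c
#include <assert.h>

enum state { STATE_SET, STATE_RESET };

struct event {
   enum state state;
};

/* One worker: the value that used to differ per branch is a parameter. */
static void update_event(struct event *e, enum state new_state)
{
   e->state = new_state;
}

/* Thin wrappers keep call sites readable, mirroring
 * pvr_process_event_cmd_set()/pvr_process_event_cmd_reset() above. */
static inline void event_set(struct event *e)
{
   update_event(e, STATE_SET);
}

static inline void event_reset(struct event *e)
{
   update_event(e, STATE_RESET);
}

int main(void)
{
   struct event e;

   event_set(&e);
   assert(e.state == STATE_SET);

   event_reset(&e);
   assert(e.state == STATE_RESET);

   return 0;
}
```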
@@ -566 +570 @@
  * \brief Process an event sub command of wait type.
@@ -578,2 +582,2 @@
  * \param[in,out] per_cmd_buffer_syncobjs Completion syncobjs for the command
  *                                        buffer being processed.
@@ -581,3 +585,4 @@
-static VkResult pvr_process_event_cmd_wait(struct pvr_device *device,
-                                           struct pvr_queue *queue,
-                                           struct pvr_sub_cmd_event *sub_cmd)
+static VkResult
+pvr_process_event_cmd_wait(struct pvr_device *device,
+                           struct pvr_queue *queue,
+                           struct pvr_sub_cmd_event_wait *sub_cmd)
@@ -585 +590 @@
    uint32_t dst_mask = 0;
@@ -588,3 +593,3 @@
-   STACK_ARRAY(struct vk_sync_wait, waits, sub_cmd->wait.count + 1);
+   STACK_ARRAY(struct vk_sync_wait, waits, sub_cmd->count + 1);
    if (!waits)
       return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
@@ -592,2 +597,2 @@
-   for (uint32_t i = 0; i < sub_cmd->wait.count; i++)
-      dst_mask |= sub_cmd->wait.wait_at_stage_masks[i];
+   for (uint32_t i = 0; i < sub_cmd->count; i++)
+      dst_mask |= sub_cmd->wait_at_stage_masks[i];
@@ -595,10 +600,10 @@
    u_foreach_bit (stage, dst_mask) {
       struct vk_sync_signal signal;
       struct vk_sync *signal_sync;
       uint32_t wait_count = 0;

-      for (uint32_t i = 0; i < sub_cmd->wait.count; i++) {
-         if (sub_cmd->wait.wait_at_stage_masks[i] & stage) {
+      for (uint32_t i = 0; i < sub_cmd->count; i++) {
+         if (sub_cmd->wait_at_stage_masks[i] & stage) {
             waits[wait_count++] = (struct vk_sync_wait){
-               .sync = sub_cmd->wait.events[i]->sync,
+               .sync = sub_cmd->events[i]->sync,
                .stage_mask = ~(VkPipelineStageFlags2)0,
@@ -621 +626 @@
-   assert(wait_count <= (sub_cmd->wait.count + 1));
+   assert(wait_count <= (sub_cmd->count + 1));
@@ -623,2 +628,2 @@
    result = vk_sync_create(&device->vk,
                            &device->pdevice->ws->syncobj_type,
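For orientation, the wait path above works in two passes: it ORs every event's `wait_at_stage_masks[i]` into `dst_mask`, then iterates the set bits and, for each destination stage, gathers the events that must be waited on at that stage. A self-contained toy of that gather pattern, with a plain bit loop standing in for Mesa's `u_foreach_bit()`; the stage names and values are invented for the demo:

```c
#include <stdint.h>
#include <stdio.h>

#define STAGE_GEOM     (1u << 0)
#define STAGE_FRAG     (1u << 1)
#define STAGE_TRANSFER (1u << 2)

int main(void)
{
   /* Per-event destination stage masks, as in wait_at_stage_masks[]. */
   const uint32_t wait_at_stage_masks[] = { STAGE_FRAG,
                                            STAGE_FRAG | STAGE_TRANSFER };
   const uint32_t count = 2;
   uint32_t dst_mask = 0;

   /* Pass 1: union of all destination stages. */
   for (uint32_t i = 0; i < count; i++)
      dst_mask |= wait_at_stage_masks[i];

   /* Pass 2: walk the set bits (the u_foreach_bit() equivalent; uses the
    * GCC/Clang __builtin_ctz intrinsic) and gather per-stage waiters. */
   for (uint32_t bits = dst_mask; bits != 0; bits &= bits - 1) {
      const uint32_t stage = (uint32_t)__builtin_ctz(bits);

      printf("stage bit %u waits on events:", stage);
      for (uint32_t i = 0; i < count; i++) {
         if (wait_at_stage_masks[i] & (1u << stage))
            printf(" %u", i);
      }
      printf("\n");
   }

   return 0;
}
```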
@@ -666,10 +671,11 @@
    switch (sub_cmd->type) {
    case PVR_EVENT_TYPE_SET:
+      return pvr_process_event_cmd_set(device, queue, &sub_cmd->set_reset);
    case PVR_EVENT_TYPE_RESET:
-      return pvr_process_event_cmd_set_or_reset(device, queue, sub_cmd);
+      return pvr_process_event_cmd_reset(device, queue, &sub_cmd->set_reset);
    case PVR_EVENT_TYPE_WAIT:
-      return pvr_process_event_cmd_wait(device, queue, sub_cmd);
+      return pvr_process_event_cmd_wait(device, queue, &sub_cmd->wait);
    case PVR_EVENT_TYPE_BARRIER:
-      return pvr_process_event_cmd_barrier(device, queue, sub_cmd);
+      return pvr_process_event_cmd_barrier(device, queue, &sub_cmd->barrier);
    default:
       unreachable("Invalid event sub-command type.");
@@ -689,20 +695,24 @@
    switch (sub_cmd->type) {
    case PVR_SUB_CMD_TYPE_GRAPHICS: {
+      /* If the fragment job utilizes occlusion queries, for data integrity
+       * it needs to wait for the occlusion query to be processed.
+       */
       if (sub_cmd->gfx.has_occlusion_query) {
-         struct pvr_sub_cmd_event frag_to_transfer_barrier = {
-            .type = PVR_EVENT_TYPE_BARRIER,
-            .barrier = {
-               .wait_for_stage_mask = PVR_PIPELINE_STAGE_OCCLUSION_QUERY_BIT,
-               .wait_at_stage_mask = PVR_PIPELINE_STAGE_FRAG_BIT,
-            },
-         };
-
-         /* If the fragment job utilizes occlusion queries, for data
-          * integrity it needs to wait for the occlusion query to be
-          * processed.
-          */
-         result = pvr_process_event_cmd_barrier(device,
-                                                queue,
-                                                &frag_to_transfer_barrier);
+         struct pvr_sub_cmd_event_barrier barrier = {
+            .wait_for_stage_mask = PVR_PIPELINE_STAGE_OCCLUSION_QUERY_BIT,
+            .wait_at_stage_mask = PVR_PIPELINE_STAGE_FRAG_BIT,
+         };
+
+         result = pvr_process_event_cmd_barrier(device, queue, &barrier);
+         if (result != VK_SUCCESS)
+            return result;
+      }
+
+      if (sub_cmd->gfx.wait_on_previous_transfer) {
+         struct pvr_sub_cmd_event_barrier barrier = {
+            .wait_for_stage_mask = PVR_PIPELINE_STAGE_TRANSFER_BIT,
+            .wait_at_stage_mask = PVR_PIPELINE_STAGE_FRAG_BIT,
+         };
+
+         result = pvr_process_event_cmd_barrier(device, queue, &barrier);
          if (result != VK_SUCCESS)
@@ -722,15 +732,10 @@
       const bool serialize_with_frag = sub_cmd->transfer.serialize_with_frag;

       if (serialize_with_frag) {
-         struct pvr_sub_cmd_event frag_to_transfer_barrier = {
-            .type = PVR_EVENT_TYPE_BARRIER,
-            .barrier = {
-               .wait_for_stage_mask = PVR_PIPELINE_STAGE_FRAG_BIT,
-               .wait_at_stage_mask = PVR_PIPELINE_STAGE_TRANSFER_BIT,
-            },
-         };
-
-         result = pvr_process_event_cmd_barrier(device,
-                                                queue,
-                                                &frag_to_transfer_barrier);
+         struct pvr_sub_cmd_event_barrier barrier = {
+            .wait_for_stage_mask = PVR_PIPELINE_STAGE_FRAG_BIT,
+            .wait_at_stage_mask = PVR_PIPELINE_STAGE_TRANSFER_BIT,
+         };
+
+         result = pvr_process_event_cmd_barrier(device, queue, &barrier);
          if (result != VK_SUCCESS)
@@ -740,17 +745,12 @@
       result = pvr_process_transfer_cmds(device, queue, &sub_cmd->transfer);

       if (serialize_with_frag) {
-         struct pvr_sub_cmd_event transfer_to_frag_barrier = {
-            .type = PVR_EVENT_TYPE_BARRIER,
-            .barrier = {
-               .wait_for_stage_mask = PVR_PIPELINE_STAGE_TRANSFER_BIT,
-               .wait_at_stage_mask = PVR_PIPELINE_STAGE_FRAG_BIT,
-            },
-         };
+         struct pvr_sub_cmd_event_barrier barrier = {
+            .wait_for_stage_mask = PVR_PIPELINE_STAGE_TRANSFER_BIT,
+            .wait_at_stage_mask = PVR_PIPELINE_STAGE_FRAG_BIT,
+         };

          if (result != VK_SUCCESS)
             return result;

-         result = pvr_process_event_cmd_barrier(device,
-                                                queue,
-                                                &transfer_to_frag_barrier);
+         result = pvr_process_event_cmd_barrier(device, queue, &barrier);
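Net effect of the transfer case: optionally fence incoming fragment work, run the transfer, then fence subsequent fragment work against it, with each fence now a two-field `struct pvr_sub_cmd_event_barrier` initializer instead of the old `struct pvr_sub_cmd_event` carrying an explicit `.type` and nested `.barrier`. A compilable toy of that round trip, with a stub standing in for `pvr_process_event_cmd_barrier()`; the stage bit values are invented for the demo and everything below is illustrative, not the driver's code:

```c
#include <stdint.h>
#include <stdio.h>

/* Driver macro names, but with values invented for this demo. */
#define PVR_PIPELINE_STAGE_FRAG_BIT     (1u << 1)
#define PVR_PIPELINE_STAGE_TRANSFER_BIT (1u << 2)

struct pvr_sub_cmd_event_barrier {
   uint32_t wait_for_stage_mask;
   uint32_t wait_at_stage_mask;
};

/* Stub: the real pvr_process_event_cmd_barrier() wires up vk_sync waits. */
static int process_barrier(const struct pvr_sub_cmd_event_barrier *barrier)
{
   printf("wait for 0x%x at 0x%x\n",
          barrier->wait_for_stage_mask,
          barrier->wait_at_stage_mask);
   return 0;
}

int main(void)
{
   /* frag -> transfer: the transfer must not start before fragment work. */
   struct pvr_sub_cmd_event_barrier before = {
      .wait_for_stage_mask = PVR_PIPELINE_STAGE_FRAG_BIT,
      .wait_at_stage_mask = PVR_PIPELINE_STAGE_TRANSFER_BIT,
   };

   /* transfer -> frag: later fragment work waits for the transfer. */
   struct pvr_sub_cmd_event_barrier after = {
      .wait_for_stage_mask = PVR_PIPELINE_STAGE_TRANSFER_BIT,
      .wait_at_stage_mask = PVR_PIPELINE_STAGE_FRAG_BIT,
   };

   int ret = process_barrier(&before);
   /* ... the transfer job itself would be submitted here ... */
   ret |= process_barrier(&after);
   return ret;
}
```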