~mmach/netext73/mesa-ryzen

« back to all changes in this revision

Viewing changes to src/imagination/vulkan/pvr_queue.c

  • Committer: mmach
  • Date: 2023-11-02 21:31:35 UTC
  • Revision ID: netbit73@gmail.com-20231102213135-18d4tzh7tj0uz752
2023-11-02 22:11:57

Show diffs side-by-side

added added

removed removed

Lines of Context:
361
361
      return result;
362
362
 
363
363
   result =
364
 
      pvr_transfer_job_submit(device,
365
 
                              queue->transfer_ctx,
 
364
      pvr_transfer_job_submit(queue->transfer_ctx,
366
365
                              sub_cmd,
367
366
                              queue->next_job_wait_sync[PVR_JOB_TYPE_TRANSFER],
368
367
                              sync);
413
412
   return result;
414
413
}
415
414
 
416
 
static VkResult pvr_process_event_cmd_barrier(struct pvr_device *device,
417
 
                                              struct pvr_queue *queue,
418
 
                                              struct pvr_sub_cmd_event *sub_cmd)
 
415
static VkResult
 
416
pvr_process_event_cmd_barrier(struct pvr_device *device,
 
417
                              struct pvr_queue *queue,
 
418
                              struct pvr_sub_cmd_event_barrier *sub_cmd)
419
419
{
420
 
   const uint32_t src_mask = sub_cmd->barrier.wait_for_stage_mask;
421
 
   const uint32_t dst_mask = sub_cmd->barrier.wait_at_stage_mask;
 
420
   const uint32_t src_mask = sub_cmd->wait_for_stage_mask;
 
421
   const uint32_t dst_mask = sub_cmd->wait_at_stage_mask;
422
422
   struct vk_sync_wait wait_syncs[PVR_JOB_TYPE_MAX + 1];
423
423
   uint32_t src_wait_count = 0;
424
424
   VkResult result;
425
425
 
426
 
   assert(sub_cmd->type == PVR_EVENT_TYPE_BARRIER);
427
 
 
428
426
   assert(!(src_mask & ~PVR_PIPELINE_STAGE_ALL_BITS));
429
427
   assert(!(dst_mask & ~PVR_PIPELINE_STAGE_ALL_BITS));
430
428
 
490
488
static VkResult
491
489
pvr_process_event_cmd_set_or_reset(struct pvr_device *device,
492
490
                                   struct pvr_queue *queue,
493
 
                                   struct pvr_sub_cmd_event *sub_cmd)
 
491
                                   struct pvr_sub_cmd_event_set_reset *sub_cmd,
 
492
                                   const enum pvr_event_state new_event_state)
494
493
{
495
494
   /* Not PVR_JOB_TYPE_MAX since that also includes
496
495
    * PVR_JOB_TYPE_OCCLUSION_QUERY so no stage in the src mask.
499
498
   struct vk_sync_signal signal;
500
499
   struct vk_sync *signal_sync;
501
500
 
502
 
   uint32_t wait_for_stage_mask;
503
501
   uint32_t wait_count = 0;
504
502
   VkResult result;
505
503
 
506
 
   assert(sub_cmd->type == PVR_EVENT_TYPE_SET ||
507
 
          sub_cmd->type == PVR_EVENT_TYPE_RESET);
508
 
 
509
 
   if (sub_cmd->type == PVR_EVENT_TYPE_SET)
510
 
      wait_for_stage_mask = sub_cmd->set.wait_for_stage_mask;
511
 
   else
512
 
      wait_for_stage_mask = sub_cmd->reset.wait_for_stage_mask;
513
 
 
514
 
   assert(!(wait_for_stage_mask & ~PVR_PIPELINE_STAGE_ALL_BITS));
515
 
 
516
 
   u_foreach_bit (stage, wait_for_stage_mask) {
 
504
   assert(!(sub_cmd->wait_for_stage_mask & ~PVR_PIPELINE_STAGE_ALL_BITS));
 
505
 
 
506
   u_foreach_bit (stage, sub_cmd->wait_for_stage_mask) {
517
507
      if (!queue->last_job_signal_sync[stage])
518
508
         continue;
519
509
 
545
535
      return result;
546
536
   }
547
537
 
548
 
   if (sub_cmd->type == PVR_EVENT_TYPE_SET) {
549
 
      if (sub_cmd->set.event->sync)
550
 
         vk_sync_destroy(&device->vk, sub_cmd->set.event->sync);
551
 
 
552
 
      sub_cmd->set.event->sync = signal_sync;
553
 
      sub_cmd->set.event->state = PVR_EVENT_STATE_SET_BY_DEVICE;
554
 
   } else {
555
 
      if (sub_cmd->reset.event->sync)
556
 
         vk_sync_destroy(&device->vk, sub_cmd->reset.event->sync);
557
 
 
558
 
      sub_cmd->reset.event->sync = signal_sync;
559
 
      sub_cmd->reset.event->state = PVR_EVENT_STATE_RESET_BY_DEVICE;
560
 
   }
 
538
   if (sub_cmd->event->sync)
 
539
      vk_sync_destroy(&device->vk, sub_cmd->event->sync);
 
540
 
 
541
   sub_cmd->event->sync = signal_sync;
 
542
   sub_cmd->event->state = new_event_state;
561
543
 
562
544
   return VK_SUCCESS;
563
545
}
564
546
 
 
547
static inline VkResult
 
548
pvr_process_event_cmd_set(struct pvr_device *device,
 
549
                          struct pvr_queue *queue,
 
550
                          struct pvr_sub_cmd_event_set_reset *sub_cmd)
 
551
{
 
552
   return pvr_process_event_cmd_set_or_reset(device,
 
553
                                             queue,
 
554
                                             sub_cmd,
 
555
                                             PVR_EVENT_STATE_SET_BY_DEVICE);
 
556
}
 
557
 
 
558
static inline VkResult
 
559
pvr_process_event_cmd_reset(struct pvr_device *device,
 
560
                            struct pvr_queue *queue,
 
561
                            struct pvr_sub_cmd_event_set_reset *sub_cmd)
 
562
{
 
563
   return pvr_process_event_cmd_set_or_reset(device,
 
564
                                             queue,
 
565
                                             sub_cmd,
 
566
                                             PVR_EVENT_STATE_RESET_BY_DEVICE);
 
567
}
 
568
 
565
569
/**
566
570
 * \brief Process an event sub command of wait type.
567
571
 *
578
582
 * \param[in,out] per_cmd_buffer_syncobjs  Completion syncobjs for the command
579
583
 *                                         buffer being processed.
580
584
 */
581
 
static VkResult pvr_process_event_cmd_wait(struct pvr_device *device,
582
 
                                           struct pvr_queue *queue,
583
 
                                           struct pvr_sub_cmd_event *sub_cmd)
 
585
static VkResult
 
586
pvr_process_event_cmd_wait(struct pvr_device *device,
 
587
                           struct pvr_queue *queue,
 
588
                           struct pvr_sub_cmd_event_wait *sub_cmd)
584
589
{
585
590
   uint32_t dst_mask = 0;
586
591
   VkResult result;
587
592
 
588
 
   STACK_ARRAY(struct vk_sync_wait, waits, sub_cmd->wait.count + 1);
 
593
   STACK_ARRAY(struct vk_sync_wait, waits, sub_cmd->count + 1);
589
594
   if (!waits)
590
595
      return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
591
596
 
592
 
   for (uint32_t i = 0; i < sub_cmd->wait.count; i++)
593
 
      dst_mask |= sub_cmd->wait.wait_at_stage_masks[i];
 
597
   for (uint32_t i = 0; i < sub_cmd->count; i++)
 
598
      dst_mask |= sub_cmd->wait_at_stage_masks[i];
594
599
 
595
600
   u_foreach_bit (stage, dst_mask) {
596
601
      struct vk_sync_signal signal;
597
602
      struct vk_sync *signal_sync;
598
603
      uint32_t wait_count = 0;
599
604
 
600
 
      for (uint32_t i = 0; i < sub_cmd->wait.count; i++) {
601
 
         if (sub_cmd->wait.wait_at_stage_masks[i] & stage) {
 
605
      for (uint32_t i = 0; i < sub_cmd->count; i++) {
 
606
         if (sub_cmd->wait_at_stage_masks[i] & stage) {
602
607
            waits[wait_count++] = (struct vk_sync_wait){
603
 
               .sync = sub_cmd->wait.events[i]->sync,
 
608
               .sync = sub_cmd->events[i]->sync,
604
609
               .stage_mask = ~(VkPipelineStageFlags2)0,
605
610
               .wait_value = 0,
606
611
            };
618
623
         };
619
624
      }
620
625
 
621
 
      assert(wait_count <= (sub_cmd->wait.count + 1));
 
626
      assert(wait_count <= (sub_cmd->count + 1));
622
627
 
623
628
      result = vk_sync_create(&device->vk,
624
629
                              &device->pdevice->ws->syncobj_type,
665
670
{
666
671
   switch (sub_cmd->type) {
667
672
   case PVR_EVENT_TYPE_SET:
 
673
      return pvr_process_event_cmd_set(device, queue, &sub_cmd->set_reset);
668
674
   case PVR_EVENT_TYPE_RESET:
669
 
      return pvr_process_event_cmd_set_or_reset(device, queue, sub_cmd);
 
675
      return pvr_process_event_cmd_reset(device, queue, &sub_cmd->set_reset);
670
676
   case PVR_EVENT_TYPE_WAIT:
671
 
      return pvr_process_event_cmd_wait(device, queue, sub_cmd);
 
677
      return pvr_process_event_cmd_wait(device, queue, &sub_cmd->wait);
672
678
   case PVR_EVENT_TYPE_BARRIER:
673
 
      return pvr_process_event_cmd_barrier(device, queue, sub_cmd);
 
679
      return pvr_process_event_cmd_barrier(device, queue, &sub_cmd->barrier);
674
680
   default:
675
681
      unreachable("Invalid event sub-command type.");
676
682
   };
688
694
                             link) {
689
695
      switch (sub_cmd->type) {
690
696
      case PVR_SUB_CMD_TYPE_GRAPHICS: {
 
697
         /* If the fragment job utilizes occlusion queries, for data integrity
 
698
          * it needs to wait for the occlusion query to be processed.
 
699
          */
691
700
         if (sub_cmd->gfx.has_occlusion_query) {
692
 
            struct pvr_sub_cmd_event frag_to_transfer_barrier = {
693
 
               .type = PVR_EVENT_TYPE_BARRIER,
694
 
               .barrier = {
695
 
                  .wait_for_stage_mask = PVR_PIPELINE_STAGE_OCCLUSION_QUERY_BIT,
696
 
                  .wait_at_stage_mask = PVR_PIPELINE_STAGE_FRAG_BIT,
697
 
               },
698
 
            };
699
 
 
700
 
            /* If the fragment job utilizes occlusion queries, for data
701
 
             * integrity it needs to wait for the occlusion query to be
702
 
             * processed.
703
 
             */
704
 
 
705
 
            result = pvr_process_event_cmd_barrier(device,
706
 
                                                   queue,
707
 
                                                   &frag_to_transfer_barrier);
 
701
            struct pvr_sub_cmd_event_barrier barrier = {
 
702
               .wait_for_stage_mask = PVR_PIPELINE_STAGE_OCCLUSION_QUERY_BIT,
 
703
               .wait_at_stage_mask = PVR_PIPELINE_STAGE_FRAG_BIT,
 
704
            };
 
705
 
 
706
            result = pvr_process_event_cmd_barrier(device, queue, &barrier);
 
707
            if (result != VK_SUCCESS)
 
708
               break;
 
709
         }
 
710
 
 
711
         if (sub_cmd->gfx.wait_on_previous_transfer) {
 
712
            struct pvr_sub_cmd_event_barrier barrier = {
 
713
               .wait_for_stage_mask = PVR_PIPELINE_STAGE_TRANSFER_BIT,
 
714
               .wait_at_stage_mask = PVR_PIPELINE_STAGE_FRAG_BIT,
 
715
            };
 
716
 
 
717
            result = pvr_process_event_cmd_barrier(device, queue, &barrier);
708
718
            if (result != VK_SUCCESS)
709
719
               break;
710
720
         }
722
732
         const bool serialize_with_frag = sub_cmd->transfer.serialize_with_frag;
723
733
 
724
734
         if (serialize_with_frag) {
725
 
            struct pvr_sub_cmd_event frag_to_transfer_barrier = {
726
 
               .type = PVR_EVENT_TYPE_BARRIER,
727
 
               .barrier = {
728
 
                  .wait_for_stage_mask = PVR_PIPELINE_STAGE_FRAG_BIT,
729
 
                  .wait_at_stage_mask = PVR_PIPELINE_STAGE_TRANSFER_BIT,
730
 
               },
 
735
            struct pvr_sub_cmd_event_barrier barrier = {
 
736
               .wait_for_stage_mask = PVR_PIPELINE_STAGE_FRAG_BIT,
 
737
               .wait_at_stage_mask = PVR_PIPELINE_STAGE_TRANSFER_BIT,
731
738
            };
732
739
 
733
 
            result = pvr_process_event_cmd_barrier(device,
734
 
                                                   queue,
735
 
                                                   &frag_to_transfer_barrier);
 
740
            result = pvr_process_event_cmd_barrier(device, queue, &barrier);
736
741
            if (result != VK_SUCCESS)
737
742
               break;
738
743
         }
740
745
         result = pvr_process_transfer_cmds(device, queue, &sub_cmd->transfer);
741
746
 
742
747
         if (serialize_with_frag) {
743
 
            struct pvr_sub_cmd_event transfer_to_frag_barrier = {
744
 
               .type = PVR_EVENT_TYPE_BARRIER,
745
 
               .barrier = {
746
 
                  .wait_for_stage_mask = PVR_PIPELINE_STAGE_TRANSFER_BIT,
747
 
                  .wait_at_stage_mask = PVR_PIPELINE_STAGE_FRAG_BIT,
748
 
               },
 
748
            struct pvr_sub_cmd_event_barrier barrier = {
 
749
               .wait_for_stage_mask = PVR_PIPELINE_STAGE_TRANSFER_BIT,
 
750
               .wait_at_stage_mask = PVR_PIPELINE_STAGE_FRAG_BIT,
749
751
            };
750
752
 
751
753
            if (result != VK_SUCCESS)
752
754
               break;
753
755
 
754
 
            result = pvr_process_event_cmd_barrier(device,
755
 
                                                   queue,
756
 
                                                   &transfer_to_frag_barrier);
 
756
            result = pvr_process_event_cmd_barrier(device, queue, &barrier);
757
757
         }
758
758
 
759
759
         break;