~ubuntu-branches/ubuntu/precise/linux-ti-omap4/precise


Viewing changes to drivers/gpu/drm/i915/intel_ringbuffer.c

  • Committer: Bazaar Package Importer
  • Author(s): Paolo Pisati
  • Date: 2011-06-29 15:23:51 UTC
  • mfrom: (26.1.1 natty-proposed)
  • Revision ID: james.westby@ubuntu.com-20110629152351-xs96tm303d95rpbk
Tags: 3.0.0-1200.2
* Rebased against 3.0.0-6.7
* BSP from TI based on 3.0.0

@@ -62,77 +62,63 @@
                   u32   flush_domains)
 {
         struct drm_device *dev = ring->dev;
-        drm_i915_private_t *dev_priv = dev->dev_private;
         u32 cmd;
         int ret;

-#if WATCH_EXEC
-        DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
-                  invalidate_domains, flush_domains);
-#endif
-
-        trace_i915_gem_request_flush(dev, dev_priv->next_seqno,
-                                     invalidate_domains, flush_domains);
-
-        if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
+        /*
+         * read/write caches:
+         *
+         * I915_GEM_DOMAIN_RENDER is always invalidated, but is
+         * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
+         * also flushed at 2d versus 3d pipeline switches.
+         *
+         * read-only caches:
+         *
+         * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
+         * MI_READ_FLUSH is set, and is always flushed on 965.
+         *
+         * I915_GEM_DOMAIN_COMMAND may not exist?
+         *
+         * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
+         * invalidated when MI_EXE_FLUSH is set.
+         *
+         * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
+         * invalidated with every MI_FLUSH.
+         *
+         * TLBs:
+         *
+         * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
+         * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and
+         * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
+         * are flushed at any MI_FLUSH.
+         */
+
+        cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
+        if ((invalidate_domains|flush_domains) &
+            I915_GEM_DOMAIN_RENDER)
+                cmd &= ~MI_NO_WRITE_FLUSH;
+        if (INTEL_INFO(dev)->gen < 4) {
                 /*
-                 * read/write caches:
-                 *
-                 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
-                 * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
-                 * also flushed at 2d versus 3d pipeline switches.
-                 *
-                 * read-only caches:
-                 *
-                 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
-                 * MI_READ_FLUSH is set, and is always flushed on 965.
-                 *
-                 * I915_GEM_DOMAIN_COMMAND may not exist?
-                 *
-                 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
-                 * invalidated when MI_EXE_FLUSH is set.
-                 *
-                 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
-                 * invalidated with every MI_FLUSH.
-                 *
-                 * TLBs:
-                 *
-                 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
-                 * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and
-                 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
-                 * are flushed at any MI_FLUSH.
+                 * On the 965, the sampler cache always gets flushed
+                 * and this bit is reserved.
                  */
-
-                cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
-                if ((invalidate_domains|flush_domains) &
-                    I915_GEM_DOMAIN_RENDER)
-                        cmd &= ~MI_NO_WRITE_FLUSH;
-                if (INTEL_INFO(dev)->gen < 4) {
-                        /*
-                         * On the 965, the sampler cache always gets flushed
-                         * and this bit is reserved.
-                         */
-                        if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
-                                cmd |= MI_READ_FLUSH;
-                }
-                if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
-                        cmd |= MI_EXE_FLUSH;
-
-                if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
-                    (IS_G4X(dev) || IS_GEN5(dev)))
-                        cmd |= MI_INVALIDATE_ISP;
-
-#if WATCH_EXEC
-                DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
-#endif
-                ret = intel_ring_begin(ring, 2);
-                if (ret)
-                        return ret;
-
-                intel_ring_emit(ring, cmd);
-                intel_ring_emit(ring, MI_NOOP);
-                intel_ring_advance(ring);
+                if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
+                        cmd |= MI_READ_FLUSH;
         }
+        if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
+                cmd |= MI_EXE_FLUSH;
+
+        if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
+            (IS_G4X(dev) || IS_GEN5(dev)))
+                cmd |= MI_INVALIDATE_ISP;
+
+        ret = intel_ring_begin(ring, 2);
+        if (ret)
+                return ret;
+
+        intel_ring_emit(ring, cmd);
+        intel_ring_emit(ring, MI_NOOP);
+        intel_ring_advance(ring);

         return 0;
 }
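
Note: the hunk above drops the WATCH_EXEC debug output and the request-flush tracepoint, and the flush routine no longer tests I915_GEM_GPU_DOMAINS itself before emitting, so the MI_FLUSH command word is built on every call. The standalone C sketch below mirrors how that command word is assembled; the MI_* and I915_GEM_DOMAIN_* values here are illustrative placeholders, not the driver's real encodings.

/*
 * Sketch of the command-word construction in the new flush path.
 * All bit values below are placeholders for illustration only.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define MI_FLUSH                     (1u << 23)  /* placeholder */
#define MI_NO_WRITE_FLUSH            (1u << 2)   /* placeholder */
#define MI_READ_FLUSH                (1u << 0)   /* placeholder */
#define MI_EXE_FLUSH                 (1u << 1)   /* placeholder */
#define MI_INVALIDATE_ISP            (1u << 5)   /* placeholder */

#define I915_GEM_DOMAIN_RENDER       (1u << 1)   /* placeholder */
#define I915_GEM_DOMAIN_SAMPLER      (1u << 2)   /* placeholder */
#define I915_GEM_DOMAIN_COMMAND      (1u << 3)   /* placeholder */
#define I915_GEM_DOMAIN_INSTRUCTION  (1u << 4)   /* placeholder */

static uint32_t build_flush_cmd(uint32_t invalidate_domains,
                                uint32_t flush_domains,
                                int gen, bool is_g4x_or_gen5)
{
        uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;

        /* A render-domain flush must actually write back the render cache. */
        if ((invalidate_domains | flush_domains) & I915_GEM_DOMAIN_RENDER)
                cmd &= ~MI_NO_WRITE_FLUSH;

        /* Pre-965 only flushes the sampler cache when explicitly asked. */
        if (gen < 4 && (invalidate_domains & I915_GEM_DOMAIN_SAMPLER))
                cmd |= MI_READ_FLUSH;

        if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
                cmd |= MI_EXE_FLUSH;

        /* G4X/Ironlake can also invalidate the indirect state pointers. */
        if ((invalidate_domains & I915_GEM_DOMAIN_COMMAND) && is_g4x_or_gen5)
                cmd |= MI_INVALIDATE_ISP;

        return cmd;
}

int main(void)
{
        uint32_t cmd = build_flush_cmd(I915_GEM_DOMAIN_SAMPLER |
                                       I915_GEM_DOMAIN_INSTRUCTION,
                                       I915_GEM_DOMAIN_RENDER,
                                       3, false);
        printf("flush cmd = 0x%08x\n", cmd);
        return 0;
}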
@@ -250,7 +236,7 @@
                 ret = -ENOMEM;
                 goto err;
         }
-        obj->agp_type = AGP_USER_CACHED_MEMORY;
+        obj->cache_level = I915_CACHE_LLC;

         ret = i915_gem_object_pin(obj, 4096, true);
         if (ret)
@@ -300,7 +286,7 @@

         if (INTEL_INFO(dev)->gen > 3) {
                 int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
-                if (IS_GEN6(dev))
+                if (IS_GEN6(dev) || IS_GEN7(dev))
                         mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
                 I915_WRITE(MI_MODE, mode);
         }
@@ -565,10 +551,31 @@

 void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
 {
+        struct drm_device *dev = ring->dev;
         drm_i915_private_t *dev_priv = ring->dev->dev_private;
-        u32 mmio = IS_GEN6(ring->dev) ?
-                RING_HWS_PGA_GEN6(ring->mmio_base) :
-                RING_HWS_PGA(ring->mmio_base);
+        u32 mmio = 0;
+
+        /* The ring status page addresses are no longer next to the rest of
+         * the ring registers as of gen7.
+         */
+        if (IS_GEN7(dev)) {
+                switch (ring->id) {
+                case RING_RENDER:
+                        mmio = RENDER_HWS_PGA_GEN7;
+                        break;
+                case RING_BLT:
+                        mmio = BLT_HWS_PGA_GEN7;
+                        break;
+                case RING_BSD:
+                        mmio = BSD_HWS_PGA_GEN7;
+                        break;
+                }
+        } else if (IS_GEN6(ring->dev)) {
+                mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
+        } else {
+                mmio = RING_HWS_PGA(ring->mmio_base);
+        }
+
         I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
         POSTING_READ(mmio);
 }
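
Note: as the new comment in the hunk says, the ring status page registers are no longer grouped with the rest of each ring's registers as of gen7, so intel_ring_setup_status_page now picks the MMIO offset per generation and per ring. Below is a rough standalone sketch of that selection; the register offsets and the ring_id enum are placeholders, not the values from i915_reg.h.

/*
 * Sketch of per-generation status-page register selection.
 * Offsets below are made-up placeholders for illustration.
 */
#include <stdio.h>
#include <stdint.h>

enum ring_id { RING_RENDER, RING_BSD, RING_BLT };

#define RENDER_HWS_PGA_GEN7  0x04080u           /* placeholder offset */
#define BSD_HWS_PGA_GEN7     0x04180u           /* placeholder offset */
#define BLT_HWS_PGA_GEN7     0x04280u           /* placeholder offset */
#define HWS_PGA_GEN6(base)   ((base) + 0x80u)   /* placeholder */
#define HWS_PGA(base)        ((base) + 0x20u)   /* placeholder */

static uint32_t status_page_reg(int gen, enum ring_id id, uint32_t mmio_base)
{
        /* Gen7 keeps the status page registers apart from the ring's
         * register block, so they are looked up per ring. */
        if (gen >= 7) {
                switch (id) {
                case RING_RENDER: return RENDER_HWS_PGA_GEN7;
                case RING_BLT:    return BLT_HWS_PGA_GEN7;
                case RING_BSD:    return BSD_HWS_PGA_GEN7;
                }
        }
        /* Earlier generations derive the register from the ring's MMIO base. */
        return gen == 6 ? HWS_PGA_GEN6(mmio_base) : HWS_PGA(mmio_base);
}

int main(void)
{
        printf("gen7 BSD status page reg: 0x%05x\n",
               status_page_reg(7, RING_BSD, 0x12000));
        return 0;
}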
@@ -580,9 +587,6 @@
 {
         int ret;

-        if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
-                return 0;
-
         ret = intel_ring_begin(ring, 2);
         if (ret)
                 return ret;
@@ -612,41 +616,11 @@
         intel_ring_emit(ring, MI_USER_INTERRUPT);
         intel_ring_advance(ring);

-        DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
         *result = seqno;
         return 0;
 }

 static bool
-ring_get_irq(struct intel_ring_buffer *ring, u32 flag)
-{
-        struct drm_device *dev = ring->dev;
-        drm_i915_private_t *dev_priv = dev->dev_private;
-
-        if (!dev->irq_enabled)
-               return false;
-
-        spin_lock(&ring->irq_lock);
-        if (ring->irq_refcount++ == 0)
-                ironlake_enable_irq(dev_priv, flag);
-        spin_unlock(&ring->irq_lock);
-
-        return true;
-}
-
-static void
-ring_put_irq(struct intel_ring_buffer *ring, u32 flag)
-{
-        struct drm_device *dev = ring->dev;
-        drm_i915_private_t *dev_priv = dev->dev_private;
-
-        spin_lock(&ring->irq_lock);
-        if (--ring->irq_refcount == 0)
-                ironlake_disable_irq(dev_priv, flag);
-        spin_unlock(&ring->irq_lock);
-}
-
-static bool
 gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
 {
         struct drm_device *dev = ring->dev;
@@ -684,12 +658,37 @@
 static bool
 bsd_ring_get_irq(struct intel_ring_buffer *ring)
 {
-        return ring_get_irq(ring, GT_BSD_USER_INTERRUPT);
+        struct drm_device *dev = ring->dev;
+        drm_i915_private_t *dev_priv = dev->dev_private;
+
+        if (!dev->irq_enabled)
+                return false;
+
+        spin_lock(&ring->irq_lock);
+        if (ring->irq_refcount++ == 0) {
+                if (IS_G4X(dev))
+                        i915_enable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
+                else
+                        ironlake_enable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
+        }
+        spin_unlock(&ring->irq_lock);
+
+        return true;
 }
 static void
 bsd_ring_put_irq(struct intel_ring_buffer *ring)
 {
-        ring_put_irq(ring, GT_BSD_USER_INTERRUPT);
+        struct drm_device *dev = ring->dev;
+        drm_i915_private_t *dev_priv = dev->dev_private;
+
+        spin_lock(&ring->irq_lock);
+        if (--ring->irq_refcount == 0) {
+                if (IS_G4X(dev))
+                        i915_disable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
+                else
+                        ironlake_disable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
+        }
+        spin_unlock(&ring->irq_lock);
 }

 static int
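
Note: with this change the BSD ring no longer routes through the shared ring_get_irq()/ring_put_irq() helpers removed in the earlier hunk; it open-codes the refcounted enable/disable under ring->irq_lock and picks the G4X or Ironlake interrupt path directly. The userspace sketch below illustrates the first-user-enables, last-user-disables pattern; a pthread mutex stands in for the spinlock and the hardware calls are stubs.

/*
 * Minimal userspace model of refcounted interrupt enable/disable:
 * the first reference unmasks the source, the last reference masks it.
 */
#include <stdio.h>
#include <stdbool.h>
#include <pthread.h>

struct fake_ring {
        pthread_mutex_t irq_lock;
        int irq_refcount;
        bool irq_enabled;   /* models the interrupt source being unmasked */
};

static void hw_enable_irq(struct fake_ring *ring)  { ring->irq_enabled = true; }
static void hw_disable_irq(struct fake_ring *ring) { ring->irq_enabled = false; }

static bool fake_ring_get_irq(struct fake_ring *ring)
{
        pthread_mutex_lock(&ring->irq_lock);
        if (ring->irq_refcount++ == 0)      /* first user: unmask in hardware */
                hw_enable_irq(ring);
        pthread_mutex_unlock(&ring->irq_lock);
        return true;
}

static void fake_ring_put_irq(struct fake_ring *ring)
{
        pthread_mutex_lock(&ring->irq_lock);
        if (--ring->irq_refcount == 0)      /* last user: mask again */
                hw_disable_irq(ring);
        pthread_mutex_unlock(&ring->irq_lock);
}

int main(void)
{
        struct fake_ring ring = { PTHREAD_MUTEX_INITIALIZER, 0, false };

        fake_ring_get_irq(&ring);   /* enables */
        fake_ring_get_irq(&ring);   /* already enabled, only counts */
        fake_ring_put_irq(&ring);
        fake_ring_put_irq(&ring);   /* disables */
        printf("irq enabled: %d, refcount: %d\n",
               ring.irq_enabled, ring.irq_refcount);
        return 0;
}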
@@ -715,11 +714,8 @@
                                 u32 offset, u32 len)
 {
         struct drm_device *dev = ring->dev;
-        drm_i915_private_t *dev_priv = dev->dev_private;
         int ret;

-        trace_i915_gem_request_submit(dev, dev_priv->next_seqno + 1);
-
         if (IS_I830(dev) || IS_845G(dev)) {
                 ret = intel_ring_begin(ring, 4);
                 if (ret)
@@ -780,7 +776,7 @@
                 ret = -ENOMEM;
                 goto err;
         }
-        obj->agp_type = AGP_USER_CACHED_MEMORY;
+        obj->cache_level = I915_CACHE_LLC;

         ret = i915_gem_object_pin(obj, 4096, true);
         if (ret != 0) {
@@ -821,6 +817,7 @@
         INIT_LIST_HEAD(&ring->request_list);
         INIT_LIST_HEAD(&ring->gpu_write_list);

+        init_waitqueue_head(&ring->irq_queue);
         spin_lock_init(&ring->irq_lock);
         ring->irq_mask = ~0;

@@ -893,7 +890,11 @@

         /* Disable the ring buffer. The ring must be idle at this point */
         dev_priv = ring->dev->dev_private;
-        ret = intel_wait_ring_buffer(ring, ring->size - 8);
+        ret = intel_wait_ring_idle(ring);
+        if (ret)
+                DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
+                          ring->name, ret);
+
         I915_WRITE_CTL(ring, 0);

         drm_core_ioremapfree(&ring->map, ring->dev);
@@ -950,13 +951,13 @@
                         return 0;
         }

-        trace_i915_ring_wait_begin (dev);
+        trace_i915_ring_wait_begin(ring);
         end = jiffies + 3 * HZ;
         do {
                 ring->head = I915_READ_HEAD(ring);
                 ring->space = ring_space(ring);
                 if (ring->space >= n) {
-                        trace_i915_ring_wait_end(dev);
+                        trace_i915_ring_wait_end(ring);
                         return 0;
                 }

@@ -970,16 +971,20 @@
                 if (atomic_read(&dev_priv->mm.wedged))
                         return -EAGAIN;
         } while (!time_after(jiffies, end));
-        trace_i915_ring_wait_end (dev);
+        trace_i915_ring_wait_end(ring);
         return -EBUSY;
 }

 int intel_ring_begin(struct intel_ring_buffer *ring,
                      int num_dwords)
 {
+        struct drm_i915_private *dev_priv = ring->dev->dev_private;
         int n = 4*num_dwords;
         int ret;

+        if (unlikely(atomic_read(&dev_priv->mm.wedged)))
+                return -EIO;
+
         if (unlikely(ring->tail + n > ring->effective_size)) {
                 ret = intel_wrap_ring_buffer(ring);
                 if (unlikely(ret))
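
Note: intel_ring_begin() now looks up dev_priv and fails fast with -EIO when the GPU is wedged, before wrapping the ring or waiting for space, and the ring-wait tracepoints take the ring rather than the device. A simplified model of that flow follows; the types, sizes and the wait routine are stand-ins, not the driver's real implementation.

/*
 * Simplified model of the new ring-begin flow: refuse work on a wedged GPU,
 * wrap the ring if the request would run past the usable tail, then make
 * sure enough space is free before reserving it.
 */
#include <stdio.h>
#include <errno.h>
#include <stdbool.h>

struct fake_ring {
        int tail;               /* next write offset, in bytes */
        int space;              /* free bytes currently available */
        int effective_size;     /* usable bytes before the wrap point */
        bool wedged;            /* models dev_priv->mm.wedged */
};

static int wait_for_space(struct fake_ring *ring, int n)
{
        /* Stand-in for the real wait loop: pretend space frees up. */
        ring->space = n;
        return 0;
}

static int ring_begin(struct fake_ring *ring, int num_dwords)
{
        int n = 4 * num_dwords;         /* each dword is 4 bytes */
        int ret;

        if (ring->wedged)               /* GPU is gone, don't queue anything */
                return -EIO;

        if (ring->tail + n > ring->effective_size)
                ring->tail = 0;         /* stand-in for wrapping the buffer */

        if (ring->space < n) {
                ret = wait_for_space(ring, n);
                if (ret)
                        return ret;
        }

        ring->space -= n;               /* reserve the dwords for the caller */
        return 0;
}

int main(void)
{
        struct fake_ring ring = { .tail = 4000, .space = 64,
                                  .effective_size = 4096, .wedged = false };
        printf("ring_begin: %d\n", ring_begin(&ring, 32));
        ring.wedged = true;
        printf("ring_begin while wedged: %d\n", ring_begin(&ring, 2));
        return 0;
}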
@@ -1064,9 +1069,6 @@
         uint32_t cmd;
         int ret;

-        if (((invalidate | flush) & I915_GEM_GPU_DOMAINS) == 0)
-                return 0;
-
         ret = intel_ring_begin(ring, 4);
         if (ret)
                 return ret;
@@ -1238,9 +1240,6 @@
         uint32_t cmd;
         int ret;

-        if (((invalidate | flush) & I915_GEM_DOMAIN_RENDER) == 0)
-                return 0;
-
         ret = blt_ring_begin(ring, 4);
         if (ret)
                 return ret;
@@ -1352,7 +1351,7 @@
         drm_i915_private_t *dev_priv = dev->dev_private;
         struct intel_ring_buffer *ring = &dev_priv->ring[VCS];

-        if (IS_GEN6(dev))
+        if (IS_GEN6(dev) || IS_GEN7(dev))
                 *ring = gen6_bsd_ring;
         else
                 *ring = bsd_ring;