	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 cmd;
	int ret;

	DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
		 invalidate_domains, flush_domains);

	trace_i915_gem_request_flush(dev, dev_priv->next_seqno,
				     invalidate_domains, flush_domains);

	if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
		/*
		 * read/write caches:
		 *
		 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
		 * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
		 * also flushed at 2d versus 3d pipeline switches.
		 *
		 * read-only caches:
		 *
		 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
		 * MI_READ_FLUSH is set, and is always flushed on 965.
		 *
		 * I915_GEM_DOMAIN_COMMAND may not exist?
		 *
		 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
		 * invalidated when MI_EXE_FLUSH is set.
		 *
		 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
		 * invalidated with every MI_FLUSH.
		 *
		 * TLBs:
		 *
		 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
		 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
		 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
		 * are flushed at any MI_FLUSH.
		 */

		cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
		if ((invalidate_domains|flush_domains) &
		    I915_GEM_DOMAIN_RENDER)
			cmd &= ~MI_NO_WRITE_FLUSH;
		if (INTEL_INFO(dev)->gen < 4) {
			/*
			 * On the 965, the sampler cache always gets flushed
			 * and this bit is reserved.
			 */
			if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
				cmd |= MI_READ_FLUSH;
		}
		if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
			cmd |= MI_EXE_FLUSH;

		if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
		    (IS_G4X(dev) || IS_GEN5(dev)))
			cmd |= MI_INVALIDATE_ISP;

		DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);

		ret = intel_ring_begin(ring, 2);
		if (ret)
			return ret;

		intel_ring_emit(ring, cmd);
		intel_ring_emit(ring, MI_NOOP);
		intel_ring_advance(ring);
	}
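/*
 * Illustration only (not part of this file): a hypothetical caller of
 * the flush body above, assuming it implements the ring's ->flush()
 * hook with the (ring, invalidate_domains, flush_domains) signature
 * used here. "flush_before_cpu_read" and the choice of domain masks
 * are assumptions for the sketch, not driver API.
 */
static int flush_before_cpu_read(struct intel_ring_buffer *ring)
{
	/* Flush render-cache writes; invalidate all GPU read caches. */
	return ring->flush(ring,
			   I915_GEM_GPU_DOMAINS,
			   I915_GEM_DOMAIN_RENDER);
}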
void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	u32 mmio = 0;

	/* The ring status page addresses are no longer next to the rest of
	 * the ring registers as of gen7.
	 */
	if (IS_GEN7(dev)) {
		switch (ring->id) {
		case RING_RENDER:
			mmio = RENDER_HWS_PGA_GEN7;
			break;
		case RING_BLT:
			mmio = BLT_HWS_PGA_GEN7;
			break;
		case RING_BSD:
			mmio = BSD_HWS_PGA_GEN7;
			break;
		}
	} else if (IS_GEN6(ring->dev)) {
		mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
	} else {
		mmio = RING_HWS_PGA(ring->mmio_base);
	}

	I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
	POSTING_READ(mmio);
}
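/*
 * Context, for reference: the rest of the driver reads values the GPU
 * writes into this page through a small helper living in
 * intel_ringbuffer.h. A minimal sketch of that helper, as best this
 * excerpt allows:
 */
static inline u32
intel_read_status_page(struct intel_ring_buffer *ring, int reg)
{
	/* "reg" indexes dwords within the CPU mapping of the page. */
	return ioread32(ring->status_page.page_addr + reg);
}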
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ring_advance(ring);

	DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
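/*
 * Context, for reference: MI_USER_INTERRUPT asks the GPU to raise an
 * interrupt once the commands emitted ahead of it (including the seqno
 * write) have executed; the interrupt handler then wakes any sleepers
 * on ring->irq_queue so they can re-check the seqno.
 */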
static bool
ring_get_irq(struct intel_ring_buffer *ring, u32 flag)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev->irq_enabled)
		return false;

	spin_lock(&ring->irq_lock);
	if (ring->irq_refcount++ == 0)
		ironlake_enable_irq(dev_priv, flag);
	spin_unlock(&ring->irq_lock);

	return true;
}

static void
ring_put_irq(struct intel_ring_buffer *ring, u32 flag)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	spin_lock(&ring->irq_lock);
	if (--ring->irq_refcount == 0)
		ironlake_disable_irq(dev_priv, flag);
	spin_unlock(&ring->irq_lock);
}
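/*
 * Sketch, for illustration: how a waiter pairs these helpers through
 * the ring's irq_get/irq_put hooks. The real wait path is considerably
 * more involved; "seqno_passed" is a hypothetical stand-in for the
 * driver's actual completion check.
 */
static void wait_for_seqno_sketch(struct intel_ring_buffer *ring, u32 seqno)
{
	if (ring->irq_get(ring)) {
		wait_event(ring->irq_queue, seqno_passed(ring, seqno));
		ring->irq_put(ring);
	}
}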
static bool
gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
{
	struct drm_device *dev = ring->dev;
static bool
bsd_ring_get_irq(struct intel_ring_buffer *ring)
{
	return ring_get_irq(ring, GT_BSD_USER_INTERRUPT);
}

static void
bsd_ring_put_irq(struct intel_ring_buffer *ring)
{
	ring_put_irq(ring, GT_BSD_USER_INTERRUPT);
}
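/*
 * Sketch, for illustration: these helpers are installed as the BSD
 * ring's irq hooks in its template initializer. The field names follow
 * struct intel_ring_buffer of this era; the initializer shown here is
 * abridged and partly hypothetical.
 */
static const struct intel_ring_buffer bsd_ring_sketch = {
	.name		= "bsd ring",
	.irq_get	= bsd_ring_get_irq,
	.irq_put	= bsd_ring_put_irq,
	/* other hooks (init, flush, add_request, ...) omitted */
};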