/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

static void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	if ((dev_priv->pipestat[pipe] & mask) != mask) {
		u32 reg = PIPESTAT(pipe);

		dev_priv->pipestat[pipe] |= mask;
		/* Enable the interrupt, clear any pending status */
		I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
		POSTING_READ(reg);
	}
}

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	if ((dev_priv->pipestat[pipe] & mask) != 0) {
		u32 reg = PIPESTAT(pipe);

		dev_priv->pipestat[pipe] &= ~mask;
		I915_WRITE(reg, dev_priv->pipestat[pipe]);
		POSTING_READ(reg);
	}
}
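
/*
 * Note on the PIPESTAT layout: each PIPESTAT register packs the
 * interrupt-enable bits in its high 16 bits and the corresponding sticky
 * status bits in the low 16 bits. That is why the enable path above also
 * writes (mask >> 16): it clears any stale status bit at the moment the
 * matching enable bit is set.
 */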

/**
 * intel_enable_asle - enable ASLE interrupt for OpRegion
 */
void intel_enable_asle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long irqflags;

	/* FIXME: opregion/asle for VLV */
	if (IS_VALLEYVIEW(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	if (HAS_PCH_SPLIT(dev))
		ironlake_enable_display_irq(dev_priv, DE_GSE);
	else {
		i915_enable_pipestat(dev_priv, 1,
				     PIPE_LEGACY_BLC_EVENT_ENABLE);
		if (INTEL_INFO(dev)->gen >= 4)
			i915_enable_pipestat(dev_priv, 0,
					     PIPE_LEGACY_BLC_EVENT_ENABLE);
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK;
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	low >>= PIPE_FRAME_LOW_SHIFT;
	return (high1 << 8) | low;
}
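
/*
 * The frame counter above is split across two registers: the high 16 bits
 * live in PIPEFRAME and the low 8 bits at the top of PIPEFRAMEPIXEL, so the
 * loop re-reads the high half until it is stable to avoid returning a torn
 * 24-bit value across a frame boundary.
 */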

static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    int *vpos, int *hpos)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 vbl = 0, position = 0;
	int vbl_start, vbl_end, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	/* Get vtotal. */
	vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);

	if (INTEL_INFO(dev)->gen >= 4) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = I915_READ(PIPEDSL(pipe));

		/* Decode into vertical scanout position. Don't have
		 * horizontal scanout position.
		 */
		*vpos = position & 0x1fff;
		*hpos = 0;
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* Query vblank area. */
	vbl = I915_READ(VBLANK(cpu_transcoder));

	/* Test position against vblank region. */
	vbl_start = vbl & 0x1fff;
	vbl_end = (vbl >> 16) & 0x1fff;

	if ((*vpos < vbl_start) || (*vpos > vbl_end))
		in_vbl = false;

	/* Inside "upper part" of vblank area? Apply corrective offset: */
	if (in_vbl && (*vpos >= vbl_start))
		*vpos = *vpos - vtotal;

	/* Readouts valid? */
	if (vbl > 0)
		ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_INVBL;

	return ret;
}
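
/*
 * By convention a *vpos inside the vblank is reported as a negative offset
 * from the start of the upcoming frame (hence the -vtotal correction above),
 * which is the form the DRM core's timestamping helper expects when it
 * converts a scanout position into a vblank timestamp.
 */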

static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= dev_priv->num_pipe) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc);
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
static void i915_hotplug_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
		if (encoder->hot_plug)
			encoder->hot_plug(encoder);

	mutex_unlock(&mode_config->mutex);

	/* Just fire off a uevent and let userspace tell us what to do */
	drm_helper_hpd_irq_event(dev);
}

/* defined in intel_pm.c */
extern spinlock_t mchdev_lock;

static void ironlake_handle_rps_change(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;
	unsigned long flags;

	spin_lock_irqsave(&mchdev_lock, flags);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock_irqrestore(&mchdev_lock, flags);
}
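
/*
 * Note the inverted scale on Ironlake IPS: a numerically smaller delay value
 * means a higher performance state, so max_delay is the smallest value in the
 * range. Busy-up therefore decrements the delay and clamps it at max_delay,
 * while busy-down increments it toward min_delay.
 */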

static void notify_ring(struct drm_device *dev,
			struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (ring->obj == NULL)
		return;

	trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));

	wake_up_all(&ring->irq_queue);
	if (i915_enable_hangcheck) {
		dev_priv->hangcheck_count = 0;
		mod_timer(&dev_priv->hangcheck_timer,
			  round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
	}
}

static void gen6_pm_rps_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    rps.work);
	u32 pm_iir, pm_imr;
	u8 new_delay;

	spin_lock_irq(&dev_priv->rps.lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	pm_imr = I915_READ(GEN6_PMIMR);
	I915_WRITE(GEN6_PMIMR, 0);
	spin_unlock_irq(&dev_priv->rps.lock);

	if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
		new_delay = dev_priv->rps.cur_delay + 1;
	else
		new_delay = dev_priv->rps.cur_delay - 1;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	if (!(new_delay > dev_priv->rps.max_delay ||
	      new_delay < dev_priv->rps.min_delay)) {
		gen6_set_rps(dev_priv->dev, new_delay);
	}

	mutex_unlock(&dev_priv->rps.hw_lock);
}
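
/*
 * Unlike Ironlake IPS above, the gen6+ RPS scale is not inverted: a larger
 * delay value corresponds to a higher frequency. The up-threshold interrupt
 * therefore increments cur_delay and anything else decrements it, with the
 * result checked against the sysfs-adjustable min/max before being applied.
 */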

/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows, since statistically
 * the same row is likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[5];
	uint32_t misccpctl;
	unsigned long flags;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	error_status = I915_READ(GEN7_L3CDERRST1);
	row = GEN7_PARITY_ERROR_ROW(error_status);
	bank = GEN7_PARITY_ERROR_BANK(error_status);
	subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

	I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
				    GEN7_L3CDERRST1_ENABLE);
	POSTING_READ(GEN7_L3CDERRST1);

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	dev_priv->gt_irq_mask &= ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	mutex_unlock(&dev_priv->dev->struct_mutex);

	parity_event[0] = "L3_PARITY_ERROR=1";
	parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
	parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
	parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
	parity_event[4] = NULL;

	kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
			   KOBJ_CHANGE, parity_event);

	DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
		  row, bank, subbank);

	kfree(parity_event[3]);
	kfree(parity_event[2]);
	kfree(parity_event[1]);
}
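
/*
 * Note the symmetry with ivybridge_handle_parity_error() below: the interrupt
 * handler masks GT_GEN7_L3_PARITY_ERROR_INTERRUPT and queues this work item,
 * and the work item re-enables the interrupt once the error details have been
 * read out and reported to userspace.
 */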

static void ivybridge_handle_parity_error(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long flags;

	if (!HAS_L3_GPU_CACHE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	dev_priv->gt_irq_mask |= GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & (GEN6_RENDER_USER_INTERRUPT |
		      GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GEN6_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GEN6_BLITTER_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT |
		      GT_GEN6_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_ERROR_INTERRUPT)) {
		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
		i915_handle_error(dev, false);
	}

	if (gt_iir & GT_GEN7_L3_PARITY_ERROR_INTERRUPT)
		ivybridge_handle_parity_error(dev);
}

static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
				u32 pm_iir)
{
	unsigned long flags;

	/*
	 * IIR bits should never already be set because IMR should
	 * prevent an interrupt from being shown in IIR. The warning
	 * indicates a case where we've unsafely cleared
	 * dev_priv->rps.pm_iir. Although missing an interrupt of the same
	 * type is not a problem, it indicates a problem in the logic.
	 *
	 * The mask bit in IMR is cleared by dev_priv->rps.work.
	 */

	spin_lock_irqsave(&dev_priv->rps.lock, flags);
	dev_priv->rps.pm_iir |= pm_iir;
	I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
	POSTING_READ(GEN6_PMIMR);
	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);

	queue_work(dev_priv->wq, &dev_priv->rps.work);
}

static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;
	unsigned long irqflags;
	int pipe;
	u32 pipe_stats[I915_MAX_PIPES];
	bool blc_event;

	atomic_inc(&dev_priv->irq_received);

	while (true) {
		iir = I915_READ(VLV_IIR);
		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		snb_gt_irq_handler(dev, dev_priv, gt_iir);

		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
				drm_handle_vblank(dev, pipe);

			if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
				intel_prepare_page_flip(dev, pipe);
				intel_finish_page_flip(dev, pipe);
			}
		}

		/* Consume port.  Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);
			if (hotplug_status & dev_priv->hotplug_supported_mask)
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
			gen6_queue_rps_work(dev_priv, pm_iir);

		I915_WRITE(GTIIR, gt_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		I915_WRITE(VLV_IIR, iir);
	}

out:
	return ret;
}

static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (pch_iir & SDE_HOTPLUG_MASK)
		queue_work(dev_priv->wq, &dev_priv->hotplug_work);

	if (pch_iir & SDE_AUDIO_POWER_MASK)
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 (pch_iir & SDE_AUDIO_POWER_MASK) >>
				 SDE_AUDIO_POWER_SHIFT);

	if (pch_iir & SDE_GMBUS)
		DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n");
	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
}

static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (pch_iir & SDE_HOTPLUG_MASK_CPT)
		queue_work(dev_priv->wq, &dev_priv->hotplug_work);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT)
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
				 SDE_AUDIO_POWER_SHIFT_CPT);

	if (pch_iir & SDE_AUX_MASK_CPT)
		DRM_DEBUG_DRIVER("AUX channel interrupt\n");

	if (pch_iir & SDE_GMBUS_CPT)
		DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));
}

static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 de_iir, gt_iir, de_ier, pm_iir;
	irqreturn_t ret = IRQ_NONE;
	int i;

	atomic_inc(&dev_priv->irq_received);

	/* disable master interrupt before clearing iir  */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);

	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		snb_gt_irq_handler(dev, dev_priv, gt_iir);
		I915_WRITE(GTIIR, gt_iir);
		ret = IRQ_HANDLED;
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
		if (de_iir & DE_GSE_IVB)
			intel_opregion_gse_intr(dev);

		for (i = 0; i < 3; i++) {
			if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
				drm_handle_vblank(dev, i);
			if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
				intel_prepare_page_flip(dev, i);
				intel_finish_page_flip_plane(dev, i);
			}
		}

		/* check event from PCH */
		if (de_iir & DE_PCH_EVENT_IVB) {
			u32 pch_iir = I915_READ(SDEIIR);

			cpt_irq_handler(dev, pch_iir);

			/* clear PCH hotplug event before clear CPU irq */
			I915_WRITE(SDEIIR, pch_iir);
		}

		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
	}

	pm_iir = I915_READ(GEN6_PMIIR);
	if (pm_iir) {
		if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
			gen6_queue_rps_work(dev_priv, pm_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		ret = IRQ_HANDLED;
	}

	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);

	return ret;
}
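
/*
 * The DE_MASTER_IRQ_CONTROL dance above is the standard Ironlake-style
 * top-half pattern: the master enable is dropped on entry so that new events
 * latch into the IIR registers without re-raising the interrupt line while
 * they are being cleared, and it is restored just before returning.
 */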

static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}

static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int ret = IRQ_NONE;
	u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;

	atomic_inc(&dev_priv->irq_received);

	/* disable master interrupt before clearing iir  */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	de_iir = I915_READ(DEIIR);
	gt_iir = I915_READ(GTIIR);
	pch_iir = I915_READ(SDEIIR);
	pm_iir = I915_READ(GEN6_PMIIR);

	if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 &&
	    (!IS_GEN6(dev) || pm_iir == 0))
		goto done;

	ret = IRQ_HANDLED;

	if (IS_GEN5(dev))
		ilk_gt_irq_handler(dev, dev_priv, gt_iir);
	else
		snb_gt_irq_handler(dev, dev_priv, gt_iir);

	if (de_iir & DE_GSE)
		intel_opregion_gse_intr(dev);

	if (de_iir & DE_PIPEA_VBLANK)
		drm_handle_vblank(dev, 0);

	if (de_iir & DE_PIPEB_VBLANK)
		drm_handle_vblank(dev, 1);

	if (de_iir & DE_PLANEA_FLIP_DONE) {
		intel_prepare_page_flip(dev, 0);
		intel_finish_page_flip_plane(dev, 0);
	}

	if (de_iir & DE_PLANEB_FLIP_DONE) {
		intel_prepare_page_flip(dev, 1);
		intel_finish_page_flip_plane(dev, 1);
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		if (HAS_PCH_CPT(dev))
			cpt_irq_handler(dev, pch_iir);
		else
			ibx_irq_handler(dev, pch_iir);
	}

	if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
		ironlake_handle_rps_change(dev);

	if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
		gen6_queue_rps_work(dev_priv, pm_iir);

	/* should clear PCH hotplug event before clear CPU irq */
	I915_WRITE(SDEIIR, pch_iir);
	I915_WRITE(GTIIR, gt_iir);
	I915_WRITE(DEIIR, de_iir);
	I915_WRITE(GEN6_PMIIR, pm_iir);

done:
	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);

	return ret;
}

/**
 * i915_error_work_func - do process context error handling work
 * @work: work struct
 *
 * Fire an error uevent so userspace can see that a hang or error
 * was detected.
 */
static void i915_error_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    error_work);
	struct drm_device *dev = dev_priv->dev;
	char *error_event[] = { "ERROR=1", NULL };
	char *reset_event[] = { "RESET=1", NULL };
	char *reset_done_event[] = { "ERROR=0", NULL };

	kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);

	if (atomic_read(&dev_priv->mm.wedged)) {
		DRM_DEBUG_DRIVER("resetting chip\n");
		kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
		if (!i915_reset(dev)) {
			atomic_set(&dev_priv->mm.wedged, 0);
			kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
		}
		complete_all(&dev_priv->error_completion);
	}
}

/* NB: please notice the memset */
static void i915_get_extra_instdone(struct drm_device *dev,
				    uint32_t *instdone)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);

	switch (INTEL_INFO(dev)->gen) {
	case 2:
	case 3:
		instdone[0] = I915_READ(INSTDONE);
		break;
	case 4:
	case 5:
	case 6:
		instdone[0] = I915_READ(INSTDONE_I965);
		instdone[1] = I915_READ(INSTDONE1);
		break;
	default:
		WARN_ONCE(1, "Unsupported platform\n");
	case 7:
		instdone[0] = I915_READ(GEN7_INSTDONE_1);
		instdone[1] = I915_READ(GEN7_SC_INSTDONE);
		instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
		instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
		break;
	}
}

#ifdef CONFIG_DEBUG_FS
static struct drm_i915_error_object *
i915_error_object_create(struct drm_i915_private *dev_priv,
			 struct drm_i915_gem_object *src)
{
	struct drm_i915_error_object *dst;
	int i, count;
	u32 reloc_offset;

	if (src == NULL || src->pages == NULL)
		return NULL;

	count = src->base.size / PAGE_SIZE;

	dst = kmalloc(sizeof(*dst) + count * sizeof(u32 *), GFP_ATOMIC);
	if (dst == NULL)
		return NULL;

	reloc_offset = src->gtt_offset;
	for (i = 0; i < count; i++) {
		unsigned long flags;
		void *d;

		d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
		if (d == NULL)
			goto unwind;

		local_irq_save(flags);
		if (reloc_offset < dev_priv->mm.gtt_mappable_end &&
		    src->has_global_gtt_mapping) {
			void __iomem *s;

			/* Simply ignore tiling or any overlapping fence.
			 * It's part of the error state, and this hopefully
			 * captures what the GPU read.
			 */
			s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
						     reloc_offset);
			memcpy_fromio(d, s, PAGE_SIZE);
			io_mapping_unmap_atomic(s);
		} else {
			struct page *page;
			void *s;

			page = i915_gem_object_get_page(src, i);

			drm_clflush_pages(&page, 1);

			s = kmap_atomic(page);
			memcpy(d, s, PAGE_SIZE);
			kunmap_atomic(s);

			drm_clflush_pages(&page, 1);
		}
		local_irq_restore(flags);

		dst->pages[i] = d;

		reloc_offset += PAGE_SIZE;
	}
	dst->page_count = count;
	dst->gtt_offset = src->gtt_offset;

	return dst;

unwind:
	while (i--)
		kfree(dst->pages[i]);
	kfree(dst);
	return NULL;
}
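
/*
 * Everything above uses GFP_ATOMIC allocations and atomic mappings because
 * error capture can run from interrupt context; sleeping here would stall
 * the very hang-handling path we are trying to record.
 */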

static void
i915_error_object_free(struct drm_i915_error_object *obj)
{
	int page;

	if (obj == NULL)
		return;

	for (page = 0; page < obj->page_count; page++)
		kfree(obj->pages[page]);

	kfree(obj);
}

void
i915_error_state_free(struct kref *error_ref)
{
	struct drm_i915_error_state *error = container_of(error_ref,
							  typeof(*error), ref);
	int i;

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		i915_error_object_free(error->ring[i].batchbuffer);
		i915_error_object_free(error->ring[i].ringbuffer);
		kfree(error->ring[i].requests);
	}

	kfree(error->active_bo);
	kfree(error->overlay);
	kfree(error);
}

static void capture_bo(struct drm_i915_error_buffer *err,
		       struct drm_i915_gem_object *obj)
{
	err->size = obj->base.size;
	err->name = obj->base.name;
	err->rseqno = obj->last_read_seqno;
	err->wseqno = obj->last_write_seqno;
	err->gtt_offset = obj->gtt_offset;
	err->read_domains = obj->base.read_domains;
	err->write_domain = obj->base.write_domain;
	err->fence_reg = obj->fence_reg;
	err->pinned = 0;
	if (obj->pin_count > 0)
		err->pinned = 1;
	if (obj->user_pin_count > 0)
		err->pinned = -1;
	err->tiling = obj->tiling_mode;
	err->dirty = obj->dirty;
	err->purgeable = obj->madv != I915_MADV_WILLNEED;
	err->ring = obj->ring ? obj->ring->id : -1;
	err->cache_level = obj->cache_level;
}

static u32 capture_active_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct drm_i915_gem_object *obj;
	int i = 0;

	list_for_each_entry(obj, head, mm_list) {
		capture_bo(err++, obj);
		if (++i == count)
			break;
	}

	return i;
}

static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct drm_i915_gem_object *obj;
	int i = 0;

	list_for_each_entry(obj, head, gtt_list) {
		if (obj->pin_count == 0)
			continue;

		capture_bo(err++, obj);
		if (++i == count)
			break;
	}

	return i;
}

static void i915_gem_record_fences(struct drm_device *dev,
				   struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	/* Fences */
	switch (INTEL_INFO(dev)->gen) {
	case 7:
	case 6:
		for (i = 0; i < 16; i++)
			error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
		break;
	case 5:
	case 4:
		for (i = 0; i < 16; i++)
			error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
		break;
	case 3:
		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
			for (i = 0; i < 8; i++)
				error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
	case 2:
		for (i = 0; i < 8; i++)
			error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
		break;
	}
}

static struct drm_i915_error_object *
i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
			     struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;
	u32 seqno;

	if (!ring->get_seqno)
		return NULL;

	seqno = ring->get_seqno(ring, false);
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
		if (obj->ring != ring)
			continue;

		if (i915_seqno_passed(seqno, obj->last_read_seqno))
			continue;

		if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
			continue;

		/* We need to copy these to an anonymous buffer as the simplest
		 * method to avoid being overwritten by userspace.
		 */
		return i915_error_object_create(dev_priv, obj);
	}

	return NULL;
}

static void i915_record_ring_state(struct drm_device *dev,
				   struct drm_i915_error_state *error,
				   struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 6) {
		error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
		error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
		error->semaphore_mboxes[ring->id][0]
			= I915_READ(RING_SYNC_0(ring->mmio_base));
		error->semaphore_mboxes[ring->id][1]
			= I915_READ(RING_SYNC_1(ring->mmio_base));
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
		error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
		error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
		error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
		error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
		if (ring->id == RCS)
			error->bbaddr = I915_READ64(BB_ADDR);
	} else {
		error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
		error->ipeir[ring->id] = I915_READ(IPEIR);
		error->ipehr[ring->id] = I915_READ(IPEHR);
		error->instdone[ring->id] = I915_READ(INSTDONE);
	}

	error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
	error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
	error->seqno[ring->id] = ring->get_seqno(ring, false);
	error->acthd[ring->id] = intel_ring_get_active_head(ring);
	error->head[ring->id] = I915_READ_HEAD(ring);
	error->tail[ring->id] = I915_READ_TAIL(ring);

	error->cpu_ring_head[ring->id] = ring->head;
	error->cpu_ring_tail[ring->id] = ring->tail;
}
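
/*
 * Capturing both the MMIO HEAD/TAIL and the driver's cached ring->head and
 * ring->tail lets post-mortem tools spot divergence between what the driver
 * thought it had submitted and where the hardware actually was when the
 * error struck.
 */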

static void i915_gem_record_rings(struct drm_device *dev,
				  struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	struct drm_i915_gem_request *request;
	int i, count;

	for_each_ring(ring, dev_priv, i) {
		i915_record_ring_state(dev, error, ring);

		error->ring[i].batchbuffer =
			i915_error_first_batchbuffer(dev_priv, ring);

		error->ring[i].ringbuffer =
			i915_error_object_create(dev_priv, ring->obj);

		count = 0;
		list_for_each_entry(request, &ring->request_list, list)
			count++;

		error->ring[i].num_requests = count;
		error->ring[i].requests =
			kmalloc(count*sizeof(struct drm_i915_error_request),
				GFP_ATOMIC);
		if (error->ring[i].requests == NULL) {
			error->ring[i].num_requests = 0;
			continue;
		}

		count = 0;
		list_for_each_entry(request, &ring->request_list, list) {
			struct drm_i915_error_request *erq;

			erq = &error->ring[i].requests[count++];
			erq->seqno = request->seqno;
			erq->jiffies = request->emitted_jiffies;
			erq->tail = request->tail;
		}
	}
}

/**
 * i915_capture_error_state - capture an error record for later analysis
 * @dev: drm device
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error.  Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
static void i915_capture_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct drm_i915_error_state *error;
	unsigned long flags;
	int i, pipe;

	spin_lock_irqsave(&dev_priv->error_lock, flags);
	error = dev_priv->first_error;
	spin_unlock_irqrestore(&dev_priv->error_lock, flags);
	if (error)
		return;

	/* Account for pipe specific data like PIPE*STAT */
	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (!error) {
		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
		return;
	}

	DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n",
		 dev->primary->index);

	kref_init(&error->ref);
	error->eir = I915_READ(EIR);
	error->pgtbl_er = I915_READ(PGTBL_ER);
	error->ccid = I915_READ(CCID);

	if (HAS_PCH_SPLIT(dev))
		error->ier = I915_READ(DEIER) | I915_READ(GTIER);
	else if (IS_VALLEYVIEW(dev))
		error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
	else if (IS_GEN2(dev))
		error->ier = I915_READ16(IER);
	else
		error->ier = I915_READ(IER);

	for_each_pipe(pipe)
		error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));

	if (INTEL_INFO(dev)->gen >= 6) {
		error->error = I915_READ(ERROR_GEN6);
		error->done_reg = I915_READ(DONE_REG);
	}

	if (INTEL_INFO(dev)->gen == 7)
		error->err_int = I915_READ(GEN7_ERR_INT);

	i915_get_extra_instdone(dev, error->extra_instdone);

	i915_gem_record_fences(dev, error);
	i915_gem_record_rings(dev, error);

	/* Record buffers on the active and pinned lists. */
	error->active_bo = NULL;
	error->pinned_bo = NULL;

	i = 0;
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
		i++;
	error->active_bo_count = i;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
		if (obj->pin_count)
			i++;
	error->pinned_bo_count = i - error->active_bo_count;

	error->active_bo = NULL;
	error->pinned_bo = NULL;
	if (i) {
		error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
					   GFP_ATOMIC);
		if (error->active_bo)
			error->pinned_bo =
				error->active_bo + error->active_bo_count;
	}

	if (error->active_bo)
		error->active_bo_count =
			capture_active_bo(error->active_bo,
					  error->active_bo_count,
					  &dev_priv->mm.active_list);

	if (error->pinned_bo)
		error->pinned_bo_count =
			capture_pinned_bo(error->pinned_bo,
					  error->pinned_bo_count,
					  &dev_priv->mm.bound_list);

	do_gettimeofday(&error->time);

	error->overlay = intel_overlay_capture_error_state(dev);
	error->display = intel_display_capture_error_state(dev);

	spin_lock_irqsave(&dev_priv->error_lock, flags);
	if (dev_priv->first_error == NULL) {
		dev_priv->first_error = error;
		error = NULL;
	}
	spin_unlock_irqrestore(&dev_priv->error_lock, flags);

	if (error)
		i915_error_state_free(&error->ref);
}

void i915_destroy_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->error_lock, flags);
	error = dev_priv->first_error;
	dev_priv->first_error = NULL;
	spin_unlock_irqrestore(&dev_priv->error_lock, flags);

	if (error)
		kref_put(&error->ref, i915_error_state_free);
}
#else
#define i915_capture_error_state(x)
#endif

static void i915_report_and_clear_eir(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t instdone[I915_NUM_INSTDONE_REG];
	u32 eir = I915_READ(EIR);
	int pipe, i;

	if (!eir)
		return;

	pr_err("render error detected, EIR: 0x%08x\n", eir);

	i915_get_extra_instdone(dev, instdone);

	if (IS_G4X(dev)) {
		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			for (i = 0; i < ARRAY_SIZE(instdone); i++)
				pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
		if (eir & GM45_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (!IS_GEN2(dev)) {
		if (eir & I915_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (eir & I915_ERROR_MEMORY_REFRESH) {
		pr_err("memory refresh error:\n");
		for_each_pipe(pipe)
			pr_err("pipe %c stat: 0x%08x\n",
			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
		/* pipestat has already been acked */
	}
	if (eir & I915_ERROR_INSTRUCTION) {
		pr_err("instruction error\n");
		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
		for (i = 0; i < ARRAY_SIZE(instdone); i++)
			pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
		if (INTEL_INFO(dev)->gen < 4) {
			u32 ipeir = I915_READ(IPEIR);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
			I915_WRITE(IPEIR, ipeir);
			POSTING_READ(IPEIR);
		} else {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
	}

	I915_WRITE(EIR, eir);
	POSTING_READ(EIR);
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}
}

/**
 * i915_handle_error - handle an error interrupt
 * @dev: drm device
 *
 * Do some basic checking of register state at error interrupt time and
 * dump it to the syslog.  Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs.  Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void i915_handle_error(struct drm_device *dev, bool wedged)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int i;

	i915_capture_error_state(dev);
	i915_report_and_clear_eir(dev);

	if (wedged) {
		INIT_COMPLETION(dev_priv->error_completion);
		atomic_set(&dev_priv->mm.wedged, 1);

		/*
		 * Wakeup waiting processes so they don't hang
		 */
		for_each_ring(ring, dev_priv, i)
			wake_up_all(&ring->irq_queue);
	}

	queue_work(dev_priv->wq, &dev_priv->error_work);
}

static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_gem_object *obj;
	struct intel_unpin_work *work;
	unsigned long flags;
	bool stall_detected;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;

	if (work == NULL || work->pending || !work->enable_stall_check) {
		/* Either the pending flip IRQ arrived, or we're too early. Don't check */
		spin_unlock_irqrestore(&dev->event_lock, flags);
		return;
	}

	/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
	obj = work->pending_flip_obj;
	if (INTEL_INFO(dev)->gen >= 4) {
		int dspsurf = DSPSURF(intel_crtc->plane);
		stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
					obj->gtt_offset;
	} else {
		int dspaddr = DSPADDR(intel_crtc->plane);
		stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
							crtc->y * crtc->fb->pitches[0] +
							crtc->x * crtc->fb->bits_per_pixel/8);
	}

	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (stall_detected) {
		DRM_DEBUG_DRIVER("Pageflip stall detected\n");
		intel_prepare_page_flip(dev, intel_crtc->plane);
	}
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static int i915_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_START_VBLANK_INTERRUPT_ENABLE);
	else
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_VBLANK_INTERRUPT_ENABLE);

	/* maintain vblank delivery even in deep C-states */
	if (dev_priv->info->gen == 3)
		I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
				    DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_enable_display_irq(dev_priv,
				    DE_PIPEA_VBLANK_IVB << (5 * pipe));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	u32 imr;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	imr = I915_READ(VLV_IMR);
	if (pipe == 0)
		imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
	else
		imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
	I915_WRITE(VLV_IMR, imr);
	i915_enable_pipestat(dev_priv, pipe,
			     PIPE_START_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static void i915_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (dev_priv->info->gen == 3)
		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));

	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_VBLANK_INTERRUPT_ENABLE |
			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
				     DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_disable_display_irq(dev_priv,
				     DE_PIPEA_VBLANK_IVB << (pipe * 5));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	u32 imr;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
	imr = I915_READ(VLV_IMR);
	if (pipe == 0)
		imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
	else
		imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
	I915_WRITE(VLV_IMR, imr);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static u32
ring_last_seqno(struct intel_ring_buffer *ring)
{
	return list_entry(ring->request_list.prev,
			  struct drm_i915_gem_request, list)->seqno;
}

static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
{
	if (list_empty(&ring->request_list) ||
	    i915_seqno_passed(ring->get_seqno(ring, false),
			      ring_last_seqno(ring))) {
		/* Issue a wake-up to catch stuck h/w. */
		if (waitqueue_active(&ring->irq_queue)) {
			DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
				  ring->name);
			wake_up_all(&ring->irq_queue);
			*err = true;
		}
		return true;
	}
	return false;
}

static bool kick_ring(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp = I915_READ_CTL(ring);
	if (tmp & RING_WAIT) {
		DRM_ERROR("Kicking stuck wait on %s\n",
			  ring->name);
		I915_WRITE_CTL(ring, tmp);
		return true;
	}
	return false;
}

static bool i915_hangcheck_hung(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (dev_priv->hangcheck_count++ > 1) {
		bool hung = true;

		DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
		i915_handle_error(dev, true);

		if (!IS_GEN2(dev)) {
			struct intel_ring_buffer *ring;
			int i;

			/* Is the chip hanging on a WAIT_FOR_EVENT?
			 * If so we can simply poke the RB_WAIT bit
			 * and break the hang. This should work on
			 * all but the second generation chipsets.
			 */
			for_each_ring(ring, dev_priv, i)
				hung &= !kick_ring(ring);
		}

		return hung;
	}

	return false;
}

/**
 * This is called when the chip hasn't reported back with completed
 * batchbuffers in a long time. The first time this is called we simply record
 * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses
 * again, we assume the chip is wedged and try to fix it.
 */
void i915_hangcheck_elapsed(unsigned long data)
{
	struct drm_device *dev = (struct drm_device *)data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t acthd[I915_NUM_RINGS], instdone[I915_NUM_INSTDONE_REG];
	struct intel_ring_buffer *ring;
	bool err = false, idle;
	int i;

	if (!i915_enable_hangcheck)
		return;

	memset(acthd, 0, sizeof(acthd));
	idle = true;
	for_each_ring(ring, dev_priv, i) {
		idle &= i915_hangcheck_ring_idle(ring, &err);
		acthd[i] = intel_ring_get_active_head(ring);
	}

	/* If all work is done then ACTHD clearly hasn't advanced. */
	if (idle) {
		if (err) {
			if (i915_hangcheck_hung(dev))
				return;

			goto repeat;
		}

		dev_priv->hangcheck_count = 0;
		return;
	}

	i915_get_extra_instdone(dev, instdone);
	if (memcmp(dev_priv->last_acthd, acthd, sizeof(acthd)) == 0 &&
	    memcmp(dev_priv->prev_instdone, instdone, sizeof(instdone)) == 0) {
		if (i915_hangcheck_hung(dev))
			return;
	} else {
		dev_priv->hangcheck_count = 0;

		memcpy(dev_priv->last_acthd, acthd, sizeof(acthd));
		memcpy(dev_priv->prev_instdone, instdone, sizeof(instdone));
	}

repeat:
	/* Reset timer in case the chip hangs without another request being added */
	mod_timer(&dev_priv->hangcheck_timer,
		  round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
}
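
/*
 * Taken together, the hangcheck scheme is a simple strike counter:
 * notify_ring() zeroes hangcheck_count whenever a request completes, the
 * timer re-arms itself while ACTHD/INSTDONE keep moving, and only once
 * i915_hangcheck_hung() sees the counter pass 1 (several silent periods in
 * a row) is the GPU declared wedged and a reset attempted.
 */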

/* drm_dma.h hooks
*/
static void ironlake_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	atomic_set(&dev_priv->irq_received, 0);

	I915_WRITE(HWSTAM, 0xeffe);

	/* XXX hotplug from PCH */

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	POSTING_READ(DEIER);

	/* and GT */
	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	POSTING_READ(GTIER);

	/* south display irq */
	I915_WRITE(SDEIMR, 0xffffffff);
	I915_WRITE(SDEIER, 0x0);
	POSTING_READ(SDEIER);
}

static void valleyview_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	/* VLV magic */
	I915_WRITE(VLV_IMR, 0);
	I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
	I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);

	/* and GT */
	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	POSTING_READ(GTIER);

	I915_WRITE(DPINVGTT, 0xff);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	POSTING_READ(VLV_IER);
}

/*
 * Enable digital hotplug on the PCH, and configure the DP short pulse
 * duration to 2ms (which is the minimum in the Display Port spec)
 *
 * This register is the same on all known PCH chips.
 */
static void ironlake_enable_pch_hotplug(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 hotplug;

	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}

static int ironlake_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	/* the kinds of interrupts that are always enabled */
	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
			   DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
	u32 render_irqs;
	u32 hotplug_mask;

	dev_priv->irq_mask = ~display_mask;

	/* these should always be able to generate an irq */
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	I915_WRITE(DEIMR, dev_priv->irq_mask);
	I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
	POSTING_READ(DEIER);

	dev_priv->gt_irq_mask = ~0;

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

	if (IS_GEN6(dev))
		render_irqs =
			GT_USER_INTERRUPT |
			GEN6_BSD_USER_INTERRUPT |
			GEN6_BLITTER_USER_INTERRUPT;
	else
		render_irqs =
			GT_USER_INTERRUPT |
			GT_PIPE_NOTIFY |
			GT_BSD_USER_INTERRUPT;
	I915_WRITE(GTIER, render_irqs);
	POSTING_READ(GTIER);

	if (HAS_PCH_CPT(dev)) {
		hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
				SDE_PORTB_HOTPLUG_CPT |
				SDE_PORTC_HOTPLUG_CPT |
				SDE_PORTD_HOTPLUG_CPT);
	} else {
		hotplug_mask = (SDE_CRT_HOTPLUG |
				SDE_PORTB_HOTPLUG |
				SDE_PORTC_HOTPLUG |
				SDE_PORTD_HOTPLUG |
				SDE_AUX_MASK);
	}

	dev_priv->pch_irq_mask = ~hotplug_mask;

	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
	I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
	I915_WRITE(SDEIER, hotplug_mask);
	POSTING_READ(SDEIER);

	ironlake_enable_pch_hotplug(dev);

	if (IS_IRONLAKE_M(dev)) {
		/* Clear & enable PCU event interrupts */
		I915_WRITE(DEIIR, DE_PCU_EVENT);
		I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
	}

	return 0;
}

static int ivybridge_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	/* the kinds of interrupts that are always enabled */
	u32 display_mask =
		DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
		DE_PLANEC_FLIP_DONE_IVB |
		DE_PLANEB_FLIP_DONE_IVB |
		DE_PLANEA_FLIP_DONE_IVB;
	u32 render_irqs;
	u32 hotplug_mask;

	dev_priv->irq_mask = ~display_mask;

	/* these should always be able to generate an irq */
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	I915_WRITE(DEIMR, dev_priv->irq_mask);
	I915_WRITE(DEIER,
		   display_mask |
		   DE_PIPEC_VBLANK_IVB |
		   DE_PIPEB_VBLANK_IVB |
		   DE_PIPEA_VBLANK_IVB);
	POSTING_READ(DEIER);

	dev_priv->gt_irq_mask = ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

	render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
		GEN6_BLITTER_USER_INTERRUPT | GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIER, render_irqs);
	POSTING_READ(GTIER);

	hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
			SDE_PORTB_HOTPLUG_CPT |
			SDE_PORTC_HOTPLUG_CPT |
			SDE_PORTD_HOTPLUG_CPT);
	dev_priv->pch_irq_mask = ~hotplug_mask;

	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
	I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
	I915_WRITE(SDEIER, hotplug_mask);
	POSTING_READ(SDEIER);

	ironlake_enable_pch_hotplug(dev);

	return 0;
}

static int valleyview_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;
	u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
	u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
	u32 render_irqs;
	u16 msid;

	enable_mask = I915_DISPLAY_PORT_INTERRUPT;
	enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

	/*
	 * Leave vblank interrupts masked initially.  enable/disable will
	 * toggle them based on usage.
	 */
	dev_priv->irq_mask = (~enable_mask) |
		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	/* Hack for broken MSIs on VLV */
	pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000);
	pci_read_config_word(dev->pdev, 0x98, &msid);
	msid &= 0xff; /* mask out delivery bits */
	msid |= (1<<14);
	pci_write_config_word(dev_priv->dev->pdev, 0x98, msid);
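
	/*
	 * As far as the workaround above is understood: the 0x94/0x98 config
	 * writes point the device's MSI address at the standard x86 LAPIC MSI
	 * window (0xfee00000) and force a fixed, known-good delivery mode,
	 * since the power-on defaults on early VLV parts do not produce
	 * usable MSIs (hence the "broken MSIs" note).
	 */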

	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	I915_WRITE(VLV_IER, enable_mask);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(PIPESTAT(0), 0xffff);
	I915_WRITE(PIPESTAT(1), 0xffff);
	POSTING_READ(VLV_IER);

	i915_enable_pipestat(dev_priv, 0, pipestat_enable);
	i915_enable_pipestat(dev_priv, 1, pipestat_enable);

	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IIR, 0xffffffff);

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

	render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
		GEN6_BLITTER_USER_INTERRUPT;
	I915_WRITE(GTIER, render_irqs);
	POSTING_READ(GTIER);

	/* ack & enable invalid PTE error interrupts */
#if 0 /* FIXME: add support to irq handler for checking these bits */
	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
#endif

	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
	/* Note HDMI and DP share bits */
	if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMIB_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMIC_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMID_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
		hotplug_en |= SDVOC_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
		hotplug_en |= SDVOB_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
		hotplug_en |= CRT_HOTPLUG_INT_EN;
		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
	}

	I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);

	return 0;
}

static void valleyview_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);

	I915_WRITE(HWSTAM, 0xffffffff);
	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	POSTING_READ(VLV_IER);
}

static void ironlake_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (!dev_priv)
		return;

	I915_WRITE(HWSTAM, 0xffffffff);

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	I915_WRITE(DEIIR, I915_READ(DEIIR));

	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	I915_WRITE(GTIIR, I915_READ(GTIIR));

	I915_WRITE(SDEIMR, 0xffffffff);
	I915_WRITE(SDEIER, 0x0);
	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
}

static void i8xx_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	POSTING_READ16(IER);
}

static int i8xx_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	I915_WRITE16(EMR,
		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	I915_WRITE16(IMR, dev_priv->irq_mask);

	I915_WRITE16(IER,
		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		     I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		     I915_USER_INTERRUPT);
	POSTING_READ16(IER);

	return 0;
}
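
/*
 * Gen2 is the one platform where IER/IIR/IMR/EMR are 16-bit registers, hence
 * the I915_WRITE16/POSTING_READ16 accessors throughout the i8xx paths where
 * every later generation uses 32-bit accesses.
 */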

static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u16 iir, new_iir;
	u32 pipe_stats[2];
	unsigned long irqflags;
	int pipe;
	u16 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ16(IIR);
	if (iir == 0)
		return IRQ_NONE;

	while (iir & ~flip_mask) {
		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		I915_WRITE16(IIR, iir & ~flip_mask);
		new_iir = I915_READ16(IIR); /* Flush posted writes */

		i915_update_dri1_breadcrumb(dev);

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
		    drm_handle_vblank(dev, 0)) {
			if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
				intel_prepare_page_flip(dev, 0);
				intel_finish_page_flip(dev, 0);
				flip_mask &= ~I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT;
			}
		}

		if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
		    drm_handle_vblank(dev, 1)) {
			if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) {
				intel_prepare_page_flip(dev, 1);
				intel_finish_page_flip(dev, 1);
				flip_mask &= ~I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
			}
		}

		iir = new_iir;
	}

	return IRQ_HANDLED;
}
2201
static void i8xx_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	I915_WRITE16(IIR, I915_READ16(IIR));
}
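/*
 * Gen3 (i915-class): same overall structure as the gen2 paths above,
 * but with 32-bit interrupt registers, ASLE/backlight events and
 * optional hotplug support.
 */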
static void i915_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}
static int i915_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev)) {
		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);
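	/*
	 * IMR/IER above only route the summary display-port interrupt;
	 * per-port hotplug detection is armed separately in PORT_HOTPLUG_EN.
	 */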

	if (I915_HAS_HOTPLUG(dev)) {
		u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);

		if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMIB_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMIC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMID_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
			hotplug_en |= CRT_HOTPLUG_INT_EN;
			hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
		}

		/* Ignore TV since it's buggy */

		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
	}

	intel_opregion_enable_asle(dev);

	return 0;
}
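/*
 * The plane<->pipe assignment can be swapped on mobile gen3 parts, so
 * the handler below looks up flip-pending bits by plane while vblank
 * events are tracked by pipe.
 */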
static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	u32 flip[2] = {
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT,
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT
	};
	int pipe, ret = IRQ_NONE;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(IIR);
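	/*
	 * Keep looping while new IIR bits (beyond the deferred flip bits in
	 * flip_mask) appear; see the MSI note at the bottom of the loop.
	 */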
	do {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/* Clear the PIPE*STAT regs before the IIR */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		/* Consume port.  Then clear IIR or we'll miss events */
		if ((I915_HAS_HOTPLUG(dev)) &&
		    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);
			if (hotplug_status & dev_priv->hotplug_supported_mask)
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			POSTING_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		for_each_pipe(pipe) {
			int plane = pipe;
			if (IS_MOBILE(dev))
				plane = !plane;
			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    drm_handle_vblank(dev, pipe)) {
				if (iir & flip[plane]) {
					intel_prepare_page_flip(dev, plane);
					intel_finish_page_flip(dev, pipe);
					flip_mask &= ~flip[plane];
				}
			}

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		ret = IRQ_HANDLED;
		iir = new_iir;
	} while (iir & ~flip_mask);

	i915_update_dri1_breadcrumb(dev);

	return ret;
}
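/*
 * Teardown mirrors preinstall: disarm hotplug, mask and disable
 * everything, then ack whatever interrupt status is still asserted.
 */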
static void i915_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xffff);
	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(IIR, I915_READ(IIR));
}
static void i965_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}
static int i965_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 hotplug_en;
	u32 enable_mask;
	u32 error_mask;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
			       I915_DISPLAY_PORT_INTERRUPT |
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask = ~dev_priv->irq_mask;
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);
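	/*
	 * The SDVO hotplug status bits differ between i965 and G4X, so the
	 * per-platform set is selected below before arming PORT_HOTPLUG_EN.
	 */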

	/* Note HDMI and DP share hotplug bits */
	hotplug_en = 0;
	if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMIB_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMIC_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMID_HOTPLUG_INT_EN;
	if (IS_G4X(dev)) {
		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_G4X)
			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_G4X)
			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
	} else {
		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I965)
			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I965)
			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
	}
	if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
		hotplug_en |= CRT_HOTPLUG_INT_EN;

		/* Programming the CRT detection parameters tends
		   to generate a spurious hotplug event about three
		   seconds later.  So just do it once.
		*/
		if (IS_G4X(dev))
			hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
	}

	/* Ignore TV since it's buggy */

	I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);

	intel_opregion_enable_asle(dev);

	return 0;
}
static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	int irq_received;
	int ret = IRQ_NONE, pipe;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(IIR);
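	/*
	 * Re-read IIR after each pass and exit only once a pass finds no
	 * interrupt bits left to service (see the MSI note below).
	 */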
	for (;;) {
		bool blc_event = false;

		irq_received = iir != 0;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = 1;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port.  Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);
			if (hotplug_status & dev_priv->hotplug_supported_mask)
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[VCS]);

		if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT)
			intel_prepare_page_flip(dev, 0);

		if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT)
			intel_prepare_page_flip(dev, 1);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    drm_handle_vblank(dev, pipe)) {
				i915_pageflip_stall_check(dev, pipe);
				intel_finish_page_flip(dev, pipe);
			}

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	i915_update_dri1_breadcrumb(dev);

	return ret;
}
static void i965_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}
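/*
 * intel_irq_init - select the generation-specific IRQ entry points
 *
 * Fills in the drm_driver vtable (irq handler, (pre/post)install and
 * uninstall hooks, vblank callbacks) for the detected hardware before
 * the interrupt is actually requested.
 */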
void intel_irq_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->error_work, i915_error_work_func);
	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	dev->driver->get_vblank_counter = i915_get_vblank_counter;
	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
	else
		dev->driver->get_vblank_timestamp = NULL;
	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

	if (IS_VALLEYVIEW(dev)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
	} else if (IS_IVYBRIDGE(dev)) {
		/* Share pre & uninstall handlers with ILK/SNB */
		dev->driver->irq_handler = ivybridge_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ivybridge_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ivybridge_enable_vblank;
		dev->driver->disable_vblank = ivybridge_disable_vblank;
	} else if (IS_HASWELL(dev)) {
		/* Share interrupts handling with IVB */
		dev->driver->irq_handler = ivybridge_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ivybridge_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ivybridge_enable_vblank;
		dev->driver->disable_vblank = ivybridge_disable_vblank;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
	} else {
		if (INTEL_INFO(dev)->gen == 2) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (INTEL_INFO(dev)->gen == 3) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
		}
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}