~ubuntu-branches/ubuntu/wily/linux-ti-omap4/wily

Viewing changes to ubuntu/i915/i915_irq.c

  • Committer: Package Import Robot
  • Author(s): Paolo Pisati, Ubuntu: 3.5.0-20.31
  • Date: 2012-12-06 14:14:29 UTC
  • Revision ID: package-import@ubuntu.com-20121206141429-3f6mmypbjdmnmm2t
Tags: 3.5.0-216.23
* Release Tracking Bug
  - LP: #1087216

[ Paolo Pisati ]

* rebased on Ubuntu-3.5.0-20.31

[ Ubuntu: 3.5.0-20.31 ]

* Release Tracking Bug
  - LP: #1086759
* SAUCE: i915_hsw: Include #define I915_PARAM_HAS_WAIT_TIMEOUT
  - LP: #1085245
* SAUCE: i915_hsw: Include #define DRM_I915_GEM_CONTEXT_[CREATE,DESTROY]
  - LP: #1085245
* SAUCE: i915_hsw: drm/i915: add register read IOCTL
  - LP: #1085245
* SAUCE: i915_hsw: Include #define i915_execbuffer2_[set,get]_context_id
  - LP: #1085245
* SAUCE: i915_hsw: Include #define I915_GEM_PARAM_HAS_SEMAPHORES
  - LP: #1085245
* SAUCE: i915_hsw: Include #define I915_PARAM_HAS_SECURE_BATCHES
  - LP: #1085245
* SAUCE: i915_hsw: drm/i915: call intel_enable_gtt
  - LP: #1085245
* SAUCE: i915_hsw: drm: add helper to sort panels to the head of the
  connector list
  - LP: #1085245
* SAUCE: i915_hsw: drm: extract dp link bw helpers
  - LP: #1085245
* SAUCE: i915_hsw: drm: extract drm_dp_max_lane_count helper
  - LP: #1085245
* SAUCE: i915_hsw: drm: dp helper: extract drm_dp_channel_eq_ok
  - LP: #1085245
* SAUCE: i915_hsw: drm: extract helpers to compute new training values
  from sink request
  - LP: #1085245
* SAUCE: i915_hsw: drm: dp helper: extract drm_dp_clock_recovery_ok
  - LP: #1085245
* SAUCE: i915_hsw: Include #define I915_PARAM_HAS_PRIME_VMAP_FLUSH
  - LP: #1085245
* SAUCE: i915_hsw: Provide an ubuntu/i915 driver for Haswell graphics
  - LP: #1085245
* SAUCE: i915_hsw: Revert "drm: Make the .mode_fixup() operations mode
  argument a const pointer" for ubuntu/i915 driver
  - LP: #1085245
* SAUCE: i915_hsw: Rename ubuntu/i915 driver i915_hsw
  - LP: #1085245
* SAUCE: i915_hsw: Only support Haswell with ubuntu/i915 driver
  - LP: #1085245
* SAUCE: i915_hsw: Include #define DRM_I915_GEM_WAIT
  - LP: #1085245
* SAUCE: i915_hsw: drm: extract dp link train delay functions from radeon
  - LP: #1085245
* SAUCE: i915_hsw: drm/dp: Update DPCD defines
  - LP: #1085245
* SAUCE: i915_hsw: Update intel_ips.h file location
  - LP: #1085245
* SAUCE: i915_hsw: Provide updated drm_mm.h and drm_mm.c for ubuntu/i915
  - LP: #1085245
* SAUCE: i915_hsw: drm/i915: Replace the array of pages with a
  scatterlist
  - LP: #1085245
* SAUCE: i915_hsw: drm/i915: Stop using AGP layer for GEN6+
  - LP: #1085245
* SAUCE: i915_hsw: Add i915_hsw_gpu_*() calls for ubuntu/i915
  - LP: #1085245
* i915_hsw: [Config] Enable CONFIG_DRM_I915_HSW=m
  - LP: #1085245
* SAUCE: drm/i915: fix hsw_fdi_link_train "retry" code
  - LP: #1085245
* SAUCE: drm/i915: reject modes the LPT FDI receiver can't handle
  - LP: #1085245
* SAUCE: drm/i915: add support for mPHY destination on intel_sbi_{read,
  write}
  - LP: #1085245
* SAUCE: drm/i915: add lpt_init_pch_refclk
  - LP: #1085245
* SAUCE: drm/i915: set the LPT FDI RX polarity reversal bit when needed
  - LP: #1085245
* Revert "SAUCE: SECCOMP: audit: always report seccomp violations"
  - LP: #1079469
* Revert "cgroup: Drop task_lock(parent) on cgroup_fork()"
  - LP: #1084539
* Revert "cgroup: Remove task_lock() from cgroup_post_fork()"
  - LP: #1084539
* Revert "x86/mm: Fix the size calculation of mapping tables"
  - LP: #1084539
* Revert "SUNRPC: Ensure we close the socket on EPIPE errors too..."
  - LP: #1084539
* Revert "ath9k_hw: Updated AR9003 tx gain table for 5GHz"
  - LP: #1084539
* Revert "sched: Add missing call to calc_load_exit_idle()"
  - LP: #1084539
* net: fix secpath kmemleak
  - LP: #1065434
* seccomp: force auditing of kill condition
  - LP: #1079469
* e1000e: add device IDs for i218
  - LP: #1081796
* bonding: Bonding driver does not consider the gso_max_size/gso_max_segs
  setting of slave devices.
  - LP: #1078184
* mm/hotplug: correctly add new zone to all other nodes' zone lists
  - LP: #1079860
  - CVE-2012-5517
* xen: enable platform-pci only in a Xen guest
  - LP: #1081054
* udf: fix return value on error path in udf_load_logicalvol
  - LP: #1084539
* usb: gadget: at91_udc: fix dt support
  - LP: #1084539
* netfilter: nf_nat_sip: fix incorrect handling of EBUSY for RTCP
  expectation
  - LP: #1084539
* netfilter: nf_nat_sip: fix via header translation with multiple
  parameters
  - LP: #1084539
* netfilter: nf_ct_expect: fix possible access to uninitialized timer
  - LP: #1084539
* netfilter: xt_limit: have r->cost != 0 case work
  - LP: #1084539
* netfilter: nf_conntrack: fix racy timer handling with reliable events
  - LP: #1084539
* netfilter: nfnetlink_log: fix NLA_PUT macro removal bug
  - LP: #1084539
* MIPS: ath79: Fix CPU/DDR frequency calculation for SRIF PLLs
  - LP: #1084539
* jbd: Fix assertion failure in commit code due to lacking transaction
  credits
  - LP: #1084539
* nfsd4: fix nfs4 stateid leak
  - LP: #1084539
* NFSD: pass null terminated buf to kstrtouint()
  - LP: #1084539
* mfd: 88pm860x: Move _IO resources out of ioport_ioresource
  - LP: #1084539
* target: support zero allocation length in INQUIRY
  - LP: #1084539
* target: fix truncation of mode data, support zero allocation length
  - LP: #1084539
* target: fix return code in target_core_init_configfs error path
  - LP: #1084539
* powerpc/eeh: Lock module while handling EEH event
  - LP: #1084539
* SUNRPC: Ensure that the TCP socket is closed when in CLOSE_WAIT
  - LP: #1084539
* ext4: remove erroneous ext4_superblock_csum_set() in update_backups()
  - LP: #1084539
* block: remove the duplicated setting for congestion_threshold
  - LP: #1084539
* block: lift the initial queue bypass mode on blk_register_queue()
  instead of blk_init_allocated_queue()
  - LP: #1084539
* block: fix request_queue->flags initialization
  - LP: #1084539
* viafb: don't touch clock state on OLPC XO-1.5
  - LP: #1084539
* qla2xxx: Fix endianness of task management response code
  - LP: #1084539
* iscsi-target: Correctly set 0xffffffff field within ISCSI_OP_REJECT PDU
  - LP: #1084539
* drm/i915: use adjusted_mode instead of mode for checking the 6bpc force
  flag
  - LP: #1084539
* kbuild: Do not package /boot and /lib in make tar-pkg
  - LP: #1084539
* module: taint kernel when lve module is loaded
  - LP: #1084539
* mtd: nand: allow NAND_NO_SUBPAGE_WRITE to be set from driver
  - LP: #1084539
* nfsd4: don't pin clientids to pseudoflavors
  - LP: #1084539
* lockd: use rpc client's cl_nodename for id encoding
  - LP: #1084539
* pnfsblock: fix partial page buffer write
  - LP: #1084539
* pnfsblock: fix non-aligned DIO read
  - LP: #1084539
* pnfsblock: fix non-aligned DIO write
  - LP: #1084539
* target/file: Re-enable optional fd_buffered_io=1 operation
  - LP: #1084539
* iscsi-target: Add explicit set of cache_dynamic_acls=1 for TPG
  demo-mode
  - LP: #1084539
* iscsit: remove incorrect unlock in iscsit_build_sendtargets_resp
  - LP: #1084539
* iscsi-target: Bump defaults for nopin_timeout + nopin_response_timeout
  values
  - LP: #1084539
* drivers/dma/dmaengine.c: lower the priority of 'failed to get' dma
  channel message
  - LP: #1084539
* ath9k: use ieee80211_free_txskb
  - LP: #1084539
* ALSA: hda - Fix hang caused by race during suspend.
  - LP: #1084539
* ACPI: EC: Make the GPE storm threshold a module parameter
  - LP: #1084539
* ACPI: EC: Add a quirk for CLEVO M720T/M730T laptop
  - LP: #1084539
* mmc: sdhci-s3c: fix the wrong number of max bus clocks
  - LP: #1084539
* mac80211: use ieee80211_free_txskb to fix possible skb leaks
  - LP: #1084539
* ARM: OMAP: counter: add locking to read_persistent_clock
  - LP: #1084539
* ARM: vfp: fix saving d16-d31 vfp registers on v6+ kernels
  - LP: #1084539
* scsi_debug: Fix off-by-one bug when unmapping region
  - LP: #1084539
* storvsc: Account for in-transit packets in the RESET path
  - LP: #1084539
* firewire: cdev: fix user memory corruption (i386 userland on amd64
  kernel)
  - LP: #1084539
* timers: Fix endless looping between cascade() and internal_add_timer()
  - LP: #1084539
* timekeeping: Cast raw_interval to u64 to avoid shift overflow
  - LP: #1084539
* video/udlfb: fix line counting in fb_write
  - LP: #1084539
* tmpfs,ceph,gfs2,isofs,reiserfs,xfs: fix fh_len checking
  - LP: #1084539
* ALSA: hda - Add missing hda_gen_spec to struct via_spec
  - LP: #1084539
* ALSA: hda - Fix memory leaks at error path in patch_cirrus.c
  - LP: #1084539
* autofs4 - fix reset pending flag on mount fail
  - LP: #1084539
* pktgen: fix crash when generating IPv6 packets
  - LP: #1084539
* md/raid10: use correct limit variable
  - LP: #1084539
* mips,kgdb: fix recursive page fault with CONFIG_KPROBES
  - LP: #1084539
* kdb,vt_console: Fix missed data due to pager overruns
  - LP: #1084539
* xen/bootup: allow read_tscp call for Xen PV guests.
  - LP: #1084539
* xen/bootup: allow {read|write}_cr8 pvops call.
  - LP: #1084539
* libceph: eliminate connection state "DEAD"
  - LP: #1084539
* libceph: kill bad_proto ceph connection op
  - LP: #1084539
* libceph: rename socket callbacks
  - LP: #1084539
* libceph: rename kvec_reset and kvec_add functions
  - LP: #1084539
* libceph: embed ceph messenger structure in ceph_client
  - LP: #1084539
* libceph: start separating connection flags from state
  - LP: #1084539
* libceph: start tracking connection socket state
  - LP: #1084539
* libceph: provide osd number when creating osd
  - LP: #1084539
* libceph: set CLOSED state bit in con_init
  - LP: #1084539
* libceph: embed ceph connection structure in mon_client
  - LP: #1084539
* libceph: drop connection refcounting for mon_client
  - LP: #1084539
* libceph: init monitor connection when opening
  - LP: #1084539
* libceph: fully initialize connection in con_init()
  - LP: #1084539
* libceph: tweak ceph_alloc_msg()
  - LP: #1084539
* libceph: have messages point to their connection
  - LP: #1084539
* libceph: have messages take a connection reference
  - LP: #1084539
* libceph: make ceph_con_revoke() a msg operation
  - LP: #1084539
* libceph: make ceph_con_revoke_message() a msg op
  - LP: #1084539
* libceph: fix overflow in __decode_pool_names()
  - LP: #1084539
* libceph: fix overflow in osdmap_decode()
  - LP: #1084539
* libceph: fix overflow in osdmap_apply_incremental()
  - LP: #1084539
* libceph: transition socket state prior to actual connect
  - LP: #1084539
* libceph: fix NULL dereference in reset_connection()
  - LP: #1084539
* libceph: use con get/put methods
  - LP: #1084539
* libceph: drop ceph_con_get/put helpers and nref member
  - LP: #1084539
* libceph: encapsulate out message data setup
  - LP: #1084539
* libceph: encapsulate advancing msg page
  - LP: #1084539
* libceph: don't mark footer complete before it is
  - LP: #1084539
* libceph: move init_bio_*() functions up
  - LP: #1084539
* libceph: move init of bio_iter
  - LP: #1084539
* libceph: don't use bio_iter as a flag
  - LP: #1084539
* libceph: SOCK_CLOSED is a flag, not a state
  - LP: #1084539
* libceph: don't change socket state on sock event
  - LP: #1084539
* libceph: just set SOCK_CLOSED when state changes
  - LP: #1084539
* libceph: don't touch con state in con_close_socket()
  - LP: #1084539
* libceph: clear CONNECTING in ceph_con_close()
  - LP: #1084539
* libceph: clear NEGOTIATING when done
  - LP: #1084539
* libceph: define and use an explicit CONNECTED state
  - LP: #1084539
* libceph: separate banner and connect writes
  - LP: #1084539
* libceph: distinguish two phases of connect sequence
  - LP: #1084539
* libceph: small changes to messenger.c
  - LP: #1084539
* libceph: add some fine ASCII art
  - LP: #1084539
* libceph: set peer name on con_open, not init
  - LP: #1084539
* libceph: initialize mon_client con only once
  - LP: #1084539
* libceph: allow sock transition from CONNECTING to CLOSED
  - LP: #1084539
* libceph: initialize msgpool message types
  - LP: #1084539
* libceph: prevent the race of incoming work during teardown
  - LP: #1084539
* libceph: report socket read/write error message
  - LP: #1084539
* libceph: fix mutex coverage for ceph_con_close
  - LP: #1084539
* libceph: resubmit linger ops when pg mapping changes
  - LP: #1084539
* libceph: (re)initialize bio_iter on start of message receive
  - LP: #1084539
* libceph: protect ceph_con_open() with mutex
  - LP: #1084539
* libceph: reset connection retry on successful negotiation
  - LP: #1084539
* libceph: fix fault locking; close socket on lossy fault
  - LP: #1084539
* libceph: move msgr clear_standby under con mutex protection
  - LP: #1084539
* libceph: move ceph_con_send() closed check under the con mutex
  - LP: #1084539
* libceph: drop gratuitous socket close calls in con_work
  - LP: #1084539
* libceph: close socket directly from ceph_con_close()
  - LP: #1084539
* libceph: drop unnecessary CLOSED check in socket state change callback
  - LP: #1084539
* libceph: replace connection state bits with states
  - LP: #1084539
* libceph: clean up con flags
  - LP: #1084539
* libceph: clear all flags on con_close
  - LP: #1084539
* libceph: fix handling of immediate socket connect failure
  - LP: #1084539
* libceph: revoke mon_client messages on session restart
  - LP: #1084539
* libceph: verify state after retaking con lock after dispatch
  - LP: #1084539
* libceph: avoid dropping con mutex before fault
  - LP: #1084539
* libceph: change ceph_con_in_msg_alloc convention to be less weird
  - LP: #1084539
* libceph: recheck con state after allocating incoming message
  - LP: #1084539
* libceph: fix crypto key null deref, memory leak
  - LP: #1084539
* libceph: delay debugfs initialization until we learn global_id
  - LP: #1084539
* libceph: avoid truncation due to racing banners
  - LP: #1084539
* libceph: only kunmap kmapped pages
  - LP: #1084539
* rbd: reset BACKOFF if unable to re-queue
  - LP: #1084539
* libceph: avoid NULL kref_put when osd reset races with alloc_msg
  - LP: #1084539
* ceph: fix dentry reference leak in encode_fh()
  - LP: #1084539
* ceph: Fix oops when handling mdsmap that decreases max_mds
  - LP: #1084539
* libceph: check for invalid mapping
  - LP: #1084539
* ceph: avoid 32-bit page index overflow
  - LP: #1084539
* ASoC: wm2200: Use rev A register patches on rev B
  - LP: #1084539
* ASoC: wm2200: Fix non-inverted OUT2 mute control
  - LP: #1084539
* drm/i915: remove useless BUG_ON which caused a regression in 3.5.
  - LP: #1084539
* USB: Enable LPM after a failed probe.
  - LP: #1084539
* usb: Don't enable LPM if the exit latency is zero.
  - LP: #1084539
* usb: Send Set SEL before enabling parent U1/U2 timeout.
  - LP: #1084539
* ASoC: fsi: don't reschedule DMA from an atomic context
  - LP: #1084539
* drm/i915: Set guardband clipping workaround bit in the right register.
  - LP: #1084539
* pcmcia: sharpsl: don't discard sharpsl_pcmcia_ops
  - LP: #1084539
* hwmon: (coretemp) Add support for Atom CE4110/4150/4170
  - LP: #1084539
* ALSA: hda - Fix registration race of VGA switcheroo
  - LP: #1084539
* usb: dwc3: gadget: fix 'endpoint always busy' bug
  - LP: #1084539
* usb: musb: am35xx: drop spurious unplugging a device
  - LP: #1084539
* drm/radeon: Don't destroy I2C Bus Rec in radeon_ext_tmds_enc_destroy().
  - LP: #1084539
* ALSA: hda - Always check array bounds in alc_get_line_out_pfx
  - LP: #1084539
* NLM: nlm_lookup_file() may return NLMv4-specific error codes
  - LP: #1084539
* x86: Exclude E820_RESERVED regions and memory holes above 4 GB from
  direct mapping.
  - LP: #1084539
* SUNRPC: Prevent kernel stack corruption on long values of flush
  - LP: #1084539
* USB: cdc-acm: fix pipe type of write endpoint
  - LP: #1084539
* usb: acm: fix the computation of the number of data bits
  - LP: #1084539
* usb: host: xhci: New system added for Compliance Mode Patch on
  SN65LVPE502CP
  - LP: #1084539
* USB: option: blacklist net interface on ZTE devices
  - LP: #1084539
* USB: option: add more ZTE devices
  - LP: #1084539
* ext4: race-condition protection for
  ext4_convert_unwritten_extents_endio
  - LP: #1084539
* ext4: fix metadata checksum calculation for the superblock
  - LP: #1084539
* nohz: Fix idle ticks in cpu summary line of /proc/stat
  - LP: #1084539
* ring-buffer: Check for uninitialized cpu buffer before resizing
  - LP: #1084539
* Bluetooth: SMP: Fix setting unknown auth_req bits
  - LP: #1084539
* oprofile, x86: Fix wrapping bug in op_x86_get_ctrl()
  - LP: #1084539
* cfg80211/mac80211: avoid state mishmash on deauth
  - LP: #1084539
* mac80211: check if key has TKIP type before updating IV
  - LP: #1084539
* mac80211: use ieee80211_free_txskb in a few more places
  - LP: #1084539
* bcma: fix unregistration of cores
  - LP: #1084539
* net/wireless: ipw2200: Fix panic occurring in
  ipw_handle_promiscuous_tx()
  - LP: #1084539
* iwlwifi: fix 6000 series channel switch command
  - LP: #1084539
* cgroup: notify_on_release may not be triggered in some cases
  - LP: #1084539
* dt: Document: correct tegra20/30 pinctrl slew-rate name
  - LP: #1084539
* pinctrl: tegra: set low power mode bank width to 2
  - LP: #1084539
* pinctrl: tegra: correct bank for pingroup and drv pingroup
  - LP: #1084539
* s390: fix linker script for 31 bit builds
  - LP: #1084539
* pinctrl: remove mutex lock in groups show
  - LP: #1084539
* xen/x86: don't corrupt %eip when returning from a signal handler
  - LP: #1084539
* ALSA: hda - Fix silent headphone output from Toshiba P200
  - LP: #1084539
* ext4: Checksum the block bitmap properly with bigalloc enabled
  - LP: #1084539
* ARM: 7559/1: smp: switch away from the idmap before updating
  init_mm.mm_count
  - LP: #1084539
* usb hub: send clear_tt_buffer_complete events when canceling TT clear
  work
  - LP: #1084539
* staging: comedi: amplc_pc236: fix invalid register access during detach
  - LP: #1084539
* Staging: android: binder: Fix memory leak on thread/process exit
  - LP: #1084539
* Staging: android: binder: Allow using highmem for binder buffers
  - LP: #1084539
* ext4: Avoid underflow in ext4_trim_fs()
  - LP: #1084539
* cpufreq / powernow-k8: Remove usage of smp_processor_id() in
  preemptible code
  - LP: #1084539
* extcon: Unregister compat class at module unload to fix oops
  - LP: #1084539
* extcon: unregister compat link on cleanup
  - LP: #1084539
* pinctrl: fix missing unlock on error in pinctrl_groups_show()
  - LP: #1084539
* arch/tile: avoid generating .eh_frame information in modules
  - LP: #1084539
* drm/radeon: add some new SI PCI ids
  - LP: #1084539
* drm/radeon: add error output if VM CS fails on cayman
  - LP: #1084539
* xhci: fix endianness in xhci_calculate_intel_u2_timeout
  - LP: #1084539
* xhci: fix integer overflow
  - LP: #1084539
* dmaengine: imx-dma: fix missing unlock on error in imxdma_xfer_desc()
  - LP: #1084539
* x86-64: Fix page table accounting
  - LP: #1084539
* dmaengine: sirf: fix a typo in dma_prep_interleaved
  - LP: #1084539
* dmaengine: sirf: fix a typo in moving running dma_desc to active queue
  - LP: #1084539
* amd64_edac: __amd64_set_scrub_rate(): avoid overindexing scrubrates[]
  - LP: #1084539
* SUNRPC: Clear the connect flag when socket state is TCP_CLOSE_WAIT
  - LP: #1084539
* SUNRPC: Prevent races in xs_abort_connection()
  - LP: #1084539
* SUNRPC: Get rid of the xs_error_report socket callback
  - LP: #1084539
* iommu/tegra: smmu: Fix deadly typo
  - LP: #1084539
* ARM: at91/tc: fix typo in the DT document
  - LP: #1084539
* ARM: at91: at91sam9g10: fix SOC type detection
  - LP: #1084539
* ARM: at91/i2c: change id to let i2c-gpio work
  - LP: #1084539
* b43: Fix oops on unload when firmware not found
  - LP: #1084539
* USB: serial: Fix memory leak in sierra_release()
  - LP: #1084539
* x86, mm: Trim memory in memblock to be page aligned
  - LP: #1084539
* x86, mm: Use memblock memory loop instead of e820_RAM
  - LP: #1084539
* usb-storage: add unusual_devs entry for Casio EX-N1 digital camera
  - LP: #1084539
* Drivers: hv: Cleanup error handling in vmbus_open()
  - LP: #1084539
* sysfs: sysfs_pathname/sysfs_add_one: Use strlcat() instead of strcat()
  - LP: #1084539
* vhost: fix mergeable bufs on BE hosts
  - LP: #1084539
* USB: metro-usb: fix io after disconnect
  - LP: #1084539
* USB: whiteheat: fix memory leak in error path
  - LP: #1084539
* USB: quatech2: fix memory leak in error path
  - LP: #1084539
* USB: quatech2: fix io after disconnect
  - LP: #1084539
* USB: opticon: fix DMA from stack
  - LP: #1084539
* USB: opticon: fix memory leak in error path
  - LP: #1084539
* USB: mct_u232: fix broken close
  - LP: #1084539
* USB: sierra: fix memory leak in attach error path
  - LP: #1084539
* USB: sierra: fix memory leak in probe error path
  - LP: #1084539
* USB: mos7840: fix urb leak at release
  - LP: #1084539
* USB: mos7840: fix port-device leak in error path
  - LP: #1084539
* USB: mos7840: remove NULL-urb submission
  - LP: #1084539
* USB: mos7840: remove invalid disconnect handling
  - LP: #1084539
* ehci: fix Lucid nohandoff pci quirk to be more generic with BIOS
  versions
  - LP: #1084539
* ehci: Add yet-another Lucid nohandoff pci quirk
  - LP: #1084539
* xhci: Fix potential NULL ptr deref in command cancellation.
  - LP: #1084539
* freezer: exec should clear PF_NOFREEZE along with PF_KTHREAD
  - LP: #1084539
* mm: fix XFS oops due to dirty pages without buffers on s390
  - LP: #1084539
* genalloc: stop crashing the system when destroying a pool
  - LP: #1084539
* drivers/rtc/rtc-imxdi.c: add missing spin lock initialization
  - LP: #1084539
* gen_init_cpio: avoid stack overflow when expanding
  - LP: #1084539
* fs/compat_ioctl.c: VIDEO_SET_SPU_PALETTE missing error check
  - LP: #1084539
* qmi_wwan/cdc_ether: move Novatel 551 and E362 to qmi_wwan
  - LP: #1084539
* efi: Defer freeing boot services memory until after ACPI init
  - LP: #1084539
* x86: efi: Turn off efi_enabled after setup on mixed fw/kernel
  - LP: #1082059, #1084539
* target: Re-add explicit zeroing of INQUIRY bounce buffer memory
  - LP: #1084539
* ARM: 7566/1: vfp: fix save and restore when running on pre-VFPv3 and
  CONFIG_VFPv3 set
  - LP: #1084539
* libceph: drop declaration of ceph_con_get()
  - LP: #1084539
* x86, mm: Find_early_table_space based on ranges that are actually being
  mapped
  - LP: #1084539
* x86, mm: Undo incorrect revert in arch/x86/mm/init.c
  - LP: #1084539
* Linux 3.5.7.1
  - LP: #1084539
* ALSA: hda - Cirrus: Correctly clear line_out_pins when moving to
  speaker
  - LP: #1076840
* Bluetooth: ath3k: Add support for VAIO VPCEH [0489:e027]
  - LP: #898826
* i915_hsw: drm/i915: Reserve ioctl numbers for set/get_caching
  - LP: #1085245
* i915_hsw: drm: Export drm_probe_ddc()
  - LP: #1085245
* i915_hsw: drm: remove the raw_edid field from struct drm_display_info
  - LP: #1085245
* i915_hsw: drm/i915: fix hsw uncached pte
  - LP: #1085245
* i915_hsw: drm/fb-helper: delay hotplug handling when partially bound
  - LP: #1085245
* i915_hsw: drm/fb helper: don't call drm_crtc_helper_set_config
  - LP: #1085245
* i915_hsw: drm/fb-helper: don't clobber output routing in setup_crtcs
  - LP: #1085245
* i915_hsw: drm/fb helper: don't call drm_helper_connector_dpms directly
  - LP: #1085245
* i915_hsw: drm/edid: Fix potential memory leak in edid_load()
  - LP: #1085245

/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
        if ((dev_priv->irq_mask & mask) != 0) {
                dev_priv->irq_mask &= ~mask;
                I915_WRITE(DEIMR, dev_priv->irq_mask);
                POSTING_READ(DEIMR);
        }
}

static inline void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
        if ((dev_priv->irq_mask & mask) != mask) {
                dev_priv->irq_mask |= mask;
                I915_WRITE(DEIMR, dev_priv->irq_mask);
                POSTING_READ(DEIMR);
        }
}

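/*
 * Enable/disable individual event sources in a pipe's PIPESTAT register.
 * The high half of the register holds the enable bits, the low half the
 * corresponding write-1-to-clear status bits; that is why the enable path
 * below also writes mask >> 16 to ack any already-pending event.
 */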
void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
        if ((dev_priv->pipestat[pipe] & mask) != mask) {
                u32 reg = PIPESTAT(pipe);

                dev_priv->pipestat[pipe] |= mask;
                /* Enable the interrupt, clear any pending status */
                I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
                POSTING_READ(reg);
        }
}

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
        if ((dev_priv->pipestat[pipe] & mask) != 0) {
                u32 reg = PIPESTAT(pipe);

                dev_priv->pipestat[pipe] &= ~mask;
                I915_WRITE(reg, dev_priv->pipestat[pipe]);
                POSTING_READ(reg);
        }
}

/**
 * intel_enable_asle - enable ASLE interrupt for OpRegion
 */
void intel_enable_asle(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        unsigned long irqflags;

        /* FIXME: opregion/asle for VLV */
        if (IS_VALLEYVIEW(dev))
                return;

        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

        if (HAS_PCH_SPLIT(dev))
                ironlake_enable_display_irq(dev_priv, DE_GSE);
        else {
                i915_enable_pipestat(dev_priv, 1,
                                     PIPE_LEGACY_BLC_EVENT_ENABLE);
                if (INTEL_INFO(dev)->gen >= 4)
                        i915_enable_pipestat(dev_priv, 0,
                                             PIPE_LEGACY_BLC_EVENT_ENABLE);
        }

        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
                                                                      pipe);

        return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long high_frame;
        unsigned long low_frame;
        u32 high1, high2, low;

        if (!i915_pipe_enabled(dev, pipe)) {
                DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
                                 "pipe %c\n", pipe_name(pipe));
                return 0;
        }

        high_frame = PIPEFRAME(pipe);
        low_frame = PIPEFRAMEPIXEL(pipe);

        /*
         * High & low register fields aren't synchronized, so make sure
         * we get a low value that's stable across two reads of the high
         * register.
         */
        do {
                high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
                low   = I915_READ(low_frame)  & PIPE_FRAME_LOW_MASK;
                high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
        } while (high1 != high2);

        high1 >>= PIPE_FRAME_HIGH_SHIFT;
        low >>= PIPE_FRAME_LOW_SHIFT;
        return (high1 << 8) | low;
}

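/* g4x and newer have a dedicated hardware frame counter register, so no
 * high/low reconstruction is needed here. */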
static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int reg = PIPE_FRMCOUNT_GM45(pipe);

        if (!i915_pipe_enabled(dev, pipe)) {
                DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
                                 "pipe %c\n", pipe_name(pipe));
                return 0;
        }

        return I915_READ(reg);
}

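/*
 * Report the current scanout position of a pipe for the vblank timestamping
 * code: *vpos and *hpos receive the beam position, and the return value is
 * a bitmask of DRM_SCANOUTPOS_* flags describing how reliable they are.
 */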
static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
                             int *vpos, int *hpos)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u32 vbl = 0, position = 0;
        int vbl_start, vbl_end, htotal, vtotal;
        bool in_vbl = true;
        int ret = 0;
        enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
                                                                      pipe);

        if (!i915_pipe_enabled(dev, pipe)) {
                DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
                                 "pipe %c\n", pipe_name(pipe));
                return 0;
        }

        /* Get vtotal. */
        vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);

        if (INTEL_INFO(dev)->gen >= 4) {
                /* No obvious pixelcount register. Only query vertical
                 * scanout position from Display scan line register.
                 */
                position = I915_READ(PIPEDSL(pipe));

                /* Decode into vertical scanout position. Don't have
                 * horizontal scanout position.
                 */
                *vpos = position & 0x1fff;
                *hpos = 0;
        } else {
                /* Have access to pixelcount since start of frame.
                 * We can split this into vertical and horizontal
                 * scanout position.
                 */
                position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

                htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
                *vpos = position / htotal;
                *hpos = position - (*vpos * htotal);
        }

        /* Query vblank area. */
        vbl = I915_READ(VBLANK(cpu_transcoder));

        /* Test position against vblank region. */
        vbl_start = vbl & 0x1fff;
        vbl_end = (vbl >> 16) & 0x1fff;

        if ((*vpos < vbl_start) || (*vpos > vbl_end))
                in_vbl = false;

        /* Inside "upper part" of vblank area? Apply corrective offset: */
        if (in_vbl && (*vpos >= vbl_start))
                *vpos = *vpos - vtotal;

        /* Readouts valid? */
        if (vbl > 0)
                ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

        /* In vblank? */
        if (in_vbl)
                ret |= DRM_SCANOUTPOS_INVBL;

        return ret;
}

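/* Compute a precise vblank timestamp; the DRM core helper does the actual
 * work, based on the scanout position reported above. */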
static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
                              int *max_error,
                              struct timeval *vblank_time,
                              unsigned flags)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc;

        if (pipe < 0 || pipe >= dev_priv->num_pipe) {
                DRM_ERROR("Invalid crtc %d\n", pipe);
                return -EINVAL;
        }

        /* Get drm_crtc to timestamp: */
        crtc = intel_get_crtc_for_pipe(dev, pipe);
        if (crtc == NULL) {
                DRM_ERROR("Invalid crtc %d\n", pipe);
                return -EINVAL;
        }

        if (!crtc->enabled) {
                DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
                return -EBUSY;
        }

        /* Helper routine in DRM core does all the work: */
        return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
                                                     vblank_time, flags,
                                                     crtc);
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
static void i915_hotplug_work_func(struct work_struct *work)
{
        drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
                                                    hotplug_work);
        struct drm_device *dev = dev_priv->dev;
        struct drm_mode_config *mode_config = &dev->mode_config;
        struct intel_encoder *encoder;

        mutex_lock(&mode_config->mutex);
        DRM_DEBUG_KMS("running encoder hotplug functions\n");

        list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
                if (encoder->hot_plug)
                        encoder->hot_plug(encoder);

        mutex_unlock(&mode_config->mutex);

        /* Just fire off a uevent and let userspace tell us what to do */
        drm_helper_hpd_irq_event(dev);
}

/* defined in intel_pm.c */
extern spinlock_t mchdev_lock;

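/* Step the Ironlake DRPS delay up or down when the hardware signals that
 * the average GPU busyness crossed the configured thresholds. */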
static void ironlake_handle_rps_change(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 busy_up, busy_down, max_avg, min_avg;
        u8 new_delay;
        unsigned long flags;

        spin_lock_irqsave(&mchdev_lock, flags);

        I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

        new_delay = dev_priv->ips.cur_delay;

        I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
        busy_up = I915_READ(RCPREVBSYTUPAVG);
        busy_down = I915_READ(RCPREVBSYTDNAVG);
        max_avg = I915_READ(RCBMAXAVG);
        min_avg = I915_READ(RCBMINAVG);

        /* Handle RCS change request from hw */
        if (busy_up > max_avg) {
                if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
                        new_delay = dev_priv->ips.cur_delay - 1;
                if (new_delay < dev_priv->ips.max_delay)
                        new_delay = dev_priv->ips.max_delay;
        } else if (busy_down < min_avg) {
                if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
                        new_delay = dev_priv->ips.cur_delay + 1;
                if (new_delay > dev_priv->ips.min_delay)
                        new_delay = dev_priv->ips.min_delay;
        }

        if (ironlake_set_drps(dev, new_delay))
                dev_priv->ips.cur_delay = new_delay;

        spin_unlock_irqrestore(&mchdev_lock, flags);
}

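/* Wake any waiters on the ring's seqno and re-arm the hangcheck timer. */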
static void notify_ring(struct drm_device *dev,
                        struct intel_ring_buffer *ring)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (ring->obj == NULL)
                return;

        trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));

        wake_up_all(&ring->irq_queue);
        if (i915_enable_hangcheck) {
                dev_priv->hangcheck_count = 0;
                mod_timer(&dev_priv->hangcheck_timer,
                          round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
        }
}

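/* Deferred (workqueue) half of the GEN6+ RPS interrupt: pick the next
 * frequency step and apply it outside of interrupt context. */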
static void gen6_pm_rps_work(struct work_struct *work)
{
        drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
                                                    rps.work);
        u32 pm_iir, pm_imr;
        u8 new_delay;

        spin_lock_irq(&dev_priv->rps.lock);
        pm_iir = dev_priv->rps.pm_iir;
        dev_priv->rps.pm_iir = 0;
        pm_imr = I915_READ(GEN6_PMIMR);
        I915_WRITE(GEN6_PMIMR, 0);
        spin_unlock_irq(&dev_priv->rps.lock);

        if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
                return;

        mutex_lock(&dev_priv->rps.hw_lock);

        if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
                new_delay = dev_priv->rps.cur_delay + 1;
        else
                new_delay = dev_priv->rps.cur_delay - 1;

        /* sysfs frequency interfaces may have snuck in while servicing the
         * interrupt
         */
        if (!(new_delay > dev_priv->rps.max_delay ||
              new_delay < dev_priv->rps.min_delay)) {
                gen6_set_rps(dev_priv->dev, new_delay);
        }

        mutex_unlock(&dev_priv->rps.hw_lock);
}

/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows, since statistically
 * the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
        drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
                                                    l3_parity.error_work);
        u32 error_status, row, bank, subbank;
        char *parity_event[5];
        uint32_t misccpctl;
        unsigned long flags;

        /* We must turn off DOP level clock gating to access the L3 registers.
         * In order to prevent a get/put style interface, acquire struct mutex
         * any time we access those registers.
         */
        mutex_lock(&dev_priv->dev->struct_mutex);

        misccpctl = I915_READ(GEN7_MISCCPCTL);
        I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
        POSTING_READ(GEN7_MISCCPCTL);

        error_status = I915_READ(GEN7_L3CDERRST1);
        row = GEN7_PARITY_ERROR_ROW(error_status);
        bank = GEN7_PARITY_ERROR_BANK(error_status);
        subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

        I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
                                    GEN7_L3CDERRST1_ENABLE);
        POSTING_READ(GEN7_L3CDERRST1);

        I915_WRITE(GEN7_MISCCPCTL, misccpctl);

        spin_lock_irqsave(&dev_priv->irq_lock, flags);
        dev_priv->gt_irq_mask &= ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
        I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

        mutex_unlock(&dev_priv->dev->struct_mutex);

        parity_event[0] = "L3_PARITY_ERROR=1";
        parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
        parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
        parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
        parity_event[4] = NULL;

        kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
                           KOBJ_CHANGE, parity_event);

        DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
                  row, bank, subbank);

        kfree(parity_event[3]);
        kfree(parity_event[2]);
        kfree(parity_event[1]);
}

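/* Mask further L3 parity interrupts and defer the rest of the handling to
 * ivybridge_parity_work() above. */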
static void ivybridge_handle_parity_error(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long flags;

        if (!HAS_L3_GPU_CACHE(dev))
                return;

        spin_lock_irqsave(&dev_priv->irq_lock, flags);
        dev_priv->gt_irq_mask |= GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
        I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

        queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

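/* GT interrupt dispatch for GEN6+: ring wakeups, command stream errors and
 * L3 parity errors. */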
static void snb_gt_irq_handler(struct drm_device *dev,
                               struct drm_i915_private *dev_priv,
                               u32 gt_iir)
{
        if (gt_iir & (GEN6_RENDER_USER_INTERRUPT |
                      GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT))
                notify_ring(dev, &dev_priv->ring[RCS]);
        if (gt_iir & GEN6_BSD_USER_INTERRUPT)
                notify_ring(dev, &dev_priv->ring[VCS]);
        if (gt_iir & GEN6_BLITTER_USER_INTERRUPT)
                notify_ring(dev, &dev_priv->ring[BCS]);

        if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT |
                      GT_GEN6_BSD_CS_ERROR_INTERRUPT |
                      GT_RENDER_CS_ERROR_INTERRUPT)) {
                DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
                i915_handle_error(dev, false);
        }

        if (gt_iir & GT_GEN7_L3_PARITY_ERROR_INTERRUPT)
                ivybridge_handle_parity_error(dev);
}

static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
                                u32 pm_iir)
{
        unsigned long flags;

        /*
         * IIR bits should never already be set because IMR should
         * prevent an interrupt from being shown in IIR. The warning
         * displays a case where we've unsafely cleared
         * dev_priv->rps.pm_iir. Although missing an interrupt of the same
         * type is not a problem, it indicates a problem in the logic.
         *
         * The mask bit in IMR is cleared by dev_priv->rps.work.
         */

        spin_lock_irqsave(&dev_priv->rps.lock, flags);
        dev_priv->rps.pm_iir |= pm_iir;
        I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
        POSTING_READ(GEN6_PMIMR);
        spin_unlock_irqrestore(&dev_priv->rps.lock, flags);

        queue_work(dev_priv->wq, &dev_priv->rps.work);
}

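/* Top-level interrupt handler for ValleyView; loops until the display, GT
 * and PM IIR registers all read back as zero. */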
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
        struct drm_device *dev = (struct drm_device *) arg;
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u32 iir, gt_iir, pm_iir;
        irqreturn_t ret = IRQ_NONE;
        unsigned long irqflags;
        int pipe;
        u32 pipe_stats[I915_MAX_PIPES];
        bool blc_event = false;

        atomic_inc(&dev_priv->irq_received);

        while (true) {
                iir = I915_READ(VLV_IIR);
                gt_iir = I915_READ(GTIIR);
                pm_iir = I915_READ(GEN6_PMIIR);

                if (gt_iir == 0 && pm_iir == 0 && iir == 0)
                        goto out;

                ret = IRQ_HANDLED;

                snb_gt_irq_handler(dev, dev_priv, gt_iir);

                spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
                for_each_pipe(pipe) {
                        int reg = PIPESTAT(pipe);
                        pipe_stats[pipe] = I915_READ(reg);

                        /*
                         * Clear the PIPE*STAT regs before the IIR
                         */
                        if (pipe_stats[pipe] & 0x8000ffff) {
                                if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
                                        DRM_DEBUG_DRIVER("pipe %c underrun\n",
                                                         pipe_name(pipe));
                                I915_WRITE(reg, pipe_stats[pipe]);
                        }
                }
                spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

                for_each_pipe(pipe) {
                        if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
                                drm_handle_vblank(dev, pipe);

                        if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
                                intel_prepare_page_flip(dev, pipe);
                                intel_finish_page_flip(dev, pipe);
                        }

                        /* Check each pipe for a legacy backlight event, not
                         * just whichever pipe the loop above ended on. */
                        if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
                                blc_event = true;
                }

                /* Consume port.  Then clear IIR or we'll miss events */
                if (iir & I915_DISPLAY_PORT_INTERRUPT) {
                        u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

                        DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
                                         hotplug_status);
                        if (hotplug_status & dev_priv->hotplug_supported_mask)
                                queue_work(dev_priv->wq,
                                           &dev_priv->hotplug_work);

                        I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
                        I915_READ(PORT_HOTPLUG_STAT);
                }

                if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
                        gen6_queue_rps_work(dev_priv, pm_iir);

                I915_WRITE(GTIIR, gt_iir);
                I915_WRITE(GEN6_PMIIR, pm_iir);
                I915_WRITE(VLV_IIR, iir);
        }

out:
        return ret;
}

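/* South (PCH) display interrupt decoding: ibx_irq_handler() covers Ibex
 * Peak, cpt_irq_handler() below covers CougarPoint. */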
static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int pipe;

        if (pch_iir & SDE_HOTPLUG_MASK)
                queue_work(dev_priv->wq, &dev_priv->hotplug_work);

        if (pch_iir & SDE_AUDIO_POWER_MASK)
                DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
                                 (pch_iir & SDE_AUDIO_POWER_MASK) >>
                                 SDE_AUDIO_POWER_SHIFT);

        if (pch_iir & SDE_GMBUS)
                DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");

        if (pch_iir & SDE_AUDIO_HDCP_MASK)
                DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

        if (pch_iir & SDE_AUDIO_TRANS_MASK)
                DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

        if (pch_iir & SDE_POISON)
                DRM_ERROR("PCH poison interrupt\n");

        if (pch_iir & SDE_FDI_MASK)
                for_each_pipe(pipe)
                        DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
                                         pipe_name(pipe),
                                         I915_READ(FDI_RX_IIR(pipe)));

        if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
                DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

        if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
                DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

        if (pch_iir & SDE_TRANSB_FIFO_UNDER)
                DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n");
        if (pch_iir & SDE_TRANSA_FIFO_UNDER)
                DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
}

static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int pipe;

        if (pch_iir & SDE_HOTPLUG_MASK_CPT)
                queue_work(dev_priv->wq, &dev_priv->hotplug_work);

        if (pch_iir & SDE_AUDIO_POWER_MASK_CPT)
                DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
                                 (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
                                 SDE_AUDIO_POWER_SHIFT_CPT);

        if (pch_iir & SDE_AUX_MASK_CPT)
                DRM_DEBUG_DRIVER("AUX channel interrupt\n");

        if (pch_iir & SDE_GMBUS_CPT)
                DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");

        if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
                DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

        if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
                DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

        if (pch_iir & SDE_FDI_MASK_CPT)
                for_each_pipe(pipe)
                        DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
                                         pipe_name(pipe),
                                         I915_READ(FDI_RX_IIR(pipe)));
}

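/* Top-level interrupt handler for Ivy Bridge: masks the master interrupt,
 * then services the GT, display and PM interrupt registers in turn. */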
static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
{
        struct drm_device *dev = (struct drm_device *) arg;
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u32 de_iir, gt_iir, de_ier, pm_iir;
        irqreturn_t ret = IRQ_NONE;
        int i;

        atomic_inc(&dev_priv->irq_received);

        /* disable master interrupt before clearing iir */
        de_ier = I915_READ(DEIER);
        I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);

        gt_iir = I915_READ(GTIIR);
        if (gt_iir) {
                snb_gt_irq_handler(dev, dev_priv, gt_iir);
                I915_WRITE(GTIIR, gt_iir);
                ret = IRQ_HANDLED;
        }

        de_iir = I915_READ(DEIIR);
        if (de_iir) {
                if (de_iir & DE_GSE_IVB)
                        intel_opregion_gse_intr(dev);

                for (i = 0; i < 3; i++) {
                        if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
                                drm_handle_vblank(dev, i);
                        if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
                                intel_prepare_page_flip(dev, i);
                                intel_finish_page_flip_plane(dev, i);
                        }
                }

                /* check event from PCH */
                if (de_iir & DE_PCH_EVENT_IVB) {
                        u32 pch_iir = I915_READ(SDEIIR);

                        cpt_irq_handler(dev, pch_iir);

                        /* clear PCH hotplug event before clearing the CPU irq */
                        I915_WRITE(SDEIIR, pch_iir);
                }

                I915_WRITE(DEIIR, de_iir);
                ret = IRQ_HANDLED;
        }

        pm_iir = I915_READ(GEN6_PMIIR);
        if (pm_iir) {
                if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
                        gen6_queue_rps_work(dev_priv, pm_iir);
                I915_WRITE(GEN6_PMIIR, pm_iir);
                ret = IRQ_HANDLED;
        }

        I915_WRITE(DEIER, de_ier);
        POSTING_READ(DEIER);

        return ret;
}

static void ilk_gt_irq_handler(struct drm_device *dev,
                               struct drm_i915_private *dev_priv,
                               u32 gt_iir)
{
        if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
                notify_ring(dev, &dev_priv->ring[RCS]);
        if (gt_iir & GT_BSD_USER_INTERRUPT)
                notify_ring(dev, &dev_priv->ring[VCS]);
}

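/* Top-level interrupt handler for Ironlake and Sandy Bridge. */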
static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
        struct drm_device *dev = (struct drm_device *) arg;
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int ret = IRQ_NONE;
        u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;

        atomic_inc(&dev_priv->irq_received);

        /* disable master interrupt before clearing iir */
        de_ier = I915_READ(DEIER);
        I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
        POSTING_READ(DEIER);

        de_iir = I915_READ(DEIIR);
        gt_iir = I915_READ(GTIIR);
        pch_iir = I915_READ(SDEIIR);
        pm_iir = I915_READ(GEN6_PMIIR);

        if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 &&
            (!IS_GEN6(dev) || pm_iir == 0))
                goto done;

        ret = IRQ_HANDLED;

        if (IS_GEN5(dev))
                ilk_gt_irq_handler(dev, dev_priv, gt_iir);
        else
                snb_gt_irq_handler(dev, dev_priv, gt_iir);

        if (de_iir & DE_GSE)
                intel_opregion_gse_intr(dev);

        if (de_iir & DE_PIPEA_VBLANK)
                drm_handle_vblank(dev, 0);

        if (de_iir & DE_PIPEB_VBLANK)
                drm_handle_vblank(dev, 1);

        if (de_iir & DE_PLANEA_FLIP_DONE) {
                intel_prepare_page_flip(dev, 0);
                intel_finish_page_flip_plane(dev, 0);
        }

        if (de_iir & DE_PLANEB_FLIP_DONE) {
                intel_prepare_page_flip(dev, 1);
                intel_finish_page_flip_plane(dev, 1);
        }

        /* check event from PCH */
        if (de_iir & DE_PCH_EVENT) {
                if (HAS_PCH_CPT(dev))
                        cpt_irq_handler(dev, pch_iir);
                else
                        ibx_irq_handler(dev, pch_iir);
        }

        if (IS_GEN5(dev) && (de_iir & DE_PCU_EVENT))
                ironlake_handle_rps_change(dev);

        if (IS_GEN6(dev) && (pm_iir & GEN6_PM_DEFERRED_EVENTS))
                gen6_queue_rps_work(dev_priv, pm_iir);

        /* should clear PCH hotplug event before clearing the CPU irq */
        I915_WRITE(SDEIIR, pch_iir);
        I915_WRITE(GTIIR, gt_iir);
        I915_WRITE(DEIIR, de_iir);
        I915_WRITE(GEN6_PMIIR, pm_iir);

done:
        I915_WRITE(DEIER, de_ier);
        POSTING_READ(DEIER);

        return ret;
}

833
/**
 
834
 * i915_error_work_func - do process context error handling work
 
835
 * @work: work struct
 
836
 *
 
837
 * Fire an error uevent so userspace can see that a hang or error
 
838
 * was detected.
 
839
 */
 
840
static void i915_error_work_func(struct work_struct *work)
 
841
{
 
842
        drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
 
843
                                                    error_work);
 
844
        struct drm_device *dev = dev_priv->dev;
 
845
        char *error_event[] = { "ERROR=1", NULL };
 
846
        char *reset_event[] = { "RESET=1", NULL };
 
847
        char *reset_done_event[] = { "ERROR=0", NULL };
 
848
 
 
849
        kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
 
850
 
 
851
        if (atomic_read(&dev_priv->mm.wedged)) {
 
852
                DRM_DEBUG_DRIVER("resetting chip\n");
 
853
                kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
 
854
                if (!i915_reset(dev)) {
 
855
                        atomic_set(&dev_priv->mm.wedged, 0);
 
856
                        kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
 
857
                }
 
858
                complete_all(&dev_priv->error_completion);
 
859
        }
 
860
}
 
861
 
 
862
/* NB: please notice the memset */
 
863
static void i915_get_extra_instdone(struct drm_device *dev,
 
864
                                    uint32_t *instdone)
 
865
{
 
866
        struct drm_i915_private *dev_priv = dev->dev_private;
 
867
        memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
 
868
 
 
869
        switch(INTEL_INFO(dev)->gen) {
 
870
        case 2:
 
871
        case 3:
 
872
                instdone[0] = I915_READ(INSTDONE);
 
873
                break;
 
874
        case 4:
 
875
        case 5:
 
876
        case 6:
 
877
                instdone[0] = I915_READ(INSTDONE_I965);
 
878
                instdone[1] = I915_READ(INSTDONE1);
 
879
                break;
 
880
        default:
 
881
                WARN_ONCE(1, "Unsupported platform\n");
 
882
        case 7:
 
883
                instdone[0] = I915_READ(GEN7_INSTDONE_1);
 
884
                instdone[1] = I915_READ(GEN7_SC_INSTDONE);
 
885
                instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
 
886
                instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
 
887
                break;
 
888
        }
 
889
}
 
890
 
 
891
#ifdef CONFIG_DEBUG_FS
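
/*
 * The error capture code below runs in atomic (interrupt) context:
 * allocations use GFP_ATOMIC and object pages are copied through
 * atomic mappings (io_mapping_map_atomic_wc()/kmap_atomic()).
 */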
 
static struct drm_i915_error_object *
i915_error_object_create(struct drm_i915_private *dev_priv,
                         struct drm_i915_gem_object *src)
{
        struct drm_i915_error_object *dst;
        int i, count;
        u32 reloc_offset;

        if (src == NULL || src->pages == NULL)
                return NULL;

        count = src->base.size / PAGE_SIZE;

        dst = kmalloc(sizeof(*dst) + count * sizeof(u32 *), GFP_ATOMIC);
        if (dst == NULL)
                return NULL;

        reloc_offset = src->gtt_offset;
        for (i = 0; i < count; i++) {
                unsigned long flags;
                void *d;

                d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
                if (d == NULL)
                        goto unwind;

                local_irq_save(flags);
                if (reloc_offset < dev_priv->mm.gtt_mappable_end &&
                    src->has_global_gtt_mapping) {
                        void __iomem *s;

                        /* Simply ignore tiling or any overlapping fence.
                         * It's part of the error state, and this hopefully
                         * captures what the GPU read.
                         */

                        s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
                                                     reloc_offset);
                        memcpy_fromio(d, s, PAGE_SIZE);
                        io_mapping_unmap_atomic(s);
                } else {
                        struct page *page;
                        void *s;

                        page = i915_gem_object_get_page(src, i);

                        drm_clflush_pages(&page, 1);

                        s = kmap_atomic(page);
                        memcpy(d, s, PAGE_SIZE);
                        kunmap_atomic(s);

                        drm_clflush_pages(&page, 1);
                }
                local_irq_restore(flags);

                dst->pages[i] = d;

                reloc_offset += PAGE_SIZE;
        }
        dst->page_count = count;
        dst->gtt_offset = src->gtt_offset;

        return dst;

unwind:
        while (i--)
                kfree(dst->pages[i]);
        kfree(dst);
        return NULL;
}

static void
i915_error_object_free(struct drm_i915_error_object *obj)
{
        int page;

        if (obj == NULL)
                return;

        for (page = 0; page < obj->page_count; page++)
                kfree(obj->pages[page]);

        kfree(obj);
}

void
i915_error_state_free(struct kref *error_ref)
{
        struct drm_i915_error_state *error = container_of(error_ref,
                                                          typeof(*error), ref);
        int i;

        for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
                i915_error_object_free(error->ring[i].batchbuffer);
                i915_error_object_free(error->ring[i].ringbuffer);
                kfree(error->ring[i].requests);
        }

        kfree(error->active_bo);
        kfree(error->overlay);
        kfree(error);
}
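
/*
 * capture_bo() snapshots the bookkeeping state of a single GEM
 * object; err->pinned ends up 1 for kernel pins, -1 for user pins
 * and 0 for unpinned objects.
 */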
 
static void capture_bo(struct drm_i915_error_buffer *err,
                       struct drm_i915_gem_object *obj)
{
        err->size = obj->base.size;
        err->name = obj->base.name;
        err->rseqno = obj->last_read_seqno;
        err->wseqno = obj->last_write_seqno;
        err->gtt_offset = obj->gtt_offset;
        err->read_domains = obj->base.read_domains;
        err->write_domain = obj->base.write_domain;
        err->fence_reg = obj->fence_reg;
        err->pinned = 0;
        if (obj->pin_count > 0)
                err->pinned = 1;
        if (obj->user_pin_count > 0)
                err->pinned = -1;
        err->tiling = obj->tiling_mode;
        err->dirty = obj->dirty;
        err->purgeable = obj->madv != I915_MADV_WILLNEED;
        err->ring = obj->ring ? obj->ring->id : -1;
        err->cache_level = obj->cache_level;
}

static u32 capture_active_bo(struct drm_i915_error_buffer *err,
                             int count, struct list_head *head)
{
        struct drm_i915_gem_object *obj;
        int i = 0;

        list_for_each_entry(obj, head, mm_list) {
                capture_bo(err++, obj);
                if (++i == count)
                        break;
        }

        return i;
}

static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
                             int count, struct list_head *head)
{
        struct drm_i915_gem_object *obj;
        int i = 0;

        list_for_each_entry(obj, head, gtt_list) {
                if (obj->pin_count == 0)
                        continue;

                capture_bo(err++, obj);
                if (++i == count)
                        break;
        }

        return i;
}
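
/*
 * Fence register layout varies with the generation: gen4+ have 16
 * 64-bit registers, gen2/3 use 8 32-bit ones, plus 8 more on
 * 945G/945GM/G33 (hence the fall-through from case 3 to case 2).
 */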
 
static void i915_gem_record_fences(struct drm_device *dev,
                                   struct drm_i915_error_state *error)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int i;

        /* Fences */
        switch (INTEL_INFO(dev)->gen) {
        case 7:
        case 6:
                for (i = 0; i < 16; i++)
                        error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
                break;
        case 5:
        case 4:
                for (i = 0; i < 16; i++)
                        error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
                break;
        case 3:
                if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
                        for (i = 0; i < 8; i++)
                                error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
        case 2:
                for (i = 0; i < 8; i++)
                        error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
                break;
        }
}
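
/*
 * Find the batchbuffer the GPU was chewing on when the hang was
 * detected: the first object still active on this ring that was
 * read through the command domain.
 */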
 
static struct drm_i915_error_object *
i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
                             struct intel_ring_buffer *ring)
{
        struct drm_i915_gem_object *obj;
        u32 seqno;

        if (!ring->get_seqno)
                return NULL;

        seqno = ring->get_seqno(ring, false);
        list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
                if (obj->ring != ring)
                        continue;

                if (i915_seqno_passed(seqno, obj->last_read_seqno))
                        continue;

                if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
                        continue;

                /* We need to copy these to an anonymous buffer as the simplest
                 * method to avoid being overwritten by userspace.
                 */
                return i915_error_object_create(dev_priv, obj);
        }

        return NULL;
}

static void i915_record_ring_state(struct drm_device *dev,
                                   struct drm_i915_error_state *error,
                                   struct intel_ring_buffer *ring)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (INTEL_INFO(dev)->gen >= 6) {
                error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
                error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
                error->semaphore_mboxes[ring->id][0]
                        = I915_READ(RING_SYNC_0(ring->mmio_base));
                error->semaphore_mboxes[ring->id][1]
                        = I915_READ(RING_SYNC_1(ring->mmio_base));
        }

        if (INTEL_INFO(dev)->gen >= 4) {
                error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
                error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
                error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
                error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
                error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
                if (ring->id == RCS)
                        error->bbaddr = I915_READ64(BB_ADDR);
        } else {
                error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
                error->ipeir[ring->id] = I915_READ(IPEIR);
                error->ipehr[ring->id] = I915_READ(IPEHR);
                error->instdone[ring->id] = I915_READ(INSTDONE);
        }

        error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
        error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
        error->seqno[ring->id] = ring->get_seqno(ring, false);
        error->acthd[ring->id] = intel_ring_get_active_head(ring);
        error->head[ring->id] = I915_READ_HEAD(ring);
        error->tail[ring->id] = I915_READ_TAIL(ring);

        error->cpu_ring_head[ring->id] = ring->head;
        error->cpu_ring_tail[ring->id] = ring->tail;
}

static void i915_gem_record_rings(struct drm_device *dev,
                                  struct drm_i915_error_state *error)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring;
        struct drm_i915_gem_request *request;
        int i, count;

        for_each_ring(ring, dev_priv, i) {
                i915_record_ring_state(dev, error, ring);

                error->ring[i].batchbuffer =
                        i915_error_first_batchbuffer(dev_priv, ring);

                error->ring[i].ringbuffer =
                        i915_error_object_create(dev_priv, ring->obj);

                count = 0;
                list_for_each_entry(request, &ring->request_list, list)
                        count++;

                error->ring[i].num_requests = count;
                error->ring[i].requests =
                        kmalloc(count*sizeof(struct drm_i915_error_request),
                                GFP_ATOMIC);
                if (error->ring[i].requests == NULL) {
                        error->ring[i].num_requests = 0;
                        continue;
                }

                count = 0;
                list_for_each_entry(request, &ring->request_list, list) {
                        struct drm_i915_error_request *erq;

                        erq = &error->ring[i].requests[count++];
                        erq->seqno = request->seqno;
                        erq->jiffies = request->emitted_jiffies;
                        erq->tail = request->tail;
                }
        }
}

/**
 * i915_capture_error_state - capture an error record for later analysis
 * @dev: drm device
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error.  Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
static void i915_capture_error_state(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;
        struct drm_i915_error_state *error;
        unsigned long flags;
        int i, pipe;

        spin_lock_irqsave(&dev_priv->error_lock, flags);
        error = dev_priv->first_error;
        spin_unlock_irqrestore(&dev_priv->error_lock, flags);
        if (error)
                return;

        /* Account for pipe specific data like PIPE*STAT */
        error = kzalloc(sizeof(*error), GFP_ATOMIC);
        if (!error) {
                DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
                return;
        }

        DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n",
                 dev->primary->index);

        kref_init(&error->ref);
        error->eir = I915_READ(EIR);
        error->pgtbl_er = I915_READ(PGTBL_ER);
        error->ccid = I915_READ(CCID);

        if (HAS_PCH_SPLIT(dev))
                error->ier = I915_READ(DEIER) | I915_READ(GTIER);
        else if (IS_VALLEYVIEW(dev))
                error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
        else if (IS_GEN2(dev))
                error->ier = I915_READ16(IER);
        else
                error->ier = I915_READ(IER);

        for_each_pipe(pipe)
                error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));

        if (INTEL_INFO(dev)->gen >= 6) {
                error->error = I915_READ(ERROR_GEN6);
                error->done_reg = I915_READ(DONE_REG);
        }

        if (INTEL_INFO(dev)->gen == 7)
                error->err_int = I915_READ(GEN7_ERR_INT);

        i915_get_extra_instdone(dev, error->extra_instdone);

        i915_gem_record_fences(dev, error);
        i915_gem_record_rings(dev, error);

        /* Record buffers on the active and pinned lists. */
        error->active_bo = NULL;
        error->pinned_bo = NULL;

        i = 0;
        list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
                i++;
        error->active_bo_count = i;
        list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
                if (obj->pin_count)
                        i++;
        error->pinned_bo_count = i - error->active_bo_count;

        error->active_bo = NULL;
        error->pinned_bo = NULL;
        if (i) {
                error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
                                           GFP_ATOMIC);
                if (error->active_bo)
                        error->pinned_bo =
                                error->active_bo + error->active_bo_count;
        }

        if (error->active_bo)
                error->active_bo_count =
                        capture_active_bo(error->active_bo,
                                          error->active_bo_count,
                                          &dev_priv->mm.active_list);

        if (error->pinned_bo)
                error->pinned_bo_count =
                        capture_pinned_bo(error->pinned_bo,
                                          error->pinned_bo_count,
                                          &dev_priv->mm.bound_list);

        do_gettimeofday(&error->time);

        error->overlay = intel_overlay_capture_error_state(dev);
        error->display = intel_display_capture_error_state(dev);

        spin_lock_irqsave(&dev_priv->error_lock, flags);
        if (dev_priv->first_error == NULL) {
                dev_priv->first_error = error;
                error = NULL;
        }
        spin_unlock_irqrestore(&dev_priv->error_lock, flags);

        if (error)
                i915_error_state_free(&error->ref);
}

void i915_destroy_error_state(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_error_state *error;
        unsigned long flags;

        spin_lock_irqsave(&dev_priv->error_lock, flags);
        error = dev_priv->first_error;
        dev_priv->first_error = NULL;
        spin_unlock_irqrestore(&dev_priv->error_lock, flags);

        if (error)
                kref_put(&error->ref, i915_error_state_free);
}
#else
#define i915_capture_error_state(x)
#endif
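
/*
 * Dump the error identity register (EIR) to the log and clear it;
 * any bits that refuse to clear are masked in EMR so they cannot
 * keep re-raising the error interrupt.
 */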
 
static void i915_report_and_clear_eir(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t instdone[I915_NUM_INSTDONE_REG];
        u32 eir = I915_READ(EIR);
        int pipe, i;

        if (!eir)
                return;

        pr_err("render error detected, EIR: 0x%08x\n", eir);

        i915_get_extra_instdone(dev, instdone);

        if (IS_G4X(dev)) {
                if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
                        u32 ipeir = I915_READ(IPEIR_I965);

                        pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
                        pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
                        for (i = 0; i < ARRAY_SIZE(instdone); i++)
                                pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
                        pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
                        pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
                        I915_WRITE(IPEIR_I965, ipeir);
                        POSTING_READ(IPEIR_I965);
                }
                if (eir & GM45_ERROR_PAGE_TABLE) {
                        u32 pgtbl_err = I915_READ(PGTBL_ER);
                        pr_err("page table error\n");
                        pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
                        I915_WRITE(PGTBL_ER, pgtbl_err);
                        POSTING_READ(PGTBL_ER);
                }
        }

        if (!IS_GEN2(dev)) {
                if (eir & I915_ERROR_PAGE_TABLE) {
                        u32 pgtbl_err = I915_READ(PGTBL_ER);
                        pr_err("page table error\n");
                        pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
                        I915_WRITE(PGTBL_ER, pgtbl_err);
                        POSTING_READ(PGTBL_ER);
                }
        }

        if (eir & I915_ERROR_MEMORY_REFRESH) {
                pr_err("memory refresh error:\n");
                for_each_pipe(pipe)
                        pr_err("pipe %c stat: 0x%08x\n",
                               pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
                /* pipestat has already been acked */
        }
        if (eir & I915_ERROR_INSTRUCTION) {
                pr_err("instruction error\n");
                pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
                for (i = 0; i < ARRAY_SIZE(instdone); i++)
                        pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
                if (INTEL_INFO(dev)->gen < 4) {
                        u32 ipeir = I915_READ(IPEIR);

                        pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
                        pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
                        pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
                        I915_WRITE(IPEIR, ipeir);
                        POSTING_READ(IPEIR);
                } else {
                        u32 ipeir = I915_READ(IPEIR_I965);

                        pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
                        pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
                        pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
                        pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
                        I915_WRITE(IPEIR_I965, ipeir);
                        POSTING_READ(IPEIR_I965);
                }
        }

        I915_WRITE(EIR, eir);
        POSTING_READ(EIR);
        eir = I915_READ(EIR);
        if (eir) {
                /* Some errors might have become stuck, so mask them. */
                DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
                I915_WRITE(EMR, I915_READ(EMR) | eir);
                I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
        }
}

/**
 * i915_handle_error - handle an error interrupt
 * @dev: drm device
 *
 * Do some basic checking of register state at error interrupt time and
 * dump it to the syslog.  Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs.  Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void i915_handle_error(struct drm_device *dev, bool wedged)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring;
        int i;

        i915_capture_error_state(dev);
        i915_report_and_clear_eir(dev);

        if (wedged) {
                INIT_COMPLETION(dev_priv->error_completion);
                atomic_set(&dev_priv->mm.wedged, 1);

                /* Wake up waiting processes so they don't hang */
                for_each_ring(ring, dev_priv, i)
                        wake_up_all(&ring->irq_queue);
        }

        queue_work(dev_priv->wq, &dev_priv->error_work);
}
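
/*
 * Watchdog for page flips: if the display registers already point
 * at the new scanout address but the flip-pending interrupt never
 * arrived, assume it was missed and complete the flip by hand.
 */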
 
static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct drm_i915_gem_object *obj;
        struct intel_unpin_work *work;
        unsigned long flags;
        bool stall_detected;

        /* Ignore early vblank irqs */
        if (intel_crtc == NULL)
                return;

        spin_lock_irqsave(&dev->event_lock, flags);
        work = intel_crtc->unpin_work;

        if (work == NULL || work->pending || !work->enable_stall_check) {
                /* Either the pending flip IRQ arrived, or we're too early. Don't check */
                spin_unlock_irqrestore(&dev->event_lock, flags);
                return;
        }

        /* Potential stall - if we see that the flip has happened, assume a missed interrupt */
        obj = work->pending_flip_obj;
        if (INTEL_INFO(dev)->gen >= 4) {
                int dspsurf = DSPSURF(intel_crtc->plane);
                stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
                                        obj->gtt_offset;
        } else {
                int dspaddr = DSPADDR(intel_crtc->plane);
                stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
                                                        crtc->y * crtc->fb->pitches[0] +
                                                        crtc->x * crtc->fb->bits_per_pixel/8);
        }

        spin_unlock_irqrestore(&dev->event_lock, flags);

        if (stall_detected) {
                DRM_DEBUG_DRIVER("Pageflip stall detected\n");
                intel_prepare_page_flip(dev, intel_crtc->plane);
        }
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static int i915_enable_vblank(struct drm_device *dev, int pipe)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;

        if (!i915_pipe_enabled(dev, pipe))
                return -EINVAL;

        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
        if (INTEL_INFO(dev)->gen >= 4)
                i915_enable_pipestat(dev_priv, pipe,
                                     PIPE_START_VBLANK_INTERRUPT_ENABLE);
        else
                i915_enable_pipestat(dev_priv, pipe,
                                     PIPE_VBLANK_INTERRUPT_ENABLE);

        /* maintain vblank delivery even in deep C-states */
        if (dev_priv->info->gen == 3)
                I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

        return 0;
}

static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;

        if (!i915_pipe_enabled(dev, pipe))
                return -EINVAL;

        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
        ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
                                    DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

        return 0;
}

static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;

        if (!i915_pipe_enabled(dev, pipe))
                return -EINVAL;

        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
        ironlake_enable_display_irq(dev_priv,
                                    DE_PIPEA_VBLANK_IVB << (5 * pipe));
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

        return 0;
}

static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;
        u32 imr;

        if (!i915_pipe_enabled(dev, pipe))
                return -EINVAL;

        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
        imr = I915_READ(VLV_IMR);
        if (pipe == 0)
                imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
        else
                imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
        I915_WRITE(VLV_IMR, imr);
        i915_enable_pipestat(dev_priv, pipe,
                             PIPE_START_VBLANK_INTERRUPT_ENABLE);
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

        return 0;
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static void i915_disable_vblank(struct drm_device *dev, int pipe)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;

        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
        if (dev_priv->info->gen == 3)
                I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));

        i915_disable_pipestat(dev_priv, pipe,
                              PIPE_VBLANK_INTERRUPT_ENABLE |
                              PIPE_START_VBLANK_INTERRUPT_ENABLE);
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;

        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
        ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
                                     DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;

        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
        ironlake_disable_display_irq(dev_priv,
                                     DE_PIPEA_VBLANK_IVB << (pipe * 5));
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;
        u32 imr;

        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
        i915_disable_pipestat(dev_priv, pipe,
                              PIPE_START_VBLANK_INTERRUPT_ENABLE);
        imr = I915_READ(VLV_IMR);
        if (pipe == 0)
                imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
        else
                imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
        I915_WRITE(VLV_IMR, imr);
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
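
/*
 * Hangcheck helpers: a ring counts as idle when its request list is
 * empty or the last emitted seqno has already been retired.
 */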
 
static u32
ring_last_seqno(struct intel_ring_buffer *ring)
{
        return list_entry(ring->request_list.prev,
                          struct drm_i915_gem_request, list)->seqno;
}

static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
{
        if (list_empty(&ring->request_list) ||
            i915_seqno_passed(ring->get_seqno(ring, false),
                              ring_last_seqno(ring))) {
                /* Issue a wake-up to catch stuck h/w. */
                if (waitqueue_active(&ring->irq_queue)) {
                        DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
                                  ring->name);
                        wake_up_all(&ring->irq_queue);
                        *err = true;
                }
                return true;
        }
        return false;
}

static bool kick_ring(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 tmp = I915_READ_CTL(ring);
        if (tmp & RING_WAIT) {
                DRM_ERROR("Kicking stuck wait on %s\n",
                          ring->name);
                I915_WRITE_CTL(ring, tmp);
                return true;
        }
        return false;
}

static bool i915_hangcheck_hung(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;

        if (dev_priv->hangcheck_count++ > 1) {
                bool hung = true;

                DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
                i915_handle_error(dev, true);

                if (!IS_GEN2(dev)) {
                        struct intel_ring_buffer *ring;
                        int i;

                        /* Is the chip hanging on a WAIT_FOR_EVENT?
                         * If so we can simply poke the RB_WAIT bit
                         * and break the hang. This should work on
                         * all but the second generation chipsets.
                         */
                        for_each_ring(ring, dev_priv, i)
                                hung &= !kick_ring(ring);
                }

                return hung;
        }

        return false;
}

/**
 * This is called when the chip hasn't reported back with completed
 * batchbuffers in a long time. The first time this is called we simply record
 * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses
 * again, we assume the chip is wedged and try to fix it.
 */
void i915_hangcheck_elapsed(unsigned long data)
{
        struct drm_device *dev = (struct drm_device *)data;
        drm_i915_private_t *dev_priv = dev->dev_private;
        uint32_t acthd[I915_NUM_RINGS], instdone[I915_NUM_INSTDONE_REG];
        struct intel_ring_buffer *ring;
        bool err = false, idle;
        int i;

        if (!i915_enable_hangcheck)
                return;

        memset(acthd, 0, sizeof(acthd));
        idle = true;
        for_each_ring(ring, dev_priv, i) {
                idle &= i915_hangcheck_ring_idle(ring, &err);
                acthd[i] = intel_ring_get_active_head(ring);
        }

        /* If all work is done then ACTHD clearly hasn't advanced. */
        if (idle) {
                if (err) {
                        if (i915_hangcheck_hung(dev))
                                return;

                        goto repeat;
                }

                dev_priv->hangcheck_count = 0;
                return;
        }

        i915_get_extra_instdone(dev, instdone);
        if (memcmp(dev_priv->last_acthd, acthd, sizeof(acthd)) == 0 &&
            memcmp(dev_priv->prev_instdone, instdone, sizeof(instdone)) == 0) {
                if (i915_hangcheck_hung(dev))
                        return;
        } else {
                dev_priv->hangcheck_count = 0;

                memcpy(dev_priv->last_acthd, acthd, sizeof(acthd));
                memcpy(dev_priv->prev_instdone, instdone, sizeof(instdone));
        }

repeat:
        /* Reset timer in case chip hangs without another request being added */
        mod_timer(&dev_priv->hangcheck_timer,
                  round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
}

/* drm_dma.h hooks */
static void ironlake_irq_preinstall(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

        atomic_set(&dev_priv->irq_received, 0);

        I915_WRITE(HWSTAM, 0xeffe);

        /* XXX hotplug from PCH */

        I915_WRITE(DEIMR, 0xffffffff);
        I915_WRITE(DEIER, 0x0);
        POSTING_READ(DEIER);

        /* and GT */
        I915_WRITE(GTIMR, 0xffffffff);
        I915_WRITE(GTIER, 0x0);
        POSTING_READ(GTIER);

        /* south display irq */
        I915_WRITE(SDEIMR, 0xffffffff);
        I915_WRITE(SDEIER, 0x0);
        POSTING_READ(SDEIER);
}
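
/*
 * Valleyview routes display interrupts through its own VLV_IMR/
 * VLV_IER/VLV_IIR registers in addition to the shared GT registers,
 * so both sets are reset here.
 */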
 
static void valleyview_irq_preinstall(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int pipe;

        atomic_set(&dev_priv->irq_received, 0);

        /* VLV magic */
        I915_WRITE(VLV_IMR, 0);
        I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
        I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
        I915_WRITE(RING_IMR(BLT_RING_BASE), 0);

        /* and GT */
        I915_WRITE(GTIIR, I915_READ(GTIIR));
        I915_WRITE(GTIIR, I915_READ(GTIIR));
        I915_WRITE(GTIMR, 0xffffffff);
        I915_WRITE(GTIER, 0x0);
        POSTING_READ(GTIER);

        I915_WRITE(DPINVGTT, 0xff);

        I915_WRITE(PORT_HOTPLUG_EN, 0);
        I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
        for_each_pipe(pipe)
                I915_WRITE(PIPESTAT(pipe), 0xffff);
        I915_WRITE(VLV_IIR, 0xffffffff);
        I915_WRITE(VLV_IMR, 0xffffffff);
        I915_WRITE(VLV_IER, 0x0);
        POSTING_READ(VLV_IER);
}

/*
 * Enable digital hotplug on the PCH, and configure the DP short pulse
 * duration to 2ms (which is the minimum in the Display Port spec)
 *
 * This register is the same on all known PCH chips.
 */
static void ironlake_enable_pch_hotplug(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u32     hotplug;

        hotplug = I915_READ(PCH_PORT_HOTPLUG);
        hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
        hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
        hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
        hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
        I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}

static int ironlake_irq_postinstall(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        /* enable the kinds of interrupts that are always enabled */
        u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
                           DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
        u32 render_irqs;
        u32 hotplug_mask;

        dev_priv->irq_mask = ~display_mask;

        /* these should always be able to generate an irq */
        I915_WRITE(DEIIR, I915_READ(DEIIR));
        I915_WRITE(DEIMR, dev_priv->irq_mask);
        I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
        POSTING_READ(DEIER);

        dev_priv->gt_irq_mask = ~0;

        I915_WRITE(GTIIR, I915_READ(GTIIR));
        I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

        if (IS_GEN6(dev))
                render_irqs =
                        GT_USER_INTERRUPT |
                        GEN6_BSD_USER_INTERRUPT |
                        GEN6_BLITTER_USER_INTERRUPT;
        else
                render_irqs =
                        GT_USER_INTERRUPT |
                        GT_PIPE_NOTIFY |
                        GT_BSD_USER_INTERRUPT;
        I915_WRITE(GTIER, render_irqs);
        POSTING_READ(GTIER);

        if (HAS_PCH_CPT(dev)) {
                hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
                                SDE_PORTB_HOTPLUG_CPT |
                                SDE_PORTC_HOTPLUG_CPT |
                                SDE_PORTD_HOTPLUG_CPT);
        } else {
                hotplug_mask = (SDE_CRT_HOTPLUG |
                                SDE_PORTB_HOTPLUG |
                                SDE_PORTC_HOTPLUG |
                                SDE_PORTD_HOTPLUG |
                                SDE_AUX_MASK);
        }

        dev_priv->pch_irq_mask = ~hotplug_mask;

        I915_WRITE(SDEIIR, I915_READ(SDEIIR));
        I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
        I915_WRITE(SDEIER, hotplug_mask);
        POSTING_READ(SDEIER);

        ironlake_enable_pch_hotplug(dev);

        if (IS_IRONLAKE_M(dev)) {
                /* Clear & enable PCU event interrupts */
                I915_WRITE(DEIIR, DE_PCU_EVENT);
                I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
                ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
        }

        return 0;
}

static int ivybridge_irq_postinstall(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        /* enable the kinds of interrupts that are always enabled */
        u32 display_mask =
                DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
                DE_PLANEC_FLIP_DONE_IVB |
                DE_PLANEB_FLIP_DONE_IVB |
                DE_PLANEA_FLIP_DONE_IVB;
        u32 render_irqs;
        u32 hotplug_mask;

        dev_priv->irq_mask = ~display_mask;

        /* these should always be able to generate an irq */
        I915_WRITE(DEIIR, I915_READ(DEIIR));
        I915_WRITE(DEIMR, dev_priv->irq_mask);
        I915_WRITE(DEIER,
                   display_mask |
                   DE_PIPEC_VBLANK_IVB |
                   DE_PIPEB_VBLANK_IVB |
                   DE_PIPEA_VBLANK_IVB);
        POSTING_READ(DEIER);

        dev_priv->gt_irq_mask = ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;

        I915_WRITE(GTIIR, I915_READ(GTIIR));
        I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

        render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
                GEN6_BLITTER_USER_INTERRUPT | GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
        I915_WRITE(GTIER, render_irqs);
        POSTING_READ(GTIER);

        hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
                        SDE_PORTB_HOTPLUG_CPT |
                        SDE_PORTC_HOTPLUG_CPT |
                        SDE_PORTD_HOTPLUG_CPT);
        dev_priv->pch_irq_mask = ~hotplug_mask;

        I915_WRITE(SDEIIR, I915_READ(SDEIIR));
        I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
        I915_WRITE(SDEIER, hotplug_mask);
        POSTING_READ(SDEIER);

        ironlake_enable_pch_hotplug(dev);

        return 0;
}

static int valleyview_irq_postinstall(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u32 enable_mask;
        u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
        u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
        u32 render_irqs;
        u16 msid;

        enable_mask = I915_DISPLAY_PORT_INTERRUPT;
        enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
                I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
                I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
                I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

        /*
         * Leave vblank interrupts masked initially; enable/disable will
         * toggle them based on usage.
         */
        dev_priv->irq_mask = (~enable_mask) |
                I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
                I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

        dev_priv->pipestat[0] = 0;
        dev_priv->pipestat[1] = 0;

        /* Hack for broken MSIs on VLV */
        pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000);
        pci_read_config_word(dev->pdev, 0x98, &msid);
        msid &= 0xff; /* mask out delivery bits */
        msid |= (1<<14);
        pci_write_config_word(dev_priv->dev->pdev, 0x98, msid);

        I915_WRITE(VLV_IMR, dev_priv->irq_mask);
        I915_WRITE(VLV_IER, enable_mask);
        I915_WRITE(VLV_IIR, 0xffffffff);
        I915_WRITE(PIPESTAT(0), 0xffff);
        I915_WRITE(PIPESTAT(1), 0xffff);
        POSTING_READ(VLV_IER);

        i915_enable_pipestat(dev_priv, 0, pipestat_enable);
        i915_enable_pipestat(dev_priv, 1, pipestat_enable);

        I915_WRITE(VLV_IIR, 0xffffffff);
        I915_WRITE(VLV_IIR, 0xffffffff);

        I915_WRITE(GTIIR, I915_READ(GTIIR));
        I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

        render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
                GEN6_BLITTER_USER_INTERRUPT;
        I915_WRITE(GTIER, render_irqs);
        POSTING_READ(GTIER);

        /* ack & enable invalid PTE error interrupts */
#if 0 /* FIXME: add support to irq handler for checking these bits */
        I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
        I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
#endif

        I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
        /* Note HDMI and DP share bits */
        if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
                hotplug_en |= HDMIB_HOTPLUG_INT_EN;
        if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
                hotplug_en |= HDMIC_HOTPLUG_INT_EN;
        if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
                hotplug_en |= HDMID_HOTPLUG_INT_EN;
        if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
                hotplug_en |= SDVOC_HOTPLUG_INT_EN;
        if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
                hotplug_en |= SDVOB_HOTPLUG_INT_EN;
        if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
                hotplug_en |= CRT_HOTPLUG_INT_EN;
                hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
        }

        I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);

        return 0;
}

static void valleyview_irq_uninstall(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int pipe;

        if (!dev_priv)
                return;

        for_each_pipe(pipe)
                I915_WRITE(PIPESTAT(pipe), 0xffff);

        I915_WRITE(HWSTAM, 0xffffffff);
        I915_WRITE(PORT_HOTPLUG_EN, 0);
        I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
        for_each_pipe(pipe)
                I915_WRITE(PIPESTAT(pipe), 0xffff);
        I915_WRITE(VLV_IIR, 0xffffffff);
        I915_WRITE(VLV_IMR, 0xffffffff);
        I915_WRITE(VLV_IER, 0x0);
        POSTING_READ(VLV_IER);
}

static void ironlake_irq_uninstall(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

        if (!dev_priv)
                return;

        I915_WRITE(HWSTAM, 0xffffffff);

        I915_WRITE(DEIMR, 0xffffffff);
        I915_WRITE(DEIER, 0x0);
        I915_WRITE(DEIIR, I915_READ(DEIIR));

        I915_WRITE(GTIMR, 0xffffffff);
        I915_WRITE(GTIER, 0x0);
        I915_WRITE(GTIIR, I915_READ(GTIIR));

        I915_WRITE(SDEIMR, 0xffffffff);
        I915_WRITE(SDEIER, 0x0);
        I915_WRITE(SDEIIR, I915_READ(SDEIIR));
}

static void i8xx_irq_preinstall(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int pipe;

        atomic_set(&dev_priv->irq_received, 0);

        for_each_pipe(pipe)
                I915_WRITE(PIPESTAT(pipe), 0);
        I915_WRITE16(IMR, 0xffff);
        I915_WRITE16(IER, 0x0);
        POSTING_READ16(IER);
}

static int i8xx_irq_postinstall(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

        dev_priv->pipestat[0] = 0;
        dev_priv->pipestat[1] = 0;

        I915_WRITE16(EMR,
                     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

        /* Unmask the interrupts that we always want on. */
        dev_priv->irq_mask =
                ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
                  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
                  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
                  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
                  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
        I915_WRITE16(IMR, dev_priv->irq_mask);

        I915_WRITE16(IER,
                     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
                     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
                     I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
                     I915_USER_INTERRUPT);
        POSTING_READ16(IER);

        return 0;
}
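
/*
 * The 8xx parts use 16-bit IIR/IMR/IER registers; the handler loops
 * until all non-flip interrupt bits have been acknowledged.
 */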
 
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
        struct drm_device *dev = (struct drm_device *) arg;
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u16 iir, new_iir;
        u32 pipe_stats[2];
        unsigned long irqflags;
        int irq_received;
        int pipe;
        u16 flip_mask =
                I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
                I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

        atomic_inc(&dev_priv->irq_received);

        iir = I915_READ16(IIR);
        if (iir == 0)
                return IRQ_NONE;

        while (iir & ~flip_mask) {
                /* Can't rely on pipestat interrupt bit in iir as it might
                 * have been cleared after the pipestat interrupt was received.
                 * It doesn't set the bit in iir again, but it still produces
                 * interrupts (for non-MSI).
                 */
                spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
                if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
                        i915_handle_error(dev, false);

                for_each_pipe(pipe) {
                        int reg = PIPESTAT(pipe);
                        pipe_stats[pipe] = I915_READ(reg);

                        /*
                         * Clear the PIPE*STAT regs before the IIR
                         */
                        if (pipe_stats[pipe] & 0x8000ffff) {
                                if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
                                        DRM_DEBUG_DRIVER("pipe %c underrun\n",
                                                         pipe_name(pipe));
                                I915_WRITE(reg, pipe_stats[pipe]);
                                irq_received = 1;
                        }
                }
                spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

                I915_WRITE16(IIR, iir & ~flip_mask);
                new_iir = I915_READ16(IIR); /* Flush posted writes */

                i915_update_dri1_breadcrumb(dev);

                if (iir & I915_USER_INTERRUPT)
                        notify_ring(dev, &dev_priv->ring[RCS]);

                if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
                    drm_handle_vblank(dev, 0)) {
                        if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
                                intel_prepare_page_flip(dev, 0);
                                intel_finish_page_flip(dev, 0);
                                flip_mask &= ~I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT;
                        }
                }

                if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
                    drm_handle_vblank(dev, 1)) {
                        if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) {
                                intel_prepare_page_flip(dev, 1);
                                intel_finish_page_flip(dev, 1);
                                flip_mask &= ~I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
                        }
                }

                iir = new_iir;
        }

        return IRQ_HANDLED;
}
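
/*
 * Tear down in the reverse order of postinstall: disable pipe status
 * sources, mask everything and ack any interrupt still latched in IIR.
 */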
 
static void i8xx_irq_uninstall(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int pipe;

        for_each_pipe(pipe) {
                /* Clear enable bits; then clear status bits */
                I915_WRITE(PIPESTAT(pipe), 0);
                I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
        }
        I915_WRITE16(IMR, 0xffff);
        I915_WRITE16(IER, 0x0);
        I915_WRITE16(IIR, I915_READ16(IIR));
}
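
/*
 * i915 (gen3) variants of the same hooks. Hotplug support is conditional
 * on the chip here (I915_HAS_HOTPLUG), so the hotplug registers are only
 * touched when present.
 */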
 
static void i915_irq_preinstall(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int pipe;

        atomic_set(&dev_priv->irq_received, 0);

        if (I915_HAS_HOTPLUG(dev)) {
                I915_WRITE(PORT_HOTPLUG_EN, 0);
                I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
        }

        I915_WRITE16(HWSTAM, 0xeffe);
        for_each_pipe(pipe)
                I915_WRITE(PIPESTAT(pipe), 0);
        I915_WRITE(IMR, 0xffffffff);
        I915_WRITE(IER, 0x0);
        POSTING_READ(IER);
}
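
/*
 * Enable the standard display/render interrupts and, where the hardware
 * supports it, hotplug detection on the HDMI/SDVO/CRT ports listed in
 * hotplug_supported_mask.
 */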
 
static int i915_irq_postinstall(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u32 enable_mask;

        dev_priv->pipestat[0] = 0;
        dev_priv->pipestat[1] = 0;

        I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

        /* Unmask the interrupts that we always want on. */
        dev_priv->irq_mask =
                ~(I915_ASLE_INTERRUPT |
                  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
                  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
                  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
                  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
                  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

        enable_mask =
                I915_ASLE_INTERRUPT |
                I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
                I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
                I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
                I915_USER_INTERRUPT;

        if (I915_HAS_HOTPLUG(dev)) {
                /* Enable in IER... */
                enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
                /* and unmask in IMR */
                dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
        }

        I915_WRITE(IMR, dev_priv->irq_mask);
        I915_WRITE(IER, enable_mask);
        POSTING_READ(IER);

        if (I915_HAS_HOTPLUG(dev)) {
                u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);

                if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
                        hotplug_en |= HDMIB_HOTPLUG_INT_EN;
                if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
                        hotplug_en |= HDMIC_HOTPLUG_INT_EN;
                if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
                        hotplug_en |= HDMID_HOTPLUG_INT_EN;
                if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
                        hotplug_en |= SDVOC_HOTPLUG_INT_EN;
                if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
                        hotplug_en |= SDVOB_HOTPLUG_INT_EN;
                if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
                        hotplug_en |= CRT_HOTPLUG_INT_EN;
                        hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
                }

                /* Ignore TV since it's buggy */

                I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
        }

        intel_opregion_enable_asle(dev);

        return 0;
}
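
/*
 * Gen3 interrupt handler. Same overall shape as the gen2 one, but with
 * 32-bit registers, hotplug handling, and a plane/pipe swap on mobile
 * parts (IS_MOBILE), where plane A scans out on pipe B and vice versa.
 */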
 
static irqreturn_t i915_irq_handler(int irq, void *arg)
{
        struct drm_device *dev = (struct drm_device *) arg;
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
        unsigned long irqflags;
        u32 flip_mask =
                I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
                I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
        u32 flip[2] = {
                I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT,
                I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT
        };
        int pipe, ret = IRQ_NONE;

        atomic_inc(&dev_priv->irq_received);

        iir = I915_READ(IIR);
        do {
                bool irq_received = (iir & ~flip_mask) != 0;
                bool blc_event = false;

                /* Can't rely on pipestat interrupt bit in iir as it might
                 * have been cleared after the pipestat interrupt was received.
                 * It doesn't set the bit in iir again, but it still produces
                 * interrupts (for non-MSI).
                 */
                spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
                if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
                        i915_handle_error(dev, false);

                for_each_pipe(pipe) {
                        int reg = PIPESTAT(pipe);
                        pipe_stats[pipe] = I915_READ(reg);

                        /* Clear the PIPE*STAT regs before the IIR */
                        if (pipe_stats[pipe] & 0x8000ffff) {
                                if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
                                        DRM_DEBUG_DRIVER("pipe %c underrun\n",
                                                         pipe_name(pipe));
                                I915_WRITE(reg, pipe_stats[pipe]);
                                irq_received = true;
                        }
                }
                spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

                if (!irq_received)
                        break;

                /* Consume port.  Then clear IIR or we'll miss events */
                if ((I915_HAS_HOTPLUG(dev)) &&
                    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
                        u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

                        DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
                                  hotplug_status);
                        if (hotplug_status & dev_priv->hotplug_supported_mask)
                                queue_work(dev_priv->wq,
                                           &dev_priv->hotplug_work);

                        I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
                        POSTING_READ(PORT_HOTPLUG_STAT);
                }

                I915_WRITE(IIR, iir & ~flip_mask);
                new_iir = I915_READ(IIR); /* Flush posted writes */

                if (iir & I915_USER_INTERRUPT)
                        notify_ring(dev, &dev_priv->ring[RCS]);

                for_each_pipe(pipe) {
                        int plane = pipe;
                        if (IS_MOBILE(dev))
                                plane = !plane;
                        if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
                            drm_handle_vblank(dev, pipe)) {
                                if (iir & flip[plane]) {
                                        intel_prepare_page_flip(dev, plane);
                                        intel_finish_page_flip(dev, pipe);
                                        flip_mask &= ~flip[plane];
                                }
                        }

                        if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
                                blc_event = true;
                }

                if (blc_event || (iir & I915_ASLE_INTERRUPT))
                        intel_opregion_asle_intr(dev);

                /* With MSI, interrupts are only generated when iir
                 * transitions from zero to nonzero.  If another bit got
                 * set while we were handling the existing iir bits, then
                 * we would never get another interrupt.
                 *
                 * This is fine on non-MSI as well, as if we hit this path
                 * we avoid exiting the interrupt handler only to generate
                 * another one.
                 *
                 * Note that for MSI this could cause a stray interrupt report
                 * if an interrupt landed in the time between writing IIR and
                 * the posting read.  This should be rare enough to never
                 * trigger the 99% of 100,000 interrupts test for disabling
                 * stray interrupts.
                 */
                ret = IRQ_HANDLED;
                iir = new_iir;
        } while (iir & ~flip_mask);

        i915_update_dri1_breadcrumb(dev);

        return ret;
}
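
/* Mirror image of i915_irq_preinstall/postinstall: mask, disable and ack. */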
 
static void i915_irq_uninstall(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int pipe;

        if (I915_HAS_HOTPLUG(dev)) {
                I915_WRITE(PORT_HOTPLUG_EN, 0);
                I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
        }

        I915_WRITE16(HWSTAM, 0xffff);
        for_each_pipe(pipe) {
                /* Clear enable bits; then clear status bits */
                I915_WRITE(PIPESTAT(pipe), 0);
                I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
        }
        I915_WRITE(IMR, 0xffffffff);
        I915_WRITE(IER, 0x0);

        I915_WRITE(IIR, I915_READ(IIR));
}
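
/*
 * i965 (gen4 and later pre-ILK) hooks. Hotplug is always present on these
 * chips, so the hotplug registers are cleared unconditionally.
 */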
 
static void i965_irq_preinstall(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int pipe;

        atomic_set(&dev_priv->irq_received, 0);

        I915_WRITE(PORT_HOTPLUG_EN, 0);
        I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

        I915_WRITE(HWSTAM, 0xeffe);
        for_each_pipe(pipe)
                I915_WRITE(PIPESTAT(pipe), 0);
        I915_WRITE(IMR, 0xffffffff);
        I915_WRITE(IER, 0x0);
        POSTING_READ(IER);
}
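
/*
 * Gen4 enable path. G4X additionally exposes the BSD ring user interrupt
 * and uses different SDVO hotplug status bits and extra error sources
 * (page table, memory/CP privilege) in EMR.
 */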
 
static int i965_irq_postinstall(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u32 hotplug_en;
        u32 enable_mask;
        u32 error_mask;

        /* Unmask the interrupts that we always want on. */
        dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
                               I915_DISPLAY_PORT_INTERRUPT |
                               I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
                               I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
                               I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
                               I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
                               I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

        enable_mask = ~dev_priv->irq_mask;
        enable_mask |= I915_USER_INTERRUPT;

        if (IS_G4X(dev))
                enable_mask |= I915_BSD_USER_INTERRUPT;

        dev_priv->pipestat[0] = 0;
        dev_priv->pipestat[1] = 0;

        /*
         * Enable some error detection, note the instruction error mask
         * bit is reserved, so we leave it masked.
         */
        if (IS_G4X(dev)) {
                error_mask = ~(GM45_ERROR_PAGE_TABLE |
                               GM45_ERROR_MEM_PRIV |
                               GM45_ERROR_CP_PRIV |
                               I915_ERROR_MEMORY_REFRESH);
        } else {
                error_mask = ~(I915_ERROR_PAGE_TABLE |
                               I915_ERROR_MEMORY_REFRESH);
        }
        I915_WRITE(EMR, error_mask);

        I915_WRITE(IMR, dev_priv->irq_mask);
        I915_WRITE(IER, enable_mask);
        POSTING_READ(IER);

        /* Note HDMI and DP share hotplug bits */
        hotplug_en = 0;
        if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
                hotplug_en |= HDMIB_HOTPLUG_INT_EN;
        if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
                hotplug_en |= HDMIC_HOTPLUG_INT_EN;
        if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
                hotplug_en |= HDMID_HOTPLUG_INT_EN;
        if (IS_G4X(dev)) {
                if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_G4X)
                        hotplug_en |= SDVOC_HOTPLUG_INT_EN;
                if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_G4X)
                        hotplug_en |= SDVOB_HOTPLUG_INT_EN;
        } else {
                if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I965)
                        hotplug_en |= SDVOC_HOTPLUG_INT_EN;
                if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I965)
                        hotplug_en |= SDVOB_HOTPLUG_INT_EN;
        }
        if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
                hotplug_en |= CRT_HOTPLUG_INT_EN;

                /* Programming the CRT detection parameters tends
                   to generate a spurious hotplug event about three
                   seconds later.  So just do it once.
                   */
                if (IS_G4X(dev))
                        hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
                hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
        }

        /* Ignore TV since it's buggy */

        I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);

        intel_opregion_enable_asle(dev);

        return 0;
}
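
/*
 * Gen4 interrupt handler: loops until a pass over IIR and the pipe status
 * registers finds nothing pending. Flip-pending interrupts only prepare
 * the flip; completion is signalled from the vblank that follows, with a
 * stall check in between.
 */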
 
static irqreturn_t i965_irq_handler(int irq, void *arg)
{
        struct drm_device *dev = (struct drm_device *) arg;
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u32 iir, new_iir;
        u32 pipe_stats[I915_MAX_PIPES];
        unsigned long irqflags;
        int irq_received;
        int ret = IRQ_NONE, pipe;

        atomic_inc(&dev_priv->irq_received);

        iir = I915_READ(IIR);

        for (;;) {
                bool blc_event = false;

                irq_received = iir != 0;

                /* Can't rely on pipestat interrupt bit in iir as it might
                 * have been cleared after the pipestat interrupt was received.
                 * It doesn't set the bit in iir again, but it still produces
                 * interrupts (for non-MSI).
                 */
                spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
                if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
                        i915_handle_error(dev, false);

                for_each_pipe(pipe) {
                        int reg = PIPESTAT(pipe);
                        pipe_stats[pipe] = I915_READ(reg);

                        /*
                         * Clear the PIPE*STAT regs before the IIR
                         */
                        if (pipe_stats[pipe] & 0x8000ffff) {
                                if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
                                        DRM_DEBUG_DRIVER("pipe %c underrun\n",
                                                         pipe_name(pipe));
                                I915_WRITE(reg, pipe_stats[pipe]);
                                irq_received = 1;
                        }
                }
                spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

                if (!irq_received)
                        break;

                ret = IRQ_HANDLED;

                /* Consume port.  Then clear IIR or we'll miss events */
                if (iir & I915_DISPLAY_PORT_INTERRUPT) {
                        u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

                        DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
                                  hotplug_status);
                        if (hotplug_status & dev_priv->hotplug_supported_mask)
                                queue_work(dev_priv->wq,
                                           &dev_priv->hotplug_work);

                        I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
                        I915_READ(PORT_HOTPLUG_STAT);
                }

                I915_WRITE(IIR, iir);
                new_iir = I915_READ(IIR); /* Flush posted writes */

                if (iir & I915_USER_INTERRUPT)
                        notify_ring(dev, &dev_priv->ring[RCS]);
                if (iir & I915_BSD_USER_INTERRUPT)
                        notify_ring(dev, &dev_priv->ring[VCS]);

                if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT)
                        intel_prepare_page_flip(dev, 0);

                if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT)
                        intel_prepare_page_flip(dev, 1);

                for_each_pipe(pipe) {
                        if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
                            drm_handle_vblank(dev, pipe)) {
                                i915_pageflip_stall_check(dev, pipe);
                                intel_finish_page_flip(dev, pipe);
                        }

                        if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
                                blc_event = true;
                }

                if (blc_event || (iir & I915_ASLE_INTERRUPT))
                        intel_opregion_asle_intr(dev);

                /* With MSI, interrupts are only generated when iir
                 * transitions from zero to nonzero.  If another bit got
                 * set while we were handling the existing iir bits, then
                 * we would never get another interrupt.
                 *
                 * This is fine on non-MSI as well, as if we hit this path
                 * we avoid exiting the interrupt handler only to generate
                 * another one.
                 *
                 * Note that for MSI this could cause a stray interrupt report
                 * if an interrupt landed in the time between writing IIR and
                 * the posting read.  This should be rare enough to never
                 * trigger the 99% of 100,000 interrupts test for disabling
                 * stray interrupts.
                 */
                iir = new_iir;
        }

        i915_update_dri1_breadcrumb(dev);

        return ret;
}
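
/* Full teardown; the !dev_priv check guards against a partially set up device. */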
 
static void i965_irq_uninstall(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int pipe;

        if (!dev_priv)
                return;

        I915_WRITE(PORT_HOTPLUG_EN, 0);
        I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

        I915_WRITE(HWSTAM, 0xffffffff);
        for_each_pipe(pipe)
                I915_WRITE(PIPESTAT(pipe), 0);
        I915_WRITE(IMR, 0xffffffff);
        I915_WRITE(IER, 0x0);

        for_each_pipe(pipe)
                I915_WRITE(PIPESTAT(pipe),
                           I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
        I915_WRITE(IIR, I915_READ(IIR));
}
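
/*
 * Single entry point that wires the per-generation IRQ hooks and vblank
 * callbacks defined above into the drm_driver vtable.
 */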
 
void intel_irq_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
        INIT_WORK(&dev_priv->error_work, i915_error_work_func);
        INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
        INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

        dev->driver->get_vblank_counter = i915_get_vblank_counter;
        dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
        if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
                dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
                dev->driver->get_vblank_counter = gm45_get_vblank_counter;
        }

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
        else
                dev->driver->get_vblank_timestamp = NULL;
        dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

        if (IS_VALLEYVIEW(dev)) {
                dev->driver->irq_handler = valleyview_irq_handler;
                dev->driver->irq_preinstall = valleyview_irq_preinstall;
                dev->driver->irq_postinstall = valleyview_irq_postinstall;
                dev->driver->irq_uninstall = valleyview_irq_uninstall;
                dev->driver->enable_vblank = valleyview_enable_vblank;
                dev->driver->disable_vblank = valleyview_disable_vblank;
        } else if (IS_IVYBRIDGE(dev)) {
                /* Share pre & uninstall handlers with ILK/SNB */
                dev->driver->irq_handler = ivybridge_irq_handler;
                dev->driver->irq_preinstall = ironlake_irq_preinstall;
                dev->driver->irq_postinstall = ivybridge_irq_postinstall;
                dev->driver->irq_uninstall = ironlake_irq_uninstall;
                dev->driver->enable_vblank = ivybridge_enable_vblank;
                dev->driver->disable_vblank = ivybridge_disable_vblank;
        } else if (IS_HASWELL(dev)) {
                /* Share interrupts handling with IVB */
                dev->driver->irq_handler = ivybridge_irq_handler;
                dev->driver->irq_preinstall = ironlake_irq_preinstall;
                dev->driver->irq_postinstall = ivybridge_irq_postinstall;
                dev->driver->irq_uninstall = ironlake_irq_uninstall;
                dev->driver->enable_vblank = ivybridge_enable_vblank;
                dev->driver->disable_vblank = ivybridge_disable_vblank;
        } else if (HAS_PCH_SPLIT(dev)) {
                dev->driver->irq_handler = ironlake_irq_handler;
                dev->driver->irq_preinstall = ironlake_irq_preinstall;
                dev->driver->irq_postinstall = ironlake_irq_postinstall;
                dev->driver->irq_uninstall = ironlake_irq_uninstall;
                dev->driver->enable_vblank = ironlake_enable_vblank;
                dev->driver->disable_vblank = ironlake_disable_vblank;
        } else {
                if (INTEL_INFO(dev)->gen == 2) {
                        dev->driver->irq_preinstall = i8xx_irq_preinstall;
                        dev->driver->irq_postinstall = i8xx_irq_postinstall;
                        dev->driver->irq_handler = i8xx_irq_handler;
                        dev->driver->irq_uninstall = i8xx_irq_uninstall;
                } else if (INTEL_INFO(dev)->gen == 3) {
                        dev->driver->irq_preinstall = i915_irq_preinstall;
                        dev->driver->irq_postinstall = i915_irq_postinstall;
                        dev->driver->irq_uninstall = i915_irq_uninstall;
                        dev->driver->irq_handler = i915_irq_handler;
                } else {
                        dev->driver->irq_preinstall = i965_irq_preinstall;
                        dev->driver->irq_postinstall = i965_irq_postinstall;
                        dev->driver->irq_uninstall = i965_irq_uninstall;
                        dev->driver->irq_handler = i965_irq_handler;
                }
                dev->driver->enable_vblank = i915_enable_vblank;
                dev->driver->disable_vblank = i915_disable_vblank;
        }
}