/**************************************************************************
 * Copyright (c) 2007, Intel Corporation.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
 * develop this driver.
 *
 **************************************************************************/
/*
 * Authors:
 * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 */

#include "drmP.h"
#include "psb_drv.h"
#include "psb_reg.h"
#include "psb_scene.h"
#include "psb_msvdx.h"

#define PSB_2D_TIMEOUT_MSEC 100

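/*
 * Soft-reset the SGX BIF, DPM, TA, USE, ISP and TSP cores, optionally
 * including the 2D core, then clear any pending BIF fault.
 */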
void psb_reset(struct drm_psb_private *dev_priv, int reset_2d)
{
	uint32_t val;

	val = _PSB_CS_RESET_BIF_RESET |
	    _PSB_CS_RESET_DPM_RESET |
	    _PSB_CS_RESET_TA_RESET |
	    _PSB_CS_RESET_USE_RESET |
	    _PSB_CS_RESET_ISP_RESET | _PSB_CS_RESET_TSP_RESET;

	if (reset_2d)
		val |= _PSB_CS_RESET_TWOD_RESET;

	PSB_WSGX32(val, PSB_CR_SOFT_RESET);
	(void)PSB_RSGX32(PSB_CR_SOFT_RESET);

	msleep(1);

	PSB_WSGX32(0, PSB_CR_SOFT_RESET);
	wmb();
	PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) | _PSB_CB_CTRL_CLEAR_FAULT,
		   PSB_CR_BIF_CTRL);
	wmb();
	(void)PSB_RSGX32(PSB_CR_BIF_CTRL);

	msleep(1);
	PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) & ~_PSB_CB_CTRL_CLEAR_FAULT,
		   PSB_CR_BIF_CTRL);
	(void)PSB_RSGX32(PSB_CR_BIF_CTRL);
}

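/*
 * Decode PSB_CR_BIF_INT_STAT after an MMU fault and log the fault type,
 * the requestor that caused it and the failing address.
 */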
void psb_print_pagefault(struct drm_psb_private *dev_priv)
{
	uint32_t val;
	uint32_t addr;

	val = PSB_RSGX32(PSB_CR_BIF_INT_STAT);
	addr = PSB_RSGX32(PSB_CR_BIF_FAULT);

	if (val) {
		if (val & _PSB_CBI_STAT_PF_N_RW)
			DRM_ERROR("Poulsbo MMU page fault:\n");
		else
			DRM_ERROR("Poulsbo MMU read / write "
				  "protection fault:\n");

		if (val & _PSB_CBI_STAT_FAULT_CACHE)
			DRM_ERROR("\tCache requestor.\n");
		if (val & _PSB_CBI_STAT_FAULT_TA)
			DRM_ERROR("\tTA requestor.\n");
		if (val & _PSB_CBI_STAT_FAULT_VDM)
			DRM_ERROR("\tVDM requestor.\n");
		if (val & _PSB_CBI_STAT_FAULT_2D)
			DRM_ERROR("\t2D requestor.\n");
		if (val & _PSB_CBI_STAT_FAULT_PBE)
			DRM_ERROR("\tPBE requestor.\n");
		if (val & _PSB_CBI_STAT_FAULT_TSP)
			DRM_ERROR("\tTSP requestor.\n");
		if (val & _PSB_CBI_STAT_FAULT_ISP)
			DRM_ERROR("\tISP requestor.\n");
		if (val & _PSB_CBI_STAT_FAULT_USSEPDS)
			DRM_ERROR("\tUSSEPDS requestor.\n");
		if (val & _PSB_CBI_STAT_FAULT_HOST)
			DRM_ERROR("\tHost requestor.\n");

		DRM_ERROR("\tMMU failing address is 0x%08x.\n", (unsigned)addr);
	}
}

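/*
 * (Re-)arm the watchdog timer PSB_WATCHDOG_DELAY jiffies from now, unless
 * it is already pending or has been disabled for a reset in progress.
 */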
void psb_schedule_watchdog(struct drm_psb_private *dev_priv)
{
	struct timer_list *wt = &dev_priv->watchdog_timer;
	unsigned long irq_flags;

	spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
	if (dev_priv->timer_available && !timer_pending(wt)) {
		wt->expires = jiffies + PSB_WATCHDOG_DELAY;
		add_timer(wt);
	}
	spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
}

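/*
 * Compare the sequence number an engine last wrote back with the last one
 * submitted: the engine is idle when they match, and considered locked up
 * when it has made no progress since the previous watchdog tick.
 */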
static void psb_seq_lockup_idle(struct drm_psb_private *dev_priv,
				unsigned int engine, int *lockup, int *idle)
{
	uint32_t received_seq;

	received_seq = dev_priv->comm[engine << 4];
	spin_lock(&dev_priv->sequence_lock);
	*idle = (received_seq == dev_priv->sequence[engine]);
	spin_unlock(&dev_priv->sequence_lock);

	if (*idle) {
		dev_priv->idle[engine] = 1;
		*lockup = 0;
		return;
	}

	if (dev_priv->idle[engine]) {
		dev_priv->idle[engine] = 0;
		dev_priv->last_sequence[engine] = received_seq;
		*lockup = 0;
		return;
	}

	*lockup = (dev_priv->last_sequence[engine] == received_seq);
}

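/*
 * Watchdog timer callback: check the scheduler and the MSVDX video engine
 * for lockups (the 2D check is currently compiled out), schedule the
 * appropriate reset work and re-arm the timer while any engine is busy.
 */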
static void psb_watchdog_func(unsigned long data)
{
	struct drm_psb_private *dev_priv = (struct drm_psb_private *)data;
	int lockup;
	int msvdx_lockup;
	int msvdx_idle;
	int lockup_2d;
	int idle_2d;
	int idle;
	unsigned long irq_flags;

	psb_scheduler_lockup(dev_priv, &lockup, &idle);
	psb_msvdx_lockup(dev_priv, &msvdx_lockup, &msvdx_idle);
#if 0
	psb_seq_lockup_idle(dev_priv, PSB_ENGINE_2D, &lockup_2d, &idle_2d);
#else
	lockup_2d = 0;
	idle_2d = 1;
#endif
	if (lockup || msvdx_lockup || lockup_2d) {
		spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
		dev_priv->timer_available = 0;
		spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
		if (lockup) {
			psb_print_pagefault(dev_priv);
			schedule_work(&dev_priv->watchdog_wq);
		}
		if (msvdx_lockup)
			schedule_work(&dev_priv->msvdx_watchdog_wq);
	}
	if (!idle || !msvdx_idle || !idle_2d)
		psb_schedule_watchdog(dev_priv);
}

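/*
 * Drop every command still queued for MSVDX and signal its fence with
 * DRM_CMD_HANG so that waiters are released before the engine is reset.
 */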
void psb_msvdx_flush_cmd_queue(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct psb_msvdx_cmd_queue *msvdx_cmd;
	struct list_head *list, *next;

	/* Flush the msvdx cmd queue and signal all fences in the queue */
	list_for_each_safe(list, next, &dev_priv->msvdx_queue) {
		msvdx_cmd = list_entry(list, struct psb_msvdx_cmd_queue, head);
		PSB_DEBUG_GENERAL("MSVDXQUE: flushing sequence:%d\n",
				  msvdx_cmd->sequence);
		dev_priv->msvdx_current_sequence = msvdx_cmd->sequence;
		psb_fence_error(dev, PSB_ENGINE_VIDEO,
				dev_priv->msvdx_current_sequence,
				DRM_FENCE_TYPE_EXE, DRM_CMD_HANG);
		list_del(list);
		kfree(msvdx_cmd->cmd);
		drm_free(msvdx_cmd, sizeof(struct psb_msvdx_cmd_queue),
			 DRM_MEM_DRIVER);
	}
}

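/*
 * Work item run when MSVDX locks up: mark the engine for reset, signal the
 * current fence as hung, re-enable the watchdog and flush the command queue.
 */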
static void psb_msvdx_reset_wq(struct work_struct *work)
{
	struct drm_psb_private *dev_priv =
	    container_of(work, struct drm_psb_private, msvdx_watchdog_wq);

	struct psb_scheduler *scheduler = &dev_priv->scheduler;
	unsigned long irq_flags;

	mutex_lock(&dev_priv->msvdx_mutex);
	dev_priv->msvdx_needs_reset = 1;
	dev_priv->msvdx_current_sequence++;
	PSB_DEBUG_GENERAL
	    ("MSVDXFENCE: incremented msvdx_current_sequence to :%d\n",
	     dev_priv->msvdx_current_sequence);

	psb_fence_error(scheduler->dev, PSB_ENGINE_VIDEO,
			dev_priv->msvdx_current_sequence, DRM_FENCE_TYPE_EXE,
			DRM_CMD_HANG);

	spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
	dev_priv->timer_available = 1;
	spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);

	spin_lock_irqsave(&dev_priv->msvdx_lock, irq_flags);
	psb_msvdx_flush_cmd_queue(scheduler->dev);
	spin_unlock_irqrestore(&dev_priv->msvdx_lock, irq_flags);

	psb_schedule_watchdog(dev_priv);
	mutex_unlock(&dev_priv->msvdx_mutex);
}

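/*
 * Restore the default MMU page directory context, clear and invalidate the
 * BIF after a reset, and request a DPM reset through the xhw interface.
 */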
static int psb_xhw_mmu_reset(struct drm_psb_private *dev_priv)
{
	struct psb_xhw_buf buf;
	uint32_t bif_ctrl;

	INIT_LIST_HEAD(&buf.head);
	psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
	bif_ctrl = PSB_RSGX32(PSB_CR_BIF_CTRL);
	PSB_WSGX32(bif_ctrl |
		   _PSB_CB_CTRL_CLEAR_FAULT |
		   _PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);
	(void)PSB_RSGX32(PSB_CR_BIF_CTRL);
	msleep(1);
	PSB_WSGX32(bif_ctrl, PSB_CR_BIF_CTRL);
	(void)PSB_RSGX32(PSB_CR_BIF_CTRL);
	return psb_xhw_reset_dpm(dev_priv, &buf);
}

/*
 * Block command submission and reset hardware and schedulers.
 */

static void psb_reset_wq(struct work_struct *work)
{
	struct drm_psb_private *dev_priv =
	    container_of(work, struct drm_psb_private, watchdog_wq);
	int lockup_2d;
	int idle_2d;
	unsigned long irq_flags;
	int ret;
	int reset_count = 0;
	struct psb_xhw_buf buf;
	uint32_t xhw_lockup;

	/*
	 * Block command submission.
	 */

	mutex_lock(&dev_priv->reset_mutex);

	INIT_LIST_HEAD(&buf.head);
	if (psb_xhw_check_lockup(dev_priv, &buf, &xhw_lockup) == 0) {
		if (xhw_lockup == 0 && psb_extend_raster_timeout(dev_priv) == 0) {
			/*
			 * no lockup, just re-schedule
			 */
			spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
			dev_priv->timer_available = 1;
			spin_unlock_irqrestore(&dev_priv->watchdog_lock,
					       irq_flags);
			psb_schedule_watchdog(dev_priv);
			mutex_unlock(&dev_priv->reset_mutex);
			return;
		}
	}
#if 0
	msleep(PSB_2D_TIMEOUT_MSEC);

	psb_seq_lockup_idle(dev_priv, PSB_ENGINE_2D, &lockup_2d, &idle_2d);

	if (lockup_2d) {
		uint32_t seq_2d;
		spin_lock(&dev_priv->sequence_lock);
		seq_2d = dev_priv->sequence[PSB_ENGINE_2D];
		spin_unlock(&dev_priv->sequence_lock);
		psb_fence_error(dev_priv->scheduler.dev,
				PSB_ENGINE_2D,
				seq_2d, DRM_FENCE_TYPE_EXE, -EBUSY);
		DRM_INFO("Resetting 2D engine.\n");
	}

	psb_reset(dev_priv, lockup_2d);
#else
	(void)lockup_2d;
	(void)idle_2d;
	psb_reset(dev_priv, 0);
#endif
	(void)psb_xhw_mmu_reset(dev_priv);
	DRM_INFO("Resetting scheduler.\n");
	psb_scheduler_pause(dev_priv);
	psb_scheduler_reset(dev_priv, -EBUSY);
	psb_scheduler_ta_mem_check(dev_priv);

	while (dev_priv->ta_mem &&
	       !dev_priv->force_ta_mem_load && ++reset_count < 10) {

		/*
		 * TA memory is currently fenced so offsets
		 * are valid. Reload offsets into the dpm now.
		 */

		struct psb_xhw_buf buf;
		INIT_LIST_HEAD(&buf.head);

		msleep(100);
		DRM_INFO("Trying to reload TA memory.\n");
		ret = psb_xhw_ta_mem_load(dev_priv, &buf,
					  PSB_TA_MEM_FLAG_TA |
					  PSB_TA_MEM_FLAG_RASTER |
					  PSB_TA_MEM_FLAG_HOSTA |
					  PSB_TA_MEM_FLAG_HOSTD |
					  PSB_TA_MEM_FLAG_INIT,
					  dev_priv->ta_mem->ta_memory->offset,
					  dev_priv->ta_mem->hw_data->offset,
					  dev_priv->ta_mem->hw_cookie);
		if (!ret)
			break;

		psb_reset(dev_priv, 0);
		(void)psb_xhw_mmu_reset(dev_priv);
	}

	psb_scheduler_restart(dev_priv);
	spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
	dev_priv->timer_available = 1;
	spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
	mutex_unlock(&dev_priv->reset_mutex);
}

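/*
 * Initialize the watchdog lock, the reset work items and the watchdog
 * timer. The timer itself is armed later by psb_schedule_watchdog().
 */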
void psb_watchdog_init(struct drm_psb_private *dev_priv)
{
	struct timer_list *wt = &dev_priv->watchdog_timer;
	unsigned long irq_flags;

	spin_lock_init(&dev_priv->watchdog_lock);

	spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
	init_timer(wt);
	INIT_WORK(&dev_priv->watchdog_wq, &psb_reset_wq);
	INIT_WORK(&dev_priv->msvdx_watchdog_wq, &psb_msvdx_reset_wq);
	wt->data = (unsigned long)dev_priv;
	wt->function = &psb_watchdog_func;
	dev_priv->timer_available = 1;
	spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
}

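/*
 * Disable the watchdog and make sure a concurrently running timer callback
 * has finished before the driver is torn down.
 */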
void psb_watchdog_takedown(struct drm_psb_private *dev_priv)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
	dev_priv->timer_available = 0;
	spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
	(void)del_timer_sync(&dev_priv->watchdog_timer);
}