~ubuntu-branches/ubuntu/precise/linux-lowlatency/precise


Viewing changes to drivers/gpu/drm/vmwgfx/vmwgfx_irq.c

  • Committer: Package Import Robot
  • Author(s): Alessio Igor Bogani
  • Date: 2011-10-26 11:13:05 UTC
  • Revision ID: package-import@ubuntu.com-20111026111305-tz023xykf0i6eosh
Tags: upstream-3.2.0
Import upstream version 3.2.0

/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "drmP.h"
#include "vmwgfx_drv.h"

#define VMW_FENCE_WRAP (1 << 24)

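/*
 * Top-half interrupt handler. Reads and acks the pending status bits
 * from the device I/O port, then wakes any waiters for the fence or
 * FIFO-progress events that were signalled.
 */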
irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS)
{
        struct drm_device *dev = (struct drm_device *)arg;
        struct vmw_private *dev_priv = vmw_priv(dev);
        uint32_t status, masked_status;

        spin_lock(&dev_priv->irq_lock);
        status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
        masked_status = status & dev_priv->irq_mask;
        spin_unlock(&dev_priv->irq_lock);

        if (likely(status))
                outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);

        if (!masked_status)
                return IRQ_NONE;

        if (masked_status & (SVGA_IRQFLAG_ANY_FENCE |
                             SVGA_IRQFLAG_FENCE_GOAL)) {
                vmw_fences_update(dev_priv->fman);
                wake_up_all(&dev_priv->fence_queue);
        }

        if (masked_status & SVGA_IRQFLAG_FIFO_PROGRESS)
                wake_up_all(&dev_priv->fifo_queue);

        return IRQ_HANDLED;
}

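/*
 * Check whether the device has gone idle, used as the wait condition
 * when the FIFO lacks fence support. The seqno argument is unused here.
 */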
static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
{
        uint32_t busy;

        mutex_lock(&dev_priv->hw_mutex);
        busy = vmw_read(dev_priv, SVGA_REG_BUSY);
        mutex_unlock(&dev_priv->hw_mutex);

        return (busy == 0);
}

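/*
 * Re-read the last fence seqno the device has processed from FIFO
 * memory and, if it changed, retire markers and update fence objects.
 */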
void vmw_update_seqno(struct vmw_private *dev_priv,
                         struct vmw_fifo_state *fifo_state)
{
        __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
        uint32_t seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);

        if (dev_priv->last_read_seqno != seqno) {
                dev_priv->last_read_seqno = seqno;
                vmw_marker_pull(&fifo_state->marker_queue, seqno);
                vmw_fences_update(dev_priv->fman);
        }
}

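/*
 * Return true if the given seqno has passed. Checks the cached value
 * first, then re-reads it from the device, and finally falls back to
 * idle- and staleness-based checks for FIFOs without fence support.
 */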
bool vmw_seqno_passed(struct vmw_private *dev_priv,
                         uint32_t seqno)
{
        struct vmw_fifo_state *fifo_state;
        bool ret;

        if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
                return true;

        fifo_state = &dev_priv->fifo;
        vmw_update_seqno(dev_priv, fifo_state);
        if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
                return true;

        if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE) &&
            vmw_fifo_idle(dev_priv, seqno))
                return true;

        /*
         * Then check if the seqno is higher than what we've actually
         * emitted. In that case the fence is stale and signaled.
         */

        ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
               > VMW_FENCE_WRAP);

        return ret;
}

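/*
 * Poll-based wait used when the device cannot deliver fence
 * interrupts. Optionally yields the CPU between polls (lazy), and
 * blocks command submission while waiting for FIFO idle.
 */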
int vmw_fallback_wait(struct vmw_private *dev_priv,
                      bool lazy,
                      bool fifo_idle,
                      uint32_t seqno,
                      bool interruptible,
                      unsigned long timeout)
{
        struct vmw_fifo_state *fifo_state = &dev_priv->fifo;

        uint32_t count = 0;
        uint32_t signal_seq;
        int ret;
        unsigned long end_jiffies = jiffies + timeout;
        bool (*wait_condition)(struct vmw_private *, uint32_t);
        DEFINE_WAIT(__wait);

        wait_condition = (fifo_idle) ? &vmw_fifo_idle :
                &vmw_seqno_passed;

        /*
         * Block command submission while waiting for idle.
         */

        if (fifo_idle)
                down_read(&fifo_state->rwsem);
        signal_seq = atomic_read(&dev_priv->marker_seq);
        ret = 0;

        for (;;) {
                prepare_to_wait(&dev_priv->fence_queue, &__wait,
                                (interruptible) ?
                                TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
                if (wait_condition(dev_priv, seqno))
                        break;
                if (time_after_eq(jiffies, end_jiffies)) {
                        DRM_ERROR("SVGA device lockup.\n");
                        break;
                }
                if (lazy)
                        schedule_timeout(1);
                else if ((++count & 0x0F) == 0) {
                        /*
                         * FIXME: Use schedule_hrtimeout here for
                         * newer kernels and lower CPU utilization.
                         */

                        __set_current_state(TASK_RUNNING);
                        schedule();
                        __set_current_state((interruptible) ?
                                            TASK_INTERRUPTIBLE :
                                            TASK_UNINTERRUPTIBLE);
                }
                if (interruptible && signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        break;
                }
        }
        finish_wait(&dev_priv->fence_queue, &__wait);
        if (ret == 0 && fifo_idle) {
                __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
                iowrite32(signal_seq, fifo_mem + SVGA_FIFO_FENCE);
        }
        wake_up_all(&dev_priv->fence_queue);
        if (fifo_idle)
                up_read(&fifo_state->rwsem);

        return ret;
}

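/*
 * Reference-counted enable of the ANY_FENCE interrupt: the first
 * waiter acks any stale status and unmasks the IRQ in the device.
 */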
void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
{
        mutex_lock(&dev_priv->hw_mutex);
        if (dev_priv->fence_queue_waiters++ == 0) {
                unsigned long irq_flags;

                spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
                outl(SVGA_IRQFLAG_ANY_FENCE,
                     dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
                dev_priv->irq_mask |= SVGA_IRQFLAG_ANY_FENCE;
                vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
                spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
        }
        mutex_unlock(&dev_priv->hw_mutex);
}

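/*
 * Drop a fence waiter reference; the last waiter masks the ANY_FENCE
 * interrupt again.
 */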
void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
{
        mutex_lock(&dev_priv->hw_mutex);
        if (--dev_priv->fence_queue_waiters == 0) {
                unsigned long irq_flags;

                spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
                dev_priv->irq_mask &= ~SVGA_IRQFLAG_ANY_FENCE;
                vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
                spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
        }
        mutex_unlock(&dev_priv->hw_mutex);
}

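/*
 * Same reference-counting scheme as the seqno waiters above, but for
 * the FENCE_GOAL interrupt.
 */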
void vmw_goal_waiter_add(struct vmw_private *dev_priv)
{
        mutex_lock(&dev_priv->hw_mutex);
        if (dev_priv->goal_queue_waiters++ == 0) {
                unsigned long irq_flags;

                spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
                outl(SVGA_IRQFLAG_FENCE_GOAL,
                     dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
                dev_priv->irq_mask |= SVGA_IRQFLAG_FENCE_GOAL;
                vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
                spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
        }
        mutex_unlock(&dev_priv->hw_mutex);
}

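/*
 * Counterpart of vmw_goal_waiter_add(); masks FENCE_GOAL when the
 * last waiter leaves.
 */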
void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
{
        mutex_lock(&dev_priv->hw_mutex);
        if (--dev_priv->goal_queue_waiters == 0) {
                unsigned long irq_flags;

                spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
                dev_priv->irq_mask &= ~SVGA_IRQFLAG_FENCE_GOAL;
                vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
                spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
        }
        mutex_unlock(&dev_priv->hw_mutex);
}

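/*
 * Top-level seqno wait. Takes the fast paths if the seqno has already
 * passed, falls back to polling when fences or IRQ masking are
 * unsupported, and otherwise sleeps on the fence queue with the
 * ANY_FENCE interrupt enabled.
 */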
int vmw_wait_seqno(struct vmw_private *dev_priv,
                      bool lazy, uint32_t seqno,
                      bool interruptible, unsigned long timeout)
{
        long ret;
        struct vmw_fifo_state *fifo = &dev_priv->fifo;

        if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
                return 0;

        if (likely(vmw_seqno_passed(dev_priv, seqno)))
                return 0;

        vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);

        if (!(fifo->capabilities & SVGA_FIFO_CAP_FENCE))
                return vmw_fallback_wait(dev_priv, lazy, true, seqno,
                                         interruptible, timeout);

        if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
                return vmw_fallback_wait(dev_priv, lazy, false, seqno,
                                         interruptible, timeout);

        vmw_seqno_waiter_add(dev_priv);

        if (interruptible)
                ret = wait_event_interruptible_timeout
                    (dev_priv->fence_queue,
                     vmw_seqno_passed(dev_priv, seqno),
                     timeout);
        else
                ret = wait_event_timeout
                    (dev_priv->fence_queue,
                     vmw_seqno_passed(dev_priv, seqno),
                     timeout);

        vmw_seqno_waiter_remove(dev_priv);

        if (unlikely(ret == 0))
                ret = -EBUSY;
        else if (likely(ret > 0))
                ret = 0;

        return ret;
}

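/*
 * Called before the IRQ handler is installed: initialize the lock and
 * ack any stale interrupt status bits.
 */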
void vmw_irq_preinstall(struct drm_device *dev)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        uint32_t status;

        if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
                return;

        spin_lock_init(&dev_priv->irq_lock);
        status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
        outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
}

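/*
 * Nothing to do after installation; interrupts are unmasked on demand
 * by the waiter_add helpers above.
 */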
int vmw_irq_postinstall(struct drm_device *dev)
{
        return 0;
}

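/*
 * Mask all device interrupts and ack any still pending before the
 * handler is removed.
 */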
void vmw_irq_uninstall(struct drm_device *dev)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        uint32_t status;

        if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
                return;

        mutex_lock(&dev_priv->hw_mutex);
        vmw_write(dev_priv, SVGA_REG_IRQMASK, 0);
        mutex_unlock(&dev_priv->hw_mutex);

        status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
        outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
}