~ubuntu-branches/ubuntu/natty/linux-backports-modules-2.6.32/natty

Viewing changes to updates/nouveau/ttm/ttm_lock.c

  • Committer: Bazaar Package Importer
  • Author(s): Andy Whitcroft, Tim Gardner
  • Date: 2010-03-08 08:59:41 UTC
  • Revision ID: james.westby@ubuntu.com-20100308085941-rrlpd5wtuh7m2an9
Tags: 2.6.32-16.6
[ Andy Whitcroft ]

* Lucid ABI 16
* nouveau -- make the nouveau package an optional build
* nouveau -- disable generation of nouveau
* nouveau -- drop the redundant nouveau source

[ Tim Gardner ]

* Added iwlwifi 6000 series firmware
* udev expects firmware in /lib/firmware/updates/`uname -r`

/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include "ttm/ttm_lock.h"
#include "ttm/ttm_module.h"
#include <asm/atomic.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/module.h>

#define TTM_WRITE_LOCK_PENDING    (1 << 0)
#define TTM_VT_LOCK_PENDING       (1 << 1)
#define TTM_SUSPEND_LOCK_PENDING  (1 << 2)
#define TTM_VT_LOCK               (1 << 3)
#define TTM_SUSPEND_LOCK          (1 << 4)

void ttm_lock_init(struct ttm_lock *lock)
{
        spin_lock_init(&lock->lock);
        init_waitqueue_head(&lock->queue);
        lock->rw = 0;
        lock->flags = 0;
        lock->kill_takers = false;
        lock->signal = SIGKILL;
}
EXPORT_SYMBOL(ttm_lock_init);

void ttm_read_unlock(struct ttm_lock *lock)
{
        spin_lock(&lock->lock);
        if (--lock->rw == 0)
                wake_up_all(&lock->queue);
        spin_unlock(&lock->lock);
}
EXPORT_SYMBOL(ttm_read_unlock);

static bool __ttm_read_lock(struct ttm_lock *lock)
{
        bool locked = false;

        spin_lock(&lock->lock);
        if (unlikely(lock->kill_takers)) {
                send_sig(lock->signal, current, 0);
                spin_unlock(&lock->lock);
                return false;
        }
        if (lock->rw >= 0 && lock->flags == 0) {
                ++lock->rw;
                locked = true;
        }
        spin_unlock(&lock->lock);
        return locked;
}

int ttm_read_lock(struct ttm_lock *lock, bool interruptible)
{
        int ret = 0;

        if (interruptible)
                ret = wait_event_interruptible(lock->queue,
                                               __ttm_read_lock(lock));
        else
                wait_event(lock->queue, __ttm_read_lock(lock));
        return ret;
}
EXPORT_SYMBOL(ttm_read_lock);
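
/*
 * A hedged usage sketch: lock->rw counts active readers, is 0 when idle and
 * -1 while write-locked, and lock->flags tracks pending/held exclusive modes,
 * so ttm_read_lock() sleeps until no exclusive mode is held or pending.
 * example_read_locked_section() below is a hypothetical caller that brackets
 * a shared-access section with the read lock.
 */
static int example_read_locked_section(struct ttm_lock *lock)
{
        int ret;

        ret = ttm_read_lock(lock, true);        /* interruptible wait */
        if (unlikely(ret != 0))
                return ret;                     /* typically -ERESTARTSYS */

        /* ... submit commands / touch shared state here ... */

        ttm_read_unlock(lock);
        return 0;
}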

static bool __ttm_read_trylock(struct ttm_lock *lock, bool *locked)
{
        bool block = true;

        *locked = false;

        spin_lock(&lock->lock);
        if (unlikely(lock->kill_takers)) {
                send_sig(lock->signal, current, 0);
                spin_unlock(&lock->lock);
                return false;
        }
        if (lock->rw >= 0 && lock->flags == 0) {
                ++lock->rw;
                block = false;
                *locked = true;
        } else if (lock->flags == 0) {
                block = false;
        }
        spin_unlock(&lock->lock);

        return !block;
}

int ttm_read_trylock(struct ttm_lock *lock, bool interruptible)
{
        int ret = 0;
        bool locked;

        if (interruptible)
                ret = wait_event_interruptible
                        (lock->queue, __ttm_read_trylock(lock, &locked));
        else
                wait_event(lock->queue, __ttm_read_trylock(lock, &locked));

        if (unlikely(ret != 0)) {
                BUG_ON(locked);
                return ret;
        }

        return (locked) ? 0 : -EBUSY;
}
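
/*
 * A hedged usage sketch: unlike ttm_read_lock(), ttm_read_trylock() returns
 * -EBUSY instead of sleeping when the lock is currently write-locked (it
 * still waits while lock->flags is non-zero).  example_read_try() below is a
 * hypothetical caller that backs off when the lock cannot be taken.
 */
static int example_read_try(struct ttm_lock *lock)
{
        int ret;

        ret = ttm_read_trylock(lock, true);
        if (ret == -EBUSY)
                return ret;                     /* write-locked right now; caller falls back */
        if (unlikely(ret != 0))
                return ret;                     /* interrupted by a signal */

        /* ... shared-access work here ... */

        ttm_read_unlock(lock);
        return 0;
}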

void ttm_write_unlock(struct ttm_lock *lock)
{
        spin_lock(&lock->lock);
        lock->rw = 0;
        wake_up_all(&lock->queue);
        spin_unlock(&lock->lock);
}
EXPORT_SYMBOL(ttm_write_unlock);

static bool __ttm_write_lock(struct ttm_lock *lock)
{
        bool locked = false;

        spin_lock(&lock->lock);
        if (unlikely(lock->kill_takers)) {
                send_sig(lock->signal, current, 0);
                spin_unlock(&lock->lock);
                return false;
        }
        if (lock->rw == 0 && ((lock->flags & ~TTM_WRITE_LOCK_PENDING) == 0)) {
                lock->rw = -1;
                lock->flags &= ~TTM_WRITE_LOCK_PENDING;
                locked = true;
        } else {
                lock->flags |= TTM_WRITE_LOCK_PENDING;
        }
        spin_unlock(&lock->lock);
        return locked;
}

int ttm_write_lock(struct ttm_lock *lock, bool interruptible)
{
        int ret = 0;

        if (interruptible) {
                ret = wait_event_interruptible(lock->queue,
                                               __ttm_write_lock(lock));
                if (unlikely(ret != 0)) {
                        spin_lock(&lock->lock);
                        lock->flags &= ~TTM_WRITE_LOCK_PENDING;
                        wake_up_all(&lock->queue);
                        spin_unlock(&lock->lock);
                }
        } else
                wait_event(lock->queue, __ttm_write_lock(lock));

        return ret;
}
EXPORT_SYMBOL(ttm_write_lock);

void ttm_write_lock_downgrade(struct ttm_lock *lock)
{
        spin_lock(&lock->lock);
        lock->rw = 1;
        wake_up_all(&lock->queue);
        spin_unlock(&lock->lock);
}
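
/*
 * A hedged usage sketch: ttm_write_lock() waits until there are no readers
 * and no other exclusive mode, then sets lock->rw to -1;
 * ttm_write_lock_downgrade() converts that into a single read lock
 * (lock->rw = 1) without dropping exclusivity in between.
 * example_write_then_downgrade() below is a hypothetical caller.
 */
static int example_write_then_downgrade(struct ttm_lock *lock)
{
        int ret;

        ret = ttm_write_lock(lock, true);
        if (unlikely(ret != 0))
                return ret;

        /* ... exclusive work while lock->rw == -1 ... */

        ttm_write_lock_downgrade(lock);         /* now held as a read lock */

        /* ... work that only needs shared access ... */

        ttm_read_unlock(lock);                  /* releases the downgraded lock */
        return 0;
}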

static int __ttm_vt_unlock(struct ttm_lock *lock)
{
        int ret = 0;

        spin_lock(&lock->lock);
        if (unlikely(!(lock->flags & TTM_VT_LOCK)))
                ret = -EINVAL;
        lock->flags &= ~TTM_VT_LOCK;
        wake_up_all(&lock->queue);
        spin_unlock(&lock->lock);
        printk(KERN_INFO TTM_PFX "vt unlock.\n");

        return ret;
}

static void ttm_vt_lock_remove(struct ttm_base_object **p_base)
{
        struct ttm_base_object *base = *p_base;
        struct ttm_lock *lock = container_of(base, struct ttm_lock, base);
        int ret;

        *p_base = NULL;
        ret = __ttm_vt_unlock(lock);
        BUG_ON(ret != 0);
}

static bool __ttm_vt_lock(struct ttm_lock *lock)
{
        bool locked = false;

        spin_lock(&lock->lock);
        if (lock->rw == 0) {
                lock->flags &= ~TTM_VT_LOCK_PENDING;
                lock->flags |= TTM_VT_LOCK;
                locked = true;
        } else {
                lock->flags |= TTM_VT_LOCK_PENDING;
        }
        spin_unlock(&lock->lock);
        return locked;
}

int ttm_vt_lock(struct ttm_lock *lock,
                bool interruptible,
                struct ttm_object_file *tfile)
{
        int ret = 0;

        if (interruptible) {
                ret = wait_event_interruptible(lock->queue,
                                               __ttm_vt_lock(lock));
                if (unlikely(ret != 0)) {
                        spin_lock(&lock->lock);
                        lock->flags &= ~TTM_VT_LOCK_PENDING;
                        wake_up_all(&lock->queue);
                        spin_unlock(&lock->lock);
                        return ret;
                }
        } else
                wait_event(lock->queue, __ttm_vt_lock(lock));

        /*
         * Add a base-object, the destructor of which will
         * make sure the lock is released if the client dies
         * while holding it.
         */

        ret = ttm_base_object_init(tfile, &lock->base, false,
                                   ttm_lock_type, &ttm_vt_lock_remove, NULL);
        if (ret)
                (void)__ttm_vt_unlock(lock);
        else {
                lock->vt_holder = tfile;
                printk(KERN_INFO TTM_PFX "vt lock.\n");
        }

        return ret;
}
EXPORT_SYMBOL(ttm_vt_lock);

int ttm_vt_unlock(struct ttm_lock *lock)
{
        return ttm_ref_object_base_unref(lock->vt_holder,
                                         lock->base.hash.key, TTM_REF_USAGE);
}
EXPORT_SYMBOL(ttm_vt_unlock);
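
/*
 * A hedged usage sketch: the VT lock is taken on behalf of a user-space
 * client.  ttm_vt_lock() registers a base object whose destructor
 * (ttm_vt_lock_remove()) releases the lock if the client dies while holding
 * it, and ttm_vt_unlock() drops it by unreferencing that object.
 * example_vt_lock_ioctl() below is a hypothetical ioctl-style caller; tfile
 * is assumed to come from the caller's file-private data.
 */
static int example_vt_lock_ioctl(struct ttm_lock *lock,
                                 struct ttm_object_file *tfile)
{
        int ret;

        ret = ttm_vt_lock(lock, true, tfile);
        if (unlikely(ret != 0))
                return ret;             /* interrupted, or base-object init failed */

        /* The client now holds the exclusive VT lock until ttm_vt_unlock()
         * is called on its behalf or it exits. */
        return 0;
}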

void ttm_suspend_unlock(struct ttm_lock *lock)
{
        spin_lock(&lock->lock);
        lock->flags &= ~TTM_SUSPEND_LOCK;
        wake_up_all(&lock->queue);
        spin_unlock(&lock->lock);
}
EXPORT_SYMBOL(ttm_suspend_unlock);

static bool __ttm_suspend_lock(struct ttm_lock *lock)
{
        bool locked = false;

        spin_lock(&lock->lock);
        if (lock->rw == 0) {
                lock->flags &= ~TTM_SUSPEND_LOCK_PENDING;
                lock->flags |= TTM_SUSPEND_LOCK;
                locked = true;
        } else {
                lock->flags |= TTM_SUSPEND_LOCK_PENDING;
        }
        spin_unlock(&lock->lock);
        return locked;
}

void ttm_suspend_lock(struct ttm_lock *lock)
{
        wait_event(lock->queue, __ttm_suspend_lock(lock));
}
EXPORT_SYMBOL(ttm_suspend_lock);
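
/*
 * A hedged usage sketch: the suspend lock is a kernel-internal exclusive
 * mode.  ttm_suspend_lock() waits (uninterruptibly) for all readers to
 * drain before setting TTM_SUSPEND_LOCK, and ttm_suspend_unlock() clears it
 * and wakes all waiters.  example_suspend()/example_resume() below are
 * hypothetical PM callbacks bracketing device suspend.
 */
static int example_suspend(struct ttm_lock *lock)
{
        ttm_suspend_lock(lock);                 /* blocks new readers from here on */

        /* ... save device state, evict buffers, etc. ... */

        return 0;
}

static int example_resume(struct ttm_lock *lock)
{
        /* ... restore device state ... */

        ttm_suspend_unlock(lock);               /* let readers back in */
        return 0;
}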