~ubuntu-branches/ubuntu/precise/linux-ti-omap4/precise

Viewing changes to arch/x86/include/asm/rwsem.h

  • Committer: Bazaar Package Importer
  • Author(s): Paolo Pisati
  • Date: 2011-06-29 15:23:51 UTC
  • mfrom: (26.1.1 natty-proposed)
  • Revision ID: james.westby@ubuntu.com-20110629152351-xs96tm303d95rpbk

Tags: 3.0.0-1200.2

* Rebased against 3.0.0-6.7
* BSP from TI based on 3.0.0

--- a/arch/x86/include/asm/rwsem.h
+++ b/arch/x86/include/asm/rwsem.h
@@ -37,26 +37,9 @@
 #endif
 
 #ifdef __KERNEL__
-
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <linux/lockdep.h>
 #include <asm/asm.h>
 
-struct rwsem_waiter;
-
-extern asmregparm struct rw_semaphore *
- rwsem_down_read_failed(struct rw_semaphore *sem);
-extern asmregparm struct rw_semaphore *
- rwsem_down_write_failed(struct rw_semaphore *sem);
-extern asmregparm struct rw_semaphore *
- rwsem_wake(struct rw_semaphore *);
-extern asmregparm struct rw_semaphore *
- rwsem_downgrade_wake(struct rw_semaphore *sem);
-
 /*
- * the semaphore definition
- *
  * The bias values and the counter type limits the number of
  * potential readers/writers to 32767 for 32 bits and 2147483647
  * for 64 bits.
@@ -74,43 +57,6 @@
 #define RWSEM_ACTIVE_READ_BIAS          RWSEM_ACTIVE_BIAS
 #define RWSEM_ACTIVE_WRITE_BIAS         (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 
-typedef signed long rwsem_count_t;
-
-struct rw_semaphore {
-        rwsem_count_t           count;
-        spinlock_t              wait_lock;
-        struct list_head        wait_list;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-        struct lockdep_map dep_map;
-#endif
-};
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
-#else
-# define __RWSEM_DEP_MAP_INIT(lockname)
-#endif
-
-
-#define __RWSEM_INITIALIZER(name)                               \
-{                                                               \
-        RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
-        LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) \
-}
-
-#define DECLARE_RWSEM(name)                                     \
-        struct rw_semaphore name = __RWSEM_INITIALIZER(name)
-
-extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
-                         struct lock_class_key *key);
-
-#define init_rwsem(sem)                                         \
-do {                                                            \
-        static struct lock_class_key __key;                     \
-                                                                \
-        __init_rwsem((sem), #sem, &__key);                      \
-} while (0)
-
 /*
  * lock for reading
  */
@@ -133,7 +79,7 @@
  */
 static inline int __down_read_trylock(struct rw_semaphore *sem)
 {
-        rwsem_count_t result, tmp;
+        long result, tmp;
         asm volatile("# beginning __down_read_trylock\n\t"
                      "  mov          %0,%1\n\t"
                      "1:\n\t"
@@ -155,7 +101,7 @@
  */
 static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
 {
-        rwsem_count_t tmp;
+        long tmp;
         asm volatile("# beginning down_write\n\t"
                      LOCK_PREFIX "  xadd      %1,(%2)\n\t"
                      /* adds 0xffff0001, returns the old value */
@@ -180,9 +126,8 @@
  */
 static inline int __down_write_trylock(struct rw_semaphore *sem)
 {
-        rwsem_count_t ret = cmpxchg(&sem->count,
-                                    RWSEM_UNLOCKED_VALUE,
-                                    RWSEM_ACTIVE_WRITE_BIAS);
+        long ret = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
+                           RWSEM_ACTIVE_WRITE_BIAS);
         if (ret == RWSEM_UNLOCKED_VALUE)
                 return 1;
         return 0;
@@ -193,7 +138,7 @@
  */
 static inline void __up_read(struct rw_semaphore *sem)
 {
-        rwsem_count_t tmp;
+        long tmp;
         asm volatile("# beginning __up_read\n\t"
                      LOCK_PREFIX "  xadd      %1,(%2)\n\t"
                      /* subtracts 1, returns the old value */
@@ -211,7 +156,7 @@
  */
 static inline void __up_write(struct rw_semaphore *sem)
 {
-        rwsem_count_t tmp;
+        long tmp;
         asm volatile("# beginning __up_write\n\t"
                      LOCK_PREFIX "  xadd      %1,(%2)\n\t"
                      /* subtracts 0xffff0001, returns the old value */
@@ -247,8 +192,7 @@
 /*
  * implement atomic add functionality
  */
-static inline void rwsem_atomic_add(rwsem_count_t delta,
-                                    struct rw_semaphore *sem)
+static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
 {
         asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
                      : "+m" (sem->count)
@@ -258,10 +202,9 @@
 /*
  * implement exchange and add functionality
  */
-static inline rwsem_count_t rwsem_atomic_update(rwsem_count_t delta,
-                                                struct rw_semaphore *sem)
+static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
 {
-        rwsem_count_t tmp = delta;
+        long tmp = delta;
 
         asm volatile(LOCK_PREFIX "xadd %0,%1"
                      : "+r" (tmp), "+m" (sem->count)
@@ -270,10 +213,5 @@
         return tmp + delta;
 }
 
-static inline int rwsem_is_locked(struct rw_semaphore *sem)
-{
-        return (sem->count != 0);
-}
-
 #endif /* __KERNEL__ */
 #endif /* _ASM_X86_RWSEM_H */
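
A note on the counting scheme this diff leaves in place: the whole lock state lives in one signed word. Each active reader adds RWSEM_ACTIVE_READ_BIAS (+1) and a writer adds RWSEM_ACTIVE_WRITE_BIAS (the waiting bias plus one, 0xffff0001 on 32-bit, matching the asm comments above), so a zero count means unlocked, a positive count means readers only, and a negative count means a writer or blocked waiters; the signed active-count field is what limits readers/writers to 32767 on 32 bits and 2147483647 on 64 bits. The user-space sketch below illustrates that arithmetic. It is not the kernel code: the sketch_* names are invented for illustration, GCC __atomic builtins stand in for the LOCK-prefixed inline asm, the read trylock backs out on failure instead of taking the rwsem_down_read_failed slow path, and the 32-bit bias values are assumed.

/* Minimal sketch of the rwsem counting scheme (illustrative only). */
#include <stdio.h>

#define RWSEM_UNLOCKED_VALUE    0x00000000L
#define RWSEM_ACTIVE_BIAS       0x00000001L
#define RWSEM_WAITING_BIAS      (-0x00010000L)   /* 32-bit value assumed */
#define RWSEM_ACTIVE_READ_BIAS  RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

struct sketch_rwsem { long count; };

static int sketch_down_write_trylock(struct sketch_rwsem *sem)
{
        long expected = RWSEM_UNLOCKED_VALUE;
        /* One cmpxchg from "unlocked" straight to the write-locked
         * value, as in __down_write_trylock above; any reader, writer
         * or waiter makes the count non-zero and the exchange fail. */
        return __atomic_compare_exchange_n(&sem->count, &expected,
                                           RWSEM_ACTIVE_WRITE_BIAS, 0,
                                           __ATOMIC_ACQUIRE,
                                           __ATOMIC_RELAXED);
}

static int sketch_down_read_trylock(struct sketch_rwsem *sem)
{
        /* Add the read bias; a non-negative result means no writer
         * holds the lock. The kernel's __down_read branches to the
         * rwsem_down_read_failed slow path here instead of backing
         * the increment out. */
        long newval = __atomic_add_fetch(&sem->count,
                                         RWSEM_ACTIVE_READ_BIAS,
                                         __ATOMIC_ACQUIRE);
        if (newval < 0) {
                __atomic_fetch_sub(&sem->count, RWSEM_ACTIVE_READ_BIAS,
                                   __ATOMIC_RELEASE);
                return 0;
        }
        return 1;
}

int main(void)
{
        struct sketch_rwsem sem = { RWSEM_UNLOCKED_VALUE };

        /* Two readers can share the lock... */
        printf("reader 1: %d  count=0x%lx\n",
               sketch_down_read_trylock(&sem), sem.count);
        printf("reader 2: %d  count=0x%lx\n",
               sketch_down_read_trylock(&sem), sem.count);
        /* ...but the writer's cmpxchg must see RWSEM_UNLOCKED_VALUE. */
        printf("writer:   %d  count=0x%lx\n",
               sketch_down_write_trylock(&sem), sem.count);
        return 0;
}

Compiled with plain gcc (the builtins need no extra flags), the two readers succeed with the count going 0x1 then 0x2, and the writer fails because the count is no longer RWSEM_UNLOCKED_VALUE.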