/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 1999, 2000  Ralf Baechle (ralf@gnu.org)
 * Copyright (c) 2000  Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H
#include <linux/types.h>
#include <asm/byteorder.h>		/* sigh ... */
#ifdef __KERNEL__

#include <asm/sgidefs.h>
#include <asm/system.h>
/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()
/*
 * Only disable interrupt for kernel mode stuff to keep usermode stuff
 * that dares to use kernel include files alive.
 */
#define __bi_flags unsigned long flags
#define __bi_cli() __cli()
#define __bi_save_flags(x) __save_flags(x)
#define __bi_save_and_cli(x) __save_and_cli(x)
#define __bi_restore_flags(x) __restore_flags(x)
#else
#define __bi_flags
#define __bi_cli()
#define __bi_save_flags(x)
#define __bi_save_and_cli(x)
#define __bi_restore_flags(x)
#endif /* __KERNEL__ */
#ifdef CONFIG_CPU_HAS_LLSC

#include <asm/mipsregs.h>

/*
 * These functions for MIPS ISA > 1 are interrupt and SMP proof and
 * interrupt friendly
 */
/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void
set_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp;

	__asm__ __volatile__(
		"1:\tll\t%0, %1\t\t# set_bit\n\t"
		"or\t%0, %2\n\t"
		"sc\t%0, %1\n\t"
		"beqz\t%0, 1b"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & 0x1f)), "m" (*m));
}
/*
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __set_bit(int nr, volatile void * addr)
{
	unsigned long * m = ((unsigned long *) addr) + (nr >> 5);

	*m |= 1UL << (nr & 31);
}
#define PLATFORM__SET_BIT
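
/*
 * __set_bit() suffices while a bitmap is still private to the current
 * thread of execution, e.g. during initialisation (sketch; "map" is
 * hypothetical):
 *
 *	unsigned long map[4] = { 0, };
 *
 *	__set_bit(3, map);		no LL/SC loop, map is unpublished
 */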
/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void
clear_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp;

	__asm__ __volatile__(
		"1:\tll\t%0, %1\t\t# clear_bit\n\t"
		"and\t%0, %2\n\t"
		"sc\t%0, %1\n\t"
		"beqz\t%0, 1b"
		: "=&r" (temp), "=m" (*m)
		: "ir" (~(1UL << (nr & 0x1f))), "m" (*m));
}
/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void
change_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp;

	__asm__ __volatile__(
		"1:\tll\t%0, %1\t\t# change_bit\n\t"
		"xor\t%0, %2\n\t"
		"sc\t%0, %1\n\t"
		"beqz\t%0, 1b"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & 0x1f)), "m" (*m));
}
/*
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __change_bit(int nr, volatile void * addr)
{
	unsigned long * m = ((unsigned long *) addr) + (nr >> 5);

	*m ^= 1UL << (nr & 31);
}
/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int
test_and_set_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp, res;

	__asm__ __volatile__(
		".set\tnoreorder\t\t# test_and_set_bit\n"
		"1:\tll\t%0, %1\n\t"
		"or\t%2, %0, %3\n\t"
		"sc\t%2, %1\n\t"
		"beqz\t%2, 1b\n\t"
		" and\t%2, %0, %3\n\t"
		".set\treorder"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & 0x1f)), "m" (*m)
		: "memory");

	return res != 0;
}
/*
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
{
	int mask, retval;
	volatile int *a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a |= mask;

	return retval;
}
/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int
test_and_clear_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp, res;

	__asm__ __volatile__(
		".set\tnoreorder\t\t# test_and_clear_bit\n"
		"1:\tll\t%0, %1\n\t"
		"or\t%2, %0, %3\n\t"
		"xor\t%2, %3\n\t"
		"sc\t%2, %1\n\t"
		"beqz\t%2, 1b\n\t"
		" and\t%2, %0, %3\n\t"
		".set\treorder"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & 0x1f)), "m" (*m)
		: "memory");

	return res != 0;
}
/*
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
{
	int mask, retval;
	volatile int *a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a &= ~mask;

	return retval;
}
/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int
test_and_change_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp, res;

	__asm__ __volatile__(
		".set\tnoreorder\t\t# test_and_change_bit\n"
		"1:\tll\t%0, %1\n\t"
		"xor\t%2, %0, %3\n\t"
		"sc\t%2, %1\n\t"
		"beqz\t%2, 1b\n\t"
		" and\t%2, %0, %3\n\t"
		".set\treorder"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & 0x1f)), "m" (*m)
		: "memory");

	return res != 0;
}
/*
 * __test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
{
	int mask, retval;
	volatile int *a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a ^= mask;

	return retval;
}
#else /* MIPS I */

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void * addr)
{
	int mask;
	volatile int *a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	*a |= mask;
	__bi_restore_flags(flags);
}
/*
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __set_bit(int nr, volatile void * addr)
{
	int mask;
	volatile int *a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	*a |= mask;
}
/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void * addr)
{
	int mask;
	volatile int *a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	*a &= ~mask;
	__bi_restore_flags(flags);
}
/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void change_bit(int nr, volatile void * addr)
{
	int mask;
	volatile int *a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	*a ^= mask;
	__bi_restore_flags(flags);
}
/*
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __change_bit(int nr, volatile void * addr)
{
	unsigned long * m = ((unsigned long *) addr) + (nr >> 5);

	*m ^= 1UL << (nr & 31);
}
/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_set_bit(int nr, volatile void * addr)
{
	int mask, retval;
	volatile int *a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	retval = (mask & *a) != 0;
	*a |= mask;
	__bi_restore_flags(flags);

	return retval;
}
/*
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
{
	int mask, retval;
	volatile int *a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a |= mask;

	return retval;
}
/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
	int mask, retval;
	volatile int *a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	retval = (mask & *a) != 0;
	*a &= ~mask;
	__bi_restore_flags(flags);

	return retval;
}
/*
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
{
	int mask, retval;
	volatile int *a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a &= ~mask;

	return retval;
}
/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_change_bit(int nr, volatile void * addr)
{
	int mask, retval;
	volatile int *a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	retval = (mask & *a) != 0;
	*a ^= mask;
	__bi_restore_flags(flags);

	return retval;
}
/*
 * __test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
{
	int mask, retval;
	volatile int *a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a ^= mask;

	return retval;
}

#undef __bi_flags
#undef __bi_cli
#undef __bi_save_flags
#undef __bi_save_and_cli
#undef __bi_restore_flags

#endif /* MIPS I */
/*
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static __inline__ int test_bit(int nr, const volatile void *addr)
{
	return ((1UL << (nr & 31)) & (((const unsigned int *) addr)[nr >> 5])) != 0;
}
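
/*
 * Sketch ("irq_pending" and handle_irq() are hypothetical):
 *
 *	if (test_bit(5, irq_pending))
 *		handle_irq(5);		bit 5 of irq_pending[0] was set
 */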
#ifndef __MIPSEB__

/* Little endian versions. */
/*
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first zero bit, not the number of the byte
 * containing a bit.
 */
static __inline__ int find_first_zero_bit (void *addr, unsigned size)
{
	unsigned long dummy;
	int res;

	if (!size)
		return 0;

	__asm__ (".set\tnoreorder\n\t"
		".set\tnoat\n"
		"1:\tsubu\t$1,%6,%0\n\t"
		"blez\t$1,2f\n\t"
		"lw\t$1,(%5)\n\t"
		"addiu\t%5,4\n\t"
#if (_MIPS_ISA == _MIPS_ISA_MIPS2 ) || (_MIPS_ISA == _MIPS_ISA_MIPS3 ) || \
    (_MIPS_ISA == _MIPS_ISA_MIPS4 ) || (_MIPS_ISA == _MIPS_ISA_MIPS5 ) || \
    (_MIPS_ISA == _MIPS_ISA_MIPS32) || (_MIPS_ISA == _MIPS_ISA_MIPS64)
		"beql\t%1,$1,1b\n\t"
		"addiu\t%0,32\n\t"
#else
		"addiu\t%0,32\n\t"
		"beq\t%1,$1,1b\n\t"
		"nop\n\t"
		"subu\t%0,32\n\t"
#endif
#ifdef __MIPSEB__
#error "Fix this for big endian"
#endif /* __MIPSEB__ */
		"li\t%1,1\n"
		"1:\tand\t%2,$1,%1\n\t"
		"beqz\t%2,2f\n\t"
		"sll\t%1,%1,1\n\t"
		"bnez\t%1,1b\n\t"
		"addiu\t%0,1\n\t"
		".set\tat\n\t"
		".set\treorder\n"
		"2:"
		: "=r" (res), "=r" (dummy), "=r" (addr)
		: "0" ((signed int) 0), "1" ((unsigned int) 0xffffffff),
		  "2" (addr), "r" (size)
		: "$1");

	return res;
}
/*
 * find_next_zero_bit - find the first zero bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
static __inline__ int find_next_zero_bit (void * addr, int size, int offset)
{
	unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
	int set = 0, bit = offset & 31, res;
	unsigned long dummy;

	if (bit) {
		/*
		 * Look for zero in first byte
		 */
#ifdef __MIPSEB__
#error "Fix this for big endian byte order"
#endif
		__asm__(".set\tnoreorder\n\t"
			".set\tnoat\n"
			"1:\tand\t$1,%4,%1\n\t"
			"beqz\t$1,1f\n\t"
			"sll\t%1,%1,1\n\t"
			"bnez\t%1,1b\n\t"
			"addiu\t%0,1\n\t"
			".set\tat\n\t"
			".set\treorder\n"
			"1:"
			: "=r" (set), "=r" (dummy)
			: "0" (0), "1" (1 << bit), "r" (*p)
			: "$1");
		if (set < (32 - bit))
			return set + offset;
		set = 32 - bit;
		p++;
	}
	/*
	 * No zero yet, search remaining full bytes for a zero
	 */
	res = find_first_zero_bit(p, size - 32 * (p - (unsigned int *) addr));
	return offset + set + res;
}

#endif /* !(__MIPSEB__) */
/*
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static __inline__ unsigned long ffz(unsigned long word)
{
	unsigned int __res;
	unsigned int mask = 1;

	__asm__ (
		".set\tnoreorder\n\t"
		".set\tnoat\n\t"
		"move\t%0,$0\n"
		"1:\tand\t$1,%2,%1\n\t"
		"beqz\t$1,2f\n\t"
		"sll\t%1,1\n\t"
		"bnez\t%1,1b\n\t"
		"addiu\t%0,1\n\t"
		".set\tat\n\t"
		".set\treorder\n"
		"2:\n\t"
		: "=&r" (__res), "=r" (mask)
		: "r" (word), "1" (mask)
		: "$1");

	return __res;
}
#ifdef __KERNEL__

/*
 * hweightN - returns the hamming weight of a N-bit word
 * @x: the word to weigh
 *
 * The Hamming Weight of a number is the total number of bits set in it.
 */
#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)
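
/*
 * E.g. hweight32(0xf0f0f0f0) == 16; the generic_hweight*() routines
 * simply count the set bits of their argument.
 */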
#endif /* __KERNEL__ */

#ifdef __MIPSEB__
/*
 * find_next_zero_bit - find the first zero bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
static __inline__ int find_next_zero_bit(void *addr, int size, int offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		tmp = *(p++);
		tmp |= ~0UL >> (32-offset);
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;
found_first:
	tmp |= ~0UL << size;
found_middle:
	return result + ffz(tmp);
}
/* Linus sez that gcc can optimize the following correctly, we'll see if this
 * holds on the Sparc as it does for the ALPHA.
 */

#if 0 /* Fool kernel-doc since it doesn't do macros yet */
/*
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first zero bit, not the number of the byte
 * containing a bit.
 */
static int find_first_zero_bit (void *addr, unsigned size);
#endif

#define find_first_zero_bit(addr, size) \
	find_next_zero_bit((addr), (size), 0)

#endif /* (__MIPSEB__) */

/* Now for the ext2 filesystem bit operations and helper routines. */

#ifdef __MIPSEB__
static __inline__ int ext2_set_bit(int nr, void * addr)
{
	int mask, retval, flags;
	unsigned char *ADDR = (unsigned char *) addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	save_and_cli(flags);
	retval = (mask & *ADDR) != 0;
	*ADDR |= mask;
	restore_flags(flags);

	return retval;
}
static __inline__ int ext2_clear_bit(int nr, void * addr)
{
	int mask, retval, flags;
	unsigned char *ADDR = (unsigned char *) addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	save_and_cli(flags);
	retval = (mask & *ADDR) != 0;
	*ADDR &= ~mask;
	restore_flags(flags);

	return retval;
}
static __inline__ int ext2_test_bit(int nr, const void * addr)
{
	int mask;
	const unsigned char *ADDR = (const unsigned char *) addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);

	return ((mask & *ADDR) != 0);
}

#define ext2_find_first_zero_bit(addr, size) \
	ext2_find_next_zero_bit((addr), (size), 0)
static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		/* We hold the little endian value in tmp, but then the
		 * shift is illegal. So we could keep a big endian value
		 * in tmp, like this:
		 *
		 * tmp = __swab32(*(p++));
		 * tmp |= ~0UL >> (32-offset);
		 *
		 * but this would decrease performance, so we change the
		 * shift:
		 */
		tmp = *(p++);
		tmp |= __swab32(~0UL >> (32-offset));
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	/* tmp is little endian, so we would have to swab the shift,
	 * see above. But then we have to swab tmp below for ffz, so
	 * we might as well do this here.
	 */
	return result + ffz(__swab32(tmp) | (~0UL << size));
found_middle:
	return result + ffz(__swab32(tmp));
}
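
/*
 * Typical use: scan an ext2 block bitmap for a free block, resuming at a
 * goal block (sketch; "bitmap", goal and nbits are hypothetical):
 *
 *	unsigned long blk = ext2_find_next_zero_bit(bitmap, nbits, goal);
 *	if (blk < nbits)
 *		ext2_set_bit(blk, bitmap);
 */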
#else /* !(__MIPSEB__) */

/* Native ext2 byte ordering, just collapse using defines. */
#define ext2_set_bit(nr, addr) test_and_set_bit((nr), (addr))
#define ext2_clear_bit(nr, addr) test_and_clear_bit((nr), (addr))
#define ext2_test_bit(nr, addr) test_bit((nr), (addr))
#define ext2_find_first_zero_bit(addr, size) find_first_zero_bit((addr), (size))
#define ext2_find_next_zero_bit(addr, size, offset) \
	find_next_zero_bit((addr), (size), (offset))

#endif /* !(__MIPSEB__) */
/*
 * Bitmap functions for the minix filesystem.
 * FIXME: These assume that Minix uses the native byte/bitorder.
 * This limits the Minix filesystem's value for data exchange very much.
 */
#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr) set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)

#endif /* _ASM_BITOPS_H */