#ifndef _ASM_GENERIC_BITOPS_ATOMIC_H_
#define _ASM_GENERIC_BITOPS_ATOMIC_H_

#include <asm/types.h>

#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#include <asm/cache.h>	/* we use L1_CACHE_BYTES */

/* Use an array of spinlocks for our atomic_ts.
 * Hash function to index into a different SPINLOCK.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */
# define ATOMIC_HASH_SIZE 4
# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))

extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
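
/*
 * Worked example (illustrative; the cacheline size is an assumption,
 * here L1_CACHE_BYTES == 32): addresses in the same cacheline hash to
 * the same lock, while the next cacheline gets a different one:
 *
 *	ATOMIC_HASH(0x1000) -> &__atomic_hash[(0x1000/32) & 3] == &__atomic_hash[0]
 *	ATOMIC_HASH(0x101c) -> &__atomic_hash[(0x101c/32) & 3] == &__atomic_hash[0]
 *	ATOMIC_HASH(0x1020) -> &__atomic_hash[(0x1020/32) & 3] == &__atomic_hash[1]
 */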

/* Can't use raw_spin_lock_irq because of #include problems, so
 * this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do {	\
	raw_spinlock_t *s = ATOMIC_HASH(l);	\
	local_irq_save(f);			\
	__raw_spin_lock(s);			\
} while(0)

#define _atomic_spin_unlock_irqrestore(l,f) do {	\
	raw_spinlock_t *s = ATOMIC_HASH(l);		\
	__raw_spin_unlock(s);				\
	local_irq_restore(f);				\
} while(0)

#else
# define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
# define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif
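
/*
 * Sketch (illustrative only, compiled out; __example_inc is a
 * hypothetical name, not part of this header): how the helpers pair
 * up around a plain read-modify-write.  On SMP this takes the hashed
 * spinlock; on UP it degrades to disabling local interrupts.
 */
#if 0
static inline void __example_inc(volatile unsigned long *word)
{
	unsigned long flags;

	_atomic_spin_lock_irqsave(word, flags);
	*word += 1;	/* non-atomic RMW, made safe by the lock */
	_atomic_spin_unlock_irqrestore(word, flags);
}
#endif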

/*
 * NMI events can occur at any time, including when interrupts have been
 * disabled by *_irqsave().  So you can get NMI events occurring while a
 * *_bit function is holding a spin lock.  If the NMI handler also wants
 * to do bit manipulation (and some do) then you can get a deadlock
 * between the original caller of *_bit() and the NMI handler.
 *
 * by Keith Owens
 */
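
/*
 * The hazard above, sketched (illustrative only, do not copy):
 *
 *	set_bit(0, &word);	 <- takes the hashed spinlock
 *	  ... NMI arrives while the lock is held ...
 *	  nmi_handler()
 *	    set_bit(1, &word);	 <- spins on the same lock: deadlock
 *
 * local_irq_save() in _atomic_spin_lock_irqsave() cannot mask NMIs,
 * which is why this window exists.
 */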

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 *
 * Note: there are no guarantees that this function will not be reordered
 * on non-x86 architectures, so if you are writing portable code,
 * make sure not to rely on its reordering guarantees.
 *
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	*p |= mask;
	_atomic_spin_unlock_irqrestore(p, flags);
}
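
/*
 * Usage sketch (illustrative, compiled out; the bitmap and function
 * names are hypothetical): publishing a flag that other CPUs may test
 * concurrently.
 */
#if 0
static DECLARE_BITMAP(example_present, 64);

static void example_plug(int slot)
{
	set_bit(slot, example_present);	/* atomic: no lost updates */
}
#endif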

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	*p &= ~mask;
	_atomic_spin_unlock_irqrestore(p, flags);
}
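
/*
 * Sketch (illustrative, compiled out; names are hypothetical) of the
 * barrier note above: when clear_bit() releases a lock-like bit, the
 * barrier orders the critical-section stores before the clear.
 */
#if 0
static inline void example_unlock_bit(volatile unsigned long *word)
{
	smp_mb__before_clear_bit();	/* make prior stores visible first */
	clear_bit(0, word);
}
#endif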

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered on x86; on other
 * architectures it may be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	*p ^= mask;
	_atomic_spin_unlock_irqrestore(p, flags);
}
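
/*
 * Sketch (illustrative, compiled out; names are hypothetical):
 * atomically flipping a state bit from a periodic callback.
 */
#if 0
static inline void example_blink(volatile unsigned long *led_word)
{
	change_bit(0, led_word);	/* toggle without losing updates */
}
#endif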

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and may not be reordered on x86; on other
 * architectures it may be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old;
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	old = *p;
	*p = old | mask;
	_atomic_spin_unlock_irqrestore(p, flags);

	return (old & mask) != 0;
}
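
/*
 * Sketch (illustrative, compiled out; example_trylock is
 * hypothetical): the classic try-lock built on test_and_set_bit().
 */
#if 0
static inline int example_trylock(volatile unsigned long *word)
{
	/* Returns 1 if we claimed the bit, 0 if it was already set. */
	return !test_and_set_bit(0, word);
}
#endif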

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and may not be reordered on x86; on other
 * architectures it may be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old;
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	old = *p;
	*p = old & ~mask;
	_atomic_spin_unlock_irqrestore(p, flags);

	return (old & mask) != 0;
}
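
/*
 * Sketch (illustrative, compiled out; names are hypothetical):
 * consuming a "work pending" flag so only one CPU processes it.
 */
#if 0
static inline int example_take_pending(volatile unsigned long *word)
{
	/* Only one caller sees 1 for any single setting of the bit. */
	return test_and_clear_bit(0, word);
}
#endif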

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old;
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	old = *p;
	*p = old ^ mask;
	_atomic_spin_unlock_irqrestore(p, flags);

	return (old & mask) != 0;
}
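
/*
 * Sketch (illustrative, compiled out; names are hypothetical):
 * flipping a parity bit and learning its previous value in one step.
 */
#if 0
static inline int example_flip_parity(volatile unsigned long *word)
{
	return test_and_change_bit(0, word);	/* old value of the bit */
}
#endif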

#endif /* _ASM_GENERIC_BITOPS_ATOMIC_H_ */