|
1 #ifndef __LINUX_SPINLOCK_H |
|
2 #define __LINUX_SPINLOCK_H |
|
3 |
|
4 /* |
|
5 * include/linux/spinlock.h - generic spinlock/rwlock declarations |
|
6 * |
|
7 * here's the role of the various spinlock/rwlock related include files: |
|
8 * |
|
9 * on SMP builds: |
|
10 * |
|
11 * asm/spinlock_types.h: contains the raw_spinlock_t/raw_rwlock_t and the |
|
12 * initializers |
|
13 * |
|
14 * linux/spinlock_types.h: |
|
15 * defines the generic type and initializers |
|
16 * |
|
17 * asm/spinlock.h: contains the __raw_spin_*()/etc. lowlevel |
|
18 * implementations, mostly inline assembly code |
|
19 * |
|
20 * (also included on UP-debug builds:) |
|
21 * |
|
22 * linux/spinlock_api_smp.h: |
|
23 * contains the prototypes for the _spin_*() APIs. |
|
24 * |
|
25 * linux/spinlock.h: builds the final spin_*() APIs. |
|
26 * |
|
27 * on UP builds: |
|
28 * |
|
29 * linux/spinlock_type_up.h: |
|
30 * contains the generic, simplified UP spinlock type. |
|
31 * (which is an empty structure on non-debug builds) |
|
32 * |
|
33 * linux/spinlock_types.h: |
|
34 * defines the generic type and initializers |
|
35 * |
|
36 * linux/spinlock_up.h: |
|
37 * contains the __raw_spin_*()/etc. version of UP |
|
38 * builds. (which are NOPs on non-debug, non-preempt |
|
39 * builds) |
|
40 * |
|
41 * (included on UP-non-debug builds:) |
|
42 * |
|
43 * linux/spinlock_api_up.h: |
|
44 * builds the _spin_*() APIs. |
|
45 * |
|
46 * linux/spinlock.h: builds the final spin_*() APIs. |
|
47 */ |
|
48 |
|
49 #include <linux/typecheck.h> |
|
50 #include <linux/preempt.h> |
|
51 #include <linux/linkage.h> |
|
52 #include <linux/compiler.h> |
|
53 #include <linux/thread_info.h> |
|
54 #include <linux/kernel.h> |
|
55 #include <linux/stringify.h> |
|
56 #include <linux/bottom_half.h> |
|
57 |
|
58 #include <asm/system.h> |
|
59 |
|
60 /* |
|
61 * Must define these before including other files, inline functions need them |
|
62 */ |
|
63 #define LOCK_SECTION_NAME ".text.lock."KBUILD_BASENAME |
|
64 |
|
65 #define LOCK_SECTION_START(extra) \ |
|
66 ".subsection 1\n\t" \ |
|
67 extra \ |
|
68 ".ifndef " LOCK_SECTION_NAME "\n\t" \ |
|
69 LOCK_SECTION_NAME ":\n\t" \ |
|
70 ".endif\n" |
|
71 |
|
72 #define LOCK_SECTION_END \ |
|
73 ".previous\n\t" |
|
74 |
|
75 #define __lockfunc __attribute__((section(".spinlock.text"))) |
|
76 |
|
77 /* |
|
78 * Pull the raw_spinlock_t and raw_rwlock_t definitions: |
|
79 */ |
|
80 #include <linux/spinlock_types.h> |
|
81 |
|
82 extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock); |
|
83 |
|
84 /* |
|
85 * Pull the __raw*() functions/declarations (UP-nondebug doesnt need them): |
|
86 */ |
|
87 #ifdef CONFIG_SMP |
|
88 # include <asm/spinlock.h> |
|
89 #else |
|
90 # include <linux/spinlock_up.h> |
|
91 #endif |
|
92 |
|
#ifdef CONFIG_DEBUG_SPINLOCK
  extern void __spin_lock_init(spinlock_t *lock, const char *name,
			       struct lock_class_key *key);
/*
 * On debug builds each static initialization site gets its own
 * lock_class_key so lockdep can classify the lock per call site.
 */
# define spin_lock_init(lock)					\
do {								\
	static struct lock_class_key __key;			\
								\
	__spin_lock_init((lock), #lock, &__key);		\
} while (0)

#else
# define spin_lock_init(lock)					\
	do { *(lock) = SPIN_LOCK_UNLOCKED; } while (0)
#endif

#ifdef CONFIG_DEBUG_SPINLOCK
  extern void __rwlock_init(rwlock_t *lock, const char *name,
			    struct lock_class_key *key);
# define rwlock_init(lock)					\
do {								\
	static struct lock_class_key __key;			\
								\
	__rwlock_init((lock), #lock, &__key);			\
} while (0)
#else
# define rwlock_init(lock)					\
	do { *(lock) = RW_LOCK_UNLOCKED; } while (0)
#endif

#define spin_is_locked(lock)	__raw_spin_is_locked(&(lock)->raw_lock)

#ifdef CONFIG_GENERIC_LOCKBREAK
#define spin_is_contended(lock) ((lock)->break_lock)
#else
#define spin_is_contended(lock)	__raw_spin_is_contended(&(lock)->raw_lock)
#endif

/**
 * spin_unlock_wait - wait until the spinlock gets unlocked
 * @lock: the spinlock in question.
 */
#define spin_unlock_wait(lock)	__raw_spin_unlock_wait(&(lock)->raw_lock)
|
135 |
|
136 /* |
|
137 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations: |
|
138 */ |
|
139 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) |
|
140 # include <linux/spinlock_api_smp.h> |
|
141 #else |
|
142 # include <linux/spinlock_api_up.h> |
|
143 #endif |
|
144 |
|
145 #ifdef CONFIG_DEBUG_SPINLOCK |
|
146 extern void _raw_spin_lock(spinlock_t *lock); |
|
147 #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) |
|
148 extern int _raw_spin_trylock(spinlock_t *lock); |
|
149 extern void _raw_spin_unlock(spinlock_t *lock); |
|
150 extern void _raw_read_lock(rwlock_t *lock); |
|
151 extern int _raw_read_trylock(rwlock_t *lock); |
|
152 extern void _raw_read_unlock(rwlock_t *lock); |
|
153 extern void _raw_write_lock(rwlock_t *lock); |
|
154 extern int _raw_write_trylock(rwlock_t *lock); |
|
155 extern void _raw_write_unlock(rwlock_t *lock); |
|
156 #else |
|
157 # define _raw_spin_lock(lock) __raw_spin_lock(&(lock)->raw_lock) |
|
158 # define _raw_spin_lock_flags(lock, flags) \ |
|
159 __raw_spin_lock_flags(&(lock)->raw_lock, *(flags)) |
|
160 # define _raw_spin_trylock(lock) __raw_spin_trylock(&(lock)->raw_lock) |
|
161 # define _raw_spin_unlock(lock) __raw_spin_unlock(&(lock)->raw_lock) |
|
162 # define _raw_read_lock(rwlock) __raw_read_lock(&(rwlock)->raw_lock) |
|
163 # define _raw_read_trylock(rwlock) __raw_read_trylock(&(rwlock)->raw_lock) |
|
164 # define _raw_read_unlock(rwlock) __raw_read_unlock(&(rwlock)->raw_lock) |
|
165 # define _raw_write_lock(rwlock) __raw_write_lock(&(rwlock)->raw_lock) |
|
166 # define _raw_write_trylock(rwlock) __raw_write_trylock(&(rwlock)->raw_lock) |
|
167 # define _raw_write_unlock(rwlock) __raw_write_unlock(&(rwlock)->raw_lock) |
|
168 #endif |
|
169 |
|
170 #define read_can_lock(rwlock) __raw_read_can_lock(&(rwlock)->raw_lock) |
|
171 #define write_can_lock(rwlock) __raw_write_can_lock(&(rwlock)->raw_lock) |
|
172 |
|
173 /* |
|
174 * Define the various spin_lock and rw_lock methods. Note we define these |
|
175 * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various |
|
176 * methods are defined as nops in the case they are not required. |
|
177 */ |
|
178 #define spin_trylock(lock) __cond_lock(lock, _spin_trylock(lock)) |
|
179 #define read_trylock(lock) __cond_lock(lock, _read_trylock(lock)) |
|
180 #define write_trylock(lock) __cond_lock(lock, _write_trylock(lock)) |
|
181 |
|
182 #define spin_lock(lock) _spin_lock(lock) |
|
183 |
|
184 #ifdef CONFIG_DEBUG_LOCK_ALLOC |
|
185 # define spin_lock_nested(lock, subclass) _spin_lock_nested(lock, subclass) |
|
186 # define spin_lock_nest_lock(lock, nest_lock) \ |
|
187 do { \ |
|
188 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\ |
|
189 _spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \ |
|
190 } while (0) |
|
191 #else |
|
192 # define spin_lock_nested(lock, subclass) _spin_lock(lock) |
|
193 # define spin_lock_nest_lock(lock, nest_lock) _spin_lock(lock) |
|
194 #endif |
|
195 |
|
196 #define write_lock(lock) _write_lock(lock) |
|
197 #define read_lock(lock) _read_lock(lock) |
|
198 |
|
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)

/*
 * On SMP/debug builds the _*_irqsave() functions return the saved flags;
 * typecheck() catches callers that pass a pointer or the wrong type.
 */
#define spin_lock_irqsave(lock, flags)			\
	do {						\
		typecheck(unsigned long, flags);	\
		flags = _spin_lock_irqsave(lock);	\
	} while (0)
#define read_lock_irqsave(lock, flags)			\
	do {						\
		typecheck(unsigned long, flags);	\
		flags = _read_lock_irqsave(lock);	\
	} while (0)
#define write_lock_irqsave(lock, flags)			\
	do {						\
		typecheck(unsigned long, flags);	\
		flags = _write_lock_irqsave(lock);	\
	} while (0)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define spin_lock_irqsave_nested(lock, flags, subclass)			\
	do {								\
		typecheck(unsigned long, flags);			\
		flags = _spin_lock_irqsave_nested(lock, subclass);	\
	} while (0)
#else
#define spin_lock_irqsave_nested(lock, flags, subclass)			\
	do {								\
		typecheck(unsigned long, flags);			\
		flags = _spin_lock_irqsave(lock);			\
	} while (0)
#endif

#else

/*
 * On UP-nondebug builds the _*_irqsave() variants take flags by macro
 * argument instead of returning them.
 */
#define spin_lock_irqsave(lock, flags)			\
	do {						\
		typecheck(unsigned long, flags);	\
		_spin_lock_irqsave(lock, flags);	\
	} while (0)
#define read_lock_irqsave(lock, flags)			\
	do {						\
		typecheck(unsigned long, flags);	\
		_read_lock_irqsave(lock, flags);	\
	} while (0)
#define write_lock_irqsave(lock, flags)			\
	do {						\
		typecheck(unsigned long, flags);	\
		_write_lock_irqsave(lock, flags);	\
	} while (0)
#define spin_lock_irqsave_nested(lock, flags, subclass)	\
	spin_lock_irqsave(lock, flags)

#endif
|
252 |
|
#define spin_lock_irq(lock)		_spin_lock_irq(lock)
#define spin_lock_bh(lock)		_spin_lock_bh(lock)

#define read_lock_irq(lock)		_read_lock_irq(lock)
#define read_lock_bh(lock)		_read_lock_bh(lock)

#define write_lock_irq(lock)		_write_lock_irq(lock)
#define write_lock_bh(lock)		_write_lock_bh(lock)

/*
 * We inline the unlock functions in the nondebug case:
 */
#if defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT) || \
	!defined(CONFIG_SMP)
# define spin_unlock(lock)		_spin_unlock(lock)
# define read_unlock(lock)		_read_unlock(lock)
# define write_unlock(lock)		_write_unlock(lock)
# define spin_unlock_irq(lock)		_spin_unlock_irq(lock)
# define read_unlock_irq(lock)		_read_unlock_irq(lock)
# define write_unlock_irq(lock)		_write_unlock_irq(lock)
#else
# define spin_unlock(lock) \
    do {__raw_spin_unlock(&(lock)->raw_lock); __release(lock); } while (0)
# define read_unlock(lock) \
    do {__raw_read_unlock(&(lock)->raw_lock); __release(lock); } while (0)
# define write_unlock(lock) \
    do {__raw_write_unlock(&(lock)->raw_lock); __release(lock); } while (0)
# define spin_unlock_irq(lock)			\
do {						\
	__raw_spin_unlock(&(lock)->raw_lock);	\
	__release(lock);			\
	local_irq_enable();			\
} while (0)
# define read_unlock_irq(lock)			\
do {						\
	__raw_read_unlock(&(lock)->raw_lock);	\
	__release(lock);			\
	local_irq_enable();			\
} while (0)
# define write_unlock_irq(lock)			\
do {						\
	__raw_write_unlock(&(lock)->raw_lock);	\
	__release(lock);			\
	local_irq_enable();			\
} while (0)
#endif
|
299 |
|
300 #define spin_unlock_irqrestore(lock, flags) \ |
|
301 do { \ |
|
302 typecheck(unsigned long, flags); \ |
|
303 _spin_unlock_irqrestore(lock, flags); \ |
|
304 } while (0) |
|
305 #define spin_unlock_bh(lock) _spin_unlock_bh(lock) |
|
306 |
|
307 #define read_unlock_irqrestore(lock, flags) \ |
|
308 do { \ |
|
309 typecheck(unsigned long, flags); \ |
|
310 _read_unlock_irqrestore(lock, flags); \ |
|
311 } while (0) |
|
312 #define read_unlock_bh(lock) _read_unlock_bh(lock) |
|
313 |
|
314 #define write_unlock_irqrestore(lock, flags) \ |
|
315 do { \ |
|
316 typecheck(unsigned long, flags); \ |
|
317 _write_unlock_irqrestore(lock, flags); \ |
|
318 } while (0) |
|
319 #define write_unlock_bh(lock) _write_unlock_bh(lock) |
|
320 |
|
321 #define spin_trylock_bh(lock) __cond_lock(lock, _spin_trylock_bh(lock)) |
|
322 |
|
323 #define spin_trylock_irq(lock) \ |
|
324 ({ \ |
|
325 local_irq_disable(); \ |
|
326 spin_trylock(lock) ? \ |
|
327 1 : ({ local_irq_enable(); 0; }); \ |
|
328 }) |
|
329 |
|
330 #define spin_trylock_irqsave(lock, flags) \ |
|
331 ({ \ |
|
332 local_irq_save(flags); \ |
|
333 spin_trylock(lock) ? \ |
|
334 1 : ({ local_irq_restore(flags); 0; }); \ |
|
335 }) |
|
336 |
|
337 #define write_trylock_irqsave(lock, flags) \ |
|
338 ({ \ |
|
339 local_irq_save(flags); \ |
|
340 write_trylock(lock) ? \ |
|
341 1 : ({ local_irq_restore(flags); 0; }); \ |
|
342 }) |
|
343 |
|
344 /* |
|
345 * Pull the atomic_t declaration: |
|
346 * (asm-mips/atomic.h needs above definitions) |
|
347 */ |
|
348 #include <asm/atomic.h> |
|
349 /** |
|
350 * atomic_dec_and_lock - lock on reaching reference count zero |
|
351 * @atomic: the atomic counter |
|
352 * @lock: the spinlock in question |
|
353 * |
|
354 * Decrements @atomic by 1. If the result is 0, returns true and locks |
|
355 * @lock. Returns false for all other cases. |
|
356 */ |
|
357 extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock); |
|
358 #define atomic_dec_and_lock(atomic, lock) \ |
|
359 __cond_lock(lock, _atomic_dec_and_lock(atomic, lock)) |
|
360 |
|
361 /** |
|
362 * spin_can_lock - would spin_trylock() succeed? |
|
363 * @lock: the spinlock in question. |
|
364 */ |
|
365 #define spin_can_lock(lock) (!spin_is_locked(lock)) |
|
366 |
|
367 #endif /* __LINUX_SPINLOCK_H */ |