/*
 * include/asm-generic/mutex-dec.h
 *
 * Generic implementation of the mutex fastpath, based on atomic
 * decrement/increment.
 */
#ifndef _ASM_GENERIC_MUTEX_DEC_H
#define _ASM_GENERIC_MUTEX_DEC_H
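
/*
 * Illustrative note (not part of the original header): an architecture
 * normally opts in to this decrement/increment-based fastpath from its
 * own <asm/mutex.h>, typically with nothing more than:
 *
 *	#include <asm-generic/mutex-dec.h>
 *
 * Architectures whose cheapest atomic primitive is xchg() would use the
 * asm-generic/mutex-xchg.h variant instead.
 */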
/**
 * __mutex_fastpath_lock - try to take the lock by moving the count
 *                         from 1 to a 0 value
 * @count: pointer of type atomic_t
 * @fail_fn: function to call if the original value was not 1
 *
 * Change the count from 1 to a value lower than 1, and call <fail_fn> if
 * it wasn't 1 originally. This function MUST leave the value lower than
 * 1 even when the "1" assertion wasn't true.
 */
static inline void
__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
	if (unlikely(atomic_dec_return(count) < 0))
		fail_fn(count);
}
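
/*
 * Illustrative sketch (not part of the original header): the generic
 * mutex core would typically pair this fastpath with its out-of-line
 * contention handler along the following lines, where
 * __mutex_lock_slowpath() stands in for that handler:
 *
 *	void mutex_lock(struct mutex *lock)
 *	{
 *		might_sleep();
 *		__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
 *	}
 *
 * The uncontended case is thus a single atomic decrement; the slowpath
 * only runs when the decrement drops the count below zero, i.e. when
 * the lock was not free.
 */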

/**
 * __mutex_fastpath_lock_retval - try to take the lock by moving the count
 *                                from 1 to a 0 value
 * @count: pointer of type atomic_t
 * @fail_fn: function to call if the original value was not 1
 *
 * Change the count from 1 to a value lower than 1, and call <fail_fn> if
 * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
 * or whatever the slowpath function returns otherwise.
 */
static inline int
__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
{
	if (unlikely(atomic_dec_return(count) < 0))
		return fail_fn(count);
	return 0;
}
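
/*
 * Illustrative sketch (not part of the original header): the returned
 * value makes this variant suitable for interruptible/killable locking,
 * with __mutex_lock_interruptible_slowpath() standing in for an
 * out-of-line handler that may return -EINTR:
 *
 *	int mutex_lock_interruptible(struct mutex *lock)
 *	{
 *		might_sleep();
 *		return __mutex_fastpath_lock_retval(&lock->count,
 *				__mutex_lock_interruptible_slowpath);
 *	}
 */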

/**
 * __mutex_fastpath_unlock - try to promote the count from 0 to 1
 * @count: pointer of type atomic_t
 * @fail_fn: function to call if the original value was not 0
 *
 * Try to promote the count from 0 to 1. If it wasn't 0, call <fail_fn>.
 * In the failure case, this function is allowed to either set the value to
 * 1, or to set it to a value lower than 1.
 *
 * If the implementation sets it to a value lower than 1, then the
 * __mutex_slowpath_needs_to_unlock() macro must return 1; otherwise it
 * must return 0.
 */
static inline void
__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
	if (unlikely(atomic_inc_return(count) <= 0))
		fail_fn(count);
}
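
/*
 * Illustrative sketch (not part of the original header): the generic
 * unlock path would use this helper roughly as follows, with
 * __mutex_unlock_slowpath() standing in for the waiter wakeup code:
 *
 *	void mutex_unlock(struct mutex *lock)
 *	{
 *		__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
 *	}
 *
 * An increment result of <= 0 means the count was negative, i.e. there
 * are (or may be) waiters that the slowpath has to wake up.
 */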

#define __mutex_slowpath_needs_to_unlock()	1
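/*
 * With the decrement/increment scheme above, a failed unlock fastpath
 * leaves the count at a value lower than 1 (fail_fn is called without
 * first restoring the count), so the slowpath must set the count back
 * to 1 itself - hence the macro above evaluates to 1.
 */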

/**
 * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
 *
 * @count: pointer of type atomic_t
 * @fail_fn: fallback function
 *
 * Change the count from 1 to a value lower than 1, and return 0 (failure)
 * if it wasn't 1 originally, or return 1 (success) otherwise. This function
 * MUST leave the value lower than 1 even when the "1" assertion wasn't true.
 * Additionally, if the value was < 0 originally, this function must not
 * leave it at 0 on failure.
 *
 * If the architecture has no effective trylock variant, it should call the
 * <fail_fn> spinlock-based trylock variant unconditionally.
 */
static inline int
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
{
	/*
	 * The cmpxchg based variant only touches the count when it is
	 * exactly 1, so it never creates a false contention state and
	 * <fail_fn> is not needed in the fastpath at all.
	 */
	if (likely(atomic_cmpxchg(count, 1, 0) == 1))
		return 1;
	return 0;
}
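
/*
 * Illustrative sketch (not part of the original header): the generic
 * mutex_trylock() would wrap the helper above roughly as follows, with
 * __mutex_trylock_slowpath() standing in for a spinlock-based fallback:
 *
 *	int mutex_trylock(struct mutex *lock)
 *	{
 *		return __mutex_fastpath_trylock(&lock->count,
 *						__mutex_trylock_slowpath);
 *	}
 */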

#endif /* _ASM_GENERIC_MUTEX_DEC_H */