@@ -57,11 +57,11 @@
  * We split the mutex lock/unlock logic into separate fastpath and
  * slowpath functions, to reduce the register pressure on the fastpath.
  * We also put the fastpath first in the kernel image, to make sure the
  * branch is predicted by the CPU as default-untaken.
  */
-static void noinline __sched
+static __used noinline void __sched
 __mutex_lock_slowpath(atomic_t *lock_count);
 
 /***
  * mutex_lock - acquire the mutex
  * @lock: the mutex to be acquired
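
The change in this hunk adds `__used` to the slowpath declaration. On some architectures the slowpath is referenced only from an inline-asm template inside the fastpath helper (x86-32's `__mutex_fastpath_lock()` emits the call from assembly), so the compiler sees no C-level caller; with unit-at-a-time optimization GCC is then entitled to discard the static function, and the asm `call` fails at link time. `__used`, the kernel's shorthand for `__attribute__((used))`, forces the definition to be emitted regardless. A minimal standalone sketch of the pattern, with hypothetical names (`slowpath_helper`, `fastpath`) and no asm clobber list, so illustrative only:

	#define __used __attribute__((used))

	/* Only the asm template below references this function; compilers
	 * do not parse asm strings for symbol uses, so without __used the
	 * static definition may be dropped as dead code. */
	static __used void slowpath_helper(void)
	{
		/* out-of-line work would live here */
	}

	void fastpath(void)
	{
		asm volatile("call slowpath_helper");	/* x86, sketch only */
	}
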
@@ -94,11 +94,11 @@
 }
 
 EXPORT_SYMBOL(mutex_lock);
 #endif
 
-static noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
+static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
 
 /***
  * mutex_unlock - release the mutex
  * @lock: the mutex to be released
  *
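
The comment in the first hunk explains why these out-of-line slowpaths exist at all: the fastpath stays small enough that the uncontended case costs one atomic operation plus a default-untaken branch. For orientation, roughly what the generic fastpath helper does (a sketch in the spirit of asm-generic/mutex-dec.h, not a verbatim copy):

	static inline void
	__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
	{
		/* 1 -> 0 means we took the mutex uncontended; a negative
		 * result means it was already held, so go out of line */
		if (unlikely(atomic_dec_return(count) < 0))
			fail_fn(count);
	}
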
@@ -182,11 +182,11 @@
 		schedule();
 		spin_lock_mutex(&lock->wait_lock, flags);
 	}
 
 done:
-	lock_acquired(&lock->dep_map);
+	lock_acquired(&lock->dep_map, ip);
 	/* got the lock - rejoice! */
 	mutex_remove_waiter(lock, &waiter, task_thread_info(task));
 	debug_mutex_set_owner(lock, task_thread_info(task));
 
 	/* set it to 0 if there are no waiters left: */
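
The extra `ip` argument is for lockstat contention points: `lock_acquired()` now records the call site at which a contended lock was finally taken, so the statistics can tie the wait and the eventual acquisition to the same caller. A condensed sketch of how the address threads through a slowpath (`example_lock_slowpath` is a hypothetical name; `_RET_IP_` wraps `__builtin_return_address(0)`):

	static noinline void __sched example_lock_slowpath(struct mutex *lock)
	{
		unsigned long ip = _RET_IP_;		/* the fastpath call site */

		lock_contended(&lock->dep_map, ip);	/* we had to wait here... */
		/* ... wait-queue/schedule() loop elided ... */
		lock_acquired(&lock->dep_map, ip);	/* ...and won the lock here */
	}
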
@@ -266,11 +266,11 @@
 }
 
 /*
  * Release the lock, slowpath:
  */
-static noinline void
+static __used noinline void
 __mutex_unlock_slowpath(atomic_t *lock_count)
 {
 	__mutex_unlock_common_slowpath(lock_count, 1);
 }
 
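The literal `1` passed here is a `nested` flag that the shared helper hands on to lockdep's `mutex_release()`. A condensed sketch of the helper's opening, from memory rather than the exact source:

	static inline void
	__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
	{
		struct mutex *lock = container_of(lock_count, struct mutex, count);
		unsigned long flags;

		spin_lock_mutex(&lock->wait_lock, flags);
		mutex_release(&lock->dep_map, nested, _RET_IP_);	/* lockdep */
		debug_mutex_unlock(lock);
		/* ... wake one waiter, drop wait_lock ... */
	}
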
@@ -311,11 +311,11 @@
 	return __mutex_fastpath_lock_retval
 			(&lock->count, __mutex_lock_killable_slowpath);
 }
 EXPORT_SYMBOL(mutex_lock_killable);
 
-static noinline void __sched
+static __used noinline void __sched
 __mutex_lock_slowpath(atomic_t *lock_count)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
 
 	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, _RET_IP_);
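
Note why the slowpath receives an `atomic_t *` rather than the mutex itself: the fastpath (possibly pure asm) passes only `&lock->count`, and `container_of()` steps back from the member to the enclosing `struct mutex`. The underlying arithmetic, simplified (the real kernel macro adds a `typeof`-based type check on `ptr`):

	#include <stddef.h>

	/* Recover a pointer to the structure that contains `member`
	 * by subtracting the member's offset within the type. */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))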