arch/x86/include/asm/mutex_64.h
changeset 0:aa628870c1d3
       
/*
 * Assembly implementation of the mutex fastpath, based on atomic
 * decrement/increment.
 *
 * started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 */
#ifndef _ASM_X86_MUTEX_64_H
#define _ASM_X86_MUTEX_64_H
       
/**
 * __mutex_fastpath_lock - decrement and call function if negative
 * @v: pointer of type atomic_t
 * @fail_fn: function to call if the result is negative
 *
 * Atomically decrements @v and calls <fail_fn> if the result is negative.
 * Note that <fail_fn> is invoked directly from the inline asm with @v
 * already in %rdi, so it must be a real function symbol rather than a
 * function pointer; all caller-saved registers are clobbered accordingly.
 */
#define __mutex_fastpath_lock(v, fail_fn)			\
do {								\
	unsigned long dummy;					\
								\
	typecheck(atomic_t *, v);				\
	typecheck_fn(void (*)(atomic_t *), fail_fn);		\
								\
	asm volatile(LOCK_PREFIX "   decl (%%rdi)\n"		\
		     "   jns 1f		\n"			\
		     "   call " #fail_fn "\n"			\
		     "1:"					\
		     : "=D" (dummy)				\
		     : "D" (v)					\
		     : "rax", "rsi", "rdx", "rcx",		\
		       "r8", "r9", "r10", "r11", "memory");	\
} while (0)
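
/*
 * Illustration only (not part of the original header): a minimal sketch of
 * how the generic mutex code is expected to invoke the fastpath above. The
 * demo_* names are hypothetical stand-ins for the real callers in
 * kernel/mutex.c. The slowpath must be an actual function symbol, because
 * the macro emits a literal "call" to it.
 */
#if 0
static noinline void __demo_lock_slowpath(atomic_t *lock_count);

static inline void demo_mutex_lock(struct mutex *lock)
{
	/* Uncontended case: one locked decl, no call into the slowpath. */
	__mutex_fastpath_lock(&lock->count, __demo_lock_slowpath);
}
#endif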
       
/**
 *  __mutex_fastpath_lock_retval - try to take the lock by moving the count
 *                                 from 1 to a 0 value
 *  @count: pointer of type atomic_t
 *  @fail_fn: function to call if the original value was not 1
 *
 * Change the count from 1 to a value lower than 1, and call <fail_fn> if
 * it wasn't 1 originally (i.e. the decrement result went negative). This
 * function returns 0 if the fastpath succeeds, or whatever the slow path
 * function returns.
 */
static inline int __mutex_fastpath_lock_retval(atomic_t *count,
					       int (*fail_fn)(atomic_t *))
{
	if (unlikely(atomic_dec_return(count) < 0))
		return fail_fn(count);
	else
		return 0;
}
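
/*
 * Illustration only: a mutex_lock_interruptible()-style caller, mirroring
 * the structure of kernel/mutex.c. The demo_* names are hypothetical; note
 * that the slowpath's return value (e.g. -EINTR) propagates straight
 * through to the caller.
 */
#if 0
static noinline int __demo_lock_interruptible_slowpath(atomic_t *lock_count);

static inline int demo_mutex_lock_interruptible(struct mutex *lock)
{
	return __mutex_fastpath_lock_retval(&lock->count,
					    __demo_lock_interruptible_slowpath);
}
#endif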
       
/**
 * __mutex_fastpath_unlock - increment and call function if nonpositive
 * @v: pointer of type atomic_t
 * @fail_fn: function to call if the result is nonpositive
 *
 * Atomically increments @v and calls <fail_fn> if the result is nonpositive.
 * The calling convention and clobbers match __mutex_fastpath_lock() above.
 */
#define __mutex_fastpath_unlock(v, fail_fn)			\
do {								\
	unsigned long dummy;					\
								\
	typecheck(atomic_t *, v);				\
	typecheck_fn(void (*)(atomic_t *), fail_fn);		\
								\
	asm volatile(LOCK_PREFIX "   incl (%%rdi)\n"		\
		     "   jg 1f\n"				\
		     "   call " #fail_fn "\n"			\
		     "1:"					\
		     : "=D" (dummy)				\
		     : "D" (v)					\
		     : "rax", "rsi", "rdx", "rcx",		\
		       "r8", "r9", "r10", "r11", "memory");	\
} while (0)
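
/*
 * Illustration only: a mutex_unlock() built on the fastpath above; demo_*
 * names are hypothetical. The slowpath is taken when the incremented count
 * is still nonpositive, i.e. when there are waiters to wake up.
 */
#if 0
static noinline void __demo_unlock_slowpath(atomic_t *lock_count);

static inline void demo_mutex_unlock(struct mutex *lock)
{
	/* Uncontended case: one locked incl, no call into the slowpath. */
	__mutex_fastpath_unlock(&lock->count, __demo_unlock_slowpath);
}
#endif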
       
/*
 * The generic unlock slowpath must reset the count to 1 itself, because the
 * increment-based fastpath above leaves it nonpositive when there are
 * waiters (xchg-based implementations return 0 here instead).
 */
#define __mutex_slowpath_needs_to_unlock()	1

/**
 * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
 *
 *  @count: pointer of type atomic_t
 *  @fail_fn: fallback function
 *
 * Change the count from 1 to 0 and return 1 (success), or return 0 (failure)
 * if it wasn't 1 originally. [the fallback function is never used on
 * x86_64, because all x86_64 CPUs have a CMPXCHG instruction.]
 */
static inline int __mutex_fastpath_trylock(atomic_t *count,
					   int (*fail_fn)(atomic_t *))
{
	if (likely(atomic_cmpxchg(count, 1, 0) == 1))
		return 1;
	else
		return 0;
}
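
/*
 * Illustration only: a mutex_trylock()-style caller with hypothetical
 * demo_* names. The fallback is required by the prototype but is never
 * called on x86-64 (see the comment above), so a declared stub suffices
 * for this sketch.
 */
#if 0
static noinline int __demo_trylock_slowpath(atomic_t *lock_count);

static inline int demo_mutex_trylock(struct mutex *lock)
{
	return __mutex_fastpath_trylock(&lock->count, __demo_trylock_slowpath);
}
#endif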
       
#endif /* _ASM_X86_MUTEX_64_H */