author | Srikant Patnaik | 2015-01-11 12:28:04 +0530
committer | Srikant Patnaik | 2015-01-11 12:28:04 +0530
commit | 871480933a1c28f8a9fed4c4d34d06c439a7a422 (patch)
tree | 8718f573808810c2a1e8cb8fb6ac469093ca2784 /include/linux/bit_spinlock.h
parent | 9d40ac5867b9aefe0722bc1f110b965ff294d30d (diff)
Moved, renamed, and deleted files
The original directory structure was scattered and unorganized.
The changes reorganize the tree to match the standard kernel directory structure.
Diffstat (limited to 'include/linux/bit_spinlock.h')
-rw-r--r-- | include/linux/bit_spinlock.h | 100
1 file changed, 100 insertions, 0 deletions
diff --git a/include/linux/bit_spinlock.h b/include/linux/bit_spinlock.h
new file mode 100644
index 00000000..3b5bafce
--- /dev/null
+++ b/include/linux/bit_spinlock.h
@@ -0,0 +1,100 @@
+#ifndef __LINUX_BIT_SPINLOCK_H
+#define __LINUX_BIT_SPINLOCK_H
+
+#include <linux/kernel.h>
+#include <linux/preempt.h>
+#include <linux/atomic.h>
+#include <linux/bug.h>
+
+/*
+ * bit-based spin_lock()
+ *
+ * Don't use this unless you really need to: spin_lock() and spin_unlock()
+ * are significantly faster.
+ */
+static inline void bit_spin_lock(int bitnum, unsigned long *addr)
+{
+	/*
+	 * Assuming the lock is uncontended, this never enters
+	 * the body of the outer loop. If it is contended, then
+	 * within the inner loop a non-atomic test is used to
+	 * busywait with less bus contention for a good time to
+	 * attempt to acquire the lock bit.
+	 */
+	preempt_disable();
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+	while (unlikely(test_and_set_bit_lock(bitnum, addr))) {
+		preempt_enable();
+		do {
+			cpu_relax();
+		} while (test_bit(bitnum, addr));
+		preempt_disable();
+	}
+#endif
+	__acquire(bitlock);
+}
+
+/*
+ * Return true if it was acquired
+ */
+static inline int bit_spin_trylock(int bitnum, unsigned long *addr)
+{
+	preempt_disable();
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+	if (unlikely(test_and_set_bit_lock(bitnum, addr))) {
+		preempt_enable();
+		return 0;
+	}
+#endif
+	__acquire(bitlock);
+	return 1;
+}
+
+/*
+ * bit-based spin_unlock()
+ */
+static inline void bit_spin_unlock(int bitnum, unsigned long *addr)
+{
+#ifdef CONFIG_DEBUG_SPINLOCK
+	BUG_ON(!test_bit(bitnum, addr));
+#endif
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+	clear_bit_unlock(bitnum, addr);
+#endif
+	preempt_enable();
+	__release(bitlock);
+}
+
+/*
+ * bit-based spin_unlock()
+ * non-atomic version, which can be used eg. if the bit lock itself is
+ * protecting the rest of the flags in the word.
+ */
+static inline void __bit_spin_unlock(int bitnum, unsigned long *addr)
+{
+#ifdef CONFIG_DEBUG_SPINLOCK
+	BUG_ON(!test_bit(bitnum, addr));
+#endif
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+	__clear_bit_unlock(bitnum, addr);
+#endif
+	preempt_enable();
+	__release(bitlock);
+}
+
+/*
+ * Return true if the lock is held.
+ */
+static inline int bit_spin_is_locked(int bitnum, unsigned long *addr)
+{
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+	return test_bit(bitnum, addr);
+#elif defined CONFIG_PREEMPT_COUNT
+	return preempt_count();
+#else
+	return 1;
+#endif
+}
+
+#endif /* __LINUX_BIT_SPINLOCK_H */
+
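Usage note (not part of this commit): bit_spin_lock() and bit_spin_unlock() take a bit number and a pointer to the word containing that bit, so a single unsigned long can hold the lock alongside other flags instead of a full spinlock_t. The sketch below is a hypothetical illustration of that pattern; the structure, bit number, and function names are invented for the example and do not appear in this header.

/* Hypothetical example: lock a hash chain with bit 0 of its flags word. */
#include <linux/bit_spinlock.h>
#include <linux/list.h>

#define EXAMPLE_LOCK_BIT 0	/* assumed lock bit; the remaining bits stay free for flags */

struct example_bucket {
	unsigned long flags;		/* bit 0 is the lock */
	struct hlist_head chain;
};

static void example_bucket_add(struct example_bucket *b, struct hlist_node *n)
{
	bit_spin_lock(EXAMPLE_LOCK_BIT, &b->flags);	/* spin until the bit is set, with acquire semantics */
	hlist_add_head(n, &b->chain);
	bit_spin_unlock(EXAMPLE_LOCK_BIT, &b->flags);	/* clear the bit with release semantics */
}

The mainline kernel uses the same space-saving idea in places such as hlist_bl, where the lock bit lives in the low bit of the list-head pointer rather than in a separate spinlock.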