diff options
author | Srikant Patnaik | 2015-01-11 12:28:04 +0530 |
---|---|---|
committer | Srikant Patnaik | 2015-01-11 12:28:04 +0530 |
commit | 871480933a1c28f8a9fed4c4d34d06c439a7a422 (patch) | |
tree | 8718f573808810c2a1e8cb8fb6ac469093ca2784 /arch/s390/lib/spinlock.c | |
parent | 9d40ac5867b9aefe0722bc1f110b965ff294d30d (diff) | |
download | FOSSEE-netbook-kernel-source-871480933a1c28f8a9fed4c4d34d06c439a7a422.tar.gz FOSSEE-netbook-kernel-source-871480933a1c28f8a9fed4c4d34d06c439a7a422.tar.bz2 FOSSEE-netbook-kernel-source-871480933a1c28f8a9fed4c4d34d06c439a7a422.zip |
Moved, renamed, and deleted files
The original directory structure was scattered and unorganized.
Changes are basically to make it look like kernel structure.
Diffstat (limited to 'arch/s390/lib/spinlock.c')
-rw-r--r-- | arch/s390/lib/spinlock.c | 219 |
1 file changed, 219 insertions, 0 deletions
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c new file mode 100644 index 00000000..093eb694 --- /dev/null +++ b/arch/s390/lib/spinlock.c @@ -0,0 +1,219 @@ +/* + * arch/s390/lib/spinlock.c + * Out of line spinlock code. + * + * Copyright (C) IBM Corp. 2004, 2006 + * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) + */ + +#include <linux/types.h> +#include <linux/module.h> +#include <linux/spinlock.h> +#include <linux/init.h> +#include <linux/smp.h> +#include <asm/io.h> + +int spin_retry = 1000; + +/** + * spin_retry= parameter + */ +static int __init spin_retry_setup(char *str) +{ + spin_retry = simple_strtoul(str, &str, 0); + return 1; +} +__setup("spin_retry=", spin_retry_setup); + +void arch_spin_lock_wait(arch_spinlock_t *lp) +{ + int count = spin_retry; + unsigned int cpu = ~smp_processor_id(); + unsigned int owner; + + while (1) { + owner = lp->owner_cpu; + if (!owner || smp_vcpu_scheduled(~owner)) { + for (count = spin_retry; count > 0; count--) { + if (arch_spin_is_locked(lp)) + continue; + if (_raw_compare_and_swap(&lp->owner_cpu, 0, + cpu) == 0) + return; + } + if (MACHINE_IS_LPAR) + continue; + } + owner = lp->owner_cpu; + if (owner) + smp_yield_cpu(~owner); + if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) + return; + } +} +EXPORT_SYMBOL(arch_spin_lock_wait); + +void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags) +{ + int count = spin_retry; + unsigned int cpu = ~smp_processor_id(); + unsigned int owner; + + local_irq_restore(flags); + while (1) { + owner = lp->owner_cpu; + if (!owner || smp_vcpu_scheduled(~owner)) { + for (count = spin_retry; count > 0; count--) { + if (arch_spin_is_locked(lp)) + continue; + local_irq_disable(); + if (_raw_compare_and_swap(&lp->owner_cpu, 0, + cpu) == 0) + return; + local_irq_restore(flags); + } + if (MACHINE_IS_LPAR) + continue; + } + owner = lp->owner_cpu; + if (owner) + smp_yield_cpu(~owner); + local_irq_disable(); + if (_raw_compare_and_swap(&lp->owner_cpu, 0, 
cpu) == 0) + return; + local_irq_restore(flags); + } +} +EXPORT_SYMBOL(arch_spin_lock_wait_flags); + +int arch_spin_trylock_retry(arch_spinlock_t *lp) +{ + unsigned int cpu = ~smp_processor_id(); + int count; + + for (count = spin_retry; count > 0; count--) { + if (arch_spin_is_locked(lp)) + continue; + if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) + return 1; + } + return 0; +} +EXPORT_SYMBOL(arch_spin_trylock_retry); + +void arch_spin_relax(arch_spinlock_t *lock) +{ + unsigned int cpu = lock->owner_cpu; + if (cpu != 0) { + if (MACHINE_IS_VM || MACHINE_IS_KVM || + !smp_vcpu_scheduled(~cpu)) + smp_yield_cpu(~cpu); + } +} +EXPORT_SYMBOL(arch_spin_relax); + +void _raw_read_lock_wait(arch_rwlock_t *rw) +{ + unsigned int old; + int count = spin_retry; + + while (1) { + if (count-- <= 0) { + smp_yield(); + count = spin_retry; + } + if (!arch_read_can_lock(rw)) + continue; + old = rw->lock & 0x7fffffffU; + if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old) + return; + } +} +EXPORT_SYMBOL(_raw_read_lock_wait); + +void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags) +{ + unsigned int old; + int count = spin_retry; + + local_irq_restore(flags); + while (1) { + if (count-- <= 0) { + smp_yield(); + count = spin_retry; + } + if (!arch_read_can_lock(rw)) + continue; + old = rw->lock & 0x7fffffffU; + local_irq_disable(); + if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old) + return; + } +} +EXPORT_SYMBOL(_raw_read_lock_wait_flags); + +int _raw_read_trylock_retry(arch_rwlock_t *rw) +{ + unsigned int old; + int count = spin_retry; + + while (count-- > 0) { + if (!arch_read_can_lock(rw)) + continue; + old = rw->lock & 0x7fffffffU; + if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old) + return 1; + } + return 0; +} +EXPORT_SYMBOL(_raw_read_trylock_retry); + +void _raw_write_lock_wait(arch_rwlock_t *rw) +{ + int count = spin_retry; + + while (1) { + if (count-- <= 0) { + smp_yield(); + count = spin_retry; + } + if 
(!arch_write_can_lock(rw)) + continue; + if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0) + return; + } +} +EXPORT_SYMBOL(_raw_write_lock_wait); + +void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags) +{ + int count = spin_retry; + + local_irq_restore(flags); + while (1) { + if (count-- <= 0) { + smp_yield(); + count = spin_retry; + } + if (!arch_write_can_lock(rw)) + continue; + local_irq_disable(); + if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0) + return; + } +} +EXPORT_SYMBOL(_raw_write_lock_wait_flags); + +int _raw_write_trylock_retry(arch_rwlock_t *rw) +{ + int count = spin_retry; + + while (count-- > 0) { + if (!arch_write_can_lock(rw)) + continue; + if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0) + return 1; + } + return 0; +} +EXPORT_SYMBOL(_raw_write_trylock_retry); |