From 871480933a1c28f8a9fed4c4d34d06c439a7a422 Mon Sep 17 00:00:00 2001
From: Srikant Patnaik
Date: Sun, 11 Jan 2015 12:28:04 +0530
Subject: Moved, renamed, and deleted files

The original directory structure was scattered and unorganized. The
changes are basically to make it look like the kernel tree structure.
---
 arch/tile/include/asm/bitops_64.h | 101 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 101 insertions(+)
 create mode 100644 arch/tile/include/asm/bitops_64.h

diff --git a/arch/tile/include/asm/bitops_64.h b/arch/tile/include/asm/bitops_64.h
new file mode 100644
index 00000000..60b87ee5
--- /dev/null
+++ b/arch/tile/include/asm/bitops_64.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright 2011 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT.  See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _ASM_TILE_BITOPS_64_H
+#define _ASM_TILE_BITOPS_64_H
+
+#include <linux/compiler.h>
+#include <linux/atomic.h>
+
+/* See <asm/bitops.h> for API comments. */
+
+static inline void set_bit(unsigned nr, volatile unsigned long *addr)
+{
+	unsigned long mask = (1UL << (nr % BITS_PER_LONG));
+	__insn_fetchor((void *)(addr + nr / BITS_PER_LONG), mask);
+}
+
+static inline void clear_bit(unsigned nr, volatile unsigned long *addr)
+{
+	unsigned long mask = (1UL << (nr % BITS_PER_LONG));
+	__insn_fetchand((void *)(addr + nr / BITS_PER_LONG), ~mask);
+}
+
+#define smp_mb__before_clear_bit()	smp_mb()
+#define smp_mb__after_clear_bit()	smp_mb()
+
+
+static inline void change_bit(unsigned nr, volatile unsigned long *addr)
+{
+	unsigned long mask = (1UL << (nr % BITS_PER_LONG));
+	unsigned long guess, oldval;
+	addr += nr / BITS_PER_LONG;
+	oldval = *addr;
+	do {
+		guess = oldval;
+		oldval = atomic64_cmpxchg((atomic64_t *)addr,
+					  guess, guess ^ mask);
+	} while (guess != oldval);
+}
+
+
+/*
+ * The test_and_xxx_bit() routines require a memory fence before we
+ * start the operation, and after the operation completes.  We use
+ * smp_mb() before, and rely on the "!= 0" comparison, plus a compiler
+ * barrier(), to block until the atomic op is complete.
+ */
+
+static inline int test_and_set_bit(unsigned nr, volatile unsigned long *addr)
+{
+	int val;
+	unsigned long mask = (1UL << (nr % BITS_PER_LONG));
+	smp_mb();  /* barrier for proper semantics */
+	val = (__insn_fetchor((void *)(addr + nr / BITS_PER_LONG), mask)
+	       & mask) != 0;
+	barrier();
+	return val;
+}
+
+
+static inline int test_and_clear_bit(unsigned nr, volatile unsigned long *addr)
+{
+	int val;
+	unsigned long mask = (1UL << (nr % BITS_PER_LONG));
+	smp_mb();  /* barrier for proper semantics */
+	val = (__insn_fetchand((void *)(addr + nr / BITS_PER_LONG), ~mask)
+	       & mask) != 0;
+	barrier();
+	return val;
+}
+
+
+static inline int test_and_change_bit(unsigned nr,
+				      volatile unsigned long *addr)
+{
+	unsigned long mask = (1UL << (nr % BITS_PER_LONG));
+	unsigned long guess, oldval;
+	addr += nr / BITS_PER_LONG;
+	oldval = *addr;
+	do {
+		guess = oldval;
+		oldval = atomic64_cmpxchg((atomic64_t *)addr,
+					  guess, guess ^ mask);
+	} while (guess != oldval);
+	return (oldval & mask) != 0;
+}
+
+#include <asm-generic/bitops/non-atomic.h>
+
+#endif /* _ASM_TILE_BITOPS_64_H */
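Editor's note: the compare-and-exchange retry loop used by change_bit() and
test_and_change_bit(), and the fence discipline described in the comment
above, can be reproduced in portable user-space C11 atomics. The sketch
below is illustrative only: test_and_toggle_bit() is a hypothetical name,
not part of this patch or the kernel API, and a seq_cst compare-exchange is
assumed to stand in for the smp_mb()/fetch-op ordering the tile intrinsics
provide.

/* Minimal user-space sketch (hypothetical, not kernel code): the same
 * cmpxchg retry pattern as test_and_change_bit() above, written with
 * C11 <stdatomic.h>. */
#include <stdatomic.h>
#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

static int test_and_toggle_bit(unsigned nr, _Atomic unsigned long *addr)
{
	unsigned long mask = 1UL << (nr % BITS_PER_LONG);
	unsigned long oldval;

	addr += nr / BITS_PER_LONG;
	oldval = atomic_load(addr);
	/* On failure, compare_exchange_weak reloads the current value into
	 * oldval, so each retry starts from a fresh snapshot: the same
	 * role the atomic64_cmpxchg() loop plays in the patch. */
	while (!atomic_compare_exchange_weak(addr, &oldval, oldval ^ mask))
		;
	/* The default seq_cst ordering of the CAS fences the operation on
	 * both sides, analogous to the smp_mb()/barrier() discipline in
	 * the test_and_xxx_bit() comment above. */
	return (oldval & mask) != 0;
}

int main(void)
{
	_Atomic unsigned long bitmap[2] = {0, 0};

	printf("%d\n", test_and_toggle_bit(3, bitmap));   /* 0: bit was clear */
	printf("%d\n", test_and_toggle_bit(3, bitmap));   /* 1: bit was set */
	printf("%d\n", test_and_toggle_bit(70, bitmap));  /* 0: bitmap[1], bit 6 */
	return 0;
}

The weak form of the compare-exchange may fail spuriously, which is fine
here because it already sits in a retry loop; the kernel code tolerates
retries for the same reason.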