Diffstat (limited to 'arch/s390/lib')
-rw-r--r--  arch/s390/lib/Makefile        |   9
-rw-r--r--  arch/s390/lib/delay.c         | 131
-rw-r--r--  arch/s390/lib/div64.c         | 149
-rw-r--r--  arch/s390/lib/qrnnd.S         |  78
-rw-r--r--  arch/s390/lib/spinlock.c      | 219
-rw-r--r--  arch/s390/lib/string.c        | 389
-rw-r--r--  arch/s390/lib/uaccess.h       |  23
-rw-r--r--  arch/s390/lib/uaccess_mvcos.c | 227
-rw-r--r--  arch/s390/lib/uaccess_pt.c    | 406
-rw-r--r--  arch/s390/lib/uaccess_std.c   | 319
-rw-r--r--  arch/s390/lib/ucmpdi2.c       |  26
-rw-r--r--  arch/s390/lib/usercopy.c      |   8
12 files changed, 1984 insertions(+), 0 deletions(-)
diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile
new file mode 100644
index 00000000..761ab8b5
--- /dev/null
+++ b/arch/s390/lib/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for s390-specific library files.
+#
+
+lib-y += delay.o string.o uaccess_std.o uaccess_pt.o
+obj-y += usercopy.o
+obj-$(CONFIG_32BIT) += div64.o qrnnd.o ucmpdi2.o
+lib-$(CONFIG_64BIT) += uaccess_mvcos.o
+lib-$(CONFIG_SMP) += spinlock.o
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c
new file mode 100644
index 00000000..9f1f71e8
--- /dev/null
+++ b/arch/s390/lib/delay.c
@@ -0,0 +1,131 @@
+/*
+ * Precise Delay Loops for S390
+ *
+ * Copyright IBM Corp. 1999,2008
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
+ *	      Heiko Carstens <heiko.carstens@de.ibm.com>,
+ */
+
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/timex.h>
+#include <linux/module.h>
+#include <linux/irqflags.h>
+#include <linux/interrupt.h>
+#include <asm/div64.h>
+#include <asm/timer.h>
+
+void __delay(unsigned long loops)
+{
+	/*
+	 * To end the bloody stupid and useless discussion about the
+	 * BogoMips number I took the liberty to define the __delay
+	 * function in a way that the resulting BogoMips number will
+	 * yield the megahertz number of the cpu. The important function
+	 * is udelay and that is done using the tod clock. -- martin.
+	 */
+	asm volatile("0: brct %0,0b" : : "d" ((loops/2) + 1));
+}
+
+static void __udelay_disabled(unsigned long long usecs)
+{
+	unsigned long cr0, cr6, new;
+	u64 clock_saved, end;
+
+	end = get_clock() + (usecs << 12);
+	clock_saved = local_tick_disable();
+	__ctl_store(cr0, 0, 0);
+	__ctl_store(cr6, 6, 6);
+	new = (cr0 & 0xffff00e0) | 0x00000800;
+	__ctl_load(new, 0, 0);
+	new = 0;
+	__ctl_load(new, 6, 6);
+	lockdep_off();
+	do {
+		set_clock_comparator(end);
+		vtime_stop_cpu();
+		local_irq_disable();
+	} while (get_clock() < end);
+	lockdep_on();
+	__ctl_load(cr0, 0, 0);
+	__ctl_load(cr6, 6, 6);
+	local_tick_enable(clock_saved);
+}
+
+static void __udelay_enabled(unsigned long long usecs)
+{
+	u64 clock_saved, end;
+
+	end = get_clock() + (usecs << 12);
+	do {
+		clock_saved = 0;
+		if (end < S390_lowcore.clock_comparator) {
+			clock_saved = local_tick_disable();
+			set_clock_comparator(end);
+		}
+		vtime_stop_cpu();
+		local_irq_disable();
+		if (clock_saved)
+			local_tick_enable(clock_saved);
+	} while (get_clock() < end);
+}
+
+/*
+ * Waits for 'usecs' microseconds using the TOD clock comparator.
+ */
+void __udelay(unsigned long long usecs)
+{
+	unsigned long flags;
+
+	preempt_disable();
+	local_irq_save(flags);
+	if (in_irq()) {
+		__udelay_disabled(usecs);
+		goto out;
+	}
+	if (in_softirq()) {
+		if (raw_irqs_disabled_flags(flags))
+			__udelay_disabled(usecs);
+		else
+			__udelay_enabled(usecs);
+		goto out;
+	}
+	if (raw_irqs_disabled_flags(flags)) {
+		local_bh_disable();
+		__udelay_disabled(usecs);
+		_local_bh_enable();
+		goto out;
+	}
+	__udelay_enabled(usecs);
+out:
+	local_irq_restore(flags);
+	preempt_enable();
+}
+EXPORT_SYMBOL(__udelay);
+
+/*
+ * Simple udelay variant. To be used on startup and reboot
+ * when the interrupt handler isn't working.
+ */
+void udelay_simple(unsigned long long usecs)
+{
+	u64 end;
+
+	end = get_clock() + (usecs << 12);
+	while (get_clock() < end)
+		cpu_relax();
+}
+
+void __ndelay(unsigned long long nsecs)
+{
+	u64 end;
+
+	nsecs <<= 9;
+	do_div(nsecs, 125);
+	end = get_clock() + nsecs;
+	if (nsecs & ~0xfffUL)
+		__udelay(nsecs >> 12);
+	while (get_clock() < end)
+		barrier();
+}
+EXPORT_SYMBOL(__ndelay);
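A note on the shift constants above: bit 51 of the s390 TOD clock increments once per microsecond, so one microsecond corresponds to 2^12 = 4096 TOD clock units. That is why the delay functions compute the deadline as usecs << 12, and why __ndelay scales nanoseconds by 512/125 (= 4.096 units per nanosecond). A minimal conversion sketch, for illustration only (the helper names are made up, not part of the patch):

	/* illustration only: microsecond <-> TOD clock unit conversion */
	static inline unsigned long long usecs_to_tod(unsigned long long usecs)
	{
		return usecs << 12;	/* 1 us = 4096 TOD clock units */
	}

	static inline unsigned long long tod_to_usecs(unsigned long long tod)
	{
		return tod >> 12;	/* truncating division by 4096 */
	}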
diff --git a/arch/s390/lib/div64.c b/arch/s390/lib/div64.c
new file mode 100644
index 00000000..d9e62c0b
--- /dev/null
+++ b/arch/s390/lib/div64.c
@@ -0,0 +1,149 @@
+/*
+ * arch/s390/lib/div64.c
+ *
+ * __div64_32 implementation for 31 bit.
+ *
+ * Copyright (C) IBM Corp. 2006
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+
+#ifdef CONFIG_MARCH_G5
+
+/*
+ * Function to divide an unsigned 64 bit integer by an unsigned
+ * 31 bit integer using signed 64/32 bit division.
+ */
+static uint32_t __div64_31(uint64_t *n, uint32_t base)
+{
+	register uint32_t reg2 asm("2");
+	register uint32_t reg3 asm("3");
+	uint32_t *words = (uint32_t *) n;
+	uint32_t tmp;
+
+	/* Special case base==1, remainder = 0, quotient = n */
+	if (base == 1)
+		return 0;
+	/*
+	 * Special case base==0 will cause a fixed point divide exception
+	 * on the dr instruction and may not happen anyway. For the
+	 * following calculation we can assume base > 1. The first
+	 * signed 64 / 32 bit division with an upper half of 0 will
+	 * give the correct upper half of the 64 bit quotient.
+	 */
+	reg2 = 0UL;
+	reg3 = words[0];
+	asm volatile(
+		"	dr %0,%2\n"
+		: "+d" (reg2), "+d" (reg3) : "d" (base) : "cc" );
+	words[0] = reg3;
+	reg3 = words[1];
+	/*
+	 * To get the lower half of the 64 bit quotient and the 32 bit
+	 * remainder we have to use a little trick. Since we only have
+	 * a signed division the quotient can get too big. To avoid this
+	 * the 64 bit dividend is halved, then the signed division will
+	 * work. Afterwards the quotient and the remainder are doubled.
+	 * If the last bit of the dividend has been one the remainder
+	 * is increased by one then checked against the base. If the
+	 * remainder has overflown subtract base and increase the
+	 * quotient. Simple, no ?
+	 */
+	asm volatile(
+		"	nr %2,%1\n"
+		"	srdl %0,1\n"
+		"	dr %0,%3\n"
+		"	alr %0,%0\n"
+		"	alr %1,%1\n"
+		"	alr %0,%2\n"
+		"	clr %0,%3\n"
+		"	jl 0f\n"
+		"	slr %0,%3\n"
+		"	ahi %1,1\n"
+		"0:\n"
+		: "+d" (reg2), "+d" (reg3), "=d" (tmp)
+		: "d" (base), "2" (1UL) : "cc" );
+	words[1] = reg3;
+	return reg2;
+}
+
+/*
+ * Function to divide an unsigned 64 bit integer by an unsigned
+ * 32 bit integer using the unsigned 64/31 bit division.
+ */
+uint32_t __div64_32(uint64_t *n, uint32_t base)
+{
+	uint32_t r;
+
+	/*
+	 * If the most significant bit of base is set, divide n by
+	 * (base/2). That allows to use 64/31 bit division and gives a
+	 * good approximation of the result: n = (base/2)*q + r. The
+	 * result needs to be corrected with two simple transformations.
+	 * If base is already < 2^31-1 __div64_31 can be used directly.
+	 */
+	r = __div64_31(n, ((signed) base < 0) ? (base/2) : base);
+	if ((signed) base < 0) {
+		uint64_t q = *n;
+		/*
+		 * First transformation:
+		 * n = (base/2)*q + r
+		 *   = ((base/2)*2)*(q/2) + ((q&1) ? (base/2) : 0) + r
+		 * Since r < (base/2), r + (base/2) < base.
+		 * With q1 = (q/2) and r1 = r + ((q&1) ? (base/2) : 0)
+		 * n = ((base/2)*2)*q1 + r1 with r1 < base.
+		 */
+		if (q & 1)
+			r += base/2;
+		q >>= 1;
+		/*
+		 * Second transformation. ((base/2)*2) could have lost the
+		 * last bit.
+		 * n = ((base/2)*2)*q1 + r1
+		 *   = base*q1 - ((base&1) ? q1 : 0) + r1
+		 */
+		if (base & 1) {
+			int64_t rx = r - q;
+			/*
+			 * base is >= 2^31. The worst case for the while
+			 * loop is n=2^64-1 base=2^31+1. That gives a
+			 * maximum for q=(2^64-1)/2^31 = 0x1ffffffff. Since
+			 * base >= 2^31 the loop is finished after a maximum
+			 * of three iterations.
+			 */
+			while (rx < 0) {
+				rx += base;
+				q--;
+			}
+			r = rx;
+		}
+		*n = q;
+	}
+	return r;
+}
+
+#else /* MARCH_G5 */
+
+uint32_t __div64_32(uint64_t *n, uint32_t base)
+{
+	register uint32_t reg2 asm("2");
+	register uint32_t reg3 asm("3");
+	uint32_t *words = (uint32_t *) n;
+
+	reg2 = 0UL;
+	reg3 = words[0];
+	asm volatile(
+		"	dlr %0,%2\n"
+		: "+d" (reg2), "+d" (reg3) : "d" (base) : "cc" );
+	words[0] = reg3;
+	reg3 = words[1];
+	asm volatile(
+		"	dlr %0,%2\n"
+		: "+d" (reg2), "+d" (reg3) : "d" (base) : "cc" );
+	words[1] = reg3;
+	return reg2;
+}
+
+#endif /* MARCH_G5 */
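For reference, this is how the helper is consumed: the generic do_div() macro on 31-bit kernels ends up calling __div64_32, which updates the 64-bit dividend in place with the quotient and returns the 32-bit remainder. A usage sketch (not part of the patch):

	unsigned long long ns = 1000000123ULL;
	unsigned int rem;

	rem = __div64_32(&ns, 1000000);	/* afterwards ns == 1000, rem == 123 */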
diff --git a/arch/s390/lib/qrnnd.S b/arch/s390/lib/qrnnd.S
new file mode 100644
index 00000000..d3213291
--- /dev/null
+++ b/arch/s390/lib/qrnnd.S
@@ -0,0 +1,78 @@
+# S/390 __udiv_qrnnd
+
+#include <linux/linkage.h>
+
+# r2 : &__r
+# r3 : upper half of 64 bit word n
+# r4 : lower half of 64 bit word n
+# r5 : divisor d
+# the remainder r of the division is to be stored to &__r and
+# the quotient q is to be returned
+
+	.text
+ENTRY(__udiv_qrnnd)
+	st	%r2,24(%r15)	# store pointer to remainder for later
+	lr	%r0,%r3		# load n into r0/r1
+	lr	%r1,%r4
+	ltr	%r2,%r5		# reload and test divisor
+	jp	5f
+	# divisor >= 0x80000000
+	srdl	%r0,2		# n/4
+	srl	%r2,1		# d/2
+	slr	%r1,%r2		# special case if last bit of d is set
+	brc	3,0f		#  (n/4) div (d/2) can overflow by 1
+	ahi	%r0,-1		#  trick: subtract n/2, then divide
+0:	dr	%r0,%r2		# signed division
+	ahi	%r1,1		#  trick part 2: add 1 to the quotient
+	# now (n >> 2) = (d >> 1) * %r1 + %r0
+	lhi	%r3,1
+	nr	%r3,%r1		# test last bit of q
+	jz	1f
+	alr	%r0,%r2		# add (d>>1) to r
+1:	srl	%r1,1		# q >>= 1
+	# now (n >> 2) = (d&-2) * %r1 + %r0
+	lhi	%r3,1
+	nr	%r3,%r5		# test last bit of d
+	jz	2f
+	slr	%r0,%r1		# r -= q
+	brc	3,2f		# borrow ?
+	alr	%r0,%r5		# r += d
+	ahi	%r1,-1
+2:	# now (n >> 2) = d * %r1 + %r0
+	alr	%r1,%r1		# q <<= 1
+	alr	%r0,%r0		# r <<= 1
+	brc	12,3f		# overflow on r ?
+	slr	%r0,%r5		# r -= d
+	ahi	%r1,1		# q += 1
+3:	lhi	%r3,2
+	nr	%r3,%r4		# test next to last bit of n
+	jz	4f
+	ahi	%r0,1		# r += 1
+4:	clr	%r0,%r5		# r >= d ?
+	jl	6f
+	slr	%r0,%r5		# r -= d
+	ahi	%r1,1		# q += 1
+	# now (n >> 1) = d * %r1 + %r0
+	j	6f
+5:	# divisor < 0x80000000
+	srdl	%r0,1
+	dr	%r0,%r2		# signed division
+	# now (n >> 1) = d * %r1 + %r0
+6:	alr	%r1,%r1		# q <<= 1
+	alr	%r0,%r0		# r <<= 1
+	brc	12,7f		# overflow on r ?
+	slr	%r0,%r5		# r -= d
+	ahi	%r1,1		# q += 1
+7:	lhi	%r3,1
+	nr	%r3,%r4		# isolate last bit of n
+	alr	%r0,%r3		# r += (n & 1)
+	clr	%r0,%r5		# r >= d ?
+	jl	8f
+	slr	%r0,%r5		# r -= d
+	ahi	%r1,1		# q += 1
+8:	# now n = d * %r1 + %r0
+	l	%r2,24(%r15)
+	st	%r0,0(%r2)
+	lr	%r2,%r1
+	br	%r14
+	.end	__udiv_qrnnd
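The calling convention matches the familiar GMP/libgcc __udiv_qrnnd helper. A C model of the contract, assuming as usual that the upper half of n is smaller than the divisor so the quotient fits in 32 bits (model only; the assembler exists precisely because 31-bit mode has no unsigned 64/32 divide instruction):

	/* C model of __udiv_qrnnd: returns the quotient, stores the remainder */
	unsigned int udiv_qrnnd_model(unsigned int *r, unsigned int n_hi,
				      unsigned int n_lo, unsigned int d)
	{
		unsigned long long n = ((unsigned long long) n_hi << 32) | n_lo;

		*r = n % d;	/* remainder stored through &__r (r2) */
		return n / d;	/* quotient returned in r2 */
	}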
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
new file mode 100644
index 00000000..093eb694
--- /dev/null
+++ b/arch/s390/lib/spinlock.c
@@ -0,0 +1,219 @@
+/*
+ * arch/s390/lib/spinlock.c
+ *    Out of line spinlock code.
+ *
+ * Copyright (C) IBM Corp. 2004, 2006
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <asm/io.h>
+
+int spin_retry = 1000;
+
+/**
+ * spin_retry= parameter
+ */
+static int __init spin_retry_setup(char *str)
+{
+	spin_retry = simple_strtoul(str, &str, 0);
+	return 1;
+}
+__setup("spin_retry=", spin_retry_setup);
+
+void arch_spin_lock_wait(arch_spinlock_t *lp)
+{
+	int count = spin_retry;
+	unsigned int cpu = ~smp_processor_id();
+	unsigned int owner;
+
+	while (1) {
+		owner = lp->owner_cpu;
+		if (!owner || smp_vcpu_scheduled(~owner)) {
+			for (count = spin_retry; count > 0; count--) {
+				if (arch_spin_is_locked(lp))
+					continue;
+				if (_raw_compare_and_swap(&lp->owner_cpu, 0,
+							  cpu) == 0)
+					return;
+			}
+			if (MACHINE_IS_LPAR)
+				continue;
+		}
+		owner = lp->owner_cpu;
+		if (owner)
+			smp_yield_cpu(~owner);
+		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
+			return;
+	}
+}
+EXPORT_SYMBOL(arch_spin_lock_wait);
+
+void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
+{
+	int count = spin_retry;
+	unsigned int cpu = ~smp_processor_id();
+	unsigned int owner;
+
+	local_irq_restore(flags);
+	while (1) {
+		owner = lp->owner_cpu;
+		if (!owner || smp_vcpu_scheduled(~owner)) {
+			for (count = spin_retry; count > 0; count--) {
+				if (arch_spin_is_locked(lp))
+					continue;
+				local_irq_disable();
+				if (_raw_compare_and_swap(&lp->owner_cpu, 0,
+							  cpu) == 0)
+					return;
+				local_irq_restore(flags);
+			}
+			if (MACHINE_IS_LPAR)
+				continue;
+		}
+		owner = lp->owner_cpu;
+		if (owner)
+			smp_yield_cpu(~owner);
+		local_irq_disable();
+		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
+			return;
+		local_irq_restore(flags);
+	}
+}
+EXPORT_SYMBOL(arch_spin_lock_wait_flags);
+
+int arch_spin_trylock_retry(arch_spinlock_t *lp)
+{
+	unsigned int cpu = ~smp_processor_id();
+	int count;
+
+	for (count = spin_retry; count > 0; count--) {
+		if (arch_spin_is_locked(lp))
+			continue;
+		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
+			return 1;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(arch_spin_trylock_retry);
+
+void arch_spin_relax(arch_spinlock_t *lock)
+{
+	unsigned int cpu = lock->owner_cpu;
+	if (cpu != 0) {
+		if (MACHINE_IS_VM || MACHINE_IS_KVM ||
+		    !smp_vcpu_scheduled(~cpu))
+			smp_yield_cpu(~cpu);
+	}
+}
+EXPORT_SYMBOL(arch_spin_relax);
+
+void _raw_read_lock_wait(arch_rwlock_t *rw)
+{
+	unsigned int old;
+	int count = spin_retry;
+
+	while (1) {
+		if (count-- <= 0) {
+			smp_yield();
+			count = spin_retry;
+		}
+		if (!arch_read_can_lock(rw))
+			continue;
+		old = rw->lock & 0x7fffffffU;
+		if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
+			return;
+	}
+}
+EXPORT_SYMBOL(_raw_read_lock_wait);
+
+void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
+{
+	unsigned int old;
+	int count = spin_retry;
+
+	local_irq_restore(flags);
+	while (1) {
+		if (count-- <= 0) {
+			smp_yield();
+			count = spin_retry;
+		}
+		if (!arch_read_can_lock(rw))
+			continue;
+		old = rw->lock & 0x7fffffffU;
+		local_irq_disable();
+		if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
+			return;
+	}
+}
+EXPORT_SYMBOL(_raw_read_lock_wait_flags);
+
+int _raw_read_trylock_retry(arch_rwlock_t *rw)
+{
+	unsigned int old;
+	int count = spin_retry;
+
+	while (count-- > 0) {
+		if (!arch_read_can_lock(rw))
+			continue;
+		old = rw->lock & 0x7fffffffU;
+		if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
+			return 1;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(_raw_read_trylock_retry);
+
+void _raw_write_lock_wait(arch_rwlock_t *rw)
+{
+	int count = spin_retry;
+
+	while (1) {
+		if (count-- <= 0) {
+			smp_yield();
+			count = spin_retry;
+		}
+		if (!arch_write_can_lock(rw))
+			continue;
+		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
+			return;
+	}
+}
+EXPORT_SYMBOL(_raw_write_lock_wait);
+
+void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
+{
+	int count = spin_retry;
+
+	local_irq_restore(flags);
+	while (1) {
+		if (count-- <= 0) {
+			smp_yield();
+			count = spin_retry;
+		}
+		if (!arch_write_can_lock(rw))
+			continue;
+		local_irq_disable();
+		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
+			return;
+	}
+}
+EXPORT_SYMBOL(_raw_write_lock_wait_flags);
+
+int _raw_write_trylock_retry(arch_rwlock_t *rw)
+{
+	int count = spin_retry;
+
+	while (count-- > 0) {
+		if (!arch_write_can_lock(rw))
+			continue;
+		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
+			return 1;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(_raw_write_trylock_retry);
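Throughout this file the lock word holds the bitwise complement of the owning CPU number: 0 stays reserved for "unlocked", and CPU 0 still encodes to a non-zero value. A two-line sketch of the encoding (illustrative names only):

	static inline unsigned int owner_encode(unsigned int cpu)
	{
		return ~cpu;		/* CPU 0 -> 0xffffffff, never 0 */
	}

	static inline unsigned int owner_decode(unsigned int lock_word)
	{
		return ~lock_word;	/* argument for smp_yield_cpu() */
	}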
diff --git a/arch/s390/lib/string.c b/arch/s390/lib/string.c
new file mode 100644
index 00000000..4143b7c1
--- /dev/null
+++ b/arch/s390/lib/string.c
@@ -0,0 +1,389 @@
+/*
+ * arch/s390/lib/string.c
+ *    Optimized string functions
+ *
+ * S390 version
+ *   Copyright (C) 2004 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *   Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ */
+
+#define IN_ARCH_STRING_C 1
+
+#include <linux/types.h>
+#include <linux/module.h>
+
+/*
+ * Helper functions to find the end of a string
+ */
+static inline char *__strend(const char *s)
+{
+	register unsigned long r0 asm("0") = 0;
+
+	asm volatile ("0: srst %0,%1\n"
+		      "   jo  0b"
+		      : "+d" (r0), "+a" (s) : : "cc" );
+	return (char *) r0;
+}
+
+static inline char *__strnend(const char *s, size_t n)
+{
+	register unsigned long r0 asm("0") = 0;
+	const char *p = s + n;
+
+	asm volatile ("0: srst %0,%1\n"
+		      "   jo  0b"
+		      : "+d" (p), "+a" (s) : "d" (r0) : "cc" );
+	return (char *) p;
+}
+
+/**
+ * strlen - Find the length of a string
+ * @s: The string to be sized
+ *
+ * returns the length of @s
+ */
+size_t strlen(const char *s)
+{
+#if __GNUC__ < 4
+	return __strend(s) - s;
+#else
+	return __builtin_strlen(s);
+#endif
+}
+EXPORT_SYMBOL(strlen);
+
+/**
+ * strnlen - Find the length of a length-limited string
+ * @s: The string to be sized
+ * @n: The maximum number of bytes to search
+ *
+ * returns the minimum of the length of @s and @n
+ */
+size_t strnlen(const char *s, size_t n)
+{
+	return __strnend(s, n) - s;
+}
+EXPORT_SYMBOL(strnlen);
+
+/**
+ * strcpy - Copy a %NUL terminated string
+ * @dest: Where to copy the string to
+ * @src: Where to copy the string from
+ *
+ * returns a pointer to @dest
+ */
+char *strcpy(char *dest, const char *src)
+{
+#if __GNUC__ < 4
+	register int r0 asm("0") = 0;
+	char *ret = dest;
+
+	asm volatile ("0: mvst %0,%1\n"
+		      "   jo  0b"
+		      : "+&a" (dest), "+&a" (src) : "d" (r0)
+		      : "cc", "memory" );
+	return ret;
+#else
+	return __builtin_strcpy(dest, src);
+#endif
+}
+EXPORT_SYMBOL(strcpy);
+
+/**
+ * strlcpy - Copy a %NUL terminated string into a sized buffer
+ * @dest: Where to copy the string to
+ * @src: Where to copy the string from
+ * @size: size of destination buffer
+ *
+ * Compatible with *BSD: the result is always a valid
+ * NUL-terminated string that fits in the buffer (unless,
+ * of course, the buffer size is zero). It does not pad
+ * out the result like strncpy() does.
+ */
+size_t strlcpy(char *dest, const char *src, size_t size)
+{
+	size_t ret = __strend(src) - src;
+
+	if (size) {
+		size_t len = (ret >= size) ? size-1 : ret;
+		dest[len] = '\0';
+		__builtin_memcpy(dest, src, len);
+	}
+	return ret;
+}
+EXPORT_SYMBOL(strlcpy);
+
+/**
+ * strncpy - Copy a length-limited, %NUL-terminated string
+ * @dest: Where to copy the string to
+ * @src: Where to copy the string from
+ * @n: The maximum number of bytes to copy
+ *
+ * The result is not %NUL-terminated if the source exceeds
+ * @n bytes.
+ */
+char *strncpy(char *dest, const char *src, size_t n)
+{
+	size_t len = __strnend(src, n) - src;
+	__builtin_memset(dest + len, 0, n - len);
+	__builtin_memcpy(dest, src, len);
+	return dest;
+}
+EXPORT_SYMBOL(strncpy);
+
+/**
+ * strcat - Append one %NUL-terminated string to another
+ * @dest: The string to be appended to
+ * @src: The string to append to it
+ *
+ * returns a pointer to @dest
+ */
+char *strcat(char *dest, const char *src)
+{
+	register int r0 asm("0") = 0;
+	unsigned long dummy;
+	char *ret = dest;
+
+	asm volatile ("0: srst %0,%1\n"
+		      "   jo  0b\n"
+		      "1: mvst %0,%2\n"
+		      "   jo  1b"
+		      : "=&a" (dummy), "+a" (dest), "+a" (src)
+		      : "d" (r0), "0" (0UL) : "cc", "memory" );
+	return ret;
+}
+EXPORT_SYMBOL(strcat);
+
+/**
+ * strlcat - Append a length-limited, %NUL-terminated string to another
+ * @dest: The string to be appended to
+ * @src: The string to append to it
+ * @n: The size of the destination buffer.
+ */
+size_t strlcat(char *dest, const char *src, size_t n)
+{
+	size_t dsize = __strend(dest) - dest;
+	size_t len = __strend(src) - src;
+	size_t res = dsize + len;
+
+	if (dsize < n) {
+		dest += dsize;
+		n -= dsize;
+		if (len >= n)
+			len = n - 1;
+		dest[len] = '\0';
+		__builtin_memcpy(dest, src, len);
+	}
+	return res;
+}
+EXPORT_SYMBOL(strlcat);
+
+/**
+ * strncat - Append a length-limited, %NUL-terminated string to another
+ * @dest: The string to be appended to
+ * @src: The string to append to it
+ * @n: The maximum numbers of bytes to copy
+ *
+ * returns a pointer to @dest
+ *
+ * Note that in contrast to strncpy, strncat ensures the result is
+ * terminated.
+ */
+char *strncat(char *dest, const char *src, size_t n)
+{
+	size_t len = __strnend(src, n) - src;
+	char *p = __strend(dest);
+
+	p[len] = '\0';
+	__builtin_memcpy(p, src, len);
+	return dest;
+}
+EXPORT_SYMBOL(strncat);
+
+/**
+ * strcmp - Compare two strings
+ * @cs: One string
+ * @ct: Another string
+ *
+ * returns   0 if @cs and @ct are equal,
+ *         < 0 if @cs is less than @ct
+ *         > 0 if @cs is greater than @ct
+ */
+int strcmp(const char *cs, const char *ct)
+{
+	register int r0 asm("0") = 0;
+	int ret = 0;
+
+	asm volatile ("0: clst %2,%3\n"
+		      "   jo  0b\n"
+		      "   je  1f\n"
+		      "   ic  %0,0(%2)\n"
+		      "   ic  %1,0(%3)\n"
+		      "   sr  %0,%1\n"
+		      "1:"
+		      : "+d" (ret), "+d" (r0), "+a" (cs), "+a" (ct)
+		      : : "cc" );
+	return ret;
+}
+EXPORT_SYMBOL(strcmp);
+
+/**
+ * strrchr - Find the last occurrence of a character in a string
+ * @s: The string to be searched
+ * @c: The character to search for
+ */
+char *strrchr(const char *s, int c)
+{
+	size_t len = __strend(s) - s;
+
+	if (len)
+		do {
+			if (s[len] == (char) c)
+				return (char *) s + len;
+		} while (--len > 0);
+	return NULL;
+}
+EXPORT_SYMBOL(strrchr);
+
+/**
+ * strstr - Find the first substring in a %NUL terminated string
+ * @s1: The string to be searched
+ * @s2: The string to search for
+ */
+char *strstr(const char *s1, const char *s2)
+{
+	int l1, l2;
+
+	l2 = __strend(s2) - s2;
+	if (!l2)
+		return (char *) s1;
+	l1 = __strend(s1) - s1;
+	while (l1-- >= l2) {
+		register unsigned long r2 asm("2") = (unsigned long) s1;
+		register unsigned long r3 asm("3") = (unsigned long) l2;
+		register unsigned long r4 asm("4") = (unsigned long) s2;
+		register unsigned long r5 asm("5") = (unsigned long) l2;
+		int cc;
+
+		asm volatile ("0: clcle %1,%3,0\n"
+			      "   jo   0b\n"
+			      "   ipm  %0\n"
+			      "   srl  %0,28"
+			      : "=&d" (cc), "+a" (r2), "+a" (r3),
+				"+a" (r4), "+a" (r5) : : "cc" );
+		if (!cc)
+			return (char *) s1;
+		s1++;
+	}
+	return NULL;
+}
+EXPORT_SYMBOL(strstr);
+
+/**
+ * memchr - Find a character in an area of memory.
+ * @s: The memory area
+ * @c: The byte to search for
+ * @n: The size of the area.
+ *
+ * returns the address of the first occurrence of @c, or %NULL
+ * if @c is not found
+ */
+void *memchr(const void *s, int c, size_t n)
+{
+	register int r0 asm("0") = (char) c;
+	const void *ret = s + n;
+
+	asm volatile ("0: srst %0,%1\n"
+		      "   jo  0b\n"
+		      "   jl  1f\n"
+		      "   la  %0,0\n"
+		      "1:"
+		      : "+a" (ret), "+&a" (s) : "d" (r0) : "cc" );
+	return (void *) ret;
+}
+EXPORT_SYMBOL(memchr);
+
+/**
+ * memcmp - Compare two areas of memory
+ * @cs: One area of memory
+ * @ct: Another area of memory
+ * @n: The size of the area.
+ */
+int memcmp(const void *cs, const void *ct, size_t n)
+{
+	register unsigned long r2 asm("2") = (unsigned long) cs;
+	register unsigned long r3 asm("3") = (unsigned long) n;
+	register unsigned long r4 asm("4") = (unsigned long) ct;
+	register unsigned long r5 asm("5") = (unsigned long) n;
+	int ret;
+
+	asm volatile ("0: clcle %1,%3,0\n"
+		      "   jo   0b\n"
+		      "   ipm  %0\n"
+		      "   srl  %0,28"
+		      : "=&d" (ret), "+a" (r2), "+a" (r3), "+a" (r4), "+a" (r5)
+		      : : "cc" );
+	if (ret)
+		ret = *(char *) r2 - *(char *) r4;
+	return ret;
+}
+EXPORT_SYMBOL(memcmp);
+
+/**
+ * memscan - Find a character in an area of memory.
+ * @s: The memory area
+ * @c: The byte to search for
+ * @n: The size of the area.
+ *
+ * returns the address of the first occurrence of @c, or 1 byte past
+ * the area if @c is not found
+ */
+void *memscan(void *s, int c, size_t n)
+{
+	register int r0 asm("0") = (char) c;
+	const void *ret = s + n;
+
+	asm volatile ("0: srst %0,%1\n"
+		      "   jo  0b\n"
+		      : "+a" (ret), "+&a" (s) : "d" (r0) : "cc" );
+	return (void *) ret;
+}
+EXPORT_SYMBOL(memscan);
+
+/**
+ * memcpy - Copy one area of memory to another
+ * @dest: Where to copy to
+ * @src: Where to copy from
+ * @n: The size of the area.
+ *
+ * returns a pointer to @dest
+ */
+void *memcpy(void *dest, const void *src, size_t n)
+{
+	return __builtin_memcpy(dest, src, n);
+}
+EXPORT_SYMBOL(memcpy);
+
+/**
+ * memset - Fill a region of memory with the given value
+ * @s: Pointer to the start of the area.
+ * @c: The byte to fill the area with
+ * @n: The size of the area.
+ *
+ * returns a pointer to @s
+ */
+void *memset(void *s, int c, size_t n)
+{
+	char *xs;
+
+	if (c == 0)
+		return __builtin_memset(s, 0, n);
+
+	xs = (char *) s;
+	if (n > 0)
+		do {
+			*xs++ = c;
+		} while (--n > 0);
+	return s;
+}
+EXPORT_SYMBOL(memset);
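One subtlety worth a usage note: strlcpy() returns the full length of the source, not the number of bytes copied, so truncation is detected by comparing the return value with the buffer size. A sketch, assuming the implementation above:

	char buf[8];
	size_t len = strlcpy(buf, "s390 string ops", sizeof(buf));

	if (len >= sizeof(buf)) {
		/* truncated: buf holds "s390 st" plus the terminating '\0' */
	}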
diff --git a/arch/s390/lib/uaccess.h b/arch/s390/lib/uaccess.h
new file mode 100644
index 00000000..1d2536cb
--- /dev/null
+++ b/arch/s390/lib/uaccess.h
@@ -0,0 +1,23 @@
+/*
+ * arch/s390/lib/uaccess.h
+ *
+ * Copyright IBM Corp. 2007
+ *
+ */
+
+#ifndef __ARCH_S390_LIB_UACCESS_H
+#define __ARCH_S390_LIB_UACCESS_H
+
+extern size_t copy_from_user_std(size_t, const void __user *, void *);
+extern size_t copy_to_user_std(size_t, void __user *, const void *);
+extern size_t strnlen_user_std(size_t, const char __user *);
+extern size_t strncpy_from_user_std(size_t, const char __user *, char *);
+extern int futex_atomic_cmpxchg_std(u32 *, u32 __user *, u32, u32);
+extern int futex_atomic_op_std(int, u32 __user *, int, int *);
+
+extern size_t copy_from_user_pt(size_t, const void __user *, void *);
+extern size_t copy_to_user_pt(size_t, void __user *, const void *);
+extern int futex_atomic_op_pt(int, u32 __user *, int, int *);
+extern int futex_atomic_cmpxchg_pt(u32 *, u32 __user *, u32, u32);
+
+#endif /* __ARCH_S390_LIB_UACCESS_H */
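These prototypes are the building blocks for the uaccess_ops structures defined in the three files that follow; the kernel publishes one of them in a global ops instance (the mvcos file below dispatches through a global named uaccess) and generic wrappers call through it. A hypothetical sketch of such a wrapper, assuming that global instance:

	static inline size_t example_copy_to_user(void __user *to,
						  const void *from, size_t n)
	{
		/* returns the number of bytes NOT copied, 0 on success */
		return uaccess.copy_to_user(n, to, from);
	}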
diff --git a/arch/s390/lib/uaccess_mvcos.c b/arch/s390/lib/uaccess_mvcos.c
new file mode 100644
index 00000000..60455f10
--- /dev/null
+++ b/arch/s390/lib/uaccess_mvcos.c
@@ -0,0 +1,227 @@
+/*
+ * arch/s390/lib/uaccess_mvcos.c
+ *
+ * Optimized user space access functions based on mvcos.
+ *
+ * Copyright (C) IBM Corp. 2006
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ *	      Gerald Schaefer (gerald.schaefer@de.ibm.com)
+ */
+
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <asm/uaccess.h>
+#include <asm/futex.h>
+#include "uaccess.h"
+
+#ifndef __s390x__
+#define AHI	"ahi"
+#define ALR	"alr"
+#define CLR	"clr"
+#define LHI	"lhi"
+#define SLR	"slr"
+#else
+#define AHI	"aghi"
+#define ALR	"algr"
+#define CLR	"clgr"
+#define LHI	"lghi"
+#define SLR	"slgr"
+#endif
+
+static size_t copy_from_user_mvcos(size_t size, const void __user *ptr, void *x)
+{
+	register unsigned long reg0 asm("0") = 0x81UL;
+	unsigned long tmp1, tmp2;
+
+	tmp1 = -4096UL;
+	asm volatile(
+		"0: .insn ss,0xc80000000000,0(%0,%2),0(%1),0\n"
+		"9: jz 7f\n"
+		"1:"ALR" %0,%3\n"
+		"  "SLR" %1,%3\n"
+		"  "SLR" %2,%3\n"
+		"   j 0b\n"
+		"2: la %4,4095(%1)\n"	/* %4 = ptr + 4095 */
+		"   nr %4,%3\n"		/* %4 = (ptr + 4095) & -4096 */
+		"  "SLR" %4,%1\n"
+		"  "CLR" %0,%4\n"	/* copy crosses next page boundary? */
+		"   jnh 4f\n"
+		"3: .insn ss,0xc80000000000,0(%4,%2),0(%1),0\n"
+		"10:"SLR" %0,%4\n"
+		"  "ALR" %2,%4\n"
+		"4:"LHI" %4,-1\n"
+		"  "ALR" %4,%0\n"	/* copy remaining size, subtract 1 */
+		"   bras %3,6f\n"	/* memset loop */
+		"   xc 0(1,%2),0(%2)\n"
+		"5: xc 0(256,%2),0(%2)\n"
+		"   la %2,256(%2)\n"
+		"6:"AHI" %4,-256\n"
+		"   jnm 5b\n"
+		"   ex %4,0(%3)\n"
+		"   j 8f\n"
+		"7:"SLR" %0,%0\n"
+		"8: \n"
+		EX_TABLE(0b,2b) EX_TABLE(3b,4b) EX_TABLE(9b,2b) EX_TABLE(10b,4b)
+		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
+		: "d" (reg0) : "cc", "memory");
+	return size;
+}
+
+static size_t copy_from_user_mvcos_check(size_t size, const void __user *ptr, void *x)
+{
+	if (size <= 256)
+		return copy_from_user_std(size, ptr, x);
+	return copy_from_user_mvcos(size, ptr, x);
+}
+
+static size_t copy_to_user_mvcos(size_t size, void __user *ptr, const void *x)
+{
+	register unsigned long reg0 asm("0") = 0x810000UL;
+	unsigned long tmp1, tmp2;
+
+	tmp1 = -4096UL;
+	asm volatile(
+		"0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
+		"6: jz 4f\n"
+		"1:"ALR" %0,%3\n"
+		"  "SLR" %1,%3\n"
+		"  "SLR" %2,%3\n"
+		"   j 0b\n"
+		"2: la %4,4095(%1)\n"	/* %4 = ptr + 4095 */
+		"   nr %4,%3\n"		/* %4 = (ptr + 4095) & -4096 */
+		"  "SLR" %4,%1\n"
+		"  "CLR" %0,%4\n"	/* copy crosses next page boundary? */
+		"   jnh 5f\n"
+		"3: .insn ss,0xc80000000000,0(%4,%1),0(%2),0\n"
+		"7:"SLR" %0,%4\n"
+		"   j 5f\n"
+		"4:"SLR" %0,%0\n"
+		"5: \n"
+		EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b)
+		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
+		: "d" (reg0) : "cc", "memory");
+	return size;
+}
+
+static size_t copy_to_user_mvcos_check(size_t size, void __user *ptr,
+				       const void *x)
+{
+	if (size <= 256)
+		return copy_to_user_std(size, ptr, x);
+	return copy_to_user_mvcos(size, ptr, x);
+}
+
+static size_t copy_in_user_mvcos(size_t size, void __user *to,
+				 const void __user *from)
+{
+	register unsigned long reg0 asm("0") = 0x810081UL;
+	unsigned long tmp1, tmp2;
+
+	tmp1 = -4096UL;
+	/* FIXME: copy with reduced length. */
+	asm volatile(
+		"0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
+		"   jz 2f\n"
+		"1:"ALR" %0,%3\n"
+		"  "SLR" %1,%3\n"
+		"  "SLR" %2,%3\n"
+		"   j 0b\n"
+		"2:"SLR" %0,%0\n"
+		"3: \n"
+		EX_TABLE(0b,3b)
+		: "+a" (size), "+a" (to), "+a" (from), "+a" (tmp1), "=a" (tmp2)
+		: "d" (reg0) : "cc", "memory");
+	return size;
+}
+
+static size_t clear_user_mvcos(size_t size, void __user *to)
+{
+	register unsigned long reg0 asm("0") = 0x810000UL;
+	unsigned long tmp1, tmp2;
+
+	tmp1 = -4096UL;
+	asm volatile(
+		"0: .insn ss,0xc80000000000,0(%0,%1),0(%4),0\n"
+		"   jz 4f\n"
+		"1:"ALR" %0,%2\n"
+		"  "SLR" %1,%2\n"
+		"   j 0b\n"
+		"2: la %3,4095(%1)\n"	/* %3 = to + 4095 */
+		"   nr %3,%2\n"		/* %3 = (to + 4095) & -4096 */
+		"  "SLR" %3,%1\n"
+		"  "CLR" %0,%3\n"	/* copy crosses next page boundary? */
+		"   jnh 5f\n"
+		"3: .insn ss,0xc80000000000,0(%3,%1),0(%4),0\n"
+		"  "SLR" %0,%3\n"
+		"   j 5f\n"
+		"4:"SLR" %0,%0\n"
+		"5: \n"
+		EX_TABLE(0b,2b) EX_TABLE(3b,5b)
+		: "+a" (size), "+a" (to), "+a" (tmp1), "=a" (tmp2)
+		: "a" (empty_zero_page), "d" (reg0) : "cc", "memory");
+	return size;
+}
+
+static size_t strnlen_user_mvcos(size_t count, const char __user *src)
+{
+	char buf[256];
+	int rc;
+	size_t done, len, len_str;
+
+	done = 0;
+	do {
+		len = min(count - done, (size_t) 256);
+		rc = uaccess.copy_from_user(len, src + done, buf);
+		if (unlikely(rc == len))
+			return 0;
+		len -= rc;
+		len_str = strnlen(buf, len);
+		done += len_str;
+	} while ((len_str == len) && (done < count));
+	return done + 1;
+}
+
+static size_t strncpy_from_user_mvcos(size_t count, const char __user *src,
+				      char *dst)
+{
+	int rc;
+	size_t done, len, len_str;
+
+	done = 0;
+	do {
+		len = min(count - done, (size_t) 4096);
+		rc = uaccess.copy_from_user(len, src + done, dst);
+		if (unlikely(rc == len))
+			return -EFAULT;
+		len -= rc;
+		len_str = strnlen(dst, len);
+		done += len_str;
+	} while ((len_str == len) && (done < count));
+	return done;
+}
+
+struct uaccess_ops uaccess_mvcos = {
+	.copy_from_user = copy_from_user_mvcos_check,
+	.copy_from_user_small = copy_from_user_std,
+	.copy_to_user = copy_to_user_mvcos_check,
+	.copy_to_user_small = copy_to_user_std,
+	.copy_in_user = copy_in_user_mvcos,
+	.clear_user = clear_user_mvcos,
+	.strnlen_user = strnlen_user_std,
+	.strncpy_from_user = strncpy_from_user_std,
+	.futex_atomic_op = futex_atomic_op_std,
+	.futex_atomic_cmpxchg = futex_atomic_cmpxchg_std,
+};
+
+struct uaccess_ops uaccess_mvcos_switch = {
+	.copy_from_user = copy_from_user_mvcos,
+	.copy_from_user_small = copy_from_user_mvcos,
+	.copy_to_user = copy_to_user_mvcos,
+	.copy_to_user_small = copy_to_user_mvcos,
+	.copy_in_user = copy_in_user_mvcos,
+	.clear_user = clear_user_mvcos,
+	.strnlen_user = strnlen_user_mvcos,
+	.strncpy_from_user = strncpy_from_user_mvcos,
+	.futex_atomic_op = futex_atomic_op_pt,
+	.futex_atomic_cmpxchg = futex_atomic_cmpxchg_pt,
+};
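The register 0 constants passed to the MVCOS instruction (the .insn ss,0xc80000000000 opcode above) encode which address space each operand lives in: judging by the three values used here, the low halfword describes the source operand and the halfword above it the destination, with 0x81 selecting the user address space. Hypothetical names for the values in this file, for illustration only:

	/* hypothetical names for the MVCOS register 0 values used above */
	#define MVCOS_SRC_USER	0x000081UL /* copy_from_user: source is user space      */
	#define MVCOS_DST_USER	0x810000UL /* copy_to_user: destination is user space   */
	#define MVCOS_BOTH_USER	0x810081UL /* copy_in_user: both operands in user space */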
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
new file mode 100644
index 00000000..342ae35a
--- /dev/null
+++ b/arch/s390/lib/uaccess_pt.c
@@ -0,0 +1,406 @@
+/*
+ * arch/s390/lib/uaccess_pt.c
+ *
+ * User access functions based on page table walks for enhanced
+ * system layout without hardware support.
+ *
+ * Copyright IBM Corp. 2006
+ * Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com)
+ */
+
+#include <linux/errno.h>
+#include <linux/hardirq.h>
+#include <linux/mm.h>
+#include <asm/uaccess.h>
+#include <asm/futex.h>
+#include "uaccess.h"
+
+static inline pte_t *follow_table(struct mm_struct *mm, unsigned long addr)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+
+	pgd = pgd_offset(mm, addr);
+	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
+		return (pte_t *) 0x3a;
+
+	pud = pud_offset(pgd, addr);
+	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
+		return (pte_t *) 0x3b;
+
+	pmd = pmd_offset(pud, addr);
+	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
+		return (pte_t *) 0x10;
+
+	return pte_offset_map(pmd, addr);
+}
+static __always_inline size_t __user_copy_pt(unsigned long uaddr, void *kptr,
+					     size_t n, int write_user)
+{
+	struct mm_struct *mm = current->mm;
+	unsigned long offset, pfn, done, size;
+	pte_t *pte;
+	void *from, *to;
+
+	done = 0;
+retry:
+	spin_lock(&mm->page_table_lock);
+	do {
+		pte = follow_table(mm, uaddr);
+		if ((unsigned long) pte < 0x1000)
+			goto fault;
+		if (!pte_present(*pte)) {
+			pte = (pte_t *) 0x11;
+			goto fault;
+		} else if (write_user && !pte_write(*pte)) {
+			pte = (pte_t *) 0x04;
+			goto fault;
+		}
+
+		pfn = pte_pfn(*pte);
+		offset = uaddr & (PAGE_SIZE - 1);
+		size = min(n - done, PAGE_SIZE - offset);
+		if (write_user) {
+			to = (void *)((pfn << PAGE_SHIFT) + offset);
+			from = kptr + done;
+		} else {
+			from = (void *)((pfn << PAGE_SHIFT) + offset);
+			to = kptr + done;
+		}
+		memcpy(to, from, size);
+		done += size;
+		uaddr += size;
+	} while (done < n);
+	spin_unlock(&mm->page_table_lock);
+	return n - done;
+fault:
+	spin_unlock(&mm->page_table_lock);
+	if (__handle_fault(uaddr, (unsigned long) pte, write_user))
+		return n - done;
+	goto retry;
+}
+
+/*
+ * Do DAT for user address by page table walk, return kernel address.
+ * This function needs to be called with current->mm->page_table_lock held.
+ */
+static __always_inline unsigned long __dat_user_addr(unsigned long uaddr)
+{
+	struct mm_struct *mm = current->mm;
+	unsigned long pfn;
+	pte_t *pte;
+	int rc;
+
+retry:
+	pte = follow_table(mm, uaddr);
+	if ((unsigned long) pte < 0x1000)
+		goto fault;
+	if (!pte_present(*pte)) {
+		pte = (pte_t *) 0x11;
+		goto fault;
+	}
+
+	pfn = pte_pfn(*pte);
+	return (pfn << PAGE_SHIFT) + (uaddr & (PAGE_SIZE - 1));
+fault:
+	spin_unlock(&mm->page_table_lock);
+	rc = __handle_fault(uaddr, (unsigned long) pte, 0);
+	spin_lock(&mm->page_table_lock);
+	if (!rc)
+		goto retry;
+	return 0;
+}
+
+size_t copy_from_user_pt(size_t n, const void __user *from, void *to)
+{
+	size_t rc;
+
+	if (segment_eq(get_fs(), KERNEL_DS)) {
+		memcpy(to, (void __kernel __force *) from, n);
+		return 0;
+	}
+	rc = __user_copy_pt((unsigned long) from, to, n, 0);
+	if (unlikely(rc))
+		memset(to + n - rc, 0, rc);
+	return rc;
+}
+
+size_t copy_to_user_pt(size_t n, void __user *to, const void *from)
+{
+	if (segment_eq(get_fs(), KERNEL_DS)) {
+		memcpy((void __kernel __force *) to, from, n);
+		return 0;
+	}
+	return __user_copy_pt((unsigned long) to, (void *) from, n, 1);
+}
+
+static size_t clear_user_pt(size_t n, void __user *to)
+{
+	long done, size, ret;
+
+	if (segment_eq(get_fs(), KERNEL_DS)) {
+		memset((void __kernel __force *) to, 0, n);
+		return 0;
+	}
+	done = 0;
+	do {
+		if (n - done > PAGE_SIZE)
+			size = PAGE_SIZE;
+		else
+			size = n - done;
+		ret = __user_copy_pt((unsigned long) to + done,
+				      &empty_zero_page, size, 1);
+		done += size;
+		if (ret)
+			return ret + n - done;
+	} while (done < n);
+	return 0;
+}
+
+static size_t strnlen_user_pt(size_t count, const char __user *src)
+{
+	char *addr;
+	unsigned long uaddr = (unsigned long) src;
+	struct mm_struct *mm = current->mm;
+	unsigned long offset, pfn, done, len;
+	pte_t *pte;
+	size_t len_str;
+
+	if (segment_eq(get_fs(), KERNEL_DS))
+		return strnlen((const char __kernel __force *) src, count) + 1;
+	done = 0;
+retry:
+	spin_lock(&mm->page_table_lock);
+	do {
+		pte = follow_table(mm, uaddr);
+		if ((unsigned long) pte < 0x1000)
+			goto fault;
+		if (!pte_present(*pte)) {
+			pte = (pte_t *) 0x11;
+			goto fault;
+		}
+
+		pfn = pte_pfn(*pte);
+		offset = uaddr & (PAGE_SIZE-1);
+		addr = (char *)(pfn << PAGE_SHIFT) + offset;
+		len = min(count - done, PAGE_SIZE - offset);
+		len_str = strnlen(addr, len);
+		done += len_str;
+		uaddr += len_str;
+	} while ((len_str == len) && (done < count));
+	spin_unlock(&mm->page_table_lock);
+	return done + 1;
+fault:
+	spin_unlock(&mm->page_table_lock);
+	if (__handle_fault(uaddr, (unsigned long) pte, 0))
+		return 0;
+	goto retry;
+}
+
+static size_t strncpy_from_user_pt(size_t count, const char __user *src,
+				   char *dst)
+{
+	size_t n = strnlen_user_pt(count, src);
+
+	if (!n)
+		return -EFAULT;
+	if (n > count)
+		n = count;
+	if (segment_eq(get_fs(), KERNEL_DS)) {
+		memcpy(dst, (const char __kernel __force *) src, n);
+		if (dst[n-1] == '\0')
+			return n-1;
+		else
+			return n;
+	}
+	if (__user_copy_pt((unsigned long) src, dst, n, 0))
+		return -EFAULT;
+	if (dst[n-1] == '\0')
+		return n-1;
+	else
+		return n;
+}
+static size_t copy_in_user_pt(size_t n, void __user *to,
+			      const void __user *from)
+{
+	struct mm_struct *mm = current->mm;
+	unsigned long offset_from, offset_to, offset_max, pfn_from, pfn_to,
+		      uaddr, done, size, error_code;
+	unsigned long uaddr_from = (unsigned long) from;
+	unsigned long uaddr_to = (unsigned long) to;
+	pte_t *pte_from, *pte_to;
+	int write_user;
+
+	if (segment_eq(get_fs(), KERNEL_DS)) {
+		memcpy((void __force *) to, (void __force *) from, n);
+		return 0;
+	}
+	done = 0;
+retry:
+	spin_lock(&mm->page_table_lock);
+	do {
+		write_user = 0;
+		uaddr = uaddr_from;
+		pte_from = follow_table(mm, uaddr_from);
+		error_code = (unsigned long) pte_from;
+		if (error_code < 0x1000)
+			goto fault;
+		if (!pte_present(*pte_from)) {
+			error_code = 0x11;
+			goto fault;
+		}
+
+		write_user = 1;
+		uaddr = uaddr_to;
+		pte_to = follow_table(mm, uaddr_to);
+		error_code = (unsigned long) pte_to;
+		if (error_code < 0x1000)
+			goto fault;
+		if (!pte_present(*pte_to)) {
+			error_code = 0x11;
+			goto fault;
+		} else if (!pte_write(*pte_to)) {
+			error_code = 0x04;
+			goto fault;
+		}
+
+		pfn_from = pte_pfn(*pte_from);
+		pfn_to = pte_pfn(*pte_to);
+		offset_from = uaddr_from & (PAGE_SIZE-1);
+		offset_to = uaddr_to & (PAGE_SIZE-1);
+		offset_max = max(offset_from, offset_to);
+		size = min(n - done, PAGE_SIZE - offset_max);
+
+		memcpy((void *)(pfn_to << PAGE_SHIFT) + offset_to,
+		       (void *)(pfn_from << PAGE_SHIFT) + offset_from, size);
+		done += size;
+		uaddr_from += size;
+		uaddr_to += size;
+	} while (done < n);
+	spin_unlock(&mm->page_table_lock);
+	return n - done;
+fault:
+	spin_unlock(&mm->page_table_lock);
+	if (__handle_fault(uaddr, error_code, write_user))
+		return n - done;
+	goto retry;
+}
+
+#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg)	\
+	asm volatile("0: l   %1,0(%6)\n"				\
+		     "1: " insn						\
+		     "2: cs  %1,%2,0(%6)\n"				\
+		     "3: jl  1b\n"					\
+		     "   lhi %0,0\n"					\
+		     "4:\n"						\
+		     EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b)	\
+		     : "=d" (ret), "=&d" (oldval), "=&d" (newval),	\
+		       "=m" (*uaddr)					\
+		     : "0" (-EFAULT), "d" (oparg), "a" (uaddr),		\
+		       "m" (*uaddr) : "cc" );
+
+static int __futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old)
+{
+	int oldval = 0, newval, ret;
+
+	switch (op) {
+	case FUTEX_OP_SET:
+		__futex_atomic_op("lr %2,%5\n",
+				  ret, oldval, newval, uaddr, oparg);
+		break;
+	case FUTEX_OP_ADD:
+		__futex_atomic_op("lr %2,%1\nar %2,%5\n",
+				  ret, oldval, newval, uaddr, oparg);
+		break;
+	case FUTEX_OP_OR:
+		__futex_atomic_op("lr %2,%1\nor %2,%5\n",
+				  ret, oldval, newval, uaddr, oparg);
+		break;
+	case FUTEX_OP_ANDN:
+		__futex_atomic_op("lr %2,%1\nnr %2,%5\n",
+				  ret, oldval, newval, uaddr, oparg);
+		break;
+	case FUTEX_OP_XOR:
+		__futex_atomic_op("lr %2,%1\nxr %2,%5\n",
+				  ret, oldval, newval, uaddr, oparg);
+		break;
+	default:
+		ret = -ENOSYS;
+	}
+	if (ret == 0)
+		*old = oldval;
+	return ret;
+}
+
+int futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old)
+{
+	int ret;
+
+	if (segment_eq(get_fs(), KERNEL_DS))
+		return __futex_atomic_op_pt(op, uaddr, oparg, old);
+	spin_lock(&current->mm->page_table_lock);
+	uaddr = (u32 __force __user *)
+		__dat_user_addr((__force unsigned long) uaddr);
+	if (!uaddr) {
+		spin_unlock(&current->mm->page_table_lock);
+		return -EFAULT;
+	}
+	get_page(virt_to_page(uaddr));
+	spin_unlock(&current->mm->page_table_lock);
+	ret = __futex_atomic_op_pt(op, uaddr, oparg, old);
+	put_page(virt_to_page(uaddr));
+	return ret;
+}
+
+static int __futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
+				     u32 oldval, u32 newval)
+{
+	int ret;
+
+	asm volatile("0: cs  %1,%4,0(%5)\n"
+		     "1: la  %0,0\n"
+		     "2:\n"
+		     EX_TABLE(0b,2b) EX_TABLE(1b,2b)
+		     : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
+		     : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
+		     : "cc", "memory" );
+	*uval = oldval;
+	return ret;
+}
+int futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
+			    u32 oldval, u32 newval)
+{
+	int ret;
+
+	if (segment_eq(get_fs(), KERNEL_DS))
+		return __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval);
+	spin_lock(&current->mm->page_table_lock);
+	uaddr = (u32 __force __user *)
+		__dat_user_addr((__force unsigned long) uaddr);
+	if (!uaddr) {
+		spin_unlock(&current->mm->page_table_lock);
+		return -EFAULT;
+	}
+	get_page(virt_to_page(uaddr));
+	spin_unlock(&current->mm->page_table_lock);
+	ret = __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval);
+	put_page(virt_to_page(uaddr));
+	return ret;
+}
+
+struct uaccess_ops uaccess_pt = {
+	.copy_from_user = copy_from_user_pt,
+	.copy_from_user_small = copy_from_user_pt,
+	.copy_to_user = copy_to_user_pt,
+	.copy_to_user_small = copy_to_user_pt,
+	.copy_in_user = copy_in_user_pt,
+	.clear_user = clear_user_pt,
+	.strnlen_user = strnlen_user_pt,
+	.strncpy_from_user = strncpy_from_user_pt,
+	.futex_atomic_op = futex_atomic_op_pt,
+	.futex_atomic_cmpxchg = futex_atomic_cmpxchg_pt,
+};
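The small magic values that follow_table() and the copy loops store in place of a real pte pointer double as s390 program-interruption codes, so the fault path can hand them to __handle_fault() unchanged: 0x3a/0x3b are region-table translation exceptions, 0x10 a segment-translation exception, 0x11 a page-translation exception and 0x04 a protection exception. A one-line predicate makes the convention explicit (sketch only, not part of the patch):

	static inline int pte_is_fault_code(pte_t *pte)
	{
		/* real pte pointers are never below 0x1000 */
		return (unsigned long) pte < 0x1000;
	}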
diff --git a/arch/s390/lib/uaccess_std.c b/arch/s390/lib/uaccess_std.c
new file mode 100644
index 00000000..bb1a7eed
--- /dev/null
+++ b/arch/s390/lib/uaccess_std.c
@@ -0,0 +1,319 @@
+/*
+ * arch/s390/lib/uaccess_std.c
+ *
+ * Standard user space access functions based on mvcp/mvcs and doing
+ * interesting things in the secondary space mode.
+ *
+ * Copyright (C) IBM Corp. 2006
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ *	      Gerald Schaefer (gerald.schaefer@de.ibm.com)
+ */
+
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include <asm/futex.h>
+#include "uaccess.h"
+
+#ifndef __s390x__
+#define AHI	"ahi"
+#define ALR	"alr"
+#define CLR	"clr"
+#define LHI	"lhi"
+#define SLR	"slr"
+#else
+#define AHI	"aghi"
+#define ALR	"algr"
+#define CLR	"clgr"
+#define LHI	"lghi"
+#define SLR	"slgr"
+#endif
+
+size_t copy_from_user_std(size_t size, const void __user *ptr, void *x)
+{
+	unsigned long tmp1, tmp2;
+
+	tmp1 = -256UL;
+	asm volatile(
+		"0: mvcp 0(%0,%2),0(%1),%3\n"
+		"10:jz 8f\n"
+		"1:"ALR" %0,%3\n"
+		"   la %1,256(%1)\n"
+		"   la %2,256(%2)\n"
+		"2: mvcp 0(%0,%2),0(%1),%3\n"
+		"11:jnz 1b\n"
+		"   j 8f\n"
+		"3: la %4,255(%1)\n"	/* %4 = ptr + 255 */
+		"  "LHI" %3,-4096\n"
+		"   nr %4,%3\n"		/* %4 = (ptr + 255) & -4096 */
+		"  "SLR" %4,%1\n"
+		"  "CLR" %0,%4\n"	/* copy crosses next page boundary? */
+		"   jnh 5f\n"
+		"4: mvcp 0(%4,%2),0(%1),%3\n"
+		"12:"SLR" %0,%4\n"
+		"  "ALR" %2,%4\n"
+		"5:"LHI" %4,-1\n"
+		"  "ALR" %4,%0\n"	/* copy remaining size, subtract 1 */
+		"   bras %3,7f\n"	/* memset loop */
+		"   xc 0(1,%2),0(%2)\n"
+		"6: xc 0(256,%2),0(%2)\n"
+		"   la %2,256(%2)\n"
+		"7:"AHI" %4,-256\n"
+		"   jnm 6b\n"
+		"   ex %4,0(%3)\n"
+		"   j 9f\n"
+		"8:"SLR" %0,%0\n"
+		"9: \n"
+		EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,5b)
+		EX_TABLE(10b,3b) EX_TABLE(11b,3b) EX_TABLE(12b,5b)
+		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
+		: : "cc", "memory");
+	return size;
+}
+
+static size_t copy_from_user_std_check(size_t size, const void __user *ptr,
+				       void *x)
+{
+	if (size <= 1024)
+		return copy_from_user_std(size, ptr, x);
+	return copy_from_user_pt(size, ptr, x);
+}
+
+size_t copy_to_user_std(size_t size, void __user *ptr, const void *x)
+{
+	unsigned long tmp1, tmp2;
+
+	tmp1 = -256UL;
+	asm volatile(
+		"0: mvcs 0(%0,%1),0(%2),%3\n"
+		"7: jz 5f\n"
+		"1:"ALR" %0,%3\n"
+		"   la %1,256(%1)\n"
+		"   la %2,256(%2)\n"
+		"2: mvcs 0(%0,%1),0(%2),%3\n"
+		"8: jnz 1b\n"
+		"   j 5f\n"
+		"3: la %4,255(%1)\n"	/* %4 = ptr + 255 */
+		"  "LHI" %3,-4096\n"
+		"   nr %4,%3\n"		/* %4 = (ptr + 255) & -4096 */
+		"  "SLR" %4,%1\n"
+		"  "CLR" %0,%4\n"	/* copy crosses next page boundary? */
+		"   jnh 6f\n"
+		"4: mvcs 0(%4,%1),0(%2),%3\n"
+		"9:"SLR" %0,%4\n"
+		"   j 6f\n"
+		"5:"SLR" %0,%0\n"
+		"6: \n"
+		EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b)
+		EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b)
+		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
+		: : "cc", "memory");
+	return size;
+}
+
+static size_t copy_to_user_std_check(size_t size, void __user *ptr,
+				     const void *x)
+{
+	if (size <= 1024)
+		return copy_to_user_std(size, ptr, x);
+	return copy_to_user_pt(size, ptr, x);
+}
+
+static size_t copy_in_user_std(size_t size, void __user *to,
+			       const void __user *from)
+{
+	unsigned long tmp1;
+
+	asm volatile(
+		"   sacf 256\n"
+		"  "AHI" %0,-1\n"
+		"   jo 5f\n"
+		"   bras %3,3f\n"
+		"0:"AHI" %0,257\n"
+		"1: mvc 0(1,%1),0(%2)\n"
+		"   la %1,1(%1)\n"
+		"   la %2,1(%2)\n"
+		"  "AHI" %0,-1\n"
+		"   jnz 1b\n"
+		"   j 5f\n"
+		"2: mvc 0(256,%1),0(%2)\n"
+		"   la %1,256(%1)\n"
+		"   la %2,256(%2)\n"
+		"3:"AHI" %0,-256\n"
+		"   jnm 2b\n"
+		"4: ex %0,1b-0b(%3)\n"
+		"5: "SLR" %0,%0\n"
+		"6: sacf 0\n"
+		EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
+		: "+a" (size), "+a" (to), "+a" (from), "=a" (tmp1)
+		: : "cc", "memory");
+	return size;
+}
+
+static size_t clear_user_std(size_t size, void __user *to)
+{
+	unsigned long tmp1, tmp2;
+
+	asm volatile(
+		"   sacf 256\n"
+		"  "AHI" %0,-1\n"
+		"   jo 5f\n"
+		"   bras %3,3f\n"
+		"   xc 0(1,%1),0(%1)\n"
+		"0:"AHI" %0,257\n"
+		"   la %2,255(%1)\n"	/* %2 = ptr + 255 */
+		"   srl %2,12\n"
+		"   sll %2,12\n"	/* %2 = (ptr + 255) & -4096 */
+		"  "SLR" %2,%1\n"
+		"  "CLR" %0,%2\n"	/* clear crosses next page boundary? */
+		"   jnh 5f\n"
+		"  "AHI" %2,-1\n"
+		"1: ex %2,0(%3)\n"
+		"  "AHI" %2,1\n"
+		"  "SLR" %0,%2\n"
+		"   j 5f\n"
+		"2: xc 0(256,%1),0(%1)\n"
+		"   la %1,256(%1)\n"
+		"3:"AHI" %0,-256\n"
+		"   jnm 2b\n"
+		"4: ex %0,0(%3)\n"
+		"5: "SLR" %0,%0\n"
+		"6: sacf 0\n"
+		EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
+		: "+a" (size), "+a" (to), "=a" (tmp1), "=a" (tmp2)
+		: : "cc", "memory");
+	return size;
+}
+
+size_t strnlen_user_std(size_t size, const char __user *src)
+{
+	register unsigned long reg0 asm("0") = 0UL;
+	unsigned long tmp1, tmp2;
+
+	asm volatile(
+		"   la %2,0(%1)\n"
+		"   la %3,0(%0,%1)\n"
+		"  "SLR" %0,%0\n"
+		"   sacf 256\n"
+		"0: srst %3,%2\n"
+		"   jo 0b\n"
+		"   la %0,1(%3)\n"	/* strnlen_user result includes \0 */
+		"  "SLR" %0,%1\n"
+		"1: sacf 0\n"
+		EX_TABLE(0b,1b)
+		: "+a" (size), "+a" (src), "=a" (tmp1), "=a" (tmp2)
+		: "d" (reg0) : "cc", "memory");
+	return size;
+}
+
+size_t strncpy_from_user_std(size_t size, const char __user *src, char *dst)
+{
+	register unsigned long reg0 asm("0") = 0UL;
+	unsigned long tmp1, tmp2;
+
+	asm volatile(
+		"   la %3,0(%1)\n"
+		"   la %4,0(%0,%1)\n"
+		"   sacf 256\n"
+		"0: srst %4,%3\n"
+		"   jo 0b\n"
+		"   sacf 0\n"
+		"   la %0,0(%4)\n"
+		"   jh 1f\n"		/* found \0 in string ? */
+		"  "AHI" %4,1\n"	/* include \0 in copy */
+		"1:"SLR" %0,%1\n"	/* %0 = return length (without \0) */
+		"  "SLR" %4,%1\n"	/* %4 = copy length (including \0) */
+		"2: mvcp 0(%4,%2),0(%1),%5\n"
+		"   jz 9f\n"
+		"3:"AHI" %4,-256\n"
+		"   la %1,256(%1)\n"
+		"   la %2,256(%2)\n"
+		"4: mvcp 0(%4,%2),0(%1),%5\n"
+		"   jnz 3b\n"
+		"   j 9f\n"
+		"7: sacf 0\n"
+		"8:"LHI" %0,%6\n"
+		"9:\n"
+		EX_TABLE(0b,7b) EX_TABLE(2b,8b) EX_TABLE(4b,8b)
+		: "+a" (size), "+a" (src), "+d" (dst), "=a" (tmp1), "=a" (tmp2)
+		: "d" (reg0), "K" (-EFAULT) : "cc", "memory");
+	return size;
+}
+
+#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg)	\
+	asm volatile(							\
+		"   sacf 256\n"						\
+		"0: l   %1,0(%6)\n"					\
+		"1:"insn						\
+		"2: cs  %1,%2,0(%6)\n"					\
+		"3: jl  1b\n"						\
+		"   lhi %0,0\n"						\
+		"4: sacf 0\n"						\
+		EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b)		\
+		: "=d" (ret), "=&d" (oldval), "=&d" (newval),		\
+		  "=m" (*uaddr)						\
+		: "0" (-EFAULT), "d" (oparg), "a" (uaddr),		\
+		  "m" (*uaddr) : "cc");
+
+int futex_atomic_op_std(int op, u32 __user *uaddr, int oparg, int *old)
+{
+	int oldval = 0, newval, ret;
+
+	switch (op) {
+	case FUTEX_OP_SET:
+		__futex_atomic_op("lr %2,%5\n",
+				  ret, oldval, newval, uaddr, oparg);
+		break;
+	case FUTEX_OP_ADD:
+		__futex_atomic_op("lr %2,%1\nar %2,%5\n",
+				  ret, oldval, newval, uaddr, oparg);
+		break;
+	case FUTEX_OP_OR:
+		__futex_atomic_op("lr %2,%1\nor %2,%5\n",
+				  ret, oldval, newval, uaddr, oparg);
+		break;
+	case FUTEX_OP_ANDN:
+		__futex_atomic_op("lr %2,%1\nnr %2,%5\n",
+				  ret, oldval, newval, uaddr, oparg);
+		break;
+	case FUTEX_OP_XOR:
+		__futex_atomic_op("lr %2,%1\nxr %2,%5\n",
+				  ret, oldval, newval, uaddr, oparg);
+		break;
+	default:
+		ret = -ENOSYS;
+	}
+	*old = oldval;
+	return ret;
+}
+
+int futex_atomic_cmpxchg_std(u32 *uval, u32 __user *uaddr,
+			     u32 oldval, u32 newval)
+{
+	int ret;
+
+	asm volatile(
+		"   sacf 256\n"
+		"0: cs  %1,%4,0(%5)\n"
+		"1: la  %0,0\n"
+		"2: sacf 0\n"
+		EX_TABLE(0b,2b) EX_TABLE(1b,2b)
+		: "=d" (ret), "+d" (oldval), "=m" (*uaddr)
+		: "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
+		: "cc", "memory" );
+	*uval = oldval;
+	return ret;
+}
+
+struct uaccess_ops uaccess_std = {
+	.copy_from_user = copy_from_user_std_check,
+	.copy_from_user_small = copy_from_user_std,
+	.copy_to_user = copy_to_user_std_check,
+	.copy_to_user_small = copy_to_user_std,
+	.copy_in_user = copy_in_user_std,
+	.clear_user = clear_user_std,
+	.strnlen_user = strnlen_user_std,
+	.strncpy_from_user = strncpy_from_user_std,
+	.futex_atomic_op = futex_atomic_op_std,
+	.futex_atomic_cmpxchg = futex_atomic_cmpxchg_std,
+};
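The pattern common to every std routine above is the sacf bracket: "sacf 256" switches the CPU into secondary-space mode so that ordinary instructions (mvc, srst, cs) address user memory, and "sacf 0" switches back, while mvcp/mvcs move data across the primary/secondary boundary directly. A stripped-down sketch of the bracket, for illustration only:

	static inline void secondary_space_bracket(void)
	{
		asm volatile(
			"	sacf	256\n"	/* address user (secondary) space */
			/* ... instructions touching user memory would go here ... */
			"	sacf	0\n"	/* back to primary space */
			: : : "cc", "memory");
	}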
diff --git a/arch/s390/lib/ucmpdi2.c b/arch/s390/lib/ucmpdi2.c
new file mode 100644
index 00000000..3e05ff53
--- /dev/null
+++ b/arch/s390/lib/ucmpdi2.c
@@ -0,0 +1,26 @@
+#include <linux/module.h>
+
+union ull_union {
+	unsigned long long ull;
+	struct {
+		unsigned int high;
+		unsigned int low;
+	} ui;
+};
+
+int __ucmpdi2(unsigned long long a, unsigned long long b)
+{
+	union ull_union au = {.ull = a};
+	union ull_union bu = {.ull = b};
+
+	if (au.ui.high < bu.ui.high)
+		return 0;
+	else if (au.ui.high > bu.ui.high)
+		return 2;
+	if (au.ui.low < bu.ui.low)
+		return 0;
+	else if (au.ui.low > bu.ui.low)
+		return 2;
+	return 1;
+}
+EXPORT_SYMBOL(__ucmpdi2);
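__ucmpdi2 is the libgcc comparison helper that gcc emits for 64-bit unsigned comparisons on 31-bit targets; it returns 0, 1 or 2 for a < b, a == b and a > b respectively. A quick check of the contract (illustration only):

	BUG_ON(__ucmpdi2(1ULL, 2ULL) != 0);	/* a < b  */
	BUG_ON(__ucmpdi2(2ULL, 2ULL) != 1);	/* a == b */
	BUG_ON(__ucmpdi2(3ULL, 2ULL) != 2);	/* a > b  */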
diff --git a/arch/s390/lib/usercopy.c b/arch/s390/lib/usercopy.c
new file mode 100644
index 00000000..14b363fe
--- /dev/null
+++ b/arch/s390/lib/usercopy.c
@@ -0,0 +1,8 @@
+#include <linux/module.h>
+#include <linux/bug.h>
+
+void copy_from_user_overflow(void)
+{
+	WARN(1, "Buffer overflow detected!\n");
+}
+EXPORT_SYMBOL(copy_from_user_overflow);