author     Srikant Patnaik    2015-01-11 12:28:04 +0530
committer  Srikant Patnaik    2015-01-11 12:28:04 +0530
commit     871480933a1c28f8a9fed4c4d34d06c439a7a422 (patch)
tree       8718f573808810c2a1e8cb8fb6ac469093ca2784 /arch/x86/um/asm
parent     9d40ac5867b9aefe0722bc1f110b965ff294d30d (diff)
Moved, renamed, and deleted files
The original directory structure was scattered and unorganized. These changes reorganize the tree to match the standard kernel layout.
Diffstat (limited to 'arch/x86/um/asm')
-rw-r--r--  arch/x86/um/asm/apic.h                  4
-rw-r--r--  arch/x86/um/asm/arch_hweight.h          6
-rw-r--r--  arch/x86/um/asm/archparam.h            20
-rw-r--r--  arch/x86/um/asm/barrier.h              75
-rw-r--r--  arch/x86/um/asm/checksum.h             10
-rw-r--r--  arch/x86/um/asm/checksum_32.h         201
-rw-r--r--  arch/x86/um/asm/checksum_64.h         144
-rw-r--r--  arch/x86/um/asm/desc.h                 16
-rw-r--r--  arch/x86/um/asm/elf.h                 221
-rw-r--r--  arch/x86/um/asm/irq_vectors.h          10
-rw-r--r--  arch/x86/um/asm/mm_context.h           72
-rw-r--r--  arch/x86/um/asm/module.h               23
-rw-r--r--  arch/x86/um/asm/processor.h            32
-rw-r--r--  arch/x86/um/asm/processor_32.h         56
-rw-r--r--  arch/x86/um/asm/processor_64.h         35
-rw-r--r--  arch/x86/um/asm/ptrace.h                5
-rw-r--r--  arch/x86/um/asm/ptrace_32.h            51
-rw-r--r--  arch/x86/um/asm/ptrace_64.h            72
-rw-r--r--  arch/x86/um/asm/required-features.h     9
-rw-r--r--  arch/x86/um/asm/segment.h              10
-rw-r--r--  arch/x86/um/asm/vm-flags.h             25
21 files changed, 1097 insertions(+), 0 deletions(-)
diff --git a/arch/x86/um/asm/apic.h b/arch/x86/um/asm/apic.h
new file mode 100644
index 00000000..876dee84
--- /dev/null
+++ b/arch/x86/um/asm/apic.h
@@ -0,0 +1,4 @@
+#ifndef __UM_APIC_H
+#define __UM_APIC_H
+
+#endif
diff --git a/arch/x86/um/asm/arch_hweight.h b/arch/x86/um/asm/arch_hweight.h
new file mode 100644
index 00000000..c656cf44
--- /dev/null
+++ b/arch/x86/um/asm/arch_hweight.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_UM_HWEIGHT_H
+#define _ASM_UM_HWEIGHT_H
+
+#include <asm-generic/bitops/arch_hweight.h>
+
+#endif
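
For UML the hweight ("Hamming weight", i.e. population count) ops fall through to the generic C implementation pulled in above. A minimal userspace sketch of the same parallel-sum technique (illustrative only, not the kernel's exact source):

    #include <stdint.h>
    #include <stdio.h>

    static unsigned int hweight32(uint32_t w)
    {
        w = w - ((w >> 1) & 0x55555555);                /* 2-bit sums  */
        w = (w & 0x33333333) + ((w >> 2) & 0x33333333); /* 4-bit sums  */
        w = (w + (w >> 4)) & 0x0f0f0f0f;                /* 8-bit sums  */
        return (w * 0x01010101) >> 24;                  /* total count */
    }

    int main(void)
    {
        printf("%u\n", hweight32(0xf0f0));              /* prints 8 */
        return 0;
    }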
diff --git a/arch/x86/um/asm/archparam.h b/arch/x86/um/asm/archparam.h
new file mode 100644
index 00000000..c17cf68d
--- /dev/null
+++ b/arch/x86/um/asm/archparam.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2000 - 2003 Jeff Dike (jdike@addtoit.com)
+ * Copyright 2003 PathScale, Inc.
+ * Licensed under the GPL
+ */
+
+#ifndef __UM_ARCHPARAM_H
+#define __UM_ARCHPARAM_H
+
+#ifdef CONFIG_X86_32
+
+#ifdef CONFIG_X86_PAE
+#define LAST_PKMAP 512
+#else
+#define LAST_PKMAP 1024
+#endif
+
+#endif
+
+#endif
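
LAST_PKMAP sizes the 32-bit highmem kmap window; PAE halves it because 8-byte PTEs let one page-table page map only half as many entries. A quick arithmetic check, assuming the usual 4 KiB x86 page size:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    int main(void)
    {
        printf("PAE window:     %lu KiB\n", 512UL  * PAGE_SIZE / 1024); /* 2048 */
        printf("non-PAE window: %lu KiB\n", 1024UL * PAGE_SIZE / 1024); /* 4096 */
        return 0;
    }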
diff --git a/arch/x86/um/asm/barrier.h b/arch/x86/um/asm/barrier.h
new file mode 100644
index 00000000..7d01b8c5
--- /dev/null
+++ b/arch/x86/um/asm/barrier.h
@@ -0,0 +1,75 @@
+#ifndef _ASM_UM_BARRIER_H_
+#define _ASM_UM_BARRIER_H_
+
+#include <asm/asm.h>
+#include <asm/segment.h>
+#include <asm/cpufeature.h>
+#include <asm/cmpxchg.h>
+#include <asm/nops.h>
+
+#include <linux/kernel.h>
+#include <linux/irqflags.h>
+
+/*
+ * Force strict CPU ordering.
+ * And yes, this is required on UP too when we're talking
+ * to devices.
+ */
+#ifdef CONFIG_X86_32
+
+#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
+#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
+#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
+
+#else /* CONFIG_X86_32 */
+
+#define mb() asm volatile("mfence" : : : "memory")
+#define rmb() asm volatile("lfence" : : : "memory")
+#define wmb() asm volatile("sfence" : : : "memory")
+
+#endif /* CONFIG_X86_32 */
+
+#define read_barrier_depends() do { } while (0)
+
+#ifdef CONFIG_SMP
+
+#define smp_mb() mb()
+#ifdef CONFIG_X86_PPRO_FENCE
+#define smp_rmb() rmb()
+#else /* CONFIG_X86_PPRO_FENCE */
+#define smp_rmb() barrier()
+#endif /* CONFIG_X86_PPRO_FENCE */
+
+#ifdef CONFIG_X86_OOSTORE
+#define smp_wmb() wmb()
+#else /* CONFIG_X86_OOSTORE */
+#define smp_wmb() barrier()
+#endif /* CONFIG_X86_OOSTORE */
+
+#define smp_read_barrier_depends() read_barrier_depends()
+#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
+
+#else /* CONFIG_SMP */
+
+#define smp_mb() barrier()
+#define smp_rmb() barrier()
+#define smp_wmb() barrier()
+#define smp_read_barrier_depends() do { } while (0)
+#define set_mb(var, value) do { var = value; barrier(); } while (0)
+
+#endif /* CONFIG_SMP */
+
+/*
+ * Stop RDTSC speculation. This is needed when you need to use RDTSC
+ * (or get_cycles or vread that possibly accesses the TSC) in a defined
+ * code region.
+ *
+ * (Could use a three-way alternative() for this if one existed.)
+ */
+static inline void rdtsc_barrier(void)
+{
+ alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
+ alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
+}
+
+#endif
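
The smp_* variants collapse to plain compiler barriers on !CONFIG_SMP, and callers rely on pairing a write barrier in the producer with a read barrier in the consumer. A hedged kernel-context sketch ('data' and 'ready' are invented names; a real caller would also wrap the accesses in ACCESS_ONCE()):

    static int data, ready;

    void producer(void)
    {
        data = 42;
        smp_wmb();              /* publish data before the flag */
        ready = 1;
    }

    int consumer(void)
    {
        while (!ready)
            cpu_relax();        /* from asm/processor.h */
        smp_rmb();              /* order the flag load before the data load */
        return data;
    }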
diff --git a/arch/x86/um/asm/checksum.h b/arch/x86/um/asm/checksum.h
new file mode 100644
index 00000000..b6efe238
--- /dev/null
+++ b/arch/x86/um/asm/checksum.h
@@ -0,0 +1,10 @@
+#ifndef __UM_CHECKSUM_H
+#define __UM_CHECKSUM_H
+
+#ifdef CONFIG_X86_32
+# include "checksum_32.h"
+#else
+# include "checksum_64.h"
+#endif
+
+#endif
diff --git a/arch/x86/um/asm/checksum_32.h b/arch/x86/um/asm/checksum_32.h
new file mode 100644
index 00000000..caab7425
--- /dev/null
+++ b/arch/x86/um/asm/checksum_32.h
@@ -0,0 +1,201 @@
+/*
+ * Licensed under the GPL
+ */
+
+#ifndef __UM_SYSDEP_CHECKSUM_H
+#define __UM_SYSDEP_CHECKSUM_H
+
+#include "linux/in6.h"
+#include "linux/string.h"
+
+/*
+ * computes the checksum of a memory block at buff, length len,
+ * and adds in "sum" (32-bit)
+ *
+ * returns a 32-bit number suitable for feeding into itself
+ * or csum_tcpudp_magic
+ *
+ * this function must be called with even lengths, except
+ * for the last fragment, which may be odd
+ *
+ * it's best to have buff aligned on a 32-bit boundary
+ */
+__wsum csum_partial(const void *buff, int len, __wsum sum);
+
+/*
+ * Note: when you get a NULL pointer exception here this means someone
+ * passed in an incorrect kernel address to one of these functions.
+ *
+ * If you use these functions directly please don't forget the
+ * access_ok().
+ */
+
+static __inline__
+__wsum csum_partial_copy_nocheck(const void *src, void *dst,
+ int len, __wsum sum)
+{
+ memcpy(dst, src, len);
+ return csum_partial(dst, len, sum);
+}
+
+/*
+ * the same as csum_partial, but copies from src while it
+ * checksums, and handles user-space pointer exceptions correctly, when needed.
+ *
+ * here even more important to align src and dst on a 32-bit (or even
+ * better 64-bit) boundary
+ */
+
+static __inline__
+__wsum csum_partial_copy_from_user(const void __user *src, void *dst,
+ int len, __wsum sum, int *err_ptr)
+{
+ if (copy_from_user(dst, src, len)) {
+ *err_ptr = -EFAULT;
+ return (__force __wsum)-1;
+ }
+
+ return csum_partial(dst, len, sum);
+}
+
+/*
+ * This is a version of ip_compute_csum() optimized for IP headers,
+ * which always checksum on 4 octet boundaries.
+ *
+ * By Jorge Cwik <jorge@laser.satlink.net>, adapted for linux by
+ * Arnt Gulbrandsen.
+ */
+static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
+{
+ unsigned int sum;
+
+ __asm__ __volatile__(
+ "movl (%1), %0 ;\n"
+ "subl $4, %2 ;\n"
+ "jbe 2f ;\n"
+ "addl 4(%1), %0 ;\n"
+ "adcl 8(%1), %0 ;\n"
+ "adcl 12(%1), %0 ;\n"
+"1: adcl 16(%1), %0 ;\n"
+ "lea 4(%1), %1 ;\n"
+ "decl %2 ;\n"
+ "jne 1b ;\n"
+ "adcl $0, %0 ;\n"
+ "movl %0, %2 ;\n"
+ "shrl $16, %0 ;\n"
+ "addw %w2, %w0 ;\n"
+ "adcl $0, %0 ;\n"
+ "notl %0 ;\n"
+"2: ;\n"
+	/* Since the input registers which are loaded with iph and ihl
+ are modified, we must also specify them as outputs, or gcc
+ will assume they contain their original values. */
+ : "=r" (sum), "=r" (iph), "=r" (ihl)
+ : "1" (iph), "2" (ihl)
+ : "memory");
+ return (__force __sum16)sum;
+}
+
+/*
+ * Fold a partial checksum
+ */
+
+static inline __sum16 csum_fold(__wsum sum)
+{
+ __asm__(
+ "addl %1, %0 ;\n"
+ "adcl $0xffff, %0 ;\n"
+ : "=r" (sum)
+ : "r" ((__force u32)sum << 16),
+ "0" ((__force u32)sum & 0xffff0000)
+ );
+ return (__force __sum16)(~(__force u32)sum >> 16);
+}
+
+static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
+ unsigned short len,
+ unsigned short proto,
+ __wsum sum)
+{
+ __asm__(
+ "addl %1, %0 ;\n"
+ "adcl %2, %0 ;\n"
+ "adcl %3, %0 ;\n"
+ "adcl $0, %0 ;\n"
+ : "=r" (sum)
+ : "g" (daddr), "g"(saddr), "g"((len + proto) << 8), "0"(sum));
+ return sum;
+}
+
+/*
+ * computes the checksum of the TCP/UDP pseudo-header
+ * returns a 16-bit checksum, already complemented
+ */
+static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
+ unsigned short len,
+ unsigned short proto,
+ __wsum sum)
+{
+	return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
+}
+
+/*
+ * this routine is used for miscellaneous IP-like checksums, mainly
+ * in icmp.c
+ */
+
+static inline __sum16 ip_compute_csum(const void *buff, int len)
+{
+ return csum_fold (csum_partial(buff, len, 0));
+}
+
+#define _HAVE_ARCH_IPV6_CSUM
+static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+ const struct in6_addr *daddr,
+ __u32 len, unsigned short proto,
+ __wsum sum)
+{
+ __asm__(
+ "addl 0(%1), %0 ;\n"
+ "adcl 4(%1), %0 ;\n"
+ "adcl 8(%1), %0 ;\n"
+ "adcl 12(%1), %0 ;\n"
+ "adcl 0(%2), %0 ;\n"
+ "adcl 4(%2), %0 ;\n"
+ "adcl 8(%2), %0 ;\n"
+ "adcl 12(%2), %0 ;\n"
+ "adcl %3, %0 ;\n"
+ "adcl %4, %0 ;\n"
+ "adcl $0, %0 ;\n"
+ : "=&r" (sum)
+ : "r" (saddr), "r" (daddr),
+ "r"(htonl(len)), "r"(htonl(proto)), "0"(sum));
+
+ return csum_fold(sum);
+}
+
+/*
+ * Copy and checksum to user
+ */
+#define HAVE_CSUM_COPY_USER
+static __inline__ __wsum csum_and_copy_to_user(const void *src,
+ void __user *dst,
+ int len, __wsum sum, int *err_ptr)
+{
+ if (access_ok(VERIFY_WRITE, dst, len)) {
+ if (copy_to_user(dst, src, len)) {
+ *err_ptr = -EFAULT;
+ return (__force __wsum)-1;
+ }
+
+ return csum_partial(src, len, sum);
+ }
+
+ if (len)
+ *err_ptr = -EFAULT;
+
+ return (__force __wsum)-1; /* invalid checksum */
+}
+
+#endif
+
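All of these helpers compute variants of the RFC 1071 one's-complement sum; the adc-heavy assembly just exploits the x86 carry flag. A self-contained userspace sketch of the same algorithm (illustrative, not the kernel's csum_partial):

    #include <stdint.h>
    #include <stdio.h>

    static uint16_t csum(const void *buf, int len)
    {
        const uint16_t *p = buf;
        uint32_t sum = 0;

        while (len > 1) {
            sum += *p++;
            len -= 2;
        }
        if (len)                                /* trailing odd byte */
            sum += *(const uint8_t *)p;
        sum = (sum & 0xffff) + (sum >> 16);     /* fold carries */
        sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
    }

    int main(void)
    {
        uint8_t data[] = { 0x45, 0x00, 0x00, 0x1c };
        printf("0x%04x\n", csum(data, sizeof(data))); /* 0xe3ba on little-endian */
        return 0;
    }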
diff --git a/arch/x86/um/asm/checksum_64.h b/arch/x86/um/asm/checksum_64.h
new file mode 100644
index 00000000..a5be9031
--- /dev/null
+++ b/arch/x86/um/asm/checksum_64.h
@@ -0,0 +1,144 @@
+/*
+ * Licensed under the GPL
+ */
+
+#ifndef __UM_SYSDEP_CHECKSUM_H
+#define __UM_SYSDEP_CHECKSUM_H
+
+#include "linux/string.h"
+#include "linux/in6.h"
+#include "asm/uaccess.h"
+
+extern __wsum csum_partial(const void *buff, int len, __wsum sum);
+
+/*
+ * Note: when you get a NULL pointer exception here this means someone
+ * passed in an incorrect kernel address to one of these functions.
+ *
+ * If you use these functions directly please don't forget the
+ * access_ok().
+ */
+
+static __inline__
+__wsum csum_partial_copy_nocheck(const void *src, void *dst,
+ int len, __wsum sum)
+{
+ memcpy(dst, src, len);
+ return(csum_partial(dst, len, sum));
+}
+
+static __inline__
+__wsum csum_partial_copy_from_user(const void __user *src,
+ void *dst, int len, __wsum sum,
+ int *err_ptr)
+{
+ if (copy_from_user(dst, src, len)) {
+ *err_ptr = -EFAULT;
+ return (__force __wsum)-1;
+ }
+ return csum_partial(dst, len, sum);
+}
+
+/**
+ * csum_fold - Fold and invert a 32bit checksum.
+ * @sum: 32bit unfolded sum
+ *
+ * Fold a 32bit running checksum to 16bit and invert it. This is usually
+ * the last step before putting a checksum into a packet.
+ * Make sure not to mix with 64bit checksums.
+ */
+static inline __sum16 csum_fold(__wsum sum)
+{
+ __asm__(
+ " addl %1,%0\n"
+ " adcl $0xffff,%0"
+ : "=r" (sum)
+ : "r" ((__force u32)sum << 16),
+ "0" ((__force u32)sum & 0xffff0000)
+ );
+ return (__force __sum16)(~(__force u32)sum >> 16);
+}
+
+/**
+ * csum_tcpudp_nofold - Compute an IPv4 pseudo header checksum.
+ * @saddr: source address
+ * @daddr: destination address
+ * @len: length of packet
+ * @proto: ip protocol of packet
+ * @sum: initial sum to be added in (32bit unfolded)
+ *
+ * Returns the pseudo header checksum of the input data. The result
+ * is 32bit unfolded.
+ */
+static inline __wsum
+csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
+ unsigned short proto, __wsum sum)
+{
+ asm(" addl %1, %0\n"
+ " adcl %2, %0\n"
+ " adcl %3, %0\n"
+ " adcl $0, %0\n"
+ : "=r" (sum)
+ : "g" (daddr), "g" (saddr), "g" ((len + proto) << 8), "0" (sum));
+ return sum;
+}
+
+/*
+ * computes the checksum of the TCP/UDP pseudo-header
+ * returns a 16-bit checksum, already complemented
+ */
+static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
+ unsigned short len,
+ unsigned short proto,
+ __wsum sum)
+{
+	return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
+}
+
+/**
+ * ip_fast_csum - Compute the IPv4 header checksum efficiently.
+ * @iph: ipv4 header
+ * @ihl: length of header / 4
+ */
+static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
+{
+ unsigned int sum;
+
+ asm( " movl (%1), %0\n"
+ " subl $4, %2\n"
+ " jbe 2f\n"
+ " addl 4(%1), %0\n"
+ " adcl 8(%1), %0\n"
+ " adcl 12(%1), %0\n"
+ "1: adcl 16(%1), %0\n"
+ " lea 4(%1), %1\n"
+ " decl %2\n"
+ " jne 1b\n"
+ " adcl $0, %0\n"
+ " movl %0, %2\n"
+ " shrl $16, %0\n"
+ " addw %w2, %w0\n"
+ " adcl $0, %0\n"
+ " notl %0\n"
+ "2:"
+	/* Since the input registers which are loaded with iph and ihl
+ are modified, we must also specify them as outputs, or gcc
+ will assume they contain their original values. */
+ : "=r" (sum), "=r" (iph), "=r" (ihl)
+ : "1" (iph), "2" (ihl)
+ : "memory");
+ return (__force __sum16)sum;
+}
+
+static inline unsigned add32_with_carry(unsigned a, unsigned b)
+{
+ asm("addl %2,%0\n\t"
+ "adcl $0,%0"
+ : "=r" (a)
+ : "0" (a), "r" (b));
+ return a;
+}
+
+extern __sum16 ip_compute_csum(const void *buff, int len);
+
+#endif
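
For reference, the csum_fold() asm above is equivalent to the classic end-around-carry fold; a portable sketch:

    #include <stdint.h>

    static uint16_t csum_fold_c(uint32_t sum)
    {
        sum = (sum & 0xffff) + (sum >> 16); /* fold the high half in */
        sum = (sum & 0xffff) + (sum >> 16); /* absorb any new carry  */
        return (uint16_t)~sum;
    }

add32_with_carry() is typically used first to collapse a 64-bit running sum to 32 bits, as in add32_with_carry(sum >> 32, sum & 0xffffffff), before handing the result to csum_fold().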
diff --git a/arch/x86/um/asm/desc.h b/arch/x86/um/asm/desc.h
new file mode 100644
index 00000000..4ec34a51
--- /dev/null
+++ b/arch/x86/um/asm/desc.h
@@ -0,0 +1,16 @@
+#ifndef __UM_DESC_H
+#define __UM_DESC_H
+
+/* Taken from asm-i386/desc.h; it's the only thing we need here. The
+ * rest wouldn't compile, and has never been used. */
+#define LDT_empty(info) (\
+ (info)->base_addr == 0 && \
+ (info)->limit == 0 && \
+ (info)->contents == 0 && \
+ (info)->read_exec_only == 1 && \
+ (info)->seg_32bit == 0 && \
+ (info)->limit_in_pages == 0 && \
+ (info)->seg_not_present == 1 && \
+ (info)->useable == 0 )
+
+#endif
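
LDT_empty() matches the "empty" descriptor convention of modify_ldt(2)/set_thread_area(2): every field zero except read_exec_only and seg_not_present. A userspace sketch that builds such a descriptor (helper name invented):

    #include <asm/ldt.h>    /* struct user_desc */
    #include <string.h>

    static struct user_desc make_empty_desc(int entry)
    {
        struct user_desc u;

        memset(&u, 0, sizeof(u));
        u.entry_number    = entry;
        u.read_exec_only  = 1;  /* the two bits LDT_empty() expects set */
        u.seg_not_present = 1;
        return u;
    }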
diff --git a/arch/x86/um/asm/elf.h b/arch/x86/um/asm/elf.h
new file mode 100644
index 00000000..f3b0633b
--- /dev/null
+++ b/arch/x86/um/asm/elf.h
@@ -0,0 +1,221 @@
+/*
+ * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
+ * Licensed under the GPL
+ */
+#ifndef __UM_ELF_X86_H
+#define __UM_ELF_X86_H
+
+#include <asm/user.h>
+#include "skas.h"
+
+#ifdef CONFIG_X86_32
+
+#define R_386_NONE 0
+#define R_386_32 1
+#define R_386_PC32 2
+#define R_386_GOT32 3
+#define R_386_PLT32 4
+#define R_386_COPY 5
+#define R_386_GLOB_DAT 6
+#define R_386_JMP_SLOT 7
+#define R_386_RELATIVE 8
+#define R_386_GOTOFF 9
+#define R_386_GOTPC 10
+#define R_386_NUM 11
+
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ */
+#define elf_check_arch(x) \
+ (((x)->e_machine == EM_386) || ((x)->e_machine == EM_486))
+
+#define ELF_CLASS ELFCLASS32
+#define ELF_DATA ELFDATA2LSB
+#define ELF_ARCH EM_386
+
+#define ELF_PLAT_INIT(regs, load_addr) do { \
+ PT_REGS_EBX(regs) = 0; \
+ PT_REGS_ECX(regs) = 0; \
+ PT_REGS_EDX(regs) = 0; \
+ PT_REGS_ESI(regs) = 0; \
+ PT_REGS_EDI(regs) = 0; \
+ PT_REGS_EBP(regs) = 0; \
+ PT_REGS_EAX(regs) = 0; \
+} while (0)
+
+/* Shamelessly stolen from include/asm-i386/elf.h */
+
+#define ELF_CORE_COPY_REGS(pr_reg, regs) do { \
+ pr_reg[0] = PT_REGS_EBX(regs); \
+ pr_reg[1] = PT_REGS_ECX(regs); \
+ pr_reg[2] = PT_REGS_EDX(regs); \
+ pr_reg[3] = PT_REGS_ESI(regs); \
+ pr_reg[4] = PT_REGS_EDI(regs); \
+ pr_reg[5] = PT_REGS_EBP(regs); \
+ pr_reg[6] = PT_REGS_EAX(regs); \
+ pr_reg[7] = PT_REGS_DS(regs); \
+ pr_reg[8] = PT_REGS_ES(regs); \
+ /* fake once used fs and gs selectors? */ \
+ pr_reg[9] = PT_REGS_DS(regs); \
+ pr_reg[10] = PT_REGS_DS(regs); \
+ pr_reg[11] = PT_REGS_SYSCALL_NR(regs); \
+ pr_reg[12] = PT_REGS_IP(regs); \
+ pr_reg[13] = PT_REGS_CS(regs); \
+ pr_reg[14] = PT_REGS_EFLAGS(regs); \
+ pr_reg[15] = PT_REGS_SP(regs); \
+ pr_reg[16] = PT_REGS_SS(regs); \
+} while (0)
+
+extern char * elf_aux_platform;
+#define ELF_PLATFORM (elf_aux_platform)
+
+extern unsigned long vsyscall_ehdr;
+extern unsigned long vsyscall_end;
+extern unsigned long __kernel_vsyscall;
+
+/*
+ * This is the range that is readable by user mode, and things
+ * acting like user mode such as get_user_pages.
+ */
+#define FIXADDR_USER_START vsyscall_ehdr
+#define FIXADDR_USER_END vsyscall_end
+
+
+/*
+ * Architecture-neutral AT_ values in 0-17, leave some room
+ * for more of them, start the x86-specific ones at 32.
+ */
+#define AT_SYSINFO 32
+#define AT_SYSINFO_EHDR 33
+
+#define ARCH_DLINFO \
+do { \
+ if ( vsyscall_ehdr ) { \
+ NEW_AUX_ENT(AT_SYSINFO, __kernel_vsyscall); \
+ NEW_AUX_ENT(AT_SYSINFO_EHDR, vsyscall_ehdr); \
+ } \
+} while (0)
+
+#else
+
+/* x86-64 relocation types, taken from asm-x86_64/elf.h */
+#define R_X86_64_NONE 0 /* No reloc */
+#define R_X86_64_64 1 /* Direct 64 bit */
+#define R_X86_64_PC32 2 /* PC relative 32 bit signed */
+#define R_X86_64_GOT32 3 /* 32 bit GOT entry */
+#define R_X86_64_PLT32 4 /* 32 bit PLT address */
+#define R_X86_64_COPY 5 /* Copy symbol at runtime */
+#define R_X86_64_GLOB_DAT 6 /* Create GOT entry */
+#define R_X86_64_JUMP_SLOT 7 /* Create PLT entry */
+#define R_X86_64_RELATIVE 8 /* Adjust by program base */
+#define R_X86_64_GOTPCREL 9 /* 32 bit signed pc relative
+ offset to GOT */
+#define R_X86_64_32 10 /* Direct 32 bit zero extended */
+#define R_X86_64_32S 11 /* Direct 32 bit sign extended */
+#define R_X86_64_16 12 /* Direct 16 bit zero extended */
+#define R_X86_64_PC16 13 /* 16 bit sign extended pc relative */
+#define R_X86_64_8 14 /* Direct 8 bit sign extended */
+#define R_X86_64_PC8 15 /* 8 bit sign extended pc relative */
+
+#define R_X86_64_NUM 16
+
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ */
+#define elf_check_arch(x) \
+ ((x)->e_machine == EM_X86_64)
+
+#define ELF_CLASS ELFCLASS64
+#define ELF_DATA ELFDATA2LSB
+#define ELF_ARCH EM_X86_64
+
+#define ELF_PLAT_INIT(regs, load_addr) do { \
+ PT_REGS_RBX(regs) = 0; \
+ PT_REGS_RCX(regs) = 0; \
+ PT_REGS_RDX(regs) = 0; \
+ PT_REGS_RSI(regs) = 0; \
+ PT_REGS_RDI(regs) = 0; \
+ PT_REGS_RBP(regs) = 0; \
+ PT_REGS_RAX(regs) = 0; \
+ PT_REGS_R8(regs) = 0; \
+ PT_REGS_R9(regs) = 0; \
+ PT_REGS_R10(regs) = 0; \
+ PT_REGS_R11(regs) = 0; \
+ PT_REGS_R12(regs) = 0; \
+ PT_REGS_R13(regs) = 0; \
+ PT_REGS_R14(regs) = 0; \
+ PT_REGS_R15(regs) = 0; \
+} while (0)
+
+#define ELF_CORE_COPY_REGS(pr_reg, _regs) \
+ (pr_reg)[0] = (_regs)->regs.gp[0]; \
+ (pr_reg)[1] = (_regs)->regs.gp[1]; \
+ (pr_reg)[2] = (_regs)->regs.gp[2]; \
+ (pr_reg)[3] = (_regs)->regs.gp[3]; \
+ (pr_reg)[4] = (_regs)->regs.gp[4]; \
+ (pr_reg)[5] = (_regs)->regs.gp[5]; \
+ (pr_reg)[6] = (_regs)->regs.gp[6]; \
+ (pr_reg)[7] = (_regs)->regs.gp[7]; \
+ (pr_reg)[8] = (_regs)->regs.gp[8]; \
+ (pr_reg)[9] = (_regs)->regs.gp[9]; \
+ (pr_reg)[10] = (_regs)->regs.gp[10]; \
+ (pr_reg)[11] = (_regs)->regs.gp[11]; \
+ (pr_reg)[12] = (_regs)->regs.gp[12]; \
+ (pr_reg)[13] = (_regs)->regs.gp[13]; \
+ (pr_reg)[14] = (_regs)->regs.gp[14]; \
+ (pr_reg)[15] = (_regs)->regs.gp[15]; \
+ (pr_reg)[16] = (_regs)->regs.gp[16]; \
+ (pr_reg)[17] = (_regs)->regs.gp[17]; \
+ (pr_reg)[18] = (_regs)->regs.gp[18]; \
+ (pr_reg)[19] = (_regs)->regs.gp[19]; \
+ (pr_reg)[20] = (_regs)->regs.gp[20]; \
+ (pr_reg)[21] = current->thread.arch.fs; \
+ (pr_reg)[22] = 0; \
+ (pr_reg)[23] = 0; \
+ (pr_reg)[24] = 0; \
+ (pr_reg)[25] = 0; \
+ (pr_reg)[26] = 0;
+
+#define ELF_PLATFORM "x86_64"
+
+/* No user-accessible fixmap addresses, i.e. vsyscall */
+#define FIXADDR_USER_START 0
+#define FIXADDR_USER_END 0
+
+#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
+struct linux_binprm;
+extern int arch_setup_additional_pages(struct linux_binprm *bprm,
+ int uses_interp);
+
+extern unsigned long um_vdso_addr;
+#define AT_SYSINFO_EHDR 33
+#define ARCH_DLINFO NEW_AUX_ENT(AT_SYSINFO_EHDR, um_vdso_addr)
+
+#endif
+
+typedef unsigned long elf_greg_t;
+
+#define ELF_NGREG (sizeof (struct user_regs_struct) / sizeof(elf_greg_t))
+typedef elf_greg_t elf_gregset_t[ELF_NGREG];
+
+typedef struct user_i387_struct elf_fpregset_t;
+
+#define task_pt_regs(t) (&(t)->thread.regs)
+
+struct task_struct;
+
+extern int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu);
+
+#define ELF_CORE_COPY_FPREGS(t, fpu) elf_core_copy_fpregs(t, fpu)
+
+#define ELF_EXEC_PAGESIZE 4096
+
+#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
+
+extern long elf_aux_hwcap;
+#define ELF_HWCAP (elf_aux_hwcap)
+
+#define SET_PERSONALITY(ex) do ; while(0)
+#define __HAVE_ARCH_GATE_AREA 1
+
+#endif
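
On the 32-bit side ARCH_DLINFO publishes AT_SYSINFO/AT_SYSINFO_EHDR only when a host vsyscall page was found; on 64-bit it always points AT_SYSINFO_EHDR at the UML vdso. A userspace sketch of reading that aux-vector entry back (assumes glibc's getauxval):

    #include <stdio.h>
    #include <sys/auxv.h>

    int main(void)
    {
        unsigned long vdso = getauxval(AT_SYSINFO_EHDR);
        printf("vdso/vsyscall ELF header at %#lx\n", vdso);
        return 0;
    }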
diff --git a/arch/x86/um/asm/irq_vectors.h b/arch/x86/um/asm/irq_vectors.h
new file mode 100644
index 00000000..272a81e0
--- /dev/null
+++ b/arch/x86/um/asm/irq_vectors.h
@@ -0,0 +1,10 @@
+/*
+ * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
+ * Licensed under the GPL
+ */
+
+#ifndef __UM_IRQ_VECTORS_H
+#define __UM_IRQ_VECTORS_H
+
+#endif
+
diff --git a/arch/x86/um/asm/mm_context.h b/arch/x86/um/asm/mm_context.h
new file mode 100644
index 00000000..4a73d63e
--- /dev/null
+++ b/arch/x86/um/asm/mm_context.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2004 Fujitsu Siemens Computers GmbH
+ * Licensed under the GPL
+ *
+ * Author: Bodo Stroesser <bstroesser@fujitsu-siemens.com>
+ */
+
+#ifndef __ASM_LDT_H
+#define __ASM_LDT_H
+
+#include <linux/mutex.h>
+#include <asm/ldt.h>
+
+extern void ldt_host_info(void);
+
+#define LDT_PAGES_MAX \
+ ((LDT_ENTRIES * LDT_ENTRY_SIZE)/PAGE_SIZE)
+#define LDT_ENTRIES_PER_PAGE \
+ (PAGE_SIZE/LDT_ENTRY_SIZE)
+#define LDT_DIRECT_ENTRIES \
+ ((LDT_PAGES_MAX*sizeof(void *))/LDT_ENTRY_SIZE)
+
+struct ldt_entry {
+ __u32 a;
+ __u32 b;
+};
+
+typedef struct uml_ldt {
+ int entry_count;
+ struct mutex lock;
+ union {
+ struct ldt_entry * pages[LDT_PAGES_MAX];
+ struct ldt_entry entries[LDT_DIRECT_ENTRIES];
+ } u;
+} uml_ldt_t;
+
+#define LDT_entry_a(info) \
+ ((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
+
+#define LDT_entry_b(info) \
+ (((info)->base_addr & 0xff000000) | \
+ (((info)->base_addr & 0x00ff0000) >> 16) | \
+ ((info)->limit & 0xf0000) | \
+ (((info)->read_exec_only ^ 1) << 9) | \
+ ((info)->contents << 10) | \
+ (((info)->seg_not_present ^ 1) << 15) | \
+ ((info)->seg_32bit << 22) | \
+ ((info)->limit_in_pages << 23) | \
+ ((info)->useable << 20) | \
+ 0x7000)
+
+#define _LDT_empty(info) (\
+ (info)->base_addr == 0 && \
+ (info)->limit == 0 && \
+ (info)->contents == 0 && \
+ (info)->read_exec_only == 1 && \
+ (info)->seg_32bit == 0 && \
+ (info)->limit_in_pages == 0 && \
+ (info)->seg_not_present == 1 && \
+ (info)->useable == 0 )
+
+#ifdef CONFIG_X86_64
+#define LDT_empty(info) (_LDT_empty(info) && ((info)->lm == 0))
+#else
+#define LDT_empty(info) (_LDT_empty(info))
+#endif
+
+struct uml_arch_mm_context {
+ uml_ldt_t ldt;
+};
+
+#endif
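
LDT_entry_a()/LDT_entry_b() pack a struct user_desc into the two 32-bit words of an x86 segment descriptor, whose base and limit fields are split across both words. A userspace sketch of the low word's layout (values illustrative):

    #include <asm/ldt.h>    /* struct user_desc */
    #include <stdio.h>

    int main(void)
    {
        struct user_desc info = {
            .base_addr = 0x12345678,
            .limit     = 0xfffff,
        };
        /* same packing as LDT_entry_a(): low 16 base bits over 16 limit bits */
        unsigned int a = ((info.base_addr & 0x0000ffff) << 16) |
                         (info.limit & 0x0ffff);
        printf("word a = %#x\n", a);    /* 0x5678ffff */
        return 0;
    }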
diff --git a/arch/x86/um/asm/module.h b/arch/x86/um/asm/module.h
new file mode 100644
index 00000000..61af80e9
--- /dev/null
+++ b/arch/x86/um/asm/module.h
@@ -0,0 +1,23 @@
+#ifndef __UM_MODULE_H
+#define __UM_MODULE_H
+
+/* UML is simple */
+struct mod_arch_specific
+{
+};
+
+#ifdef CONFIG_X86_32
+
+#define Elf_Shdr Elf32_Shdr
+#define Elf_Sym Elf32_Sym
+#define Elf_Ehdr Elf32_Ehdr
+
+#else
+
+#define Elf_Shdr Elf64_Shdr
+#define Elf_Sym Elf64_Sym
+#define Elf_Ehdr Elf64_Ehdr
+
+#endif
+
+#endif
diff --git a/arch/x86/um/asm/processor.h b/arch/x86/um/asm/processor.h
new file mode 100644
index 00000000..04f82e02
--- /dev/null
+++ b/arch/x86/um/asm/processor.h
@@ -0,0 +1,32 @@
+#ifndef __UM_PROCESSOR_H
+#define __UM_PROCESSOR_H
+
+/* include faultinfo structure */
+#include <sysdep/faultinfo.h>
+
+#ifdef CONFIG_X86_32
+# include "processor_32.h"
+#else
+# include "processor_64.h"
+#endif
+
+#define KSTK_EIP(tsk) KSTK_REG(tsk, HOST_IP)
+#define KSTK_ESP(tsk) KSTK_REG(tsk, HOST_SP)
+#define KSTK_EBP(tsk) KSTK_REG(tsk, HOST_BP)
+
+#define ARCH_IS_STACKGROW(address) \
+ (address + 65536 + 32 * sizeof(unsigned long) >= UPT_SP(&current->thread.regs.regs))
+
+#include <asm/user.h>
+
+/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
+static inline void rep_nop(void)
+{
+ __asm__ __volatile__("rep;nop": : :"memory");
+}
+
+#define cpu_relax() rep_nop()
+
+#include <asm/processor-generic.h>
+
+#endif
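
cpu_relax() emits "rep; nop", which x86 decodes as PAUSE: a hint that the CPU is in a spin-wait, easing pipeline and SMT pressure. A self-contained userspace sketch of the same idiom (C11 atomics; names invented):

    #include <stdatomic.h>

    static atomic_int ready;

    static void spin_until_ready(void)
    {
        while (!atomic_load_explicit(&ready, memory_order_acquire))
            __asm__ __volatile__("rep; nop" ::: "memory");  /* PAUSE */
    }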
diff --git a/arch/x86/um/asm/processor_32.h b/arch/x86/um/asm/processor_32.h
new file mode 100644
index 00000000..6c6689e5
--- /dev/null
+++ b/arch/x86/um/asm/processor_32.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
+ * Licensed under the GPL
+ */
+
+#ifndef __UM_PROCESSOR_I386_H
+#define __UM_PROCESSOR_I386_H
+
+#include <linux/string.h>
+#include <asm/segment.h>
+#include <asm/ldt.h>
+
+extern int host_has_cmov;
+
+struct uml_tls_struct {
+ struct user_desc tls;
+ unsigned flushed:1;
+ unsigned present:1;
+};
+
+struct arch_thread {
+ struct uml_tls_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
+ unsigned long debugregs[8];
+ int debugregs_seq;
+ struct faultinfo faultinfo;
+};
+
+#define INIT_ARCH_THREAD { \
+ .tls_array = { [ 0 ... GDT_ENTRY_TLS_ENTRIES - 1 ] = \
+ { .present = 0, .flushed = 0 } }, \
+ .debugregs = { [ 0 ... 7 ] = 0 }, \
+ .debugregs_seq = 0, \
+ .faultinfo = { 0, 0, 0 } \
+}
+
+static inline void arch_flush_thread(struct arch_thread *thread)
+{
+ /* Clear any TLS still hanging */
+ memset(&thread->tls_array, 0, sizeof(thread->tls_array));
+}
+
+static inline void arch_copy_thread(struct arch_thread *from,
+ struct arch_thread *to)
+{
+ memcpy(&to->tls_array, &from->tls_array, sizeof(from->tls_array));
+}
+
+/*
+ * Default implementation of macro that returns current
+ * instruction pointer ("program counter"). Stolen
+ * from asm-i386/processor.h
+ */
+#define current_text_addr() \
+ ({ void *pc; __asm__("movl $1f,%0\n1:":"=g" (pc)); pc; })
+
+#endif
diff --git a/arch/x86/um/asm/processor_64.h b/arch/x86/um/asm/processor_64.h
new file mode 100644
index 00000000..4b02a845
--- /dev/null
+++ b/arch/x86/um/asm/processor_64.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2003 PathScale, Inc.
+ *
+ * Licensed under the GPL
+ */
+
+#ifndef __UM_PROCESSOR_X86_64_H
+#define __UM_PROCESSOR_X86_64_H
+
+struct arch_thread {
+ unsigned long debugregs[8];
+ int debugregs_seq;
+ unsigned long fs;
+ struct faultinfo faultinfo;
+};
+
+#define INIT_ARCH_THREAD { .debugregs = { [ 0 ... 7 ] = 0 }, \
+ .debugregs_seq = 0, \
+ .fs = 0, \
+ .faultinfo = { 0, 0, 0 } }
+
+static inline void arch_flush_thread(struct arch_thread *thread)
+{
+}
+
+static inline void arch_copy_thread(struct arch_thread *from,
+ struct arch_thread *to)
+{
+ to->fs = from->fs;
+}
+
+#define current_text_addr() \
+ ({ void *pc; __asm__("movq $1f,%0\n1:":"=g" (pc)); pc; })
+
+#endif
diff --git a/arch/x86/um/asm/ptrace.h b/arch/x86/um/asm/ptrace.h
new file mode 100644
index 00000000..c8aca8c5
--- /dev/null
+++ b/arch/x86/um/asm/ptrace.h
@@ -0,0 +1,5 @@
+#ifdef CONFIG_X86_32
+# include "ptrace_32.h"
+#else
+# include "ptrace_64.h"
+#endif
diff --git a/arch/x86/um/asm/ptrace_32.h b/arch/x86/um/asm/ptrace_32.h
new file mode 100644
index 00000000..5d2a5911
--- /dev/null
+++ b/arch/x86/um/asm/ptrace_32.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
+ * Licensed under the GPL
+ */
+
+#ifndef __UM_PTRACE_I386_H
+#define __UM_PTRACE_I386_H
+
+#define HOST_AUDIT_ARCH AUDIT_ARCH_I386
+
+#include "linux/compiler.h"
+#include "asm/ptrace-generic.h"
+
+#define PT_REGS_EAX(r) UPT_EAX(&(r)->regs)
+#define PT_REGS_EBX(r) UPT_EBX(&(r)->regs)
+#define PT_REGS_ECX(r) UPT_ECX(&(r)->regs)
+#define PT_REGS_EDX(r) UPT_EDX(&(r)->regs)
+#define PT_REGS_ESI(r) UPT_ESI(&(r)->regs)
+#define PT_REGS_EDI(r) UPT_EDI(&(r)->regs)
+#define PT_REGS_EBP(r) UPT_EBP(&(r)->regs)
+
+#define PT_REGS_CS(r) UPT_CS(&(r)->regs)
+#define PT_REGS_SS(r) UPT_SS(&(r)->regs)
+#define PT_REGS_DS(r) UPT_DS(&(r)->regs)
+#define PT_REGS_ES(r) UPT_ES(&(r)->regs)
+#define PT_REGS_FS(r) UPT_FS(&(r)->regs)
+#define PT_REGS_GS(r) UPT_GS(&(r)->regs)
+
+#define PT_REGS_EFLAGS(r) UPT_EFLAGS(&(r)->regs)
+
+#define PT_REGS_ORIG_SYSCALL(r) PT_REGS_EAX(r)
+#define PT_REGS_SYSCALL_RET(r) PT_REGS_EAX(r)
+#define PT_FIX_EXEC_STACK(sp) do ; while(0)
+
+#define profile_pc(regs) PT_REGS_IP(regs)
+
+#define user_mode(r) UPT_IS_USER(&(r)->regs)
+
+/*
+ * Forward declaration to avoid including sysdep/tls.h, which causes a
+ * circular include, and compilation failures.
+ */
+struct user_desc;
+
+extern int ptrace_get_thread_area(struct task_struct *child, int idx,
+ struct user_desc __user *user_desc);
+
+extern int ptrace_set_thread_area(struct task_struct *child, int idx,
+ struct user_desc __user *user_desc);
+
+#endif
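
These two hooks back the PTRACE_GET_THREAD_AREA/PTRACE_SET_THREAD_AREA requests, which is how a 32-bit tracer inspects a tracee's TLS descriptors. A hedged userspace sketch of the reading side (helper name invented; error handling elided):

    #include <asm/ldt.h>        /* struct user_desc */
    #include <sys/ptrace.h>
    #include <sys/types.h>

    static long read_tls(pid_t pid, int gdt_index, struct user_desc *out)
    {
        /* tracee must already be ptrace-attached and stopped */
        return ptrace(PTRACE_GET_THREAD_AREA, pid,
                      (void *)(long)gdt_index, out);
    }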
diff --git a/arch/x86/um/asm/ptrace_64.h b/arch/x86/um/asm/ptrace_64.h
new file mode 100644
index 00000000..706a0d80
--- /dev/null
+++ b/arch/x86/um/asm/ptrace_64.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2003 PathScale, Inc.
+ *
+ * Licensed under the GPL
+ */
+
+#ifndef __UM_PTRACE_X86_64_H
+#define __UM_PTRACE_X86_64_H
+
+#include "linux/compiler.h"
+#include "asm/errno.h"
+
+#define __FRAME_OFFSETS /* Needed to get the R* macros */
+#include "asm/ptrace-generic.h"
+
+#define HOST_AUDIT_ARCH AUDIT_ARCH_X86_64
+
+#define PT_REGS_RBX(r) UPT_RBX(&(r)->regs)
+#define PT_REGS_RCX(r) UPT_RCX(&(r)->regs)
+#define PT_REGS_RDX(r) UPT_RDX(&(r)->regs)
+#define PT_REGS_RSI(r) UPT_RSI(&(r)->regs)
+#define PT_REGS_RDI(r) UPT_RDI(&(r)->regs)
+#define PT_REGS_RBP(r) UPT_RBP(&(r)->regs)
+#define PT_REGS_RAX(r) UPT_RAX(&(r)->regs)
+#define PT_REGS_R8(r) UPT_R8(&(r)->regs)
+#define PT_REGS_R9(r) UPT_R9(&(r)->regs)
+#define PT_REGS_R10(r) UPT_R10(&(r)->regs)
+#define PT_REGS_R11(r) UPT_R11(&(r)->regs)
+#define PT_REGS_R12(r) UPT_R12(&(r)->regs)
+#define PT_REGS_R13(r) UPT_R13(&(r)->regs)
+#define PT_REGS_R14(r) UPT_R14(&(r)->regs)
+#define PT_REGS_R15(r) UPT_R15(&(r)->regs)
+
+#define PT_REGS_FS(r) UPT_FS(&(r)->regs)
+#define PT_REGS_GS(r) UPT_GS(&(r)->regs)
+#define PT_REGS_DS(r) UPT_DS(&(r)->regs)
+#define PT_REGS_ES(r) UPT_ES(&(r)->regs)
+#define PT_REGS_SS(r) UPT_SS(&(r)->regs)
+#define PT_REGS_CS(r) UPT_CS(&(r)->regs)
+
+#define PT_REGS_ORIG_RAX(r) UPT_ORIG_RAX(&(r)->regs)
+#define PT_REGS_RIP(r) UPT_IP(&(r)->regs)
+#define PT_REGS_SP(r) UPT_SP(&(r)->regs)
+
+#define PT_REGS_EFLAGS(r) UPT_EFLAGS(&(r)->regs)
+
+/* XXX */
+#define user_mode(r) UPT_IS_USER(&(r)->regs)
+#define PT_REGS_ORIG_SYSCALL(r) PT_REGS_RAX(r)
+#define PT_REGS_SYSCALL_RET(r) PT_REGS_RAX(r)
+
+#define PT_FIX_EXEC_STACK(sp) do ; while(0)
+
+#define profile_pc(regs) PT_REGS_IP(regs)
+
+struct user_desc;
+
+static inline int ptrace_get_thread_area(struct task_struct *child, int idx,
+ struct user_desc __user *user_desc)
+{
+ return -ENOSYS;
+}
+
+static inline int ptrace_set_thread_area(struct task_struct *child, int idx,
+ struct user_desc __user *user_desc)
+{
+ return -ENOSYS;
+}
+
+extern long arch_prctl(struct task_struct *task, int code,
+ unsigned long __user *addr);
+#endif
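
On 64-bit, TLS goes through arch_prctl() instead of the thread-area calls, hence the -ENOSYS stubs above. The userspace counterpart (sketch; error handling elided, glibc has no wrapper so raw syscall(2) is used):

    #include <asm/prctl.h>      /* ARCH_SET_FS */
    #include <sys/syscall.h>
    #include <unistd.h>

    static int set_fs_base(unsigned long addr)
    {
        return syscall(SYS_arch_prctl, ARCH_SET_FS, addr);
    }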
diff --git a/arch/x86/um/asm/required-features.h b/arch/x86/um/asm/required-features.h
new file mode 100644
index 00000000..dfb967b2
--- /dev/null
+++ b/arch/x86/um/asm/required-features.h
@@ -0,0 +1,9 @@
+#ifndef __UM_REQUIRED_FEATURES_H
+#define __UM_REQUIRED_FEATURES_H
+
+/*
+ * Nothing to see, just need something for the i386 and x86_64 asm
+ * headers to include.
+ */
+
+#endif
diff --git a/arch/x86/um/asm/segment.h b/arch/x86/um/asm/segment.h
new file mode 100644
index 00000000..45183fcd
--- /dev/null
+++ b/arch/x86/um/asm/segment.h
@@ -0,0 +1,10 @@
+#ifndef __UM_SEGMENT_H
+#define __UM_SEGMENT_H
+
+extern int host_gdt_entry_tls_min;
+
+#define GDT_ENTRY_TLS_ENTRIES 3
+#define GDT_ENTRY_TLS_MIN host_gdt_entry_tls_min
+#define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
+
+#endif
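
The TLS GDT range starts wherever the host kernel put it, so GDT_ENTRY_TLS_MIN is a runtime variable rather than a constant. Turning a slot into a user-mode segment selector is then the usual index*8 | RPL math (sketch; helper name invented):

    static inline unsigned int tls_selector(int host_gdt_entry_tls_min, int slot)
    {
        return ((host_gdt_entry_tls_min + slot) << 3) | 3; /* index*8 | RPL 3 */
    }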
diff --git a/arch/x86/um/asm/vm-flags.h b/arch/x86/um/asm/vm-flags.h
new file mode 100644
index 00000000..7c297e9e
--- /dev/null
+++ b/arch/x86/um/asm/vm-flags.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2004 Jeff Dike (jdike@addtoit.com)
+ * Copyright 2003 PathScale, Inc.
+ * Licensed under the GPL
+ */
+
+#ifndef __VM_FLAGS_X86_H
+#define __VM_FLAGS_X86_H
+
+#ifdef CONFIG_X86_32
+
+#define VM_DATA_DEFAULT_FLAGS \
+ (VM_READ | VM_WRITE | \
+ ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+#else
+
+#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+#define VM_STACK_DEFAULT_FLAGS (VM_GROWSDOWN | VM_READ | VM_WRITE | \
+ VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+#endif
+#endif
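
READ_IMPLIES_EXEC is a personality(2) bit, so on 32-bit the data-segment default picks up VM_EXEC per process. A userspace sketch that queries it (personality(0xffffffff) reads the current persona without modifying it):

    #include <stdio.h>
    #include <sys/personality.h>

    int main(void)
    {
        unsigned long p = personality(0xffffffff);  /* query only */
        printf("READ_IMPLIES_EXEC is %s\n",
               (p & READ_IMPLIES_EXEC) ? "set" : "clear");
        return 0;
    }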