From 871480933a1c28f8a9fed4c4d34d06c439a7a422 Mon Sep 17 00:00:00 2001
From: Srikant Patnaik
Date: Sun, 11 Jan 2015 12:28:04 +0530
Subject: Moved, renamed, and deleted files

The original directory structure was scattered and unorganized.
These changes reorganize the sources to match the standard kernel tree layout.
---
 arch/xtensa/kernel/Makefile       |   34 +
 arch/xtensa/kernel/align.S        |  455 +++++++++
 arch/xtensa/kernel/asm-offsets.c  |  111 +++
 arch/xtensa/kernel/coprocessor.S  |  335 +++++++
 arch/xtensa/kernel/entry.S        | 1960 +++++++++++++++++++++++++++++++++++++
 arch/xtensa/kernel/head.S         |  244 +++++
 arch/xtensa/kernel/init_task.c    |   31 +
 arch/xtensa/kernel/io.c           |   75 ++
 arch/xtensa/kernel/irq.c          |  151 +++
 arch/xtensa/kernel/module.c       |  192 ++++
 arch/xtensa/kernel/pci-dma.c      |   94 ++
 arch/xtensa/kernel/pci.c          |  384 ++++++++
 arch/xtensa/kernel/platform.c     |   47 +
 arch/xtensa/kernel/process.c      |  336 +++++++
 arch/xtensa/kernel/ptrace.c       |  346 +++++++
 arch/xtensa/kernel/setup.c        |  480 +++++++++
 arch/xtensa/kernel/signal.c       |  565 +++++++++++
 arch/xtensa/kernel/syscall.c      |   57 ++
 arch/xtensa/kernel/time.c         |  117 +++
 arch/xtensa/kernel/traps.c        |  527 ++++++++++
 arch/xtensa/kernel/vectors.S      |  468 +++++++++
 arch/xtensa/kernel/vmlinux.lds.S  |  272 +++++
 arch/xtensa/kernel/xtensa_ksyms.c |   97 ++
 23 files changed, 7378 insertions(+)
 create mode 100644 arch/xtensa/kernel/Makefile
 create mode 100644 arch/xtensa/kernel/align.S
 create mode 100644 arch/xtensa/kernel/asm-offsets.c
 create mode 100644 arch/xtensa/kernel/coprocessor.S
 create mode 100644 arch/xtensa/kernel/entry.S
 create mode 100644 arch/xtensa/kernel/head.S
 create mode 100644 arch/xtensa/kernel/init_task.c
 create mode 100644 arch/xtensa/kernel/io.c
 create mode 100644 arch/xtensa/kernel/irq.c
 create mode 100644 arch/xtensa/kernel/module.c
 create mode 100644 arch/xtensa/kernel/pci-dma.c
 create mode 100644 arch/xtensa/kernel/pci.c
 create mode 100644 arch/xtensa/kernel/platform.c
 create mode 100644 arch/xtensa/kernel/process.c
 create mode 100644 arch/xtensa/kernel/ptrace.c
 create mode 100644 arch/xtensa/kernel/setup.c
 create mode 100644 arch/xtensa/kernel/signal.c
 create mode 100644 arch/xtensa/kernel/syscall.c
 create mode 100644 arch/xtensa/kernel/time.c
 create mode 100644 arch/xtensa/kernel/traps.c
 create mode 100644 arch/xtensa/kernel/vectors.S
 create mode 100644 arch/xtensa/kernel/vmlinux.lds.S
 create mode 100644 arch/xtensa/kernel/xtensa_ksyms.c


diff --git a/arch/xtensa/kernel/Makefile b/arch/xtensa/kernel/Makefile
new file mode 100644
index 00000000..2d2728b3
--- /dev/null
+++ b/arch/xtensa/kernel/Makefile
@@ -0,0 +1,34 @@
+#
+# Makefile for the Linux/Xtensa kernel.
+#
+
+extra-y := head.o vmlinux.lds
+
+obj-y := align.o entry.o irq.o coprocessor.o process.o ptrace.o \
+	 setup.o signal.o syscall.o time.o traps.o vectors.o platform.o  \
+	 pci-dma.o init_task.o io.o
+
+obj-$(CONFIG_KGDB) += xtensa-stub.o
+obj-$(CONFIG_PCI) += pci.o
+obj-$(CONFIG_MODULES) += xtensa_ksyms.o module.o
+
+# In the Xtensa architecture, the assembler generates literals, which must
+# precede the L32R instructions that reference them by less than 256 kB.
+# Therefore, the .text and .literal sections must be combined in
+# parentheses in the linker script, such as: *(.literal .text).
+#
+# We need to post-process the generated vmlinux.lds scripts to convert
+# *(xxx.text) to  *(xxx.literal xxx.text) for the following text sections:
+#   .text .ref.text .*init.text .*exit.text .text.*
+#
+# Replicate rules in scripts/Makefile.build
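+#
+# For example, the sed rules below rewrite generated linker-script lines
+# such as (examples derived from the two patterns):
+#   *(.init.text)      ->  *(.init.literal .init.text)
+#   *(.text.unlikely)  ->  *(.text.unlikely.literal .text.unlikely)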
+
+sed-y = -e 's/\*(\(\.[a-z]*it\|\.ref\|\)\.text)/*(\1.literal \1.text)/g'    \
+	-e 's/\*(\(\.text\.[a-z]*\))/*(\1.literal \1)/g'
+
+quiet_cmd__cpp_lds_S = LDS     $@
+      cmd__cpp_lds_S = $(CPP) $(cpp_flags) -P -C -Uxtensa -D__ASSEMBLY__ $< \
+                       | sed $(sed-y) >$@
+
+$(obj)/vmlinux.lds: $(src)/vmlinux.lds.S FORCE
+	$(call if_changed_dep,_cpp_lds_S)
diff --git a/arch/xtensa/kernel/align.S b/arch/xtensa/kernel/align.S
new file mode 100644
index 00000000..33d6e9d2
--- /dev/null
+++ b/arch/xtensa/kernel/align.S
@@ -0,0 +1,455 @@
+/*
+ * arch/xtensa/kernel/align.S
+ *
+ * Handle unalignment exceptions in kernel space.
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License.  See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica, Inc.
+ *
+ * Rewritten by Chris Zankel <chris@zankel.net>
+ *
+ * Based on work from Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
+ * and Marc Gauthier <marc@tensilica.com, marc@alimni.uwaterloo.ca>
+ */
+
+#include <linux/linkage.h>
+#include <asm/current.h>
+#include <asm/asm-offsets.h>
+#include <asm/processor.h>
+
+#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
+
+/*  First-level exception handler for unaligned exceptions.
+ *
+ *  Note: This handler works only for kernel exceptions.  Unaligned user
+ *        access should get a seg fault.
+ */
+
+/* Big and little endian 16-bit values are located in
+ * different halves of a register.  HWORD_START helps to
+ * abstract the notion of extracting a 16-bit value from a
+ * register.
+ * We also have to define new shifting instructions because
+ * lsb and msb are on 'opposite' ends in a register for
+ * different endian machines.
+ *
+ * Assume a memory region in ascending address:
+ *   	0 1 2 3|4 5 6 7
+ *
+ * When loading one word into a register, the content of that register is:
+ *  LE	3 2 1 0, 7 6 5 4
+ *  BE  0 1 2 3, 4 5 6 7
+ *
+ * Masking the bits of the higher/lower address means:
+ *  LE  X X 0 0, 0 0 X X
+ *  BE	0 0 X X, X X 0 0
+ *
+ * Shifting to higher/lower addresses, means:
+ *  LE  shift left / shift right
+ *  BE  shift right / shift left
+ *
+ * Extracting 16 bits from a 32 bit reg. value to higher/lower address means:
+ *  LE  mask 0 0 X X / shift left
+ *  BE  shift left / mask 0 0 X X
+ */
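+
+/* Example: bytes 0x11 0x22 0x33 0x44 at ascending addresses load as the
+ * register value 0x44332211 on a little endian core and as 0x11223344 on
+ * a big endian one, which is what the diagram above abbreviates.
+ */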
+
+#define UNALIGNED_USER_EXCEPTION
+
+#if XCHAL_HAVE_BE
+
+#define HWORD_START	16
+#define	INSN_OP0	28
+#define	INSN_T		24
+#define	INSN_OP1	16
+
+.macro __src_b	r, w0, w1;	src	\r, \w0, \w1;	.endm
+.macro __ssa8	r;		ssa8b	\r;		.endm
+.macro __ssa8r	r;		ssa8l	\r;		.endm
+.macro __sh	r, s;		srl	\r, \s;		.endm
+.macro __sl	r, s;		sll	\r, \s;		.endm
+.macro __exth	r, s;		extui	\r, \s, 0, 16;	.endm
+.macro __extl	r, s;		slli	\r, \s, 16;	.endm
+
+#else
+
+#define HWORD_START	0
+#define	INSN_OP0	0
+#define	INSN_T		4
+#define	INSN_OP1	12
+
+.macro __src_b	r, w0, w1;	src	\r, \w1, \w0;	.endm
+.macro __ssa8	r;		ssa8l	\r;		.endm
+.macro __ssa8r	r;		ssa8b	\r;		.endm
+.macro __sh	r, s;		sll	\r, \s;		.endm
+.macro __sl	r, s;		srl	\r, \s;		.endm
+.macro __exth	r, s;		slli	\r, \s, 16;	.endm
+.macro __extl	r, s;		extui	\r, \s, 0, 16;	.endm
+
+#endif
+
+/*
+ *	xxxx xxxx = imm8 field
+ *	     yyyy = imm4 field
+ *	     ssss = s field
+ *	     tttt = t field
+ *
+ *	       		 16		    0
+ *		          -------------------
+ *	L32I.N		  yyyy ssss tttt 1000
+ *	S32I.N	          yyyy ssss tttt 1001
+ *
+ *	       23			    0
+ *		-----------------------------
+ *	res	          0000           0010
+ *	L16UI	xxxx xxxx 0001 ssss tttt 0010
+ *	L32I	xxxx xxxx 0010 ssss tttt 0010
+ *	XXX	          0011 ssss tttt 0010
+ *	XXX	          0100 ssss tttt 0010
+ *	S16I	xxxx xxxx 0101 ssss tttt 0010
+ *	S32I	xxxx xxxx 0110 ssss tttt 0010
+ *	XXX	          0111 ssss tttt 0010
+ *	XXX	          1000 ssss tttt 0010
+ *	L16SI	xxxx xxxx 1001 ssss tttt 0010
+ *	XXX	          1010           0010
+ *      **L32AI	xxxx xxxx 1011 ssss tttt 0010 unsupported
+ *	XXX	          1100           0010
+ *	XXX	          1101           0010
+ *	XXX	          1110           0010
+ *	**S32RI	xxxx xxxx 1111 ssss tttt 0010 unsupported
+ *		-----------------------------
+ *                           ^         ^    ^
+ *    sub-opcode (NIBBLE_R) -+         |    |
+ *       t field (NIBBLE_T) -----------+    |
+ *  major opcode (NIBBLE_OP0) --------------+
+ */
+
+#define OP0_L32I_N	0x8		/* load immediate narrow */
+#define OP0_S32I_N	0x9		/* store immediate narrow */
+#define OP1_SI_MASK	0x4		/* OP1 bit set for stores */
+#define OP1_SI_BIT	2		/* OP1 bit number for stores */
+
+#define OP1_L32I	0x2
+#define OP1_L16UI	0x1
+#define OP1_L16SI	0x9
+#define OP1_L32AI	0xb
+
+#define OP1_S32I	0x6
+#define OP1_S16I	0x5
+#define OP1_S32RI	0xf
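+
+/* Decode sketch in C-like pseudocode (illustration only; LE bit
+ * positions shown, the INSN_* defines abstract the BE layout):
+ *
+ *	op0 = insn & 0xf;			  // INSN_OP0 nibble
+ *	if (op0 == OP0_L32I_N || op0 == OP0_S32I_N)
+ *		...;				  // 16-bit narrow forms
+ *	else
+ *		op1 = (insn >> INSN_OP1) & 0xf;	  // L16UI/L32I/S16I/...
+ */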
+
+/*
+ * Entry condition:
+ *
+ *   a0:	trashed, original value saved on stack (PT_AREG0)
+ *   a1:	a1
+ *   a2:	new stack pointer, original in DEPC
+ *   a3:	dispatch table
+ *   depc:	a2, original value saved on stack (PT_DEPC)
+ *   excsave_1:	a3
+ *
+ *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
+ *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
+ */
+
+
+ENTRY(fast_unaligned)
+
+	/* Note: We don't expect the address to be aligned on a word
+	 *       boundary; the processor raised this exception precisely
+	 *       because the access is unaligned, so an aligned address
+	 *       here would indicate a hardware fault.
+	 */
+
+	/* Save some working register */
+
+	s32i	a4, a2, PT_AREG4
+	s32i	a5, a2, PT_AREG5
+	s32i	a6, a2, PT_AREG6
+	s32i	a7, a2, PT_AREG7
+	s32i	a8, a2, PT_AREG8
+
+	rsr	a0, DEPC
+	xsr	a3, EXCSAVE_1
+	s32i	a0, a2, PT_AREG2
+	s32i	a3, a2, PT_AREG3
+
+	/* Keep value of SAR in a0 */
+
+	rsr	a0, SAR
+	rsr	a8, EXCVADDR		# load unaligned memory address
+
+	/* Now, identify one of the following load/store instructions.
+	 *
+	 * The only possible danger of a double exception on the
+	 * following l32i instructions is kernel code in vmalloc
+	 * memory. The processor was just executing at the EPC_1
+	 * address, and indeed, already fetched the instruction.  That
+	 * guarantees a TLB mapping, which hasn't been replaced by
+	 * this unaligned exception handler that uses only static TLB
+	 * mappings. However, high-level interrupt handlers might
+	 * modify TLB entries, so for the generic case, we register a
+	 * TABLE_FIXUP handler here, too.
+	 */
+
+	/* a3...a6 saved on stack, a2 = SP */
+
+	/* Extract the instruction that caused the unaligned access. */
+
+	rsr	a7, EPC_1	# load exception address
+	movi	a3, ~3
+	and	a3, a3, a7	# mask lower bits
+
+	l32i	a4, a3, 0	# load 2 words
+	l32i	a5, a3, 4
+
+	__ssa8	a7
+	__src_b	a4, a4, a5	# a4 has the instruction
+
+	/* Analyze the instruction (load or store?). */
+
+	extui	a5, a4, INSN_OP0, 4	# get insn.op0 nibble
+
+#if XCHAL_HAVE_DENSITY
+	_beqi	a5, OP0_L32I_N, .Lload	# L32I.N, jump
+	addi	a6, a5, -OP0_S32I_N
+	_beqz	a6, .Lstore		# S32I.N, do a store
+#endif
+	/* 'store indicator bit' not set, jump */
+	_bbci.l	a4, OP1_SI_BIT + INSN_OP1, .Lload
+
+	/* Store: Jump to table entry to get the value in the source register.*/
+
+.Lstore:movi	a5, .Lstore_table	# table
+	extui	a6, a4, INSN_T, 4	# get source register
+	addx8	a5, a6, a5
+	jx	a5			# jump into table
+
+	/* Invalid instruction, CRITICAL! */
+.Linvalid_instruction_load:
+	j	.Linvalid_instruction
+
+	/* Load: Load memory address. */
+
+.Lload: movi	a3, ~3
+	and	a3, a3, a8		# align memory address
+
+	__ssa8	a8
+#ifdef UNALIGNED_USER_EXCEPTION
+	addi	a3, a3, 8
+	l32e	a5, a3, -8
+	l32e	a6, a3, -4
+#else
+	l32i	a5, a3, 0
+	l32i	a6, a3, 4
+#endif
+	__src_b	a3, a5, a6		# a3 has the data word
+
+#if XCHAL_HAVE_DENSITY
+	addi	a7, a7, 2		# increment PC (assume 16-bit insn)
+
+	extui	a5, a4, INSN_OP0, 4
+	_beqi	a5, OP0_L32I_N, 1f	# l32i.n: jump
+
+	addi	a7, a7, 1
+#else
+	addi	a7, a7, 3
+#endif
+
+	extui	a5, a4, INSN_OP1, 4
+	_beqi	a5, OP1_L32I, 1f	# l32i: jump
+
+	extui	a3, a3, 0, 16		# extract lower 16 bits
+	_beqi	a5, OP1_L16UI, 1f
+	addi	a5, a5, -OP1_L16SI
+	_bnez	a5, .Linvalid_instruction_load
+
+	/* sign extend value */
+
+	slli	a3, a3, 16
+	srai	a3, a3, 16
+
+	/* Set target register. */
+
+1:
+
+#if XCHAL_HAVE_LOOPS
+	rsr	a5, LEND		# check if we reached LEND
+	bne	a7, a5, 1f
+	rsr	a5, LCOUNT		# and LCOUNT != 0
+	beqz	a5, 1f
+	addi	a5, a5, -1		# decrement LCOUNT and set
+	rsr	a7, LBEG		# set PC to LBEGIN
+	wsr	a5, LCOUNT
+#endif
+
+1:	wsr	a7, EPC_1		# skip load instruction
+	extui	a4, a4, INSN_T, 4	# extract target register
+	movi	a5, .Lload_table
+	addx8	a4, a4, a5
+	jx	a4			# jump to entry for target register
+
+	.align	8
+.Lload_table:
+	s32i	a3, a2, PT_AREG0;	_j .Lexit;	.align 8
+	mov	a1, a3;			_j .Lexit;	.align 8 # fishy??
+	s32i	a3, a2, PT_AREG2;	_j .Lexit;	.align 8
+	s32i	a3, a2, PT_AREG3;	_j .Lexit;	.align 8
+	s32i	a3, a2, PT_AREG4;	_j .Lexit;	.align 8
+	s32i	a3, a2, PT_AREG5;	_j .Lexit;	.align 8
+	s32i	a3, a2, PT_AREG6;	_j .Lexit;	.align 8
+	s32i	a3, a2, PT_AREG7;	_j .Lexit;	.align 8
+	s32i	a3, a2, PT_AREG8;	_j .Lexit;	.align 8
+	mov	a9, a3		;	_j .Lexit;	.align 8
+	mov	a10, a3		;	_j .Lexit;	.align 8
+	mov	a11, a3		;	_j .Lexit;	.align 8
+	mov	a12, a3		;	_j .Lexit;	.align 8
+	mov	a13, a3		;	_j .Lexit;	.align 8
+	mov	a14, a3		;	_j .Lexit;	.align 8
+	mov	a15, a3		;	_j .Lexit;	.align 8
+
+.Lstore_table:
+	l32i	a3, a2, PT_AREG0;	_j 1f;	.align 8
+	mov	a3, a1;			_j 1f;	.align 8	# fishy??
+	l32i	a3, a2, PT_AREG2;	_j 1f;	.align 8
+	l32i	a3, a2, PT_AREG3;	_j 1f;	.align 8
+	l32i	a3, a2, PT_AREG4;	_j 1f;	.align 8
+	l32i	a3, a2, PT_AREG5;	_j 1f;	.align 8
+	l32i	a3, a2, PT_AREG6;	_j 1f;	.align 8
+	l32i	a3, a2, PT_AREG7;	_j 1f;	.align 8
+	l32i	a3, a2, PT_AREG8;	_j 1f;	.align 8
+	mov	a3, a9		;	_j 1f;	.align 8
+	mov	a3, a10		;	_j 1f;	.align 8
+	mov	a3, a11		;	_j 1f;	.align 8
+	mov	a3, a12		;	_j 1f;	.align 8
+	mov	a3, a13		;	_j 1f;	.align 8
+	mov	a3, a14		;	_j 1f;	.align 8
+	mov	a3, a15		;	_j 1f;	.align 8
+
+1: 	# a7: instruction pointer, a4: instruction, a3: value
+
+	movi	a6, 0			# mask: ffffffff:00000000
+
+#if XCHAL_HAVE_DENSITY
+	addi	a7, a7, 2		# incr. PC,assume 16-bit instruction
+
+	extui	a5, a4, INSN_OP0, 4	# extract OP0
+	addi	a5, a5, -OP0_S32I_N
+	_beqz	a5, 1f			# s32i.n: jump
+
+	addi	a7, a7, 1		# increment PC, 32-bit instruction
+#else
+	addi	a7, a7, 3		# increment PC, 32-bit instruction
+#endif
+
+	extui	a5, a4, INSN_OP1, 4	# extract OP1
+	_beqi	a5, OP1_S32I, 1f	# jump if 32 bit store
+	_bnei	a5, OP1_S16I, .Linvalid_instruction_store
+
+	movi	a5, -1
+	__extl	a3, a3			# get 16-bit value
+	__exth	a6, a5			# get 16-bit mask ffffffff:ffff0000
+
+	/* Get memory address */
+
+1:
+#if XCHAL_HAVE_LOOPS
+	rsr	a4, LEND		# check if we reached LEND
+	bne	a7, a4, 1f
+	rsr	a4, LCOUNT		# and LCOUNT != 0
+	beqz	a4, 1f
+	addi	a4, a4, -1		# decrement LCOUNT and set
+	rsr	a7, LBEG		# set PC to LBEGIN
+	wsr	a4, LCOUNT
+#endif
+
+1:	wsr	a7, EPC_1		# skip store instruction
+	movi	a4, ~3
+	and	a4, a4, a8		# align memory address
+
+	/* Insert value into memory */
+
+	movi	a5, -1			# mask: ffffffff:XXXX0000
+#ifdef UNALIGNED_USER_EXCEPTION
+	addi	a4, a4, 8
+#endif
+
+	__ssa8r a8
+	__src_b	a7, a5, a6		# lo-mask  F..F0..0 (BE) 0..0F..F (LE)
+	__src_b	a6, a6, a5		# hi-mask  0..0F..F (BE) F..F0..0 (LE)
+#ifdef UNALIGNED_USER_EXCEPTION
+	l32e	a5, a4, -8
+#else
+	l32i	a5, a4, 0		# load lower address word
+#endif
+	and	a5, a5, a7		# mask
+	__sh	a7, a3 			# shift value
+	or	a5, a5, a7		# or with original value
+#ifdef UNALIGNED_USER_EXCEPTION
+	s32e	a5, a4, -8
+	l32e	a7, a4, -4
+#else
+	s32i	a5, a4, 0		# store
+	l32i	a7, a4, 4		# same for upper address word
+#endif
+	__sl	a5, a3
+	and	a6, a7, a6
+	or	a6, a6, a5
+#ifdef UNALIGNED_USER_EXCEPTION
+	s32e	a6, a4, -4
+#else
+	s32i	a6, a4, 4
+#endif
+
+	/* Done. restore stack and return */
+
+.Lexit:
+	movi	a4, 0
+	rsr	a3, EXCSAVE_1
+	s32i	a4, a3, EXC_TABLE_FIXUP
+
+	/* Restore working register */
+
+	l32i	a8, a2, PT_AREG8
+	l32i	a7, a2, PT_AREG7
+	l32i	a6, a2, PT_AREG6
+	l32i	a5, a2, PT_AREG5
+	l32i	a4, a2, PT_AREG4
+	l32i	a3, a2, PT_AREG3
+
+	/* restore SAR and return */
+
+	wsr	a0, SAR
+	l32i	a0, a2, PT_AREG0
+	l32i	a2, a2, PT_AREG2
+	rfe
+
+	/* We cannot handle this exception. */
+
+	.extern _kernel_exception
+.Linvalid_instruction_store:
+.Linvalid_instruction:
+
+	/* Restore a4...a8 and SAR, set SP, and jump to default exception. */
+
+	l32i	a8, a2, PT_AREG8
+	l32i	a7, a2, PT_AREG7
+	l32i	a6, a2, PT_AREG6
+	l32i	a5, a2, PT_AREG5
+	l32i	a4, a2, PT_AREG4
+	wsr	a0, SAR
+	mov	a1, a2
+
+	rsr	a0, PS
+        bbsi.l  a0, PS_UM_BIT, 1f     # jump if user mode
+
+	movi	a0, _kernel_exception
+	jx	a0
+
+1:	movi	a0, _user_exception
+	jx	a0
+
+
+#endif /* XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION */
+
diff --git a/arch/xtensa/kernel/asm-offsets.c b/arch/xtensa/kernel/asm-offsets.c
new file mode 100644
index 00000000..7dc3f915
--- /dev/null
+++ b/arch/xtensa/kernel/asm-offsets.c
@@ -0,0 +1,111 @@
+/*
+ * arch/xtensa/kernel/asm-offsets.c
+ *
+ * Generates definitions from c-type structures used by assembly sources.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2005 Tensilica Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ */
+
+#include <asm/processor.h>
+#include <asm/coprocessor.h>
+
+#include <linux/types.h>
+#include <linux/stddef.h>
+#include <linux/thread_info.h>
+#include <linux/ptrace.h>
+#include <linux/mm.h>
+#include <linux/kbuild.h>
+
+#include <asm/ptrace.h>
+#include <asm/uaccess.h>
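+
+/*
+ * Each DEFINE() below (from linux/kbuild.h) emits an assembler marker
+ * that the build post-processes into a "#define NAME value" line of the
+ * generated asm-offsets.h, so assembly sources can use offsets such as
+ * PT_PC symbolically.
+ */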
+
+int main(void)
+{
+	/* struct pt_regs */
+	DEFINE(PT_PC, offsetof (struct pt_regs, pc));
+	DEFINE(PT_PS, offsetof (struct pt_regs, ps));
+	DEFINE(PT_DEPC, offsetof (struct pt_regs, depc));
+	DEFINE(PT_EXCCAUSE, offsetof (struct pt_regs, exccause));
+	DEFINE(PT_EXCVADDR, offsetof (struct pt_regs, excvaddr));
+	DEFINE(PT_DEBUGCAUSE, offsetof (struct pt_regs, debugcause));
+	DEFINE(PT_WMASK, offsetof (struct pt_regs, wmask));
+	DEFINE(PT_LBEG, offsetof (struct pt_regs, lbeg));
+	DEFINE(PT_LEND, offsetof (struct pt_regs, lend));
+	DEFINE(PT_LCOUNT, offsetof (struct pt_regs, lcount));
+	DEFINE(PT_SAR, offsetof (struct pt_regs, sar));
+	DEFINE(PT_ICOUNTLEVEL, offsetof (struct pt_regs, icountlevel));
+	DEFINE(PT_SYSCALL, offsetof (struct pt_regs, syscall));
+	DEFINE(PT_AREG, offsetof (struct pt_regs, areg[0]));
+	DEFINE(PT_AREG0, offsetof (struct pt_regs, areg[0]));
+	DEFINE(PT_AREG1, offsetof (struct pt_regs, areg[1]));
+	DEFINE(PT_AREG2, offsetof (struct pt_regs, areg[2]));
+	DEFINE(PT_AREG3, offsetof (struct pt_regs, areg[3]));
+	DEFINE(PT_AREG4, offsetof (struct pt_regs, areg[4]));
+	DEFINE(PT_AREG5, offsetof (struct pt_regs, areg[5]));
+	DEFINE(PT_AREG6, offsetof (struct pt_regs, areg[6]));
+	DEFINE(PT_AREG7, offsetof (struct pt_regs, areg[7]));
+	DEFINE(PT_AREG8, offsetof (struct pt_regs, areg[8]));
+	DEFINE(PT_AREG9, offsetof (struct pt_regs, areg[9]));
+	DEFINE(PT_AREG10, offsetof (struct pt_regs, areg[10]));
+	DEFINE(PT_AREG11, offsetof (struct pt_regs, areg[11]));
+	DEFINE(PT_AREG12, offsetof (struct pt_regs, areg[12]));
+	DEFINE(PT_AREG13, offsetof (struct pt_regs, areg[13]));
+	DEFINE(PT_AREG14, offsetof (struct pt_regs, areg[14]));
+	DEFINE(PT_AREG15, offsetof (struct pt_regs, areg[15]));
+	DEFINE(PT_WINDOWBASE, offsetof (struct pt_regs, windowbase));
+	DEFINE(PT_WINDOWSTART, offsetof(struct pt_regs, windowstart));
+	DEFINE(PT_SIZE, sizeof(struct pt_regs));
+	DEFINE(PT_AREG_END, offsetof (struct pt_regs, areg[XCHAL_NUM_AREGS]));
+	DEFINE(PT_USER_SIZE, offsetof(struct pt_regs, areg[XCHAL_NUM_AREGS]));
+	DEFINE(PT_XTREGS_OPT, offsetof(struct pt_regs, xtregs_opt));
+	DEFINE(XTREGS_OPT_SIZE, sizeof(xtregs_opt_t));
+
+	/* struct task_struct */
+	DEFINE(TASK_PTRACE, offsetof (struct task_struct, ptrace));
+	DEFINE(TASK_MM, offsetof (struct task_struct, mm));
+	DEFINE(TASK_ACTIVE_MM, offsetof (struct task_struct, active_mm));
+	DEFINE(TASK_PID, offsetof (struct task_struct, pid));
+	DEFINE(TASK_THREAD, offsetof (struct task_struct, thread));
+	DEFINE(TASK_THREAD_INFO, offsetof (struct task_struct, stack));
+	DEFINE(TASK_STRUCT_SIZE, sizeof (struct task_struct));
+
+	/* struct thread_info (offsets within task_struct and thread_info) */
+	DEFINE(THREAD_RA, offsetof (struct task_struct, thread.ra));
+	DEFINE(THREAD_SP, offsetof (struct task_struct, thread.sp));
+	DEFINE(THREAD_CPENABLE, offsetof (struct thread_info, cpenable));
+#if XTENSA_HAVE_COPROCESSORS
+	DEFINE(THREAD_XTREGS_CP0, offsetof (struct thread_info, xtregs_cp));
+	DEFINE(THREAD_XTREGS_CP1, offsetof (struct thread_info, xtregs_cp));
+	DEFINE(THREAD_XTREGS_CP2, offsetof (struct thread_info, xtregs_cp));
+	DEFINE(THREAD_XTREGS_CP3, offsetof (struct thread_info, xtregs_cp));
+	DEFINE(THREAD_XTREGS_CP4, offsetof (struct thread_info, xtregs_cp));
+	DEFINE(THREAD_XTREGS_CP5, offsetof (struct thread_info, xtregs_cp));
+	DEFINE(THREAD_XTREGS_CP6, offsetof (struct thread_info, xtregs_cp));
+	DEFINE(THREAD_XTREGS_CP7, offsetof (struct thread_info, xtregs_cp));
+#endif
+	DEFINE(THREAD_XTREGS_USER, offsetof (struct thread_info, xtregs_user));
+	DEFINE(XTREGS_USER_SIZE, sizeof(xtregs_user_t));
+	DEFINE(THREAD_CURRENT_DS, offsetof (struct task_struct, thread.current_ds));
+
+	/* struct mm_struct */
+	DEFINE(MM_USERS, offsetof(struct mm_struct, mm_users));
+	DEFINE(MM_PGD, offsetof (struct mm_struct, pgd));
+	DEFINE(MM_CONTEXT, offsetof (struct mm_struct, context));
+
+	/* struct page */
+	DEFINE(PAGE_FLAGS, offsetof(struct page, flags));
+
+	/* constants */
+	DEFINE(_CLONE_VM, CLONE_VM);
+	DEFINE(_CLONE_UNTRACED, CLONE_UNTRACED);
+	DEFINE(PG_ARCH_1, PG_arch_1);
+
+	return 0;
+}
+
diff --git a/arch/xtensa/kernel/coprocessor.S b/arch/xtensa/kernel/coprocessor.S
new file mode 100644
index 00000000..2bc1e145
--- /dev/null
+++ b/arch/xtensa/kernel/coprocessor.S
@@ -0,0 +1,335 @@
+/*
+ * arch/xtensa/kernel/coprocessor.S
+ *
+ * Xtensa processor configuration-specific table of coprocessor and
+ * other custom register layout information.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003 - 2007 Tensilica Inc.
+ */
+
+
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+#include <asm/processor.h>
+#include <asm/coprocessor.h>
+#include <asm/thread_info.h>
+#include <asm/uaccess.h>
+#include <asm/unistd.h>
+#include <asm/ptrace.h>
+#include <asm/current.h>
+#include <asm/pgtable.h>
+#include <asm/page.h>
+#include <asm/signal.h>
+#include <asm/tlbflush.h>
+
+/*
+ * Entry condition:
+ *
+ *   a0:	trashed, original value saved on stack (PT_AREG0)
+ *   a1:	a1
+ *   a2:	new stack pointer, original in DEPC
+ *   a3:	dispatch table
+ *   depc:	a2, original value saved on stack (PT_DEPC)
+ *   excsave_1:	a3
+ *
+ *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
+ *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
+ */
+
+/* IO protection is currently unsupported. */
+
+ENTRY(fast_io_protect)
+	wsr	a0, EXCSAVE_1
+	movi	a0, unrecoverable_exception
+	callx0	a0
+
+#if XTENSA_HAVE_COPROCESSORS
+
+/*
+ * Macros for lazy context switch. 
+ */
+
+#define SAVE_CP_REGS(x)							\
+	.align 4;							\
+	.Lsave_cp_regs_cp##x:						\
+	.if XTENSA_HAVE_COPROCESSOR(x);					\
+		xchal_cp##x##_store a2 a4 a5 a6 a7;			\
+	.endif;								\
+	jx	a0
+
+#define SAVE_CP_REGS_TAB(x)						\
+	.if XTENSA_HAVE_COPROCESSOR(x);					\
+		.long .Lsave_cp_regs_cp##x - .Lsave_cp_regs_jump_table;	\
+	.else;								\
+		.long 0;						\
+	.endif;								\
+	.long THREAD_XTREGS_CP##x
+
+
+#define LOAD_CP_REGS(x)							\
+	.align 4;							\
+	.Lload_cp_regs_cp##x:						\
+	.if XTENSA_HAVE_COPROCESSOR(x);					\
+		xchal_cp##x##_load a2 a4 a5 a6 a7;			\
+	.endif;								\
+	jx	a0
+
+#define LOAD_CP_REGS_TAB(x)						\
+	.if XTENSA_HAVE_COPROCESSOR(x);					\
+		.long .Lload_cp_regs_cp##x - .Lload_cp_regs_jump_table; \
+	.else;								\
+		.long 0;						\
+	.endif;								\
+	.long THREAD_XTREGS_CP##x
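+
+/* Each *_CP_REGS_TAB(x) entry is thus 8 bytes: the offset of the save or
+ * load routine relative to its jump table (0 if coprocessor x is not
+ * configured), followed by the thread_info offset of its save area.
+ * This is why the tables are indexed with addx8 (table + 8 * index).
+ */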
+
+	SAVE_CP_REGS(0)
+	SAVE_CP_REGS(1)
+	SAVE_CP_REGS(2)
+	SAVE_CP_REGS(3)
+	SAVE_CP_REGS(4)
+	SAVE_CP_REGS(5)
+	SAVE_CP_REGS(6)
+	SAVE_CP_REGS(7)
+
+	LOAD_CP_REGS(0)
+	LOAD_CP_REGS(1)
+	LOAD_CP_REGS(2)
+	LOAD_CP_REGS(3)
+	LOAD_CP_REGS(4)
+	LOAD_CP_REGS(5)
+	LOAD_CP_REGS(6)
+	LOAD_CP_REGS(7)
+
+	.align 4
+.Lsave_cp_regs_jump_table:
+	SAVE_CP_REGS_TAB(0)
+	SAVE_CP_REGS_TAB(1)
+	SAVE_CP_REGS_TAB(2)
+	SAVE_CP_REGS_TAB(3)
+	SAVE_CP_REGS_TAB(4)
+	SAVE_CP_REGS_TAB(5)
+	SAVE_CP_REGS_TAB(6)
+	SAVE_CP_REGS_TAB(7)
+
+.Lload_cp_regs_jump_table:
+	LOAD_CP_REGS_TAB(0)
+	LOAD_CP_REGS_TAB(1)
+	LOAD_CP_REGS_TAB(2)
+	LOAD_CP_REGS_TAB(3)
+	LOAD_CP_REGS_TAB(4)
+	LOAD_CP_REGS_TAB(5)
+	LOAD_CP_REGS_TAB(6)
+	LOAD_CP_REGS_TAB(7)
+
+/*
+ * coprocessor_save(buffer, index) 
+ *                    a2      a3
+ * coprocessor_load(buffer, index)
+ *                    a2      a3
+ *
+ * Save or load coprocessor registers for coprocessor 'index'. 
+ * The register values are saved to or loaded from the 'buffer' address.
+ *
+ * Note that these functions don't update the coprocessor_owner information!
+ *
+ */
+
+ENTRY(coprocessor_save)
+	entry	a1, 32
+	s32i	a0, a1, 0
+	movi	a0, .Lsave_cp_regs_jump_table
+	addx8	a3, a3, a0
+	l32i	a3, a3, 0
+	beqz	a3, 1f
+	add	a0, a0, a3
+	callx0	a0
+1:	l32i	a0, a1, 0
+	retw
+
+ENTRY(coprocessor_load)
+	entry	a1, 32
+	s32i	a0, a1, 0
+	movi	a0, .Lload_cp_regs_jump_table
+	addx8	a3, a3, a0
+	l32i	a3, a3, 0
+	beqz	a3, 1f
+	add	a0, a0, a3
+	callx0	a0
+1:	l32i	a0, a1, 0
+	retw
+
+/*
+ * coprocessor_flush(struct thread_info*, index)
+ *                              a2          a3
+ * coprocessor_restore(struct thread_info*, index)
+ *                               a2           a3
+ *
+ * Save or load coprocessor registers for coprocessor 'index'.
+ * The register values are saved to or loaded from the coprocessor area
+ * inside the thread_info structure.
+ *
+ * Note that these functions don't update the coprocessor_owner information!
+ *
+ */
+
+
+ENTRY(coprocessor_flush)
+	entry	a1, 32
+	s32i	a0, a1, 0
+	movi	a0, .Lsave_cp_regs_jump_table
+	addx8	a3, a3, a0
+	l32i	a4, a3, 4
+	l32i	a3, a3, 0
+	add	a2, a2, a4
+	beqz	a3, 1f
+	add	a0, a0, a3
+	callx0	a0
+1:	l32i	a0, a1, 0
+	retw
+
+ENTRY(coprocessor_restore)
+	entry	a1, 32
+	s32i	a0, a1, 0
+	movi	a0, .Lload_cp_regs_jump_table
+	addx8	a3, a3, a0
+	l32i	a4, a3, 4
+	l32i	a3, a3, 0
+	add	a2, a2, a4
+	beqz	a3, 1f
+	add	a0, a0, a3
+	callx0	a0
+1:	l32i	a0, a1, 0
+	retw
+
+/*
+ * Entry condition:
+ *
+ *   a0:	trashed, original value saved on stack (PT_AREG0)
+ *   a1:	a1
+ *   a2:	new stack pointer, original in DEPC
+ *   a3:	dispatch table
+ *   depc:	a2, original value saved on stack (PT_DEPC)
+ *   excsave_1:	a3
+ *
+ *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
+ *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
+ */
+
+ENTRY(fast_coprocessor_double)
+	wsr	a0, EXCSAVE_1
+	movi	a0, unrecoverable_exception
+	callx0	a0
+
+
+ENTRY(fast_coprocessor)
+
+	/* Save remaining registers a1-a3 and SAR */
+
+	xsr	a3, EXCSAVE_1
+	s32i	a3, a2, PT_AREG3
+	rsr	a3, SAR
+	s32i	a1, a2, PT_AREG1
+	s32i	a3, a2, PT_SAR
+	mov	a1, a2
+	rsr	a2, DEPC
+	s32i	a2, a1, PT_AREG2
+
+	/*
+	 * The hal macros require up to 4 temporary registers. We use a3..a6.
+	 */
+
+	s32i	a4, a1, PT_AREG4
+	s32i	a5, a1, PT_AREG5
+	s32i	a6, a1, PT_AREG6
+
+	/* Find coprocessor number. Subtract first CP EXCCAUSE from EXCCAUSE */
+
+	rsr	a3, EXCCAUSE
+	addi	a3, a3, -EXCCAUSE_COPROCESSOR0_DISABLED
+
+	/* Set corresponding CPENABLE bit -> (sar:cp-index, a3: 1<<cp-index)*/
+
+	ssl	a3			# SAR: 32 - coprocessor_number
+	movi	a2, 1
+	rsr	a0, CPENABLE
+	sll	a2, a2
+	or	a0, a0, a2
+	wsr	a0, CPENABLE
+	rsync
+
+	/* Retrieve previous owner. (a3 still holds CP number) */
+
+	movi	a0, coprocessor_owner	# list of owners
+	addx4	a0, a3, a0		# entry for CP
+	l32i	a4, a0, 0
+
+	beqz	a4, 1f			# skip 'save' if no previous owner
+
+	/* Disable coprocessor for previous owner. (a2 = 1 << CP number) */
+
+	l32i	a5, a4, THREAD_CPENABLE
+	xor	a5, a5, a2		# (1 << cp-id) still in a2
+	s32i	a5, a4, THREAD_CPENABLE
+
+	/*
+	 * Get context save area and 'call' save routine. 
+	 * (a4 still holds previous owner (thread_info), a3 CP number)
+	 */
+
+	movi	a5, .Lsave_cp_regs_jump_table
+	movi	a0, 2f			# a0: 'return' address
+	addx8	a3, a3, a5		# a3: coprocessor number
+	l32i	a2, a3, 4		# a2: xtregs offset
+	l32i	a3, a3, 0		# a3: jump offset
+	add	a2, a2, a4
+	add	a4, a3, a5		# a4: address of save routine
+	jx	a4
+
+	/* Note that only a0 and a1 were preserved. */
+
+2:	rsr	a3, EXCCAUSE
+	addi	a3, a3, -EXCCAUSE_COPROCESSOR0_DISABLED
+	movi	a0, coprocessor_owner
+	addx4	a0, a3, a0
+
+	/* Set new 'owner' (a0 points to the CP owner, a3 contains the CP nr) */
+
+1:	GET_THREAD_INFO (a4, a1)
+	s32i	a4, a0, 0
+
+	/* Get context save area and 'call' load routine. */
+
+	movi	a5, .Lload_cp_regs_jump_table
+	movi	a0, 1f
+	addx8	a3, a3, a5
+	l32i	a2, a3, 4		# a2: xtregs offset
+	l32i	a3, a3, 0		# a3: jump offset
+	add	a2, a2, a4
+	add	a4, a3, a5
+	jx	a4
+
+	/* Restore all registers and return from exception handler. */
+
+1:	l32i	a6, a1, PT_AREG6
+	l32i	a5, a1, PT_AREG5
+	l32i	a4, a1, PT_AREG4
+
+	l32i	a0, a1, PT_SAR
+	l32i	a3, a1, PT_AREG3
+	l32i	a2, a1, PT_AREG2
+	wsr	a0, SAR
+	l32i	a0, a1, PT_AREG0
+	l32i	a1, a1, PT_AREG1
+
+	rfe
+
+	.data
+ENTRY(coprocessor_owner)
+	.fill XCHAL_CP_MAX, 4, 0
+
+#endif /* XTENSA_HAVE_COPROCESSORS */
+
diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
new file mode 100644
index 00000000..6223f334
--- /dev/null
+++ b/arch/xtensa/kernel/entry.S
@@ -0,0 +1,1960 @@
+/*
+ * arch/xtensa/kernel/entry.S
+ *
+ * Low-level exception handling
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004-2007 by Tensilica Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ *
+ */
+
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+#include <asm/processor.h>
+#include <asm/coprocessor.h>
+#include <asm/thread_info.h>
+#include <asm/uaccess.h>
+#include <asm/unistd.h>
+#include <asm/ptrace.h>
+#include <asm/current.h>
+#include <asm/pgtable.h>
+#include <asm/page.h>
+#include <asm/signal.h>
+#include <asm/tlbflush.h>
+#include <variant/tie-asm.h>
+
+/* Unimplemented features. */
+
+#undef KERNEL_STACK_OVERFLOW_CHECK
+#undef PREEMPTIBLE_KERNEL
+#undef ALLOCA_EXCEPTION_IN_IRAM
+
+/* Not well tested.
+ *
+ * - fast_coprocessor
+ */
+
+/*
+ * Macro to find first bit set in WINDOWBASE from the left + 1
+ *
+ * 100....0 -> 1
+ * 010....0 -> 2
+ * 000....1 -> WSBITS
+ */
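+/* E.g. with WSBITS = 8: a mask of 0b01000000 yields 2 and 0b00000001
+ * yields 8 (= WSBITS); examples derived from the rule above.
+ */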
+
+	.macro ffs_ws bit mask
+
+#if XCHAL_HAVE_NSA
+	nsau    \bit, \mask			# 32-WSBITS ... 31 (32 iff 0)
+	addi    \bit, \bit, WSBITS - 32 + 1   	# uppermost bit set -> return 1
+#else
+	movi    \bit, WSBITS
+#if WSBITS > 16
+	_bltui  \mask, 0x10000, 99f
+	addi    \bit, \bit, -16
+	extui   \mask, \mask, 16, 16
+#endif
+#if WSBITS > 8
+99:	_bltui  \mask, 0x100, 99f
+	addi    \bit, \bit, -8
+	srli    \mask, \mask, 8
+#endif
+99:	_bltui  \mask, 0x10, 99f
+	addi    \bit, \bit, -4
+	srli    \mask, \mask, 4
+99:	_bltui  \mask, 0x4, 99f
+	addi    \bit, \bit, -2
+	srli    \mask, \mask, 2
+99:	_bltui  \mask, 0x2, 99f
+	addi    \bit, \bit, -1
+99:
+
+#endif
+	.endm
+
+/* ----------------- DEFAULT FIRST LEVEL EXCEPTION HANDLERS ----------------- */
+
+/*
+ * First-level exception handler for user exceptions.
+ * Save some special registers, extra states and all registers in the AR
+ * register file that were in use in the user task, and jump to the common
+ * exception code.
+ * We save SAR (used to calculate WMASK), and WB and WS (we don't have to
+ * save them for kernel exceptions).
+ *
+ * Entry condition for user_exception:
+ *
+ *   a0:	trashed, original value saved on stack (PT_AREG0)
+ *   a1:	a1
+ *   a2:	new stack pointer, original value in depc
+ *   a3:	dispatch table
+ *   depc:	a2, original value saved on stack (PT_DEPC)
+ *   excsave1:	a3
+ *
+ *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
+ *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
+ *
+ * Entry condition for _user_exception:
+ *
+ *   a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC
+ *   excsave has been restored, and
+ *   stack pointer (a1) has been set.
+ *
+ * Note: _user_exception might be at an odd address. Don't use call0..call12
+ */
+
+ENTRY(user_exception)
+
+	/* Save a2, a3, and depc, restore excsave_1 and set SP. */
+
+	xsr	a3, EXCSAVE_1
+	rsr	a0, DEPC
+	s32i	a1, a2, PT_AREG1
+	s32i	a0, a2, PT_AREG2
+	s32i	a3, a2, PT_AREG3
+	mov	a1, a2
+
+	.globl _user_exception
+_user_exception:
+
+	/* Save SAR and turn off single stepping */
+
+	movi	a2, 0
+	rsr	a3, SAR
+	xsr	a2, ICOUNTLEVEL
+	s32i	a3, a1, PT_SAR
+	s32i	a2, a1, PT_ICOUNTLEVEL
+
+	/* Rotate ws so that the current windowbase is at bit0. */
+	/* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */
+
+	rsr	a2, WINDOWBASE
+	rsr	a3, WINDOWSTART
+	ssr	a2
+	s32i	a2, a1, PT_WINDOWBASE
+	s32i	a3, a1, PT_WINDOWSTART
+	slli	a2, a3, 32-WSBITS
+	src	a2, a3, a2
+	srli	a2, a2, 32-WSBITS
+	s32i	a2, a1, PT_WMASK	# needed for restoring registers
+
+	/* Save only live registers. */
+
+	_bbsi.l	a2, 1, 1f
+	s32i	a4, a1, PT_AREG4
+	s32i	a5, a1, PT_AREG5
+	s32i	a6, a1, PT_AREG6
+	s32i	a7, a1, PT_AREG7
+	_bbsi.l	a2, 2, 1f
+	s32i	a8, a1, PT_AREG8
+	s32i	a9, a1, PT_AREG9
+	s32i	a10, a1, PT_AREG10
+	s32i	a11, a1, PT_AREG11
+	_bbsi.l	a2, 3, 1f
+	s32i	a12, a1, PT_AREG12
+	s32i	a13, a1, PT_AREG13
+	s32i	a14, a1, PT_AREG14
+	s32i	a15, a1, PT_AREG15
+	_bnei	a2, 1, 1f		# only one valid frame?
+
+	/* Only one valid frame, skip saving regs. */
+
+	j	2f
+
+	/* Save the remaining registers.
+	 * We have to save all registers up to the first '1' from
+	 * the right, except the current frame (bit 0).
+	 * Assume a2 is:  001001000110001
+	 * All register frames starting from the top field to the marked '1'
+	 * must be saved.
+	 */
+
+1:	addi	a3, a2, -1		# eliminate '1' in bit 0: yyyyxxww0
+	neg	a3, a3			# yyyyxxww0 -> YYYYXXWW1+1
+	and	a3, a3, a2		# max. only one bit is set
+
+	/* Find number of frames to save */
+
+	ffs_ws	a0, a3			# number of frames to the '1' from left
+
+	/* Store information into WMASK:
+	 * bits 0..3: xxx1 masked lower 4 bits of the rotated windowstart,
+	 * bits 4...: number of valid 4-register frames
+	 */
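+
+	/* E.g. three frames to save and a rotated-windowstart low nibble
+	 * of 0001 give WMASK = (3 << 4) | 1 = 0x31 (an illustration of
+	 * the encoding only).
+	 */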
+
+	slli	a3, a0, 4		# number of frames to save in bits 8..4
+	extui	a2, a2, 0, 4		# mask for the first 16 registers
+	or	a2, a3, a2
+	s32i	a2, a1, PT_WMASK	# needed when we restore the reg-file
+
+	/* Save 4 registers at a time */
+
+1:	rotw	-1
+	s32i	a0, a5, PT_AREG_END - 16
+	s32i	a1, a5, PT_AREG_END - 12
+	s32i	a2, a5, PT_AREG_END - 8
+	s32i	a3, a5, PT_AREG_END - 4
+	addi	a0, a4, -1
+	addi	a1, a5, -16
+	_bnez	a0, 1b
+
+	/* WINDOWBASE still in SAR! */
+
+	rsr	a2, SAR			# original WINDOWBASE
+	movi	a3, 1
+	ssl	a2
+	sll	a3, a3
+	wsr	a3, WINDOWSTART		# set corresponding WINDOWSTART bit
+	wsr	a2, WINDOWBASE		# and WINDOWSTART
+	rsync
+
+	/* We are back to the original stack pointer (a1) */
+
+2:	/* Now, jump to the common exception handler. */
+
+	j	common_exception
+
+
+/*
+ * First-level exception handler for kernel exceptions
+ * Save special registers and the live window frame.
+ * Note: Even though we change the stack pointer, we don't have to do a
+ *	 MOVSP here, as we do that when we return from the exception.
+ *	 (See comment in the kernel exception exit code)
+ *
+ * Entry condition for kernel_exception:
+ *
+ *   a0:	trashed, original value saved on stack (PT_AREG0)
+ *   a1:	a1
+ *   a2:	new stack pointer, original in DEPC
+ *   a3:	dispatch table
+ *   depc:	a2, original value saved on stack (PT_DEPC)
+ *   excsave_1:	a3
+ *
+ *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
+ *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
+ *
+ * Entry condition for _kernel_exception:
+ *
+ *   a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC
+ *   excsave has been restored, and
+ *   stack pointer (a1) has been set.
+ *
+ * Note: _kernel_exception might be at an odd address. Don't use call0..call12
+ */
+
+ENTRY(kernel_exception)
+
+	/* Save a0, a2, a3, DEPC and set SP. */
+
+	xsr	a3, EXCSAVE_1		# restore a3, excsave_1
+	rsr	a0, DEPC		# get a2
+	s32i	a1, a2, PT_AREG1
+	s32i	a0, a2, PT_AREG2
+	s32i	a3, a2, PT_AREG3
+	mov	a1, a2
+
+	.globl _kernel_exception
+_kernel_exception:
+
+	/* Save SAR and turn off single stepping */
+
+	movi	a2, 0
+	rsr	a3, SAR
+	xsr	a2, ICOUNTLEVEL
+	s32i	a3, a1, PT_SAR
+	s32i	a2, a1, PT_ICOUNTLEVEL
+
+	/* Rotate ws so that the current windowbase is at bit0. */
+	/* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */
+
+	rsr	a2, WINDOWBASE		# don't need to save these, we only
+	rsr	a3, WINDOWSTART		# need shifted windowstart: windowmask
+	ssr	a2
+	slli	a2, a3, 32-WSBITS
+	src	a2, a3, a2
+	srli	a2, a2, 32-WSBITS
+	s32i	a2, a1, PT_WMASK	# needed for kernel_exception_exit
+
+	/* Save only the live window-frame */
+
+	_bbsi.l	a2, 1, 1f
+	s32i	a4, a1, PT_AREG4
+	s32i	a5, a1, PT_AREG5
+	s32i	a6, a1, PT_AREG6
+	s32i	a7, a1, PT_AREG7
+	_bbsi.l	a2, 2, 1f
+	s32i	a8, a1, PT_AREG8
+	s32i	a9, a1, PT_AREG9
+	s32i	a10, a1, PT_AREG10
+	s32i	a11, a1, PT_AREG11
+	_bbsi.l	a2, 3, 1f
+	s32i	a12, a1, PT_AREG12
+	s32i	a13, a1, PT_AREG13
+	s32i	a14, a1, PT_AREG14
+	s32i	a15, a1, PT_AREG15
+
+1:
+
+#ifdef KERNEL_STACK_OVERFLOW_CHECK
+
+	/*  Stack overflow check, for debugging  */
+	extui	a2, a1, TASK_SIZE_BITS,XX
+	movi	a3, SIZE??
+	_bge	a2, a3, out_of_stack_panic
+
+#endif
+
+/*
+ * This is the common exception handler.
+ * We get here from the user exception handler or simply by falling through
+ * from the kernel exception handler.
+ * Save the remaining special registers, switch to kernel mode, and jump
+ * to the second-level exception handler.
+ *
+ */
+
+common_exception:
+
+	/* Save some registers, disable loops and clear the syscall flag. */
+
+	rsr	a2, DEBUGCAUSE
+	rsr	a3, EPC_1
+	s32i	a2, a1, PT_DEBUGCAUSE
+	s32i	a3, a1, PT_PC
+
+	movi	a2, -1
+	rsr	a3, EXCVADDR
+	s32i	a2, a1, PT_SYSCALL
+	movi	a2, 0
+	s32i	a3, a1, PT_EXCVADDR
+	xsr	a2, LCOUNT
+	s32i	a2, a1, PT_LCOUNT
+
+	/* It is now safe to restore the EXC_TABLE_FIXUP variable. */
+
+	rsr	a0, EXCCAUSE
+	movi	a3, 0
+	rsr	a2, EXCSAVE_1
+	s32i	a0, a1, PT_EXCCAUSE
+	s32i	a3, a2, EXC_TABLE_FIXUP
+
+	/* All unrecoverable states are saved on stack, now, and a1 is valid,
+	 * so we can allow exceptions and interrupts (*) again.
+	 * Set PS(EXCM = 0, UM = 0, RING = 0, OWB = 0, WOE = 1, INTLEVEL = X)
+	 *
+	 * (*) We only allow interrupts if PS.INTLEVEL was not set to 1 before
+	 *     (interrupts disabled) and if this exception is not an interrupt.
+	 */
+
+	rsr	a3, PS
+	addi	a0, a0, -4
+	movi	a2, 1
+	extui	a3, a3, 0, 1		# a3 = PS.INTLEVEL[0]
+	moveqz	a3, a2, a0		# a3 = 1 iff interrupt exception
+	movi	a2, 1 << PS_WOE_BIT
+	or	a3, a3, a2
+	rsr	a0, EXCCAUSE
+	xsr	a3, PS
+
+	s32i	a3, a1, PT_PS		# save ps
+
+	/* Save LBEG, LEND */
+
+	rsr	a2, LBEG
+	rsr	a3, LEND
+	s32i	a2, a1, PT_LBEG
+	s32i	a3, a1, PT_LEND
+
+	/* Save optional registers. */
+
+	save_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT
+	
+	/* Go to second-level dispatcher. Set up parameters to pass to the
+	 * exception handler and call the exception handler.
+	 */
+
+	movi	a4, exc_table
+	mov	a6, a1			# pass stack frame
+	mov	a7, a0			# pass EXCCAUSE
+	addx4	a4, a0, a4
+	l32i	a4, a4, EXC_TABLE_DEFAULT		# load handler
+
+	/* Call the second-level handler */
+
+	callx4	a4
+
+	/* Jump here for exception exit */
+
+common_exception_return:
+
+	/* Jump if we are returning from kernel exceptions. */
+
+1:	l32i	a3, a1, PT_PS
+	_bbci.l	a3, PS_UM_BIT, 4f
+
+	/* Specific to a user exception exit:
+	 * We need to check some flags for signal handling and rescheduling,
+	 * and have to restore WB and WS, extra states, and all registers
+	 * in the register file that were in use in the user task.
+	 * Note that we don't disable interrupts here. 
+	 */
+
+	GET_THREAD_INFO(a2,a1)
+	l32i	a4, a2, TI_FLAGS
+
+	_bbsi.l	a4, TIF_NEED_RESCHED, 3f
+	_bbci.l	a4, TIF_SIGPENDING, 4f
+
+	l32i	a4, a1, PT_DEPC
+	bgeui	a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f
+
+	/* Call do_signal() */
+
+	movi	a4, do_signal	# int do_signal(struct pt_regs*, sigset_t*)
+	mov	a6, a1
+	movi	a7, 0
+	callx4	a4
+	j	1b
+
+3:	/* Reschedule */
+
+	movi	a4, schedule	# void schedule (void)
+	callx4	a4
+	j	1b
+
+4:	/* Restore optional registers. */
+
+	load_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT
+
+	wsr	a3, PS		/* disable interrupts */
+
+	_bbci.l	a3, PS_UM_BIT, kernel_exception_exit
+
+user_exception_exit:
+
+	/* Restore the state of the task and return from the exception. */
+
+	/* Switch to the user thread WINDOWBASE. Save SP temporarily in DEPC */
+
+	l32i	a2, a1, PT_WINDOWBASE
+	l32i	a3, a1, PT_WINDOWSTART
+	wsr	a1, DEPC		# use DEPC as temp storage
+	wsr	a3, WINDOWSTART		# restore WINDOWSTART
+	ssr	a2			# preserve user's WB in the SAR
+	wsr	a2, WINDOWBASE		# switch to user's saved WB
+	rsync
+	rsr	a1, DEPC		# restore stack pointer
+	l32i	a2, a1, PT_WMASK	# register frames saved (in bits 4...9)
+	rotw	-1			# we restore a4..a7
+	_bltui	a6, 16, 1f		# only have to restore current window?
+
+	/* The working registers are a0 and a3.  We are restoring to
+	 * a4..a7.  Be careful not to destroy what we have just restored.
+	 * Note: wmask has the format YYYYM:
+	 *       Y: number of registers saved in groups of 4
+	 *       M: 4 bit mask of first 16 registers
+	 */
+
+	mov	a2, a6
+	mov	a3, a5
+
+2:	rotw	-1			# a0..a3 become a4..a7
+	addi	a3, a7, -4*4		# next iteration
+	addi	a2, a6, -16		# decrementing Y in WMASK
+	l32i	a4, a3, PT_AREG_END + 0
+	l32i	a5, a3, PT_AREG_END + 4
+	l32i	a6, a3, PT_AREG_END + 8
+	l32i	a7, a3, PT_AREG_END + 12
+	_bgeui	a2, 16, 2b
+
+	/* Clear unrestored registers (don't leak anything to user-land) */
+
+1:	rsr	a0, WINDOWBASE
+	rsr	a3, SAR
+	sub	a3, a0, a3
+	beqz	a3, 2f
+	extui	a3, a3, 0, WBBITS
+
+1:	rotw	-1
+	addi	a3, a7, -1
+	movi	a4, 0
+	movi	a5, 0
+	movi	a6, 0
+	movi	a7, 0
+	bgei	a3, 1, 1b
+
+	/* We are back where we were when we started.
+	 * Note: a2 still contains WMASK (if we've returned to the original
+	 *	 frame where we had loaded a2), or at least the lower 4 bits
+	 *	 (if we have restored WSBITS-1 frames).
+	 */
+
+2:	j	common_exception_exit
+
+	/* This is the kernel exception exit.
+	 * We avoided to do a MOVSP when we entered the exception, but we
+	 * have to do it here.
+	 */
+
+kernel_exception_exit:
+
+#ifdef PREEMPTIBLE_KERNEL
+
+#ifdef CONFIG_PREEMPT
+
+	/*
+	 * Note: We've just returned from a call4, so we have
+	 * at least 4 addt'l regs.
+	 */
+
+	/* Check current_thread_info->preempt_count */
+
+	GET_THREAD_INFO(a2)
+	l32i	a3, a2, TI_PREEMPT
+	bnez	a3, 1f
+
+	l32i	a2, a2, TI_FLAGS
+
+1:
+
+#endif
+
+#endif
+
+	/* Check if we have to do a movsp.
+	 *
+	 * We only have to do a movsp if the previous window-frame has
+	 * been spilled to the *temporary* exception stack instead of the
+	 * task's stack. This is the case if the corresponding bit in
+	 * WINDOWSTART for the previous window-frame was set before
+	 * (not spilled) but is zero now (spilled).
+	 * If this bit is zero, all other bits except the one for the
+	 * current window frame are also zero. So, we can use a simple test:
+	 * 'and' WINDOWSTART and WINDOWSTART-1:
+	 *
+	 *  (XXXXXX1[0]* - 1) AND XXXXXX1[0]* = XXXXXX0[0]*
+	 *
+	 * The result is zero only if one bit was set.
+	 *
+	 * (Note: We might have gone through several task switches before
+	 *        we come back to the current task, so WINDOWBASE might be
+	 *        different from the time the exception occurred.)
+	 */
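+
+	/* E.g. WINDOWSTART = 0b0100: 0b0100 & 0b0011 == 0, exactly one bit
+	 * set, so the previous frame was spilled and we must do the movsp;
+	 * 0b0110 & 0b0101 == 0b0100 != 0, so it was not.
+	 */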
+
+	/* Test WINDOWSTART before and after the exception.
+	 * We actually have WMASK, so we only have to test if it is 1 or not.
+	 */
+
+	l32i	a2, a1, PT_WMASK
+	_beqi	a2, 1, common_exception_exit	# Spilled before exception,jump
+
+	/* Test WINDOWSTART now. If spilled, do the movsp */
+
+	rsr     a3, WINDOWSTART
+	addi	a0, a3, -1
+	and     a3, a3, a0
+	_bnez	a3, common_exception_exit
+
+	/* Do a movsp (we returned from a call4, so we have at least a0..a7) */
+
+	addi    a0, a1, -16
+	l32i    a3, a0, 0
+	l32i    a4, a0, 4
+	s32i    a3, a1, PT_SIZE+0
+	s32i    a4, a1, PT_SIZE+4
+	l32i    a3, a0, 8
+	l32i    a4, a0, 12
+	s32i    a3, a1, PT_SIZE+8
+	s32i    a4, a1, PT_SIZE+12
+
+	/* Common exception exit.
+	 * We restore the special register and the current window frame, and
+	 * return from the exception.
+	 *
+	 * Note: We expect a2 to hold PT_WMASK
+	 */
+
+common_exception_exit:
+
+	/* Restore address registers. */
+
+	_bbsi.l	a2, 1, 1f
+	l32i	a4,  a1, PT_AREG4
+	l32i	a5,  a1, PT_AREG5
+	l32i	a6,  a1, PT_AREG6
+	l32i	a7,  a1, PT_AREG7
+	_bbsi.l	a2, 2, 1f
+	l32i	a8,  a1, PT_AREG8
+	l32i	a9,  a1, PT_AREG9
+	l32i	a10, a1, PT_AREG10
+	l32i	a11, a1, PT_AREG11
+	_bbsi.l	a2, 3, 1f
+	l32i	a12, a1, PT_AREG12
+	l32i	a13, a1, PT_AREG13
+	l32i	a14, a1, PT_AREG14
+	l32i	a15, a1, PT_AREG15
+
+	/* Restore PC, SAR */
+
+1:	l32i	a2, a1, PT_PC
+	l32i	a3, a1, PT_SAR
+	wsr	a2, EPC_1
+	wsr	a3, SAR
+
+	/* Restore LBEG, LEND, LCOUNT */
+
+	l32i	a2, a1, PT_LBEG
+	l32i	a3, a1, PT_LEND
+	wsr	a2, LBEG
+	l32i	a2, a1, PT_LCOUNT
+	wsr	a3, LEND
+	wsr	a2, LCOUNT
+
+	/* We control single stepping through the ICOUNTLEVEL register. */
+
+	l32i	a2, a1, PT_ICOUNTLEVEL
+	movi	a3, -2
+	wsr	a2, ICOUNTLEVEL
+	wsr	a3, ICOUNT
+
+	/* Check if it was double exception. */
+
+	l32i	a0, a1, PT_DEPC
+	l32i	a3, a1, PT_AREG3
+	l32i	a2, a1, PT_AREG2
+	_bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
+
+	/* Restore a0...a3 and return */
+
+	l32i	a0, a1, PT_AREG0
+	l32i	a1, a1, PT_AREG1
+	rfe
+
+1:	wsr	a0, DEPC
+	l32i	a0, a1, PT_AREG0
+	l32i	a1, a1, PT_AREG1
+	rfde
+
+/*
+ * Debug exception handler.
+ *
+ * Currently, we don't support KGDB, so only user applications can be debugged.
+ *
+ * When we get here,  a0 is trashed and saved to excsave[debuglevel]
+ */
+
+ENTRY(debug_exception)
+
+	rsr	a0, EPS + XCHAL_DEBUGLEVEL
+	bbsi.l	a0, PS_EXCM_BIT, 1f	# exception mode
+
+	/* Set EPC_1 and EXCCAUSE */
+
+	wsr	a2, DEPC		# save a2 temporarily
+	rsr	a2, EPC + XCHAL_DEBUGLEVEL
+	wsr	a2, EPC_1
+
+	movi	a2, EXCCAUSE_MAPPED_DEBUG
+	wsr	a2, EXCCAUSE
+
+	/* Restore PS to the value before the debug exc but with PS.EXCM set.*/
+
+	movi	a2, 1 << PS_EXCM_BIT
+	or	a2, a0, a2
+	movi	a0, debug_exception	# restore a3, debug jump vector
+	wsr	a2, PS
+	xsr	a0, EXCSAVE + XCHAL_DEBUGLEVEL
+
+	/* Switch to kernel/user stack, restore jump vector, and save a0 */
+
+	bbsi.l	a2, PS_UM_BIT, 2f	# jump if user mode
+
+	addi	a2, a1, -16-PT_SIZE	# assume kernel stack
+	s32i	a0, a2, PT_AREG0
+	movi	a0, 0
+	s32i	a1, a2, PT_AREG1
+	s32i	a0, a2, PT_DEPC		# mark it as a regular exception
+	xsr	a0, DEPC
+	s32i	a3, a2, PT_AREG3
+	s32i	a0, a2, PT_AREG2
+	mov	a1, a2
+	j	_kernel_exception
+
+2:	rsr	a2, EXCSAVE_1
+	l32i	a2, a2, EXC_TABLE_KSTK	# load kernel stack pointer
+	s32i	a0, a2, PT_AREG0
+	movi	a0, 0
+	s32i	a1, a2, PT_AREG1
+	s32i	a0, a2, PT_DEPC
+	xsr	a0, DEPC
+	s32i	a3, a2, PT_AREG3
+	s32i	a0, a2, PT_AREG2
+	mov	a1, a2
+	j	_user_exception
+
+	/* Debug exception while in exception mode. */
+1:	j	1b	// FIXME!!
+
+
+/*
+ * We get here in case of an unrecoverable exception.
+ * The only thing we can do is to be nice and print a panic message.
+ * We only produce a single stack frame for panic, so ???
+ *
+ *
+ * Entry conditions:
+ *
+ *   - a0 contains the caller address; original value saved in excsave1.
+ *   - the original a0 contains a valid return address (backtrace) or 0.
+ *   - a2 contains a valid stackpointer
+ *
+ * Notes:
+ *
+ *   - If the stack pointer could be invalid, the caller has to setup a
+ *     dummy stack pointer (e.g. the stack of the init_task)
+ *
+ *   - If the return address could be invalid, the caller has to set it
+ *     to 0, so the backtrace would stop.
+ *
+ */
+	.align 4
+unrecoverable_text:
+	.ascii "Unrecoverable error in exception handler\0"
+
+ENTRY(unrecoverable_exception)
+
+	movi	a0, 1
+	movi	a1, 0
+
+	wsr	a0, WINDOWSTART
+	wsr	a1, WINDOWBASE
+	rsync
+
+	movi	a1, (1 << PS_WOE_BIT) | 1
+	wsr	a1, PS
+	rsync
+
+	movi	a1, init_task
+	movi	a0, 0
+	addi	a1, a1, PT_REGS_OFFSET
+
+	movi	a4, panic
+	movi	a6, unrecoverable_text
+
+	callx4	a4
+
+1:	j	1b
+
+
+/* -------------------------- FAST EXCEPTION HANDLERS ----------------------- */
+
+/*
+ * Fast-handler for alloca exceptions
+ *
+ *  The ALLOCA handler is entered when user code executes the MOVSP
+ *  instruction and the caller's frame is not in the register file.
+ *  In this case, the caller frame's a0..a3 are on the stack just
+ *  below sp (a1), and this handler moves them.
+ *
+ *  For "MOVSP <ar>,<as>" without destination register a1, this routine
+ *  simply moves the value from <as> to <ar> without moving the save area.
+ *
+ * Entry condition:
+ *
+ *   a0:	trashed, original value saved on stack (PT_AREG0)
+ *   a1:	a1
+ *   a2:	new stack pointer, original in DEPC
+ *   a3:	dispatch table
+ *   depc:	a2, original value saved on stack (PT_DEPC)
+ *   excsave_1:	a3
+ *
+ *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
+ *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
+ */
+
+#if XCHAL_HAVE_BE
+#define _EXTUI_MOVSP_SRC(ar)	extui ar, ar, 4, 4
+#define _EXTUI_MOVSP_DST(ar)	extui ar, ar, 0, 4
+#else
+#define _EXTUI_MOVSP_SRC(ar)	extui ar, ar, 0, 4
+#define _EXTUI_MOVSP_DST(ar)	extui ar, ar, 4, 4
+#endif
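+
+/* Sketch of the two MOVSP cases handled here (illustration only):
+ *
+ *	movsp a1, as:	a1 = as, and the caller's 4-word save area at
+ *			old_sp-16 is copied to new_sp-16 (done below with
+ *			l32e/s32e so the user's PS.RING privilege applies);
+ *	movsp ar, as:	(ar != a1) plain register move, nothing to copy.
+ */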
+
+ENTRY(fast_alloca)
+
+	/* We shouldn't be in a double exception. */
+
+	l32i	a0, a2, PT_DEPC
+	_bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, .Lunhandled_double
+
+	rsr	a0, DEPC		# get a2
+	s32i	a4, a2, PT_AREG4	# save a4 and
+	s32i	a0, a2, PT_AREG2	# a2 to stack
+
+	/* Exit critical section. */
+
+	movi	a0, 0
+	s32i	a0, a3, EXC_TABLE_FIXUP
+
+	/* Restore a3, excsave_1 */
+
+	xsr	a3, EXCSAVE_1		# make sure excsave_1 is valid for dbl.
+	rsr	a4, EPC_1		# get exception address
+	s32i	a3, a2, PT_AREG3	# save a3 to stack
+
+#ifdef ALLOCA_EXCEPTION_IN_IRAM
+#error	iram not supported
+#else
+	/* Note: l8ui not allowed in IRAM/IROM!! */
+	l8ui	a0, a4, 1		# read as(src) from MOVSP instruction
+#endif
+	movi	a3, .Lmovsp_src
+	_EXTUI_MOVSP_SRC(a0)		# extract source register number
+	addx8	a3, a0, a3
+	jx	a3
+
+.Lunhandled_double:
+	wsr	a0, EXCSAVE_1
+	movi	a0, unrecoverable_exception
+	callx0	a0
+
+	.align 8
+.Lmovsp_src:
+	l32i	a3, a2, PT_AREG0;	_j 1f;	.align 8
+	mov	a3, a1;			_j 1f;	.align 8
+	l32i	a3, a2, PT_AREG2;	_j 1f;	.align 8
+	l32i	a3, a2, PT_AREG3;	_j 1f;	.align 8
+	l32i	a3, a2, PT_AREG4;	_j 1f;	.align 8
+	mov	a3, a5;			_j 1f;	.align 8
+	mov	a3, a6;			_j 1f;	.align 8
+	mov	a3, a7;			_j 1f;	.align 8
+	mov	a3, a8;			_j 1f;	.align 8
+	mov	a3, a9;			_j 1f;	.align 8
+	mov	a3, a10;		_j 1f;	.align 8
+	mov	a3, a11;		_j 1f;	.align 8
+	mov	a3, a12;		_j 1f;	.align 8
+	mov	a3, a13;		_j 1f;	.align 8
+	mov	a3, a14;		_j 1f;	.align 8
+	mov	a3, a15;		_j 1f;	.align 8
+
+1:
+
+#ifdef ALLOCA_EXCEPTION_IN_IRAM
+#error	iram not supported
+#else
+	l8ui	a0, a4, 0		# read ar(dst) from MOVSP instruction
+#endif
+	addi	a4, a4, 3		# step over movsp
+	_EXTUI_MOVSP_DST(a0)		# extract destination register
+	wsr	a4, EPC_1		# save new epc_1
+
+	_bnei	a0, 1, 1f		# no 'movsp a1, ax': jump
+
+        /* Move the save area. This implies the use of the L32E
+	 * and S32E instructions, because this move must be done with
+	 * the user's PS.RING privilege levels, not with ring 0
+	 * (kernel's) privileges currently active with PS.EXCM
+	 * set. Note that we have still registered a fixup routine with the
+	 * double exception vector in case a double exception occurs.
+	 */
+
+	/* a0,a4:avail a1:old user stack a2:exc. stack a3:new user stack. */
+
+	l32e	a0, a1, -16
+	l32e	a4, a1, -12
+	s32e	a0, a3, -16
+	s32e	a4, a3, -12
+	l32e	a0, a1, -8
+	l32e	a4, a1, -4
+	s32e	a0, a3, -8
+	s32e	a4, a3, -4
+
+	/* Restore stack-pointer and all the other saved registers. */
+
+	mov	a1, a3
+
+	l32i	a4, a2, PT_AREG4
+	l32i	a3, a2, PT_AREG3
+	l32i	a0, a2, PT_AREG0
+	l32i	a2, a2, PT_AREG2
+	rfe
+
+	/*  MOVSP <at>,<as>  was invoked with <at> != a1.
+	 *  Because the stack pointer is not being modified,
+	 *  we should be able to just modify the pointer
+	 *  without moving any save area.
+	 *  The processor only traps these occurrences if the
+	 *  caller window isn't live, so unfortunately we can't
+	 *  use this as an alternate trap mechanism.
+	 *  So we just do the move.  This requires that we
+	 *  resolve the destination register, not just the source,
+	 *  so there's some extra work.
+	 *  (PERHAPS NOT REALLY NEEDED, BUT CLEANER...)
+	 */
+
+	/* a0 dst-reg, a1 user-stack, a2 stack, a3 value of src reg. */
+
+1:	movi	a4, .Lmovsp_dst
+	addx8	a4, a0, a4
+	jx	a4
+
+	.align 8
+.Lmovsp_dst:
+	s32i	a3, a2, PT_AREG0;	_j 1f;	.align 8
+	mov	a1, a3;			_j 1f;	.align 8
+	s32i	a3, a2, PT_AREG2;	_j 1f;	.align 8
+	s32i	a3, a2, PT_AREG3;	_j 1f;	.align 8
+	s32i	a3, a2, PT_AREG4;	_j 1f;	.align 8
+	mov	a5, a3;			_j 1f;	.align 8
+	mov	a6, a3;			_j 1f;	.align 8
+	mov	a7, a3;			_j 1f;	.align 8
+	mov	a8, a3;			_j 1f;	.align 8
+	mov	a9, a3;			_j 1f;	.align 8
+	mov	a10, a3;		_j 1f;	.align 8
+	mov	a11, a3;		_j 1f;	.align 8
+	mov	a12, a3;		_j 1f;	.align 8
+	mov	a13, a3;		_j 1f;	.align 8
+	mov	a14, a3;		_j 1f;	.align 8
+	mov	a15, a3;		_j 1f;	.align 8
+
+1:	l32i	a4, a2, PT_AREG4
+	l32i	a3, a2, PT_AREG3
+	l32i	a0, a2, PT_AREG0
+	l32i	a2, a2, PT_AREG2
+	rfe
+
+
+/*
+ * fast system calls.
+ *
+ * WARNING:  The kernel doesn't save the entire user context before
+ * handling a fast system call.  These functions are small and short,
+ * usually offering some functionality not available to user tasks.
+ *
+ * BE CAREFUL TO PRESERVE THE USER'S CONTEXT.
+ *
+ * Entry condition:
+ *
+ *   a0:	trashed, original value saved on stack (PT_AREG0)
+ *   a1:	a1
+ *   a2:	new stack pointer, original in DEPC
+ *   a3:	dispatch table
+ *   depc:	a2, original value saved on stack (PT_DEPC)
+ *   excsave_1:	a3
+ */
+
+ENTRY(fast_syscall_kernel)
+
+	/* Skip syscall. */
+
+	rsr	a0, EPC_1
+	addi	a0, a0, 3
+	wsr	a0, EPC_1
+
+	l32i	a0, a2, PT_DEPC
+	bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable
+
+	rsr	a0, DEPC			# get syscall-nr
+	_beqz	a0, fast_syscall_spill_registers
+	_beqi	a0, __NR_xtensa, fast_syscall_xtensa
+
+	j	kernel_exception
+
+ENTRY(fast_syscall_user)
+
+	/* Skip syscall. */
+
+	rsr	a0, EPC_1
+	addi	a0, a0, 3
+	wsr	a0, EPC_1
+
+	l32i	a0, a2, PT_DEPC
+	bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable
+
+	rsr	a0, DEPC			# get syscall-nr
+	_beqz	a0, fast_syscall_spill_registers
+	_beqi	a0, __NR_xtensa, fast_syscall_xtensa
+
+	j	user_exception
+
+ENTRY(fast_syscall_unrecoverable)
+
+        /* Restore all states. */
+
+        l32i    a0, a2, PT_AREG0        # restore a0
+        xsr     a2, DEPC                # restore a2, depc
+        rsr     a3, EXCSAVE_1
+
+        wsr     a0, EXCSAVE_1
+        movi    a0, unrecoverable_exception
+        callx0  a0
+
+
+
+/*
+ * sysxtensa syscall handler
+ *
+ * int sysxtensa (SYS_XTENSA_ATOMIC_SET,     ptr, val,    unused);
+ * int sysxtensa (SYS_XTENSA_ATOMIC_ADD,     ptr, val,    unused);
+ * int sysxtensa (SYS_XTENSA_ATOMIC_EXG_ADD, ptr, val,    unused);
+ * int sysxtensa (SYS_XTENSA_ATOMIC_CMP_SWP, ptr, oldval, newval);
+ *        a2            a6                   a3    a4      a5
+ *
+ * Entry condition:
+ *
+ *   a0:	a2 (syscall-nr), original value saved on stack (PT_AREG0)
+ *   a1:	a1
+ *   a2:	new stack pointer, original in a0 and DEPC
+ *   a3:	dispatch table, original in excsave_1
+ *   a4..a15:	unchanged
+ *   depc:	a2, original value saved on stack (PT_DEPC)
+ *   excsave_1:	a3
+ *
+ *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
+ *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
+ *
+ * Note: we don't have to save a2; a2 holds the return value
+ *
+ * We use the two macros TRY and CATCH:
+ *
+ * TRY	 adds an entry to the __ex_table fixup table for the immediately
+ *	 following instruction.
+ *
+ * CATCH catches any exception that occurred at one of the preceding TRY
+ *       statements and continues from there
+ *
+ * Usage TRY	l32i	a0, a1, 0
+ *		<other code>
+ *	 done:	rfe
+ *	 CATCH	<set return code>
+ *		j done
+ */
+
+#define TRY								\
+	.section __ex_table, "a";					\
+	.word	66f, 67f;						\
+	.text;								\
+66:
+
+#define CATCH								\
+67:
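+
+/* Each TRY thus records a (faulting insn address, fixup address) pair in
+ * __ex_table; on a fault, the exception code looks the address up there
+ * and resumes at the matching CATCH label.
+ */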
+
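+/* C sketch of the non-swap path (.Lnswp) below, illustration only:
+ *
+ *	old = *ptr;
+ *	new = (op == SYS_XTENSA_ATOMIC_SET) ? val : old + val;
+ *	*ptr = new;
+ *	return old;
+ */
+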
+ENTRY(fast_syscall_xtensa)
+
+	xsr	a3, EXCSAVE_1		# restore a3, excsave1
+
+	s32i	a7, a2, PT_AREG7	# we need an additional register
+	movi	a7, 4			# sizeof(unsigned int)
+	access_ok a3, a7, a0, a2, .Leac	# a0: scratch reg, a2: sp
+
+	addi	a6, a6, -1		# assuming SYS_XTENSA_ATOMIC_SET = 1
+	_bgeui	a6, SYS_XTENSA_COUNT - 1, .Lill
+	_bnei	a6, SYS_XTENSA_ATOMIC_CMP_SWP - 1, .Lnswp
+
+	/* Fall through for ATOMIC_CMP_SWP. */
+
+.Lswp:	/* Atomic compare and swap */
+
+TRY	l32i	a0, a3, 0		# read old value
+	bne	a0, a4, 1f		# same as old value? jump
+TRY	s32i	a5, a3, 0		# different, modify value
+	l32i	a7, a2, PT_AREG7	# restore a7
+	l32i	a0, a2, PT_AREG0	# restore a0
+	movi	a2, 1			# and return 1
+	addi	a6, a6, 1		# restore a6 (really necessary?)
+	rfe
+
+1:	l32i	a7, a2, PT_AREG7	# restore a7
+	l32i	a0, a2, PT_AREG0	# restore a0
+	movi	a2, 0			# return 0 (note that we cannot set
+	addi	a6, a6, 1		# restore a6 (really necessary?)
+	rfe
+
+.Lnswp:	/* Atomic set, add, and exg_add. */
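+	/* a6 was biased by -1 above, so a6 == 0 here selects
+	 * SYS_XTENSA_ATOMIC_SET: moveqz below then replaces the computed
+	 * sum with the raw argument. */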
+
+TRY	l32i	a7, a3, 0		# orig
+	add	a0, a4, a7		# + arg
+	moveqz	a0, a4, a6		# set
+TRY	s32i	a0, a3, 0		# write new value
+
+	mov	a0, a2
+	mov	a2, a7
+	l32i	a7, a0, PT_AREG7	# restore a7
+	l32i	a0, a0, PT_AREG0	# restore a0
+	addi	a6, a6, 1		# restore a6 (really necessary?)
+	rfe
+
+CATCH
+.Leac:	l32i	a7, a2, PT_AREG7	# restore a7
+	l32i	a0, a2, PT_AREG0	# restore a0
+	movi	a2, -EFAULT
+	rfe
+
+.Lill:	l32i	a7, a2, PT_AREG7	# restore a7
+	l32i	a0, a2, PT_AREG0	# restore a0
+	movi	a2, -EINVAL
+	rfe
+
+
+
+
+/* fast_syscall_spill_registers.
+ *
+ * Entry condition:
+ *
+ *   a0:	trashed, original value saved on stack (PT_AREG0)
+ *   a1:	a1
+ *   a2:	new stack pointer, original in DEPC
+ *   a3:	dispatch table
+ *   depc:	a2, original value saved on stack (PT_DEPC)
+ *   excsave_1:	a3
+ *
+ * Note: We assume the stack pointer is EXC_TABLE_KSTK in the fixup handler.
+ */
+
+ENTRY(fast_syscall_spill_registers)
+
+	/* Register a FIXUP handler (pass current wb as a parameter) */
+
+	movi	a0, fast_syscall_spill_registers_fixup
+	s32i	a0, a3, EXC_TABLE_FIXUP
+	rsr	a0, WINDOWBASE
+	s32i	a0, a3, EXC_TABLE_PARAM
+
+	/* Save a3 and SAR on stack. */
+
+	rsr	a0, SAR
+	xsr	a3, EXCSAVE_1		# restore a3 and excsave_1
+	s32i	a3, a2, PT_AREG3
+	s32i	a4, a2, PT_AREG4
+	s32i	a0, a2, PT_AREG5	# store SAR to PT_AREG5
+
+	/* The spill routine might clobber a7, a11, and a15. */
+
+	s32i	a7, a2, PT_AREG7
+	s32i	a11, a2, PT_AREG11
+	s32i	a15, a2, PT_AREG15
+
+	call0	_spill_registers	# destroys a3, a4, and SAR
+
+	/* Advance PC, restore registers and SAR, and return from exception. */
+
+	l32i	a3, a2, PT_AREG5
+	l32i	a4, a2, PT_AREG4
+	l32i	a0, a2, PT_AREG0
+	wsr	a3, SAR
+	l32i	a3, a2, PT_AREG3
+
+	/* Restore clobbered registers. */
+
+	l32i	a7, a2, PT_AREG7
+	l32i	a11, a2, PT_AREG11
+	l32i	a15, a2, PT_AREG15
+
+	movi	a2, 0
+	rfe
+
+/* Fixup handler.
+ *
+ * We get here if the spill routine causes an exception, e.g. tlb miss.
+ * We basically restore WINDOWBASE and WINDOWSTART to the condition when
+ * we entered the spill routine and jump to the user exception handler.
+ *
+ * a0: value of depc, original value in depc
+ * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE
+ * a3: exctable, original value in excsave1
+ */
+
+fast_syscall_spill_registers_fixup:
+
+	rsr	a2, WINDOWBASE	# get current windowbase (a2 is saved)
+	xsr	a0, DEPC	# restore depc and a0
+	ssl	a2		# set shift (32 - WB)
+
+	/* We need to make sure the current registers (a0-a3) are preserved.
+	 * To do this, we simply set the bit for the current window frame
+	 * in WS, so that the exception handlers save them to the task stack.
+	 */
+
+	rsr	a3, EXCSAVE_1	# get spill-mask
+	slli	a2, a3, 1	# shift left by one
+
+	slli	a3, a2, 32-WSBITS
+	src	a2, a2, a3	# a2 = xxwww1yyxxxwww1yy......
+	wsr	a2, WINDOWSTART	# set corrected windowstart
+
+	movi	a3, exc_table
+	l32i	a2, a3, EXC_TABLE_DOUBLE_SAVE	# restore a2
+	l32i	a3, a3, EXC_TABLE_PARAM	# original WB (in user task)
+
+	/* Return to the original (user task) WINDOWBASE.
+	 * We leave the following frame behind:
+	 * a0, a1, a2	same
+	 * a3:		trashed (saved in excsave_1)
+	 * depc:	depc (we have to return to that address)
+	 * excsave_1:	a3
+	 */
+
+	wsr	a3, WINDOWBASE
+	rsync
+
+	/* We are now in the original frame when we entered _spill_registers:
+	 *  a0: return address
+	 *  a1: used, stack pointer
+	 *  a2: kernel stack pointer
+	 *  a3: available, saved in EXCSAVE_1
+	 *  depc: exception address
+	 *  excsave: a3
+	 * Note: This frame might be the same as above.
+	 */
+
+	/* Setup stack pointer. */
+
+	addi	a2, a2, -PT_USER_SIZE
+	s32i	a0, a2, PT_AREG0
+
+	/* Make sure we return to this fixup handler. */
+
+	movi	a3, fast_syscall_spill_registers_fixup_return
+	s32i	a3, a2, PT_DEPC		# setup depc
+
+	/* Jump to the exception handler. */
+
+	movi	a3, exc_table
+	rsr	a0, EXCCAUSE
+        addx4	a0, a0, a3              	# find entry in table
+        l32i	a0, a0, EXC_TABLE_FAST_USER     # load handler
+        jx	a0
+
+fast_syscall_spill_registers_fixup_return:
+
+	/* When we return here, all registers have been restored (a2: DEPC) */
+
+	wsr	a2, DEPC		# exception address
+
+	/* Restore fixup handler. */
+
+	xsr	a3, EXCSAVE_1
+	movi	a2, fast_syscall_spill_registers_fixup
+	s32i	a2, a3, EXC_TABLE_FIXUP
+	rsr	a2, WINDOWBASE
+	s32i	a2, a3, EXC_TABLE_PARAM
+	l32i	a2, a3, EXC_TABLE_KSTK
+
+	/* Load WB at the time the exception occurred. */
+
+	rsr	a3, SAR			# WB is still in SAR
+	neg	a3, a3
+	wsr	a3, WINDOWBASE
+	rsync
+
+	/* Restore a3 and return. */
+
+	movi	a3, exc_table
+	xsr	a3, EXCSAVE_1
+
+	rfde
+
+
+/*
+ * spill all registers.
+ *
+ * This is not a real function. The following conditions must be met:
+ *
+ *  - must be called with call0.
+ *  - uses a3, a4 and SAR.
+ *  - the last 'valid' register of each frame is clobbered.
+ *  - the caller must have registered a fixup handler
+ *    (or be inside a critical section)
+ *  - PS_EXCM must be set (PS_WOE cleared?)
+ */
+
+ENTRY(_spill_registers)
+
+	/*
+	 * Rotate ws so that the current windowbase is at bit 0.
+	 * Assume ws = xxxwww1yy (www1 current window frame).
+	 * Rotate ws right so that a4 = yyxxxwww1.
+	 */
+
+	rsr	a4, WINDOWBASE
+	rsr	a3, WINDOWSTART		# a3 = xxxwww1yy
+	ssr	a4			# holds WB
+	slli	a4, a3, WSBITS
+	or	a3, a3, a4		# a3 = xxxwww1yyxxxwww1yy
+	srl	a3, a3			# a3 = 00xxxwww1yyxxxwww1
+
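+	/* In C, this rotation is the same computation used later in
+	 * xtensa_elf_core_copy_regs():
+	 *
+	 *	ws = ((ws >> wb) | (ws << (WSBITS - wb))) & ((1 << WSBITS) - 1);
+	 */
+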
+	/* We are done if there is no more than the current register frame. */
+
+	extui	a3, a3, 1, WSBITS-1	# a3 = 0yyxxxwww
+	movi	a4, (1 << (WSBITS-1))
+	_beqz	a3, .Lnospill		# only one active frame? jump
+
+	/* We want 1 at the top, so that we return to the current windowbase */
+
+	or	a3, a3, a4		# 1yyxxxwww
+
+	/* Skip empty frames - get 'oldest' WINDOWSTART-bit. */
+
+	wsr	a3, WINDOWSTART		# save shifted windowstart
+	neg	a4, a3
+	and	a3, a4, a3		# first bit set from right: 000010000
+
+	ffs_ws	a4, a3			# a4: shifts to skip empty frames
+	movi	a3, WSBITS
+	sub	a4, a3, a4		# WSBITS-a4:number of 0-bits from right
+	ssr	a4			# save in SAR for later.
+
+	rsr	a3, WINDOWBASE
+	add	a3, a3, a4
+	wsr	a3, WINDOWBASE
+	rsync
+
+	rsr	a3, WINDOWSTART
+	srl	a3, a3			# shift windowstart
+
+	/* WB is now just one frame below the oldest frame in the register
+	   window. WS is shifted so the oldest frame is in bit 0, thus, WB
+	   and WS differ by one 4-register frame. */
+
+	/* Save frames. Depending on which call was used (call4, call8, or
+	 * call12), we have to save 4, 8, or 12 registers.
+	 */
+
+	_bbsi.l	a3, 1, .Lc4
+	_bbsi.l	a3, 2, .Lc8
+
+	/* Special case: we have a call12-frame starting at a4. */
+
+	_bbci.l	a3, 3, .Lc12	# bit 3 shouldn't be zero! (Jump to Lc12 first)
+
+	s32e	a4, a1, -16	# a1 is valid with an empty spill area
+	l32e	a4, a5, -12
+	s32e	a8, a4, -48
+	mov	a8, a4
+	l32e	a4, a1, -16
+	j	.Lc12c
+
+.Lnospill:
+	ret
+
+.Lloop: _bbsi.l	a3, 1, .Lc4
+	_bbci.l	a3, 2, .Lc12
+
+.Lc8:	s32e	a4, a13, -16
+	l32e	a4, a5, -12
+	s32e	a8, a4, -32
+	s32e	a5, a13, -12
+	s32e	a6, a13, -8
+	s32e	a7, a13, -4
+	s32e	a9, a4, -28
+	s32e	a10, a4, -24
+	s32e	a11, a4, -20
+
+	srli	a11, a3, 2		# shift windowbase by 2
+	rotw	2
+	_bnei	a3, 1, .Lloop
+
+.Lexit: /* Done. Do the final rotation, set WS, and return. */
+
+	rotw	1
+	rsr	a3, WINDOWBASE
+	ssl	a3
+	movi	a3, 1
+	sll	a3, a3
+	wsr	a3, WINDOWSTART
+	ret
+
+.Lc4:	s32e	a4, a9, -16
+	s32e	a5, a9, -12
+	s32e	a6, a9, -8
+	s32e	a7, a9, -4
+
+	srli	a7, a3, 1
+	rotw	1
+	_bnei	a3, 1, .Lloop
+	j	.Lexit
+
+.Lc12:	_bbci.l	a3, 3, .Linvalid_mask	# bit 3 shouldn't be zero!
+
+	/* 12-register frame (call12) */
+
+	l32e	a2, a5, -12
+	s32e	a8, a2, -48
+	mov	a8, a2
+
+.Lc12c: s32e	a9, a8, -44
+	s32e	a10, a8, -40
+	s32e	a11, a8, -36
+	s32e	a12, a8, -32
+	s32e	a13, a8, -28
+	s32e	a14, a8, -24
+	s32e	a15, a8, -20
+	srli	a15, a3, 3
+
+	/* The stack pointer for a4..a7 is out of reach, so we rotate the
+	 * window, grab the stackpointer, and rotate back.
+	 * Alternatively, we could also use the following approach, but that
+	 * makes the fixup routine much more complicated:
+	 * rotw	1
+	 * s32e	a0, a13, -16
+	 * ...
+	 * rotw 2
+	 */
+
+	rotw	1
+	mov	a5, a13
+	rotw	-1
+
+	s32e	a4, a9, -16
+	s32e	a5, a9, -12
+	s32e	a6, a9, -8
+	s32e	a7, a9, -4
+
+	rotw	3
+
+	_beqi	a3, 1, .Lexit
+	j	.Lloop
+
+.Linvalid_mask:
+
+	/* We get here because of an unrecoverable error in the window
+	 * registers. If we are in user space, we kill the application,
+	 * however, this condition is unrecoverable in kernel space.
+	 */
+
+	rsr	a0, PS
+	_bbci.l	a0, PS_UM_BIT, 1f
+
+ 	/* User space: Setup a dummy frame and kill application.
+	 * Note: We assume EXC_TABLE_KSTK contains a valid stack pointer.
+	 */
+
+	movi	a0, 1
+	movi	a1, 0
+
+	wsr	a0, WINDOWSTART
+	wsr	a1, WINDOWBASE
+	rsync
+
+	movi	a0, 0
+
+	movi	a3, exc_table
+	l32i	a1, a3, EXC_TABLE_KSTK
+	wsr	a3, EXCSAVE_1
+
+	movi	a4, (1 << PS_WOE_BIT) | 1
+	wsr	a4, PS
+	rsync
+
+	movi	a6, SIGSEGV
+	movi	a4, do_exit
+	callx4	a4
+
+1:	/* Kernel space: PANIC! */
+
+	wsr	a0, EXCSAVE_1
+	movi	a0, unrecoverable_exception
+	callx0	a0		# should not return
+1:	j	1b
+
+#ifdef CONFIG_MMU
+/*
+ * We should never get here. Bail out!
+ */
+
+ENTRY(fast_second_level_miss_double_kernel)
+
+1:	movi	a0, unrecoverable_exception
+	callx0	a0		# should not return
+1:	j	1b
+
+/* First-level entry handler for user, kernel, and double 2nd-level
+ * TLB miss exceptions.  Note that for now, user and kernel miss
+ * exceptions share the same entry point and are handled identically.
+ *
+ * An old, less-efficient C version of this function used to exist.
+ * We include it below, interleaved as comments, for reference.
+ *
+ * Entry condition:
+ *
+ *   a0:	trashed, original value saved on stack (PT_AREG0)
+ *   a1:	a1
+ *   a2:	new stack pointer, original in DEPC
+ *   a3:	dispatch table
+ *   depc:	a2, original value saved on stack (PT_DEPC)
+ *   excsave_1:	a3
+ *
+ *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
+ *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
+ */
+
+ENTRY(fast_second_level_miss)
+
+	/* Save a1. Note: we don't expect a double exception. */
+
+	s32i	a1, a2, PT_AREG1
+
+	/* We need to map the page of PTEs for the user task.  Find
+	 * the pointer to that page.  Also, it's possible for tsk->mm
+	 * to be NULL while tsk->active_mm is nonzero if we faulted on
+	 * a vmalloc address.  In that rare case, we must use
+	 * active_mm instead to avoid a fault in this handler.  See
+	 *
+	 * http://mail.nl.linux.org/linux-mm/2002-08/msg00258.html
+	 *   (or search Internet on "mm vs. active_mm")
+	 *
+	 *	if (!mm)
+	 *		mm = tsk->active_mm;
+	 *	pgd = pgd_offset (mm, regs->excvaddr);
+	 *	pmd = pmd_offset (pgd, regs->excvaddr);
+	 *	pmdval = *pmd;
+	 */
+
+	GET_CURRENT(a1,a2)
+	l32i	a0, a1, TASK_MM		# tsk->mm
+	beqz	a0, 9f
+
+
+	/* We deliberately destroy a3 that holds the exception table. */
+
+8:	rsr	a3, EXCVADDR		# fault address
+	_PGD_OFFSET(a0, a3, a1)
+	l32i	a0, a0, 0		# read pmdval
+	beqz	a0, 2f
+
+	/* Read ptevaddr and convert to top of page-table page.
+	 *
+	 * 	vpnval = read_ptevaddr_register() & PAGE_MASK;
+	 * 	vpnval += DTLB_WAY_PGTABLE;
+	 *	pteval = mk_pte (virt_to_page(pmd_val(pmdval)), PAGE_KERNEL);
+	 *	write_dtlb_entry (pteval, vpnval);
+	 *
+	 * The messy computation for 'pteval' above really simplifies
+	 * into the following:
+	 *
+	 * pteval = ((pmdval - PAGE_OFFSET) & PAGE_MASK) | PAGE_DIRECTORY
+	 */
+
+	movi	a1, -PAGE_OFFSET
+	add	a0, a0, a1		# pmdval - PAGE_OFFSET
+	extui	a1, a0, 0, PAGE_SHIFT	# ... & PAGE_MASK
+	xor	a0, a0, a1
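+	/* extui extracted the low PAGE_SHIFT bits and the xor cleared them,
+	 * i.e. a0 &= PAGE_MASK without needing a large mask constant. */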
+
+	movi	a1, _PAGE_DIRECTORY
+	or	a0, a0, a1		# ... | PAGE_DIRECTORY
+
+	/*
+	 * We utilize all three wired-ways (7-9) to hold pmd translations.
+	 * Memory regions are mapped to the DTLBs according to bits 28 and 29.
+	 * This allows the three most common regions to be mapped to three
+	 * different DTLBs:
+	 *  0,1 -> way 7	program (0040.0000) and virtual (c000.0000)
+	 *  2   -> way 8	shared libraries (2000.0000)
+	 *  3   -> way 9	stack (3000.0000)
+	 */
+
+	extui	a3, a3, 28, 2		# addr. bit 28 and 29	0,1,2,3
+	rsr	a1, PTEVADDR
+	addx2	a3, a3, a3		# ->			0,3,6,9
+	srli	a1, a1, PAGE_SHIFT
+	extui	a3, a3, 2, 2		# ->			0,0,1,2
+	slli	a1, a1, PAGE_SHIFT	# ptevaddr & PAGE_MASK
+	addi	a3, a3, DTLB_WAY_PGD
+	add	a1, a1, a3		# ... + way_number
+
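+	/* The way selection above, in rough C:
+	 *
+	 *	way = DTLB_WAY_PGD + ((3 * ((excvaddr >> 28) & 3)) >> 2);
+	 *
+	 * which yields the 0,0,1,2 way offsets listed in the table. */
+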
+3:	wdtlb	a0, a1
+	dsync
+
+	/* Exit critical section. */
+
+4:	movi	a3, exc_table		# restore a3
+	movi	a0, 0
+	s32i	a0, a3, EXC_TABLE_FIXUP
+
+	/* Restore the working registers, and return. */
+
+	l32i	a0, a2, PT_AREG0
+	l32i	a1, a2, PT_AREG1
+	l32i	a2, a2, PT_DEPC
+	xsr	a3, EXCSAVE_1
+
+	bgeui	a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
+
+	/* Restore excsave1 and return. */
+
+	rsr	a2, DEPC
+	rfe
+
+	/* Return from double exception. */
+
+1:	xsr	a2, DEPC
+	esync
+	rfde
+
+9:	l32i	a0, a1, TASK_ACTIVE_MM	# unlikely case mm == 0
+	j	8b
+
+#if (DCACHE_WAY_SIZE > PAGE_SIZE)
+
+2:	/* Special case for cache aliasing.
+	 * We (should) only get here if a clear_user_page, copy_user_page
+	 * or the aliased cache flush functions got preemptively interrupted 
+	 * by another task. Re-establish temporary mapping to the 
+	 * TLBTEMP_BASE areas.
+	 */
+
+	/* We shouldn't be in a double exception */
+
+	l32i	a0, a2, PT_DEPC
+	bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 2f
+
+	/* Make sure the exception originated in the special functions */
+
+	movi	a0, __tlbtemp_mapping_start
+	rsr	a3, EPC_1
+	bltu	a3, a0, 2f
+	movi	a0, __tlbtemp_mapping_end
+	bgeu	a3, a0, 2f
+
+	/* Check if excvaddr was in one of the TLBTEMP_BASE areas. */
+
+	movi	a3, TLBTEMP_BASE_1
+	rsr	a0, EXCVADDR
+	bltu	a0, a3, 2f
+
+	addi	a1, a0, -(2 << (DCACHE_ALIAS_ORDER + PAGE_SHIFT))
+	bgeu	a1, a3, 2f
+
+	/* Check if we have to restore an ITLB mapping. */
+
+	movi	a1, __tlbtemp_mapping_itlb
+	rsr	a3, EPC_1
+	sub	a3, a3, a1
+
+	/* Calculate VPN */
+
+	movi	a1, PAGE_MASK
+	and	a1, a1, a0
+
+	/* Jump for ITLB entry */
+
+	bgez	a3, 1f
+
+	/* We can use up to two TLBTEMP areas, one for src and one for dst. */
+
+	extui	a3, a0, PAGE_SHIFT + DCACHE_ALIAS_ORDER, 1
+	add	a1, a3, a1
+
+	/* PPN is in a6 for the first TLBTEMP area and in a7 for the second. */
+
+	mov	a0, a6
+	movnez	a0, a7, a3
+	j	3b
+
+	/* ITLB entry. We only use dst in a6. */
+
+1:	witlb	a6, a1
+	isync
+	j	4b
+
+
+#endif	// DCACHE_WAY_SIZE > PAGE_SIZE
+
+
+2:	/* Invalid PGD, default exception handling */
+
+	movi	a3, exc_table
+	rsr	a1, DEPC
+	xsr	a3, EXCSAVE_1
+	s32i	a1, a2, PT_AREG2
+	s32i	a3, a2, PT_AREG3
+	mov	a1, a2
+
+	rsr	a2, PS
+	bbsi.l	a2, PS_UM_BIT, 1f
+	j	_kernel_exception
+1:	j	_user_exception
+
+
+/*
+ * StoreProhibitedException
+ *
+ * Update the pte and invalidate the itlb mapping for this pte.
+ *
+ * Entry condition:
+ *
+ *   a0:	trashed, original value saved on stack (PT_AREG0)
+ *   a1:	a1
+ *   a2:	new stack pointer, original in DEPC
+ *   a3:	dispatch table
+ *   depc:	a2, original value saved on stack (PT_DEPC)
+ *   excsave_1:	a3
+ *
+ *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
+ *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
+ */
+
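+/* A rough C sketch of the fast path below (not literal kernel source):
+ *
+ *	pte = *pte_offset(pmd_offset(pgd_offset(mm, excvaddr), excvaddr),
+ *			  excvaddr);
+ *	if (pte & _PAGE_WRITABLE) {
+ *		pte |= _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HW_WRITE;
+ *		write pte back and update the DTLB entry for excvaddr;
+ *	} else
+ *		fall back to the C fault handler;
+ */
+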
+ENTRY(fast_store_prohibited)
+
+	/* Save a1 and a4. */
+
+	s32i	a1, a2, PT_AREG1
+	s32i	a4, a2, PT_AREG4
+
+	GET_CURRENT(a1,a2)
+	l32i	a0, a1, TASK_MM		# tsk->mm
+	beqz	a0, 9f
+
+8:	rsr	a1, EXCVADDR		# fault address
+	_PGD_OFFSET(a0, a1, a4)
+	l32i	a0, a0, 0
+	beqz	a0, 2f
+
+	/* Note that we assume _PAGE_WRITABLE_BIT is only set if pte is valid.*/
+
+	_PTE_OFFSET(a0, a1, a4)
+	l32i	a4, a0, 0		# read pteval
+	bbci.l	a4, _PAGE_WRITABLE_BIT, 2f
+
+	movi	a1, _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HW_WRITE
+	or	a4, a4, a1
+	rsr	a1, EXCVADDR
+	s32i	a4, a0, 0
+
+	/* We need to flush the cache if we have page coloring. */
+#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
+	dhwb	a0, 0
+#endif
+	pdtlb	a0, a1
+	wdtlb	a4, a0
+
+	/* Exit critical section. */
+
+	movi	a0, 0
+	s32i	a0, a3, EXC_TABLE_FIXUP
+
+	/* Restore the working registers, and return. */
+
+	l32i	a4, a2, PT_AREG4
+	l32i	a1, a2, PT_AREG1
+	l32i	a0, a2, PT_AREG0
+	l32i	a2, a2, PT_DEPC
+
+	/* Restore excsave1 and a3. */
+
+	xsr	a3, EXCSAVE_1
+	bgeui	a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
+
+	rsr	a2, DEPC
+	rfe
+
+	/* Double exception. Restore FIXUP handler and return. */
+
+1:	xsr	a2, DEPC
+	esync
+	rfde
+
+9:	l32i	a0, a1, TASK_ACTIVE_MM	# unlikely case mm == 0
+	j	8b
+
+2:	/* If there was a problem, handle fault in C */
+
+	rsr	a4, DEPC	# still holds a2
+	xsr	a3, EXCSAVE_1
+	s32i	a4, a2, PT_AREG2
+	s32i	a3, a2, PT_AREG3
+	l32i	a4, a2, PT_AREG4
+	mov	a1, a2
+
+	rsr	a2, PS
+	bbsi.l	a2, PS_UM_BIT, 1f
+	j	_kernel_exception
+1:	j	_user_exception
+#endif /* CONFIG_MMU */
+
+/*
+ * System Calls.
+ *
+ * void system_call (struct pt_regs* regs, int exccause)
+ *                            a2                 a3
+ */
+
+ENTRY(system_call)
+	entry	a1, 32
+
+	/* regs->syscall = regs->areg[2] */
+
+	l32i	a3, a2, PT_AREG2
+	mov	a6, a2
+	movi	a4, do_syscall_trace_enter
+	s32i	a3, a2, PT_SYSCALL
+	callx4	a4
+
+	/* syscall = sys_call_table[syscall_nr] */
+
+	movi	a4, sys_call_table
+	movi	a5, __NR_syscall_count
+	movi	a6, -ENOSYS
+	bgeu	a3, a5, 1f
+
+	addx4	a4, a3, a4
+	l32i	a4, a4, 0
+	movi	a5, sys_ni_syscall
+	beq	a4, a5, 1f
+
+	/* Load args: arg0 - arg5 are passed via regs. */
+
+	l32i	a6, a2, PT_AREG6
+	l32i	a7, a2, PT_AREG3
+	l32i	a8, a2, PT_AREG4
+	l32i	a9, a2, PT_AREG5
+	l32i	a10, a2, PT_AREG8
+	l32i	a11, a2, PT_AREG9
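+
+	/* The user passed the arguments in a6, a3, a4, a5, a8, a9; since
+	 * callx4 shifts the register window by four, loading them into
+	 * a6..a11 here makes the handler see them as a2..a7. */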
+
+	/* Pass one additional argument to the syscall: pt_regs (on stack) */
+	s32i	a2, a1, 0
+
+	callx4	a4
+
+1:	/* regs->areg[2] = return_value */
+
+	s32i	a6, a2, PT_AREG2
+	movi	a4, do_syscall_trace_leave
+	mov	a6, a2
+	callx4	a4
+	retw
+
+
+/*
+ * Create a kernel thread
+ *
+ * int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
+ * a2                    a2                 a3             a4
+ */
+
+ENTRY(kernel_thread)
+	entry	a1, 16
+
+	mov	a5, a2			# preserve fn over syscall
+	mov	a7, a3			# preserve args over syscall
+
+	movi	a3, _CLONE_VM | _CLONE_UNTRACED
+	movi	a2, __NR_clone
+	or	a6, a4, a3		# arg0: flags
+	mov	a3, a1			# arg1: sp
+	syscall
+
+	beq	a3, a1, 1f		# branch if parent
+	mov	a6, a7			# args
+	callx4	a5			# fn(args)
+
+	movi	a2, __NR_exit
+	syscall				# return value of fn(args) still in a6
+
+1:	retw
+
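+/* Roughly equivalent C: the child of clone(flags | CLONE_VM |
+ * CLONE_UNTRACED, sp) calls fn(arg) and then exits with its return
+ * value, while the parent just returns. */
+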
+/*
+ * Do a system call from kernel instead of calling sys_execve, so we end up
+ * with proper pt_regs.
+ *
+ * int kernel_execve(const char *fname, char *const argv[], char *const envp[])
+ * a2                        a2               a3                  a4
+ */
+
+ENTRY(kernel_execve)
+	entry	a1, 16
+	mov	a6, a2			# arg0 is in a6
+	movi	a2, __NR_execve
+	syscall
+
+	retw
+
+/*
+ * Task switch.
+ *
+ * struct task*  _switch_to (struct task* prev, struct task* next)
+ *         a2                              a2                 a3
+ */
+
+ENTRY(_switch_to)
+
+	entry	a1, 16
+
+	mov	a12, a2			# preserve 'prev' (a2)
+	mov	a13, a3			# and 'next' (a3)
+
+	l32i	a4, a2, TASK_THREAD_INFO
+	l32i	a5, a3, TASK_THREAD_INFO
+
+	save_xtregs_user a4 a6 a8 a9 a10 a11 THREAD_XTREGS_USER
+
+	s32i	a0, a12, THREAD_RA	# save return address
+	s32i	a1, a12, THREAD_SP	# save stack pointer
+
+	/* Disable ints while we manipulate the stack pointer. */
+
+	movi	a14, (1 << PS_EXCM_BIT) | LOCKLEVEL
+	xsr	a14, PS
+	rsr	a3, EXCSAVE_1
+	rsync
+	s32i	a3, a3, EXC_TABLE_FIXUP	/* enter critical section */
+
+	/* Switch CPENABLE */
+
+#if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
+	l32i	a3, a5, THREAD_CPENABLE
+	xsr	a3, CPENABLE
+	s32i	a3, a4, THREAD_CPENABLE
+#endif
+
+	/* Flush register file. */
+
+	call0	_spill_registers	# destroys a3, a4, and SAR
+
+	/* Set kernel stack (and leave critical section)
+	 * Note: It's safe to set it here. The stack will not be overwritten
+	 *       because the kernel stack will only be loaded again after
+	 *       we return from kernel space.
+	 */
+
+	rsr	a3, EXCSAVE_1		# exc_table
+	movi	a6, 0
+	addi	a7, a5, PT_REGS_OFFSET
+	s32i	a6, a3, EXC_TABLE_FIXUP
+	s32i	a7, a3, EXC_TABLE_KSTK
+
+	/* restore context of the task that 'next' addresses */
+
+	l32i	a0, a13, THREAD_RA	# restore return address
+	l32i	a1, a13, THREAD_SP	# restore stack pointer
+
+	load_xtregs_user a5 a6 a8 a9 a10 a11 THREAD_XTREGS_USER
+
+	wsr	a14, PS
+	mov	a2, a12			# return 'prev'
+	rsync
+
+	retw
+
+
+ENTRY(ret_from_fork)
+
+	/* void schedule_tail (struct task_struct *prev)
+	 * Note: prev is still in a6 (return value from fake call4 frame)
+	 */
+	movi	a4, schedule_tail
+	callx4	a4
+
+	movi	a4, do_syscall_trace_leave
+	mov	a6, a1
+	callx4	a4
+
+	j	common_exception_return
+
diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S
new file mode 100644
index 00000000..3ef91a73
--- /dev/null
+++ b/arch/xtensa/kernel/head.S
@@ -0,0 +1,244 @@
+/*
+ * arch/xtensa/kernel/head.S
+ *
+ * Xtensa Processor startup code.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
+ * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
+ * Kevin Chea
+ */
+
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <asm/cacheasm.h>
+
+#include <linux/init.h>
+#include <linux/linkage.h>
+
+/*
+ * This module contains the entry code for kernel images. It performs the
+ * minimal setup needed to call the generic C routines.
+ *
+ * Prerequisites:
+ *
+ * - The kernel image has been loaded at the address it was compiled for.
+ * - a2 contains either 0 or a pointer to a list of boot parameters.
+ *   (see setup.c for more details)
+ *
+ */
+
+/*
+ *  _start
+ *
+ *  The bootloader passes a pointer to a list of boot parameters in a2.
+ */
+
+	/* The first bytes of the kernel image must be an instruction, so we
+	 * manually allocate and define the literal constant we need for a jx
+	 * instruction.
+	 */
+
+	__HEAD
+	.globl _start
+_start:	_j	2f
+	.align	4
+1:	.word	_startup
+2:	l32r	a0, 1b
+	jx	a0
+
+	.section .init.text, "ax"
+	.align 4
+_startup:
+
+	/* Disable interrupts and exceptions. */
+
+	movi	a0, LOCKLEVEL
+	wsr	a0, PS
+
+	/* Preserve the pointer to the boot parameter list in EXCSAVE_1 */
+
+	wsr	a2, EXCSAVE_1
+
+	/* Start with a fresh windowbase and windowstart.  */
+
+	movi	a1, 1
+	movi	a0, 0
+	wsr	a1, WINDOWSTART
+	wsr	a0, WINDOWBASE
+	rsync
+
+	/* Set a0 to 0 for the remaining initialization. */
+
+	movi	a0, 0
+
+	/* Clear debugging registers. */
+
+#if XCHAL_HAVE_DEBUG
+	wsr	a0, IBREAKENABLE
+	wsr	a0, ICOUNT
+	movi	a1, 15
+	wsr	a0, ICOUNTLEVEL
+
+	.set	_index, 0
+	.rept	XCHAL_NUM_DBREAK - 1
+	wsr	a0, DBREAKC + _index
+	.set	_index, _index + 1
+	.endr
+#endif
+
+	/* Clear CCOUNT (not really necessary, but nice) */
+
+	wsr	a0, CCOUNT	# not really necessary, but nice
+
+	/* Disable zero-loops. */
+
+#if XCHAL_HAVE_LOOPS
+	wsr	a0, LCOUNT
+#endif
+
+	/* Disable all timers. */
+
+	.set	_index, 0
+	.rept	XCHAL_NUM_TIMERS - 1
+	wsr	a0, CCOMPARE + _index
+	.set	_index, _index + 1
+	.endr
+
+	/* Interrupt initialization. */
+
+	movi	a2, XCHAL_INTTYPE_MASK_SOFTWARE | XCHAL_INTTYPE_MASK_EXTERN_EDGE
+	wsr	a0, INTENABLE
+	wsr	a2, INTCLEAR
+
+	/* Disable coprocessors. */
+
+#if XCHAL_CP_NUM > 0
+	wsr	a0, CPENABLE
+#endif
+
+	/* Set PS.INTLEVEL=1, PS.WOE=0, kernel stack, PS.EXCM=0
+	 *
+	 * Note: PS.EXCM must be cleared before using any loop
+	 *	 instructions; otherwise, they are silently disabled, and
+	 * 	 at most one iteration of the loop is executed.
+	 */
+
+	movi	a1, 1
+	wsr	a1, PS
+	rsync
+
+	/*  Initialize the caches.
+	 *  a2, a3 are just working registers (clobbered).
+	 */
+
+#if XCHAL_DCACHE_LINE_LOCKABLE
+	___unlock_dcache_all a2 a3
+#endif
+
+#if XCHAL_ICACHE_LINE_LOCKABLE
+	___unlock_icache_all a2 a3
+#endif
+
+	___invalidate_dcache_all a2 a3
+	___invalidate_icache_all a2 a3
+
+	isync
+
+	/* Unpack data sections
+	 *
+	 * The linker script used to build the Linux kernel image
+	 * creates a table located at __boot_reloc_table_start
+ * that contains information about which data needs to be unpacked.
+	 *
+	 * Uses a2-a7.
+	 */
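+
+	/* In rough C, each 3-word table entry (dst_start, dst_end, src) is:
+	 *
+	 *	if (dst_start != dst_end && dst_start != src)
+	 *		memcpy(dst_start, src, dst_end - dst_start);
+	 */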
+
+	movi	a2, __boot_reloc_table_start
+	movi	a3, __boot_reloc_table_end
+
+1:	beq	a2, a3, 3f	# no more entries?
+	l32i	a4, a2, 0	# start destination (in RAM)
+	l32i	a5, a2, 4	# end destination (in RAM)
+	l32i	a6, a2, 8	# start source (in ROM)
+	addi	a2, a2, 12	# next entry
+	beq	a4, a5, 1b	# skip, empty entry
+	beq	a4, a6, 1b	# skip, source and dest. are the same
+
+2:	l32i	a7, a6, 0	# load word
+	addi	a6, a6, 4
+	s32i	a7, a4, 0	# store word
+	addi	a4, a4, 4
+	bltu	a4, a5, 2b
+	j	1b
+
+3:
+	/* All code and initialized data segments have been copied.
+	 * Now clear the BSS segment.
+	 */
+
+	movi	a2, __bss_start	# start of BSS
+	movi	a3, __bss_stop	# end of BSS
+
+	__loopt	a2, a3, a4, 2
+	s32i	a0, a2, 0
+	__endla	a2, a4, 4
+
+#if XCHAL_DCACHE_IS_WRITEBACK
+
+	/* After unpacking, flush the writeback cache to memory so the
+	 * instructions/data are available.
+	 */
+
+	___flush_dcache_all a2 a3
+#endif
+
+	/* Setup stack and enable window exceptions (keep irqs disabled) */
+
+	movi	a1, init_thread_union
+	addi	a1, a1, KERNEL_STACK_SIZE
+
+	movi	a2, 0x00040001		# WOE=1, INTLEVEL=1, UM=0
+	wsr	a2, PS			# (enable reg-windows; progmode stack)
+	rsync
+
+	/* Set up EXCSAVE[DEBUGLEVEL] to point to the Debug Exception Handler.*/
+
+	movi	a2, debug_exception
+	wsr	a2, EXCSAVE + XCHAL_DEBUGLEVEL
+
+	/* Set up EXCSAVE[1] to point to the exc_table. */
+
+	movi	a6, exc_table
+	xsr	a6, EXCSAVE_1
+
+	/* init_arch kick-starts the linux kernel */
+
+	movi	a4, init_arch
+	callx4	a4
+
+	movi	a4, start_kernel
+	callx4	a4
+
+should_never_return:
+	j	should_never_return
+
+
+/*
+ * BSS section
+ */
+	
+__PAGE_ALIGNED_BSS
+#ifdef CONFIG_MMU
+ENTRY(swapper_pg_dir)
+	.fill	PAGE_SIZE, 1, 0
+#endif
+ENTRY(empty_zero_page)
+	.fill	PAGE_SIZE, 1, 0
diff --git a/arch/xtensa/kernel/init_task.c b/arch/xtensa/kernel/init_task.c
new file mode 100644
index 00000000..cd122fb7
--- /dev/null
+++ b/arch/xtensa/kernel/init_task.c
@@ -0,0 +1,31 @@
+/*
+ * arch/xtensa/kernel/init_task.c
+ *
+ * Xtensa Processor version.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2007 Tensilica Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ */
+
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/init_task.h>
+#include <linux/module.h>
+#include <linux/mqueue.h>
+
+#include <asm/uaccess.h>
+
+static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
+static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
+union thread_union init_thread_union __init_task_data =
+	{ INIT_THREAD_INFO(init_task) };
+
+struct task_struct init_task = INIT_TASK(init_task);
+
+EXPORT_SYMBOL(init_task);
diff --git a/arch/xtensa/kernel/io.c b/arch/xtensa/kernel/io.c
new file mode 100644
index 00000000..5b65269b
--- /dev/null
+++ b/arch/xtensa/kernel/io.c
@@ -0,0 +1,75 @@
+/*
+ * arch/xtensa/io.c
+ *
+ * IO primitives
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * Copied from sparc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ *
+ */
+
+#include <asm/io.h>
+#include <asm/byteorder.h>
+
+void outsb(unsigned long addr, const void *src, unsigned long count) {
+        while (count) {
+                count -= 1;
+                writeb(*(const char *)src, addr);
+                src += 1;
+                addr += 1;
+        }
+}
+
+void outsw(unsigned long addr, const void *src, unsigned long count) {
+        while (count) {
+                count -= 2;
+                writew(*(const short *)src, addr);
+                src += 2;
+                addr += 2;
+        }
+}
+
+void outsl(unsigned long addr, const void *src, unsigned long count) {
+        while (count) {
+                count -= 4;
+                writel(*(const long *)src, addr);
+                src += 4;
+                addr += 4;
+        }
+}
+
+void insb(unsigned long addr, void *dst, unsigned long count) {
+        while (count) {
+                count -= 1;
+                *(unsigned char *)dst = readb(addr);
+                dst += 1;
+                addr += 1;
+        }
+}
+
+void insw(unsigned long addr, void *dst, unsigned long count) {
+        while (count) {
+                count -= 2;
+                *(unsigned short *)dst = readw(addr);
+                dst += 2;
+                addr += 2;
+        }
+}
+
+void insl(unsigned long addr, void *dst, unsigned long count) {
+        while (count) {
+                count -= 4;
+                /*
+                 * XXX I am sure we are in for an unaligned trap here.
+                 */
+                *(unsigned long *)dst = readl(addr);
+                dst += 4;
+                addr += 4;
+        }
+}
diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c
new file mode 100644
index 00000000..4340ee07
--- /dev/null
+++ b/arch/xtensa/kernel/irq.c
@@ -0,0 +1,151 @@
+/*
+ * linux/arch/xtensa/kernel/irq.c
+ *
+ * Xtensa built-in interrupt controller and some generic functions copied
+ * from i386.
+ *
+ * Copyright (C) 2002 - 2006 Tensilica, Inc.
+ * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
+ *
+ *
+ * Chris Zankel <chris@zankel.net>
+ * Kevin Chea
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kernel_stat.h>
+
+#include <asm/uaccess.h>
+#include <asm/platform.h>
+
+static unsigned int cached_irq_mask;
+
+atomic_t irq_err_count;
+
+/*
+ * do_IRQ handles all normal device IRQ's (the special
+ * SMP cross-CPU interrupts have their own specific
+ * handlers).
+ */
+
+asmlinkage void do_IRQ(int irq, struct pt_regs *regs)
+{
+	struct pt_regs *old_regs = set_irq_regs(regs);
+
+	if (irq >= NR_IRQS) {
+		printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
+				__func__, irq);
+	}
+
+	irq_enter();
+
+#ifdef CONFIG_DEBUG_STACKOVERFLOW
+	/* Debugging check for stack overflow: is there less than 1KB free? */
+	{
+		unsigned long sp;
+
+		__asm__ __volatile__ ("mov %0, a1\n" : "=a" (sp));
+		sp &= THREAD_SIZE - 1;
+
+		if (unlikely(sp < (sizeof(struct thread_info) + 1024)))
+			printk("Stack overflow in do_IRQ: %ld\n",
+			       sp - sizeof(struct thread_info));
+	}
+#endif
+	generic_handle_irq(irq);
+
+	irq_exit();
+	set_irq_regs(old_regs);
+}
+
+int arch_show_interrupts(struct seq_file *p, int prec)
+{
+	seq_printf(p, "%*s: ", prec, "ERR");
+	seq_printf(p, "%10u\n", atomic_read(&irq_err_count));
+	return 0;
+}
+
+static void xtensa_irq_mask(struct irq_data *d)
+{
+	cached_irq_mask &= ~(1 << d->irq);
+	set_sr (cached_irq_mask, INTENABLE);
+}
+
+static void xtensa_irq_unmask(struct irq_data *d)
+{
+	cached_irq_mask |= 1 << d->irq;
+	set_sr (cached_irq_mask, INTENABLE);
+}
+
+static void xtensa_irq_enable(struct irq_data *d)
+{
+	variant_irq_enable(d->irq);
+	xtensa_irq_unmask(d);
+}
+
+static void xtensa_irq_disable(struct irq_data *d)
+{
+	xtensa_irq_mask(d);
+	variant_irq_disable(d->irq);
+}
+
+static void xtensa_irq_ack(struct irq_data *d)
+{
+	set_sr(1 << d->irq, INTCLEAR);
+}
+
+static int xtensa_irq_retrigger(struct irq_data *d)
+{
+	set_sr (1 << d->irq, INTSET);
+	return 1;
+}
+
+
+static struct irq_chip xtensa_irq_chip = {
+	.name		= "xtensa",
+	.irq_enable	= xtensa_irq_enable,
+	.irq_disable	= xtensa_irq_disable,
+	.irq_mask	= xtensa_irq_mask,
+	.irq_unmask	= xtensa_irq_unmask,
+	.irq_ack	= xtensa_irq_ack,
+	.irq_retrigger	= xtensa_irq_retrigger,
+};
+
+void __init init_IRQ(void)
+{
+	int index;
+
+	for (index = 0; index < XTENSA_NR_IRQS; index++) {
+		int mask = 1 << index;
+
+		if (mask & XCHAL_INTTYPE_MASK_SOFTWARE)
+			irq_set_chip_and_handler(index, &xtensa_irq_chip,
+						 handle_simple_irq);
+
+		else if (mask & XCHAL_INTTYPE_MASK_EXTERN_EDGE)
+			irq_set_chip_and_handler(index, &xtensa_irq_chip,
+						 handle_edge_irq);
+
+		else if (mask & XCHAL_INTTYPE_MASK_EXTERN_LEVEL)
+			irq_set_chip_and_handler(index, &xtensa_irq_chip,
+						 handle_level_irq);
+
+		else if (mask & XCHAL_INTTYPE_MASK_TIMER)
+			irq_set_chip_and_handler(index, &xtensa_irq_chip,
+						 handle_edge_irq);
+
+		else	/* XCHAL_INTTYPE_MASK_WRITE_ERROR */
+			/* XCHAL_INTTYPE_MASK_NMI */
+
+			irq_set_chip_and_handler(index, &xtensa_irq_chip,
+						 handle_level_irq);
+	}
+
+	cached_irq_mask = 0;
+
+	variant_init_irq();
+}
diff --git a/arch/xtensa/kernel/module.c b/arch/xtensa/kernel/module.c
new file mode 100644
index 00000000..451dda92
--- /dev/null
+++ b/arch/xtensa/kernel/module.c
@@ -0,0 +1,192 @@
+/*
+ * arch/xtensa/kernel/module.c
+ *
+ * Module support.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2006 Tensilica Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleloader.h>
+#include <linux/elf.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/cache.h>
+
+#undef DEBUG_RELOCATE
+
+static int
+decode_calln_opcode (unsigned char *location)
+{
+#ifdef __XTENSA_EB__
+	return (location[0] & 0xf0) == 0x50;
+#endif
+#ifdef __XTENSA_EL__
+	return (location[0] & 0xf) == 0x5;
+#endif
+}
+
+static int
+decode_l32r_opcode (unsigned char *location)
+{
+#ifdef __XTENSA_EB__
+	return (location[0] & 0xf0) == 0x10;
+#endif
+#ifdef __XTENSA_EL__
+	return (location[0] & 0xf) == 0x1;
+#endif
+}
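+
+/* Both helpers inspect only the first opcode byte: on big-endian cores the
+ * major opcode sits in the high nibble (0x5x = call, 0x1x = l32r), on
+ * little-endian cores in the low nibble. */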
+
+int apply_relocate_add(Elf32_Shdr *sechdrs,
+		       const char *strtab,
+		       unsigned int symindex,
+		       unsigned int relsec,
+		       struct module *mod)
+{
+	unsigned int i;
+        Elf32_Rela *rela = (void *)sechdrs[relsec].sh_addr;
+	Elf32_Sym *sym;
+	unsigned char *location;
+	uint32_t value;
+
+#ifdef DEBUG_RELOCATE
+	printk("Applying relocate section %u to %u\n", relsec,
+	       sechdrs[relsec].sh_info);
+#endif
+	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) {
+		location = (char *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+			+ rela[i].r_offset;
+		sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
+			+ ELF32_R_SYM(rela[i].r_info);
+		value = sym->st_value + rela[i].r_addend;
+
+		switch (ELF32_R_TYPE(rela[i].r_info)) {
+		case R_XTENSA_NONE:
+		case R_XTENSA_DIFF8:
+		case R_XTENSA_DIFF16:
+		case R_XTENSA_DIFF32:
+		case R_XTENSA_ASM_EXPAND:
+			break;
+
+		case R_XTENSA_32:
+		case R_XTENSA_PLT:
+			*(uint32_t *)location += value;
+			break;
+
+		case R_XTENSA_SLOT0_OP:
+			if (decode_calln_opcode(location)) {
+				value -= ((unsigned long)location & -4) + 4;
+				if ((value & 3) != 0 ||
+				    ((value + (1 << 19)) >> 20) != 0) {
+					printk("%s: relocation out of range, "
+					       "section %d reloc %d "
+					       "sym '%s'\n",
+					       mod->name, relsec, i,
+					       strtab + sym->st_name);
+					return -ENOEXEC;
+				}
+				value = (signed int)value >> 2;
+#ifdef __XTENSA_EB__
+				location[0] = ((location[0] & ~0x3) |
+					    ((value >> 16) & 0x3));
+				location[1] = (value >> 8) & 0xff;
+				location[2] = value & 0xff;
+#endif
+#ifdef __XTENSA_EL__
+				location[0] = ((location[0] & ~0xc0) |
+					    ((value << 6) & 0xc0));
+				location[1] = (value >> 2) & 0xff;
+				location[2] = (value >> 10) & 0xff;
+#endif
+			} else if (decode_l32r_opcode(location)) {
+				value -= (((unsigned long)location + 3) & -4);
+				if ((value & 3) != 0 ||
+				    (signed int)value >> 18 != -1) {
+					printk("%s: relocation out of range, "
+					       "section %d reloc %d "
+					       "sym '%s'\n",
+					       mod->name, relsec, i,
+					       strtab + sym->st_name);
+					return -ENOEXEC;
+				}
+				value = (signed int)value >> 2;
+
+#ifdef __XTENSA_EB__
+				location[1] = (value >> 8) & 0xff;
+				location[2] = value & 0xff;
+#endif
+#ifdef __XTENSA_EL__
+				location[1] = value & 0xff;
+				location[2] = (value >> 8) & 0xff;
+#endif
+			}
+			/* FIXME: Ignore any other opcodes.  The Xtensa
+			   assembler currently assumes that the linker will
+			   always do relaxation and so all PC-relative
+			   operands need relocations.  (The assembler also
+			   writes out the tentative PC-relative values,
+			   assuming no link-time relaxation, so it is usually
+			   safe to ignore the relocations.)  If the
+			   assembler's "--no-link-relax" flag can be made to
+			   work, and if all kernel modules can be assembled
+			   with that flag, then unexpected relocations could
+			   be detected here.  */
+			break;
+
+		case R_XTENSA_SLOT1_OP:
+		case R_XTENSA_SLOT2_OP:
+		case R_XTENSA_SLOT3_OP:
+		case R_XTENSA_SLOT4_OP:
+		case R_XTENSA_SLOT5_OP:
+		case R_XTENSA_SLOT6_OP:
+		case R_XTENSA_SLOT7_OP:
+		case R_XTENSA_SLOT8_OP:
+		case R_XTENSA_SLOT9_OP:
+		case R_XTENSA_SLOT10_OP:
+		case R_XTENSA_SLOT11_OP:
+		case R_XTENSA_SLOT12_OP:
+		case R_XTENSA_SLOT13_OP:
+		case R_XTENSA_SLOT14_OP:
+			printk("%s: unexpected FLIX relocation: %u\n",
+			       mod->name,
+			       ELF32_R_TYPE(rela[i].r_info));
+			return -ENOEXEC;
+
+		case R_XTENSA_SLOT0_ALT:
+		case R_XTENSA_SLOT1_ALT:
+		case R_XTENSA_SLOT2_ALT:
+		case R_XTENSA_SLOT3_ALT:
+		case R_XTENSA_SLOT4_ALT:
+		case R_XTENSA_SLOT5_ALT:
+		case R_XTENSA_SLOT6_ALT:
+		case R_XTENSA_SLOT7_ALT:
+		case R_XTENSA_SLOT8_ALT:
+		case R_XTENSA_SLOT9_ALT:
+		case R_XTENSA_SLOT10_ALT:
+		case R_XTENSA_SLOT11_ALT:
+		case R_XTENSA_SLOT12_ALT:
+		case R_XTENSA_SLOT13_ALT:
+		case R_XTENSA_SLOT14_ALT:
+			printk("%s: unexpected ALT relocation: %u\n",
+			       mod->name,
+			       ELF32_R_TYPE(rela[i].r_info));
+			return -ENOEXEC;
+
+		default:
+			printk("%s: unexpected relocation: %u\n",
+			       mod->name,
+			       ELF32_R_TYPE(rela[i].r_info));
+			return -ENOEXEC;
+		}
+	}
+	return 0;
+}
diff --git a/arch/xtensa/kernel/pci-dma.c b/arch/xtensa/kernel/pci-dma.c
new file mode 100644
index 00000000..2783fda7
--- /dev/null
+++ b/arch/xtensa/kernel/pci-dma.c
@@ -0,0 +1,94 @@
+/*
+ * arch/xtensa/kernel/pci-dma.c
+ *
+ * DMA coherent memory allocation.
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * Copyright (C) 2002 - 2005 Tensilica Inc.
+ *
+ * Based on version for i386.
+ *
+ * Chris Zankel <chris@zankel.net>
+ * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
+ */
+
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/pci.h>
+#include <linux/gfp.h>
+#include <asm/io.h>
+#include <asm/cacheflush.h>
+
+/*
+ * Note: We assume that the full memory space is always mapped to 'kseg'
+ *	 Otherwise we have to use page attributes (not implemented).
+ */
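+
+/* Because the cached and bypass KSEG windows alias the same physical
+ * memory, the uncached alias of a cached kernel address is simply
+ *
+ *	uncached = vaddr + XCHAL_KSEG_BYPASS_VADDR - XCHAL_KSEG_CACHED_VADDR;
+ *
+ * which is what dma_alloc_coherent() returns below. */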
+
+void *
+dma_alloc_coherent(struct device *dev,size_t size,dma_addr_t *handle,gfp_t flag)
+{
+	unsigned long ret;
+	unsigned long uncached = 0;
+
+	/* ignore region specifiers */
+
+	flag &= ~(__GFP_DMA | __GFP_HIGHMEM);
+
+	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
+		flag |= GFP_DMA;
+	ret = (unsigned long)__get_free_pages(flag, get_order(size));
+
+	if (ret == 0)
+		return NULL;
+
+	/* We currently don't support coherent memory outside KSEG */
+
+	if (ret < XCHAL_KSEG_CACHED_VADDR
+	    || ret >= XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE)
+		BUG();
+
+
+	if (ret != 0) {
+		memset((void*) ret, 0, size);
+		uncached = ret+XCHAL_KSEG_BYPASS_VADDR-XCHAL_KSEG_CACHED_VADDR;
+		*handle = virt_to_bus((void*)ret);
+		__flush_invalidate_dcache_range(ret, size);
+	}
+
+	return (void*)uncached;
+}
+
+void dma_free_coherent(struct device *hwdev, size_t size,
+			 void *vaddr, dma_addr_t dma_handle)
+{
+	long addr=(long)vaddr+XCHAL_KSEG_CACHED_VADDR-XCHAL_KSEG_BYPASS_VADDR;
+
+	if (addr < 0 || addr >= XCHAL_KSEG_SIZE)
+		BUG();
+
+	free_pages(addr, get_order(size));
+}
+
+
+void consistent_sync(void *vaddr, size_t size, int direction)
+{
+	switch (direction) {
+	case PCI_DMA_NONE:
+		BUG();
+	case PCI_DMA_FROMDEVICE:        /* invalidate only */
+		__invalidate_dcache_range((unsigned long)vaddr,
+				          (unsigned long)size);
+		break;
+
+	case PCI_DMA_TODEVICE:          /* writeback only */
+	case PCI_DMA_BIDIRECTIONAL:     /* writeback and invalidate */
+		__flush_invalidate_dcache_range((unsigned long)vaddr,
+				    		(unsigned long)size);
+		break;
+	}
+}
diff --git a/arch/xtensa/kernel/pci.c b/arch/xtensa/kernel/pci.c
new file mode 100644
index 00000000..eb30e356
--- /dev/null
+++ b/arch/xtensa/kernel/pci.c
@@ -0,0 +1,384 @@
+/*
+ * arch/xtensa/kernel/pci.c
+ *
+ * PCI bios-type initialisation for PCI machines
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * Copyright (C) 2001-2005 Tensilica Inc.
+ *
+ * Based largely on work from Cort (ppc/kernel/pci.c)
+ * IO functions copied from sparc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/bootmem.h>
+
+#include <asm/pci-bridge.h>
+#include <asm/platform.h>
+
+#undef DEBUG
+
+#ifdef DEBUG
+#define DBG(x...) printk(x)
+#else
+#define DBG(x...)
+#endif
+
+/* PCI Controller */
+
+
+/*
+ * pcibios_alloc_controller
+ * pcibios_enable_device
+ * pcibios_fixups
+ * pcibios_align_resource
+ * pcibios_fixup_bus
+ * pcibios_setup
+ * pci_bus_add_device
+ * pci_mmap_page_range
+ */
+
+struct pci_controller* pci_ctrl_head;
+struct pci_controller** pci_ctrl_tail = &pci_ctrl_head;
+
+static int pci_bus_count;
+
+/*
+ * We need to avoid collisions with `mirrored' VGA ports
+ * and other strange ISA hardware, so we always want the
+ * addresses to be allocated in the 0x000-0x0ff region
+ * modulo 0x400.
+ *
+ * Why? Because some silly external IO cards only decode
+ * the low 10 bits of the IO address. The 0x00-0xff region
+ * is reserved for motherboard devices that decode all 16
+ * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
+ * but we want to try to avoid allocating at 0x2900-0x2bff
+ * which might be mirrored at 0x0100-0x03ff.
+ */
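+
+/* Example: a request at 0x2900 has one of bits 0x300 set, so it is
+ * rounded up to the next 1KB boundary:
+ *	(0x2900 + 0x3ff) & ~0x3ff == 0x2c00. */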
+resource_size_t
+pcibios_align_resource(void *data, const struct resource *res,
+		       resource_size_t size, resource_size_t align)
+{
+	struct pci_dev *dev = data;
+	resource_size_t start = res->start;
+
+	if (res->flags & IORESOURCE_IO) {
+		if (size > 0x100) {
+			printk(KERN_ERR "PCI: I/O Region %s/%d too large"
+			       " (%ld bytes)\n", pci_name(dev),
+			       res - dev->resource, size);
+		}
+
+		if (start & 0x300)
+			start = (start + 0x3ff) & ~0x3ff;
+	}
+
+	return start;
+}
+
+int
+pcibios_enable_resources(struct pci_dev *dev, int mask)
+{
+	u16 cmd, old_cmd;
+	int idx;
+	struct resource *r;
+
+	pci_read_config_word(dev, PCI_COMMAND, &cmd);
+	old_cmd = cmd;
+	for(idx=0; idx<6; idx++) {
+		r = &dev->resource[idx];
+		if (!r->start && r->end) {
+			printk (KERN_ERR "PCI: Device %s not available because "
+				"of resource collisions\n", pci_name(dev));
+			return -EINVAL;
+		}
+		if (r->flags & IORESOURCE_IO)
+			cmd |= PCI_COMMAND_IO;
+		if (r->flags & IORESOURCE_MEM)
+			cmd |= PCI_COMMAND_MEMORY;
+	}
+	if (dev->resource[PCI_ROM_RESOURCE].start)
+		cmd |= PCI_COMMAND_MEMORY;
+	if (cmd != old_cmd) {
+		printk("PCI: Enabling device %s (%04x -> %04x)\n",
+			pci_name(dev), old_cmd, cmd);
+		pci_write_config_word(dev, PCI_COMMAND, cmd);
+	}
+	return 0;
+}
+
+struct pci_controller * __init pcibios_alloc_controller(void)
+{
+	struct pci_controller *pci_ctrl;
+
+	pci_ctrl = (struct pci_controller *)alloc_bootmem(sizeof(*pci_ctrl));
+	memset(pci_ctrl, 0, sizeof(struct pci_controller));
+
+	*pci_ctrl_tail = pci_ctrl;
+	pci_ctrl_tail = &pci_ctrl->next;
+
+	return pci_ctrl;
+}
+
+static void __init pci_controller_apertures(struct pci_controller *pci_ctrl,
+					    struct list_head *resources)
+{
+	struct resource *res;
+	unsigned long io_offset;
+	int i;
+
+	io_offset = (unsigned long)pci_ctrl->io_space.base;
+	res = &pci_ctrl->io_resource;
+	if (!res->flags) {
+		if (io_offset)
+			printk (KERN_ERR "I/O resource not set for host"
+				" bridge %d\n", pci_ctrl->index);
+		res->start = 0;
+		res->end = IO_SPACE_LIMIT;
+		res->flags = IORESOURCE_IO;
+	}
+	res->start += io_offset;
+	res->end += io_offset;
+	pci_add_resource_offset(resources, res, io_offset);
+
+	for (i = 0; i < 3; i++) {
+		res = &pci_ctrl->mem_resources[i];
+		if (!res->flags) {
+			if (i > 0)
+				continue;
+			printk(KERN_ERR "Memory resource not set for "
+			       "host bridge %d\n", pci_ctrl->index);
+			res->start = 0;
+			res->end = ~0U;
+			res->flags = IORESOURCE_MEM;
+		}
+		pci_add_resource(resources, res);
+	}
+}
+
+static int __init pcibios_init(void)
+{
+	struct pci_controller *pci_ctrl;
+	struct list_head resources;
+	struct pci_bus *bus;
+	int next_busno = 0, i;
+
+	printk("PCI: Probing PCI hardware\n");
+
+	/* Scan all of the recorded PCI controllers.  */
+	for (pci_ctrl = pci_ctrl_head; pci_ctrl; pci_ctrl = pci_ctrl->next) {
+		pci_ctrl->last_busno = 0xff;
+		INIT_LIST_HEAD(&resources);
+		pci_controller_apertures(pci_ctrl, &resources);
+		bus = pci_scan_root_bus(NULL, pci_ctrl->first_busno,
+					pci_ctrl->ops, pci_ctrl, &resources);
+		pci_ctrl->bus = bus;
+		pci_ctrl->last_busno = bus->subordinate;
+		if (next_busno <= pci_ctrl->last_busno)
+			next_busno = pci_ctrl->last_busno+1;
+	}
+	pci_bus_count = next_busno;
+
+	return platform_pcibios_fixup();
+}
+
+subsys_initcall(pcibios_init);
+
+void __init pcibios_fixup_bus(struct pci_bus *bus)
+{
+	if (bus->parent) {
+		/* This is a subordinate bridge */
+		pci_read_bridge_bases(bus);
+	}
+}
+
+char __init *pcibios_setup(char *str)
+{
+	return str;
+}
+
+void pcibios_set_master(struct pci_dev *dev)
+{
+	/* No special bus mastering setup handling */
+}
+
+/* the next one is stolen from the alpha port... */
+
+void __init
+pcibios_update_irq(struct pci_dev *dev, int irq)
+{
+	pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
+}
+
+int pcibios_enable_device(struct pci_dev *dev, int mask)
+{
+	u16 cmd, old_cmd;
+	int idx;
+	struct resource *r;
+
+	pci_read_config_word(dev, PCI_COMMAND, &cmd);
+	old_cmd = cmd;
+	for (idx=0; idx<6; idx++) {
+		r = &dev->resource[idx];
+		if (!r->start && r->end) {
+			printk(KERN_ERR "PCI: Device %s not available because "
+			       "of resource collisions\n", pci_name(dev));
+			return -EINVAL;
+		}
+		if (r->flags & IORESOURCE_IO)
+			cmd |= PCI_COMMAND_IO;
+		if (r->flags & IORESOURCE_MEM)
+			cmd |= PCI_COMMAND_MEMORY;
+	}
+	if (cmd != old_cmd) {
+		printk("PCI: Enabling device %s (%04x -> %04x)\n",
+		       pci_name(dev), old_cmd, cmd);
+		pci_write_config_word(dev, PCI_COMMAND, cmd);
+	}
+
+	return 0;
+}
+
+#ifdef CONFIG_PROC_FS
+
+/*
+ * Return the index of the PCI controller for device pdev.
+ */
+
+int
+pci_controller_num(struct pci_dev *dev)
+{
+	struct pci_controller *pci_ctrl = (struct pci_controller*) dev->sysdata;
+	return pci_ctrl->index;
+}
+
+#endif /* CONFIG_PROC_FS */
+
+/*
+ * Platform support for /proc/bus/pci/X/Y mmap()s,
+ * modelled on the sparc64 implementation by Dave Miller.
+ *  -- paulus.
+ */
+
+/*
+ * Adjust vm_pgoff of VMA such that it is the physical page offset
+ * corresponding to the 32-bit pci bus offset for DEV requested by the user.
+ *
+ * Basically, the user finds the base address for the device they wish
+ * to mmap.  They read the 32-bit value from the config space base register,
+ * add whatever PAGE_SIZE multiple offset they wish, and feed this into the
+ * offset parameter of mmap on /proc/bus/pci/XXX for that device.
+ *
+ * Returns negative error code on failure, zero on success.
+ */
+static __inline__ int
+__pci_mmap_make_offset(struct pci_dev *dev, struct vm_area_struct *vma,
+		       enum pci_mmap_state mmap_state)
+{
+	struct pci_controller *pci_ctrl = (struct pci_controller*) dev->sysdata;
+	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+	unsigned long io_offset = 0;
+	int i, res_bit;
+
+	if (pci_ctrl == 0)
+		return -EINVAL;		/* should never happen */
+
+	/* If memory, add on the PCI bridge address offset */
+	if (mmap_state == pci_mmap_mem) {
+		res_bit = IORESOURCE_MEM;
+	} else {
+		io_offset = (unsigned long)pci_ctrl->io_space.base;
+		offset += io_offset;
+		res_bit = IORESOURCE_IO;
+	}
+
+	/*
+	 * Check that the offset requested corresponds to one of the
+	 * resources of the device.
+	 */
+	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
+		struct resource *rp = &dev->resource[i];
+		int flags = rp->flags;
+
+		/* treat ROM as memory (should be already) */
+		if (i == PCI_ROM_RESOURCE)
+			flags |= IORESOURCE_MEM;
+
+		/* Active and same type? */
+		if ((flags & res_bit) == 0)
+			continue;
+
+		/* In the range of this resource? */
+		if (offset < (rp->start & PAGE_MASK) || offset > rp->end)
+			continue;
+
+		/* found it! construct the final physical address */
+		if (mmap_state == pci_mmap_io)
+			offset += pci_ctrl->io_space.start - io_offset;
+		vma->vm_pgoff = offset >> PAGE_SHIFT;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+/*
+ * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
+ * device mapping.
+ */
+static __inline__ void
+__pci_mmap_set_pgprot(struct pci_dev *dev, struct vm_area_struct *vma,
+		      enum pci_mmap_state mmap_state, int write_combine)
+{
+	int prot = pgprot_val(vma->vm_page_prot);
+
+	/* Set to write-through */
+	prot &= ~_PAGE_NO_CACHE;
+#if 0
+	if (!write_combine)
+		prot |= _PAGE_WRITETHRU;
+#endif
+	vma->vm_page_prot = __pgprot(prot);
+}
+
+/*
+ * Perform the actual remap of the pages for a PCI device mapping, as
+ * appropriate for this architecture.  The region in the process to map
+ * is described by vm_start and vm_end members of VMA, the base physical
+ * address is found in vm_pgoff.
+ * The pci device structure is provided so that architectures may make mapping
+ * decisions on a per-device or per-bus basis.
+ *
+ * Returns a negative error code on failure, zero on success.
+ */
+int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
+			enum pci_mmap_state mmap_state,
+			int write_combine)
+{
+	int ret;
+
+	ret = __pci_mmap_make_offset(dev, vma, mmap_state);
+	if (ret < 0)
+		return ret;
+
+	__pci_mmap_set_pgprot(dev, vma, mmap_state, write_combine);
+
+	ret = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+			         vma->vm_end - vma->vm_start,vma->vm_page_prot);
+
+	return ret;
+}
diff --git a/arch/xtensa/kernel/platform.c b/arch/xtensa/kernel/platform.c
new file mode 100644
index 00000000..1b91a97f
--- /dev/null
+++ b/arch/xtensa/kernel/platform.c
@@ -0,0 +1,47 @@
+/*
+ * arch/xtensa/kernel/platform.c
+ *
+ * Default platform functions.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2005 Tensilica Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ */
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/time.h>
+#include <asm/platform.h>
+#include <asm/timex.h>
+#include <asm/param.h>		/* HZ */
+
+#define _F(r,f,a,b)							\
+	r __platform_##f a b;                                   	\
+	r platform_##f a __attribute__((weak, alias("__platform_"#f)))
+
+/*
+ * Default functions that are used if no platform specific function is defined.
+ * (Please, refer to include/asm-xtensa/platform.h for more information)
+ */
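+
+/* For example, _F(void, idle, (void), {...}) expands to
+ *
+ *	void __platform_idle (void) {...};
+ *	void platform_idle (void)
+ *		__attribute__((weak, alias("__platform_idle")));
+ *
+ * so a platform can override the weak platform_idle() with its own
+ * strong definition. */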
+
+_F(void, setup, (char** cmd), { });
+_F(void, init_irq, (void), { });
+_F(void, restart, (void), { while(1); });
+_F(void, halt, (void), { while(1); });
+_F(void, power_off, (void), { while(1); });
+_F(void, idle, (void), { __asm__ __volatile__ ("waiti 0" ::: "memory"); });
+_F(void, heartbeat, (void), { });
+_F(int,  pcibios_fixup, (void), { return 0; });
+
+#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
+_F(void, calibrate_ccount, (void),
+{
+  printk ("ERROR: Cannot calibrate cpu frequency! Assuming 100MHz.\n");
+  ccount_per_jiffy = 100 * (1000000UL/HZ);
+});
+#endif
+
diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
new file mode 100644
index 00000000..6a2d6edf
--- /dev/null
+++ b/arch/xtensa/kernel/process.c
@@ -0,0 +1,336 @@
+/*
+ * arch/xtensa/kernel/process.c
+ *
+ * Xtensa Processor version.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ *
+ * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
+ * Chris Zankel <chris@zankel.net>
+ * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
+ * Kevin Chea
+ */
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/ptrace.h>
+#include <linux/elf.h>
+#include <linux/init.h>
+#include <linux/prctl.h>
+#include <linux/init_task.h>
+#include <linux/module.h>
+#include <linux/mqueue.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+
+#include <asm/pgtable.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/processor.h>
+#include <asm/platform.h>
+#include <asm/mmu.h>
+#include <asm/irq.h>
+#include <linux/atomic.h>
+#include <asm/asm-offsets.h>
+#include <asm/regs.h>
+
+extern void ret_from_fork(void);
+
+struct task_struct *current_set[NR_CPUS] = {&init_task, };
+
+void (*pm_power_off)(void) = NULL;
+EXPORT_SYMBOL(pm_power_off);
+
+
+#if XTENSA_HAVE_COPROCESSORS
+
+void coprocessor_release_all(struct thread_info *ti)
+{
+	unsigned long cpenable;
+	int i;
+
+	/* Make sure we don't switch tasks during this operation. */
+
+	preempt_disable();
+
+	/* Walk through all cp owners and release it for the requested one. */
+
+	cpenable = ti->cpenable;
+
+	for (i = 0; i < XCHAL_CP_MAX; i++) {
+		if (coprocessor_owner[i] == ti) {
+			coprocessor_owner[i] = 0;
+			cpenable &= ~(1 << i);
+		}
+	}
+
+	ti->cpenable = cpenable;
+	coprocessor_clear_cpenable();
+
+	preempt_enable();
+}
+
+void coprocessor_flush_all(struct thread_info *ti)
+{
+	unsigned long cpenable;
+	int i;
+
+	preempt_disable();
+
+	cpenable = ti->cpenable;
+
+	for (i = 0; i < XCHAL_CP_MAX; i++) {
+		if ((cpenable & 1) != 0 && coprocessor_owner[i] == ti)
+			coprocessor_flush(ti, i);
+		cpenable >>= 1;
+	}
+
+	preempt_enable();
+}
+
+#endif
+
+
+/*
+ * Powermanagement idle function, if any is provided by the platform.
+ */
+
+void cpu_idle(void)
+{
+  	local_irq_enable();
+
+	/* endless idle loop with no priority at all */
+	while (1) {
+		while (!need_resched())
+			platform_idle();
+		schedule_preempt_disabled();
+	}
+}
+
+/*
+ * This is called when the thread calls exit().
+ */
+void exit_thread(void)
+{
+#if XTENSA_HAVE_COPROCESSORS
+	coprocessor_release_all(current_thread_info());
+#endif
+}
+
+/*
+ * Flush thread state. This is called when a thread does an execve().
+ * Note that we flush coprocessor registers in case the execve fails.
+ */
+void flush_thread(void)
+{
+#if XTENSA_HAVE_COPROCESSORS
+	struct thread_info *ti = current_thread_info();
+	coprocessor_flush_all(ti);
+	coprocessor_release_all(ti);
+#endif
+}
+
+/*
+ * This is called before the thread is copied. 
+ */
+void prepare_to_copy(struct task_struct *tsk)
+{
+#if XTENSA_HAVE_COPROCESSORS
+	coprocessor_flush_all(task_thread_info(tsk));
+#endif
+}
+
+/*
+ * Copy thread.
+ *
+ * The stack layout for the new thread looks like this:
+ *
+ *	+------------------------+ <- sp in childregs (= tos)
+ *	|       childregs        |
+ *	+------------------------+ <- thread.sp = sp in dummy-frame
+ *	|      dummy-frame       |    (saved in dummy-frame spill-area)
+ *	+------------------------+
+ *
+ * We create a dummy frame to return to ret_from_fork:
+ *   a0 points to ret_from_fork (simulating a call4)
+ *   sp points to itself (thread.sp)
+ *   a2, a3 are unused.
+ *
+ * Note: This is a pristine frame, so we don't need any spill region on top of
+ *       childregs.
+ */
+
+int copy_thread(unsigned long clone_flags, unsigned long usp,
+		unsigned long unused,
+                struct task_struct * p, struct pt_regs * regs)
+{
+	struct pt_regs *childregs;
+	struct thread_info *ti;
+	unsigned long tos;
+	int user_mode = user_mode(regs);
+
+	/* Set up new TSS. */
+	tos = (unsigned long)task_stack_page(p) + THREAD_SIZE;
+	if (user_mode)
+		childregs = (struct pt_regs*)(tos - PT_USER_SIZE);
+	else
+		childregs = (struct pt_regs*)tos - 1;
+
+	*childregs = *regs;
+
+	/* Create a call4 dummy-frame: a0 = 0, a1 = childregs. */
+	*((int*)childregs - 3) = (unsigned long)childregs;
+	*((int*)childregs - 4) = 0;
+
+	childregs->areg[1] = tos;
+	childregs->areg[2] = 0;
+	p->set_child_tid = p->clear_child_tid = NULL;
+	p->thread.ra = MAKE_RA_FOR_CALL((unsigned long)ret_from_fork, 0x1);
+	p->thread.sp = (unsigned long)childregs;
+
+	if (user_mode(regs)) {
+
+		int len = childregs->wmask & ~0xf;
+		childregs->areg[1] = usp;
+		memcpy(&childregs->areg[XCHAL_NUM_AREGS - len/4],
+		       &regs->areg[XCHAL_NUM_AREGS - len/4], len);
+// FIXME: we need to set THREADPTR in thread_info...
+		if (clone_flags & CLONE_SETTLS)
+			childregs->areg[2] = childregs->areg[6];
+
+	} else {
+		/* In kernel space, we start a new thread with a new stack. */
+		childregs->wmask = 1;
+	}
+
+#if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
+	ti = task_thread_info(p);
+	ti->cpenable = 0;
+#endif
+
+	return 0;
+}
+
+
+/*
+ * These bracket the sleeping functions..
+ */
+
+unsigned long get_wchan(struct task_struct *p)
+{
+	unsigned long sp, pc;
+	unsigned long stack_page = (unsigned long) task_stack_page(p);
+	int count = 0;
+
+	if (!p || p == current || p->state == TASK_RUNNING)
+		return 0;
+
+	sp = p->thread.sp;
+	pc = MAKE_PC_FROM_RA(p->thread.ra, p->thread.sp);
+
+	do {
+		if (sp < stack_page + sizeof(struct task_struct) ||
+		    sp >= (stack_page + THREAD_SIZE) ||
+		    pc == 0)
+			return 0;
+		if (!in_sched_functions(pc))
+			return pc;
+
+		/* Stack layout: sp-4: ra, sp-3: sp'
+		 * (word offsets below the current frame's stack pointer)
+		 */
+
+		pc = MAKE_PC_FROM_RA(*((unsigned long *)sp - 4), sp);
+		sp = *((unsigned long *)sp - 3);
+	} while (count++ < 16);
+	return 0;
+}
+
+/*
+ * xtensa_gregset_t and 'struct pt_regs' are vastly different formats
+ * of processor registers.  Besides different ordering,
+ * xtensa_gregset_t contains non-live register information that
+ * 'struct pt_regs' does not.  Exception handling (primarily) uses
+ * 'struct pt_regs'.  Core files and ptrace use xtensa_gregset_t.
+ */
+
+void xtensa_elf_core_copy_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs)
+{
+	unsigned long wb, ws, wm;
+	int live, last;
+
+	wb = regs->windowbase;
+	ws = regs->windowstart;
+	wm = regs->wmask;
+	ws = ((ws >> wb) | (ws << (WSBITS - wb))) & ((1 << WSBITS) - 1);
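+
+	/*
+	 * For example, assuming WSBITS == 16: with wb == 2 and ws == 0x0005,
+	 * the rotation above yields 0x4001, i.e. the same windowstart bits
+	 * expressed relative to windowbase 0.
+	 */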
+
+	/* Don't leak any random bits. */
+
+	memset(elfregs, 0, sizeof(*elfregs));
+
+	/* Note:  PS.EXCM is not set while the user task is running; it
+	 * is set in regs->ps only for exception-handling convenience.
+	 */
+
+	elfregs->pc		= regs->pc;
+	elfregs->ps		= (regs->ps & ~(1 << PS_EXCM_BIT));
+	elfregs->lbeg		= regs->lbeg;
+	elfregs->lend		= regs->lend;
+	elfregs->lcount		= regs->lcount;
+	elfregs->sar		= regs->sar;
+	elfregs->windowstart	= ws;
+
+	live = (wm & 2) ? 4 : (wm & 4) ? 8 : (wm & 8) ? 12 : 16;
+	last = XCHAL_NUM_AREGS - (wm >> 4) * 4;
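+
+	/*
+	 * A sketch of the wmask encoding, as read from the code around it:
+	 * bit 0 marks the pristine frame, bits 1-3 encode how many
+	 * 4-register quads of the current call chain are live, and
+	 * wm >> 4 counts the quads already spilled to the top of areg[].
+	 */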
+	memcpy(elfregs->a, regs->areg, live * 4);
+	memcpy(elfregs->a + last, regs->areg + last, (wm >> 4) * 16);
+}
+
+int dump_fpu(void)
+{
+	return 0;
+}
+
+asmlinkage
+long xtensa_clone(unsigned long clone_flags, unsigned long newsp,
+		  void __user *parent_tid, void *child_tls,
+		  void __user *child_tid, long a5,
+		  struct pt_regs *regs)
+{
+	if (!newsp)
+		newsp = regs->areg[1];
+	return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
+}
+
+/*
+ * xtensa_execve() executes a new program.
+ */
+
+asmlinkage
+long xtensa_execve(const char __user *name,
+		   const char __user *const __user *argv,
+		   const char __user *const __user *envp,
+		   long a3, long a4, long a5,
+		   struct pt_regs *regs)
+{
+	long error;
+	char * filename;
+
+	filename = getname(name);
+	error = PTR_ERR(filename);
+	if (IS_ERR(filename))
+		goto out;
+	error = do_execve(filename, argv, envp, regs);
+	putname(filename);
+out:
+	return error;
+}
+
diff --git a/arch/xtensa/kernel/ptrace.c b/arch/xtensa/kernel/ptrace.c
new file mode 100644
index 00000000..33eea4c1
--- /dev/null
+++ b/arch/xtensa/kernel/ptrace.c
@@ -0,0 +1,346 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2007  Tensilica Inc.
+ *
+ * Joe Taylor	<joe@tensilica.com, joetylr@yahoo.com>
+ * Chris Zankel <chris@zankel.net>
+ * Scott Foehner<sfoehner@yahoo.com>,
+ * Kevin Chea
+ * Marc Gauthier<marc@tensilica.com> <marc@alumni.uwaterloo.ca>
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <linux/smp.h>
+#include <linux/security.h>
+#include <linux/signal.h>
+
+#include <asm/pgtable.h>
+#include <asm/page.h>
+#include <asm/uaccess.h>
+#include <asm/ptrace.h>
+#include <asm/elf.h>
+#include <asm/coprocessor.h>
+
+
+void user_enable_single_step(struct task_struct *child)
+{
+	child->ptrace |= PT_SINGLESTEP;
+}
+
+void user_disable_single_step(struct task_struct *child)
+{
+	child->ptrace &= ~PT_SINGLESTEP;
+}
+
+/*
+ * Called by kernel/ptrace.c when detaching to disable single stepping.
+ */
+
+void ptrace_disable(struct task_struct *child)
+{
+	/* Nothing to do.. */
+}
+
+int ptrace_getregs(struct task_struct *child, void __user *uregs)
+{
+	struct pt_regs *regs = task_pt_regs(child);
+	xtensa_gregset_t __user *gregset = uregs;
+	unsigned long wm = regs->wmask;
+	unsigned long wb = regs->windowbase;
+	int live, i;
+
+	if (!access_ok(VERIFY_WRITE, uregs, sizeof(xtensa_gregset_t)))
+		return -EIO;
+
+	__put_user(regs->pc, &gregset->pc);
+	__put_user(regs->ps & ~(1 << PS_EXCM_BIT), &gregset->ps);
+	__put_user(regs->lbeg, &gregset->lbeg);
+	__put_user(regs->lend, &gregset->lend);
+	__put_user(regs->lcount, &gregset->lcount);
+	__put_user(regs->windowstart, &gregset->windowstart);
+	__put_user(regs->windowbase, &gregset->windowbase);
+
+	live = (wm & 2) ? 4 : (wm & 4) ? 8 : (wm & 8) ? 12 : 16;
+
+	for (i = 0; i < live; i++)
+		__put_user(regs->areg[i],
+			   gregset->a + ((wb * 4 + i) % XCHAL_NUM_AREGS));
+	for (i = XCHAL_NUM_AREGS - (wm >> 4) * 4; i < XCHAL_NUM_AREGS; i++)
+		__put_user(regs->areg[i],
+			   gregset->a + ((wb * 4 + i) % XCHAL_NUM_AREGS));
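+
+	/*
+	 * The (wb * 4 + i) % XCHAL_NUM_AREGS index above un-rotates the
+	 * window, so gregset->a[] is laid out as if windowbase were 0,
+	 * which is the layout core dumps and ptrace_setregs() work with.
+	 */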
+
+	return 0;
+}
+
+int ptrace_setregs(struct task_struct *child, void __user *uregs)
+{
+	struct pt_regs *regs = task_pt_regs(child);
+	xtensa_gregset_t *gregset = uregs;
+	const unsigned long ps_mask = PS_CALLINC_MASK | PS_OWB_MASK;
+	unsigned long ps;
+	unsigned long wb;
+
+	if (!access_ok(VERIFY_WRITE, uregs, sizeof(xtensa_gregset_t)))
+		return -EIO;
+
+	__get_user(regs->pc, &gregset->pc);
+	__get_user(ps, &gregset->ps);
+	__get_user(regs->lbeg, &gregset->lbeg);
+	__get_user(regs->lend, &gregset->lend);
+	__get_user(regs->lcount, &gregset->lcount);
+	__get_user(regs->windowstart, &gregset->windowstart);
+	__get_user(wb, &gregset->windowbase);
+
+	regs->ps = (regs->ps & ~ps_mask) | (ps & ps_mask) | (1 << PS_EXCM_BIT);
+
+	if (wb >= XCHAL_NUM_AREGS / 4)
+		return -EFAULT;
+
+	regs->windowbase = wb;
+
+	if (wb != 0 &&  __copy_from_user(regs->areg + XCHAL_NUM_AREGS - wb * 4,
+					 gregset->a, wb * 16))
+		return -EFAULT;
+
+	if (__copy_from_user(regs->areg, gregset->a + wb*4, (WSBITS-wb) * 16))
+		return -EFAULT;
+
+	return 0;
+}
+
+
+int ptrace_getxregs(struct task_struct *child, void __user *uregs)
+{
+	struct pt_regs *regs = task_pt_regs(child);
+	struct thread_info *ti = task_thread_info(child);
+	elf_xtregs_t __user *xtregs = uregs;
+	int ret = 0;
+
+	if (!access_ok(VERIFY_WRITE, uregs, sizeof(elf_xtregs_t)))
+		return -EIO;
+
+#if XTENSA_HAVE_COPROCESSORS
+	/* Flush all coprocessor registers to memory. */
+	coprocessor_flush_all(ti);
+	ret |= __copy_to_user(&xtregs->cp0, &ti->xtregs_cp,
+			      sizeof(xtregs_coprocessor_t));
+#endif
+	ret |= __copy_to_user(&xtregs->opt, &regs->xtregs_opt,
+			      sizeof(xtregs->opt));
+	ret |= __copy_to_user(&xtregs->user,&ti->xtregs_user,
+			      sizeof(xtregs->user));
+
+	return ret ? -EFAULT : 0;
+}
+
+int ptrace_setxregs(struct task_struct *child, void __user *uregs)
+{
+	struct thread_info *ti = task_thread_info(child);
+	struct pt_regs *regs = task_pt_regs(child);
+	elf_xtregs_t *xtregs = uregs;
+	int ret = 0;
+
+	if (!access_ok(VERIFY_READ, uregs, sizeof(elf_xtregs_t)))
+		return -EFAULT;
+
+#if XTENSA_HAVE_COPROCESSORS
+	/* Flush all coprocessors before we overwrite them. */
+	coprocessor_flush_all(ti);
+	coprocessor_release_all(ti);
+
+	ret |= __copy_from_user(&ti->xtregs_cp, &xtregs->cp0, 
+				sizeof(xtregs_coprocessor_t));
+#endif
+	ret |= __copy_from_user(&regs->xtregs_opt, &xtregs->opt,
+				sizeof(xtregs->opt));
+	ret |= __copy_from_user(&ti->xtregs_user, &xtregs->user,
+				sizeof(xtregs->user));
+
+	return ret ? -EFAULT : 0;
+}
+
+int ptrace_peekusr(struct task_struct *child, long regno, long __user *ret)
+{
+	struct pt_regs *regs;
+	unsigned long tmp;
+
+	regs = task_pt_regs(child);
+	tmp = 0;  /* Default return value. */
+
+	switch(regno) {
+
+		case REG_AR_BASE ... REG_AR_BASE + XCHAL_NUM_AREGS - 1:
+			tmp = regs->areg[regno - REG_AR_BASE];
+			break;
+
+		case REG_A_BASE ... REG_A_BASE + 15:
+			tmp = regs->areg[regno - REG_A_BASE];
+			break;
+
+		case REG_PC:
+			tmp = regs->pc;
+			break;
+
+		case REG_PS:
+			/* Note:  PS.EXCM is not set while the user task is
+			 * running; it is set in regs only for exception
+			 * handling convenience.  */
+			tmp = (regs->ps & ~(1 << PS_EXCM_BIT));
+			break;
+
+		case REG_WB:
+			break;		/* tmp = 0 */
+
+		case REG_WS:
+		{
+			unsigned long wb = regs->windowbase;
+			unsigned long ws = regs->windowstart;
+			tmp = ((ws>>wb) | (ws<<(WSBITS-wb))) & ((1<<WSBITS)-1);
+			break;
+		}
+		case REG_LBEG:
+			tmp = regs->lbeg;
+			break;
+
+		case REG_LEND:
+			tmp = regs->lend;
+			break;
+
+		case REG_LCOUNT:
+			tmp = regs->lcount;
+			break;
+
+		case REG_SAR:
+			tmp = regs->sar;
+			break;
+
+		case SYSCALL_NR:
+			tmp = regs->syscall;
+			break;
+
+		default:
+			return -EIO;
+	}
+	return put_user(tmp, ret);
+}
+
+int ptrace_pokeusr(struct task_struct *child, long regno, long val)
+{
+	struct pt_regs *regs;
+	regs = task_pt_regs(child);
+
+	switch (regno) {
+		case REG_AR_BASE ... REG_AR_BASE + XCHAL_NUM_AREGS - 1:
+			regs->areg[regno - REG_AR_BASE] = val;
+			break;
+
+		case REG_A_BASE ... REG_A_BASE + 15:
+			regs->areg[regno - REG_A_BASE] = val;
+			break;
+
+		case REG_PC:
+			regs->pc = val;
+			break;
+
+		case SYSCALL_NR:
+			regs->syscall = val;
+			break;
+
+		default:
+			return -EIO;
+	}
+	return 0;
+}
+
+long arch_ptrace(struct task_struct *child, long request,
+		 unsigned long addr, unsigned long data)
+{
+	int ret = -EPERM;
+	void __user *datap = (void __user *) data;
+
+	switch (request) {
+	case PTRACE_PEEKTEXT:	/* read word at location addr. */
+	case PTRACE_PEEKDATA:
+		ret = generic_ptrace_peekdata(child, addr, data);
+		break;
+
+	case PTRACE_PEEKUSR:	/* read register specified by addr. */
+		ret = ptrace_peekusr(child, addr, datap);
+		break;
+
+	case PTRACE_POKETEXT:	/* write the word at location addr. */
+	case PTRACE_POKEDATA:
+		ret = generic_ptrace_pokedata(child, addr, data);
+		break;
+
+	case PTRACE_POKEUSR:	/* write register specified by addr. */
+		ret = ptrace_pokeusr(child, addr, data);
+		break;
+
+	case PTRACE_GETREGS:
+		ret = ptrace_getregs(child, datap);
+		break;
+
+	case PTRACE_SETREGS:
+		ret = ptrace_setregs(child, datap);
+		break;
+
+	case PTRACE_GETXTREGS:
+		ret = ptrace_getxregs(child, datap);
+		break;
+
+	case PTRACE_SETXTREGS:
+		ret = ptrace_setxregs(child, datap);
+		break;
+
+	default:
+		ret = ptrace_request(child, request, addr, data);
+		break;
+	}
+
+	return ret;
+}
+
+void do_syscall_trace(void)
+{
+	/*
+	 * The 0x80 provides a way for the tracing parent to distinguish
+	 * between a syscall stop and SIGTRAP delivery
+	 */
+	ptrace_notify(SIGTRAP|((current->ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));
+
+	/*
+	 * this isn't the same as continuing with a signal, but it will do
+	 * for normal use.  strace only continues with a signal if the
+	 * stopping signal is not SIGTRAP.  -brl
+	 */
+	if (current->exit_code) {
+		send_sig(current->exit_code, current, 1);
+		current->exit_code = 0;
+	}
+}
+
+void do_syscall_trace_enter(struct pt_regs *regs)
+{
+	if (test_thread_flag(TIF_SYSCALL_TRACE)
+			&& (current->ptrace & PT_PTRACED))
+		do_syscall_trace();
+
+#if 0
+	audit_syscall_entry(current, AUDIT_ARCH_XTENSA..);
+#endif
+}
+
+void do_syscall_trace_leave(struct pt_regs *regs)
+{
+	if ((test_thread_flag(TIF_SYSCALL_TRACE))
+			&& (current->ptrace & PT_PTRACED))
+		do_syscall_trace();
+}
+
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
new file mode 100644
index 00000000..17e746f7
--- /dev/null
+++ b/arch/xtensa/kernel/setup.c
@@ -0,0 +1,480 @@
+/*
+ * arch/xtensa/kernel/setup.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995  Linus Torvalds
+ * Copyright (C) 2001 - 2005  Tensilica Inc.
+ *
+ * Chris Zankel	<chris@zankel.net>
+ * Joe Taylor	<joe@tensilica.com, joetylr@yahoo.com>
+ * Kevin Chea
+ * Marc Gauthier<marc@tensilica.com> <marc@alumni.uwaterloo.ca>
+ */
+
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/proc_fs.h>
+#include <linux/screen_info.h>
+#include <linux/bootmem.h>
+#include <linux/kernel.h>
+
+#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
+# include <linux/console.h>
+#endif
+
+#ifdef CONFIG_RTC
+# include <linux/timex.h>
+#endif
+
+#ifdef CONFIG_PROC_FS
+# include <linux/seq_file.h>
+#endif
+
+#include <asm/bootparam.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+#include <asm/timex.h>
+#include <asm/platform.h>
+#include <asm/page.h>
+#include <asm/setup.h>
+#include <asm/param.h>
+
+#include <platform/hardware.h>
+
+#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
+struct screen_info screen_info = { 0, 24, 0, 0, 0, 80, 0, 0, 0, 24, 1, 16};
+#endif
+
+#ifdef CONFIG_BLK_DEV_FD
+extern struct fd_ops no_fd_ops;
+struct fd_ops *fd_ops;
+#endif
+
+extern struct rtc_ops no_rtc_ops;
+struct rtc_ops *rtc_ops;
+
+#ifdef CONFIG_BLK_DEV_INITRD
+extern void *initrd_start;
+extern void *initrd_end;
+extern void *__initrd_start;
+extern void *__initrd_end;
+int initrd_is_mapped = 0;
+extern int initrd_below_start_ok;
+#endif
+
+unsigned char aux_device_present;
+extern unsigned long loops_per_jiffy;
+
+/* Command line specified as configuration option. */
+
+static char __initdata command_line[COMMAND_LINE_SIZE];
+
+#ifdef CONFIG_CMDLINE_BOOL
+static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
+#endif
+
+sysmem_info_t __initdata sysmem;
+
+#ifdef CONFIG_MMU
+extern void init_mmu(void);
+#else
+static inline void init_mmu(void) { }
+#endif
+
+extern void zones_init(void);
+
+/*
+ * Boot parameter parsing.
+ *
+ * The Xtensa port uses a list of variable-sized tags to pass data to
+ * the kernel. The first tag must be a BP_TAG_FIRST tag for the list
+ * to be recognised. The list is terminated with a zero-sized
+ * BP_TAG_LAST tag.
+ */
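+
+/*
+ * A minimal list might look like this (illustrative values only):
+ *
+ *	BP_TAG_FIRST
+ *	BP_TAG_MEMORY		(a meminfo_t describing one RAM bank)
+ *	BP_TAG_COMMAND_LINE	("console=ttyS0")
+ *	BP_TAG_LAST
+ *
+ * Each tag carries 'size' bytes of payload; parse_bootparam() below
+ * walks the list and hands every tag to the parser registered for its
+ * id via __tagtable().
+ */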
+
+typedef struct tagtable {
+	u32 tag;
+	int (*parse)(const bp_tag_t*);
+} tagtable_t;
+
+#define __tagtable(tag, fn) static tagtable_t __tagtable_##fn 		\
+	__attribute__((unused, __section__(".taglist"))) = { tag, fn }
+
+/* parse current tag */
+
+static int __init parse_tag_mem(const bp_tag_t *tag)
+{
+	meminfo_t *mi = (meminfo_t*)(tag->data);
+
+	if (mi->type != MEMORY_TYPE_CONVENTIONAL)
+		return -1;
+
+	if (sysmem.nr_banks >= SYSMEM_BANKS_MAX) {
+		printk(KERN_WARNING
+		       "Ignoring memory bank 0x%08lx size %ldKB\n",
+		       (unsigned long)mi->start,
+		       (unsigned long)mi->end - (unsigned long)mi->start);
+		return -EINVAL;
+	}
+	sysmem.bank[sysmem.nr_banks].type  = mi->type;
+	sysmem.bank[sysmem.nr_banks].start = PAGE_ALIGN(mi->start);
+	sysmem.bank[sysmem.nr_banks].end   = mi->end & PAGE_MASK;
+	sysmem.nr_banks++;
+
+	return 0;
+}
+
+__tagtable(BP_TAG_MEMORY, parse_tag_mem);
+
+#ifdef CONFIG_BLK_DEV_INITRD
+
+static int __init parse_tag_initrd(const bp_tag_t* tag)
+{
+	meminfo_t* mi;
+	mi = (meminfo_t*)(tag->data);
+	initrd_start = (void*)(mi->start);
+	initrd_end = (void*)(mi->end);
+
+	return 0;
+}
+
+__tagtable(BP_TAG_INITRD, parse_tag_initrd);
+
+#endif /* CONFIG_BLK_DEV_INITRD */
+
+static int __init parse_tag_cmdline(const bp_tag_t* tag)
+{
+	strncpy(command_line, (char*)(tag->data), COMMAND_LINE_SIZE);
+	command_line[COMMAND_LINE_SIZE - 1] = '\0';
+	return 0;
+}
+
+__tagtable(BP_TAG_COMMAND_LINE, parse_tag_cmdline);
+
+static int __init parse_bootparam(const bp_tag_t* tag)
+{
+	extern tagtable_t __tagtable_begin, __tagtable_end;
+	tagtable_t *t;
+
+	/* Boot parameters must start with a BP_TAG_FIRST tag. */
+
+	if (tag->id != BP_TAG_FIRST) {
+		printk(KERN_WARNING "Invalid boot parameters!\n");
+		return 0;
+	}
+
+	tag = (bp_tag_t*)((unsigned long)tag + sizeof(bp_tag_t) + tag->size);
+
+	/* Parse all tags. */
+
+	while (tag != NULL && tag->id != BP_TAG_LAST) {
+		for (t = &__tagtable_begin; t < &__tagtable_end; t++) {
+			if (tag->id == t->tag) {
+				t->parse(tag);
+				break;
+			}
+		}
+		if (t == &__tagtable_end)
+			printk(KERN_WARNING "Ignoring tag "
+			       "0x%08x\n", tag->id);
+		tag = (bp_tag_t*)((unsigned long)(tag + 1) + tag->size);
+	}
+
+	return 0;
+}
+
+/*
+ * Initialize architecture. (Early stage)
+ */
+
+void __init init_arch(bp_tag_t *bp_start)
+{
+
+#ifdef CONFIG_BLK_DEV_INITRD
+	initrd_start = &__initrd_start;
+	initrd_end = &__initrd_end;
+#endif
+
+	sysmem.nr_banks = 0;
+
+#ifdef CONFIG_CMDLINE_BOOL
+	strcpy(command_line, default_command_line);
+#endif
+
+	/* Parse boot parameters */
+
+	if (bp_start)
+		parse_bootparam(bp_start);
+
+	if (sysmem.nr_banks == 0) {
+		sysmem.nr_banks = 1;
+		sysmem.bank[0].start = PLATFORM_DEFAULT_MEM_START;
+		sysmem.bank[0].end = PLATFORM_DEFAULT_MEM_START
+				     + PLATFORM_DEFAULT_MEM_SIZE;
+	}
+
+	/* Early hook for platforms */
+
+	platform_init(bp_start);
+
+	/* Initialize MMU. */
+
+	init_mmu();
+}
+
+/*
+ * Initialize system. Setup memory and reserve regions.
+ */
+
+extern char _end;
+extern char _stext;
+extern char _WindowVectors_text_start;
+extern char _WindowVectors_text_end;
+extern char _DebugInterruptVector_literal_start;
+extern char _DebugInterruptVector_text_end;
+extern char _KernelExceptionVector_literal_start;
+extern char _KernelExceptionVector_text_end;
+extern char _UserExceptionVector_literal_start;
+extern char _UserExceptionVector_text_end;
+extern char _DoubleExceptionVector_literal_start;
+extern char _DoubleExceptionVector_text_end;
+
+void __init setup_arch(char **cmdline_p)
+{
+	extern int mem_reserve(unsigned long, unsigned long, int);
+	extern void bootmem_init(void);
+
+	memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
+	boot_command_line[COMMAND_LINE_SIZE-1] = '\0';
+	*cmdline_p = command_line;
+
+	/* Reserve some memory regions */
+
+#ifdef CONFIG_BLK_DEV_INITRD
+	if (initrd_start < initrd_end) {
+		initrd_is_mapped = mem_reserve(__pa(initrd_start),
+					       __pa(initrd_end), 0);
+		initrd_below_start_ok = 1;
+	} else {
+		initrd_start = 0;
+	}
+#endif
+
+	mem_reserve(__pa(&_stext),__pa(&_end), 1);
+
+	mem_reserve(__pa(&_WindowVectors_text_start),
+		    __pa(&_WindowVectors_text_end), 0);
+
+	mem_reserve(__pa(&_DebugInterruptVector_literal_start),
+		    __pa(&_DebugInterruptVector_text_end), 0);
+
+	mem_reserve(__pa(&_KernelExceptionVector_literal_start),
+		    __pa(&_KernelExceptionVector_text_end), 0);
+
+	mem_reserve(__pa(&_UserExceptionVector_literal_start),
+		    __pa(&_UserExceptionVector_text_end), 0);
+
+	mem_reserve(__pa(&_DoubleExceptionVector_literal_start),
+		    __pa(&_DoubleExceptionVector_text_end), 0);
+
+	bootmem_init();
+
+	platform_setup(cmdline_p);
+
+
+	paging_init();
+	zones_init();
+
+#ifdef CONFIG_VT
+# if defined(CONFIG_VGA_CONSOLE)
+	conswitchp = &vga_con;
+# elif defined(CONFIG_DUMMY_CONSOLE)
+	conswitchp = &dummy_con;
+# endif
+#endif
+
+#ifdef CONFIG_PCI
+	platform_pcibios_init();
+#endif
+}
+
+void machine_restart(char * cmd)
+{
+	platform_restart();
+}
+
+void machine_halt(void)
+{
+	platform_halt();
+	while (1);
+}
+
+void machine_power_off(void)
+{
+	platform_power_off();
+	while (1);
+}
+#ifdef CONFIG_PROC_FS
+
+/*
+ * Display some core information through /proc/cpuinfo.
+ */
+
+static int
+c_show(struct seq_file *f, void *slot)
+{
+	/* high-level stuff */
+	seq_printf(f,"processor\t: 0\n"
+		     "vendor_id\t: Tensilica\n"
+		     "model\t\t: Xtensa " XCHAL_HW_VERSION_NAME "\n"
+		     "core ID\t\t: " XCHAL_CORE_ID "\n"
+		     "build ID\t: 0x%x\n"
+		     "byte order\t: %s\n"
+		     "cpu MHz\t\t: %lu.%02lu\n"
+		     "bogomips\t: %lu.%02lu\n",
+		     XCHAL_BUILD_UNIQUE_ID,
+		     XCHAL_HAVE_BE ?  "big" : "little",
+		     CCOUNT_PER_JIFFY/(1000000/HZ),
+		     (CCOUNT_PER_JIFFY/(10000/HZ)) % 100,
+		     loops_per_jiffy/(500000/HZ),
+		     (loops_per_jiffy/(5000/HZ)) % 100);
+
+	seq_printf(f,"flags\t\t: "
+#if XCHAL_HAVE_NMI
+		     "nmi "
+#endif
+#if XCHAL_HAVE_DEBUG
+		     "debug "
+# if XCHAL_HAVE_OCD
+		     "ocd "
+# endif
+#endif
+#if XCHAL_HAVE_DENSITY
+		     "density "
+#endif
+#if XCHAL_HAVE_BOOLEANS
+		     "boolean "
+#endif
+#if XCHAL_HAVE_LOOPS
+		     "loop "
+#endif
+#if XCHAL_HAVE_NSA
+		     "nsa "
+#endif
+#if XCHAL_HAVE_MINMAX
+		     "minmax "
+#endif
+#if XCHAL_HAVE_SEXT
+		     "sext "
+#endif
+#if XCHAL_HAVE_CLAMPS
+		     "clamps "
+#endif
+#if XCHAL_HAVE_MAC16
+		     "mac16 "
+#endif
+#if XCHAL_HAVE_MUL16
+		     "mul16 "
+#endif
+#if XCHAL_HAVE_MUL32
+		     "mul32 "
+#endif
+#if XCHAL_HAVE_MUL32_HIGH
+		     "mul32h "
+#endif
+#if XCHAL_HAVE_FP
+		     "fpu "
+#endif
+		     "\n");
+
+	/* Registers. */
+	seq_printf(f,"physical aregs\t: %d\n"
+		     "misc regs\t: %d\n"
+		     "ibreak\t\t: %d\n"
+		     "dbreak\t\t: %d\n",
+		     XCHAL_NUM_AREGS,
+		     XCHAL_NUM_MISC_REGS,
+		     XCHAL_NUM_IBREAK,
+		     XCHAL_NUM_DBREAK);
+
+
+	/* Interrupt. */
+	seq_printf(f,"num ints\t: %d\n"
+		     "ext ints\t: %d\n"
+		     "int levels\t: %d\n"
+		     "timers\t\t: %d\n"
+		     "debug level\t: %d\n",
+		     XCHAL_NUM_INTERRUPTS,
+		     XCHAL_NUM_EXTINTERRUPTS,
+		     XCHAL_NUM_INTLEVELS,
+		     XCHAL_NUM_TIMERS,
+		     XCHAL_DEBUGLEVEL);
+
+	/* Cache */
+	seq_printf(f,"icache line size: %d\n"
+		     "icache ways\t: %d\n"
+		     "icache size\t: %d\n"
+		     "icache flags\t: "
+#if XCHAL_ICACHE_LINE_LOCKABLE
+		     "lock"
+#endif
+		     "\n"
+		     "dcache line size: %d\n"
+		     "dcache ways\t: %d\n"
+		     "dcache size\t: %d\n"
+		     "dcache flags\t: "
+#if XCHAL_DCACHE_IS_WRITEBACK
+		     "writeback"
+#endif
+#if XCHAL_DCACHE_LINE_LOCKABLE
+		     "lock"
+#endif
+		     "\n",
+		     XCHAL_ICACHE_LINESIZE,
+		     XCHAL_ICACHE_WAYS,
+		     XCHAL_ICACHE_SIZE,
+		     XCHAL_DCACHE_LINESIZE,
+		     XCHAL_DCACHE_WAYS,
+		     XCHAL_DCACHE_SIZE);
+
+	return 0;
+}
+
+/*
+ * We show only CPU #0 info.
+ */
+static void *
+c_start(struct seq_file *f, loff_t *pos)
+{
+	return (void *) ((*pos == 0) ? (void *)1 : NULL);
+}
+
+static void *
+c_next(struct seq_file *f, void *v, loff_t *pos)
+{
+	return NULL;
+}
+
+static void
+c_stop(struct seq_file *f, void *v)
+{
+}
+
+const struct seq_operations cpuinfo_op =
+{
+	.start	= c_start,
+	.next	= c_next,
+	.stop	= c_stop,
+	.show	= c_show,
+};
+
+#endif /* CONFIG_PROC_FS */
+
diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c
new file mode 100644
index 00000000..d78869a0
--- /dev/null
+++ b/arch/xtensa/kernel/signal.c
@@ -0,0 +1,565 @@
+/*
+ * arch/xtensa/kernel/signal.c
+ *
+ * Default platform functions.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2005, 2006 Tensilica Inc.
+ * Copyright (C) 1991, 1992  Linus Torvalds
+ * 1997-11-28  Modified for POSIX.1b signals by Richard Henderson
+ *
+ * Chris Zankel <chris@zankel.net>
+ * Joe Taylor <joe@tensilica.com>
+ */
+
+#include <linux/signal.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <linux/personality.h>
+#include <linux/freezer.h>
+
+#include <asm/ucontext.h>
+#include <asm/uaccess.h>
+#include <asm/cacheflush.h>
+#include <asm/coprocessor.h>
+#include <asm/unistd.h>
+
+#define DEBUG_SIG  0
+
+#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+
+asmlinkage int do_signal(struct pt_regs *regs, sigset_t *oldset);
+
+extern struct task_struct *coproc_owners[];
+
+struct rt_sigframe
+{
+	struct siginfo info;
+	struct ucontext uc;
+	struct {
+		xtregs_opt_t opt;
+		xtregs_user_t user;
+#if XTENSA_HAVE_COPROCESSORS
+		xtregs_coprocessor_t cp;
+#endif
+	} xtregs;
+	unsigned char retcode[6];
+	unsigned int window[4];
+};
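+
+/*
+ * retcode[] above holds the six bytes of the default sigreturn
+ * trampoline that gen_return_code() emits when the handler supplies no
+ * SA_RESTORER (see setup_frame() below).
+ */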
+
+/*
+ * Flush register windows stored in pt_regs to stack.
+ * Returns 1 for errors.
+ */
+
+int
+flush_window_regs_user(struct pt_regs *regs)
+{
+	const unsigned long ws = regs->windowstart;
+	const unsigned long wb = regs->windowbase;
+	unsigned long sp = 0;
+	unsigned long wm;
+	int err = 1;
+	int base;
+
+	/* Return if no other frames. */
+
+	if (regs->wmask == 1)
+		return 0;
+
+	/* Rotate windowmask and skip empty frames. */
+
+	wm = (ws >> wb) | (ws << (XCHAL_NUM_AREGS / 4 - wb));
+	base = (XCHAL_NUM_AREGS / 4) - (regs->wmask >> 4);
+
+	/* For call8 or call12 frames, we need the previous stack pointer. */
+
+	if ((regs->wmask & 2) == 0)
+		if (__get_user(sp, (int*)(regs->areg[base * 4 + 1] - 12)))
+			goto errout;
+
+	/* Spill frames to stack. */
+
+	while (base < XCHAL_NUM_AREGS / 4) {
+
+		int m = (wm >> base);
+		int inc = 0;
+
+		/* Save registers a4..a7 (call8) or a4..a11 (call12) */
+
+		if (m & 2) {			/* call4 */
+			inc = 1;
+
+		} else if (m & 4) {		/* call8 */
+			if (copy_to_user((void*)(sp - 32),
+					   &regs->areg[(base + 1) * 4], 16))
+				goto errout;
+			inc = 2;
+
+		} else if (m & 8) {	/* call12 */
+			if (copy_to_user((void*)(sp - 48),
+					   &regs->areg[(base + 1) * 4], 32))
+				goto errout;
+			inc = 3;
+		}
+
+		/* Save current frame a0..a3 under next SP */
+
+		sp = regs->areg[((base + inc) * 4 + 1) % XCHAL_NUM_AREGS];
+		if (copy_to_user((void*)(sp - 16), &regs->areg[base * 4], 16))
+			goto errout;
+
+		/* Get current stack pointer for next loop iteration. */
+
+		sp = regs->areg[base * 4 + 1];
+		base += inc;
+	}
+
+	regs->wmask = 1;
+	regs->windowstart = 1 << wb;
+
+	return 0;
+
+errout:
+	return err;
+}
+
+/*
+ * Note: We don't copy the double exception 'regs'; we have to finish the
+ * double exception first before we return to the signal handler! That
+ * double exception handler might cause another double exception, but we
+ * should be fine, as the situation is the same as if we had returned to
+ * the signal handler and got an interrupt immediately...
+ */
+
+static int
+setup_sigcontext(struct rt_sigframe __user *frame, struct pt_regs *regs)
+{
+	struct sigcontext __user *sc = &frame->uc.uc_mcontext;
+	struct thread_info *ti = current_thread_info();
+	int err = 0;
+
+#define COPY(x)	err |= __put_user(regs->x, &sc->sc_##x)
+	COPY(pc);
+	COPY(ps);
+	COPY(lbeg);
+	COPY(lend);
+	COPY(lcount);
+	COPY(sar);
+#undef COPY
+
+	err |= flush_window_regs_user(regs);
+	err |= __copy_to_user (sc->sc_a, regs->areg, 16 * 4);
+	err |= __put_user(0, &sc->sc_xtregs);
+
+	if (err)
+		return err;
+
+#if XTENSA_HAVE_COPROCESSORS
+	coprocessor_flush_all(ti);
+	coprocessor_release_all(ti);
+	err |= __copy_to_user(&frame->xtregs.cp, &ti->xtregs_cp,
+			      sizeof (frame->xtregs.cp));
+#endif
+	err |= __copy_to_user(&frame->xtregs.opt, &regs->xtregs_opt,
+			      sizeof (xtregs_opt_t));
+	err |= __copy_to_user(&frame->xtregs.user, &ti->xtregs_user,
+			      sizeof (xtregs_user_t));
+
+	err |= __put_user(err ? NULL : &frame->xtregs, &sc->sc_xtregs);
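+
+	/* Thus sc_xtregs is non-NULL only if every extended-state copy-out
+	 * above succeeded; a NULL pointer marks the extended state as
+	 * absent. */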
+
+	return err;
+}
+
+static int
+restore_sigcontext(struct pt_regs *regs, struct rt_sigframe __user *frame)
+{
+	struct sigcontext __user *sc = &frame->uc.uc_mcontext;
+	struct thread_info *ti = current_thread_info();
+	unsigned int err = 0;
+	unsigned long ps;
+
+#define COPY(x)	err |= __get_user(regs->x, &sc->sc_##x)
+	COPY(pc);
+	COPY(lbeg);
+	COPY(lend);
+	COPY(lcount);
+	COPY(sar);
+#undef COPY
+
+	/* All registers were flushed to stack. Start with a pristine frame. */
+
+	regs->wmask = 1;
+	regs->windowbase = 0;
+	regs->windowstart = 1;
+
+	regs->syscall = -1;		/* disable syscall checks */
+
+	/* For PS, restore only PS.CALLINC.
+	 * Assume that all other bits are either the same as for the signal
+	 * handler, or the user mode value doesn't matter (e.g. PS.OWB).
+	 */
+	err |= __get_user(ps, &sc->sc_ps);
+	regs->ps = (regs->ps & ~PS_CALLINC_MASK) | (ps & PS_CALLINC_MASK);
+
+	/* Additional corruption checks */
+
+	if ((regs->lcount > 0)
+	    && ((regs->lbeg > TASK_SIZE) || (regs->lend > TASK_SIZE)) )
+		err = 1;
+
+	err |= __copy_from_user(regs->areg, sc->sc_a, 16 * 4);
+
+	if (err)
+		return err;
+
+ 	/* The signal handler may have used coprocessors in which
+	 * case they are still enabled.  We disable them to force a
+	 * reloading of the original task's CP state by the lazy
+	 * context-switching mechanisms of CP exception handling.
+	 * Also, we essentially discard any coprocessor state that the
+	 * signal handler created. */
+
+#if XTENSA_HAVE_COPROCESSORS
+	coprocessor_release_all(ti);
+	err |= __copy_from_user(&ti->xtregs_cp, &frame->xtregs.cp,
+				sizeof (frame->xtregs.cp));
+#endif
+	err |= __copy_from_user(&ti->xtregs_user, &frame->xtregs.user,
+				sizeof (xtregs_user_t));
+	err |= __copy_from_user(&regs->xtregs_opt, &frame->xtregs.opt,
+				sizeof (xtregs_opt_t));
+
+	return err;
+}
+
+
+/*
+ * Do a signal return; undo the signal stack.
+ */
+
+asmlinkage long xtensa_rt_sigreturn(long a0, long a1, long a2, long a3,
+				    long a4, long a5, struct pt_regs *regs)
+{
+	struct rt_sigframe __user *frame;
+	sigset_t set;
+	int ret;
+
+	if (regs->depc > 64)
+		panic("rt_sigreturn in double exception!\n");
+
+	frame = (struct rt_sigframe __user *) regs->areg[1];
+
+	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+		goto badframe;
+
+	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
+		goto badframe;
+
+	sigdelsetmask(&set, ~_BLOCKABLE);
+	set_current_blocked(&set);
+
+	if (restore_sigcontext(regs, frame))
+		goto badframe;
+
+	ret = regs->areg[2];
+
+	if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->areg[1]) == -EFAULT)
+		goto badframe;
+
+	return ret;
+
+badframe:
+	force_sig(SIGSEGV, current);
+	return 0;
+}
+
+
+
+/*
+ * Set up a signal frame.
+ */
+
+static int
+gen_return_code(unsigned char *codemem)
+{
+	int err = 0;
+
+	/*
+	 * The 12-bit immediate is really split up within the 24-bit MOVI
+	 * instruction.  As long as the above system call numbers fit within
+	 * 8-bits, the following code works fine. See the Xtensa ISA for
+	 * details.
+	 */
+
+#if __NR_rt_sigreturn > 255
+# error Generating the MOVI instruction below breaks!
+#endif
+
+#ifdef __XTENSA_EB__   /* Big Endian version */
+	/* Generate instruction:  MOVI a2, __NR_rt_sigreturn */
+	err |= __put_user(0x22, &codemem[0]);
+	err |= __put_user(0x0a, &codemem[1]);
+	err |= __put_user(__NR_rt_sigreturn, &codemem[2]);
+	/* Generate instruction:  SYSCALL */
+	err |= __put_user(0x00, &codemem[3]);
+	err |= __put_user(0x05, &codemem[4]);
+	err |= __put_user(0x00, &codemem[5]);
+
+#elif defined __XTENSA_EL__   /* Little Endian version */
+	/* Generate instruction:  MOVI a2, __NR_rt_sigreturn */
+	err |= __put_user(0x22, &codemem[0]);
+	err |= __put_user(0xa0, &codemem[1]);
+	err |= __put_user(__NR_rt_sigreturn, &codemem[2]);
+	/* Generate instruction:  SYSCALL */
+	err |= __put_user(0x00, &codemem[3]);
+	err |= __put_user(0x50, &codemem[4]);
+	err |= __put_user(0x00, &codemem[5]);
+#else
+# error Must use compiler for Xtensa processors.
+#endif
+
+	/* Flush the generated code out of the data cache and invalidate
+	 * the stale instruction-cache lines, so the CPU fetches the bytes
+	 * we just stored. */
+
+	if (err == 0) {
+		__invalidate_icache_range((unsigned long)codemem, 6UL);
+		__flush_invalidate_dcache_range((unsigned long)codemem, 6UL);
+	}
+
+	return err;
+}
+
+
+static int setup_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+		       sigset_t *set, struct pt_regs *regs)
+{
+	struct rt_sigframe *frame;
+	int err = 0;
+	int signal;
+	unsigned long sp, ra;
+
+	sp = regs->areg[1];
+
+	if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && !on_sig_stack(sp))
+		sp = current->sas_ss_sp + current->sas_ss_size;
+
+	frame = (void *)((sp - sizeof(*frame)) & -16ul);
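+
+	/* Rounding down to a 16-byte boundary keeps the frame, and hence
+	 * the handler's initial a1, at the 16-byte stack alignment the
+	 * windowed ABI expects. */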
+
+	if (regs->depc > 64)
+		panic ("Double exception sys_sigreturn\n");
+
+	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) {
+		goto give_sigsegv;
+	}
+
+	signal = current_thread_info()->exec_domain
+		&& current_thread_info()->exec_domain->signal_invmap
+		&& sig < 32
+		? current_thread_info()->exec_domain->signal_invmap[sig]
+		: sig;
+
+	if (ka->sa.sa_flags & SA_SIGINFO) {
+		err |= copy_siginfo_to_user(&frame->info, info);
+	}
+
+	/* Create the user context.  */
+
+	err |= __put_user(0, &frame->uc.uc_flags);
+	err |= __put_user(0, &frame->uc.uc_link);
+	err |= __put_user((void *)current->sas_ss_sp,
+			  &frame->uc.uc_stack.ss_sp);
+	err |= __put_user(sas_ss_flags(regs->areg[1]),
+			  &frame->uc.uc_stack.ss_flags);
+	err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
+	err |= setup_sigcontext(frame, regs);
+	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+
+	if (ka->sa.sa_flags & SA_RESTORER) {
+		ra = (unsigned long)ka->sa.sa_restorer;
+	} else {
+
+		/* Create sys_rt_sigreturn syscall in stack frame */
+
+		err |= gen_return_code(frame->retcode);
+
+		if (err) {
+			goto give_sigsegv;
+		}
+		ra = (unsigned long) frame->retcode;
+	}
+
+	/*
+	 * Create the signal handler execution context.
+	 * The return context is not modified until this point.
+	 */
+
+	/* Set up registers for signal handler */
+	start_thread(regs, (unsigned long) ka->sa.sa_handler, 
+		     (unsigned long) frame);
+
+	/* Set up a stack frame for a call4
+	 * Note: PS.CALLINC is set to one by start_thread
+	 */
+	regs->areg[4] = (((unsigned long) ra) & 0x3fffffff) | 0x40000000;
+	regs->areg[6] = (unsigned long) signal;
+	regs->areg[7] = (unsigned long) &frame->info;
+	regs->areg[8] = (unsigned long) &frame->uc;
+
+	/* Set the access mode to USER_DS.  The nomenclature is outdated,
+	 * but the functionality is still used in uaccess.h.
+	 */
+	set_fs(USER_DS);
+
+#if DEBUG_SIG
+	printk("SIG rt deliver (%s:%d): signal=%d sp=%p pc=%08x\n",
+		current->comm, current->pid, signal, frame, regs->pc);
+#endif
+
+	return 0;
+
+give_sigsegv:
+	force_sigsegv(sig, current);
+	return -EFAULT;
+}
+
+/*
+ * Atomically swap in the new signal mask, and wait for a signal.
+ */
+
+asmlinkage long xtensa_rt_sigsuspend(sigset_t __user *unewset,
+				     size_t sigsetsize,
+				     long a2, long a3, long a4, long a5,
+				     struct pt_regs *regs)
+{
+	sigset_t saveset, newset;
+
+	/* XXX: Don't preclude handling different sized sigset_t's.  */
+	if (sigsetsize != sizeof(sigset_t))
+		return -EINVAL;
+
+	if (copy_from_user(&newset, unewset, sizeof(newset)))
+		return -EFAULT;
+
+	sigdelsetmask(&newset, ~_BLOCKABLE);
+	saveset = current->blocked;
+	set_current_blocked(&newset);
+
+	regs->areg[2] = -EINTR;
+	while (1) {
+		current->state = TASK_INTERRUPTIBLE;
+		schedule();
+		if (do_signal(regs, &saveset))
+			return -EINTR;
+	}
+}
+
+asmlinkage long xtensa_sigaltstack(const stack_t __user *uss,
+				   stack_t __user *uoss,
+				   long a2, long a3, long a4, long a5,
+				   struct pt_regs *regs)
+{
+	return do_sigaltstack(uss, uoss, regs->areg[1]);
+}
+
+
+
+/*
+ * Note that 'init' is a special process: it doesn't get signals it doesn't
+ * want to handle. Thus you cannot kill init even with a SIGKILL even by
+ * mistake.
+ *
+ * Note that we go through the signals twice: once to check the signals that
+ * the kernel can handle, and then we build all the user-level signal handling
+ * stack-frames in one go after that.
+ */
+int do_signal(struct pt_regs *regs, sigset_t *oldset)
+{
+	siginfo_t info;
+	int signr;
+	struct k_sigaction ka;
+
+	if (!user_mode(regs))
+		return 0;
+
+	if (try_to_freeze())
+		goto no_signal;
+
+	if (!oldset)
+		oldset = &current->blocked;
+
+	task_pt_regs(current)->icountlevel = 0;
+
+	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
+
+	if (signr > 0) {
+		int ret;
+
+		/* Are we from a system call? */
+
+		if ((signed)regs->syscall >= 0) {
+
+			/* If so, check system call restarting.. */
+
+			switch (regs->areg[2]) {
+				case -ERESTARTNOHAND:
+				case -ERESTART_RESTARTBLOCK:
+					regs->areg[2] = -EINTR;
+					break;
+
+				case -ERESTARTSYS:
+					if (!(ka.sa.sa_flags & SA_RESTART)) {
+						regs->areg[2] = -EINTR;
+						break;
+					}
+					/* fallthrough */
+				case -ERESTARTNOINTR:
+					regs->areg[2] = regs->syscall;
+					regs->pc -= 3;
+					break;
+
+				default:
+					/* nothing to do */
+					break;
+			}
+		}
+
+		/* Whee!  Actually deliver the signal.  */
+		/* Set up the stack frame */
+		ret = setup_frame(signr, &ka, &info, oldset, regs);
+		if (ret)
+			return ret;
+
+		block_sigmask(&ka, signr);
+		if (current->ptrace & PT_SINGLESTEP)
+			task_pt_regs(current)->icountlevel = 1;
+
+		return 1;
+	}
+
+no_signal:
+	/* Did we come from a system call? */
+	if ((signed) regs->syscall >= 0) {
+		/* Restart the system call - no handlers present */
+		switch (regs->areg[2]) {
+		case -ERESTARTNOHAND:
+		case -ERESTARTSYS:
+		case -ERESTARTNOINTR:
+			regs->areg[2] = regs->syscall;
+			regs->pc -= 3;
+			break;
+		case -ERESTART_RESTARTBLOCK:
+			regs->areg[2] = __NR_restart_syscall;
+			regs->pc -= 3;
+			break;
+		}
+	}
+	if (current->ptrace & PT_SINGLESTEP)
+		task_pt_regs(current)->icountlevel = 1;
+	return 0;
+}
+
diff --git a/arch/xtensa/kernel/syscall.c b/arch/xtensa/kernel/syscall.c
new file mode 100644
index 00000000..816e6d0d
--- /dev/null
+++ b/arch/xtensa/kernel/syscall.c
@@ -0,0 +1,57 @@
+/*
+ * arch/xtensa/kernel/syscall.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2000 Silicon Graphics, Inc.
+ * Copyright (C) 1995 - 2000 by Ralf Baechle
+ *
+ * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
+ * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
+ * Chris Zankel <chris@zankel.net>
+ * Kevin Chea
+ *
+ */
+#include <asm/uaccess.h>
+#include <asm/syscall.h>
+#include <asm/unistd.h>
+#include <linux/linkage.h>
+#include <linux/stringify.h>
+#include <linux/errno.h>
+#include <linux/syscalls.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/mman.h>
+#include <linux/shm.h>
+
+typedef void (*syscall_t)(void);
+
+syscall_t sys_call_table[__NR_syscall_count] /* FIXME __cacheline_aligned */= {
+	[0 ... __NR_syscall_count - 1] = (syscall_t)&sys_ni_syscall,
+
+#undef __SYSCALL
+#define __SYSCALL(nr,symbol,nargs) [ nr ] = (syscall_t)symbol,
+#undef _XTENSA_UNISTD_H
+#undef  __KERNEL_SYSCALLS__
+#include <asm/unistd.h>
+};
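+
+/*
+ * The table is filled in two passes: the range initializer above points
+ * every slot at sys_ni_syscall, then re-including <asm/unistd.h> with
+ * __SYSCALL() redefined overrides the slots that are implemented.  A
+ * later designated initializer for the same index wins, so unimplemented
+ * entries keep the default.
+ */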
+
+asmlinkage long xtensa_shmat(int shmid, char __user *shmaddr, int shmflg)
+{
+	unsigned long ret;
+	long err;
+
+	err = do_shmat(shmid, shmaddr, shmflg, &ret);
+	if (err)
+		return err;
+	return (long)ret;
+}
+
+asmlinkage long xtensa_fadvise64_64(int fd, int advice,
+				    unsigned long long offset,
+				    unsigned long long len)
+{
+	return sys_fadvise64_64(fd, offset, len, advice);
+}
+
diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c
new file mode 100644
index 00000000..ac62f9cf
--- /dev/null
+++ b/arch/xtensa/kernel/time.c
@@ -0,0 +1,117 @@
+/*
+ * arch/xtensa/kernel/time.c
+ *
+ * Timer and clock support.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2005 Tensilica Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ */
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/clocksource.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/profile.h>
+#include <linux/delay.h>
+
+#include <asm/timex.h>
+#include <asm/platform.h>
+
+#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
+unsigned long ccount_per_jiffy;		/* per 1/HZ */
+unsigned long nsec_per_ccount;		/* nsec per ccount increment */
+#endif
+
+static cycle_t ccount_read(void)
+{
+	return (cycle_t)get_ccount();
+}
+
+static struct clocksource ccount_clocksource = {
+	.name = "ccount",
+	.rating = 200,
+	.read = ccount_read,
+	.mask = CLOCKSOURCE_MASK(32),
+};
+
+static irqreturn_t timer_interrupt(int irq, void *dev_id);
+static struct irqaction timer_irqaction = {
+	.handler =	timer_interrupt,
+	.flags =	IRQF_DISABLED,
+	.name =		"timer",
+};
+
+void __init time_init(void)
+{
+#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
+	printk("Calibrating CPU frequency ");
+	platform_calibrate_ccount();
+	printk("%d.%02d MHz\n", (int)ccount_per_jiffy/(1000000/HZ),
+			(int)(ccount_per_jiffy/(10000/HZ))%100);
+#endif
+	clocksource_register_hz(&ccount_clocksource, CCOUNT_PER_JIFFY * HZ);
+
+	/* Initialize the linux timer interrupt. */
+
+	setup_irq(LINUX_TIMER_INT, &timer_irqaction);
+	set_linux_timer(get_ccount() + CCOUNT_PER_JIFFY);
+}
+
+/*
+ * The timer interrupt is called HZ times per second.
+ */
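+
+/*
+ * The (signed long)(get_ccount() - next) comparisons below are
+ * wraparound-safe: they remain correct across 32-bit CCOUNT overflow as
+ * long as the actual delta stays below 2^31 cycles.
+ */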
+
+irqreturn_t timer_interrupt(int irq, void *dev_id)
+{
+	unsigned long next;
+
+	next = get_linux_timer();
+
+again:
+	while ((signed long)(get_ccount() - next) > 0) {
+
+		profile_tick(CPU_PROFILING);
+#ifndef CONFIG_SMP
+		update_process_times(user_mode(get_irq_regs()));
+#endif
+
+		xtime_update(1); /* Linux handler in kernel/time/timekeeping */
+
+		/* Note that writing CCOMPARE clears the interrupt. */
+
+		next += CCOUNT_PER_JIFFY;
+		set_linux_timer(next);
+	}
+
+	/* Allow the platform to do something useful (e.g. kick a watchdog). */
+
+	platform_heartbeat();
+
+	/* Make sure we didn't miss any tick... */
+
+	if ((signed long)(get_ccount() - next) > 0)
+		goto again;
+
+	return IRQ_HANDLED;
+}
+
+#ifndef CONFIG_GENERIC_CALIBRATE_DELAY
+void __cpuinit calibrate_delay(void)
+{
+	loops_per_jiffy = CCOUNT_PER_JIFFY;
+	printk("Calibrating delay loop (skipped)... "
+	       "%lu.%02lu BogoMIPS preset\n",
+	       loops_per_jiffy/(1000000/HZ),
+	       (loops_per_jiffy/(10000/HZ)) % 100);
+}
+#endif
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
new file mode 100644
index 00000000..bc1e14cf
--- /dev/null
+++ b/arch/xtensa/kernel/traps.c
@@ -0,0 +1,527 @@
+/*
+ * arch/xtensa/kernel/traps.c
+ *
+ * Exception handling.
+ *
+ * Derived from code with the following copyrights:
+ * Copyright (C) 1994 - 1999 by Ralf Baechle
+ * Modified for R3000 by Paul M. Antoine, 1995, 1996
+ * Complete output from die() by Ulf Carlsson, 1998
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ *
+ * Essentially rewritten for the Xtensa architecture port.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ *
+ * Joe Taylor	<joe@tensilica.com, joetylr@yahoo.com>
+ * Chris Zankel	<chris@zankel.net>
+ * Marc Gauthier<marc@tensilica.com, marc@alumni.uwaterloo.ca>
+ * Kevin Chea
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/stringify.h>
+#include <linux/kallsyms.h>
+#include <linux/delay.h>
+#include <linux/hardirq.h>
+
+#include <asm/ptrace.h>
+#include <asm/timex.h>
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+
+#ifdef CONFIG_KGDB
+extern int gdb_enter;
+extern int return_from_debug_flag;
+#endif
+
+/*
+ * Machine specific interrupt handlers
+ */
+
+extern void kernel_exception(void);
+extern void user_exception(void);
+
+extern void fast_syscall_kernel(void);
+extern void fast_syscall_user(void);
+extern void fast_alloca(void);
+extern void fast_unaligned(void);
+extern void fast_second_level_miss(void);
+extern void fast_store_prohibited(void);
+extern void fast_coprocessor(void);
+
+extern void do_illegal_instruction (struct pt_regs*);
+extern void do_interrupt (struct pt_regs*);
+extern void do_unaligned_user (struct pt_regs*);
+extern void do_multihit (struct pt_regs*, unsigned long);
+extern void do_page_fault (struct pt_regs*, unsigned long);
+extern void do_debug (struct pt_regs*);
+extern void system_call (struct pt_regs*);
+
+/*
+ * The vector table must be preceded by a save area (which
+ * implies it must be in RAM, unless one places RAM immediately
+ * before a ROM and puts the vector at the start of the ROM (!))
+ */
+
+#define KRNL		0x01
+#define USER		0x02
+
+#define COPROCESSOR(x)							\
+{ EXCCAUSE_COPROCESSOR ## x ## _DISABLED, USER, fast_coprocessor }
+
+typedef struct {
+	int cause;
+	int fast;
+	void* handler;
+} dispatch_init_table_t;
+
+static dispatch_init_table_t __initdata dispatch_init_table[] = {
+
+{ EXCCAUSE_ILLEGAL_INSTRUCTION,	0,	   do_illegal_instruction},
+{ EXCCAUSE_SYSTEM_CALL,		KRNL,	   fast_syscall_kernel },
+{ EXCCAUSE_SYSTEM_CALL,		USER,	   fast_syscall_user },
+{ EXCCAUSE_SYSTEM_CALL,		0,	   system_call },
+/* EXCCAUSE_INSTRUCTION_FETCH unhandled */
+/* EXCCAUSE_LOAD_STORE_ERROR unhandled*/
+{ EXCCAUSE_LEVEL1_INTERRUPT,	0,	   do_interrupt },
+{ EXCCAUSE_ALLOCA,		USER|KRNL, fast_alloca },
+/* EXCCAUSE_INTEGER_DIVIDE_BY_ZERO unhandled */
+/* EXCCAUSE_PRIVILEGED unhandled */
+#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
+#ifdef CONFIG_UNALIGNED_USER
+{ EXCCAUSE_UNALIGNED,		USER,	   fast_unaligned },
+#else
+{ EXCCAUSE_UNALIGNED,		0,	   do_unaligned_user },
+#endif
+{ EXCCAUSE_UNALIGNED,		KRNL,	   fast_unaligned },
+#endif
+#ifdef CONFIG_MMU
+{ EXCCAUSE_ITLB_MISS,		0,	   do_page_fault },
+{ EXCCAUSE_ITLB_MISS,		USER|KRNL, fast_second_level_miss},
+{ EXCCAUSE_ITLB_MULTIHIT,		0,	   do_multihit },
+{ EXCCAUSE_ITLB_PRIVILEGE,	0,	   do_page_fault },
+/* EXCCAUSE_SIZE_RESTRICTION unhandled */
+{ EXCCAUSE_FETCH_CACHE_ATTRIBUTE,	0,	   do_page_fault },
+{ EXCCAUSE_DTLB_MISS,		USER|KRNL, fast_second_level_miss},
+{ EXCCAUSE_DTLB_MISS,		0,	   do_page_fault },
+{ EXCCAUSE_DTLB_MULTIHIT,		0,	   do_multihit },
+{ EXCCAUSE_DTLB_PRIVILEGE,	0,	   do_page_fault },
+/* EXCCAUSE_DTLB_SIZE_RESTRICTION unhandled */
+{ EXCCAUSE_STORE_CACHE_ATTRIBUTE,	USER|KRNL, fast_store_prohibited },
+{ EXCCAUSE_STORE_CACHE_ATTRIBUTE,	0,	   do_page_fault },
+{ EXCCAUSE_LOAD_CACHE_ATTRIBUTE,	0,	   do_page_fault },
+#endif /* CONFIG_MMU */
+/* XCCHAL_EXCCAUSE_FLOATING_POINT unhandled */
+#if XTENSA_HAVE_COPROCESSOR(0)
+COPROCESSOR(0),
+#endif
+#if XTENSA_HAVE_COPROCESSOR(1)
+COPROCESSOR(1),
+#endif
+#if XTENSA_HAVE_COPROCESSOR(2)
+COPROCESSOR(2),
+#endif
+#if XTENSA_HAVE_COPROCESSOR(3)
+COPROCESSOR(3),
+#endif
+#if XTENSA_HAVE_COPROCESSOR(4)
+COPROCESSOR(4),
+#endif
+#if XTENSA_HAVE_COPROCESSOR(5)
+COPROCESSOR(5),
+#endif
+#if XTENSA_HAVE_COPROCESSOR(6)
+COPROCESSOR(6),
+#endif
+#if XTENSA_HAVE_COPROCESSOR(7)
+COPROCESSOR(7),
+#endif
+{ EXCCAUSE_MAPPED_DEBUG,		0,		do_debug },
+{ -1, -1, 0 }
+
+};
+
+/* The exception table <exc_table> serves two functions:
+ * 1. it contains three dispatch tables (fast_user, fast_kernel, default-c)
+ * 2. it is a temporary memory buffer for the exception handlers.
+ */
+
+unsigned long exc_table[EXC_TABLE_SIZE/4];
+
+void die(const char*, struct pt_regs*, long);
+
+static inline void
+__die_if_kernel(const char *str, struct pt_regs *regs, long err)
+{
+	if (!user_mode(regs))
+		die(str, regs, err);
+}
+
+/*
+ * Unhandled Exceptions. Kill user task or panic if in kernel space.
+ */
+
+void do_unhandled(struct pt_regs *regs, unsigned long exccause)
+{
+	__die_if_kernel("Caught unhandled exception - should not happen",
+	    		regs, SIGKILL);
+
+	/* If in user mode, send SIGILL signal to current process */
+	printk("Caught unhandled exception in '%s' "
+	       "(pid = %d, pc = %#010lx) - should not happen\n"
+	       "\tEXCCAUSE is %ld\n",
+	       current->comm, task_pid_nr(current), regs->pc, exccause);
+	force_sig(SIGILL, current);
+}
+
+/*
+ * Multi-hit exception. This is fatal!
+ */
+
+void do_multihit(struct pt_regs *regs, unsigned long exccause)
+{
+	die("Caught multihit exception", regs, SIGKILL);
+}
+
+/*
+ * Level-1 interrupt.
+ * We currently have no priority encoding.
+ */
+
+unsigned long ignored_level1_interrupts;
+extern void do_IRQ(int, struct pt_regs *);
+
+void do_interrupt (struct pt_regs *regs)
+{
+	unsigned long intread = get_sr (INTREAD);
+	unsigned long intenable = get_sr (INTENABLE);
+	int i, mask;
+
+	/* Handle all interrupts (no priorities).
+	 * (Clear the interrupt before processing, in case it's
+	 *  edge-triggered or software-generated)
+	 */
+
+	for (i=0, mask = 1; i < XCHAL_NUM_INTERRUPTS; i++, mask <<= 1) {
+		if (mask & (intread & intenable)) {
+			set_sr (mask, INTCLEAR);
+			do_IRQ (i,regs);
+		}
+	}
+}
+
+/*
+ * Illegal instruction. Fatal if in kernel space.
+ */
+
+void
+do_illegal_instruction(struct pt_regs *regs)
+{
+	__die_if_kernel("Illegal instruction in kernel", regs, SIGKILL);
+
+	/* If in user mode, send SIGILL signal to current process. */
+
+	printk("Illegal Instruction in '%s' (pid = %d, pc = %#010lx)\n",
+	    current->comm, task_pid_nr(current), regs->pc);
+	force_sig(SIGILL, current);
+}
+
+
+/*
+ * Handle unaligned memory accesses from user space. Kill the task.
+ *
+ * If CONFIG_UNALIGNED_USER is not set, we don't allow unaligned memory
+ * accesses caused from user space.
+ */
+
+#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
+#ifndef CONFIG_UNALIGNED_USER
+void
+do_unaligned_user (struct pt_regs *regs)
+{
+	siginfo_t info;
+
+	__die_if_kernel("Unhandled unaligned exception in kernel",
+	    		regs, SIGKILL);
+
+	current->thread.bad_vaddr = regs->excvaddr;
+	current->thread.error_code = -3;
+	printk("Unaligned memory access to %08lx in '%s' "
+	       "(pid = %d, pc = %#010lx)\n",
+	       regs->excvaddr, current->comm, task_pid_nr(current), regs->pc);
+	info.si_signo = SIGBUS;
+	info.si_errno = 0;
+	info.si_code = BUS_ADRALN;
+	info.si_addr = (void *) regs->excvaddr;
+	force_sig_info(SIGBUS, &info, current);
+}
+#endif
+#endif
+
+void
+do_debug(struct pt_regs *regs)
+{
+#ifdef CONFIG_KGDB
+	/* If remote debugging is configured AND enabled, we give control to
+	 * kgdb.  Otherwise, we fall through, perhaps giving control to the
+	 * native debugger.
+	 */
+
+	if (gdb_enter) {
+		extern void gdb_handle_exception(struct pt_regs *);
+		gdb_handle_exception(regs);
+		return_from_debug_flag = 1;
+		return;
+	}
+#endif
+
+	__die_if_kernel("Breakpoint in kernel", regs, SIGKILL);
+
+	/* If in user mode, send SIGTRAP signal to current process */
+
+	force_sig(SIGTRAP, current);
+}
+
+
+/*
+ * Initialize dispatch tables.
+ *
+ * The exception vectors are stored compressed in the __init section in the
+ * dispatch_init_table. This function initializes the following three tables
+ * from that compressed table:
+ * - fast user		first dispatch table for user exceptions
+ * - fast kernel	first dispatch table for kernel exceptions
+ * - default C-handler	C-handler called by the default fast handler.
+ *
+ * See vectors.S for more details.
+ */
+
+#define set_handler(idx,handler) (exc_table[idx] = (unsigned long) (handler))
+
+void __init trap_init(void)
+{
+	int i;
+
+	/* Setup default vectors. */
+
+	for (i = 0; i < 64; i++) {
+		set_handler(EXC_TABLE_FAST_USER/4   + i, user_exception);
+		set_handler(EXC_TABLE_FAST_KERNEL/4 + i, kernel_exception);
+		set_handler(EXC_TABLE_DEFAULT/4 + i, do_unhandled);
+	}
+
+	/* Setup specific handlers. */
+
+	for (i = 0; dispatch_init_table[i].cause >= 0; i++) {
+
+		int fast = dispatch_init_table[i].fast;
+		int cause = dispatch_init_table[i].cause;
+		void *handler = dispatch_init_table[i].handler;
+
+		if (fast == 0)
+			set_handler (EXC_TABLE_DEFAULT/4 + cause, handler);
+		if (fast && fast & USER)
+			set_handler (EXC_TABLE_FAST_USER/4 + cause, handler);
+		if (fast && fast & KRNL)
+			set_handler (EXC_TABLE_FAST_KERNEL/4 + cause, handler);
+	}
+
+	/* Initialize EXCSAVE_1 to hold the address of the exception table. */
+
+	i = (unsigned long)exc_table;
+	__asm__ __volatile__("wsr  %0, "__stringify(EXCSAVE_1)"\n" : : "a" (i));
+}
+
+/*
+ * This function dumps the current valid window frame and other base registers.
+ */
+
+void show_regs(struct pt_regs * regs)
+{
+	int i, wmask;
+
+	wmask = regs->wmask & ~1;
+
+	for (i = 0; i < 16; i++) {
+		if ((i % 8) == 0)
+			printk(KERN_INFO "a%02d:", i);
+		printk(KERN_CONT " %08lx", regs->areg[i]);
+	}
+	printk(KERN_CONT "\n");
+
+	printk("pc: %08lx, ps: %08lx, depc: %08lx, excvaddr: %08lx\n",
+	       regs->pc, regs->ps, regs->depc, regs->excvaddr);
+	printk("lbeg: %08lx, lend: %08lx lcount: %08lx, sar: %08lx\n",
+	       regs->lbeg, regs->lend, regs->lcount, regs->sar);
+	if (user_mode(regs))
+		printk("wb: %08lx, ws: %08lx, wmask: %08lx, syscall: %ld\n",
+		       regs->windowbase, regs->windowstart, regs->wmask,
+		       regs->syscall);
+}
+
+static __always_inline unsigned long *stack_pointer(struct task_struct *task)
+{
+	unsigned long *sp;
+
+	if (!task || task == current)
+		__asm__ __volatile__ ("mov %0, a1\n" : "=a"(sp));
+	else
+		sp = (unsigned long *)task->thread.sp;
+
+	return sp;
+}
+
+static inline void spill_registers(void)
+{
+	unsigned int a0, ps;
+
+	__asm__ __volatile__ (
+		"movi	a14," __stringify (PS_EXCM_BIT) " | 1\n\t"
+		"mov	a12, a0\n\t"
+		"rsr	a13," __stringify(SAR) "\n\t"
+		"xsr	a14," __stringify(PS) "\n\t"
+		"movi	a0, _spill_registers\n\t"
+		"rsync\n\t"
+		"callx0 a0\n\t"
+		"mov	a0, a12\n\t"
+		"wsr	a13," __stringify(SAR) "\n\t"
+		"wsr	a14," __stringify(PS) "\n\t"
+		:: "a" (&a0), "a" (&ps)
+		: "a2", "a3", "a4", "a7", "a11", "a12", "a13", "a14", "a15", "memory");
+}
+
+void show_trace(struct task_struct *task, unsigned long *sp)
+{
+	unsigned long a0, a1, pc;
+	unsigned long sp_start, sp_end;
+
+	if (sp)
+		a1 = (unsigned long)sp;
+	else
+		a1 = (unsigned long)stack_pointer(task);
+
+	sp_start = a1 & ~(THREAD_SIZE-1);
+	sp_end = sp_start + THREAD_SIZE;
+
+	printk("Call Trace:");
+#ifdef CONFIG_KALLSYMS
+	printk("\n");
+#endif
+	spill_registers();
+
+	while (a1 > sp_start && a1 < sp_end) {
+		sp = (unsigned long*)a1;
+
+		a0 = *(sp - 4);
+		a1 = *(sp - 3);
+
+		if (a1 <= (unsigned long) sp)
+			break;
+
+		pc = MAKE_PC_FROM_RA(a0, a1);
+
+		if (kernel_text_address(pc)) {
+			printk(" [<%08lx>] ", pc);
+			print_symbol("%s\n", pc);
+		}
+	}
+	printk("\n");
+}
+
+/*
+ * This routine abuses get_user()/put_user() to reference pointers
+ * with at least a bit of error checking ...
+ */
+
+static int kstack_depth_to_print = 24;
+
+void show_stack(struct task_struct *task, unsigned long *sp)
+{
+	int i = 0;
+	unsigned long *stack;
+
+	if (!sp)
+		sp = stack_pointer(task);
+	stack = sp;
+
+	printk("\nStack: ");
+
+	for (i = 0; i < kstack_depth_to_print; i++) {
+		if (kstack_end(sp))
+			break;
+		if (i && ((i % 8) == 0))
+			printk("\n       ");
+		printk("%08lx ", *sp++);
+	}
+	printk("\n");
+	show_trace(task, stack);
+}
+
+void dump_stack(void)
+{
+	show_stack(current, NULL);
+}
+
+EXPORT_SYMBOL(dump_stack);
+
+
+void show_code(unsigned int *pc)
+{
+	long i;
+
+	printk("\nCode:");
+
+	for (i = -3; i < 6; i++) {
+		unsigned long insn;
+		if (__get_user(insn, pc + i)) {
+			printk(" (Bad address in pc)\n");
+			break;
+		}
+		printk("%c%08lx%c",(i?' ':'<'),insn,(i?' ':'>'));
+	}
+}
+
+DEFINE_SPINLOCK(die_lock);
+
+void die(const char * str, struct pt_regs * regs, long err)
+{
+	static int die_counter;
+	int nl = 0;
+
+	console_verbose();
+	spin_lock_irq(&die_lock);
+
+	printk("%s: sig: %ld [#%d]\n", str, err, ++die_counter);
+#ifdef CONFIG_PREEMPT
+	printk("PREEMPT ");
+	nl = 1;
+#endif
+	if (nl)
+		printk("\n");
+	show_regs(regs);
+	if (!user_mode(regs))
+		show_stack(NULL, (unsigned long*)regs->areg[1]);
+
+	add_taint(TAINT_DIE);
+	spin_unlock_irq(&die_lock);
+
+	if (in_interrupt())
+		panic("Fatal exception in interrupt");
+
+	if (panic_on_oops)
+		panic("Fatal exception");
+
+	do_exit(err);
+}
+
+
diff --git a/arch/xtensa/kernel/vectors.S b/arch/xtensa/kernel/vectors.S
new file mode 100644
index 00000000..70066e35
--- /dev/null
+++ b/arch/xtensa/kernel/vectors.S
@@ -0,0 +1,468 @@
+/*
+ * arch/xtensa/kernel/vectors.S
+ *
+ * This file contains all exception vectors (user, kernel, and double),
+ * as well as the window vectors (overflow and underflow), and the debug
+ * vector. These are the primary vectors executed by the processor if an
+ * exception occurs.
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License.  See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Copyright (C) 2005 Tensilica, Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ *
+ */
+
+/*
+ * We use a two-level table approach. The user and kernel exception vectors
+ * use a first-level dispatch table to dispatch the exception to a registered
+ * fast handler or the default handler, if no fast handler was registered.
+ * The default handler sets up a C-stack and dispatches the exception to a
+ * registered C handler in the second-level dispatch table.
+ *
+ * Fast handler entry condition:
+ *
+ *   a0:	trashed, original value saved on stack (PT_AREG0)
+ *   a1:	a1
+ *   a2:	new stack pointer, original value in depc
+ *   a3:	dispatch table
+ *   depc:	a2, original value saved on stack (PT_DEPC)
+ *   excsave_1:	a3
+ *
+ * The value for PT_DEPC saved to stack also functions as a boolean to
+ * indicate that the exception is either a double or a regular exception:
+ *
+ *   PT_DEPC	>= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception
+ *		<  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
+ *
+ * Note: Neither the kernel nor the user exception handler generates literals.
+ *
+ */
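+
+/*
+ * As a rough C sketch (illustrative only; the real lookup is the addx4/l32i
+ * sequence in the vectors below, using the EXC_TABLE_* offsets generated by
+ * asm-offsets.c):
+ *
+ *	typedef void (*fast_handler_t)(void);
+ *
+ *	fast_handler_t lookup(char *exc_table, unsigned long exccause)
+ *	{
+ *		return *(fast_handler_t *)(exc_table + exccause * 4 +
+ *					   EXC_TABLE_FAST_USER);
+ *	}
+ */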
+
+#include <linux/linkage.h>
+#include <asm/ptrace.h>
+#include <asm/current.h>
+#include <asm/asm-offsets.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <asm/thread_info.h>
+
+#define WINDOW_VECTORS_SIZE   0x180
+
+
+/*
+ * User exception vector. (Exceptions with PS.UM == 1, PS.EXCM == 0)
+ *
+ * We get here when an exception occurred while we were in userland.
+ * We switch to the kernel stack and jump to the first-level handler
+ * associated with the exception cause.
+ *
+ * Note: the saved kernel stack pointer (EXC_TABLE_KSTK) is already
+ *       decremented by PT_USER_SIZE.
+ */
+
+	.section .UserExceptionVector.text, "ax"
+
+ENTRY(_UserExceptionVector)
+
+	xsr	a3, EXCSAVE_1		# save a3 and get dispatch table
+	wsr	a2, DEPC		# save a2
+	l32i	a2, a3, EXC_TABLE_KSTK	# load kernel stack to a2
+	s32i	a0, a2, PT_AREG0	# save a0 to ESF
+	rsr	a0, EXCCAUSE		# retrieve exception cause
+	s32i	a0, a2, PT_DEPC		# mark it as a regular exception
+	addx4	a0, a0, a3		# find entry in table
+	l32i	a0, a0, EXC_TABLE_FAST_USER	# load handler
+	jx	a0
+
+/*
+ * Kernel exception vector. (Exceptions with PS.UM == 0, PS.EXCM == 0)
+ *
+ * We get this exception when we were already in kernel space.
+ * We decrement the current stack pointer (kernel) by PT_SIZE and
+ * jump to the first-level handler associated with the exception cause.
+ *
+ * Note: we need to preserve space for the spill region.
+ */
+
+	.section .KernelExceptionVector.text, "ax"
+
+ENTRY(_KernelExceptionVector)
+
+	xsr	a3, EXCSAVE_1		# save a3, and get dispatch table
+	wsr	a2, DEPC		# save a2
+	addi	a2, a1, -16-PT_SIZE	# adjust stack pointer
+	s32i	a0, a2, PT_AREG0	# save a0 to ESF
+	rsr	a0, EXCCAUSE		# retrieve exception cause
+	s32i	a0, a2, PT_DEPC		# mark it as a regular exception
+	addx4	a0, a0, a3		# find entry in table
+	l32i	a0, a0, EXC_TABLE_FAST_KERNEL	# load handler address
+	jx	a0
+
+
+/*
+ * Double exception vector (Exceptions with PS.EXCM == 1)
+ * We get this exception when another exception occurs while we are
+ * already in an exception, such as window overflow/underflow exception,
+ * or 'expected' exceptions, for example memory exception when we were trying
+ * to read data from an invalid address in user space.
+ *
+ * Note that this vector is never invoked for level-1 interrupts, because such
+ * interrupts are disabled (masked) when PS.EXCM is set.
+ *
+ * We decode the exception and take the appropriate action.  However, the
+ * double exception vector is much more careful, because a lot more error
+ * cases go through the double exception vector than through the user and
+ * kernel exception vectors.
+ *
+ * Occasionally, the kernel expects a double exception to occur.  This usually
+ * happens when accessing user-space memory with the user's permissions
+ * (l32e/s32e instructions).  The kernel state, though, is not always suitable
+ * for immediate transfer of control to handle_double, where "normal" exception
+ * processing occurs. Also in kernel mode, TLB misses can occur if accessing
+ * vmalloc memory, possibly requiring repair in a double exception handler.
+ *
+ * The variable at TABLE_FIXUP offset from the pointer in EXCSAVE_1 doubles as
+ * a boolean variable and a pointer to a fixup routine. If the variable
+ * EXC_TABLE_FIXUP is non-zero, this handler jumps to that address. A value of
+ * zero indicates to use the default kernel/user exception handler.
+ * There is only one exception: when the value is identical to the exc_table
+ * label, the kernel is in trouble. This mechanism is used to protect critical
+ * sections, mainly when the handler writes to the stack to assert the stack
+ * pointer is valid. Once the fixup/default handler leaves that area, the
+ * EXC_TABLE_FIXUP variable is reset to the fixup handler or zero.
+ *
+ * Procedures wishing to use this mechanism should set EXC_TABLE_FIXUP to the
+ * nonzero address of a fixup routine before any access that could cause a
+ * double exception, and reset it before they return.
+ *
+ * Some other things to take care of when a fast exception handler doesn't
+ * specify a particular fixup handler but wants to use the default handlers:
+ *
+ *  - The original stack pointer (in a1) must not be modified. The fast
+ *    exception handler should only use a2 as the stack pointer.
+ *
+ *  - If the fast handler manipulates the stack pointer (in a2), it has to
+ *    register a valid fixup handler and cannot use the default handlers.
+ *
+ *  - The handler can use any other generic register from a3 to a15, but it
+ *    must save the content of these registers to stack (PT_AREG3...PT_AREGx)
+ *
+ *  - These registers must be saved before a double exception can occur.
+ *
+ *  - (*) If we ever implement handling signals while in double exceptions,
+ *    the number of registers a fast handler has saved (excluding a0 and a1)
+ *    must be written to PT_AREG1. (1 if only a3 is used, 2 for a3 and a4,
+ *    etc.)
+ *
+ * The fixup handlers are special handlers:
+ *
+ *  - Fixup entry conditions differ from regular exceptions:
+ *
+ *	a0:	   DEPC
+ *	a1: 	   a1
+ *	a2:	   trashed, original value in EXC_TABLE_DOUBLE_A2
+ *	a3:	   exctable
+ *	depc:	   a0
+ *	excsave_1: a3
+ *
+ *  - When the kernel enters the fixup handler, it still assumes it is in a
+ *    critical section, so the EXC_TABLE_FIXUP variable is set to exc_table.
+ *    The fixup handler, therefore, has to re-register itself as the fixup
+ *    handler before it returns from the double exception.
+ *
+ *  - Fixup handler can share the same exception frame with the fast handler.
+ *    The kernel stack pointer is not changed when entering the fixup handler.
+ *
+ *  - Fixup handlers can jump to the default kernel and user exception
+ *    handlers. Before a handler jumps, though, it has to set up an exception
+ *    frame on the stack. Because the default handler resets the registered
+ *    fixup handler, the fixup handler must make sure that the default
+ *    handler returns to it instead of to the exception address, so it can
+ *    re-register itself as the fixup handler.
+ *
+ * In case of a critical condition where the kernel cannot recover, we jump
+ * to unrecoverable_exception with the following entry conditions.
+ * All registers a0...a15 are unchanged from the last exception, except:
+ *
+ *	a0:	   last address before we jumped to unrecoverable_exception.
+ *	excsave_1: a0
+ *
+ *
+ * See the handle_alloca_user and spill_registers routines for example clients.
+ *
+ * FIXME: Note: we currently don't allow signal handling coming from a double
+ *        exception, so the item marked with (*) is not required.
+ */
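+
+/*
+ * A hedged C sketch of the fixup protocol described above (set_fixup() and
+ * l32e_access() are hypothetical names; the real code writes
+ * EXC_TABLE_FIXUP from assembly):
+ *
+ *	set_fixup(my_fixup);		// arm before the risky access
+ *	value = l32e_access(uaddr);	// may raise a double exception
+ *	set_fixup(0);			// fall back to the default handlers
+ *
+ * where my_fixup re-registers itself on entry (the kernel resets
+ * EXC_TABLE_FIXUP to exc_table while in the handler) before it returns
+ * from the double exception.
+ */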
+
+	.section .DoubleExceptionVector.text, "ax"
+	.begin literal_prefix .DoubleExceptionVector
+
+ENTRY(_DoubleExceptionVector)
+
+	/* Deliberately destroy excsave (don't assume its value was valid). */
+
+	wsr	a3, EXCSAVE_1		# save a3
+
+	/* Check for kernel double exception (usually fatal). */
+
+	rsr	a3, PS
+	_bbci.l	a3, PS_UM_BIT, .Lksp
+
+	/* Check if we are currently handling a window exception. */
+	/* Note: We don't need to indicate that we enter a critical section. */
+
+	xsr	a0, DEPC		# get DEPC, save a0
+
+	movi	a3, XCHAL_WINDOW_VECTORS_VADDR
+	_bltu	a0, a3, .Lfixup
+	addi	a3, a3, WINDOW_VECTORS_SIZE
+	_bgeu	a0, a3, .Lfixup
+
+	/* Window overflow/underflow exception. Get stack pointer. */
+
+	mov	a3, a2
+	movi	a2, exc_table
+	l32i	a2, a2, EXC_TABLE_KSTK
+
+	/* Check for overflow/underflow exception, jump if overflow. */
+
+	_bbci.l	a0, 6, .Lovfl
+
+	/* a0: depc, a1: a1, a2: kstk, a3: a2, depc: a0, excsave: a3  */
+
+	/* Restart window underflow exception.
+	 * We return to the instruction in user space that caused the window
+	 * underflow exception. Therefore, we change window base to the value
+	 * before we entered the window underflow exception and prepare the
+	 * registers to return as if we were coming from a regular exception
+	 * by changing depc (in a0).
+	 * Note: We can trash the current window frame (a0...a3) and depc!
+	 */
+
+	wsr	a2, DEPC		# save stack pointer temporarily
+	rsr	a0, PS
+	extui	a0, a0, PS_OWB_SHIFT, 4
+	wsr	a0, WINDOWBASE
+	rsync
+
+	/* We are now in the previous window frame. Save registers again. */
+
+	xsr	a2, DEPC		# save a2 and get stack pointer
+	s32i	a0, a2, PT_AREG0
+
+	wsr	a3, EXCSAVE_1		# save a3
+	movi	a3, exc_table
+
+	rsr	a0, EXCCAUSE
+	s32i	a0, a2, PT_DEPC		# mark it as a regular exception
+	addx4	a0, a0, a3
+	l32i	a0, a0, EXC_TABLE_FAST_USER
+	jx	a0
+
+.Lfixup:/* Check for a fixup handler or if we were in a critical section. */
+
+	/* a0: depc, a1: a1, a2: a2, a3: trashed, depc: a0, excsave1: a3 */
+
+	movi	a3, exc_table
+	s32i	a2, a3, EXC_TABLE_DOUBLE_SAVE	# temporary variable
+
+	/* Enter critical section. */
+
+	l32i	a2, a3, EXC_TABLE_FIXUP
+	s32i	a3, a3, EXC_TABLE_FIXUP
+	beq	a2, a3, .Lunrecoverable_fixup	# critical!
+	beqz	a2, .Ldflt			# no handler was registered
+
+	/* a0: depc, a1: a1, a2: trash, a3: exctable, depc: a0, excsave: a3 */
+
+	jx	a2
+
+.Ldflt:	/* Get stack pointer. */
+
+	l32i	a3, a3, EXC_TABLE_DOUBLE_SAVE
+	addi	a2, a3, -PT_USER_SIZE
+
+.Lovfl:	/* Jump to default handlers. */
+
+	/* a0: depc, a1: a1, a2: kstk, a3: a2, depc: a0, excsave: a3 */
+
+	xsr	a3, DEPC
+	s32i	a0, a2, PT_DEPC
+	s32i	a3, a2, PT_AREG0
+
+	/* a0: avail, a1: a1, a2: kstk, a3: avail, depc: a2, excsave: a3 */
+
+	movi	a3, exc_table
+	rsr	a0, EXCCAUSE
+	addx4	a0, a0, a3
+	l32i	a0, a0, EXC_TABLE_FAST_USER
+	jx	a0
+
+	/*
+	 * We only allow the ITLB miss exception if we are in kernel space.
+	 * All other exceptions are unexpected and thus unrecoverable!
+	 */
+
+#ifdef CONFIG_MMU
+	.extern fast_second_level_miss_double_kernel
+
+.Lksp:	/* a0: a0, a1: a1, a2: a2, a3: trashed, depc: depc, excsave: a3 */
+
+	rsr	a3, EXCCAUSE
+	beqi	a3, EXCCAUSE_ITLB_MISS, 1f
+	addi	a3, a3, -EXCCAUSE_DTLB_MISS
+	bnez	a3, .Lunrecoverable
+1:	movi	a3, fast_second_level_miss_double_kernel
+	jx	a3
+#else
+.equ	.Lksp,	.Lunrecoverable
+#endif
+
+	/* Critical! We can't handle this situation. PANIC! */
+
+	.extern unrecoverable_exception
+
+.Lunrecoverable_fixup:
+	l32i	a2, a3, EXC_TABLE_DOUBLE_SAVE
+	xsr	a0, DEPC
+
+.Lunrecoverable:
+	rsr	a3, EXCSAVE_1
+	wsr	a0, EXCSAVE_1
+	movi	a0, unrecoverable_exception
+	callx0	a0
+
+	.end literal_prefix
+
+
+/*
+ * Debug interrupt vector
+ *
+ * There is not much space here, so simply jump to another handler.
+ * EXCSAVE[DEBUGLEVEL] has been set to that handler.
+ */
+
+	.section .DebugInterruptVector.text, "ax"
+
+ENTRY(_DebugInterruptVector)
+	xsr	a0, EXCSAVE + XCHAL_DEBUGLEVEL
+	jx	a0
+
+
+
+/* Window overflow and underflow handlers.
+ * The handlers must be 64 bytes apart, first starting with the underflow
+ * handlers underflow-4 to underflow-12, then the overflow handlers
+ * overflow-4 to overflow-12.
+ *
+ * Note: We rerun the underflow handlers if we hit an exception, so
+ *	 we try to access any page that would cause a page fault early.
+ */
+
+	.section		.WindowVectors.text, "ax"
+
+
+/* 4-Register Window Overflow Vector (Handler) */
+
+	.align 64
+.global _WindowOverflow4
+_WindowOverflow4:
+	s32e	a0, a5, -16
+	s32e	a1, a5, -12
+	s32e	a2, a5,  -8
+	s32e	a3, a5,  -4
+	rfwo
+
+
+/* 4-Register Window Underflow Vector (Handler) */
+
+	.align 64
+.global _WindowUnderflow4
+_WindowUnderflow4:
+	l32e	a0, a5, -16
+	l32e	a1, a5, -12
+	l32e	a2, a5,  -8
+	l32e	a3, a5,  -4
+	rfwu
+
+
+/* 8-Register Window Overflow Vector (Handler) */
+
+	.align 64
+.global _WindowOverflow8
+_WindowOverflow8:
+	s32e	a0, a9, -16
+	l32e	a0, a1, -12
+	s32e	a2, a9,  -8
+	s32e	a1, a9, -12
+	s32e	a3, a9,  -4
+	s32e	a4, a0, -32
+	s32e	a5, a0, -28
+	s32e	a6, a0, -24
+	s32e	a7, a0, -20
+	rfwo
+
+/* 8-Register Window Underflow Vector (Handler) */
+
+	.align 64
+.global _WindowUnderflow8
+_WindowUnderflow8:
+	l32e	a1, a9, -12
+	l32e	a0, a9, -16
+	l32e	a7, a1, -12
+	l32e	a2, a9,  -8
+	l32e	a4, a7, -32
+	l32e	a3, a9,  -4
+	l32e	a5, a7, -28
+	l32e	a6, a7, -24
+	l32e	a7, a7, -20
+	rfwu
+
+
+/* 12-Register Window Overflow Vector (Handler) */
+
+	.align 64
+.global _WindowOverflow12
+_WindowOverflow12:
+	s32e	a0,  a13, -16
+	l32e	a0,  a1,  -12
+	s32e	a1,  a13, -12
+	s32e	a2,  a13,  -8
+	s32e	a3,  a13,  -4
+	s32e	a4,  a0,  -48
+	s32e	a5,  a0,  -44
+	s32e	a6,  a0,  -40
+	s32e	a7,  a0,  -36
+	s32e	a8,  a0,  -32
+	s32e	a9,  a0,  -28
+	s32e	a10, a0,  -24
+	s32e	a11, a0,  -20
+	rfwo
+
+/* 12-Register Window Underflow Vector (Handler) */
+
+	.align 64
+.global _WindowUnderflow12
+_WindowUnderflow12:
+	l32e	a1,  a13, -12
+	l32e	a0,  a13, -16
+	l32e	a11, a1,  -12
+	l32e	a2,  a13,  -8
+	l32e	a4,  a11, -48
+	l32e	a8,  a11, -32
+	l32e	a3,  a13,  -4
+	l32e	a5,  a11, -44
+	l32e	a6,  a11, -40
+	l32e	a7,  a11, -36
+	l32e	a9,  a11, -28
+	l32e	a10, a11, -24
+	l32e	a11, a11, -20
+	rfwu
+
+	.text
+
+
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
new file mode 100644
index 00000000..88ecea3f
--- /dev/null
+++ b/arch/xtensa/kernel/vmlinux.lds.S
@@ -0,0 +1,272 @@
+/*
+ * arch/xtensa/kernel/vmlinux.lds.S
+ *
+ * Xtensa linker script
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
+ * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
+ */
+
+#include <asm-generic/vmlinux.lds.h>
+#include <asm/page.h>
+#include <asm/thread_info.h>
+
+#include <variant/core.h>
+#include <platform/hardware.h>
+OUTPUT_ARCH(xtensa)
+ENTRY(_start)
+
+#ifdef __XTENSA_EB__
+jiffies = jiffies_64 + 4;
+#else
+jiffies = jiffies_64;
+#endif
+
+#ifndef KERNELOFFSET
+#define KERNELOFFSET 0xd0001000
+#endif
+
+/* Note: In the following macros, it would be nice to specify only the
+   vector name and section kind and construct "sym" and "section" using
+   CPP concatenation, but that does not work reliably.  Concatenating a
+   string with "." produces an invalid token.  CPP will not print a
+   warning because it thinks this is an assembly file, but it leaves
+   them as multiple tokens and there may or may not be whitespace
+   between them.  */
+
+/* Macro for a relocation entry */
+
+#define RELOCATE_ENTRY(sym, section)		\
+	LONG(sym ## _start);			\
+	LONG(sym ## _end);			\
+	LONG(LOADADDR(section))
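+
+/* For example, RELOCATE_ENTRY(_WindowVectors_text, .WindowVectors.text)
+   emits three words: _WindowVectors_text_start, _WindowVectors_text_end,
+   and LOADADDR(.WindowVectors.text), which the startup code walks (between
+   __boot_reloc_table_start and __boot_reloc_table_end) to copy each vector
+   to its run-time address.  */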
+
+/* Macro to define a section for a vector.
+ *
+ * Use of the MIN function catches the types of errors illustrated in
+ * the following example:
+ *
+ * Assume the section .DoubleExceptionVector.literal is completely
+ * full.  Then a programmer adds code to .DoubleExceptionVector.text
+ * that produces another literal.  The final literal position will
+ * overlay onto the first word of the adjacent code section
+ * .DoubleExceptionVector.text.  (In practice, the literals will
+ * overwrite the code, and the first few instructions will be
+ * garbage.)
+ */
+
+#define SECTION_VECTOR(sym, section, addr, max_prevsec_size, prevsec)       \
+  section addr : AT((MIN(LOADADDR(prevsec) + max_prevsec_size,		    \
+		         LOADADDR(prevsec) + SIZEOF(prevsec)) + 3) & ~ 3)   \
+  {									    \
+    . = ALIGN(4);							    \
+    sym ## _start = ABSOLUTE(.);		 			    \
+    *(section)								    \
+    sym ## _end = ABSOLUTE(.);						    \
+  }
+
+/*
+ *  Mapping of input sections to output sections when linking.
+ */
+
+SECTIONS
+{
+  . = KERNELOFFSET;
+  /* .text section */
+
+  _text = .;
+  _stext = .;
+  _ftext = .;
+
+  .text :
+  {
+    /* The HEAD_TEXT section must be the first section! */
+    HEAD_TEXT
+    TEXT_TEXT
+    VMLINUX_SYMBOL(__sched_text_start) = .;
+    *(.sched.literal .sched.text)
+    VMLINUX_SYMBOL(__sched_text_end) = .;
+    VMLINUX_SYMBOL(__lock_text_start) = .;
+    *(.spinlock.literal .spinlock.text)
+    VMLINUX_SYMBOL(__lock_text_end) = .;
+
+  }
+  _etext = .;
+  PROVIDE (etext = .);
+
+  . = ALIGN(16);
+
+  RODATA
+
+  /*  Relocation table */
+
+  .fixup   : { *(.fixup) }
+
+  EXCEPTION_TABLE(16)
+  /* Data section */
+
+  _fdata = .;
+  RW_DATA_SECTION(XCHAL_ICACHE_LINESIZE, PAGE_SIZE, THREAD_SIZE)
+  _edata = .;
+
+  /* Initialization code and data: */
+
+  . = ALIGN(PAGE_SIZE);
+  __init_begin = .;
+  INIT_TEXT_SECTION(PAGE_SIZE)
+
+  .init.data :
+  {
+    INIT_DATA
+    . = ALIGN(0x4);
+    __tagtable_begin = .;
+    *(.taglist)
+    __tagtable_end = .;
+
+    . = ALIGN(16);
+    __boot_reloc_table_start = ABSOLUTE(.);
+
+    RELOCATE_ENTRY(_WindowVectors_text,
+		   .WindowVectors.text);
+    RELOCATE_ENTRY(_KernelExceptionVector_text,
+		   .KernelExceptionVector.text);
+    RELOCATE_ENTRY(_UserExceptionVector_text,
+		   .UserExceptionVector.text);
+    RELOCATE_ENTRY(_DoubleExceptionVector_literal,
+		   .DoubleExceptionVector.literal);
+    RELOCATE_ENTRY(_DoubleExceptionVector_text,
+		   .DoubleExceptionVector.text);
+    RELOCATE_ENTRY(_DebugInterruptVector_text,
+		   .DebugInterruptVector.text);
+
+    __boot_reloc_table_end = ABSOLUTE(.) ;
+
+    INIT_SETUP(XCHAL_ICACHE_LINESIZE)
+    INIT_CALLS
+    CON_INITCALL
+    SECURITY_INITCALL
+    INIT_RAM_FS
+  }
+
+  PERCPU_SECTION(XCHAL_ICACHE_LINESIZE)
+
+  /* We need this dummy segment here; it anchors the load address of the
+     first SECTION_VECTOR entry below. */
+
+  . = ALIGN(4);
+  .dummy : { LONG(0) }
+
+  /* The vectors are relocated to the real position at startup time */
+
+  SECTION_VECTOR (_WindowVectors_text,
+		  .WindowVectors.text,
+		  XCHAL_WINDOW_VECTORS_VADDR, 4,
+		  .dummy)
+  SECTION_VECTOR (_DebugInterruptVector_literal,
+		  .DebugInterruptVector.literal,
+		  XCHAL_DEBUG_VECTOR_VADDR - 4,
+		  SIZEOF(.WindowVectors.text),
+		  .WindowVectors.text)
+  SECTION_VECTOR (_DebugInterruptVector_text,
+		  .DebugInterruptVector.text,
+		  XCHAL_DEBUG_VECTOR_VADDR,
+		  4,
+		  .DebugInterruptVector.literal)
+  SECTION_VECTOR (_KernelExceptionVector_literal,
+		  .KernelExceptionVector.literal,
+		  XCHAL_KERNEL_VECTOR_VADDR - 4,
+		  SIZEOF(.DebugInterruptVector.text),
+		  .DebugInterruptVector.text)
+  SECTION_VECTOR (_KernelExceptionVector_text,
+		  .KernelExceptionVector.text,
+		  XCHAL_KERNEL_VECTOR_VADDR,
+		  4,
+		  .KernelExceptionVector.literal)
+  SECTION_VECTOR (_UserExceptionVector_literal,
+		  .UserExceptionVector.literal,
+		  XCHAL_USER_VECTOR_VADDR - 4,
+		  SIZEOF(.KernelExceptionVector.text),
+		  .KernelExceptionVector.text)
+  SECTION_VECTOR (_UserExceptionVector_text,
+		  .UserExceptionVector.text,
+		  XCHAL_USER_VECTOR_VADDR,
+		  4,
+		  .UserExceptionVector.literal)
+  SECTION_VECTOR (_DoubleExceptionVector_literal,
+		  .DoubleExceptionVector.literal,
+		  XCHAL_DOUBLEEXC_VECTOR_VADDR - 16,
+		  SIZEOF(.UserExceptionVector.text),
+		  .UserExceptionVector.text)
+  SECTION_VECTOR (_DoubleExceptionVector_text,
+		  .DoubleExceptionVector.text,
+		  XCHAL_DOUBLEEXC_VECTOR_VADDR,
+		  32,
+		  .DoubleExceptionVector.literal)
+
+  . = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3;
+  . = ALIGN(PAGE_SIZE);
+
+  __init_end = .;
+
+  BSS_SECTION(0, 8192, 0)
+
+  _end = .;
+
+  /* only used by the boot loader  */
+
+  . = ALIGN(0x10);
+  .bootstrap : { *(.bootstrap.literal .bootstrap.text .bootstrap.data) }
+
+  . = ALIGN(0x1000);
+  __initrd_start = .;
+  .initrd : { *(.initrd) }
+  __initrd_end = .;
+
+  .ResetVector.text XCHAL_RESET_VECTOR_VADDR :
+  {
+    *(.ResetVector.text)
+  }
+
+  .xt.lit : { *(.xt.lit) }
+  .xt.prop : { *(.xt.prop) }
+
+  .debug  0 :  { *(.debug) }
+  .line  0 :  { *(.line) }
+  .debug_srcinfo  0 :  { *(.debug_srcinfo) }
+  .debug_sfnames  0 :  { *(.debug_sfnames) }
+  .debug_aranges  0 :  { *(.debug_aranges) }
+  .debug_pubnames  0 :  { *(.debug_pubnames) }
+  .debug_info  0 :  { *(.debug_info) }
+  .debug_abbrev  0 :  { *(.debug_abbrev) }
+  .debug_line  0 :  { *(.debug_line) }
+  .debug_frame  0 :  { *(.debug_frame) }
+  .debug_str  0 :  { *(.debug_str) }
+  .debug_loc  0 :  { *(.debug_loc) }
+  .debug_macinfo  0 :  { *(.debug_macinfo) }
+  .debug_weaknames  0 :  { *(.debug_weaknames) }
+  .debug_funcnames  0 :  { *(.debug_funcnames) }
+  .debug_typenames  0 :  { *(.debug_typenames) }
+  .debug_varnames  0 :  { *(.debug_varnames) }
+
+  .xt.insn 0 :
+  {
+    *(.xt.insn)
+    *(.gnu.linkonce.x*)
+  }
+
+  .xt.lit 0 :
+  {
+    *(.xt.lit)
+    *(.gnu.linkonce.p*)
+  }
+
+  /* Sections to be discarded */
+  DISCARDS
+  /DISCARD/ : { *(.exit.literal) }
+}
diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c
new file mode 100644
index 00000000..c9a7c5b7
--- /dev/null
+++ b/arch/xtensa/kernel/xtensa_ksyms.c
@@ -0,0 +1,97 @@
+/*
+ * arch/xtensa/kernel/xtensa_ksyms.c
+ *
+ * Export Xtensa-specific functions for loadable modules.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005  Tensilica Inc.
+ *
+ * Joe Taylor <joe@tensilica.com>
+ */
+
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <asm/irq.h>
+#include <linux/in6.h>
+
+#include <asm/uaccess.h>
+#include <asm/checksum.h>
+#include <asm/dma.h>
+#include <asm/io.h>
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+#ifdef CONFIG_BLK_DEV_FD
+#include <asm/floppy.h>
+#endif
+#ifdef CONFIG_NET
+#include <net/checksum.h>
+#endif /* CONFIG_NET */
+
+
+/*
+ * String functions
+ */
+EXPORT_SYMBOL(memset);
+EXPORT_SYMBOL(memcpy);
+EXPORT_SYMBOL(memmove);
+
+EXPORT_SYMBOL(kernel_thread);
+
+/*
+ * gcc internal math functions
+ */
+extern long long __ashrdi3(long long, int);
+extern long long __ashldi3(long long, int);
+extern long long __lshrdi3(long long, int);
+extern int __divsi3(int, int);
+extern int __modsi3(int, int);
+extern long long __muldi3(long long, long long);
+extern int __mulsi3(int, int);
+extern unsigned int __udivsi3(unsigned int, unsigned int);
+extern unsigned int __umodsi3(unsigned int, unsigned int);
+extern unsigned long long __umoddi3(unsigned long long, unsigned long long);
+extern unsigned long long __udivdi3(unsigned long long, unsigned long long);
+
+EXPORT_SYMBOL(__ashldi3);
+EXPORT_SYMBOL(__ashrdi3);
+EXPORT_SYMBOL(__lshrdi3);
+EXPORT_SYMBOL(__divsi3);
+EXPORT_SYMBOL(__modsi3);
+EXPORT_SYMBOL(__muldi3);
+EXPORT_SYMBOL(__mulsi3);
+EXPORT_SYMBOL(__udivsi3);
+EXPORT_SYMBOL(__umodsi3);
+EXPORT_SYMBOL(__udivdi3);
+EXPORT_SYMBOL(__umoddi3);
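+
+/*
+ * The compiler emits calls to these libgcc helpers for multiply, divide,
+ * modulo, and 64-bit shift operations the configured core may lack native
+ * instructions for; e.g. a module dividing two u64 values links against
+ * __udivdi3, so the helpers must be exported.
+ */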
+
+#ifdef CONFIG_NET
+/*
+ * Networking support
+ */
+EXPORT_SYMBOL(csum_partial_copy_generic);
+#endif /* CONFIG_NET */
+
+/*
+ * Architecture-specific symbols
+ */
+EXPORT_SYMBOL(__xtensa_copy_user);
+
+/*
+ * Kernel hacking ...
+ */
+
+#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
+// FIXME EXPORT_SYMBOL(screen_info);
+#endif
+
+EXPORT_SYMBOL(outsb);
+EXPORT_SYMBOL(outsw);
+EXPORT_SYMBOL(outsl);
+EXPORT_SYMBOL(insb);
+EXPORT_SYMBOL(insw);
+EXPORT_SYMBOL(insl);
-- 
cgit