path: root/arch/blackfin/kernel
Diffstat (limited to 'arch/blackfin/kernel')
-rw-r--r--  arch/blackfin/kernel/Makefile                    43
-rw-r--r--  arch/blackfin/kernel/asm-offsets.c              158
-rw-r--r--  arch/blackfin/kernel/bfin_dma.c                 542
-rw-r--r--  arch/blackfin/kernel/bfin_gpio.c               1326
-rw-r--r--  arch/blackfin/kernel/bfin_ksyms.c               124
-rw-r--r--  arch/blackfin/kernel/cplb-mpu/Makefile           10
-rw-r--r--  arch/blackfin/kernel/cplb-mpu/cplbinit.c        102
-rw-r--r--  arch/blackfin/kernel/cplb-mpu/cplbmgr.c         379
-rw-r--r--  arch/blackfin/kernel/cplb-nompu/Makefile         10
-rw-r--r--  arch/blackfin/kernel/cplb-nompu/cplbinit.c      194
-rw-r--r--  arch/blackfin/kernel/cplb-nompu/cplbmgr.c       202
-rw-r--r--  arch/blackfin/kernel/cplbinfo.c                 177
-rw-r--r--  arch/blackfin/kernel/debug-mmrs.c              1889
-rw-r--r--  arch/blackfin/kernel/dma-mapping.c              149
-rw-r--r--  arch/blackfin/kernel/dumpstack.c                174
-rw-r--r--  arch/blackfin/kernel/early_printk.c             272
-rw-r--r--  arch/blackfin/kernel/entry.S                    108
-rw-r--r--  arch/blackfin/kernel/exception.c                 45
-rw-r--r--  arch/blackfin/kernel/fixed_code.S               155
-rw-r--r--  arch/blackfin/kernel/flat.c                      84
-rw-r--r--  arch/blackfin/kernel/ftrace-entry.S             225
-rw-r--r--  arch/blackfin/kernel/ftrace.c                   128
-rw-r--r--  arch/blackfin/kernel/gptimers.c                 300
-rw-r--r--  arch/blackfin/kernel/init_task.c                 32
-rw-r--r--  arch/blackfin/kernel/ipipe.c                    397
-rw-r--r--  arch/blackfin/kernel/irqchip.c                  153
-rw-r--r--  arch/blackfin/kernel/kgdb.c                     480
-rw-r--r--  arch/blackfin/kernel/kgdb_test.c                114
-rw-r--r--  arch/blackfin/kernel/module.c                   289
-rw-r--r--  arch/blackfin/kernel/nmi.c                      287
-rw-r--r--  arch/blackfin/kernel/perf_event.c               499
-rw-r--r--  arch/blackfin/kernel/process.c                  513
-rw-r--r--  arch/blackfin/kernel/pseudodbg.c                191
-rw-r--r--  arch/blackfin/kernel/ptrace.c                   407
-rw-r--r--  arch/blackfin/kernel/pwm.c                      100
-rw-r--r--  arch/blackfin/kernel/reboot.c                   112
-rw-r--r--  arch/blackfin/kernel/setup.c                   1369
-rw-r--r--  arch/blackfin/kernel/shadow_console.c           111
-rw-r--r--  arch/blackfin/kernel/signal.c                   354
-rw-r--r--  arch/blackfin/kernel/stacktrace.c                53
-rw-r--r--  arch/blackfin/kernel/sys_bfin.c                  88
-rw-r--r--  arch/blackfin/kernel/time-ts.c                  375
-rw-r--r--  arch/blackfin/kernel/time.c                     156
-rw-r--r--  arch/blackfin/kernel/trace.c                    984
-rw-r--r--  arch/blackfin/kernel/traps.c                    582
-rw-r--r--  arch/blackfin/kernel/vmlinux.lds.S              267
46 files changed, 14709 insertions, 0 deletions
diff --git a/arch/blackfin/kernel/Makefile b/arch/blackfin/kernel/Makefile
new file mode 100644
index 00000000..9a0d6d70
--- /dev/null
+++ b/arch/blackfin/kernel/Makefile
@@ -0,0 +1,43 @@
+#
+# arch/blackfin/kernel/Makefile
+#
+
+extra-y := init_task.o vmlinux.lds
+
+obj-y := \
+ entry.o process.o bfin_ksyms.o ptrace.o setup.o signal.o \
+ sys_bfin.o traps.o irqchip.o dma-mapping.o flat.o \
+ fixed_code.o reboot.o bfin_gpio.o bfin_dma.o \
+ exception.o dumpstack.o
+
+ifeq ($(CONFIG_GENERIC_CLOCKEVENTS),y)
+ obj-y += time-ts.o
+else
+ obj-y += time.o
+endif
+
+obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
+obj-$(CONFIG_FUNCTION_TRACER) += ftrace-entry.o
+obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
+CFLAGS_REMOVE_ftrace.o = -pg
+
+obj-$(CONFIG_HAVE_PWM) += pwm.o
+obj-$(CONFIG_IPIPE) += ipipe.o
+obj-$(CONFIG_BFIN_GPTIMERS) += gptimers.o
+obj-$(CONFIG_CPLB_INFO) += cplbinfo.o
+obj-$(CONFIG_MODULES) += module.o
+obj-$(CONFIG_KGDB) += kgdb.o
+obj-$(CONFIG_KGDB_TESTS) += kgdb_test.o
+obj-$(CONFIG_NMI_WATCHDOG) += nmi.o
+obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
+obj-$(CONFIG_EARLY_PRINTK) += shadow_console.o
+obj-$(CONFIG_STACKTRACE) += stacktrace.o
+obj-$(CONFIG_DEBUG_VERBOSE) += trace.o
+obj-$(CONFIG_BFIN_PSEUDODBG_INSNS) += pseudodbg.o
+obj-$(CONFIG_PERF_EVENTS) += perf_event.o
+
+# the kgdb test puts code into L2 and without linker
+# relaxation, we need to force long calls to/from it
+CFLAGS_kgdb_test.o := -mlong-calls
+
+obj-$(CONFIG_DEBUG_MMRS) += debug-mmrs.o
diff --git a/arch/blackfin/kernel/asm-offsets.c b/arch/blackfin/kernel/asm-offsets.c
new file mode 100644
index 00000000..37fcae95
--- /dev/null
+++ b/arch/blackfin/kernel/asm-offsets.c
@@ -0,0 +1,158 @@
+/*
+ * generate definitions needed by assembly language modules
+ *
+ * Copyright 2004-2009 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later
+ */
+
+#include <linux/stddef.h>
+#include <linux/sched.h>
+#include <linux/kernel_stat.h>
+#include <linux/ptrace.h>
+#include <linux/hardirq.h>
+#include <linux/irq.h>
+#include <linux/thread_info.h>
+#include <linux/kbuild.h>
+#include <asm/pda.h>
+
+int main(void)
+{
+ /* offsets into the task struct */
+ DEFINE(TASK_STATE, offsetof(struct task_struct, state));
+ DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags));
+ DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace));
+ DEFINE(TASK_BLOCKED, offsetof(struct task_struct, blocked));
+ DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
+ DEFINE(TASK_THREAD_INFO, offsetof(struct task_struct, stack));
+ DEFINE(TASK_MM, offsetof(struct task_struct, mm));
+ DEFINE(TASK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
+ DEFINE(TASK_SIGPENDING, offsetof(struct task_struct, pending));
+
+ /* offsets into the irq_cpustat_t struct */
+ DEFINE(CPUSTAT_SOFTIRQ_PENDING,
+ offsetof(irq_cpustat_t, __softirq_pending));
+
+ /* offsets into the thread struct */
+ DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp));
+ DEFINE(THREAD_USP, offsetof(struct thread_struct, usp));
+ DEFINE(THREAD_SR, offsetof(struct thread_struct, seqstat));
+ DEFINE(PT_SR, offsetof(struct thread_struct, seqstat));
+ DEFINE(THREAD_ESP0, offsetof(struct thread_struct, esp0));
+ DEFINE(THREAD_PC, offsetof(struct thread_struct, pc));
+ DEFINE(KERNEL_STACK_SIZE, THREAD_SIZE);
+
+ /* offsets into the pt_regs */
+ DEFINE(PT_ORIG_R0, offsetof(struct pt_regs, orig_r0));
+ DEFINE(PT_ORIG_P0, offsetof(struct pt_regs, orig_p0));
+ DEFINE(PT_ORIG_PC, offsetof(struct pt_regs, orig_pc));
+ DEFINE(PT_R0, offsetof(struct pt_regs, r0));
+ DEFINE(PT_R1, offsetof(struct pt_regs, r1));
+ DEFINE(PT_R2, offsetof(struct pt_regs, r2));
+ DEFINE(PT_R3, offsetof(struct pt_regs, r3));
+ DEFINE(PT_R4, offsetof(struct pt_regs, r4));
+ DEFINE(PT_R5, offsetof(struct pt_regs, r5));
+ DEFINE(PT_R6, offsetof(struct pt_regs, r6));
+ DEFINE(PT_R7, offsetof(struct pt_regs, r7));
+
+ DEFINE(PT_P0, offsetof(struct pt_regs, p0));
+ DEFINE(PT_P1, offsetof(struct pt_regs, p1));
+ DEFINE(PT_P2, offsetof(struct pt_regs, p2));
+ DEFINE(PT_P3, offsetof(struct pt_regs, p3));
+ DEFINE(PT_P4, offsetof(struct pt_regs, p4));
+ DEFINE(PT_P5, offsetof(struct pt_regs, p5));
+
+ DEFINE(PT_FP, offsetof(struct pt_regs, fp));
+ DEFINE(PT_USP, offsetof(struct pt_regs, usp));
+ DEFINE(PT_I0, offsetof(struct pt_regs, i0));
+ DEFINE(PT_I1, offsetof(struct pt_regs, i1));
+ DEFINE(PT_I2, offsetof(struct pt_regs, i2));
+ DEFINE(PT_I3, offsetof(struct pt_regs, i3));
+ DEFINE(PT_M0, offsetof(struct pt_regs, m0));
+ DEFINE(PT_M1, offsetof(struct pt_regs, m1));
+ DEFINE(PT_M2, offsetof(struct pt_regs, m2));
+ DEFINE(PT_M3, offsetof(struct pt_regs, m3));
+ DEFINE(PT_L0, offsetof(struct pt_regs, l0));
+ DEFINE(PT_L1, offsetof(struct pt_regs, l1));
+ DEFINE(PT_L2, offsetof(struct pt_regs, l2));
+ DEFINE(PT_L3, offsetof(struct pt_regs, l3));
+ DEFINE(PT_B0, offsetof(struct pt_regs, b0));
+ DEFINE(PT_B1, offsetof(struct pt_regs, b1));
+ DEFINE(PT_B2, offsetof(struct pt_regs, b2));
+ DEFINE(PT_B3, offsetof(struct pt_regs, b3));
+ DEFINE(PT_A0X, offsetof(struct pt_regs, a0x));
+ DEFINE(PT_A0W, offsetof(struct pt_regs, a0w));
+ DEFINE(PT_A1X, offsetof(struct pt_regs, a1x));
+ DEFINE(PT_A1W, offsetof(struct pt_regs, a1w));
+ DEFINE(PT_LC0, offsetof(struct pt_regs, lc0));
+ DEFINE(PT_LC1, offsetof(struct pt_regs, lc1));
+ DEFINE(PT_LT0, offsetof(struct pt_regs, lt0));
+ DEFINE(PT_LT1, offsetof(struct pt_regs, lt1));
+ DEFINE(PT_LB0, offsetof(struct pt_regs, lb0));
+ DEFINE(PT_LB1, offsetof(struct pt_regs, lb1));
+ DEFINE(PT_ASTAT, offsetof(struct pt_regs, astat));
+ DEFINE(PT_RESERVED, offsetof(struct pt_regs, reserved));
+ DEFINE(PT_RETS, offsetof(struct pt_regs, rets));
+ DEFINE(PT_PC, offsetof(struct pt_regs, pc));
+ DEFINE(PT_RETX, offsetof(struct pt_regs, retx));
+ DEFINE(PT_RETN, offsetof(struct pt_regs, retn));
+ DEFINE(PT_RETE, offsetof(struct pt_regs, rete));
+ DEFINE(PT_SEQSTAT, offsetof(struct pt_regs, seqstat));
+ DEFINE(PT_SYSCFG, offsetof(struct pt_regs, syscfg));
+ DEFINE(PT_IPEND, offsetof(struct pt_regs, ipend));
+ DEFINE(SIZEOF_PTREGS, sizeof(struct pt_regs));
+ DEFINE(PT_TEXT_ADDR, sizeof(struct pt_regs)); /* Needed by gdb */
+ DEFINE(PT_TEXT_END_ADDR, 4 + sizeof(struct pt_regs));/* Needed by gdb */
+ DEFINE(PT_DATA_ADDR, 8 + sizeof(struct pt_regs)); /* Needed by gdb */
+ DEFINE(PT_FDPIC_EXEC, 12 + sizeof(struct pt_regs)); /* Needed by gdb */
+ DEFINE(PT_FDPIC_INTERP, 16 + sizeof(struct pt_regs));/* Needed by gdb */
+
+ /* signal defines */
+ DEFINE(SIGSEGV, SIGSEGV);
+ DEFINE(SIGTRAP, SIGTRAP);
+
+ /* PDA management (in L1 scratchpad) */
+ DEFINE(PDA_SYSCFG, offsetof(struct blackfin_pda, syscfg));
+#ifdef CONFIG_SMP
+ DEFINE(PDA_IRQFLAGS, offsetof(struct blackfin_pda, imask));
+#endif
+ DEFINE(PDA_IPDT, offsetof(struct blackfin_pda, ipdt));
+ DEFINE(PDA_IPDT_SWAPCOUNT, offsetof(struct blackfin_pda, ipdt_swapcount));
+ DEFINE(PDA_DPDT, offsetof(struct blackfin_pda, dpdt));
+ DEFINE(PDA_DPDT_SWAPCOUNT, offsetof(struct blackfin_pda, dpdt_swapcount));
+ DEFINE(PDA_EXIPTR, offsetof(struct blackfin_pda, ex_iptr));
+ DEFINE(PDA_EXOPTR, offsetof(struct blackfin_pda, ex_optr));
+ DEFINE(PDA_EXBUF, offsetof(struct blackfin_pda, ex_buf));
+ DEFINE(PDA_EXIMASK, offsetof(struct blackfin_pda, ex_imask));
+ DEFINE(PDA_EXSTACK, offsetof(struct blackfin_pda, ex_stack));
+ DEFINE(PDA_EXIPEND, offsetof(struct blackfin_pda, ex_ipend));
+#ifdef ANOMALY_05000261
+ DEFINE(PDA_LFRETX, offsetof(struct blackfin_pda, last_cplb_fault_retx));
+#endif
+ DEFINE(PDA_DCPLB, offsetof(struct blackfin_pda, dcplb_fault_addr));
+ DEFINE(PDA_ICPLB, offsetof(struct blackfin_pda, icplb_fault_addr));
+ DEFINE(PDA_RETX, offsetof(struct blackfin_pda, retx));
+ DEFINE(PDA_SEQSTAT, offsetof(struct blackfin_pda, seqstat));
+#ifdef CONFIG_DEBUG_DOUBLEFAULT
+ DEFINE(PDA_DF_DCPLB, offsetof(struct blackfin_pda, dcplb_doublefault_addr));
+ DEFINE(PDA_DF_ICPLB, offsetof(struct blackfin_pda, icplb_doublefault_addr));
+ DEFINE(PDA_DF_SEQSTAT, offsetof(struct blackfin_pda, seqstat_doublefault));
+ DEFINE(PDA_DF_RETX, offsetof(struct blackfin_pda, retx_doublefault));
+#endif
+
+ /* PDA initial management */
+ DEFINE(PDA_INIT_RETX, offsetof(struct blackfin_initial_pda, retx));
+#ifdef CONFIG_DEBUG_DOUBLEFAULT
+ DEFINE(PDA_INIT_DF_DCPLB, offsetof(struct blackfin_initial_pda, dcplb_doublefault_addr));
+ DEFINE(PDA_INIT_DF_ICPLB, offsetof(struct blackfin_initial_pda, icplb_doublefault_addr));
+ DEFINE(PDA_INIT_DF_SEQSTAT, offsetof(struct blackfin_initial_pda, seqstat_doublefault));
+ DEFINE(PDA_INIT_DF_RETX, offsetof(struct blackfin_initial_pda, retx_doublefault));
+#endif
+
+#ifdef CONFIG_SMP
+ /* Inter-core lock (in L2 SRAM) */
+ DEFINE(SIZEOF_CORELOCK, sizeof(struct corelock_slot));
+#endif
+
+ return 0;
+}
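
For reference, the DEFINE() macro used throughout this file comes from include/linux/kbuild.h; a minimal sketch of the mechanism (simplified — the real kbuild rule post-processes the compiler's assembly output with sed):

/* DEFINE() emits a marker line into the generated assembly: */
#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

/* Compiling asm-offsets.c with -S then yields lines such as
 *   ->PT_R0 <offset> offsetof(struct pt_regs, r0)
 * which kbuild rewrites into asm-offsets.h as
 *   #define PT_R0 <offset>
 * so assembly code can refer to struct fields, e.g.  r0 = [sp + PT_R0];
 */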
diff --git a/arch/blackfin/kernel/bfin_dma.c b/arch/blackfin/kernel/bfin_dma.c
new file mode 100644
index 00000000..40c2ed61
--- /dev/null
+++ b/arch/blackfin/kernel/bfin_dma.c
@@ -0,0 +1,542 @@
+/*
+ * bfin_dma.c - Blackfin DMA implementation
+ *
+ * Copyright 2004-2008 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/param.h>
+#include <linux/proc_fs.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/spinlock.h>
+
+#include <asm/blackfin.h>
+#include <asm/cacheflush.h>
+#include <asm/dma.h>
+#include <asm/uaccess.h>
+#include <asm/early_printk.h>
+
+/*
+ * To work around anomaly 05000119, we always check the DMA_DONE bit,
+ * never the DMA_RUN bit.
+ */
+
+struct dma_channel dma_ch[MAX_DMA_CHANNELS];
+EXPORT_SYMBOL(dma_ch);
+
+static int __init blackfin_dma_init(void)
+{
+ int i;
+
+ printk(KERN_INFO "Blackfin DMA Controller\n");
+
+#if ANOMALY_05000480
+ bfin_write_DMAC_TC_PER(0x0111);
+#endif
+
+ for (i = 0; i < MAX_DMA_CHANNELS; i++) {
+ atomic_set(&dma_ch[i].chan_status, 0);
+ dma_ch[i].regs = dma_io_base_addr[i];
+ }
+ /* Mark MEMDMA Channel 0 as requested since we're using it internally */
+ request_dma(CH_MEM_STREAM0_DEST, "Blackfin dma_memcpy");
+ request_dma(CH_MEM_STREAM0_SRC, "Blackfin dma_memcpy");
+
+#if defined(CONFIG_DEB_DMA_URGENT)
+ bfin_write_EBIU_DDRQUE(bfin_read_EBIU_DDRQUE()
+ | DEB1_URGENT | DEB2_URGENT | DEB3_URGENT);
+#endif
+
+ return 0;
+}
+arch_initcall(blackfin_dma_init);
+
+#ifdef CONFIG_PROC_FS
+static int proc_dma_show(struct seq_file *m, void *v)
+{
+ int i;
+
+ for (i = 0; i < MAX_DMA_CHANNELS; ++i)
+ if (dma_channel_active(i))
+ seq_printf(m, "%2d: %s\n", i, dma_ch[i].device_id);
+
+ return 0;
+}
+
+static int proc_dma_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, proc_dma_show, NULL);
+}
+
+static const struct file_operations proc_dma_operations = {
+ .open = proc_dma_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int __init proc_dma_init(void)
+{
+ return proc_create("dma", 0, NULL, &proc_dma_operations) != NULL;
+}
+late_initcall(proc_dma_init);
+#endif
+
+static void set_dma_peripheral_map(unsigned int channel, const char *device_id)
+{
+#ifdef CONFIG_BF54x
+ unsigned int per_map;
+
+ switch (channel) {
+ case CH_UART2_RX: per_map = 0xC << 12; break;
+ case CH_UART2_TX: per_map = 0xD << 12; break;
+ case CH_UART3_RX: per_map = 0xE << 12; break;
+ case CH_UART3_TX: per_map = 0xF << 12; break;
+ default: return;
+ }
+
+ if (strncmp(device_id, "BFIN_UART", 9) == 0)
+ dma_ch[channel].regs->peripheral_map = per_map;
+#endif
+}
+
+/**
+ * request_dma - request a DMA channel
+ *
+ * Request the specific DMA channel from the system if it's available.
+ */
+int request_dma(unsigned int channel, const char *device_id)
+{
+ pr_debug("request_dma() : BEGIN\n");
+
+ if (device_id == NULL)
+ printk(KERN_WARNING "request_dma(%u): no device_id given\n", channel);
+
+#if defined(CONFIG_BF561) && ANOMALY_05000182
+ if (channel >= CH_IMEM_STREAM0_DEST && channel <= CH_IMEM_STREAM1_DEST) {
+ if (get_cclk() > 500000000) {
+ printk(KERN_WARNING
+ "Request IMDMA failed due to ANOMALY 05000182\n");
+ return -EFAULT;
+ }
+ }
+#endif
+
+ if (atomic_cmpxchg(&dma_ch[channel].chan_status, 0, 1)) {
+ pr_debug("DMA CHANNEL IN USE\n");
+ return -EBUSY;
+ }
+
+ set_dma_peripheral_map(channel, device_id);
+ dma_ch[channel].device_id = device_id;
+ dma_ch[channel].irq = 0;
+
+ /* Enforced restriction: a channel must be requested via
+ * request_dma() before any descriptor/channel operations
+ * are performed on it.
+ */
+ pr_debug("request_dma() : END\n");
+ return 0;
+}
+EXPORT_SYMBOL(request_dma);
+
+int set_dma_callback(unsigned int channel, irq_handler_t callback, void *data)
+{
+ int ret;
+ unsigned int irq;
+
+ BUG_ON(channel >= MAX_DMA_CHANNELS || !callback ||
+ !atomic_read(&dma_ch[channel].chan_status));
+
+ irq = channel2irq(channel);
+ ret = request_irq(irq, callback, 0, dma_ch[channel].device_id, data);
+ if (ret)
+ return ret;
+
+ dma_ch[channel].irq = irq;
+ dma_ch[channel].data = data;
+
+ return 0;
+}
+EXPORT_SYMBOL(set_dma_callback);
+
+/**
+ * clear_dma_buffer - clear DMA FIFOs for the specified channel
+ *
+ * Set the Buffer Clear bit in the Configuration register of the specified
+ * DMA channel. This will stop the descriptor-based DMA operation.
+ */
+static void clear_dma_buffer(unsigned int channel)
+{
+ dma_ch[channel].regs->cfg |= RESTART;
+ SSYNC();
+ dma_ch[channel].regs->cfg &= ~RESTART;
+}
+
+void free_dma(unsigned int channel)
+{
+ pr_debug("freedma() : BEGIN\n");
+ BUG_ON(channel >= MAX_DMA_CHANNELS ||
+ !atomic_read(&dma_ch[channel].chan_status));
+
+ /* Halt the DMA */
+ disable_dma(channel);
+ clear_dma_buffer(channel);
+
+ if (dma_ch[channel].irq)
+ free_irq(dma_ch[channel].irq, dma_ch[channel].data);
+
+ /* Clear the DMA Variable in the Channel */
+ atomic_set(&dma_ch[channel].chan_status, 0);
+
+ pr_debug("freedma() : END\n");
+}
+EXPORT_SYMBOL(free_dma);
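
A minimal usage sketch of the request/callback/free API above (illustrative; the handler and label names are hypothetical, the channel is one this patch itself uses):

static irqreturn_t my_dma_done(int irq, void *data)
{
	/* handle/acknowledge the completed transfer */
	return IRQ_HANDLED;
}

static int my_driver_setup(void)
{
	int ret;

	/* reserve the channel before touching any descriptors */
	ret = request_dma(CH_MEM_STREAM0_DEST, "my-driver");
	if (ret)
		return ret;

	/* optionally attach a completion interrupt handler */
	ret = set_dma_callback(CH_MEM_STREAM0_DEST, my_dma_done, NULL);
	if (ret) {
		free_dma(CH_MEM_STREAM0_DEST);
		return ret;
	}

	return 0;
}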
+
+#ifdef CONFIG_PM
+# ifndef MAX_DMA_SUSPEND_CHANNELS
+# define MAX_DMA_SUSPEND_CHANNELS MAX_DMA_CHANNELS
+# endif
+int blackfin_dma_suspend(void)
+{
+ int i;
+
+ for (i = 0; i < MAX_DMA_CHANNELS; ++i) {
+ if (dma_ch[i].regs->cfg & DMAEN) {
+ printk(KERN_ERR "DMA Channel %d failed to suspend\n", i);
+ return -EBUSY;
+ }
+
+ if (i < MAX_DMA_SUSPEND_CHANNELS)
+ dma_ch[i].saved_peripheral_map = dma_ch[i].regs->peripheral_map;
+ }
+
+#if ANOMALY_05000480
+ bfin_write_DMAC_TC_PER(0x0);
+#endif
+ return 0;
+}
+
+void blackfin_dma_resume(void)
+{
+ int i;
+
+ for (i = 0; i < MAX_DMA_CHANNELS; ++i) {
+ dma_ch[i].regs->cfg = 0;
+
+ if (i < MAX_DMA_SUSPEND_CHANNELS)
+ dma_ch[i].regs->peripheral_map = dma_ch[i].saved_peripheral_map;
+ }
+#if ANOMALY_05000480
+ bfin_write_DMAC_TC_PER(0x0111);
+#endif
+}
+#endif
+
+/**
+ * blackfin_dma_early_init - minimal DMA init
+ *
+ * Setup a few DMA registers so we can safely do DMA transfers early on in
+ * the kernel booting process. Really this just means using dma_memcpy().
+ */
+void __init blackfin_dma_early_init(void)
+{
+ early_shadow_stamp();
+ bfin_write_MDMA_S0_CONFIG(0);
+ bfin_write_MDMA_S1_CONFIG(0);
+}
+
+void __init early_dma_memcpy(void *pdst, const void *psrc, size_t size)
+{
+ unsigned long dst = (unsigned long)pdst;
+ unsigned long src = (unsigned long)psrc;
+ struct dma_register *dst_ch, *src_ch;
+
+ early_shadow_stamp();
+
+ /* We assume that everything is 4 byte aligned, so include
+ * a basic sanity check
+ */
+ BUG_ON(dst % 4);
+ BUG_ON(src % 4);
+ BUG_ON(size % 4);
+
+ src_ch = NULL;
+ /* Find an available memDMA channel */
+ while (1) {
+ if (src_ch == (struct dma_register *)MDMA_S0_NEXT_DESC_PTR) {
+ dst_ch = (struct dma_register *)MDMA_D1_NEXT_DESC_PTR;
+ src_ch = (struct dma_register *)MDMA_S1_NEXT_DESC_PTR;
+ } else {
+ dst_ch = (struct dma_register *)MDMA_D0_NEXT_DESC_PTR;
+ src_ch = (struct dma_register *)MDMA_S0_NEXT_DESC_PTR;
+ }
+
+ if (!bfin_read16(&src_ch->cfg))
+ break;
+ else if (bfin_read16(&dst_ch->irq_status) & DMA_DONE) {
+ bfin_write16(&src_ch->cfg, 0);
+ break;
+ }
+ }
+
+ /* Force a sync in case a previous config reset on this channel
+ * occurred. This is needed so subsequent writes to DMA registers
+ * are not spuriously lost/corrupted.
+ */
+ __builtin_bfin_ssync();
+
+ /* Destination */
+ bfin_write32(&dst_ch->start_addr, dst);
+ bfin_write16(&dst_ch->x_count, size >> 2);
+ bfin_write16(&dst_ch->x_modify, 1 << 2);
+ bfin_write16(&dst_ch->irq_status, DMA_DONE | DMA_ERR);
+
+ /* Source */
+ bfin_write32(&src_ch->start_addr, src);
+ bfin_write16(&src_ch->x_count, size >> 2);
+ bfin_write16(&src_ch->x_modify, 1 << 2);
+ bfin_write16(&src_ch->irq_status, DMA_DONE | DMA_ERR);
+
+ /* Enable */
+ bfin_write16(&src_ch->cfg, DMAEN | WDSIZE_32);
+ bfin_write16(&dst_ch->cfg, WNR | DI_EN | DMAEN | WDSIZE_32);
+
+ /* Since we are atomic now, don't use the workaround ssync */
+ __builtin_bfin_ssync();
+}
+
+void __init early_dma_memcpy_done(void)
+{
+ early_shadow_stamp();
+
+ while ((bfin_read_MDMA_S0_CONFIG() && !(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE)) ||
+ (bfin_read_MDMA_S1_CONFIG() && !(bfin_read_MDMA_D1_IRQ_STATUS() & DMA_DONE)))
+ continue;
+
+ bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR);
+ bfin_write_MDMA_D1_IRQ_STATUS(DMA_DONE | DMA_ERR);
+ /*
+ * Now that DMA is done, we would normally flush cache, but
+ * i/d cache isn't running this early, so we don't bother,
+ * and just clear out the DMA channel for next time
+ */
+ bfin_write_MDMA_S0_CONFIG(0);
+ bfin_write_MDMA_S1_CONFIG(0);
+ bfin_write_MDMA_D0_CONFIG(0);
+ bfin_write_MDMA_D1_CONFIG(0);
+
+ __builtin_bfin_ssync();
+}
+
+/**
+ * __dma_memcpy - program the MDMA registers
+ *
+ * Actually program MDMA0 and wait for the transfer to finish. Disable IRQs
+ * while programming registers so that everything is fully configured. Wait
+ * for DMA to finish with IRQs enabled. If interrupted, the initial DMA_DONE
+ * check will make sure we don't clobber any existing transfer.
+ */
+static void __dma_memcpy(u32 daddr, s16 dmod, u32 saddr, s16 smod, size_t cnt, u32 conf)
+{
+ static DEFINE_SPINLOCK(mdma_lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&mdma_lock, flags);
+
+ /* Force a sync in case a previous config reset on this channel
+ * occurred. This is needed so subsequent writes to DMA registers
+ * are not spuriously lost/corrupted. Do it under irq lock and
+ * without the anomaly version (because we are atomic already).
+ */
+ __builtin_bfin_ssync();
+
+ if (bfin_read_MDMA_S0_CONFIG())
+ while (!(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE))
+ continue;
+
+ if (conf & DMA2D) {
+ /* For larger bit sizes, we've already divided down cnt so it
+ * is no longer a multiple of 64k. So we have to break down
+ * the limit here so it is a multiple of the incoming size.
+ * There is no limitation here in terms of total size other
+ * than the hardware though as the bits lost in the shift are
+ * made up by MODIFY (== we can hit the whole address space).
+ * X: (2^(16 - 0)) * 1 == (2^(16 - 1)) * 2 == (2^(16 - 2)) * 4
+ */
+ u32 shift = abs(dmod) >> 1;
+ size_t ycnt = cnt >> (16 - shift);
+ cnt = 1 << (16 - shift);
+ bfin_write_MDMA_D0_Y_COUNT(ycnt);
+ bfin_write_MDMA_S0_Y_COUNT(ycnt);
+ bfin_write_MDMA_D0_Y_MODIFY(dmod);
+ bfin_write_MDMA_S0_Y_MODIFY(smod);
+ }
+
+ bfin_write_MDMA_D0_START_ADDR(daddr);
+ bfin_write_MDMA_D0_X_COUNT(cnt);
+ bfin_write_MDMA_D0_X_MODIFY(dmod);
+ bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR);
+
+ bfin_write_MDMA_S0_START_ADDR(saddr);
+ bfin_write_MDMA_S0_X_COUNT(cnt);
+ bfin_write_MDMA_S0_X_MODIFY(smod);
+ bfin_write_MDMA_S0_IRQ_STATUS(DMA_DONE | DMA_ERR);
+
+ bfin_write_MDMA_S0_CONFIG(DMAEN | conf);
+ bfin_write_MDMA_D0_CONFIG(WNR | DI_EN | DMAEN | conf);
+
+ spin_unlock_irqrestore(&mdma_lock, flags);
+
+ SSYNC();
+
+ while (!(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE))
+ if (bfin_read_MDMA_S0_CONFIG())
+ continue;
+ else
+ return;
+
+ bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR);
+
+ bfin_write_MDMA_S0_CONFIG(0);
+ bfin_write_MDMA_D0_CONFIG(0);
+}
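
To make the DMA2D arithmetic above concrete, a worked example (assuming a 32-bit aligned copy of 0x80000 bytes, so _dma_memcpy() passes cnt = 0x20000 words and dmod = +4):

	shift = abs(4) >> 1        = 2
	ycnt  = 0x20000 >> (16-2)  = 8       rows (Y_COUNT)
	cnt   = 1 << (16-2)        = 0x4000  words per row (X_COUNT)

8 rows x 0x4000 words x 4 bytes = 0x80000 bytes, and each 16-bit
count register stays within its 64k hardware limit.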
+
+/**
+ * _dma_memcpy - translate C memcpy settings into MDMA settings
+ *
+ * Handle all the high level steps before we touch the MDMA registers. So
+ * handle direction, tweaking of sizes, and formatting of addresses.
+ */
+static void *_dma_memcpy(void *pdst, const void *psrc, size_t size)
+{
+ u32 conf, shift;
+ s16 mod;
+ unsigned long dst = (unsigned long)pdst;
+ unsigned long src = (unsigned long)psrc;
+
+ if (size == 0)
+ return NULL;
+
+ if (dst % 4 == 0 && src % 4 == 0 && size % 4 == 0) {
+ conf = WDSIZE_32;
+ shift = 2;
+ } else if (dst % 2 == 0 && src % 2 == 0 && size % 2 == 0) {
+ conf = WDSIZE_16;
+ shift = 1;
+ } else {
+ conf = WDSIZE_8;
+ shift = 0;
+ }
+
+ /* If the two memory regions have a chance of overlapping, make
+ * sure the memcpy still works as expected. Do this by having the
+ * copy run backwards instead.
+ */
+ mod = 1 << shift;
+ if (src < dst) {
+ mod *= -1;
+ dst += size + mod;
+ src += size + mod;
+ }
+ size >>= shift;
+
+ if (size > 0x10000)
+ conf |= DMA2D;
+
+ __dma_memcpy(dst, mod, src, mod, size, conf);
+
+ return pdst;
+}
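
A worked example of the backward-copy adjustment above (assuming an overlapping word copy of 0x100 bytes from src = 0x1000 to dst = 0x1004, so shift = 2 and mod starts at 4):

	mod  = -4                              /* copy runs backwards */
	dst  = 0x1004 + 0x100 + (-4) = 0x1100  /* last destination word */
	src  = 0x1000 + 0x100 + (-4) = 0x10fc  /* last source word */
	size = 0x100 >> 2            = 0x40    /* word count */

Walking both pointers downwards consumes the tail of the source before
a forward copy could have overwritten it.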
+
+/**
+ * dma_memcpy - DMA memcpy under mutex lock
+ *
+ * Do not check arguments before starting the DMA memcpy. Break the transfer
+ * up into two pieces. The first transfer is in multiples of 64k and the
+ * second transfer is the piece smaller than 64k.
+ */
+void *dma_memcpy(void *pdst, const void *psrc, size_t size)
+{
+ unsigned long dst = (unsigned long)pdst;
+ unsigned long src = (unsigned long)psrc;
+
+ if (bfin_addr_dcacheable(src))
+ blackfin_dcache_flush_range(src, src + size);
+
+ if (bfin_addr_dcacheable(dst))
+ blackfin_dcache_invalidate_range(dst, dst + size);
+
+ return dma_memcpy_nocache(pdst, psrc, size);
+}
+EXPORT_SYMBOL(dma_memcpy);
+
+/**
+ * dma_memcpy_nocache - DMA memcpy under mutex lock, without cache flush/invalidate
+ *
+ * Do not check arguments before starting the DMA memcpy. Break the transfer
+ * up into two pieces. The first transfer is in multiples of 64k and the
+ * second transfer is the piece smaller than 64k.
+ */
+void *dma_memcpy_nocache(void *pdst, const void *psrc, size_t size)
+{
+ size_t bulk, rest;
+
+ bulk = size & ~0xffff;
+ rest = size - bulk;
+ if (bulk)
+ _dma_memcpy(pdst, psrc, bulk);
+ _dma_memcpy(pdst + bulk, psrc + bulk, rest);
+ return pdst;
+}
+EXPORT_SYMBOL(dma_memcpy_nocache);
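
A worked example of the bulk/rest split above (assuming size = 0x2abcd bytes):

	bulk = 0x2abcd & ~0xffff = 0x20000  /* 64k multiple, may use DMA2D */
	rest = 0x2abcd - 0x20000 = 0x0abcd  /* always fits a 16-bit X_COUNT */

Two back-to-back MDMA transfers then cover the whole buffer.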
+
+/**
+ * safe_dma_memcpy - DMA memcpy w/argument checking
+ *
+ * Verify arguments are safe before heading to dma_memcpy().
+ */
+void *safe_dma_memcpy(void *dst, const void *src, size_t size)
+{
+ if (!access_ok(VERIFY_WRITE, dst, size))
+ return NULL;
+ if (!access_ok(VERIFY_READ, src, size))
+ return NULL;
+ return dma_memcpy(dst, src, size);
+}
+EXPORT_SYMBOL(safe_dma_memcpy);
+
+static void _dma_out(unsigned long addr, unsigned long buf, unsigned short len,
+ u16 size, u16 dma_size)
+{
+ blackfin_dcache_flush_range(buf, buf + len * size);
+ __dma_memcpy(addr, 0, buf, size, len, dma_size);
+}
+
+static void _dma_in(unsigned long addr, unsigned long buf, unsigned short len,
+ u16 size, u16 dma_size)
+{
+ blackfin_dcache_invalidate_range(buf, buf + len * size);
+ __dma_memcpy(buf, size, addr, 0, len, dma_size);
+}
+
+#define MAKE_DMA_IO(io, bwl, isize, dmasize, cnst) \
+void dma_##io##s##bwl(unsigned long addr, cnst void *buf, unsigned short len) \
+{ \
+ _dma_##io(addr, (unsigned long)buf, len, isize, WDSIZE_##dmasize); \
+} \
+EXPORT_SYMBOL(dma_##io##s##bwl)
+MAKE_DMA_IO(out, b, 1, 8, const);
+MAKE_DMA_IO(in, b, 1, 8, );
+MAKE_DMA_IO(out, w, 2, 16, const);
+MAKE_DMA_IO(in, w, 2, 16, );
+MAKE_DMA_IO(out, l, 4, 32, const);
+MAKE_DMA_IO(in, l, 4, 32, );
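
For clarity, one MAKE_DMA_IO() line expands roughly as follows (a sketch of the preprocessor output; MAKE_DMA_IO(out, w, 2, 16, const) shown):

void dma_outsw(unsigned long addr, const void *buf, unsigned short len)
{
	_dma_out(addr, (unsigned long)buf, len, 2, WDSIZE_16);
}
EXPORT_SYMBOL(dma_outsw);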
diff --git a/arch/blackfin/kernel/bfin_gpio.c b/arch/blackfin/kernel/bfin_gpio.c
new file mode 100644
index 00000000..02796b88
--- /dev/null
+++ b/arch/blackfin/kernel/bfin_gpio.c
@@ -0,0 +1,1326 @@
+/*
+ * GPIO Abstraction Layer
+ *
+ * Copyright 2006-2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later
+ */
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <asm/blackfin.h>
+#include <asm/gpio.h>
+#include <asm/portmux.h>
+#include <linux/irq.h>
+#include <asm/irq_handler.h>
+
+#if ANOMALY_05000311 || ANOMALY_05000323
+enum {
+ AWA_data = SYSCR,
+ AWA_data_clear = SYSCR,
+ AWA_data_set = SYSCR,
+ AWA_toggle = SYSCR,
+ AWA_maska = BFIN_UART_SCR,
+ AWA_maska_clear = BFIN_UART_SCR,
+ AWA_maska_set = BFIN_UART_SCR,
+ AWA_maska_toggle = BFIN_UART_SCR,
+ AWA_maskb = BFIN_UART_GCTL,
+ AWA_maskb_clear = BFIN_UART_GCTL,
+ AWA_maskb_set = BFIN_UART_GCTL,
+ AWA_maskb_toggle = BFIN_UART_GCTL,
+ AWA_dir = SPORT1_STAT,
+ AWA_polar = SPORT1_STAT,
+ AWA_edge = SPORT1_STAT,
+ AWA_both = SPORT1_STAT,
+#if ANOMALY_05000311
+ AWA_inen = TIMER_ENABLE,
+#elif ANOMALY_05000323
+ AWA_inen = DMA1_1_CONFIG,
+#endif
+};
+ /* Anomaly Workaround */
+#define AWA_DUMMY_READ(name) bfin_read16(AWA_ ## name)
+#else
+#define AWA_DUMMY_READ(...) do { } while (0)
+#endif
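
A reading of the workaround above (assumption based on the anomaly numbers cited, not additional patch code): on affected parts, AWA_DUMMY_READ(data) expands to bfin_read16(AWA_data), a throwaway 16-bit read of an unrelated, safe system MMR (SYSCR here) issued right after a port-register write; on unaffected parts the macro compiles away entirely.

	gpio_array[bank]->data = val;   /* port write */
	AWA_DUMMY_READ(data);           /* -> bfin_read16(SYSCR), per the
	                                 * 05000311/05000323 workaround */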
+
+static struct gpio_port_t * const gpio_array[] = {
+#if defined(BF533_FAMILY) || defined(BF538_FAMILY)
+ (struct gpio_port_t *) FIO_FLAG_D,
+#elif defined(CONFIG_BF52x) || defined(BF537_FAMILY) || defined(CONFIG_BF51x)
+ (struct gpio_port_t *) PORTFIO,
+ (struct gpio_port_t *) PORTGIO,
+ (struct gpio_port_t *) PORTHIO,
+#elif defined(BF561_FAMILY)
+ (struct gpio_port_t *) FIO0_FLAG_D,
+ (struct gpio_port_t *) FIO1_FLAG_D,
+ (struct gpio_port_t *) FIO2_FLAG_D,
+#elif defined(CONFIG_BF54x)
+ (struct gpio_port_t *)PORTA_FER,
+ (struct gpio_port_t *)PORTB_FER,
+ (struct gpio_port_t *)PORTC_FER,
+ (struct gpio_port_t *)PORTD_FER,
+ (struct gpio_port_t *)PORTE_FER,
+ (struct gpio_port_t *)PORTF_FER,
+ (struct gpio_port_t *)PORTG_FER,
+ (struct gpio_port_t *)PORTH_FER,
+ (struct gpio_port_t *)PORTI_FER,
+ (struct gpio_port_t *)PORTJ_FER,
+#else
+# error no gpio arrays defined
+#endif
+};
+
+#if defined(CONFIG_BF52x) || defined(BF537_FAMILY) || defined(CONFIG_BF51x)
+static unsigned short * const port_fer[] = {
+ (unsigned short *) PORTF_FER,
+ (unsigned short *) PORTG_FER,
+ (unsigned short *) PORTH_FER,
+};
+
+# if !defined(BF537_FAMILY)
+static unsigned short * const port_mux[] = {
+ (unsigned short *) PORTF_MUX,
+ (unsigned short *) PORTG_MUX,
+ (unsigned short *) PORTH_MUX,
+};
+
+static const
+u8 pmux_offset[][16] = {
+# if defined(CONFIG_BF52x)
+ { 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 4, 6, 8, 8, 10, 10 }, /* PORTF */
+ { 0, 0, 0, 0, 0, 2, 2, 4, 4, 6, 8, 10, 10, 10, 12, 12 }, /* PORTG */
+ { 0, 0, 0, 0, 0, 0, 0, 0, 2, 4, 4, 4, 4, 4, 4, 4 }, /* PORTH */
+# elif defined(CONFIG_BF51x)
+ { 0, 2, 2, 2, 2, 2, 2, 4, 6, 6, 6, 8, 8, 8, 8, 10 }, /* PORTF */
+ { 0, 0, 0, 2, 4, 6, 6, 6, 8, 10, 10, 12, 14, 14, 14, 14 }, /* PORTG */
+ { 0, 0, 0, 0, 2, 2, 4, 6, 10, 10, 10, 10, 10, 10, 10, 10 }, /* PORTH */
+# endif
+};
+# endif
+
+#elif defined(BF538_FAMILY)
+static unsigned short * const port_fer[] = {
+ (unsigned short *) PORTCIO_FER,
+ (unsigned short *) PORTDIO_FER,
+ (unsigned short *) PORTEIO_FER,
+};
+#endif
+
+#define RESOURCE_LABEL_SIZE 16
+
+static struct str_ident {
+ char name[RESOURCE_LABEL_SIZE];
+} str_ident[MAX_RESOURCES];
+
+#if defined(CONFIG_PM)
+static struct gpio_port_s gpio_bank_saved[GPIO_BANK_NUM];
+# ifdef BF538_FAMILY
+static unsigned short port_fer_saved[3];
+# endif
+#endif
+
+static void gpio_error(unsigned gpio)
+{
+ printk(KERN_ERR "bfin-gpio: GPIO %d wasn't requested!\n", gpio);
+}
+
+static void set_label(unsigned short ident, const char *label)
+{
+ if (label) {
+ strncpy(str_ident[ident].name, label,
+ RESOURCE_LABEL_SIZE);
+ str_ident[ident].name[RESOURCE_LABEL_SIZE - 1] = 0;
+ }
+}
+
+static char *get_label(unsigned short ident)
+{
+ return (*str_ident[ident].name ? str_ident[ident].name : "UNKNOWN");
+}
+
+static int cmp_label(unsigned short ident, const char *label)
+{
+ if (label == NULL) {
+ dump_stack();
+ printk(KERN_ERR "Please provide a non-null label\n");
+ }
+
+ if (label)
+ return strcmp(str_ident[ident].name, label);
+ else
+ return -EINVAL;
+}
+
+#define map_entry(m, i) reserved_##m##_map[gpio_bank(i)]
+#define is_reserved(m, i, e) (map_entry(m, i) & gpio_bit(i))
+#define reserve(m, i) (map_entry(m, i) |= gpio_bit(i))
+#define unreserve(m, i) (map_entry(m, i) &= ~gpio_bit(i))
+#define DECLARE_RESERVED_MAP(m, c) static unsigned short reserved_##m##_map[c]
+
+DECLARE_RESERVED_MAP(gpio, GPIO_BANK_NUM);
+DECLARE_RESERVED_MAP(peri, DIV_ROUND_UP(MAX_RESOURCES, GPIO_BANKSIZE));
+DECLARE_RESERVED_MAP(gpio_irq, GPIO_BANK_NUM);
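
The reservation maps above are plain per-bank bitmasks; the helper macros behave as in this sketch (assuming GPIO 35 on a 16-pin-per-bank part, i.e. bank 2, bit 3):

	reserve(gpio, 35);         /* reserved_gpio_map[2] |=  (1 << 3) */
	is_reserved(gpio, 35, 1);  /* reserved_gpio_map[2] &   (1 << 3) */
	unreserve(gpio, 35);       /* reserved_gpio_map[2] &= ~(1 << 3) */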
+
+inline int check_gpio(unsigned gpio)
+{
+#if defined(CONFIG_BF54x)
+ if (gpio == GPIO_PB15 || gpio == GPIO_PC14 || gpio == GPIO_PC15
+ || gpio == GPIO_PH14 || gpio == GPIO_PH15
+ || gpio == GPIO_PJ14 || gpio == GPIO_PJ15)
+ return -EINVAL;
+#endif
+ if (gpio >= MAX_BLACKFIN_GPIOS)
+ return -EINVAL;
+ return 0;
+}
+
+static void port_setup(unsigned gpio, unsigned short usage)
+{
+#if defined(BF538_FAMILY)
+ /*
+ * BF538/9 Ports C, D and E are special.
+ * Inverted PORT_FER polarity on C/D/E, and no PORT_FER on Port F.
+ * Regular Port F GPIOs are handled here; C/D/E are exclusively
+ * managed by GPIOLIB.
+ */
+
+ if (gpio < MAX_BLACKFIN_GPIOS || gpio >= MAX_RESOURCES)
+ return;
+
+ gpio -= MAX_BLACKFIN_GPIOS;
+
+ if (usage == GPIO_USAGE)
+ *port_fer[gpio_bank(gpio)] |= gpio_bit(gpio);
+ else
+ *port_fer[gpio_bank(gpio)] &= ~gpio_bit(gpio);
+ SSYNC();
+ return;
+#endif
+
+ if (check_gpio(gpio))
+ return;
+
+#if defined(CONFIG_BF52x) || defined(BF537_FAMILY) || defined(CONFIG_BF51x)
+ if (usage == GPIO_USAGE)
+ *port_fer[gpio_bank(gpio)] &= ~gpio_bit(gpio);
+ else
+ *port_fer[gpio_bank(gpio)] |= gpio_bit(gpio);
+ SSYNC();
+#elif defined(CONFIG_BF54x)
+ if (usage == GPIO_USAGE)
+ gpio_array[gpio_bank(gpio)]->port_fer &= ~gpio_bit(gpio);
+ else
+ gpio_array[gpio_bank(gpio)]->port_fer |= gpio_bit(gpio);
+ SSYNC();
+#endif
+}
+
+#ifdef BF537_FAMILY
+static const s8 port_mux[] = {
+ [GPIO_PF0] = 3,
+ [GPIO_PF1] = 3,
+ [GPIO_PF2] = 4,
+ [GPIO_PF3] = 4,
+ [GPIO_PF4] = 5,
+ [GPIO_PF5] = 6,
+ [GPIO_PF6] = 7,
+ [GPIO_PF7] = 8,
+ [GPIO_PF8 ... GPIO_PF15] = -1,
+ [GPIO_PG0 ... GPIO_PG7] = -1,
+ [GPIO_PG8] = 9,
+ [GPIO_PG9] = 9,
+ [GPIO_PG10] = 10,
+ [GPIO_PG11] = 10,
+ [GPIO_PG12] = 10,
+ [GPIO_PG13] = 11,
+ [GPIO_PG14] = 11,
+ [GPIO_PG15] = 11,
+ [GPIO_PH0 ... GPIO_PH15] = -1,
+ [PORT_PJ0 ... PORT_PJ3] = -1,
+ [PORT_PJ4] = 1,
+ [PORT_PJ5] = 1,
+ [PORT_PJ6 ... PORT_PJ9] = -1,
+ [PORT_PJ10] = 0,
+ [PORT_PJ11] = 0,
+};
+
+static int portmux_group_check(unsigned short per)
+{
+ u16 ident = P_IDENT(per);
+ u16 function = P_FUNCT2MUX(per);
+ s8 offset = port_mux[ident];
+ u16 m, pmux, pfunc;
+
+ if (offset < 0)
+ return 0;
+
+ pmux = bfin_read_PORT_MUX();
+ for (m = 0; m < ARRAY_SIZE(port_mux); ++m) {
+ if (m == ident)
+ continue;
+ if (port_mux[m] != offset)
+ continue;
+ if (!is_reserved(peri, m, 1))
+ continue;
+
+ if (offset == 1)
+ pfunc = (pmux >> offset) & 3;
+ else
+ pfunc = (pmux >> offset) & 1;
+ if (pfunc != function) {
+ pr_err("pin group conflict! request pin %d func %d conflict with pin %d func %d\n",
+ ident, function, m, pfunc);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static void portmux_setup(unsigned short per)
+{
+ u16 ident = P_IDENT(per);
+ u16 function = P_FUNCT2MUX(per);
+ s8 offset = port_mux[ident];
+ u16 pmux;
+
+ if (offset == -1)
+ return;
+
+ pmux = bfin_read_PORT_MUX();
+ if (offset != 1)
+ pmux &= ~(1 << offset);
+ else
+ pmux &= ~(3 << 1);
+ pmux |= (function << offset);
+ bfin_write_PORT_MUX(pmux);
+}
+#elif defined(CONFIG_BF54x)
+inline void portmux_setup(unsigned short per)
+{
+ u16 ident = P_IDENT(per);
+ u16 function = P_FUNCT2MUX(per);
+ u32 pmux;
+
+ pmux = gpio_array[gpio_bank(ident)]->port_mux;
+
+ pmux &= ~(0x3 << (2 * gpio_sub_n(ident)));
+ pmux |= (function & 0x3) << (2 * gpio_sub_n(ident));
+
+ gpio_array[gpio_bank(ident)]->port_mux = pmux;
+}
+
+inline u16 get_portmux(unsigned short per)
+{
+ u16 ident = P_IDENT(per);
+ u32 pmux = gpio_array[gpio_bank(ident)]->port_mux;
+ return (pmux >> (2 * gpio_sub_n(ident)) & 0x3);
+}
+static int portmux_group_check(unsigned short per)
+{
+ return 0;
+}
+#elif defined(CONFIG_BF52x) || defined(CONFIG_BF51x)
+static int portmux_group_check(unsigned short per)
+{
+ u16 ident = P_IDENT(per);
+ u16 function = P_FUNCT2MUX(per);
+ u8 offset = pmux_offset[gpio_bank(ident)][gpio_sub_n(ident)];
+ u16 pin, gpiopin, pfunc;
+
+ for (pin = 0; pin < GPIO_BANKSIZE; ++pin) {
+ if (offset != pmux_offset[gpio_bank(ident)][pin])
+ continue;
+
+ gpiopin = gpio_bank(ident) * GPIO_BANKSIZE + pin;
+ if (gpiopin == ident)
+ continue;
+ if (!is_reserved(peri, gpiopin, 1))
+ continue;
+
+ pfunc = *port_mux[gpio_bank(ident)];
+ pfunc = (pfunc >> offset) & 3;
+ if (pfunc != function) {
+ pr_err("pin group conflict! request pin %d func %d conflict with pin %d func %d\n",
+ ident, function, gpiopin, pfunc);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+inline void portmux_setup(unsigned short per)
+{
+ u16 ident = P_IDENT(per);
+ u16 function = P_FUNCT2MUX(per);
+ u8 offset = pmux_offset[gpio_bank(ident)][gpio_sub_n(ident)];
+ u16 pmux;
+
+ pmux = *port_mux[gpio_bank(ident)];
+ if (((pmux >> offset) & 3) == function)
+ return;
+ pmux &= ~(3 << offset);
+ pmux |= (function & 3) << offset;
+ *port_mux[gpio_bank(ident)] = pmux;
+ SSYNC();
+}
+#else
+# define portmux_setup(...) do { } while (0)
+static int portmux_group_check(unsigned short per)
+{
+ return 0;
+}
+#endif
+
+#ifndef CONFIG_BF54x
+/***********************************************************
+*
+* FUNCTIONS: Blackfin General Purpose Ports Access Functions
+*
+* INPUTS/OUTPUTS:
+* gpio - GPIO Number between 0 and MAX_BLACKFIN_GPIOS
+*
+*
+* DESCRIPTION: These functions abstract direct register access
+* to Blackfin processor General Purpose
+* Port Registers
+*
+* CAUTION: These functions do not belong to the GPIO Driver API
+*************************************************************
+* MODIFICATION HISTORY :
+**************************************************************/
+
+/* Set a specific bit */
+
+#define SET_GPIO(name) \
+void set_gpio_ ## name(unsigned gpio, unsigned short arg) \
+{ \
+ unsigned long flags; \
+ flags = hard_local_irq_save(); \
+ if (arg) \
+ gpio_array[gpio_bank(gpio)]->name |= gpio_bit(gpio); \
+ else \
+ gpio_array[gpio_bank(gpio)]->name &= ~gpio_bit(gpio); \
+ AWA_DUMMY_READ(name); \
+ hard_local_irq_restore(flags); \
+} \
+EXPORT_SYMBOL(set_gpio_ ## name);
+
+SET_GPIO(dir) /* set_gpio_dir() */
+SET_GPIO(inen) /* set_gpio_inen() */
+SET_GPIO(polar) /* set_gpio_polar() */
+SET_GPIO(edge) /* set_gpio_edge() */
+SET_GPIO(both) /* set_gpio_both() */
+
+
+#define SET_GPIO_SC(name) \
+void set_gpio_ ## name(unsigned gpio, unsigned short arg) \
+{ \
+ unsigned long flags; \
+ if (ANOMALY_05000311 || ANOMALY_05000323) \
+ flags = hard_local_irq_save(); \
+ if (arg) \
+ gpio_array[gpio_bank(gpio)]->name ## _set = gpio_bit(gpio); \
+ else \
+ gpio_array[gpio_bank(gpio)]->name ## _clear = gpio_bit(gpio); \
+ if (ANOMALY_05000311 || ANOMALY_05000323) { \
+ AWA_DUMMY_READ(name); \
+ hard_local_irq_restore(flags); \
+ } \
+} \
+EXPORT_SYMBOL(set_gpio_ ## name);
+
+SET_GPIO_SC(maska)
+SET_GPIO_SC(maskb)
+SET_GPIO_SC(data)
+
+void set_gpio_toggle(unsigned gpio)
+{
+ unsigned long flags;
+ if (ANOMALY_05000311 || ANOMALY_05000323)
+ flags = hard_local_irq_save();
+ gpio_array[gpio_bank(gpio)]->toggle = gpio_bit(gpio);
+ if (ANOMALY_05000311 || ANOMALY_05000323) {
+ AWA_DUMMY_READ(toggle);
+ hard_local_irq_restore(flags);
+ }
+}
+EXPORT_SYMBOL(set_gpio_toggle);
+
+
+/* Set current PORT data (16-bit word) */
+
+#define SET_GPIO_P(name) \
+void set_gpiop_ ## name(unsigned gpio, unsigned short arg) \
+{ \
+ unsigned long flags; \
+ if (ANOMALY_05000311 || ANOMALY_05000323) \
+ flags = hard_local_irq_save(); \
+ gpio_array[gpio_bank(gpio)]->name = arg; \
+ if (ANOMALY_05000311 || ANOMALY_05000323) { \
+ AWA_DUMMY_READ(name); \
+ hard_local_irq_restore(flags); \
+ } \
+} \
+EXPORT_SYMBOL(set_gpiop_ ## name);
+
+SET_GPIO_P(data)
+SET_GPIO_P(dir)
+SET_GPIO_P(inen)
+SET_GPIO_P(polar)
+SET_GPIO_P(edge)
+SET_GPIO_P(both)
+SET_GPIO_P(maska)
+SET_GPIO_P(maskb)
+
+/* Get a specific bit */
+#define GET_GPIO(name) \
+unsigned short get_gpio_ ## name(unsigned gpio) \
+{ \
+ unsigned long flags; \
+ unsigned short ret; \
+ if (ANOMALY_05000311 || ANOMALY_05000323) \
+ flags = hard_local_irq_save(); \
+ ret = 0x01 & (gpio_array[gpio_bank(gpio)]->name >> gpio_sub_n(gpio)); \
+ if (ANOMALY_05000311 || ANOMALY_05000323) { \
+ AWA_DUMMY_READ(name); \
+ hard_local_irq_restore(flags); \
+ } \
+ return ret; \
+} \
+EXPORT_SYMBOL(get_gpio_ ## name);
+
+GET_GPIO(data)
+GET_GPIO(dir)
+GET_GPIO(inen)
+GET_GPIO(polar)
+GET_GPIO(edge)
+GET_GPIO(both)
+GET_GPIO(maska)
+GET_GPIO(maskb)
+
+/* Get current PORT data (16-bit word) */
+
+#define GET_GPIO_P(name) \
+unsigned short get_gpiop_ ## name(unsigned gpio) \
+{ \
+ unsigned long flags; \
+ unsigned short ret; \
+ if (ANOMALY_05000311 || ANOMALY_05000323) \
+ flags = hard_local_irq_save(); \
+ ret = (gpio_array[gpio_bank(gpio)]->name); \
+ if (ANOMALY_05000311 || ANOMALY_05000323) { \
+ AWA_DUMMY_READ(name); \
+ hard_local_irq_restore(flags); \
+ } \
+ return ret; \
+} \
+EXPORT_SYMBOL(get_gpiop_ ## name);
+
+GET_GPIO_P(data)
+GET_GPIO_P(dir)
+GET_GPIO_P(inen)
+GET_GPIO_P(polar)
+GET_GPIO_P(edge)
+GET_GPIO_P(both)
+GET_GPIO_P(maska)
+GET_GPIO_P(maskb)
+
+
+#ifdef CONFIG_PM
+DECLARE_RESERVED_MAP(wakeup, GPIO_BANK_NUM);
+
+static const unsigned int sic_iwr_irqs[] = {
+#if defined(BF533_FAMILY)
+ IRQ_PROG_INTB
+#elif defined(BF537_FAMILY)
+ IRQ_PF_INTB_WATCH, IRQ_PORTG_INTB, IRQ_PH_INTB_MAC_TX
+#elif defined(BF538_FAMILY)
+ IRQ_PORTF_INTB
+#elif defined(CONFIG_BF52x) || defined(CONFIG_BF51x)
+ IRQ_PORTF_INTB, IRQ_PORTG_INTB, IRQ_PORTH_INTB
+#elif defined(BF561_FAMILY)
+ IRQ_PROG0_INTB, IRQ_PROG1_INTB, IRQ_PROG2_INTB
+#else
+# error no SIC_IWR defined
+#endif
+};
+
+/***********************************************************
+*
+* FUNCTIONS: Blackfin PM Setup API
+*
+* INPUTS/OUTPUTS:
+* gpio - GPIO Number between 0 and MAX_BLACKFIN_GPIOS
+* type -
+* PM_WAKE_RISING
+* PM_WAKE_FALLING
+* PM_WAKE_HIGH
+* PM_WAKE_LOW
+* PM_WAKE_BOTH_EDGES
+*
+* DESCRIPTION: Blackfin PM Driver API
+*
+* CAUTION:
+*************************************************************
+* MODIFICATION HISTORY :
+**************************************************************/
+int gpio_pm_wakeup_ctrl(unsigned gpio, unsigned ctrl)
+{
+ unsigned long flags;
+
+ if (check_gpio(gpio) < 0)
+ return -EINVAL;
+
+ flags = hard_local_irq_save();
+ if (ctrl)
+ reserve(wakeup, gpio);
+ else
+ unreserve(wakeup, gpio);
+
+ set_gpio_maskb(gpio, ctrl);
+ hard_local_irq_restore(flags);
+
+ return 0;
+}
+
+int bfin_pm_standby_ctrl(unsigned ctrl)
+{
+ u16 bank, mask, i;
+
+ for (i = 0; i < MAX_BLACKFIN_GPIOS; i += GPIO_BANKSIZE) {
+ mask = map_entry(wakeup, i);
+ bank = gpio_bank(i);
+
+ if (mask)
+ bfin_internal_set_wake(sic_iwr_irqs[bank], ctrl);
+ }
+ return 0;
+}
+
+void bfin_gpio_pm_hibernate_suspend(void)
+{
+ int i, bank;
+
+#ifdef BF538_FAMILY
+ for (i = 0; i < ARRAY_SIZE(port_fer_saved); ++i)
+ port_fer_saved[i] = *port_fer[i];
+#endif
+
+ for (i = 0; i < MAX_BLACKFIN_GPIOS; i += GPIO_BANKSIZE) {
+ bank = gpio_bank(i);
+
+#if defined(CONFIG_BF52x) || defined(BF537_FAMILY) || defined(CONFIG_BF51x)
+ gpio_bank_saved[bank].fer = *port_fer[bank];
+#if defined(CONFIG_BF52x) || defined(CONFIG_BF51x)
+ gpio_bank_saved[bank].mux = *port_mux[bank];
+#else
+ if (bank == 0)
+ gpio_bank_saved[bank].mux = bfin_read_PORT_MUX();
+#endif
+#endif
+ gpio_bank_saved[bank].data = gpio_array[bank]->data;
+ gpio_bank_saved[bank].inen = gpio_array[bank]->inen;
+ gpio_bank_saved[bank].polar = gpio_array[bank]->polar;
+ gpio_bank_saved[bank].dir = gpio_array[bank]->dir;
+ gpio_bank_saved[bank].edge = gpio_array[bank]->edge;
+ gpio_bank_saved[bank].both = gpio_array[bank]->both;
+ gpio_bank_saved[bank].maska = gpio_array[bank]->maska;
+ }
+
+#ifdef BFIN_SPECIAL_GPIO_BANKS
+ bfin_special_gpio_pm_hibernate_suspend();
+#endif
+
+ AWA_DUMMY_READ(maska);
+}
+
+void bfin_gpio_pm_hibernate_restore(void)
+{
+ int i, bank;
+
+#ifdef BF538_FAMILY
+ for (i = 0; i < ARRAY_SIZE(port_fer_saved); ++i)
+ *port_fer[i] = port_fer_saved[i];
+#endif
+
+ for (i = 0; i < MAX_BLACKFIN_GPIOS; i += GPIO_BANKSIZE) {
+ bank = gpio_bank(i);
+
+#if defined(CONFIG_BF52x) || defined(BF537_FAMILY) || defined(CONFIG_BF51x)
+#if defined(CONFIG_BF52x) || defined(CONFIG_BF51x)
+ *port_mux[bank] = gpio_bank_saved[bank].mux;
+#else
+ if (bank == 0)
+ bfin_write_PORT_MUX(gpio_bank_saved[bank].mux);
+#endif
+ *port_fer[bank] = gpio_bank_saved[bank].fer;
+#endif
+ gpio_array[bank]->inen = gpio_bank_saved[bank].inen;
+ gpio_array[bank]->data_set = gpio_bank_saved[bank].data
+ & gpio_bank_saved[bank].dir;
+ gpio_array[bank]->dir = gpio_bank_saved[bank].dir;
+ gpio_array[bank]->polar = gpio_bank_saved[bank].polar;
+ gpio_array[bank]->edge = gpio_bank_saved[bank].edge;
+ gpio_array[bank]->both = gpio_bank_saved[bank].both;
+ gpio_array[bank]->maska = gpio_bank_saved[bank].maska;
+ }
+
+#ifdef BFIN_SPECIAL_GPIO_BANKS
+ bfin_special_gpio_pm_hibernate_restore();
+#endif
+
+ AWA_DUMMY_READ(maska);
+}
+
+
+#endif
+#else /* CONFIG_BF54x */
+#ifdef CONFIG_PM
+
+int bfin_pm_standby_ctrl(unsigned ctrl)
+{
+ return 0;
+}
+
+void bfin_gpio_pm_hibernate_suspend(void)
+{
+ int i, bank;
+
+ for (i = 0; i < MAX_BLACKFIN_GPIOS; i += GPIO_BANKSIZE) {
+ bank = gpio_bank(i);
+
+ gpio_bank_saved[bank].fer = gpio_array[bank]->port_fer;
+ gpio_bank_saved[bank].mux = gpio_array[bank]->port_mux;
+ gpio_bank_saved[bank].data = gpio_array[bank]->data;
+ gpio_bank_saved[bank].inen = gpio_array[bank]->inen;
+ gpio_bank_saved[bank].dir = gpio_array[bank]->dir_set;
+ }
+}
+
+void bfin_gpio_pm_hibernate_restore(void)
+{
+ int i, bank;
+
+ for (i = 0; i < MAX_BLACKFIN_GPIOS; i += GPIO_BANKSIZE) {
+ bank = gpio_bank(i);
+
+ gpio_array[bank]->port_mux = gpio_bank_saved[bank].mux;
+ gpio_array[bank]->port_fer = gpio_bank_saved[bank].fer;
+ gpio_array[bank]->inen = gpio_bank_saved[bank].inen;
+ gpio_array[bank]->data_set = gpio_bank_saved[bank].data
+ & gpio_bank_saved[bank].dir;
+ gpio_array[bank]->dir_set = gpio_bank_saved[bank].dir;
+ }
+}
+#endif
+
+unsigned short get_gpio_dir(unsigned gpio)
+{
+ return (0x01 & (gpio_array[gpio_bank(gpio)]->dir_clear >> gpio_sub_n(gpio)));
+}
+EXPORT_SYMBOL(get_gpio_dir);
+
+#endif /* CONFIG_BF54x */
+
+/***********************************************************
+*
+* FUNCTIONS: Blackfin Peripheral Resource Allocation
+* and PortMux Setup
+*
+* INPUTS/OUTPUTS:
+* per Peripheral Identifier
+* label String
+*
+* DESCRIPTION: Blackfin Peripheral Resource Allocation and Setup API
+*
+* CAUTION:
+*************************************************************
+* MODIFICATION HISTORY :
+**************************************************************/
+
+int peripheral_request(unsigned short per, const char *label)
+{
+ unsigned long flags;
+ unsigned short ident = P_IDENT(per);
+
+ /*
+ * "Don't care" pins have only one dedicated function
+ */
+
+ if (per & P_DONTCARE)
+ return 0;
+
+ if (!(per & P_DEFINED))
+ return -ENODEV;
+
+ BUG_ON(ident >= MAX_RESOURCES);
+
+ flags = hard_local_irq_save();
+
+ /* If a pin can be muxed as either GPIO or peripheral, make
+ * sure it is not already a GPIO pin when we request it.
+ */
+ if (unlikely(!check_gpio(ident) && is_reserved(gpio, ident, 1))) {
+ if (system_state == SYSTEM_BOOTING)
+ dump_stack();
+ printk(KERN_ERR
+ "%s: Peripheral %d is already reserved as GPIO by %s !\n",
+ __func__, ident, get_label(ident));
+ hard_local_irq_restore(flags);
+ return -EBUSY;
+ }
+
+ if (unlikely(is_reserved(peri, ident, 1))) {
+
+ /*
+ * Pin functions like AMC address strobes may
+ * be requested and used by several drivers
+ */
+
+#ifdef CONFIG_BF54x
+ if (!((per & P_MAYSHARE) && get_portmux(per) == P_FUNCT2MUX(per))) {
+#else
+ if (!(per & P_MAYSHARE)) {
+#endif
+ /*
+ * Allow the identical pin function to be
+ * requested twice from the same driver
+ */
+
+ if (cmp_label(ident, label) == 0)
+ goto anyway;
+
+ if (system_state == SYSTEM_BOOTING)
+ dump_stack();
+ printk(KERN_ERR
+ "%s: Peripheral %d function %d is already reserved by %s !\n",
+ __func__, ident, P_FUNCT2MUX(per), get_label(ident));
+ hard_local_irq_restore(flags);
+ return -EBUSY;
+ }
+ }
+
+ if (unlikely(portmux_group_check(per))) {
+ hard_local_irq_restore(flags);
+ return -EBUSY;
+ }
+ anyway:
+ reserve(peri, ident);
+
+ portmux_setup(per);
+ port_setup(ident, PERIPHERAL_USAGE);
+
+ hard_local_irq_restore(flags);
+ set_label(ident, label);
+
+ return 0;
+}
+EXPORT_SYMBOL(peripheral_request);
+
+int peripheral_request_list(const unsigned short per[], const char *label)
+{
+ u16 cnt;
+ int ret;
+
+ for (cnt = 0; per[cnt] != 0; cnt++) {
+
+ ret = peripheral_request(per[cnt], label);
+
+ if (ret < 0) {
+ for ( ; cnt > 0; cnt--)
+ peripheral_free(per[cnt - 1]);
+
+ return ret;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(peripheral_request_list);
+
+void peripheral_free(unsigned short per)
+{
+ unsigned long flags;
+ unsigned short ident = P_IDENT(per);
+
+ if (per & P_DONTCARE)
+ return;
+
+ if (!(per & P_DEFINED))
+ return;
+
+ flags = hard_local_irq_save();
+
+ if (unlikely(!is_reserved(peri, ident, 0))) {
+ hard_local_irq_restore(flags);
+ return;
+ }
+
+ if (!(per & P_MAYSHARE))
+ port_setup(ident, GPIO_USAGE);
+
+ unreserve(peri, ident);
+
+ set_label(ident, "free");
+
+ hard_local_irq_restore(flags);
+}
+EXPORT_SYMBOL(peripheral_free);
+
+void peripheral_free_list(const unsigned short per[])
+{
+ u16 cnt;
+ for (cnt = 0; per[cnt] != 0; cnt++)
+ peripheral_free(per[cnt]);
+}
+EXPORT_SYMBOL(peripheral_free_list);
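
A typical consumer of the list API above, as a sketch (the pin identifiers and label are illustrative):

static const unsigned short my_uart_pins[] = {
	P_UART0_TX, P_UART0_RX, 0,	/* list must be zero-terminated */
};

static int my_uart_probe(void)
{
	int ret = peripheral_request_list(my_uart_pins, "my-uart");
	if (ret)
		return ret;	/* a pin was busy; earlier pins were rolled back */
	/* ... program the peripheral ... */
	return 0;
}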
+
+/***********************************************************
+*
+* FUNCTIONS: Blackfin GPIO Driver
+*
+* INPUTS/OUTPUTS:
+* gpio PIO Number between 0 and MAX_BLACKFIN_GPIOS
+* label String
+*
+* DESCRIPTION: Blackfin GPIO Driver API
+*
+* CAUTION:
+*************************************************************
+* MODIFICATION HISTORY :
+**************************************************************/
+
+int bfin_gpio_request(unsigned gpio, const char *label)
+{
+ unsigned long flags;
+
+ if (check_gpio(gpio) < 0)
+ return -EINVAL;
+
+ flags = hard_local_irq_save();
+
+ /*
+ * If the identical GPIO is requested again with the same
+ * label (e.g. from the same driver twice), do nothing
+ * and return success.
+ */
+
+ if (cmp_label(gpio, label) == 0) {
+ hard_local_irq_restore(flags);
+ return 0;
+ }
+
+ if (unlikely(is_reserved(gpio, gpio, 1))) {
+ if (system_state == SYSTEM_BOOTING)
+ dump_stack();
+ printk(KERN_ERR "bfin-gpio: GPIO %d is already reserved by %s !\n",
+ gpio, get_label(gpio));
+ hard_local_irq_restore(flags);
+ return -EBUSY;
+ }
+ if (unlikely(is_reserved(peri, gpio, 1))) {
+ if (system_state == SYSTEM_BOOTING)
+ dump_stack();
+ printk(KERN_ERR
+ "bfin-gpio: GPIO %d is already reserved as Peripheral by %s !\n",
+ gpio, get_label(gpio));
+ hard_local_irq_restore(flags);
+ return -EBUSY;
+ }
+ if (unlikely(is_reserved(gpio_irq, gpio, 1))) {
+ printk(KERN_NOTICE "bfin-gpio: GPIO %d is already reserved as gpio-irq!"
+ " (Documentation/blackfin/bfin-gpio-notes.txt)\n", gpio);
+ }
+#ifndef CONFIG_BF54x
+ else { /* Reset POLAR setting when acquiring a gpio for the first time */
+ set_gpio_polar(gpio, 0);
+ }
+#endif
+
+ reserve(gpio, gpio);
+ set_label(gpio, label);
+
+ hard_local_irq_restore(flags);
+
+ port_setup(gpio, GPIO_USAGE);
+
+ return 0;
+}
+EXPORT_SYMBOL(bfin_gpio_request);
+
+void bfin_gpio_free(unsigned gpio)
+{
+ unsigned long flags;
+
+ if (check_gpio(gpio) < 0)
+ return;
+
+ might_sleep();
+
+ flags = hard_local_irq_save();
+
+ if (unlikely(!is_reserved(gpio, gpio, 0))) {
+ if (system_state == SYSTEM_BOOTING)
+ dump_stack();
+ gpio_error(gpio);
+ hard_local_irq_restore(flags);
+ return;
+ }
+
+ unreserve(gpio, gpio);
+
+ set_label(gpio, "free");
+
+ hard_local_irq_restore(flags);
+}
+EXPORT_SYMBOL(bfin_gpio_free);
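
The usual request -> configure -> use -> free flow for this layer, as a sketch (GPIO_PF5 and the label are illustrative):

static int my_led_setup(void)
{
	int ret = bfin_gpio_request(GPIO_PF5, "my-led");
	if (ret)
		return ret;

	ret = bfin_gpio_direction_output(GPIO_PF5, 0);	/* drive low initially */
	if (ret) {
		bfin_gpio_free(GPIO_PF5);
		return ret;
	}

	bfin_gpio_set_value(GPIO_PF5, 1);	/* turn the LED on */
	return 0;
}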
+
+#ifdef BFIN_SPECIAL_GPIO_BANKS
+DECLARE_RESERVED_MAP(special_gpio, gpio_bank(MAX_RESOURCES));
+
+int bfin_special_gpio_request(unsigned gpio, const char *label)
+{
+ unsigned long flags;
+
+ flags = hard_local_irq_save();
+
+ /*
+ * If the identical GPIO is requested again with the same
+ * label (e.g. from the same driver twice), do nothing
+ * and return success.
+ */
+
+ if (cmp_label(gpio, label) == 0) {
+ hard_local_irq_restore(flags);
+ return 0;
+ }
+
+ if (unlikely(is_reserved(special_gpio, gpio, 1))) {
+ hard_local_irq_restore(flags);
+ printk(KERN_ERR "bfin-gpio: GPIO %d is already reserved by %s !\n",
+ gpio, get_label(gpio));
+
+ return -EBUSY;
+ }
+ if (unlikely(is_reserved(peri, gpio, 1))) {
+ hard_local_irq_restore(flags);
+ printk(KERN_ERR
+ "bfin-gpio: GPIO %d is already reserved as Peripheral by %s !\n",
+ gpio, get_label(gpio));
+
+ return -EBUSY;
+ }
+
+ reserve(special_gpio, gpio);
+ reserve(peri, gpio);
+
+ set_label(gpio, label);
+ hard_local_irq_restore(flags);
+ port_setup(gpio, GPIO_USAGE);
+
+ return 0;
+}
+EXPORT_SYMBOL(bfin_special_gpio_request);
+
+void bfin_special_gpio_free(unsigned gpio)
+{
+ unsigned long flags;
+
+ might_sleep();
+
+ flags = hard_local_irq_save();
+
+ if (unlikely(!is_reserved(special_gpio, gpio, 0))) {
+ gpio_error(gpio);
+ hard_local_irq_restore(flags);
+ return;
+ }
+
+ unreserve(special_gpio, gpio);
+ unreserve(peri, gpio);
+ set_label(gpio, "free");
+ hard_local_irq_restore(flags);
+}
+EXPORT_SYMBOL(bfin_special_gpio_free);
+#endif
+
+
+int bfin_gpio_irq_request(unsigned gpio, const char *label)
+{
+ unsigned long flags;
+
+ if (check_gpio(gpio) < 0)
+ return -EINVAL;
+
+ flags = hard_local_irq_save();
+
+ if (unlikely(is_reserved(peri, gpio, 1))) {
+ if (system_state == SYSTEM_BOOTING)
+ dump_stack();
+ printk(KERN_ERR
+ "bfin-gpio: GPIO %d is already reserved as Peripheral by %s !\n",
+ gpio, get_label(gpio));
+ hard_local_irq_restore(flags);
+ return -EBUSY;
+ }
+ if (unlikely(is_reserved(gpio, gpio, 1)))
+ printk(KERN_NOTICE "bfin-gpio: GPIO %d is already reserved by %s! "
+ "(Documentation/blackfin/bfin-gpio-notes.txt)\n",
+ gpio, get_label(gpio));
+
+ reserve(gpio_irq, gpio);
+ set_label(gpio, label);
+
+ hard_local_irq_restore(flags);
+
+ port_setup(gpio, GPIO_USAGE);
+
+ return 0;
+}
+
+void bfin_gpio_irq_free(unsigned gpio)
+{
+ unsigned long flags;
+
+ if (check_gpio(gpio) < 0)
+ return;
+
+ flags = hard_local_irq_save();
+
+ if (unlikely(!is_reserved(gpio_irq, gpio, 0))) {
+ if (system_state == SYSTEM_BOOTING)
+ dump_stack();
+ gpio_error(gpio);
+ hard_local_irq_restore(flags);
+ return;
+ }
+
+ unreserve(gpio_irq, gpio);
+
+ set_label(gpio, "free");
+
+ hard_local_irq_restore(flags);
+}
+
+static inline void __bfin_gpio_direction_input(unsigned gpio)
+{
+#ifdef CONFIG_BF54x
+ gpio_array[gpio_bank(gpio)]->dir_clear = gpio_bit(gpio);
+#else
+ gpio_array[gpio_bank(gpio)]->dir &= ~gpio_bit(gpio);
+#endif
+ gpio_array[gpio_bank(gpio)]->inen |= gpio_bit(gpio);
+}
+
+int bfin_gpio_direction_input(unsigned gpio)
+{
+ unsigned long flags;
+
+ if (unlikely(!is_reserved(gpio, gpio, 0))) {
+ gpio_error(gpio);
+ return -EINVAL;
+ }
+
+ flags = hard_local_irq_save();
+ __bfin_gpio_direction_input(gpio);
+ AWA_DUMMY_READ(inen);
+ hard_local_irq_restore(flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(bfin_gpio_direction_input);
+
+void bfin_gpio_irq_prepare(unsigned gpio)
+{
+#ifdef CONFIG_BF54x
+ unsigned long flags;
+#endif
+
+ port_setup(gpio, GPIO_USAGE);
+
+#ifdef CONFIG_BF54x
+ flags = hard_local_irq_save();
+ __bfin_gpio_direction_input(gpio);
+ hard_local_irq_restore(flags);
+#endif
+}
+
+void bfin_gpio_set_value(unsigned gpio, int arg)
+{
+ if (arg)
+ gpio_array[gpio_bank(gpio)]->data_set = gpio_bit(gpio);
+ else
+ gpio_array[gpio_bank(gpio)]->data_clear = gpio_bit(gpio);
+}
+EXPORT_SYMBOL(bfin_gpio_set_value);
+
+int bfin_gpio_direction_output(unsigned gpio, int value)
+{
+ unsigned long flags;
+
+ if (unlikely(!is_reserved(gpio, gpio, 0))) {
+ gpio_error(gpio);
+ return -EINVAL;
+ }
+
+ flags = hard_local_irq_save();
+
+ gpio_array[gpio_bank(gpio)]->inen &= ~gpio_bit(gpio);
+ gpio_set_value(gpio, value);
+#ifdef CONFIG_BF54x
+ gpio_array[gpio_bank(gpio)]->dir_set = gpio_bit(gpio);
+#else
+ gpio_array[gpio_bank(gpio)]->dir |= gpio_bit(gpio);
+#endif
+
+ AWA_DUMMY_READ(dir);
+ hard_local_irq_restore(flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(bfin_gpio_direction_output);
+
+int bfin_gpio_get_value(unsigned gpio)
+{
+#ifdef CONFIG_BF54x
+ return (1 & (gpio_array[gpio_bank(gpio)]->data >> gpio_sub_n(gpio)));
+#else
+ unsigned long flags;
+
+ if (unlikely(get_gpio_edge(gpio))) {
+ int ret;
+ flags = hard_local_irq_save();
+ set_gpio_edge(gpio, 0);
+ ret = get_gpio_data(gpio);
+ set_gpio_edge(gpio, 1);
+ hard_local_irq_restore(flags);
+ return ret;
+ } else
+ return get_gpio_data(gpio);
+#endif
+}
+EXPORT_SYMBOL(bfin_gpio_get_value);
+
+/* If we are booting from SPI and our board lacks a strong enough pull up,
+ * the core can reset and execute the bootrom faster than the resistor can
+ * pull the signal logically high. To work around this (common) error in
+ * board design, we explicitly set the pin back to GPIO mode, force /CS
+ * high, and wait for the electrons to do their thing.
+ *
+ * This function only makes sense to be called from reset code, but it
+ * lives here as we need to force all the GPIO states w/out going through
+ * BUG() checks and such.
+ */
+void bfin_reset_boot_spi_cs(unsigned short pin)
+{
+ unsigned short gpio = P_IDENT(pin);
+ port_setup(gpio, GPIO_USAGE);
+ gpio_array[gpio_bank(gpio)]->data_set = gpio_bit(gpio);
+ AWA_DUMMY_READ(data_set);
+ udelay(1);
+}
+
+#if defined(CONFIG_PROC_FS)
+static int gpio_proc_show(struct seq_file *m, void *v)
+{
+ int c, irq, gpio;
+
+ for (c = 0; c < MAX_RESOURCES; c++) {
+ irq = is_reserved(gpio_irq, c, 1);
+ gpio = is_reserved(gpio, c, 1);
+ if (!check_gpio(c) && (gpio || irq))
+ seq_printf(m, "GPIO_%d: \t%s%s \t\tGPIO %s\n", c,
+ get_label(c), (gpio && irq) ? " *" : "",
+ get_gpio_dir(c) ? "OUTPUT" : "INPUT");
+ else if (is_reserved(peri, c, 1))
+ seq_printf(m, "GPIO_%d: \t%s \t\tPeripheral\n", c, get_label(c));
+ else
+ continue;
+ }
+
+ return 0;
+}
+
+static int gpio_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, gpio_proc_show, NULL);
+}
+
+static const struct file_operations gpio_proc_ops = {
+ .open = gpio_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static __init int gpio_register_proc(void)
+{
+ struct proc_dir_entry *proc_gpio;
+
+ proc_gpio = proc_create("gpio", S_IRUGO, NULL, &gpio_proc_ops);
+ return proc_gpio != NULL;
+}
+__initcall(gpio_register_proc);
+#endif
+
+#ifdef CONFIG_GPIOLIB
+static int bfin_gpiolib_direction_input(struct gpio_chip *chip, unsigned gpio)
+{
+ return bfin_gpio_direction_input(gpio);
+}
+
+static int bfin_gpiolib_direction_output(struct gpio_chip *chip, unsigned gpio, int level)
+{
+ return bfin_gpio_direction_output(gpio, level);
+}
+
+static int bfin_gpiolib_get_value(struct gpio_chip *chip, unsigned gpio)
+{
+ return bfin_gpio_get_value(gpio);
+}
+
+static void bfin_gpiolib_set_value(struct gpio_chip *chip, unsigned gpio, int value)
+{
+ return bfin_gpio_set_value(gpio, value);
+}
+
+static int bfin_gpiolib_gpio_request(struct gpio_chip *chip, unsigned gpio)
+{
+ return bfin_gpio_request(gpio, chip->label);
+}
+
+static void bfin_gpiolib_gpio_free(struct gpio_chip *chip, unsigned gpio)
+{
+ return bfin_gpio_free(gpio);
+}
+
+static int bfin_gpiolib_gpio_to_irq(struct gpio_chip *chip, unsigned gpio)
+{
+ return gpio + GPIO_IRQ_BASE;
+}
+
+static struct gpio_chip bfin_chip = {
+ .label = "BFIN-GPIO",
+ .direction_input = bfin_gpiolib_direction_input,
+ .get = bfin_gpiolib_get_value,
+ .direction_output = bfin_gpiolib_direction_output,
+ .set = bfin_gpiolib_set_value,
+ .request = bfin_gpiolib_gpio_request,
+ .free = bfin_gpiolib_gpio_free,
+ .to_irq = bfin_gpiolib_gpio_to_irq,
+ .base = 0,
+ .ngpio = MAX_BLACKFIN_GPIOS,
+};
+
+static int __init bfin_gpiolib_setup(void)
+{
+ return gpiochip_add(&bfin_chip);
+}
+arch_initcall(bfin_gpiolib_setup);
+#endif
diff --git a/arch/blackfin/kernel/bfin_ksyms.c b/arch/blackfin/kernel/bfin_ksyms.c
new file mode 100644
index 00000000..c446591b
--- /dev/null
+++ b/arch/blackfin/kernel/bfin_ksyms.c
@@ -0,0 +1,124 @@
+/*
+ * arch/blackfin/kernel/bfin_ksyms.c - exports for random symbols
+ *
+ * Copyright 2004-2008 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/module.h>
+#include <linux/uaccess.h>
+
+#include <asm/cacheflush.h>
+#include <asm/io.h>
+#include <asm/irq_handler.h>
+
+/* Allow people to have their own Blackfin exception handler in a module */
+EXPORT_SYMBOL(bfin_return_from_exception);
+
+/* All the Blackfin cache functions: mach-common/cache.S */
+EXPORT_SYMBOL(blackfin_dcache_invalidate_range);
+EXPORT_SYMBOL(blackfin_icache_flush_range);
+EXPORT_SYMBOL(blackfin_dcache_flush_range);
+EXPORT_SYMBOL(blackfin_dflush_page);
+
+/* The following are special because they're not called
+ * explicitly (the C compiler generates them). Fortunately,
+ * their interface isn't gonna change any time soon now, so
+ * it's OK to leave it out of version control.
+ */
+EXPORT_SYMBOL(memcpy);
+EXPORT_SYMBOL(memset);
+EXPORT_SYMBOL(memcmp);
+EXPORT_SYMBOL(memmove);
+EXPORT_SYMBOL(memchr);
+
+/*
+ * The string functions exist both as inlines and as out-of-line
+ * functions, and arch/blackfin/lib is configured as a library path in
+ * the Makefile, so the symbols exported there are never linked into
+ * built-in.o -- only the inline versions are used. To make the string
+ * symbols available to kernel modules, they must be exported here.
+ */
+EXPORT_SYMBOL(strcpy);
+EXPORT_SYMBOL(strncpy);
+EXPORT_SYMBOL(strcmp);
+EXPORT_SYMBOL(strncmp);
+
+/*
+ * libgcc functions - functions that are used internally by the
+ * compiler... (the prototypes are not correct, but that
+ * doesn't really matter since they're not versioned).
+ */
+extern void __ashldi3(void);
+extern void __ashrdi3(void);
+extern void __smulsi3_highpart(void);
+extern void __umulsi3_highpart(void);
+extern void __divsi3(void);
+extern void __lshrdi3(void);
+extern void __modsi3(void);
+extern void __muldi3(void);
+extern void __udivsi3(void);
+extern void __umodsi3(void);
+EXPORT_SYMBOL(__ashldi3);
+EXPORT_SYMBOL(__ashrdi3);
+EXPORT_SYMBOL(__umulsi3_highpart);
+EXPORT_SYMBOL(__smulsi3_highpart);
+EXPORT_SYMBOL(__divsi3);
+EXPORT_SYMBOL(__lshrdi3);
+EXPORT_SYMBOL(__modsi3);
+EXPORT_SYMBOL(__muldi3);
+EXPORT_SYMBOL(__udivsi3);
+EXPORT_SYMBOL(__umodsi3);
+
+/* Input/output symbols: lib/{in,out}s.S */
+EXPORT_SYMBOL(outsb);
+EXPORT_SYMBOL(insb);
+EXPORT_SYMBOL(outsw);
+EXPORT_SYMBOL(outsw_8);
+EXPORT_SYMBOL(insw);
+EXPORT_SYMBOL(insw_8);
+EXPORT_SYMBOL(outsl);
+EXPORT_SYMBOL(insl);
+EXPORT_SYMBOL(insl_16);
+
+#ifdef CONFIG_SMP
+EXPORT_SYMBOL(__raw_atomic_update_asm);
+EXPORT_SYMBOL(__raw_atomic_clear_asm);
+EXPORT_SYMBOL(__raw_atomic_set_asm);
+EXPORT_SYMBOL(__raw_atomic_xor_asm);
+EXPORT_SYMBOL(__raw_atomic_test_asm);
+EXPORT_SYMBOL(__raw_xchg_1_asm);
+EXPORT_SYMBOL(__raw_xchg_2_asm);
+EXPORT_SYMBOL(__raw_xchg_4_asm);
+EXPORT_SYMBOL(__raw_cmpxchg_1_asm);
+EXPORT_SYMBOL(__raw_cmpxchg_2_asm);
+EXPORT_SYMBOL(__raw_cmpxchg_4_asm);
+EXPORT_SYMBOL(__raw_spin_is_locked_asm);
+EXPORT_SYMBOL(__raw_spin_lock_asm);
+EXPORT_SYMBOL(__raw_spin_trylock_asm);
+EXPORT_SYMBOL(__raw_spin_unlock_asm);
+EXPORT_SYMBOL(__raw_read_lock_asm);
+EXPORT_SYMBOL(__raw_read_trylock_asm);
+EXPORT_SYMBOL(__raw_read_unlock_asm);
+EXPORT_SYMBOL(__raw_write_lock_asm);
+EXPORT_SYMBOL(__raw_write_trylock_asm);
+EXPORT_SYMBOL(__raw_write_unlock_asm);
+EXPORT_SYMBOL(__raw_bit_set_asm);
+EXPORT_SYMBOL(__raw_bit_clear_asm);
+EXPORT_SYMBOL(__raw_bit_toggle_asm);
+EXPORT_SYMBOL(__raw_bit_test_asm);
+EXPORT_SYMBOL(__raw_bit_test_set_asm);
+EXPORT_SYMBOL(__raw_bit_test_clear_asm);
+EXPORT_SYMBOL(__raw_bit_test_toggle_asm);
+EXPORT_SYMBOL(__raw_uncached_fetch_asm);
+#ifdef __ARCH_SYNC_CORE_DCACHE
+EXPORT_SYMBOL(__raw_smp_mark_barrier_asm);
+EXPORT_SYMBOL(__raw_smp_check_barrier_asm);
+#endif
+#endif
+
+#ifdef CONFIG_FUNCTION_TRACER
+extern void _mcount(void);
+EXPORT_SYMBOL(_mcount);
+#endif
diff --git a/arch/blackfin/kernel/cplb-mpu/Makefile b/arch/blackfin/kernel/cplb-mpu/Makefile
new file mode 100644
index 00000000..394d0b1b
--- /dev/null
+++ b/arch/blackfin/kernel/cplb-mpu/Makefile
@@ -0,0 +1,10 @@
+#
+# arch/blackfin/kernel/cplb-mpu/Makefile
+#
+
+obj-y := cplbinit.o cplbmgr.o
+
+CFLAGS_cplbmgr.o := -ffixed-I0 -ffixed-I1 -ffixed-I2 -ffixed-I3 \
+ -ffixed-L0 -ffixed-L1 -ffixed-L2 -ffixed-L3 \
+ -ffixed-M0 -ffixed-M1 -ffixed-M2 -ffixed-M3 \
+ -ffixed-B0 -ffixed-B1 -ffixed-B2 -ffixed-B3
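+
+# Note: the -ffixed-* flags keep the compiler away from the DAG/loop
+# registers (I/L/M/B) in the CPLB manager, presumably because the fast
+# exception path that invokes it does not save them; see the WARNING
+# comment at the top of cplbmgr.c.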
diff --git a/arch/blackfin/kernel/cplb-mpu/cplbinit.c b/arch/blackfin/kernel/cplb-mpu/cplbinit.c
new file mode 100644
index 00000000..c15fd05f
--- /dev/null
+++ b/arch/blackfin/kernel/cplb-mpu/cplbinit.c
@@ -0,0 +1,102 @@
+/*
+ * Blackfin CPLB initialization
+ *
+ * Copyright 2008-2009 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/module.h>
+
+#include <asm/blackfin.h>
+#include <asm/cplb.h>
+#include <asm/cplbinit.h>
+#include <asm/mem_map.h>
+
+struct cplb_entry icplb_tbl[NR_CPUS][MAX_CPLBS];
+struct cplb_entry dcplb_tbl[NR_CPUS][MAX_CPLBS];
+
+int first_switched_icplb, first_switched_dcplb;
+int first_mask_dcplb;
+
+void __init generate_cplb_tables_cpu(unsigned int cpu)
+{
+ int i_d, i_i;
+ unsigned long addr;
+ unsigned long d_data, i_data;
+ unsigned long d_cache = 0, i_cache = 0;
+
+ printk(KERN_INFO "MPU: setting up cplb tables with memory protection\n");
+
+#ifdef CONFIG_BFIN_EXTMEM_ICACHEABLE
+ i_cache = CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
+#endif
+
+#ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
+ d_cache = CPLB_L1_CHBL;
+#ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH
+ d_cache |= CPLB_L1_AOW | CPLB_WT;
+#endif
+#endif
+
+ i_d = i_i = 0;
+
+ /* Set up the zero page. */
+ dcplb_tbl[cpu][i_d].addr = 0;
+ dcplb_tbl[cpu][i_d++].data = SDRAM_OOPS | PAGE_SIZE_1KB;
+
+ icplb_tbl[cpu][i_i].addr = 0;
+ icplb_tbl[cpu][i_i++].data = CPLB_VALID | i_cache | CPLB_USER_RD | PAGE_SIZE_1KB;
+
+ /* Cover kernel memory with 4M pages. */
+ addr = 0;
+ d_data = d_cache | CPLB_SUPV_WR | CPLB_VALID | PAGE_SIZE_4MB | CPLB_DIRTY;
+ i_data = i_cache | CPLB_VALID | CPLB_PORTPRIO | PAGE_SIZE_4MB;
+
+ for (; addr < memory_start; addr += 4 * 1024 * 1024) {
+ dcplb_tbl[cpu][i_d].addr = addr;
+ dcplb_tbl[cpu][i_d++].data = d_data;
+ icplb_tbl[cpu][i_i].addr = addr;
+ icplb_tbl[cpu][i_i++].data = i_data | (addr == 0 ? CPLB_USER_RD : 0);
+ }
+
+#ifdef CONFIG_ROMKERNEL
+ /* Cover kernel XIP flash area */
+ addr = CONFIG_ROM_BASE & ~(4 * 1024 * 1024 - 1);
+ dcplb_tbl[cpu][i_d].addr = addr;
+ dcplb_tbl[cpu][i_d++].data = d_data | CPLB_USER_RD;
+ icplb_tbl[cpu][i_i].addr = addr;
+ icplb_tbl[cpu][i_i++].data = i_data | CPLB_USER_RD;
+#endif
+
+ /* Cover L1 memory. One 4M area for code and data each is enough. */
+#if L1_DATA_A_LENGTH > 0 || L1_DATA_B_LENGTH > 0
+ dcplb_tbl[cpu][i_d].addr = get_l1_data_a_start_cpu(cpu);
+ dcplb_tbl[cpu][i_d++].data = L1_DMEMORY | PAGE_SIZE_4MB;
+#endif
+#if L1_CODE_LENGTH > 0
+ icplb_tbl[cpu][i_i].addr = get_l1_code_start_cpu(cpu);
+ icplb_tbl[cpu][i_i++].data = L1_IMEMORY | PAGE_SIZE_4MB;
+#endif
+
+ /* Cover L2 memory */
+#if L2_LENGTH > 0
+ dcplb_tbl[cpu][i_d].addr = L2_START;
+ dcplb_tbl[cpu][i_d++].data = L2_DMEMORY;
+ icplb_tbl[cpu][i_i].addr = L2_START;
+ icplb_tbl[cpu][i_i++].data = L2_IMEMORY;
+#endif
+
+ first_mask_dcplb = i_d;
+ first_switched_dcplb = i_d + (1 << page_mask_order);
+ first_switched_icplb = i_i;
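+	/*
+	 * The (1 << page_mask_order) slots skipped here are the "mask"
+	 * DCPLBs that set_mask_dcplbs() points at the current process's
+	 * rwx bitmap; e.g. page_mask_order == 1 would pin two such
+	 * entries (values here are illustrative).
+	 */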
+
+ while (i_d < MAX_CPLBS)
+ dcplb_tbl[cpu][i_d++].data = 0;
+ while (i_i < MAX_CPLBS)
+ icplb_tbl[cpu][i_i++].data = 0;
+}
+
+void __init generate_cplb_tables_all(void)
+{
+}
diff --git a/arch/blackfin/kernel/cplb-mpu/cplbmgr.c b/arch/blackfin/kernel/cplb-mpu/cplbmgr.c
new file mode 100644
index 00000000..b56bd851
--- /dev/null
+++ b/arch/blackfin/kernel/cplb-mpu/cplbmgr.c
@@ -0,0 +1,379 @@
+/*
+ * Blackfin CPLB exception handling for when the MPU is on
+ *
+ * Copyright 2008-2009 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/module.h>
+#include <linux/mm.h>
+
+#include <asm/blackfin.h>
+#include <asm/cacheflush.h>
+#include <asm/cplb.h>
+#include <asm/cplbinit.h>
+#include <asm/mmu_context.h>
+
+/*
+ * WARNING
+ *
+ * This file is compiled with certain -ffixed-reg options. We have to
+ * make sure not to call any functions here that could clobber these
+ * registers.
+ */
+
+int page_mask_nelts;
+int page_mask_order;
+unsigned long *current_rwx_mask[NR_CPUS];
+
+int nr_dcplb_miss[NR_CPUS], nr_icplb_miss[NR_CPUS];
+int nr_icplb_supv_miss[NR_CPUS], nr_dcplb_prot[NR_CPUS];
+int nr_cplb_flush[NR_CPUS];
+
+#ifdef CONFIG_EXCPT_IRQ_SYSC_L1
+#define MGR_ATTR __attribute__((l1_text))
+#else
+#define MGR_ATTR
+#endif
+
+/*
+ * Given the contents of the status register, return the index of the
+ * CPLB that caused the fault.
+ */
+static inline int faulting_cplb_index(int status)
+{
+ int signbits = __builtin_bfin_norm_fr1x32(status & 0xFFFF);
+ return 30 - signbits;
+}
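+
+/*
+ * Worked example: the low 16 status bits are a one-hot mask of the
+ * faulting entry, and the norm builtin returns the shift count needed
+ * to normalize the value. For status = 0x0020 (entry 5) that count is
+ * 30 - 5 = 25, so 30 - signbits recovers index 5.
+ */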
+
+/*
+ * Given the contents of the status register and the DCPLB_DATA contents,
+ * return true if a write access should be permitted.
+ */
+static inline int write_permitted(int status, unsigned long data)
+{
+ if (status & FAULT_USERSUPV)
+ return !!(data & CPLB_SUPV_WR);
+ else
+ return !!(data & CPLB_USER_WR);
+}
+
+/* Counters to implement round-robin replacement. */
+static int icplb_rr_index[NR_CPUS], dcplb_rr_index[NR_CPUS];
+
+/*
+ * Find an ICPLB entry to be evicted and return its index.
+ */
+MGR_ATTR static int evict_one_icplb(unsigned int cpu)
+{
+ int i;
+ for (i = first_switched_icplb; i < MAX_CPLBS; i++)
+ if ((icplb_tbl[cpu][i].data & CPLB_VALID) == 0)
+ return i;
+ i = first_switched_icplb + icplb_rr_index[cpu];
+ if (i >= MAX_CPLBS) {
+ i -= MAX_CPLBS - first_switched_icplb;
+ icplb_rr_index[cpu] -= MAX_CPLBS - first_switched_icplb;
+ }
+ icplb_rr_index[cpu]++;
+ return i;
+}
+
+MGR_ATTR static int evict_one_dcplb(unsigned int cpu)
+{
+ int i;
+ for (i = first_switched_dcplb; i < MAX_CPLBS; i++)
+ if ((dcplb_tbl[cpu][i].data & CPLB_VALID) == 0)
+ return i;
+ i = first_switched_dcplb + dcplb_rr_index[cpu];
+ if (i >= MAX_CPLBS) {
+ i -= MAX_CPLBS - first_switched_dcplb;
+ dcplb_rr_index[cpu] -= MAX_CPLBS - first_switched_dcplb;
+ }
+ dcplb_rr_index[cpu]++;
+ return i;
+}
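+
+/*
+ * Wrap-around, by example (sizes are hypothetical): with
+ * first_switched_dcplb == 8 and MAX_CPLBS == 16, an rr_index of 9
+ * gives i = 17, which rewinds to entry 9 and pulls rr_index back to 1
+ * before the post-increment -- eviction thus cycles through the
+ * switched entries 8..15.
+ */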
+
+MGR_ATTR static noinline int dcplb_miss(unsigned int cpu)
+{
+ unsigned long addr = bfin_read_DCPLB_FAULT_ADDR();
+ int status = bfin_read_DCPLB_STATUS();
+ unsigned long *mask;
+ int idx;
+ unsigned long d_data;
+
+ nr_dcplb_miss[cpu]++;
+
+ d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
+#ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
+ if (bfin_addr_dcacheable(addr)) {
+ d_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
+# ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH
+ d_data |= CPLB_L1_AOW | CPLB_WT;
+# endif
+ }
+#endif
+
+ if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
+ addr = L2_START;
+ d_data = L2_DMEMORY;
+ } else if (addr >= physical_mem_end) {
+ if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE) {
+#if defined(CONFIG_ROMFS_ON_MTD) && defined(CONFIG_MTD_ROM)
+ mask = current_rwx_mask[cpu];
+ if (mask) {
+ int page = (addr - (ASYNC_BANK0_BASE - _ramend)) >> PAGE_SHIFT;
+ int idx = page >> 5;
+ int bit = 1 << (page & 31);
+
+ if (mask[idx] & bit)
+ d_data |= CPLB_USER_RD;
+ }
+#endif
+ } else if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH
+ && (status & (FAULT_RW | FAULT_USERSUPV)) == FAULT_USERSUPV) {
+ addr &= ~(1 * 1024 * 1024 - 1);
+ d_data &= ~PAGE_SIZE_4KB;
+ d_data |= PAGE_SIZE_1MB;
+ } else
+ return CPLB_PROT_VIOL;
+ } else if (addr >= _ramend) {
+ d_data |= CPLB_USER_RD | CPLB_USER_WR;
+ if (reserved_mem_dcache_on)
+ d_data |= CPLB_L1_CHBL;
+ } else {
+ mask = current_rwx_mask[cpu];
+ if (mask) {
+ int page = addr >> PAGE_SHIFT;
+ int idx = page >> 5;
+ int bit = 1 << (page & 31);
+
+ if (mask[idx] & bit)
+ d_data |= CPLB_USER_RD;
+
+ mask += page_mask_nelts;
+ if (mask[idx] & bit)
+ d_data |= CPLB_USER_WR;
+ }
+ }
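+	/*
+	 * Bitmap layout used above: one bit per 4K page, 32 pages per
+	 * word, with the read bits first and the write bits starting
+	 * page_mask_nelts words later. E.g. a fault on page 70 tests
+	 * bit 6 of word 2 in each half.
+	 */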
+ idx = evict_one_dcplb(cpu);
+
+ addr &= PAGE_MASK;
+ dcplb_tbl[cpu][idx].addr = addr;
+ dcplb_tbl[cpu][idx].data = d_data;
+
+ _disable_dcplb();
+ bfin_write32(DCPLB_DATA0 + idx * 4, d_data);
+ bfin_write32(DCPLB_ADDR0 + idx * 4, addr);
+ _enable_dcplb();
+
+ return 0;
+}
+
+MGR_ATTR static noinline int icplb_miss(unsigned int cpu)
+{
+ unsigned long addr = bfin_read_ICPLB_FAULT_ADDR();
+ int status = bfin_read_ICPLB_STATUS();
+ int idx;
+ unsigned long i_data;
+
+ nr_icplb_miss[cpu]++;
+
+ /* If inside the uncached DMA region, fault. */
+ if (addr >= _ramend - DMA_UNCACHED_REGION && addr < _ramend)
+ return CPLB_PROT_VIOL;
+
+ if (status & FAULT_USERSUPV)
+ nr_icplb_supv_miss[cpu]++;
+
+ /*
+ * First, try to find a CPLB that matches this address. If we
+ * find one, then the fact that we're in the miss handler means
+ * that the instruction crosses a page boundary.
+ */
+ for (idx = first_switched_icplb; idx < MAX_CPLBS; idx++) {
+ if (icplb_tbl[cpu][idx].data & CPLB_VALID) {
+ unsigned long this_addr = icplb_tbl[cpu][idx].addr;
+ if (this_addr <= addr && this_addr + PAGE_SIZE > addr) {
+ addr += PAGE_SIZE;
+ break;
+ }
+ }
+ }
+
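+	/*
+	 * Example of the boundary case handled above: a 32-bit
+	 * instruction starting 2 bytes before the end of a mapped 4K
+	 * page spills into the next page, so addr is bumped by
+	 * PAGE_SIZE and the CPLB installed below covers that following
+	 * page instead.
+	 */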
+ i_data = CPLB_VALID | CPLB_PORTPRIO | PAGE_SIZE_4KB;
+
+#ifdef CONFIG_BFIN_EXTMEM_ICACHEABLE
+ /*
+ * Normal RAM, and possibly the reserved memory area, are
+ * cacheable.
+ */
+ if (addr < _ramend ||
+ (addr < physical_mem_end && reserved_mem_icache_on))
+ i_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
+#endif
+
+ if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
+ addr = L2_START;
+ i_data = L2_IMEMORY;
+ } else if (addr >= physical_mem_end) {
+ if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE) {
+ if (!(status & FAULT_USERSUPV)) {
+ unsigned long *mask = current_rwx_mask[cpu];
+
+ if (mask) {
+ int page = (addr - (ASYNC_BANK0_BASE - _ramend)) >> PAGE_SHIFT;
+ int idx = page >> 5;
+ int bit = 1 << (page & 31);
+
+ mask += 2 * page_mask_nelts;
+ if (mask[idx] & bit)
+ i_data |= CPLB_USER_RD;
+ }
+ }
+ } else if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH
+ && (status & FAULT_USERSUPV)) {
+ addr &= ~(1 * 1024 * 1024 - 1);
+ i_data &= ~PAGE_SIZE_4KB;
+ i_data |= PAGE_SIZE_1MB;
+ } else
+ return CPLB_PROT_VIOL;
+ } else if (addr >= _ramend) {
+ i_data |= CPLB_USER_RD;
+ if (reserved_mem_icache_on)
+ i_data |= CPLB_L1_CHBL;
+ } else {
+ /*
+ * Two cases to distinguish - a supervisor access must
+ * necessarily be for a module page; we grant it
+ * unconditionally (could do better here in the future).
+ * Otherwise, check the x bitmap of the current process.
+ */
+ if (!(status & FAULT_USERSUPV)) {
+ unsigned long *mask = current_rwx_mask[cpu];
+
+ if (mask) {
+ int page = addr >> PAGE_SHIFT;
+ int idx = page >> 5;
+ int bit = 1 << (page & 31);
+
+ mask += 2 * page_mask_nelts;
+ if (mask[idx] & bit)
+ i_data |= CPLB_USER_RD;
+ }
+ }
+ }
+ idx = evict_one_icplb(cpu);
+ addr &= PAGE_MASK;
+ icplb_tbl[cpu][idx].addr = addr;
+ icplb_tbl[cpu][idx].data = i_data;
+
+ _disable_icplb();
+ bfin_write32(ICPLB_DATA0 + idx * 4, i_data);
+ bfin_write32(ICPLB_ADDR0 + idx * 4, addr);
+ _enable_icplb();
+
+ return 0;
+}
+
+MGR_ATTR static noinline int dcplb_protection_fault(unsigned int cpu)
+{
+ int status = bfin_read_DCPLB_STATUS();
+
+ nr_dcplb_prot[cpu]++;
+
+ if (status & FAULT_RW) {
+ int idx = faulting_cplb_index(status);
+ unsigned long data = dcplb_tbl[cpu][idx].data;
+ if (!(data & CPLB_WT) && !(data & CPLB_DIRTY) &&
+ write_permitted(status, data)) {
+ data |= CPLB_DIRTY;
+ dcplb_tbl[cpu][idx].data = data;
+ bfin_write32(DCPLB_DATA0 + idx * 4, data);
+ return 0;
+ }
+ }
+ return CPLB_PROT_VIOL;
+}
+
+MGR_ATTR int cplb_hdr(int seqstat, struct pt_regs *regs)
+{
+ int cause = seqstat & 0x3f;
+ unsigned int cpu = raw_smp_processor_id();
+ switch (cause) {
+ case 0x23:
+ return dcplb_protection_fault(cpu);
+ case 0x2C:
+ return icplb_miss(cpu);
+ case 0x26:
+ return dcplb_miss(cpu);
+ default:
+ return 1;
+ }
+}
+
+void flush_switched_cplbs(unsigned int cpu)
+{
+ int i;
+ unsigned long flags;
+
+ nr_cplb_flush[cpu]++;
+
+ flags = hard_local_irq_save();
+ _disable_icplb();
+ for (i = first_switched_icplb; i < MAX_CPLBS; i++) {
+ icplb_tbl[cpu][i].data = 0;
+ bfin_write32(ICPLB_DATA0 + i * 4, 0);
+ }
+ _enable_icplb();
+
+ _disable_dcplb();
+ for (i = first_switched_dcplb; i < MAX_CPLBS; i++) {
+ dcplb_tbl[cpu][i].data = 0;
+ bfin_write32(DCPLB_DATA0 + i * 4, 0);
+ }
+ _enable_dcplb();
+	hard_local_irq_restore(flags);
+}
+
+void set_mask_dcplbs(unsigned long *masks, unsigned int cpu)
+{
+ int i;
+ unsigned long addr = (unsigned long)masks;
+ unsigned long d_data;
+ unsigned long flags;
+
+ if (!masks) {
+ current_rwx_mask[cpu] = masks;
+ return;
+ }
+
+ flags = hard_local_irq_save();
+ current_rwx_mask[cpu] = masks;
+
+ if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
+ addr = L2_START;
+ d_data = L2_DMEMORY;
+ } else {
+ d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
+#ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
+ d_data |= CPLB_L1_CHBL;
+# ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH
+ d_data |= CPLB_L1_AOW | CPLB_WT;
+# endif
+#endif
+ }
+
+ _disable_dcplb();
+ for (i = first_mask_dcplb; i < first_switched_dcplb; i++) {
+ dcplb_tbl[cpu][i].addr = addr;
+ dcplb_tbl[cpu][i].data = d_data;
+ bfin_write32(DCPLB_DATA0 + i * 4, d_data);
+ bfin_write32(DCPLB_ADDR0 + i * 4, addr);
+ addr += PAGE_SIZE;
+ }
+ _enable_dcplb();
+ hard_local_irq_restore(flags);
+}
diff --git a/arch/blackfin/kernel/cplb-nompu/Makefile b/arch/blackfin/kernel/cplb-nompu/Makefile
new file mode 100644
index 00000000..394d0b1b
--- /dev/null
+++ b/arch/blackfin/kernel/cplb-nompu/Makefile
@@ -0,0 +1,10 @@
+#
+# arch/blackfin/kernel/cplb-nompu/Makefile
+#
+
+obj-y := cplbinit.o cplbmgr.o
+
+CFLAGS_cplbmgr.o := -ffixed-I0 -ffixed-I1 -ffixed-I2 -ffixed-I3 \
+ -ffixed-L0 -ffixed-L1 -ffixed-L2 -ffixed-L3 \
+ -ffixed-M0 -ffixed-M1 -ffixed-M2 -ffixed-M3 \
+ -ffixed-B0 -ffixed-B1 -ffixed-B2 -ffixed-B3
diff --git a/arch/blackfin/kernel/cplb-nompu/cplbinit.c b/arch/blackfin/kernel/cplb-nompu/cplbinit.c
new file mode 100644
index 00000000..886e0001
--- /dev/null
+++ b/arch/blackfin/kernel/cplb-nompu/cplbinit.c
@@ -0,0 +1,194 @@
+/*
+ * Blackfin CPLB initialization
+ *
+ * Copyright 2007-2009 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/module.h>
+
+#include <asm/blackfin.h>
+#include <asm/cacheflush.h>
+#include <asm/cplb.h>
+#include <asm/cplbinit.h>
+#include <asm/mem_map.h>
+
+struct cplb_entry icplb_tbl[NR_CPUS][MAX_CPLBS] PDT_ATTR;
+struct cplb_entry dcplb_tbl[NR_CPUS][MAX_CPLBS] PDT_ATTR;
+
+int first_switched_icplb PDT_ATTR;
+int first_switched_dcplb PDT_ATTR;
+
+struct cplb_boundary dcplb_bounds[9] PDT_ATTR;
+struct cplb_boundary icplb_bounds[9] PDT_ATTR;
+
+int icplb_nr_bounds PDT_ATTR;
+int dcplb_nr_bounds PDT_ATTR;
+
+void __init generate_cplb_tables_cpu(unsigned int cpu)
+{
+ int i_d, i_i;
+ unsigned long addr;
+
+ struct cplb_entry *d_tbl = dcplb_tbl[cpu];
+ struct cplb_entry *i_tbl = icplb_tbl[cpu];
+
+ printk(KERN_INFO "NOMPU: setting up cplb tables\n");
+
+ i_d = i_i = 0;
+
+#ifdef CONFIG_DEBUG_HUNT_FOR_ZERO
+ /* Set up the zero page. */
+ d_tbl[i_d].addr = 0;
+ d_tbl[i_d++].data = SDRAM_OOPS | PAGE_SIZE_1KB;
+ i_tbl[i_i].addr = 0;
+ i_tbl[i_i++].data = SDRAM_OOPS | PAGE_SIZE_1KB;
+#endif
+
+ /* Cover kernel memory with 4M pages. */
+ addr = 0;
+
+ for (; addr < memory_start; addr += 4 * 1024 * 1024) {
+ d_tbl[i_d].addr = addr;
+ d_tbl[i_d++].data = SDRAM_DGENERIC | PAGE_SIZE_4MB;
+ i_tbl[i_i].addr = addr;
+ i_tbl[i_i++].data = SDRAM_IGENERIC | PAGE_SIZE_4MB;
+ }
+
+#ifdef CONFIG_ROMKERNEL
+ /* Cover kernel XIP flash area */
+ addr = CONFIG_ROM_BASE & ~(4 * 1024 * 1024 - 1);
+ d_tbl[i_d].addr = addr;
+ d_tbl[i_d++].data = SDRAM_DGENERIC | PAGE_SIZE_4MB;
+ i_tbl[i_i].addr = addr;
+ i_tbl[i_i++].data = SDRAM_IGENERIC | PAGE_SIZE_4MB;
+#endif
+
+ /* Cover L1 memory. One 4M area for code and data each is enough. */
+ if (cpu == 0) {
+ if (L1_DATA_A_LENGTH || L1_DATA_B_LENGTH) {
+ d_tbl[i_d].addr = L1_DATA_A_START;
+ d_tbl[i_d++].data = L1_DMEMORY | PAGE_SIZE_4MB;
+ }
+ i_tbl[i_i].addr = L1_CODE_START;
+ i_tbl[i_i++].data = L1_IMEMORY | PAGE_SIZE_4MB;
+ }
+#ifdef CONFIG_SMP
+ else {
+ if (L1_DATA_A_LENGTH || L1_DATA_B_LENGTH) {
+ d_tbl[i_d].addr = COREB_L1_DATA_A_START;
+ d_tbl[i_d++].data = L1_DMEMORY | PAGE_SIZE_4MB;
+ }
+ i_tbl[i_i].addr = COREB_L1_CODE_START;
+ i_tbl[i_i++].data = L1_IMEMORY | PAGE_SIZE_4MB;
+ }
+#endif
+ first_switched_dcplb = i_d;
+ first_switched_icplb = i_i;
+
+ BUG_ON(first_switched_dcplb > MAX_CPLBS);
+ BUG_ON(first_switched_icplb > MAX_CPLBS);
+
+ while (i_d < MAX_CPLBS)
+ d_tbl[i_d++].data = 0;
+ while (i_i < MAX_CPLBS)
+ i_tbl[i_i++].data = 0;
+}
+
+void __init generate_cplb_tables_all(void)
+{
+ unsigned long uncached_end;
+ int i_d, i_i;
+
+ i_d = 0;
+ /* Normal RAM, including MTD FS. */
+#ifdef CONFIG_MTD_UCLINUX
+ uncached_end = memory_mtd_start + mtd_size;
+#else
+ uncached_end = memory_end;
+#endif
+ /*
+ * if DMA uncached is less than 1MB, mark the 1MB chunk as uncached
+ * so that we don't have to use 4kB pages and cause CPLB thrashing
+ */
+ if ((DMA_UNCACHED_REGION >= 1 * 1024 * 1024) || !DMA_UNCACHED_REGION ||
+ ((_ramend - uncached_end) >= 1 * 1024 * 1024))
+ dcplb_bounds[i_d].eaddr = uncached_end;
+ else
+ dcplb_bounds[i_d].eaddr = uncached_end & ~(1 * 1024 * 1024 - 1);
+ dcplb_bounds[i_d++].data = SDRAM_DGENERIC;
+ /* DMA uncached region. */
+ if (DMA_UNCACHED_REGION) {
+ dcplb_bounds[i_d].eaddr = _ramend;
+ dcplb_bounds[i_d++].data = SDRAM_DNON_CHBL;
+ }
+ if (_ramend != physical_mem_end) {
+ /* Reserved memory. */
+ dcplb_bounds[i_d].eaddr = physical_mem_end;
+ dcplb_bounds[i_d++].data = (reserved_mem_dcache_on ?
+ SDRAM_DGENERIC : SDRAM_DNON_CHBL);
+ }
+ /* Addressing hole up to the async bank. */
+ dcplb_bounds[i_d].eaddr = ASYNC_BANK0_BASE;
+ dcplb_bounds[i_d++].data = 0;
+ /* ASYNC banks. */
+ dcplb_bounds[i_d].eaddr = ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE;
+ dcplb_bounds[i_d++].data = SDRAM_EBIU;
+ /* Addressing hole up to BootROM. */
+ dcplb_bounds[i_d].eaddr = BOOT_ROM_START;
+ dcplb_bounds[i_d++].data = 0;
+ /* BootROM -- largest one should be less than 1 meg. */
+ dcplb_bounds[i_d].eaddr = BOOT_ROM_START + (1 * 1024 * 1024);
+ dcplb_bounds[i_d++].data = SDRAM_DGENERIC;
+ if (L2_LENGTH) {
+ /* Addressing hole up to L2 SRAM. */
+ dcplb_bounds[i_d].eaddr = L2_START;
+ dcplb_bounds[i_d++].data = 0;
+ /* L2 SRAM. */
+ dcplb_bounds[i_d].eaddr = L2_START + L2_LENGTH;
+ dcplb_bounds[i_d++].data = L2_DMEMORY;
+ }
+ dcplb_nr_bounds = i_d;
+ BUG_ON(dcplb_nr_bounds > ARRAY_SIZE(dcplb_bounds));
+
+ i_i = 0;
+ /* Normal RAM, including MTD FS. */
+ icplb_bounds[i_i].eaddr = uncached_end;
+ icplb_bounds[i_i++].data = SDRAM_IGENERIC;
+ if (_ramend != physical_mem_end) {
+ /* DMA uncached region. */
+ if (DMA_UNCACHED_REGION) {
+ /* Normally this hole is caught by the async below. */
+ icplb_bounds[i_i].eaddr = _ramend;
+ icplb_bounds[i_i++].data = 0;
+ }
+ /* Reserved memory. */
+ icplb_bounds[i_i].eaddr = physical_mem_end;
+ icplb_bounds[i_i++].data = (reserved_mem_icache_on ?
+ SDRAM_IGENERIC : SDRAM_INON_CHBL);
+ }
+ /* Addressing hole up to the async bank. */
+ icplb_bounds[i_i].eaddr = ASYNC_BANK0_BASE;
+ icplb_bounds[i_i++].data = 0;
+ /* ASYNC banks. */
+ icplb_bounds[i_i].eaddr = ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE;
+ icplb_bounds[i_i++].data = SDRAM_EBIU;
+ /* Addressing hole up to BootROM. */
+ icplb_bounds[i_i].eaddr = BOOT_ROM_START;
+ icplb_bounds[i_i++].data = 0;
+ /* BootROM -- largest one should be less than 1 meg. */
+ icplb_bounds[i_i].eaddr = BOOT_ROM_START + (1 * 1024 * 1024);
+ icplb_bounds[i_i++].data = SDRAM_IGENERIC;
+
+ if (L2_LENGTH) {
+ /* Addressing hole up to L2 SRAM. */
+ icplb_bounds[i_i].eaddr = L2_START;
+ icplb_bounds[i_i++].data = 0;
+ /* L2 SRAM. */
+ icplb_bounds[i_i].eaddr = L2_START + L2_LENGTH;
+ icplb_bounds[i_i++].data = L2_IMEMORY;
+ }
+ icplb_nr_bounds = i_i;
+ BUG_ON(icplb_nr_bounds > ARRAY_SIZE(icplb_bounds));
+}
diff --git a/arch/blackfin/kernel/cplb-nompu/cplbmgr.c b/arch/blackfin/kernel/cplb-nompu/cplbmgr.c
new file mode 100644
index 00000000..5b88861d
--- /dev/null
+++ b/arch/blackfin/kernel/cplb-nompu/cplbmgr.c
@@ -0,0 +1,202 @@
+/*
+ * Based on: arch/blackfin/kernel/cplb-mpu/cplbmgr.c
+ * Author: Michael McTernan <mmcternan@airvana.com>
+ *
+ * Description: CPLB miss handler.
+ *
+ * Modified:
+ * Copyright 2008 Airvana Inc.
+ * Copyright 2008-2009 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later
+ */
+
+#include <linux/kernel.h>
+#include <asm/blackfin.h>
+#include <asm/cplbinit.h>
+#include <asm/cplb.h>
+#include <asm/mmu_context.h>
+#include <asm/traps.h>
+
+/*
+ * WARNING
+ *
+ * This file is compiled with certain -ffixed-reg options. We have to
+ * make sure not to call any functions here that could clobber these
+ * registers.
+ */
+
+int nr_dcplb_miss[NR_CPUS], nr_icplb_miss[NR_CPUS];
+int nr_dcplb_supv_miss[NR_CPUS], nr_icplb_supv_miss[NR_CPUS];
+int nr_cplb_flush[NR_CPUS], nr_dcplb_prot[NR_CPUS];
+
+#ifdef CONFIG_EXCPT_IRQ_SYSC_L1
+#define MGR_ATTR __attribute__((l1_text))
+#else
+#define MGR_ATTR
+#endif
+
+static inline void write_dcplb_data(int cpu, int idx, unsigned long data,
+ unsigned long addr)
+{
+ _disable_dcplb();
+ bfin_write32(DCPLB_DATA0 + idx * 4, data);
+ bfin_write32(DCPLB_ADDR0 + idx * 4, addr);
+ _enable_dcplb();
+
+#ifdef CONFIG_CPLB_INFO
+ dcplb_tbl[cpu][idx].addr = addr;
+ dcplb_tbl[cpu][idx].data = data;
+#endif
+}
+
+static inline void write_icplb_data(int cpu, int idx, unsigned long data,
+ unsigned long addr)
+{
+ _disable_icplb();
+ bfin_write32(ICPLB_DATA0 + idx * 4, data);
+ bfin_write32(ICPLB_ADDR0 + idx * 4, addr);
+ _enable_icplb();
+
+#ifdef CONFIG_CPLB_INFO
+ icplb_tbl[cpu][idx].addr = addr;
+ icplb_tbl[cpu][idx].data = data;
+#endif
+}
+
+/* Counters to implement round-robin replacement. */
+static int icplb_rr_index[NR_CPUS] PDT_ATTR;
+static int dcplb_rr_index[NR_CPUS] PDT_ATTR;
+
+/*
+ * Find an ICPLB entry to be evicted and return its index.
+ */
+static int evict_one_icplb(int cpu)
+{
+ int i = first_switched_icplb + icplb_rr_index[cpu];
+ if (i >= MAX_CPLBS) {
+ i -= MAX_CPLBS - first_switched_icplb;
+ icplb_rr_index[cpu] -= MAX_CPLBS - first_switched_icplb;
+ }
+ icplb_rr_index[cpu]++;
+ return i;
+}
+
+static int evict_one_dcplb(int cpu)
+{
+ int i = first_switched_dcplb + dcplb_rr_index[cpu];
+ if (i >= MAX_CPLBS) {
+ i -= MAX_CPLBS - first_switched_dcplb;
+ dcplb_rr_index[cpu] -= MAX_CPLBS - first_switched_dcplb;
+ }
+ dcplb_rr_index[cpu]++;
+ return i;
+}
+
+MGR_ATTR static int icplb_miss(int cpu)
+{
+ unsigned long addr = bfin_read_ICPLB_FAULT_ADDR();
+ int status = bfin_read_ICPLB_STATUS();
+ int idx;
+ unsigned long i_data, base, addr1, eaddr;
+
+ nr_icplb_miss[cpu]++;
+ if (unlikely(status & FAULT_USERSUPV))
+ nr_icplb_supv_miss[cpu]++;
+
+ base = 0;
+ idx = 0;
+ do {
+ eaddr = icplb_bounds[idx].eaddr;
+ if (addr < eaddr)
+ break;
+ base = eaddr;
+ } while (++idx < icplb_nr_bounds);
+
+ if (unlikely(idx == icplb_nr_bounds))
+ return CPLB_NO_ADDR_MATCH;
+
+ i_data = icplb_bounds[idx].data;
+ if (unlikely(i_data == 0))
+ return CPLB_NO_ADDR_MATCH;
+
+ addr1 = addr & ~(SIZE_4M - 1);
+ addr &= ~(SIZE_1M - 1);
+ i_data |= PAGE_SIZE_1MB;
+ if (addr1 >= base && (addr1 + SIZE_4M) <= eaddr) {
+ /*
+ * This works because
+ * (PAGE_SIZE_4MB & PAGE_SIZE_1MB) == PAGE_SIZE_1MB.
+ */
+ i_data |= PAGE_SIZE_4MB;
+ addr = addr1;
+ }
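+	/*
+	 * Sizing example (addresses are hypothetical): for a fault at
+	 * 0x00345678 inside a bound of [0, 0x02000000), addr1 is
+	 * 0x00000000 and addr1 + 4M fits below eaddr, so one 4MB CPLB
+	 * at 0 is installed instead of a 1MB one at 0x00300000.
+	 */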
+
+ /* Pick entry to evict */
+ idx = evict_one_icplb(cpu);
+
+ write_icplb_data(cpu, idx, i_data, addr);
+
+ return CPLB_RELOADED;
+}
+
+MGR_ATTR static int dcplb_miss(int cpu)
+{
+ unsigned long addr = bfin_read_DCPLB_FAULT_ADDR();
+ int status = bfin_read_DCPLB_STATUS();
+ int idx;
+ unsigned long d_data, base, addr1, eaddr;
+
+ nr_dcplb_miss[cpu]++;
+ if (unlikely(status & FAULT_USERSUPV))
+ nr_dcplb_supv_miss[cpu]++;
+
+ base = 0;
+ idx = 0;
+ do {
+ eaddr = dcplb_bounds[idx].eaddr;
+ if (addr < eaddr)
+ break;
+ base = eaddr;
+ } while (++idx < dcplb_nr_bounds);
+
+ if (unlikely(idx == dcplb_nr_bounds))
+ return CPLB_NO_ADDR_MATCH;
+
+ d_data = dcplb_bounds[idx].data;
+ if (unlikely(d_data == 0))
+ return CPLB_NO_ADDR_MATCH;
+
+ addr1 = addr & ~(SIZE_4M - 1);
+ addr &= ~(SIZE_1M - 1);
+ d_data |= PAGE_SIZE_1MB;
+ if (addr1 >= base && (addr1 + SIZE_4M) <= eaddr) {
+ /*
+ * This works because
+ * (PAGE_SIZE_4MB & PAGE_SIZE_1MB) == PAGE_SIZE_1MB.
+ */
+ d_data |= PAGE_SIZE_4MB;
+ addr = addr1;
+ }
+
+ /* Pick entry to evict */
+ idx = evict_one_dcplb(cpu);
+
+ write_dcplb_data(cpu, idx, d_data, addr);
+
+ return CPLB_RELOADED;
+}
+
+MGR_ATTR int cplb_hdr(int seqstat, struct pt_regs *regs)
+{
+ int cause = seqstat & 0x3f;
+ unsigned int cpu = raw_smp_processor_id();
+ switch (cause) {
+ case VEC_CPLB_I_M:
+ return icplb_miss(cpu);
+ case VEC_CPLB_M:
+ return dcplb_miss(cpu);
+ default:
+ return CPLB_UNKNOWN_ERR;
+ }
+}
diff --git a/arch/blackfin/kernel/cplbinfo.c b/arch/blackfin/kernel/cplbinfo.c
new file mode 100644
index 00000000..0bdaa517
--- /dev/null
+++ b/arch/blackfin/kernel/cplbinfo.c
@@ -0,0 +1,177 @@
+/*
+ * arch/blackfin/kernel/cplbinfo.c - display CPLB status
+ *
+ * Copyright 2004-2008 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/ctype.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+
+#include <asm/cplbinit.h>
+#include <asm/blackfin.h>
+
+static char const page_strtbl[][3] = { "1K", "4K", "1M", "4M" };
+#define page(flags) (((flags) & 0x30000) >> 16)
+#define strpage(flags) page_strtbl[page(flags)]
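+
+/*
+ * The page-size field sits in CPLB data bits 17:16. Example: a data
+ * word of 0x00031005 gives page(data) == 3, so strpage() yields "4M".
+ */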
+
+struct cplbinfo_data {
+ loff_t pos;
+ char cplb_type;
+ u32 mem_control;
+ struct cplb_entry *tbl;
+ int switched;
+};
+
+static void cplbinfo_print_header(struct seq_file *m)
+{
+ seq_printf(m, "Index\tAddress\t\tData\tSize\tU/RD\tU/WR\tS/WR\tSwitch\n");
+}
+
+static int cplbinfo_nomore(struct cplbinfo_data *cdata)
+{
+ return cdata->pos >= MAX_CPLBS;
+}
+
+static int cplbinfo_show(struct seq_file *m, void *p)
+{
+ struct cplbinfo_data *cdata;
+ unsigned long data, addr;
+ loff_t pos;
+
+ cdata = p;
+ pos = cdata->pos;
+ addr = cdata->tbl[pos].addr;
+ data = cdata->tbl[pos].data;
+
+ seq_printf(m,
+ "%d\t0x%08lx\t%05lx\t%s\t%c\t%c\t%c\t%c\n",
+ (int)pos, addr, data, strpage(data),
+ (data & CPLB_USER_RD) ? 'Y' : 'N',
+ (data & CPLB_USER_WR) ? 'Y' : 'N',
+ (data & CPLB_SUPV_WR) ? 'Y' : 'N',
+ pos < cdata->switched ? 'N' : 'Y');
+
+ return 0;
+}
+
+static void cplbinfo_seq_init(struct cplbinfo_data *cdata, unsigned int cpu)
+{
+ if (cdata->cplb_type == 'I') {
+ cdata->mem_control = bfin_read_IMEM_CONTROL();
+ cdata->tbl = icplb_tbl[cpu];
+ cdata->switched = first_switched_icplb;
+ } else {
+ cdata->mem_control = bfin_read_DMEM_CONTROL();
+ cdata->tbl = dcplb_tbl[cpu];
+ cdata->switched = first_switched_dcplb;
+ }
+}
+
+static void *cplbinfo_start(struct seq_file *m, loff_t *pos)
+{
+ struct cplbinfo_data *cdata = m->private;
+
+ if (!*pos) {
+ seq_printf(m, "%cCPLBs are %sabled: 0x%x\n", cdata->cplb_type,
+ (cdata->mem_control & ENDCPLB ? "en" : "dis"),
+ cdata->mem_control);
+ cplbinfo_print_header(m);
+ } else if (cplbinfo_nomore(cdata))
+ return NULL;
+
+ get_cpu();
+ return cdata;
+}
+
+static void *cplbinfo_next(struct seq_file *m, void *p, loff_t *pos)
+{
+ struct cplbinfo_data *cdata = p;
+ cdata->pos = ++(*pos);
+ if (cplbinfo_nomore(cdata))
+ return NULL;
+ else
+ return cdata;
+}
+
+static void cplbinfo_stop(struct seq_file *m, void *p)
+{
+ put_cpu();
+}
+
+static const struct seq_operations cplbinfo_sops = {
+ .start = cplbinfo_start,
+ .next = cplbinfo_next,
+ .stop = cplbinfo_stop,
+ .show = cplbinfo_show,
+};
+
+#define CPLBINFO_DCPLB_FLAG 0x80000000
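+
+/*
+ * The per-file proc data packs the CPU number and the D/I selector
+ * into one pointer-sized value; e.g. cpu 1's dcplb file stores
+ * (void *)(1 | CPLBINFO_DCPLB_FLAG) == (void *)0x80000001, which
+ * cplbinfo_open() below unpacks again.
+ */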
+
+static int cplbinfo_open(struct inode *inode, struct file *file)
+{
+ struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
+ char cplb_type;
+ unsigned int cpu;
+ int ret;
+ struct seq_file *m;
+ struct cplbinfo_data *cdata;
+
+ cpu = (unsigned int)pde->data;
+ cplb_type = cpu & CPLBINFO_DCPLB_FLAG ? 'D' : 'I';
+ cpu &= ~CPLBINFO_DCPLB_FLAG;
+
+ if (!cpu_online(cpu))
+ return -ENODEV;
+
+ ret = seq_open_private(file, &cplbinfo_sops, sizeof(*cdata));
+ if (ret)
+ return ret;
+ m = file->private_data;
+ cdata = m->private;
+
+ cdata->pos = 0;
+ cdata->cplb_type = cplb_type;
+ cplbinfo_seq_init(cdata, cpu);
+
+ return 0;
+}
+
+static const struct file_operations cplbinfo_fops = {
+ .open = cplbinfo_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_private,
+};
+
+static int __init cplbinfo_init(void)
+{
+ struct proc_dir_entry *cplb_dir, *cpu_dir;
+ char buf[10];
+ unsigned int cpu;
+
+ cplb_dir = proc_mkdir("cplbinfo", NULL);
+ if (!cplb_dir)
+ return -ENOMEM;
+
+ for_each_possible_cpu(cpu) {
+ sprintf(buf, "cpu%i", cpu);
+ cpu_dir = proc_mkdir(buf, cplb_dir);
+ if (!cpu_dir)
+ return -ENOMEM;
+
+ proc_create_data("icplb", S_IRUGO, cpu_dir, &cplbinfo_fops,
+ (void *)cpu);
+ proc_create_data("dcplb", S_IRUGO, cpu_dir, &cplbinfo_fops,
+ (void *)(cpu | CPLBINFO_DCPLB_FLAG));
+ }
+
+ return 0;
+}
+late_initcall(cplbinfo_init);
diff --git a/arch/blackfin/kernel/debug-mmrs.c b/arch/blackfin/kernel/debug-mmrs.c
new file mode 100644
index 00000000..92f66482
--- /dev/null
+++ b/arch/blackfin/kernel/debug-mmrs.c
@@ -0,0 +1,1889 @@
+/*
+ * debugfs interface to core/system MMRs
+ *
+ * Copyright 2007-2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later
+ */
+
+#include <linux/debugfs.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <asm/blackfin.h>
+#include <asm/gpio.h>
+#include <asm/gptimers.h>
+#include <asm/bfin_can.h>
+#include <asm/bfin_dma.h>
+#include <asm/bfin_ppi.h>
+#include <asm/bfin_serial.h>
+#include <asm/bfin5xx_spi.h>
+#include <asm/bfin_twi.h>
+
+/* Common code defines PORT_MUX on us, so redirect the MMR back locally */
+#ifdef BFIN_PORT_MUX
+#undef PORT_MUX
+#define PORT_MUX BFIN_PORT_MUX
+#endif
+
+#define _d(name, bits, addr, perms) debugfs_create_x##bits(name, perms, parent, (u##bits *)(addr))
+#define d(name, bits, addr) _d(name, bits, addr, S_IRUSR|S_IWUSR)
+#define d_RO(name, bits, addr) _d(name, bits, addr, S_IRUSR)
+#define d_WO(name, bits, addr) _d(name, bits, addr, S_IWUSR)
+
+#define D_RO(name, bits) d_RO(#name, bits, name)
+#define D_WO(name, bits) d_WO(#name, bits, name)
+#define D32(name) d(#name, 32, name)
+#define D16(name) d(#name, 16, name)
+
+#define REGS_OFF(peri, mmr) offsetof(struct bfin_##peri##_regs, mmr)
+#define __REGS(peri, sname, rname) \
+ do { \
+ struct bfin_##peri##_regs r; \
+ void *addr = (void *)(base + REGS_OFF(peri, rname)); \
+ strcpy(_buf, sname); \
+ if (sizeof(r.rname) == 2) \
+ debugfs_create_x16(buf, S_IRUSR|S_IWUSR, parent, addr); \
+ else \
+ debugfs_create_x32(buf, S_IRUSR|S_IWUSR, parent, addr); \
+ } while (0)
+#define REGS_STR_PFX(buf, pfx, num) \
+ ({ \
+ buf + (num >= 0 ? \
+ sprintf(buf, #pfx "%i_", num) : \
+ sprintf(buf, #pfx "_")); \
+ })
+#define REGS_STR_PFX_C(buf, pfx, num) \
+ ({ \
+ buf + (num >= 0 ? \
+ sprintf(buf, #pfx "%c_", 'A' + num) : \
+ sprintf(buf, #pfx "_")); \
+ })
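+
+/*
+ * Expansion example: D16(ATAPI_CONTROL) becomes
+ * debugfs_create_x16("ATAPI_CONTROL", S_IRUSR|S_IWUSR, parent,
+ * (u16 *)(ATAPI_CONTROL)), while REGS_STR_PFX(buf, CAN, 0) writes
+ * "CAN0_" into buf and returns a pointer just past it so __REGS()
+ * can append the register name in place.
+ */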
+
+/*
+ * Core registers (not memory mapped)
+ */
+extern u32 last_seqstat;
+
+static int debug_cclk_get(void *data, u64 *val)
+{
+ *val = get_cclk();
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(fops_debug_cclk, debug_cclk_get, NULL, "0x%08llx\n");
+
+static int debug_sclk_get(void *data, u64 *val)
+{
+ *val = get_sclk();
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(fops_debug_sclk, debug_sclk_get, NULL, "0x%08llx\n");
+
+#define DEFINE_SYSREG(sr, pre, post) \
+static int sysreg_##sr##_get(void *data, u64 *val) \
+{ \
+ unsigned long tmp; \
+ pre; \
+ __asm__ __volatile__("%0 = " #sr ";" : "=d"(tmp)); \
+ *val = tmp; \
+ return 0; \
+} \
+static int sysreg_##sr##_set(void *data, u64 val) \
+{ \
+ unsigned long tmp = val; \
+ __asm__ __volatile__(#sr " = %0;" : : "d"(tmp)); \
+ post; \
+ return 0; \
+} \
+DEFINE_SIMPLE_ATTRIBUTE(fops_sysreg_##sr, sysreg_##sr##_get, sysreg_##sr##_set, "0x%08llx\n")
+
+DEFINE_SYSREG(cycles, , );
+DEFINE_SYSREG(cycles2, __asm__ __volatile__("%0 = cycles;" : "=d"(tmp)), );
+DEFINE_SYSREG(emudat, , );
+DEFINE_SYSREG(seqstat, , );
+DEFINE_SYSREG(syscfg, , CSYNC());
+#define D_SYSREG(sr) debugfs_create_file(#sr, S_IRUSR|S_IWUSR, parent, NULL, &fops_sysreg_##sr)
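+
+/*
+ * For instance, DEFINE_SYSREG(syscfg, , CSYNC()) above generates a
+ * get/set pair built on "%0 = syscfg;" and "syscfg = %0;" moves, with
+ * a CSYNC after the write so it is committed before returning;
+ * D_SYSREG(syscfg) then exposes the pair as a debugfs file named
+ * "syscfg".
+ */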
+
+/*
+ * CAN
+ */
+#define CAN_OFF(mmr) REGS_OFF(can, mmr)
+#define __CAN(uname, lname) __REGS(can, #uname, lname)
+static void __init __maybe_unused
+bfin_debug_mmrs_can(struct dentry *parent, unsigned long base, int num)
+{
+ static struct dentry *am, *mb;
+ int i, j;
+ char buf[32], *_buf = REGS_STR_PFX(buf, CAN, num);
+
+ if (!am) {
+ am = debugfs_create_dir("am", parent);
+ mb = debugfs_create_dir("mb", parent);
+ }
+
+ __CAN(MC1, mc1);
+ __CAN(MD1, md1);
+ __CAN(TRS1, trs1);
+ __CAN(TRR1, trr1);
+ __CAN(TA1, ta1);
+ __CAN(AA1, aa1);
+ __CAN(RMP1, rmp1);
+ __CAN(RML1, rml1);
+ __CAN(MBTIF1, mbtif1);
+ __CAN(MBRIF1, mbrif1);
+ __CAN(MBIM1, mbim1);
+ __CAN(RFH1, rfh1);
+ __CAN(OPSS1, opss1);
+
+ __CAN(MC2, mc2);
+ __CAN(MD2, md2);
+ __CAN(TRS2, trs2);
+ __CAN(TRR2, trr2);
+ __CAN(TA2, ta2);
+ __CAN(AA2, aa2);
+ __CAN(RMP2, rmp2);
+ __CAN(RML2, rml2);
+ __CAN(MBTIF2, mbtif2);
+ __CAN(MBRIF2, mbrif2);
+ __CAN(MBIM2, mbim2);
+ __CAN(RFH2, rfh2);
+ __CAN(OPSS2, opss2);
+
+ __CAN(CLOCK, clock);
+ __CAN(TIMING, timing);
+ __CAN(DEBUG, debug);
+ __CAN(STATUS, status);
+ __CAN(CEC, cec);
+ __CAN(GIS, gis);
+ __CAN(GIM, gim);
+ __CAN(GIF, gif);
+ __CAN(CONTROL, control);
+ __CAN(INTR, intr);
+ __CAN(VERSION, version);
+ __CAN(MBTD, mbtd);
+ __CAN(EWR, ewr);
+ __CAN(ESR, esr);
+ /*__CAN(UCREG, ucreg); no longer exists */
+ __CAN(UCCNT, uccnt);
+ __CAN(UCRC, ucrc);
+ __CAN(UCCNF, uccnf);
+ __CAN(VERSION2, version2);
+
+ for (i = 0; i < 32; ++i) {
+ sprintf(_buf, "AM%02iL", i);
+ debugfs_create_x16(buf, S_IRUSR|S_IWUSR, am,
+ (u16 *)(base + CAN_OFF(msk[i].aml)));
+ sprintf(_buf, "AM%02iH", i);
+ debugfs_create_x16(buf, S_IRUSR|S_IWUSR, am,
+ (u16 *)(base + CAN_OFF(msk[i].amh)));
+
+ for (j = 0; j < 3; ++j) {
+ sprintf(_buf, "MB%02i_DATA%i", i, j);
+ debugfs_create_x16(buf, S_IRUSR|S_IWUSR, mb,
+ (u16 *)(base + CAN_OFF(chl[i].data[j*2])));
+ }
+ sprintf(_buf, "MB%02i_LENGTH", i);
+ debugfs_create_x16(buf, S_IRUSR|S_IWUSR, mb,
+ (u16 *)(base + CAN_OFF(chl[i].dlc)));
+ sprintf(_buf, "MB%02i_TIMESTAMP", i);
+ debugfs_create_x16(buf, S_IRUSR|S_IWUSR, mb,
+ (u16 *)(base + CAN_OFF(chl[i].tsv)));
+ sprintf(_buf, "MB%02i_ID0", i);
+ debugfs_create_x16(buf, S_IRUSR|S_IWUSR, mb,
+ (u16 *)(base + CAN_OFF(chl[i].id0)));
+ sprintf(_buf, "MB%02i_ID1", i);
+ debugfs_create_x16(buf, S_IRUSR|S_IWUSR, mb,
+ (u16 *)(base + CAN_OFF(chl[i].id1)));
+ }
+}
+#define CAN(num) bfin_debug_mmrs_can(parent, CAN##num##_MC1, num)
+
+/*
+ * DMA
+ */
+#define __DMA(uname, lname) __REGS(dma, #uname, lname)
+static void __init __maybe_unused
+bfin_debug_mmrs_dma(struct dentry *parent, unsigned long base, int num, char mdma, const char *pfx)
+{
+ char buf[32], *_buf;
+
+ if (mdma)
+ _buf = buf + sprintf(buf, "%s_%c%i_", pfx, mdma, num);
+ else
+ _buf = buf + sprintf(buf, "%s%i_", pfx, num);
+
+ __DMA(NEXT_DESC_PTR, next_desc_ptr);
+ __DMA(START_ADDR, start_addr);
+ __DMA(CONFIG, config);
+ __DMA(X_COUNT, x_count);
+ __DMA(X_MODIFY, x_modify);
+ __DMA(Y_COUNT, y_count);
+ __DMA(Y_MODIFY, y_modify);
+ __DMA(CURR_DESC_PTR, curr_desc_ptr);
+ __DMA(CURR_ADDR, curr_addr);
+ __DMA(IRQ_STATUS, irq_status);
+ if (strcmp(pfx, "IMDMA") != 0)
+ __DMA(PERIPHERAL_MAP, peripheral_map);
+ __DMA(CURR_X_COUNT, curr_x_count);
+ __DMA(CURR_Y_COUNT, curr_y_count);
+}
+#define _DMA(num, base, mdma, pfx) bfin_debug_mmrs_dma(parent, base, num, mdma, pfx "DMA")
+#define DMA(num) _DMA(num, DMA##num##_NEXT_DESC_PTR, 0, "")
+#define _MDMA(num, x) \
+ do { \
+ _DMA(num, x##DMA_D##num##_NEXT_DESC_PTR, 'D', #x); \
+ _DMA(num, x##DMA_S##num##_NEXT_DESC_PTR, 'S', #x); \
+ } while (0)
+#define MDMA(num) _MDMA(num, M)
+#define IMDMA(num) _MDMA(num, IM)
+
+/*
+ * EPPI
+ */
+#define __EPPI(uname, lname) __REGS(eppi, #uname, lname)
+static void __init __maybe_unused
+bfin_debug_mmrs_eppi(struct dentry *parent, unsigned long base, int num)
+{
+ char buf[32], *_buf = REGS_STR_PFX(buf, EPPI, num);
+ __EPPI(STATUS, status);
+ __EPPI(HCOUNT, hcount);
+ __EPPI(HDELAY, hdelay);
+ __EPPI(VCOUNT, vcount);
+ __EPPI(VDELAY, vdelay);
+ __EPPI(FRAME, frame);
+ __EPPI(LINE, line);
+ __EPPI(CLKDIV, clkdiv);
+ __EPPI(CONTROL, control);
+ __EPPI(FS1W_HBL, fs1w_hbl);
+ __EPPI(FS1P_AVPL, fs1p_avpl);
+ __EPPI(FS2W_LVB, fs2w_lvb);
+ __EPPI(FS2P_LAVF, fs2p_lavf);
+ __EPPI(CLIP, clip);
+}
+#define EPPI(num) bfin_debug_mmrs_eppi(parent, EPPI##num##_STATUS, num)
+
+/*
+ * General Purpose Timers
+ */
+#define __GPTIMER(uname, lname) __REGS(gptimer, #uname, lname)
+static void __init __maybe_unused
+bfin_debug_mmrs_gptimer(struct dentry *parent, unsigned long base, int num)
+{
+ char buf[32], *_buf = REGS_STR_PFX(buf, TIMER, num);
+ __GPTIMER(CONFIG, config);
+ __GPTIMER(COUNTER, counter);
+ __GPTIMER(PERIOD, period);
+ __GPTIMER(WIDTH, width);
+}
+#define GPTIMER(num) bfin_debug_mmrs_gptimer(parent, TIMER##num##_CONFIG, num)
+
+#define GPTIMER_GROUP_OFF(mmr) REGS_OFF(gptimer_group, mmr)
+#define __GPTIMER_GROUP(uname, lname) __REGS(gptimer_group, #uname, lname)
+static void __init __maybe_unused
+bfin_debug_mmrs_gptimer_group(struct dentry *parent, unsigned long base, int num)
+{
+ char buf[32], *_buf;
+
+ if (num == -1) {
+ _buf = buf + sprintf(buf, "TIMER_");
+ __GPTIMER_GROUP(ENABLE, enable);
+ __GPTIMER_GROUP(DISABLE, disable);
+ __GPTIMER_GROUP(STATUS, status);
+ } else {
+ /* These MMRs are a bit odd as the group # is a suffix */
+ _buf = buf + sprintf(buf, "TIMER_ENABLE%i", num);
+ d(buf, 16, base + GPTIMER_GROUP_OFF(enable));
+
+ _buf = buf + sprintf(buf, "TIMER_DISABLE%i", num);
+ d(buf, 16, base + GPTIMER_GROUP_OFF(disable));
+
+ _buf = buf + sprintf(buf, "TIMER_STATUS%i", num);
+ d(buf, 32, base + GPTIMER_GROUP_OFF(status));
+ }
+}
+#define GPTIMER_GROUP(mmr, num) bfin_debug_mmrs_gptimer_group(parent, mmr, num)
+
+/*
+ * Handshake MDMA
+ */
+#define __HMDMA(uname, lname) __REGS(hmdma, #uname, lname)
+static void __init __maybe_unused
+bfin_debug_mmrs_hmdma(struct dentry *parent, unsigned long base, int num)
+{
+ char buf[32], *_buf = REGS_STR_PFX(buf, HMDMA, num);
+ __HMDMA(CONTROL, control);
+ __HMDMA(ECINIT, ecinit);
+ __HMDMA(BCINIT, bcinit);
+ __HMDMA(ECURGENT, ecurgent);
+ __HMDMA(ECOVERFLOW, ecoverflow);
+ __HMDMA(ECOUNT, ecount);
+ __HMDMA(BCOUNT, bcount);
+}
+#define HMDMA(num) bfin_debug_mmrs_hmdma(parent, HMDMA##num##_CONTROL, num)
+
+/*
+ * Peripheral Interrupts (PINT/GPIO)
+ */
+#ifdef PINT0_MASK_SET
+#define __PINT(uname, lname) __REGS(pint, #uname, lname)
+static void __init __maybe_unused
+bfin_debug_mmrs_pint(struct dentry *parent, unsigned long base, int num)
+{
+ char buf[32], *_buf = REGS_STR_PFX(buf, PINT, num);
+ __PINT(MASK_SET, mask_set);
+ __PINT(MASK_CLEAR, mask_clear);
+ __PINT(REQUEST, request);
+ __PINT(ASSIGN, assign);
+ __PINT(EDGE_SET, edge_set);
+ __PINT(EDGE_CLEAR, edge_clear);
+ __PINT(INVERT_SET, invert_set);
+ __PINT(INVERT_CLEAR, invert_clear);
+ __PINT(PINSTATE, pinstate);
+ __PINT(LATCH, latch);
+}
+#define PINT(num) bfin_debug_mmrs_pint(parent, PINT##num##_MASK_SET, num)
+#endif
+
+/*
+ * Port/GPIO
+ */
+#define bfin_gpio_regs gpio_port_t
+#define __PORT(uname, lname) __REGS(gpio, #uname, lname)
+static void __init __maybe_unused
+bfin_debug_mmrs_port(struct dentry *parent, unsigned long base, int num)
+{
+ char buf[32], *_buf;
+#ifdef __ADSPBF54x__
+ _buf = REGS_STR_PFX_C(buf, PORT, num);
+ __PORT(FER, port_fer);
+ __PORT(SET, data_set);
+ __PORT(CLEAR, data_clear);
+ __PORT(DIR_SET, dir_set);
+ __PORT(DIR_CLEAR, dir_clear);
+ __PORT(INEN, inen);
+ __PORT(MUX, port_mux);
+#else
+ _buf = buf + sprintf(buf, "PORT%cIO_", num);
+ __PORT(CLEAR, data_clear);
+ __PORT(SET, data_set);
+ __PORT(TOGGLE, toggle);
+ __PORT(MASKA, maska);
+ __PORT(MASKA_CLEAR, maska_clear);
+ __PORT(MASKA_SET, maska_set);
+ __PORT(MASKA_TOGGLE, maska_toggle);
+ __PORT(MASKB, maskb);
+ __PORT(MASKB_CLEAR, maskb_clear);
+ __PORT(MASKB_SET, maskb_set);
+ __PORT(MASKB_TOGGLE, maskb_toggle);
+ __PORT(DIR, dir);
+ __PORT(POLAR, polar);
+ __PORT(EDGE, edge);
+ __PORT(BOTH, both);
+ __PORT(INEN, inen);
+#endif
+ _buf[-1] = '\0';
+ d(buf, 16, base + REGS_OFF(gpio, data));
+}
+#define PORT(base, num) bfin_debug_mmrs_port(parent, base, num)
+
+/*
+ * PPI
+ */
+#define __PPI(uname, lname) __REGS(ppi, #uname, lname)
+static void __init __maybe_unused
+bfin_debug_mmrs_ppi(struct dentry *parent, unsigned long base, int num)
+{
+ char buf[32], *_buf = REGS_STR_PFX(buf, PPI, num);
+ __PPI(CONTROL, control);
+ __PPI(STATUS, status);
+ __PPI(COUNT, count);
+ __PPI(DELAY, delay);
+ __PPI(FRAME, frame);
+}
+#define PPI(num) bfin_debug_mmrs_ppi(parent, PPI##num##_CONTROL, num)
+
+/*
+ * SPI
+ */
+#define __SPI(uname, lname) __REGS(spi, #uname, lname)
+static void __init __maybe_unused
+bfin_debug_mmrs_spi(struct dentry *parent, unsigned long base, int num)
+{
+ char buf[32], *_buf = REGS_STR_PFX(buf, SPI, num);
+ __SPI(CTL, ctl);
+ __SPI(FLG, flg);
+ __SPI(STAT, stat);
+ __SPI(TDBR, tdbr);
+ __SPI(RDBR, rdbr);
+ __SPI(BAUD, baud);
+ __SPI(SHADOW, shadow);
+}
+#define SPI(num) bfin_debug_mmrs_spi(parent, SPI##num##_REGBASE, num)
+
+/*
+ * SPORT
+ */
+static inline int sport_width(void *mmr)
+{
+ unsigned long lmmr = (unsigned long)mmr;
+ if ((lmmr & 0xff) == 0x10)
+ /* SPORT#_TX has 0x10 offset -> SPORT#_TCR2 has 0x04 offset */
+ lmmr -= 0xc;
+ else
+ /* SPORT#_RX has 0x18 offset -> SPORT#_RCR2 has 0x24 offset */
+ lmmr += 0xc;
+ /* extract SLEN field from control register 2 and add 1 */
+ return (bfin_read16(lmmr) & 0x1f) + 1;
+}
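+/*
+ * Offset walk, by example: for SPORT0_TX (offset 0x10) the code backs
+ * up 0xc to SPORT0_TCR2 (offset 0x04); an SLEN field of 15 there means
+ * 16-bit words, so the accessors below use 16-bit MMR I/O for that
+ * FIFO.
+ */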
+static int sport_set(void *mmr, u64 val)
+{
+ unsigned long flags;
+ local_irq_save(flags);
+ if (sport_width(mmr) <= 16)
+ bfin_write16(mmr, val);
+ else
+ bfin_write32(mmr, val);
+ local_irq_restore(flags);
+ return 0;
+}
+static int sport_get(void *mmr, u64 *val)
+{
+ unsigned long flags;
+ local_irq_save(flags);
+ if (sport_width(mmr) <= 16)
+ *val = bfin_read16(mmr);
+ else
+ *val = bfin_read32(mmr);
+ local_irq_restore(flags);
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(fops_sport, sport_get, sport_set, "0x%08llx\n");
+/*DEFINE_SIMPLE_ATTRIBUTE(fops_sport_ro, sport_get, NULL, "0x%08llx\n");*/
+DEFINE_SIMPLE_ATTRIBUTE(fops_sport_wo, NULL, sport_set, "0x%08llx\n");
+#define SPORT_OFF(mmr) (SPORT0_##mmr - SPORT0_TCR1)
+#define _D_SPORT(name, perms, fops) \
+ do { \
+ strcpy(_buf, #name); \
+ debugfs_create_file(buf, perms, parent, (void *)(base + SPORT_OFF(name)), fops); \
+ } while (0)
+#define __SPORT_RW(name) _D_SPORT(name, S_IRUSR|S_IWUSR, &fops_sport)
+#define __SPORT_RO(name) _D_SPORT(name, S_IRUSR, &fops_sport_ro)
+#define __SPORT_WO(name) _D_SPORT(name, S_IWUSR, &fops_sport_wo)
+#define __SPORT(name, bits) \
+ do { \
+ strcpy(_buf, #name); \
+ debugfs_create_x##bits(buf, S_IRUSR|S_IWUSR, parent, (u##bits *)(base + SPORT_OFF(name))); \
+ } while (0)
+static void __init __maybe_unused
+bfin_debug_mmrs_sport(struct dentry *parent, unsigned long base, int num)
+{
+ char buf[32], *_buf = REGS_STR_PFX(buf, SPORT, num);
+ __SPORT(CHNL, 16);
+ __SPORT(MCMC1, 16);
+ __SPORT(MCMC2, 16);
+ __SPORT(MRCS0, 32);
+ __SPORT(MRCS1, 32);
+ __SPORT(MRCS2, 32);
+ __SPORT(MRCS3, 32);
+ __SPORT(MTCS0, 32);
+ __SPORT(MTCS1, 32);
+ __SPORT(MTCS2, 32);
+ __SPORT(MTCS3, 32);
+ __SPORT(RCLKDIV, 16);
+ __SPORT(RCR1, 16);
+ __SPORT(RCR2, 16);
+ __SPORT(RFSDIV, 16);
+ __SPORT_RW(RX);
+ __SPORT(STAT, 16);
+ __SPORT(TCLKDIV, 16);
+ __SPORT(TCR1, 16);
+ __SPORT(TCR2, 16);
+ __SPORT(TFSDIV, 16);
+ __SPORT_WO(TX);
+}
+#define SPORT(num) bfin_debug_mmrs_sport(parent, SPORT##num##_TCR1, num)
+
+/*
+ * TWI
+ */
+#define __TWI(uname, lname) __REGS(twi, #uname, lname)
+static void __init __maybe_unused
+bfin_debug_mmrs_twi(struct dentry *parent, unsigned long base, int num)
+{
+ char buf[32], *_buf = REGS_STR_PFX(buf, TWI, num);
+ __TWI(CLKDIV, clkdiv);
+ __TWI(CONTROL, control);
+ __TWI(SLAVE_CTL, slave_ctl);
+ __TWI(SLAVE_STAT, slave_stat);
+ __TWI(SLAVE_ADDR, slave_addr);
+ __TWI(MASTER_CTL, master_ctl);
+ __TWI(MASTER_STAT, master_stat);
+ __TWI(MASTER_ADDR, master_addr);
+ __TWI(INT_STAT, int_stat);
+ __TWI(INT_MASK, int_mask);
+ __TWI(FIFO_CTL, fifo_ctl);
+ __TWI(FIFO_STAT, fifo_stat);
+ __TWI(XMT_DATA8, xmt_data8);
+ __TWI(XMT_DATA16, xmt_data16);
+ __TWI(RCV_DATA8, rcv_data8);
+ __TWI(RCV_DATA16, rcv_data16);
+}
+#define TWI(num) bfin_debug_mmrs_twi(parent, TWI##num##_CLKDIV, num)
+
+/*
+ * UART
+ */
+#define __UART(uname, lname) __REGS(uart, #uname, lname)
+static void __init __maybe_unused
+bfin_debug_mmrs_uart(struct dentry *parent, unsigned long base, int num)
+{
+ char buf[32], *_buf = REGS_STR_PFX(buf, UART, num);
+#ifdef BFIN_UART_BF54X_STYLE
+ __UART(DLL, dll);
+ __UART(DLH, dlh);
+ __UART(GCTL, gctl);
+ __UART(LCR, lcr);
+ __UART(MCR, mcr);
+ __UART(LSR, lsr);
+ __UART(MSR, msr);
+ __UART(SCR, scr);
+ __UART(IER_SET, ier_set);
+ __UART(IER_CLEAR, ier_clear);
+ __UART(THR, thr);
+ __UART(RBR, rbr);
+#else
+ __UART(DLL, dll);
+ __UART(THR, thr);
+ __UART(RBR, rbr);
+ __UART(DLH, dlh);
+ __UART(IER, ier);
+ __UART(IIR, iir);
+ __UART(LCR, lcr);
+ __UART(MCR, mcr);
+ __UART(LSR, lsr);
+ __UART(MSR, msr);
+ __UART(SCR, scr);
+ __UART(GCTL, gctl);
+#endif
+}
+#define UART(num) bfin_debug_mmrs_uart(parent, UART##num##_DLL, num)
+
+/*
+ * The actual debugfs generation
+ */
+static struct dentry *debug_mmrs_dentry;
+
+static int __init bfin_debug_mmrs_init(void)
+{
+ struct dentry *top, *parent;
+
+ pr_info("debug-mmrs: setting up Blackfin MMR debugfs\n");
+
+ top = debugfs_create_dir("blackfin", NULL);
+ if (top == NULL)
+ return -1;
+
+ parent = debugfs_create_dir("core_regs", top);
+ debugfs_create_file("cclk", S_IRUSR, parent, NULL, &fops_debug_cclk);
+ debugfs_create_file("sclk", S_IRUSR, parent, NULL, &fops_debug_sclk);
+ debugfs_create_x32("last_seqstat", S_IRUSR, parent, &last_seqstat);
+ D_SYSREG(cycles);
+ D_SYSREG(cycles2);
+ D_SYSREG(emudat);
+ D_SYSREG(seqstat);
+ D_SYSREG(syscfg);
+
+ /* Core MMRs */
+ parent = debugfs_create_dir("ctimer", top);
+ D32(TCNTL);
+ D32(TCOUNT);
+ D32(TPERIOD);
+ D32(TSCALE);
+
+ parent = debugfs_create_dir("cec", top);
+ D32(EVT0);
+ D32(EVT1);
+ D32(EVT2);
+ D32(EVT3);
+ D32(EVT4);
+ D32(EVT5);
+ D32(EVT6);
+ D32(EVT7);
+ D32(EVT8);
+ D32(EVT9);
+ D32(EVT10);
+ D32(EVT11);
+ D32(EVT12);
+ D32(EVT13);
+ D32(EVT14);
+ D32(EVT15);
+ D32(EVT_OVERRIDE);
+ D32(IMASK);
+ D32(IPEND);
+ D32(ILAT);
+ D32(IPRIO);
+
+ parent = debugfs_create_dir("debug", top);
+ D32(DBGSTAT);
+ D32(DSPID);
+
+ parent = debugfs_create_dir("mmu", top);
+ D32(SRAM_BASE_ADDRESS);
+ D32(DCPLB_ADDR0);
+ D32(DCPLB_ADDR10);
+ D32(DCPLB_ADDR11);
+ D32(DCPLB_ADDR12);
+ D32(DCPLB_ADDR13);
+ D32(DCPLB_ADDR14);
+ D32(DCPLB_ADDR15);
+ D32(DCPLB_ADDR1);
+ D32(DCPLB_ADDR2);
+ D32(DCPLB_ADDR3);
+ D32(DCPLB_ADDR4);
+ D32(DCPLB_ADDR5);
+ D32(DCPLB_ADDR6);
+ D32(DCPLB_ADDR7);
+ D32(DCPLB_ADDR8);
+ D32(DCPLB_ADDR9);
+ D32(DCPLB_DATA0);
+ D32(DCPLB_DATA10);
+ D32(DCPLB_DATA11);
+ D32(DCPLB_DATA12);
+ D32(DCPLB_DATA13);
+ D32(DCPLB_DATA14);
+ D32(DCPLB_DATA15);
+ D32(DCPLB_DATA1);
+ D32(DCPLB_DATA2);
+ D32(DCPLB_DATA3);
+ D32(DCPLB_DATA4);
+ D32(DCPLB_DATA5);
+ D32(DCPLB_DATA6);
+ D32(DCPLB_DATA7);
+ D32(DCPLB_DATA8);
+ D32(DCPLB_DATA9);
+ D32(DCPLB_FAULT_ADDR);
+ D32(DCPLB_STATUS);
+ D32(DMEM_CONTROL);
+ D32(DTEST_COMMAND);
+ D32(DTEST_DATA0);
+ D32(DTEST_DATA1);
+
+ D32(ICPLB_ADDR0);
+ D32(ICPLB_ADDR1);
+ D32(ICPLB_ADDR2);
+ D32(ICPLB_ADDR3);
+ D32(ICPLB_ADDR4);
+ D32(ICPLB_ADDR5);
+ D32(ICPLB_ADDR6);
+ D32(ICPLB_ADDR7);
+ D32(ICPLB_ADDR8);
+ D32(ICPLB_ADDR9);
+ D32(ICPLB_ADDR10);
+ D32(ICPLB_ADDR11);
+ D32(ICPLB_ADDR12);
+ D32(ICPLB_ADDR13);
+ D32(ICPLB_ADDR14);
+ D32(ICPLB_ADDR15);
+ D32(ICPLB_DATA0);
+ D32(ICPLB_DATA1);
+ D32(ICPLB_DATA2);
+ D32(ICPLB_DATA3);
+ D32(ICPLB_DATA4);
+ D32(ICPLB_DATA5);
+ D32(ICPLB_DATA6);
+ D32(ICPLB_DATA7);
+ D32(ICPLB_DATA8);
+ D32(ICPLB_DATA9);
+ D32(ICPLB_DATA10);
+ D32(ICPLB_DATA11);
+ D32(ICPLB_DATA12);
+ D32(ICPLB_DATA13);
+ D32(ICPLB_DATA14);
+ D32(ICPLB_DATA15);
+ D32(ICPLB_FAULT_ADDR);
+ D32(ICPLB_STATUS);
+ D32(IMEM_CONTROL);
+ if (!ANOMALY_05000481) {
+ D32(ITEST_COMMAND);
+ D32(ITEST_DATA0);
+ D32(ITEST_DATA1);
+ }
+
+ parent = debugfs_create_dir("perf", top);
+ D32(PFCNTR0);
+ D32(PFCNTR1);
+ D32(PFCTL);
+
+ parent = debugfs_create_dir("trace", top);
+ D32(TBUF);
+ D32(TBUFCTL);
+ D32(TBUFSTAT);
+
+ parent = debugfs_create_dir("watchpoint", top);
+ D32(WPIACTL);
+ D32(WPIA0);
+ D32(WPIA1);
+ D32(WPIA2);
+ D32(WPIA3);
+ D32(WPIA4);
+ D32(WPIA5);
+ D32(WPIACNT0);
+ D32(WPIACNT1);
+ D32(WPIACNT2);
+ D32(WPIACNT3);
+ D32(WPIACNT4);
+ D32(WPIACNT5);
+ D32(WPDACTL);
+ D32(WPDA0);
+ D32(WPDA1);
+ D32(WPDACNT0);
+ D32(WPDACNT1);
+ D32(WPSTAT);
+
+ /* System MMRs */
+#ifdef ATAPI_CONTROL
+ parent = debugfs_create_dir("atapi", top);
+ D16(ATAPI_CONTROL);
+ D16(ATAPI_DEV_ADDR);
+ D16(ATAPI_DEV_RXBUF);
+ D16(ATAPI_DEV_TXBUF);
+ D16(ATAPI_DMA_TFRCNT);
+ D16(ATAPI_INT_MASK);
+ D16(ATAPI_INT_STATUS);
+ D16(ATAPI_LINE_STATUS);
+ D16(ATAPI_MULTI_TIM_0);
+ D16(ATAPI_MULTI_TIM_1);
+ D16(ATAPI_MULTI_TIM_2);
+ D16(ATAPI_PIO_TFRCNT);
+ D16(ATAPI_PIO_TIM_0);
+ D16(ATAPI_PIO_TIM_1);
+ D16(ATAPI_REG_TIM_0);
+ D16(ATAPI_SM_STATE);
+ D16(ATAPI_STATUS);
+ D16(ATAPI_TERMINATE);
+ D16(ATAPI_UDMAOUT_TFRCNT);
+ D16(ATAPI_ULTRA_TIM_0);
+ D16(ATAPI_ULTRA_TIM_1);
+ D16(ATAPI_ULTRA_TIM_2);
+ D16(ATAPI_ULTRA_TIM_3);
+ D16(ATAPI_UMAIN_TFRCNT);
+ D16(ATAPI_XFER_LEN);
+#endif
+
+#if defined(CAN_MC1) || defined(CAN0_MC1) || defined(CAN1_MC1)
+ parent = debugfs_create_dir("can", top);
+# ifdef CAN_MC1
+ bfin_debug_mmrs_can(parent, CAN_MC1, -1);
+# endif
+# ifdef CAN0_MC1
+ CAN(0);
+# endif
+# ifdef CAN1_MC1
+ CAN(1);
+# endif
+#endif
+
+#ifdef CNT_COMMAND
+ parent = debugfs_create_dir("counter", top);
+ D16(CNT_COMMAND);
+ D16(CNT_CONFIG);
+ D32(CNT_COUNTER);
+ D16(CNT_DEBOUNCE);
+ D16(CNT_IMASK);
+ D32(CNT_MAX);
+ D32(CNT_MIN);
+ D16(CNT_STATUS);
+#endif
+
+ parent = debugfs_create_dir("dmac", top);
+#ifdef DMAC_TC_CNT
+ D16(DMAC_TC_CNT);
+ D16(DMAC_TC_PER);
+#endif
+#ifdef DMAC0_TC_CNT
+ D16(DMAC0_TC_CNT);
+ D16(DMAC0_TC_PER);
+#endif
+#ifdef DMAC1_TC_CNT
+ D16(DMAC1_TC_CNT);
+ D16(DMAC1_TC_PER);
+#endif
+#ifdef DMAC1_PERIMUX
+ D16(DMAC1_PERIMUX);
+#endif
+
+#ifdef __ADSPBF561__
+ /* XXX: should rewrite the MMR map */
+# define DMA0_NEXT_DESC_PTR DMA2_0_NEXT_DESC_PTR
+# define DMA1_NEXT_DESC_PTR DMA2_1_NEXT_DESC_PTR
+# define DMA2_NEXT_DESC_PTR DMA2_2_NEXT_DESC_PTR
+# define DMA3_NEXT_DESC_PTR DMA2_3_NEXT_DESC_PTR
+# define DMA4_NEXT_DESC_PTR DMA2_4_NEXT_DESC_PTR
+# define DMA5_NEXT_DESC_PTR DMA2_5_NEXT_DESC_PTR
+# define DMA6_NEXT_DESC_PTR DMA2_6_NEXT_DESC_PTR
+# define DMA7_NEXT_DESC_PTR DMA2_7_NEXT_DESC_PTR
+# define DMA8_NEXT_DESC_PTR DMA2_8_NEXT_DESC_PTR
+# define DMA9_NEXT_DESC_PTR DMA2_9_NEXT_DESC_PTR
+# define DMA10_NEXT_DESC_PTR DMA2_10_NEXT_DESC_PTR
+# define DMA11_NEXT_DESC_PTR DMA2_11_NEXT_DESC_PTR
+# define DMA12_NEXT_DESC_PTR DMA1_0_NEXT_DESC_PTR
+# define DMA13_NEXT_DESC_PTR DMA1_1_NEXT_DESC_PTR
+# define DMA14_NEXT_DESC_PTR DMA1_2_NEXT_DESC_PTR
+# define DMA15_NEXT_DESC_PTR DMA1_3_NEXT_DESC_PTR
+# define DMA16_NEXT_DESC_PTR DMA1_4_NEXT_DESC_PTR
+# define DMA17_NEXT_DESC_PTR DMA1_5_NEXT_DESC_PTR
+# define DMA18_NEXT_DESC_PTR DMA1_6_NEXT_DESC_PTR
+# define DMA19_NEXT_DESC_PTR DMA1_7_NEXT_DESC_PTR
+# define DMA20_NEXT_DESC_PTR DMA1_8_NEXT_DESC_PTR
+# define DMA21_NEXT_DESC_PTR DMA1_9_NEXT_DESC_PTR
+# define DMA22_NEXT_DESC_PTR DMA1_10_NEXT_DESC_PTR
+# define DMA23_NEXT_DESC_PTR DMA1_11_NEXT_DESC_PTR
+#endif
+ parent = debugfs_create_dir("dma", top);
+ DMA(0);
+ DMA(1);
+ DMA(2);
+ DMA(3);
+ DMA(4);
+ DMA(5);
+ DMA(6);
+ DMA(7);
+#ifdef DMA8_NEXT_DESC_PTR
+ DMA(8);
+ DMA(9);
+ DMA(10);
+ DMA(11);
+#endif
+#ifdef DMA12_NEXT_DESC_PTR
+ DMA(12);
+ DMA(13);
+ DMA(14);
+ DMA(15);
+ DMA(16);
+ DMA(17);
+ DMA(18);
+ DMA(19);
+#endif
+#ifdef DMA20_NEXT_DESC_PTR
+ DMA(20);
+ DMA(21);
+ DMA(22);
+ DMA(23);
+#endif
+
+ parent = debugfs_create_dir("ebiu_amc", top);
+ D32(EBIU_AMBCTL0);
+ D32(EBIU_AMBCTL1);
+ D16(EBIU_AMGCTL);
+#ifdef EBIU_MBSCTL
+ D16(EBIU_MBSCTL);
+ D32(EBIU_ARBSTAT);
+ D32(EBIU_MODE);
+ D16(EBIU_FCTL);
+#endif
+
+#ifdef EBIU_SDGCTL
+ parent = debugfs_create_dir("ebiu_sdram", top);
+# ifdef __ADSPBF561__
+ D32(EBIU_SDBCTL);
+# else
+ D16(EBIU_SDBCTL);
+# endif
+ D32(EBIU_SDGCTL);
+ D16(EBIU_SDRRC);
+ D16(EBIU_SDSTAT);
+#endif
+
+#ifdef EBIU_DDRACCT
+ parent = debugfs_create_dir("ebiu_ddr", top);
+ D32(EBIU_DDRACCT);
+ D32(EBIU_DDRARCT);
+ D32(EBIU_DDRBRC0);
+ D32(EBIU_DDRBRC1);
+ D32(EBIU_DDRBRC2);
+ D32(EBIU_DDRBRC3);
+ D32(EBIU_DDRBRC4);
+ D32(EBIU_DDRBRC5);
+ D32(EBIU_DDRBRC6);
+ D32(EBIU_DDRBRC7);
+ D32(EBIU_DDRBWC0);
+ D32(EBIU_DDRBWC1);
+ D32(EBIU_DDRBWC2);
+ D32(EBIU_DDRBWC3);
+ D32(EBIU_DDRBWC4);
+ D32(EBIU_DDRBWC5);
+ D32(EBIU_DDRBWC6);
+ D32(EBIU_DDRBWC7);
+ D32(EBIU_DDRCTL0);
+ D32(EBIU_DDRCTL1);
+ D32(EBIU_DDRCTL2);
+ D32(EBIU_DDRCTL3);
+ D32(EBIU_DDRGC0);
+ D32(EBIU_DDRGC1);
+ D32(EBIU_DDRGC2);
+ D32(EBIU_DDRGC3);
+ D32(EBIU_DDRMCCL);
+ D32(EBIU_DDRMCEN);
+ D32(EBIU_DDRQUE);
+ D32(EBIU_DDRTACT);
+ D32(EBIU_ERRADD);
+ D16(EBIU_ERRMST);
+ D16(EBIU_RSTCTL);
+#endif
+
+#ifdef EMAC_ADDRHI
+ parent = debugfs_create_dir("emac", top);
+ D32(EMAC_ADDRHI);
+ D32(EMAC_ADDRLO);
+ D32(EMAC_FLC);
+ D32(EMAC_HASHHI);
+ D32(EMAC_HASHLO);
+ D32(EMAC_MMC_CTL);
+ D32(EMAC_MMC_RIRQE);
+ D32(EMAC_MMC_RIRQS);
+ D32(EMAC_MMC_TIRQE);
+ D32(EMAC_MMC_TIRQS);
+ D32(EMAC_OPMODE);
+ D32(EMAC_RXC_ALIGN);
+ D32(EMAC_RXC_ALLFRM);
+ D32(EMAC_RXC_ALLOCT);
+ D32(EMAC_RXC_BROAD);
+ D32(EMAC_RXC_DMAOVF);
+ D32(EMAC_RXC_EQ64);
+ D32(EMAC_RXC_FCS);
+ D32(EMAC_RXC_GE1024);
+ D32(EMAC_RXC_LNERRI);
+ D32(EMAC_RXC_LNERRO);
+ D32(EMAC_RXC_LONG);
+ D32(EMAC_RXC_LT1024);
+ D32(EMAC_RXC_LT128);
+ D32(EMAC_RXC_LT256);
+ D32(EMAC_RXC_LT512);
+ D32(EMAC_RXC_MACCTL);
+ D32(EMAC_RXC_MULTI);
+ D32(EMAC_RXC_OCTET);
+ D32(EMAC_RXC_OK);
+ D32(EMAC_RXC_OPCODE);
+ D32(EMAC_RXC_PAUSE);
+ D32(EMAC_RXC_SHORT);
+ D32(EMAC_RXC_TYPED);
+ D32(EMAC_RXC_UNICST);
+ D32(EMAC_RX_IRQE);
+ D32(EMAC_RX_STAT);
+ D32(EMAC_RX_STKY);
+ D32(EMAC_STAADD);
+ D32(EMAC_STADAT);
+ D32(EMAC_SYSCTL);
+ D32(EMAC_SYSTAT);
+ D32(EMAC_TXC_1COL);
+ D32(EMAC_TXC_ABORT);
+ D32(EMAC_TXC_ALLFRM);
+ D32(EMAC_TXC_ALLOCT);
+ D32(EMAC_TXC_BROAD);
+ D32(EMAC_TXC_CRSERR);
+ D32(EMAC_TXC_DEFER);
+ D32(EMAC_TXC_DMAUND);
+ D32(EMAC_TXC_EQ64);
+ D32(EMAC_TXC_GE1024);
+ D32(EMAC_TXC_GT1COL);
+ D32(EMAC_TXC_LATECL);
+ D32(EMAC_TXC_LT1024);
+ D32(EMAC_TXC_LT128);
+ D32(EMAC_TXC_LT256);
+ D32(EMAC_TXC_LT512);
+ D32(EMAC_TXC_MACCTL);
+ D32(EMAC_TXC_MULTI);
+ D32(EMAC_TXC_OCTET);
+ D32(EMAC_TXC_OK);
+ D32(EMAC_TXC_UNICST);
+ D32(EMAC_TXC_XS_COL);
+ D32(EMAC_TXC_XS_DFR);
+ D32(EMAC_TX_IRQE);
+ D32(EMAC_TX_STAT);
+ D32(EMAC_TX_STKY);
+ D32(EMAC_VLAN1);
+ D32(EMAC_VLAN2);
+ D32(EMAC_WKUP_CTL);
+ D32(EMAC_WKUP_FFCMD);
+ D32(EMAC_WKUP_FFCRC0);
+ D32(EMAC_WKUP_FFCRC1);
+ D32(EMAC_WKUP_FFMSK0);
+ D32(EMAC_WKUP_FFMSK1);
+ D32(EMAC_WKUP_FFMSK2);
+ D32(EMAC_WKUP_FFMSK3);
+ D32(EMAC_WKUP_FFOFF);
+# ifdef EMAC_PTP_ACCR
+ D32(EMAC_PTP_ACCR);
+ D32(EMAC_PTP_ADDEND);
+ D32(EMAC_PTP_ALARMHI);
+ D32(EMAC_PTP_ALARMLO);
+ D16(EMAC_PTP_CTL);
+ D32(EMAC_PTP_FOFF);
+ D32(EMAC_PTP_FV1);
+ D32(EMAC_PTP_FV2);
+ D32(EMAC_PTP_FV3);
+ D16(EMAC_PTP_ID_OFF);
+ D32(EMAC_PTP_ID_SNAP);
+ D16(EMAC_PTP_IE);
+ D16(EMAC_PTP_ISTAT);
+ D32(EMAC_PTP_OFFSET);
+ D32(EMAC_PTP_PPS_PERIOD);
+ D32(EMAC_PTP_PPS_STARTHI);
+ D32(EMAC_PTP_PPS_STARTLO);
+ D32(EMAC_PTP_RXSNAPHI);
+ D32(EMAC_PTP_RXSNAPLO);
+ D32(EMAC_PTP_TIMEHI);
+ D32(EMAC_PTP_TIMELO);
+ D32(EMAC_PTP_TXSNAPHI);
+ D32(EMAC_PTP_TXSNAPLO);
+# endif
+#endif
+
+#if defined(EPPI0_STATUS) || defined(EPPI1_STATUS) || defined(EPPI2_STATUS)
+ parent = debugfs_create_dir("eppi", top);
+# ifdef EPPI0_STATUS
+ EPPI(0);
+# endif
+# ifdef EPPI1_STATUS
+ EPPI(1);
+# endif
+# ifdef EPPI2_STATUS
+ EPPI(2);
+# endif
+#endif
+
+ parent = debugfs_create_dir("gptimer", top);
+#ifdef TIMER_ENABLE
+ GPTIMER_GROUP(TIMER_ENABLE, -1);
+#endif
+#ifdef TIMER_ENABLE0
+ GPTIMER_GROUP(TIMER_ENABLE0, 0);
+#endif
+#ifdef TIMER_ENABLE1
+ GPTIMER_GROUP(TIMER_ENABLE1, 1);
+#endif
+ /* XXX: Should convert BF561 MMR names */
+#ifdef TMRS4_DISABLE
+ GPTIMER_GROUP(TMRS4_ENABLE, 0);
+ GPTIMER_GROUP(TMRS8_ENABLE, 1);
+#endif
+ GPTIMER(0);
+ GPTIMER(1);
+ GPTIMER(2);
+#ifdef TIMER3_CONFIG
+ GPTIMER(3);
+ GPTIMER(4);
+ GPTIMER(5);
+ GPTIMER(6);
+ GPTIMER(7);
+#endif
+#ifdef TIMER8_CONFIG
+ GPTIMER(8);
+ GPTIMER(9);
+ GPTIMER(10);
+#endif
+#ifdef TIMER11_CONFIG
+ GPTIMER(11);
+#endif
+
+#ifdef HMDMA0_CONTROL
+ parent = debugfs_create_dir("hmdma", top);
+ HMDMA(0);
+ HMDMA(1);
+#endif
+
+#ifdef HOST_CONTROL
+ parent = debugfs_create_dir("hostdp", top);
+ D16(HOST_CONTROL);
+ D16(HOST_STATUS);
+ D16(HOST_TIMEOUT);
+#endif
+
+#ifdef IMDMA_S0_CONFIG
+ parent = debugfs_create_dir("imdma", top);
+ IMDMA(0);
+ IMDMA(1);
+#endif
+
+#ifdef KPAD_CTL
+ parent = debugfs_create_dir("keypad", top);
+ D16(KPAD_CTL);
+ D16(KPAD_PRESCALE);
+ D16(KPAD_MSEL);
+ D16(KPAD_ROWCOL);
+ D16(KPAD_STAT);
+ D16(KPAD_SOFTEVAL);
+#endif
+
+ parent = debugfs_create_dir("mdma", top);
+ MDMA(0);
+ MDMA(1);
+#ifdef MDMA_D2_CONFIG
+ MDMA(2);
+ MDMA(3);
+#endif
+
+#ifdef MXVR_CONFIG
+ parent = debugfs_create_dir("mxvr", top);
+ D16(MXVR_CONFIG);
+# ifdef MXVR_PLL_CTL_0
+ D32(MXVR_PLL_CTL_0);
+# endif
+ D32(MXVR_STATE_0);
+ D32(MXVR_STATE_1);
+ D32(MXVR_INT_STAT_0);
+ D32(MXVR_INT_STAT_1);
+ D32(MXVR_INT_EN_0);
+ D32(MXVR_INT_EN_1);
+ D16(MXVR_POSITION);
+ D16(MXVR_MAX_POSITION);
+ D16(MXVR_DELAY);
+ D16(MXVR_MAX_DELAY);
+ D32(MXVR_LADDR);
+ D16(MXVR_GADDR);
+ D32(MXVR_AADDR);
+ D32(MXVR_ALLOC_0);
+ D32(MXVR_ALLOC_1);
+ D32(MXVR_ALLOC_2);
+ D32(MXVR_ALLOC_3);
+ D32(MXVR_ALLOC_4);
+ D32(MXVR_ALLOC_5);
+ D32(MXVR_ALLOC_6);
+ D32(MXVR_ALLOC_7);
+ D32(MXVR_ALLOC_8);
+ D32(MXVR_ALLOC_9);
+ D32(MXVR_ALLOC_10);
+ D32(MXVR_ALLOC_11);
+ D32(MXVR_ALLOC_12);
+ D32(MXVR_ALLOC_13);
+ D32(MXVR_ALLOC_14);
+ D32(MXVR_SYNC_LCHAN_0);
+ D32(MXVR_SYNC_LCHAN_1);
+ D32(MXVR_SYNC_LCHAN_2);
+ D32(MXVR_SYNC_LCHAN_3);
+ D32(MXVR_SYNC_LCHAN_4);
+ D32(MXVR_SYNC_LCHAN_5);
+ D32(MXVR_SYNC_LCHAN_6);
+ D32(MXVR_SYNC_LCHAN_7);
+ D32(MXVR_DMA0_CONFIG);
+ D32(MXVR_DMA0_START_ADDR);
+ D16(MXVR_DMA0_COUNT);
+ D32(MXVR_DMA0_CURR_ADDR);
+ D16(MXVR_DMA0_CURR_COUNT);
+ D32(MXVR_DMA1_CONFIG);
+ D32(MXVR_DMA1_START_ADDR);
+ D16(MXVR_DMA1_COUNT);
+ D32(MXVR_DMA1_CURR_ADDR);
+ D16(MXVR_DMA1_CURR_COUNT);
+ D32(MXVR_DMA2_CONFIG);
+ D32(MXVR_DMA2_START_ADDR);
+ D16(MXVR_DMA2_COUNT);
+ D32(MXVR_DMA2_CURR_ADDR);
+ D16(MXVR_DMA2_CURR_COUNT);
+ D32(MXVR_DMA3_CONFIG);
+ D32(MXVR_DMA3_START_ADDR);
+ D16(MXVR_DMA3_COUNT);
+ D32(MXVR_DMA3_CURR_ADDR);
+ D16(MXVR_DMA3_CURR_COUNT);
+ D32(MXVR_DMA4_CONFIG);
+ D32(MXVR_DMA4_START_ADDR);
+ D16(MXVR_DMA4_COUNT);
+ D32(MXVR_DMA4_CURR_ADDR);
+ D16(MXVR_DMA4_CURR_COUNT);
+ D32(MXVR_DMA5_CONFIG);
+ D32(MXVR_DMA5_START_ADDR);
+ D16(MXVR_DMA5_COUNT);
+ D32(MXVR_DMA5_CURR_ADDR);
+ D16(MXVR_DMA5_CURR_COUNT);
+ D32(MXVR_DMA6_CONFIG);
+ D32(MXVR_DMA6_START_ADDR);
+ D16(MXVR_DMA6_COUNT);
+ D32(MXVR_DMA6_CURR_ADDR);
+ D16(MXVR_DMA6_CURR_COUNT);
+ D32(MXVR_DMA7_CONFIG);
+ D32(MXVR_DMA7_START_ADDR);
+ D16(MXVR_DMA7_COUNT);
+ D32(MXVR_DMA7_CURR_ADDR);
+ D16(MXVR_DMA7_CURR_COUNT);
+ D16(MXVR_AP_CTL);
+ D32(MXVR_APRB_START_ADDR);
+ D32(MXVR_APRB_CURR_ADDR);
+ D32(MXVR_APTB_START_ADDR);
+ D32(MXVR_APTB_CURR_ADDR);
+ D32(MXVR_CM_CTL);
+ D32(MXVR_CMRB_START_ADDR);
+ D32(MXVR_CMRB_CURR_ADDR);
+ D32(MXVR_CMTB_START_ADDR);
+ D32(MXVR_CMTB_CURR_ADDR);
+ D32(MXVR_RRDB_START_ADDR);
+ D32(MXVR_RRDB_CURR_ADDR);
+ D32(MXVR_PAT_DATA_0);
+ D32(MXVR_PAT_EN_0);
+ D32(MXVR_PAT_DATA_1);
+ D32(MXVR_PAT_EN_1);
+ D16(MXVR_FRAME_CNT_0);
+ D16(MXVR_FRAME_CNT_1);
+ D32(MXVR_ROUTING_0);
+ D32(MXVR_ROUTING_1);
+ D32(MXVR_ROUTING_2);
+ D32(MXVR_ROUTING_3);
+ D32(MXVR_ROUTING_4);
+ D32(MXVR_ROUTING_5);
+ D32(MXVR_ROUTING_6);
+ D32(MXVR_ROUTING_7);
+ D32(MXVR_ROUTING_8);
+ D32(MXVR_ROUTING_9);
+ D32(MXVR_ROUTING_10);
+ D32(MXVR_ROUTING_11);
+ D32(MXVR_ROUTING_12);
+ D32(MXVR_ROUTING_13);
+ D32(MXVR_ROUTING_14);
+# ifdef MXVR_PLL_CTL_1
+ D32(MXVR_PLL_CTL_1);
+# endif
+ D16(MXVR_BLOCK_CNT);
+# ifdef MXVR_CLK_CTL
+ D32(MXVR_CLK_CTL);
+# endif
+# ifdef MXVR_CDRPLL_CTL
+ D32(MXVR_CDRPLL_CTL);
+# endif
+# ifdef MXVR_FMPLL_CTL
+ D32(MXVR_FMPLL_CTL);
+# endif
+# ifdef MXVR_PIN_CTL
+ D16(MXVR_PIN_CTL);
+# endif
+# ifdef MXVR_SCLK_CNT
+ D16(MXVR_SCLK_CNT);
+# endif
+#endif
+
+#ifdef NFC_ADDR
+ parent = debugfs_create_dir("nfc", top);
+ D_WO(NFC_ADDR, 16);
+ D_WO(NFC_CMD, 16);
+ D_RO(NFC_COUNT, 16);
+ D16(NFC_CTL);
+ D_WO(NFC_DATA_RD, 16);
+ D_WO(NFC_DATA_WR, 16);
+ D_RO(NFC_ECC0, 16);
+ D_RO(NFC_ECC1, 16);
+ D_RO(NFC_ECC2, 16);
+ D_RO(NFC_ECC3, 16);
+ D16(NFC_IRQMASK);
+ D16(NFC_IRQSTAT);
+ D_WO(NFC_PGCTL, 16);
+ D_RO(NFC_READ, 16);
+ D16(NFC_RST);
+ D_RO(NFC_STAT, 16);
+#endif
+
+#ifdef OTP_CONTROL
+ parent = debugfs_create_dir("otp", top);
+ D16(OTP_CONTROL);
+ D16(OTP_BEN);
+ D16(OTP_STATUS);
+ D32(OTP_TIMING);
+ D32(OTP_DATA0);
+ D32(OTP_DATA1);
+ D32(OTP_DATA2);
+ D32(OTP_DATA3);
+#endif
+
+#ifdef PINT0_MASK_SET
+ parent = debugfs_create_dir("pint", top);
+ PINT(0);
+ PINT(1);
+ PINT(2);
+ PINT(3);
+#endif
+
+#ifdef PIXC_CTL
+ parent = debugfs_create_dir("pixc", top);
+ D16(PIXC_CTL);
+ D16(PIXC_PPL);
+ D16(PIXC_LPF);
+ D16(PIXC_AHSTART);
+ D16(PIXC_AHEND);
+ D16(PIXC_AVSTART);
+ D16(PIXC_AVEND);
+ D16(PIXC_ATRANSP);
+ D16(PIXC_BHSTART);
+ D16(PIXC_BHEND);
+ D16(PIXC_BVSTART);
+ D16(PIXC_BVEND);
+ D16(PIXC_BTRANSP);
+ D16(PIXC_INTRSTAT);
+ D32(PIXC_RYCON);
+ D32(PIXC_GUCON);
+ D32(PIXC_BVCON);
+ D32(PIXC_CCBIAS);
+ D32(PIXC_TC);
+#endif
+
+ parent = debugfs_create_dir("pll", top);
+ D16(PLL_CTL);
+ D16(PLL_DIV);
+ D16(PLL_LOCKCNT);
+ D16(PLL_STAT);
+ D16(VR_CTL);
+ D32(CHIPID); /* it's part of this hardware block */
+
+#if defined(PPI_CONTROL) || defined(PPI0_CONTROL) || defined(PPI1_CONTROL)
+ parent = debugfs_create_dir("ppi", top);
+# ifdef PPI_CONTROL
+ bfin_debug_mmrs_ppi(parent, PPI_CONTROL, -1);
+# endif
+# ifdef PPI0_CONTROL
+ PPI(0);
+# endif
+# ifdef PPI1_CONTROL
+ PPI(1);
+# endif
+#endif
+
+#ifdef PWM_CTRL
+ parent = debugfs_create_dir("pwm", top);
+ D16(PWM_CTRL);
+ D16(PWM_STAT);
+ D16(PWM_TM);
+ D16(PWM_DT);
+ D16(PWM_GATE);
+ D16(PWM_CHA);
+ D16(PWM_CHB);
+ D16(PWM_CHC);
+ D16(PWM_SEG);
+ D16(PWM_SYNCWT);
+ D16(PWM_CHAL);
+ D16(PWM_CHBL);
+ D16(PWM_CHCL);
+ D16(PWM_LSI);
+ D16(PWM_STAT2);
+#endif
+
+#ifdef RSI_CONFIG
+ parent = debugfs_create_dir("rsi", top);
+ D32(RSI_ARGUMENT);
+ D16(RSI_CEATA_CONTROL);
+ D16(RSI_CLK_CONTROL);
+ D16(RSI_COMMAND);
+ D16(RSI_CONFIG);
+ D16(RSI_DATA_CNT);
+ D16(RSI_DATA_CONTROL);
+ D16(RSI_DATA_LGTH);
+ D32(RSI_DATA_TIMER);
+ D16(RSI_EMASK);
+ D16(RSI_ESTAT);
+ D32(RSI_FIFO);
+ D16(RSI_FIFO_CNT);
+ D32(RSI_MASK0);
+ D32(RSI_MASK1);
+ D16(RSI_PID0);
+ D16(RSI_PID1);
+ D16(RSI_PID2);
+ D16(RSI_PID3);
+ D16(RSI_PID4);
+ D16(RSI_PID5);
+ D16(RSI_PID6);
+ D16(RSI_PID7);
+ D16(RSI_PWR_CONTROL);
+ D16(RSI_RD_WAIT_EN);
+ D32(RSI_RESPONSE0);
+ D32(RSI_RESPONSE1);
+ D32(RSI_RESPONSE2);
+ D32(RSI_RESPONSE3);
+ D16(RSI_RESP_CMD);
+ D32(RSI_STATUS);
+ D_WO(RSI_STATUSCL, 16);
+#endif
+
+#ifdef RTC_ALARM
+ parent = debugfs_create_dir("rtc", top);
+ D32(RTC_ALARM);
+ D16(RTC_ICTL);
+ D16(RTC_ISTAT);
+ D16(RTC_PREN);
+ D32(RTC_STAT);
+ D16(RTC_SWCNT);
+#endif
+
+#ifdef SDH_CFG
+ parent = debugfs_create_dir("sdh", top);
+ D32(SDH_ARGUMENT);
+ D16(SDH_CFG);
+ D16(SDH_CLK_CTL);
+ D16(SDH_COMMAND);
+ D_RO(SDH_DATA_CNT, 16);
+ D16(SDH_DATA_CTL);
+ D16(SDH_DATA_LGTH);
+ D32(SDH_DATA_TIMER);
+ D16(SDH_E_MASK);
+ D16(SDH_E_STATUS);
+ D32(SDH_FIFO);
+ D_RO(SDH_FIFO_CNT, 16);
+ D32(SDH_MASK0);
+ D32(SDH_MASK1);
+ D_RO(SDH_PID0, 16);
+ D_RO(SDH_PID1, 16);
+ D_RO(SDH_PID2, 16);
+ D_RO(SDH_PID3, 16);
+ D_RO(SDH_PID4, 16);
+ D_RO(SDH_PID5, 16);
+ D_RO(SDH_PID6, 16);
+ D_RO(SDH_PID7, 16);
+ D16(SDH_PWR_CTL);
+ D16(SDH_RD_WAIT_EN);
+ D_RO(SDH_RESPONSE0, 32);
+ D_RO(SDH_RESPONSE1, 32);
+ D_RO(SDH_RESPONSE2, 32);
+ D_RO(SDH_RESPONSE3, 32);
+ D_RO(SDH_RESP_CMD, 16);
+ D_RO(SDH_STATUS, 32);
+ D_WO(SDH_STATUS_CLR, 16);
+#endif
+
+#ifdef SECURE_CONTROL
+ parent = debugfs_create_dir("security", top);
+ D16(SECURE_CONTROL);
+ D16(SECURE_STATUS);
+ D32(SECURE_SYSSWT);
+#endif
+
+ parent = debugfs_create_dir("sic", top);
+ D16(SWRST);
+ D16(SYSCR);
+ D16(SIC_RVECT);
+ D32(SIC_IAR0);
+ D32(SIC_IAR1);
+ D32(SIC_IAR2);
+#ifdef SIC_IAR3
+ D32(SIC_IAR3);
+#endif
+#ifdef SIC_IAR4
+ D32(SIC_IAR4);
+ D32(SIC_IAR5);
+ D32(SIC_IAR6);
+#endif
+#ifdef SIC_IAR7
+ D32(SIC_IAR7);
+#endif
+#ifdef SIC_IAR8
+ D32(SIC_IAR8);
+ D32(SIC_IAR9);
+ D32(SIC_IAR10);
+ D32(SIC_IAR11);
+#endif
+#ifdef SIC_IMASK
+ D32(SIC_IMASK);
+ D32(SIC_ISR);
+ D32(SIC_IWR);
+#endif
+#ifdef SIC_IMASK0
+ D32(SIC_IMASK0);
+ D32(SIC_IMASK1);
+ D32(SIC_ISR0);
+ D32(SIC_ISR1);
+ D32(SIC_IWR0);
+ D32(SIC_IWR1);
+#endif
+#ifdef SIC_IMASK2
+ D32(SIC_IMASK2);
+ D32(SIC_ISR2);
+ D32(SIC_IWR2);
+#endif
+#ifdef SICB_RVECT
+ D16(SICB_SWRST);
+ D16(SICB_SYSCR);
+ D16(SICB_RVECT);
+ D32(SICB_IAR0);
+ D32(SICB_IAR1);
+ D32(SICB_IAR2);
+ D32(SICB_IAR3);
+ D32(SICB_IAR4);
+ D32(SICB_IAR5);
+ D32(SICB_IAR6);
+ D32(SICB_IAR7);
+ D32(SICB_IMASK0);
+ D32(SICB_IMASK1);
+ D32(SICB_ISR0);
+ D32(SICB_ISR1);
+ D32(SICB_IWR0);
+ D32(SICB_IWR1);
+#endif
+
+ parent = debugfs_create_dir("spi", top);
+#ifdef SPI0_REGBASE
+ SPI(0);
+#endif
+#ifdef SPI1_REGBASE
+ SPI(1);
+#endif
+#ifdef SPI2_REGBASE
+ SPI(2);
+#endif
+
+ parent = debugfs_create_dir("sport", top);
+#ifdef SPORT0_STAT
+ SPORT(0);
+#endif
+#ifdef SPORT1_STAT
+ SPORT(1);
+#endif
+#ifdef SPORT2_STAT
+ SPORT(2);
+#endif
+#ifdef SPORT3_STAT
+ SPORT(3);
+#endif
+
+#if defined(TWI_CLKDIV) || defined(TWI0_CLKDIV) || defined(TWI1_CLKDIV)
+ parent = debugfs_create_dir("twi", top);
+# ifdef TWI_CLKDIV
+ bfin_debug_mmrs_twi(parent, TWI_CLKDIV, -1);
+# endif
+# ifdef TWI0_CLKDIV
+ TWI(0);
+# endif
+# ifdef TWI1_CLKDIV
+ TWI(1);
+# endif
+#endif
+
+ parent = debugfs_create_dir("uart", top);
+#ifdef BFIN_UART_DLL
+ bfin_debug_mmrs_uart(parent, BFIN_UART_DLL, -1);
+#endif
+#ifdef UART0_DLL
+ UART(0);
+#endif
+#ifdef UART1_DLL
+ UART(1);
+#endif
+#ifdef UART2_DLL
+ UART(2);
+#endif
+#ifdef UART3_DLL
+ UART(3);
+#endif
+
+#ifdef USB_FADDR
+ parent = debugfs_create_dir("usb", top);
+ D16(USB_FADDR);
+ D16(USB_POWER);
+ D16(USB_INTRTX);
+ D16(USB_INTRRX);
+ D16(USB_INTRTXE);
+ D16(USB_INTRRXE);
+ D16(USB_INTRUSB);
+ D16(USB_INTRUSBE);
+ D16(USB_FRAME);
+ D16(USB_INDEX);
+ D16(USB_TESTMODE);
+ D16(USB_GLOBINTR);
+ D16(USB_GLOBAL_CTL);
+ D16(USB_TX_MAX_PACKET);
+ D16(USB_CSR0);
+ D16(USB_TXCSR);
+ D16(USB_RX_MAX_PACKET);
+ D16(USB_RXCSR);
+ D16(USB_COUNT0);
+ D16(USB_RXCOUNT);
+ D16(USB_TXTYPE);
+ D16(USB_NAKLIMIT0);
+ D16(USB_TXINTERVAL);
+ D16(USB_RXTYPE);
+ D16(USB_RXINTERVAL);
+ D16(USB_TXCOUNT);
+ D16(USB_EP0_FIFO);
+ D16(USB_EP1_FIFO);
+ D16(USB_EP2_FIFO);
+ D16(USB_EP3_FIFO);
+ D16(USB_EP4_FIFO);
+ D16(USB_EP5_FIFO);
+ D16(USB_EP6_FIFO);
+ D16(USB_EP7_FIFO);
+ D16(USB_OTG_DEV_CTL);
+ D16(USB_OTG_VBUS_IRQ);
+ D16(USB_OTG_VBUS_MASK);
+ D16(USB_LINKINFO);
+ D16(USB_VPLEN);
+ D16(USB_HS_EOF1);
+ D16(USB_FS_EOF1);
+ D16(USB_LS_EOF1);
+ D16(USB_APHY_CNTRL);
+ D16(USB_APHY_CALIB);
+ D16(USB_APHY_CNTRL2);
+ D16(USB_PHY_TEST);
+ D16(USB_PLLOSC_CTRL);
+ D16(USB_SRP_CLKDIV);
+ D16(USB_EP_NI0_TXMAXP);
+ D16(USB_EP_NI0_TXCSR);
+ D16(USB_EP_NI0_RXMAXP);
+ D16(USB_EP_NI0_RXCSR);
+ D16(USB_EP_NI0_RXCOUNT);
+ D16(USB_EP_NI0_TXTYPE);
+ D16(USB_EP_NI0_TXINTERVAL);
+ D16(USB_EP_NI0_RXTYPE);
+ D16(USB_EP_NI0_RXINTERVAL);
+ D16(USB_EP_NI0_TXCOUNT);
+ D16(USB_EP_NI1_TXMAXP);
+ D16(USB_EP_NI1_TXCSR);
+ D16(USB_EP_NI1_RXMAXP);
+ D16(USB_EP_NI1_RXCSR);
+ D16(USB_EP_NI1_RXCOUNT);
+ D16(USB_EP_NI1_TXTYPE);
+ D16(USB_EP_NI1_TXINTERVAL);
+ D16(USB_EP_NI1_RXTYPE);
+ D16(USB_EP_NI1_RXINTERVAL);
+ D16(USB_EP_NI1_TXCOUNT);
+ D16(USB_EP_NI2_TXMAXP);
+ D16(USB_EP_NI2_TXCSR);
+ D16(USB_EP_NI2_RXMAXP);
+ D16(USB_EP_NI2_RXCSR);
+ D16(USB_EP_NI2_RXCOUNT);
+ D16(USB_EP_NI2_TXTYPE);
+ D16(USB_EP_NI2_TXINTERVAL);
+ D16(USB_EP_NI2_RXTYPE);
+ D16(USB_EP_NI2_RXINTERVAL);
+ D16(USB_EP_NI2_TXCOUNT);
+ D16(USB_EP_NI3_TXMAXP);
+ D16(USB_EP_NI3_TXCSR);
+ D16(USB_EP_NI3_RXMAXP);
+ D16(USB_EP_NI3_RXCSR);
+ D16(USB_EP_NI3_RXCOUNT);
+ D16(USB_EP_NI3_TXTYPE);
+ D16(USB_EP_NI3_TXINTERVAL);
+ D16(USB_EP_NI3_RXTYPE);
+ D16(USB_EP_NI3_RXINTERVAL);
+ D16(USB_EP_NI3_TXCOUNT);
+ D16(USB_EP_NI4_TXMAXP);
+ D16(USB_EP_NI4_TXCSR);
+ D16(USB_EP_NI4_RXMAXP);
+ D16(USB_EP_NI4_RXCSR);
+ D16(USB_EP_NI4_RXCOUNT);
+ D16(USB_EP_NI4_TXTYPE);
+ D16(USB_EP_NI4_TXINTERVAL);
+ D16(USB_EP_NI4_RXTYPE);
+ D16(USB_EP_NI4_RXINTERVAL);
+ D16(USB_EP_NI4_TXCOUNT);
+ D16(USB_EP_NI5_TXMAXP);
+ D16(USB_EP_NI5_TXCSR);
+ D16(USB_EP_NI5_RXMAXP);
+ D16(USB_EP_NI5_RXCSR);
+ D16(USB_EP_NI5_RXCOUNT);
+ D16(USB_EP_NI5_TXTYPE);
+ D16(USB_EP_NI5_TXINTERVAL);
+ D16(USB_EP_NI5_RXTYPE);
+ D16(USB_EP_NI5_RXINTERVAL);
+ D16(USB_EP_NI5_TXCOUNT);
+ D16(USB_EP_NI6_TXMAXP);
+ D16(USB_EP_NI6_TXCSR);
+ D16(USB_EP_NI6_RXMAXP);
+ D16(USB_EP_NI6_RXCSR);
+ D16(USB_EP_NI6_RXCOUNT);
+ D16(USB_EP_NI6_TXTYPE);
+ D16(USB_EP_NI6_TXINTERVAL);
+ D16(USB_EP_NI6_RXTYPE);
+ D16(USB_EP_NI6_RXINTERVAL);
+ D16(USB_EP_NI6_TXCOUNT);
+ D16(USB_EP_NI7_TXMAXP);
+ D16(USB_EP_NI7_TXCSR);
+ D16(USB_EP_NI7_RXMAXP);
+ D16(USB_EP_NI7_RXCSR);
+ D16(USB_EP_NI7_RXCOUNT);
+ D16(USB_EP_NI7_TXTYPE);
+ D16(USB_EP_NI7_TXINTERVAL);
+ D16(USB_EP_NI7_RXTYPE);
+ D16(USB_EP_NI7_RXINTERVAL);
+ D16(USB_EP_NI7_TXCOUNT);
+ D16(USB_DMA_INTERRUPT);
+ D16(USB_DMA0CONTROL);
+ D16(USB_DMA0ADDRLOW);
+ D16(USB_DMA0ADDRHIGH);
+ D16(USB_DMA0COUNTLOW);
+ D16(USB_DMA0COUNTHIGH);
+ D16(USB_DMA1CONTROL);
+ D16(USB_DMA1ADDRLOW);
+ D16(USB_DMA1ADDRHIGH);
+ D16(USB_DMA1COUNTLOW);
+ D16(USB_DMA1COUNTHIGH);
+ D16(USB_DMA2CONTROL);
+ D16(USB_DMA2ADDRLOW);
+ D16(USB_DMA2ADDRHIGH);
+ D16(USB_DMA2COUNTLOW);
+ D16(USB_DMA2COUNTHIGH);
+ D16(USB_DMA3CONTROL);
+ D16(USB_DMA3ADDRLOW);
+ D16(USB_DMA3ADDRHIGH);
+ D16(USB_DMA3COUNTLOW);
+ D16(USB_DMA3COUNTHIGH);
+ D16(USB_DMA4CONTROL);
+ D16(USB_DMA4ADDRLOW);
+ D16(USB_DMA4ADDRHIGH);
+ D16(USB_DMA4COUNTLOW);
+ D16(USB_DMA4COUNTHIGH);
+ D16(USB_DMA5CONTROL);
+ D16(USB_DMA5ADDRLOW);
+ D16(USB_DMA5ADDRHIGH);
+ D16(USB_DMA5COUNTLOW);
+ D16(USB_DMA5COUNTHIGH);
+ D16(USB_DMA6CONTROL);
+ D16(USB_DMA6ADDRLOW);
+ D16(USB_DMA6ADDRHIGH);
+ D16(USB_DMA6COUNTLOW);
+ D16(USB_DMA6COUNTHIGH);
+ D16(USB_DMA7CONTROL);
+ D16(USB_DMA7ADDRLOW);
+ D16(USB_DMA7ADDRHIGH);
+ D16(USB_DMA7COUNTLOW);
+ D16(USB_DMA7COUNTHIGH);
+#endif
+
+#ifdef WDOG_CNT
+ parent = debugfs_create_dir("watchdog", top);
+ D32(WDOG_CNT);
+ D16(WDOG_CTL);
+ D32(WDOG_STAT);
+#endif
+#ifdef WDOGA_CNT
+ parent = debugfs_create_dir("watchdog", top);
+ D32(WDOGA_CNT);
+ D16(WDOGA_CTL);
+ D32(WDOGA_STAT);
+ D32(WDOGB_CNT);
+ D16(WDOGB_CTL);
+ D32(WDOGB_STAT);
+#endif
+
+ /* BF533 glue */
+#ifdef FIO_FLAG_D
+#define PORTFIO FIO_FLAG_D
+#endif
+ /* BF561 glue */
+#ifdef FIO0_FLAG_D
+#define PORTFIO FIO0_FLAG_D
+#endif
+#ifdef FIO1_FLAG_D
+#define PORTGIO FIO1_FLAG_D
+#endif
+#ifdef FIO2_FLAG_D
+#define PORTHIO FIO2_FLAG_D
+#endif
+ parent = debugfs_create_dir("port", top);
+#ifdef PORTFIO
+ PORT(PORTFIO, 'F');
+#endif
+#ifdef PORTGIO
+ PORT(PORTGIO, 'G');
+#endif
+#ifdef PORTHIO
+ PORT(PORTHIO, 'H');
+#endif
+
+#ifdef __ADSPBF51x__
+ D16(PORTF_FER);
+ D16(PORTF_DRIVE);
+ D16(PORTF_HYSTERESIS);
+ D16(PORTF_MUX);
+
+ D16(PORTG_FER);
+ D16(PORTG_DRIVE);
+ D16(PORTG_HYSTERESIS);
+ D16(PORTG_MUX);
+
+ D16(PORTH_FER);
+ D16(PORTH_DRIVE);
+ D16(PORTH_HYSTERESIS);
+ D16(PORTH_MUX);
+
+ D16(MISCPORT_DRIVE);
+ D16(MISCPORT_HYSTERESIS);
+#endif /* BF51x */
+
+#ifdef __ADSPBF52x__
+ D16(PORTF_FER);
+ D16(PORTF_DRIVE);
+ D16(PORTF_HYSTERESIS);
+ D16(PORTF_MUX);
+ D16(PORTF_SLEW);
+
+ D16(PORTG_FER);
+ D16(PORTG_DRIVE);
+ D16(PORTG_HYSTERESIS);
+ D16(PORTG_MUX);
+ D16(PORTG_SLEW);
+
+ D16(PORTH_FER);
+ D16(PORTH_DRIVE);
+ D16(PORTH_HYSTERESIS);
+ D16(PORTH_MUX);
+ D16(PORTH_SLEW);
+
+ D16(MISCPORT_DRIVE);
+ D16(MISCPORT_HYSTERESIS);
+ D16(MISCPORT_SLEW);
+#endif /* BF52x */
+
+#ifdef BF537_FAMILY
+ D16(PORTF_FER);
+ D16(PORTG_FER);
+ D16(PORTH_FER);
+ D16(PORT_MUX);
+#endif /* BF534 BF536 BF537 */
+
+#ifdef BF538_FAMILY
+ D16(PORTCIO_FER);
+ D16(PORTCIO);
+ D16(PORTCIO_CLEAR);
+ D16(PORTCIO_SET);
+ D16(PORTCIO_TOGGLE);
+ D16(PORTCIO_DIR);
+ D16(PORTCIO_INEN);
+
+ D16(PORTDIO);
+ D16(PORTDIO_CLEAR);
+ D16(PORTDIO_DIR);
+ D16(PORTDIO_FER);
+ D16(PORTDIO_INEN);
+ D16(PORTDIO_SET);
+ D16(PORTDIO_TOGGLE);
+
+ D16(PORTEIO);
+ D16(PORTEIO_CLEAR);
+ D16(PORTEIO_DIR);
+ D16(PORTEIO_FER);
+ D16(PORTEIO_INEN);
+ D16(PORTEIO_SET);
+ D16(PORTEIO_TOGGLE);
+#endif /* BF538 BF539 */
+
+#ifdef __ADSPBF54x__
+ {
+ int num;
+ unsigned long base;
+
+ base = PORTA_FER;
+ for (num = 0; num < 10; ++num) {
+ PORT(base, num);
+ base += sizeof(struct bfin_gpio_regs);
+ }
+
+ }
+#endif /* BF54x */
+
+ debug_mmrs_dentry = top;
+
+ return 0;
+}
+module_init(bfin_debug_mmrs_init);
+
+static void __exit bfin_debug_mmrs_exit(void)
+{
+ debugfs_remove_recursive(debug_mmrs_dentry);
+}
+module_exit(bfin_debug_mmrs_exit);
+
+MODULE_LICENSE("GPL");
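+
+/*
+ * Usage sketch (illustrative, not part of the driver): with the module
+ * loaded, each MMR becomes a file under its block's debugfs directory,
+ * so -- assuming the root directory created for "top" earlier in this
+ * file is named "blackfin" -- one could do:
+ *
+ *	cat /sys/kernel/debug/blackfin/pll/PLL_CTL
+ */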
diff --git a/arch/blackfin/kernel/dma-mapping.c b/arch/blackfin/kernel/dma-mapping.c
new file mode 100644
index 00000000..f0d1118f
--- /dev/null
+++ b/arch/blackfin/kernel/dma-mapping.c
@@ -0,0 +1,149 @@
+/*
+ * Dynamic DMA mapping support
+ *
+ * Copyright 2005-2009 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later
+ */
+
+#include <linux/types.h>
+#include <linux/gfp.h>
+#include <linux/string.h>
+#include <linux/spinlock.h>
+#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
+#include <linux/export.h>
+
+static spinlock_t dma_page_lock;
+static unsigned long *dma_page;
+static unsigned int dma_pages;
+static unsigned long dma_base;
+static unsigned long dma_size;
+static unsigned int dma_initialized;
+
+static void dma_alloc_init(unsigned long start, unsigned long end)
+{
+ spin_lock_init(&dma_page_lock);
+ dma_initialized = 0;
+
+ dma_page = (unsigned long *)__get_free_page(GFP_KERNEL);
+ memset(dma_page, 0, PAGE_SIZE);
+ dma_base = PAGE_ALIGN(start);
+ dma_size = PAGE_ALIGN(end) - PAGE_ALIGN(start);
+ dma_pages = dma_size >> PAGE_SHIFT;
+ memset((void *)dma_base, 0, DMA_UNCACHED_REGION);
+ dma_initialized = 1;
+
+ printk(KERN_INFO "%s: dma_page @ 0x%p - %d pages at 0x%08lx\n", __func__,
+ dma_page, dma_pages, dma_base);
+}
+
+static inline unsigned int get_pages(size_t size)
+{
+ return ((size - 1) >> PAGE_SHIFT) + 1;
+}
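+/*
+ * e.g. get_pages(1) == 1, get_pages(PAGE_SIZE) == 1,
+ * get_pages(PAGE_SIZE + 1) == 2. Note the (size - 1) form assumes
+ * size >= 1; a zero size would underflow to a huge page count.
+ */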
+
+static unsigned long __alloc_dma_pages(unsigned int pages)
+{
+ unsigned long ret = 0, flags;
+ int i, count = 0;
+
+ if (dma_initialized == 0)
+ dma_alloc_init(_ramend - DMA_UNCACHED_REGION, _ramend);
+
+ spin_lock_irqsave(&dma_page_lock, flags);
+
+ for (i = 0; i < dma_pages;) {
+ if (test_bit(i++, dma_page) == 0) {
+ if (++count == pages) {
+ while (count--)
+ __set_bit(--i, dma_page);
+
+ ret = dma_base + (i << PAGE_SHIFT);
+ break;
+ }
+ } else
+ count = 0;
+ }
+ spin_unlock_irqrestore(&dma_page_lock, flags);
+ return ret;
+}
+
+static void __free_dma_pages(unsigned long addr, unsigned int pages)
+{
+ unsigned long page = (addr - dma_base) >> PAGE_SHIFT;
+ unsigned long flags;
+ int i;
+
+ if ((page + pages) > dma_pages) {
+ printk(KERN_ERR "%s: freeing outside range.\n", __func__);
+ BUG();
+ }
+
+ spin_lock_irqsave(&dma_page_lock, flags);
+ for (i = page; i < page + pages; i++)
+ __clear_bit(i, dma_page);
+
+ spin_unlock_irqrestore(&dma_page_lock, flags);
+}
+
+void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp)
+{
+ void *ret;
+
+ ret = (void *)__alloc_dma_pages(get_pages(size));
+
+ if (ret) {
+ memset(ret, 0, size);
+ *dma_handle = virt_to_phys(ret);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(dma_alloc_coherent);
+
+void
+dma_free_coherent(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle)
+{
+ __free_dma_pages((unsigned long)vaddr, get_pages(size));
+}
+EXPORT_SYMBOL(dma_free_coherent);
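+
+/*
+ * A minimal pairing sketch ("dev", the size, and the flow are
+ * illustrative, not from this file):
+ *
+ *	dma_addr_t handle;
+ *	void *buf = dma_alloc_coherent(dev, 4096, &handle, GFP_KERNEL);
+ *	if (buf) {
+ *		... program the peripheral with "handle" ...
+ *		dma_free_coherent(dev, 4096, buf, handle);
+ *	}
+ *
+ * The buffer comes from the uncached region carved out above, so no
+ * extra sync is needed for coherent use.
+ */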
+
+/*
+ * Streaming DMA mappings
+ */
+void __dma_sync(dma_addr_t addr, size_t size,
+ enum dma_data_direction dir)
+{
+ __dma_sync_inline(addr, size, dir);
+}
+EXPORT_SYMBOL(__dma_sync);
+
+int
+dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction direction)
+{
+ int i;
+
+ for (i = 0; i < nents; i++, sg++) {
+ sg->dma_address = (dma_addr_t) sg_virt(sg);
+ __dma_sync(sg_dma_address(sg), sg_dma_len(sg), direction);
+ }
+
+ return nents;
+}
+EXPORT_SYMBOL(dma_map_sg);
+
+void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+ int nelems, enum dma_data_direction direction)
+{
+ int i;
+
+ for (i = 0; i < nelems; i++, sg++) {
+ sg->dma_address = (dma_addr_t) sg_virt(sg);
+ __dma_sync(sg_dma_address(sg), sg_dma_len(sg), direction);
+ }
+}
+EXPORT_SYMBOL(dma_sync_sg_for_device);
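+
+/*
+ * Streaming sketch (illustrative): map the scatterlist once, then
+ * resync before each re-use of the same mapping:
+ *
+ *	int n = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
+ *	... start the transfer using sg_dma_address()/sg_dma_len() ...
+ *	dma_sync_sg_for_device(dev, sgl, nents, DMA_TO_DEVICE);
+ */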
diff --git a/arch/blackfin/kernel/dumpstack.c b/arch/blackfin/kernel/dumpstack.c
new file mode 100644
index 00000000..5cfbaa29
--- /dev/null
+++ b/arch/blackfin/kernel/dumpstack.c
@@ -0,0 +1,174 @@
+/* Provide basic stack dumping functions
+ *
+ * Copyright 2004-2009 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later
+ */
+
+#include <linux/kernel.h>
+#include <linux/thread_info.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <asm/trace.h>
+
+/*
+ * Checks to see if the address pointed to is either a
+ * 16-bit CALL instruction, or a 32-bit CALL instruction
+ */
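+/*
+ * (The ranges tested below are, presumably, the 16-bit CALL (Preg) and
+ * CALL (PC + Preg) encodings -- one opcode per pointer register -- and
+ * the 32-bit pcrel CALL, whose 0xE3 high byte is the same one
+ * bfin_make_pcrel24() emits in ftrace.c.)
+ */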
+static bool is_bfin_call(unsigned short *addr)
+{
+ unsigned int opcode;
+
+ if (!get_instruction(&opcode, addr))
+ return false;
+
+ if ((opcode >= 0x0060 && opcode <= 0x0067) ||
+ (opcode >= 0x0070 && opcode <= 0x0077) ||
+ (opcode >= 0xE3000000 && opcode <= 0xE3FFFFFF))
+ return true;
+
+ return false;
+}
+
+void show_stack(struct task_struct *task, unsigned long *stack)
+{
+#ifdef CONFIG_PRINTK
+ unsigned int *addr, *endstack, *fp = 0, *frame;
+ unsigned short *ins_addr;
+ char buf[150];
+ unsigned int i, j, ret_addr, frame_no = 0;
+
+ /*
+	 * If we have been passed a specific stack, use that one; otherwise,
+	 * if we have been passed a task structure, use that; otherwise
+	 * use the stack where the variable "stack" itself lives
+ */
+
+ if (stack == NULL) {
+ if (task) {
+ /* We know this is a kernel stack, so this is the start/end */
+ stack = (unsigned long *)task->thread.ksp;
+ endstack = (unsigned int *)(((unsigned int)(stack) & ~(THREAD_SIZE - 1)) + THREAD_SIZE);
+ } else {
+ /* print out the existing stack info */
+ stack = (unsigned long *)&stack;
+ endstack = (unsigned int *)PAGE_ALIGN((unsigned int)stack);
+ }
+ } else
+ endstack = (unsigned int *)PAGE_ALIGN((unsigned int)stack);
+
+ printk(KERN_NOTICE "Stack info:\n");
+ decode_address(buf, (unsigned int)stack);
+ printk(KERN_NOTICE " SP: [0x%p] %s\n", stack, buf);
+
+ if (!access_ok(VERIFY_READ, stack, (unsigned int)endstack - (unsigned int)stack)) {
+ printk(KERN_NOTICE "Invalid stack pointer\n");
+ return;
+ }
+
+ /* First thing is to look for a frame pointer */
+ for (addr = (unsigned int *)((unsigned int)stack & ~0xF); addr < endstack; addr++) {
+ if (*addr & 0x1)
+ continue;
+ ins_addr = (unsigned short *)*addr;
+ ins_addr--;
+ if (is_bfin_call(ins_addr))
+ fp = addr - 1;
+
+ if (fp) {
+ /* Let's check to see if it is a frame pointer */
+ while (fp >= (addr - 1) && fp < endstack
+ && fp && ((unsigned int) fp & 0x3) == 0)
+ fp = (unsigned int *)*fp;
+ if (fp == 0 || fp == endstack) {
+ fp = addr - 1;
+ break;
+ }
+ fp = 0;
+ }
+ }
+ if (fp) {
+ frame = fp;
+ printk(KERN_NOTICE " FP: (0x%p)\n", fp);
+ } else
+ frame = 0;
+
+ /*
+ * Now that we think we know where things are, we
+	 * walk the stack again, this time printing things out.
+	 * In case there is no frame pointer, we still look for
+	 * valid return addresses
+ */
+
+ /* First time print out data, next time, print out symbols */
+ for (j = 0; j <= 1; j++) {
+ if (j)
+ printk(KERN_NOTICE "Return addresses in stack:\n");
+ else
+ printk(KERN_NOTICE " Memory from 0x%08lx to %p", ((long unsigned int)stack & ~0xF), endstack);
+
+ fp = frame;
+ frame_no = 0;
+
+ for (addr = (unsigned int *)((unsigned int)stack & ~0xF), i = 0;
+ addr < endstack; addr++, i++) {
+
+ ret_addr = 0;
+ if (!j && i % 8 == 0)
+ printk(KERN_NOTICE "%p:", addr);
+
+ /* if it is an odd address, or zero, just skip it */
+ if (*addr & 0x1 || !*addr)
+ goto print;
+
+ ins_addr = (unsigned short *)*addr;
+
+ /* Go back one instruction, and see if it is a CALL */
+ ins_addr--;
+ ret_addr = is_bfin_call(ins_addr);
+ print:
+ if (!j && stack == (unsigned long *)addr)
+ printk("[%08x]", *addr);
+ else if (ret_addr)
+ if (j) {
+ decode_address(buf, (unsigned int)*addr);
+ if (frame == addr) {
+ printk(KERN_NOTICE " frame %2i : %s\n", frame_no, buf);
+ continue;
+ }
+ printk(KERN_NOTICE " address : %s\n", buf);
+ } else
+ printk("<%08x>", *addr);
+ else if (fp == addr) {
+ if (j)
+ frame = addr+1;
+ else
+ printk("(%08x)", *addr);
+
+ fp = (unsigned int *)*addr;
+ frame_no++;
+
+ } else if (!j)
+ printk(" %08x ", *addr);
+ }
+ if (!j)
+ printk("\n");
+ }
+#endif
+}
+EXPORT_SYMBOL(show_stack);
+
+void dump_stack(void)
+{
+ unsigned long stack;
+#ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON
+ int tflags;
+#endif
+ trace_buffer_save(tflags);
+ dump_bfin_trace_buffer();
+ show_stack(current, &stack);
+ trace_buffer_restore(tflags);
+}
+EXPORT_SYMBOL(dump_stack);
diff --git a/arch/blackfin/kernel/early_printk.c b/arch/blackfin/kernel/early_printk.c
new file mode 100644
index 00000000..84ed8375
--- /dev/null
+++ b/arch/blackfin/kernel/early_printk.c
@@ -0,0 +1,272 @@
+/*
+ * allow a console to be used for early printk
+ * derived from arch/x86/kernel/early_printk.c
+ *
+ * Copyright 2007-2009 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/serial_core.h>
+#include <linux/console.h>
+#include <linux/string.h>
+#include <linux/reboot.h>
+#include <asm/blackfin.h>
+#include <asm/irq_handler.h>
+#include <asm/early_printk.h>
+
+#ifdef CONFIG_SERIAL_BFIN
+extern struct console *bfin_earlyserial_init(unsigned int port,
+ unsigned int cflag);
+#endif
+#ifdef CONFIG_BFIN_JTAG_COMM
+extern struct console *bfin_jc_early_init(void);
+#endif
+
+static struct console *early_console;
+
+/* Default console */
+#define DEFAULT_PORT 0
+#define DEFAULT_CFLAG CS8|B57600
+
+/* Default console for early crashes */
+#define DEFAULT_EARLY_PORT "serial,uart0,57600"
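+/*
+ * i.e. the same syntax accepted by the boot parameter registered at the
+ * bottom of this file: earlyprintk=serial,uart0,57600 (or "jtag" for
+ * the JTAG console).
+ */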
+
+#ifdef CONFIG_SERIAL_CORE
+/* What should get here is "0,57600" */
+static struct console * __init earlyserial_init(char *buf)
+{
+ int baud, bit;
+ char parity;
+ unsigned int serial_port = DEFAULT_PORT;
+ unsigned int cflag = DEFAULT_CFLAG;
+
+ serial_port = simple_strtoul(buf, &buf, 10);
+ buf++;
+
+ cflag = 0;
+ baud = simple_strtoul(buf, &buf, 10);
+ switch (baud) {
+ case 1200:
+ cflag |= B1200;
+ break;
+ case 2400:
+ cflag |= B2400;
+ break;
+ case 4800:
+ cflag |= B4800;
+ break;
+ case 9600:
+ cflag |= B9600;
+ break;
+ case 19200:
+ cflag |= B19200;
+ break;
+ case 38400:
+ cflag |= B38400;
+ break;
+ case 115200:
+ cflag |= B115200;
+ break;
+ default:
+ cflag |= B57600;
+ }
+
+ parity = buf[0];
+ buf++;
+ switch (parity) {
+ case 'e':
+ cflag |= PARENB;
+ break;
+ case 'o':
+ cflag |= PARODD;
+ break;
+ }
+
+ bit = simple_strtoul(buf, &buf, 10);
+ switch (bit) {
+ case 5:
+ cflag |= CS5;
+ break;
+ case 6:
+ cflag |= CS6;
+ break;
+ case 7:
+ cflag |= CS7;
+ break;
+ default:
+ cflag |= CS8;
+ }
+
+#ifdef CONFIG_SERIAL_BFIN
+ return bfin_earlyserial_init(serial_port, cflag);
+#else
+ return NULL;
+#endif
+
+}
+#endif
+
+int __init setup_early_printk(char *buf)
+{
+
+ /* Crashing in here would be really bad, so check both the var
+ and the pointer before we start using it
+ */
+ if (!buf)
+ return 0;
+
+ if (!*buf)
+ return 0;
+
+ if (early_console != NULL)
+ return 0;
+
+#ifdef CONFIG_SERIAL_BFIN
+ /* Check for Blackfin Serial */
+ if (!strncmp(buf, "serial,uart", 11)) {
+ buf += 11;
+ early_console = earlyserial_init(buf);
+ }
+#endif
+
+#ifdef CONFIG_BFIN_JTAG_COMM
+ /* Check for Blackfin JTAG */
+ if (!strncmp(buf, "jtag", 4)) {
+ buf += 4;
+ early_console = bfin_jc_early_init();
+ }
+#endif
+
+#ifdef CONFIG_FB
+ /* TODO: add framebuffer console support */
+#endif
+
+ if (likely(early_console)) {
+ early_console->flags |= CON_BOOT;
+
+ register_console(early_console);
+ printk(KERN_INFO "early printk enabled on %s%d\n",
+ early_console->name,
+ early_console->index);
+ }
+
+ return 0;
+}
+
+/*
+ * Set up a temporary Event Vector Table, so if something bad happens before
+ * the kernel is fully started, it doesn't vector off into somewhere we don't
+ * know
+ */
+
+asmlinkage void __init init_early_exception_vectors(void)
+{
+ u32 evt;
+ SSYNC();
+
+ /*
+	 * This starts up the shadow buffer, in case anything crashes before
+	 * setup_arch()
+ */
+ mark_shadow_error();
+ early_shadow_puts(linux_banner);
+ early_shadow_stamp();
+
+ if (CPUID != bfin_cpuid()) {
+ early_shadow_puts("Running on wrong machine type, expected");
+ early_shadow_reg(CPUID, 16);
+ early_shadow_puts(", but running on");
+ early_shadow_reg(bfin_cpuid(), 16);
+ early_shadow_puts("\n");
+ }
+
+ /* cannot program in software:
+ * evt0 - emulation (jtag)
+ * evt1 - reset
+ */
+ for (evt = EVT2; evt <= EVT15; evt += 4)
+ bfin_write32(evt, early_trap);
+ CSYNC();
+
+ /* Set all the return from interrupt, exception, NMI to a known place
+ * so if we do a RETI, RETX or RETN by mistake - we go somewhere known
+ * Note - don't change RETS - we are in a subroutine, or
+ * RETE - since it might screw up if emulator is attached
+ */
+ asm("\tRETI = %0; RETX = %0; RETN = %0;\n"
+ : : "p"(early_trap));
+
+}
+
+__attribute__((__noreturn__))
+asmlinkage void __init early_trap_c(struct pt_regs *fp, void *retaddr)
+{
+ /* This can happen before the uart is initialized, so initialize
+ * the UART now (but only if we are running on the processor we think
+ * we are compiled for - otherwise we write to MMRs that don't exist,
+ * and cause other problems. Nothing comes out the UART, but it does
+ * end up in the __buf_log.
+ */
+ if (likely(early_console == NULL) && CPUID == bfin_cpuid())
+ setup_early_printk(DEFAULT_EARLY_PORT);
+
+ if (!shadow_console_enabled()) {
+ /* crap - we crashed before setup_arch() */
+ early_shadow_puts("panic before setup_arch\n");
+ early_shadow_puts("IPEND:");
+ early_shadow_reg(fp->ipend, 16);
+ if (fp->seqstat & SEQSTAT_EXCAUSE) {
+ early_shadow_puts("\nEXCAUSE:");
+ early_shadow_reg(fp->seqstat & SEQSTAT_EXCAUSE, 8);
+ }
+ if (fp->seqstat & SEQSTAT_HWERRCAUSE) {
+ early_shadow_puts("\nHWERRCAUSE:");
+ early_shadow_reg(
+ (fp->seqstat & SEQSTAT_HWERRCAUSE) >> 14, 8);
+ }
+ early_shadow_puts("\nErr @");
+ if (fp->ipend & EVT_EVX)
+ early_shadow_reg(fp->retx, 32);
+ else
+ early_shadow_reg(fp->pc, 32);
+#ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON
+ early_shadow_puts("\nTrace:");
+ if (likely(bfin_read_TBUFSTAT() & TBUFCNT)) {
+ while (bfin_read_TBUFSTAT() & TBUFCNT) {
+ early_shadow_puts("\nT :");
+ early_shadow_reg(bfin_read_TBUF(), 32);
+ early_shadow_puts("\n S :");
+ early_shadow_reg(bfin_read_TBUF(), 32);
+ }
+ }
+#endif
+ early_shadow_puts("\nUse bfin-elf-addr2line to determine "
+ "function names\n");
+ /*
+ * We should panic(), but we can't - since panic calls printk,
+ * and printk uses memcpy.
+		 * We want to reboot, but if the machine type is different,
+		 * we can't, due to machine-specific reboot sequences.
+ */
+ if (CPUID == bfin_cpuid()) {
+ early_shadow_puts("Trying to restart\n");
+ machine_restart("");
+ }
+
+ early_shadow_puts("Halting, since it is not safe to restart\n");
+ while (1)
+ asm volatile ("EMUEXCPT; IDLE;\n");
+
+ } else {
+ printk(KERN_EMERG "Early panic\n");
+ show_regs(fp);
+ dump_bfin_trace_buffer();
+ }
+
+ panic("Died early");
+}
+
+early_param("earlyprintk", setup_early_printk);
diff --git a/arch/blackfin/kernel/entry.S b/arch/blackfin/kernel/entry.S
new file mode 100644
index 00000000..686478f5
--- /dev/null
+++ b/arch/blackfin/kernel/entry.S
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2004-2009 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later
+ */
+
+#include <linux/linkage.h>
+#include <asm/thread_info.h>
+#include <asm/errno.h>
+#include <asm/blackfin.h>
+#include <asm/asm-offsets.h>
+
+#include <asm/context.S>
+
+#ifdef CONFIG_EXCPT_IRQ_SYSC_L1
+.section .l1.text
+#else
+.text
+#endif
+
+ENTRY(_ret_from_fork)
+#ifdef CONFIG_IPIPE
+ /*
+ * Hw IRQs are off on entry, and we don't want the scheduling tail
+ * code to starve high priority domains from interrupts while it
+ * runs. Therefore we first stall the root stage to have the
+ * virtual interrupt state reflect IMASK.
+ */
+ p0.l = ___ipipe_root_status;
+ p0.h = ___ipipe_root_status;
+ r4 = [p0];
+ bitset(r4, 0);
+ [p0] = r4;
+ /*
+ * Then we may enable hw IRQs, allowing preemption from high
+ * priority domains. schedule_tail() will do local_irq_enable()
+ * since Blackfin does not define __ARCH_WANT_UNLOCKED_CTXSW, so
+ * there is no need to unstall the root domain by ourselves
+ * afterwards.
+ */
+ p0.l = _bfin_irq_flags;
+ p0.h = _bfin_irq_flags;
+ r4 = [p0];
+ sti r4;
+#endif /* CONFIG_IPIPE */
+ SP += -12;
+ pseudo_long_call _schedule_tail, p5;
+ SP += 12;
+ r0 = [sp + PT_IPEND];
+ cc = bittst(r0,1);
+ if cc jump .Lin_kernel;
+ RESTORE_CONTEXT
+ rti;
+.Lin_kernel:
+ bitclr(r0,1);
+ [sp + PT_IPEND] = r0;
+ /* do a 'fake' RTI by jumping to [RETI]
+ * to avoid clearing supervisor mode in child
+ */
+ r0 = [sp + PT_PC];
+ [sp + PT_P0] = r0;
+
+ RESTORE_ALL_SYS
+ jump (p0);
+ENDPROC(_ret_from_fork)
+
+ENTRY(_sys_fork)
+ r0 = -EINVAL;
+#if (ANOMALY_05000371)
+ nop;
+ nop;
+ nop;
+#endif
+ rts;
+ENDPROC(_sys_fork)
+
+ENTRY(_sys_vfork)
+ r0 = sp;
+ r0 += 24;
+ [--sp] = rets;
+ SP += -12;
+ pseudo_long_call _bfin_vfork, p2;
+ SP += 12;
+ rets = [sp++];
+ rts;
+ENDPROC(_sys_vfork)
+
+ENTRY(_sys_clone)
+ r0 = sp;
+ r0 += 24;
+ [--sp] = rets;
+ SP += -12;
+ pseudo_long_call _bfin_clone, p2;
+ SP += 12;
+ rets = [sp++];
+ rts;
+ENDPROC(_sys_clone)
+
+ENTRY(_sys_rt_sigreturn)
+ r0 = sp;
+ r0 += 24;
+ [--sp] = rets;
+ SP += -12;
+ pseudo_long_call _do_rt_sigreturn, p2;
+ SP += 12;
+ rets = [sp++];
+ rts;
+ENDPROC(_sys_rt_sigreturn)
diff --git a/arch/blackfin/kernel/exception.c b/arch/blackfin/kernel/exception.c
new file mode 100644
index 00000000..9208b5fd
--- /dev/null
+++ b/arch/blackfin/kernel/exception.c
@@ -0,0 +1,45 @@
+/* Basic functions for adding/removing custom exception handlers
+ *
+ * Copyright 2004-2009 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later
+ */
+
+#include <linux/module.h>
+#include <asm/irq_handler.h>
+
+int bfin_request_exception(unsigned int exception, void (*handler)(void))
+{
+ void (*curr_handler)(void);
+
+ if (exception > 0x3F)
+ return -EINVAL;
+
+ curr_handler = ex_table[exception];
+
+ if (curr_handler != ex_replaceable)
+ return -EBUSY;
+
+ ex_table[exception] = handler;
+
+ return 0;
+}
+EXPORT_SYMBOL(bfin_request_exception);
+
+int bfin_free_exception(unsigned int exception, void (*handler)(void))
+{
+ void (*curr_handler)(void);
+
+ if (exception > 0x3F)
+ return -EINVAL;
+
+ curr_handler = ex_table[exception];
+
+ if (curr_handler != handler)
+ return -EBUSY;
+
+ ex_table[exception] = ex_replaceable;
+
+ return 0;
+}
+EXPORT_SYMBOL(bfin_free_exception);
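+
+/*
+ * Pairing sketch (the 0x2A vector and handler name are purely
+ * illustrative):
+ *
+ *	if (bfin_request_exception(0x2A, my_excpt_handler) == 0) {
+ *		...
+ *		bfin_free_exception(0x2A, my_excpt_handler);
+ *	}
+ *
+ * Requests fail with -EBUSY unless the slot still holds the
+ * ex_replaceable placeholder, and frees fail unless the caller passes
+ * the handler currently installed.
+ */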
diff --git a/arch/blackfin/kernel/fixed_code.S b/arch/blackfin/kernel/fixed_code.S
new file mode 100644
index 00000000..0565917f
--- /dev/null
+++ b/arch/blackfin/kernel/fixed_code.S
@@ -0,0 +1,155 @@
+/*
+ * This file contains sequences of code that will be copied to a
+ * fixed location, defined in <asm/fixed_code.h>. The interrupt
+ * handlers ensure that these sequences appear to be atomic when
+ * executed from userspace.
+ * These are aligned to 16 bytes, so that we have some space to replace
+ * these sequences with something else (e.g. kernel traps if we ever do
+ * BF561 SMP).
+ *
+ * Copyright 2007-2008 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/linkage.h>
+#include <linux/init.h>
+#include <linux/unistd.h>
+#include <asm/entry.h>
+
+__INIT
+
+ENTRY(_fixed_code_start)
+
+.align 16
+ENTRY(_sigreturn_stub)
+ P0 = __NR_rt_sigreturn;
+ EXCPT 0;
+ /* Speculative execution paranoia. */
+0: JUMP.S 0b;
+ENDPROC (_sigreturn_stub)
+
+.align 16
+ /*
+	 * Atomic swap, 32 bit.
+ * Inputs: P0: memory address to use
+ * R1: value to store
+	 * Output: R0: old contents of the memory address.
+ */
+ENTRY(_atomic_xchg32)
+ R0 = [P0];
+ [P0] = R1;
+ rts;
+ENDPROC (_atomic_xchg32)
+
+.align 16
+ /*
+ * Compare and swap, 32 bit.
+ * Inputs: P0: memory address to use
+ * R1: compare value
+ * R2: new value to store
+ * The new value is stored if the contents of the memory
+ * address is equal to the compare value.
+ * Output: R0: old contents of the memory address.
+ */
+ENTRY(_atomic_cas32)
+ R0 = [P0];
+ CC = R0 == R1;
+ IF !CC JUMP 1f;
+ [P0] = R2;
+1:
+ rts;
+ENDPROC (_atomic_cas32)
+
+.align 16
+ /*
+ * Atomic add, 32 bit.
+ * Inputs: P0: memory address to use
+ * R0: value to add
+ * Outputs: R0: new contents of the memory address.
+ * R1: previous contents of the memory address.
+ */
+ENTRY(_atomic_add32)
+ R1 = [P0];
+ R0 = R1 + R0;
+ [P0] = R0;
+ rts;
+ENDPROC (_atomic_add32)
+
+.align 16
+ /*
+ * Atomic sub, 32 bit.
+ * Inputs: P0: memory address to use
+ * R0: value to subtract
+ * Outputs: R0: new contents of the memory address.
+ * R1: previous contents of the memory address.
+ */
+ENTRY(_atomic_sub32)
+ R1 = [P0];
+ R0 = R1 - R0;
+ [P0] = R0;
+ rts;
+ENDPROC (_atomic_sub32)
+
+.align 16
+ /*
+ * Atomic ior, 32 bit.
+ * Inputs: P0: memory address to use
+ * R0: value to ior
+ * Outputs: R0: new contents of the memory address.
+ * R1: previous contents of the memory address.
+ */
+ENTRY(_atomic_ior32)
+ R1 = [P0];
+ R0 = R1 | R0;
+ [P0] = R0;
+ rts;
+ENDPROC (_atomic_ior32)
+
+.align 16
+ /*
+ * Atomic and, 32 bit.
+ * Inputs: P0: memory address to use
+ * R0: value to and
+ * Outputs: R0: new contents of the memory address.
+ * R1: previous contents of the memory address.
+ */
+ENTRY(_atomic_and32)
+ R1 = [P0];
+ R0 = R1 & R0;
+ [P0] = R0;
+ rts;
+ENDPROC (_atomic_and32)
+
+.align 16
+ /*
+ * Atomic xor, 32 bit.
+ * Inputs: P0: memory address to use
+ * R0: value to xor
+ * Outputs: R0: new contents of the memory address.
+ * R1: previous contents of the memory address.
+ */
+ENTRY(_atomic_xor32)
+ R1 = [P0];
+ R0 = R1 ^ R0;
+ [P0] = R0;
+ rts;
+ENDPROC (_atomic_xor32)
+
+.align 16
+ /*
+ * safe_user_instruction
+	 * Four NOPs are enough to allow the pipeline to speculatively load
+	 * and execute anything it wants. After that, things have gone bad,
+	 * and we are stuck - so panic. Since we might be in user space, we
+	 * can't call panic, so just cause an unhandled exception; this should
+	 * cause a dump of the trace buffer so we can tell where we are, and
+	 * trigger a reboot
+ */
+ENTRY(_safe_user_instruction)
+ NOP; NOP; NOP; NOP;
+ EXCPT 0x4;
+ENDPROC(_safe_user_instruction)
+
+ENTRY(_fixed_code_end)
+
+__FINIT
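+
+/*
+ * Userspace usage sketch: these sequences are CALLed at their fixed
+ * addresses (assuming the ATOMIC_* constants from <asm/fixed_code.h>),
+ * e.g. for the atomic add, with operands loaded per the comments above:
+ *
+ *	R0 = increment; P0 = address;
+ *	P1.H = hi(ATOMIC_ADD32); P1.L = lo(ATOMIC_ADD32);
+ *	CALL (P1);	// R0 = new value, R1 = old value
+ */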
diff --git a/arch/blackfin/kernel/flat.c b/arch/blackfin/kernel/flat.c
new file mode 100644
index 00000000..a88daddb
--- /dev/null
+++ b/arch/blackfin/kernel/flat.c
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2007 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/flat.h>
+
+#define FLAT_BFIN_RELOC_TYPE_16_BIT 0
+#define FLAT_BFIN_RELOC_TYPE_16H_BIT 1
+#define FLAT_BFIN_RELOC_TYPE_32_BIT 2
+
+unsigned long bfin_get_addr_from_rp(unsigned long *ptr,
+ unsigned long relval,
+ unsigned long flags,
+ unsigned long *persistent)
+{
+ unsigned short *usptr = (unsigned short *)ptr;
+ int type = (relval >> 26) & 7;
+ unsigned long val;
+
+ switch (type) {
+ case FLAT_BFIN_RELOC_TYPE_16_BIT:
+ case FLAT_BFIN_RELOC_TYPE_16H_BIT:
+ usptr = (unsigned short *)ptr;
+ pr_debug("*usptr = %x", get_unaligned(usptr));
+ val = get_unaligned(usptr);
+ val += *persistent;
+ break;
+
+ case FLAT_BFIN_RELOC_TYPE_32_BIT:
+ pr_debug("*ptr = %lx", get_unaligned(ptr));
+ val = get_unaligned(ptr);
+ break;
+
+ default:
+ pr_debug("BINFMT_FLAT: Unknown relocation type %x\n", type);
+ return 0;
+ }
+
+ /*
+	 * Stack-relative relocs contain the offset into the stack; we
+	 * have to add the stack's start address here and return 1 from
+	 * flat_addr_absolute to prevent the normal address calculations
+ */
+ if (relval & (1 << 29))
+ return val + current->mm->context.end_brk;
+
+ if ((flags & FLAT_FLAG_GOTPIC) == 0)
+ val = htonl(val);
+ return val;
+}
+EXPORT_SYMBOL(bfin_get_addr_from_rp);
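+
+/*
+ * Worked example (illustrative): relval 0x28000000 decodes as
+ * (relval >> 26) & 7 == 2, i.e. FLAT_BFIN_RELOC_TYPE_32_BIT, and has
+ * bit 29 set, so the 32-bit value at *ptr comes back with
+ * current->mm->context.end_brk added in.
+ */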
+
+/*
+ * Insert the address ADDR into the symbol reference at RP;
+ * RELVAL is the raw relocation-table entry from which RP is derived
+ */
+void bfin_put_addr_at_rp(unsigned long *ptr, unsigned long addr,
+ unsigned long relval)
+{
+ unsigned short *usptr = (unsigned short *)ptr;
+ int type = (relval >> 26) & 7;
+
+ switch (type) {
+ case FLAT_BFIN_RELOC_TYPE_16_BIT:
+ put_unaligned(addr, usptr);
+ pr_debug("new value %x at %p", get_unaligned(usptr), usptr);
+ break;
+
+ case FLAT_BFIN_RELOC_TYPE_16H_BIT:
+ put_unaligned(addr >> 16, usptr);
+ pr_debug("new value %x", get_unaligned(usptr));
+ break;
+
+ case FLAT_BFIN_RELOC_TYPE_32_BIT:
+ put_unaligned(addr, ptr);
+ pr_debug("new ptr =%lx", get_unaligned(ptr));
+ break;
+ }
+}
+EXPORT_SYMBOL(bfin_put_addr_at_rp);
diff --git a/arch/blackfin/kernel/ftrace-entry.S b/arch/blackfin/kernel/ftrace-entry.S
new file mode 100644
index 00000000..7eed00bb
--- /dev/null
+++ b/arch/blackfin/kernel/ftrace-entry.S
@@ -0,0 +1,225 @@
+/*
+ * mcount and friends -- ftrace stuff
+ *
+ * Copyright (C) 2009-2010 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/linkage.h>
+#include <asm/ftrace.h>
+
+.text
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+/* Simple stub so we can boot the kernel until runtime patching has
+ * disabled all calls to this. Then it'll be unused.
+ */
+ENTRY(__mcount)
+# if ANOMALY_05000371
+ nop; nop; nop; nop;
+# endif
+ rts;
+ENDPROC(__mcount)
+
+/* GCC will have called us before setting up the function prologue, so we
+ * can clobber the normal scratch registers, but we need to make sure to
+ * save/restore the registers used for argument passing (R0-R2) in case
+ * the profiled function is using them. With data registers, R3 is the
+ * only one we can blow away. With pointer registers, we have P0-P2.
+ *
+ * Upon entry, the RETS will point to the top of the current profiled
+ * function. And since GCC pushed the previous RETS for us, the previous
+ * function will be waiting there. mmmm pie.
+ */
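+/*
+ * For reference, the -pg call site that the dynamic code in ftrace.c
+ * patches is the three-instruction sequence:
+ *
+ *	[--SP] = RETS;
+ *	CALL __mcount;
+ *	RETS = [SP++];
+ */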
+ENTRY(_ftrace_caller)
+# ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
+ /* optional micro optimization: return if stopped */
+ p1.l = _function_trace_stop;
+ p1.h = _function_trace_stop;
+ r3 = [p1];
+ cc = r3 == 0;
+ if ! cc jump _ftrace_stub (bp);
+# endif
+
+ /* save first/second/third function arg and the return register */
+ [--sp] = r2;
+ [--sp] = r0;
+ [--sp] = r1;
+ [--sp] = rets;
+
+ /* function_trace_call(unsigned long ip, unsigned long parent_ip):
+ * ip: this point was called by ...
+ * parent_ip: ... this function
+ * the ip itself will need adjusting for the mcount call
+ */
+ r0 = rets;
+ r1 = [sp + 16]; /* skip the 4 local regs on stack */
+ r0 += -MCOUNT_INSN_SIZE;
+
+.globl _ftrace_call
+_ftrace_call:
+ call _ftrace_stub
+
+# ifdef CONFIG_FUNCTION_GRAPH_TRACER
+.globl _ftrace_graph_call
+_ftrace_graph_call:
+ nop; /* jump _ftrace_graph_caller; */
+# endif
+
+ /* restore state and get out of dodge */
+.Lfinish_trace:
+ rets = [sp++];
+ r1 = [sp++];
+ r0 = [sp++];
+ r2 = [sp++];
+
+.globl _ftrace_stub
+_ftrace_stub:
+ rts;
+ENDPROC(_ftrace_caller)
+
+#else
+
+/* See documentation for _ftrace_caller */
+ENTRY(__mcount)
+# ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
+ /* optional micro optimization: return if stopped */
+ p1.l = _function_trace_stop;
+ p1.h = _function_trace_stop;
+ r3 = [p1];
+ cc = r3 == 0;
+ if ! cc jump _ftrace_stub (bp);
+# endif
+
+ /* save third function arg early so we can do testing below */
+ [--sp] = r2;
+
+ /* load the function pointer to the tracer */
+ p0.l = _ftrace_trace_function;
+ p0.h = _ftrace_trace_function;
+ r3 = [p0];
+
+ /* optional micro optimization: don't call the stub tracer */
+ r2.l = _ftrace_stub;
+ r2.h = _ftrace_stub;
+ cc = r2 == r3;
+ if ! cc jump .Ldo_trace;
+
+# ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ /* if the ftrace_graph_return function pointer is not set to
+ * the ftrace_stub entry, call prepare_ftrace_return().
+ */
+ p0.l = _ftrace_graph_return;
+ p0.h = _ftrace_graph_return;
+ r3 = [p0];
+ cc = r2 == r3;
+ if ! cc jump _ftrace_graph_caller;
+
+ /* similarly, if the ftrace_graph_entry function pointer is not
+ * set to the ftrace_graph_entry_stub entry, ...
+ */
+ p0.l = _ftrace_graph_entry;
+ p0.h = _ftrace_graph_entry;
+ r2.l = _ftrace_graph_entry_stub;
+ r2.h = _ftrace_graph_entry_stub;
+ r3 = [p0];
+ cc = r2 == r3;
+ if ! cc jump _ftrace_graph_caller;
+# endif
+
+ r2 = [sp++];
+ rts;
+
+.Ldo_trace:
+
+ /* save first/second function arg and the return register */
+ [--sp] = r0;
+ [--sp] = r1;
+ [--sp] = rets;
+
+ /* setup the tracer function */
+ p0 = r3;
+
+ /* function_trace_call(unsigned long ip, unsigned long parent_ip):
+ * ip: this point was called by ...
+ * parent_ip: ... this function
+ * the ip itself will need adjusting for the mcount call
+ */
+ r0 = rets;
+ r1 = [sp + 16]; /* skip the 4 local regs on stack */
+ r0 += -MCOUNT_INSN_SIZE;
+
+ /* call the tracer */
+ call (p0);
+
+ /* restore state and get out of dodge */
+.Lfinish_trace:
+ rets = [sp++];
+ r1 = [sp++];
+ r0 = [sp++];
+ r2 = [sp++];
+
+.globl _ftrace_stub
+_ftrace_stub:
+ rts;
+ENDPROC(__mcount)
+
+#endif
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+/* The prepare_ftrace_return() function is similar to the trace function
+ * except it takes a pointer to the location of the frompc. This is so
+ * the prepare_ftrace_return() can hijack it temporarily for probing
+ * purposes.
+ */
+ENTRY(_ftrace_graph_caller)
+# ifndef CONFIG_DYNAMIC_FTRACE
+ /* save first/second function arg and the return register */
+ [--sp] = r0;
+ [--sp] = r1;
+ [--sp] = rets;
+
+ /* prepare_ftrace_return(parent, self_addr, frame_pointer) */
+ r0 = sp; /* unsigned long *parent */
+ r1 = rets; /* unsigned long self_addr */
+# else
+ r0 = sp; /* unsigned long *parent */
+ r1 = [sp]; /* unsigned long self_addr */
+# endif
+# ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST
+ r2 = fp; /* unsigned long frame_pointer */
+# endif
+ r0 += 16; /* skip the 4 local regs on stack */
+ r1 += -MCOUNT_INSN_SIZE;
+ call _prepare_ftrace_return;
+
+ jump .Lfinish_trace;
+ENDPROC(_ftrace_graph_caller)
+
+/* Undo the rewrite caused by ftrace_graph_caller(). The common function
+ * ftrace_return_to_handler() will return the original rets so we can
+ * restore it and be on our way.
+ */
+ENTRY(_return_to_handler)
+ /* make sure original return values are saved */
+ [--sp] = p0;
+ [--sp] = r0;
+ [--sp] = r1;
+
+ /* get original return address */
+# ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST
+	r0 = fp;	/* frame pointer -- Blackfin is sane, so this FP test is normally omitted */
+# endif
+ call _ftrace_return_to_handler;
+ rets = r0;
+
+ /* anomaly 05000371 - make sure we have at least three instructions
+ * between rets setting and the return
+ */
+ r1 = [sp++];
+ r0 = [sp++];
+ p0 = [sp++];
+ rts;
+ENDPROC(_return_to_handler)
+#endif
diff --git a/arch/blackfin/kernel/ftrace.c b/arch/blackfin/kernel/ftrace.c
new file mode 100644
index 00000000..9277905b
--- /dev/null
+++ b/arch/blackfin/kernel/ftrace.c
@@ -0,0 +1,128 @@
+/*
+ * ftrace graph code
+ *
+ * Copyright (C) 2009-2010 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/ftrace.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+#include <linux/atomic.h>
+#include <asm/cacheflush.h>
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+static const unsigned char mnop[] = {
+ 0x03, 0xc0, 0x00, 0x18, /* MNOP; */
+ 0x03, 0xc0, 0x00, 0x18, /* MNOP; */
+};
+
+static void bfin_make_pcrel24(unsigned char *insn, unsigned long src,
+ unsigned long dst)
+{
+ uint32_t pcrel = (dst - src) >> 1;
+ insn[0] = pcrel >> 16;
+ insn[1] = 0xe3;
+ insn[2] = pcrel;
+ insn[3] = pcrel >> 8;
+}
+#define bfin_make_pcrel24(insn, src, dst) bfin_make_pcrel24(insn, src, (unsigned long)(dst))
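+
+/*
+ * Worked example (illustrative numbers): src = 0x1000, dst = 0x2000
+ * gives pcrel = 0x800 (the branch offset in 16-bit units), so insn[]
+ * becomes { 0x00, 0xe3, 0x00, 0x08 } -- the 32-bit CALL opcode with
+ * its 24-bit half-word offset split across bytes 0, 2, and 3.
+ */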
+
+static int ftrace_modify_code(unsigned long ip, const unsigned char *code,
+ unsigned long len)
+{
+ int ret = probe_kernel_write((void *)ip, (void *)code, len);
+ flush_icache_range(ip, ip + len);
+ return ret;
+}
+
+int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
+ unsigned long addr)
+{
+ /* Turn the mcount call site into two MNOPs as those are 32bit insns */
+ return ftrace_modify_code(rec->ip, mnop, sizeof(mnop));
+}
+
+int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+ /* Restore the mcount call site */
+ unsigned char call[8];
+ call[0] = 0x67; /* [--SP] = RETS; */
+ call[1] = 0x01;
+ bfin_make_pcrel24(&call[2], rec->ip + 2, addr);
+ call[6] = 0x27; /* RETS = [SP++]; */
+ call[7] = 0x01;
+ return ftrace_modify_code(rec->ip, call, sizeof(call));
+}
+
+int ftrace_update_ftrace_func(ftrace_func_t func)
+{
+ unsigned char call[4];
+ unsigned long ip = (unsigned long)&ftrace_call;
+ bfin_make_pcrel24(call, ip, func);
+ return ftrace_modify_code(ip, call, sizeof(call));
+}
+
+int __init ftrace_dyn_arch_init(void *data)
+{
+ /* return value is done indirectly via data */
+ *(unsigned long *)data = 0;
+
+ return 0;
+}
+
+#endif
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+# ifdef CONFIG_DYNAMIC_FTRACE
+
+extern void ftrace_graph_call(void);
+
+int ftrace_enable_ftrace_graph_caller(void)
+{
+ unsigned long ip = (unsigned long)&ftrace_graph_call;
+ uint16_t jump_pcrel12 = ((unsigned long)&ftrace_graph_caller - ip) >> 1;
+ jump_pcrel12 |= 0x2000;
+ return ftrace_modify_code(ip, (void *)&jump_pcrel12, sizeof(jump_pcrel12));
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+ return ftrace_modify_code((unsigned long)&ftrace_graph_call, empty_zero_page, 2);
+}
+
+# endif
+
+/*
+ * Hook the return address and push it in the stack of return addrs
+ * in current thread info.
+ */
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
+ unsigned long frame_pointer)
+{
+ struct ftrace_graph_ent trace;
+ unsigned long return_hooker = (unsigned long)&return_to_handler;
+
+ if (unlikely(atomic_read(&current->tracing_graph_pause)))
+ return;
+
+ if (ftrace_push_return_trace(*parent, self_addr, &trace.depth,
+ frame_pointer) == -EBUSY)
+ return;
+
+ trace.func = self_addr;
+
+ /* Only trace if the calling function expects to */
+ if (!ftrace_graph_entry(&trace)) {
+ current->curr_ret_stack--;
+ return;
+ }
+
+	/* all is well in the world! hijack RETS ... */
+ *parent = return_hooker;
+}
+
+#endif
diff --git a/arch/blackfin/kernel/gptimers.c b/arch/blackfin/kernel/gptimers.c
new file mode 100644
index 00000000..06459f4b
--- /dev/null
+++ b/arch/blackfin/kernel/gptimers.c
@@ -0,0 +1,300 @@
+/*
+ * gptimers.c - Blackfin General Purpose Timer core API
+ *
+ * Copyright (c) 2005-2008 Analog Devices Inc.
+ * Copyright (C) 2005 John DeHority
+ * Copyright (C) 2006 Hella Aglaia GmbH (awe@aglaia-gmbh.de)
+ *
+ * Licensed under the GPLv2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/io.h>
+
+#include <asm/blackfin.h>
+#include <asm/gptimers.h>
+
+#ifdef DEBUG
+# define tassert(expr) \
+	if (!(expr)) \
+		printk(KERN_DEBUG "%s:%s:%i: Assertion failed: " #expr "\n", __FILE__, __func__, __LINE__);
+#else
+# define tassert(expr)
+#endif
+
+#define BFIN_TIMER_NUM_GROUP (BFIN_TIMER_OCTET(MAX_BLACKFIN_GPTIMERS - 1) + 1)
+
+static struct bfin_gptimer_regs * const timer_regs[MAX_BLACKFIN_GPTIMERS] =
+{
+ (void *)TIMER0_CONFIG,
+ (void *)TIMER1_CONFIG,
+ (void *)TIMER2_CONFIG,
+#if (MAX_BLACKFIN_GPTIMERS > 3)
+ (void *)TIMER3_CONFIG,
+ (void *)TIMER4_CONFIG,
+ (void *)TIMER5_CONFIG,
+ (void *)TIMER6_CONFIG,
+ (void *)TIMER7_CONFIG,
+# if (MAX_BLACKFIN_GPTIMERS > 8)
+ (void *)TIMER8_CONFIG,
+ (void *)TIMER9_CONFIG,
+ (void *)TIMER10_CONFIG,
+# if (MAX_BLACKFIN_GPTIMERS > 11)
+ (void *)TIMER11_CONFIG,
+# endif
+# endif
+#endif
+};
+
+static struct bfin_gptimer_group_regs * const group_regs[BFIN_TIMER_NUM_GROUP] =
+{
+ (void *)TIMER0_GROUP_REG,
+#if (MAX_BLACKFIN_GPTIMERS > 8)
+ (void *)TIMER8_GROUP_REG,
+#endif
+};
+
+static uint32_t const trun_mask[MAX_BLACKFIN_GPTIMERS] =
+{
+ TIMER_STATUS_TRUN0,
+ TIMER_STATUS_TRUN1,
+ TIMER_STATUS_TRUN2,
+#if (MAX_BLACKFIN_GPTIMERS > 3)
+ TIMER_STATUS_TRUN3,
+ TIMER_STATUS_TRUN4,
+ TIMER_STATUS_TRUN5,
+ TIMER_STATUS_TRUN6,
+ TIMER_STATUS_TRUN7,
+# if (MAX_BLACKFIN_GPTIMERS > 8)
+ TIMER_STATUS_TRUN8,
+ TIMER_STATUS_TRUN9,
+ TIMER_STATUS_TRUN10,
+# if (MAX_BLACKFIN_GPTIMERS > 11)
+ TIMER_STATUS_TRUN11,
+# endif
+# endif
+#endif
+};
+
+static uint32_t const tovf_mask[MAX_BLACKFIN_GPTIMERS] =
+{
+ TIMER_STATUS_TOVF0,
+ TIMER_STATUS_TOVF1,
+ TIMER_STATUS_TOVF2,
+#if (MAX_BLACKFIN_GPTIMERS > 3)
+ TIMER_STATUS_TOVF3,
+ TIMER_STATUS_TOVF4,
+ TIMER_STATUS_TOVF5,
+ TIMER_STATUS_TOVF6,
+ TIMER_STATUS_TOVF7,
+# if (MAX_BLACKFIN_GPTIMERS > 8)
+ TIMER_STATUS_TOVF8,
+ TIMER_STATUS_TOVF9,
+ TIMER_STATUS_TOVF10,
+# if (MAX_BLACKFIN_GPTIMERS > 11)
+ TIMER_STATUS_TOVF11,
+# endif
+# endif
+#endif
+};
+
+static uint32_t const timil_mask[MAX_BLACKFIN_GPTIMERS] =
+{
+ TIMER_STATUS_TIMIL0,
+ TIMER_STATUS_TIMIL1,
+ TIMER_STATUS_TIMIL2,
+#if (MAX_BLACKFIN_GPTIMERS > 3)
+ TIMER_STATUS_TIMIL3,
+ TIMER_STATUS_TIMIL4,
+ TIMER_STATUS_TIMIL5,
+ TIMER_STATUS_TIMIL6,
+ TIMER_STATUS_TIMIL7,
+# if (MAX_BLACKFIN_GPTIMERS > 8)
+ TIMER_STATUS_TIMIL8,
+ TIMER_STATUS_TIMIL9,
+ TIMER_STATUS_TIMIL10,
+# if (MAX_BLACKFIN_GPTIMERS > 11)
+ TIMER_STATUS_TIMIL11,
+# endif
+# endif
+#endif
+};
+
+void set_gptimer_pwidth(unsigned int timer_id, uint32_t value)
+{
+ tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
+ bfin_write(&timer_regs[timer_id]->width, value);
+ SSYNC();
+}
+EXPORT_SYMBOL(set_gptimer_pwidth);
+
+uint32_t get_gptimer_pwidth(unsigned int timer_id)
+{
+ tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
+ return bfin_read(&timer_regs[timer_id]->width);
+}
+EXPORT_SYMBOL(get_gptimer_pwidth);
+
+void set_gptimer_period(unsigned int timer_id, uint32_t period)
+{
+ tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
+ bfin_write(&timer_regs[timer_id]->period, period);
+ SSYNC();
+}
+EXPORT_SYMBOL(set_gptimer_period);
+
+uint32_t get_gptimer_period(unsigned int timer_id)
+{
+ tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
+ return bfin_read(&timer_regs[timer_id]->period);
+}
+EXPORT_SYMBOL(get_gptimer_period);
+
+uint32_t get_gptimer_count(unsigned int timer_id)
+{
+ tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
+ return bfin_read(&timer_regs[timer_id]->counter);
+}
+EXPORT_SYMBOL(get_gptimer_count);
+
+uint32_t get_gptimer_status(unsigned int group)
+{
+ tassert(group < BFIN_TIMER_NUM_GROUP);
+ return bfin_read(&group_regs[group]->status);
+}
+EXPORT_SYMBOL(get_gptimer_status);
+
+void set_gptimer_status(unsigned int group, uint32_t value)
+{
+ tassert(group < BFIN_TIMER_NUM_GROUP);
+ bfin_write(&group_regs[group]->status, value);
+ SSYNC();
+}
+EXPORT_SYMBOL(set_gptimer_status);
+
+static uint32_t read_gptimer_status(unsigned int timer_id)
+{
+ return bfin_read(&group_regs[BFIN_TIMER_OCTET(timer_id)]->status);
+}
+
+int get_gptimer_intr(unsigned int timer_id)
+{
+ tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
+ return !!(read_gptimer_status(timer_id) & timil_mask[timer_id]);
+}
+EXPORT_SYMBOL(get_gptimer_intr);
+
+void clear_gptimer_intr(unsigned int timer_id)
+{
+ tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
+ bfin_write(&group_regs[BFIN_TIMER_OCTET(timer_id)]->status, timil_mask[timer_id]);
+}
+EXPORT_SYMBOL(clear_gptimer_intr);
+
+int get_gptimer_over(unsigned int timer_id)
+{
+ tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
+ return !!(read_gptimer_status(timer_id) & tovf_mask[timer_id]);
+}
+EXPORT_SYMBOL(get_gptimer_over);
+
+void clear_gptimer_over(unsigned int timer_id)
+{
+ tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
+ bfin_write(&group_regs[BFIN_TIMER_OCTET(timer_id)]->status, tovf_mask[timer_id]);
+}
+EXPORT_SYMBOL(clear_gptimer_over);
+
+int get_gptimer_run(unsigned int timer_id)
+{
+ tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
+ return !!(read_gptimer_status(timer_id) & trun_mask[timer_id]);
+}
+EXPORT_SYMBOL(get_gptimer_run);
+
+void set_gptimer_config(unsigned int timer_id, uint16_t config)
+{
+ tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
+ bfin_write(&timer_regs[timer_id]->config, config);
+ SSYNC();
+}
+EXPORT_SYMBOL(set_gptimer_config);
+
+uint16_t get_gptimer_config(unsigned int timer_id)
+{
+ tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
+ return bfin_read(&timer_regs[timer_id]->config);
+}
+EXPORT_SYMBOL(get_gptimer_config);
+
+void enable_gptimers(uint16_t mask)
+{
+ int i;
+ tassert((mask & ~BLACKFIN_GPTIMER_IDMASK) == 0);
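+	/* the 16-bit id mask is consumed one byte per group of eight timers */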
+ for (i = 0; i < BFIN_TIMER_NUM_GROUP; ++i) {
+ bfin_write(&group_regs[i]->enable, mask & 0xFF);
+ mask >>= 8;
+ }
+ SSYNC();
+}
+EXPORT_SYMBOL(enable_gptimers);
+
+static void _disable_gptimers(uint16_t mask)
+{
+ int i;
+ uint16_t m = mask;
+ tassert((mask & ~BLACKFIN_GPTIMER_IDMASK) == 0);
+ for (i = 0; i < BFIN_TIMER_NUM_GROUP; ++i) {
+ bfin_write(&group_regs[i]->disable, m & 0xFF);
+ m >>= 8;
+ }
+}
+
+void disable_gptimers(uint16_t mask)
+{
+ int i;
+ _disable_gptimers(mask);
+ for (i = 0; i < MAX_BLACKFIN_GPTIMERS; ++i)
+ if (mask & (1 << i))
+ bfin_write(&group_regs[BFIN_TIMER_OCTET(i)]->status, trun_mask[i]);
+ SSYNC();
+}
+EXPORT_SYMBOL(disable_gptimers);
+
+void disable_gptimers_sync(uint16_t mask)
+{
+ _disable_gptimers(mask);
+ SSYNC();
+}
+EXPORT_SYMBOL(disable_gptimers_sync);
+
+void set_gptimer_pulse_hi(unsigned int timer_id)
+{
+ tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
+ bfin_write_or(&timer_regs[timer_id]->config, TIMER_PULSE_HI);
+ SSYNC();
+}
+EXPORT_SYMBOL(set_gptimer_pulse_hi);
+
+void clear_gptimer_pulse_hi(unsigned int timer_id)
+{
+ tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
+ bfin_write_and(&timer_regs[timer_id]->config, ~TIMER_PULSE_HI);
+ SSYNC();
+}
+EXPORT_SYMBOL(clear_gptimer_pulse_hi);
+
+uint16_t get_enabled_gptimers(void)
+{
+ int i;
+ uint16_t result = 0;
+ for (i = 0; i < BFIN_TIMER_NUM_GROUP; ++i)
+ result |= (bfin_read(&group_regs[i]->enable) << (i << 3));
+ return result;
+}
+EXPORT_SYMBOL(get_enabled_gptimers);
+
+MODULE_AUTHOR("Axel Weiss (awe@aglaia-gmbh.de)");
+MODULE_DESCRIPTION("Blackfin General Purpose Timers API");
+MODULE_LICENSE("GPL");
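
A driver would typically use the API above along these lines. This is a hedged sketch, assuming the TIMER0_id/TIMER0bit identifiers and the TIMER_MODE_PWM, TIMER_PERIOD_CNT and TIMER_PULSE_HI config bits from <asm/gptimers.h>:

	#include <asm/gptimers.h>

	/* run timer 0 as a free-running PWM output at 50% duty cycle */
	static void gptimer_pwm_demo(void)
	{
		set_gptimer_config(TIMER0_id,
				   TIMER_MODE_PWM | TIMER_PERIOD_CNT | TIMER_PULSE_HI);
		set_gptimer_period(TIMER0_id, 1000);	/* period in SCLK ticks */
		set_gptimer_pwidth(TIMER0_id, 500);	/* high for half the period */
		enable_gptimers(TIMER0bit);
	}

	static void gptimer_pwm_demo_stop(void)
	{
		disable_gptimers(TIMER0bit);
		if (get_gptimer_intr(TIMER0_id))
			clear_gptimer_intr(TIMER0_id);
	}
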
diff --git a/arch/blackfin/kernel/init_task.c b/arch/blackfin/kernel/init_task.c
new file mode 100644
index 00000000..d3970e8a
--- /dev/null
+++ b/arch/blackfin/kernel/init_task.c
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2004-2009 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later
+ */
+
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/init_task.h>
+#include <linux/mqueue.h>
+#include <linux/fs.h>
+
+static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
+static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
+/*
+ * Initial task structure.
+ *
+ * All other task structs will be allocated on slabs in fork.c
+ */
+struct task_struct init_task = INIT_TASK(init_task);
+EXPORT_SYMBOL(init_task);
+
+/*
+ * Initial thread structure.
+ *
+ * We need to make sure that this is 8192-byte aligned due to the
+ * way process stacks are handled. This is done by having a special
+ * "init_task" linker map entry.
+ */
+union thread_union init_thread_union __init_task_data = {
+	INIT_THREAD_INFO(init_task)
+};
diff --git a/arch/blackfin/kernel/ipipe.c b/arch/blackfin/kernel/ipipe.c
new file mode 100644
index 00000000..f657b381
--- /dev/null
+++ b/arch/blackfin/kernel/ipipe.c
@@ -0,0 +1,397 @@
+/* -*- linux-c -*-
+ * linux/arch/blackfin/kernel/ipipe.c
+ *
+ * Copyright (C) 2005-2007 Philippe Gerum.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ * USA; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Architecture-dependent I-pipe support for the Blackfin.
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/percpu.h>
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include <linux/kthread.h>
+#include <linux/unistd.h>
+#include <linux/io.h>
+#include <linux/atomic.h>
+#include <asm/irq_handler.h>
+
+DEFINE_PER_CPU(struct pt_regs, __ipipe_tick_regs);
+
+asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs);
+
+static void __ipipe_no_irqtail(void);
+
+unsigned long __ipipe_irq_tail_hook = (unsigned long)&__ipipe_no_irqtail;
+EXPORT_SYMBOL(__ipipe_irq_tail_hook);
+
+unsigned long __ipipe_core_clock;
+EXPORT_SYMBOL(__ipipe_core_clock);
+
+unsigned long __ipipe_freq_scale;
+EXPORT_SYMBOL(__ipipe_freq_scale);
+
+atomic_t __ipipe_irq_lvdepth[IVG15 + 1];
+
+unsigned long __ipipe_irq_lvmask = bfin_no_irqs;
+EXPORT_SYMBOL(__ipipe_irq_lvmask);
+
+static void __ipipe_ack_irq(unsigned irq, struct irq_desc *desc)
+{
+ desc->ipipe_ack(irq, desc);
+}
+
+/*
+ * __ipipe_enable_pipeline() -- We are running on the boot CPU, hw
+ * interrupts are off, and secondary CPUs are still lost in space.
+ */
+void __ipipe_enable_pipeline(void)
+{
+ unsigned irq;
+
+ __ipipe_core_clock = get_cclk(); /* Fetch this once. */
+ __ipipe_freq_scale = 1000000000UL / __ipipe_core_clock;
+
+ for (irq = 0; irq < NR_IRQS; ++irq)
+ ipipe_virtualize_irq(ipipe_root_domain,
+ irq,
+ (ipipe_irq_handler_t)&asm_do_IRQ,
+ NULL,
+ &__ipipe_ack_irq,
+ IPIPE_HANDLE_MASK | IPIPE_PASS_MASK);
+}
+
+/*
+ * __ipipe_handle_irq() -- IPIPE's generic IRQ handler. An optimistic
+ * interrupt protection log is maintained here for each domain. Hw
+ * interrupts are masked on entry.
+ */
+void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
+{
+ struct ipipe_percpu_domain_data *p = ipipe_root_cpudom_ptr();
+ struct ipipe_domain *this_domain, *next_domain;
+ struct list_head *head, *pos;
+ struct ipipe_irqdesc *idesc;
+ int m_ack, s = -1;
+
+ /*
+ * Software-triggered IRQs do not need any ack. The contents
+ * of the register frame should only be used when processing
+ * the timer interrupt, but not for handling any other
+ * interrupt.
+ */
+ m_ack = (regs == NULL || irq == IRQ_SYSTMR || irq == IRQ_CORETMR);
+ this_domain = __ipipe_current_domain;
+ idesc = &this_domain->irqs[irq];
+
+ if (unlikely(test_bit(IPIPE_STICKY_FLAG, &idesc->control)))
+ head = &this_domain->p_link;
+ else {
+ head = __ipipe_pipeline.next;
+ next_domain = list_entry(head, struct ipipe_domain, p_link);
+ idesc = &next_domain->irqs[irq];
+ if (likely(test_bit(IPIPE_WIRED_FLAG, &idesc->control))) {
+ if (!m_ack && idesc->acknowledge != NULL)
+ idesc->acknowledge(irq, irq_to_desc(irq));
+ if (test_bit(IPIPE_SYNCDEFER_FLAG, &p->status))
+ s = __test_and_set_bit(IPIPE_STALL_FLAG,
+ &p->status);
+ __ipipe_dispatch_wired(next_domain, irq);
+ goto out;
+ }
+ }
+
+ /* Ack the interrupt. */
+
+ pos = head;
+ while (pos != &__ipipe_pipeline) {
+ next_domain = list_entry(pos, struct ipipe_domain, p_link);
+ idesc = &next_domain->irqs[irq];
+ if (test_bit(IPIPE_HANDLE_FLAG, &idesc->control)) {
+ __ipipe_set_irq_pending(next_domain, irq);
+ if (!m_ack && idesc->acknowledge != NULL) {
+ idesc->acknowledge(irq, irq_to_desc(irq));
+ m_ack = 1;
+ }
+ }
+ if (!test_bit(IPIPE_PASS_FLAG, &idesc->control))
+ break;
+ pos = next_domain->p_link.next;
+ }
+
+ /*
+ * Now walk the pipeline, yielding control to the highest
+ * priority domain that has pending interrupt(s) or
+ * immediately to the current domain if the interrupt has been
+ * marked as 'sticky'. This search does not go beyond the
+ * current domain in the pipeline. We also enforce the
+ * additional root stage lock (blackfin-specific).
+ */
+ if (test_bit(IPIPE_SYNCDEFER_FLAG, &p->status))
+ s = __test_and_set_bit(IPIPE_STALL_FLAG, &p->status);
+
+ /*
+ * If the interrupt preempted the head domain, then do not
+ * even try to walk the pipeline, unless an interrupt is
+ * pending for it.
+ */
+ if (test_bit(IPIPE_AHEAD_FLAG, &this_domain->flags) &&
+ !__ipipe_ipending_p(ipipe_head_cpudom_ptr()))
+ goto out;
+
+ __ipipe_walk_pipeline(head);
+out:
+ if (!s)
+ __clear_bit(IPIPE_STALL_FLAG, &p->status);
+}
+
+void __ipipe_enable_irqdesc(struct ipipe_domain *ipd, unsigned irq)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+ int prio = __ipipe_get_irq_priority(irq);
+
+ desc->depth = 0;
+ if (ipd != &ipipe_root &&
+ atomic_inc_return(&__ipipe_irq_lvdepth[prio]) == 1)
+ __set_bit(prio, &__ipipe_irq_lvmask);
+}
+EXPORT_SYMBOL(__ipipe_enable_irqdesc);
+
+void __ipipe_disable_irqdesc(struct ipipe_domain *ipd, unsigned irq)
+{
+ int prio = __ipipe_get_irq_priority(irq);
+
+ if (ipd != &ipipe_root &&
+ atomic_dec_and_test(&__ipipe_irq_lvdepth[prio]))
+ __clear_bit(prio, &__ipipe_irq_lvmask);
+}
+EXPORT_SYMBOL(__ipipe_disable_irqdesc);
+
+asmlinkage int __ipipe_syscall_root(struct pt_regs *regs)
+{
+ struct ipipe_percpu_domain_data *p;
+ void (*hook)(void);
+ int ret;
+
+ WARN_ON_ONCE(irqs_disabled_hw());
+
+ /*
+ * We need to run the IRQ tail hook each time we intercept a
+ * syscall, because we know that important operations might be
+ * pending there (e.g. Xenomai deferred rescheduling).
+ */
+ hook = (__typeof__(hook))__ipipe_irq_tail_hook;
+ hook();
+
+ /*
+ * This routine either returns:
+ * 0 -- if the syscall is to be passed to Linux;
+ * >0 -- if the syscall should not be passed to Linux, and no
+ * tail work should be performed;
+ * <0 -- if the syscall should not be passed to Linux but the
+ * tail work has to be performed (for handling signals etc).
+ */
+
+ if (!__ipipe_syscall_watched_p(current, regs->orig_p0) ||
+ !__ipipe_event_monitored_p(IPIPE_EVENT_SYSCALL))
+ return 0;
+
+ ret = __ipipe_dispatch_event(IPIPE_EVENT_SYSCALL, regs);
+
+ hard_local_irq_disable();
+
+ /*
+ * This is the end of the syscall path, so we may
+ * safely assume a valid Linux task stack here.
+ */
+ if (current->ipipe_flags & PF_EVTRET) {
+ current->ipipe_flags &= ~PF_EVTRET;
+ __ipipe_dispatch_event(IPIPE_EVENT_RETURN, regs);
+ }
+
+ if (!__ipipe_root_domain_p)
+ ret = -1;
+ else {
+ p = ipipe_root_cpudom_ptr();
+ if (__ipipe_ipending_p(p))
+ __ipipe_sync_pipeline();
+ }
+
+ hard_local_irq_enable();
+
+ return -ret;
+}
+
+static void __ipipe_no_irqtail(void)
+{
+}
+
+int ipipe_get_sysinfo(struct ipipe_sysinfo *info)
+{
+ info->sys_nr_cpus = num_online_cpus();
+ info->sys_cpu_freq = ipipe_cpu_freq();
+ info->sys_hrtimer_irq = IPIPE_TIMER_IRQ;
+ info->sys_hrtimer_freq = __ipipe_core_clock;
+ info->sys_hrclock_freq = __ipipe_core_clock;
+
+ return 0;
+}
+
+/*
+ * ipipe_trigger_irq() -- Push the interrupt at front of the pipeline
+ * just like if it has been actually received from a hw source. Also
+ * works for virtual interrupts.
+ */
+int ipipe_trigger_irq(unsigned irq)
+{
+ unsigned long flags;
+
+#ifdef CONFIG_IPIPE_DEBUG
+ if (irq >= IPIPE_NR_IRQS ||
+ (ipipe_virtual_irq_p(irq)
+ && !test_bit(irq - IPIPE_VIRQ_BASE, &__ipipe_virtual_irq_map)))
+ return -EINVAL;
+#endif
+
+ flags = hard_local_irq_save();
+ __ipipe_handle_irq(irq, NULL);
+ hard_local_irq_restore(flags);
+
+ return 1;
+}
+
+asmlinkage void __ipipe_sync_root(void)
+{
+ void (*irq_tail_hook)(void) = (void (*)(void))__ipipe_irq_tail_hook;
+ struct ipipe_percpu_domain_data *p;
+ unsigned long flags;
+
+ BUG_ON(irqs_disabled());
+
+ flags = hard_local_irq_save();
+
+ if (irq_tail_hook)
+ irq_tail_hook();
+
+ clear_thread_flag(TIF_IRQ_SYNC);
+
+ p = ipipe_root_cpudom_ptr();
+ if (__ipipe_ipending_p(p))
+ __ipipe_sync_pipeline();
+
+ hard_local_irq_restore(flags);
+}
+
+void ___ipipe_sync_pipeline(void)
+{
+ if (__ipipe_root_domain_p &&
+ test_bit(IPIPE_SYNCDEFER_FLAG, &ipipe_root_cpudom_var(status)))
+ return;
+
+ __ipipe_sync_stage();
+}
+
+void __ipipe_disable_root_irqs_hw(void)
+{
+ /*
+ * This code is called by the ins{bwl} routines (see
+ * arch/blackfin/lib/ins.S), which are heavily used by the
+ * network stack. It masks all interrupts but those handled by
+ * non-root domains, so that we keep decent network transfer
+ * rates for Linux without inducing pathological jitter for
+ * the real-time domain.
+ */
+ bfin_sti(__ipipe_irq_lvmask);
+ __set_bit(IPIPE_STALL_FLAG, &ipipe_root_cpudom_var(status));
+}
+
+void __ipipe_enable_root_irqs_hw(void)
+{
+ __clear_bit(IPIPE_STALL_FLAG, &ipipe_root_cpudom_var(status));
+ bfin_sti(bfin_irq_flags);
+}
+
+/*
+ * We could use standard atomic bitops in the following root status
+ * manipulation routines, but let's prepare for SMP support in the
+ * same move, preventing CPU migration as required.
+ */
+void __ipipe_stall_root(void)
+{
+ unsigned long *p, flags;
+
+ flags = hard_local_irq_save();
+ p = &__ipipe_root_status;
+ __set_bit(IPIPE_STALL_FLAG, p);
+ hard_local_irq_restore(flags);
+}
+EXPORT_SYMBOL(__ipipe_stall_root);
+
+unsigned long __ipipe_test_and_stall_root(void)
+{
+ unsigned long *p, flags;
+ int x;
+
+ flags = hard_local_irq_save();
+ p = &__ipipe_root_status;
+ x = __test_and_set_bit(IPIPE_STALL_FLAG, p);
+ hard_local_irq_restore(flags);
+
+ return x;
+}
+EXPORT_SYMBOL(__ipipe_test_and_stall_root);
+
+unsigned long __ipipe_test_root(void)
+{
+ const unsigned long *p;
+ unsigned long flags;
+ int x;
+
+ flags = hard_local_irq_save_smp();
+ p = &__ipipe_root_status;
+ x = test_bit(IPIPE_STALL_FLAG, p);
+ hard_local_irq_restore_smp(flags);
+
+ return x;
+}
+EXPORT_SYMBOL(__ipipe_test_root);
+
+void __ipipe_lock_root(void)
+{
+ unsigned long *p, flags;
+
+ flags = hard_local_irq_save();
+ p = &__ipipe_root_status;
+ __set_bit(IPIPE_SYNCDEFER_FLAG, p);
+ hard_local_irq_restore(flags);
+}
+EXPORT_SYMBOL(__ipipe_lock_root);
+
+void __ipipe_unlock_root(void)
+{
+ unsigned long *p, flags;
+
+ flags = hard_local_irq_save();
+ p = &__ipipe_root_status;
+ __clear_bit(IPIPE_SYNCDEFER_FLAG, p);
+ hard_local_irq_restore(flags);
+}
+EXPORT_SYMBOL(__ipipe_unlock_root);
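
Because ipipe_trigger_irq() hands __ipipe_handle_irq() a NULL register frame, m_ack starts out true and no hardware acknowledge is attempted, which is what makes software injection safe. A minimal sketch of using it (the wrapper is hypothetical; the declaration is assumed to come from the I-pipe patch header):

	#include <linux/ipipe.h>	/* I-pipe patch header, assumed */

	/* inject an IRQ into the pipeline from software */
	static void inject_irq(unsigned irq)
	{
		if (ipipe_trigger_irq(irq) < 0)	/* only fails with CONFIG_IPIPE_DEBUG */
			pr_warning("cannot inject irq %u\n", irq);
	}
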
diff --git a/arch/blackfin/kernel/irqchip.c b/arch/blackfin/kernel/irqchip.c
new file mode 100644
index 00000000..ff3d7471
--- /dev/null
+++ b/arch/blackfin/kernel/irqchip.c
@@ -0,0 +1,153 @@
+/*
+ * Copyright 2005-2009 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later
+ */
+
+#include <linux/kernel_stat.h>
+#include <linux/module.h>
+#include <linux/random.h>
+#include <linux/seq_file.h>
+#include <linux/kallsyms.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <asm/irq_handler.h>
+#include <asm/trace.h>
+#include <asm/pda.h>
+
+static atomic_t irq_err_count;
+void ack_bad_irq(unsigned int irq)
+{
+ atomic_inc(&irq_err_count);
+ printk(KERN_ERR "IRQ: spurious interrupt %d\n", irq);
+}
+
+static struct irq_desc bad_irq_desc = {
+ .handle_irq = handle_bad_irq,
+ .lock = __RAW_SPIN_LOCK_UNLOCKED(bad_irq_desc.lock),
+};
+
+#ifdef CONFIG_CPUMASK_OFFSTACK
+/* We are not allocating a variable-sized bad_irq_desc.affinity */
+#error "Blackfin architecture does not support CONFIG_CPUMASK_OFFSTACK."
+#endif
+
+#ifdef CONFIG_PROC_FS
+int show_interrupts(struct seq_file *p, void *v)
+{
+ int i = *(loff_t *) v, j;
+ struct irqaction *action;
+ unsigned long flags;
+
+ if (i < NR_IRQS) {
+ struct irq_desc *desc = irq_to_desc(i);
+
+ raw_spin_lock_irqsave(&desc->lock, flags);
+ action = desc->action;
+ if (!action)
+ goto skip;
+ seq_printf(p, "%3d: ", i);
+ for_each_online_cpu(j)
+ seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
+ seq_printf(p, " %8s", irq_desc_get_chip(desc)->name);
+ seq_printf(p, " %s", action->name);
+ for (action = action->next; action; action = action->next)
+ seq_printf(p, " %s", action->name);
+
+ seq_putc(p, '\n');
+ skip:
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+ } else if (i == NR_IRQS) {
+ seq_printf(p, "NMI: ");
+ for_each_online_cpu(j)
+ seq_printf(p, "%10u ", cpu_pda[j].__nmi_count);
+ seq_printf(p, " CORE Non Maskable Interrupt\n");
+ seq_printf(p, "Err: %10u\n", atomic_read(&irq_err_count));
+ }
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_DEBUG_STACKOVERFLOW
+static void check_stack_overflow(int irq)
+{
+ /* Debugging check for stack overflow: is there less than STACK_WARN free? */
+ long sp = __get_SP() & (THREAD_SIZE - 1);
+
+ if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
+ dump_stack();
+		pr_emerg("irq%i: possible stack overflow, only %ld bytes free\n",
+			 irq, sp - sizeof(struct thread_info));
+ }
+}
+#else
+static inline void check_stack_overflow(int irq) { }
+#endif
+
+#ifndef CONFIG_IPIPE
+static void maybe_lower_to_irq14(void)
+{
+ unsigned short pending, other_ints;
+
+ /*
+ * If we're the only interrupt running (ignoring IRQ15 which
+ * is for syscalls), lower our priority to IRQ14 so that
+ * softirqs run at that level. If there's another,
+ * lower-level interrupt, irq_exit will defer softirqs to
+ * that. If the interrupt pipeline is enabled, we are already
+ * running at IRQ14 priority, so we don't need this code.
+ */
+ CSYNC();
+ pending = bfin_read_IPEND() & ~0x8000;
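+	/* pending & (pending - 1) clears the lowest set bit, so this is
+	 * nonzero iff some other interrupt level is also pending */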
+ other_ints = pending & (pending - 1);
+ if (other_ints == 0)
+ lower_to_irq14();
+}
+#else
+static inline void maybe_lower_to_irq14(void) { }
+#endif
+
+/*
+ * do_IRQ handles all hardware IRQs. Decoded IRQs should not
+ * come via this function. Instead, they should provide their
+ * own 'handler'
+ */
+#ifdef CONFIG_DO_IRQ_L1
+__attribute__((l1_text))
+#endif
+asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
+{
+ struct pt_regs *old_regs = set_irq_regs(regs);
+
+ irq_enter();
+
+ check_stack_overflow(irq);
+
+ /*
+ * Some hardware gives randomly wrong interrupts. Rather
+ * than crashing, do something sensible.
+ */
+ if (irq >= NR_IRQS)
+ handle_bad_irq(irq, &bad_irq_desc);
+ else
+ generic_handle_irq(irq);
+
+ maybe_lower_to_irq14();
+
+ irq_exit();
+
+ set_irq_regs(old_regs);
+}
+
+void __init init_IRQ(void)
+{
+ init_arch_irq();
+
+#ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
+ /* Now that evt_ivhw is set up, turn this on */
+ trace_buff_offset = 0;
+ bfin_write_TBUFCTL(BFIN_TRACE_ON);
+ printk(KERN_INFO "Hardware Trace expanded to %ik\n",
+ 1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN);
+#endif
+}
diff --git a/arch/blackfin/kernel/kgdb.c b/arch/blackfin/kernel/kgdb.c
new file mode 100644
index 00000000..9b80b152
--- /dev/null
+++ b/arch/blackfin/kernel/kgdb.c
@@ -0,0 +1,480 @@
+/*
+ * arch/blackfin/kernel/kgdb.c - Blackfin kgdb pieces
+ *
+ * Copyright 2005-2008 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/ptrace.h> /* for linux pt_regs struct */
+#include <linux/kgdb.h>
+#include <linux/uaccess.h>
+
+void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
+{
+ gdb_regs[BFIN_R0] = regs->r0;
+ gdb_regs[BFIN_R1] = regs->r1;
+ gdb_regs[BFIN_R2] = regs->r2;
+ gdb_regs[BFIN_R3] = regs->r3;
+ gdb_regs[BFIN_R4] = regs->r4;
+ gdb_regs[BFIN_R5] = regs->r5;
+ gdb_regs[BFIN_R6] = regs->r6;
+ gdb_regs[BFIN_R7] = regs->r7;
+ gdb_regs[BFIN_P0] = regs->p0;
+ gdb_regs[BFIN_P1] = regs->p1;
+ gdb_regs[BFIN_P2] = regs->p2;
+ gdb_regs[BFIN_P3] = regs->p3;
+ gdb_regs[BFIN_P4] = regs->p4;
+ gdb_regs[BFIN_P5] = regs->p5;
+ gdb_regs[BFIN_SP] = regs->reserved;
+ gdb_regs[BFIN_FP] = regs->fp;
+ gdb_regs[BFIN_I0] = regs->i0;
+ gdb_regs[BFIN_I1] = regs->i1;
+ gdb_regs[BFIN_I2] = regs->i2;
+ gdb_regs[BFIN_I3] = regs->i3;
+ gdb_regs[BFIN_M0] = regs->m0;
+ gdb_regs[BFIN_M1] = regs->m1;
+ gdb_regs[BFIN_M2] = regs->m2;
+ gdb_regs[BFIN_M3] = regs->m3;
+ gdb_regs[BFIN_B0] = regs->b0;
+ gdb_regs[BFIN_B1] = regs->b1;
+ gdb_regs[BFIN_B2] = regs->b2;
+ gdb_regs[BFIN_B3] = regs->b3;
+ gdb_regs[BFIN_L0] = regs->l0;
+ gdb_regs[BFIN_L1] = regs->l1;
+ gdb_regs[BFIN_L2] = regs->l2;
+ gdb_regs[BFIN_L3] = regs->l3;
+ gdb_regs[BFIN_A0_DOT_X] = regs->a0x;
+ gdb_regs[BFIN_A0_DOT_W] = regs->a0w;
+ gdb_regs[BFIN_A1_DOT_X] = regs->a1x;
+ gdb_regs[BFIN_A1_DOT_W] = regs->a1w;
+ gdb_regs[BFIN_ASTAT] = regs->astat;
+ gdb_regs[BFIN_RETS] = regs->rets;
+ gdb_regs[BFIN_LC0] = regs->lc0;
+ gdb_regs[BFIN_LT0] = regs->lt0;
+ gdb_regs[BFIN_LB0] = regs->lb0;
+ gdb_regs[BFIN_LC1] = regs->lc1;
+ gdb_regs[BFIN_LT1] = regs->lt1;
+ gdb_regs[BFIN_LB1] = regs->lb1;
+ gdb_regs[BFIN_CYCLES] = 0;
+ gdb_regs[BFIN_CYCLES2] = 0;
+ gdb_regs[BFIN_USP] = regs->usp;
+ gdb_regs[BFIN_SEQSTAT] = regs->seqstat;
+ gdb_regs[BFIN_SYSCFG] = regs->syscfg;
+ gdb_regs[BFIN_RETI] = regs->pc;
+ gdb_regs[BFIN_RETX] = regs->retx;
+ gdb_regs[BFIN_RETN] = regs->retn;
+ gdb_regs[BFIN_RETE] = regs->rete;
+ gdb_regs[BFIN_PC] = regs->pc;
+ gdb_regs[BFIN_CC] = (regs->astat >> 5) & 1;
+ gdb_regs[BFIN_EXTRA1] = 0;
+ gdb_regs[BFIN_EXTRA2] = 0;
+ gdb_regs[BFIN_EXTRA3] = 0;
+ gdb_regs[BFIN_IPEND] = regs->ipend;
+}
+
+/*
+ * Extract the SP, PC and SEQSTAT values understandable by gdb from
+ * the values saved in the thread structure by switch_to.  If
+ * switch_to changes, change the following code appropriately.
+ */
+void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
+{
+ gdb_regs[BFIN_SP] = p->thread.ksp;
+ gdb_regs[BFIN_PC] = p->thread.pc;
+ gdb_regs[BFIN_SEQSTAT] = p->thread.seqstat;
+}
+
+void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
+{
+ regs->r0 = gdb_regs[BFIN_R0];
+ regs->r1 = gdb_regs[BFIN_R1];
+ regs->r2 = gdb_regs[BFIN_R2];
+ regs->r3 = gdb_regs[BFIN_R3];
+ regs->r4 = gdb_regs[BFIN_R4];
+ regs->r5 = gdb_regs[BFIN_R5];
+ regs->r6 = gdb_regs[BFIN_R6];
+ regs->r7 = gdb_regs[BFIN_R7];
+ regs->p0 = gdb_regs[BFIN_P0];
+ regs->p1 = gdb_regs[BFIN_P1];
+ regs->p2 = gdb_regs[BFIN_P2];
+ regs->p3 = gdb_regs[BFIN_P3];
+ regs->p4 = gdb_regs[BFIN_P4];
+ regs->p5 = gdb_regs[BFIN_P5];
+ regs->fp = gdb_regs[BFIN_FP];
+ regs->i0 = gdb_regs[BFIN_I0];
+ regs->i1 = gdb_regs[BFIN_I1];
+ regs->i2 = gdb_regs[BFIN_I2];
+ regs->i3 = gdb_regs[BFIN_I3];
+ regs->m0 = gdb_regs[BFIN_M0];
+ regs->m1 = gdb_regs[BFIN_M1];
+ regs->m2 = gdb_regs[BFIN_M2];
+ regs->m3 = gdb_regs[BFIN_M3];
+ regs->b0 = gdb_regs[BFIN_B0];
+ regs->b1 = gdb_regs[BFIN_B1];
+ regs->b2 = gdb_regs[BFIN_B2];
+ regs->b3 = gdb_regs[BFIN_B3];
+ regs->l0 = gdb_regs[BFIN_L0];
+ regs->l1 = gdb_regs[BFIN_L1];
+ regs->l2 = gdb_regs[BFIN_L2];
+ regs->l3 = gdb_regs[BFIN_L3];
+ regs->a0x = gdb_regs[BFIN_A0_DOT_X];
+ regs->a0w = gdb_regs[BFIN_A0_DOT_W];
+ regs->a1x = gdb_regs[BFIN_A1_DOT_X];
+ regs->a1w = gdb_regs[BFIN_A1_DOT_W];
+ regs->rets = gdb_regs[BFIN_RETS];
+ regs->lc0 = gdb_regs[BFIN_LC0];
+ regs->lt0 = gdb_regs[BFIN_LT0];
+ regs->lb0 = gdb_regs[BFIN_LB0];
+ regs->lc1 = gdb_regs[BFIN_LC1];
+ regs->lt1 = gdb_regs[BFIN_LT1];
+ regs->lb1 = gdb_regs[BFIN_LB1];
+ regs->usp = gdb_regs[BFIN_USP];
+ regs->syscfg = gdb_regs[BFIN_SYSCFG];
+ regs->retx = gdb_regs[BFIN_RETX];
+ regs->retn = gdb_regs[BFIN_RETN];
+ regs->rete = gdb_regs[BFIN_RETE];
+ regs->pc = gdb_regs[BFIN_PC];
+
+#if 0 /* can't change these */
+ regs->astat = gdb_regs[BFIN_ASTAT];
+ regs->seqstat = gdb_regs[BFIN_SEQSTAT];
+ regs->ipend = gdb_regs[BFIN_IPEND];
+#endif
+}
+
+static struct hw_breakpoint {
+ unsigned int occupied:1;
+ unsigned int skip:1;
+ unsigned int enabled:1;
+ unsigned int type:1;
+ unsigned int dataacc:2;
+ unsigned short count;
+ unsigned int addr;
+} breakinfo[HW_WATCHPOINT_NUM];
+
+static int bfin_set_hw_break(unsigned long addr, int len, enum kgdb_bptype type)
+{
+ int breakno;
+ int bfin_type;
+ int dataacc = 0;
+
+ switch (type) {
+ case BP_HARDWARE_BREAKPOINT:
+ bfin_type = TYPE_INST_WATCHPOINT;
+ break;
+ case BP_WRITE_WATCHPOINT:
+ dataacc = 1;
+ bfin_type = TYPE_DATA_WATCHPOINT;
+ break;
+ case BP_READ_WATCHPOINT:
+ dataacc = 2;
+ bfin_type = TYPE_DATA_WATCHPOINT;
+ break;
+ case BP_ACCESS_WATCHPOINT:
+ dataacc = 3;
+ bfin_type = TYPE_DATA_WATCHPOINT;
+ break;
+ default:
+ return -ENOSPC;
+ }
+
+	/* Because the hardware data watchpoint implemented in current
+	 * Blackfin parts cannot trigger an exception event the way the
+	 * hardware instruction watchpoint does, we ignore all data
+	 * watchpoints here.  They can be turned on easily once a future
+	 * Blackfin design supports this feature.
+	 */
+ for (breakno = 0; breakno < HW_INST_WATCHPOINT_NUM; breakno++)
+ if (bfin_type == breakinfo[breakno].type
+ && !breakinfo[breakno].occupied) {
+ breakinfo[breakno].occupied = 1;
+ breakinfo[breakno].skip = 0;
+ breakinfo[breakno].enabled = 1;
+ breakinfo[breakno].addr = addr;
+ breakinfo[breakno].dataacc = dataacc;
+ breakinfo[breakno].count = 0;
+ return 0;
+ }
+
+ return -ENOSPC;
+}
+
+static int bfin_remove_hw_break(unsigned long addr, int len, enum kgdb_bptype type)
+{
+ int breakno;
+ int bfin_type;
+
+ switch (type) {
+ case BP_HARDWARE_BREAKPOINT:
+ bfin_type = TYPE_INST_WATCHPOINT;
+ break;
+ case BP_WRITE_WATCHPOINT:
+ case BP_READ_WATCHPOINT:
+ case BP_ACCESS_WATCHPOINT:
+ bfin_type = TYPE_DATA_WATCHPOINT;
+ break;
+ default:
+ return 0;
+ }
+ for (breakno = 0; breakno < HW_WATCHPOINT_NUM; breakno++)
+ if (bfin_type == breakinfo[breakno].type
+ && breakinfo[breakno].occupied
+ && breakinfo[breakno].addr == addr) {
+ breakinfo[breakno].occupied = 0;
+ breakinfo[breakno].enabled = 0;
+ }
+
+ return 0;
+}
+
+static void bfin_remove_all_hw_break(void)
+{
+ int breakno;
+
+ memset(breakinfo, 0, sizeof(struct hw_breakpoint)*HW_WATCHPOINT_NUM);
+
+ for (breakno = 0; breakno < HW_INST_WATCHPOINT_NUM; breakno++)
+ breakinfo[breakno].type = TYPE_INST_WATCHPOINT;
+ for (; breakno < HW_WATCHPOINT_NUM; breakno++)
+ breakinfo[breakno].type = TYPE_DATA_WATCHPOINT;
+}
+
+static void bfin_correct_hw_break(void)
+{
+ int breakno;
+ unsigned int wpiactl = 0;
+ unsigned int wpdactl = 0;
+ int enable_wp = 0;
+
+ for (breakno = 0; breakno < HW_WATCHPOINT_NUM; breakno++)
+ if (breakinfo[breakno].enabled) {
+ enable_wp = 1;
+
+ switch (breakno) {
+ case 0:
+ wpiactl |= WPIAEN0|WPICNTEN0;
+ bfin_write_WPIA0(breakinfo[breakno].addr);
+ bfin_write_WPIACNT0(breakinfo[breakno].count
+ + breakinfo->skip);
+ break;
+ case 1:
+ wpiactl |= WPIAEN1|WPICNTEN1;
+ bfin_write_WPIA1(breakinfo[breakno].addr);
+ bfin_write_WPIACNT1(breakinfo[breakno].count
+ + breakinfo->skip);
+ break;
+ case 2:
+ wpiactl |= WPIAEN2|WPICNTEN2;
+ bfin_write_WPIA2(breakinfo[breakno].addr);
+ bfin_write_WPIACNT2(breakinfo[breakno].count
+ + breakinfo->skip);
+ break;
+ case 3:
+ wpiactl |= WPIAEN3|WPICNTEN3;
+ bfin_write_WPIA3(breakinfo[breakno].addr);
+ bfin_write_WPIACNT3(breakinfo[breakno].count
+ + breakinfo->skip);
+ break;
+ case 4:
+ wpiactl |= WPIAEN4|WPICNTEN4;
+ bfin_write_WPIA4(breakinfo[breakno].addr);
+ bfin_write_WPIACNT4(breakinfo[breakno].count
+ + breakinfo->skip);
+ break;
+ case 5:
+ wpiactl |= WPIAEN5|WPICNTEN5;
+ bfin_write_WPIA5(breakinfo[breakno].addr);
+ bfin_write_WPIACNT5(breakinfo[breakno].count
+ + breakinfo->skip);
+ break;
+ case 6:
+ wpdactl |= WPDAEN0|WPDCNTEN0|WPDSRC0;
+ wpdactl |= breakinfo[breakno].dataacc
+ << WPDACC0_OFFSET;
+ bfin_write_WPDA0(breakinfo[breakno].addr);
+ bfin_write_WPDACNT0(breakinfo[breakno].count
+ + breakinfo->skip);
+ break;
+ case 7:
+ wpdactl |= WPDAEN1|WPDCNTEN1|WPDSRC1;
+ wpdactl |= breakinfo[breakno].dataacc
+ << WPDACC1_OFFSET;
+ bfin_write_WPDA1(breakinfo[breakno].addr);
+ bfin_write_WPDACNT1(breakinfo[breakno].count
+ + breakinfo->skip);
+ break;
+ }
+ }
+
+	/* The WPPWR bit must be enabled first, before any other
+	 * WPIACTL and WPDACTL bits are set */
+ if (enable_wp) {
+ bfin_write_WPIACTL(WPPWR);
+ CSYNC();
+ bfin_write_WPIACTL(wpiactl|WPPWR);
+ bfin_write_WPDACTL(wpdactl);
+ CSYNC();
+ }
+}
+
+static void bfin_disable_hw_debug(struct pt_regs *regs)
+{
+ /* Disable hardware debugging while we are in kgdb */
+ bfin_write_WPIACTL(0);
+ bfin_write_WPDACTL(0);
+ CSYNC();
+}
+
+#ifdef CONFIG_SMP
+void kgdb_passive_cpu_callback(void *info)
+{
+ kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
+}
+
+void kgdb_roundup_cpus(unsigned long flags)
+{
+ smp_call_function(kgdb_passive_cpu_callback, NULL, 0);
+}
+
+void kgdb_roundup_cpu(int cpu, unsigned long flags)
+{
+ smp_call_function_single(cpu, kgdb_passive_cpu_callback, NULL, 0);
+}
+#endif
+
+#ifdef CONFIG_IPIPE
+static unsigned long kgdb_arch_imask;
+#endif
+
+void kgdb_post_primary_code(struct pt_regs *regs, int e_vector, int err_code)
+{
+ if (kgdb_single_step)
+ preempt_enable();
+
+#ifdef CONFIG_IPIPE
+ if (kgdb_arch_imask) {
+ cpu_pda[raw_smp_processor_id()].ex_imask = kgdb_arch_imask;
+ kgdb_arch_imask = 0;
+ }
+#endif
+}
+
+int kgdb_arch_handle_exception(int vector, int signo,
+ int err_code, char *remcom_in_buffer,
+ char *remcom_out_buffer,
+ struct pt_regs *regs)
+{
+ long addr;
+ char *ptr;
+ int newPC;
+ int i;
+
+ switch (remcom_in_buffer[0]) {
+ case 'c':
+ case 's':
+ if (kgdb_contthread && kgdb_contthread != current) {
+ strcpy(remcom_out_buffer, "E00");
+ break;
+ }
+
+ kgdb_contthread = NULL;
+
+ /* try to read optional parameter, pc unchanged if no parm */
+ ptr = &remcom_in_buffer[1];
+ if (kgdb_hex2long(&ptr, &addr)) {
+ regs->retx = addr;
+ }
+ newPC = regs->retx;
+
+ /* clear the trace bit */
+ regs->syscfg &= 0xfffffffe;
+
+ /* set the trace bit if we're stepping */
+ if (remcom_in_buffer[0] == 's') {
+ regs->syscfg |= 0x1;
+ kgdb_single_step = regs->ipend;
+ kgdb_single_step >>= 6;
+ for (i = 10; i > 0; i--, kgdb_single_step >>= 1)
+ if (kgdb_single_step & 1)
+ break;
+			/* i indicates the event priority of the currently
+			 * stopped instruction: user space is 0, IVG15 is 1,
+			 * IVTMR is 10.  kgdb_single_step > 0 means we are in
+			 * single-step mode.
+			 */
+ kgdb_single_step = i + 1;
+
+ preempt_disable();
+#ifdef CONFIG_IPIPE
+ kgdb_arch_imask = cpu_pda[raw_smp_processor_id()].ex_imask;
+ cpu_pda[raw_smp_processor_id()].ex_imask = 0;
+#endif
+ }
+
+ bfin_correct_hw_break();
+
+ return 0;
+ } /* switch */
+ return -1; /* this means that we do not want to exit from the handler */
+}
+
+struct kgdb_arch arch_kgdb_ops = {
+ .gdb_bpt_instr = {0xa1},
+ .flags = KGDB_HW_BREAKPOINT,
+ .set_hw_breakpoint = bfin_set_hw_break,
+ .remove_hw_breakpoint = bfin_remove_hw_break,
+ .disable_hw_break = bfin_disable_hw_debug,
+ .remove_all_hw_break = bfin_remove_all_hw_break,
+ .correct_hw_break = bfin_correct_hw_break,
+};
+
+#define IN_MEM(addr, size, l1_addr, l1_size) \
+({ \
+ unsigned long __addr = (unsigned long)(addr); \
+ (l1_size && __addr >= l1_addr && __addr + (size) <= l1_addr + l1_size); \
+})
+#define ASYNC_BANK_SIZE \
+ (ASYNC_BANK0_SIZE + ASYNC_BANK1_SIZE + \
+ ASYNC_BANK2_SIZE + ASYNC_BANK3_SIZE)
+
+int kgdb_validate_break_address(unsigned long addr)
+{
+ int cpu = raw_smp_processor_id();
+
+ if (addr >= 0x1000 && (addr + BREAK_INSTR_SIZE) <= physical_mem_end)
+ return 0;
+ if (IN_MEM(addr, BREAK_INSTR_SIZE, ASYNC_BANK0_BASE, ASYNC_BANK_SIZE))
+ return 0;
+ if (cpu == 0 && IN_MEM(addr, BREAK_INSTR_SIZE, L1_CODE_START, L1_CODE_LENGTH))
+ return 0;
+#ifdef CONFIG_SMP
+ else if (cpu == 1 && IN_MEM(addr, BREAK_INSTR_SIZE, COREB_L1_CODE_START, L1_CODE_LENGTH))
+ return 0;
+#endif
+ if (IN_MEM(addr, BREAK_INSTR_SIZE, L2_START, L2_LENGTH))
+ return 0;
+
+ return -EFAULT;
+}
+
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
+{
+ regs->retx = ip;
+}
+
+int kgdb_arch_init(void)
+{
+ kgdb_single_step = 0;
+#ifdef CONFIG_IPIPE
+ kgdb_arch_imask = 0;
+#endif
+
+ bfin_remove_all_hw_break();
+ return 0;
+}
+
+void kgdb_arch_exit(void)
+{
+}
diff --git a/arch/blackfin/kernel/kgdb_test.c b/arch/blackfin/kernel/kgdb_test.c
new file mode 100644
index 00000000..18ab004a
--- /dev/null
+++ b/arch/blackfin/kernel/kgdb_test.c
@@ -0,0 +1,114 @@
+/*
+ * arch/blackfin/kernel/kgdb_test.c - Blackfin kgdb tests
+ *
+ * Copyright 2005-2008 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/proc_fs.h>
+
+#include <asm/current.h>
+#include <asm/uaccess.h>
+
+#include <asm/blackfin.h>
+
+/* Symbols are here for kgdb test to poke directly */
+static char cmdline[256];
+static size_t len;
+
+#ifndef CONFIG_SMP
+static int num1 __attribute__((l1_data));
+
+void kgdb_l1_test(void) __attribute__((l1_text));
+
+void kgdb_l1_test(void)
+{
+ pr_alert("L1(before change) : data variable addr = 0x%p, data value is %d\n", &num1, num1);
+ pr_alert("L1 : code function addr = 0x%p\n", kgdb_l1_test);
+ num1 = num1 + 10;
+ pr_alert("L1(after change) : data variable addr = 0x%p, data value is %d\n", &num1, num1);
+}
+#endif
+
+#if L2_LENGTH
+
+static int num2 __attribute__((l2));
+void kgdb_l2_test(void) __attribute__((l2));
+
+void kgdb_l2_test(void)
+{
+ pr_alert("L2(before change) : data variable addr = 0x%p, data value is %d\n", &num2, num2);
+ pr_alert("L2 : code function addr = 0x%p\n", kgdb_l2_test);
+ num2 = num2 + 20;
+ pr_alert("L2(after change) : data variable addr = 0x%p, data value is %d\n", &num2, num2);
+}
+
+#endif
+
+noinline int kgdb_test(char *name, int len, int count, int z)
+{
+ pr_alert("kgdb name(%d): %s, %d, %d\n", len, name, count, z);
+ count = z;
+ return count;
+}
+
+static ssize_t
+kgdb_test_proc_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ kgdb_test("hello world!", 12, 0x55, 0x10);
+#ifndef CONFIG_SMP
+ kgdb_l1_test();
+#endif
+#if L2_LENGTH
+ kgdb_l2_test();
+#endif
+
+ return 0;
+}
+
+static ssize_t
+kgdb_test_proc_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *pos)
+{
+	len = min_t(size_t, 255, count);
+	if (copy_from_user(cmdline, buffer, len))
+		return -EFAULT;
+	cmdline[len] = 0;
+
+ return len;
+}
+
+static const struct file_operations kgdb_test_proc_fops = {
+ .owner = THIS_MODULE,
+ .read = kgdb_test_proc_read,
+ .write = kgdb_test_proc_write,
+ .llseek = noop_llseek,
+};
+
+static int __init kgdbtest_init(void)
+{
+ struct proc_dir_entry *entry;
+
+#if L2_LENGTH
+ num2 = 0;
+#endif
+
+ entry = proc_create("kgdbtest", 0, NULL, &kgdb_test_proc_fops);
+ if (entry == NULL)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void __exit kgdbtest_exit(void)
+{
+ remove_proc_entry("kgdbtest", NULL);
+}
+
+module_init(kgdbtest_init);
+module_exit(kgdbtest_exit);
+MODULE_LICENSE("GPL");
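
Exercising the hooks from userspace only requires reading the proc file; a hedged sketch in plain C (not part of this patch):

	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[16];
		int fd = open("/proc/kgdbtest", O_RDONLY);

		if (fd < 0)
			return 1;
		/* the read returns 0 bytes; its side effect is running
		 * kgdb_test() plus the L1/L2 variants compiled in above */
		read(fd, buf, sizeof(buf));
		close(fd);
		return 0;
	}
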
diff --git a/arch/blackfin/kernel/module.c b/arch/blackfin/kernel/module.c
new file mode 100644
index 00000000..4489efc5
--- /dev/null
+++ b/arch/blackfin/kernel/module.c
@@ -0,0 +1,289 @@
+/*
+ * Copyright 2004-2009 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later
+ */
+
+#define pr_fmt(fmt) "module %s: " fmt, mod->name
+
+#include <linux/moduleloader.h>
+#include <linux/elf.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <asm/dma.h>
+#include <asm/cacheflush.h>
+#include <asm/uaccess.h>
+
+/* Transfer the section to the L1 memory */
+int
+module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
+ char *secstrings, struct module *mod)
+{
+	/*
+	 * XXX: sechdrs are vmalloced in kernel/module.c and are vfreed
+	 * just after the module is loaded, so we hack around this by
+	 * stashing the only information we need in mod->arch so the
+	 * L1 I/D sram can be freed correctly later.
+	 * NOTE: this breaks the semantics of the mod->arch structure.
+	 */
+ Elf_Shdr *s, *sechdrs_end = sechdrs + hdr->e_shnum;
+ void *dest;
+
+ for (s = sechdrs; s < sechdrs_end; ++s) {
+ const char *shname = secstrings + s->sh_name;
+
+ if (s->sh_size == 0)
+ continue;
+
+ if (!strcmp(".l1.text", shname) ||
+ (!strcmp(".text", shname) &&
+ (hdr->e_flags & EF_BFIN_CODE_IN_L1))) {
+
+ dest = l1_inst_sram_alloc(s->sh_size);
+ mod->arch.text_l1 = dest;
+ if (dest == NULL) {
+ pr_err("L1 inst memory allocation failed\n");
+ return -1;
+ }
+ dma_memcpy(dest, (void *)s->sh_addr, s->sh_size);
+
+ } else if (!strcmp(".l1.data", shname) ||
+ (!strcmp(".data", shname) &&
+ (hdr->e_flags & EF_BFIN_DATA_IN_L1))) {
+
+ dest = l1_data_sram_alloc(s->sh_size);
+ mod->arch.data_a_l1 = dest;
+ if (dest == NULL) {
+ pr_err("L1 data memory allocation failed\n");
+ return -1;
+ }
+ memcpy(dest, (void *)s->sh_addr, s->sh_size);
+
+ } else if (!strcmp(".l1.bss", shname) ||
+ (!strcmp(".bss", shname) &&
+ (hdr->e_flags & EF_BFIN_DATA_IN_L1))) {
+
+ dest = l1_data_sram_zalloc(s->sh_size);
+ mod->arch.bss_a_l1 = dest;
+ if (dest == NULL) {
+ pr_err("L1 data memory allocation failed\n");
+ return -1;
+ }
+
+ } else if (!strcmp(".l1.data.B", shname)) {
+
+ dest = l1_data_B_sram_alloc(s->sh_size);
+ mod->arch.data_b_l1 = dest;
+ if (dest == NULL) {
+ pr_err("L1 data memory allocation failed\n");
+ return -1;
+ }
+ memcpy(dest, (void *)s->sh_addr, s->sh_size);
+
+ } else if (!strcmp(".l1.bss.B", shname)) {
+
+ dest = l1_data_B_sram_alloc(s->sh_size);
+ mod->arch.bss_b_l1 = dest;
+ if (dest == NULL) {
+ pr_err("L1 data memory allocation failed\n");
+ return -1;
+ }
+ memset(dest, 0, s->sh_size);
+
+ } else if (!strcmp(".l2.text", shname) ||
+ (!strcmp(".text", shname) &&
+ (hdr->e_flags & EF_BFIN_CODE_IN_L2))) {
+
+ dest = l2_sram_alloc(s->sh_size);
+ mod->arch.text_l2 = dest;
+ if (dest == NULL) {
+ pr_err("L2 SRAM allocation failed\n");
+ return -1;
+ }
+ memcpy(dest, (void *)s->sh_addr, s->sh_size);
+
+ } else if (!strcmp(".l2.data", shname) ||
+ (!strcmp(".data", shname) &&
+ (hdr->e_flags & EF_BFIN_DATA_IN_L2))) {
+
+ dest = l2_sram_alloc(s->sh_size);
+ mod->arch.data_l2 = dest;
+ if (dest == NULL) {
+ pr_err("L2 SRAM allocation failed\n");
+ return -1;
+ }
+ memcpy(dest, (void *)s->sh_addr, s->sh_size);
+
+ } else if (!strcmp(".l2.bss", shname) ||
+ (!strcmp(".bss", shname) &&
+ (hdr->e_flags & EF_BFIN_DATA_IN_L2))) {
+
+ dest = l2_sram_zalloc(s->sh_size);
+ mod->arch.bss_l2 = dest;
+ if (dest == NULL) {
+ pr_err("L2 SRAM allocation failed\n");
+ return -1;
+ }
+
+ } else
+ continue;
+
+ s->sh_flags &= ~SHF_ALLOC;
+ s->sh_addr = (unsigned long)dest;
+ }
+
+ return 0;
+}
+
+/*************************************************************************/
+/* FUNCTION : apply_relocate_add */
+/* ABSTRACT : Blackfin specific relocation handling for the loadable */
+/* modules. Modules are expected to be .o files. */
+/* Arithmetic relocations are handled. */
+/* We do not expect LSETUP to be split and hence is not */
+/* handled. */
+/* R_BFIN_BYTE and R_BFIN_BYTE2 are also not handled as the */
+/* gas does not generate it. */
+/*************************************************************************/
+int
+apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
+ unsigned int symindex, unsigned int relsec,
+ struct module *mod)
+{
+ unsigned int i;
+ Elf32_Rela *rel = (void *)sechdrs[relsec].sh_addr;
+ Elf32_Sym *sym;
+ unsigned long location, value, size;
+
+ pr_debug("applying relocate section %u to %u\n",
+ relsec, sechdrs[relsec].sh_info);
+
+ for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
+ /* This is where to make the change */
+ location = sechdrs[sechdrs[relsec].sh_info].sh_addr +
+ rel[i].r_offset;
+
+ /* This is the symbol it is referring to. Note that all
+ undefined symbols have been resolved. */
+ sym = (Elf32_Sym *) sechdrs[symindex].sh_addr
+ + ELF32_R_SYM(rel[i].r_info);
+ value = sym->st_value;
+ value += rel[i].r_addend;
+
+#ifdef CONFIG_SMP
+ if (location >= COREB_L1_DATA_A_START) {
+ pr_err("cannot relocate in L1: %u (SMP kernel)\n",
+ ELF32_R_TYPE(rel[i].r_info));
+ return -ENOEXEC;
+ }
+#endif
+
+ pr_debug("location is %lx, value is %lx type is %d\n",
+ location, value, ELF32_R_TYPE(rel[i].r_info));
+
+ switch (ELF32_R_TYPE(rel[i].r_info)) {
+
+ case R_BFIN_HUIMM16:
+ value >>= 16;
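+			/* fall through: patched as a 16-bit immediate below */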
+ case R_BFIN_LUIMM16:
+ case R_BFIN_RIMM16:
+ size = 2;
+ break;
+ case R_BFIN_BYTE4_DATA:
+ size = 4;
+ break;
+
+ case R_BFIN_PCREL24:
+ case R_BFIN_PCREL24_JUMP_L:
+ case R_BFIN_PCREL12_JUMP:
+ case R_BFIN_PCREL12_JUMP_S:
+ case R_BFIN_PCREL10:
+ pr_err("unsupported relocation: %u (no -mlong-calls?)\n",
+ ELF32_R_TYPE(rel[i].r_info));
+ return -ENOEXEC;
+
+ default:
+ pr_err("unknown relocation: %u\n",
+ ELF32_R_TYPE(rel[i].r_info));
+ return -ENOEXEC;
+ }
+
+ switch (bfin_mem_access_type(location, size)) {
+ case BFIN_MEM_ACCESS_CORE:
+ case BFIN_MEM_ACCESS_CORE_ONLY:
+ memcpy((void *)location, &value, size);
+ break;
+ case BFIN_MEM_ACCESS_DMA:
+ dma_memcpy((void *)location, &value, size);
+ break;
+ case BFIN_MEM_ACCESS_ITEST:
+ isram_memcpy((void *)location, &value, size);
+ break;
+ default:
+ pr_err("invalid relocation for %#lx\n", location);
+ return -ENOEXEC;
+ }
+ }
+
+ return 0;
+}
+
+int
+module_finalize(const Elf_Ehdr * hdr,
+ const Elf_Shdr * sechdrs, struct module *mod)
+{
+ unsigned int i, strindex = 0, symindex = 0;
+ char *secstrings;
+ long err = 0;
+
+ secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+
+ for (i = 1; i < hdr->e_shnum; i++) {
+ /* Internal symbols and strings. */
+ if (sechdrs[i].sh_type == SHT_SYMTAB) {
+ symindex = i;
+ strindex = sechdrs[i].sh_link;
+ }
+ }
+
+ for (i = 1; i < hdr->e_shnum; i++) {
+ const char *strtab = (char *)sechdrs[strindex].sh_addr;
+ unsigned int info = sechdrs[i].sh_info;
+ const char *shname = secstrings + sechdrs[i].sh_name;
+
+ /* Not a valid relocation section? */
+ if (info >= hdr->e_shnum)
+ continue;
+
+ /* Only support RELA relocation types */
+ if (sechdrs[i].sh_type != SHT_RELA)
+ continue;
+
+ if (!strcmp(".rela.l2.text", shname) ||
+ !strcmp(".rela.l1.text", shname) ||
+ (!strcmp(".rela.text", shname) &&
+ (hdr->e_flags & (EF_BFIN_CODE_IN_L1 | EF_BFIN_CODE_IN_L2)))) {
+
+ err = apply_relocate_add((Elf_Shdr *) sechdrs, strtab,
+ symindex, i, mod);
+ if (err < 0)
+ return -ENOEXEC;
+ }
+ }
+
+ return 0;
+}
+
+void module_arch_cleanup(struct module *mod)
+{
+ l1_inst_sram_free(mod->arch.text_l1);
+ l1_data_A_sram_free(mod->arch.data_a_l1);
+ l1_data_A_sram_free(mod->arch.bss_a_l1);
+ l1_data_B_sram_free(mod->arch.data_b_l1);
+ l1_data_B_sram_free(mod->arch.bss_b_l1);
+ l2_sram_free(mod->arch.text_l2);
+ l2_sram_free(mod->arch.data_l2);
+ l2_sram_free(mod->arch.bss_l2);
+}
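
For clarity on the 16-bit immediate relocations handled above: the toolchain materializes a 32-bit address with a pair of half-word loads, and R_BFIN_LUIMM16/R_BFIN_HUIMM16 patch the two halves. A minimal sketch of the arithmetic (the helper name is hypothetical):

	#include <stdint.h>

	static void split_imm32(uint32_t value, uint16_t *lo, uint16_t *hi)
	{
		*lo = value & 0xffff;	/* R_BFIN_LUIMM16: low half, used as-is */
		*hi = value >> 16;	/* R_BFIN_HUIMM16: the "value >>= 16" case */
	}
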
diff --git a/arch/blackfin/kernel/nmi.c b/arch/blackfin/kernel/nmi.c
new file mode 100644
index 00000000..9919d292
--- /dev/null
+++ b/arch/blackfin/kernel/nmi.c
@@ -0,0 +1,287 @@
+/*
+ * Blackfin nmi_watchdog Driver
+ *
+ * Originally based on bfin_wdt.c
+ * Copyright 2010-2010 Analog Devices Inc.
+ * Graff Yang <graf.yang@analog.com>
+ *
+ * Enter bugs at http://blackfin.uclinux.org/
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/bitops.h>
+#include <linux/hardirq.h>
+#include <linux/syscore_ops.h>
+#include <linux/pm.h>
+#include <linux/nmi.h>
+#include <linux/smp.h>
+#include <linux/timer.h>
+#include <asm/blackfin.h>
+#include <linux/atomic.h>
+#include <asm/cacheflush.h>
+#include <asm/bfin_watchdog.h>
+
+#define DRV_NAME "nmi-wdt"
+
+#define NMI_WDT_TIMEOUT 5 /* 5 seconds */
+#define NMI_CHECK_TIMEOUT (4 * HZ) /* 4 seconds in jiffies */
+static int nmi_wdt_cpu = 1;
+
+static unsigned int timeout = NMI_WDT_TIMEOUT;
+static int nmi_active;
+
+static unsigned short wdoga_ctl;
+static unsigned int wdoga_cnt;
+static struct corelock_slot saved_corelock;
+static atomic_t nmi_touched[NR_CPUS];
+static struct timer_list ntimer;
+
+enum {
+ COREA_ENTER_NMI = 0,
+ COREA_EXIT_NMI,
+ COREB_EXIT_NMI,
+
+ NMI_EVENT_NR,
+};
+static unsigned long nmi_event __attribute__ ((__section__(".l2.bss")));
+
+/* we are in an NMI, so non-atomic bit ops are safe */
+static inline void set_nmi_event(int event)
+{
+ __set_bit(event, &nmi_event);
+}
+
+static inline void wait_nmi_event(int event)
+{
+ while (!test_bit(event, &nmi_event))
+ barrier();
+ __clear_bit(event, &nmi_event);
+}
+
+static inline void send_corea_nmi(void)
+{
+ wdoga_ctl = bfin_read_WDOGA_CTL();
+ wdoga_cnt = bfin_read_WDOGA_CNT();
+
+ bfin_write_WDOGA_CTL(WDEN_DISABLE);
+ bfin_write_WDOGA_CNT(0);
+ bfin_write_WDOGA_CTL(WDEN_ENABLE | ICTL_NMI);
+}
+
+static inline void restore_corea_nmi(void)
+{
+ bfin_write_WDOGA_CTL(WDEN_DISABLE);
+ bfin_write_WDOGA_CTL(WDOG_EXPIRED | WDEN_DISABLE | ICTL_NONE);
+
+ bfin_write_WDOGA_CNT(wdoga_cnt);
+ bfin_write_WDOGA_CTL(wdoga_ctl);
+}
+
+static inline void save_corelock(void)
+{
+ saved_corelock = corelock;
+ corelock.lock = 0;
+}
+
+static inline void restore_corelock(void)
+{
+ corelock = saved_corelock;
+}
+
+
+static inline void nmi_wdt_keepalive(void)
+{
+ bfin_write_WDOGB_STAT(0);
+}
+
+static inline void nmi_wdt_stop(void)
+{
+ bfin_write_WDOGB_CTL(WDEN_DISABLE);
+}
+
+/* before calling this function, you must stop the WDT */
+static inline void nmi_wdt_clear(void)
+{
+ /* clear TRO bit, disable event generation */
+ bfin_write_WDOGB_CTL(WDOG_EXPIRED | WDEN_DISABLE | ICTL_NONE);
+}
+
+static inline void nmi_wdt_start(void)
+{
+ bfin_write_WDOGB_CTL(WDEN_ENABLE | ICTL_NMI);
+}
+
+static inline int nmi_wdt_running(void)
+{
+ return ((bfin_read_WDOGB_CTL() & WDEN_MASK) != WDEN_DISABLE);
+}
+
+static inline int nmi_wdt_set_timeout(unsigned long t)
+{
+ u32 cnt, max_t, sclk;
+ int run;
+
+ sclk = get_sclk();
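+	/* (u32)-1 is the maximum WDOG count, so max_t is the largest
+	 * representable timeout in seconds */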
+ max_t = -1 / sclk;
+ cnt = t * sclk;
+ if (t > max_t) {
+ pr_warning("NMI: timeout value is too large\n");
+ return -EINVAL;
+ }
+
+ run = nmi_wdt_running();
+ nmi_wdt_stop();
+ bfin_write_WDOGB_CNT(cnt);
+ if (run)
+ nmi_wdt_start();
+
+ timeout = t;
+
+ return 0;
+}
+
+int check_nmi_wdt_touched(void)
+{
+ unsigned int this_cpu = smp_processor_id();
+ unsigned int cpu;
+ cpumask_t mask;
+
+ cpumask_copy(&mask, cpu_online_mask);
+ if (!atomic_read(&nmi_touched[this_cpu]))
+ return 0;
+
+ atomic_set(&nmi_touched[this_cpu], 0);
+
+ cpumask_clear_cpu(this_cpu, &mask);
+ for_each_cpu(cpu, &mask) {
+ invalidate_dcache_range((unsigned long)(&nmi_touched[cpu]),
+ (unsigned long)(&nmi_touched[cpu]));
+ if (!atomic_read(&nmi_touched[cpu]))
+ return 0;
+ atomic_set(&nmi_touched[cpu], 0);
+ }
+
+ return 1;
+}
+
+static void nmi_wdt_timer(unsigned long data)
+{
+ if (check_nmi_wdt_touched())
+ nmi_wdt_keepalive();
+
+ mod_timer(&ntimer, jiffies + NMI_CHECK_TIMEOUT);
+}
+
+static int __init init_nmi_wdt(void)
+{
+ nmi_wdt_set_timeout(timeout);
+ nmi_wdt_start();
+ nmi_active = true;
+
+ init_timer(&ntimer);
+ ntimer.function = nmi_wdt_timer;
+ ntimer.expires = jiffies + NMI_CHECK_TIMEOUT;
+ add_timer(&ntimer);
+
+ pr_info("nmi_wdt: initialized: timeout=%d sec\n", timeout);
+ return 0;
+}
+device_initcall(init_nmi_wdt);
+
+void touch_nmi_watchdog(void)
+{
+ atomic_set(&nmi_touched[smp_processor_id()], 1);
+}
+
+/* Suspend/resume support */
+#ifdef CONFIG_PM
+static int nmi_wdt_suspend(void)
+{
+ nmi_wdt_stop();
+ return 0;
+}
+
+static void nmi_wdt_resume(void)
+{
+ if (nmi_active)
+ nmi_wdt_start();
+}
+
+static struct syscore_ops nmi_syscore_ops = {
+ .resume = nmi_wdt_resume,
+ .suspend = nmi_wdt_suspend,
+};
+
+static int __init init_nmi_wdt_syscore(void)
+{
+ if (nmi_active)
+ register_syscore_ops(&nmi_syscore_ops);
+
+ return 0;
+}
+late_initcall(init_nmi_wdt_syscore);
+
+#endif /* CONFIG_PM */
+
+
+asmlinkage notrace void do_nmi(struct pt_regs *fp)
+{
+ unsigned int cpu = smp_processor_id();
+ nmi_enter();
+
+ cpu_pda[cpu].__nmi_count += 1;
+
+ if (cpu == nmi_wdt_cpu) {
+ /* CoreB goes here first */
+
+ /* reload the WDOG_STAT */
+ nmi_wdt_keepalive();
+
+ /* clear nmi interrupt for CoreB */
+ nmi_wdt_stop();
+ nmi_wdt_clear();
+
+ /* trigger NMI interrupt of CoreA */
+ send_corea_nmi();
+
+		/* wait for CoreA to enter NMI */
+ wait_nmi_event(COREA_ENTER_NMI);
+
+ /* recover WDOGA's settings */
+ restore_corea_nmi();
+
+ save_corelock();
+
+		/* corelock is saved/cleared, CoreA is dumping messages */
+
+ wait_nmi_event(COREA_EXIT_NMI);
+ } else {
+ /* OK, CoreA entered NMI */
+ set_nmi_event(COREA_ENTER_NMI);
+ }
+
+ pr_emerg("\nNMI Watchdog detected LOCKUP, dump for CPU %d\n", cpu);
+ dump_bfin_process(fp);
+ dump_bfin_mem(fp);
+ show_regs(fp);
+ dump_bfin_trace_buffer();
+ show_stack(current, (unsigned long *)fp);
+
+ if (cpu == nmi_wdt_cpu) {
+ pr_emerg("This fault is not recoverable, sorry!\n");
+
+		/* dumps on both cores finished, restore the corelock */
+ restore_corelock();
+
+ set_nmi_event(COREB_EXIT_NMI);
+ } else {
+		/* CoreA dump finished, notify CoreB we are done */
+		set_nmi_event(COREA_EXIT_NMI);
+
+		/* synchronize with CoreB */
+ wait_nmi_event(COREB_EXIT_NMI);
+ }
+
+ nmi_exit();
+}
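
The cross-core choreography in do_nmi() is easier to follow as a timeline of the set_nmi_event()/wait_nmi_event() handshakes over the .l2.bss event word:

	/*
	 *  CoreB (nmi_wdt_cpu)                    CoreA
	 *  -------------------                    -----
	 *  send_corea_nmi()
	 *  wait COREA_ENTER_NMI  <--------------  set COREA_ENTER_NMI
	 *  restore WDOGA, save corelock
	 *  wait COREA_EXIT_NMI   <--------------  dump registers/stack
	 *                                         set COREA_EXIT_NMI
	 *  dump, restore corelock                 wait COREB_EXIT_NMI
	 *  set COREB_EXIT_NMI    -------------->  nmi_exit()
	 */
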
diff --git a/arch/blackfin/kernel/perf_event.c b/arch/blackfin/kernel/perf_event.c
new file mode 100644
index 00000000..e47d19ae
--- /dev/null
+++ b/arch/blackfin/kernel/perf_event.c
@@ -0,0 +1,499 @@
+/*
+ * Blackfin performance counters
+ *
+ * Copyright 2011 Analog Devices Inc.
+ *
+ * Ripped from SuperH version:
+ *
+ * Copyright (C) 2009 Paul Mundt
+ *
+ * Heavily based on the x86 and PowerPC implementations.
+ *
+ * x86:
+ * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
+ * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
+ * Copyright (C) 2009 Jaswinder Singh Rajput
+ * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
+ * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
+ *
+ * ppc:
+ * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/perf_event.h>
+#include <asm/bfin_pfmon.h>
+
+/*
+ * We have two counters, and each counter can support an event type.
+ * The 'o' is PFCNTx=1 and 's' is PFCNTx=0
+ *
+ * 0x04 o pc invariant branches
+ * 0x06 o mispredicted branches
+ * 0x09 o predicted branches taken
+ * 0x0B o EXCPT insn
+ * 0x0C o CSYNC/SSYNC insn
+ * 0x0D o Insns committed
+ * 0x0E o Interrupts taken
+ * 0x0F o Misaligned address exceptions
+ * 0x80 o Code memory fetches stalled due to DMA
+ * 0x83 o 64bit insn fetches delivered
+ * 0x9A o data cache fills (bank a)
+ * 0x9B o data cache fills (bank b)
+ * 0x9C o data cache lines evicted (bank a)
+ * 0x9D o data cache lines evicted (bank b)
+ * 0x9E o data cache high priority fills
+ * 0x9F o data cache low priority fills
+ * 0x00 s loop 0 iterations
+ * 0x01 s loop 1 iterations
+ * 0x0A s CSYNC/SSYNC stalls
+ * 0x10 s DAG read/after write hazards
+ * 0x13 s RAW data hazards
+ * 0x81 s code TAG stalls
+ * 0x82 s code fill stalls
+ * 0x90 s processor to memory stalls
+ * 0x91 s data memory stalls not hidden by 0x90
+ * 0x92 s data store buffer full stalls
+ * 0x93 s data memory write buffer full stalls due to high->low priority
+ * 0x95 s data memory fill buffer stalls
+ * 0x96 s data TAG collision stalls
+ * 0x97 s data collision stalls
+ * 0x98 s data stalls
+ * 0x99 s data stalls sent to processor
+ */
+
+static const int event_map[] = {
+ /* use CYCLES cpu register */
+ [PERF_COUNT_HW_CPU_CYCLES] = -1,
+ [PERF_COUNT_HW_INSTRUCTIONS] = 0x0D,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = -1,
+ [PERF_COUNT_HW_CACHE_MISSES] = 0x83,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x09,
+ [PERF_COUNT_HW_BRANCH_MISSES] = 0x06,
+ [PERF_COUNT_HW_BUS_CYCLES] = -1,
+};
+
+#define C(x) PERF_COUNT_HW_CACHE_##x
+
+static const int cache_events[PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] =
+{
+ [C(L1D)] = { /* Data bank A */
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = 0,
+ [C(RESULT_MISS) ] = 0x9A,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = 0,
+ [C(RESULT_MISS) ] = 0,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = 0,
+ [C(RESULT_MISS) ] = 0,
+ },
+ },
+
+ [C(L1I)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = 0,
+ [C(RESULT_MISS) ] = 0x83,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = -1,
+ [C(RESULT_MISS) ] = -1,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = 0,
+ [C(RESULT_MISS) ] = 0,
+ },
+ },
+
+ [C(LL)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = -1,
+ [C(RESULT_MISS) ] = -1,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = -1,
+ [C(RESULT_MISS) ] = -1,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = -1,
+ [C(RESULT_MISS) ] = -1,
+ },
+ },
+
+ [C(DTLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = -1,
+ [C(RESULT_MISS) ] = -1,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = -1,
+ [C(RESULT_MISS) ] = -1,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = -1,
+ [C(RESULT_MISS) ] = -1,
+ },
+ },
+
+ [C(ITLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = -1,
+ [C(RESULT_MISS) ] = -1,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = -1,
+ [C(RESULT_MISS) ] = -1,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = -1,
+ [C(RESULT_MISS) ] = -1,
+ },
+ },
+
+ [C(BPU)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = -1,
+ [C(RESULT_MISS) ] = -1,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = -1,
+ [C(RESULT_MISS) ] = -1,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = -1,
+ [C(RESULT_MISS) ] = -1,
+ },
+ },
+};
+
+const char *perf_pmu_name(void)
+{
+ return "bfin";
+}
+EXPORT_SYMBOL(perf_pmu_name);
+
+int perf_num_counters(void)
+{
+ return ARRAY_SIZE(event_map);
+}
+EXPORT_SYMBOL(perf_num_counters);
+
+static u64 bfin_pfmon_read(int idx)
+{
+ return bfin_read32(PFCNTR0 + (idx * 4));
+}
+
+static void bfin_pfmon_disable(struct hw_perf_event *hwc, int idx)
+{
+ bfin_write_PFCTL(bfin_read_PFCTL() & ~PFCEN(idx, PFCEN_MASK));
+}
+
+static void bfin_pfmon_enable(struct hw_perf_event *hwc, int idx)
+{
+ u32 val, mask;
+
+ val = PFPWR;
+ if (idx) {
+ mask = ~(PFCNT1 | PFMON1 | PFCEN1 | PEMUSW1);
+ /* The packed config is for event0, so shift it to event1 slots */
+ val |= (hwc->config << (PFMON1_P - PFMON0_P));
+ val |= (hwc->config & PFCNT0) << (PFCNT1_P - PFCNT0_P);
+ bfin_write_PFCNTR1(0);
+ } else {
+ mask = ~(PFCNT0 | PFMON0 | PFCEN0 | PEMUSW0);
+ val |= hwc->config;
+ bfin_write_PFCNTR0(0);
+ }
+
+ bfin_write_PFCTL((bfin_read_PFCTL() & mask) | val);
+}
+
+static void bfin_pfmon_disable_all(void)
+{
+ bfin_write_PFCTL(bfin_read_PFCTL() & ~PFPWR);
+}
+
+static void bfin_pfmon_enable_all(void)
+{
+ bfin_write_PFCTL(bfin_read_PFCTL() | PFPWR);
+}
+
+struct cpu_hw_events {
+ struct perf_event *events[MAX_HWEVENTS];
+ unsigned long used_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
+};
+DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
+
+static int hw_perf_cache_event(int config, int *evp)
+{
+ unsigned long type, op, result;
+ int ev;
+
+ /* unpack config */
+ type = config & 0xff;
+ op = (config >> 8) & 0xff;
+ result = (config >> 16) & 0xff;
+
+ if (type >= PERF_COUNT_HW_CACHE_MAX ||
+ op >= PERF_COUNT_HW_CACHE_OP_MAX ||
+ result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
+ return -EINVAL;
+
+ ev = cache_events[type][op][result];
+ if (ev == 0)
+ return -EOPNOTSUPP;
+ if (ev == -1)
+ return -EINVAL;
+ *evp = ev;
+ return 0;
+}
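+
+/*
+ * Illustrative note (not part of the original patch): the generic perf
+ * ABI packs a hw_cache event id as (type | (op << 8) | (result << 16)),
+ * which is exactly what the unpacking above undoes.  For example, an L1
+ * icache read-miss request arrives as
+ *
+ *	config = C(L1I) | (C(OP_READ) << 8) | (C(RESULT_MISS) << 16);
+ *
+ * and resolves through cache_events[][][] to hardware event 0x83.
+ */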
+
+static void bfin_perf_event_update(struct perf_event *event,
+ struct hw_perf_event *hwc, int idx)
+{
+ u64 prev_raw_count, new_raw_count;
+ s64 delta;
+ int shift = 0;
+
+ /*
+ * Depending on the counter configuration, they may or may not
+ * be chained, in which case the previous counter value can be
+ * updated underneath us if the lower-half overflows.
+ *
+ * Our tactic to handle this is to first atomically read and
+ * exchange a new raw count - then add that new-prev delta
+ * count to the generic counter atomically.
+ *
+ * As there is no interrupt associated with the overflow events,
+ * this is the simplest approach for maintaining consistency.
+ */
+again:
+ prev_raw_count = local64_read(&hwc->prev_count);
+ new_raw_count = bfin_pfmon_read(idx);
+
+ if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
+ new_raw_count) != prev_raw_count)
+ goto again;
+
+ /*
+ * Now we have the new raw value and have updated the prev
+ * timestamp already. We can now calculate the elapsed delta
+ * (counter-)time and add that to the generic counter.
+ *
+ * Careful, not all hw sign-extends above the physical width
+ * of the count.
+ */
+ delta = (new_raw_count << shift) - (prev_raw_count << shift);
+ delta >>= shift;
+
+ local64_add(delta, &event->count);
+}
+
+static void bfin_pmu_stop(struct perf_event *event, int flags)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+
+ if (!(event->hw.state & PERF_HES_STOPPED)) {
+ bfin_pfmon_disable(hwc, idx);
+ cpuc->events[idx] = NULL;
+ event->hw.state |= PERF_HES_STOPPED;
+ }
+
+ if ((flags & PERF_EF_UPDATE) && !(event->hw.state & PERF_HES_UPTODATE)) {
+ bfin_perf_event_update(event, &event->hw, idx);
+ event->hw.state |= PERF_HES_UPTODATE;
+ }
+}
+
+static void bfin_pmu_start(struct perf_event *event, int flags)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+
+ if (WARN_ON_ONCE(idx == -1))
+ return;
+
+ if (flags & PERF_EF_RELOAD)
+ WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
+
+ cpuc->events[idx] = event;
+ event->hw.state = 0;
+ bfin_pfmon_enable(hwc, idx);
+}
+
+static void bfin_pmu_del(struct perf_event *event, int flags)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+ bfin_pmu_stop(event, PERF_EF_UPDATE);
+ __clear_bit(event->hw.idx, cpuc->used_mask);
+
+ perf_event_update_userpage(event);
+}
+
+static int bfin_pmu_add(struct perf_event *event, int flags)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+ int ret = -EAGAIN;
+
+ perf_pmu_disable(event->pmu);
+
+ if (__test_and_set_bit(idx, cpuc->used_mask)) {
+ idx = find_first_zero_bit(cpuc->used_mask, MAX_HWEVENTS);
+ if (idx == MAX_HWEVENTS)
+ goto out;
+
+ __set_bit(idx, cpuc->used_mask);
+ hwc->idx = idx;
+ }
+
+ bfin_pfmon_disable(hwc, idx);
+
+ event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
+ if (flags & PERF_EF_START)
+ bfin_pmu_start(event, PERF_EF_RELOAD);
+
+ perf_event_update_userpage(event);
+ ret = 0;
+out:
+ perf_pmu_enable(event->pmu);
+ return ret;
+}
+
+static void bfin_pmu_read(struct perf_event *event)
+{
+ bfin_perf_event_update(event, &event->hw, event->hw.idx);
+}
+
+static int bfin_pmu_event_init(struct perf_event *event)
+{
+ struct perf_event_attr *attr = &event->attr;
+ struct hw_perf_event *hwc = &event->hw;
+ int config = -1;
+ int ret;
+
+ if (attr->exclude_hv || attr->exclude_idle)
+ return -EPERM;
+
+ /*
+ * All of the on-chip counters are "limited", in that they have
+ * no interrupts, and are therefore unable to do sampling without
+ * further work and timer assistance.
+ */
+ if (hwc->sample_period)
+ return -EINVAL;
+
+ ret = 0;
+ switch (attr->type) {
+ case PERF_TYPE_RAW:
+ config = PFMON(0, attr->config & PFMON_MASK) |
+ PFCNT(0, !(attr->config & 0x100));
+ break;
+ case PERF_TYPE_HW_CACHE:
+ ret = hw_perf_cache_event(attr->config, &config);
+ break;
+ case PERF_TYPE_HARDWARE:
+ if (attr->config >= ARRAY_SIZE(event_map))
+ return -EINVAL;
+
+ config = event_map[attr->config];
+ break;
+ }
+
+ if (config == -1)
+ return -EINVAL;
+
+ if (!attr->exclude_kernel)
+ config |= PFCEN(0, PFCEN_ENABLE_SUPV);
+ if (!attr->exclude_user)
+ config |= PFCEN(0, PFCEN_ENABLE_USER);
+
+ hwc->config |= config;
+
+ return ret;
+}
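+
+/*
+ * Illustrative sketch, not part of the original patch: a raw event as
+ * accepted by the PERF_TYPE_RAW branch above.  Bits [7:0] select an
+ * event number from the table at the top of this file, and bit 8 is
+ * folded into the PFCNT mode bit via PFCNT(0, !(attr->config & 0x100)):
+ *
+ *	struct perf_event_attr attr;
+ *	memset(&attr, 0, sizeof(attr));
+ *	attr.type = PERF_TYPE_RAW;
+ *	attr.config = 0x83;	(raw event 0x83: I-cache misses)
+ */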
+
+static void bfin_pmu_enable(struct pmu *pmu)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct perf_event *event;
+ struct hw_perf_event *hwc;
+ int i;
+
+ for (i = 0; i < MAX_HWEVENTS; ++i) {
+ event = cpuc->events[i];
+ if (!event)
+ continue;
+ hwc = &event->hw;
+ bfin_pfmon_enable(hwc, hwc->idx);
+ }
+
+ bfin_pfmon_enable_all();
+}
+
+static void bfin_pmu_disable(struct pmu *pmu)
+{
+ bfin_pfmon_disable_all();
+}
+
+static struct pmu pmu = {
+ .pmu_enable = bfin_pmu_enable,
+ .pmu_disable = bfin_pmu_disable,
+ .event_init = bfin_pmu_event_init,
+ .add = bfin_pmu_add,
+ .del = bfin_pmu_del,
+ .start = bfin_pmu_start,
+ .stop = bfin_pmu_stop,
+ .read = bfin_pmu_read,
+};
+
+static void bfin_pmu_setup(int cpu)
+{
+ struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
+
+ memset(cpuhw, 0, sizeof(struct cpu_hw_events));
+}
+
+static int __cpuinit
+bfin_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (long)hcpu;
+
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_UP_PREPARE:
+ bfin_write_PFCTL(0);
+ bfin_pmu_setup(cpu);
+ break;
+
+ default:
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static int __init bfin_pmu_init(void)
+{
+ int ret;
+
+ ret = perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
+ if (!ret)
+ perf_cpu_notifier(bfin_pmu_notifier);
+
+ return ret;
+}
+early_initcall(bfin_pmu_init);
diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c
new file mode 100644
index 00000000..c0f4fe28
--- /dev/null
+++ b/arch/blackfin/kernel/process.c
@@ -0,0 +1,513 @@
+/*
+ * Blackfin architecture-dependent process handling
+ *
+ * Copyright 2004-2009 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later
+ */
+
+#include <linux/module.h>
+#include <linux/unistd.h>
+#include <linux/user.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/tick.h>
+#include <linux/fs.h>
+#include <linux/err.h>
+
+#include <asm/blackfin.h>
+#include <asm/fixed_code.h>
+#include <asm/mem_map.h>
+#include <asm/irq.h>
+
+asmlinkage void ret_from_fork(void);
+
+/* Points to the SDRAM backup memory for the stack that is currently in
+ * L1 scratchpad memory.
+ */
+void *current_l1_stack_save;
+
+/* The number of tasks currently using a L1 stack area. The SRAM is
+ * allocated/deallocated whenever this changes from/to zero.
+ */
+int nr_l1stack_tasks;
+
+/* Start and length of the area in L1 scratchpad memory which we've allocated
+ * for process stacks.
+ */
+void *l1_stack_base;
+unsigned long l1_stack_len;
+
+/*
+ * Power management idle function, if any.
+ */
+void (*pm_idle)(void) = NULL;
+EXPORT_SYMBOL(pm_idle);
+
+void (*pm_power_off)(void) = NULL;
+EXPORT_SYMBOL(pm_power_off);
+
+/*
+ * The idle loop on BFIN
+ */
+#ifdef CONFIG_IDLE_L1
+static void default_idle(void) __attribute__((l1_text));
+void cpu_idle(void) __attribute__((l1_text));
+#endif
+
+/*
+ * This is our default idle handler. We need to disable
+ * interrupts here to ensure we don't miss a wakeup call.
+ */
+static void default_idle(void)
+{
+#ifdef CONFIG_IPIPE
+ ipipe_suspend_domain();
+#endif
+ hard_local_irq_disable();
+ if (!need_resched())
+ idle_with_irq_disabled();
+
+ hard_local_irq_enable();
+}
+
+/*
+ * The idle thread. We try to conserve power while keeping
+ * overall latency low.
+ */
+void cpu_idle(void)
+{
+ /* endless idle loop with no priority at all */
+ while (1) {
+ void (*idle)(void) = pm_idle;
+
+#ifdef CONFIG_HOTPLUG_CPU
+ if (cpu_is_offline(smp_processor_id()))
+ cpu_die();
+#endif
+ if (!idle)
+ idle = default_idle;
+ tick_nohz_idle_enter();
+ rcu_idle_enter();
+ while (!need_resched())
+ idle();
+ rcu_idle_exit();
+ tick_nohz_idle_exit();
+ schedule_preempt_disabled();
+ }
+}
+
+/*
+ * This gets run with P1 containing the
+ * function to call, and R1 containing
+ * the "args". Note P0 is clobbered on the way here.
+ */
+void kernel_thread_helper(void);
+__asm__(".section .text\n"
+ ".align 4\n"
+ "_kernel_thread_helper:\n\t"
+ "\tsp += -12;\n\t"
+ "\tr0 = r1;\n\t" "\tcall (p1);\n\t" "\tcall _do_exit;\n" ".previous");
+
+/*
+ * Create a kernel thread.
+ */
+pid_t kernel_thread(int (*fn) (void *), void *arg, unsigned long flags)
+{
+ struct pt_regs regs;
+
+ memset(&regs, 0, sizeof(regs));
+
+ regs.r1 = (unsigned long)arg;
+ regs.p1 = (unsigned long)fn;
+ regs.pc = (unsigned long)kernel_thread_helper;
+ regs.orig_p0 = -1;
+	/* Set the 0x2 bit in IPEND to tell ret_from_fork we should be
+	   returning to kernel mode.  */
+	regs.ipend = 0x8002;
+ __asm__ __volatile__("%0 = syscfg;":"=da"(regs.syscfg):);
+ return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL,
+ NULL);
+}
+EXPORT_SYMBOL(kernel_thread);
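+
+/*
+ * Usage sketch (illustrative only; worker_fn/data are hypothetical):
+ * spawn a kernel-mode thread running worker_fn(data).  CLONE_VM and
+ * CLONE_UNTRACED are implied, and the thread exits via the do_exit
+ * call in kernel_thread_helper above when worker_fn returns.
+ *
+ *	pid_t pid = kernel_thread(worker_fn, data, CLONE_FS | CLONE_FILES);
+ */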
+
+/*
+ * Do necessary setup to start up a newly executed thread.
+ *
+ * Pass the data segment into user programs if it exists;
+ * it can't hurt anything as far as I can tell.
+ */
+void start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
+{
+ regs->pc = new_ip;
+ if (current->mm)
+ regs->p5 = current->mm->start_data;
+#ifndef CONFIG_SMP
+ task_thread_info(current)->l1_task_info.stack_start =
+ (void *)current->mm->context.stack_start;
+ task_thread_info(current)->l1_task_info.lowest_sp = (void *)new_sp;
+ memcpy(L1_SCRATCH_TASK_INFO, &task_thread_info(current)->l1_task_info,
+ sizeof(*L1_SCRATCH_TASK_INFO));
+#endif
+ wrusp(new_sp);
+}
+EXPORT_SYMBOL_GPL(start_thread);
+
+void flush_thread(void)
+{
+}
+
+asmlinkage int bfin_vfork(struct pt_regs *regs)
+{
+ return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, rdusp(), regs, 0, NULL,
+ NULL);
+}
+
+asmlinkage int bfin_clone(struct pt_regs *regs)
+{
+ unsigned long clone_flags;
+ unsigned long newsp;
+
+#ifdef __ARCH_SYNC_CORE_DCACHE
+ if (current->rt.nr_cpus_allowed == num_possible_cpus())
+ set_cpus_allowed_ptr(current, cpumask_of(smp_processor_id()));
+#endif
+
+ /* syscall2 puts clone_flags in r0 and usp in r1 */
+ clone_flags = regs->r0;
+ newsp = regs->r1;
+ if (!newsp)
+ newsp = rdusp();
+ else
+ newsp -= 12;
+ return do_fork(clone_flags, newsp, regs, 0, NULL, NULL);
+}
+
+int
+copy_thread(unsigned long clone_flags,
+ unsigned long usp, unsigned long topstk,
+ struct task_struct *p, struct pt_regs *regs)
+{
+ struct pt_regs *childregs;
+
+ childregs = (struct pt_regs *) (task_stack_page(p) + THREAD_SIZE) - 1;
+ *childregs = *regs;
+ childregs->r0 = 0;
+
+ p->thread.usp = usp;
+ p->thread.ksp = (unsigned long)childregs;
+ p->thread.pc = (unsigned long)ret_from_fork;
+
+ return 0;
+}
+
+/*
+ * sys_execve() executes a new program.
+ */
+asmlinkage int sys_execve(const char __user *name,
+ const char __user *const __user *argv,
+ const char __user *const __user *envp)
+{
+ int error;
+ char *filename;
+ struct pt_regs *regs = (struct pt_regs *)((&name) + 6);
+
+ filename = getname(name);
+ error = PTR_ERR(filename);
+ if (IS_ERR(filename))
+ return error;
+ error = do_execve(filename, argv, envp, regs);
+ putname(filename);
+ return error;
+}
+
+unsigned long get_wchan(struct task_struct *p)
+{
+ unsigned long fp, pc;
+ unsigned long stack_page;
+ int count = 0;
+ if (!p || p == current || p->state == TASK_RUNNING)
+ return 0;
+
+ stack_page = (unsigned long)p;
+ fp = p->thread.usp;
+ do {
+ if (fp < stack_page + sizeof(struct thread_info) ||
+ fp >= 8184 + stack_page)
+ return 0;
+ pc = ((unsigned long *)fp)[1];
+ if (!in_sched_functions(pc))
+ return pc;
+ fp = *(unsigned long *)fp;
+	} while (count++ < 16);
+ return 0;
+}
+
+void finish_atomic_sections (struct pt_regs *regs)
+{
+ int __user *up0 = (int __user *)regs->p0;
+
+ switch (regs->pc) {
+ default:
+ /* not in middle of an atomic step, so resume like normal */
+ return;
+
+ case ATOMIC_XCHG32 + 2:
+ put_user(regs->r1, up0);
+ break;
+
+ case ATOMIC_CAS32 + 2:
+ case ATOMIC_CAS32 + 4:
+ if (regs->r0 == regs->r1)
+ case ATOMIC_CAS32 + 6:
+ put_user(regs->r2, up0);
+ break;
+
+ case ATOMIC_ADD32 + 2:
+ regs->r0 = regs->r1 + regs->r0;
+ /* fall through */
+ case ATOMIC_ADD32 + 4:
+ put_user(regs->r0, up0);
+ break;
+
+ case ATOMIC_SUB32 + 2:
+ regs->r0 = regs->r1 - regs->r0;
+ /* fall through */
+ case ATOMIC_SUB32 + 4:
+ put_user(regs->r0, up0);
+ break;
+
+ case ATOMIC_IOR32 + 2:
+ regs->r0 = regs->r1 | regs->r0;
+ /* fall through */
+ case ATOMIC_IOR32 + 4:
+ put_user(regs->r0, up0);
+ break;
+
+ case ATOMIC_AND32 + 2:
+ regs->r0 = regs->r1 & regs->r0;
+ /* fall through */
+ case ATOMIC_AND32 + 4:
+ put_user(regs->r0, up0);
+ break;
+
+ case ATOMIC_XOR32 + 2:
+ regs->r0 = regs->r1 ^ regs->r0;
+ /* fall through */
+ case ATOMIC_XOR32 + 4:
+ put_user(regs->r0, up0);
+ break;
+ }
+
+ /*
+ * We've finished the atomic section, and the only thing left for
+ * userspace is to do a RTS, so we might as well handle that too
+ * since we need to update the PC anyways.
+ */
+ regs->pc = regs->rets;
+}
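+
+/*
+ * Background note (illustrative, not from the original patch): the PC
+ * values tested above index into the userspace fixed-code sequences in
+ * fixed_code.S, where each instruction is 2 bytes.  ATOMIC_ADD32 is
+ * roughly
+ *
+ *	R1 = [P0];		ATOMIC_ADD32 + 0
+ *	R0 = R1 + R0;		ATOMIC_ADD32 + 2
+ *	[P0] = R0;		ATOMIC_ADD32 + 4
+ *	RTS;			ATOMIC_ADD32 + 6
+ *
+ * so being interrupted at +2 means the add has not happened yet and the
+ * kernel redoes it, while at +4 only the store remains to be done.
+ */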
+
+static inline
+int in_mem(unsigned long addr, unsigned long size,
+ unsigned long start, unsigned long end)
+{
+ return addr >= start && addr + size <= end;
+}
+static inline
+int in_mem_const_off(unsigned long addr, unsigned long size, unsigned long off,
+ unsigned long const_addr, unsigned long const_size)
+{
+ return const_size &&
+ in_mem(addr, size, const_addr + off, const_addr + const_size);
+}
+static inline
+int in_mem_const(unsigned long addr, unsigned long size,
+ unsigned long const_addr, unsigned long const_size)
+{
+ return in_mem_const_off(addr, size, 0, const_addr, const_size);
+}
+#define ASYNC_ENABLED(bnum, bctlnum) \
+({ \
+ (bfin_read_EBIU_AMGCTL() & 0xe) < ((bnum + 1) << 1) ? 0 : \
+ bfin_read_EBIU_AMBCTL##bctlnum() & B##bnum##RDYEN ? 0 : \
+ 1; \
+})
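+/*
+ * Explanatory note on the macro above (not from the original patch):
+ * AMGCTL bits [3:1] hold the number of enabled async banks, so bank
+ * "bnum" is usable only when that count is at least bnum + 1.  A bank
+ * whose BxRDYEN bit is set in AMBCTL waits on the external ARDY pin,
+ * and a wedged ARDY would hang the core, so such banks are treated as
+ * unusable as well.
+ */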
+/*
+ * We can't read EBIU banks that aren't enabled or we end up hanging
+ * on the access to the async space. Make sure we validate accesses
+ * that cross async banks too.
+ * 0 - found, but unusable
+ * 1 - found & usable
+ * 2 - not found
+ */
+static
+int in_async(unsigned long addr, unsigned long size)
+{
+ if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK0_BASE + ASYNC_BANK0_SIZE) {
+ if (!ASYNC_ENABLED(0, 0))
+ return 0;
+ if (addr + size <= ASYNC_BANK0_BASE + ASYNC_BANK0_SIZE)
+ return 1;
+ size -= ASYNC_BANK0_BASE + ASYNC_BANK0_SIZE - addr;
+ addr = ASYNC_BANK0_BASE + ASYNC_BANK0_SIZE;
+ }
+ if (addr >= ASYNC_BANK1_BASE && addr < ASYNC_BANK1_BASE + ASYNC_BANK1_SIZE) {
+ if (!ASYNC_ENABLED(1, 0))
+ return 0;
+ if (addr + size <= ASYNC_BANK1_BASE + ASYNC_BANK1_SIZE)
+ return 1;
+ size -= ASYNC_BANK1_BASE + ASYNC_BANK1_SIZE - addr;
+ addr = ASYNC_BANK1_BASE + ASYNC_BANK1_SIZE;
+ }
+ if (addr >= ASYNC_BANK2_BASE && addr < ASYNC_BANK2_BASE + ASYNC_BANK2_SIZE) {
+ if (!ASYNC_ENABLED(2, 1))
+ return 0;
+ if (addr + size <= ASYNC_BANK2_BASE + ASYNC_BANK2_SIZE)
+ return 1;
+ size -= ASYNC_BANK2_BASE + ASYNC_BANK2_SIZE - addr;
+ addr = ASYNC_BANK2_BASE + ASYNC_BANK2_SIZE;
+ }
+ if (addr >= ASYNC_BANK3_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE) {
+		if (!ASYNC_ENABLED(3, 1))
+ return 0;
+ if (addr + size <= ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE)
+ return 1;
+ return 0;
+ }
+
+ /* not within async bounds */
+ return 2;
+}
+
+int bfin_mem_access_type(unsigned long addr, unsigned long size)
+{
+ int cpu = raw_smp_processor_id();
+
+ /* Check that things do not wrap around */
+ if (addr > ULONG_MAX - size)
+ return -EFAULT;
+
+ if (in_mem(addr, size, FIXED_CODE_START, physical_mem_end))
+ return BFIN_MEM_ACCESS_CORE;
+
+ if (in_mem_const(addr, size, L1_CODE_START, L1_CODE_LENGTH))
+ return cpu == 0 ? BFIN_MEM_ACCESS_ITEST : BFIN_MEM_ACCESS_IDMA;
+ if (in_mem_const(addr, size, L1_SCRATCH_START, L1_SCRATCH_LENGTH))
+ return cpu == 0 ? BFIN_MEM_ACCESS_CORE_ONLY : -EFAULT;
+ if (in_mem_const(addr, size, L1_DATA_A_START, L1_DATA_A_LENGTH))
+ return cpu == 0 ? BFIN_MEM_ACCESS_CORE : BFIN_MEM_ACCESS_IDMA;
+ if (in_mem_const(addr, size, L1_DATA_B_START, L1_DATA_B_LENGTH))
+ return cpu == 0 ? BFIN_MEM_ACCESS_CORE : BFIN_MEM_ACCESS_IDMA;
+#ifdef COREB_L1_CODE_START
+ if (in_mem_const(addr, size, COREB_L1_CODE_START, COREB_L1_CODE_LENGTH))
+ return cpu == 1 ? BFIN_MEM_ACCESS_ITEST : BFIN_MEM_ACCESS_IDMA;
+ if (in_mem_const(addr, size, COREB_L1_SCRATCH_START, L1_SCRATCH_LENGTH))
+ return cpu == 1 ? BFIN_MEM_ACCESS_CORE_ONLY : -EFAULT;
+ if (in_mem_const(addr, size, COREB_L1_DATA_A_START, COREB_L1_DATA_A_LENGTH))
+ return cpu == 1 ? BFIN_MEM_ACCESS_CORE : BFIN_MEM_ACCESS_IDMA;
+ if (in_mem_const(addr, size, COREB_L1_DATA_B_START, COREB_L1_DATA_B_LENGTH))
+ return cpu == 1 ? BFIN_MEM_ACCESS_CORE : BFIN_MEM_ACCESS_IDMA;
+#endif
+ if (in_mem_const(addr, size, L2_START, L2_LENGTH))
+ return BFIN_MEM_ACCESS_CORE;
+
+ if (addr >= SYSMMR_BASE)
+ return BFIN_MEM_ACCESS_CORE_ONLY;
+
+ switch (in_async(addr, size)) {
+ case 0: return -EFAULT;
+ case 1: return BFIN_MEM_ACCESS_CORE;
+ case 2: /* fall through */;
+ }
+
+ if (in_mem_const(addr, size, BOOT_ROM_START, BOOT_ROM_LENGTH))
+ return BFIN_MEM_ACCESS_CORE;
+ if (in_mem_const(addr, size, L1_ROM_START, L1_ROM_LENGTH))
+ return BFIN_MEM_ACCESS_DMA;
+
+ return -EFAULT;
+}
+
+#if defined(CONFIG_ACCESS_CHECK)
+#ifdef CONFIG_ACCESS_OK_L1
+__attribute__((l1_text))
+#endif
+/* Return 1 if access to memory range is OK, 0 otherwise */
+int _access_ok(unsigned long addr, unsigned long size)
+{
+ int aret;
+
+ if (size == 0)
+ return 1;
+ /* Check that things do not wrap around */
+ if (addr > ULONG_MAX - size)
+ return 0;
+ if (segment_eq(get_fs(), KERNEL_DS))
+ return 1;
+#ifdef CONFIG_MTD_UCLINUX
+ if (1)
+#else
+ if (0)
+#endif
+ {
+ if (in_mem(addr, size, memory_start, memory_end))
+ return 1;
+ if (in_mem(addr, size, memory_mtd_end, physical_mem_end))
+ return 1;
+# ifndef CONFIG_ROMFS_ON_MTD
+ if (0)
+# endif
+ /* For XIP, allow user space to use pointers within the ROMFS. */
+ if (in_mem(addr, size, memory_mtd_start, memory_mtd_end))
+ return 1;
+ } else {
+ if (in_mem(addr, size, memory_start, physical_mem_end))
+ return 1;
+ }
+
+ if (in_mem(addr, size, (unsigned long)__init_begin, (unsigned long)__init_end))
+ return 1;
+
+ if (in_mem_const(addr, size, L1_CODE_START, L1_CODE_LENGTH))
+ return 1;
+ if (in_mem_const_off(addr, size, _etext_l1 - _stext_l1, L1_CODE_START, L1_CODE_LENGTH))
+ return 1;
+ if (in_mem_const_off(addr, size, _ebss_l1 - _sdata_l1, L1_DATA_A_START, L1_DATA_A_LENGTH))
+ return 1;
+ if (in_mem_const_off(addr, size, _ebss_b_l1 - _sdata_b_l1, L1_DATA_B_START, L1_DATA_B_LENGTH))
+ return 1;
+#ifdef COREB_L1_CODE_START
+ if (in_mem_const(addr, size, COREB_L1_CODE_START, COREB_L1_CODE_LENGTH))
+ return 1;
+ if (in_mem_const(addr, size, COREB_L1_SCRATCH_START, L1_SCRATCH_LENGTH))
+ return 1;
+ if (in_mem_const(addr, size, COREB_L1_DATA_A_START, COREB_L1_DATA_A_LENGTH))
+ return 1;
+ if (in_mem_const(addr, size, COREB_L1_DATA_B_START, COREB_L1_DATA_B_LENGTH))
+ return 1;
+#endif
+
+#ifndef CONFIG_EXCEPTION_L1_SCRATCH
+ if (in_mem_const(addr, size, (unsigned long)l1_stack_base, l1_stack_len))
+ return 1;
+#endif
+
+ aret = in_async(addr, size);
+ if (aret < 2)
+ return aret;
+
+ if (in_mem_const_off(addr, size, _ebss_l2 - _stext_l2, L2_START, L2_LENGTH))
+ return 1;
+
+ if (in_mem_const(addr, size, BOOT_ROM_START, BOOT_ROM_LENGTH))
+ return 1;
+ if (in_mem_const(addr, size, L1_ROM_START, L1_ROM_LENGTH))
+ return 1;
+
+ return 0;
+}
+EXPORT_SYMBOL(_access_ok);
+#endif /* CONFIG_ACCESS_CHECK */
diff --git a/arch/blackfin/kernel/pseudodbg.c b/arch/blackfin/kernel/pseudodbg.c
new file mode 100644
index 00000000..db85bc94
--- /dev/null
+++ b/arch/blackfin/kernel/pseudodbg.c
@@ -0,0 +1,191 @@
+/* The fake debug assert instructions
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/ptrace.h>
+
+const char * const greg_names[] = {
+ "R0", "R1", "R2", "R3", "R4", "R5", "R6", "R7",
+ "P0", "P1", "P2", "P3", "P4", "P5", "SP", "FP",
+ "I0", "I1", "I2", "I3", "M0", "M1", "M2", "M3",
+ "B0", "B1", "B2", "B3", "L0", "L1", "L2", "L3",
+ "A0.X", "A0.W", "A1.X", "A1.W", "<res>", "<res>", "ASTAT", "RETS",
+ "<res>", "<res>", "<res>", "<res>", "<res>", "<res>", "<res>", "<res>",
+ "LC0", "LT0", "LB0", "LC1", "LT1", "LB1", "CYCLES", "CYCLES2",
+ "USP", "SEQSTAT", "SYSCFG", "RETI", "RETX", "RETN", "RETE", "EMUDAT",
+};
+
+static const char *get_allreg_name(int grp, int reg)
+{
+ return greg_names[(grp << 3) | reg];
+}
+
+/*
+ * Unfortunately, the pt_regs structure is not laid out the same way as the
+ * hardware register file, so we need to do some fix ups.
+ *
+ * CYCLES is not stored in the pt_regs structure, so we just read it
+ * from the hardware.
+ *
+ * Not supported:
+ * - All reserved registers
+ * - All registers in group 7 (supervisor only)
+ */
+
+static bool fix_up_reg(struct pt_regs *fp, long *value, int grp, int reg)
+{
+ long *val = &fp->r0;
+ unsigned long tmp;
+
+	/* Reject the reserved registers and the supervisor-only group 7 */
+ if (grp == 5 ||
+ (grp == 4 && (reg == 4 || reg == 5)) ||
+ (grp == 7))
+ return false;
+
+ if (grp == 0 || (grp == 1 && reg < 6))
+ val -= (reg + 8 * grp);
+ else if (grp == 1 && reg == 6)
+ val = &fp->usp;
+ else if (grp == 1 && reg == 7)
+ val = &fp->fp;
+ else if (grp == 2) {
+ val = &fp->i0;
+ val -= reg;
+ } else if (grp == 3 && reg >= 4) {
+ val = &fp->l0;
+ val -= (reg - 4);
+ } else if (grp == 3 && reg < 4) {
+ val = &fp->b0;
+ val -= reg;
+ } else if (grp == 4 && reg < 4) {
+ val = &fp->a0x;
+ val -= reg;
+ } else if (grp == 4 && reg == 6)
+ val = &fp->astat;
+ else if (grp == 4 && reg == 7)
+ val = &fp->rets;
+ else if (grp == 6 && reg < 6) {
+ val = &fp->lc0;
+ val -= reg;
+ } else if (grp == 6 && reg == 6) {
+ __asm__ __volatile__("%0 = cycles;\n" : "=d"(tmp));
+ val = &tmp;
+ } else if (grp == 6 && reg == 7) {
+ __asm__ __volatile__("%0 = cycles2;\n" : "=d"(tmp));
+ val = &tmp;
+ }
+
+ *value = *val;
+ return true;
+}
+
+#define PseudoDbg_Assert_opcode 0xf0000000
+#define PseudoDbg_Assert_expected_bits 0
+#define PseudoDbg_Assert_expected_mask 0xffff
+#define PseudoDbg_Assert_regtest_bits 16
+#define PseudoDbg_Assert_regtest_mask 0x7
+#define PseudoDbg_Assert_grp_bits 19
+#define PseudoDbg_Assert_grp_mask 0x7
+#define PseudoDbg_Assert_dbgop_bits 22
+#define PseudoDbg_Assert_dbgop_mask 0x3
+#define PseudoDbg_Assert_dontcare_bits 24
+#define PseudoDbg_Assert_dontcare_mask 0x7
+#define PseudoDbg_Assert_code_bits 27
+#define PseudoDbg_Assert_code_mask 0x1f
+
+/*
+ * DBGA - debug assert
+ */
+bool execute_pseudodbg_assert(struct pt_regs *fp, unsigned int opcode)
+{
+ int expected = ((opcode >> PseudoDbg_Assert_expected_bits) & PseudoDbg_Assert_expected_mask);
+ int dbgop = ((opcode >> (PseudoDbg_Assert_dbgop_bits)) & PseudoDbg_Assert_dbgop_mask);
+ int grp = ((opcode >> (PseudoDbg_Assert_grp_bits)) & PseudoDbg_Assert_grp_mask);
+ int regtest = ((opcode >> (PseudoDbg_Assert_regtest_bits)) & PseudoDbg_Assert_regtest_mask);
+ long value;
+
+ if ((opcode & 0xFF000000) != PseudoDbg_Assert_opcode)
+ return false;
+
+ if (!fix_up_reg(fp, &value, grp, regtest))
+ return false;
+
+ if (dbgop == 0 || dbgop == 2) {
+ /* DBGA ( regs_lo , uimm16 ) */
+ /* DBGAL ( regs , uimm16 ) */
+ if (expected != (value & 0xFFFF)) {
+ pr_notice("DBGA (%s.L,0x%x) failure, got 0x%x\n",
+ get_allreg_name(grp, regtest),
+ expected, (unsigned int)(value & 0xFFFF));
+ return false;
+ }
+
+ } else if (dbgop == 1 || dbgop == 3) {
+ /* DBGA ( regs_hi , uimm16 ) */
+ /* DBGAH ( regs , uimm16 ) */
+ if (expected != ((value >> 16) & 0xFFFF)) {
+ pr_notice("DBGA (%s.H,0x%x) failure, got 0x%x\n",
+ get_allreg_name(grp, regtest),
+ expected, (unsigned int)((value >> 16) & 0xFFFF));
+ return false;
+ }
+ }
+
+ fp->pc += 4;
+ return true;
+}
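+
+/*
+ * Illustrative example (not part of the original patch): a test program
+ * built with the toolchain's pseudo-instructions might contain
+ *
+ *	R0.L = 0x1234;
+ *	DBGA (R0.L, 0x1234);
+ *
+ * which the handler above decodes as grp = 0, regtest = 0, dbgop = 0
+ * and compares the low 16 bits of R0 against the expected immediate.
+ */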
+
+#define PseudoDbg_opcode 0xf8000000
+#define PseudoDbg_reg_bits 0
+#define PseudoDbg_reg_mask 0x7
+#define PseudoDbg_grp_bits 3
+#define PseudoDbg_grp_mask 0x7
+#define PseudoDbg_fn_bits 6
+#define PseudoDbg_fn_mask 0x3
+#define PseudoDbg_code_bits 8
+#define PseudoDbg_code_mask 0xff
+
+/*
+ * DBG - debug (dump a register value out)
+ */
+bool execute_pseudodbg(struct pt_regs *fp, unsigned int opcode)
+{
+ int grp, fn, reg;
+ long value, value1;
+
+ if ((opcode & 0xFF000000) != PseudoDbg_opcode)
+ return false;
+
+ opcode >>= 16;
+	grp = ((opcode >> PseudoDbg_grp_bits) & PseudoDbg_grp_mask);
+ fn = ((opcode >> PseudoDbg_fn_bits) & PseudoDbg_fn_mask);
+ reg = ((opcode >> PseudoDbg_reg_bits) & PseudoDbg_reg_mask);
+
+ if (fn == 3 && (reg == 0 || reg == 1)) {
+ if (!fix_up_reg(fp, &value, 4, 2 * reg))
+ return false;
+ if (!fix_up_reg(fp, &value1, 4, 2 * reg + 1))
+ return false;
+
+ pr_notice("DBG A%i = %02lx%08lx\n", reg, value & 0xFF, value1);
+ fp->pc += 2;
+ return true;
+
+ } else if (fn == 0) {
+ if (!fix_up_reg(fp, &value, grp, reg))
+ return false;
+
+ pr_notice("DBG %s = %08lx\n", get_allreg_name(grp, reg), value);
+ fp->pc += 2;
+ return true;
+ }
+
+ return false;
+}
diff --git a/arch/blackfin/kernel/ptrace.c b/arch/blackfin/kernel/ptrace.c
new file mode 100644
index 00000000..e1f88e02
--- /dev/null
+++ b/arch/blackfin/kernel/ptrace.c
@@ -0,0 +1,407 @@
+/*
+ * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
+ * these modifications are Copyright 2004-2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/elf.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <linux/user.h>
+#include <linux/regset.h>
+#include <linux/signal.h>
+#include <linux/tracehook.h>
+#include <linux/uaccess.h>
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+#include <asm/asm-offsets.h>
+#include <asm/dma.h>
+#include <asm/fixed_code.h>
+#include <asm/cacheflush.h>
+#include <asm/mem_map.h>
+#include <asm/mmu_context.h>
+
+/*
+ * This does not yet catch signals sent when the child dies;
+ * that happens in exit.c or in signal.c.
+ */
+
+/*
+ * Get contents of register REGNO in task TASK.
+ */
+static inline long
+get_reg(struct task_struct *task, unsigned long regno,
+ unsigned long __user *datap)
+{
+ long tmp;
+ struct pt_regs *regs = task_pt_regs(task);
+
+ if (regno & 3 || regno > PT_LAST_PSEUDO)
+ return -EIO;
+
+ switch (regno) {
+ case PT_TEXT_ADDR:
+ tmp = task->mm->start_code;
+ break;
+ case PT_TEXT_END_ADDR:
+ tmp = task->mm->end_code;
+ break;
+ case PT_DATA_ADDR:
+ tmp = task->mm->start_data;
+ break;
+ case PT_USP:
+ tmp = task->thread.usp;
+ break;
+ default:
+ if (regno < sizeof(*regs)) {
+ void *reg_ptr = regs;
+ tmp = *(long *)(reg_ptr + regno);
+ } else
+ return -EIO;
+ }
+
+ return put_user(tmp, datap);
+}
+
+/*
+ * Write contents of register REGNO in task TASK.
+ */
+static inline int
+put_reg(struct task_struct *task, unsigned long regno, unsigned long data)
+{
+ struct pt_regs *regs = task_pt_regs(task);
+
+ if (regno & 3 || regno > PT_LAST_PSEUDO)
+ return -EIO;
+
+ switch (regno) {
+ case PT_PC:
+		/*
+		 * At this point the kernel is most likely in exception.
+		 * The RETX register will be used to populate the pc of the process.
+		 */
+ regs->retx = data;
+ regs->pc = data;
+ break;
+ case PT_RETX:
+ break; /* regs->retx = data; break; */
+ case PT_USP:
+ regs->usp = data;
+ task->thread.usp = data;
+ break;
+ case PT_SYSCFG: /* don't let userspace screw with this */
+ if ((data & ~1) != 0x6)
+ pr_warning("ptrace: ignore syscfg write of %#lx\n", data);
+ break; /* regs->syscfg = data; break; */
+ default:
+ if (regno < sizeof(*regs)) {
+ void *reg_offset = regs;
+ *(long *)(reg_offset + regno) = data;
+ }
+ /* Ignore writes to pseudo registers */
+ }
+
+ return 0;
+}
+
+/*
+ * check that an address falls within the bounds of the target process's memory mappings
+ */
+int
+is_user_addr_valid(struct task_struct *child, unsigned long start, unsigned long len)
+{
+ struct vm_area_struct *vma;
+ struct sram_list_struct *sraml;
+
+ /* overflow */
+ if (start + len < start)
+ return -EIO;
+
+ vma = find_vma(child->mm, start);
+ if (vma && start >= vma->vm_start && start + len <= vma->vm_end)
+ return 0;
+
+ for (sraml = child->mm->context.sram_list; sraml; sraml = sraml->next)
+ if (start >= (unsigned long)sraml->addr
+ && start + len < (unsigned long)sraml->addr + sraml->length)
+ return 0;
+
+ if (start >= FIXED_CODE_START && start + len < FIXED_CODE_END)
+ return 0;
+
+#ifdef CONFIG_APP_STACK_L1
+ if (child->mm->context.l1_stack_save)
+ if (start >= (unsigned long)l1_stack_base &&
+ start + len < (unsigned long)l1_stack_base + l1_stack_len)
+ return 0;
+#endif
+
+ return -EIO;
+}
+
+/*
+ * retrieve the contents of Blackfin userspace general registers
+ */
+static int genregs_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ struct pt_regs *regs = task_pt_regs(target);
+ int ret;
+
+ /* This sucks ... */
+ regs->usp = target->thread.usp;
+
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ regs, 0, sizeof(*regs));
+ if (ret < 0)
+ return ret;
+
+ return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
+ sizeof(*regs), -1);
+}
+
+/*
+ * update the contents of the Blackfin userspace general registers
+ */
+static int genregs_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ struct pt_regs *regs = task_pt_regs(target);
+ int ret;
+
+ /* Don't let people set SYSCFG (it's at the end of pt_regs) */
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ regs, 0, PT_SYSCFG);
+ if (ret < 0)
+ return ret;
+
+ /* This sucks ... */
+ target->thread.usp = regs->usp;
+ /* regs->retx = regs->pc; */
+
+ return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+ PT_SYSCFG, -1);
+}
+
+/*
+ * Define the register sets available on the Blackfin under Linux
+ */
+enum bfin_regset {
+ REGSET_GENERAL,
+};
+
+static const struct user_regset bfin_regsets[] = {
+ [REGSET_GENERAL] = {
+ .core_note_type = NT_PRSTATUS,
+ .n = sizeof(struct pt_regs) / sizeof(long),
+ .size = sizeof(long),
+ .align = sizeof(long),
+ .get = genregs_get,
+ .set = genregs_set,
+ },
+};
+
+static const struct user_regset_view user_bfin_native_view = {
+ .name = "Blackfin",
+ .e_machine = EM_BLACKFIN,
+ .regsets = bfin_regsets,
+ .n = ARRAY_SIZE(bfin_regsets),
+};
+
+const struct user_regset_view *task_user_regset_view(struct task_struct *task)
+{
+ return &user_bfin_native_view;
+}
+
+void user_enable_single_step(struct task_struct *child)
+{
+ struct pt_regs *regs = task_pt_regs(child);
+ regs->syscfg |= SYSCFG_SSSTEP;
+
+ set_tsk_thread_flag(child, TIF_SINGLESTEP);
+}
+
+void user_disable_single_step(struct task_struct *child)
+{
+ struct pt_regs *regs = task_pt_regs(child);
+ regs->syscfg &= ~SYSCFG_SSSTEP;
+
+ clear_tsk_thread_flag(child, TIF_SINGLESTEP);
+}
+
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
+{
+ int ret;
+ unsigned long __user *datap = (unsigned long __user *)data;
+ void *paddr = (void *)addr;
+
+ switch (request) {
+ /* when I and D space are separate, these will need to be fixed. */
+ case PTRACE_PEEKDATA:
+ pr_debug("ptrace: PEEKDATA\n");
+ /* fall through */
+ case PTRACE_PEEKTEXT: /* read word at location addr. */
+ {
+ unsigned long tmp = 0;
+ int copied = 0, to_copy = sizeof(tmp);
+
+ ret = -EIO;
+ pr_debug("ptrace: PEEKTEXT at addr 0x%08lx + %i\n", addr, to_copy);
+ if (is_user_addr_valid(child, addr, to_copy) < 0)
+ break;
+ pr_debug("ptrace: user address is valid\n");
+
+ switch (bfin_mem_access_type(addr, to_copy)) {
+ case BFIN_MEM_ACCESS_CORE:
+ case BFIN_MEM_ACCESS_CORE_ONLY:
+ copied = access_process_vm(child, addr, &tmp,
+ to_copy, 0);
+ if (copied)
+ break;
+
+ /* hrm, why didn't that work ... maybe no mapping */
+ if (addr >= FIXED_CODE_START &&
+ addr + to_copy <= FIXED_CODE_END) {
+ copy_from_user_page(0, 0, 0, &tmp, paddr, to_copy);
+ copied = to_copy;
+ } else if (addr >= BOOT_ROM_START) {
+ memcpy(&tmp, paddr, to_copy);
+ copied = to_copy;
+ }
+
+ break;
+ case BFIN_MEM_ACCESS_DMA:
+ if (safe_dma_memcpy(&tmp, paddr, to_copy))
+ copied = to_copy;
+ break;
+ case BFIN_MEM_ACCESS_ITEST:
+ if (isram_memcpy(&tmp, paddr, to_copy))
+ copied = to_copy;
+ break;
+ default:
+ copied = 0;
+ break;
+ }
+
+ pr_debug("ptrace: copied size %d [0x%08lx]\n", copied, tmp);
+ if (copied == to_copy)
+ ret = put_user(tmp, datap);
+ break;
+ }
+
+ /* when I and D space are separate, this will have to be fixed. */
+ case PTRACE_POKEDATA:
+ pr_debug("ptrace: PTRACE_PEEKDATA\n");
+ /* fall through */
+ case PTRACE_POKETEXT: /* write the word at location addr. */
+ {
+ int copied = 0, to_copy = sizeof(data);
+
+ ret = -EIO;
+ pr_debug("ptrace: POKETEXT at addr 0x%08lx + %i bytes %lx\n",
+ addr, to_copy, data);
+ if (is_user_addr_valid(child, addr, to_copy) < 0)
+ break;
+ pr_debug("ptrace: user address is valid\n");
+
+ switch (bfin_mem_access_type(addr, to_copy)) {
+ case BFIN_MEM_ACCESS_CORE:
+ case BFIN_MEM_ACCESS_CORE_ONLY:
+ copied = access_process_vm(child, addr, &data,
+ to_copy, 1);
+ break;
+ case BFIN_MEM_ACCESS_DMA:
+ if (safe_dma_memcpy(paddr, &data, to_copy))
+ copied = to_copy;
+ break;
+ case BFIN_MEM_ACCESS_ITEST:
+ if (isram_memcpy(paddr, &data, to_copy))
+ copied = to_copy;
+ break;
+ default:
+ copied = 0;
+ break;
+ }
+
+ pr_debug("ptrace: copied size %d\n", copied);
+ if (copied == to_copy)
+ ret = 0;
+ break;
+ }
+
+ case PTRACE_PEEKUSR:
+ switch (addr) {
+#ifdef CONFIG_BINFMT_ELF_FDPIC /* backwards compat */
+ case PT_FDPIC_EXEC:
+ request = PTRACE_GETFDPIC;
+ addr = PTRACE_GETFDPIC_EXEC;
+ goto case_default;
+ case PT_FDPIC_INTERP:
+ request = PTRACE_GETFDPIC;
+ addr = PTRACE_GETFDPIC_INTERP;
+ goto case_default;
+#endif
+ default:
+ ret = get_reg(child, addr, datap);
+ }
+ pr_debug("ptrace: PEEKUSR reg %li with %#lx = %i\n", addr, data, ret);
+ break;
+
+ case PTRACE_POKEUSR:
+ ret = put_reg(child, addr, data);
+ pr_debug("ptrace: POKEUSR reg %li with %li = %i\n", addr, data, ret);
+ break;
+
+ case PTRACE_GETREGS:
+ pr_debug("ptrace: PTRACE_GETREGS\n");
+ return copy_regset_to_user(child, &user_bfin_native_view,
+ REGSET_GENERAL,
+ 0, sizeof(struct pt_regs),
+ datap);
+
+ case PTRACE_SETREGS:
+ pr_debug("ptrace: PTRACE_SETREGS\n");
+ return copy_regset_from_user(child, &user_bfin_native_view,
+ REGSET_GENERAL,
+ 0, sizeof(struct pt_regs),
+ datap);
+
+ case_default:
+ default:
+ ret = ptrace_request(child, request, addr, data);
+ break;
+ }
+
+ return ret;
+}
+
+asmlinkage int syscall_trace_enter(struct pt_regs *regs)
+{
+ int ret = 0;
+
+ if (test_thread_flag(TIF_SYSCALL_TRACE))
+ ret = tracehook_report_syscall_entry(regs);
+
+ return ret;
+}
+
+asmlinkage void syscall_trace_leave(struct pt_regs *regs)
+{
+ int step;
+
+ step = test_thread_flag(TIF_SINGLESTEP);
+ if (step || test_thread_flag(TIF_SYSCALL_TRACE))
+ tracehook_report_syscall_exit(regs, step);
+}
diff --git a/arch/blackfin/kernel/pwm.c b/arch/blackfin/kernel/pwm.c
new file mode 100644
index 00000000..33f59427
--- /dev/null
+++ b/arch/blackfin/kernel/pwm.c
@@ -0,0 +1,100 @@
+/*
+ * Blackfin Pulse Width Modulation (PWM) core
+ *
+ * Copyright (c) 2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/module.h>
+#include <linux/pwm.h>
+#include <linux/slab.h>
+
+#include <asm/gptimers.h>
+#include <asm/portmux.h>
+
+struct pwm_device {
+ unsigned id;
+ unsigned short pin;
+};
+
+static const unsigned short pwm_to_gptimer_per[] = {
+ P_TMR0, P_TMR1, P_TMR2, P_TMR3, P_TMR4, P_TMR5,
+ P_TMR6, P_TMR7, P_TMR8, P_TMR9, P_TMR10, P_TMR11,
+};
+
+struct pwm_device *pwm_request(int pwm_id, const char *label)
+{
+ struct pwm_device *pwm;
+ int ret;
+
+ /* XXX: pwm_id really should be unsigned */
+ if (pwm_id < 0)
+ return NULL;
+
+ pwm = kzalloc(sizeof(*pwm), GFP_KERNEL);
+ if (!pwm)
+ return pwm;
+
+ pwm->id = pwm_id;
+ if (pwm->id >= ARRAY_SIZE(pwm_to_gptimer_per))
+ goto err;
+
+ pwm->pin = pwm_to_gptimer_per[pwm->id];
+ ret = peripheral_request(pwm->pin, label);
+ if (ret)
+ goto err;
+
+ return pwm;
+ err:
+ kfree(pwm);
+ return NULL;
+}
+EXPORT_SYMBOL(pwm_request);
+
+void pwm_free(struct pwm_device *pwm)
+{
+ peripheral_free(pwm->pin);
+ kfree(pwm);
+}
+EXPORT_SYMBOL(pwm_free);
+
+int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns)
+{
+ unsigned long period, duty;
+ unsigned long long val;
+
+ if (duty_ns < 0 || duty_ns > period_ns)
+ return -EINVAL;
+
+ val = (unsigned long long)get_sclk() * period_ns;
+ do_div(val, NSEC_PER_SEC);
+ period = val;
+
+ val = (unsigned long long)period * duty_ns;
+ do_div(val, period_ns);
+ duty = period - val;
+
+ if (duty >= period)
+ duty = period - 1;
+
+ set_gptimer_config(pwm->id, TIMER_MODE_PWM | TIMER_PERIOD_CNT);
+ set_gptimer_pwidth(pwm->id, duty);
+ set_gptimer_period(pwm->id, period);
+
+ return 0;
+}
+EXPORT_SYMBOL(pwm_config);
+
+int pwm_enable(struct pwm_device *pwm)
+{
+ enable_gptimer(pwm->id);
+ return 0;
+}
+EXPORT_SYMBOL(pwm_enable);
+
+void pwm_disable(struct pwm_device *pwm)
+{
+ disable_gptimer(pwm->id);
+}
+EXPORT_SYMBOL(pwm_disable);
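+
+/*
+ * Usage sketch (illustrative only): drive a 1 kHz signal at a 25% duty
+ * cycle on PWM/timer 0.
+ *
+ *	struct pwm_device *pwm = pwm_request(0, "my-pwm");
+ *	if (pwm && !pwm_config(pwm, 250000, 1000000))
+ *		pwm_enable(pwm);
+ *	...
+ *	pwm_disable(pwm);
+ *	pwm_free(pwm);
+ */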
diff --git a/arch/blackfin/kernel/reboot.c b/arch/blackfin/kernel/reboot.c
new file mode 100644
index 00000000..b0434f89
--- /dev/null
+++ b/arch/blackfin/kernel/reboot.c
@@ -0,0 +1,112 @@
+/*
+ * arch/blackfin/kernel/reboot.c - handle shutdown/reboot
+ *
+ * Copyright 2004-2007 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/interrupt.h>
+#include <asm/bfin-global.h>
+#include <asm/reboot.h>
+#include <asm/bfrom.h>
+
+/* A system soft reset makes external memory unusable so force
+ * this function into L1. We use the compiler ssync here rather
+ * than SSYNC() because it's safe (no interrupts and such) and
+ * we save some L1. We do not need to force sanity in the SYSCR
+ * register as the BMODE selection bit is cleared by the soft
+ * reset while the Core B bit (on dual core parts) is cleared by
+ * the core reset.
+ */
+__attribute__ ((__l1_text__, __noreturn__))
+static void bfin_reset(void)
+{
+ if (!ANOMALY_05000353 && !ANOMALY_05000386)
+ bfrom_SoftReset((void *)(L1_SCRATCH_START + L1_SCRATCH_LENGTH - 20));
+
+	/* Wait for completion of "system" events such as cache line
+	 * fills so that we avoid infinite stalls later on as
+ * much as possible. This code is in L1, so it won't trigger
+ * any such event after this point in time.
+ */
+ __builtin_bfin_ssync();
+
+ /* Initiate System software reset. */
+ bfin_write_SWRST(0x7);
+
+	/* Due to the way reset is handled in the hardware, we need
+	 * to delay for 10 SCLKs. The only reliable way to do this is
+	 * to calculate the CCLK/SCLK ratio and multiply it by 10. For
+	 * now, we'll assume the worst case, which is a 1:15 ratio.
+	 */
+ asm(
+ "LSETUP (1f, 1f) LC0 = %0\n"
+ "1: nop;"
+ :
+ : "a" (15 * 10)
+ : "LC0", "LB0", "LT0"
+ );
+
+ /* Clear System software reset */
+ bfin_write_SWRST(0);
+
+ /* The BF526 ROM will crash during reset */
+#if defined(__ADSPBF522__) || defined(__ADSPBF524__) || defined(__ADSPBF526__)
+ /* Seems to be fixed with newer parts though ... */
+ if (__SILICON_REVISION__ < 1 && bfin_revid() < 1)
+ bfin_read_SWRST();
+#endif
+
+ /* Wait for the SWRST write to complete. Cannot rely on SSYNC
+ * though as the System state is all reset now.
+ */
+ asm(
+ "LSETUP (1f, 1f) LC1 = %0\n"
+ "1: nop;"
+ :
+ : "a" (15 * 1)
+ : "LC1", "LB1", "LT1"
+ );
+
+ while (1)
+ /* Issue core reset */
+ asm("raise 1");
+}
+
+__attribute__((weak))
+void native_machine_restart(char *cmd)
+{
+}
+
+void machine_restart(char *cmd)
+{
+ native_machine_restart(cmd);
+ local_irq_disable();
+ if (smp_processor_id())
+ smp_call_function((void *)bfin_reset, 0, 1);
+ else
+ bfin_reset();
+}
+
+__attribute__((weak))
+void native_machine_halt(void)
+{
+ idle_with_irq_disabled();
+}
+
+void machine_halt(void)
+{
+ native_machine_halt();
+}
+
+__attribute__((weak))
+void native_machine_power_off(void)
+{
+ idle_with_irq_disabled();
+}
+
+void machine_power_off(void)
+{
+ native_machine_power_off();
+}
diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c
new file mode 100644
index 00000000..2ad747e9
--- /dev/null
+++ b/arch/blackfin/kernel/setup.c
@@ -0,0 +1,1369 @@
+/*
+ * Copyright 2004-2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/delay.h>
+#include <linux/console.h>
+#include <linux/bootmem.h>
+#include <linux/seq_file.h>
+#include <linux/cpu.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/tty.h>
+#include <linux/pfn.h>
+
+#ifdef CONFIG_MTD_UCLINUX
+#include <linux/mtd/map.h>
+#include <linux/ext2_fs.h>
+#include <linux/cramfs_fs.h>
+#include <linux/romfs_fs.h>
+#endif
+
+#include <asm/cplb.h>
+#include <asm/cacheflush.h>
+#include <asm/blackfin.h>
+#include <asm/cplbinit.h>
+#include <asm/div64.h>
+#include <asm/cpu.h>
+#include <asm/fixed_code.h>
+#include <asm/early_printk.h>
+#include <asm/irq_handler.h>
+#include <asm/pda.h>
+
+u16 _bfin_swrst;
+EXPORT_SYMBOL(_bfin_swrst);
+
+unsigned long memory_start, memory_end, physical_mem_end;
+unsigned long _rambase, _ramstart, _ramend;
+unsigned long reserved_mem_dcache_on;
+unsigned long reserved_mem_icache_on;
+EXPORT_SYMBOL(memory_start);
+EXPORT_SYMBOL(memory_end);
+EXPORT_SYMBOL(physical_mem_end);
+EXPORT_SYMBOL(_ramend);
+EXPORT_SYMBOL(reserved_mem_dcache_on);
+
+#ifdef CONFIG_MTD_UCLINUX
+extern struct map_info uclinux_ram_map;
+unsigned long memory_mtd_end, memory_mtd_start, mtd_size;
+unsigned long _ebss;
+EXPORT_SYMBOL(memory_mtd_end);
+EXPORT_SYMBOL(memory_mtd_start);
+EXPORT_SYMBOL(mtd_size);
+#endif
+
+char __initdata command_line[COMMAND_LINE_SIZE];
+struct blackfin_initial_pda __initdata initial_pda;
+
+/* boot memmap, for parsing "memmap=" */
+#define BFIN_MEMMAP_MAX 128 /* number of entries in bfin_memmap */
+#define BFIN_MEMMAP_RAM 1
+#define BFIN_MEMMAP_RESERVED 2
+static struct bfin_memmap {
+ int nr_map;
+ struct bfin_memmap_entry {
+ unsigned long long addr; /* start of memory segment */
+ unsigned long long size;
+ unsigned long type;
+ } map[BFIN_MEMMAP_MAX];
+} bfin_memmap __initdata;
+
+/* for memmap sanitization */
+struct change_member {
+ struct bfin_memmap_entry *pentry; /* pointer to original entry */
+ unsigned long long addr; /* address for this change point */
+};
+static struct change_member change_point_list[2*BFIN_MEMMAP_MAX] __initdata;
+static struct change_member *change_point[2*BFIN_MEMMAP_MAX] __initdata;
+static struct bfin_memmap_entry *overlap_list[BFIN_MEMMAP_MAX] __initdata;
+static struct bfin_memmap_entry new_map[BFIN_MEMMAP_MAX] __initdata;
+
+DEFINE_PER_CPU(struct blackfin_cpudata, cpu_data);
+
+static int early_init_clkin_hz(char *buf);
+
+#if defined(CONFIG_BFIN_DCACHE) || defined(CONFIG_BFIN_ICACHE)
+void __init generate_cplb_tables(void)
+{
+ unsigned int cpu;
+
+ generate_cplb_tables_all();
+ /* Generate per-CPU I&D CPLB tables */
+ for (cpu = 0; cpu < num_possible_cpus(); ++cpu)
+ generate_cplb_tables_cpu(cpu);
+}
+#endif
+
+void __cpuinit bfin_setup_caches(unsigned int cpu)
+{
+#ifdef CONFIG_BFIN_ICACHE
+ bfin_icache_init(icplb_tbl[cpu]);
+#endif
+
+#ifdef CONFIG_BFIN_DCACHE
+ bfin_dcache_init(dcplb_tbl[cpu]);
+#endif
+
+ bfin_setup_cpudata(cpu);
+
+ /*
+ * In cache coherence emulation mode, we need to have the
+ * D-cache enabled before running any atomic operation which
+ * might involve cache invalidation (i.e. spinlock, rwlock).
+ * So printk's are deferred until then.
+ */
+#ifdef CONFIG_BFIN_ICACHE
+ printk(KERN_INFO "Instruction Cache Enabled for CPU%u\n", cpu);
+ printk(KERN_INFO " External memory:"
+# ifdef CONFIG_BFIN_EXTMEM_ICACHEABLE
+ " cacheable"
+# else
+ " uncacheable"
+# endif
+ " in instruction cache\n");
+ if (L2_LENGTH)
+ printk(KERN_INFO " L2 SRAM :"
+# ifdef CONFIG_BFIN_L2_ICACHEABLE
+ " cacheable"
+# else
+ " uncacheable"
+# endif
+ " in instruction cache\n");
+
+#else
+ printk(KERN_INFO "Instruction Cache Disabled for CPU%u\n", cpu);
+#endif
+
+#ifdef CONFIG_BFIN_DCACHE
+ printk(KERN_INFO "Data Cache Enabled for CPU%u\n", cpu);
+ printk(KERN_INFO " External memory:"
+# if defined CONFIG_BFIN_EXTMEM_WRITEBACK
+ " cacheable (write-back)"
+# elif defined CONFIG_BFIN_EXTMEM_WRITETHROUGH
+ " cacheable (write-through)"
+# else
+ " uncacheable"
+# endif
+ " in data cache\n");
+ if (L2_LENGTH)
+ printk(KERN_INFO " L2 SRAM :"
+# if defined CONFIG_BFIN_L2_WRITEBACK
+ " cacheable (write-back)"
+# elif defined CONFIG_BFIN_L2_WRITETHROUGH
+ " cacheable (write-through)"
+# else
+ " uncacheable"
+# endif
+ " in data cache\n");
+#else
+ printk(KERN_INFO "Data Cache Disabled for CPU%u\n", cpu);
+#endif
+}
+
+void __cpuinit bfin_setup_cpudata(unsigned int cpu)
+{
+ struct blackfin_cpudata *cpudata = &per_cpu(cpu_data, cpu);
+
+ cpudata->imemctl = bfin_read_IMEM_CONTROL();
+ cpudata->dmemctl = bfin_read_DMEM_CONTROL();
+}
+
+void __init bfin_cache_init(void)
+{
+#if defined(CONFIG_BFIN_DCACHE) || defined(CONFIG_BFIN_ICACHE)
+ generate_cplb_tables();
+#endif
+ bfin_setup_caches(0);
+}
+
+void __init bfin_relocate_l1_mem(void)
+{
+ unsigned long text_l1_len = (unsigned long)_text_l1_len;
+ unsigned long data_l1_len = (unsigned long)_data_l1_len;
+ unsigned long data_b_l1_len = (unsigned long)_data_b_l1_len;
+ unsigned long l2_len = (unsigned long)_l2_len;
+
+ early_shadow_stamp();
+
+ /*
+ * due to the ALIGN(4) in the arch/blackfin/kernel/vmlinux.lds.S
+ * we know that everything about l1 text/data is nice and aligned,
+ * so copy by 4 byte chunks, and don't worry about overlapping
+ * src/dest.
+ *
+	 * We can't use the dma_memcpy functions, since they can call
+	 * scheduler functions which might be in L1 :( and core writes
+	 * into L1 instruction memory cause bad access errors, so we are
+	 * stuck: we are required to use DMA, but can't use the common
+	 * dma functions. We can't use memcpy either, since that might
+	 * be located in the relocated L1.
+ */
+
+ blackfin_dma_early_init();
+
+ /* if necessary, copy L1 text to L1 instruction SRAM */
+ if (L1_CODE_LENGTH && text_l1_len)
+ early_dma_memcpy(_stext_l1, _text_l1_lma, text_l1_len);
+
+ /* if necessary, copy L1 data to L1 data bank A SRAM */
+ if (L1_DATA_A_LENGTH && data_l1_len)
+ early_dma_memcpy(_sdata_l1, _data_l1_lma, data_l1_len);
+
+ /* if necessary, copy L1 data B to L1 data bank B SRAM */
+ if (L1_DATA_B_LENGTH && data_b_l1_len)
+ early_dma_memcpy(_sdata_b_l1, _data_b_l1_lma, data_b_l1_len);
+
+ early_dma_memcpy_done();
+
+#if defined(CONFIG_SMP) && defined(CONFIG_ICACHE_FLUSH_L1)
+ blackfin_iflush_l1_entry[0] = (unsigned long)blackfin_icache_flush_range_l1;
+#endif
+
+ /* if necessary, copy L2 text/data to L2 SRAM */
+ if (L2_LENGTH && l2_len)
+ memcpy(_stext_l2, _l2_lma, l2_len);
+}
+
+#ifdef CONFIG_SMP
+void __init bfin_relocate_coreb_l1_mem(void)
+{
+ unsigned long text_l1_len = (unsigned long)_text_l1_len;
+ unsigned long data_l1_len = (unsigned long)_data_l1_len;
+ unsigned long data_b_l1_len = (unsigned long)_data_b_l1_len;
+
+ blackfin_dma_early_init();
+
+ /* if necessary, copy L1 text to L1 instruction SRAM */
+ if (L1_CODE_LENGTH && text_l1_len)
+ early_dma_memcpy((void *)COREB_L1_CODE_START, _text_l1_lma,
+ text_l1_len);
+
+ /* if necessary, copy L1 data to L1 data bank A SRAM */
+ if (L1_DATA_A_LENGTH && data_l1_len)
+ early_dma_memcpy((void *)COREB_L1_DATA_A_START, _data_l1_lma,
+ data_l1_len);
+
+ /* if necessary, copy L1 data B to L1 data bank B SRAM */
+ if (L1_DATA_B_LENGTH && data_b_l1_len)
+ early_dma_memcpy((void *)COREB_L1_DATA_B_START, _data_b_l1_lma,
+ data_b_l1_len);
+
+ early_dma_memcpy_done();
+
+#ifdef CONFIG_ICACHE_FLUSH_L1
+ blackfin_iflush_l1_entry[1] = (unsigned long)blackfin_icache_flush_range_l1 -
+ (unsigned long)_stext_l1 + COREB_L1_CODE_START;
+#endif
+}
+#endif
+
+#ifdef CONFIG_ROMKERNEL
+void __init bfin_relocate_xip_data(void)
+{
+ early_shadow_stamp();
+
+ memcpy(_sdata, _data_lma, (unsigned long)_data_len - THREAD_SIZE + sizeof(struct thread_info));
+ memcpy(_sinitdata, _init_data_lma, (unsigned long)_init_data_len);
+}
+#endif
+
+/* add_memory_region to memmap */
+static void __init add_memory_region(unsigned long long start,
+ unsigned long long size, int type)
+{
+ int i;
+
+ i = bfin_memmap.nr_map;
+
+ if (i == BFIN_MEMMAP_MAX) {
+ printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
+ return;
+ }
+
+ bfin_memmap.map[i].addr = start;
+ bfin_memmap.map[i].size = size;
+ bfin_memmap.map[i].type = type;
+ bfin_memmap.nr_map++;
+}
+
+/*
+ * Sanitize the boot memmap, removing overlaps.
+ */
+static int __init sanitize_memmap(struct bfin_memmap_entry *map, int *pnr_map)
+{
+ struct change_member *change_tmp;
+ unsigned long current_type, last_type;
+ unsigned long long last_addr;
+ int chgidx, still_changing;
+ int overlap_entries;
+ int new_entry;
+ int old_nr, new_nr, chg_nr;
+ int i;
+
+ /*
+ Visually we're performing the following (1,2,3,4 = memory types)
+
+ Sample memory map (w/overlaps):
+ ____22__________________
+ ______________________4_
+ ____1111________________
+ _44_____________________
+ 11111111________________
+ ____________________33__
+ ___________44___________
+ __________33333_________
+ ______________22________
+ ___________________2222_
+ _________111111111______
+ _____________________11_
+ _________________4______
+
+ Sanitized equivalent (no overlap):
+ 1_______________________
+ _44_____________________
+ ___1____________________
+ ____22__________________
+ ______11________________
+ _________1______________
+ __________3_____________
+ ___________44___________
+ _____________33_________
+ _______________2________
+ ________________1_______
+ _________________4______
+ ___________________2____
+ ____________________33__
+ ______________________4_
+ */
+ /* if there's only one memory region, don't bother */
+ if (*pnr_map < 2)
+ return -1;
+
+ old_nr = *pnr_map;
+
+ /* bail out if we find any unreasonable addresses in memmap */
+ for (i = 0; i < old_nr; i++)
+ if (map[i].addr + map[i].size < map[i].addr)
+ return -1;
+
+ /* create pointers for initial change-point information (for sorting) */
+ for (i = 0; i < 2*old_nr; i++)
+ change_point[i] = &change_point_list[i];
+
+ /* record all known change-points (starting and ending addresses),
+ omitting those that are for empty memory regions */
+ chgidx = 0;
+ for (i = 0; i < old_nr; i++) {
+ if (map[i].size != 0) {
+ change_point[chgidx]->addr = map[i].addr;
+ change_point[chgidx++]->pentry = &map[i];
+ change_point[chgidx]->addr = map[i].addr + map[i].size;
+ change_point[chgidx++]->pentry = &map[i];
+ }
+ }
+ chg_nr = chgidx; /* true number of change-points */
+
+ /* sort change-point list by memory addresses (low -> high) */
+ still_changing = 1;
+ while (still_changing) {
+ still_changing = 0;
+ for (i = 1; i < chg_nr; i++) {
+ /* if <current_addr> > <last_addr>, swap */
+ /* or, if current=<start_addr> & last=<end_addr>, swap */
+ if ((change_point[i]->addr < change_point[i-1]->addr) ||
+ ((change_point[i]->addr == change_point[i-1]->addr) &&
+ (change_point[i]->addr == change_point[i]->pentry->addr) &&
+ (change_point[i-1]->addr != change_point[i-1]->pentry->addr))
+ ) {
+ change_tmp = change_point[i];
+ change_point[i] = change_point[i-1];
+ change_point[i-1] = change_tmp;
+ still_changing = 1;
+ }
+ }
+ }
+
+ /* create a new memmap, removing overlaps */
+ overlap_entries = 0; /* number of entries in the overlap table */
+ new_entry = 0; /* index for creating new memmap entries */
+ last_type = 0; /* start with undefined memory type */
+ last_addr = 0; /* start with 0 as last starting address */
+	/* loop through change-points, determining effect on the new memmap */
+ for (chgidx = 0; chgidx < chg_nr; chgidx++) {
+ /* keep track of all overlapping memmap entries */
+ if (change_point[chgidx]->addr == change_point[chgidx]->pentry->addr) {
+ /* add map entry to overlap list (> 1 entry implies an overlap) */
+ overlap_list[overlap_entries++] = change_point[chgidx]->pentry;
+ } else {
+ /* remove entry from list (order independent, so swap with last) */
+ for (i = 0; i < overlap_entries; i++) {
+ if (overlap_list[i] == change_point[chgidx]->pentry)
+ overlap_list[i] = overlap_list[overlap_entries-1];
+ }
+ overlap_entries--;
+ }
+ /* if there are overlapping entries, decide which "type" to use */
+ /* (larger value takes precedence -- 1=usable, 2,3,4,4+=unusable) */
+ current_type = 0;
+ for (i = 0; i < overlap_entries; i++)
+ if (overlap_list[i]->type > current_type)
+ current_type = overlap_list[i]->type;
+ /* continue building up new memmap based on this information */
+ if (current_type != last_type) {
+ if (last_type != 0) {
+ new_map[new_entry].size =
+ change_point[chgidx]->addr - last_addr;
+ /* move forward only if the new size was non-zero */
+ if (new_map[new_entry].size != 0)
+ if (++new_entry >= BFIN_MEMMAP_MAX)
+ break; /* no more space left for new entries */
+ }
+ if (current_type != 0) {
+ new_map[new_entry].addr = change_point[chgidx]->addr;
+ new_map[new_entry].type = current_type;
+ last_addr = change_point[chgidx]->addr;
+ }
+ last_type = current_type;
+ }
+ }
+ new_nr = new_entry; /* retain count for new entries */
+
+ /* copy new mapping into original location */
+ memcpy(map, new_map, new_nr*sizeof(struct bfin_memmap_entry));
+ *pnr_map = new_nr;
+
+ return 0;
+}
+
+static void __init print_memory_map(char *who)
+{
+ int i;
+
+ for (i = 0; i < bfin_memmap.nr_map; i++) {
+ printk(KERN_DEBUG " %s: %016Lx - %016Lx ", who,
+ bfin_memmap.map[i].addr,
+ bfin_memmap.map[i].addr + bfin_memmap.map[i].size);
+ switch (bfin_memmap.map[i].type) {
+ case BFIN_MEMMAP_RAM:
+ printk(KERN_CONT "(usable)\n");
+ break;
+ case BFIN_MEMMAP_RESERVED:
+ printk(KERN_CONT "(reserved)\n");
+ break;
+ default:
+ printk(KERN_CONT "type %lu\n", bfin_memmap.map[i].type);
+ break;
+ }
+ }
+}
+
+static __init int parse_memmap(char *arg)
+{
+ unsigned long long start_at, mem_size;
+
+ if (!arg)
+ return -EINVAL;
+
+ mem_size = memparse(arg, &arg);
+ if (*arg == '@') {
+ start_at = memparse(arg+1, &arg);
+ add_memory_region(start_at, mem_size, BFIN_MEMMAP_RAM);
+ } else if (*arg == '$') {
+ start_at = memparse(arg+1, &arg);
+ add_memory_region(start_at, mem_size, BFIN_MEMMAP_RESERVED);
+ }
+
+ return 0;
+}
+
+/*
+ * Initial parsing of the command line. Currently, we support:
+ * - Controlling the linux memory size: mem=xxx[KMG]
+ * - Controlling the physical memory size: max_mem=xxx[KMG][$][#]
+ * $ -> reserved memory is dcacheable
+ * # -> reserved memory is icacheable
+ * - "memmap=XXX[KkmM][@][$]XXX[KkmM]" defines a memory region
+ * @ from <start> to <start>+<mem>, type RAM
+ * $ from <start> to <start>+<mem>, type RESERVED
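+ *
+ * Example (illustrative): "mem=32M max_mem=64M$ memmap=16M@0x2000000"
+ * limits kernel-managed memory to 32MB, marks the reserved memory as
+ * dcacheable, and registers an extra 16MB RAM region at 0x2000000.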
+ */
+static __init void parse_cmdline_early(char *cmdline_p)
+{
+ char c = ' ', *to = cmdline_p;
+ unsigned int memsize;
+ for (;;) {
+ if (c == ' ') {
+ if (!memcmp(to, "mem=", 4)) {
+ to += 4;
+ memsize = memparse(to, &to);
+ if (memsize)
+ _ramend = memsize;
+
+ } else if (!memcmp(to, "max_mem=", 8)) {
+ to += 8;
+ memsize = memparse(to, &to);
+ if (memsize) {
+ physical_mem_end = memsize;
+ if (*to != ' ') {
+ if (*to == '$'
+ || *(to + 1) == '$')
+ reserved_mem_dcache_on = 1;
+ if (*to == '#'
+ || *(to + 1) == '#')
+ reserved_mem_icache_on = 1;
+ }
+ }
+ } else if (!memcmp(to, "clkin_hz=", 9)) {
+ to += 9;
+ early_init_clkin_hz(to);
+#ifdef CONFIG_EARLY_PRINTK
+ } else if (!memcmp(to, "earlyprintk=", 12)) {
+ to += 12;
+ setup_early_printk(to);
+#endif
+ } else if (!memcmp(to, "memmap=", 7)) {
+ to += 7;
+ parse_memmap(to);
+ }
+ }
+ c = *(to++);
+ if (!c)
+ break;
+ }
+}
+
+/*
+ * Setup memory defaults from user config.
+ * The physical memory layout looks like:
+ *
+ * [_rambase, _ramstart]: kernel image
+ * [memory_start, memory_end]: dynamic memory managed by kernel
+ * [memory_end, _ramend]: reserved memory
+ * [memory_mtd_start(memory_end),
+ * memory_mtd_start + mtd_size]: rootfs (if any)
+ * [_ramend - DMA_UNCACHED_REGION,
+ * _ramend]: uncached DMA region
+ * [_ramend, physical_mem_end]: memory not managed by kernel
+ */
+static __init void memory_setup(void)
+{
+#ifdef CONFIG_MTD_UCLINUX
+ unsigned long mtd_phys = 0;
+ unsigned long n;
+#endif
+ unsigned long max_mem;
+
+ _rambase = CONFIG_BOOT_LOAD;
+ _ramstart = (unsigned long)_end;
+
+ if (DMA_UNCACHED_REGION > (_ramend - _ramstart)) {
+ console_init();
+ panic("DMA region exceeds memory limit: %lu.",
+ _ramend - _ramstart);
+ }
+ max_mem = memory_end = _ramend - DMA_UNCACHED_REGION;
+
+#if (defined(CONFIG_BFIN_EXTMEM_ICACHEABLE) && ANOMALY_05000263)
+	/* Due to a hardware anomaly, we need to limit the size of usable
+	 * instruction memory to at most 60MB (56MB if HUNT_FOR_ZERO is on):
+	 * 05000263 - Hardware loop corrupted when taking an ICPLB exception
+	 */
+# if (defined(CONFIG_DEBUG_HUNT_FOR_ZERO))
+ if (max_mem >= 56 * 1024 * 1024)
+ max_mem = 56 * 1024 * 1024;
+# else
+ if (max_mem >= 60 * 1024 * 1024)
+ max_mem = 60 * 1024 * 1024;
+# endif /* CONFIG_DEBUG_HUNT_FOR_ZERO */
+#endif /* ANOMALY_05000263 */
+
+#ifdef CONFIG_MPU
+ /* Round up to multiple of 4MB */
+ memory_start = (_ramstart + 0x3fffff) & ~0x3fffff;
+#else
+ memory_start = PAGE_ALIGN(_ramstart);
+#endif
+
+#if defined(CONFIG_MTD_UCLINUX)
+ /* generic memory mapped MTD driver */
+ memory_mtd_end = memory_end;
+
+ mtd_phys = _ramstart;
+ mtd_size = PAGE_ALIGN(*((unsigned long *)(mtd_phys + 8)));
+
+# if defined(CONFIG_EXT2_FS) || defined(CONFIG_EXT3_FS)
+ n = ext2_image_size((void *)(mtd_phys + 0x400));
+ if (n)
+ mtd_size = PAGE_ALIGN(n * 1024);
+# endif
+
+# if defined(CONFIG_CRAMFS)
+ if (*((unsigned long *)(mtd_phys)) == CRAMFS_MAGIC)
+ mtd_size = PAGE_ALIGN(*((unsigned long *)(mtd_phys + 0x4)));
+# endif
+
+# if defined(CONFIG_ROMFS_FS)
+ if (((unsigned long *)mtd_phys)[0] == ROMSB_WORD0
+ && ((unsigned long *)mtd_phys)[1] == ROMSB_WORD1) {
+ mtd_size =
+ PAGE_ALIGN(be32_to_cpu(((unsigned long *)mtd_phys)[2]));
+
+ /* ROM_FS is XIP, so if we found it, we need to limit memory */
+ if (memory_end > max_mem) {
+ pr_info("Limiting kernel memory to %liMB due to anomaly 05000263\n", max_mem >> 20);
+ memory_end = max_mem;
+ }
+ }
+# endif /* CONFIG_ROMFS_FS */
+
+	/* Since the default MTD_UCLINUX has no magic number, we just blindly
+	 * read 8 bytes past the end of the kernel's image and look at it.
+	 * When no image is attached, mtd_size ends up as a random number, so
+	 * do some basic sanity checks before operating on things.
+	 */
+ if (mtd_size == 0 || memory_end <= mtd_size) {
+ pr_emerg("Could not find valid ram mtd attached.\n");
+ } else {
+ memory_end -= mtd_size;
+
+ /* Relocate MTD image to the top of memory after the uncached memory area */
+ uclinux_ram_map.phys = memory_mtd_start = memory_end;
+ uclinux_ram_map.size = mtd_size;
+		pr_info("Found mtd partition at 0x%p, (len=0x%lx), moving to 0x%p\n",
+ _end, mtd_size, (void *)memory_mtd_start);
+ dma_memcpy((void *)uclinux_ram_map.phys, _end, uclinux_ram_map.size);
+ }
+#endif /* CONFIG_MTD_UCLINUX */
+
+	/* We need to limit memory, since everything could have a text section
+	 * of userspace in it, and expose anomaly 05000263.  If the anomaly
+	 * doesn't exist, or we don't need to - then don't.
+	 */
+ if (memory_end > max_mem) {
+ pr_info("Limiting kernel memory to %liMB due to anomaly 05000263\n", max_mem >> 20);
+ memory_end = max_mem;
+ }
+
+#ifdef CONFIG_MPU
+#if defined(CONFIG_ROMFS_ON_MTD) && defined(CONFIG_MTD_ROM)
+ page_mask_nelts = (((_ramend + ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE -
+ ASYNC_BANK0_BASE) >> PAGE_SHIFT) + 31) / 32;
+#else
+ page_mask_nelts = ((_ramend >> PAGE_SHIFT) + 31) / 32;
+#endif
+ page_mask_order = get_order(3 * page_mask_nelts * sizeof(long));
+#endif
+
+ init_mm.start_code = (unsigned long)_stext;
+ init_mm.end_code = (unsigned long)_etext;
+ init_mm.end_data = (unsigned long)_edata;
+ init_mm.brk = (unsigned long)0;
+
+ printk(KERN_INFO "Board Memory: %ldMB\n", physical_mem_end >> 20);
+ printk(KERN_INFO "Kernel Managed Memory: %ldMB\n", _ramend >> 20);
+
+ printk(KERN_INFO "Memory map:\n"
+ " fixedcode = 0x%p-0x%p\n"
+ " text = 0x%p-0x%p\n"
+ " rodata = 0x%p-0x%p\n"
+ " bss = 0x%p-0x%p\n"
+ " data = 0x%p-0x%p\n"
+ " stack = 0x%p-0x%p\n"
+ " init = 0x%p-0x%p\n"
+ " available = 0x%p-0x%p\n"
+#ifdef CONFIG_MTD_UCLINUX
+ " rootfs = 0x%p-0x%p\n"
+#endif
+#if DMA_UNCACHED_REGION > 0
+ " DMA Zone = 0x%p-0x%p\n"
+#endif
+ , (void *)FIXED_CODE_START, (void *)FIXED_CODE_END,
+ _stext, _etext,
+ __start_rodata, __end_rodata,
+ __bss_start, __bss_stop,
+ _sdata, _edata,
+ (void *)&init_thread_union,
+ (void *)((int)(&init_thread_union) + THREAD_SIZE),
+ __init_begin, __init_end,
+ (void *)_ramstart, (void *)memory_end
+#ifdef CONFIG_MTD_UCLINUX
+ , (void *)memory_mtd_start, (void *)(memory_mtd_start + mtd_size)
+#endif
+#if DMA_UNCACHED_REGION > 0
+ , (void *)(_ramend - DMA_UNCACHED_REGION), (void *)(_ramend)
+#endif
+ );
+}
+
+/*
+ * Find the lowest, highest page frame number we have available
+ */
+void __init find_min_max_pfn(void)
+{
+ int i;
+
+ max_pfn = 0;
+ min_low_pfn = memory_end;
+
+ for (i = 0; i < bfin_memmap.nr_map; i++) {
+ unsigned long start, end;
+ /* RAM? */
+ if (bfin_memmap.map[i].type != BFIN_MEMMAP_RAM)
+ continue;
+ start = PFN_UP(bfin_memmap.map[i].addr);
+ end = PFN_DOWN(bfin_memmap.map[i].addr +
+ bfin_memmap.map[i].size);
+ if (start >= end)
+ continue;
+ if (end > max_pfn)
+ max_pfn = end;
+ if (start < min_low_pfn)
+ min_low_pfn = start;
+ }
+}
+
+static __init void setup_bootmem_allocator(void)
+{
+ int bootmap_size;
+ int i;
+ unsigned long start_pfn, end_pfn;
+ unsigned long curr_pfn, last_pfn, size;
+
+ /* mark memory between memory_start and memory_end usable */
+ add_memory_region(memory_start,
+ memory_end - memory_start, BFIN_MEMMAP_RAM);
+ /* sanity check for overlap */
+ sanitize_memmap(bfin_memmap.map, &bfin_memmap.nr_map);
+ print_memory_map("boot memmap");
+
+ /* initialize globals in linux/bootmem.h */
+ find_min_max_pfn();
+ /* pfn of the last usable page frame */
+ if (max_pfn > memory_end >> PAGE_SHIFT)
+ max_pfn = memory_end >> PAGE_SHIFT;
+ /* pfn of last page frame directly mapped by kernel */
+ max_low_pfn = max_pfn;
+ /* pfn of the first usable page frame after kernel image*/
+ if (min_low_pfn < memory_start >> PAGE_SHIFT)
+ min_low_pfn = memory_start >> PAGE_SHIFT;
+
+ start_pfn = PAGE_OFFSET >> PAGE_SHIFT;
+ end_pfn = memory_end >> PAGE_SHIFT;
+
+ /*
+ * give all the memory to the bootmap allocator, tell it to put the
+ * boot mem_map at the start of memory.
+ */
+ bootmap_size = init_bootmem_node(NODE_DATA(0),
+ memory_start >> PAGE_SHIFT, /* map goes here */
+ start_pfn, end_pfn);
+
+ /* register the memmap regions with the bootmem allocator */
+ for (i = 0; i < bfin_memmap.nr_map; i++) {
+ /*
+ * Reserve usable memory
+ */
+ if (bfin_memmap.map[i].type != BFIN_MEMMAP_RAM)
+ continue;
+ /*
+ * We are rounding up the start address of usable memory:
+ */
+ curr_pfn = PFN_UP(bfin_memmap.map[i].addr);
+ if (curr_pfn >= end_pfn)
+ continue;
+ /*
+ * ... and at the end of the usable range downwards:
+ */
+ last_pfn = PFN_DOWN(bfin_memmap.map[i].addr +
+ bfin_memmap.map[i].size);
+
+ if (last_pfn > end_pfn)
+ last_pfn = end_pfn;
+
+ /*
+ * .. finally, did all the rounding and playing
+ * around just make the area go away?
+ */
+ if (last_pfn <= curr_pfn)
+ continue;
+
+ size = last_pfn - curr_pfn;
+ free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size));
+ }
+
+ /* reserve memory before memory_start, including bootmap */
+ reserve_bootmem(PAGE_OFFSET,
+ memory_start + bootmap_size + PAGE_SIZE - 1 - PAGE_OFFSET,
+ BOOTMEM_DEFAULT);
+}
+
+#define EBSZ_TO_MEG(ebsz) \
+({ \
+ int meg = 0; \
+ switch (ebsz & 0xf) { \
+ case 0x1: meg = 16; break; \
+ case 0x3: meg = 32; break; \
+ case 0x5: meg = 64; break; \
+ case 0x7: meg = 128; break; \
+ case 0x9: meg = 256; break; \
+ case 0xb: meg = 512; break; \
+ } \
+ meg; \
+})
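+/*
+ * Worked example (hypothetical register value): with EBIU_SDBCTL set to
+ * 0x00050005 on a BF561, banks 0 and 2 decode EBSZ 0x5 (64MB each) while
+ * banks 1 and 3 decode 0x0 (unpopulated), so get_mem_size() below
+ * returns 128.
+ */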
+static inline int __init get_mem_size(void)
+{
+#if defined(EBIU_SDBCTL)
+# if defined(BF561_FAMILY)
+ int ret = 0;
+ u32 sdbctl = bfin_read_EBIU_SDBCTL();
+ ret += EBSZ_TO_MEG(sdbctl >> 0);
+ ret += EBSZ_TO_MEG(sdbctl >> 8);
+ ret += EBSZ_TO_MEG(sdbctl >> 16);
+ ret += EBSZ_TO_MEG(sdbctl >> 24);
+ return ret;
+# else
+ return EBSZ_TO_MEG(bfin_read_EBIU_SDBCTL());
+# endif
+#elif defined(EBIU_DDRCTL1)
+ u32 ddrctl = bfin_read_EBIU_DDRCTL1();
+ int ret = 0;
+ switch (ddrctl & 0xc0000) {
+ case DEVSZ_64:
+ ret = 64 / 8;
+ break;
+ case DEVSZ_128:
+ ret = 128 / 8;
+ break;
+ case DEVSZ_256:
+ ret = 256 / 8;
+ break;
+ case DEVSZ_512:
+ ret = 512 / 8;
+ break;
+ }
+	switch (ddrctl & 0x30000) {
+	case DEVWD_4:
+		ret *= 2;
+		/* fallthrough - narrower devices mean more chips per bank */
+	case DEVWD_8:
+		ret *= 2;
+		/* fallthrough */
+	case DEVWD_16:
+		break;
+	}
+ if ((ddrctl & 0xc000) == 0x4000)
+ ret *= 2;
+ return ret;
+#endif
+ BUG();
+}
+
+__attribute__((weak))
+void __init native_machine_early_platform_add_devices(void)
+{
+}
+
+void __init setup_arch(char **cmdline_p)
+{
+ u32 mmr;
+ unsigned long sclk, cclk;
+
+ native_machine_early_platform_add_devices();
+
+ enable_shadow_console();
+
+ /* Check to make sure we are running on the right processor */
+ if (unlikely(CPUID != bfin_cpuid()))
+ printk(KERN_ERR "ERROR: Not running on ADSP-%s: unknown CPUID 0x%04x Rev 0.%d\n",
+ CPU, bfin_cpuid(), bfin_revid());
+
+#ifdef CONFIG_DUMMY_CONSOLE
+ conswitchp = &dummy_con;
+#endif
+
+#if defined(CONFIG_CMDLINE_BOOL)
+ strncpy(&command_line[0], CONFIG_CMDLINE, sizeof(command_line));
+ command_line[sizeof(command_line) - 1] = 0;
+#endif
+
+ /* Keep a copy of command line */
+ *cmdline_p = &command_line[0];
+ memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
+ boot_command_line[COMMAND_LINE_SIZE - 1] = '\0';
+
+ memset(&bfin_memmap, 0, sizeof(bfin_memmap));
+
+ /* If the user does not specify things on the command line, use
+ * what the bootloader set things up as
+ */
+ physical_mem_end = 0;
+ parse_cmdline_early(&command_line[0]);
+
+ if (_ramend == 0)
+ _ramend = get_mem_size() * 1024 * 1024;
+
+ if (physical_mem_end == 0)
+ physical_mem_end = _ramend;
+
+ memory_setup();
+
+ /* Initialize Async memory banks */
+ bfin_write_EBIU_AMBCTL0(AMBCTL0VAL);
+ bfin_write_EBIU_AMBCTL1(AMBCTL1VAL);
+ bfin_write_EBIU_AMGCTL(AMGCTLVAL);
+#ifdef CONFIG_EBIU_MBSCTLVAL
+ bfin_write_EBIU_MBSCTL(CONFIG_EBIU_MBSCTLVAL);
+ bfin_write_EBIU_MODE(CONFIG_EBIU_MODEVAL);
+ bfin_write_EBIU_FCTL(CONFIG_EBIU_FCTLVAL);
+#endif
+#ifdef CONFIG_BFIN_HYSTERESIS_CONTROL
+ bfin_write_PORTF_HYSTERESIS(HYST_PORTF_0_15);
+ bfin_write_PORTG_HYSTERESIS(HYST_PORTG_0_15);
+ bfin_write_PORTH_HYSTERESIS(HYST_PORTH_0_15);
+ bfin_write_MISCPORT_HYSTERESIS((bfin_read_MISCPORT_HYSTERESIS() &
+ ~HYST_NONEGPIO_MASK) | HYST_NONEGPIO);
+#endif
+
+ cclk = get_cclk();
+ sclk = get_sclk();
+
+ if ((ANOMALY_05000273 || ANOMALY_05000274) && (cclk >> 1) < sclk)
+ panic("ANOMALY 05000273 or 05000274: CCLK must be >= 2*SCLK");
+
+#ifdef BF561_FAMILY
+ if (ANOMALY_05000266) {
+ bfin_read_IMDMA_D0_IRQ_STATUS();
+ bfin_read_IMDMA_D1_IRQ_STATUS();
+ }
+#endif
+
+ mmr = bfin_read_TBUFCTL();
+ printk(KERN_INFO "Hardware Trace %s and %sabled\n",
+ (mmr & 0x1) ? "active" : "off",
+ (mmr & 0x2) ? "en" : "dis");
+
+ mmr = bfin_read_SYSCR();
+ printk(KERN_INFO "Boot Mode: %i\n", mmr & 0xF);
+
+ /* Newer parts mirror SWRST bits in SYSCR */
+#if defined(CONFIG_BF53x) || defined(CONFIG_BF561) || \
+ defined(CONFIG_BF538) || defined(CONFIG_BF539)
+ _bfin_swrst = bfin_read_SWRST();
+#else
+ /* Clear boot mode field */
+ _bfin_swrst = mmr & ~0xf;
+#endif
+
+#ifdef CONFIG_DEBUG_DOUBLEFAULT_PRINT
+ bfin_write_SWRST(_bfin_swrst & ~DOUBLE_FAULT);
+#endif
+#ifdef CONFIG_DEBUG_DOUBLEFAULT_RESET
+ bfin_write_SWRST(_bfin_swrst | DOUBLE_FAULT);
+#endif
+
+#ifdef CONFIG_SMP
+ if (_bfin_swrst & SWRST_DBL_FAULT_A) {
+#else
+ if (_bfin_swrst & RESET_DOUBLE) {
+#endif
+ printk(KERN_EMERG "Recovering from DOUBLE FAULT event\n");
+#ifdef CONFIG_DEBUG_DOUBLEFAULT
+ /* We assume the crashing kernel, and the current symbol table match */
+ printk(KERN_EMERG " While handling exception (EXCAUSE = %#x) at %pF\n",
+ initial_pda.seqstat_doublefault & SEQSTAT_EXCAUSE,
+ initial_pda.retx_doublefault);
+ printk(KERN_NOTICE " DCPLB_FAULT_ADDR: %pF\n",
+ initial_pda.dcplb_doublefault_addr);
+ printk(KERN_NOTICE " ICPLB_FAULT_ADDR: %pF\n",
+ initial_pda.icplb_doublefault_addr);
+#endif
+ printk(KERN_NOTICE " The instruction at %pF caused a double exception\n",
+ initial_pda.retx);
+ } else if (_bfin_swrst & RESET_WDOG)
+ printk(KERN_INFO "Recovering from Watchdog event\n");
+ else if (_bfin_swrst & RESET_SOFTWARE)
+ printk(KERN_NOTICE "Reset caused by Software reset\n");
+
+ printk(KERN_INFO "Blackfin support (C) 2004-2010 Analog Devices, Inc.\n");
+ if (bfin_compiled_revid() == 0xffff)
+ printk(KERN_INFO "Compiled for ADSP-%s Rev any, running on 0.%d\n", CPU, bfin_revid());
+ else if (bfin_compiled_revid() == -1)
+ printk(KERN_INFO "Compiled for ADSP-%s Rev none\n", CPU);
+ else
+ printk(KERN_INFO "Compiled for ADSP-%s Rev 0.%d\n", CPU, bfin_compiled_revid());
+
+ if (likely(CPUID == bfin_cpuid())) {
+ if (bfin_revid() != bfin_compiled_revid()) {
+ if (bfin_compiled_revid() == -1)
+ printk(KERN_ERR "Warning: Compiled for Rev none, but running on Rev %d\n",
+ bfin_revid());
+ else if (bfin_compiled_revid() != 0xffff) {
+ printk(KERN_ERR "Warning: Compiled for Rev %d, but running on Rev %d\n",
+ bfin_compiled_revid(), bfin_revid());
+ if (bfin_compiled_revid() > bfin_revid())
+ panic("Error: you are missing anomaly workarounds for this rev");
+ }
+ }
+ if (bfin_revid() < CONFIG_BF_REV_MIN || bfin_revid() > CONFIG_BF_REV_MAX)
+ printk(KERN_ERR "Warning: Unsupported Chip Revision ADSP-%s Rev 0.%d detected\n",
+ CPU, bfin_revid());
+ }
+
+ printk(KERN_INFO "Blackfin Linux support by http://blackfin.uclinux.org/\n");
+
+ printk(KERN_INFO "Processor Speed: %lu MHz core clock and %lu MHz System Clock\n",
+ cclk / 1000000, sclk / 1000000);
+
+ setup_bootmem_allocator();
+
+ paging_init();
+
+ /* Copy atomic sequences to their fixed location, and sanity check that
+ these locations are the ones that we advertise to userspace. */
+ memcpy((void *)FIXED_CODE_START, &fixed_code_start,
+ FIXED_CODE_END - FIXED_CODE_START);
+ BUG_ON((char *)&sigreturn_stub - (char *)&fixed_code_start
+ != SIGRETURN_STUB - FIXED_CODE_START);
+ BUG_ON((char *)&atomic_xchg32 - (char *)&fixed_code_start
+ != ATOMIC_XCHG32 - FIXED_CODE_START);
+ BUG_ON((char *)&atomic_cas32 - (char *)&fixed_code_start
+ != ATOMIC_CAS32 - FIXED_CODE_START);
+ BUG_ON((char *)&atomic_add32 - (char *)&fixed_code_start
+ != ATOMIC_ADD32 - FIXED_CODE_START);
+ BUG_ON((char *)&atomic_sub32 - (char *)&fixed_code_start
+ != ATOMIC_SUB32 - FIXED_CODE_START);
+ BUG_ON((char *)&atomic_ior32 - (char *)&fixed_code_start
+ != ATOMIC_IOR32 - FIXED_CODE_START);
+ BUG_ON((char *)&atomic_and32 - (char *)&fixed_code_start
+ != ATOMIC_AND32 - FIXED_CODE_START);
+ BUG_ON((char *)&atomic_xor32 - (char *)&fixed_code_start
+ != ATOMIC_XOR32 - FIXED_CODE_START);
+ BUG_ON((char *)&safe_user_instruction - (char *)&fixed_code_start
+ != SAFE_USER_INSTRUCTION - FIXED_CODE_START);
+
+#ifdef CONFIG_SMP
+ platform_init_cpus();
+#endif
+ init_exception_vectors();
+ bfin_cache_init(); /* Initialize caches for the boot CPU */
+}
+
+static int __init topology_init(void)
+{
+ unsigned int cpu;
+
+ for_each_possible_cpu(cpu) {
+ register_cpu(&per_cpu(cpu_data, cpu).cpu, cpu);
+ }
+
+ return 0;
+}
+
+subsys_initcall(topology_init);
+
+/* Get the input clock frequency */
+static u_long cached_clkin_hz = CONFIG_CLKIN_HZ;
+static u_long get_clkin_hz(void)
+{
+ return cached_clkin_hz;
+}
+static int __init early_init_clkin_hz(char *buf)
+{
+ cached_clkin_hz = simple_strtoul(buf, NULL, 0);
+#ifdef BFIN_KERNEL_CLOCK
+ if (cached_clkin_hz != CONFIG_CLKIN_HZ)
+ panic("cannot change clkin_hz when reprogramming clocks");
+#endif
+ return 1;
+}
+early_param("clkin_hz=", early_init_clkin_hz);
+
+/* Get the voltage input multiplier */
+static u_long get_vco(void)
+{
+ static u_long cached_vco;
+ u_long msel, pll_ctl;
+
+ /* The assumption here is that VCO never changes at runtime.
+ * If, someday, we support that, then we'll have to change this.
+ */
+ if (cached_vco)
+ return cached_vco;
+
+ pll_ctl = bfin_read_PLL_CTL();
+ msel = (pll_ctl >> 9) & 0x3F;
+ if (0 == msel)
+ msel = 64;
+
+ cached_vco = get_clkin_hz();
+ cached_vco >>= (1 & pll_ctl); /* DF bit */
+ cached_vco *= msel;
+ return cached_vco;
+}
+
+/* Get the Core clock */
+u_long get_cclk(void)
+{
+ static u_long cached_cclk_pll_div, cached_cclk;
+ u_long csel, ssel;
+
+ if (bfin_read_PLL_STAT() & 0x1)
+ return get_clkin_hz();
+
+ ssel = bfin_read_PLL_DIV();
+ if (ssel == cached_cclk_pll_div)
+ return cached_cclk;
+ else
+ cached_cclk_pll_div = ssel;
+
+ csel = ((ssel >> 4) & 0x03);
+ ssel &= 0xf;
+ if (ssel && ssel < (1 << csel)) /* SCLK > CCLK */
+ cached_cclk = get_vco() / ssel;
+ else
+ cached_cclk = get_vco() >> csel;
+ return cached_cclk;
+}
+EXPORT_SYMBOL(get_cclk);
+
+/* Get the System clock */
+u_long get_sclk(void)
+{
+ static u_long cached_sclk;
+ u_long ssel;
+
+ /* The assumption here is that SCLK never changes at runtime.
+ * If, someday, we support that, then we'll have to change this.
+ */
+ if (cached_sclk)
+ return cached_sclk;
+
+ if (bfin_read_PLL_STAT() & 0x1)
+ return get_clkin_hz();
+
+ ssel = bfin_read_PLL_DIV() & 0xf;
+ if (0 == ssel) {
+ printk(KERN_WARNING "Invalid System Clock\n");
+ ssel = 1;
+ }
+
+ cached_sclk = get_vco() / ssel;
+ return cached_sclk;
+}
+EXPORT_SYMBOL(get_sclk);
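+
+/*
+ * Hypothetical example of the PLL math above: with a 25 MHz CLKIN, DF=0
+ * and MSEL=20, get_vco() returns 500 MHz; with PLL_DIV holding CSEL=0
+ * and SSEL=5, get_cclk() returns 500 MHz and get_sclk() returns 100 MHz.
+ */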
+
+unsigned long sclk_to_usecs(unsigned long sclk)
+{
+ u64 tmp = USEC_PER_SEC * (u64)sclk;
+ do_div(tmp, get_sclk());
+ return tmp;
+}
+EXPORT_SYMBOL(sclk_to_usecs);
+
+unsigned long usecs_to_sclk(unsigned long usecs)
+{
+ u64 tmp = get_sclk() * (u64)usecs;
+ do_div(tmp, USEC_PER_SEC);
+ return tmp;
+}
+EXPORT_SYMBOL(usecs_to_sclk);
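+
+/*
+ * With the hypothetical 100 MHz SCLK above, sclk_to_usecs(500) returns 5
+ * and usecs_to_sclk(5) returns 500 - the two helpers are inverses, up to
+ * the truncation done by do_div().
+ */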
+
+/*
+ * Get CPU information for use by the procfs.
+ */
+static int show_cpuinfo(struct seq_file *m, void *v)
+{
+ char *cpu, *mmu, *fpu, *vendor, *cache;
+ uint32_t revid;
+ int cpu_num = *(unsigned int *)v;
+ u_long sclk, cclk;
+ u_int icache_size = BFIN_ICACHESIZE / 1024, dcache_size = 0, dsup_banks = 0;
+ struct blackfin_cpudata *cpudata = &per_cpu(cpu_data, cpu_num);
+
+ cpu = CPU;
+ mmu = "none";
+ fpu = "none";
+ revid = bfin_revid();
+
+ sclk = get_sclk();
+ cclk = get_cclk();
+
+ switch (bfin_read_CHIPID() & CHIPID_MANUFACTURE) {
+ case 0xca:
+ vendor = "Analog Devices";
+ break;
+ default:
+ vendor = "unknown";
+ break;
+ }
+
+ seq_printf(m, "processor\t: %d\n" "vendor_id\t: %s\n", cpu_num, vendor);
+
+ if (CPUID == bfin_cpuid())
+ seq_printf(m, "cpu family\t: 0x%04x\n", CPUID);
+ else
+ seq_printf(m, "cpu family\t: Compiled for:0x%04x, running on:0x%04x\n",
+ CPUID, bfin_cpuid());
+
+ seq_printf(m, "model name\t: ADSP-%s %lu(MHz CCLK) %lu(MHz SCLK) (%s)\n"
+ "stepping\t: %d ",
+ cpu, cclk/1000000, sclk/1000000,
+#ifdef CONFIG_MPU
+ "mpu on",
+#else
+ "mpu off",
+#endif
+ revid);
+
+ if (bfin_revid() != bfin_compiled_revid()) {
+ if (bfin_compiled_revid() == -1)
+ seq_printf(m, "(Compiled for Rev none)");
+ else if (bfin_compiled_revid() == 0xffff)
+ seq_printf(m, "(Compiled for Rev any)");
+ else
+ seq_printf(m, "(Compiled for Rev %d)", bfin_compiled_revid());
+ }
+
+ seq_printf(m, "\ncpu MHz\t\t: %lu.%03lu/%lu.%03lu\n",
+ cclk/1000000, cclk%1000000,
+ sclk/1000000, sclk%1000000);
+ seq_printf(m, "bogomips\t: %lu.%02lu\n"
+ "Calibration\t: %lu loops\n",
+ (loops_per_jiffy * HZ) / 500000,
+ ((loops_per_jiffy * HZ) / 5000) % 100,
+ (loops_per_jiffy * HZ));
+
+	/* Check Cache configuration */
+ switch (cpudata->dmemctl & (1 << DMC0_P | 1 << DMC1_P)) {
+ case ACACHE_BSRAM:
+ cache = "dbank-A/B\t: cache/sram";
+ dcache_size = 16;
+ dsup_banks = 1;
+ break;
+ case ACACHE_BCACHE:
+ cache = "dbank-A/B\t: cache/cache";
+ dcache_size = 32;
+ dsup_banks = 2;
+ break;
+ case ASRAM_BSRAM:
+ cache = "dbank-A/B\t: sram/sram";
+ dcache_size = 0;
+ dsup_banks = 0;
+ break;
+ default:
+ cache = "unknown";
+ dcache_size = 0;
+ dsup_banks = 0;
+ break;
+ }
+
+ /* Is it turned on? */
+ if ((cpudata->dmemctl & (ENDCPLB | DMC_ENABLE)) != (ENDCPLB | DMC_ENABLE))
+ dcache_size = 0;
+
+ if ((cpudata->imemctl & (IMC | ENICPLB)) != (IMC | ENICPLB))
+ icache_size = 0;
+
+ seq_printf(m, "cache size\t: %d KB(L1 icache) "
+ "%d KB(L1 dcache) %d KB(L2 cache)\n",
+ icache_size, dcache_size, 0);
+ seq_printf(m, "%s\n", cache);
+ seq_printf(m, "external memory\t: "
+#if defined(CONFIG_BFIN_EXTMEM_ICACHEABLE)
+ "cacheable"
+#else
+ "uncacheable"
+#endif
+ " in instruction cache\n");
+ seq_printf(m, "external memory\t: "
+#if defined(CONFIG_BFIN_EXTMEM_WRITEBACK)
+ "cacheable (write-back)"
+#elif defined(CONFIG_BFIN_EXTMEM_WRITETHROUGH)
+ "cacheable (write-through)"
+#else
+ "uncacheable"
+#endif
+ " in data cache\n");
+
+ if (icache_size)
+ seq_printf(m, "icache setup\t: %d Sub-banks/%d Ways, %d Lines/Way\n",
+ BFIN_ISUBBANKS, BFIN_IWAYS, BFIN_ILINES);
+ else
+ seq_printf(m, "icache setup\t: off\n");
+
+ seq_printf(m,
+ "dcache setup\t: %d Super-banks/%d Sub-banks/%d Ways, %d Lines/Way\n",
+ dsup_banks, BFIN_DSUBBANKS, BFIN_DWAYS,
+ BFIN_DLINES);
+#ifdef __ARCH_SYNC_CORE_DCACHE
+ seq_printf(m, "dcache flushes\t: %lu\n", dcache_invld_count[cpu_num]);
+#endif
+#ifdef __ARCH_SYNC_CORE_ICACHE
+ seq_printf(m, "icache flushes\t: %lu\n", icache_invld_count[cpu_num]);
+#endif
+
+ seq_printf(m, "\n");
+
+ if (cpu_num != num_possible_cpus() - 1)
+ return 0;
+
+ if (L2_LENGTH) {
+ seq_printf(m, "L2 SRAM\t\t: %dKB\n", L2_LENGTH/0x400);
+ seq_printf(m, "L2 SRAM\t\t: "
+#if defined(CONFIG_BFIN_L2_ICACHEABLE)
+ "cacheable"
+#else
+ "uncacheable"
+#endif
+ " in instruction cache\n");
+ seq_printf(m, "L2 SRAM\t\t: "
+#if defined(CONFIG_BFIN_L2_WRITEBACK)
+ "cacheable (write-back)"
+#elif defined(CONFIG_BFIN_L2_WRITETHROUGH)
+ "cacheable (write-through)"
+#else
+ "uncacheable"
+#endif
+ " in data cache\n");
+ }
+ seq_printf(m, "board name\t: %s\n", bfin_board_name);
+ seq_printf(m, "board memory\t: %ld kB (0x%08lx -> 0x%08lx)\n",
+ physical_mem_end >> 10, 0ul, physical_mem_end);
+ seq_printf(m, "kernel memory\t: %d kB (0x%08lx -> 0x%08lx)\n",
+ ((int)memory_end - (int)_rambase) >> 10,
+ _rambase, memory_end);
+
+ return 0;
+}
+
+static void *c_start(struct seq_file *m, loff_t *pos)
+{
+ if (*pos == 0)
+ *pos = cpumask_first(cpu_online_mask);
+ if (*pos >= num_online_cpus())
+ return NULL;
+
+ return pos;
+}
+
+static void *c_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ *pos = cpumask_next(*pos, cpu_online_mask);
+
+ return c_start(m, pos);
+}
+
+static void c_stop(struct seq_file *m, void *v)
+{
+}
+
+const struct seq_operations cpuinfo_op = {
+ .start = c_start,
+ .next = c_next,
+ .stop = c_stop,
+ .show = show_cpuinfo,
+};
+
+void __init cmdline_init(const char *r0)
+{
+ early_shadow_stamp();
+ if (r0)
+ strncpy(command_line, r0, COMMAND_LINE_SIZE);
+}
diff --git a/arch/blackfin/kernel/shadow_console.c b/arch/blackfin/kernel/shadow_console.c
new file mode 100644
index 00000000..557e9fef
--- /dev/null
+++ b/arch/blackfin/kernel/shadow_console.c
@@ -0,0 +1,111 @@
+/*
+ * manage a small early shadow of the log buffer which we can share with the
+ * bootloader, so early crash messages are communicated properly and easily
+ *
+ * Copyright 2009 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/string.h>
+#include <asm/blackfin.h>
+#include <asm/irq_handler.h>
+#include <asm/early_printk.h>
+
+#define SHADOW_CONSOLE_START (0x500)
+#define SHADOW_CONSOLE_END (0x1000)
+#define SHADOW_CONSOLE_MAGIC_LOC (0x4F0)
+#define SHADOW_CONSOLE_MAGIC (0xDEADBEEF)
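+
+/*
+ * mark_shadow_error() plants the magic word plus the buffer address at
+ * SHADOW_CONSOLE_MAGIC_LOC; a bootloader that finds the magic still set
+ * on the next boot can assume the kernel crashed early and dump the
+ * shadow buffer at SHADOW_CONSOLE_START (the magic is cleared once a
+ * real console comes up).
+ */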
+
+static __initdata char *shadow_console_buffer = (char *)SHADOW_CONSOLE_START;
+
+__init void early_shadow_write(struct console *con, const char *s,
+ unsigned int n)
+{
+ unsigned int i;
+	/*
+	 * save 2 bytes for the double null at the end; once we fail on a
+	 * long line, make sure we don't write a short line afterwards
+	 */
+ if ((shadow_console_buffer + n) <= (char *)(SHADOW_CONSOLE_END - 2)) {
+ /* can't use memcpy - it may not be relocated yet */
+ for (i = 0; i <= n; i++)
+ shadow_console_buffer[i] = s[i];
+ shadow_console_buffer += n;
+ shadow_console_buffer[0] = 0;
+ shadow_console_buffer[1] = 0;
+ } else
+ shadow_console_buffer = (char *)SHADOW_CONSOLE_END;
+}
+
+static __initdata struct console early_shadow_console = {
+ .name = "early_shadow",
+ .write = early_shadow_write,
+ .flags = CON_BOOT | CON_PRINTBUFFER,
+ .index = -1,
+ .device = 0,
+};
+
+__init int shadow_console_enabled(void)
+{
+ return early_shadow_console.flags & CON_ENABLED;
+}
+
+__init void mark_shadow_error(void)
+{
+ int *loc = (int *)SHADOW_CONSOLE_MAGIC_LOC;
+ loc[0] = SHADOW_CONSOLE_MAGIC;
+ loc[1] = SHADOW_CONSOLE_START;
+}
+
+__init void enable_shadow_console(void)
+{
+ if (!shadow_console_enabled()) {
+ register_console(&early_shadow_console);
+ /* for now, assume things are going to fail */
+ mark_shadow_error();
+ }
+}
+
+static __init int disable_shadow_console(void)
+{
+ /*
+ * by the time pure_initcall runs, the standard console is enabled,
+ * and the early_console is off, so unset the magic numbers
+ * unregistering the console is taken care of in common code (See
+ * ./kernel/printk:disable_boot_consoles() )
+ */
+ int *loc = (int *)SHADOW_CONSOLE_MAGIC_LOC;
+
+ loc[0] = 0;
+
+ return 0;
+}
+pure_initcall(disable_shadow_console);
+
+/*
+ * since we can't use printk, dump numbers (as hex), n = # bits
+ */
+__init void early_shadow_reg(unsigned long reg, unsigned int n)
+{
+ /*
+	 * can't use any "normal" kernel features, since they
+ * may not be relocated to their execute address yet
+ */
+ int i;
+ char ascii[11] = " 0x";
+
+ n = n / 4;
+ reg = reg << ((8 - n) * 4);
+ n += 3;
+
+ for (i = 3; i <= n ; i++) {
+ ascii[i] = hex_asc_lo(reg >> 28);
+ reg <<= 4;
+ }
+ early_shadow_write(NULL, ascii, n);
+
+}
diff --git a/arch/blackfin/kernel/signal.c b/arch/blackfin/kernel/signal.c
new file mode 100644
index 00000000..d536f35d
--- /dev/null
+++ b/arch/blackfin/kernel/signal.c
@@ -0,0 +1,354 @@
+/*
+ * Copyright 2004-2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later
+ */
+
+#include <linux/signal.h>
+#include <linux/syscalls.h>
+#include <linux/ptrace.h>
+#include <linux/tty.h>
+#include <linux/personality.h>
+#include <linux/binfmts.h>
+#include <linux/freezer.h>
+#include <linux/uaccess.h>
+#include <linux/tracehook.h>
+
+#include <asm/cacheflush.h>
+#include <asm/ucontext.h>
+#include <asm/fixed_code.h>
+#include <asm/syscall.h>
+
+#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+
+/* Location of the trace bit in SYSCFG. */
+#define TRACE_BITS 0x0001
+
+struct fdpic_func_descriptor {
+ unsigned long text;
+ unsigned long GOT;
+};
+
+struct rt_sigframe {
+ int sig;
+ struct siginfo *pinfo;
+ void *puc;
+ /* This is no longer needed by the kernel, but unfortunately userspace
+ * code expects it to be there. */
+ char retcode[8];
+ struct siginfo info;
+ struct ucontext uc;
+};
+
+asmlinkage int sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss)
+{
+ return do_sigaltstack(uss, uoss, rdusp());
+}
+
+static inline int
+rt_restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *pr0)
+{
+ unsigned long usp = 0;
+ int err = 0;
+
+ /* Always make any pending restarted system calls return -EINTR */
+ current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
+#define RESTORE(x) err |= __get_user(regs->x, &sc->sc_##x)
+
+ /* restore passed registers */
+ RESTORE(r0); RESTORE(r1); RESTORE(r2); RESTORE(r3);
+ RESTORE(r4); RESTORE(r5); RESTORE(r6); RESTORE(r7);
+ RESTORE(p0); RESTORE(p1); RESTORE(p2); RESTORE(p3);
+ RESTORE(p4); RESTORE(p5);
+ err |= __get_user(usp, &sc->sc_usp);
+ wrusp(usp);
+ RESTORE(a0w); RESTORE(a1w);
+ RESTORE(a0x); RESTORE(a1x);
+ RESTORE(astat);
+ RESTORE(rets);
+ RESTORE(pc);
+ RESTORE(retx);
+ RESTORE(fp);
+ RESTORE(i0); RESTORE(i1); RESTORE(i2); RESTORE(i3);
+ RESTORE(m0); RESTORE(m1); RESTORE(m2); RESTORE(m3);
+ RESTORE(l0); RESTORE(l1); RESTORE(l2); RESTORE(l3);
+ RESTORE(b0); RESTORE(b1); RESTORE(b2); RESTORE(b3);
+ RESTORE(lc0); RESTORE(lc1);
+ RESTORE(lt0); RESTORE(lt1);
+ RESTORE(lb0); RESTORE(lb1);
+ RESTORE(seqstat);
+
+ regs->orig_p0 = -1; /* disable syscall checks */
+
+ *pr0 = regs->r0;
+ return err;
+}
+
+asmlinkage int do_rt_sigreturn(unsigned long __unused)
+{
+ struct pt_regs *regs = (struct pt_regs *)__unused;
+ unsigned long usp = rdusp();
+ struct rt_sigframe *frame = (struct rt_sigframe *)(usp);
+ sigset_t set;
+ int r0;
+
+ if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+ goto badframe;
+ if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
+ goto badframe;
+
+ sigdelsetmask(&set, ~_BLOCKABLE);
+ spin_lock_irq(&current->sighand->siglock);
+ current->blocked = set;
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+
+ if (rt_restore_sigcontext(regs, &frame->uc.uc_mcontext, &r0))
+ goto badframe;
+
+ if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->usp) == -EFAULT)
+ goto badframe;
+
+ return r0;
+
+ badframe:
+ force_sig(SIGSEGV, current);
+ return 0;
+}
+
+static inline int rt_setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs)
+{
+ int err = 0;
+
+#define SETUP(x) err |= __put_user(regs->x, &sc->sc_##x)
+
+ SETUP(r0); SETUP(r1); SETUP(r2); SETUP(r3);
+ SETUP(r4); SETUP(r5); SETUP(r6); SETUP(r7);
+ SETUP(p0); SETUP(p1); SETUP(p2); SETUP(p3);
+ SETUP(p4); SETUP(p5);
+ err |= __put_user(rdusp(), &sc->sc_usp);
+ SETUP(a0w); SETUP(a1w);
+ SETUP(a0x); SETUP(a1x);
+ SETUP(astat);
+ SETUP(rets);
+ SETUP(pc);
+ SETUP(retx);
+ SETUP(fp);
+ SETUP(i0); SETUP(i1); SETUP(i2); SETUP(i3);
+ SETUP(m0); SETUP(m1); SETUP(m2); SETUP(m3);
+ SETUP(l0); SETUP(l1); SETUP(l2); SETUP(l3);
+ SETUP(b0); SETUP(b1); SETUP(b2); SETUP(b3);
+ SETUP(lc0); SETUP(lc1);
+ SETUP(lt0); SETUP(lt1);
+ SETUP(lb0); SETUP(lb1);
+ SETUP(seqstat);
+
+ return err;
+}
+
+static inline void *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
+ size_t frame_size)
+{
+ unsigned long usp;
+
+ /* Default to using normal stack. */
+ usp = rdusp();
+
+ /* This is the X/Open sanctioned signal stack switching. */
+ if (ka->sa.sa_flags & SA_ONSTACK) {
+ if (!on_sig_stack(usp))
+ usp = current->sas_ss_sp + current->sas_ss_size;
+ }
+ return (void *)((usp - frame_size) & -8UL);
+}
+
+static int
+setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t * info,
+ sigset_t * set, struct pt_regs *regs)
+{
+ struct rt_sigframe *frame;
+ int err = 0;
+
+ frame = get_sigframe(ka, regs, sizeof(*frame));
+
+ err |= __put_user((current_thread_info()->exec_domain
+ && current_thread_info()->exec_domain->signal_invmap
+ && sig < 32
+ ? current_thread_info()->exec_domain->
+ signal_invmap[sig] : sig), &frame->sig);
+
+ err |= __put_user(&frame->info, &frame->pinfo);
+ err |= __put_user(&frame->uc, &frame->puc);
+ err |= copy_siginfo_to_user(&frame->info, info);
+
+ /* Create the ucontext. */
+ err |= __put_user(0, &frame->uc.uc_flags);
+ err |= __put_user(0, &frame->uc.uc_link);
+ err |=
+ __put_user((void *)current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
+ err |= __put_user(sas_ss_flags(rdusp()), &frame->uc.uc_stack.ss_flags);
+ err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
+ err |= rt_setup_sigcontext(&frame->uc.uc_mcontext, regs);
+ err |= copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+
+ if (err)
+ goto give_sigsegv;
+
+ /* Set up registers for signal handler */
+ wrusp((unsigned long)frame);
+ if (current->personality & FDPIC_FUNCPTRS) {
+ struct fdpic_func_descriptor __user *funcptr =
+ (struct fdpic_func_descriptor *) ka->sa.sa_handler;
+ __get_user(regs->pc, &funcptr->text);
+ __get_user(regs->p3, &funcptr->GOT);
+ } else
+ regs->pc = (unsigned long)ka->sa.sa_handler;
+ regs->rets = SIGRETURN_STUB;
+
+ regs->r0 = frame->sig;
+ regs->r1 = (unsigned long)(&frame->info);
+ regs->r2 = (unsigned long)(&frame->uc);
+
+ return 0;
+
+ give_sigsegv:
+ if (sig == SIGSEGV)
+ ka->sa.sa_handler = SIG_DFL;
+ force_sig(SIGSEGV, current);
+ return -EFAULT;
+}
+
+static inline void
+handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler)
+{
+ switch (regs->r0) {
+ case -ERESTARTNOHAND:
+ if (!has_handler)
+ goto do_restart;
+ regs->r0 = -EINTR;
+ break;
+
+ case -ERESTARTSYS:
+ if (has_handler && !(ka->sa.sa_flags & SA_RESTART)) {
+ regs->r0 = -EINTR;
+ break;
+ }
+ /* fallthrough */
+ case -ERESTARTNOINTR:
+ do_restart:
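+		/* back the PC up over the 2-byte EXCPT instruction so the
+		 * syscall is re-executed (assuming the standard Blackfin
+		 * syscall entry sequence) */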
+ regs->p0 = regs->orig_p0;
+ regs->r0 = regs->orig_r0;
+ regs->pc -= 2;
+ break;
+
+ case -ERESTART_RESTARTBLOCK:
+ regs->p0 = __NR_restart_syscall;
+ regs->pc -= 2;
+ break;
+ }
+}
+
+/*
+ * OK, we're invoking a handler
+ */
+static int
+handle_signal(int sig, siginfo_t *info, struct k_sigaction *ka,
+ sigset_t *oldset, struct pt_regs *regs)
+{
+ int ret;
+
+	/* are we from a system call? check pt_regs->orig_p0 */
+ if (regs->orig_p0 >= 0)
+ /* If so, check system call restarting.. */
+ handle_restart(regs, ka, 1);
+
+ /* set up the stack frame */
+ ret = setup_rt_frame(sig, ka, info, oldset, regs);
+
+ if (ret == 0) {
+ spin_lock_irq(&current->sighand->siglock);
+ sigorsets(&current->blocked, &current->blocked,
+ &ka->sa.sa_mask);
+ if (!(ka->sa.sa_flags & SA_NODEFER))
+ sigaddset(&current->blocked, sig);
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+ }
+ return ret;
+}
+
+/*
+ * Note that 'init' is a special process: it doesn't get signals it doesn't
+ * want to handle. Thus you cannot kill init even with a SIGKILL even by
+ * mistake.
+ *
+ * Note that we go through the signals twice: once to check the signals
+ * that the kernel can handle, and then we build all the user-level signal
+ * handling stack-frames in one go after that.
+ */
+asmlinkage void do_signal(struct pt_regs *regs)
+{
+ siginfo_t info;
+ int signr;
+ struct k_sigaction ka;
+ sigset_t *oldset;
+
+ current->thread.esp0 = (unsigned long)regs;
+
+ if (try_to_freeze())
+ goto no_signal;
+
+ if (test_thread_flag(TIF_RESTORE_SIGMASK))
+ oldset = &current->saved_sigmask;
+ else
+ oldset = &current->blocked;
+
+ signr = get_signal_to_deliver(&info, &ka, regs, NULL);
+ if (signr > 0) {
+ /* Whee! Actually deliver the signal. */
+ if (handle_signal(signr, &info, &ka, oldset, regs) == 0) {
+ /* a signal was successfully delivered; the saved
+ * sigmask will have been stored in the signal frame,
+ * and will be restored by sigreturn, so we can simply
+ * clear the TIF_RESTORE_SIGMASK flag */
+ if (test_thread_flag(TIF_RESTORE_SIGMASK))
+ clear_thread_flag(TIF_RESTORE_SIGMASK);
+
+ tracehook_signal_handler(signr, &info, &ka, regs,
+ test_thread_flag(TIF_SINGLESTEP));
+ }
+
+ return;
+ }
+
+ no_signal:
+ /* Did we come from a system call? */
+ if (regs->orig_p0 >= 0)
+ /* Restart the system call - no handlers present */
+ handle_restart(regs, NULL, 0);
+
+ /* if there's no signal to deliver, we just put the saved sigmask
+ * back */
+ if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
+ clear_thread_flag(TIF_RESTORE_SIGMASK);
+ sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
+ }
+}
+
+/*
+ * notification of userspace execution resumption
+ */
+asmlinkage void do_notify_resume(struct pt_regs *regs)
+{
+ if (test_thread_flag(TIF_SIGPENDING) || test_thread_flag(TIF_RESTORE_SIGMASK))
+ do_signal(regs);
+
+ if (test_thread_flag(TIF_NOTIFY_RESUME)) {
+ clear_thread_flag(TIF_NOTIFY_RESUME);
+ tracehook_notify_resume(regs);
+ if (current->replacement_session_keyring)
+ key_replace_session_keyring();
+ }
+}
+
diff --git a/arch/blackfin/kernel/stacktrace.c b/arch/blackfin/kernel/stacktrace.c
new file mode 100644
index 00000000..30301e1e
--- /dev/null
+++ b/arch/blackfin/kernel/stacktrace.c
@@ -0,0 +1,53 @@
+/*
+ * Blackfin stacktrace code (mostly copied from avr32)
+ *
+ * Copyright 2009 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/sched.h>
+#include <linux/stacktrace.h>
+#include <linux/thread_info.h>
+#include <linux/module.h>
+
+register unsigned long current_frame_pointer asm("FP");
+
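+/* A LINK instruction saves RETS and the caller's FP at the new frame
+ * pointer, so each stack frame begins with this two-word record and
+ * frames chain through the fp member.
+ */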
+struct stackframe {
+ unsigned long fp;
+ unsigned long rets;
+};
+
+/*
+ * Save stack-backtrace addresses into a stack_trace buffer.
+ */
+void save_stack_trace(struct stack_trace *trace)
+{
+ unsigned long low, high;
+ unsigned long fp;
+ struct stackframe *frame;
+ int skip = trace->skip;
+
+ low = (unsigned long)task_stack_page(current);
+ high = low + THREAD_SIZE;
+ fp = current_frame_pointer;
+
+ while (fp >= low && fp <= (high - sizeof(*frame))) {
+ frame = (struct stackframe *)fp;
+
+ if (skip) {
+ skip--;
+ } else {
+ trace->entries[trace->nr_entries++] = frame->rets;
+ if (trace->nr_entries >= trace->max_entries)
+ break;
+ }
+
+ /*
+ * The next frame must be at a higher address than the
+ * current frame.
+ */
+ low = fp + sizeof(*frame);
+ fp = frame->fp;
+ }
+}
+EXPORT_SYMBOL_GPL(save_stack_trace);
diff --git a/arch/blackfin/kernel/sys_bfin.c b/arch/blackfin/kernel/sys_bfin.c
new file mode 100644
index 00000000..d998383c
--- /dev/null
+++ b/arch/blackfin/kernel/sys_bfin.c
@@ -0,0 +1,88 @@
+/*
+ * contains various random system calls that have a non-standard
+ * calling sequence on the Linux/Blackfin platform.
+ *
+ * Copyright 2004-2009 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later
+ */
+
+#include <linux/spinlock.h>
+#include <linux/sem.h>
+#include <linux/msg.h>
+#include <linux/shm.h>
+#include <linux/syscalls.h>
+#include <linux/mman.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/ipc.h>
+#include <linux/unistd.h>
+
+#include <asm/cacheflush.h>
+#include <asm/dma.h>
+#include <asm/cachectl.h>
+#include <asm/ptrace.h>
+
+asmlinkage void *sys_sram_alloc(size_t size, unsigned long flags)
+{
+ return sram_alloc_with_lsl(size, flags);
+}
+
+asmlinkage int sys_sram_free(const void *addr)
+{
+ return sram_free_with_lsl(addr);
+}
+
+asmlinkage void *sys_dma_memcpy(void *dest, const void *src, size_t len)
+{
+ return safe_dma_memcpy(dest, src, len);
+}
+
+#if defined(CONFIG_FB) || defined(CONFIG_FB_MODULE)
+#include <linux/fb.h>
+#include <linux/export.h>
+unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr,
+ unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+ struct fb_info *info = filp->private_data;
+ return (unsigned long)info->screen_base;
+}
+EXPORT_SYMBOL(get_fb_unmapped_area);
+#endif
+
+/* Needed for legacy userspace atomic emulation */
+static DEFINE_SPINLOCK(bfin_spinlock_lock);
+
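+/*
+ * Sketch of the contract implied below: sys_bfin_spinlock() returns 0
+ * after acquiring a free lock (storing 1 to *p), 1 if the lock was
+ * already held, or the fault value from get_user() on a bad pointer.
+ */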
+#ifdef CONFIG_SYS_BFIN_SPINLOCK_L1
+__attribute__((l1_text))
+#endif
+asmlinkage int sys_bfin_spinlock(int *p)
+{
+ int ret, tmp = 0;
+
+ spin_lock(&bfin_spinlock_lock); /* This would also hold kernel preemption. */
+ ret = get_user(tmp, p);
+ if (likely(ret == 0)) {
+ if (unlikely(tmp))
+ ret = 1;
+ else
+ put_user(1, p);
+ }
+ spin_unlock(&bfin_spinlock_lock);
+
+ return ret;
+}
+
+SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, len, int, op)
+{
+ if (is_user_addr_valid(current, addr, len) != 0)
+ return -EINVAL;
+
+ if (op & DCACHE)
+ blackfin_dcache_flush_range(addr, addr + len);
+ if (op & ICACHE)
+ blackfin_icache_flush_range(addr, addr + len);
+
+ return 0;
+}
diff --git a/arch/blackfin/kernel/time-ts.c b/arch/blackfin/kernel/time-ts.c
new file mode 100644
index 00000000..d98f2d69
--- /dev/null
+++ b/arch/blackfin/kernel/time-ts.c
@@ -0,0 +1,375 @@
+/*
+ * Based on arm clockevents implementation and old bfin time tick.
+ *
+ * Copyright 2008-2009 Analog Devices Inc.
+ * 2008 GeoTechnologies
+ * Vitja Makarov
+ *
+ * Licensed under the GPL-2
+ */
+
+#include <linux/module.h>
+#include <linux/profile.h>
+#include <linux/interrupt.h>
+#include <linux/time.h>
+#include <linux/timex.h>
+#include <linux/irq.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/cpufreq.h>
+
+#include <asm/blackfin.h>
+#include <asm/time.h>
+#include <asm/gptimers.h>
+#include <asm/nmi.h>
+
+#if defined(CONFIG_CYCLES_CLOCKSOURCE)
+
+static notrace cycle_t bfin_read_cycles(struct clocksource *cs)
+{
+#ifdef CONFIG_CPU_FREQ
+ return __bfin_cycles_off + (get_cycles() << __bfin_cycles_mod);
+#else
+ return get_cycles();
+#endif
+}
+
+static struct clocksource bfin_cs_cycles = {
+ .name = "bfin_cs_cycles",
+ .rating = 400,
+ .read = bfin_read_cycles,
+ .mask = CLOCKSOURCE_MASK(64),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static inline unsigned long long bfin_cs_cycles_sched_clock(void)
+{
+ return clocksource_cyc2ns(bfin_read_cycles(&bfin_cs_cycles),
+ bfin_cs_cycles.mult, bfin_cs_cycles.shift);
+}
+
+static int __init bfin_cs_cycles_init(void)
+{
+ if (clocksource_register_hz(&bfin_cs_cycles, get_cclk()))
+ panic("failed to register clocksource");
+
+ return 0;
+}
+#else
+# define bfin_cs_cycles_init()
+#endif
+
+#ifdef CONFIG_GPTMR0_CLOCKSOURCE
+
+void __init setup_gptimer0(void)
+{
+ disable_gptimers(TIMER0bit);
+
+ set_gptimer_config(TIMER0_id, \
+ TIMER_OUT_DIS | TIMER_PERIOD_CNT | TIMER_MODE_PWM);
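+	/* a period of -1 and a width of -2 program (near-)maximum 32-bit
+	 * values, so the counter free-runs for clocksource use */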
+ set_gptimer_period(TIMER0_id, -1);
+ set_gptimer_pwidth(TIMER0_id, -2);
+ SSYNC();
+ enable_gptimers(TIMER0bit);
+}
+
+static cycle_t bfin_read_gptimer0(struct clocksource *cs)
+{
+ return bfin_read_TIMER0_COUNTER();
+}
+
+static struct clocksource bfin_cs_gptimer0 = {
+ .name = "bfin_cs_gptimer0",
+ .rating = 350,
+ .read = bfin_read_gptimer0,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static inline unsigned long long bfin_cs_gptimer0_sched_clock(void)
+{
+ return clocksource_cyc2ns(bfin_read_TIMER0_COUNTER(),
+ bfin_cs_gptimer0.mult, bfin_cs_gptimer0.shift);
+}
+
+static int __init bfin_cs_gptimer0_init(void)
+{
+ setup_gptimer0();
+
+ if (clocksource_register_hz(&bfin_cs_gptimer0, get_sclk()))
+ panic("failed to register clocksource");
+
+ return 0;
+}
+#else
+# define bfin_cs_gptimer0_init()
+#endif
+
+#if defined(CONFIG_GPTMR0_CLOCKSOURCE) || defined(CONFIG_CYCLES_CLOCKSOURCE)
+/* prefer to use cycles since it has higher rating */
+notrace unsigned long long sched_clock(void)
+{
+#if defined(CONFIG_CYCLES_CLOCKSOURCE)
+ return bfin_cs_cycles_sched_clock();
+#else
+ return bfin_cs_gptimer0_sched_clock();
+#endif
+}
+#endif
+
+#if defined(CONFIG_TICKSOURCE_GPTMR0)
+static int bfin_gptmr0_set_next_event(unsigned long cycles,
+ struct clock_event_device *evt)
+{
+ disable_gptimers(TIMER0bit);
+
+ /* it starts counting three SCLK cycles after the TIMENx bit is set */
+ set_gptimer_pwidth(TIMER0_id, cycles - 3);
+ enable_gptimers(TIMER0bit);
+ return 0;
+}
+
+static void bfin_gptmr0_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC: {
+ set_gptimer_config(TIMER0_id, \
+ TIMER_OUT_DIS | TIMER_IRQ_ENA | \
+ TIMER_PERIOD_CNT | TIMER_MODE_PWM);
+ set_gptimer_period(TIMER0_id, get_sclk() / HZ);
+ set_gptimer_pwidth(TIMER0_id, get_sclk() / HZ - 1);
+ enable_gptimers(TIMER0bit);
+ break;
+ }
+ case CLOCK_EVT_MODE_ONESHOT:
+ disable_gptimers(TIMER0bit);
+ set_gptimer_config(TIMER0_id, \
+ TIMER_OUT_DIS | TIMER_IRQ_ENA | TIMER_MODE_PWM);
+ set_gptimer_period(TIMER0_id, 0);
+ break;
+ case CLOCK_EVT_MODE_UNUSED:
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ disable_gptimers(TIMER0bit);
+ break;
+ case CLOCK_EVT_MODE_RESUME:
+ break;
+ }
+}
+
+static void bfin_gptmr0_ack(void)
+{
+ set_gptimer_status(TIMER_GROUP1, TIMER_STATUS_TIMIL0);
+}
+
+static void __init bfin_gptmr0_init(void)
+{
+ disable_gptimers(TIMER0bit);
+}
+
+#ifdef CONFIG_CORE_TIMER_IRQ_L1
+__attribute__((l1_text))
+#endif
+irqreturn_t bfin_gptmr0_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = dev_id;
+ smp_mb();
+ /*
+ * We want to ACK before we handle so that we can handle smaller timer
+ * intervals. This way if the timer expires again while we're handling
+ * things, we're more likely to see that 2nd int rather than swallowing
+ * it by ACKing the int at the end of this handler.
+ */
+ bfin_gptmr0_ack();
+ evt->event_handler(evt);
+ return IRQ_HANDLED;
+}
+
+static struct irqaction gptmr0_irq = {
+ .name = "Blackfin GPTimer0",
+ .flags = IRQF_TIMER | IRQF_IRQPOLL | IRQF_PERCPU,
+ .handler = bfin_gptmr0_interrupt,
+};
+
+static struct clock_event_device clockevent_gptmr0 = {
+ .name = "bfin_gptimer0",
+ .rating = 300,
+ .irq = IRQ_TIMER0,
+ .shift = 32,
+ .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
+ .set_next_event = bfin_gptmr0_set_next_event,
+ .set_mode = bfin_gptmr0_set_mode,
+};
+
+static void __init bfin_gptmr0_clockevent_init(struct clock_event_device *evt)
+{
+ unsigned long clock_tick;
+
+ clock_tick = get_sclk();
+ evt->mult = div_sc(clock_tick, NSEC_PER_SEC, evt->shift);
+ evt->max_delta_ns = clockevent_delta2ns(-1, evt);
+ evt->min_delta_ns = clockevent_delta2ns(100, evt);
+
+ evt->cpumask = cpumask_of(0);
+
+ clockevents_register_device(evt);
+}
+#endif /* CONFIG_TICKSOURCE_GPTMR0 */
+
+#if defined(CONFIG_TICKSOURCE_CORETMR)
+/* per-cpu local core timer */
+DEFINE_PER_CPU(struct clock_event_device, coretmr_events);
+
+static int bfin_coretmr_set_next_event(unsigned long cycles,
+ struct clock_event_device *evt)
+{
+ bfin_write_TCNTL(TMPWR);
+ CSYNC();
+ bfin_write_TCOUNT(cycles);
+ CSYNC();
+ bfin_write_TCNTL(TMPWR | TMREN);
+ return 0;
+}
+
+static void bfin_coretmr_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC: {
+ unsigned long tcount = ((get_cclk() / (HZ * TIME_SCALE)) - 1);
+ bfin_write_TCNTL(TMPWR);
+ CSYNC();
+ bfin_write_TSCALE(TIME_SCALE - 1);
+ bfin_write_TPERIOD(tcount);
+ bfin_write_TCOUNT(tcount);
+ CSYNC();
+ bfin_write_TCNTL(TMPWR | TMREN | TAUTORLD);
+ break;
+ }
+ case CLOCK_EVT_MODE_ONESHOT:
+ bfin_write_TCNTL(TMPWR);
+ CSYNC();
+ bfin_write_TSCALE(TIME_SCALE - 1);
+ bfin_write_TPERIOD(0);
+ bfin_write_TCOUNT(0);
+ break;
+ case CLOCK_EVT_MODE_UNUSED:
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ bfin_write_TCNTL(0);
+ CSYNC();
+ break;
+ case CLOCK_EVT_MODE_RESUME:
+ break;
+ }
+}
+
+void bfin_coretmr_init(void)
+{
+ /* power up the timer, but don't enable it just yet */
+ bfin_write_TCNTL(TMPWR);
+ CSYNC();
+
+ /* the TSCALE prescaler counter. */
+ bfin_write_TSCALE(TIME_SCALE - 1);
+ bfin_write_TPERIOD(0);
+ bfin_write_TCOUNT(0);
+
+ CSYNC();
+}
+
+#ifdef CONFIG_CORE_TIMER_IRQ_L1
+__attribute__((l1_text))
+#endif
+irqreturn_t bfin_coretmr_interrupt(int irq, void *dev_id)
+{
+ int cpu = smp_processor_id();
+ struct clock_event_device *evt = &per_cpu(coretmr_events, cpu);
+
+ smp_mb();
+ evt->event_handler(evt);
+
+ touch_nmi_watchdog();
+
+ return IRQ_HANDLED;
+}
+
+static struct irqaction coretmr_irq = {
+ .name = "Blackfin CoreTimer",
+ .flags = IRQF_TIMER | IRQF_IRQPOLL | IRQF_PERCPU,
+ .handler = bfin_coretmr_interrupt,
+};
+
+void bfin_coretmr_clockevent_init(void)
+{
+ unsigned long clock_tick;
+ unsigned int cpu = smp_processor_id();
+ struct clock_event_device *evt = &per_cpu(coretmr_events, cpu);
+
+#ifdef CONFIG_SMP
+ evt->broadcast = smp_timer_broadcast;
+#endif
+
+ evt->name = "bfin_core_timer";
+ evt->rating = 350;
+ evt->irq = -1;
+ evt->shift = 32;
+ evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+ evt->set_next_event = bfin_coretmr_set_next_event;
+ evt->set_mode = bfin_coretmr_set_mode;
+
+ clock_tick = get_cclk() / TIME_SCALE;
+ evt->mult = div_sc(clock_tick, NSEC_PER_SEC, evt->shift);
+ evt->max_delta_ns = clockevent_delta2ns(-1, evt);
+ evt->min_delta_ns = clockevent_delta2ns(100, evt);
+
+ evt->cpumask = cpumask_of(cpu);
+
+ clockevents_register_device(evt);
+}
+#endif /* CONFIG_TICKSOURCE_CORETMR */
+
+void read_persistent_clock(struct timespec *ts)
+{
+ time_t secs_since_1970 = (365 * 37 + 9) * 24 * 60 * 60; /* 1 Jan 2007 */
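+	/* i.e. 37 years of 365 days plus the 9 leap days 1972-2004 */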
+ ts->tv_sec = secs_since_1970;
+ ts->tv_nsec = 0;
+}
+
+void __init time_init(void)
+{
+
+#ifdef CONFIG_RTC_DRV_BFIN
+ /* [#2663] hack to filter junk RTC values that would cause
+ * userspace to have to deal with time values greater than
+ * 2^31 seconds (which uClibc cannot cope with yet)
+ */
+ if ((bfin_read_RTC_STAT() & 0xC0000000) == 0xC0000000) {
+ printk(KERN_NOTICE "bfin-rtc: invalid date; resetting\n");
+ bfin_write_RTC_STAT(0);
+ }
+#endif
+
+ bfin_cs_cycles_init();
+ bfin_cs_gptimer0_init();
+
+#if defined(CONFIG_TICKSOURCE_CORETMR)
+ bfin_coretmr_init();
+ setup_irq(IRQ_CORETMR, &coretmr_irq);
+ bfin_coretmr_clockevent_init();
+#endif
+
+#if defined(CONFIG_TICKSOURCE_GPTMR0)
+ bfin_gptmr0_init();
+ setup_irq(IRQ_TIMER0, &gptmr0_irq);
+ gptmr0_irq.dev_id = &clockevent_gptmr0;
+ bfin_gptmr0_clockevent_init(&clockevent_gptmr0);
+#endif
+
+#if !defined(CONFIG_TICKSOURCE_CORETMR) && !defined(CONFIG_TICKSOURCE_GPTMR0)
+# error at least one clock event device is required
+#endif
+}
diff --git a/arch/blackfin/kernel/time.c b/arch/blackfin/kernel/time.c
new file mode 100644
index 00000000..2310b249
--- /dev/null
+++ b/arch/blackfin/kernel/time.c
@@ -0,0 +1,156 @@
+/*
+ * arch/blackfin/kernel/time.c
+ *
+ * This file contains the Blackfin-specific time handling details.
+ * Most of the stuff is located in the machine specific files.
+ *
+ * Copyright 2004-2008 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/module.h>
+#include <linux/profile.h>
+#include <linux/interrupt.h>
+#include <linux/time.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+
+#include <asm/blackfin.h>
+#include <asm/time.h>
+#include <asm/gptimers.h>
+
+/* This is an NTP setting */
+#define TICK_SIZE (tick_nsec / 1000)
+
+static struct irqaction bfin_timer_irq = {
+ .name = "Blackfin Timer Tick",
+};
+
+#if defined(CONFIG_IPIPE)
+void __init setup_system_timer0(void)
+{
+ /* Power down the core timer, just to play safe. */
+ bfin_write_TCNTL(0);
+
+ disable_gptimers(TIMER0bit);
+ set_gptimer_status(0, TIMER_STATUS_TRUN0);
+ while (get_gptimer_status(0) & TIMER_STATUS_TRUN0)
+ udelay(10);
+
+ set_gptimer_config(0, 0x59); /* IRQ enable, periodic, PWM_OUT, SCLKed, OUT PAD disabled */
+ set_gptimer_period(TIMER0_id, get_sclk() / HZ);
+ set_gptimer_pwidth(TIMER0_id, 1);
+ SSYNC();
+ enable_gptimers(TIMER0bit);
+}
+#else
+void __init setup_core_timer(void)
+{
+ u32 tcount;
+
+ /* power up the timer, but don't enable it just yet */
+ bfin_write_TCNTL(TMPWR);
+ CSYNC();
+
+ /* the TSCALE prescaler counter */
+ bfin_write_TSCALE(TIME_SCALE - 1);
+
+ tcount = ((get_cclk() / (HZ * TIME_SCALE)) - 1);
+ bfin_write_TPERIOD(tcount);
+ bfin_write_TCOUNT(tcount);
+
+ /* now enable the timer */
+ CSYNC();
+
+ bfin_write_TCNTL(TAUTORLD | TMREN | TMPWR);
+}
+#endif
+
+static void __init
+time_sched_init(irqreturn_t(*timer_routine) (int, void *))
+{
+#if defined(CONFIG_IPIPE)
+ setup_system_timer0();
+ bfin_timer_irq.handler = timer_routine;
+ setup_irq(IRQ_TIMER0, &bfin_timer_irq);
+#else
+ setup_core_timer();
+ bfin_timer_irq.handler = timer_routine;
+ setup_irq(IRQ_CORETMR, &bfin_timer_irq);
+#endif
+}
+
+#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
+/*
+ * Should return useconds since last timer tick
+ */
+u32 arch_gettimeoffset(void)
+{
+ unsigned long offset;
+ unsigned long clocks_per_jiffy;
+
+#if defined(CONFIG_IPIPE)
+ clocks_per_jiffy = bfin_read_TIMER0_PERIOD();
+ offset = bfin_read_TIMER0_COUNTER() / \
+ (((clocks_per_jiffy + 1) * HZ) / USEC_PER_SEC);
+
+ if ((get_gptimer_status(0) & TIMER_STATUS_TIMIL0) && offset < (100000 / HZ / 2))
+ offset += (USEC_PER_SEC / HZ);
+#else
+ clocks_per_jiffy = bfin_read_TPERIOD();
+ offset = (clocks_per_jiffy - bfin_read_TCOUNT()) / \
+ (((clocks_per_jiffy + 1) * HZ) / USEC_PER_SEC);
+
+ /* Check if we just wrapped the counters and maybe missed a tick */
+ if ((bfin_read_ILAT() & (1 << IRQ_CORETMR))
+ && (offset < (100000 / HZ / 2)))
+ offset += (USEC_PER_SEC / HZ);
+#endif
+ return offset;
+}
+#endif
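+
+/*
+ * Rough worked example (hypothetical numbers): with HZ=100 and a core
+ * timer period of 5,000,000 clocks, the divisor above works out to 500
+ * clocks per microsecond; halfway through a tick (2,500,000 clocks
+ * elapsed) the reported offset is 5000 usec, half of the 10 ms jiffy.
+ */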
+
+/*
+ * timer_interrupt() needs to keep up the real-time clock,
+ * as well as call the "xtime_update()" routine every clocktick
+ */
+#ifdef CONFIG_CORE_TIMER_IRQ_L1
+__attribute__((l1_text))
+#endif
+irqreturn_t timer_interrupt(int irq, void *dummy)
+{
+ xtime_update(1);
+
+#ifdef CONFIG_IPIPE
+ update_root_process_times(get_irq_regs());
+#else
+ update_process_times(user_mode(get_irq_regs()));
+#endif
+ profile_tick(CPU_PROFILING);
+
+ return IRQ_HANDLED;
+}
+
+void read_persistent_clock(struct timespec *ts)
+{
+ time_t secs_since_1970 = (365 * 37 + 9) * 24 * 60 * 60; /* 1 Jan 2007 */
+ ts->tv_sec = secs_since_1970;
+ ts->tv_nsec = 0;
+}
+
+void __init time_init(void)
+{
+#ifdef CONFIG_RTC_DRV_BFIN
+ /* [#2663] hack to filter junk RTC values that would cause
+ * userspace to have to deal with time values greater than
+ * 2^31 seconds (which uClibc cannot cope with yet)
+ */
+ if ((bfin_read_RTC_STAT() & 0xC0000000) == 0xC0000000) {
+ printk(KERN_NOTICE "bfin-rtc: invalid date; resetting\n");
+ bfin_write_RTC_STAT(0);
+ }
+#endif
+
+ time_sched_init(timer_interrupt);
+}
diff --git a/arch/blackfin/kernel/trace.c b/arch/blackfin/kernel/trace.c
new file mode 100644
index 00000000..44bbf2f5
--- /dev/null
+++ b/arch/blackfin/kernel/trace.c
@@ -0,0 +1,984 @@
+/* provide some functions which dump the trace buffer in a nice way for
+ * people to read, so they can understand what is going on
+ *
+ * Copyright 2004-2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later
+ */
+
+#include <linux/kernel.h>
+#include <linux/hardirq.h>
+#include <linux/thread_info.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/kallsyms.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/irq.h>
+#include <asm/dma.h>
+#include <asm/trace.h>
+#include <asm/fixed_code.h>
+#include <asm/traps.h>
+#include <asm/irq_handler.h>
+#include <asm/pda.h>
+
+void decode_address(char *buf, unsigned long address)
+{
+ struct task_struct *p;
+ struct mm_struct *mm;
+ unsigned long flags, offset;
+ unsigned char in_atomic = (bfin_read_IPEND() & 0x10) || in_atomic();
+ struct rb_node *n;
+
+#ifdef CONFIG_KALLSYMS
+ unsigned long symsize;
+ const char *symname;
+ char *modname;
+ char *delim = ":";
+ char namebuf[128];
+#endif
+
+ buf += sprintf(buf, "<0x%08lx> ", address);
+
+#ifdef CONFIG_KALLSYMS
+ /* look up the address and see if we are in kernel space */
+ symname = kallsyms_lookup(address, &symsize, &offset, &modname, namebuf);
+
+ if (symname) {
+ /* yeah! kernel space! */
+ if (!modname)
+ modname = delim = "";
+ sprintf(buf, "{ %s%s%s%s + 0x%lx }",
+ delim, modname, delim, symname,
+ (unsigned long)offset);
+ return;
+ }
+#endif
+
+ if (address >= FIXED_CODE_START && address < FIXED_CODE_END) {
+ /* Problem in fixed code section? */
+ strcat(buf, "/* Maybe fixed code section */");
+ return;
+
+ } else if (address < CONFIG_BOOT_LOAD) {
+ /* Problem somewhere before the kernel start address */
+ strcat(buf, "/* Maybe null pointer? */");
+ return;
+
+ } else if (address >= COREMMR_BASE) {
+ strcat(buf, "/* core mmrs */");
+ return;
+
+ } else if (address >= SYSMMR_BASE) {
+ strcat(buf, "/* system mmrs */");
+ return;
+
+ } else if (address >= L1_ROM_START && address < L1_ROM_START + L1_ROM_LENGTH) {
+ strcat(buf, "/* on-chip L1 ROM */");
+ return;
+
+ } else if (address >= L1_SCRATCH_START && address < L1_SCRATCH_START + L1_SCRATCH_LENGTH) {
+ strcat(buf, "/* on-chip scratchpad */");
+ return;
+
+ } else if (address >= physical_mem_end && address < ASYNC_BANK0_BASE) {
+ strcat(buf, "/* unconnected memory */");
+ return;
+
+ } else if (address >= ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE && address < BOOT_ROM_START) {
+ strcat(buf, "/* reserved memory */");
+ return;
+
+ } else if (address >= L1_DATA_A_START && address < L1_DATA_A_START + L1_DATA_A_LENGTH) {
+ strcat(buf, "/* on-chip Data Bank A */");
+ return;
+
+ } else if (address >= L1_DATA_B_START && address < L1_DATA_B_START + L1_DATA_B_LENGTH) {
+ strcat(buf, "/* on-chip Data Bank B */");
+ return;
+ }
+
+ /*
+ * Don't walk any of the vmas if we are oopsing, it has been known
+ * to cause problems - corrupt vmas (kernel crashes) cause double faults
+ */
+ if (oops_in_progress) {
+ strcat(buf, "/* kernel dynamic memory (maybe user-space) */");
+ return;
+ }
+
+ /* looks like we're off in user-land, so let's walk all the
+	 * mappings of all our processes and see if we can't be a wee
+ * bit more specific
+ */
+ write_lock_irqsave(&tasklist_lock, flags);
+ for_each_process(p) {
+ mm = (in_atomic ? p->mm : get_task_mm(p));
+ if (!mm)
+ continue;
+
+ if (!down_read_trylock(&mm->mmap_sem)) {
+ if (!in_atomic)
+ mmput(mm);
+ continue;
+ }
+
+ for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) {
+ struct vm_area_struct *vma;
+
+ vma = rb_entry(n, struct vm_area_struct, vm_rb);
+
+ if (address >= vma->vm_start && address < vma->vm_end) {
+ char _tmpbuf[256];
+ char *name = p->comm;
+ struct file *file = vma->vm_file;
+
+ if (file) {
+ char *d_name = d_path(&file->f_path, _tmpbuf,
+ sizeof(_tmpbuf));
+ if (!IS_ERR(d_name))
+ name = d_name;
+ }
+
+ /* FLAT does not have its text aligned to the start of
+ * the map while FDPIC ELF does ...
+ */
+
+ /* before we can check flat/fdpic, we need to
+ * make sure current is valid
+ */
+ if ((unsigned long)current >= FIXED_CODE_START &&
+ !((unsigned long)current & 0x3)) {
+ if (current->mm &&
+ (address > current->mm->start_code) &&
+ (address < current->mm->end_code))
+ offset = address - current->mm->start_code;
+ else
+ offset = (address - vma->vm_start) +
+ (vma->vm_pgoff << PAGE_SHIFT);
+
+ sprintf(buf, "[ %s + 0x%lx ]", name, offset);
+ } else
+ sprintf(buf, "[ %s vma:0x%lx-0x%lx]",
+ name, vma->vm_start, vma->vm_end);
+
+ up_read(&mm->mmap_sem);
+ if (!in_atomic)
+ mmput(mm);
+
+ if (buf[0] == '\0')
+ sprintf(buf, "[ %s ] dynamic memory", name);
+
+ goto done;
+ }
+ }
+
+ up_read(&mm->mmap_sem);
+ if (!in_atomic)
+ mmput(mm);
+ }
+
+ /*
+ * we were unable to find this address anywhere,
+ * or some MMs were skipped because they were in use.
+ */
+ sprintf(buf, "/* kernel dynamic memory */");
+
+done:
+ write_unlock_irqrestore(&tasklist_lock, flags);
+}
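+
+/*
+ * For illustration only (addresses, symbols and paths invented), the
+ * strings decode_address() builds look like:
+ *
+ *	<0x00203a58> { _trap_c + 0x12 }         kernel text, via kallsyms
+ *	<0x036000a0> [ /bin/busybox + 0xa0 ]    mapping of a user process
+ *	<0xffe02000>                            core MMR space hint
+ */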
+
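+/* Index of the last word in the expanded (software) trace buffer; the
+ * buffer holds (1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN) * 256 entries,
+ * and this also serves as the wrap-around value when walking it backwards.
+ */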
+#define EXPAND_LEN ((1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN) * 256 - 1)
+
+/*
+ * Similar to get_user: do some address checking, then dereference.
+ * Returns true on success, false on a bad address.
+ */
+bool get_mem16(unsigned short *val, unsigned short *address)
+{
+ unsigned long addr = (unsigned long)address;
+
+ /* Check for odd addresses */
+ if (addr & 0x1)
+ return false;
+
+ switch (bfin_mem_access_type(addr, 2)) {
+ case BFIN_MEM_ACCESS_CORE:
+ case BFIN_MEM_ACCESS_CORE_ONLY:
+ *val = *address;
+ return true;
+ case BFIN_MEM_ACCESS_DMA:
+ dma_memcpy(val, address, 2);
+ return true;
+ case BFIN_MEM_ACCESS_ITEST:
+ isram_memcpy(val, address, 2);
+ return true;
+ default: /* invalid access */
+ return false;
+ }
+}
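+
+/*
+ * Minimal usage sketch (hypothetical caller, "fp" being a saved pt_regs):
+ * peek at a suspect address without risking a recursive fault:
+ *
+ *	unsigned short insn;
+ *	if (get_mem16(&insn, (unsigned short *)fp->pc))
+ *		pr_notice("opcode at PC: 0x%04x\n", insn);
+ */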
+
+bool get_instruction(unsigned int *val, unsigned short *address)
+{
+ unsigned long addr = (unsigned long)address;
+ unsigned short opcode0, opcode1;
+
+ /* Check for odd addresses */
+ if (addr & 0x1)
+ return false;
+
+ /* MMR region will never have instructions */
+ if (addr >= SYSMMR_BASE)
+ return false;
+
+ /* Scratchpad will never have instructions */
+ if (addr >= L1_SCRATCH_START && addr < L1_SCRATCH_START + L1_SCRATCH_LENGTH)
+ return false;
+
+ /* Data banks will never have instructions */
+ if (addr >= BOOT_ROM_START + BOOT_ROM_LENGTH && addr < L1_CODE_START)
+ return false;
+
+ if (!get_mem16(&opcode0, address))
+ return false;
+
+ /* was this a 32-bit instruction? If so, get the next 16 bits */
+ if ((opcode0 & 0xc000) == 0xc000) {
+ if (!get_mem16(&opcode1, address + 1))
+ return false;
+ *val = (opcode0 << 16) + opcode1;
+ } else
+ *val = opcode0;
+
+ return true;
+}
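+
+/*
+ * Note: the width of a Blackfin instruction is encoded in the top two bits
+ * of its first 16-bit word - if both are set (mask 0xc000), the instruction
+ * is 32 bits wide.  For example, 0x0010 is a complete 16-bit RTS, while a
+ * first word of 0xe3xx belongs to a 32-bit CALL/JUMP.L.
+ */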
+
+#if defined(CONFIG_DEBUG_BFIN_HWTRACE_ON)
+/*
+ * Decode the instruction if we are printing out the trace, as it
+ * makes things easier to follow, without running it through objdump.
+ * We decode the change-of-flow and the common load/store instructions,
+ * which are the main causes of faults and of discontinuities in the
+ * trace buffer.
+ */
+
+#define ProgCtrl_opcode 0x0000
+#define ProgCtrl_poprnd_bits 0
+#define ProgCtrl_poprnd_mask 0xf
+#define ProgCtrl_prgfunc_bits 4
+#define ProgCtrl_prgfunc_mask 0xf
+#define ProgCtrl_code_bits 8
+#define ProgCtrl_code_mask 0xff
+
+static void decode_ProgCtrl_0(unsigned int opcode)
+{
+ int poprnd = ((opcode >> ProgCtrl_poprnd_bits) & ProgCtrl_poprnd_mask);
+ int prgfunc = ((opcode >> ProgCtrl_prgfunc_bits) & ProgCtrl_prgfunc_mask);
+
+ if (prgfunc == 0 && poprnd == 0)
+ pr_cont("NOP");
+ else if (prgfunc == 1 && poprnd == 0)
+ pr_cont("RTS");
+ else if (prgfunc == 1 && poprnd == 1)
+ pr_cont("RTI");
+ else if (prgfunc == 1 && poprnd == 2)
+ pr_cont("RTX");
+ else if (prgfunc == 1 && poprnd == 3)
+ pr_cont("RTN");
+ else if (prgfunc == 1 && poprnd == 4)
+ pr_cont("RTE");
+ else if (prgfunc == 2 && poprnd == 0)
+ pr_cont("IDLE");
+ else if (prgfunc == 2 && poprnd == 3)
+ pr_cont("CSYNC");
+ else if (prgfunc == 2 && poprnd == 4)
+ pr_cont("SSYNC");
+ else if (prgfunc == 2 && poprnd == 5)
+ pr_cont("EMUEXCPT");
+ else if (prgfunc == 3)
+ pr_cont("CLI R%i", poprnd);
+ else if (prgfunc == 4)
+ pr_cont("STI R%i", poprnd);
+ else if (prgfunc == 5)
+ pr_cont("JUMP (P%i)", poprnd);
+ else if (prgfunc == 6)
+ pr_cont("CALL (P%i)", poprnd);
+ else if (prgfunc == 7)
+ pr_cont("CALL (PC + P%i)", poprnd);
+ else if (prgfunc == 8)
+		pr_cont("JUMP (PC + P%i)", poprnd);
+ else if (prgfunc == 9)
+ pr_cont("RAISE %i", poprnd);
+ else if (prgfunc == 10)
+ pr_cont("EXCPT %i", poprnd);
+ else
+ pr_cont("0x%04x", opcode);
+
+}
+
+#define BRCC_opcode 0x1000
+#define BRCC_offset_bits 0
+#define BRCC_offset_mask 0x3ff
+#define BRCC_B_bits 10
+#define BRCC_B_mask 0x1
+#define BRCC_T_bits 11
+#define BRCC_T_mask 0x1
+#define BRCC_code_bits 12
+#define BRCC_code_mask 0xf
+
+static void decode_BRCC_0(unsigned int opcode)
+{
+ int B = ((opcode >> BRCC_B_bits) & BRCC_B_mask);
+ int T = ((opcode >> BRCC_T_bits) & BRCC_T_mask);
+
+ pr_cont("IF %sCC JUMP pcrel %s", T ? "" : "!", B ? "(BP)" : "");
+}
+
+#define CALLa_opcode 0xe2000000
+#define CALLa_addr_bits 0
+#define CALLa_addr_mask 0xffffff
+#define CALLa_S_bits 24
+#define CALLa_S_mask 0x1
+#define CALLa_code_bits 25
+#define CALLa_code_mask 0x7f
+
+static void decode_CALLa_0(unsigned int opcode)
+{
+	/* opcode is the full 32-bit word here, so use the absolute bit position */
+	int S = ((opcode >> CALLa_S_bits) & CALLa_S_mask);
+
+ if (S)
+ pr_cont("CALL pcrel");
+ else
+ pr_cont("JUMP.L");
+}
+
+#define LoopSetup_opcode 0xe0800000
+#define LoopSetup_eoffset_bits 0
+#define LoopSetup_eoffset_mask 0x3ff
+#define LoopSetup_dontcare_bits 10
+#define LoopSetup_dontcare_mask 0x3
+#define LoopSetup_reg_bits 12
+#define LoopSetup_reg_mask 0xf
+#define LoopSetup_soffset_bits 16
+#define LoopSetup_soffset_mask 0xf
+#define LoopSetup_c_bits 20
+#define LoopSetup_c_mask 0x1
+#define LoopSetup_rop_bits 21
+#define LoopSetup_rop_mask 0x3
+#define LoopSetup_code_bits 23
+#define LoopSetup_code_mask 0x1ff
+
+static void decode_LoopSetup_0(unsigned int opcode)
+{
+ int c = ((opcode >> LoopSetup_c_bits) & LoopSetup_c_mask);
+ int reg = ((opcode >> LoopSetup_reg_bits) & LoopSetup_reg_mask);
+ int rop = ((opcode >> LoopSetup_rop_bits) & LoopSetup_rop_mask);
+
+ pr_cont("LSETUP <> LC%i", c);
+ if ((rop & 1) == 1)
+ pr_cont("= P%i", reg);
+ if ((rop & 2) == 2)
+ pr_cont(" >> 0x1");
+}
+
+#define DspLDST_opcode 0x9c00
+#define DspLDST_reg_bits 0
+#define DspLDST_reg_mask 0x7
+#define DspLDST_i_bits 3
+#define DspLDST_i_mask 0x3
+#define DspLDST_m_bits 5
+#define DspLDST_m_mask 0x3
+#define DspLDST_aop_bits 7
+#define DspLDST_aop_mask 0x3
+#define DspLDST_W_bits 9
+#define DspLDST_W_mask 0x1
+#define DspLDST_code_bits 10
+#define DspLDST_code_mask 0x3f
+
+static void decode_dspLDST_0(unsigned int opcode)
+{
+ int i = ((opcode >> DspLDST_i_bits) & DspLDST_i_mask);
+ int m = ((opcode >> DspLDST_m_bits) & DspLDST_m_mask);
+ int W = ((opcode >> DspLDST_W_bits) & DspLDST_W_mask);
+ int aop = ((opcode >> DspLDST_aop_bits) & DspLDST_aop_mask);
+ int reg = ((opcode >> DspLDST_reg_bits) & DspLDST_reg_mask);
+
+ if (W == 0) {
+ pr_cont("R%i", reg);
+ switch (m) {
+ case 0:
+ pr_cont(" = ");
+ break;
+ case 1:
+ pr_cont(".L = ");
+ break;
+ case 2:
+ pr_cont(".W = ");
+ break;
+ }
+ }
+
+ pr_cont("[ I%i", i);
+
+ switch (aop) {
+ case 0:
+ pr_cont("++ ]");
+ break;
+ case 1:
+ pr_cont("-- ]");
+ break;
+ }
+
+ if (W == 1) {
+ pr_cont(" = R%i", reg);
+ switch (m) {
+		case 1:
+			pr_cont(".L");
+			break;
+		case 2:
+			pr_cont(".W");
+			break;
+ }
+ }
+}
+
+#define LDST_opcode 0x9000
+#define LDST_reg_bits 0
+#define LDST_reg_mask 0x7
+#define LDST_ptr_bits 3
+#define LDST_ptr_mask 0x7
+#define LDST_Z_bits 6
+#define LDST_Z_mask 0x1
+#define LDST_aop_bits 7
+#define LDST_aop_mask 0x3
+#define LDST_W_bits 9
+#define LDST_W_mask 0x1
+#define LDST_sz_bits 10
+#define LDST_sz_mask 0x3
+#define LDST_code_bits 12
+#define LDST_code_mask 0xf
+
+static void decode_LDST_0(unsigned int opcode)
+{
+ int Z = ((opcode >> LDST_Z_bits) & LDST_Z_mask);
+ int W = ((opcode >> LDST_W_bits) & LDST_W_mask);
+ int sz = ((opcode >> LDST_sz_bits) & LDST_sz_mask);
+ int aop = ((opcode >> LDST_aop_bits) & LDST_aop_mask);
+ int reg = ((opcode >> LDST_reg_bits) & LDST_reg_mask);
+ int ptr = ((opcode >> LDST_ptr_bits) & LDST_ptr_mask);
+
+ if (W == 0)
+ pr_cont("%s%i = ", (sz == 0 && Z == 1) ? "P" : "R", reg);
+
+ switch (sz) {
+ case 1:
+ pr_cont("W");
+ break;
+ case 2:
+ pr_cont("B");
+ break;
+ }
+
+ pr_cont("[P%i", ptr);
+
+ switch (aop) {
+ case 0:
+ pr_cont("++");
+ break;
+ case 1:
+ pr_cont("--");
+ break;
+ }
+ pr_cont("]");
+
+ if (W == 1)
+ pr_cont(" = %s%i ", (sz == 0 && Z == 1) ? "P" : "R", reg);
+
+ if (sz) {
+ if (Z)
+ pr_cont(" (X)");
+ else
+ pr_cont(" (Z)");
+ }
+}
+
+#define LDSTii_opcode 0xa000
+#define LDSTii_reg_bit 0
+#define LDSTii_reg_mask 0x7
+#define LDSTii_ptr_bit 3
+#define LDSTii_ptr_mask 0x7
+#define LDSTii_offset_bit 6
+#define LDSTii_offset_mask 0xf
+#define LDSTii_op_bit 10
+#define LDSTii_op_mask 0x3
+#define LDSTii_W_bit 12
+#define LDSTii_W_mask 0x1
+#define LDSTii_code_bit 13
+#define LDSTii_code_mask 0x7
+
+static void decode_LDSTii_0(unsigned int opcode)
+{
+ int reg = ((opcode >> LDSTii_reg_bit) & LDSTii_reg_mask);
+ int ptr = ((opcode >> LDSTii_ptr_bit) & LDSTii_ptr_mask);
+ int offset = ((opcode >> LDSTii_offset_bit) & LDSTii_offset_mask);
+ int op = ((opcode >> LDSTii_op_bit) & LDSTii_op_mask);
+ int W = ((opcode >> LDSTii_W_bit) & LDSTii_W_mask);
+
+	if (W == 0) {
+		/* op 3 loads a Preg; ops 1 and 2 are 16-bit loads into a Dreg */
+		pr_cont("%s%i = %s[P%i + %i]", op == 3 ? "P" : "R", reg,
+			op == 1 || op == 2 ? "W" : "", ptr, offset);
+		if (op == 1)
+			pr_cont("(Z)");
+		if (op == 2)
+			pr_cont("(X)");
+ } else {
+ pr_cont("%s[P%i + %i] = %s%i", op == 0 ? "" : "W", ptr,
+ offset, op == 3 ? "P" : "R", reg);
+ }
+}
+
+#define LDSTidxI_opcode 0xe4000000
+#define LDSTidxI_offset_bits 0
+#define LDSTidxI_offset_mask 0xffff
+#define LDSTidxI_reg_bits 16
+#define LDSTidxI_reg_mask 0x7
+#define LDSTidxI_ptr_bits 19
+#define LDSTidxI_ptr_mask 0x7
+#define LDSTidxI_sz_bits 22
+#define LDSTidxI_sz_mask 0x3
+#define LDSTidxI_Z_bits 24
+#define LDSTidxI_Z_mask 0x1
+#define LDSTidxI_W_bits 25
+#define LDSTidxI_W_mask 0x1
+#define LDSTidxI_code_bits 26
+#define LDSTidxI_code_mask 0x3f
+
+static void decode_LDSTidxI_0(unsigned int opcode)
+{
+ int Z = ((opcode >> LDSTidxI_Z_bits) & LDSTidxI_Z_mask);
+ int W = ((opcode >> LDSTidxI_W_bits) & LDSTidxI_W_mask);
+ int sz = ((opcode >> LDSTidxI_sz_bits) & LDSTidxI_sz_mask);
+ int reg = ((opcode >> LDSTidxI_reg_bits) & LDSTidxI_reg_mask);
+ int ptr = ((opcode >> LDSTidxI_ptr_bits) & LDSTidxI_ptr_mask);
+ int offset = ((opcode >> LDSTidxI_offset_bits) & LDSTidxI_offset_mask);
+
+ if (W == 0)
+ pr_cont("%s%i = ", sz == 0 && Z == 1 ? "P" : "R", reg);
+
+ if (sz == 1)
+ pr_cont("W");
+ if (sz == 2)
+ pr_cont("B");
+
+ pr_cont("[P%i + %s0x%x]", ptr, offset & 0x20 ? "-" : "",
+ (offset & 0x1f) << 2);
+
+ if (W == 0 && sz != 0) {
+ if (Z)
+ pr_cont("(X)");
+ else
+ pr_cont("(Z)");
+ }
+
+ if (W == 1)
+ pr_cont("= %s%i", (sz == 0 && Z == 1) ? "P" : "R", reg);
+
+}
+
+static void decode_opcode(unsigned int opcode)
+{
+#ifdef CONFIG_BUG
+ if (opcode == BFIN_BUG_OPCODE)
+ pr_cont("BUG");
+ else
+#endif
+ if ((opcode & 0xffffff00) == ProgCtrl_opcode)
+ decode_ProgCtrl_0(opcode);
+ else if ((opcode & 0xfffff000) == BRCC_opcode)
+ decode_BRCC_0(opcode);
+ else if ((opcode & 0xfffff000) == 0x2000)
+ pr_cont("JUMP.S");
+ else if ((opcode & 0xfe000000) == CALLa_opcode)
+ decode_CALLa_0(opcode);
+ else if ((opcode & 0xff8000C0) == LoopSetup_opcode)
+ decode_LoopSetup_0(opcode);
+ else if ((opcode & 0xfffffc00) == DspLDST_opcode)
+ decode_dspLDST_0(opcode);
+ else if ((opcode & 0xfffff000) == LDST_opcode)
+ decode_LDST_0(opcode);
+ else if ((opcode & 0xffffe000) == LDSTii_opcode)
+ decode_LDSTii_0(opcode);
+ else if ((opcode & 0xfc000000) == LDSTidxI_opcode)
+ decode_LDSTidxI_0(opcode);
+ else if (opcode & 0xffff0000)
+ pr_cont("0x%08x", opcode);
+ else
+ pr_cont("0x%04x", opcode);
+}
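+
+/*
+ * Worked example of the dispatch above: opcode 0x0010 masked with
+ * 0xffffff00 yields 0x0000, matching ProgCtrl_opcode, and
+ * decode_ProgCtrl_0() extracts prgfunc = 1, poprnd = 0, printing "RTS".
+ */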
+
+#define BIT_MULTI_INS 0x08000000
+static void decode_instruction(unsigned short *address)
+{
+ unsigned int opcode;
+
+ if (!get_instruction(&opcode, address))
+ return;
+
+ decode_opcode(opcode);
+
+	/* If this is a 32-bit instruction, it has the possibility of being
+	 * a multi-issue instruction (a 32-bit and two 16-bit instructions).
+	 * This test collides with the UNLINK instruction, so disallow that.
+ */
+ if ((opcode & 0xc0000000) == 0xc0000000 &&
+ (opcode & BIT_MULTI_INS) &&
+ (opcode & 0xe8000000) != 0xe8000000) {
+ pr_cont(" || ");
+ if (!get_instruction(&opcode, address + 2))
+ return;
+ decode_opcode(opcode);
+ pr_cont(" || ");
+ if (!get_instruction(&opcode, address + 3))
+ return;
+ decode_opcode(opcode);
+ }
+}
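+
+/*
+ * A multi-issue packet is 64 bits wide: one 32-bit instruction executed in
+ * parallel with two 16-bit instructions, which is why the trailing halves
+ * are fetched above at address + 2 and address + 3 (16-bit units).
+ */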
+#endif
+
+void dump_bfin_trace_buffer(void)
+{
+#ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON
+ int tflags, i = 0, fault = 0;
+ char buf[150];
+ unsigned short *addr;
+ unsigned int cpu = raw_smp_processor_id();
+#ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
+ int j, index;
+#endif
+
+ trace_buffer_save(tflags);
+
+ pr_notice("Hardware Trace:\n");
+
+#ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
+	pr_notice("WARNING: Expanded trace turned on - cannot trace exceptions\n");
+#endif
+
+ if (likely(bfin_read_TBUFSTAT() & TBUFCNT)) {
+ for (; bfin_read_TBUFSTAT() & TBUFCNT; i++) {
+ addr = (unsigned short *)bfin_read_TBUF();
+ decode_address(buf, (unsigned long)addr);
+ pr_notice("%4i Target : %s\n", i, buf);
+ /* Normally, the faulting instruction doesn't go into
+			 * the trace buffer (since it doesn't commit), so
+ * we print out the fault address here
+ */
+ if (!fault && addr == ((unsigned short *)evt_ivhw)) {
+ addr = (unsigned short *)bfin_read_TBUF();
+ decode_address(buf, (unsigned long)addr);
+ pr_notice(" FAULT : %s ", buf);
+ decode_instruction(addr);
+ pr_cont("\n");
+ fault = 1;
+ continue;
+ }
+ if (!fault && addr == (unsigned short *)trap &&
+ (cpu_pda[cpu].seqstat & SEQSTAT_EXCAUSE) > VEC_EXCPT15) {
+ decode_address(buf, cpu_pda[cpu].icplb_fault_addr);
+ pr_notice(" FAULT : %s ", buf);
+ decode_instruction((unsigned short *)cpu_pda[cpu].icplb_fault_addr);
+ pr_cont("\n");
+ fault = 1;
+ }
+ addr = (unsigned short *)bfin_read_TBUF();
+ decode_address(buf, (unsigned long)addr);
+ pr_notice(" Source : %s ", buf);
+ decode_instruction(addr);
+ pr_cont("\n");
+ }
+ }
+
+#ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
+ if (trace_buff_offset)
+ index = trace_buff_offset / 4;
+ else
+ index = EXPAND_LEN;
+
+ j = (1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN) * 128;
+ while (j) {
+ decode_address(buf, software_trace_buff[index]);
+ pr_notice("%4i Target : %s\n", i, buf);
+ index -= 1;
+ if (index < 0)
+ index = EXPAND_LEN;
+ decode_address(buf, software_trace_buff[index]);
+ pr_notice(" Source : %s ", buf);
+ decode_instruction((unsigned short *)software_trace_buff[index]);
+ pr_cont("\n");
+ index -= 1;
+ if (index < 0)
+ index = EXPAND_LEN;
+ j--;
+ i++;
+ }
+#endif
+
+ trace_buffer_restore(tflags);
+#endif
+}
+EXPORT_SYMBOL(dump_bfin_trace_buffer);
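+
+/*
+ * Illustrative output of dump_bfin_trace_buffer() (addresses and symbols
+ * invented):
+ *
+ *	Hardware Trace:
+ *	   0 Target : <0x00203a58> { _trap_c + 0x0 }
+ *	     Source : <0xffa00b2c> { _exception_to_level5 + 0x8c } CALL pcrel
+ *	   1 Target : <0xffa00aa0> { _exception_to_level5 + 0x0 }
+ *	     Source : <0xffa00604> { _trap + 0x64 } JUMP.S
+ */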
+
+void dump_bfin_process(struct pt_regs *fp)
+{
+ /* We should be able to look at fp->ipend, but we don't push it on the
+ * stack all the time, so do this until we fix that */
+ unsigned int context = bfin_read_IPEND();
+
+ if (oops_in_progress)
+ pr_emerg("Kernel OOPS in progress\n");
+
+ if (context & 0x0020 && (fp->seqstat & SEQSTAT_EXCAUSE) == VEC_HWERR)
+ pr_notice("HW Error context\n");
+ else if (context & 0x0020)
+ pr_notice("Deferred Exception context\n");
+ else if (context & 0x3FC0)
+ pr_notice("Interrupt context\n");
+ else if (context & 0x4000)
+ pr_notice("Deferred Interrupt context\n");
+ else if (context & 0x8000)
+ pr_notice("Kernel process context\n");
+
+ /* Because we are crashing, and pointers could be bad, we check things
+ * pretty closely before we use them
+ */
+ if ((unsigned long)current >= FIXED_CODE_START &&
+ !((unsigned long)current & 0x3) && current->pid) {
+ pr_notice("CURRENT PROCESS:\n");
+ if (current->comm >= (char *)FIXED_CODE_START)
+ pr_notice("COMM=%s PID=%d",
+ current->comm, current->pid);
+ else
+ pr_notice("COMM= invalid");
+
+ pr_cont(" CPU=%d\n", current_thread_info()->cpu);
+ if (!((unsigned long)current->mm & 0x3) &&
+ (unsigned long)current->mm >= FIXED_CODE_START) {
+ pr_notice("TEXT = 0x%p-0x%p DATA = 0x%p-0x%p\n",
+ (void *)current->mm->start_code,
+ (void *)current->mm->end_code,
+ (void *)current->mm->start_data,
+ (void *)current->mm->end_data);
+ pr_notice(" BSS = 0x%p-0x%p USER-STACK = 0x%p\n\n",
+ (void *)current->mm->end_data,
+ (void *)current->mm->brk,
+ (void *)current->mm->start_stack);
+ } else
+ pr_notice("invalid mm\n");
+ } else
+ pr_notice("No Valid process in current context\n");
+}
+
+void dump_bfin_mem(struct pt_regs *fp)
+{
+ unsigned short *addr, *erraddr, val = 0, err = 0;
+ char sti = 0, buf[6];
+
+ erraddr = (void *)fp->pc;
+
+ pr_notice("return address: [0x%p]; contents of:", erraddr);
+
+ for (addr = (unsigned short *)((unsigned long)erraddr & ~0xF) - 0x10;
+ addr < (unsigned short *)((unsigned long)erraddr & ~0xF) + 0x10;
+ addr++) {
+ if (!((unsigned long)addr & 0xF))
+ pr_notice("0x%p: ", addr);
+
+ if (!get_mem16(&val, addr)) {
+ val = 0;
+ sprintf(buf, "????");
+ } else
+ sprintf(buf, "%04x", val);
+
+ if (addr == erraddr) {
+ pr_cont("[%s]", buf);
+ err = val;
+ } else
+ pr_cont(" %s ", buf);
+
+ /* Do any previous instructions turn on interrupts? */
+ if (addr <= erraddr && /* in the past */
+ ((val >= 0x0040 && val <= 0x0047) || /* STI instruction */
+ val == 0x017b)) /* [SP++] = RETI */
+ sti = 1;
+ }
+
+ pr_cont("\n");
+
+ /* Hardware error interrupts can be deferred */
+ if (unlikely(sti && (fp->seqstat & SEQSTAT_EXCAUSE) == VEC_HWERR &&
+	    oops_in_progress)) {
+ pr_notice("Looks like this was a deferred error - sorry\n");
+#ifndef CONFIG_DEBUG_HWERR
+ pr_notice("The remaining message may be meaningless\n");
+ pr_notice("You should enable CONFIG_DEBUG_HWERR to get a better idea where it came from\n");
+#else
+ /* If we are handling only one peripheral interrupt
+ * and current mm and pid are valid, and the last error
+		 * was in that user space process's text area,
+		 * print it out, because that is where the problem exists
+ */
+ if ((!(((fp)->ipend & ~0x30) & (((fp)->ipend & ~0x30) - 1))) &&
+ (current->pid && current->mm)) {
+ /* And the last RETI points to the current userspace context */
+ if ((fp + 1)->pc >= current->mm->start_code &&
+ (fp + 1)->pc <= current->mm->end_code) {
+				pr_notice("It might be better to look around here:\n");
+ pr_notice("-------------------------------------------\n");
+ show_regs(fp + 1);
+ pr_notice("-------------------------------------------\n");
+ }
+ }
+#endif
+ }
+}
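+
+/*
+ * Illustrative output (values invented, excerpt only); the word at the
+ * faulting PC is printed in brackets, and unreadable words show as ????:
+ *
+ *	return address: [0x00203a58]; contents of:
+ *	0x00203a40:  e128  0040  0010  9310  b0f1  0000  6c02  e3fe
+ *	0x00203a50:  0c42  1816  3224  a0aa [0000] e52e  fffe  0c60
+ */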
+
+void show_regs(struct pt_regs *fp)
+{
+ char buf[150];
+ struct irqaction *action;
+ unsigned int i;
+ unsigned long flags = 0;
+ unsigned int cpu = raw_smp_processor_id();
+ unsigned char in_atomic = (bfin_read_IPEND() & 0x10) || in_atomic();
+
+ pr_notice("\n");
+ if (CPUID != bfin_cpuid())
+ pr_notice("Compiled for cpu family 0x%04x (Rev %d), "
+ "but running on:0x%04x (Rev %d)\n",
+ CPUID, bfin_compiled_revid(), bfin_cpuid(), bfin_revid());
+
+ pr_notice("ADSP-%s-0.%d",
+ CPU, bfin_compiled_revid());
+
+ if (bfin_compiled_revid() != bfin_revid())
+ pr_cont("(Detected 0.%d)", bfin_revid());
+
+ pr_cont(" %lu(MHz CCLK) %lu(MHz SCLK) (%s)\n",
+ get_cclk()/1000000, get_sclk()/1000000,
+#ifdef CONFIG_MPU
+ "mpu on"
+#else
+ "mpu off"
+#endif
+ );
+
+ pr_notice("%s", linux_banner);
+
+ pr_notice("\nSEQUENCER STATUS:\t\t%s\n", print_tainted());
+ pr_notice(" SEQSTAT: %08lx IPEND: %04lx IMASK: %04lx SYSCFG: %04lx\n",
+ (long)fp->seqstat, fp->ipend, cpu_pda[raw_smp_processor_id()].ex_imask, fp->syscfg);
+ if (fp->ipend & EVT_IRPTEN)
+ pr_notice(" Global Interrupts Disabled (IPEND[4])\n");
+ if (!(cpu_pda[raw_smp_processor_id()].ex_imask & (EVT_IVG13 | EVT_IVG12 | EVT_IVG11 |
+ EVT_IVG10 | EVT_IVG9 | EVT_IVG8 | EVT_IVG7 | EVT_IVTMR)))
+ pr_notice(" Peripheral interrupts masked off\n");
+ if (!(cpu_pda[raw_smp_processor_id()].ex_imask & (EVT_IVG15 | EVT_IVG14)))
+ pr_notice(" Kernel interrupts masked off\n");
+ if ((fp->seqstat & SEQSTAT_EXCAUSE) == VEC_HWERR) {
+ pr_notice(" HWERRCAUSE: 0x%lx\n",
+ (fp->seqstat & SEQSTAT_HWERRCAUSE) >> 14);
+#ifdef EBIU_ERRMST
+ /* If the error was from the EBIU, print it out */
+ if (bfin_read_EBIU_ERRMST() & CORE_ERROR) {
+ pr_notice(" EBIU Error Reason : 0x%04x\n",
+ bfin_read_EBIU_ERRMST());
+ pr_notice(" EBIU Error Address : 0x%08x\n",
+ bfin_read_EBIU_ERRADD());
+ }
+#endif
+ }
+ pr_notice(" EXCAUSE : 0x%lx\n",
+ fp->seqstat & SEQSTAT_EXCAUSE);
+ for (i = 2; i <= 15 ; i++) {
+ if (fp->ipend & (1 << i)) {
+ if (i != 4) {
+ decode_address(buf, bfin_read32(EVT0 + 4*i));
+ pr_notice(" physical IVG%i asserted : %s\n", i, buf);
+ } else
+ pr_notice(" interrupts disabled\n");
+ }
+ }
+
+ /* if no interrupts are going off, don't print this out */
+ if (fp->ipend & ~0x3F) {
+ for (i = 0; i < (NR_IRQS - 1); i++) {
+ struct irq_desc *desc = irq_to_desc(i);
+ if (!in_atomic)
+ raw_spin_lock_irqsave(&desc->lock, flags);
+
+ action = desc->action;
+ if (!action)
+ goto unlock;
+
+ decode_address(buf, (unsigned int)action->handler);
+ pr_notice(" logical irq %3d mapped : %s", i, buf);
+ for (action = action->next; action; action = action->next) {
+ decode_address(buf, (unsigned int)action->handler);
+ pr_cont(", %s", buf);
+ }
+ pr_cont("\n");
+unlock:
+ if (!in_atomic)
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+ }
+ }
+
+ decode_address(buf, fp->rete);
+ pr_notice(" RETE: %s\n", buf);
+ decode_address(buf, fp->retn);
+ pr_notice(" RETN: %s\n", buf);
+ decode_address(buf, fp->retx);
+ pr_notice(" RETX: %s\n", buf);
+ decode_address(buf, fp->rets);
+ pr_notice(" RETS: %s\n", buf);
+ decode_address(buf, fp->pc);
+ pr_notice(" PC : %s\n", buf);
+
+ if (((long)fp->seqstat & SEQSTAT_EXCAUSE) &&
+ (((long)fp->seqstat & SEQSTAT_EXCAUSE) != VEC_HWERR)) {
+ decode_address(buf, cpu_pda[cpu].dcplb_fault_addr);
+ pr_notice("DCPLB_FAULT_ADDR: %s\n", buf);
+ decode_address(buf, cpu_pda[cpu].icplb_fault_addr);
+ pr_notice("ICPLB_FAULT_ADDR: %s\n", buf);
+ }
+
+ pr_notice("PROCESSOR STATE:\n");
+ pr_notice(" R0 : %08lx R1 : %08lx R2 : %08lx R3 : %08lx\n",
+ fp->r0, fp->r1, fp->r2, fp->r3);
+ pr_notice(" R4 : %08lx R5 : %08lx R6 : %08lx R7 : %08lx\n",
+ fp->r4, fp->r5, fp->r6, fp->r7);
+ pr_notice(" P0 : %08lx P1 : %08lx P2 : %08lx P3 : %08lx\n",
+ fp->p0, fp->p1, fp->p2, fp->p3);
+ pr_notice(" P4 : %08lx P5 : %08lx FP : %08lx SP : %08lx\n",
+ fp->p4, fp->p5, fp->fp, (long)fp);
+ pr_notice(" LB0: %08lx LT0: %08lx LC0: %08lx\n",
+ fp->lb0, fp->lt0, fp->lc0);
+ pr_notice(" LB1: %08lx LT1: %08lx LC1: %08lx\n",
+ fp->lb1, fp->lt1, fp->lc1);
+ pr_notice(" B0 : %08lx L0 : %08lx M0 : %08lx I0 : %08lx\n",
+ fp->b0, fp->l0, fp->m0, fp->i0);
+ pr_notice(" B1 : %08lx L1 : %08lx M1 : %08lx I1 : %08lx\n",
+ fp->b1, fp->l1, fp->m1, fp->i1);
+ pr_notice(" B2 : %08lx L2 : %08lx M2 : %08lx I2 : %08lx\n",
+ fp->b2, fp->l2, fp->m2, fp->i2);
+ pr_notice(" B3 : %08lx L3 : %08lx M3 : %08lx I3 : %08lx\n",
+ fp->b3, fp->l3, fp->m3, fp->i3);
+ pr_notice("A0.w: %08lx A0.x: %08lx A1.w: %08lx A1.x: %08lx\n",
+ fp->a0w, fp->a0x, fp->a1w, fp->a1x);
+
+ pr_notice("USP : %08lx ASTAT: %08lx\n",
+ rdusp(), fp->astat);
+
+ pr_notice("\n");
+}
diff --git a/arch/blackfin/kernel/traps.c b/arch/blackfin/kernel/traps.c
new file mode 100644
index 00000000..de5c2c3e
--- /dev/null
+++ b/arch/blackfin/kernel/traps.c
@@ -0,0 +1,582 @@
+/*
+ * Main exception handling logic.
+ *
+ * Copyright 2004-2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later
+ */
+
+#include <linux/bug.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <asm/traps.h>
+#include <asm/cplb.h>
+#include <asm/blackfin.h>
+#include <asm/irq_handler.h>
+#include <linux/irq.h>
+#include <asm/trace.h>
+#include <asm/fixed_code.h>
+#include <asm/pseudo_instructions.h>
+#include <asm/pda.h>
+
+#ifdef CONFIG_KGDB
+# include <linux/kgdb.h>
+
+# define CHK_DEBUGGER_TRAP() \
+ do { \
+ kgdb_handle_exception(trapnr, sig, info.si_code, fp); \
+ } while (0)
+# define CHK_DEBUGGER_TRAP_MAYBE() \
+ do { \
+ if (kgdb_connected) \
+ CHK_DEBUGGER_TRAP(); \
+ } while (0)
+#else
+# define CHK_DEBUGGER_TRAP() do { } while (0)
+# define CHK_DEBUGGER_TRAP_MAYBE() do { } while (0)
+#endif
+
+
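+/* verbose_printk() compiles away entirely when CONFIG_DEBUG_VERBOSE is off;
+ * the dead "if (0)" branch is kept so the format string and arguments are
+ * still type-checked by the compiler in that configuration.
+ */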
+#ifdef CONFIG_DEBUG_VERBOSE
+#define verbose_printk(fmt, arg...) \
+ printk(fmt, ##arg)
+#else
+#define verbose_printk(fmt, arg...) \
+ ({ if (0) printk(fmt, ##arg); 0; })
+#endif
+
+#if defined(CONFIG_DEBUG_MMRS) || defined(CONFIG_DEBUG_MMRS_MODULE)
+u32 last_seqstat;
+#ifdef CONFIG_DEBUG_MMRS_MODULE
+EXPORT_SYMBOL(last_seqstat);
+#endif
+#endif
+
+/* Initiate the event table handler */
+void __init trap_init(void)
+{
+ CSYNC();
+ bfin_write_EVT3(trap);
+ CSYNC();
+}
+
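+/* Bits 15:6 of IPEND (IVG15 down to IVTMR) are only set while an event is
+ * being serviced, so any of them being set in the saved regs means the
+ * exception came from kernel (event) context rather than user space.
+ */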
+static int kernel_mode_regs(struct pt_regs *regs)
+{
+ return regs->ipend & 0xffc0;
+}
+
+asmlinkage notrace void trap_c(struct pt_regs *fp)
+{
+#ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON
+ int j;
+#endif
+#ifdef CONFIG_BFIN_PSEUDODBG_INSNS
+ int opcode;
+#endif
+ unsigned int cpu = raw_smp_processor_id();
+ const char *strerror = NULL;
+ int sig = 0;
+ siginfo_t info;
+ unsigned long trapnr = fp->seqstat & SEQSTAT_EXCAUSE;
+
+ trace_buffer_save(j);
+#if defined(CONFIG_DEBUG_MMRS) || defined(CONFIG_DEBUG_MMRS_MODULE)
+ last_seqstat = (u32)fp->seqstat;
+#endif
+
+	/* Important - be very careful dereferencing pointers - it will lead to
+ * double faults if the stack has become corrupt
+ */
+
+ /* trap_c() will be called for exceptions. During exceptions
+ * processing, the pc value should be set with retx value.
+ * With this change we can cleanup some code in signal.c- TODO
+ */
+ fp->orig_pc = fp->retx;
+ /* printk("exception: 0x%x, ipend=%x, reti=%x, retx=%x\n",
+ trapnr, fp->ipend, fp->pc, fp->retx); */
+
+ /* send the appropriate signal to the user program */
+ switch (trapnr) {
+
+ /* This table works in conjunction with the one in ./mach-common/entry.S
+ * Some exceptions are handled there (in assembly, in exception space)
+ * Some are handled here, (in C, in interrupt space)
+ * Some, like CPLB, are handled in both, where the normal path is
+ * handled in assembly/exception space, and the error path is handled
+ * here
+ */
+
+ /* 0x00 - Linux Syscall, getting here is an error */
+ /* 0x01 - userspace gdb breakpoint, handled here */
+ case VEC_EXCPT01:
+ info.si_code = TRAP_ILLTRAP;
+ sig = SIGTRAP;
+ CHK_DEBUGGER_TRAP_MAYBE();
+ /* Check if this is a breakpoint in kernel space */
+ if (kernel_mode_regs(fp))
+ goto traps_done;
+ else
+ break;
+ /* 0x03 - User Defined, userspace stack overflow */
+ case VEC_EXCPT03:
+ info.si_code = SEGV_STACKFLOW;
+ sig = SIGSEGV;
+ strerror = KERN_NOTICE EXC_0x03(KERN_NOTICE);
+ CHK_DEBUGGER_TRAP_MAYBE();
+ break;
+ /* 0x02 - KGDB initial connection and break signal trap */
+ case VEC_EXCPT02:
+#ifdef CONFIG_KGDB
+ info.si_code = TRAP_ILLTRAP;
+ sig = SIGTRAP;
+ CHK_DEBUGGER_TRAP();
+ goto traps_done;
+#endif
+ /* 0x04 - User Defined */
+ /* 0x05 - User Defined */
+ /* 0x06 - User Defined */
+ /* 0x07 - User Defined */
+ /* 0x08 - User Defined */
+ /* 0x09 - User Defined */
+ /* 0x0A - User Defined */
+ /* 0x0B - User Defined */
+ /* 0x0C - User Defined */
+ /* 0x0D - User Defined */
+ /* 0x0E - User Defined */
+ /* 0x0F - User Defined */
+ /* If we got here, it is most likely that someone was trying to use a
+ * custom exception handler, and it is not actually installed properly
+ */
+ case VEC_EXCPT04 ... VEC_EXCPT15:
+ info.si_code = ILL_ILLPARAOP;
+ sig = SIGILL;
+ strerror = KERN_NOTICE EXC_0x04(KERN_NOTICE);
+ CHK_DEBUGGER_TRAP_MAYBE();
+ break;
+ /* 0x10 HW Single step, handled here */
+ case VEC_STEP:
+ info.si_code = TRAP_STEP;
+ sig = SIGTRAP;
+ CHK_DEBUGGER_TRAP_MAYBE();
+ /* Check if this is a single step in kernel space */
+ if (kernel_mode_regs(fp))
+ goto traps_done;
+ else
+ break;
+ /* 0x11 - Trace Buffer Full, handled here */
+ case VEC_OVFLOW:
+ info.si_code = TRAP_TRACEFLOW;
+ sig = SIGTRAP;
+ strerror = KERN_NOTICE EXC_0x11(KERN_NOTICE);
+ CHK_DEBUGGER_TRAP_MAYBE();
+ break;
+ /* 0x12 - Reserved, Caught by default */
+ /* 0x13 - Reserved, Caught by default */
+ /* 0x14 - Reserved, Caught by default */
+ /* 0x15 - Reserved, Caught by default */
+ /* 0x16 - Reserved, Caught by default */
+ /* 0x17 - Reserved, Caught by default */
+ /* 0x18 - Reserved, Caught by default */
+ /* 0x19 - Reserved, Caught by default */
+ /* 0x1A - Reserved, Caught by default */
+ /* 0x1B - Reserved, Caught by default */
+ /* 0x1C - Reserved, Caught by default */
+ /* 0x1D - Reserved, Caught by default */
+ /* 0x1E - Reserved, Caught by default */
+ /* 0x1F - Reserved, Caught by default */
+ /* 0x20 - Reserved, Caught by default */
+ /* 0x21 - Undefined Instruction, handled here */
+ case VEC_UNDEF_I:
+#ifdef CONFIG_BUG
+ if (kernel_mode_regs(fp)) {
+ switch (report_bug(fp->pc, fp)) {
+ case BUG_TRAP_TYPE_NONE:
+ break;
+ case BUG_TRAP_TYPE_WARN:
+ dump_bfin_trace_buffer();
+ fp->pc += 2;
+ goto traps_done;
+ case BUG_TRAP_TYPE_BUG:
+ /* call to panic() will dump trace, and it is
+ * off at this point, so it won't be clobbered
+ */
+ panic("BUG()");
+ }
+ }
+#endif
+#ifdef CONFIG_BFIN_PSEUDODBG_INSNS
+ /*
+	 * Support for the fake instructions; if handling the instruction
+	 * fails, just fall through to the normal illegal opcode failure.
+	 * These instructions are not supported inside the kernel.
+ */
+ if (!kernel_mode_regs(fp) && get_instruction(&opcode, (unsigned short *)fp->pc)) {
+ if (execute_pseudodbg_assert(fp, opcode))
+ goto traps_done;
+ if (execute_pseudodbg(fp, opcode))
+ goto traps_done;
+ }
+#endif
+ info.si_code = ILL_ILLOPC;
+ sig = SIGILL;
+ strerror = KERN_NOTICE EXC_0x21(KERN_NOTICE);
+ CHK_DEBUGGER_TRAP_MAYBE();
+ break;
+ /* 0x22 - Illegal Instruction Combination, handled here */
+ case VEC_ILGAL_I:
+ info.si_code = ILL_ILLPARAOP;
+ sig = SIGILL;
+ strerror = KERN_NOTICE EXC_0x22(KERN_NOTICE);
+ CHK_DEBUGGER_TRAP_MAYBE();
+ break;
+ /* 0x23 - Data CPLB protection violation, handled here */
+ case VEC_CPLB_VL:
+ info.si_code = ILL_CPLB_VI;
+ sig = SIGSEGV;
+ strerror = KERN_NOTICE EXC_0x23(KERN_NOTICE);
+ CHK_DEBUGGER_TRAP_MAYBE();
+ break;
+ /* 0x24 - Data access misaligned, handled here */
+ case VEC_MISALI_D:
+ info.si_code = BUS_ADRALN;
+ sig = SIGBUS;
+ strerror = KERN_NOTICE EXC_0x24(KERN_NOTICE);
+ CHK_DEBUGGER_TRAP_MAYBE();
+ break;
+ /* 0x25 - Unrecoverable Event, handled here */
+ case VEC_UNCOV:
+ info.si_code = ILL_ILLEXCPT;
+ sig = SIGILL;
+ strerror = KERN_NOTICE EXC_0x25(KERN_NOTICE);
+ CHK_DEBUGGER_TRAP_MAYBE();
+ break;
+ /* 0x26 - Data CPLB Miss, normal case is handled in _cplb_hdr,
+ error case is handled here */
+ case VEC_CPLB_M:
+ info.si_code = BUS_ADRALN;
+ sig = SIGBUS;
+ strerror = KERN_NOTICE EXC_0x26(KERN_NOTICE);
+ break;
+ /* 0x27 - Data CPLB Multiple Hits - Linux Trap Zero, handled here */
+ case VEC_CPLB_MHIT:
+ info.si_code = ILL_CPLB_MULHIT;
+ sig = SIGSEGV;
+#ifdef CONFIG_DEBUG_HUNT_FOR_ZERO
+ if (cpu_pda[cpu].dcplb_fault_addr < FIXED_CODE_START)
+ strerror = KERN_NOTICE "NULL pointer access\n";
+ else
+#endif
+ strerror = KERN_NOTICE EXC_0x27(KERN_NOTICE);
+ CHK_DEBUGGER_TRAP_MAYBE();
+ break;
+ /* 0x28 - Emulation Watchpoint, handled here */
+ case VEC_WATCH:
+ info.si_code = TRAP_WATCHPT;
+ sig = SIGTRAP;
+ pr_debug(EXC_0x28(KERN_DEBUG));
+ CHK_DEBUGGER_TRAP_MAYBE();
+ /* Check if this is a watchpoint in kernel space */
+ if (kernel_mode_regs(fp))
+ goto traps_done;
+ else
+ break;
+#ifdef CONFIG_BF535
+ /* 0x29 - Instruction fetch access error (535 only) */
+ case VEC_ISTRU_VL: /* ADSP-BF535 only (MH) */
+ info.si_code = BUS_OPFETCH;
+ sig = SIGBUS;
+ strerror = KERN_NOTICE "BF535: VEC_ISTRU_VL\n";
+ CHK_DEBUGGER_TRAP_MAYBE();
+ break;
+#else
+ /* 0x29 - Reserved, Caught by default */
+#endif
+ /* 0x2A - Instruction fetch misaligned, handled here */
+ case VEC_MISALI_I:
+ info.si_code = BUS_ADRALN;
+ sig = SIGBUS;
+ strerror = KERN_NOTICE EXC_0x2A(KERN_NOTICE);
+ CHK_DEBUGGER_TRAP_MAYBE();
+ break;
+ /* 0x2B - Instruction CPLB protection violation, handled here */
+ case VEC_CPLB_I_VL:
+ info.si_code = ILL_CPLB_VI;
+ sig = SIGBUS;
+ strerror = KERN_NOTICE EXC_0x2B(KERN_NOTICE);
+ CHK_DEBUGGER_TRAP_MAYBE();
+ break;
+ /* 0x2C - Instruction CPLB miss, handled in _cplb_hdr */
+ case VEC_CPLB_I_M:
+ info.si_code = ILL_CPLB_MISS;
+ sig = SIGBUS;
+ strerror = KERN_NOTICE EXC_0x2C(KERN_NOTICE);
+ break;
+ /* 0x2D - Instruction CPLB Multiple Hits, handled here */
+ case VEC_CPLB_I_MHIT:
+ info.si_code = ILL_CPLB_MULHIT;
+ sig = SIGSEGV;
+#ifdef CONFIG_DEBUG_HUNT_FOR_ZERO
+ if (cpu_pda[cpu].icplb_fault_addr < FIXED_CODE_START)
+ strerror = KERN_NOTICE "Jump to NULL address\n";
+ else
+#endif
+ strerror = KERN_NOTICE EXC_0x2D(KERN_NOTICE);
+ CHK_DEBUGGER_TRAP_MAYBE();
+ break;
+ /* 0x2E - Illegal use of Supervisor Resource, handled here */
+ case VEC_ILL_RES:
+ info.si_code = ILL_PRVOPC;
+ sig = SIGILL;
+ strerror = KERN_NOTICE EXC_0x2E(KERN_NOTICE);
+ CHK_DEBUGGER_TRAP_MAYBE();
+ break;
+ /* 0x2F - Reserved, Caught by default */
+ /* 0x30 - Reserved, Caught by default */
+ /* 0x31 - Reserved, Caught by default */
+ /* 0x32 - Reserved, Caught by default */
+ /* 0x33 - Reserved, Caught by default */
+ /* 0x34 - Reserved, Caught by default */
+ /* 0x35 - Reserved, Caught by default */
+ /* 0x36 - Reserved, Caught by default */
+ /* 0x37 - Reserved, Caught by default */
+ /* 0x38 - Reserved, Caught by default */
+ /* 0x39 - Reserved, Caught by default */
+ /* 0x3A - Reserved, Caught by default */
+ /* 0x3B - Reserved, Caught by default */
+ /* 0x3C - Reserved, Caught by default */
+ /* 0x3D - Reserved, Caught by default */
+ /* 0x3E - Reserved, Caught by default */
+ /* 0x3F - Reserved, Caught by default */
+ case VEC_HWERR:
+ info.si_code = BUS_ADRALN;
+ sig = SIGBUS;
+ switch (fp->seqstat & SEQSTAT_HWERRCAUSE) {
+ /* System MMR Error */
+ case (SEQSTAT_HWERRCAUSE_SYSTEM_MMR):
+ info.si_code = BUS_ADRALN;
+ sig = SIGBUS;
+ strerror = KERN_NOTICE HWC_x2(KERN_NOTICE);
+ break;
+ /* External Memory Addressing Error */
+ case (SEQSTAT_HWERRCAUSE_EXTERN_ADDR):
+ if (ANOMALY_05000310) {
+ static unsigned long anomaly_rets;
+
+ if ((fp->pc >= (L1_CODE_START + L1_CODE_LENGTH - 512)) &&
+ (fp->pc < (L1_CODE_START + L1_CODE_LENGTH))) {
+ /*
+ * A false hardware error will happen while fetching at
+ * the L1 instruction SRAM boundary. Ignore it.
+ */
+ anomaly_rets = fp->rets;
+ goto traps_done;
+ } else if (fp->rets == anomaly_rets) {
+ /*
+ * While boundary code returns to a function, at the ret
+					 * When code at the boundary returns to a function, another
+					 * false hardware error can occur at the return point
+					 * (observed in testing). Ignore it too.
+ goto traps_done;
+ } else if ((fp->rets >= (L1_CODE_START + L1_CODE_LENGTH - 512)) &&
+ (fp->rets < (L1_CODE_START + L1_CODE_LENGTH))) {
+ /*
+					 * If code at the boundary calls a function, a false
+					 * hardware error may likewise happen at the entry point
+					 * (observed in testing).
+ * Ignore it too.
+ */
+ goto traps_done;
+ } else
+ anomaly_rets = 0;
+ }
+
+ info.si_code = BUS_ADRERR;
+ sig = SIGBUS;
+ strerror = KERN_NOTICE HWC_x3(KERN_NOTICE);
+ break;
+ /* Performance Monitor Overflow */
+ case (SEQSTAT_HWERRCAUSE_PERF_FLOW):
+ strerror = KERN_NOTICE HWC_x12(KERN_NOTICE);
+ break;
+ /* RAISE 5 instruction */
+ case (SEQSTAT_HWERRCAUSE_RAISE_5):
+ printk(KERN_NOTICE HWC_x18(KERN_NOTICE));
+ break;
+ default: /* Reserved */
+ printk(KERN_NOTICE HWC_default(KERN_NOTICE));
+ break;
+ }
+ CHK_DEBUGGER_TRAP_MAYBE();
+ break;
+ /*
+	 * We should be handling all known exception types above;
+	 * if we get here, we hit a reserved one, so panic
+ */
+ default:
+ info.si_code = ILL_ILLPARAOP;
+ sig = SIGILL;
+ verbose_printk(KERN_EMERG "Caught Unhandled Exception, code = %08lx\n",
+ (fp->seqstat & SEQSTAT_EXCAUSE));
+ CHK_DEBUGGER_TRAP_MAYBE();
+ break;
+ }
+
+ BUG_ON(sig == 0);
+
+	/* If the fault was caused by a kernel thread or an interrupt handler,
+	 * we will panic the kernel, so the system reboots.
+ */
+ if (kernel_mode_regs(fp) || (current && !current->mm)) {
+ console_verbose();
+ oops_in_progress = 1;
+ }
+
+ if (sig != SIGTRAP) {
+ if (strerror)
+ verbose_printk(strerror);
+
+ dump_bfin_process(fp);
+ dump_bfin_mem(fp);
+ show_regs(fp);
+
+ /* Print out the trace buffer if it makes sense */
+#ifndef CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE
+ if (trapnr == VEC_CPLB_I_M || trapnr == VEC_CPLB_M)
+ verbose_printk(KERN_NOTICE "No trace since you do not have "
+ "CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE enabled\n\n");
+ else
+#endif
+ dump_bfin_trace_buffer();
+
+ if (oops_in_progress) {
+ /* Dump the current kernel stack */
+ verbose_printk(KERN_NOTICE "Kernel Stack\n");
+ show_stack(current, NULL);
+ print_modules();
+#ifndef CONFIG_ACCESS_CHECK
+ verbose_printk(KERN_EMERG "Please turn on "
+ "CONFIG_ACCESS_CHECK\n");
+#endif
+ panic("Kernel exception");
+ } else {
+#ifdef CONFIG_DEBUG_VERBOSE
+ unsigned long *stack;
+ /* Dump the user space stack */
+ stack = (unsigned long *)rdusp();
+ verbose_printk(KERN_NOTICE "Userspace Stack\n");
+ show_stack(NULL, stack);
+#endif
+ }
+ }
+
+#ifdef CONFIG_IPIPE
+ if (!ipipe_trap_notify(fp->seqstat & 0x3f, fp))
+#endif
+ {
+ info.si_signo = sig;
+ info.si_errno = 0;
+ switch (trapnr) {
+ case VEC_CPLB_VL:
+ case VEC_MISALI_D:
+ case VEC_CPLB_M:
+ case VEC_CPLB_MHIT:
+ info.si_addr = (void __user *)cpu_pda[cpu].dcplb_fault_addr;
+ break;
+ default:
+ info.si_addr = (void __user *)fp->pc;
+ break;
+ }
+ force_sig_info(sig, &info, current);
+ }
+
+ if ((ANOMALY_05000461 && trapnr == VEC_HWERR && !access_ok(VERIFY_READ, fp->pc, 8)) ||
+ (ANOMALY_05000281 && trapnr == VEC_HWERR) ||
+ (ANOMALY_05000189 && (trapnr == VEC_CPLB_I_VL || trapnr == VEC_CPLB_VL)))
+ fp->pc = SAFE_USER_INSTRUCTION;
+
+ traps_done:
+ trace_buffer_restore(j);
+}
+
+asmlinkage void double_fault_c(struct pt_regs *fp)
+{
+#ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON
+ int j;
+ trace_buffer_save(j);
+#endif
+
+ console_verbose();
+ oops_in_progress = 1;
+#ifdef CONFIG_DEBUG_VERBOSE
+ printk(KERN_EMERG "Double Fault\n");
+#ifdef CONFIG_DEBUG_DOUBLEFAULT_PRINT
+ if (((long)fp->seqstat & SEQSTAT_EXCAUSE) == VEC_UNCOV) {
+ unsigned int cpu = raw_smp_processor_id();
+ char buf[150];
+ decode_address(buf, cpu_pda[cpu].retx_doublefault);
+ printk(KERN_EMERG "While handling exception (EXCAUSE = 0x%x) at %s:\n",
+ (unsigned int)cpu_pda[cpu].seqstat_doublefault & SEQSTAT_EXCAUSE, buf);
+ decode_address(buf, cpu_pda[cpu].dcplb_doublefault_addr);
+ printk(KERN_NOTICE " DCPLB_FAULT_ADDR: %s\n", buf);
+ decode_address(buf, cpu_pda[cpu].icplb_doublefault_addr);
+ printk(KERN_NOTICE " ICPLB_FAULT_ADDR: %s\n", buf);
+
+ decode_address(buf, fp->retx);
+ printk(KERN_NOTICE "The instruction at %s caused a double exception\n", buf);
+ } else
+#endif
+ {
+ dump_bfin_process(fp);
+ dump_bfin_mem(fp);
+ show_regs(fp);
+ dump_bfin_trace_buffer();
+ }
+#endif
+ panic("Double Fault - unrecoverable event");
+
+}
+
+
+void panic_cplb_error(int cplb_panic, struct pt_regs *fp)
+{
+ switch (cplb_panic) {
+ case CPLB_NO_UNLOCKED:
+ printk(KERN_EMERG "All CPLBs are locked\n");
+ break;
+ case CPLB_PROT_VIOL:
+ return;
+ case CPLB_NO_ADDR_MATCH:
+ return;
+ case CPLB_UNKNOWN_ERR:
+ printk(KERN_EMERG "Unknown CPLB Exception\n");
+ break;
+ }
+
+ oops_in_progress = 1;
+
+ dump_bfin_process(fp);
+ dump_bfin_mem(fp);
+ show_regs(fp);
+ dump_stack();
+ panic("Unrecoverable event");
+}
+
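+/* BUG() on Blackfin is implemented as an illegal opcode (BFIN_BUG_OPCODE)
+ * planted in the text; report_bug() calls back here so the trap is only
+ * treated as a BUG/WARN if the faulting address really holds that opcode.
+ */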
+#ifdef CONFIG_BUG
+int is_valid_bugaddr(unsigned long addr)
+{
+ unsigned int opcode;
+
+ if (!get_instruction(&opcode, (unsigned short *)addr))
+ return 0;
+
+ return opcode == BFIN_BUG_OPCODE;
+}
+#endif
+
+/* stub this out */
+#ifndef CONFIG_DEBUG_VERBOSE
+void show_regs(struct pt_regs *fp)
+{
+
+}
+#endif
diff --git a/arch/blackfin/kernel/vmlinux.lds.S b/arch/blackfin/kernel/vmlinux.lds.S
new file mode 100644
index 00000000..ba35864b
--- /dev/null
+++ b/arch/blackfin/kernel/vmlinux.lds.S
@@ -0,0 +1,267 @@
+/*
+ * Copyright 2004-2009 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later
+ */
+
+#include <asm-generic/vmlinux.lds.h>
+#include <asm/mem_map.h>
+#include <asm/page.h>
+#include <asm/thread_info.h>
+
+OUTPUT_FORMAT("elf32-bfin")
+ENTRY(__start)
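+/* Blackfin is little-endian, so the 32-bit _jiffies can simply alias the
+ * low word of _jiffies_64.
+ */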
+_jiffies = _jiffies_64;
+
+SECTIONS
+{
+#ifdef CONFIG_RAMKERNEL
+ . = CONFIG_BOOT_LOAD;
+#else
+ . = CONFIG_ROM_BASE;
+#endif
+
+	/* Neither the text, ro_data, nor bss sections need to be aligned,
+	 * so pack them back to back
+ */
+ .text :
+ {
+ __text = .;
+ _text = .;
+ __stext = .;
+ TEXT_TEXT
+#ifndef CONFIG_SCHEDULE_L1
+ SCHED_TEXT
+#endif
+ LOCK_TEXT
+ IRQENTRY_TEXT
+ KPROBES_TEXT
+#ifdef CONFIG_ROMKERNEL
+ __sinittext = .;
+ INIT_TEXT
+ __einittext = .;
+ EXIT_TEXT
+#endif
+ *(.text.*)
+ *(.fixup)
+
+#if !L1_CODE_LENGTH
+ *(.l1.text)
+#endif
+ __etext = .;
+ }
+
+ EXCEPTION_TABLE(4)
+ NOTES
+
+	/* Just in case the first access to the read-only data is a 32-bit access */
+ RO_DATA(4)
+ __rodata_end = .;
+
+#ifdef CONFIG_ROMKERNEL
+ . = CONFIG_BOOT_LOAD;
+ .bss : AT(__rodata_end)
+#else
+ .bss :
+#endif
+ {
+ . = ALIGN(4);
+ ___bss_start = .;
+ *(.bss .bss.*)
+ *(COMMON)
+#if !L1_DATA_A_LENGTH
+ *(.l1.bss)
+#endif
+#if !L1_DATA_B_LENGTH
+ *(.l1.bss.B)
+#endif
+ . = ALIGN(4);
+ ___bss_stop = .;
+ }
+
+#if defined(CONFIG_ROMKERNEL)
+ .data : AT(LOADADDR(.bss) + SIZEOF(.bss))
+#else
+ .data :
+#endif
+ {
+ __sdata = .;
+ /* This gets done first, so the glob doesn't suck it in */
+ CACHELINE_ALIGNED_DATA(32)
+
+#if !L1_DATA_A_LENGTH
+ . = ALIGN(32);
+ *(.data_l1.cacheline_aligned)
+ *(.l1.data)
+#endif
+#if !L1_DATA_B_LENGTH
+ *(.l1.data.B)
+#endif
+#if !L2_LENGTH
+ . = ALIGN(32);
+ *(.data_l2.cacheline_aligned)
+ *(.l2.data)
+#endif
+
+ DATA_DATA
+ CONSTRUCTORS
+
+ INIT_TASK_DATA(THREAD_SIZE)
+
+ __edata = .;
+ }
+ __data_lma = LOADADDR(.data);
+ __data_len = SIZEOF(.data);
+
+ /* The init section should be last, so when we free it, it goes into
+ * the general memory pool, and (hopefully) will decrease fragmentation
+ * a tiny bit. The init section has a _requirement_ that it be
+ * PAGE_SIZE aligned
+ */
+ . = ALIGN(PAGE_SIZE);
+ ___init_begin = .;
+
+#ifdef CONFIG_RAMKERNEL
+ INIT_TEXT_SECTION(PAGE_SIZE)
+
+ /* We have to discard exit text and such at runtime, not link time, to
+ * handle embedded cross-section references (alt instructions, bug
+ * table, eh_frame, etc...). We need all of our .text up front and
+ * .data after it for PCREL call issues.
+ */
+ .exit.text :
+ {
+ EXIT_TEXT
+ }
+
+ . = ALIGN(16);
+ INIT_DATA_SECTION(16)
+ PERCPU_SECTION(32)
+
+ .exit.data :
+ {
+ EXIT_DATA
+ }
+
+ .text_l1 L1_CODE_START : AT(LOADADDR(.exit.data) + SIZEOF(.exit.data))
+#else
+ .init.data : AT(__data_lma + __data_len)
+ {
+ __sinitdata = .;
+ INIT_DATA
+ INIT_SETUP(16)
+ INIT_CALLS
+ CON_INITCALL
+ SECURITY_INITCALL
+ INIT_RAM_FS
+
+ . = ALIGN(PAGE_SIZE);
+ ___per_cpu_load = .;
+ PERCPU_INPUT(32)
+
+ EXIT_DATA
+ __einitdata = .;
+ }
+ __init_data_lma = LOADADDR(.init.data);
+ __init_data_len = SIZEOF(.init.data);
+ __init_data_end = .;
+
+ .text_l1 L1_CODE_START : AT(__init_data_lma + __init_data_len)
+#endif
+ {
+ . = ALIGN(4);
+ __stext_l1 = .;
+ *(.l1.text.head)
+ *(.l1.text)
+#ifdef CONFIG_SCHEDULE_L1
+ SCHED_TEXT
+#endif
+ . = ALIGN(4);
+ __etext_l1 = .;
+ }
+ __text_l1_lma = LOADADDR(.text_l1);
+ __text_l1_len = SIZEOF(.text_l1);
+ ASSERT (__text_l1_len <= L1_CODE_LENGTH, "L1 text overflow!")
+
+ .data_l1 L1_DATA_A_START : AT(__text_l1_lma + __text_l1_len)
+ {
+ . = ALIGN(4);
+ __sdata_l1 = .;
+ *(.l1.data)
+ __edata_l1 = .;
+
+ . = ALIGN(32);
+ *(.data_l1.cacheline_aligned)
+
+ . = ALIGN(4);
+ __sbss_l1 = .;
+ *(.l1.bss)
+ . = ALIGN(4);
+ __ebss_l1 = .;
+ }
+ __data_l1_lma = LOADADDR(.data_l1);
+ __data_l1_len = SIZEOF(.data_l1);
+ ASSERT (__data_l1_len <= L1_DATA_A_LENGTH, "L1 data A overflow!")
+
+ .data_b_l1 L1_DATA_B_START : AT(__data_l1_lma + __data_l1_len)
+ {
+ . = ALIGN(4);
+ __sdata_b_l1 = .;
+ *(.l1.data.B)
+ __edata_b_l1 = .;
+
+ . = ALIGN(4);
+ __sbss_b_l1 = .;
+ *(.l1.bss.B)
+ . = ALIGN(4);
+ __ebss_b_l1 = .;
+ }
+ __data_b_l1_lma = LOADADDR(.data_b_l1);
+ __data_b_l1_len = SIZEOF(.data_b_l1);
+ ASSERT (__data_b_l1_len <= L1_DATA_B_LENGTH, "L1 data B overflow!")
+
+ .text_data_l2 L2_START : AT(__data_b_l1_lma + __data_b_l1_len)
+ {
+ . = ALIGN(4);
+ __stext_l2 = .;
+ *(.l2.text)
+ . = ALIGN(4);
+ __etext_l2 = .;
+
+ . = ALIGN(4);
+ __sdata_l2 = .;
+ *(.l2.data)
+ __edata_l2 = .;
+
+ . = ALIGN(32);
+ *(.data_l2.cacheline_aligned)
+
+ . = ALIGN(4);
+ __sbss_l2 = .;
+ *(.l2.bss)
+ . = ALIGN(4);
+ __ebss_l2 = .;
+ }
+ __l2_lma = LOADADDR(.text_data_l2);
+ __l2_len = SIZEOF(.text_data_l2);
+ ASSERT (__l2_len <= L2_LENGTH, "L2 overflow!")
+
+ /* Force trailing alignment of our init section so that when we
+ * free our init memory, we don't leave behind a partial page.
+ */
+#ifdef CONFIG_RAMKERNEL
+ . = __l2_lma + __l2_len;
+#else
+ . = __init_data_end;
+#endif
+ . = ALIGN(PAGE_SIZE);
+ ___init_end = .;
+
+	__end = .;
+
+ STABS_DEBUG
+
+ DWARF_DEBUG
+
+ DISCARDS
+}