Diffstat (limited to 'arch/sh/kernel')
-rw-r--r--  arch/sh/kernel/Makefile  51
-rw-r--r--  arch/sh/kernel/asm-offsets.c  61
-rw-r--r--  arch/sh/kernel/cpu/Makefile  21
-rw-r--r--  arch/sh/kernel/cpu/adc.c  36
-rw-r--r--  arch/sh/kernel/cpu/clock-cpg.c  74
-rw-r--r--  arch/sh/kernel/cpu/clock.c  51
-rw-r--r--  arch/sh/kernel/cpu/fpu.c  86
-rw-r--r--  arch/sh/kernel/cpu/init.c  365
-rw-r--r--  arch/sh/kernel/cpu/irq/Makefile  6
-rw-r--r--  arch/sh/kernel/cpu/irq/imask.c  84
-rw-r--r--  arch/sh/kernel/cpu/irq/intc-sh5.c  197
-rw-r--r--  arch/sh/kernel/cpu/irq/ipr.c  83
-rw-r--r--  arch/sh/kernel/cpu/proc.c  148
-rw-r--r--  arch/sh/kernel/cpu/sh2/Makefile  7
-rw-r--r--  arch/sh/kernel/cpu/sh2/clock-sh7619.c  77
-rw-r--r--  arch/sh/kernel/cpu/sh2/entry.S  321
-rw-r--r--  arch/sh/kernel/cpu/sh2/ex.S  47
-rw-r--r--  arch/sh/kernel/cpu/sh2/probe.c  33
-rw-r--r--  arch/sh/kernel/cpu/sh2/setup-sh7619.c  232
-rw-r--r--  arch/sh/kernel/cpu/sh2a/Makefile  20
-rw-r--r--  arch/sh/kernel/cpu/sh2a/clock-sh7201.c  85
-rw-r--r--  arch/sh/kernel/cpu/sh2a/clock-sh7203.c  81
-rw-r--r--  arch/sh/kernel/cpu/sh2a/clock-sh7206.c  83
-rw-r--r--  arch/sh/kernel/cpu/sh2a/entry.S  250
-rw-r--r--  arch/sh/kernel/cpu/sh2a/ex.S  73
-rw-r--r--  arch/sh/kernel/cpu/sh2a/fpu.c  575
-rw-r--r--  arch/sh/kernel/cpu/sh2a/opcode_helper.c  54
-rw-r--r--  arch/sh/kernel/cpu/sh2a/pinmux-sh7203.c  1597
-rw-r--r--  arch/sh/kernel/cpu/sh2a/probe.c  54
-rw-r--r--  arch/sh/kernel/cpu/sh2a/setup-mxg.c  248
-rw-r--r--  arch/sh/kernel/cpu/sh2a/setup-sh7201.c  470
-rw-r--r--  arch/sh/kernel/cpu/sh2a/setup-sh7203.c  434
-rw-r--r--  arch/sh/kernel/cpu/sh2a/setup-sh7206.c  395
-rw-r--r--  arch/sh/kernel/cpu/sh3/Makefile  33
-rw-r--r--  arch/sh/kernel/cpu/sh3/clock-sh3.c  89
-rw-r--r--  arch/sh/kernel/cpu/sh3/clock-sh7705.c  84
-rw-r--r--  arch/sh/kernel/cpu/sh3/clock-sh7706.c  84
-rw-r--r--  arch/sh/kernel/cpu/sh3/clock-sh7709.c  85
-rw-r--r--  arch/sh/kernel/cpu/sh3/clock-sh7710.c  78
-rw-r--r--  arch/sh/kernel/cpu/sh3/clock-sh7712.c  71
-rw-r--r--  arch/sh/kernel/cpu/sh3/entry.S  512
-rw-r--r--  arch/sh/kernel/cpu/sh3/ex.S  59
-rw-r--r--  arch/sh/kernel/cpu/sh3/pinmux-sh7720.c  1242
-rw-r--r--  arch/sh/kernel/cpu/sh3/probe.c  111
-rw-r--r--  arch/sh/kernel/cpu/sh3/serial-sh770x.c  33
-rw-r--r--  arch/sh/kernel/cpu/sh3/serial-sh7710.c  20
-rw-r--r--  arch/sh/kernel/cpu/sh3/serial-sh7720.c  37
-rw-r--r--  arch/sh/kernel/cpu/sh3/setup-sh3.c  71
-rw-r--r--  arch/sh/kernel/cpu/sh3/setup-sh7705.c  253
-rw-r--r--  arch/sh/kernel/cpu/sh3/setup-sh770x.c  311
-rw-r--r--  arch/sh/kernel/cpu/sh3/setup-sh7710.c  252
-rw-r--r--  arch/sh/kernel/cpu/sh3/setup-sh7720.c  470
-rw-r--r--  arch/sh/kernel/cpu/sh3/swsusp.S  147
-rw-r--r--  arch/sh/kernel/cpu/sh4/Makefile  36
-rw-r--r--  arch/sh/kernel/cpu/sh4/clock-sh4-202.c  177
-rw-r--r--  arch/sh/kernel/cpu/sh4/clock-sh4.c  80
-rw-r--r--  arch/sh/kernel/cpu/sh4/fpu.c  429
-rw-r--r--  arch/sh/kernel/cpu/sh4/perf_event.c  268
-rw-r--r--  arch/sh/kernel/cpu/sh4/probe.c  260
-rw-r--r--  arch/sh/kernel/cpu/sh4/setup-sh4-202.c  200
-rw-r--r--  arch/sh/kernel/cpu/sh4/setup-sh7750.c  459
-rw-r--r--  arch/sh/kernel/cpu/sh4/setup-sh7760.c  336
-rw-r--r--  arch/sh/kernel/cpu/sh4/softfloat.c  930
-rw-r--r--  arch/sh/kernel/cpu/sh4/sq.c  416
-rw-r--r--  arch/sh/kernel/cpu/sh4a/Makefile  49
-rw-r--r--  arch/sh/kernel/cpu/sh4a/clock-sh7343.c  289
-rw-r--r--  arch/sh/kernel/cpu/sh4a/clock-sh7366.c  282
-rw-r--r--  arch/sh/kernel/cpu/sh4a/clock-sh7722.c  267
-rw-r--r--  arch/sh/kernel/cpu/sh4a/clock-sh7723.c  317
-rw-r--r--  arch/sh/kernel/cpu/sh4a/clock-sh7724.c  381
-rw-r--r--  arch/sh/kernel/cpu/sh4a/clock-sh7757.c  155
-rw-r--r--  arch/sh/kernel/cpu/sh4a/clock-sh7763.c  119
-rw-r--r--  arch/sh/kernel/cpu/sh4a/clock-sh7770.c  73
-rw-r--r--  arch/sh/kernel/cpu/sh4a/clock-sh7780.c  125
-rw-r--r--  arch/sh/kernel/cpu/sh4a/clock-sh7785.c  181
-rw-r--r--  arch/sh/kernel/cpu/sh4a/clock-sh7786.c  200
-rw-r--r--  arch/sh/kernel/cpu/sh4a/clock-shx3.c  155
-rw-r--r--  arch/sh/kernel/cpu/sh4a/intc-shx3.c  34
-rw-r--r--  arch/sh/kernel/cpu/sh4a/perf_event.c  302
-rw-r--r--  arch/sh/kernel/cpu/sh4a/pinmux-sh7722.c  1784
-rw-r--r--  arch/sh/kernel/cpu/sh4a/pinmux-sh7723.c  1909
-rw-r--r--  arch/sh/kernel/cpu/sh4a/pinmux-sh7724.c  2230
-rw-r--r--  arch/sh/kernel/cpu/sh4a/pinmux-sh7757.c  2287
-rw-r--r--  arch/sh/kernel/cpu/sh4a/pinmux-sh7785.c  1310
-rw-r--r--  arch/sh/kernel/cpu/sh4a/pinmux-sh7786.c  843
-rw-r--r--  arch/sh/kernel/cpu/sh4a/pinmux-shx3.c  587
-rw-r--r--  arch/sh/kernel/cpu/sh4a/serial-sh7722.c  23
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7343.c  509
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7366.c  463
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7722.c  734
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7723.c  773
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7724.c  1418
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7757.c  1255
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7763.c  572
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7770.c  736
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7780.c  626
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7785.c  715
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7786.c  1056
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-shx3.c  508
-rw-r--r--  arch/sh/kernel/cpu/sh4a/smp-shx3.c  166
-rw-r--r--  arch/sh/kernel/cpu/sh4a/ubc.c  133
-rw-r--r--  arch/sh/kernel/cpu/sh5/Makefile  15
-rw-r--r--  arch/sh/kernel/cpu/sh5/clock-sh5.c  79
-rw-r--r--  arch/sh/kernel/cpu/sh5/entry.S  2068
-rw-r--r--  arch/sh/kernel/cpu/sh5/fpu.c  114
-rw-r--r--  arch/sh/kernel/cpu/sh5/probe.c  75
-rw-r--r--  arch/sh/kernel/cpu/sh5/setup-sh5.c  184
-rw-r--r--  arch/sh/kernel/cpu/sh5/switchto.S  198
-rw-r--r--  arch/sh/kernel/cpu/sh5/unwind.c  326
-rw-r--r--  arch/sh/kernel/cpu/shmobile/Makefile  7
-rw-r--r--  arch/sh/kernel/cpu/shmobile/cpuidle.c  120
-rw-r--r--  arch/sh/kernel/cpu/shmobile/pm.c  157
-rw-r--r--  arch/sh/kernel/cpu/shmobile/sleep.S  405
-rw-r--r--  arch/sh/kernel/cpufreq.c  201
-rw-r--r--  arch/sh/kernel/crash_dump.c  45
-rw-r--r--  arch/sh/kernel/debugtraps.S  41
-rw-r--r--  arch/sh/kernel/disassemble.c  573
-rw-r--r--  arch/sh/kernel/dma-nommu.c  82
-rw-r--r--  arch/sh/kernel/dumpstack.c  108
-rw-r--r--  arch/sh/kernel/dwarf.c  1220
-rw-r--r--  arch/sh/kernel/entry-common.S  375
-rw-r--r--  arch/sh/kernel/ftrace.c  401
-rw-r--r--  arch/sh/kernel/head_32.S  355
-rw-r--r--  arch/sh/kernel/head_64.S  357
-rw-r--r--  arch/sh/kernel/hw_breakpoint.c  421
-rw-r--r--  arch/sh/kernel/idle.c  162
-rw-r--r--  arch/sh/kernel/init_task.c  30
-rw-r--r--  arch/sh/kernel/io.c  114
-rw-r--r--  arch/sh/kernel/io_trapped.c  296
-rw-r--r--  arch/sh/kernel/iomap.c  165
-rw-r--r--  arch/sh/kernel/ioport.c  43
-rw-r--r--  arch/sh/kernel/irq.c  284
-rw-r--r--  arch/sh/kernel/irq_32.c  57
-rw-r--r--  arch/sh/kernel/irq_64.c  51
-rw-r--r--  arch/sh/kernel/kdebugfs.c  16
-rw-r--r--  arch/sh/kernel/kgdb.c  330
-rw-r--r--  arch/sh/kernel/kprobes.c  585
-rw-r--r--  arch/sh/kernel/localtimer.c  66
-rw-r--r--  arch/sh/kernel/machine_kexec.c  207
-rw-r--r--  arch/sh/kernel/machvec.c  127
-rw-r--r--  arch/sh/kernel/module.c  126
-rw-r--r--  arch/sh/kernel/nmi_debug.c  77
-rw-r--r--  arch/sh/kernel/perf_callchain.c  41
-rw-r--r--  arch/sh/kernel/perf_event.c  400
-rw-r--r--  arch/sh/kernel/process.c  103
-rw-r--r--  arch/sh/kernel/process_32.c  338
-rw-r--r--  arch/sh/kernel/process_64.c  549
-rw-r--r--  arch/sh/kernel/ptrace.c  33
-rw-r--r--  arch/sh/kernel/ptrace_32.c  539
-rw-r--r--  arch/sh/kernel/ptrace_64.c  593
-rw-r--r--  arch/sh/kernel/reboot.c  102
-rw-r--r--  arch/sh/kernel/relocate_kernel.S  232
-rw-r--r--  arch/sh/kernel/return_address.c  59
-rw-r--r--  arch/sh/kernel/setup.c  324
-rw-r--r--  arch/sh/kernel/sh_bios.c  172
-rw-r--r--  arch/sh/kernel/sh_ksyms_32.c  93
-rw-r--r--  arch/sh/kernel/sh_ksyms_64.c  56
-rw-r--r--  arch/sh/kernel/signal_32.c  639
-rw-r--r--  arch/sh/kernel/signal_64.c  743
-rw-r--r--  arch/sh/kernel/smp.c  486
-rw-r--r--  arch/sh/kernel/stacktrace.c  92
-rw-r--r--  arch/sh/kernel/swsusp.c  38
-rw-r--r--  arch/sh/kernel/sys_sh.c  95
-rw-r--r--  arch/sh/kernel/sys_sh32.c  86
-rw-r--r--  arch/sh/kernel/sys_sh64.c  50
-rw-r--r--  arch/sh/kernel/syscalls_32.S  386
-rw-r--r--  arch/sh/kernel/syscalls_64.S  406
-rw-r--r--  arch/sh/kernel/time.c  115
-rw-r--r--  arch/sh/kernel/topology.c  82
-rw-r--r--  arch/sh/kernel/traps.c  116
-rw-r--r--  arch/sh/kernel/traps_32.c  925
-rw-r--r--  arch/sh/kernel/traps_64.c  959
-rw-r--r--  arch/sh/kernel/unwinder.c  164
-rw-r--r--  arch/sh/kernel/vmlinux.lds.S  88
-rw-r--r--  arch/sh/kernel/vsyscall/Makefile  36
-rw-r--r--  arch/sh/kernel/vsyscall/vsyscall-note.S  25
-rw-r--r--  arch/sh/kernel/vsyscall/vsyscall-sigreturn.S  74
-rw-r--r--  arch/sh/kernel/vsyscall/vsyscall-syscall.S  10
-rw-r--r--  arch/sh/kernel/vsyscall/vsyscall-trapa.S  39
-rw-r--r--  arch/sh/kernel/vsyscall/vsyscall.c  109
-rw-r--r--  arch/sh/kernel/vsyscall/vsyscall.lds.S  84
181 files changed, 57742 insertions, 0 deletions
diff --git a/arch/sh/kernel/Makefile b/arch/sh/kernel/Makefile
new file mode 100644
index 00000000..77f7ae1d
--- /dev/null
+++ b/arch/sh/kernel/Makefile
@@ -0,0 +1,51 @@
+#
+# Makefile for the Linux/SuperH kernel.
+#
+
+extra-y := head_$(BITS).o init_task.o vmlinux.lds
+
+ifdef CONFIG_FUNCTION_TRACER
+# Do not profile debug and lowlevel utilities
+CFLAGS_REMOVE_ftrace.o = -pg
+endif
+
+CFLAGS_REMOVE_return_address.o = -pg
+
+obj-y := debugtraps.o dma-nommu.o dumpstack.o \
+ idle.o io.o irq.o irq_$(BITS).o kdebugfs.o \
+ machvec.o nmi_debug.o process.o \
+ process_$(BITS).o ptrace.o ptrace_$(BITS).o \
+ reboot.o return_address.o \
+ setup.o signal_$(BITS).o sys_sh.o sys_sh$(BITS).o \
+ syscalls_$(BITS).o time.o topology.o traps.o \
+ traps_$(BITS).o unwinder.o
+
+ifndef CONFIG_GENERIC_IOMAP
+obj-y += iomap.o
+obj-$(CONFIG_HAS_IOPORT) += ioport.o
+endif
+
+obj-y += cpu/
+obj-$(CONFIG_VSYSCALL) += vsyscall/
+obj-$(CONFIG_SMP) += smp.o
+obj-$(CONFIG_SH_STANDARD_BIOS) += sh_bios.o
+obj-$(CONFIG_KGDB) += kgdb.o
+obj-$(CONFIG_SH_CPU_FREQ) += cpufreq.o
+obj-$(CONFIG_MODULES) += sh_ksyms_$(BITS).o module.o
+obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
+obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
+obj-$(CONFIG_STACKTRACE) += stacktrace.o
+obj-$(CONFIG_IO_TRAPPED) += io_trapped.o
+obj-$(CONFIG_KPROBES) += kprobes.o
+obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
+obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o
+obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
+obj-$(CONFIG_DUMP_CODE) += disassemble.o
+obj-$(CONFIG_HIBERNATION) += swsusp.o
+obj-$(CONFIG_DWARF_UNWINDER) += dwarf.o
+obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_callchain.o
+
+obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
+obj-$(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) += localtimer.o
+
+ccflags-y := -Werror
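
Note on the $(BITS) substitution used throughout this Makefile: the arch Makefile sets BITS to 32 or 64 according to CONFIG_SUPERH32/CONFIG_SUPERH64, so on a 32-bit build the extra-y line above effectively expands to

        extra-y := head_32.o init_task.o vmlinux.lds

and the per-width objects (irq_32.o, ptrace_32.o, traps_32.o, and so on) are selected the same way.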
diff --git a/arch/sh/kernel/asm-offsets.c b/arch/sh/kernel/asm-offsets.c
new file mode 100644
index 00000000..08a2be77
--- /dev/null
+++ b/arch/sh/kernel/asm-offsets.c
@@ -0,0 +1,61 @@
+/*
+ * This program is used to generate definitions needed by
+ * assembly language modules.
+ *
+ * We use the technique used in the OSF Mach kernel code:
+ * generate asm statements containing #defines,
+ * compile this file to assembler, and then extract the
+ * #defines from the assembly-language output.
+ */
+
+#include <linux/stddef.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/kbuild.h>
+#include <linux/suspend.h>
+
+#include <asm/thread_info.h>
+#include <asm/suspend.h>
+
+int main(void)
+{
+ /* offsets into the thread_info struct */
+ DEFINE(TI_TASK, offsetof(struct thread_info, task));
+ DEFINE(TI_EXEC_DOMAIN, offsetof(struct thread_info, exec_domain));
+ DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
+ DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
+ DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
+ DEFINE(TI_RESTART_BLOCK,offsetof(struct thread_info, restart_block));
+ DEFINE(TI_SIZE, sizeof(struct thread_info));
+
+#ifdef CONFIG_HIBERNATION
+ DEFINE(PBE_ADDRESS, offsetof(struct pbe, address));
+ DEFINE(PBE_ORIG_ADDRESS, offsetof(struct pbe, orig_address));
+ DEFINE(PBE_NEXT, offsetof(struct pbe, next));
+ DEFINE(SWSUSP_ARCH_REGS_SIZE, sizeof(struct swsusp_arch_regs));
+#endif
+
+ DEFINE(SH_SLEEP_MODE, offsetof(struct sh_sleep_data, mode));
+ DEFINE(SH_SLEEP_SF_PRE, offsetof(struct sh_sleep_data, sf_pre));
+ DEFINE(SH_SLEEP_SF_POST, offsetof(struct sh_sleep_data, sf_post));
+ DEFINE(SH_SLEEP_RESUME, offsetof(struct sh_sleep_data, resume));
+ DEFINE(SH_SLEEP_VBR, offsetof(struct sh_sleep_data, vbr));
+ DEFINE(SH_SLEEP_SPC, offsetof(struct sh_sleep_data, spc));
+ DEFINE(SH_SLEEP_SR, offsetof(struct sh_sleep_data, sr));
+ DEFINE(SH_SLEEP_SP, offsetof(struct sh_sleep_data, sp));
+ DEFINE(SH_SLEEP_BASE_ADDR, offsetof(struct sh_sleep_data, addr));
+ DEFINE(SH_SLEEP_BASE_DATA, offsetof(struct sh_sleep_data, data));
+ DEFINE(SH_SLEEP_REG_STBCR, offsetof(struct sh_sleep_regs, stbcr));
+ DEFINE(SH_SLEEP_REG_BAR, offsetof(struct sh_sleep_regs, bar));
+ DEFINE(SH_SLEEP_REG_PTEH, offsetof(struct sh_sleep_regs, pteh));
+ DEFINE(SH_SLEEP_REG_PTEL, offsetof(struct sh_sleep_regs, ptel));
+ DEFINE(SH_SLEEP_REG_TTB, offsetof(struct sh_sleep_regs, ttb));
+ DEFINE(SH_SLEEP_REG_TEA, offsetof(struct sh_sleep_regs, tea));
+ DEFINE(SH_SLEEP_REG_MMUCR, offsetof(struct sh_sleep_regs, mmucr));
+ DEFINE(SH_SLEEP_REG_PTEA, offsetof(struct sh_sleep_regs, ptea));
+ DEFINE(SH_SLEEP_REG_PASCR, offsetof(struct sh_sleep_regs, pascr));
+ DEFINE(SH_SLEEP_REG_IRMCR, offsetof(struct sh_sleep_regs, irmcr));
+ DEFINE(SH_SLEEP_REG_CCR, offsetof(struct sh_sleep_regs, ccr));
+ DEFINE(SH_SLEEP_REG_RAMCR, offsetof(struct sh_sleep_regs, ramcr));
+ return 0;
+}
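
The constants generated here are consumed by assembly such as entry-common.S and the shmobile sleep code. Roughly, the kbuild DEFINE() helper works along the following lines (a sketch only; the authoritative definition lives in include/linux/kbuild.h):

        /* Sketch only: emit a marker line into the compiler's asm output. */
        #define DEFINE(sym, val) \
                asm volatile("\n->" #sym " %0 " #val : : "i" (val))

Kbuild compiles this file with -S and turns each "->SYMBOL value comment" marker in the assembler output into a "#define SYMBOL value" line of the generated asm-offsets.h, so entry code can refer to, say, TI_FLAGS without hard-coding structure offsets.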
diff --git a/arch/sh/kernel/cpu/Makefile b/arch/sh/kernel/cpu/Makefile
new file mode 100644
index 00000000..fa58bfd3
--- /dev/null
+++ b/arch/sh/kernel/cpu/Makefile
@@ -0,0 +1,21 @@
+#
+# Makefile for the Linux/SuperH CPU-specific backends.
+#
+
+obj-$(CONFIG_CPU_SH2) = sh2/
+obj-$(CONFIG_CPU_SH2A) = sh2a/
+obj-$(CONFIG_CPU_SH3) = sh3/
+obj-$(CONFIG_CPU_SH4) = sh4/
+obj-$(CONFIG_CPU_SH5) = sh5/
+
+# Special cases for family ancestry.
+
+obj-$(CONFIG_CPU_SH4A) += sh4a/
+obj-$(CONFIG_ARCH_SHMOBILE) += shmobile/
+
+# Common interfaces.
+
+obj-$(CONFIG_SH_ADC) += adc.o
+obj-$(CONFIG_SH_CLK_CPG_LEGACY) += clock-cpg.o
+
+obj-y += irq/ init.o clock.o fpu.o proc.o
diff --git a/arch/sh/kernel/cpu/adc.c b/arch/sh/kernel/cpu/adc.c
new file mode 100644
index 00000000..d307571d
--- /dev/null
+++ b/arch/sh/kernel/cpu/adc.c
@@ -0,0 +1,36 @@
+/*
+ * arch/sh/kernel/cpu/adc.c -- SH-3 on-chip ADC support
+ *
+ * Copyright (C) 2004 Andriy Skulysh <askulysh@image.kiev.ua>
+ */
+
+#include <linux/module.h>
+#include <asm/adc.h>
+#include <asm/io.h>
+
+
+int adc_single(unsigned int channel)
+{
+ int off;
+ unsigned char csr;
+
+ if (channel >= 8) return -1;
+
+ off = (channel & 0x03) << 2;
+
+ csr = __raw_readb(ADCSR);
+ csr = channel | ADCSR_ADST | ADCSR_CKS;
+ __raw_writeb(csr, ADCSR);
+
+ do {
+ csr = __raw_readb(ADCSR);
+ } while ((csr & ADCSR_ADF) == 0);
+
+ csr &= ~(ADCSR_ADF | ADCSR_ADST);
+ __raw_writeb(csr, ADCSR);
+
+ return (((__raw_readb(ADDRAH + off) << 8) |
+ __raw_readb(ADDRAL + off)) >> 6);
+}
+
+EXPORT_SYMBOL(adc_single);
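
A minimal usage sketch of the interface above (the caller is hypothetical; adc_single() returns -1 for an invalid channel, otherwise the 10-bit conversion result):

        #include <asm/adc.h>

        /* Illustrative only: average four samples from one ADC channel. */
        static int example_read_adc(unsigned int channel)
        {
                int i, sum = 0;

                for (i = 0; i < 4; i++) {
                        int val = adc_single(channel);

                        if (val < 0)
                                return val;     /* invalid channel */
                        sum += val;
                }

                return sum / 4;                 /* 10-bit result, 0..1023 */
        }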
diff --git a/arch/sh/kernel/cpu/clock-cpg.c b/arch/sh/kernel/cpu/clock-cpg.c
new file mode 100644
index 00000000..f59b1f30
--- /dev/null
+++ b/arch/sh/kernel/cpu/clock-cpg.c
@@ -0,0 +1,74 @@
+#include <linux/clk.h>
+#include <linux/compiler.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/clkdev.h>
+#include <asm/clock.h>
+
+static struct clk master_clk = {
+ .flags = CLK_ENABLE_ON_INIT,
+ .rate = CONFIG_SH_PCLK_FREQ,
+};
+
+static struct clk peripheral_clk = {
+ .parent = &master_clk,
+ .flags = CLK_ENABLE_ON_INIT,
+};
+
+static struct clk bus_clk = {
+ .parent = &master_clk,
+ .flags = CLK_ENABLE_ON_INIT,
+};
+
+static struct clk cpu_clk = {
+ .parent = &master_clk,
+ .flags = CLK_ENABLE_ON_INIT,
+};
+
+/*
+ * The ordering of these clocks matters, do not change it.
+ */
+static struct clk *onchip_clocks[] = {
+ &master_clk,
+ &peripheral_clk,
+ &bus_clk,
+ &cpu_clk,
+};
+
+static struct clk_lookup lookups[] = {
+ /* main clocks */
+ CLKDEV_CON_ID("master_clk", &master_clk),
+ CLKDEV_CON_ID("peripheral_clk", &peripheral_clk),
+ CLKDEV_CON_ID("bus_clk", &bus_clk),
+ CLKDEV_CON_ID("cpu_clk", &cpu_clk),
+};
+
+int __init __deprecated cpg_clk_init(void)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < ARRAY_SIZE(onchip_clocks); i++) {
+ struct clk *clk = onchip_clocks[i];
+ arch_init_clk_ops(&clk->ops, i);
+ if (clk->ops)
+ ret |= clk_register(clk);
+ }
+
+ clkdev_add_table(lookups, ARRAY_SIZE(lookups));
+
+ clk_add_alias("tmu_fck", NULL, "peripheral_clk", NULL);
+ clk_add_alias("mtu2_fck", NULL, "peripheral_clk", NULL);
+ clk_add_alias("cmt_fck", NULL, "peripheral_clk", NULL);
+ clk_add_alias("sci_ick", NULL, "peripheral_clk", NULL);
+
+ return ret;
+}
+
+/*
+ * Placeholder for compatibility, until the lazy CPUs do this
+ * on their own.
+ */
+int __init __weak arch_clk_init(void)
+{
+ return cpg_clk_init();
+}
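
The clkdev lookups and clk_add_alias() calls above are what make these clocks reachable by name from drivers. Note also that arch_init_clk_ops(&clk->ops, i) hands the array index to the per-SoC code, which is why the ordering comment matters: index 0..3 picks up the ops for the master, peripheral (module), bus and CPU clocks in that order (see clock-sh7619.c further down). A hedged consumer sketch (the device pointer and the caller are hypothetical; "tmu_fck" is one of the aliases registered above):

        #include <linux/clk.h>
        #include <linux/device.h>
        #include <linux/err.h>

        /* Illustrative only: a driver picking up the "tmu_fck" alias, which
         * resolves to peripheral_clk per the clk_add_alias() call above. */
        static int example_get_tmu_clock(struct device *dev)
        {
                struct clk *clk = clk_get(dev, "tmu_fck");
                int ret;

                if (IS_ERR(clk))
                        return PTR_ERR(clk);

                ret = clk_enable(clk);
                if (ret) {
                        clk_put(clk);
                        return ret;
                }

                dev_info(dev, "TMU clock rate: %lu Hz\n", clk_get_rate(clk));

                clk_disable(clk);
                clk_put(clk);
                return 0;
        }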
diff --git a/arch/sh/kernel/cpu/clock.c b/arch/sh/kernel/cpu/clock.c
new file mode 100644
index 00000000..4187cf4f
--- /dev/null
+++ b/arch/sh/kernel/cpu/clock.c
@@ -0,0 +1,51 @@
+/*
+ * arch/sh/kernel/cpu/clock.c - SuperH clock framework
+ *
+ * Copyright (C) 2005 - 2009 Paul Mundt
+ *
+ * This clock framework is derived from the OMAP version by:
+ *
+ * Copyright (C) 2004 - 2008 Nokia Corporation
+ * Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
+ *
+ * Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/clk.h>
+#include <asm/clock.h>
+#include <asm/machvec.h>
+
+int __init clk_init(void)
+{
+ int ret;
+
+ ret = arch_clk_init();
+ if (unlikely(ret)) {
+ pr_err("%s: CPU clock registration failed.\n", __func__);
+ return ret;
+ }
+
+ if (sh_mv.mv_clk_init) {
+ ret = sh_mv.mv_clk_init();
+ if (unlikely(ret)) {
+ pr_err("%s: machvec clock initialization failed.\n",
+ __func__);
+ return ret;
+ }
+ }
+
+ /* Kick the child clocks.. */
+ recalculate_root_clocks();
+
+ /* Enable the necessary init clocks */
+ clk_enable_init_clocks();
+
+ return ret;
+}
+
+
diff --git a/arch/sh/kernel/cpu/fpu.c b/arch/sh/kernel/cpu/fpu.c
new file mode 100644
index 00000000..f8f7af51
--- /dev/null
+++ b/arch/sh/kernel/cpu/fpu.c
@@ -0,0 +1,86 @@
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <asm/processor.h>
+#include <asm/fpu.h>
+#include <asm/traps.h>
+
+int init_fpu(struct task_struct *tsk)
+{
+ if (tsk_used_math(tsk)) {
+ if ((boot_cpu_data.flags & CPU_HAS_FPU) && tsk == current)
+ unlazy_fpu(tsk, task_pt_regs(tsk));
+ return 0;
+ }
+
+ /*
+ * Memory allocation at the first usage of the FPU and other state.
+ */
+ if (!tsk->thread.xstate) {
+ tsk->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
+ GFP_KERNEL);
+ if (!tsk->thread.xstate)
+ return -ENOMEM;
+ }
+
+ if (boot_cpu_data.flags & CPU_HAS_FPU) {
+ struct sh_fpu_hard_struct *fp = &tsk->thread.xstate->hardfpu;
+ memset(fp, 0, xstate_size);
+ fp->fpscr = FPSCR_INIT;
+ } else {
+ struct sh_fpu_soft_struct *fp = &tsk->thread.xstate->softfpu;
+ memset(fp, 0, xstate_size);
+ fp->fpscr = FPSCR_INIT;
+ }
+
+ set_stopped_child_used_math(tsk);
+ return 0;
+}
+
+#ifdef CONFIG_SH_FPU
+void __fpu_state_restore(void)
+{
+ struct task_struct *tsk = current;
+
+ restore_fpu(tsk);
+
+ task_thread_info(tsk)->status |= TS_USEDFPU;
+ tsk->fpu_counter++;
+}
+
+void fpu_state_restore(struct pt_regs *regs)
+{
+ struct task_struct *tsk = current;
+
+ if (unlikely(!user_mode(regs))) {
+ printk(KERN_ERR "BUG: FPU is used in kernel mode.\n");
+ BUG();
+ return;
+ }
+
+ if (!tsk_used_math(tsk)) {
+ local_irq_enable();
+ /*
+ * does a slab alloc which can sleep
+ */
+ if (init_fpu(tsk)) {
+ /*
+ * ran out of memory!
+ */
+ do_group_exit(SIGKILL);
+ return;
+ }
+ local_irq_disable();
+ }
+
+ grab_fpu(regs);
+
+ __fpu_state_restore();
+}
+
+BUILD_TRAP_HANDLER(fpu_state_restore)
+{
+ TRAP_HANDLER_DECL;
+
+ fpu_state_restore(regs);
+}
+#endif /* CONFIG_SH_FPU */
diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c
new file mode 100644
index 00000000..61a07daf
--- /dev/null
+++ b/arch/sh/kernel/cpu/init.c
@@ -0,0 +1,365 @@
+/*
+ * arch/sh/kernel/cpu/init.c
+ *
+ * CPU init code
+ *
+ * Copyright (C) 2002 - 2009 Paul Mundt
+ * Copyright (C) 2003 Richard Curnow
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/log2.h>
+#include <asm/mmu_context.h>
+#include <asm/processor.h>
+#include <asm/uaccess.h>
+#include <asm/page.h>
+#include <asm/cacheflush.h>
+#include <asm/cache.h>
+#include <asm/elf.h>
+#include <asm/io.h>
+#include <asm/smp.h>
+#include <asm/sh_bios.h>
+#include <asm/setup.h>
+
+#ifdef CONFIG_SH_FPU
+#define cpu_has_fpu 1
+#else
+#define cpu_has_fpu 0
+#endif
+
+#ifdef CONFIG_SH_DSP
+#define cpu_has_dsp 1
+#else
+#define cpu_has_dsp 0
+#endif
+
+/*
+ * Generic wrapper for command line arguments to disable on-chip
+ * peripherals (nofpu, nodsp, and so forth).
+ */
+#define onchip_setup(x) \
+static int x##_disabled __cpuinitdata = !cpu_has_##x; \
+ \
+static int __cpuinit x##_setup(char *opts) \
+{ \
+ x##_disabled = 1; \
+ return 1; \
+} \
+__setup("no" __stringify(x), x##_setup);
+
+onchip_setup(fpu);
+onchip_setup(dsp);
+
+#ifdef CONFIG_SPECULATIVE_EXECUTION
+#define CPUOPM 0xff2f0000
+#define CPUOPM_RABD (1 << 5)
+
+static void __cpuinit speculative_execution_init(void)
+{
+ /* Clear RABD */
+ __raw_writel(__raw_readl(CPUOPM) & ~CPUOPM_RABD, CPUOPM);
+
+ /* Flush the update */
+ (void)__raw_readl(CPUOPM);
+ ctrl_barrier();
+}
+#else
+#define speculative_execution_init() do { } while (0)
+#endif
+
+#ifdef CONFIG_CPU_SH4A
+#define EXPMASK 0xff2f0004
+#define EXPMASK_RTEDS (1 << 0)
+#define EXPMASK_BRDSSLP (1 << 1)
+#define EXPMASK_MMCAW (1 << 4)
+
+static void __cpuinit expmask_init(void)
+{
+ unsigned long expmask = __raw_readl(EXPMASK);
+
+ /*
+ * Future proofing.
+ *
+ * Disable support for slottable sleep instruction, non-nop
+ * instructions in the rte delay slot, and associative writes to
+ * the memory-mapped cache array.
+ */
+ expmask &= ~(EXPMASK_RTEDS | EXPMASK_BRDSSLP | EXPMASK_MMCAW);
+
+ __raw_writel(expmask, EXPMASK);
+ ctrl_barrier();
+}
+#else
+#define expmask_init() do { } while (0)
+#endif
+
+/* 2nd-level cache init */
+void __attribute__ ((weak)) l2_cache_init(void)
+{
+}
+
+/*
+ * Generic first-level cache init
+ */
+#ifdef CONFIG_SUPERH32
+static void cache_init(void)
+{
+ unsigned long ccr, flags;
+
+ jump_to_uncached();
+ ccr = __raw_readl(CCR);
+
+ /*
+ * At this point we don't know whether the cache is enabled or not - a
+ * bootloader may have enabled it. There are at least 2 things that
+ * could be dirty in the cache at this point:
+ * 1. kernel command line set up by boot loader
+ * 2. spilled registers from the prolog of this function
+ * => before re-initialising the cache, we must do a purge of the whole
+ * cache out to memory for safety. As long as nothing is spilled
+ * during the loop to lines that have already been done, this is safe.
+ * - RPC
+ */
+ if (ccr & CCR_CACHE_ENABLE) {
+ unsigned long ways, waysize, addrstart;
+
+ waysize = current_cpu_data.dcache.sets;
+
+#ifdef CCR_CACHE_ORA
+ /*
+ * If the OC is already in RAM mode, we only have
+ * half of the entries to flush..
+ */
+ if (ccr & CCR_CACHE_ORA)
+ waysize >>= 1;
+#endif
+
+ waysize <<= current_cpu_data.dcache.entry_shift;
+
+#ifdef CCR_CACHE_EMODE
+ /* If EMODE is not set, we only have 1 way to flush. */
+ if (!(ccr & CCR_CACHE_EMODE))
+ ways = 1;
+ else
+#endif
+ ways = current_cpu_data.dcache.ways;
+
+ addrstart = CACHE_OC_ADDRESS_ARRAY;
+ do {
+ unsigned long addr;
+
+ for (addr = addrstart;
+ addr < addrstart + waysize;
+ addr += current_cpu_data.dcache.linesz)
+ __raw_writel(0, addr);
+
+ addrstart += current_cpu_data.dcache.way_incr;
+ } while (--ways);
+ }
+
+ /*
+ * Default CCR values .. enable the caches
+ * and invalidate them immediately..
+ */
+ flags = CCR_CACHE_ENABLE | CCR_CACHE_INVALIDATE;
+
+#ifdef CCR_CACHE_EMODE
+ /* Force EMODE if possible */
+ if (current_cpu_data.dcache.ways > 1)
+ flags |= CCR_CACHE_EMODE;
+ else
+ flags &= ~CCR_CACHE_EMODE;
+#endif
+
+#if defined(CONFIG_CACHE_WRITETHROUGH)
+ /* Write-through */
+ flags |= CCR_CACHE_WT;
+#elif defined(CONFIG_CACHE_WRITEBACK)
+ /* Write-back */
+ flags |= CCR_CACHE_CB;
+#else
+ /* Off */
+ flags &= ~CCR_CACHE_ENABLE;
+#endif
+
+ l2_cache_init();
+
+ __raw_writel(flags, CCR);
+ back_to_cached();
+}
+#else
+#define cache_init() do { } while (0)
+#endif
+
+#define CSHAPE(totalsize, linesize, assoc) \
+ ((totalsize & ~0xff) | (linesize << 4) | assoc)
+
+#define CACHE_DESC_SHAPE(desc) \
+ CSHAPE((desc).way_size * (desc).ways, ilog2((desc).linesz), (desc).ways)
+
+static void detect_cache_shape(void)
+{
+ l1d_cache_shape = CACHE_DESC_SHAPE(current_cpu_data.dcache);
+
+ if (current_cpu_data.dcache.flags & SH_CACHE_COMBINED)
+ l1i_cache_shape = l1d_cache_shape;
+ else
+ l1i_cache_shape = CACHE_DESC_SHAPE(current_cpu_data.icache);
+
+ if (current_cpu_data.flags & CPU_HAS_L2_CACHE)
+ l2_cache_shape = CACHE_DESC_SHAPE(current_cpu_data.scache);
+ else
+ l2_cache_shape = -1; /* No S-cache */
+}
+
+static void __cpuinit fpu_init(void)
+{
+ /* Disable the FPU */
+ if (fpu_disabled && (current_cpu_data.flags & CPU_HAS_FPU)) {
+ printk("FPU Disabled\n");
+ current_cpu_data.flags &= ~CPU_HAS_FPU;
+ }
+
+ disable_fpu();
+ clear_used_math();
+}
+
+#ifdef CONFIG_SH_DSP
+static void __cpuinit release_dsp(void)
+{
+ unsigned long sr;
+
+ /* Clear SR.DSP bit */
+ __asm__ __volatile__ (
+ "stc\tsr, %0\n\t"
+ "and\t%1, %0\n\t"
+ "ldc\t%0, sr\n\t"
+ : "=&r" (sr)
+ : "r" (~SR_DSP)
+ );
+}
+
+static void __cpuinit dsp_init(void)
+{
+ unsigned long sr;
+
+ /*
+ * Set the SR.DSP bit, wait for one instruction, and then read
+ * back the SR value.
+ */
+ __asm__ __volatile__ (
+ "stc\tsr, %0\n\t"
+ "or\t%1, %0\n\t"
+ "ldc\t%0, sr\n\t"
+ "nop\n\t"
+ "stc\tsr, %0\n\t"
+ : "=&r" (sr)
+ : "r" (SR_DSP)
+ );
+
+ /* If the DSP bit is still set, this CPU has a DSP */
+ if (sr & SR_DSP)
+ current_cpu_data.flags |= CPU_HAS_DSP;
+
+ /* Disable the DSP */
+ if (dsp_disabled && (current_cpu_data.flags & CPU_HAS_DSP)) {
+ printk("DSP Disabled\n");
+ current_cpu_data.flags &= ~CPU_HAS_DSP;
+ }
+
+ /* Now that we've determined the DSP status, clear the DSP bit. */
+ release_dsp();
+}
+#else
+static inline void __cpuinit dsp_init(void) { }
+#endif /* CONFIG_SH_DSP */
+
+/**
+ * cpu_init
+ *
+ * This is our initial entry point for each CPU, and is invoked on the
+ * boot CPU prior to calling start_kernel(). For SMP, a combination of
+ * this and start_secondary() will bring up each processor to a ready
+ * state prior to hand forking the idle loop.
+ *
+ * We do all of the basic processor init here, including setting up
+ * the caches, FPU, DSP, etc. By the time start_kernel() is hit (and
+ * subsequently platform_setup()) things like determining the CPU
+ * subtype and initial configuration will all be done.
+ *
+ * Each processor family is still responsible for doing its own probing
+ * and cache configuration in cpu_probe().
+ */
+asmlinkage void __cpuinit cpu_init(void)
+{
+ current_thread_info()->cpu = hard_smp_processor_id();
+
+ /* First, probe the CPU */
+ cpu_probe();
+
+ if (current_cpu_data.type == CPU_SH_NONE)
+ panic("Unknown CPU");
+
+ /* First setup the rest of the I-cache info */
+ current_cpu_data.icache.entry_mask = current_cpu_data.icache.way_incr -
+ current_cpu_data.icache.linesz;
+
+ current_cpu_data.icache.way_size = current_cpu_data.icache.sets *
+ current_cpu_data.icache.linesz;
+
+ /* And the D-cache too */
+ current_cpu_data.dcache.entry_mask = current_cpu_data.dcache.way_incr -
+ current_cpu_data.dcache.linesz;
+
+ current_cpu_data.dcache.way_size = current_cpu_data.dcache.sets *
+ current_cpu_data.dcache.linesz;
+
+ /* Init the cache */
+ cache_init();
+
+ if (raw_smp_processor_id() == 0) {
+ shm_align_mask = max_t(unsigned long,
+ current_cpu_data.dcache.way_size - 1,
+ PAGE_SIZE - 1);
+
+ /* Boot CPU sets the cache shape */
+ detect_cache_shape();
+ }
+
+ fpu_init();
+ dsp_init();
+
+ /*
+ * Initialize the per-CPU ASID cache very early, since the
+ * TLB flushing routines depend on this being setup.
+ */
+ current_cpu_data.asid_cache = NO_CONTEXT;
+
+ current_cpu_data.phys_bits = __in_29bit_mode() ? 29 : 32;
+
+ speculative_execution_init();
+ expmask_init();
+
+ /* Do the rest of the boot processor setup */
+ if (raw_smp_processor_id() == 0) {
+ /* Save off the BIOS VBR, if there is one */
+ sh_bios_vbr_init();
+
+ /*
+ * Setup VBR for boot CPU. Secondary CPUs do this through
+ * start_secondary().
+ */
+ per_cpu_trap_init();
+
+ /*
+ * Boot processor to setup the FP and extended state
+ * context info.
+ */
+ init_thread_xstate();
+ }
+}
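
For reference, the onchip_setup() macro near the top of this file expands, for the fpu case, roughly as follows; this is where the "nofpu" and "nodsp" kernel command-line options come from:

        /* Approximate expansion of onchip_setup(fpu); formatting adjusted. */
        static int fpu_disabled __cpuinitdata = !cpu_has_fpu;

        static int __cpuinit fpu_setup(char *opts)
        {
                fpu_disabled = 1;
                return 1;
        }
        __setup("nofpu", fpu_setup);

fpu_init() and dsp_init() then consult fpu_disabled/dsp_disabled to clear the corresponding CPU_HAS_* flags.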
diff --git a/arch/sh/kernel/cpu/irq/Makefile b/arch/sh/kernel/cpu/irq/Makefile
new file mode 100644
index 00000000..f0c7025a
--- /dev/null
+++ b/arch/sh/kernel/cpu/irq/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for the Linux/SuperH CPU-specific IRQ handlers.
+#
+obj-$(CONFIG_SUPERH32) += imask.o
+obj-$(CONFIG_CPU_SH5) += intc-sh5.o
+obj-$(CONFIG_CPU_HAS_IPR_IRQ) += ipr.o
diff --git a/arch/sh/kernel/cpu/irq/imask.c b/arch/sh/kernel/cpu/irq/imask.c
new file mode 100644
index 00000000..e7f1745b
--- /dev/null
+++ b/arch/sh/kernel/cpu/irq/imask.c
@@ -0,0 +1,84 @@
+/*
+ * arch/sh/kernel/cpu/irq/imask.c
+ *
+ * Copyright (C) 1999, 2000 Niibe Yutaka
+ *
+ * Simple interrupt handling using IMASK of SR register.
+ *
+ */
+/* NOTE: Will not work on level 15 */
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/kernel_stat.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <linux/spinlock.h>
+#include <linux/cache.h>
+#include <linux/irq.h>
+#include <linux/bitmap.h>
+#include <asm/irq.h>
+
+/* Bitmap of IRQ masked */
+#define IMASK_PRIORITY 15
+
+static DECLARE_BITMAP(imask_mask, IMASK_PRIORITY);
+static int interrupt_priority;
+
+static inline void set_interrupt_registers(int ip)
+{
+ unsigned long __dummy;
+
+ asm volatile(
+#ifdef CONFIG_CPU_HAS_SR_RB
+ "ldc %2, r6_bank\n\t"
+#endif
+ "stc sr, %0\n\t"
+ "and #0xf0, %0\n\t"
+ "shlr2 %0\n\t"
+ "cmp/eq #0x3c, %0\n\t"
+ "bt/s 1f ! CLI-ed\n\t"
+ " stc sr, %0\n\t"
+ "and %1, %0\n\t"
+ "or %2, %0\n\t"
+ "ldc %0, sr\n"
+ "1:"
+ : "=&z" (__dummy)
+ : "r" (~0xf0), "r" (ip << 4)
+ : "t");
+}
+
+static void mask_imask_irq(struct irq_data *data)
+{
+ unsigned int irq = data->irq;
+
+ clear_bit(irq, imask_mask);
+ if (interrupt_priority < IMASK_PRIORITY - irq)
+ interrupt_priority = IMASK_PRIORITY - irq;
+ set_interrupt_registers(interrupt_priority);
+}
+
+static void unmask_imask_irq(struct irq_data *data)
+{
+ unsigned int irq = data->irq;
+
+ set_bit(irq, imask_mask);
+ interrupt_priority = IMASK_PRIORITY -
+ find_first_zero_bit(imask_mask, IMASK_PRIORITY);
+ set_interrupt_registers(interrupt_priority);
+}
+
+static struct irq_chip imask_irq_chip = {
+ .name = "SR.IMASK",
+ .irq_mask = mask_imask_irq,
+ .irq_unmask = unmask_imask_irq,
+ .irq_mask_ack = mask_imask_irq,
+};
+
+void make_imask_irq(unsigned int irq)
+{
+ irq_set_chip_and_handler_name(irq, &imask_irq_chip, handle_level_irq,
+ "level");
+}
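
A worked example of the masking scheme above: imask IRQ n is treated as priority 15 - n, so masking IRQ 3 clears bit 3 of the bitmap and raises the SR.IMASK field to at least 12 (set_interrupt_registers(12) writes 12 << 4 into SR), which blocks that interrupt and everything of lower priority; unmasking recomputes the level from the first zero bit in the bitmap.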
diff --git a/arch/sh/kernel/cpu/irq/intc-sh5.c b/arch/sh/kernel/cpu/irq/intc-sh5.c
new file mode 100644
index 00000000..9e056a3a
--- /dev/null
+++ b/arch/sh/kernel/cpu/irq/intc-sh5.c
@@ -0,0 +1,197 @@
+/*
+ * arch/sh/kernel/cpu/irq/intc-sh5.c
+ *
+ * Interrupt Controller support for SH5 INTC.
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ * Copyright (C) 2003 Paul Mundt
+ *
+ * Per-interrupt selective. IRLM=0 (fixed priority) is not
+ * supported, as it is useless without a cascaded interrupt
+ * controller.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <cpu/irq.h>
+#include <asm/page.h>
+
+/*
+ * Maybe the generic Peripheral block could move to a more
+ * generic include file. INTC Block will be defined here
+ * and only here to make INTC self-contained in a single
+ * file.
+ */
+#define INTC_BLOCK_OFFSET 0x01000000
+
+/* Base */
+#define INTC_BASE PHYS_PERIPHERAL_BLOCK + \
+ INTC_BLOCK_OFFSET
+
+/* Address */
+#define INTC_ICR_SET (intc_virt + 0x0)
+#define INTC_ICR_CLEAR (intc_virt + 0x8)
+#define INTC_INTPRI_0 (intc_virt + 0x10)
+#define INTC_INTSRC_0 (intc_virt + 0x50)
+#define INTC_INTSRC_1 (intc_virt + 0x58)
+#define INTC_INTREQ_0 (intc_virt + 0x60)
+#define INTC_INTREQ_1 (intc_virt + 0x68)
+#define INTC_INTENB_0 (intc_virt + 0x70)
+#define INTC_INTENB_1 (intc_virt + 0x78)
+#define INTC_INTDSB_0 (intc_virt + 0x80)
+#define INTC_INTDSB_1 (intc_virt + 0x88)
+
+#define INTC_ICR_IRLM 0x1
+#define INTC_INTPRI_PREGS 8 /* 8 Priority Registers */
+#define INTC_INTPRI_PPREG 8 /* 8 Priorities per Register */
+
+
+/*
+ * Mapper between the vector ordinal and the IRQ number
+ * passed to kernel/device drivers.
+ */
+int intc_evt_to_irq[(0xE20/0x20)+1] = {
+ -1, -1, -1, -1, -1, -1, -1, -1, /* 0x000 - 0x0E0 */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* 0x100 - 0x1E0 */
+ 0, 0, 0, 0, 0, 1, 0, 0, /* 0x200 - 0x2E0 */
+ 2, 0, 0, 3, 0, 0, 0, -1, /* 0x300 - 0x3E0 */
+ 32, 33, 34, 35, 36, 37, 38, -1, /* 0x400 - 0x4E0 */
+ -1, -1, -1, 63, -1, -1, -1, -1, /* 0x500 - 0x5E0 */
+ -1, -1, 18, 19, 20, 21, 22, -1, /* 0x600 - 0x6E0 */
+ 39, 40, 41, 42, -1, -1, -1, -1, /* 0x700 - 0x7E0 */
+ 4, 5, 6, 7, -1, -1, -1, -1, /* 0x800 - 0x8E0 */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* 0x900 - 0x9E0 */
+ 12, 13, 14, 15, 16, 17, -1, -1, /* 0xA00 - 0xAE0 */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* 0xB00 - 0xBE0 */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* 0xC00 - 0xCE0 */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* 0xD00 - 0xDE0 */
+ -1, -1 /* 0xE00 - 0xE20 */
+};
+
+static unsigned long intc_virt;
+static int irlm; /* IRL mode */
+
+static void enable_intc_irq(struct irq_data *data)
+{
+ unsigned int irq = data->irq;
+ unsigned long reg;
+ unsigned long bitmask;
+
+ if ((irq <= IRQ_IRL3) && (irlm == NO_PRIORITY))
+ printk("Trying to use straight IRL0-3 with an encoding platform.\n");
+
+ if (irq < 32) {
+ reg = INTC_INTENB_0;
+ bitmask = 1 << irq;
+ } else {
+ reg = INTC_INTENB_1;
+ bitmask = 1 << (irq - 32);
+ }
+
+ __raw_writel(bitmask, reg);
+}
+
+static void disable_intc_irq(struct irq_data *data)
+{
+ unsigned int irq = data->irq;
+ unsigned long reg;
+ unsigned long bitmask;
+
+ if (irq < 32) {
+ reg = INTC_INTDSB_0;
+ bitmask = 1 << irq;
+ } else {
+ reg = INTC_INTDSB_1;
+ bitmask = 1 << (irq - 32);
+ }
+
+ __raw_writel(bitmask, reg);
+}
+
+static struct irq_chip intc_irq_type = {
+ .name = "INTC",
+ .irq_enable = enable_intc_irq,
+ .irq_disable = disable_intc_irq,
+};
+
+void __init plat_irq_setup(void)
+{
+ unsigned long long __dummy0, __dummy1=~0x00000000100000f0;
+ unsigned long reg;
+ int i;
+
+ intc_virt = (unsigned long)ioremap_nocache(INTC_BASE, 1024);
+ if (!intc_virt) {
+ panic("Unable to remap INTC\n");
+ }
+
+
+ /* Set default: per-line enable/disable, priority driven ack/eoi */
+ for (i = 0; i < NR_INTC_IRQS; i++)
+ irq_set_chip_and_handler(i, &intc_irq_type, handle_level_irq);
+
+
+ /* Disable all interrupts and set all priorities to 0 to avoid trouble */
+ __raw_writel(-1, INTC_INTDSB_0);
+ __raw_writel(-1, INTC_INTDSB_1);
+
+ for (reg = INTC_INTPRI_0, i = 0; i < INTC_INTPRI_PREGS; i++, reg += 8)
+ __raw_writel( NO_PRIORITY, reg);
+
+
+#ifdef CONFIG_SH_CAYMAN
+ {
+ unsigned long data;
+
+ /* Set IRLM */
+ /* If all the priorities are set to 'no priority', then
+ * assume we are using encoded mode.
+ */
+ irlm = platform_int_priority[IRQ_IRL0] +
+ platform_int_priority[IRQ_IRL1] +
+ platform_int_priority[IRQ_IRL2] +
+ platform_int_priority[IRQ_IRL3];
+ if (irlm == NO_PRIORITY) {
+ /* IRLM = 0 */
+ reg = INTC_ICR_CLEAR;
+ i = IRQ_INTA;
+ printk("Trying to use encoded IRL0-3. IRLs unsupported.\n");
+ } else {
+ /* IRLM = 1 */
+ reg = INTC_ICR_SET;
+ i = IRQ_IRL0;
+ }
+ __raw_writel(INTC_ICR_IRLM, reg);
+
+ /* Set interrupt priorities according to platform description */
+ for (data = 0, reg = INTC_INTPRI_0; i < NR_INTC_IRQS; i++) {
+ data |= platform_int_priority[i] <<
+ ((i % INTC_INTPRI_PPREG) * 4);
+ if ((i % INTC_INTPRI_PPREG) == (INTC_INTPRI_PPREG - 1)) {
+ /* Upon the 7th, set Priority Register */
+ __raw_writel(data, reg);
+ data = 0;
+ reg += 8;
+ }
+ }
+ }
+#endif
+
+ /*
+ * And now let interrupts come in.
+ * sti() is not enough, we need to
+ * lower priority, too.
+ */
+ __asm__ __volatile__("getcon " __SR ", %0\n\t"
+ "and %0, %1, %0\n\t"
+ "putcon %0, " __SR "\n\t"
+ : "=&r" (__dummy0)
+ : "r" (__dummy1));
+}
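
A worked example of the priority-packing loop at the end of plat_irq_setup() above: each INTPRI register holds eight 4-bit priority fields, so an entry with loop index i = 10 and priority 7 contributes 7 << ((10 % 8) * 4) = 7 << 8, i.e. the priority lands in the third 4-bit field of the register currently being accumulated, and the 32-bit value is flushed to hardware once every eighth entry (when i % 8 == 7), after which reg advances by 8.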
diff --git a/arch/sh/kernel/cpu/irq/ipr.c b/arch/sh/kernel/cpu/irq/ipr.c
new file mode 100644
index 00000000..5de6dff5
--- /dev/null
+++ b/arch/sh/kernel/cpu/irq/ipr.c
@@ -0,0 +1,83 @@
+/*
+ * Interrupt handling for IPR-based IRQ.
+ *
+ * Copyright (C) 1999 Niibe Yutaka & Takeshi Yaegashi
+ * Copyright (C) 2000 Kazumoto Kojima
+ * Copyright (C) 2003 Takashi Kusuda <kusuda-takashi@hitachi-ul.co.jp>
+ * Copyright (C) 2006 Paul Mundt
+ *
+ * Supported system:
+ * On-chip supporting modules (TMU, RTC, etc.).
+ * On-chip supporting modules for SH7709/SH7709A/SH7729.
+ * Hitachi SolutionEngine external I/O:
+ * MS7709SE01, MS7709ASE01, and MS7750SE01
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/topology.h>
+
+static inline struct ipr_desc *get_ipr_desc(struct irq_data *data)
+{
+ struct irq_chip *chip = irq_data_get_irq_chip(data);
+ return container_of(chip, struct ipr_desc, chip);
+}
+
+static void disable_ipr_irq(struct irq_data *data)
+{
+ struct ipr_data *p = irq_data_get_irq_chip_data(data);
+ unsigned long addr = get_ipr_desc(data)->ipr_offsets[p->ipr_idx];
+ /* Set the priority in IPR to 0 */
+ __raw_writew(__raw_readw(addr) & (0xffff ^ (0xf << p->shift)), addr);
+ (void)__raw_readw(addr); /* Read back to flush write posting */
+}
+
+static void enable_ipr_irq(struct irq_data *data)
+{
+ struct ipr_data *p = irq_data_get_irq_chip_data(data);
+ unsigned long addr = get_ipr_desc(data)->ipr_offsets[p->ipr_idx];
+ /* Set priority in IPR back to original value */
+ __raw_writew(__raw_readw(addr) | (p->priority << p->shift), addr);
+}
+
+/*
+ * The shift value is now the number of bits to shift, not the number of
+ * bits/4. This is to make it easier to read the value directly from the
+ * datasheets. The IPR address is calculated using the ipr_offset table.
+ */
+void register_ipr_controller(struct ipr_desc *desc)
+{
+ int i;
+
+ desc->chip.irq_mask = disable_ipr_irq;
+ desc->chip.irq_unmask = enable_ipr_irq;
+
+ for (i = 0; i < desc->nr_irqs; i++) {
+ struct ipr_data *p = desc->ipr_data + i;
+ int res;
+
+ BUG_ON(p->ipr_idx >= desc->nr_offsets);
+ BUG_ON(!desc->ipr_offsets[p->ipr_idx]);
+
+ res = irq_alloc_desc_at(p->irq, numa_node_id());
+ if (unlikely(res != p->irq && res != -EEXIST)) {
+ printk(KERN_INFO "can not get irq_desc for %d\n",
+ p->irq);
+ continue;
+ }
+
+ disable_irq_nosync(p->irq);
+ irq_set_chip_and_handler_name(p->irq, &desc->chip,
+ handle_level_irq, "level");
+ irq_set_chip_data(p->irq, p);
+ disable_ipr_irq(irq_get_irq_data(p->irq));
+ }
+}
+EXPORT_SYMBOL(register_ipr_controller);
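
A hedged sketch of how a CPU setup file is expected to feed this interface, based purely on the fields referenced above (register addresses, IRQ numbers and priorities are invented for illustration):

        /* Illustrative only: addresses, IRQ numbers and priorities are made
         * up; the field names match those used by the code above. */
        static unsigned long example_ipr_offsets[] = {
                0xa414fee2,     /* hypothetical IPRA */
                0xa414fee4,     /* hypothetical IPRB */
        };

        static struct ipr_data example_ipr_data[] = {
                { .irq = 16, .ipr_idx = 0, .shift = 12, .priority = 2 },
                { .irq = 17, .ipr_idx = 0, .shift =  8, .priority = 2 },
                { .irq = 18, .ipr_idx = 1, .shift = 12, .priority = 1 },
        };

        static struct ipr_desc example_ipr_desc = {
                .ipr_offsets    = example_ipr_offsets,
                .nr_offsets     = ARRAY_SIZE(example_ipr_offsets),
                .ipr_data       = example_ipr_data,
                .nr_irqs        = ARRAY_SIZE(example_ipr_data),
                .chip = {
                        .name   = "IPR-example",
                },
        };

        /* A platform's plat_irq_setup() would then call:
         *      register_ipr_controller(&example_ipr_desc);
         */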
diff --git a/arch/sh/kernel/cpu/proc.c b/arch/sh/kernel/cpu/proc.c
new file mode 100644
index 00000000..f47be872
--- /dev/null
+++ b/arch/sh/kernel/cpu/proc.c
@@ -0,0 +1,148 @@
+#include <linux/seq_file.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <asm/machvec.h>
+#include <asm/processor.h>
+
+static const char *cpu_name[] = {
+ [CPU_SH7201] = "SH7201",
+ [CPU_SH7203] = "SH7203", [CPU_SH7263] = "SH7263",
+ [CPU_SH7206] = "SH7206", [CPU_SH7619] = "SH7619",
+ [CPU_SH7705] = "SH7705", [CPU_SH7706] = "SH7706",
+ [CPU_SH7707] = "SH7707", [CPU_SH7708] = "SH7708",
+ [CPU_SH7709] = "SH7709", [CPU_SH7710] = "SH7710",
+ [CPU_SH7712] = "SH7712", [CPU_SH7720] = "SH7720",
+ [CPU_SH7721] = "SH7721", [CPU_SH7729] = "SH7729",
+ [CPU_SH7750] = "SH7750", [CPU_SH7750S] = "SH7750S",
+ [CPU_SH7750R] = "SH7750R", [CPU_SH7751] = "SH7751",
+ [CPU_SH7751R] = "SH7751R", [CPU_SH7760] = "SH7760",
+ [CPU_SH4_202] = "SH4-202", [CPU_SH4_501] = "SH4-501",
+ [CPU_SH7763] = "SH7763", [CPU_SH7770] = "SH7770",
+ [CPU_SH7780] = "SH7780", [CPU_SH7781] = "SH7781",
+ [CPU_SH7343] = "SH7343", [CPU_SH7785] = "SH7785",
+ [CPU_SH7786] = "SH7786", [CPU_SH7757] = "SH7757",
+ [CPU_SH7722] = "SH7722", [CPU_SHX3] = "SH-X3",
+ [CPU_SH5_101] = "SH5-101", [CPU_SH5_103] = "SH5-103",
+ [CPU_MXG] = "MX-G", [CPU_SH7723] = "SH7723",
+ [CPU_SH7366] = "SH7366", [CPU_SH7724] = "SH7724",
+ [CPU_SH7372] = "SH7372", [CPU_SH_NONE] = "Unknown"
+};
+
+const char *get_cpu_subtype(struct sh_cpuinfo *c)
+{
+ return cpu_name[c->type];
+}
+EXPORT_SYMBOL(get_cpu_subtype);
+
+#ifdef CONFIG_PROC_FS
+/* Symbolic CPU flags, keep in sync with asm/cpu-features.h */
+static const char *cpu_flags[] = {
+ "none", "fpu", "p2flush", "mmuassoc", "dsp", "perfctr",
+ "ptea", "llsc", "l2", "op32", "pteaex", NULL
+};
+
+static void show_cpuflags(struct seq_file *m, struct sh_cpuinfo *c)
+{
+ unsigned long i;
+
+ seq_printf(m, "cpu flags\t:");
+
+ if (!c->flags) {
+ seq_printf(m, " %s\n", cpu_flags[0]);
+ return;
+ }
+
+ for (i = 0; cpu_flags[i]; i++)
+ if ((c->flags & (1 << i)))
+ seq_printf(m, " %s", cpu_flags[i+1]);
+
+ seq_printf(m, "\n");
+}
+
+static void show_cacheinfo(struct seq_file *m, const char *type,
+ struct cache_info info)
+{
+ unsigned int cache_size;
+
+ cache_size = info.ways * info.sets * info.linesz;
+
+ seq_printf(m, "%s size\t: %2dKiB (%d-way)\n",
+ type, cache_size >> 10, info.ways);
+}
+
+/*
+ * Get CPU information for use by the procfs.
+ */
+static int show_cpuinfo(struct seq_file *m, void *v)
+{
+ struct sh_cpuinfo *c = v;
+ unsigned int cpu = c - cpu_data;
+
+ if (!cpu_online(cpu))
+ return 0;
+
+ if (cpu == 0)
+ seq_printf(m, "machine\t\t: %s\n", get_system_type());
+ else
+ seq_printf(m, "\n");
+
+ seq_printf(m, "processor\t: %d\n", cpu);
+ seq_printf(m, "cpu family\t: %s\n", init_utsname()->machine);
+ seq_printf(m, "cpu type\t: %s\n", get_cpu_subtype(c));
+ if (c->cut_major == -1)
+ seq_printf(m, "cut\t\t: unknown\n");
+ else if (c->cut_minor == -1)
+ seq_printf(m, "cut\t\t: %d.x\n", c->cut_major);
+ else
+ seq_printf(m, "cut\t\t: %d.%d\n", c->cut_major, c->cut_minor);
+
+ show_cpuflags(m, c);
+
+ seq_printf(m, "cache type\t: ");
+
+ /*
+ * Check which type of cache we have: both the unified cache
+ * on the SH-2 and SH-3 and the Harvard-style (split) cache
+ * on the SH-4 are supported.
+ */
+ if (c->icache.flags & SH_CACHE_COMBINED) {
+ seq_printf(m, "unified\n");
+ show_cacheinfo(m, "cache", c->icache);
+ } else {
+ seq_printf(m, "split (harvard)\n");
+ show_cacheinfo(m, "icache", c->icache);
+ show_cacheinfo(m, "dcache", c->dcache);
+ }
+
+ /* Optional secondary cache */
+ if (c->flags & CPU_HAS_L2_CACHE)
+ show_cacheinfo(m, "scache", c->scache);
+
+ seq_printf(m, "address sizes\t: %u bits physical\n", c->phys_bits);
+
+ seq_printf(m, "bogomips\t: %lu.%02lu\n",
+ c->loops_per_jiffy/(500000/HZ),
+ (c->loops_per_jiffy/(5000/HZ)) % 100);
+
+ return 0;
+}
+
+static void *c_start(struct seq_file *m, loff_t *pos)
+{
+ return *pos < NR_CPUS ? cpu_data + *pos : NULL;
+}
+static void *c_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ ++*pos;
+ return c_start(m, pos);
+}
+static void c_stop(struct seq_file *m, void *v)
+{
+}
+const struct seq_operations cpuinfo_op = {
+ .start = c_start,
+ .next = c_next,
+ .stop = c_stop,
+ .show = show_cpuinfo,
+};
+#endif /* CONFIG_PROC_FS */
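
For orientation, show_cpuinfo() above renders /proc/cpuinfo entries shaped roughly like the following (every value below is invented for illustration):

        machine         : SomeBoard
        processor       : 0
        cpu family      : sh
        cpu type        : SH7722
        cut             : 1.0
        cpu flags       : fpu llsc
        cache type      : split (harvard)
        icache size     : 32KiB (4-way)
        dcache size     : 32KiB (4-way)
        address sizes   : 32 bits physical
        bogomips        : 99.99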
diff --git a/arch/sh/kernel/cpu/sh2/Makefile b/arch/sh/kernel/cpu/sh2/Makefile
new file mode 100644
index 00000000..f0f059ac
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh2/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the Linux/SuperH SH-2 backends.
+#
+
+obj-y := ex.o probe.o entry.o
+
+obj-$(CONFIG_CPU_SUBTYPE_SH7619) += setup-sh7619.o clock-sh7619.o
diff --git a/arch/sh/kernel/cpu/sh2/clock-sh7619.c b/arch/sh/kernel/cpu/sh2/clock-sh7619.c
new file mode 100644
index 00000000..e80252ae
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh2/clock-sh7619.c
@@ -0,0 +1,77 @@
+/*
+ * arch/sh/kernel/cpu/sh2/clock-sh7619.c
+ *
+ * SH7619 support for the clock framework
+ *
+ * Copyright (C) 2006 Yoshinori Sato
+ *
+ * Based on clock-sh4.c
+ * Copyright (C) 2005 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <asm/clock.h>
+#include <asm/freq.h>
+#include <asm/processor.h>
+
+static const int pll1rate[] = {1,2};
+static const int pfc_divisors[] = {1,2,0,4};
+static unsigned int pll2_mult;
+
+static void master_clk_init(struct clk *clk)
+{
+ clk->rate *= pll2_mult * pll1rate[(__raw_readw(FREQCR) >> 8) & 7];
+}
+
+static struct sh_clk_ops sh7619_master_clk_ops = {
+ .init = master_clk_init,
+};
+
+static unsigned long module_clk_recalc(struct clk *clk)
+{
+ int idx = (__raw_readw(FREQCR) & 0x0007);
+ return clk->parent->rate / pfc_divisors[idx];
+}
+
+static struct sh_clk_ops sh7619_module_clk_ops = {
+ .recalc = module_clk_recalc,
+};
+
+static unsigned long bus_clk_recalc(struct clk *clk)
+{
+ return clk->parent->rate / pll1rate[(__raw_readw(FREQCR) >> 8) & 7];
+}
+
+static struct sh_clk_ops sh7619_bus_clk_ops = {
+ .recalc = bus_clk_recalc,
+};
+
+static struct sh_clk_ops sh7619_cpu_clk_ops = {
+ .recalc = followparent_recalc,
+};
+
+static struct sh_clk_ops *sh7619_clk_ops[] = {
+ &sh7619_master_clk_ops,
+ &sh7619_module_clk_ops,
+ &sh7619_bus_clk_ops,
+ &sh7619_cpu_clk_ops,
+};
+
+void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
+{
+ if (test_mode_pin(MODE_PIN2 | MODE_PIN0) ||
+ test_mode_pin(MODE_PIN2 | MODE_PIN1))
+ pll2_mult = 2;
+ else if (test_mode_pin(MODE_PIN0) || test_mode_pin(MODE_PIN1))
+ pll2_mult = 4;
+
+ BUG_ON(!pll2_mult);
+
+ if (idx < ARRAY_SIZE(sh7619_clk_ops))
+ *ops = sh7619_clk_ops[idx];
+}
diff --git a/arch/sh/kernel/cpu/sh2/entry.S b/arch/sh/kernel/cpu/sh2/entry.S
new file mode 100644
index 00000000..c8a4331d
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh2/entry.S
@@ -0,0 +1,321 @@
+/*
+ * arch/sh/kernel/cpu/sh2/entry.S
+ *
+ * The SH-2 exception entry
+ *
+ * Copyright (C) 2005-2008 Yoshinori Sato
+ * Copyright (C) 2005 AXE,Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+#include <asm/thread_info.h>
+#include <cpu/mmu_context.h>
+#include <asm/unistd.h>
+#include <asm/errno.h>
+#include <asm/page.h>
+
+/* Offsets to the stack */
+OFF_R0 = 0 /* Return value. New ABI also arg4 */
+OFF_R1 = 4 /* New ABI: arg5 */
+OFF_R2 = 8 /* New ABI: arg6 */
+OFF_R3 = 12 /* New ABI: syscall_nr */
+OFF_R4 = 16 /* New ABI: arg0 */
+OFF_R5 = 20 /* New ABI: arg1 */
+OFF_R6 = 24 /* New ABI: arg2 */
+OFF_R7 = 28 /* New ABI: arg3 */
+OFF_SP = (15*4)
+OFF_PC = (16*4)
+OFF_SR = (16*4+2*4)
+OFF_TRA = (16*4+6*4)
+
+#include <asm/entry-macros.S>
+
+ENTRY(exception_handler)
+ ! stack
+ ! r0 <- point sp
+ ! r1
+ ! pc
+ ! sr
+ ! r0 = temporary
+ ! r1 = vector (pseudo EXPEVT / INTEVT / TRA)
+ mov.l r2,@-sp
+ mov.l r3,@-sp
+ cli
+ mov.l $cpu_mode,r2
+ mov.l @r2,r0
+ mov.l @(5*4,r15),r3 ! previous SR
+ or r0,r3 ! set MD
+ tst r0,r0
+ bf/s 1f ! previous mode check
+ mov.l r3,@(5*4,r15) ! update SR
+ ! switch to kernel mode
+ mov.l __md_bit,r0
+ mov.l r0,@r2 ! enter kernel mode
+ mov.l $current_thread_info,r2
+ mov.l @r2,r2
+ mov #(THREAD_SIZE >> 8),r0
+ shll8 r0
+ add r2,r0
+ mov r15,r2 ! r2 = user stack top
+ mov r0,r15 ! switch kernel stack
+ mov.l r1,@-r15 ! TRA
+ sts.l macl, @-r15
+ sts.l mach, @-r15
+ stc.l gbr, @-r15
+ mov.l @(5*4,r2),r0
+ mov.l r0,@-r15 ! original SR
+ sts.l pr,@-r15
+ mov.l @(4*4,r2),r0
+ mov.l r0,@-r15 ! original PC
+ mov r2,r3
+ add #(4+2)*4,r3 ! rewind r0 - r3 + exception frame
+ mov.l r3,@-r15 ! original SP
+ mov.l r14,@-r15
+ mov.l r13,@-r15
+ mov.l r12,@-r15
+ mov.l r11,@-r15
+ mov.l r10,@-r15
+ mov.l r9,@-r15
+ mov.l r8,@-r15
+ mov.l r7,@-r15
+ mov.l r6,@-r15
+ mov.l r5,@-r15
+ mov.l r4,@-r15
+ mov r1,r9 ! save TRA
+ mov r2,r8 ! copy user -> kernel stack
+ mov.l @(0,r8),r3
+ mov.l r3,@-r15
+ mov.l @(4,r8),r2
+ mov.l r2,@-r15
+ mov.l @(12,r8),r1
+ mov.l r1,@-r15
+ mov.l @(8,r8),r0
+ bra 2f
+ mov.l r0,@-r15
+1:
+ ! in kernel exception
+ mov #(22-4-4-1)*4+4,r0
+ mov r15,r2
+ sub r0,r15
+ mov.l @r2+,r0 ! old R3
+ mov.l r0,@-r15
+ mov.l @r2+,r0 ! old R2
+ mov.l r0,@-r15
+ mov.l @(4,r2),r0 ! old R1
+ mov.l r0,@-r15
+ mov.l @r2,r0 ! old R0
+ mov.l r0,@-r15
+ add #8,r2
+ mov.l @r2+,r3 ! old PC
+ mov.l @r2+,r0 ! old SR
+ add #-4,r2 ! exception frame stub (sr)
+ mov.l r1,@-r2 ! TRA
+ sts.l macl, @-r2
+ sts.l mach, @-r2
+ stc.l gbr, @-r2
+ mov.l r0,@-r2 ! save old SR
+ sts.l pr,@-r2
+ mov.l r3,@-r2 ! save old PC
+ mov r2,r0
+ add #8*4,r0
+ mov.l r0,@-r2 ! save old SP
+ mov.l r14,@-r2
+ mov.l r13,@-r2
+ mov.l r12,@-r2
+ mov.l r11,@-r2
+ mov.l r10,@-r2
+ mov.l r9,@-r2
+ mov.l r8,@-r2
+ mov.l r7,@-r2
+ mov.l r6,@-r2
+ mov.l r5,@-r2
+ mov.l r4,@-r2
+ mov r1,r9
+ mov.l @(OFF_R0,r15),r0
+ mov.l @(OFF_R1,r15),r1
+ mov.l @(OFF_R2,r15),r2
+ mov.l @(OFF_R3,r15),r3
+2:
+ mov #64,r8
+ cmp/hs r8,r9
+ bt interrupt_entry ! vec >= 64 is interrupt
+ mov #32,r8
+ cmp/hs r8,r9
+ bt trap_entry ! 64 > vec >= 32 is trap
+
+ mov.l 4f,r8
+ mov r9,r4
+ shll2 r9
+ add r9,r8
+ mov.l @r8,r8 ! exception handler address
+ tst r8,r8
+ bf 3f
+ mov.l 8f,r8 ! unhandled exception
+3:
+ mov.l 5f,r10
+ jmp @r8
+ lds r10,pr
+
+interrupt_entry:
+ mov r9,r4
+ mov r15,r5
+ mov.l 6f,r9
+ mov.l 7f,r8
+ jmp @r8
+ lds r9,pr
+
+ .align 2
+4: .long exception_handling_table
+5: .long ret_from_exception
+6: .long ret_from_irq
+7: .long do_IRQ
+8: .long exception_error
+
+trap_entry:
+ mov #0x30,r8
+ cmp/ge r8,r9 ! vector 0x20-0x2f is systemcall
+ bt 1f
+ add #-0x10,r9 ! convert SH2 to SH3/4 ABI
+1:
+ shll2 r9 ! TRA
+ bra system_call ! jump common systemcall entry
+ mov r9,r8
+
+#if defined(CONFIG_SH_STANDARD_BIOS)
+ /* Unwind the stack and jmp to the debug entry */
+ENTRY(sh_bios_handler)
+ mov r15,r0
+ add #(22-4)*4-4,r0
+ ldc.l @r0+,gbr
+ lds.l @r0+,mach
+ lds.l @r0+,macl
+ mov r15,r0
+ mov.l @(OFF_SP,r0),r1
+ mov #OFF_SR,r2
+ mov.l @(r0,r2),r3
+ mov.l r3,@-r1
+ mov #OFF_SP,r2
+ mov.l @(r0,r2),r3
+ mov.l r3,@-r1
+ mov r15,r0
+ add #(22-4)*4-8,r0
+ mov.l 1f,r2
+ mov.l @r2,r2
+ stc sr,r3
+ mov.l r2,@r0
+ mov.l r3,@(4,r0)
+ mov.l r1,@(8,r0)
+ mov.l @r15+, r0
+ mov.l @r15+, r1
+ mov.l @r15+, r2
+ mov.l @r15+, r3
+ mov.l @r15+, r4
+ mov.l @r15+, r5
+ mov.l @r15+, r6
+ mov.l @r15+, r7
+ mov.l @r15+, r8
+ mov.l @r15+, r9
+ mov.l @r15+, r10
+ mov.l @r15+, r11
+ mov.l @r15+, r12
+ mov.l @r15+, r13
+ mov.l @r15+, r14
+ add #8,r15
+ lds.l @r15+, pr
+ mov.l @r15+,r15
+ rte
+ nop
+ .align 2
+1: .long gdb_vbr_vector
+#endif /* CONFIG_SH_STANDARD_BIOS */
+
+ENTRY(address_error_trap_handler)
+ mov r15,r4 ! regs
+ mov #OFF_PC,r0
+ mov.l @(r0,r15),r6 ! pc
+ mov.l 1f,r0
+ jmp @r0
+ mov #0,r5 ! writeaccess is unknown
+
+ .align 2
+1: .long do_address_error
+
+restore_all:
+ stc sr,r0
+ or #0xf0,r0
+ ldc r0,sr ! all interrupt block (same BL = 1)
+ ! restore special register
+ ! overlap exception frame
+ mov r15,r0
+ add #17*4,r0
+ lds.l @r0+,pr
+ add #4,r0
+ ldc.l @r0+,gbr
+ lds.l @r0+,mach
+ lds.l @r0+,macl
+ mov r15,r0
+ mov.l $cpu_mode,r2
+ mov #OFF_SR,r3
+ mov.l @(r0,r3),r1
+ mov.l __md_bit,r3
+ and r1,r3 ! copy MD bit
+ mov.l r3,@r2
+ shll2 r1 ! clear MD bit
+ shlr2 r1
+ mov.l @(OFF_SP,r0),r2
+ add #-8,r2
+ mov.l r2,@(OFF_SP,r0) ! point exception frame top
+ mov.l r1,@(4,r2) ! set sr
+ mov #OFF_PC,r3
+ mov.l @(r0,r3),r1
+ mov.l r1,@r2 ! set pc
+ get_current_thread_info r0, r1
+ mov.l $current_thread_info,r1
+ mov.l r0,@r1
+ mov.l @r15+,r0
+ mov.l @r15+,r1
+ mov.l @r15+,r2
+ mov.l @r15+,r3
+ mov.l @r15+,r4
+ mov.l @r15+,r5
+ mov.l @r15+,r6
+ mov.l @r15+,r7
+ mov.l @r15+,r8
+ mov.l @r15+,r9
+ mov.l @r15+,r10
+ mov.l @r15+,r11
+ mov.l @r15+,r12
+ mov.l @r15+,r13
+ mov.l @r15+,r14
+ mov.l @r15,r15
+ rte
+ nop
+
+ .align 2
+__md_bit:
+ .long 0x40000000
+$current_thread_info:
+ .long __current_thread_info
+$cpu_mode:
+ .long __cpu_mode
+
+! common exception handler
+#include "../../entry-common.S"
+
+ .data
+! cpu operation mode
+! bit30 = MD (compatible SH3/4)
+__cpu_mode:
+ .long 0x40000000
+
+ .section .bss
+__current_thread_info:
+ .long 0
+
+ENTRY(exception_handling_table)
+ .space 4*32
diff --git a/arch/sh/kernel/cpu/sh2/ex.S b/arch/sh/kernel/cpu/sh2/ex.S
new file mode 100644
index 00000000..85b0bf81
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh2/ex.S
@@ -0,0 +1,47 @@
+/*
+ * arch/sh/kernel/cpu/sh2/ex.S
+ *
+ * The SH-2 exception vector table
+ *
+ * Copyright (C) 2005 Yoshinori Sato
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/linkage.h>
+
+!
+! convert Exception Vector to Exception Number
+!
+exception_entry:
+no = 0
+ .rept 256
+ mov.l r1,@-sp
+ bra exception_trampoline
+ mov #no,r1
+no = no + 1
+ .endr
+exception_trampoline:
+ mov.l r0,@-sp
+ mov.l $exception_handler,r0
+ extu.b r1,r1
+ jmp @r0
+ extu.w r1,r1
+
+ .align 2
+$exception_entry:
+ .long exception_entry
+$exception_handler:
+ .long exception_handler
+!
+! Exception Vector Base
+!
+ .align 2
+ENTRY(vbr_base)
+vector = 0
+ .rept 256
+ .long exception_entry + vector * 6
+vector = vector + 1
+ .endr
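
Note on the "exception_entry + vector * 6" arithmetic above: each .rept stub consists of three 16-bit SH instructions (mov.l r1,@-sp / bra exception_trampoline / mov #no,r1 in the delay slot), i.e. exactly 6 bytes, so multiplying the vector number by 6 lands on the matching stub.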
diff --git a/arch/sh/kernel/cpu/sh2/probe.c b/arch/sh/kernel/cpu/sh2/probe.c
new file mode 100644
index 00000000..bab8e759
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh2/probe.c
@@ -0,0 +1,33 @@
+/*
+ * arch/sh/kernel/cpu/sh2/probe.c
+ *
+ * CPU Subtype Probing for SH-2.
+ *
+ * Copyright (C) 2002 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <asm/processor.h>
+#include <asm/cache.h>
+
+void __cpuinit cpu_probe(void)
+{
+#if defined(CONFIG_CPU_SUBTYPE_SH7619)
+ boot_cpu_data.type = CPU_SH7619;
+ boot_cpu_data.dcache.ways = 4;
+ boot_cpu_data.dcache.way_incr = (1<<12);
+ boot_cpu_data.dcache.sets = 256;
+ boot_cpu_data.dcache.entry_shift = 4;
+ boot_cpu_data.dcache.linesz = L1_CACHE_BYTES;
+ boot_cpu_data.dcache.flags = 0;
+#endif
+ /*
+ * SH-2 doesn't have separate caches
+ */
+ boot_cpu_data.dcache.flags |= SH_CACHE_COMBINED;
+ boot_cpu_data.icache = boot_cpu_data.dcache;
+ boot_cpu_data.family = CPU_FAMILY_SH2;
+}
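
A quick sanity check on the SH7619 cache geometry above, using the way_size = sets * linesz computation done later in cpu/init.c: assuming L1_CACHE_BYTES is 16 on SH-2, one way is 256 sets * 16 bytes = 4KiB (matching way_incr = 1 << 12), so the combined instruction/data cache totals 4 ways * 4KiB = 16KiB.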
diff --git a/arch/sh/kernel/cpu/sh2/setup-sh7619.c b/arch/sh/kernel/cpu/sh2/setup-sh7619.c
new file mode 100644
index 00000000..0f8befcc
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh2/setup-sh7619.c
@@ -0,0 +1,232 @@
+/*
+ * SH7619 Setup
+ *
+ * Copyright (C) 2006 Yoshinori Sato
+ * Copyright (C) 2009 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/serial.h>
+#include <linux/serial_sci.h>
+#include <linux/sh_timer.h>
+#include <linux/io.h>
+
+enum {
+ UNUSED = 0,
+
+ /* interrupt sources */
+ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
+ WDT, EDMAC, CMT0, CMT1,
+ SCIF0, SCIF1, SCIF2,
+ HIF_HIFI, HIF_HIFBI,
+ DMAC0, DMAC1, DMAC2, DMAC3,
+ SIOF,
+};
+
+static struct intc_vect vectors[] __initdata = {
+ INTC_IRQ(IRQ0, 64), INTC_IRQ(IRQ1, 65),
+ INTC_IRQ(IRQ2, 66), INTC_IRQ(IRQ3, 67),
+ INTC_IRQ(IRQ4, 80), INTC_IRQ(IRQ5, 81),
+ INTC_IRQ(IRQ6, 82), INTC_IRQ(IRQ7, 83),
+ INTC_IRQ(WDT, 84), INTC_IRQ(EDMAC, 85),
+ INTC_IRQ(CMT0, 86), INTC_IRQ(CMT1, 87),
+ INTC_IRQ(SCIF0, 88), INTC_IRQ(SCIF0, 89),
+ INTC_IRQ(SCIF0, 90), INTC_IRQ(SCIF0, 91),
+ INTC_IRQ(SCIF1, 92), INTC_IRQ(SCIF1, 93),
+ INTC_IRQ(SCIF1, 94), INTC_IRQ(SCIF1, 95),
+ INTC_IRQ(SCIF2, 96), INTC_IRQ(SCIF2, 97),
+ INTC_IRQ(SCIF2, 98), INTC_IRQ(SCIF2, 99),
+ INTC_IRQ(HIF_HIFI, 100), INTC_IRQ(HIF_HIFBI, 101),
+ INTC_IRQ(DMAC0, 104), INTC_IRQ(DMAC1, 105),
+ INTC_IRQ(DMAC2, 106), INTC_IRQ(DMAC3, 107),
+ INTC_IRQ(SIOF, 108),
+};
+
+static struct intc_prio_reg prio_registers[] __initdata = {
+ { 0xf8140006, 0, 16, 4, /* IPRA */ { IRQ0, IRQ1, IRQ2, IRQ3 } },
+ { 0xf8140008, 0, 16, 4, /* IPRB */ { IRQ4, IRQ5, IRQ6, IRQ7 } },
+ { 0xf8080000, 0, 16, 4, /* IPRC */ { WDT, EDMAC, CMT0, CMT1 } },
+ { 0xf8080002, 0, 16, 4, /* IPRD */ { SCIF0, SCIF1, SCIF2 } },
+ { 0xf8080004, 0, 16, 4, /* IPRE */ { HIF_HIFI, HIF_HIFBI } },
+ { 0xf8080006, 0, 16, 4, /* IPRF */ { DMAC0, DMAC1, DMAC2, DMAC3 } },
+ { 0xf8080008, 0, 16, 4, /* IPRG */ { SIOF } },
+};
+
+static DECLARE_INTC_DESC(intc_desc, "sh7619", vectors, NULL,
+ NULL, prio_registers, NULL);
+
+static struct plat_sci_port scif0_platform_data = {
+ .mapbase = 0xf8400000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .scbrr_algo_id = SCBRR_ALGO_2,
+ .type = PORT_SCIF,
+ .irqs = { 88, 88, 88, 88 },
+};
+
+static struct platform_device scif0_device = {
+ .name = "sh-sci",
+ .id = 0,
+ .dev = {
+ .platform_data = &scif0_platform_data,
+ },
+};
+
+static struct plat_sci_port scif1_platform_data = {
+ .mapbase = 0xf8410000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .scbrr_algo_id = SCBRR_ALGO_2,
+ .type = PORT_SCIF,
+ .irqs = { 92, 92, 92, 92 },
+};
+
+static struct platform_device scif1_device = {
+ .name = "sh-sci",
+ .id = 1,
+ .dev = {
+ .platform_data = &scif1_platform_data,
+ },
+};
+
+static struct plat_sci_port scif2_platform_data = {
+ .mapbase = 0xf8420000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .scbrr_algo_id = SCBRR_ALGO_2,
+ .type = PORT_SCIF,
+ .irqs = { 96, 96, 96, 96 },
+};
+
+static struct platform_device scif2_device = {
+ .name = "sh-sci",
+ .id = 2,
+ .dev = {
+ .platform_data = &scif2_platform_data,
+ },
+};
+
+static struct resource eth_resources[] = {
+ [0] = {
+ .start = 0xfb000000,
+ .end = 0xfb0001c8,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 85,
+ .end = 85,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device eth_device = {
+ .name = "sh-eth",
+ .id = -1,
+ .dev = {
+ .platform_data = (void *)1,
+ },
+ .num_resources = ARRAY_SIZE(eth_resources),
+ .resource = eth_resources,
+};
+
+static struct sh_timer_config cmt0_platform_data = {
+ .channel_offset = 0x02,
+ .timer_bit = 0,
+ .clockevent_rating = 125,
+ .clocksource_rating = 0, /* disabled due to code generation issues */
+};
+
+static struct resource cmt0_resources[] = {
+ [0] = {
+ .start = 0xf84a0072,
+ .end = 0xf84a0077,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 86,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device cmt0_device = {
+ .name = "sh_cmt",
+ .id = 0,
+ .dev = {
+ .platform_data = &cmt0_platform_data,
+ },
+ .resource = cmt0_resources,
+ .num_resources = ARRAY_SIZE(cmt0_resources),
+};
+
+static struct sh_timer_config cmt1_platform_data = {
+ .channel_offset = 0x08,
+ .timer_bit = 1,
+ .clockevent_rating = 125,
+ .clocksource_rating = 0, /* disabled due to code generation issues */
+};
+
+static struct resource cmt1_resources[] = {
+ [0] = {
+ .start = 0xf84a0078,
+ .end = 0xf84a007d,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 87,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device cmt1_device = {
+ .name = "sh_cmt",
+ .id = 1,
+ .dev = {
+ .platform_data = &cmt1_platform_data,
+ },
+ .resource = cmt1_resources,
+ .num_resources = ARRAY_SIZE(cmt1_resources),
+};
+
+static struct platform_device *sh7619_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &eth_device,
+ &cmt0_device,
+ &cmt1_device,
+};
+
+static int __init sh7619_devices_setup(void)
+{
+ return platform_add_devices(sh7619_devices,
+ ARRAY_SIZE(sh7619_devices));
+}
+arch_initcall(sh7619_devices_setup);
+
+void __init plat_irq_setup(void)
+{
+ register_intc_controller(&intc_desc);
+}
+
+static struct platform_device *sh7619_early_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &cmt0_device,
+ &cmt1_device,
+};
+
+#define STBCR3 0xf80a0000
+
+void __init plat_early_device_setup(void)
+{
+ /* enable CMT clock */
+ __raw_writeb(__raw_readb(STBCR3) & ~0x10, STBCR3);
+
+ early_platform_add_devices(sh7619_early_devices,
+ ARRAY_SIZE(sh7619_early_devices));
+}
diff --git a/arch/sh/kernel/cpu/sh2a/Makefile b/arch/sh/kernel/cpu/sh2a/Makefile
new file mode 100644
index 00000000..45f85c77
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh2a/Makefile
@@ -0,0 +1,20 @@
+#
+# Makefile for the Linux/SuperH SH-2A backends.
+#
+
+obj-y := common.o probe.o opcode_helper.o
+
+common-y += ex.o entry.o
+
+obj-$(CONFIG_SH_FPU) += fpu.o
+
+obj-$(CONFIG_CPU_SUBTYPE_SH7201) += setup-sh7201.o clock-sh7201.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7203) += setup-sh7203.o clock-sh7203.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7263) += setup-sh7203.o clock-sh7203.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7206) += setup-sh7206.o clock-sh7206.o
+obj-$(CONFIG_CPU_SUBTYPE_MXG) += setup-mxg.o clock-sh7206.o
+
+# Pinmux setup
+pinmux-$(CONFIG_CPU_SUBTYPE_SH7203) := pinmux-sh7203.o
+
+obj-$(CONFIG_GENERIC_GPIO) += $(pinmux-y)
diff --git a/arch/sh/kernel/cpu/sh2a/clock-sh7201.c b/arch/sh/kernel/cpu/sh2a/clock-sh7201.c
new file mode 100644
index 00000000..532a36c7
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh2a/clock-sh7201.c
@@ -0,0 +1,85 @@
+/*
+ * arch/sh/kernel/cpu/sh2a/clock-sh7201.c
+ *
+ * SH7201 support for the clock framework
+ *
+ * Copyright (C) 2008 Peter Griffin <pgriffin@mpc-data.co.uk>
+ *
+ * Based on clock-sh4.c
+ * Copyright (C) 2005 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <asm/clock.h>
+#include <asm/freq.h>
+#include <asm/io.h>
+
+static const int pll1rate[]={1,2,3,4,6,8};
+static const int pfc_divisors[]={1,2,3,4,6,8,12};
+#define ifc_divisors pfc_divisors
+
+static unsigned int pll2_mult;
+
+static void master_clk_init(struct clk *clk)
+{
+ clk->rate = 10000000 * pll2_mult *
+ pll1rate[(__raw_readw(FREQCR) >> 8) & 0x0007];
+}
+
+static struct sh_clk_ops sh7201_master_clk_ops = {
+ .init = master_clk_init,
+};
+
+static unsigned long module_clk_recalc(struct clk *clk)
+{
+ int idx = (__raw_readw(FREQCR) & 0x0007);
+ return clk->parent->rate / pfc_divisors[idx];
+}
+
+static struct sh_clk_ops sh7201_module_clk_ops = {
+ .recalc = module_clk_recalc,
+};
+
+static unsigned long bus_clk_recalc(struct clk *clk)
+{
+ int idx = (__raw_readw(FREQCR) & 0x0007);
+ return clk->parent->rate / pfc_divisors[idx];
+}
+
+static struct sh_clk_ops sh7201_bus_clk_ops = {
+ .recalc = bus_clk_recalc,
+};
+
+static unsigned long cpu_clk_recalc(struct clk *clk)
+{
+ int idx = ((__raw_readw(FREQCR) >> 4) & 0x0007);
+ return clk->parent->rate / ifc_divisors[idx];
+}
+
+static struct sh_clk_ops sh7201_cpu_clk_ops = {
+ .recalc = cpu_clk_recalc,
+};
+
+static struct sh_clk_ops *sh7201_clk_ops[] = {
+ &sh7201_master_clk_ops,
+ &sh7201_module_clk_ops,
+ &sh7201_bus_clk_ops,
+ &sh7201_cpu_clk_ops,
+};
+
+void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
+{
+ if (test_mode_pin(MODE_PIN1 | MODE_PIN0))
+ pll2_mult = 1;
+ else if (test_mode_pin(MODE_PIN1))
+ pll2_mult = 2;
+ else
+ pll2_mult = 4;
+
+ if (idx < ARRAY_SIZE(sh7201_clk_ops))
+ *ops = sh7201_clk_ops[idx];
+}
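master_clk_init() above computes the master clock as 10 MHz * pll2_mult * pll1rate[(FREQCR >> 8) & 7], and the module, bus and CPU clocks divide their parent by the divisor FREQCR selects. A hedged sketch of that arithmetic; the FREQCR value and mode-pin assumption below are made up purely for illustration:

#include <stdio.h>

/* Sketch of the SH7201 clock arithmetic from clock-sh7201.c.  The FREQCR
 * value and pll2_mult below are hypothetical, chosen only to show the
 * table lookups; they do not describe a real board.
 */
static const int pll1rate[]     = { 1, 2, 3, 4, 6, 8 };
static const int pfc_divisors[] = { 1, 2, 3, 4, 6, 8, 12 };

int main(void)
{
	unsigned short freqcr = 0x0102;	/* hypothetical: PLL1 x2, P-clock /3 */
	unsigned int pll2_mult = 4;	/* as when MODE_PIN1 is not set */

	unsigned long master = 10000000UL * pll2_mult *
			       pll1rate[(freqcr >> 8) & 0x0007];
	unsigned long module = master / pfc_divisors[freqcr & 0x0007];
	unsigned long cpu    = master / pfc_divisors[(freqcr >> 4) & 0x0007];

	printf("master %lu Hz, module %lu Hz, cpu %lu Hz\n",
	       master, module, cpu);
	return 0;
}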
diff --git a/arch/sh/kernel/cpu/sh2a/clock-sh7203.c b/arch/sh/kernel/cpu/sh2a/clock-sh7203.c
new file mode 100644
index 00000000..529f719b
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh2a/clock-sh7203.c
@@ -0,0 +1,81 @@
+/*
+ * arch/sh/kernel/cpu/sh2a/clock-sh7203.c
+ *
+ * SH7203 support for the clock framework
+ *
+ * Copyright (C) 2007 Kieran Bingham (MPC-Data Ltd)
+ *
+ * Based on clock-sh7263.c
+ * Copyright (C) 2006 Yoshinori Sato
+ *
+ * Based on clock-sh4.c
+ * Copyright (C) 2005 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <asm/clock.h>
+#include <asm/freq.h>
+#include <asm/io.h>
+
+static const int pll1rate[]={8,12,16,0};
+static const int pfc_divisors[]={1,2,3,4,6,8,12};
+#define ifc_divisors pfc_divisors
+
+static unsigned int pll2_mult;
+
+static void master_clk_init(struct clk *clk)
+{
+ clk->rate *= pll1rate[(__raw_readw(FREQCR) >> 8) & 0x0003] * pll2_mult;
+}
+
+static struct sh_clk_ops sh7203_master_clk_ops = {
+ .init = master_clk_init,
+};
+
+static unsigned long module_clk_recalc(struct clk *clk)
+{
+ int idx = (__raw_readw(FREQCR) & 0x0007);
+ return clk->parent->rate / pfc_divisors[idx];
+}
+
+static struct sh_clk_ops sh7203_module_clk_ops = {
+ .recalc = module_clk_recalc,
+};
+
+static unsigned long bus_clk_recalc(struct clk *clk)
+{
+ int idx = (__raw_readw(FREQCR) & 0x0007);
+ return clk->parent->rate / pfc_divisors[idx-2];
+}
+
+static struct sh_clk_ops sh7203_bus_clk_ops = {
+ .recalc = bus_clk_recalc,
+};
+
+static struct sh_clk_ops sh7203_cpu_clk_ops = {
+ .recalc = followparent_recalc,
+};
+
+static struct sh_clk_ops *sh7203_clk_ops[] = {
+ &sh7203_master_clk_ops,
+ &sh7203_module_clk_ops,
+ &sh7203_bus_clk_ops,
+ &sh7203_cpu_clk_ops,
+};
+
+void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
+{
+ if (test_mode_pin(MODE_PIN1))
+ pll2_mult = 4;
+ else if (test_mode_pin(MODE_PIN0))
+ pll2_mult = 2;
+ else
+ pll2_mult = 1;
+
+ if (idx < ARRAY_SIZE(sh7203_clk_ops))
+ *ops = sh7203_clk_ops[idx];
+}
diff --git a/arch/sh/kernel/cpu/sh2a/clock-sh7206.c b/arch/sh/kernel/cpu/sh2a/clock-sh7206.c
new file mode 100644
index 00000000..17778983
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh2a/clock-sh7206.c
@@ -0,0 +1,83 @@
+/*
+ * arch/sh/kernel/cpu/sh2a/clock-sh7206.c
+ *
+ * SH7206 support for the clock framework
+ *
+ * Copyright (C) 2006 Yoshinori Sato
+ *
+ * Based on clock-sh4.c
+ * Copyright (C) 2005 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <asm/clock.h>
+#include <asm/freq.h>
+#include <asm/io.h>
+
+static const int pll1rate[]={1,2,3,4,6,8};
+static const int pfc_divisors[]={1,2,3,4,6,8,12};
+#define ifc_divisors pfc_divisors
+
+static unsigned int pll2_mult;
+
+static void master_clk_init(struct clk *clk)
+{
+ clk->rate *= pll2_mult * pll1rate[(__raw_readw(FREQCR) >> 8) & 0x0007];
+}
+
+static struct sh_clk_ops sh7206_master_clk_ops = {
+ .init = master_clk_init,
+};
+
+static unsigned long module_clk_recalc(struct clk *clk)
+{
+ int idx = (__raw_readw(FREQCR) & 0x0007);
+ return clk->parent->rate / pfc_divisors[idx];
+}
+
+static struct sh_clk_ops sh7206_module_clk_ops = {
+ .recalc = module_clk_recalc,
+};
+
+static unsigned long bus_clk_recalc(struct clk *clk)
+{
+ return clk->parent->rate / pll1rate[(__raw_readw(FREQCR) >> 8) & 0x0007];
+}
+
+static struct sh_clk_ops sh7206_bus_clk_ops = {
+ .recalc = bus_clk_recalc,
+};
+
+static unsigned long cpu_clk_recalc(struct clk *clk)
+{
+ int idx = (__raw_readw(FREQCR) & 0x0007);
+ return clk->parent->rate / ifc_divisors[idx];
+}
+
+static struct sh_clk_ops sh7206_cpu_clk_ops = {
+ .recalc = cpu_clk_recalc,
+};
+
+static struct sh_clk_ops *sh7206_clk_ops[] = {
+ &sh7206_master_clk_ops,
+ &sh7206_module_clk_ops,
+ &sh7206_bus_clk_ops,
+ &sh7206_cpu_clk_ops,
+};
+
+void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
+{
+ if (test_mode_pin(MODE_PIN2 | MODE_PIN1 | MODE_PIN0))
+ pll2_mult = 1;
+ else if (test_mode_pin(MODE_PIN2 | MODE_PIN1))
+ pll2_mult = 2;
+ else if (test_mode_pin(MODE_PIN1))
+ pll2_mult = 4;
+
+ if (idx < ARRAY_SIZE(sh7206_clk_ops))
+ *ops = sh7206_clk_ops[idx];
+}
diff --git a/arch/sh/kernel/cpu/sh2a/entry.S b/arch/sh/kernel/cpu/sh2a/entry.S
new file mode 100644
index 00000000..222742dd
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh2a/entry.S
@@ -0,0 +1,250 @@
+/*
+ * arch/sh/kernel/cpu/sh2a/entry.S
+ *
+ * The SH-2A exception entry
+ *
+ * Copyright (C) 2008 Yoshinori Sato
+ * Based on arch/sh/kernel/cpu/sh2/entry.S
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+#include <asm/thread_info.h>
+#include <cpu/mmu_context.h>
+#include <asm/unistd.h>
+#include <asm/errno.h>
+#include <asm/page.h>
+
+/* Offsets to the stack */
+OFF_R0 = 0 /* Return value. New ABI also arg4 */
+OFF_R1 = 4 /* New ABI: arg5 */
+OFF_R2 = 8 /* New ABI: arg6 */
+OFF_R3 = 12 /* New ABI: syscall_nr */
+OFF_R4 = 16 /* New ABI: arg0 */
+OFF_R5 = 20 /* New ABI: arg1 */
+OFF_R6 = 24 /* New ABI: arg2 */
+OFF_R7 = 28 /* New ABI: arg3 */
+OFF_SP = (15*4)
+OFF_PC = (16*4)
+OFF_SR = (16*4+2*4)
+OFF_TRA = (16*4+6*4)
+
+#include <asm/entry-macros.S>
+
+ENTRY(exception_handler)
+ ! stack layout on entry:
+ ! r0 <- sp points here
+ ! r1
+ ! pc
+ ! sr
+ ! r0 = temporary
+ ! r1 = vector (pseudo EXPEVT / INTEVT / TRA)
+ mov.l r2,@-sp
+ cli
+ mov.l $cpu_mode,r2
+ bld.b #6,@(0,r2) !previous SR.MD
+ bst.b #6,@(4*4,r15) !set cpu mode to SR.MD
+ bt 1f
+ ! switch to kernel mode
+ bset.b #6,@(0,r2) !set SR.MD
+ mov.l $current_thread_info,r2
+ mov.l @r2,r2
+ mov #(THREAD_SIZE >> 8),r0
+ shll8 r0
+ add r2,r0 ! r0 = kernel stack tail
+ mov r15,r2 ! r2 = user stack top
+ mov r0,r15 ! switch kernel stack
+ mov.l r1,@-r15 ! TRA
+ sts.l macl, @-r15
+ sts.l mach, @-r15
+ stc.l gbr, @-r15
+ mov.l @(4*4,r2),r0
+ mov.l r0,@-r15 ! original SR
+ sts.l pr,@-r15
+ mov.l @(3*4,r2),r0
+ mov.l r0,@-r15 ! original PC
+ mov r2,r0
+ add #(3+2)*4,r0 ! rewind r0 - r3 + exception frame
+ lds r0,pr ! pr = original SP
+ movmu.l r3,@-r15 ! save regs
+ mov r2,r8 ! r8 = previous stack top
+ mov r1,r9 ! r9 = interrupt vector
+ ! restore previous stack
+ mov.l @r8+,r2
+ mov.l @r8+,r0
+ mov.l @r8+,r1
+ bra 2f
+ movml.l r2,@-r15
+1:
+ ! in kernel exception
+ mov r15,r2
+ add #-((OFF_TRA + 4) - OFF_PC) + 5*4,r15
+ movmu.l r3,@-r15
+ mov r2,r8 ! r8 = previous stack top
+ mov r1,r9 ! r9 = interrupt vector
+ ! restore exception frame & regs
+ mov.l @r8+,r2 ! old R2
+ mov.l @r8+,r0 ! old R0
+ mov.l @r8+,r1 ! old R1
+ mov.l @r8+,r10 ! old PC
+ mov.l @r8+,r11 ! old SR
+ movml.l r2,@-r15
+ mov.l r10,@(OFF_PC,r15)
+ mov.l r11,@(OFF_SR,r15)
+ mov.l r8,@(OFF_SP,r15) ! save old sp
+ mov r15,r8
+ add #OFF_TRA + 4,r8
+ mov.l r9,@-r8
+ sts.l macl,@-r8
+ sts.l mach,@-r8
+ stc.l gbr,@-r8
+ add #-4,r8
+ sts.l pr,@-r8
+2:
+ ! dispatch exception / interrupt
+ mov #64,r8
+ cmp/hs r8,r9
+ bt interrupt_entry ! vec >= 64 is interrupt
+ mov #32,r8
+ cmp/hs r8,r9
+ bt trap_entry ! 64 > vec >= 32 is trap
+
+ mov.l 4f,r8
+ mov r9,r4
+ shll2 r9
+ add r9,r8
+ mov.l @r8,r8 ! exception handler address
+ tst r8,r8
+ bf 3f
+ mov.l 8f,r8 ! unhandled exception
+3:
+ mov.l 5f,r10
+ jmp @r8
+ lds r10,pr
+
+interrupt_entry:
+ mov r9,r4
+ mov r15,r5
+ mov.l 7f,r8
+ mov.l 6f,r9
+ jmp @r8
+ lds r9,pr
+
+ .align 2
+4: .long exception_handling_table
+5: .long ret_from_exception
+6: .long ret_from_irq
+7: .long do_IRQ
+8: .long exception_error
+
+trap_entry:
+ mov #0x30,r8
+ cmp/ge r8,r9 ! vectors 0x20-0x2f are system calls
+ bt 1f
+ add #-0x10,r9 ! convert SH2 to SH3/4 ABI
+1:
+ shll2 r9 ! TRA
+ bra system_call ! jump to the common system call entry
+ mov r9,r8
+
+#if defined(CONFIG_SH_STANDARD_BIOS)
+ /* Unwind the stack and jmp to the debug entry */
+ENTRY(sh_bios_handler)
+ mov r15,r0
+ add #(22-4)*4-4,r0
+ ldc.l @r0+,gbr
+ lds.l @r0+,mach
+ lds.l @r0+,macl
+ mov r15,r0
+ mov.l @(OFF_SP,r0),r1
+ mov.l @(OFF_SR,r2),r3
+ mov.l r3,@-r1
+ mov.l @(OFF_SP,r2),r3
+ mov.l r3,@-r1
+ mov r15,r0
+ add #(22-4)*4-8,r0
+ mov.l 1f,r2
+ mov.l @r2,r2
+ stc sr,r3
+ mov.l r2,@r0
+ mov.l r3,@(4,r0)
+ mov.l r1,@(8,r0)
+ movml.l @r15+,r14
+ add #8,r15
+ lds.l @r15+, pr
+ mov.l @r15+,r15
+ rte
+ nop
+ .align 2
+1: .long gdb_vbr_vector
+#endif /* CONFIG_SH_STANDARD_BIOS */
+
+ENTRY(address_error_trap_handler)
+ mov r15,r4 ! regs
+ mov.l @(OFF_PC,r15),r6 ! pc
+ mov.l 1f,r0
+ jmp @r0
+ mov #0,r5 ! writeaccess is unknown
+
+ .align 2
+1: .long do_address_error
+
+restore_all:
+ stc sr,r0
+ or #0xf0,r0
+ ldc r0,sr ! all interrupt block (same BL = 1)
+ ! restore special register
+ ! overlap exception frame
+ mov r15,r0
+ add #17*4,r0
+ lds.l @r0+,pr
+ add #4,r0
+ ldc.l @r0+,gbr
+ lds.l @r0+,mach
+ lds.l @r0+,macl
+ mov r15,r0
+ mov.l $cpu_mode,r2
+ bld.b #6,@(OFF_SR,r15)
+ bst.b #6,@(0,r2) ! save CPU mode
+ mov.l @(OFF_SR,r0),r1
+ shll2 r1
+ shlr2 r1 ! clear MD bit
+ mov.l @(OFF_SP,r0),r2
+ add #-8,r2
+ mov.l r2,@(OFF_SP,r0) ! point exception frame top
+ mov.l r1,@(4,r2) ! set sr
+ mov.l @(OFF_PC,r0),r1
+ mov.l r1,@r2 ! set pc
+ get_current_thread_info r0, r1
+ mov.l $current_thread_info,r1
+ mov.l r0,@r1
+ movml.l @r15+,r14
+ mov.l @r15,r15
+ rte
+ nop
+
+ .align 2
+$current_thread_info:
+ .long __current_thread_info
+$cpu_mode:
+ .long __cpu_mode
+
+! common exception handler
+#include "../../entry-common.S"
+
+ .data
+! cpu operation mode
+! bit30 = MD (compatible SH3/4)
+__cpu_mode:
+ .long 0x40000000
+
+ .section .bss
+__current_thread_info:
+ .long 0
+
+ENTRY(exception_handling_table)
+ .space 4*32
diff --git a/arch/sh/kernel/cpu/sh2a/ex.S b/arch/sh/kernel/cpu/sh2a/ex.S
new file mode 100644
index 00000000..45680667
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh2a/ex.S
@@ -0,0 +1,73 @@
+/*
+ * arch/sh/kernel/cpu/sh2a/ex.S
+ *
+ * The SH-2A exception vector table
+ *
+ * Copyright (C) 2008 Yoshinori Sato
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/linkage.h>
+
+!
+! convert Exception Vector to Exception Number
+!
+
+! exception no 0 to 255
+exception_entry0:
+no = 0
+ .rept 256
+ mov.l r1,@-sp
+ bra exception_trampoline0
+ mov #no,r1
+no = no + 1
+ .endr
+exception_trampoline0:
+ mov.l r0,@-sp
+ mov.l 1f,r0
+ extu.b r1,r1
+ jmp @r0
+ extu.w r1,r1
+
+ .align 2
+1: .long exception_handler
+
+! exception no 256 to 511
+exception_entry1:
+no = 0
+ .rept 256
+ mov.l r1,@-sp
+ bra exception_trampoline1
+ mov #no,r1
+no = no + 1
+ .endr
+exception_trampoline1:
+ mov.l r0,@-sp
+ extu.b r1,r1
+ movi20 #0x100,r0
+ add r0,r1
+ mov.l 1f,r0
+ jmp @r0
+ extu.w r1,r1
+
+ .align 2
+1: .long exception_handler
+
+!
+! Exception Vector Base
+!
+ .align 2
+ENTRY(vbr_base)
+vector = 0
+ .rept 256
+ .long exception_entry0 + vector * 6
+vector = vector + 1
+ .endr
+vector = 0
+ .rept 256
+ .long exception_entry1 + vector * 6
+vector = vector + 1
+ .endr
diff --git a/arch/sh/kernel/cpu/sh2a/fpu.c b/arch/sh/kernel/cpu/sh2a/fpu.c
new file mode 100644
index 00000000..98bbaa44
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh2a/fpu.c
@@ -0,0 +1,575 @@
+/*
+ * Save/restore floating point context for signal handlers.
+ *
+ * Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * FIXME! These routines can be optimized for the big endian case.
+ */
+#include <linux/sched.h>
+#include <linux/signal.h>
+#include <asm/processor.h>
+#include <asm/io.h>
+#include <asm/fpu.h>
+#include <asm/traps.h>
+
+/* The PR (precision) bit in the FP Status Register must be clear when
+ * an frchg instruction is executed, otherwise the instruction is undefined.
+ * Executing frchg with PR set causes a trap on some SH4 implementations.
+ */
+
+#define FPSCR_RCHG 0x00000000
+
+
+/*
+ * Save FPU registers onto task structure.
+ */
+void save_fpu(struct task_struct *tsk)
+{
+ unsigned long dummy;
+
+ enable_fpu();
+ asm volatile("sts.l fpul, @-%0\n\t"
+ "sts.l fpscr, @-%0\n\t"
+ "fmov.s fr15, @-%0\n\t"
+ "fmov.s fr14, @-%0\n\t"
+ "fmov.s fr13, @-%0\n\t"
+ "fmov.s fr12, @-%0\n\t"
+ "fmov.s fr11, @-%0\n\t"
+ "fmov.s fr10, @-%0\n\t"
+ "fmov.s fr9, @-%0\n\t"
+ "fmov.s fr8, @-%0\n\t"
+ "fmov.s fr7, @-%0\n\t"
+ "fmov.s fr6, @-%0\n\t"
+ "fmov.s fr5, @-%0\n\t"
+ "fmov.s fr4, @-%0\n\t"
+ "fmov.s fr3, @-%0\n\t"
+ "fmov.s fr2, @-%0\n\t"
+ "fmov.s fr1, @-%0\n\t"
+ "fmov.s fr0, @-%0\n\t"
+ "lds %3, fpscr\n\t"
+ : "=r" (dummy)
+ : "0" ((char *)(&tsk->thread.xstate->hardfpu.status)),
+ "r" (FPSCR_RCHG),
+ "r" (FPSCR_INIT)
+ : "memory");
+
+ disable_fpu();
+}
+
+void restore_fpu(struct task_struct *tsk)
+{
+ unsigned long dummy;
+
+ enable_fpu();
+ asm volatile("fmov.s @%0+, fr0\n\t"
+ "fmov.s @%0+, fr1\n\t"
+ "fmov.s @%0+, fr2\n\t"
+ "fmov.s @%0+, fr3\n\t"
+ "fmov.s @%0+, fr4\n\t"
+ "fmov.s @%0+, fr5\n\t"
+ "fmov.s @%0+, fr6\n\t"
+ "fmov.s @%0+, fr7\n\t"
+ "fmov.s @%0+, fr8\n\t"
+ "fmov.s @%0+, fr9\n\t"
+ "fmov.s @%0+, fr10\n\t"
+ "fmov.s @%0+, fr11\n\t"
+ "fmov.s @%0+, fr12\n\t"
+ "fmov.s @%0+, fr13\n\t"
+ "fmov.s @%0+, fr14\n\t"
+ "fmov.s @%0+, fr15\n\t"
+ "lds.l @%0+, fpscr\n\t"
+ "lds.l @%0+, fpul\n\t"
+ : "=r" (dummy)
+ : "0" (tsk->thread.xstate), "r" (FPSCR_RCHG)
+ : "memory");
+ disable_fpu();
+}
+
+/*
+ * Emulate arithmetic ops on denormalized numbers for some FPU insns.
+ */
+
+/* denormalized float * float */
+static int denormal_mulf(int hx, int hy)
+{
+ unsigned int ix, iy;
+ unsigned long long m, n;
+ int exp, w;
+
+ ix = hx & 0x7fffffff;
+ iy = hy & 0x7fffffff;
+ if (iy < 0x00800000 || ix == 0)
+ return ((hx ^ hy) & 0x80000000);
+
+ exp = (iy & 0x7f800000) >> 23;
+ ix &= 0x007fffff;
+ iy = (iy & 0x007fffff) | 0x00800000;
+ m = (unsigned long long)ix * iy;
+ n = m;
+ w = -1;
+ while (n) { n >>= 1; w++; }
+
+ /* FIXME: use guard bits */
+ exp += w - 126 - 46;
+ if (exp > 0)
+ ix = ((int) (m >> (w - 23)) & 0x007fffff) | (exp << 23);
+ else if (exp + 22 >= 0)
+ ix = (int) (m >> (w - 22 - exp)) & 0x007fffff;
+ else
+ ix = 0;
+
+ ix |= (hx ^ hy) & 0x80000000;
+ return ix;
+}
+
+/* denormalized double * double */
+static void mult64(unsigned long long x, unsigned long long y,
+ unsigned long long *highp, unsigned long long *lowp)
+{
+ unsigned long long sub0, sub1, sub2, sub3;
+ unsigned long long high, low;
+
+ sub0 = (x >> 32) * (unsigned long) (y >> 32);
+ sub1 = (x & 0xffffffffLL) * (unsigned long) (y >> 32);
+ sub2 = (x >> 32) * (unsigned long) (y & 0xffffffffLL);
+ sub3 = (x & 0xffffffffLL) * (unsigned long) (y & 0xffffffffLL);
+ low = sub3;
+ high = 0LL;
+ sub3 += (sub1 << 32);
+ if (low > sub3)
+ high++;
+ low = sub3;
+ sub3 += (sub2 << 32);
+ if (low > sub3)
+ high++;
+ low = sub3;
+ high += (sub1 >> 32) + (sub2 >> 32);
+ high += sub0;
+ *lowp = low;
+ *highp = high;
+}
+
+static inline long long rshift64(unsigned long long mh,
+ unsigned long long ml, int n)
+{
+ if (n >= 64)
+ return mh >> (n - 64);
+ return (mh << (64 - n)) | (ml >> n);
+}
+
+static long long denormal_muld(long long hx, long long hy)
+{
+ unsigned long long ix, iy;
+ unsigned long long mh, ml, nh, nl;
+ int exp, w;
+
+ ix = hx & 0x7fffffffffffffffLL;
+ iy = hy & 0x7fffffffffffffffLL;
+ if (iy < 0x0010000000000000LL || ix == 0)
+ return ((hx ^ hy) & 0x8000000000000000LL);
+
+ exp = (iy & 0x7ff0000000000000LL) >> 52;
+ ix &= 0x000fffffffffffffLL;
+ iy = (iy & 0x000fffffffffffffLL) | 0x0010000000000000LL;
+ mult64(ix, iy, &mh, &ml);
+ nh = mh;
+ nl = ml;
+ w = -1;
+ if (nh) {
+ while (nh) { nh >>= 1; w++;}
+ w += 64;
+ } else
+ while (nl) { nl >>= 1; w++;}
+
+ /* FIXME: use guard bits */
+ exp += w - 1022 - 52 * 2;
+ if (exp > 0)
+ ix = (rshift64(mh, ml, w - 52) & 0x000fffffffffffffLL)
+ | ((long long)exp << 52);
+ else if (exp + 51 >= 0)
+ ix = rshift64(mh, ml, w - 51 - exp) & 0x000fffffffffffffLL;
+ else
+ ix = 0;
+
+ ix |= (hx ^ hy) & 0x8000000000000000LL;
+ return ix;
+}
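mult64() and rshift64() above give denormal_muld() a full 128-bit product of the two significands built from four 32-bit partial products. A host-side sketch of the same partial-product identity, cross-checked against unsigned __int128 (a GCC/Clang extension assumed available on the host); it restates the scheme and is not the kernel function itself:

#include <stdio.h>

/* Host-side sketch: the partial-product identity behind mult64(),
 *   x * y = (xh*yh << 64) + ((xh*yl + xl*yh) << 32) + xl*yl
 * checked against unsigned __int128 (GCC/Clang extension, assumed
 * available on the host).
 */
int main(void)
{
	unsigned long long x = 0x001fffffffffffffULL;	/* sample significands */
	unsigned long long y = 0x0010000000000001ULL;
	unsigned long long xh = x >> 32, xl = x & 0xffffffffULL;
	unsigned long long yh = y >> 32, yl = y & 0xffffffffULL;
	unsigned long long mid1 = xh * yl, mid2 = xl * yh;
	unsigned long long lo = xl * yl;
	unsigned long long hi = xh * yh + (mid1 >> 32) + (mid2 >> 32);
	unsigned long long t;

	t = lo + (mid1 << 32);	/* add low half of first middle term */
	if (t < lo)
		hi++;
	lo = t;
	t = lo + (mid2 << 32);	/* add low half of second middle term */
	if (t < lo)
		hi++;
	lo = t;

	{
		unsigned __int128 ref = (unsigned __int128)x * y;
		printf("match: %d\n",
		       hi == (unsigned long long)(ref >> 64) &&
		       lo == (unsigned long long)ref);
	}
	return 0;
}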
+
+/* ix - iy where iy: denormal and ix, iy >= 0 */
+static int denormal_subf1(unsigned int ix, unsigned int iy)
+{
+ int frac;
+ int exp;
+
+ if (ix < 0x00800000)
+ return ix - iy;
+
+ exp = (ix & 0x7f800000) >> 23;
+ if (exp - 1 > 31)
+ return ix;
+ iy >>= exp - 1;
+ if (iy == 0)
+ return ix;
+
+ frac = (ix & 0x007fffff) | 0x00800000;
+ frac -= iy;
+ while (frac < 0x00800000) {
+ if (--exp == 0)
+ return frac;
+ frac <<= 1;
+ }
+
+ return (exp << 23) | (frac & 0x007fffff);
+}
+
+/* ix + iy where iy: denormal and ix, iy >= 0 */
+static int denormal_addf1(unsigned int ix, unsigned int iy)
+{
+ int frac;
+ int exp;
+
+ if (ix < 0x00800000)
+ return ix + iy;
+
+ exp = (ix & 0x7f800000) >> 23;
+ if (exp - 1 > 31)
+ return ix;
+ iy >>= exp - 1;
+ if (iy == 0)
+ return ix;
+
+ frac = (ix & 0x007fffff) | 0x00800000;
+ frac += iy;
+ if (frac >= 0x01000000) {
+ frac >>= 1;
+ ++exp;
+ }
+
+ return (exp << 23) | (frac & 0x007fffff);
+}
+
+static int denormal_addf(int hx, int hy)
+{
+ unsigned int ix, iy;
+ int sign;
+
+ if ((hx ^ hy) & 0x80000000) {
+ sign = hx & 0x80000000;
+ ix = hx & 0x7fffffff;
+ iy = hy & 0x7fffffff;
+ if (iy < 0x00800000) {
+ ix = denormal_subf1(ix, iy);
+ if ((int) ix < 0) {
+ ix = -ix;
+ sign ^= 0x80000000;
+ }
+ } else {
+ ix = denormal_subf1(iy, ix);
+ sign ^= 0x80000000;
+ }
+ } else {
+ sign = hx & 0x80000000;
+ ix = hx & 0x7fffffff;
+ iy = hy & 0x7fffffff;
+ if (iy < 0x00800000)
+ ix = denormal_addf1(ix, iy);
+ else
+ ix = denormal_addf1(iy, ix);
+ }
+
+ return sign | ix;
+}
+
+/* ix - iy where iy: denormal and ix, iy >= 0 */
+static long long denormal_subd1(unsigned long long ix, unsigned long long iy)
+{
+ long long frac;
+ int exp;
+
+ if (ix < 0x0010000000000000LL)
+ return ix - iy;
+
+ exp = (ix & 0x7ff0000000000000LL) >> 52;
+ if (exp - 1 > 63)
+ return ix;
+ iy >>= exp - 1;
+ if (iy == 0)
+ return ix;
+
+ frac = (ix & 0x000fffffffffffffLL) | 0x0010000000000000LL;
+ frac -= iy;
+ while (frac < 0x0010000000000000LL) {
+ if (--exp == 0)
+ return frac;
+ frac <<= 1;
+ }
+
+ return ((long long)exp << 52) | (frac & 0x000fffffffffffffLL);
+}
+
+/* ix + iy where iy: denormal and ix, iy >= 0 */
+static long long denormal_addd1(unsigned long long ix, unsigned long long iy)
+{
+ long long frac;
+ long long exp;
+
+ if (ix < 0x0010000000000000LL)
+ return ix + iy;
+
+ exp = (ix & 0x7ff0000000000000LL) >> 52;
+ if (exp - 1 > 63)
+ return ix;
+ iy >>= exp - 1;
+ if (iy == 0)
+ return ix;
+
+ frac = (ix & 0x000fffffffffffffLL) | 0x0010000000000000LL;
+ frac += iy;
+ if (frac >= 0x0020000000000000LL) {
+ frac >>= 1;
+ ++exp;
+ }
+
+ return (exp << 52) | (frac & 0x000fffffffffffffLL);
+}
+
+static long long denormal_addd(long long hx, long long hy)
+{
+ unsigned long long ix, iy;
+ long long sign;
+
+ if ((hx ^ hy) & 0x8000000000000000LL) {
+ sign = hx & 0x8000000000000000LL;
+ ix = hx & 0x7fffffffffffffffLL;
+ iy = hy & 0x7fffffffffffffffLL;
+ if (iy < 0x0010000000000000LL) {
+ ix = denormal_subd1(ix, iy);
+ if ((int) ix < 0) {
+ ix = -ix;
+ sign ^= 0x8000000000000000LL;
+ }
+ } else {
+ ix = denormal_subd1(iy, ix);
+ sign ^= 0x8000000000000000LL;
+ }
+ } else {
+ sign = hx & 0x8000000000000000LL;
+ ix = hx & 0x7fffffffffffffffLL;
+ iy = hy & 0x7fffffffffffffffLL;
+ if (iy < 0x0010000000000000LL)
+ ix = denormal_addd1(ix, iy);
+ else
+ ix = denormal_addd1(iy, ix);
+ }
+
+ return sign | ix;
+}
+
+/**
+ * denormal_to_double - Given a denormalized float in fpul,
+ * store the equivalent double into FP register pair @n
+ *
+ * @fpu: Pointer to sh_fpu_hard structure
+ * @n: Index to FP register
+ */
+static void
+denormal_to_double (struct sh_fpu_hard_struct *fpu, int n)
+{
+ unsigned long du, dl;
+ unsigned long x = fpu->fpul;
+ int exp = 1023 - 126;
+
+ if (x != 0 && (x & 0x7f800000) == 0) {
+ du = (x & 0x80000000);
+ while ((x & 0x00800000) == 0) {
+ x <<= 1;
+ exp--;
+ }
+ x &= 0x007fffff;
+ du |= (exp << 20) | (x >> 3);
+ dl = x << 29;
+
+ fpu->fp_regs[n] = du;
+ fpu->fp_regs[n+1] = dl;
+ }
+}
+
+/**
+ * ieee_fpe_handler - Handle denormalized number exception
+ *
+ * @regs: Pointer to register structure
+ *
+ * Returns 1 when it's handled (should not cause exception).
+ */
+static int
+ieee_fpe_handler (struct pt_regs *regs)
+{
+ unsigned short insn = *(unsigned short *) regs->pc;
+ unsigned short finsn;
+ unsigned long nextpc;
+ int nib[4] = {
+ (insn >> 12) & 0xf,
+ (insn >> 8) & 0xf,
+ (insn >> 4) & 0xf,
+ insn & 0xf};
+
+ if (nib[0] == 0xb ||
+ (nib[0] == 0x4 && nib[2] == 0x0 && nib[3] == 0xb)) /* bsr & jsr */
+ regs->pr = regs->pc + 4;
+ if (nib[0] == 0xa || nib[0] == 0xb) { /* bra & bsr */
+ nextpc = regs->pc + 4 + ((short) ((insn & 0xfff) << 4) >> 3);
+ finsn = *(unsigned short *) (regs->pc + 2);
+ } else if (nib[0] == 0x8 && nib[1] == 0xd) { /* bt/s */
+ if (regs->sr & 1)
+ nextpc = regs->pc + 4 + ((char) (insn & 0xff) << 1);
+ else
+ nextpc = regs->pc + 4;
+ finsn = *(unsigned short *) (regs->pc + 2);
+ } else if (nib[0] == 0x8 && nib[1] == 0xf) { /* bf/s */
+ if (regs->sr & 1)
+ nextpc = regs->pc + 4;
+ else
+ nextpc = regs->pc + 4 + ((char) (insn & 0xff) << 1);
+ finsn = *(unsigned short *) (regs->pc + 2);
+ } else if (nib[0] == 0x4 && nib[3] == 0xb &&
+ (nib[2] == 0x0 || nib[2] == 0x2)) { /* jmp & jsr */
+ nextpc = regs->regs[nib[1]];
+ finsn = *(unsigned short *) (regs->pc + 2);
+ } else if (nib[0] == 0x0 && nib[3] == 0x3 &&
+ (nib[2] == 0x0 || nib[2] == 0x2)) { /* braf & bsrf */
+ nextpc = regs->pc + 4 + regs->regs[nib[1]];
+ finsn = *(unsigned short *) (regs->pc + 2);
+ } else if (insn == 0x000b) { /* rts */
+ nextpc = regs->pr;
+ finsn = *(unsigned short *) (regs->pc + 2);
+ } else {
+ nextpc = regs->pc + 2;
+ finsn = insn;
+ }
+
+#define FPSCR_FPU_ERROR (1 << 17)
+
+ if ((finsn & 0xf1ff) == 0xf0ad) { /* fcnvsd */
+ struct task_struct *tsk = current;
+
+ if ((tsk->thread.xstate->hardfpu.fpscr & FPSCR_FPU_ERROR)) {
+ /* FPU error */
+ denormal_to_double (&tsk->thread.xstate->hardfpu,
+ (finsn >> 8) & 0xf);
+ } else
+ return 0;
+
+ regs->pc = nextpc;
+ return 1;
+ } else if ((finsn & 0xf00f) == 0xf002) { /* fmul */
+ struct task_struct *tsk = current;
+ int fpscr;
+ int n, m, prec;
+ unsigned int hx, hy;
+
+ n = (finsn >> 8) & 0xf;
+ m = (finsn >> 4) & 0xf;
+ hx = tsk->thread.xstate->hardfpu.fp_regs[n];
+ hy = tsk->thread.xstate->hardfpu.fp_regs[m];
+ fpscr = tsk->thread.xstate->hardfpu.fpscr;
+ prec = fpscr & (1 << 19);
+
+ if ((fpscr & FPSCR_FPU_ERROR)
+ && (prec && ((hx & 0x7fffffff) < 0x00100000
+ || (hy & 0x7fffffff) < 0x00100000))) {
+ long long llx, lly;
+
+ /* FPU error because of denormal */
+ llx = ((long long) hx << 32)
+ | tsk->thread.xstate->hardfpu.fp_regs[n+1];
+ lly = ((long long) hy << 32)
+ | tsk->thread.xstate->hardfpu.fp_regs[m+1];
+ if ((hx & 0x7fffffff) >= 0x00100000)
+ llx = denormal_muld(lly, llx);
+ else
+ llx = denormal_muld(llx, lly);
+ tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32;
+ tsk->thread.xstate->hardfpu.fp_regs[n+1] = llx & 0xffffffff;
+ } else if ((fpscr & FPSCR_FPU_ERROR)
+ && (!prec && ((hx & 0x7fffffff) < 0x00800000
+ || (hy & 0x7fffffff) < 0x00800000))) {
+ /* FPU error because of denormal */
+ if ((hx & 0x7fffffff) >= 0x00800000)
+ hx = denormal_mulf(hy, hx);
+ else
+ hx = denormal_mulf(hx, hy);
+ tsk->thread.xstate->hardfpu.fp_regs[n] = hx;
+ } else
+ return 0;
+
+ regs->pc = nextpc;
+ return 1;
+ } else if ((finsn & 0xf00e) == 0xf000) { /* fadd, fsub */
+ struct task_struct *tsk = current;
+ int fpscr;
+ int n, m, prec;
+ unsigned int hx, hy;
+
+ n = (finsn >> 8) & 0xf;
+ m = (finsn >> 4) & 0xf;
+ hx = tsk->thread.xstate->hardfpu.fp_regs[n];
+ hy = tsk->thread.xstate->hardfpu.fp_regs[m];
+ fpscr = tsk->thread.xstate->hardfpu.fpscr;
+ prec = fpscr & (1 << 19);
+
+ if ((fpscr & FPSCR_FPU_ERROR)
+ && (prec && ((hx & 0x7fffffff) < 0x00100000
+ || (hy & 0x7fffffff) < 0x00100000))) {
+ long long llx, lly;
+
+ /* FPU error because of denormal */
+ llx = ((long long) hx << 32)
+ | tsk->thread.xstate->hardfpu.fp_regs[n+1];
+ lly = ((long long) hy << 32)
+ | tsk->thread.xstate->hardfpu.fp_regs[m+1];
+ if ((finsn & 0xf00f) == 0xf000)
+ llx = denormal_addd(llx, lly);
+ else
+ llx = denormal_addd(llx, lly ^ (1LL << 63));
+ tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32;
+ tsk->thread.xstate->hardfpu.fp_regs[n+1] = llx & 0xffffffff;
+ } else if ((fpscr & FPSCR_FPU_ERROR)
+ && (!prec && ((hx & 0x7fffffff) < 0x00800000
+ || (hy & 0x7fffffff) < 0x00800000))) {
+ /* FPU error because of denormal */
+ if ((finsn & 0xf00f) == 0xf000)
+ hx = denormal_addf(hx, hy);
+ else
+ hx = denormal_addf(hx, hy ^ 0x80000000);
+ tsk->thread.xstate->hardfpu.fp_regs[n] = hx;
+ } else
+ return 0;
+
+ regs->pc = nextpc;
+ return 1;
+ }
+
+ return 0;
+}
+
+BUILD_TRAP_HANDLER(fpu_error)
+{
+ struct task_struct *tsk = current;
+ TRAP_HANDLER_DECL;
+
+ __unlazy_fpu(tsk, regs);
+ if (ieee_fpe_handler(regs)) {
+ tsk->thread.xstate->hardfpu.fpscr &=
+ ~(FPSCR_CAUSE_MASK | FPSCR_FLAG_MASK);
+ grab_fpu(regs);
+ restore_fpu(tsk);
+ task_thread_info(tsk)->status |= TS_USEDFPU;
+ return;
+ }
+
+ force_sig(SIGFPE, tsk);
+}
diff --git a/arch/sh/kernel/cpu/sh2a/opcode_helper.c b/arch/sh/kernel/cpu/sh2a/opcode_helper.c
new file mode 100644
index 00000000..72aa61c8
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh2a/opcode_helper.c
@@ -0,0 +1,54 @@
+/*
+ * arch/sh/kernel/cpu/sh2a/opcode_helper.c
+ *
+ * Helper for the SH-2A 32-bit opcodes.
+ *
+ * Copyright (C) 2007 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/kernel.h>
+
+/*
+ * Instructions on SH are generally fixed at 16 bits; however, SH-2A
+ * introduces some 32-bit instructions. Since there are no real
+ * constraints on their use (and they can be mixed and matched), we need
+ * to check the instruction encoding to work out if it's a true 32-bit
+ * instruction or not.
+ *
+ * Presently, 32-bit opcodes have only slight variations in what the
+ * actual encoding looks like in the first-half of the instruction, which
+ * makes it fairly straightforward to differentiate from the 16-bit ones.
+ *
+ * First 16-bits of encoding Used by
+ *
+ * 0011nnnnmmmm0001 mov.b, mov.w, mov.l, fmov.d,
+ * fmov.s, movu.b, movu.w
+ *
+ * 0011nnnn0iii1001 bclr.b, bld.b, bset.b, bst.b, band.b,
+ * bandnot.b, bldnot.b, bor.b, bornot.b,
+ * bxor.b
+ *
+ * 0000nnnniiii0000 movi20
+ * 0000nnnniiii0001 movi20s
+ */
+unsigned int instruction_size(unsigned int insn)
+{
+ /* Look for the common cases */
+ switch ((insn & 0xf00f)) {
+ case 0x0000: /* movi20 */
+ case 0x0001: /* movi20s */
+ case 0x3001: /* 32-bit mov/fmov/movu variants */
+ return 4;
+ }
+
+ /* And the special cases.. */
+ switch ((insn & 0xf08f)) {
+ case 0x3009: /* 32-bit b*.b bit operations */
+ return 4;
+ }
+
+ return 2;
+}
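The two switch statements map directly onto the encoding table in the comment: the 0xf00f mask catches movi20, movi20s and the 0011nnnnmmmm0001 move variants, the 0xf08f mask catches the 0011nnnn0iii1001 bit operations, and everything else is the usual 2 bytes. A small sketch exercising the same classification on a few sample opcode words (the sample words are illustrative only):

#include <stdio.h>

/* Sketch: classify a few sample opcode words the way instruction_size()
 * in opcode_helper.c does.  The sample words are illustrative only.
 */
static unsigned int instruction_size(unsigned int insn)
{
	switch (insn & 0xf00f) {
	case 0x0000:	/* movi20 */
	case 0x0001:	/* movi20s */
	case 0x3001:	/* 32-bit mov/fmov/movu variants */
		return 4;
	}

	switch (insn & 0xf08f) {
	case 0x3009:	/* 32-bit b*.b bit operations */
		return 4;
	}

	return 2;
}

int main(void)
{
	unsigned int samples[] = { 0x3451, 0x0120, 0x3429, 0x6103 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("0x%04x -> %u bytes\n", samples[i],
		       instruction_size(samples[i]));
	return 0;
}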
diff --git a/arch/sh/kernel/cpu/sh2a/pinmux-sh7203.c b/arch/sh/kernel/cpu/sh2a/pinmux-sh7203.c
new file mode 100644
index 00000000..c465af72
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh2a/pinmux-sh7203.c
@@ -0,0 +1,1597 @@
+/*
+ * SH7203 Pinmux
+ *
+ * Copyright (C) 2008 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <cpu/sh7203.h>
+
+enum {
+ PINMUX_RESERVED = 0,
+
+ PINMUX_DATA_BEGIN,
+ PA7_DATA, PA6_DATA, PA5_DATA, PA4_DATA,
+ PA3_DATA, PA2_DATA, PA1_DATA, PA0_DATA,
+ PB12_DATA,
+ PB11_DATA, PB10_DATA, PB9_DATA, PB8_DATA,
+ PB7_DATA, PB6_DATA, PB5_DATA, PB4_DATA,
+ PB3_DATA, PB2_DATA, PB1_DATA, PB0_DATA,
+ PC14_DATA, PC13_DATA, PC12_DATA,
+ PC11_DATA, PC10_DATA, PC9_DATA, PC8_DATA,
+ PC7_DATA, PC6_DATA, PC5_DATA, PC4_DATA,
+ PC3_DATA, PC2_DATA, PC1_DATA, PC0_DATA,
+ PD15_DATA, PD14_DATA, PD13_DATA, PD12_DATA,
+ PD11_DATA, PD10_DATA, PD9_DATA, PD8_DATA,
+ PD7_DATA, PD6_DATA, PD5_DATA, PD4_DATA,
+ PD3_DATA, PD2_DATA, PD1_DATA, PD0_DATA,
+ PE15_DATA, PE14_DATA, PE13_DATA, PE12_DATA,
+ PE11_DATA, PE10_DATA, PE9_DATA, PE8_DATA,
+ PE7_DATA, PE6_DATA, PE5_DATA, PE4_DATA,
+ PE3_DATA, PE2_DATA, PE1_DATA, PE0_DATA,
+ PF30_DATA, PF29_DATA, PF28_DATA,
+ PF27_DATA, PF26_DATA, PF25_DATA, PF24_DATA,
+ PF23_DATA, PF22_DATA, PF21_DATA, PF20_DATA,
+ PF19_DATA, PF18_DATA, PF17_DATA, PF16_DATA,
+ PF15_DATA, PF14_DATA, PF13_DATA, PF12_DATA,
+ PF11_DATA, PF10_DATA, PF9_DATA, PF8_DATA,
+ PF7_DATA, PF6_DATA, PF5_DATA, PF4_DATA,
+ PF3_DATA, PF2_DATA, PF1_DATA, PF0_DATA,
+ PINMUX_DATA_END,
+
+ PINMUX_INPUT_BEGIN,
+ FORCE_IN,
+ PA7_IN, PA6_IN, PA5_IN, PA4_IN,
+ PA3_IN, PA2_IN, PA1_IN, PA0_IN,
+ PB11_IN, PB10_IN, PB9_IN, PB8_IN,
+ PC14_IN, PC13_IN, PC12_IN,
+ PC11_IN, PC10_IN, PC9_IN, PC8_IN,
+ PC7_IN, PC6_IN, PC5_IN, PC4_IN,
+ PC3_IN, PC2_IN, PC1_IN, PC0_IN,
+ PD15_IN, PD14_IN, PD13_IN, PD12_IN,
+ PD11_IN, PD10_IN, PD9_IN, PD8_IN,
+ PD7_IN, PD6_IN, PD5_IN, PD4_IN,
+ PD3_IN, PD2_IN, PD1_IN, PD0_IN,
+ PE15_IN, PE14_IN, PE13_IN, PE12_IN,
+ PE11_IN, PE10_IN, PE9_IN, PE8_IN,
+ PE7_IN, PE6_IN, PE5_IN, PE4_IN,
+ PE3_IN, PE2_IN, PE1_IN, PE0_IN,
+ PF30_IN, PF29_IN, PF28_IN,
+ PF27_IN, PF26_IN, PF25_IN, PF24_IN,
+ PF23_IN, PF22_IN, PF21_IN, PF20_IN,
+ PF19_IN, PF18_IN, PF17_IN, PF16_IN,
+ PF15_IN, PF14_IN, PF13_IN, PF12_IN,
+ PF11_IN, PF10_IN, PF9_IN, PF8_IN,
+ PF7_IN, PF6_IN, PF5_IN, PF4_IN,
+ PF3_IN, PF2_IN, PF1_IN, PF0_IN,
+ PINMUX_INPUT_END,
+
+ PINMUX_OUTPUT_BEGIN,
+ FORCE_OUT,
+ PB11_OUT, PB10_OUT, PB9_OUT, PB8_OUT,
+ PC14_OUT, PC13_OUT, PC12_OUT,
+ PC11_OUT, PC10_OUT, PC9_OUT, PC8_OUT,
+ PC7_OUT, PC6_OUT, PC5_OUT, PC4_OUT,
+ PC3_OUT, PC2_OUT, PC1_OUT, PC0_OUT,
+ PD15_OUT, PD14_OUT, PD13_OUT, PD12_OUT,
+ PD11_OUT, PD10_OUT, PD9_OUT, PD8_OUT,
+ PD7_OUT, PD6_OUT, PD5_OUT, PD4_OUT,
+ PD3_OUT, PD2_OUT, PD1_OUT, PD0_OUT,
+ PE15_OUT, PE14_OUT, PE13_OUT, PE12_OUT,
+ PE11_OUT, PE10_OUT, PE9_OUT, PE8_OUT,
+ PE7_OUT, PE6_OUT, PE5_OUT, PE4_OUT,
+ PE3_OUT, PE2_OUT, PE1_OUT, PE0_OUT,
+ PF30_OUT, PF29_OUT, PF28_OUT,
+ PF27_OUT, PF26_OUT, PF25_OUT, PF24_OUT,
+ PF23_OUT, PF22_OUT, PF21_OUT, PF20_OUT,
+ PF19_OUT, PF18_OUT, PF17_OUT, PF16_OUT,
+ PF15_OUT, PF14_OUT, PF13_OUT, PF12_OUT,
+ PF11_OUT, PF10_OUT, PF9_OUT, PF8_OUT,
+ PF7_OUT, PF6_OUT, PF5_OUT, PF4_OUT,
+ PF3_OUT, PF2_OUT, PF1_OUT, PF0_OUT,
+ PINMUX_OUTPUT_END,
+
+ PINMUX_FUNCTION_BEGIN,
+ PB11_IOR_IN, PB11_IOR_OUT,
+ PB10_IOR_IN, PB10_IOR_OUT,
+ PB9_IOR_IN, PB9_IOR_OUT,
+ PB8_IOR_IN, PB8_IOR_OUT,
+ PB12MD_00, PB12MD_01, PB12MD_10, PB12MD_11,
+ PB11MD_0, PB11MD_1,
+ PB10MD_0, PB10MD_1,
+ PB9MD_00, PB9MD_01, PB9MD_10,
+ PB8MD_00, PB8MD_01, PB8MD_10,
+ PB7MD_00, PB7MD_01, PB7MD_10, PB7MD_11,
+ PB6MD_00, PB6MD_01, PB6MD_10, PB6MD_11,
+ PB5MD_00, PB5MD_01, PB5MD_10, PB5MD_11,
+ PB4MD_00, PB4MD_01, PB4MD_10, PB4MD_11,
+ PB3MD_00, PB3MD_01, PB3MD_10, PB3MD_11,
+ PB2MD_00, PB2MD_01, PB2MD_10, PB2MD_11,
+ PB1MD_00, PB1MD_01, PB1MD_10, PB1MD_11,
+ PB0MD_00, PB0MD_01, PB0MD_10, PB0MD_11,
+
+ PB12IRQ_00, PB12IRQ_01, PB12IRQ_10,
+
+ PC14MD_0, PC14MD_1,
+ PC13MD_0, PC13MD_1,
+ PC12MD_0, PC12MD_1,
+ PC11MD_00, PC11MD_01, PC11MD_10,
+ PC10MD_00, PC10MD_01, PC10MD_10,
+ PC9MD_0, PC9MD_1,
+ PC8MD_0, PC8MD_1,
+ PC7MD_0, PC7MD_1,
+ PC6MD_0, PC6MD_1,
+ PC5MD_0, PC5MD_1,
+ PC4MD_0, PC4MD_1,
+ PC3MD_0, PC3MD_1,
+ PC2MD_0, PC2MD_1,
+ PC1MD_0, PC1MD_1,
+ PC0MD_00, PC0MD_01, PC0MD_10,
+
+ PD15MD_000, PD15MD_001, PD15MD_010, PD15MD_100, PD15MD_101,
+ PD14MD_000, PD14MD_001, PD14MD_010, PD14MD_101,
+ PD13MD_000, PD13MD_001, PD13MD_010, PD13MD_100, PD13MD_101,
+ PD12MD_000, PD12MD_001, PD12MD_010, PD12MD_100, PD12MD_101,
+ PD11MD_000, PD11MD_001, PD11MD_010, PD11MD_100, PD11MD_101,
+ PD10MD_000, PD10MD_001, PD10MD_010, PD10MD_100, PD10MD_101,
+ PD9MD_000, PD9MD_001, PD9MD_010, PD9MD_100, PD9MD_101,
+ PD8MD_000, PD8MD_001, PD8MD_010, PD8MD_100, PD8MD_101,
+ PD7MD_000, PD7MD_001, PD7MD_010, PD7MD_011, PD7MD_100, PD7MD_101,
+ PD6MD_000, PD6MD_001, PD6MD_010, PD6MD_011, PD6MD_100, PD6MD_101,
+ PD5MD_000, PD5MD_001, PD5MD_010, PD5MD_011, PD5MD_100, PD5MD_101,
+ PD4MD_000, PD4MD_001, PD4MD_010, PD4MD_011, PD4MD_100, PD4MD_101,
+ PD3MD_000, PD3MD_001, PD3MD_010, PD3MD_011, PD3MD_100, PD3MD_101,
+ PD2MD_000, PD2MD_001, PD2MD_010, PD2MD_011, PD2MD_100, PD2MD_101,
+ PD1MD_000, PD1MD_001, PD1MD_010, PD1MD_011, PD1MD_100, PD1MD_101,
+ PD0MD_000, PD0MD_001, PD0MD_010, PD0MD_011, PD0MD_100, PD0MD_101,
+
+ PE15MD_00, PE15MD_01, PE15MD_11,
+ PE14MD_00, PE14MD_01, PE14MD_11,
+ PE13MD_00, PE13MD_11,
+ PE12MD_00, PE12MD_11,
+ PE11MD_000, PE11MD_001, PE11MD_010, PE11MD_100,
+ PE10MD_000, PE10MD_001, PE10MD_010, PE10MD_100,
+ PE9MD_00, PE9MD_01, PE9MD_10, PE9MD_11,
+ PE8MD_00, PE8MD_01, PE8MD_10, PE8MD_11,
+ PE7MD_000, PE7MD_001, PE7MD_010, PE7MD_011, PE7MD_100,
+ PE6MD_000, PE6MD_001, PE6MD_010, PE6MD_011, PE6MD_100,
+ PE5MD_000, PE5MD_001, PE5MD_010, PE5MD_011, PE5MD_100,
+ PE4MD_000, PE4MD_001, PE4MD_010, PE4MD_011, PE4MD_100,
+ PE3MD_00, PE3MD_01, PE3MD_11,
+ PE2MD_00, PE2MD_01, PE2MD_11,
+ PE1MD_00, PE1MD_01, PE1MD_10, PE1MD_11,
+ PE0MD_000, PE0MD_001, PE0MD_011, PE0MD_100,
+
+ PF30MD_0, PF30MD_1,
+ PF29MD_0, PF29MD_1,
+ PF28MD_0, PF28MD_1,
+ PF27MD_0, PF27MD_1,
+ PF26MD_0, PF26MD_1,
+ PF25MD_0, PF25MD_1,
+ PF24MD_0, PF24MD_1,
+ PF23MD_00, PF23MD_01, PF23MD_10,
+ PF22MD_00, PF22MD_01, PF22MD_10,
+ PF21MD_00, PF21MD_01, PF21MD_10,
+ PF20MD_00, PF20MD_01, PF20MD_10,
+ PF19MD_00, PF19MD_01, PF19MD_10,
+ PF18MD_00, PF18MD_01, PF18MD_10,
+ PF17MD_00, PF17MD_01, PF17MD_10,
+ PF16MD_00, PF16MD_01, PF16MD_10,
+ PF15MD_00, PF15MD_01, PF15MD_10,
+ PF14MD_00, PF14MD_01, PF14MD_10,
+ PF13MD_00, PF13MD_01, PF13MD_10,
+ PF12MD_00, PF12MD_01, PF12MD_10,
+ PF11MD_00, PF11MD_01, PF11MD_10,
+ PF10MD_00, PF10MD_01, PF10MD_10,
+ PF9MD_00, PF9MD_01, PF9MD_10,
+ PF8MD_00, PF8MD_01, PF8MD_10,
+ PF7MD_00, PF7MD_01, PF7MD_10, PF7MD_11,
+ PF6MD_00, PF6MD_01, PF6MD_10, PF6MD_11,
+ PF5MD_00, PF5MD_01, PF5MD_10, PF5MD_11,
+ PF4MD_00, PF4MD_01, PF4MD_10, PF4MD_11,
+ PF3MD_00, PF3MD_01, PF3MD_10, PF3MD_11,
+ PF2MD_00, PF2MD_01, PF2MD_10, PF2MD_11,
+ PF1MD_00, PF1MD_01, PF1MD_10, PF1MD_11,
+ PF0MD_00, PF0MD_01, PF0MD_10, PF0MD_11,
+ PINMUX_FUNCTION_END,
+
+ PINMUX_MARK_BEGIN,
+ PINT7_PB_MARK, PINT6_PB_MARK, PINT5_PB_MARK, PINT4_PB_MARK,
+ PINT3_PB_MARK, PINT2_PB_MARK, PINT1_PB_MARK, PINT0_PB_MARK,
+ PINT7_PD_MARK, PINT6_PD_MARK, PINT5_PD_MARK, PINT4_PD_MARK,
+ PINT3_PD_MARK, PINT2_PD_MARK, PINT1_PD_MARK, PINT0_PD_MARK,
+ IRQ7_PB_MARK, IRQ6_PB_MARK, IRQ5_PB_MARK, IRQ4_PB_MARK,
+ IRQ3_PB_MARK, IRQ2_PB_MARK, IRQ1_PB_MARK, IRQ0_PB_MARK,
+ IRQ7_PD_MARK, IRQ6_PD_MARK, IRQ5_PD_MARK, IRQ4_PD_MARK,
+ IRQ3_PD_MARK, IRQ2_PD_MARK, IRQ1_PD_MARK, IRQ0_PD_MARK,
+ IRQ7_PE_MARK, IRQ6_PE_MARK, IRQ5_PE_MARK, IRQ4_PE_MARK,
+ IRQ3_PE_MARK, IRQ2_PE_MARK, IRQ1_PE_MARK, IRQ0_PE_MARK,
+ WDTOVF_MARK, IRQOUT_MARK, REFOUT_MARK, IRQOUT_REFOUT_MARK,
+ UBCTRG_MARK,
+ CTX1_MARK, CRX1_MARK, CTX0_MARK, CTX0_CTX1_MARK,
+ CRX0_MARK, CRX0_CRX1_MARK,
+ SDA3_MARK, SCL3_MARK,
+ SDA2_MARK, SCL2_MARK,
+ SDA1_MARK, SCL1_MARK,
+ SDA0_MARK, SCL0_MARK,
+ TEND0_PD_MARK, TEND0_PE_MARK, DACK0_PD_MARK, DACK0_PE_MARK,
+ DREQ0_PD_MARK, DREQ0_PE_MARK, TEND1_PD_MARK, TEND1_PE_MARK,
+ DACK1_PD_MARK, DACK1_PE_MARK, DREQ1_PD_MARK, DREQ1_PE_MARK,
+ DACK2_MARK, DREQ2_MARK, DACK3_MARK, DREQ3_MARK,
+ ADTRG_PD_MARK, ADTRG_PE_MARK,
+ D31_MARK, D30_MARK, D29_MARK, D28_MARK,
+ D27_MARK, D26_MARK, D25_MARK, D24_MARK,
+ D23_MARK, D22_MARK, D21_MARK, D20_MARK,
+ D19_MARK, D18_MARK, D17_MARK, D16_MARK,
+ A25_MARK, A24_MARK, A23_MARK, A22_MARK,
+ A21_MARK, CS4_MARK, MRES_MARK, BS_MARK,
+ IOIS16_MARK, CS1_MARK, CS6_CE1B_MARK, CE2B_MARK,
+ CS5_CE1A_MARK, CE2A_MARK, FRAME_MARK, WAIT_MARK,
+ RDWR_MARK, CKE_MARK, CASU_MARK, BREQ_MARK,
+ RASU_MARK, BACK_MARK, CASL_MARK, RASL_MARK,
+ WE3_DQMUU_AH_ICIO_WR_MARK, WE2_DQMUL_ICIORD_MARK,
+ WE1_DQMLU_WE_MARK, WE0_DQMLL_MARK,
+ CS3_MARK, CS2_MARK, A1_MARK, A0_MARK, CS7_MARK,
+ TIOC4D_MARK, TIOC4C_MARK, TIOC4B_MARK, TIOC4A_MARK,
+ TIOC3D_MARK, TIOC3C_MARK, TIOC3B_MARK, TIOC3A_MARK,
+ TIOC2B_MARK, TIOC1B_MARK, TIOC2A_MARK, TIOC1A_MARK,
+ TIOC0D_MARK, TIOC0C_MARK, TIOC0B_MARK, TIOC0A_MARK,
+ TCLKD_PD_MARK, TCLKC_PD_MARK, TCLKB_PD_MARK, TCLKA_PD_MARK,
+ TCLKD_PF_MARK, TCLKC_PF_MARK, TCLKB_PF_MARK, TCLKA_PF_MARK,
+ SCS0_PD_MARK, SSO0_PD_MARK, SSI0_PD_MARK, SSCK0_PD_MARK,
+ SCS0_PF_MARK, SSO0_PF_MARK, SSI0_PF_MARK, SSCK0_PF_MARK,
+ SCS1_PD_MARK, SSO1_PD_MARK, SSI1_PD_MARK, SSCK1_PD_MARK,
+ SCS1_PF_MARK, SSO1_PF_MARK, SSI1_PF_MARK, SSCK1_PF_MARK,
+ TXD0_MARK, RXD0_MARK, SCK0_MARK,
+ TXD1_MARK, RXD1_MARK, SCK1_MARK,
+ TXD2_MARK, RXD2_MARK, SCK2_MARK,
+ RTS3_MARK, CTS3_MARK, TXD3_MARK,
+ RXD3_MARK, SCK3_MARK,
+ AUDIO_CLK_MARK,
+ SSIDATA3_MARK, SSIWS3_MARK, SSISCK3_MARK,
+ SSIDATA2_MARK, SSIWS2_MARK, SSISCK2_MARK,
+ SSIDATA1_MARK, SSIWS1_MARK, SSISCK1_MARK,
+ SSIDATA0_MARK, SSIWS0_MARK, SSISCK0_MARK,
+ FCE_MARK, FRB_MARK,
+ NAF7_MARK, NAF6_MARK, NAF5_MARK, NAF4_MARK,
+ NAF3_MARK, NAF2_MARK, NAF1_MARK, NAF0_MARK,
+ FSC_MARK, FOE_MARK, FCDE_MARK, FWE_MARK,
+ LCD_VEPWC_MARK, LCD_VCPWC_MARK, LCD_CLK_MARK, LCD_FLM_MARK,
+ LCD_M_DISP_MARK, LCD_CL2_MARK, LCD_CL1_MARK, LCD_DON_MARK,
+ LCD_DATA15_MARK, LCD_DATA14_MARK, LCD_DATA13_MARK, LCD_DATA12_MARK,
+ LCD_DATA11_MARK, LCD_DATA10_MARK, LCD_DATA9_MARK, LCD_DATA8_MARK,
+ LCD_DATA7_MARK, LCD_DATA6_MARK, LCD_DATA5_MARK, LCD_DATA4_MARK,
+ LCD_DATA3_MARK, LCD_DATA2_MARK, LCD_DATA1_MARK, LCD_DATA0_MARK,
+ PINMUX_MARK_END,
+};
+
+static pinmux_enum_t pinmux_data[] = {
+
+ /* PA */
+ PINMUX_DATA(PA7_DATA, PA7_IN),
+ PINMUX_DATA(PA6_DATA, PA6_IN),
+ PINMUX_DATA(PA5_DATA, PA5_IN),
+ PINMUX_DATA(PA4_DATA, PA4_IN),
+ PINMUX_DATA(PA3_DATA, PA3_IN),
+ PINMUX_DATA(PA2_DATA, PA2_IN),
+ PINMUX_DATA(PA1_DATA, PA1_IN),
+ PINMUX_DATA(PA0_DATA, PA0_IN),
+
+ /* PB */
+ PINMUX_DATA(PB12_DATA, PB12MD_00, FORCE_OUT),
+ PINMUX_DATA(WDTOVF_MARK, PB12MD_01),
+ PINMUX_DATA(IRQOUT_MARK, PB12MD_10, PB12IRQ_00),
+ PINMUX_DATA(REFOUT_MARK, PB12MD_10, PB12IRQ_01),
+ PINMUX_DATA(IRQOUT_REFOUT_MARK, PB12MD_10, PB12IRQ_10),
+ PINMUX_DATA(UBCTRG_MARK, PB12MD_11),
+
+ PINMUX_DATA(PB11_DATA, PB11MD_0, PB11_IN, PB11_OUT),
+ PINMUX_DATA(CTX1_MARK, PB11MD_1),
+
+ PINMUX_DATA(PB10_DATA, PB10MD_0, PB10_IN, PB10_OUT),
+ PINMUX_DATA(CRX1_MARK, PB10MD_1),
+
+ PINMUX_DATA(PB9_DATA, PB9MD_00, PB9_IN, PB9_OUT),
+ PINMUX_DATA(CTX0_MARK, PB9MD_01),
+ PINMUX_DATA(CTX0_CTX1_MARK, PB9MD_10),
+
+ PINMUX_DATA(PB8_DATA, PB8MD_00, PB8_IN, PB8_OUT),
+ PINMUX_DATA(CRX0_MARK, PB8MD_01),
+ PINMUX_DATA(CRX0_CRX1_MARK, PB8MD_10),
+
+ PINMUX_DATA(PB7_DATA, PB7MD_00, FORCE_IN),
+ PINMUX_DATA(SDA3_MARK, PB7MD_01),
+ PINMUX_DATA(PINT7_PB_MARK, PB7MD_10),
+ PINMUX_DATA(IRQ7_PB_MARK, PB7MD_11),
+
+ PINMUX_DATA(PB6_DATA, PB6MD_00, FORCE_IN),
+ PINMUX_DATA(SCL3_MARK, PB6MD_01),
+ PINMUX_DATA(PINT6_PB_MARK, PB6MD_10),
+ PINMUX_DATA(IRQ6_PB_MARK, PB6MD_11),
+
+ PINMUX_DATA(PB5_DATA, PB5MD_00, FORCE_IN),
+ PINMUX_DATA(SDA2_MARK, PB5MD_01),
+ PINMUX_DATA(PINT5_PB_MARK, PB5MD_10),
+ PINMUX_DATA(IRQ5_PB_MARK, PB5MD_11),
+
+ PINMUX_DATA(PB4_DATA, PB4MD_00, FORCE_IN),
+ PINMUX_DATA(SCL2_MARK, PB4MD_01),
+ PINMUX_DATA(PINT4_PB_MARK, PB4MD_10),
+ PINMUX_DATA(IRQ4_PB_MARK, PB4MD_11),
+
+ PINMUX_DATA(PB3_DATA, PB3MD_00, FORCE_IN),
+ PINMUX_DATA(SDA1_MARK, PB3MD_01),
+ PINMUX_DATA(PINT3_PB_MARK, PB3MD_10),
+ PINMUX_DATA(IRQ3_PB_MARK, PB3MD_11),
+
+ PINMUX_DATA(PB2_DATA, PB2MD_00, FORCE_IN),
+ PINMUX_DATA(SCL1_MARK, PB2MD_01),
+ PINMUX_DATA(PINT2_PB_MARK, PB2MD_10),
+ PINMUX_DATA(IRQ2_PB_MARK, PB2MD_11),
+
+ PINMUX_DATA(PB1_DATA, PB1MD_00, FORCE_IN),
+ PINMUX_DATA(SDA0_MARK, PB1MD_01),
+ PINMUX_DATA(PINT1_PB_MARK, PB1MD_10),
+ PINMUX_DATA(IRQ1_PB_MARK, PB1MD_11),
+
+ PINMUX_DATA(PB0_DATA, PB0MD_00, FORCE_IN),
+ PINMUX_DATA(SCL0_MARK, PB0MD_01),
+ PINMUX_DATA(PINT0_PB_MARK, PB0MD_10),
+ PINMUX_DATA(IRQ0_PB_MARK, PB0MD_11),
+
+ /* PC */
+ PINMUX_DATA(PC14_DATA, PC14MD_0, PC14_IN, PC14_OUT),
+ PINMUX_DATA(WAIT_MARK, PC14MD_1),
+
+ PINMUX_DATA(PC13_DATA, PC13MD_0, PC13_IN, PC13_OUT),
+ PINMUX_DATA(RDWR_MARK, PC13MD_1),
+
+ PINMUX_DATA(PC12_DATA, PC12MD_0, PC12_IN, PC12_OUT),
+ PINMUX_DATA(CKE_MARK, PC12MD_1),
+
+ PINMUX_DATA(PC11_DATA, PC11MD_00, PC11_IN, PC11_OUT),
+ PINMUX_DATA(CASU_MARK, PC11MD_01),
+ PINMUX_DATA(BREQ_MARK, PC11MD_10),
+
+ PINMUX_DATA(PC10_DATA, PC10MD_00, PC10_IN, PC10_OUT),
+ PINMUX_DATA(RASU_MARK, PC10MD_01),
+ PINMUX_DATA(BACK_MARK, PC10MD_10),
+
+ PINMUX_DATA(PC9_DATA, PC9MD_0, PC9_IN, PC9_OUT),
+ PINMUX_DATA(CASL_MARK, PC9MD_1),
+
+ PINMUX_DATA(PC8_DATA, PC8MD_0, PC8_IN, PC8_OUT),
+ PINMUX_DATA(RASL_MARK, PC8MD_1),
+
+ PINMUX_DATA(PC7_DATA, PC7MD_0, PC7_IN, PC7_OUT),
+ PINMUX_DATA(WE3_DQMUU_AH_ICIO_WR_MARK, PC7MD_1),
+
+ PINMUX_DATA(PC6_DATA, PC6MD_0, PC6_IN, PC6_OUT),
+ PINMUX_DATA(WE2_DQMUL_ICIORD_MARK, PC6MD_1),
+
+ PINMUX_DATA(PC5_DATA, PC5MD_0, PC5_IN, PC5_OUT),
+ PINMUX_DATA(WE1_DQMLU_WE_MARK, PC5MD_1),
+
+ PINMUX_DATA(PC4_DATA, PC4MD_0, PC4_IN, PC4_OUT),
+ PINMUX_DATA(WE0_DQMLL_MARK, PC4MD_1),
+
+ PINMUX_DATA(PC3_DATA, PC3MD_0, PC3_IN, PC3_OUT),
+ PINMUX_DATA(CS3_MARK, PC3MD_1),
+
+ PINMUX_DATA(PC2_DATA, PC2MD_0, PC2_IN, PC2_OUT),
+ PINMUX_DATA(CS2_MARK, PC2MD_1),
+
+ PINMUX_DATA(PC1_DATA, PC1MD_0, PC1_IN, PC1_OUT),
+ PINMUX_DATA(A1_MARK, PC1MD_1),
+
+ PINMUX_DATA(PC0_DATA, PC0MD_00, PC0_IN, PC0_OUT),
+ PINMUX_DATA(A0_MARK, PC0MD_01),
+ PINMUX_DATA(CS7_MARK, PC0MD_10),
+
+ /* PD */
+ PINMUX_DATA(PD15_DATA, PD15MD_000, PD15_IN, PD15_OUT),
+ PINMUX_DATA(D31_MARK, PD15MD_001),
+ PINMUX_DATA(PINT7_PD_MARK, PD15MD_010),
+ PINMUX_DATA(ADTRG_PD_MARK, PD15MD_100),
+ PINMUX_DATA(TIOC4D_MARK, PD15MD_101),
+
+ PINMUX_DATA(PD14_DATA, PD14MD_000, PD14_IN, PD14_OUT),
+ PINMUX_DATA(D30_MARK, PD14MD_001),
+ PINMUX_DATA(PINT6_PD_MARK, PD14MD_010),
+ PINMUX_DATA(TIOC4C_MARK, PD14MD_101),
+
+ PINMUX_DATA(PD13_DATA, PD13MD_000, PD13_IN, PD13_OUT),
+ PINMUX_DATA(D29_MARK, PD13MD_001),
+ PINMUX_DATA(PINT5_PD_MARK, PD13MD_010),
+ PINMUX_DATA(TEND1_PD_MARK, PD13MD_100),
+ PINMUX_DATA(TIOC4B_MARK, PD13MD_101),
+
+ PINMUX_DATA(PD12_DATA, PD12MD_000, PD12_IN, PD12_OUT),
+ PINMUX_DATA(D28_MARK, PD12MD_001),
+ PINMUX_DATA(PINT4_PD_MARK, PD12MD_010),
+ PINMUX_DATA(DACK1_PD_MARK, PD12MD_100),
+ PINMUX_DATA(TIOC4A_MARK, PD12MD_101),
+
+ PINMUX_DATA(PD11_DATA, PD11MD_000, PD11_IN, PD11_OUT),
+ PINMUX_DATA(D27_MARK, PD11MD_001),
+ PINMUX_DATA(PINT3_PD_MARK, PD11MD_010),
+ PINMUX_DATA(DREQ1_PD_MARK, PD11MD_100),
+ PINMUX_DATA(TIOC3D_MARK, PD11MD_101),
+
+ PINMUX_DATA(PD10_DATA, PD10MD_000, PD10_IN, PD10_OUT),
+ PINMUX_DATA(D26_MARK, PD10MD_001),
+ PINMUX_DATA(PINT2_PD_MARK, PD10MD_010),
+ PINMUX_DATA(TEND0_PD_MARK, PD10MD_100),
+ PINMUX_DATA(TIOC3C_MARK, PD10MD_101),
+
+ PINMUX_DATA(PD9_DATA, PD9MD_000, PD9_IN, PD9_OUT),
+ PINMUX_DATA(D25_MARK, PD9MD_001),
+ PINMUX_DATA(PINT1_PD_MARK, PD9MD_010),
+ PINMUX_DATA(DACK0_PD_MARK, PD9MD_100),
+ PINMUX_DATA(TIOC3B_MARK, PD9MD_101),
+
+ PINMUX_DATA(PD8_DATA, PD8MD_000, PD8_IN, PD8_OUT),
+ PINMUX_DATA(D24_MARK, PD8MD_001),
+ PINMUX_DATA(PINT0_PD_MARK, PD8MD_010),
+ PINMUX_DATA(DREQ0_PD_MARK, PD8MD_100),
+ PINMUX_DATA(TIOC3A_MARK, PD8MD_101),
+
+ PINMUX_DATA(PD7_DATA, PD7MD_000, PD7_IN, PD7_OUT),
+ PINMUX_DATA(D23_MARK, PD7MD_001),
+ PINMUX_DATA(IRQ7_PD_MARK, PD7MD_010),
+ PINMUX_DATA(SCS1_PD_MARK, PD7MD_011),
+ PINMUX_DATA(TCLKD_PD_MARK, PD7MD_100),
+ PINMUX_DATA(TIOC2B_MARK, PD7MD_101),
+
+ PINMUX_DATA(PD6_DATA, PD6MD_000, PD6_IN, PD6_OUT),
+ PINMUX_DATA(D22_MARK, PD6MD_001),
+ PINMUX_DATA(IRQ6_PD_MARK, PD6MD_010),
+ PINMUX_DATA(SSO1_PD_MARK, PD6MD_011),
+ PINMUX_DATA(TCLKC_PD_MARK, PD6MD_100),
+ PINMUX_DATA(TIOC2A_MARK, PD6MD_101),
+
+ PINMUX_DATA(PD5_DATA, PD5MD_000, PD5_IN, PD5_OUT),
+ PINMUX_DATA(D21_MARK, PD5MD_001),
+ PINMUX_DATA(IRQ5_PD_MARK, PD5MD_010),
+ PINMUX_DATA(SSI1_PD_MARK, PD5MD_011),
+ PINMUX_DATA(TCLKB_PD_MARK, PD5MD_100),
+ PINMUX_DATA(TIOC1B_MARK, PD5MD_101),
+
+ PINMUX_DATA(PD4_DATA, PD4MD_000, PD4_IN, PD4_OUT),
+ PINMUX_DATA(D20_MARK, PD4MD_001),
+ PINMUX_DATA(IRQ4_PD_MARK, PD4MD_010),
+ PINMUX_DATA(SSCK1_PD_MARK, PD4MD_011),
+ PINMUX_DATA(TCLKA_PD_MARK, PD4MD_100),
+ PINMUX_DATA(TIOC1A_MARK, PD4MD_101),
+
+ PINMUX_DATA(PD3_DATA, PD3MD_000, PD3_IN, PD3_OUT),
+ PINMUX_DATA(D19_MARK, PD3MD_001),
+ PINMUX_DATA(IRQ3_PD_MARK, PD3MD_010),
+ PINMUX_DATA(SCS0_PD_MARK, PD3MD_011),
+ PINMUX_DATA(DACK3_MARK, PD3MD_100),
+ PINMUX_DATA(TIOC0D_MARK, PD3MD_101),
+
+ PINMUX_DATA(PD2_DATA, PD2MD_000, PD2_IN, PD2_OUT),
+ PINMUX_DATA(D18_MARK, PD2MD_001),
+ PINMUX_DATA(IRQ2_PD_MARK, PD2MD_010),
+ PINMUX_DATA(SSO0_PD_MARK, PD2MD_011),
+ PINMUX_DATA(DREQ3_MARK, PD2MD_100),
+ PINMUX_DATA(TIOC0C_MARK, PD2MD_101),
+
+ PINMUX_DATA(PD1_DATA, PD1MD_000, PD1_IN, PD1_OUT),
+ PINMUX_DATA(D17_MARK, PD1MD_001),
+ PINMUX_DATA(IRQ1_PD_MARK, PD1MD_010),
+ PINMUX_DATA(SSI0_PD_MARK, PD1MD_011),
+ PINMUX_DATA(DACK2_MARK, PD1MD_100),
+ PINMUX_DATA(TIOC0B_MARK, PD1MD_101),
+
+ PINMUX_DATA(PD0_DATA, PD0MD_000, PD0_IN, PD0_OUT),
+ PINMUX_DATA(D16_MARK, PD0MD_001),
+ PINMUX_DATA(IRQ0_PD_MARK, PD0MD_010),
+ PINMUX_DATA(SSCK0_PD_MARK, PD0MD_011),
+ PINMUX_DATA(DREQ2_MARK, PD0MD_100),
+ PINMUX_DATA(TIOC0A_MARK, PD0MD_101),
+
+ /* PE */
+ PINMUX_DATA(PE15_DATA, PE15MD_00, PE15_IN, PE15_OUT),
+ PINMUX_DATA(IOIS16_MARK, PE15MD_01),
+ PINMUX_DATA(RTS3_MARK, PE15MD_11),
+
+ PINMUX_DATA(PE14_DATA, PE14MD_00, PE14_IN, PE14_OUT),
+ PINMUX_DATA(CS1_MARK, PE14MD_01),
+ PINMUX_DATA(CTS3_MARK, PE14MD_11),
+
+ PINMUX_DATA(PE13_DATA, PE13MD_00, PE13_IN, PE13_OUT),
+ PINMUX_DATA(TXD3_MARK, PE13MD_11),
+
+ PINMUX_DATA(PE12_DATA, PE12MD_00, PE12_IN, PE12_OUT),
+ PINMUX_DATA(RXD3_MARK, PE12MD_11),
+
+ PINMUX_DATA(PE11_DATA, PE11MD_000, PE11_IN, PE11_OUT),
+ PINMUX_DATA(CS6_CE1B_MARK, PE11MD_001),
+ PINMUX_DATA(IRQ7_PE_MARK, PE11MD_010),
+ PINMUX_DATA(TEND1_PE_MARK, PE11MD_100),
+
+ PINMUX_DATA(PE10_DATA, PE10MD_000, PE10_IN, PE10_OUT),
+ PINMUX_DATA(CE2B_MARK, PE10MD_001),
+ PINMUX_DATA(IRQ6_PE_MARK, PE10MD_010),
+ PINMUX_DATA(TEND0_PE_MARK, PE10MD_100),
+
+ PINMUX_DATA(PE9_DATA, PE9MD_00, PE9_IN, PE9_OUT),
+ PINMUX_DATA(CS5_CE1A_MARK, PE9MD_01),
+ PINMUX_DATA(IRQ5_PE_MARK, PE9MD_10),
+ PINMUX_DATA(SCK3_MARK, PE9MD_11),
+
+ PINMUX_DATA(PE8_DATA, PE8MD_00, PE8_IN, PE8_OUT),
+ PINMUX_DATA(CE2A_MARK, PE8MD_01),
+ PINMUX_DATA(IRQ4_PE_MARK, PE8MD_10),
+ PINMUX_DATA(SCK2_MARK, PE8MD_11),
+
+ PINMUX_DATA(PE7_DATA, PE7MD_000, PE7_IN, PE7_OUT),
+ PINMUX_DATA(FRAME_MARK, PE7MD_001),
+ PINMUX_DATA(IRQ3_PE_MARK, PE7MD_010),
+ PINMUX_DATA(TXD2_MARK, PE7MD_011),
+ PINMUX_DATA(DACK1_PE_MARK, PE7MD_100),
+
+ PINMUX_DATA(PE6_DATA, PE6MD_000, PE6_IN, PE6_OUT),
+ PINMUX_DATA(A25_MARK, PE6MD_001),
+ PINMUX_DATA(IRQ2_PE_MARK, PE6MD_010),
+ PINMUX_DATA(RXD2_MARK, PE6MD_011),
+ PINMUX_DATA(DREQ1_PE_MARK, PE6MD_100),
+
+ PINMUX_DATA(PE5_DATA, PE5MD_000, PE5_IN, PE5_OUT),
+ PINMUX_DATA(A24_MARK, PE5MD_001),
+ PINMUX_DATA(IRQ1_PE_MARK, PE5MD_010),
+ PINMUX_DATA(TXD1_MARK, PE5MD_011),
+ PINMUX_DATA(DACK0_PE_MARK, PE5MD_100),
+
+ PINMUX_DATA(PE4_DATA, PE4MD_000, PE4_IN, PE4_OUT),
+ PINMUX_DATA(A23_MARK, PE4MD_001),
+ PINMUX_DATA(IRQ0_PE_MARK, PE4MD_010),
+ PINMUX_DATA(RXD1_MARK, PE4MD_011),
+ PINMUX_DATA(DREQ0_PE_MARK, PE4MD_100),
+
+ PINMUX_DATA(PE3_DATA, PE3MD_00, PE3_IN, PE3_OUT),
+ PINMUX_DATA(A22_MARK, PE3MD_01),
+ PINMUX_DATA(SCK1_MARK, PE3MD_11),
+
+ PINMUX_DATA(PE2_DATA, PE2MD_00, PE2_IN, PE2_OUT),
+ PINMUX_DATA(A21_MARK, PE2MD_01),
+ PINMUX_DATA(SCK0_MARK, PE2MD_11),
+
+ PINMUX_DATA(PE1_DATA, PE1MD_00, PE1_IN, PE1_OUT),
+ PINMUX_DATA(CS4_MARK, PE1MD_01),
+ PINMUX_DATA(MRES_MARK, PE1MD_10),
+ PINMUX_DATA(TXD0_MARK, PE1MD_11),
+
+ PINMUX_DATA(PE0_DATA, PE0MD_000, PE0_IN, PE0_OUT),
+ PINMUX_DATA(BS_MARK, PE0MD_001),
+ PINMUX_DATA(RXD0_MARK, PE0MD_011),
+ PINMUX_DATA(ADTRG_PE_MARK, PE0MD_100),
+
+ /* PF */
+ PINMUX_DATA(PF30_DATA, PF30MD_0, PF30_IN, PF30_OUT),
+ PINMUX_DATA(AUDIO_CLK_MARK, PF30MD_1),
+
+ PINMUX_DATA(PF29_DATA, PF29MD_0, PF29_IN, PF29_OUT),
+ PINMUX_DATA(SSIDATA3_MARK, PF29MD_1),
+
+ PINMUX_DATA(PF28_DATA, PF28MD_0, PF28_IN, PF28_OUT),
+ PINMUX_DATA(SSIWS3_MARK, PF28MD_1),
+
+ PINMUX_DATA(PF27_DATA, PF27MD_0, PF27_IN, PF27_OUT),
+ PINMUX_DATA(SSISCK3_MARK, PF27MD_1),
+
+ PINMUX_DATA(PF26_DATA, PF26MD_0, PF26_IN, PF26_OUT),
+ PINMUX_DATA(SSIDATA2_MARK, PF26MD_1),
+
+ PINMUX_DATA(PF25_DATA, PF25MD_0, PF25_IN, PF25_OUT),
+ PINMUX_DATA(SSIWS2_MARK, PF25MD_1),
+
+ PINMUX_DATA(PF24_DATA, PF24MD_0, PF24_IN, PF24_OUT),
+ PINMUX_DATA(SSISCK2_MARK, PF24MD_1),
+
+ PINMUX_DATA(PF23_DATA, PF23MD_00, PF23_IN, PF23_OUT),
+ PINMUX_DATA(SSIDATA1_MARK, PF23MD_01),
+ PINMUX_DATA(LCD_VEPWC_MARK, PF23MD_10),
+
+ PINMUX_DATA(PF22_DATA, PF22MD_00, PF22_IN, PF22_OUT),
+ PINMUX_DATA(SSIWS1_MARK, PF22MD_01),
+ PINMUX_DATA(LCD_VCPWC_MARK, PF22MD_10),
+
+ PINMUX_DATA(PF21_DATA, PF21MD_00, PF21_IN, PF21_OUT),
+ PINMUX_DATA(SSISCK1_MARK, PF21MD_01),
+ PINMUX_DATA(LCD_CLK_MARK, PF21MD_10),
+
+ PINMUX_DATA(PF20_DATA, PF20MD_00, PF20_IN, PF20_OUT),
+ PINMUX_DATA(SSIDATA0_MARK, PF20MD_01),
+ PINMUX_DATA(LCD_FLM_MARK, PF20MD_10),
+
+ PINMUX_DATA(PF19_DATA, PF19MD_00, PF19_IN, PF19_OUT),
+ PINMUX_DATA(SSIWS0_MARK, PF19MD_01),
+ PINMUX_DATA(LCD_M_DISP_MARK, PF19MD_10),
+
+ PINMUX_DATA(PF18_DATA, PF18MD_00, PF18_IN, PF18_OUT),
+ PINMUX_DATA(SSISCK0_MARK, PF18MD_01),
+ PINMUX_DATA(LCD_CL2_MARK, PF18MD_10),
+
+ PINMUX_DATA(PF17_DATA, PF17MD_00, PF17_IN, PF17_OUT),
+ PINMUX_DATA(FCE_MARK, PF17MD_01),
+ PINMUX_DATA(LCD_CL1_MARK, PF17MD_10),
+
+ PINMUX_DATA(PF16_DATA, PF16MD_00, PF16_IN, PF16_OUT),
+ PINMUX_DATA(FRB_MARK, PF16MD_01),
+ PINMUX_DATA(LCD_DON_MARK, PF16MD_10),
+
+ PINMUX_DATA(PF15_DATA, PF15MD_00, PF15_IN, PF15_OUT),
+ PINMUX_DATA(NAF7_MARK, PF15MD_01),
+ PINMUX_DATA(LCD_DATA15_MARK, PF15MD_10),
+
+ PINMUX_DATA(PF14_DATA, PF14MD_00, PF14_IN, PF14_OUT),
+ PINMUX_DATA(NAF6_MARK, PF14MD_01),
+ PINMUX_DATA(LCD_DATA14_MARK, PF14MD_10),
+
+ PINMUX_DATA(PF13_DATA, PF13MD_00, PF13_IN, PF13_OUT),
+ PINMUX_DATA(NAF5_MARK, PF13MD_01),
+ PINMUX_DATA(LCD_DATA13_MARK, PF13MD_10),
+
+ PINMUX_DATA(PF12_DATA, PF12MD_00, PF12_IN, PF12_OUT),
+ PINMUX_DATA(NAF4_MARK, PF12MD_01),
+ PINMUX_DATA(LCD_DATA12_MARK, PF12MD_10),
+
+ PINMUX_DATA(PF11_DATA, PF11MD_00, PF11_IN, PF11_OUT),
+ PINMUX_DATA(NAF3_MARK, PF11MD_01),
+ PINMUX_DATA(LCD_DATA11_MARK, PF11MD_10),
+
+ PINMUX_DATA(PF10_DATA, PF10MD_00, PF10_IN, PF10_OUT),
+ PINMUX_DATA(NAF2_MARK, PF10MD_01),
+ PINMUX_DATA(LCD_DATA10_MARK, PF10MD_10),
+
+ PINMUX_DATA(PF9_DATA, PF9MD_00, PF9_IN, PF9_OUT),
+ PINMUX_DATA(NAF1_MARK, PF9MD_01),
+ PINMUX_DATA(LCD_DATA9_MARK, PF9MD_10),
+
+ PINMUX_DATA(PF8_DATA, PF8MD_00, PF8_IN, PF8_OUT),
+ PINMUX_DATA(NAF0_MARK, PF8MD_01),
+ PINMUX_DATA(LCD_DATA8_MARK, PF8MD_10),
+
+ PINMUX_DATA(PF7_DATA, PF7MD_00, PF7_IN, PF7_OUT),
+ PINMUX_DATA(FSC_MARK, PF7MD_01),
+ PINMUX_DATA(LCD_DATA7_MARK, PF7MD_10),
+ PINMUX_DATA(SCS1_PF_MARK, PF7MD_11),
+
+ PINMUX_DATA(PF6_DATA, PF6MD_00, PF6_IN, PF6_OUT),
+ PINMUX_DATA(FOE_MARK, PF6MD_01),
+ PINMUX_DATA(LCD_DATA6_MARK, PF6MD_10),
+ PINMUX_DATA(SSO1_PF_MARK, PF6MD_11),
+
+ PINMUX_DATA(PF5_DATA, PF5MD_00, PF5_IN, PF5_OUT),
+ PINMUX_DATA(FCDE_MARK, PF5MD_01),
+ PINMUX_DATA(LCD_DATA5_MARK, PF5MD_10),
+ PINMUX_DATA(SSI1_PF_MARK, PF5MD_11),
+
+ PINMUX_DATA(PF4_DATA, PF4MD_00, PF4_IN, PF4_OUT),
+ PINMUX_DATA(FWE_MARK, PF4MD_01),
+ PINMUX_DATA(LCD_DATA4_MARK, PF4MD_10),
+ PINMUX_DATA(SSCK1_PF_MARK, PF4MD_11),
+
+ PINMUX_DATA(PF3_DATA, PF3MD_00, PF3_IN, PF3_OUT),
+ PINMUX_DATA(TCLKD_PF_MARK, PF3MD_01),
+ PINMUX_DATA(LCD_DATA3_MARK, PF3MD_10),
+ PINMUX_DATA(SCS0_PF_MARK, PF3MD_11),
+
+ PINMUX_DATA(PF2_DATA, PF2MD_00, PF2_IN, PF2_OUT),
+ PINMUX_DATA(TCLKC_PF_MARK, PF2MD_01),
+ PINMUX_DATA(LCD_DATA2_MARK, PF2MD_10),
+ PINMUX_DATA(SSO0_PF_MARK, PF2MD_11),
+
+ PINMUX_DATA(PF1_DATA, PF1MD_00, PF1_IN, PF1_OUT),
+ PINMUX_DATA(TCLKB_PF_MARK, PF1MD_01),
+ PINMUX_DATA(LCD_DATA1_MARK, PF1MD_10),
+ PINMUX_DATA(SSI0_PF_MARK, PF1MD_11),
+
+ PINMUX_DATA(PF0_DATA, PF0MD_00, PF0_IN, PF0_OUT),
+ PINMUX_DATA(TCLKA_PF_MARK, PF0MD_01),
+ PINMUX_DATA(LCD_DATA0_MARK, PF0MD_10),
+ PINMUX_DATA(SSCK0_PF_MARK, PF0MD_11),
+};
+
+static struct pinmux_gpio pinmux_gpios[] = {
+
+ /* PA */
+ PINMUX_GPIO(GPIO_PA7, PA7_DATA),
+ PINMUX_GPIO(GPIO_PA6, PA6_DATA),
+ PINMUX_GPIO(GPIO_PA5, PA5_DATA),
+ PINMUX_GPIO(GPIO_PA4, PA4_DATA),
+ PINMUX_GPIO(GPIO_PA3, PA3_DATA),
+ PINMUX_GPIO(GPIO_PA2, PA2_DATA),
+ PINMUX_GPIO(GPIO_PA1, PA1_DATA),
+ PINMUX_GPIO(GPIO_PA0, PA0_DATA),
+
+ /* PB */
+ PINMUX_GPIO(GPIO_PB12, PB12_DATA),
+ PINMUX_GPIO(GPIO_PB11, PB11_DATA),
+ PINMUX_GPIO(GPIO_PB10, PB10_DATA),
+ PINMUX_GPIO(GPIO_PB9, PB9_DATA),
+ PINMUX_GPIO(GPIO_PB8, PB8_DATA),
+ PINMUX_GPIO(GPIO_PB7, PB7_DATA),
+ PINMUX_GPIO(GPIO_PB6, PB6_DATA),
+ PINMUX_GPIO(GPIO_PB5, PB5_DATA),
+ PINMUX_GPIO(GPIO_PB4, PB4_DATA),
+ PINMUX_GPIO(GPIO_PB3, PB3_DATA),
+ PINMUX_GPIO(GPIO_PB2, PB2_DATA),
+ PINMUX_GPIO(GPIO_PB1, PB1_DATA),
+ PINMUX_GPIO(GPIO_PB0, PB0_DATA),
+
+ /* PC */
+ PINMUX_GPIO(GPIO_PC14, PC14_DATA),
+ PINMUX_GPIO(GPIO_PC13, PC13_DATA),
+ PINMUX_GPIO(GPIO_PC12, PC12_DATA),
+ PINMUX_GPIO(GPIO_PC11, PC11_DATA),
+ PINMUX_GPIO(GPIO_PC10, PC10_DATA),
+ PINMUX_GPIO(GPIO_PC9, PC9_DATA),
+ PINMUX_GPIO(GPIO_PC8, PC8_DATA),
+ PINMUX_GPIO(GPIO_PC7, PC7_DATA),
+ PINMUX_GPIO(GPIO_PC6, PC6_DATA),
+ PINMUX_GPIO(GPIO_PC5, PC5_DATA),
+ PINMUX_GPIO(GPIO_PC4, PC4_DATA),
+ PINMUX_GPIO(GPIO_PC3, PC3_DATA),
+ PINMUX_GPIO(GPIO_PC2, PC2_DATA),
+ PINMUX_GPIO(GPIO_PC1, PC1_DATA),
+ PINMUX_GPIO(GPIO_PC0, PC0_DATA),
+
+ /* PD */
+ PINMUX_GPIO(GPIO_PD15, PD15_DATA),
+ PINMUX_GPIO(GPIO_PD14, PD14_DATA),
+ PINMUX_GPIO(GPIO_PD13, PD13_DATA),
+ PINMUX_GPIO(GPIO_PD12, PD12_DATA),
+ PINMUX_GPIO(GPIO_PD11, PD11_DATA),
+ PINMUX_GPIO(GPIO_PD10, PD10_DATA),
+ PINMUX_GPIO(GPIO_PD9, PD9_DATA),
+ PINMUX_GPIO(GPIO_PD8, PD8_DATA),
+ PINMUX_GPIO(GPIO_PD7, PD7_DATA),
+ PINMUX_GPIO(GPIO_PD6, PD6_DATA),
+ PINMUX_GPIO(GPIO_PD5, PD5_DATA),
+ PINMUX_GPIO(GPIO_PD4, PD4_DATA),
+ PINMUX_GPIO(GPIO_PD3, PD3_DATA),
+ PINMUX_GPIO(GPIO_PD2, PD2_DATA),
+ PINMUX_GPIO(GPIO_PD1, PD1_DATA),
+ PINMUX_GPIO(GPIO_PD0, PD0_DATA),
+
+ /* PE */
+ PINMUX_GPIO(GPIO_PE15, PE15_DATA),
+ PINMUX_GPIO(GPIO_PE14, PE14_DATA),
+ PINMUX_GPIO(GPIO_PE13, PE13_DATA),
+ PINMUX_GPIO(GPIO_PE12, PE12_DATA),
+ PINMUX_GPIO(GPIO_PE11, PE11_DATA),
+ PINMUX_GPIO(GPIO_PE10, PE10_DATA),
+ PINMUX_GPIO(GPIO_PE9, PE9_DATA),
+ PINMUX_GPIO(GPIO_PE8, PE8_DATA),
+ PINMUX_GPIO(GPIO_PE7, PE7_DATA),
+ PINMUX_GPIO(GPIO_PE6, PE6_DATA),
+ PINMUX_GPIO(GPIO_PE5, PE5_DATA),
+ PINMUX_GPIO(GPIO_PE4, PE4_DATA),
+ PINMUX_GPIO(GPIO_PE3, PE3_DATA),
+ PINMUX_GPIO(GPIO_PE2, PE2_DATA),
+ PINMUX_GPIO(GPIO_PE1, PE1_DATA),
+ PINMUX_GPIO(GPIO_PE0, PE0_DATA),
+
+ /* PF */
+ PINMUX_GPIO(GPIO_PF30, PF30_DATA),
+ PINMUX_GPIO(GPIO_PF29, PF29_DATA),
+ PINMUX_GPIO(GPIO_PF28, PF28_DATA),
+ PINMUX_GPIO(GPIO_PF27, PF27_DATA),
+ PINMUX_GPIO(GPIO_PF26, PF26_DATA),
+ PINMUX_GPIO(GPIO_PF25, PF25_DATA),
+ PINMUX_GPIO(GPIO_PF24, PF24_DATA),
+ PINMUX_GPIO(GPIO_PF23, PF23_DATA),
+ PINMUX_GPIO(GPIO_PF22, PF22_DATA),
+ PINMUX_GPIO(GPIO_PF21, PF21_DATA),
+ PINMUX_GPIO(GPIO_PF20, PF20_DATA),
+ PINMUX_GPIO(GPIO_PF19, PF19_DATA),
+ PINMUX_GPIO(GPIO_PF18, PF18_DATA),
+ PINMUX_GPIO(GPIO_PF17, PF17_DATA),
+ PINMUX_GPIO(GPIO_PF16, PF16_DATA),
+ PINMUX_GPIO(GPIO_PF15, PF15_DATA),
+ PINMUX_GPIO(GPIO_PF14, PF14_DATA),
+ PINMUX_GPIO(GPIO_PF13, PF13_DATA),
+ PINMUX_GPIO(GPIO_PF12, PF12_DATA),
+ PINMUX_GPIO(GPIO_PF11, PF11_DATA),
+ PINMUX_GPIO(GPIO_PF10, PF10_DATA),
+ PINMUX_GPIO(GPIO_PF9, PF9_DATA),
+ PINMUX_GPIO(GPIO_PF8, PF8_DATA),
+ PINMUX_GPIO(GPIO_PF7, PF7_DATA),
+ PINMUX_GPIO(GPIO_PF6, PF6_DATA),
+ PINMUX_GPIO(GPIO_PF5, PF5_DATA),
+ PINMUX_GPIO(GPIO_PF4, PF4_DATA),
+ PINMUX_GPIO(GPIO_PF3, PF3_DATA),
+ PINMUX_GPIO(GPIO_PF2, PF2_DATA),
+ PINMUX_GPIO(GPIO_PF1, PF1_DATA),
+ PINMUX_GPIO(GPIO_PF0, PF0_DATA),
+
+ /* INTC */
+ PINMUX_GPIO(GPIO_FN_PINT7_PB, PINT7_PB_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT6_PB, PINT6_PB_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT5_PB, PINT5_PB_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT4_PB, PINT4_PB_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT3_PB, PINT3_PB_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT2_PB, PINT2_PB_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT1_PB, PINT1_PB_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT0_PB, PINT0_PB_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT7_PD, PINT7_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT6_PD, PINT6_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT5_PD, PINT5_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT4_PD, PINT4_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT3_PD, PINT3_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT2_PD, PINT2_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT1_PD, PINT1_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT0_PD, PINT0_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ7_PB, IRQ7_PB_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ6_PB, IRQ6_PB_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ5_PB, IRQ5_PB_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ4_PB, IRQ4_PB_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ3_PB, IRQ3_PB_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ2_PB, IRQ2_PB_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ1_PB, IRQ1_PB_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ0_PB, IRQ0_PB_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ7_PD, IRQ7_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ6_PD, IRQ6_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ5_PD, IRQ5_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ4_PD, IRQ4_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ3_PD, IRQ3_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ2_PD, IRQ2_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ1_PD, IRQ1_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ0_PD, IRQ0_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ7_PE, IRQ7_PE_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ6_PE, IRQ6_PE_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ5_PE, IRQ5_PE_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ4_PE, IRQ4_PE_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ3_PE, IRQ3_PE_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ2_PE, IRQ2_PE_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ1_PE, IRQ1_PE_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ0_PE, IRQ0_PE_MARK),
+
+ PINMUX_GPIO(GPIO_FN_WDTOVF, WDTOVF_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQOUT, IRQOUT_MARK),
+ PINMUX_GPIO(GPIO_FN_REFOUT, REFOUT_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQOUT_REFOUT, IRQOUT_REFOUT_MARK),
+ PINMUX_GPIO(GPIO_FN_UBCTRG, UBCTRG_MARK),
+
+ /* CAN */
+ PINMUX_GPIO(GPIO_FN_CTX1, CTX1_MARK),
+ PINMUX_GPIO(GPIO_FN_CRX1, CRX1_MARK),
+ PINMUX_GPIO(GPIO_FN_CTX0, CTX0_MARK),
+ PINMUX_GPIO(GPIO_FN_CTX0_CTX1, CTX0_CTX1_MARK),
+ PINMUX_GPIO(GPIO_FN_CRX0, CRX0_MARK),
+ PINMUX_GPIO(GPIO_FN_CRX0_CRX1, CRX0_CRX1_MARK),
+
+ /* IIC3 */
+ PINMUX_GPIO(GPIO_FN_SDA3, SDA3_MARK),
+ PINMUX_GPIO(GPIO_FN_SCL3, SCL3_MARK),
+ PINMUX_GPIO(GPIO_FN_SDA2, SDA2_MARK),
+ PINMUX_GPIO(GPIO_FN_SCL2, SCL2_MARK),
+ PINMUX_GPIO(GPIO_FN_SDA1, SDA1_MARK),
+ PINMUX_GPIO(GPIO_FN_SCL1, SCL1_MARK),
+ PINMUX_GPIO(GPIO_FN_SDA0, SDA0_MARK),
+ PINMUX_GPIO(GPIO_FN_SCL0, SCL0_MARK),
+
+ /* DMAC */
+ PINMUX_GPIO(GPIO_FN_TEND0_PD, TEND0_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_TEND0_PE, TEND0_PE_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK0_PD, DACK0_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK0_PE, DACK0_PE_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ0_PD, DREQ0_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ0_PE, DREQ0_PE_MARK),
+ PINMUX_GPIO(GPIO_FN_TEND1_PD, TEND1_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_TEND1_PE, TEND1_PE_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK1_PD, DACK1_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK1_PE, DACK1_PE_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ1_PD, DREQ1_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ1_PE, DREQ1_PE_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK2, DACK2_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ2, DREQ2_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK3, DACK3_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ3, DREQ3_MARK),
+
+ /* ADC */
+ PINMUX_GPIO(GPIO_FN_ADTRG_PD, ADTRG_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_ADTRG_PE, ADTRG_PE_MARK),
+
+ /* BSC */
+ PINMUX_GPIO(GPIO_FN_D31, D31_MARK),
+ PINMUX_GPIO(GPIO_FN_D30, D30_MARK),
+ PINMUX_GPIO(GPIO_FN_D29, D29_MARK),
+ PINMUX_GPIO(GPIO_FN_D28, D28_MARK),
+ PINMUX_GPIO(GPIO_FN_D27, D27_MARK),
+ PINMUX_GPIO(GPIO_FN_D26, D26_MARK),
+ PINMUX_GPIO(GPIO_FN_D25, D25_MARK),
+ PINMUX_GPIO(GPIO_FN_D24, D24_MARK),
+ PINMUX_GPIO(GPIO_FN_D23, D23_MARK),
+ PINMUX_GPIO(GPIO_FN_D22, D22_MARK),
+ PINMUX_GPIO(GPIO_FN_D21, D21_MARK),
+ PINMUX_GPIO(GPIO_FN_D20, D20_MARK),
+ PINMUX_GPIO(GPIO_FN_D19, D19_MARK),
+ PINMUX_GPIO(GPIO_FN_D18, D18_MARK),
+ PINMUX_GPIO(GPIO_FN_D17, D17_MARK),
+ PINMUX_GPIO(GPIO_FN_D16, D16_MARK),
+ PINMUX_GPIO(GPIO_FN_A25, A25_MARK),
+ PINMUX_GPIO(GPIO_FN_A24, A24_MARK),
+ PINMUX_GPIO(GPIO_FN_A23, A23_MARK),
+ PINMUX_GPIO(GPIO_FN_A22, A22_MARK),
+ PINMUX_GPIO(GPIO_FN_A21, A21_MARK),
+ PINMUX_GPIO(GPIO_FN_CS4, CS4_MARK),
+ PINMUX_GPIO(GPIO_FN_MRES, MRES_MARK),
+ PINMUX_GPIO(GPIO_FN_BS, BS_MARK),
+ PINMUX_GPIO(GPIO_FN_IOIS16, IOIS16_MARK),
+ PINMUX_GPIO(GPIO_FN_CS1, CS1_MARK),
+ PINMUX_GPIO(GPIO_FN_CS6_CE1B, CS6_CE1B_MARK),
+ PINMUX_GPIO(GPIO_FN_CE2B, CE2B_MARK),
+ PINMUX_GPIO(GPIO_FN_CS5_CE1A, CS5_CE1A_MARK),
+ PINMUX_GPIO(GPIO_FN_CE2A, CE2A_MARK),
+ PINMUX_GPIO(GPIO_FN_FRAME, FRAME_MARK),
+ PINMUX_GPIO(GPIO_FN_WAIT, WAIT_MARK),
+ PINMUX_GPIO(GPIO_FN_RDWR, RDWR_MARK),
+ PINMUX_GPIO(GPIO_FN_CKE, CKE_MARK),
+ PINMUX_GPIO(GPIO_FN_CASU, CASU_MARK),
+ PINMUX_GPIO(GPIO_FN_BREQ, BREQ_MARK),
+ PINMUX_GPIO(GPIO_FN_RASU, RASU_MARK),
+ PINMUX_GPIO(GPIO_FN_BACK, BACK_MARK),
+ PINMUX_GPIO(GPIO_FN_CASL, CASL_MARK),
+ PINMUX_GPIO(GPIO_FN_RASL, RASL_MARK),
+ PINMUX_GPIO(GPIO_FN_WE3_DQMUU_AH_ICIO_WR, WE3_DQMUU_AH_ICIO_WR_MARK),
+ PINMUX_GPIO(GPIO_FN_WE2_DQMUL_ICIORD, WE2_DQMUL_ICIORD_MARK),
+ PINMUX_GPIO(GPIO_FN_WE1_DQMLU_WE, WE1_DQMLU_WE_MARK),
+ PINMUX_GPIO(GPIO_FN_WE0_DQMLL, WE0_DQMLL_MARK),
+ PINMUX_GPIO(GPIO_FN_CS3, CS3_MARK),
+ PINMUX_GPIO(GPIO_FN_CS2, CS2_MARK),
+ PINMUX_GPIO(GPIO_FN_A1, A1_MARK),
+ PINMUX_GPIO(GPIO_FN_A0, A0_MARK),
+ PINMUX_GPIO(GPIO_FN_CS7, CS7_MARK),
+
+ /* TMU */
+ PINMUX_GPIO(GPIO_FN_TIOC4D, TIOC4D_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC4C, TIOC4C_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC4B, TIOC4B_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC4A, TIOC4A_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC3D, TIOC3D_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC3C, TIOC3C_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC3B, TIOC3B_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC3A, TIOC3A_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC2B, TIOC2B_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC1B, TIOC1B_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC2A, TIOC2A_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC1A, TIOC1A_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC0D, TIOC0D_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC0C, TIOC0C_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC0B, TIOC0B_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC0A, TIOC0A_MARK),
+ PINMUX_GPIO(GPIO_FN_TCLKD_PD, TCLKD_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_TCLKC_PD, TCLKC_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_TCLKB_PD, TCLKB_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_TCLKA_PD, TCLKA_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_TCLKD_PF, TCLKD_PF_MARK),
+ PINMUX_GPIO(GPIO_FN_TCLKC_PF, TCLKC_PF_MARK),
+ PINMUX_GPIO(GPIO_FN_TCLKB_PF, TCLKB_PF_MARK),
+ PINMUX_GPIO(GPIO_FN_TCLKA_PF, TCLKA_PF_MARK),
+
+ /* SSU */
+ PINMUX_GPIO(GPIO_FN_SCS0_PD, SCS0_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_SSO0_PD, SSO0_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI0_PD, SSI0_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_SSCK0_PD, SSCK0_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCS0_PF, SCS0_PF_MARK),
+ PINMUX_GPIO(GPIO_FN_SSO0_PF, SSO0_PF_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI0_PF, SSI0_PF_MARK),
+ PINMUX_GPIO(GPIO_FN_SSCK0_PF, SSCK0_PF_MARK),
+ PINMUX_GPIO(GPIO_FN_SCS1_PD, SCS1_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_SSO1_PD, SSO1_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI1_PD, SSI1_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_SSCK1_PD, SSCK1_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCS1_PF, SCS1_PF_MARK),
+ PINMUX_GPIO(GPIO_FN_SSO1_PF, SSO1_PF_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI1_PF, SSI1_PF_MARK),
+ PINMUX_GPIO(GPIO_FN_SSCK1_PF, SSCK1_PF_MARK),
+
+ /* SCIF */
+ PINMUX_GPIO(GPIO_FN_TXD0, TXD0_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD0, RXD0_MARK),
+ PINMUX_GPIO(GPIO_FN_SCK0, SCK0_MARK),
+ PINMUX_GPIO(GPIO_FN_TXD1, TXD1_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD1, RXD1_MARK),
+ PINMUX_GPIO(GPIO_FN_SCK1, SCK1_MARK),
+ PINMUX_GPIO(GPIO_FN_TXD2, TXD2_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD2, RXD2_MARK),
+ PINMUX_GPIO(GPIO_FN_SCK2, SCK2_MARK),
+ PINMUX_GPIO(GPIO_FN_RTS3, RTS3_MARK),
+ PINMUX_GPIO(GPIO_FN_CTS3, CTS3_MARK),
+ PINMUX_GPIO(GPIO_FN_TXD3, TXD3_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD3, RXD3_MARK),
+ PINMUX_GPIO(GPIO_FN_SCK3, SCK3_MARK),
+
+ /* SSI */
+ PINMUX_GPIO(GPIO_FN_AUDIO_CLK, AUDIO_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_SSIDATA3, SSIDATA3_MARK),
+ PINMUX_GPIO(GPIO_FN_SSIWS3, SSIWS3_MARK),
+ PINMUX_GPIO(GPIO_FN_SSISCK3, SSISCK3_MARK),
+ PINMUX_GPIO(GPIO_FN_SSIDATA2, SSIDATA2_MARK),
+ PINMUX_GPIO(GPIO_FN_SSIWS2, SSIWS2_MARK),
+ PINMUX_GPIO(GPIO_FN_SSISCK2, SSISCK2_MARK),
+ PINMUX_GPIO(GPIO_FN_SSIDATA1, SSIDATA1_MARK),
+ PINMUX_GPIO(GPIO_FN_SSIWS1, SSIWS1_MARK),
+ PINMUX_GPIO(GPIO_FN_SSISCK1, SSISCK1_MARK),
+ PINMUX_GPIO(GPIO_FN_SSIDATA0, SSIDATA0_MARK),
+ PINMUX_GPIO(GPIO_FN_SSIWS0, SSIWS0_MARK),
+ PINMUX_GPIO(GPIO_FN_SSISCK0, SSISCK0_MARK),
+
+ /* FLCTL */
+ PINMUX_GPIO(GPIO_FN_FCE, FCE_MARK),
+ PINMUX_GPIO(GPIO_FN_FRB, FRB_MARK),
+ PINMUX_GPIO(GPIO_FN_NAF7, NAF7_MARK),
+ PINMUX_GPIO(GPIO_FN_NAF6, NAF6_MARK),
+ PINMUX_GPIO(GPIO_FN_NAF5, NAF5_MARK),
+ PINMUX_GPIO(GPIO_FN_NAF4, NAF4_MARK),
+ PINMUX_GPIO(GPIO_FN_NAF3, NAF3_MARK),
+ PINMUX_GPIO(GPIO_FN_NAF2, NAF2_MARK),
+ PINMUX_GPIO(GPIO_FN_NAF1, NAF1_MARK),
+ PINMUX_GPIO(GPIO_FN_NAF0, NAF0_MARK),
+ PINMUX_GPIO(GPIO_FN_FSC, FSC_MARK),
+ PINMUX_GPIO(GPIO_FN_FOE, FOE_MARK),
+ PINMUX_GPIO(GPIO_FN_FCDE, FCDE_MARK),
+ PINMUX_GPIO(GPIO_FN_FWE, FWE_MARK),
+
+ /* LCDC */
+ PINMUX_GPIO(GPIO_FN_LCD_VEPWC, LCD_VEPWC_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_VCPWC, LCD_VCPWC_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_CLK, LCD_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_FLM, LCD_FLM_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_M_DISP, LCD_M_DISP_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_CL2, LCD_CL2_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_CL1, LCD_CL1_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DON, LCD_DON_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA15, LCD_DATA15_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA14, LCD_DATA14_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA13, LCD_DATA13_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA12, LCD_DATA12_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA11, LCD_DATA11_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA10, LCD_DATA10_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA9, LCD_DATA9_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA8, LCD_DATA8_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA7, LCD_DATA7_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA6, LCD_DATA6_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA5, LCD_DATA5_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA4, LCD_DATA4_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA3, LCD_DATA3_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA2, LCD_DATA2_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA1, LCD_DATA1_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA0, LCD_DATA0_MARK),
+};
+
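+/*
+ * Pin function configuration registers. Each PINMUX_CFG_REG() entry
+ * names a register, its address, the register width and the field
+ * width, followed by one enum ID per possible field value; a 0 marks
+ * a reserved or unused encoding.
+ */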
+static struct pinmux_cfg_reg pinmux_config_regs[] = {
+ { PINMUX_CFG_REG("PBIORL", 0xfffe3886, 16, 1) {
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ PB11_IN, PB11_OUT,
+ PB10_IN, PB10_OUT,
+ PB9_IN, PB9_OUT,
+ PB8_IN, PB8_OUT,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0 }
+ },
+ { PINMUX_CFG_REG("PBCRL4", 0xfffe3890, 16, 4) {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PB12MD_00, PB12MD_01, PB12MD_10, PB12MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PBCRL3", 0xfffe3892, 16, 4) {
+ PB11MD_0, PB11MD_1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PB10MD_0, PB10MD_1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PB9MD_00, PB9MD_01, PB9MD_10, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PB8MD_00, PB8MD_01, PB8MD_10, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PBCRL2", 0xfffe3894, 16, 4) {
+ PB7MD_00, PB7MD_01, PB7MD_10, PB7MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PB6MD_00, PB6MD_01, PB6MD_10, PB6MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PB5MD_00, PB5MD_01, PB5MD_10, PB5MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PB4MD_00, PB4MD_01, PB4MD_10, PB4MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PBCRL1", 0xfffe3896, 16, 4) {
+ PB3MD_00, PB3MD_01, PB3MD_10, PB3MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PB2MD_00, PB2MD_01, PB2MD_10, PB2MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PB1MD_00, PB1MD_01, PB1MD_10, PB1MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PB0MD_00, PB0MD_01, PB0MD_10, PB0MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("IFCR", 0xfffe38a2, 16, 4) {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PB12IRQ_00, PB12IRQ_01, PB12IRQ_10, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PCIORL", 0xfffe3906, 16, 1) {
+ 0, 0,
+ PC14_IN, PC14_OUT,
+ PC13_IN, PC13_OUT,
+ PC12_IN, PC12_OUT,
+ PC11_IN, PC11_OUT,
+ PC10_IN, PC10_OUT,
+ PC9_IN, PC9_OUT,
+ PC8_IN, PC8_OUT,
+ PC7_IN, PC7_OUT,
+ PC6_IN, PC6_OUT,
+ PC5_IN, PC5_OUT,
+ PC4_IN, PC4_OUT,
+ PC3_IN, PC3_OUT,
+ PC2_IN, PC2_OUT,
+ PC1_IN, PC1_OUT,
+ PC0_IN, PC0_OUT }
+ },
+ { PINMUX_CFG_REG("PCCRL4", 0xfffe3910, 16, 4) {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PC14MD_0, PC14MD_1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PC13MD_0, PC13MD_1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PC12MD_0, PC12MD_1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PCCRL3", 0xfffe3912, 16, 4) {
+ PC11MD_00, PC11MD_01, PC11MD_10, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PC10MD_00, PC10MD_01, PC10MD_10, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PC9MD_0, PC9MD_1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PC8MD_0, PC8MD_1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PCCRL2", 0xfffe3914, 16, 4) {
+ PC7MD_0, PC7MD_1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PC6MD_0, PC6MD_1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PC5MD_0, PC5MD_1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PC4MD_0, PC4MD_1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PCCRL1", 0xfffe3916, 16, 4) {
+ PC3MD_0, PC3MD_1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PC2MD_0, PC2MD_1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PC1MD_0, PC1MD_1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PC0MD_00, PC0MD_01, PC0MD_10, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PDIORL", 0xfffe3986, 16, 1) {
+ PD15_IN, PD15_OUT,
+ PD14_IN, PD14_OUT,
+ PD13_IN, PD13_OUT,
+ PD12_IN, PD12_OUT,
+ PD11_IN, PD11_OUT,
+ PD10_IN, PD10_OUT,
+ PD9_IN, PD9_OUT,
+ PD8_IN, PD8_OUT,
+ PD7_IN, PD7_OUT,
+ PD6_IN, PD6_OUT,
+ PD5_IN, PD5_OUT,
+ PD4_IN, PD4_OUT,
+ PD3_IN, PD3_OUT,
+ PD2_IN, PD2_OUT,
+ PD1_IN, PD1_OUT,
+ PD0_IN, PD0_OUT }
+ },
+ { PINMUX_CFG_REG("PDCRL4", 0xfffe3990, 16, 4) {
+ PD15MD_000, PD15MD_001, PD15MD_010, 0,
+ PD15MD_100, PD15MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PD14MD_000, PD14MD_001, PD14MD_010, 0,
+ 0, PD14MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PD13MD_000, PD13MD_001, PD13MD_010, 0,
+ PD13MD_100, PD13MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PD12MD_000, PD12MD_001, PD12MD_010, 0,
+ PD12MD_100, PD12MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PDCRL3", 0xfffe3992, 16, 4) {
+ PD11MD_000, PD11MD_001, PD11MD_010, 0,
+ PD11MD_100, PD11MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PD10MD_000, PD10MD_001, PD10MD_010, 0,
+ PD10MD_100, PD10MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PD9MD_000, PD9MD_001, PD9MD_010, 0,
+ PD9MD_100, PD9MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PD8MD_000, PD8MD_001, PD8MD_010, 0,
+ PD8MD_100, PD8MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PDCRL2", 0xfffe3994, 16, 4) {
+ PD7MD_000, PD7MD_001, PD7MD_010, PD7MD_011,
+ PD7MD_100, PD7MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PD6MD_000, PD6MD_001, PD6MD_010, PD6MD_011,
+ PD6MD_100, PD6MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PD5MD_000, PD5MD_001, PD5MD_010, PD5MD_011,
+ PD5MD_100, PD5MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PD4MD_000, PD4MD_001, PD4MD_010, PD4MD_011,
+ PD4MD_100, PD4MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PDCRL1", 0xfffe3996, 16, 4) {
+ PD3MD_000, PD3MD_001, PD3MD_010, PD3MD_011,
+ PD3MD_100, PD3MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PD2MD_000, PD2MD_001, PD2MD_010, PD2MD_011,
+ PD2MD_100, PD2MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PD1MD_000, PD1MD_001, PD1MD_010, PD1MD_011,
+ PD1MD_100, PD1MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PD0MD_000, PD0MD_001, PD0MD_010, PD0MD_011,
+ PD0MD_100, PD0MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PEIORL", 0xfffe3a06, 16, 1) {
+ PE15_IN, PE15_OUT,
+ PE14_IN, PE14_OUT,
+ PE13_IN, PE13_OUT,
+ PE12_IN, PE12_OUT,
+ PE11_IN, PE11_OUT,
+ PE10_IN, PE10_OUT,
+ PE9_IN, PE9_OUT,
+ PE8_IN, PE8_OUT,
+ PE7_IN, PE7_OUT,
+ PE6_IN, PE6_OUT,
+ PE5_IN, PE5_OUT,
+ PE4_IN, PE4_OUT,
+ PE3_IN, PE3_OUT,
+ PE2_IN, PE2_OUT,
+ PE1_IN, PE1_OUT,
+ PE0_IN, PE0_OUT }
+ },
+ { PINMUX_CFG_REG("PECRL4", 0xfffe3a10, 16, 4) {
+ PE15MD_00, PE15MD_01, 0, PE15MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PE14MD_00, PE14MD_01, 0, PE14MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PE13MD_00, 0, 0, PE13MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PE12MD_00, 0, 0, PE12MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PECRL3", 0xfffe3a12, 16, 4) {
+ PE11MD_000, PE11MD_001, PE11MD_010, 0,
+ PE11MD_100, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PE10MD_000, PE10MD_001, PE10MD_010, 0,
+ PE10MD_100, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PE9MD_00, PE9MD_01, PE9MD_10, PE9MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PE8MD_00, PE8MD_01, PE8MD_10, PE8MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PECRL2", 0xfffe3a14, 16, 4) {
+ PE7MD_000, PE7MD_001, PE7MD_010, PE7MD_011,
+ PE7MD_100, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PE6MD_000, PE6MD_001, PE6MD_010, PE6MD_011,
+ PE6MD_100, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PE5MD_000, PE5MD_001, PE5MD_010, PE5MD_011,
+ PE5MD_100, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PE4MD_000, PE4MD_001, PE4MD_010, PE4MD_011,
+ PE4MD_100, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PECRL1", 0xfffe3a16, 16, 4) {
+ PE3MD_00, PE3MD_01, 0, PE3MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PE2MD_00, PE2MD_01, 0, PE2MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PE1MD_00, PE1MD_01, PE1MD_10, PE1MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PE0MD_000, PE0MD_001, 0, PE0MD_011,
+ PE0MD_100, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PFIORH", 0xfffe3a84, 16, 1) {
+ 0, 0,
+ PF30_IN, PF30_OUT,
+ PF29_IN, PF29_OUT,
+ PF28_IN, PF28_OUT,
+ PF27_IN, PF27_OUT,
+ PF26_IN, PF26_OUT,
+ PF25_IN, PF25_OUT,
+ PF24_IN, PF24_OUT,
+ PF23_IN, PF23_OUT,
+ PF22_IN, PF22_OUT,
+ PF21_IN, PF21_OUT,
+ PF20_IN, PF20_OUT,
+ PF19_IN, PF19_OUT,
+ PF18_IN, PF18_OUT,
+ PF17_IN, PF17_OUT,
+ PF16_IN, PF16_OUT }
+ },
+ { PINMUX_CFG_REG("PFIORL", 0xfffe3a86, 16, 1) {
+ PF15_IN, PF15_OUT,
+ PF14_IN, PF14_OUT,
+ PF13_IN, PF13_OUT,
+ PF12_IN, PF12_OUT,
+ PF11_IN, PF11_OUT,
+ PF10_IN, PF10_OUT,
+ PF9_IN, PF9_OUT,
+ PF8_IN, PF8_OUT,
+ PF7_IN, PF7_OUT,
+ PF6_IN, PF6_OUT,
+ PF5_IN, PF5_OUT,
+ PF4_IN, PF4_OUT,
+ PF3_IN, PF3_OUT,
+ PF2_IN, PF2_OUT,
+ PF1_IN, PF1_OUT,
+ PF0_IN, PF0_OUT }
+ },
+ { PINMUX_CFG_REG("PFCRH4", 0xfffe3a88, 16, 4) {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF30MD_0, PF30MD_1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF29MD_0, PF29MD_1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF28MD_0, PF28MD_1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PFCRH3", 0xfffe3a8a, 16, 4) {
+ PF27MD_0, PF27MD_1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF26MD_0, PF26MD_1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF25MD_0, PF25MD_1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF24MD_0, PF24MD_1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PFCRH2", 0xfffe3a8c, 16, 4) {
+ PF23MD_00, PF23MD_01, PF23MD_10, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF22MD_00, PF22MD_01, PF22MD_10, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF21MD_00, PF21MD_01, PF21MD_10, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF20MD_00, PF20MD_01, PF20MD_10, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PFCRH1", 0xfffe3a8e, 16, 4) {
+ PF19MD_00, PF19MD_01, PF19MD_10, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF18MD_00, PF18MD_01, PF18MD_10, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF17MD_00, PF17MD_01, PF17MD_10, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF16MD_00, PF16MD_01, PF16MD_10, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PFCRL4", 0xfffe3a90, 16, 4) {
+ PF15MD_00, PF15MD_01, PF15MD_10, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF14MD_00, PF14MD_01, PF14MD_10, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF13MD_00, PF13MD_01, PF13MD_10, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF12MD_00, PF12MD_01, PF12MD_10, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PFCRL3", 0xfffe3a92, 16, 4) {
+ PF11MD_00, PF11MD_01, PF11MD_10, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF10MD_00, PF10MD_01, PF10MD_10, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF9MD_00, PF9MD_01, PF9MD_10, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF8MD_00, PF8MD_01, PF8MD_10, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PFCRL2", 0xfffe3a94, 16, 4) {
+ PF7MD_00, PF7MD_01, PF7MD_10, PF7MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF6MD_00, PF6MD_01, PF6MD_10, PF6MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF5MD_00, PF5MD_01, PF5MD_10, PF5MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF4MD_00, PF4MD_01, PF4MD_10, PF4MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PFCRL1", 0xfffe3a96, 16, 4) {
+ PF3MD_00, PF3MD_01, PF3MD_10, PF3MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF2MD_00, PF2MD_01, PF2MD_10, PF2MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF1MD_00, PF1MD_01, PF1MD_10, PF1MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF0MD_00, PF0MD_01, PF0MD_10, PF0MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ {}
+};
+
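+/*
+ * Port data registers. Each PINMUX_DATA_REG() entry gives the register
+ * name, address and width, followed by the data enum for each bit from
+ * the most significant bit down; a 0 marks an unimplemented bit.
+ */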
+static struct pinmux_data_reg pinmux_data_regs[] = {
+ { PINMUX_DATA_REG("PADRL", 0xfffe3802, 16) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PA7_DATA, PA6_DATA, PA5_DATA, PA4_DATA,
+ PA3_DATA, PA2_DATA, PA1_DATA, PA0_DATA }
+ },
+ { PINMUX_DATA_REG("PBDRL", 0xfffe3882, 16) {
+ 0, 0, 0, PB12_DATA,
+ PB11_DATA, PB10_DATA, PB9_DATA, PB8_DATA,
+ PB7_DATA, PB6_DATA, PB5_DATA, PB4_DATA,
+ PB3_DATA, PB2_DATA, PB1_DATA, PB0_DATA }
+ },
+ { PINMUX_DATA_REG("PCDRL", 0xfffe3902, 16) {
+ 0, PC14_DATA, PC13_DATA, PC12_DATA,
+ PC11_DATA, PC10_DATA, PC9_DATA, PC8_DATA,
+ PC7_DATA, PC6_DATA, PC5_DATA, PC4_DATA,
+ PC3_DATA, PC2_DATA, PC1_DATA, PC0_DATA }
+ },
+ { PINMUX_DATA_REG("PDDRL", 0xfffe3982, 16) {
+ PD15_DATA, PD14_DATA, PD13_DATA, PD12_DATA,
+ PD11_DATA, PD10_DATA, PD9_DATA, PD8_DATA,
+ PD7_DATA, PD6_DATA, PD5_DATA, PD4_DATA,
+ PD3_DATA, PD2_DATA, PD1_DATA, PD0_DATA }
+ },
+ { PINMUX_DATA_REG("PEDRL", 0xfffe3a02, 16) {
+ PE15_DATA, PE14_DATA, PE13_DATA, PE12_DATA,
+ PE11_DATA, PE10_DATA, PE9_DATA, PE8_DATA,
+ PE7_DATA, PE6_DATA, PE5_DATA, PE4_DATA,
+ PE3_DATA, PE2_DATA, PE1_DATA, PE0_DATA }
+ },
+ { PINMUX_DATA_REG("PFDRH", 0xfffe3a80, 16) {
+ 0, PF30_DATA, PF29_DATA, PF28_DATA,
+ PF27_DATA, PF26_DATA, PF25_DATA, PF24_DATA,
+ PF23_DATA, PF22_DATA, PF21_DATA, PF20_DATA,
+ PF19_DATA, PF18_DATA, PF17_DATA, PF16_DATA }
+ },
+ { PINMUX_DATA_REG("PFDRL", 0xfffe3a82, 16) {
+ PF15_DATA, PF14_DATA, PF13_DATA, PF12_DATA,
+ PF11_DATA, PF10_DATA, PF9_DATA, PF8_DATA,
+ PF7_DATA, PF6_DATA, PF5_DATA, PF4_DATA,
+ PF3_DATA, PF2_DATA, PF1_DATA, PF0_DATA }
+ },
+ { },
+};
+
+static struct pinmux_info sh7203_pinmux_info = {
+ .name = "sh7203_pfc",
+ .reserved_id = PINMUX_RESERVED,
+ .data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END },
+ .input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END, FORCE_IN },
+ .output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END, FORCE_OUT },
+ .mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END },
+ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+ .first_gpio = GPIO_PA7,
+ .last_gpio = GPIO_FN_LCD_DATA0,
+
+ .gpios = pinmux_gpios,
+ .cfg_regs = pinmux_config_regs,
+ .data_regs = pinmux_data_regs,
+
+ .gpio_data = pinmux_data,
+ .gpio_data_size = ARRAY_SIZE(pinmux_data),
+};
+
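+/*
+ * Registering the pinmux info below exposes the ports and peripheral
+ * functions above through gpiolib, so board code can typically claim a
+ * function pin with e.g. gpio_request(GPIO_FN_TXD0, NULL).
+ */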
+static int __init plat_pinmux_setup(void)
+{
+ return register_pinmux(&sh7203_pinmux_info);
+}
+arch_initcall(plat_pinmux_setup);
diff --git a/arch/sh/kernel/cpu/sh2a/probe.c b/arch/sh/kernel/cpu/sh2a/probe.c
new file mode 100644
index 00000000..48e97a2a
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh2a/probe.c
@@ -0,0 +1,54 @@
+/*
+ * arch/sh/kernel/cpu/sh2a/probe.c
+ *
+ * CPU Subtype Probing for SH-2A.
+ *
+ * Copyright (C) 2004 - 2007 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <asm/processor.h>
+#include <asm/cache.h>
+
+void __cpuinit cpu_probe(void)
+{
+ boot_cpu_data.family = CPU_FAMILY_SH2A;
+
+	/* All SH-2A CPUs support 16- and 32-bit opcodes. */
+ boot_cpu_data.flags |= CPU_HAS_OP32;
+
+#if defined(CONFIG_CPU_SUBTYPE_SH7201)
+ boot_cpu_data.type = CPU_SH7201;
+ boot_cpu_data.flags |= CPU_HAS_FPU;
+#elif defined(CONFIG_CPU_SUBTYPE_SH7203)
+ boot_cpu_data.type = CPU_SH7203;
+ boot_cpu_data.flags |= CPU_HAS_FPU;
+#elif defined(CONFIG_CPU_SUBTYPE_SH7263)
+ boot_cpu_data.type = CPU_SH7263;
+ boot_cpu_data.flags |= CPU_HAS_FPU;
+#elif defined(CONFIG_CPU_SUBTYPE_SH7206)
+ boot_cpu_data.type = CPU_SH7206;
+ boot_cpu_data.flags |= CPU_HAS_DSP;
+#elif defined(CONFIG_CPU_SUBTYPE_MXG)
+ boot_cpu_data.type = CPU_MXG;
+ boot_cpu_data.flags |= CPU_HAS_DSP;
+#endif
+
+ boot_cpu_data.dcache.ways = 4;
+ boot_cpu_data.dcache.way_incr = (1 << 11);
+ boot_cpu_data.dcache.sets = 128;
+ boot_cpu_data.dcache.entry_shift = 4;
+ boot_cpu_data.dcache.linesz = L1_CACHE_BYTES;
+ boot_cpu_data.dcache.flags = 0;
+
+ /*
+ * The icache is the same as the dcache as far as this setup is
+ * concerned. The only real difference in hardware is that the icache
+	 * lacks the U bit that the dcache has, but none of this has any bearing
+ * on the cache info.
+ */
+ boot_cpu_data.icache = boot_cpu_data.dcache;
+}
diff --git a/arch/sh/kernel/cpu/sh2a/setup-mxg.c b/arch/sh/kernel/cpu/sh2a/setup-mxg.c
new file mode 100644
index 00000000..949bf2ba
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh2a/setup-mxg.c
@@ -0,0 +1,248 @@
+/*
+ * Renesas MX-G (R8A03022BG) Setup
+ *
+ * Copyright (C) 2008, 2009 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/serial.h>
+#include <linux/serial_sci.h>
+#include <linux/sh_timer.h>
+
+enum {
+ UNUSED = 0,
+
+ /* interrupt sources */
+ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
+ IRQ8, IRQ9, IRQ10, IRQ11, IRQ12, IRQ13, IRQ14, IRQ15,
+
+ PINT0, PINT1, PINT2, PINT3, PINT4, PINT5, PINT6, PINT7,
+ SINT8, SINT7, SINT6, SINT5, SINT4, SINT3, SINT2, SINT1,
+
+ SCIF0, SCIF1,
+
+ MTU2_GROUP1, MTU2_GROUP2, MTU2_GROUP3, MTU2_GROUP4, MTU2_GROUP5,
+ MTU2_TGI3B, MTU2_TGI3C,
+
+ /* interrupt groups */
+ PINT,
+};
+
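+/*
+ * Each INTC_IRQ() entry binds an interrupt source ID to one vector
+ * number; a source listed against several consecutive vectors (the
+ * SCIFs and MTU2 groups below) keeps those vectors under the same
+ * priority and mask controls defined further down.
+ */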
+static struct intc_vect vectors[] __initdata = {
+ INTC_IRQ(IRQ0, 64), INTC_IRQ(IRQ1, 65),
+ INTC_IRQ(IRQ2, 66), INTC_IRQ(IRQ3, 67),
+ INTC_IRQ(IRQ4, 68), INTC_IRQ(IRQ5, 69),
+ INTC_IRQ(IRQ6, 70), INTC_IRQ(IRQ7, 71),
+ INTC_IRQ(IRQ8, 72), INTC_IRQ(IRQ9, 73),
+ INTC_IRQ(IRQ10, 74), INTC_IRQ(IRQ11, 75),
+ INTC_IRQ(IRQ12, 76), INTC_IRQ(IRQ13, 77),
+ INTC_IRQ(IRQ14, 78), INTC_IRQ(IRQ15, 79),
+
+ INTC_IRQ(PINT0, 80), INTC_IRQ(PINT1, 81),
+ INTC_IRQ(PINT2, 82), INTC_IRQ(PINT3, 83),
+ INTC_IRQ(PINT4, 84), INTC_IRQ(PINT5, 85),
+ INTC_IRQ(PINT6, 86), INTC_IRQ(PINT7, 87),
+
+ INTC_IRQ(SINT8, 94), INTC_IRQ(SINT7, 95),
+ INTC_IRQ(SINT6, 96), INTC_IRQ(SINT5, 97),
+ INTC_IRQ(SINT4, 98), INTC_IRQ(SINT3, 99),
+ INTC_IRQ(SINT2, 100), INTC_IRQ(SINT1, 101),
+
+ INTC_IRQ(SCIF0, 220), INTC_IRQ(SCIF0, 221),
+ INTC_IRQ(SCIF0, 222), INTC_IRQ(SCIF0, 223),
+ INTC_IRQ(SCIF1, 224), INTC_IRQ(SCIF1, 225),
+ INTC_IRQ(SCIF1, 226), INTC_IRQ(SCIF1, 227),
+
+ INTC_IRQ(MTU2_GROUP1, 228), INTC_IRQ(MTU2_GROUP1, 229),
+ INTC_IRQ(MTU2_GROUP1, 230), INTC_IRQ(MTU2_GROUP1, 231),
+ INTC_IRQ(MTU2_GROUP1, 232), INTC_IRQ(MTU2_GROUP1, 233),
+
+ INTC_IRQ(MTU2_GROUP2, 234), INTC_IRQ(MTU2_GROUP2, 235),
+ INTC_IRQ(MTU2_GROUP2, 236), INTC_IRQ(MTU2_GROUP2, 237),
+ INTC_IRQ(MTU2_GROUP2, 238), INTC_IRQ(MTU2_GROUP2, 239),
+
+ INTC_IRQ(MTU2_GROUP3, 240), INTC_IRQ(MTU2_GROUP3, 241),
+ INTC_IRQ(MTU2_GROUP3, 242), INTC_IRQ(MTU2_GROUP3, 243),
+
+ INTC_IRQ(MTU2_TGI3B, 244),
+ INTC_IRQ(MTU2_TGI3C, 245),
+
+ INTC_IRQ(MTU2_GROUP4, 246), INTC_IRQ(MTU2_GROUP4, 247),
+ INTC_IRQ(MTU2_GROUP4, 248), INTC_IRQ(MTU2_GROUP4, 249),
+ INTC_IRQ(MTU2_GROUP4, 250), INTC_IRQ(MTU2_GROUP4, 251),
+
+ INTC_IRQ(MTU2_GROUP5, 252), INTC_IRQ(MTU2_GROUP5, 253),
+ INTC_IRQ(MTU2_GROUP5, 254), INTC_IRQ(MTU2_GROUP5, 255),
+};
+
+static struct intc_group groups[] __initdata = {
+ INTC_GROUP(PINT, PINT0, PINT1, PINT2, PINT3,
+ PINT4, PINT5, PINT6, PINT7),
+};
+
+static struct intc_prio_reg prio_registers[] __initdata = {
+ { 0xfffd9418, 0, 16, 4, /* IPR01 */ { IRQ0, IRQ1, IRQ2, IRQ3 } },
+ { 0xfffd941a, 0, 16, 4, /* IPR02 */ { IRQ4, IRQ5, IRQ6, IRQ7 } },
+ { 0xfffd941c, 0, 16, 4, /* IPR03 */ { IRQ8, IRQ9, IRQ10, IRQ11 } },
+ { 0xfffd941e, 0, 16, 4, /* IPR04 */ { IRQ12, IRQ13, IRQ14, IRQ15 } },
+ { 0xfffd9420, 0, 16, 4, /* IPR05 */ { PINT, 0, 0, 0 } },
+ { 0xfffd9800, 0, 16, 4, /* IPR06 */ { } },
+ { 0xfffd9802, 0, 16, 4, /* IPR07 */ { } },
+ { 0xfffd9804, 0, 16, 4, /* IPR08 */ { } },
+ { 0xfffd9806, 0, 16, 4, /* IPR09 */ { } },
+ { 0xfffd9808, 0, 16, 4, /* IPR10 */ { } },
+ { 0xfffd980a, 0, 16, 4, /* IPR11 */ { } },
+ { 0xfffd980c, 0, 16, 4, /* IPR12 */ { } },
+ { 0xfffd980e, 0, 16, 4, /* IPR13 */ { } },
+ { 0xfffd9810, 0, 16, 4, /* IPR14 */ { 0, 0, 0, SCIF0 } },
+ { 0xfffd9812, 0, 16, 4, /* IPR15 */
+ { SCIF1, MTU2_GROUP1, MTU2_GROUP2, MTU2_GROUP3 } },
+ { 0xfffd9814, 0, 16, 4, /* IPR16 */
+ { MTU2_TGI3B, MTU2_TGI3C, MTU2_GROUP4, MTU2_GROUP5 } },
+};
+
+static struct intc_mask_reg mask_registers[] __initdata = {
+ { 0xfffd9408, 0, 16, /* PINTER */
+ { 0, 0, 0, 0, 0, 0, 0, 0,
+ PINT7, PINT6, PINT5, PINT4, PINT3, PINT2, PINT1, PINT0 } },
+};
+
+static DECLARE_INTC_DESC(intc_desc, "mxg", vectors, groups,
+ mask_registers, prio_registers, NULL);
+
+static struct sh_timer_config mtu2_0_platform_data = {
+ .channel_offset = -0x80,
+ .timer_bit = 0,
+ .clockevent_rating = 200,
+};
+
+static struct resource mtu2_0_resources[] = {
+ [0] = {
+ .start = 0xff801300,
+ .end = 0xff801326,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 228,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device mtu2_0_device = {
+ .name = "sh_mtu2",
+ .id = 0,
+ .dev = {
+ .platform_data = &mtu2_0_platform_data,
+ },
+ .resource = mtu2_0_resources,
+ .num_resources = ARRAY_SIZE(mtu2_0_resources),
+};
+
+static struct sh_timer_config mtu2_1_platform_data = {
+ .channel_offset = -0x100,
+ .timer_bit = 1,
+ .clockevent_rating = 200,
+};
+
+static struct resource mtu2_1_resources[] = {
+ [0] = {
+ .start = 0xff801380,
+ .end = 0xff801390,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 234,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device mtu2_1_device = {
+ .name = "sh_mtu2",
+ .id = 1,
+ .dev = {
+ .platform_data = &mtu2_1_platform_data,
+ },
+ .resource = mtu2_1_resources,
+ .num_resources = ARRAY_SIZE(mtu2_1_resources),
+};
+
+static struct sh_timer_config mtu2_2_platform_data = {
+ .channel_offset = 0x80,
+ .timer_bit = 2,
+ .clockevent_rating = 200,
+};
+
+static struct resource mtu2_2_resources[] = {
+ [0] = {
+ .start = 0xff801000,
+ .end = 0xff80100a,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 240,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device mtu2_2_device = {
+ .name = "sh_mtu2",
+ .id = 2,
+ .dev = {
+ .platform_data = &mtu2_2_platform_data,
+ },
+ .resource = mtu2_2_resources,
+ .num_resources = ARRAY_SIZE(mtu2_2_resources),
+};
+
+static struct plat_sci_port scif0_platform_data = {
+ .mapbase = 0xff804000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .scbrr_algo_id = SCBRR_ALGO_2,
+ .type = PORT_SCIF,
+ .irqs = { 220, 220, 220, 220 },
+};
+
+static struct platform_device scif0_device = {
+ .name = "sh-sci",
+ .id = 0,
+ .dev = {
+ .platform_data = &scif0_platform_data,
+ },
+};
+
+static struct platform_device *mxg_devices[] __initdata = {
+ &scif0_device,
+ &mtu2_0_device,
+ &mtu2_1_device,
+ &mtu2_2_device,
+};
+
+static int __init mxg_devices_setup(void)
+{
+ return platform_add_devices(mxg_devices,
+ ARRAY_SIZE(mxg_devices));
+}
+arch_initcall(mxg_devices_setup);
+
+void __init plat_irq_setup(void)
+{
+ register_intc_controller(&intc_desc);
+}
+
+static struct platform_device *mxg_early_devices[] __initdata = {
+ &scif0_device,
+ &mtu2_0_device,
+ &mtu2_1_device,
+ &mtu2_2_device,
+};
+
+void __init plat_early_device_setup(void)
+{
+ early_platform_add_devices(mxg_early_devices,
+ ARRAY_SIZE(mxg_early_devices));
+}
diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7201.c b/arch/sh/kernel/cpu/sh2a/setup-sh7201.c
new file mode 100644
index 00000000..9df558dc
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh2a/setup-sh7201.c
@@ -0,0 +1,470 @@
+/*
+ * SH7201 setup
+ *
+ * Copyright (C) 2008 Peter Griffin pgriffin@mpc-data.co.uk
+ * Copyright (C) 2009 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/serial.h>
+#include <linux/serial_sci.h>
+#include <linux/sh_timer.h>
+#include <linux/io.h>
+
+enum {
+ UNUSED = 0,
+
+ /* interrupt sources */
+ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
+ PINT0, PINT1, PINT2, PINT3, PINT4, PINT5, PINT6, PINT7,
+
+ ADC_ADI,
+
+ MTU20_ABCD, MTU20_VEF, MTU21_AB, MTU21_VU, MTU22_AB, MTU22_VU,
+ MTU23_ABCD, MTU24_ABCD, MTU25_UVW, MTU2_TCI3V, MTU2_TCI4V,
+
+ RTC, WDT,
+
+ IIC30, IIC31, IIC32,
+
+ DMAC0_DMINT0, DMAC1_DMINT1,
+ DMAC2_DMINT2, DMAC3_DMINT3,
+
+ SCIF0, SCIF1, SCIF2, SCIF3, SCIF4, SCIF5, SCIF6, SCIF7,
+
+ DMAC0_DMINTA, DMAC4_DMINT4, DMAC5_DMINT5, DMAC6_DMINT6,
+ DMAC7_DMINT7,
+
+ RCAN0, RCAN1,
+
+ SSI0_SSII, SSI1_SSII,
+
+ TMR0, TMR1,
+
+ /* interrupt groups */
+ PINT,
+};
+
+static struct intc_vect vectors[] __initdata = {
+ INTC_IRQ(IRQ0, 64), INTC_IRQ(IRQ1, 65),
+ INTC_IRQ(IRQ2, 66), INTC_IRQ(IRQ3, 67),
+ INTC_IRQ(IRQ4, 68), INTC_IRQ(IRQ5, 69),
+ INTC_IRQ(IRQ6, 70), INTC_IRQ(IRQ7, 71),
+
+ INTC_IRQ(PINT0, 80), INTC_IRQ(PINT1, 81),
+ INTC_IRQ(PINT2, 82), INTC_IRQ(PINT3, 83),
+ INTC_IRQ(PINT4, 84), INTC_IRQ(PINT5, 85),
+ INTC_IRQ(PINT6, 86), INTC_IRQ(PINT7, 87),
+
+ INTC_IRQ(ADC_ADI, 92),
+
+ INTC_IRQ(MTU20_ABCD, 108), INTC_IRQ(MTU20_ABCD, 109),
+ INTC_IRQ(MTU20_ABCD, 110), INTC_IRQ(MTU20_ABCD, 111),
+
+ INTC_IRQ(MTU20_VEF, 112), INTC_IRQ(MTU20_VEF, 113),
+ INTC_IRQ(MTU20_VEF, 114),
+
+ INTC_IRQ(MTU21_AB, 116), INTC_IRQ(MTU21_AB, 117),
+ INTC_IRQ(MTU21_VU, 120), INTC_IRQ(MTU21_VU, 121),
+
+ INTC_IRQ(MTU22_AB, 124), INTC_IRQ(MTU22_AB, 125),
+ INTC_IRQ(MTU22_VU, 128), INTC_IRQ(MTU22_VU, 129),
+
+ INTC_IRQ(MTU23_ABCD, 132), INTC_IRQ(MTU23_ABCD, 133),
+ INTC_IRQ(MTU23_ABCD, 134), INTC_IRQ(MTU23_ABCD, 135),
+
+ INTC_IRQ(MTU2_TCI3V, 136),
+
+ INTC_IRQ(MTU24_ABCD, 140), INTC_IRQ(MTU24_ABCD, 141),
+ INTC_IRQ(MTU24_ABCD, 142), INTC_IRQ(MTU24_ABCD, 143),
+
+ INTC_IRQ(MTU2_TCI4V, 144),
+
+ INTC_IRQ(MTU25_UVW, 148), INTC_IRQ(MTU25_UVW, 149),
+ INTC_IRQ(MTU25_UVW, 150),
+
+ INTC_IRQ(RTC, 152), INTC_IRQ(RTC, 153),
+ INTC_IRQ(RTC, 154),
+
+ INTC_IRQ(WDT, 156),
+
+ INTC_IRQ(IIC30, 157), INTC_IRQ(IIC30, 158),
+ INTC_IRQ(IIC30, 159), INTC_IRQ(IIC30, 160),
+ INTC_IRQ(IIC30, 161),
+
+ INTC_IRQ(IIC31, 164), INTC_IRQ(IIC31, 165),
+ INTC_IRQ(IIC31, 166), INTC_IRQ(IIC31, 167),
+ INTC_IRQ(IIC31, 168),
+
+ INTC_IRQ(IIC32, 170), INTC_IRQ(IIC32, 171),
+ INTC_IRQ(IIC32, 172), INTC_IRQ(IIC32, 173),
+ INTC_IRQ(IIC32, 174),
+
+ INTC_IRQ(DMAC0_DMINT0, 176), INTC_IRQ(DMAC1_DMINT1, 177),
+ INTC_IRQ(DMAC2_DMINT2, 178), INTC_IRQ(DMAC3_DMINT3, 179),
+
+ INTC_IRQ(SCIF0, 180), INTC_IRQ(SCIF0, 181),
+ INTC_IRQ(SCIF0, 182), INTC_IRQ(SCIF0, 183),
+ INTC_IRQ(SCIF1, 184), INTC_IRQ(SCIF1, 185),
+ INTC_IRQ(SCIF1, 186), INTC_IRQ(SCIF1, 187),
+ INTC_IRQ(SCIF2, 188), INTC_IRQ(SCIF2, 189),
+ INTC_IRQ(SCIF2, 190), INTC_IRQ(SCIF2, 191),
+ INTC_IRQ(SCIF3, 192), INTC_IRQ(SCIF3, 193),
+ INTC_IRQ(SCIF3, 194), INTC_IRQ(SCIF3, 195),
+ INTC_IRQ(SCIF4, 196), INTC_IRQ(SCIF4, 197),
+ INTC_IRQ(SCIF4, 198), INTC_IRQ(SCIF4, 199),
+ INTC_IRQ(SCIF5, 200), INTC_IRQ(SCIF5, 201),
+ INTC_IRQ(SCIF5, 202), INTC_IRQ(SCIF5, 203),
+ INTC_IRQ(SCIF6, 204), INTC_IRQ(SCIF6, 205),
+ INTC_IRQ(SCIF6, 206), INTC_IRQ(SCIF6, 207),
+ INTC_IRQ(SCIF7, 208), INTC_IRQ(SCIF7, 209),
+ INTC_IRQ(SCIF7, 210), INTC_IRQ(SCIF7, 211),
+
+ INTC_IRQ(DMAC0_DMINTA, 212), INTC_IRQ(DMAC4_DMINT4, 216),
+ INTC_IRQ(DMAC5_DMINT5, 217), INTC_IRQ(DMAC6_DMINT6, 218),
+ INTC_IRQ(DMAC7_DMINT7, 219),
+
+ INTC_IRQ(RCAN0, 228), INTC_IRQ(RCAN0, 229),
+ INTC_IRQ(RCAN0, 230),
+ INTC_IRQ(RCAN0, 231), INTC_IRQ(RCAN0, 232),
+
+ INTC_IRQ(RCAN1, 234), INTC_IRQ(RCAN1, 235),
+ INTC_IRQ(RCAN1, 236),
+ INTC_IRQ(RCAN1, 237), INTC_IRQ(RCAN1, 238),
+
+ INTC_IRQ(SSI0_SSII, 244), INTC_IRQ(SSI1_SSII, 245),
+
+ INTC_IRQ(TMR0, 246), INTC_IRQ(TMR0, 247),
+ INTC_IRQ(TMR0, 248),
+
+ INTC_IRQ(TMR1, 252), INTC_IRQ(TMR1, 253),
+ INTC_IRQ(TMR1, 254),
+};
+
+static struct intc_group groups[] __initdata = {
+ INTC_GROUP(PINT, PINT0, PINT1, PINT2, PINT3,
+ PINT4, PINT5, PINT6, PINT7),
+};
+
+static struct intc_prio_reg prio_registers[] __initdata = {
+ { 0xfffe9418, 0, 16, 4, /* IPR01 */ { IRQ0, IRQ1, IRQ2, IRQ3 } },
+ { 0xfffe941a, 0, 16, 4, /* IPR02 */ { IRQ4, IRQ5, IRQ6, IRQ7 } },
+ { 0xfffe9420, 0, 16, 4, /* IPR05 */ { PINT, 0, ADC_ADI, 0 } },
+ { 0xfffe9800, 0, 16, 4, /* IPR06 */ { 0, MTU20_ABCD, MTU20_VEF, MTU21_AB } },
+ { 0xfffe9802, 0, 16, 4, /* IPR07 */ { MTU21_VU, MTU22_AB, MTU22_VU, MTU23_ABCD } },
+ { 0xfffe9804, 0, 16, 4, /* IPR08 */ { MTU2_TCI3V, MTU24_ABCD, MTU2_TCI4V, MTU25_UVW } },
+
+ { 0xfffe9806, 0, 16, 4, /* IPR09 */ { RTC, WDT, IIC30, 0 } },
+ { 0xfffe9808, 0, 16, 4, /* IPR10 */ { IIC31, IIC32, DMAC0_DMINT0, DMAC1_DMINT1 } },
+ { 0xfffe980a, 0, 16, 4, /* IPR11 */ { DMAC2_DMINT2, DMAC3_DMINT3, SCIF0, SCIF1 } },
+ { 0xfffe980c, 0, 16, 4, /* IPR12 */ { SCIF2, SCIF3, SCIF4, SCIF5 } },
+ { 0xfffe980e, 0, 16, 4, /* IPR13 */ { SCIF6, SCIF7, DMAC0_DMINTA, DMAC4_DMINT4 } },
+ { 0xfffe9810, 0, 16, 4, /* IPR14 */ { DMAC5_DMINT5, DMAC6_DMINT6, DMAC7_DMINT7, 0 } },
+ { 0xfffe9812, 0, 16, 4, /* IPR15 */ { 0, RCAN0, RCAN1, 0 } },
+ { 0xfffe9814, 0, 16, 4, /* IPR16 */ { SSI0_SSII, SSI1_SSII, TMR0, TMR1 } },
+};
+
+static struct intc_mask_reg mask_registers[] __initdata = {
+ { 0xfffe9408, 0, 16, /* PINTER */
+ { 0, 0, 0, 0, 0, 0, 0, 0,
+ PINT7, PINT6, PINT5, PINT4, PINT3, PINT2, PINT1, PINT0 } },
+};
+
+static DECLARE_INTC_DESC(intc_desc, "sh7201", vectors, groups,
+ mask_registers, prio_registers, NULL);
+
+static struct plat_sci_port scif0_platform_data = {
+ .mapbase = 0xfffe8000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .scbrr_algo_id = SCBRR_ALGO_2,
+ .type = PORT_SCIF,
+ .irqs = { 180, 180, 180, 180 }
+};
+
+static struct platform_device scif0_device = {
+ .name = "sh-sci",
+ .id = 0,
+ .dev = {
+ .platform_data = &scif0_platform_data,
+ },
+};
+
+static struct plat_sci_port scif1_platform_data = {
+ .mapbase = 0xfffe8800,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .scbrr_algo_id = SCBRR_ALGO_2,
+ .type = PORT_SCIF,
+ .irqs = { 184, 184, 184, 184 }
+};
+
+static struct platform_device scif1_device = {
+ .name = "sh-sci",
+ .id = 1,
+ .dev = {
+ .platform_data = &scif1_platform_data,
+ },
+};
+
+static struct plat_sci_port scif2_platform_data = {
+ .mapbase = 0xfffe9000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .scbrr_algo_id = SCBRR_ALGO_2,
+ .type = PORT_SCIF,
+ .irqs = { 188, 188, 188, 188 }
+};
+
+static struct platform_device scif2_device = {
+ .name = "sh-sci",
+ .id = 2,
+ .dev = {
+ .platform_data = &scif2_platform_data,
+ },
+};
+
+static struct plat_sci_port scif3_platform_data = {
+ .mapbase = 0xfffe9800,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .scbrr_algo_id = SCBRR_ALGO_2,
+ .type = PORT_SCIF,
+ .irqs = { 192, 192, 192, 192 }
+};
+
+static struct platform_device scif3_device = {
+ .name = "sh-sci",
+ .id = 3,
+ .dev = {
+ .platform_data = &scif3_platform_data,
+ },
+};
+
+static struct plat_sci_port scif4_platform_data = {
+ .mapbase = 0xfffea000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .scbrr_algo_id = SCBRR_ALGO_2,
+ .type = PORT_SCIF,
+ .irqs = { 196, 196, 196, 196 }
+};
+
+static struct platform_device scif4_device = {
+ .name = "sh-sci",
+ .id = 4,
+ .dev = {
+ .platform_data = &scif4_platform_data,
+ },
+};
+
+static struct plat_sci_port scif5_platform_data = {
+ .mapbase = 0xfffea800,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .scbrr_algo_id = SCBRR_ALGO_2,
+ .type = PORT_SCIF,
+ .irqs = { 200, 200, 200, 200 }
+};
+
+static struct platform_device scif5_device = {
+ .name = "sh-sci",
+ .id = 5,
+ .dev = {
+ .platform_data = &scif5_platform_data,
+ },
+};
+
+static struct plat_sci_port scif6_platform_data = {
+ .mapbase = 0xfffeb000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .scbrr_algo_id = SCBRR_ALGO_2,
+ .type = PORT_SCIF,
+ .irqs = { 204, 204, 204, 204 }
+};
+
+static struct platform_device scif6_device = {
+ .name = "sh-sci",
+ .id = 6,
+ .dev = {
+ .platform_data = &scif6_platform_data,
+ },
+};
+
+static struct plat_sci_port scif7_platform_data = {
+ .mapbase = 0xfffeb800,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .scbrr_algo_id = SCBRR_ALGO_2,
+ .type = PORT_SCIF,
+ .irqs = { 208, 208, 208, 208 }
+};
+
+static struct platform_device scif7_device = {
+ .name = "sh-sci",
+ .id = 7,
+ .dev = {
+ .platform_data = &scif7_platform_data,
+ },
+};
+
+static struct resource rtc_resources[] = {
+ [0] = {
+ .start = 0xffff0800,
+ .end = 0xffff2000 + 0x58 - 1,
+ .flags = IORESOURCE_IO,
+ },
+ [1] = {
+ /* Shared Period/Carry/Alarm IRQ */
+ .start = 152,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device rtc_device = {
+ .name = "sh-rtc",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(rtc_resources),
+ .resource = rtc_resources,
+};
+
+static struct sh_timer_config mtu2_0_platform_data = {
+ .channel_offset = -0x80,
+ .timer_bit = 0,
+ .clockevent_rating = 200,
+};
+
+static struct resource mtu2_0_resources[] = {
+ [0] = {
+ .start = 0xfffe4300,
+ .end = 0xfffe4326,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 108,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device mtu2_0_device = {
+ .name = "sh_mtu2",
+ .id = 0,
+ .dev = {
+ .platform_data = &mtu2_0_platform_data,
+ },
+ .resource = mtu2_0_resources,
+ .num_resources = ARRAY_SIZE(mtu2_0_resources),
+};
+
+static struct sh_timer_config mtu2_1_platform_data = {
+ .channel_offset = -0x100,
+ .timer_bit = 1,
+ .clockevent_rating = 200,
+};
+
+static struct resource mtu2_1_resources[] = {
+ [0] = {
+ .start = 0xfffe4380,
+ .end = 0xfffe4390,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 116,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device mtu2_1_device = {
+ .name = "sh_mtu2",
+ .id = 1,
+ .dev = {
+ .platform_data = &mtu2_1_platform_data,
+ },
+ .resource = mtu2_1_resources,
+ .num_resources = ARRAY_SIZE(mtu2_1_resources),
+};
+
+static struct sh_timer_config mtu2_2_platform_data = {
+ .channel_offset = 0x80,
+ .timer_bit = 2,
+ .clockevent_rating = 200,
+};
+
+static struct resource mtu2_2_resources[] = {
+ [0] = {
+ .start = 0xfffe4000,
+ .end = 0xfffe400a,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 124,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device mtu2_2_device = {
+ .name = "sh_mtu2",
+ .id = 2,
+ .dev = {
+ .platform_data = &mtu2_2_platform_data,
+ },
+ .resource = mtu2_2_resources,
+ .num_resources = ARRAY_SIZE(mtu2_2_resources),
+};
+
+static struct platform_device *sh7201_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &scif3_device,
+ &scif4_device,
+ &scif5_device,
+ &scif6_device,
+ &scif7_device,
+ &rtc_device,
+ &mtu2_0_device,
+ &mtu2_1_device,
+ &mtu2_2_device,
+};
+
+static int __init sh7201_devices_setup(void)
+{
+ return platform_add_devices(sh7201_devices,
+ ARRAY_SIZE(sh7201_devices));
+}
+arch_initcall(sh7201_devices_setup);
+
+void __init plat_irq_setup(void)
+{
+ register_intc_controller(&intc_desc);
+}
+
+static struct platform_device *sh7201_early_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &scif3_device,
+ &scif4_device,
+ &scif5_device,
+ &scif6_device,
+ &scif7_device,
+ &mtu2_0_device,
+ &mtu2_1_device,
+ &mtu2_2_device,
+};
+
+#define STBCR3 0xfffe0408
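+/*
+ * Bit 5 (0x20) of STBCR3 is the MTU2 module stop bit; it must be
+ * cleared before the MTU2 timer devices registered below can run.
+ */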
+
+void __init plat_early_device_setup(void)
+{
+ /* enable MTU2 clock */
+ __raw_writeb(__raw_readb(STBCR3) & ~0x20, STBCR3);
+
+ early_platform_add_devices(sh7201_early_devices,
+ ARRAY_SIZE(sh7201_early_devices));
+}
diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7203.c b/arch/sh/kernel/cpu/sh2a/setup-sh7203.c
new file mode 100644
index 00000000..0bd744f9
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh2a/setup-sh7203.c
@@ -0,0 +1,434 @@
+/*
+ * SH7203 and SH7263 Setup
+ *
+ * Copyright (C) 2007 - 2009 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/serial.h>
+#include <linux/serial_sci.h>
+#include <linux/sh_timer.h>
+#include <linux/io.h>
+
+enum {
+ UNUSED = 0,
+
+ /* interrupt sources */
+ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
+ PINT0, PINT1, PINT2, PINT3, PINT4, PINT5, PINT6, PINT7,
+ DMAC0, DMAC1, DMAC2, DMAC3, DMAC4, DMAC5, DMAC6, DMAC7,
+ USB, LCDC, CMT0, CMT1, BSC, WDT,
+
+ MTU0_ABCD, MTU0_VEF, MTU1_AB, MTU1_VU, MTU2_AB, MTU2_VU,
+ MTU3_ABCD, MTU4_ABCD, MTU2_TCI3V, MTU2_TCI4V,
+
+ ADC_ADI,
+
+ IIC30, IIC31, IIC32, IIC33,
+ SCIF0, SCIF1, SCIF2, SCIF3,
+
+ SSU0, SSU1,
+
+ SSI0_SSII, SSI1_SSII, SSI2_SSII, SSI3_SSII,
+
+ /* ROM-DEC, SDHI, SRC, and IEB are SH7263 specific */
+ ROMDEC, FLCTL, SDHI, RTC, RCAN0, RCAN1,
+ SRC, IEBI,
+
+ /* interrupt groups */
+ PINT,
+};
+
+static struct intc_vect vectors[] __initdata = {
+ INTC_IRQ(IRQ0, 64), INTC_IRQ(IRQ1, 65),
+ INTC_IRQ(IRQ2, 66), INTC_IRQ(IRQ3, 67),
+ INTC_IRQ(IRQ4, 68), INTC_IRQ(IRQ5, 69),
+ INTC_IRQ(IRQ6, 70), INTC_IRQ(IRQ7, 71),
+ INTC_IRQ(PINT0, 80), INTC_IRQ(PINT1, 81),
+ INTC_IRQ(PINT2, 82), INTC_IRQ(PINT3, 83),
+ INTC_IRQ(PINT4, 84), INTC_IRQ(PINT5, 85),
+ INTC_IRQ(PINT6, 86), INTC_IRQ(PINT7, 87),
+ INTC_IRQ(DMAC0, 108), INTC_IRQ(DMAC0, 109),
+ INTC_IRQ(DMAC1, 112), INTC_IRQ(DMAC1, 113),
+ INTC_IRQ(DMAC2, 116), INTC_IRQ(DMAC2, 117),
+ INTC_IRQ(DMAC3, 120), INTC_IRQ(DMAC3, 121),
+ INTC_IRQ(DMAC4, 124), INTC_IRQ(DMAC4, 125),
+ INTC_IRQ(DMAC5, 128), INTC_IRQ(DMAC5, 129),
+ INTC_IRQ(DMAC6, 132), INTC_IRQ(DMAC6, 133),
+ INTC_IRQ(DMAC7, 136), INTC_IRQ(DMAC7, 137),
+ INTC_IRQ(USB, 140), INTC_IRQ(LCDC, 141),
+ INTC_IRQ(CMT0, 142), INTC_IRQ(CMT1, 143),
+ INTC_IRQ(BSC, 144), INTC_IRQ(WDT, 145),
+ INTC_IRQ(MTU0_ABCD, 146), INTC_IRQ(MTU0_ABCD, 147),
+ INTC_IRQ(MTU0_ABCD, 148), INTC_IRQ(MTU0_ABCD, 149),
+ INTC_IRQ(MTU0_VEF, 150),
+ INTC_IRQ(MTU0_VEF, 151), INTC_IRQ(MTU0_VEF, 152),
+ INTC_IRQ(MTU1_AB, 153), INTC_IRQ(MTU1_AB, 154),
+ INTC_IRQ(MTU1_VU, 155), INTC_IRQ(MTU1_VU, 156),
+ INTC_IRQ(MTU2_AB, 157), INTC_IRQ(MTU2_AB, 158),
+ INTC_IRQ(MTU2_VU, 159), INTC_IRQ(MTU2_VU, 160),
+ INTC_IRQ(MTU3_ABCD, 161), INTC_IRQ(MTU3_ABCD, 162),
+ INTC_IRQ(MTU3_ABCD, 163), INTC_IRQ(MTU3_ABCD, 164),
+ INTC_IRQ(MTU2_TCI3V, 165),
+ INTC_IRQ(MTU4_ABCD, 166), INTC_IRQ(MTU4_ABCD, 167),
+ INTC_IRQ(MTU4_ABCD, 168), INTC_IRQ(MTU4_ABCD, 169),
+ INTC_IRQ(MTU2_TCI4V, 170),
+ INTC_IRQ(ADC_ADI, 171),
+ INTC_IRQ(IIC30, 172), INTC_IRQ(IIC30, 173),
+ INTC_IRQ(IIC30, 174), INTC_IRQ(IIC30, 175),
+ INTC_IRQ(IIC30, 176),
+ INTC_IRQ(IIC31, 177), INTC_IRQ(IIC31, 178),
+ INTC_IRQ(IIC31, 179), INTC_IRQ(IIC31, 180),
+ INTC_IRQ(IIC31, 181),
+ INTC_IRQ(IIC32, 182), INTC_IRQ(IIC32, 183),
+ INTC_IRQ(IIC32, 184), INTC_IRQ(IIC32, 185),
+ INTC_IRQ(IIC32, 186),
+ INTC_IRQ(IIC33, 187), INTC_IRQ(IIC33, 188),
+ INTC_IRQ(IIC33, 189), INTC_IRQ(IIC33, 190),
+ INTC_IRQ(IIC33, 191),
+ INTC_IRQ(SCIF0, 192), INTC_IRQ(SCIF0, 193),
+ INTC_IRQ(SCIF0, 194), INTC_IRQ(SCIF0, 195),
+ INTC_IRQ(SCIF1, 196), INTC_IRQ(SCIF1, 197),
+ INTC_IRQ(SCIF1, 198), INTC_IRQ(SCIF1, 199),
+ INTC_IRQ(SCIF2, 200), INTC_IRQ(SCIF2, 201),
+ INTC_IRQ(SCIF2, 202), INTC_IRQ(SCIF2, 203),
+ INTC_IRQ(SCIF3, 204), INTC_IRQ(SCIF3, 205),
+ INTC_IRQ(SCIF3, 206), INTC_IRQ(SCIF3, 207),
+ INTC_IRQ(SSU0, 208), INTC_IRQ(SSU0, 209),
+ INTC_IRQ(SSU0, 210),
+ INTC_IRQ(SSU1, 211), INTC_IRQ(SSU1, 212),
+ INTC_IRQ(SSU1, 213),
+ INTC_IRQ(SSI0_SSII, 214), INTC_IRQ(SSI1_SSII, 215),
+ INTC_IRQ(SSI2_SSII, 216), INTC_IRQ(SSI3_SSII, 217),
+ INTC_IRQ(FLCTL, 224), INTC_IRQ(FLCTL, 225),
+ INTC_IRQ(FLCTL, 226), INTC_IRQ(FLCTL, 227),
+ INTC_IRQ(RTC, 231), INTC_IRQ(RTC, 232),
+ INTC_IRQ(RTC, 233),
+ INTC_IRQ(RCAN0, 234), INTC_IRQ(RCAN0, 235),
+ INTC_IRQ(RCAN0, 236), INTC_IRQ(RCAN0, 237),
+ INTC_IRQ(RCAN0, 238),
+ INTC_IRQ(RCAN1, 239), INTC_IRQ(RCAN1, 240),
+ INTC_IRQ(RCAN1, 241), INTC_IRQ(RCAN1, 242),
+ INTC_IRQ(RCAN1, 243),
+
+	/* SH7263-specific sources */
+#ifdef CONFIG_CPU_SUBTYPE_SH7263
+ INTC_IRQ(ROMDEC, 218), INTC_IRQ(ROMDEC, 219),
+ INTC_IRQ(ROMDEC, 220), INTC_IRQ(ROMDEC, 221),
+ INTC_IRQ(ROMDEC, 222), INTC_IRQ(ROMDEC, 223),
+
+ INTC_IRQ(SDHI, 228), INTC_IRQ(SDHI, 229),
+ INTC_IRQ(SDHI, 230),
+
+ INTC_IRQ(SRC, 244), INTC_IRQ(SRC, 245),
+ INTC_IRQ(SRC, 246),
+
+ INTC_IRQ(IEBI, 247),
+#endif
+};
+
+static struct intc_group groups[] __initdata = {
+ INTC_GROUP(PINT, PINT0, PINT1, PINT2, PINT3,
+ PINT4, PINT5, PINT6, PINT7),
+};
+
+static struct intc_prio_reg prio_registers[] __initdata = {
+ { 0xfffe0818, 0, 16, 4, /* IPR01 */ { IRQ0, IRQ1, IRQ2, IRQ3 } },
+ { 0xfffe081a, 0, 16, 4, /* IPR02 */ { IRQ4, IRQ5, IRQ6, IRQ7 } },
+ { 0xfffe0820, 0, 16, 4, /* IPR05 */ { PINT, 0, 0, 0 } },
+ { 0xfffe0c00, 0, 16, 4, /* IPR06 */ { DMAC0, DMAC1, DMAC2, DMAC3 } },
+ { 0xfffe0c02, 0, 16, 4, /* IPR07 */ { DMAC4, DMAC5, DMAC6, DMAC7 } },
+ { 0xfffe0c04, 0, 16, 4, /* IPR08 */ { USB, LCDC, CMT0, CMT1 } },
+ { 0xfffe0c06, 0, 16, 4, /* IPR09 */ { BSC, WDT, MTU0_ABCD, MTU0_VEF } },
+ { 0xfffe0c08, 0, 16, 4, /* IPR10 */ { MTU1_AB, MTU1_VU, MTU2_AB,
+ MTU2_VU } },
+ { 0xfffe0c0a, 0, 16, 4, /* IPR11 */ { MTU3_ABCD, MTU2_TCI3V, MTU4_ABCD,
+ MTU2_TCI4V } },
+ { 0xfffe0c0c, 0, 16, 4, /* IPR12 */ { ADC_ADI, IIC30, IIC31, IIC32 } },
+ { 0xfffe0c0e, 0, 16, 4, /* IPR13 */ { IIC33, SCIF0, SCIF1, SCIF2 } },
+ { 0xfffe0c10, 0, 16, 4, /* IPR14 */ { SCIF3, SSU0, SSU1, SSI0_SSII } },
+#ifdef CONFIG_CPU_SUBTYPE_SH7203
+ { 0xfffe0c12, 0, 16, 4, /* IPR15 */ { SSI1_SSII, SSI2_SSII,
+ SSI3_SSII, 0 } },
+ { 0xfffe0c14, 0, 16, 4, /* IPR16 */ { FLCTL, 0, RTC, RCAN0 } },
+ { 0xfffe0c16, 0, 16, 4, /* IPR17 */ { RCAN1, 0, 0, 0 } },
+#else
+ { 0xfffe0c12, 0, 16, 4, /* IPR15 */ { SSI1_SSII, SSI2_SSII,
+ SSI3_SSII, ROMDEC } },
+ { 0xfffe0c14, 0, 16, 4, /* IPR16 */ { FLCTL, SDHI, RTC, RCAN0 } },
+ { 0xfffe0c16, 0, 16, 4, /* IPR17 */ { RCAN1, SRC, IEBI, 0 } },
+#endif
+};
+
+static struct intc_mask_reg mask_registers[] __initdata = {
+ { 0xfffe0808, 0, 16, /* PINTER */
+ { 0, 0, 0, 0, 0, 0, 0, 0,
+ PINT7, PINT6, PINT5, PINT4, PINT3, PINT2, PINT1, PINT0 } },
+};
+
+static DECLARE_INTC_DESC(intc_desc, "sh7203", vectors, groups,
+ mask_registers, prio_registers, NULL);
+
+static struct plat_sci_port scif0_platform_data = {
+ .mapbase = 0xfffe8000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RIE | SCSCR_TIE | SCSCR_RE | SCSCR_TE |
+ SCSCR_REIE,
+ .scbrr_algo_id = SCBRR_ALGO_2,
+ .type = PORT_SCIF,
+ .irqs = { 192, 192, 192, 192 },
+ .regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE,
+};
+
+static struct platform_device scif0_device = {
+ .name = "sh-sci",
+ .id = 0,
+ .dev = {
+ .platform_data = &scif0_platform_data,
+ },
+};
+
+static struct plat_sci_port scif1_platform_data = {
+ .mapbase = 0xfffe8800,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RIE | SCSCR_TIE | SCSCR_RE | SCSCR_TE |
+ SCSCR_REIE,
+ .scbrr_algo_id = SCBRR_ALGO_2,
+ .type = PORT_SCIF,
+ .irqs = { 196, 196, 196, 196 },
+ .regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE,
+};
+
+static struct platform_device scif1_device = {
+ .name = "sh-sci",
+ .id = 1,
+ .dev = {
+ .platform_data = &scif1_platform_data,
+ },
+};
+
+static struct plat_sci_port scif2_platform_data = {
+ .mapbase = 0xfffe9000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RIE | SCSCR_TIE | SCSCR_RE | SCSCR_TE |
+ SCSCR_REIE,
+ .scbrr_algo_id = SCBRR_ALGO_2,
+ .type = PORT_SCIF,
+ .irqs = { 200, 200, 200, 200 },
+ .regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE,
+};
+
+static struct platform_device scif2_device = {
+ .name = "sh-sci",
+ .id = 2,
+ .dev = {
+ .platform_data = &scif2_platform_data,
+ },
+};
+
+static struct plat_sci_port scif3_platform_data = {
+ .mapbase = 0xfffe9800,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RIE | SCSCR_TIE | SCSCR_RE | SCSCR_TE |
+ SCSCR_REIE,
+ .scbrr_algo_id = SCBRR_ALGO_2,
+ .type = PORT_SCIF,
+ .irqs = { 204, 204, 204, 204 },
+ .regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE,
+};
+
+static struct platform_device scif3_device = {
+ .name = "sh-sci",
+ .id = 3,
+ .dev = {
+ .platform_data = &scif3_platform_data,
+ },
+};
+
+static struct sh_timer_config cmt0_platform_data = {
+ .channel_offset = 0x02,
+ .timer_bit = 0,
+ .clockevent_rating = 125,
+ .clocksource_rating = 0, /* disabled due to code generation issues */
+};
+
+static struct resource cmt0_resources[] = {
+ [0] = {
+ .start = 0xfffec002,
+ .end = 0xfffec007,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 142,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device cmt0_device = {
+ .name = "sh_cmt",
+ .id = 0,
+ .dev = {
+ .platform_data = &cmt0_platform_data,
+ },
+ .resource = cmt0_resources,
+ .num_resources = ARRAY_SIZE(cmt0_resources),
+};
+
+static struct sh_timer_config cmt1_platform_data = {
+ .channel_offset = 0x08,
+ .timer_bit = 1,
+ .clockevent_rating = 125,
+ .clocksource_rating = 0, /* disabled due to code generation issues */
+};
+
+static struct resource cmt1_resources[] = {
+ [0] = {
+ .start = 0xfffec008,
+ .end = 0xfffec00d,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 143,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device cmt1_device = {
+ .name = "sh_cmt",
+ .id = 1,
+ .dev = {
+ .platform_data = &cmt1_platform_data,
+ },
+ .resource = cmt1_resources,
+ .num_resources = ARRAY_SIZE(cmt1_resources),
+};
+
+static struct sh_timer_config mtu2_0_platform_data = {
+ .channel_offset = -0x80,
+ .timer_bit = 0,
+ .clockevent_rating = 200,
+};
+
+static struct resource mtu2_0_resources[] = {
+ [0] = {
+ .start = 0xfffe4300,
+ .end = 0xfffe4326,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 146,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device mtu2_0_device = {
+ .name = "sh_mtu2",
+ .id = 0,
+ .dev = {
+ .platform_data = &mtu2_0_platform_data,
+ },
+ .resource = mtu2_0_resources,
+ .num_resources = ARRAY_SIZE(mtu2_0_resources),
+};
+
+static struct sh_timer_config mtu2_1_platform_data = {
+ .channel_offset = -0x100,
+ .timer_bit = 1,
+ .clockevent_rating = 200,
+};
+
+static struct resource mtu2_1_resources[] = {
+ [0] = {
+ .start = 0xfffe4380,
+ .end = 0xfffe4390,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 153,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device mtu2_1_device = {
+ .name = "sh_mtu2",
+ .id = 1,
+ .dev = {
+ .platform_data = &mtu2_1_platform_data,
+ },
+ .resource = mtu2_1_resources,
+ .num_resources = ARRAY_SIZE(mtu2_1_resources),
+};
+
+static struct resource rtc_resources[] = {
+ [0] = {
+ .start = 0xffff2000,
+ .end = 0xffff2000 + 0x58 - 1,
+ .flags = IORESOURCE_IO,
+ },
+ [1] = {
+ /* Shared Period/Carry/Alarm IRQ */
+ .start = 231,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device rtc_device = {
+ .name = "sh-rtc",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(rtc_resources),
+ .resource = rtc_resources,
+};
+
+static struct platform_device *sh7203_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &scif3_device,
+ &cmt0_device,
+ &cmt1_device,
+ &mtu2_0_device,
+ &mtu2_1_device,
+ &rtc_device,
+};
+
+static int __init sh7203_devices_setup(void)
+{
+ return platform_add_devices(sh7203_devices,
+ ARRAY_SIZE(sh7203_devices));
+}
+arch_initcall(sh7203_devices_setup);
+
+void __init plat_irq_setup(void)
+{
+ register_intc_controller(&intc_desc);
+}
+
+static struct platform_device *sh7203_early_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &scif3_device,
+ &cmt0_device,
+ &cmt1_device,
+ &mtu2_0_device,
+ &mtu2_1_device,
+};
+
+#define STBCR3 0xfffe0408
+#define STBCR4 0xfffe040c
+
+void __init plat_early_device_setup(void)
+{
+ /* enable CMT clock */
+ __raw_writeb(__raw_readb(STBCR4) & ~0x04, STBCR4);
+
+ /* enable MTU2 clock */
+ __raw_writeb(__raw_readb(STBCR3) & ~0x20, STBCR3);
+
+ early_platform_add_devices(sh7203_early_devices,
+ ARRAY_SIZE(sh7203_early_devices));
+}
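plat_early_device_setup() above uses the usual standby-control idiom: read the byte-wide STBCR register, clear the module-stop bit for the block in question, and write the value back (a cleared bit leaves the module clock running, as the "enable" comments indicate). A minimal sketch of that pattern is shown below; the helper name stbcr_clock_enable() is purely illustrative and not part of this patch.

/* Illustrative sketch only -- not part of the patch. */
static void __init stbcr_clock_enable(unsigned long stbcr, unsigned char bit)
{
	/* Clearing the module-stop bit supplies the clock to the block. */
	__raw_writeb(__raw_readb(stbcr) & ~bit, stbcr);
}

/* Equivalent to the two writes above:
 *	stbcr_clock_enable(STBCR4, 0x04);	-- enable CMT clock
 *	stbcr_clock_enable(STBCR3, 0x20);	-- enable MTU2 clock
 */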
diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7206.c b/arch/sh/kernel/cpu/sh2a/setup-sh7206.c
new file mode 100644
index 00000000..5d14f849
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh2a/setup-sh7206.c
@@ -0,0 +1,395 @@
+/*
+ * SH7206 Setup
+ *
+ * Copyright (C) 2006 Yoshinori Sato
+ * Copyright (C) 2009 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/serial.h>
+#include <linux/serial_sci.h>
+#include <linux/sh_timer.h>
+#include <linux/io.h>
+
+enum {
+ UNUSED = 0,
+
+ /* interrupt sources */
+ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
+ PINT0, PINT1, PINT2, PINT3, PINT4, PINT5, PINT6, PINT7,
+ ADC_ADI0, ADC_ADI1,
+
+ DMAC0, DMAC1, DMAC2, DMAC3, DMAC4, DMAC5, DMAC6, DMAC7,
+
+ MTU0_ABCD, MTU0_VEF, MTU1_AB, MTU1_VU, MTU2_AB, MTU2_VU,
+ MTU3_ABCD, MTU4_ABCD, MTU5, POE2_12, MTU3S_ABCD, MTU4S_ABCD, MTU5S,
+ IIC3,
+
+ CMT0, CMT1, BSC, WDT,
+
+ MTU2_TCI3V, MTU2_TCI4V, MTU2S_TCI3V, MTU2S_TCI4V,
+
+ POE2_OEI3,
+
+ SCIF0, SCIF1, SCIF2, SCIF3,
+
+ /* interrupt groups */
+ PINT,
+};
+
+static struct intc_vect vectors[] __initdata = {
+ INTC_IRQ(IRQ0, 64), INTC_IRQ(IRQ1, 65),
+ INTC_IRQ(IRQ2, 66), INTC_IRQ(IRQ3, 67),
+ INTC_IRQ(IRQ4, 68), INTC_IRQ(IRQ5, 69),
+ INTC_IRQ(IRQ6, 70), INTC_IRQ(IRQ7, 71),
+ INTC_IRQ(PINT0, 80), INTC_IRQ(PINT1, 81),
+ INTC_IRQ(PINT2, 82), INTC_IRQ(PINT3, 83),
+ INTC_IRQ(PINT4, 84), INTC_IRQ(PINT5, 85),
+ INTC_IRQ(PINT6, 86), INTC_IRQ(PINT7, 87),
+ INTC_IRQ(ADC_ADI0, 92), INTC_IRQ(ADC_ADI1, 96),
+ INTC_IRQ(DMAC0, 108), INTC_IRQ(DMAC0, 109),
+ INTC_IRQ(DMAC1, 112), INTC_IRQ(DMAC1, 113),
+ INTC_IRQ(DMAC2, 116), INTC_IRQ(DMAC2, 117),
+ INTC_IRQ(DMAC3, 120), INTC_IRQ(DMAC3, 121),
+ INTC_IRQ(DMAC4, 124), INTC_IRQ(DMAC4, 125),
+ INTC_IRQ(DMAC5, 128), INTC_IRQ(DMAC5, 129),
+ INTC_IRQ(DMAC6, 132), INTC_IRQ(DMAC6, 133),
+ INTC_IRQ(DMAC7, 136), INTC_IRQ(DMAC7, 137),
+ INTC_IRQ(CMT0, 140), INTC_IRQ(CMT1, 144),
+ INTC_IRQ(BSC, 148), INTC_IRQ(WDT, 152),
+ INTC_IRQ(MTU0_ABCD, 156), INTC_IRQ(MTU0_ABCD, 157),
+ INTC_IRQ(MTU0_ABCD, 158), INTC_IRQ(MTU0_ABCD, 159),
+ INTC_IRQ(MTU0_VEF, 160), INTC_IRQ(MTU0_VEF, 161),
+ INTC_IRQ(MTU0_VEF, 162),
+ INTC_IRQ(MTU1_AB, 164), INTC_IRQ(MTU1_AB, 165),
+ INTC_IRQ(MTU1_VU, 168), INTC_IRQ(MTU1_VU, 169),
+ INTC_IRQ(MTU2_AB, 172), INTC_IRQ(MTU2_AB, 173),
+ INTC_IRQ(MTU2_VU, 176), INTC_IRQ(MTU2_VU, 177),
+ INTC_IRQ(MTU3_ABCD, 180), INTC_IRQ(MTU3_ABCD, 181),
+ INTC_IRQ(MTU3_ABCD, 182), INTC_IRQ(MTU3_ABCD, 183),
+ INTC_IRQ(MTU2_TCI3V, 184),
+ INTC_IRQ(MTU4_ABCD, 188), INTC_IRQ(MTU4_ABCD, 189),
+ INTC_IRQ(MTU4_ABCD, 190), INTC_IRQ(MTU4_ABCD, 191),
+ INTC_IRQ(MTU2_TCI4V, 192),
+ INTC_IRQ(MTU5, 196), INTC_IRQ(MTU5, 197),
+ INTC_IRQ(MTU5, 198),
+ INTC_IRQ(POE2_12, 200), INTC_IRQ(POE2_12, 201),
+ INTC_IRQ(MTU3S_ABCD, 204), INTC_IRQ(MTU3S_ABCD, 205),
+ INTC_IRQ(MTU3S_ABCD, 206), INTC_IRQ(MTU3S_ABCD, 207),
+ INTC_IRQ(MTU2S_TCI3V, 208),
+ INTC_IRQ(MTU4S_ABCD, 212), INTC_IRQ(MTU4S_ABCD, 213),
+ INTC_IRQ(MTU4S_ABCD, 214), INTC_IRQ(MTU4S_ABCD, 215),
+ INTC_IRQ(MTU2S_TCI4V, 216),
+ INTC_IRQ(MTU5S, 220), INTC_IRQ(MTU5S, 221),
+ INTC_IRQ(MTU5S, 222),
+ INTC_IRQ(POE2_OEI3, 224),
+ INTC_IRQ(IIC3, 228), INTC_IRQ(IIC3, 229),
+ INTC_IRQ(IIC3, 230), INTC_IRQ(IIC3, 231),
+ INTC_IRQ(IIC3, 232),
+ INTC_IRQ(SCIF0, 240), INTC_IRQ(SCIF0, 241),
+ INTC_IRQ(SCIF0, 242), INTC_IRQ(SCIF0, 243),
+ INTC_IRQ(SCIF1, 244), INTC_IRQ(SCIF1, 245),
+ INTC_IRQ(SCIF1, 246), INTC_IRQ(SCIF1, 247),
+ INTC_IRQ(SCIF2, 248), INTC_IRQ(SCIF2, 249),
+ INTC_IRQ(SCIF2, 250), INTC_IRQ(SCIF2, 251),
+ INTC_IRQ(SCIF3, 252), INTC_IRQ(SCIF3, 253),
+ INTC_IRQ(SCIF3, 254), INTC_IRQ(SCIF3, 255),
+};
+
+static struct intc_group groups[] __initdata = {
+ INTC_GROUP(PINT, PINT0, PINT1, PINT2, PINT3,
+ PINT4, PINT5, PINT6, PINT7),
+};
+
+static struct intc_prio_reg prio_registers[] __initdata = {
+ { 0xfffe0818, 0, 16, 4, /* IPR01 */ { IRQ0, IRQ1, IRQ2, IRQ3 } },
+ { 0xfffe081a, 0, 16, 4, /* IPR02 */ { IRQ4, IRQ5, IRQ6, IRQ7 } },
+ { 0xfffe0820, 0, 16, 4, /* IPR05 */ { PINT, 0, ADC_ADI0, ADC_ADI1 } },
+ { 0xfffe0c00, 0, 16, 4, /* IPR06 */ { DMAC0, DMAC1, DMAC2, DMAC3 } },
+ { 0xfffe0c02, 0, 16, 4, /* IPR07 */ { DMAC4, DMAC5, DMAC6, DMAC7 } },
+ { 0xfffe0c04, 0, 16, 4, /* IPR08 */ { CMT0, CMT1, BSC, WDT } },
+ { 0xfffe0c06, 0, 16, 4, /* IPR09 */ { MTU0_ABCD, MTU0_VEF,
+ MTU1_AB, MTU1_VU } },
+ { 0xfffe0c08, 0, 16, 4, /* IPR10 */ { MTU2_AB, MTU2_VU,
+ MTU3_ABCD, MTU2_TCI3V } },
+ { 0xfffe0c0a, 0, 16, 4, /* IPR11 */ { MTU4_ABCD, MTU2_TCI4V,
+ MTU5, POE2_12 } },
+ { 0xfffe0c0c, 0, 16, 4, /* IPR12 */ { MTU3S_ABCD, MTU2S_TCI3V,
+ MTU4S_ABCD, MTU2S_TCI4V } },
+ { 0xfffe0c0e, 0, 16, 4, /* IPR13 */ { MTU5S, POE2_OEI3, IIC3, 0 } },
+ { 0xfffe0c10, 0, 16, 4, /* IPR14 */ { SCIF0, SCIF1, SCIF2, SCIF3 } },
+};
+
+static struct intc_mask_reg mask_registers[] __initdata = {
+ { 0xfffe0808, 0, 16, /* PINTER */
+ { 0, 0, 0, 0, 0, 0, 0, 0,
+ PINT7, PINT6, PINT5, PINT4, PINT3, PINT2, PINT1, PINT0 } },
+};
+
+static DECLARE_INTC_DESC(intc_desc, "sh7206", vectors, groups,
+ mask_registers, prio_registers, NULL);
+
+static struct plat_sci_port scif0_platform_data = {
+ .mapbase = 0xfffe8000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .scbrr_algo_id = SCBRR_ALGO_2,
+ .type = PORT_SCIF,
+ .irqs = { 240, 240, 240, 240 },
+};
+
+static struct platform_device scif0_device = {
+ .name = "sh-sci",
+ .id = 0,
+ .dev = {
+ .platform_data = &scif0_platform_data,
+ },
+};
+
+static struct plat_sci_port scif1_platform_data = {
+ .mapbase = 0xfffe8800,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .scbrr_algo_id = SCBRR_ALGO_2,
+ .type = PORT_SCIF,
+ .irqs = { 244, 244, 244, 244 },
+};
+
+static struct platform_device scif1_device = {
+ .name = "sh-sci",
+ .id = 1,
+ .dev = {
+ .platform_data = &scif1_platform_data,
+ },
+};
+
+static struct plat_sci_port scif2_platform_data = {
+ .mapbase = 0xfffe9000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .scbrr_algo_id = SCBRR_ALGO_2,
+ .type = PORT_SCIF,
+ .irqs = { 248, 248, 248, 248 },
+};
+
+static struct platform_device scif2_device = {
+ .name = "sh-sci",
+ .id = 2,
+ .dev = {
+ .platform_data = &scif2_platform_data,
+ },
+};
+
+static struct plat_sci_port scif3_platform_data = {
+ .mapbase = 0xfffe9800,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .scbrr_algo_id = SCBRR_ALGO_2,
+ .type = PORT_SCIF,
+ .irqs = { 252, 252, 252, 252 },
+};
+
+static struct platform_device scif3_device = {
+ .name = "sh-sci",
+ .id = 3,
+ .dev = {
+ .platform_data = &scif3_platform_data,
+ },
+};
+
+static struct sh_timer_config cmt0_platform_data = {
+ .channel_offset = 0x02,
+ .timer_bit = 0,
+ .clockevent_rating = 125,
+ .clocksource_rating = 0, /* disabled due to code generation issues */
+};
+
+static struct resource cmt0_resources[] = {
+ [0] = {
+ .start = 0xfffec002,
+ .end = 0xfffec007,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 140,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device cmt0_device = {
+ .name = "sh_cmt",
+ .id = 0,
+ .dev = {
+ .platform_data = &cmt0_platform_data,
+ },
+ .resource = cmt0_resources,
+ .num_resources = ARRAY_SIZE(cmt0_resources),
+};
+
+static struct sh_timer_config cmt1_platform_data = {
+ .channel_offset = 0x08,
+ .timer_bit = 1,
+ .clockevent_rating = 125,
+ .clocksource_rating = 0, /* disabled due to code generation issues */
+};
+
+static struct resource cmt1_resources[] = {
+ [0] = {
+ .start = 0xfffec008,
+ .end = 0xfffec00d,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 144,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device cmt1_device = {
+ .name = "sh_cmt",
+ .id = 1,
+ .dev = {
+ .platform_data = &cmt1_platform_data,
+ },
+ .resource = cmt1_resources,
+ .num_resources = ARRAY_SIZE(cmt1_resources),
+};
+
+static struct sh_timer_config mtu2_0_platform_data = {
+ .channel_offset = -0x80,
+ .timer_bit = 0,
+ .clockevent_rating = 200,
+};
+
+static struct resource mtu2_0_resources[] = {
+ [0] = {
+ .start = 0xfffe4300,
+ .end = 0xfffe4326,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 156,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device mtu2_0_device = {
+ .name = "sh_mtu2",
+ .id = 0,
+ .dev = {
+ .platform_data = &mtu2_0_platform_data,
+ },
+ .resource = mtu2_0_resources,
+ .num_resources = ARRAY_SIZE(mtu2_0_resources),
+};
+
+static struct sh_timer_config mtu2_1_platform_data = {
+ .channel_offset = -0x100,
+ .timer_bit = 1,
+ .clockevent_rating = 200,
+};
+
+static struct resource mtu2_1_resources[] = {
+ [0] = {
+ .start = 0xfffe4380,
+ .end = 0xfffe4390,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 164,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device mtu2_1_device = {
+ .name = "sh_mtu2",
+ .id = 1,
+ .dev = {
+ .platform_data = &mtu2_1_platform_data,
+ },
+ .resource = mtu2_1_resources,
+ .num_resources = ARRAY_SIZE(mtu2_1_resources),
+};
+
+static struct sh_timer_config mtu2_2_platform_data = {
+ .channel_offset = 0x80,
+ .timer_bit = 2,
+ .clockevent_rating = 200,
+};
+
+static struct resource mtu2_2_resources[] = {
+ [0] = {
+ .start = 0xfffe4000,
+ .end = 0xfffe400a,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 180,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device mtu2_2_device = {
+ .name = "sh_mtu2",
+ .id = 2,
+ .dev = {
+ .platform_data = &mtu2_2_platform_data,
+ },
+ .resource = mtu2_2_resources,
+ .num_resources = ARRAY_SIZE(mtu2_2_resources),
+};
+
+static struct platform_device *sh7206_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &scif3_device,
+ &cmt0_device,
+ &cmt1_device,
+ &mtu2_0_device,
+ &mtu2_1_device,
+ &mtu2_2_device,
+};
+
+static int __init sh7206_devices_setup(void)
+{
+ return platform_add_devices(sh7206_devices,
+ ARRAY_SIZE(sh7206_devices));
+}
+arch_initcall(sh7206_devices_setup);
+
+void __init plat_irq_setup(void)
+{
+ register_intc_controller(&intc_desc);
+}
+
+static struct platform_device *sh7206_early_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &scif3_device,
+ &cmt0_device,
+ &cmt1_device,
+ &mtu2_0_device,
+ &mtu2_1_device,
+ &mtu2_2_device,
+};
+
+#define STBCR3 0xfffe0408
+#define STBCR4 0xfffe040c
+
+void __init plat_early_device_setup(void)
+{
+ /* enable CMT clock */
+ __raw_writeb(__raw_readb(STBCR4) & ~0x04, STBCR4);
+
+ /* enable MTU2 clock */
+ __raw_writeb(__raw_readb(STBCR3) & ~0x20, STBCR3);
+
+ early_platform_add_devices(sh7206_early_devices,
+ ARRAY_SIZE(sh7206_early_devices));
+}
diff --git a/arch/sh/kernel/cpu/sh3/Makefile b/arch/sh/kernel/cpu/sh3/Makefile
new file mode 100644
index 00000000..6f13f33a
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh3/Makefile
@@ -0,0 +1,33 @@
+#
+# Makefile for the Linux/SuperH SH-3 backends.
+#
+
+obj-y := ex.o probe.o entry.o setup-sh3.o
+
+obj-$(CONFIG_HIBERNATION) += swsusp.o
+
+# CPU subtype setup
+obj-$(CONFIG_CPU_SUBTYPE_SH7705) += setup-sh7705.o serial-sh770x.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7706) += setup-sh770x.o serial-sh770x.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7707) += setup-sh770x.o serial-sh770x.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7708) += setup-sh770x.o serial-sh770x.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7709) += setup-sh770x.o serial-sh770x.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7710) += setup-sh7710.o serial-sh7710.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7712) += setup-sh7710.o serial-sh7710.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7720) += setup-sh7720.o serial-sh7720.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7721) += setup-sh7720.o serial-sh7720.o
+
+# Primary on-chip clocks (common)
+clock-$(CONFIG_CPU_SH3) := clock-sh3.o
+clock-$(CONFIG_CPU_SUBTYPE_SH7705) := clock-sh7705.o
+clock-$(CONFIG_CPU_SUBTYPE_SH7706) := clock-sh7706.o
+clock-$(CONFIG_CPU_SUBTYPE_SH7709) := clock-sh7709.o
+clock-$(CONFIG_CPU_SUBTYPE_SH7710) := clock-sh7710.o
+clock-$(CONFIG_CPU_SUBTYPE_SH7720) := clock-sh7710.o
+clock-$(CONFIG_CPU_SUBTYPE_SH7712) := clock-sh7712.o
+
+# Pinmux setup
+pinmux-$(CONFIG_CPU_SUBTYPE_SH7720) := pinmux-sh7720.o
+
+obj-y += $(clock-y)
+obj-$(CONFIG_GENERIC_GPIO) += $(pinmux-y)
diff --git a/arch/sh/kernel/cpu/sh3/clock-sh3.c b/arch/sh/kernel/cpu/sh3/clock-sh3.c
new file mode 100644
index 00000000..90faa44c
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh3/clock-sh3.c
@@ -0,0 +1,89 @@
+/*
+ * arch/sh/kernel/cpu/sh3/clock-sh3.c
+ *
+ * Generic SH-3 support for the clock framework
+ *
+ * Copyright (C) 2005 Paul Mundt
+ *
+ * FRQCR parsing hacked out of arch/sh/kernel/time.c
+ *
+ * Copyright (C) 1999 Tetsuya Okada & Niibe Yutaka
+ * Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
+ * Copyright (C) 2002, 2003, 2004 Paul Mundt
+ * Copyright (C) 2002 M. R. Brown <mrbrown@linux-sh.org>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <asm/clock.h>
+#include <asm/freq.h>
+#include <asm/io.h>
+
+static int stc_multipliers[] = { 1, 2, 3, 4, 6, 1, 1, 1 };
+static int ifc_divisors[] = { 1, 2, 3, 4, 1, 1, 1, 1 };
+static int pfc_divisors[] = { 1, 2, 3, 4, 6, 1, 1, 1 };
+
+static void master_clk_init(struct clk *clk)
+{
+ int frqcr = __raw_readw(FRQCR);
+ int idx = ((frqcr & 0x2000) >> 11) | (frqcr & 0x0003);
+
+ clk->rate *= pfc_divisors[idx];
+}
+
+static struct sh_clk_ops sh3_master_clk_ops = {
+ .init = master_clk_init,
+};
+
+static unsigned long module_clk_recalc(struct clk *clk)
+{
+ int frqcr = __raw_readw(FRQCR);
+ int idx = ((frqcr & 0x2000) >> 11) | (frqcr & 0x0003);
+
+ return clk->parent->rate / pfc_divisors[idx];
+}
+
+static struct sh_clk_ops sh3_module_clk_ops = {
+ .recalc = module_clk_recalc,
+};
+
+static unsigned long bus_clk_recalc(struct clk *clk)
+{
+ int frqcr = __raw_readw(FRQCR);
+ int idx = ((frqcr & 0x8000) >> 13) | ((frqcr & 0x0030) >> 4);
+
+ return clk->parent->rate / stc_multipliers[idx];
+}
+
+static struct sh_clk_ops sh3_bus_clk_ops = {
+ .recalc = bus_clk_recalc,
+};
+
+static unsigned long cpu_clk_recalc(struct clk *clk)
+{
+ int frqcr = __raw_readw(FRQCR);
+ int idx = ((frqcr & 0x4000) >> 12) | ((frqcr & 0x000c) >> 2);
+
+ return clk->parent->rate / ifc_divisors[idx];
+}
+
+static struct sh_clk_ops sh3_cpu_clk_ops = {
+ .recalc = cpu_clk_recalc,
+};
+
+static struct sh_clk_ops *sh3_clk_ops[] = {
+ &sh3_master_clk_ops,
+ &sh3_module_clk_ops,
+ &sh3_bus_clk_ops,
+ &sh3_cpu_clk_ops,
+};
+
+void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
+{
+ if (idx < ARRAY_SIZE(sh3_clk_ops))
+ *ops = sh3_clk_ops[idx];
+}
+
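For reference, the FRQCR decode used above can be exercised outside the kernel. The stand-alone sketch below reproduces the index calculations and rate arithmetic from clock-sh3.c for a made-up FRQCR value of 0x0102 and a made-up 33.33 MHz input clock; both numbers are illustrative assumptions, not taken from any board.

#include <stdio.h>

/* Same lookup tables and bit layout as clock-sh3.c above. */
static const int stc_multipliers[] = { 1, 2, 3, 4, 6, 1, 1, 1 };
static const int ifc_divisors[]    = { 1, 2, 3, 4, 1, 1, 1, 1 };
static const int pfc_divisors[]    = { 1, 2, 3, 4, 6, 1, 1, 1 };

int main(void)
{
	unsigned int frqcr = 0x0102;		/* hypothetical FRQCR value */
	unsigned long input = 33333333UL;	/* hypothetical input clock, Hz */

	int pfc = ((frqcr & 0x2000) >> 11) | (frqcr & 0x0003);
	int stc = ((frqcr & 0x8000) >> 13) | ((frqcr & 0x0030) >> 4);
	int ifc = ((frqcr & 0x4000) >> 12) | ((frqcr & 0x000c) >> 2);

	/* master_clk_init() scales the input rate up by the PFC divisor. */
	unsigned long master = input * pfc_divisors[pfc];

	printf("master: %lu Hz\n", master);
	printf("module: %lu Hz\n", master / pfc_divisors[pfc]);
	printf("bus:    %lu Hz\n", master / stc_multipliers[stc]);
	printf("cpu:    %lu Hz\n", master / ifc_divisors[ifc]);
	return 0;
}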
diff --git a/arch/sh/kernel/cpu/sh3/clock-sh7705.c b/arch/sh/kernel/cpu/sh3/clock-sh7705.c
new file mode 100644
index 00000000..a8da4a99
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh3/clock-sh7705.c
@@ -0,0 +1,84 @@
+/*
+ * arch/sh/kernel/cpu/sh3/clock-sh7705.c
+ *
+ * SH7705 support for the clock framework
+ *
+ * Copyright (C) 2005 Paul Mundt
+ *
+ * FRQCR parsing hacked out of arch/sh/kernel/time.c
+ *
+ * Copyright (C) 1999 Tetsuya Okada & Niibe Yutaka
+ * Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
+ * Copyright (C) 2002, 2003, 2004 Paul Mundt
+ * Copyright (C) 2002 M. R. Brown <mrbrown@linux-sh.org>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <asm/clock.h>
+#include <asm/freq.h>
+#include <asm/io.h>
+
+/*
+ * SH7705 uses the same divisors as the generic SH-3 case; it's just the
+ * FRQCR layout that is a bit different.
+ */
+static int stc_multipliers[] = { 1, 2, 3, 4, 6, 1, 1, 1 };
+static int ifc_divisors[] = { 1, 2, 3, 4, 1, 1, 1, 1 };
+static int pfc_divisors[] = { 1, 2, 3, 4, 6, 1, 1, 1 };
+
+static void master_clk_init(struct clk *clk)
+{
+ clk->rate *= pfc_divisors[__raw_readw(FRQCR) & 0x0003];
+}
+
+static struct sh_clk_ops sh7705_master_clk_ops = {
+ .init = master_clk_init,
+};
+
+static unsigned long module_clk_recalc(struct clk *clk)
+{
+ int idx = __raw_readw(FRQCR) & 0x0003;
+ return clk->parent->rate / pfc_divisors[idx];
+}
+
+static struct sh_clk_ops sh7705_module_clk_ops = {
+ .recalc = module_clk_recalc,
+};
+
+static unsigned long bus_clk_recalc(struct clk *clk)
+{
+ int idx = (__raw_readw(FRQCR) & 0x0300) >> 8;
+ return clk->parent->rate / stc_multipliers[idx];
+}
+
+static struct sh_clk_ops sh7705_bus_clk_ops = {
+ .recalc = bus_clk_recalc,
+};
+
+static unsigned long cpu_clk_recalc(struct clk *clk)
+{
+ int idx = (__raw_readw(FRQCR) & 0x0030) >> 4;
+ return clk->parent->rate / ifc_divisors[idx];
+}
+
+static struct sh_clk_ops sh7705_cpu_clk_ops = {
+ .recalc = cpu_clk_recalc,
+};
+
+static struct sh_clk_ops *sh7705_clk_ops[] = {
+ &sh7705_master_clk_ops,
+ &sh7705_module_clk_ops,
+ &sh7705_bus_clk_ops,
+ &sh7705_cpu_clk_ops,
+};
+
+void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
+{
+ if (idx < ARRAY_SIZE(sh7705_clk_ops))
+ *ops = sh7705_clk_ops[idx];
+}
+
diff --git a/arch/sh/kernel/cpu/sh3/clock-sh7706.c b/arch/sh/kernel/cpu/sh3/clock-sh7706.c
new file mode 100644
index 00000000..a4088e5b
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh3/clock-sh7706.c
@@ -0,0 +1,84 @@
+/*
+ * arch/sh/kernel/cpu/sh3/clock-sh7706.c
+ *
+ * SH7706 support for the clock framework
+ *
+ * Copyright (C) 2006 Takashi YOSHII
+ *
+ * Based on arch/sh/kernel/cpu/sh3/clock-sh7709.c
+ * Copyright (C) 2005 Andriy Skulysh
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <asm/clock.h>
+#include <asm/freq.h>
+#include <asm/io.h>
+
+static int stc_multipliers[] = { 1, 2, 4, 1, 3, 6, 1, 1 };
+static int ifc_divisors[] = { 1, 2, 4, 1, 3, 1, 1, 1 };
+static int pfc_divisors[] = { 1, 2, 4, 1, 3, 6, 1, 1 };
+
+static void master_clk_init(struct clk *clk)
+{
+ int frqcr = __raw_readw(FRQCR);
+ int idx = ((frqcr & 0x2000) >> 11) | (frqcr & 0x0003);
+
+ clk->rate *= pfc_divisors[idx];
+}
+
+static struct sh_clk_ops sh7706_master_clk_ops = {
+ .init = master_clk_init,
+};
+
+static unsigned long module_clk_recalc(struct clk *clk)
+{
+ int frqcr = __raw_readw(FRQCR);
+ int idx = ((frqcr & 0x2000) >> 11) | (frqcr & 0x0003);
+
+ return clk->parent->rate / pfc_divisors[idx];
+}
+
+static struct sh_clk_ops sh7706_module_clk_ops = {
+ .recalc = module_clk_recalc,
+};
+
+static unsigned long bus_clk_recalc(struct clk *clk)
+{
+ int frqcr = __raw_readw(FRQCR);
+ int idx = ((frqcr & 0x8000) >> 13) | ((frqcr & 0x0030) >> 4);
+
+ return clk->parent->rate / stc_multipliers[idx];
+}
+
+static struct sh_clk_ops sh7706_bus_clk_ops = {
+ .recalc = bus_clk_recalc,
+};
+
+static unsigned long cpu_clk_recalc(struct clk *clk)
+{
+ int frqcr = __raw_readw(FRQCR);
+ int idx = ((frqcr & 0x4000) >> 12) | ((frqcr & 0x000c) >> 2);
+
+ return clk->parent->rate / ifc_divisors[idx];
+}
+
+static struct sh_clk_ops sh7706_cpu_clk_ops = {
+ .recalc = cpu_clk_recalc,
+};
+
+static struct sh_clk_ops *sh7706_clk_ops[] = {
+ &sh7706_master_clk_ops,
+ &sh7706_module_clk_ops,
+ &sh7706_bus_clk_ops,
+ &sh7706_cpu_clk_ops,
+};
+
+void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
+{
+ if (idx < ARRAY_SIZE(sh7706_clk_ops))
+ *ops = sh7706_clk_ops[idx];
+}
diff --git a/arch/sh/kernel/cpu/sh3/clock-sh7709.c b/arch/sh/kernel/cpu/sh3/clock-sh7709.c
new file mode 100644
index 00000000..54a6d4bc
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh3/clock-sh7709.c
@@ -0,0 +1,85 @@
+/*
+ * arch/sh/kernel/cpu/sh3/clock-sh7709.c
+ *
+ * SH7709 support for the clock framework
+ *
+ * Copyright (C) 2005 Andriy Skulysh
+ *
+ * Based on arch/sh/kernel/cpu/sh3/clock-sh7705.c
+ * Copyright (C) 2005 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <asm/clock.h>
+#include <asm/freq.h>
+#include <asm/io.h>
+
+static int stc_multipliers[] = { 1, 2, 4, 8, 3, 6, 1, 1 };
+static int ifc_divisors[] = { 1, 2, 4, 1, 3, 1, 1, 1 };
+static int pfc_divisors[] = { 1, 2, 4, 1, 3, 6, 1, 1 };
+
+static void master_clk_init(struct clk *clk)
+{
+ int frqcr = __raw_readw(FRQCR);
+ int idx = ((frqcr & 0x2000) >> 11) | (frqcr & 0x0003);
+
+ clk->rate *= pfc_divisors[idx];
+}
+
+static struct sh_clk_ops sh7709_master_clk_ops = {
+ .init = master_clk_init,
+};
+
+static unsigned long module_clk_recalc(struct clk *clk)
+{
+ int frqcr = __raw_readw(FRQCR);
+ int idx = ((frqcr & 0x2000) >> 11) | (frqcr & 0x0003);
+
+ return clk->parent->rate / pfc_divisors[idx];
+}
+
+static struct sh_clk_ops sh7709_module_clk_ops = {
+ .recalc = module_clk_recalc,
+};
+
+static unsigned long bus_clk_recalc(struct clk *clk)
+{
+ int frqcr = __raw_readw(FRQCR);
+ int idx = (frqcr & 0x0080) ?
+ ((frqcr & 0x8000) >> 13) | ((frqcr & 0x0030) >> 4) : 1;
+
+ return clk->parent->rate * stc_multipliers[idx];
+}
+
+static struct sh_clk_ops sh7709_bus_clk_ops = {
+ .recalc = bus_clk_recalc,
+};
+
+static unsigned long cpu_clk_recalc(struct clk *clk)
+{
+ int frqcr = __raw_readw(FRQCR);
+ int idx = ((frqcr & 0x4000) >> 12) | ((frqcr & 0x000c) >> 2);
+
+ return clk->parent->rate / ifc_divisors[idx];
+}
+
+static struct sh_clk_ops sh7709_cpu_clk_ops = {
+ .recalc = cpu_clk_recalc,
+};
+
+static struct sh_clk_ops *sh7709_clk_ops[] = {
+ &sh7709_master_clk_ops,
+ &sh7709_module_clk_ops,
+ &sh7709_bus_clk_ops,
+ &sh7709_cpu_clk_ops,
+};
+
+void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
+{
+ if (idx < ARRAY_SIZE(sh7709_clk_ops))
+ *ops = sh7709_clk_ops[idx];
+}
diff --git a/arch/sh/kernel/cpu/sh3/clock-sh7710.c b/arch/sh/kernel/cpu/sh3/clock-sh7710.c
new file mode 100644
index 00000000..ce601b2e
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh3/clock-sh7710.c
@@ -0,0 +1,78 @@
+/*
+ * arch/sh/kernel/cpu/sh3/clock-sh7710.c
+ *
+ * SH7710 support for the clock framework
+ *
+ * Copyright (C) 2005 Paul Mundt
+ *
+ * FRQCR parsing hacked out of arch/sh/kernel/time.c
+ *
+ * Copyright (C) 1999 Tetsuya Okada & Niibe Yutaka
+ * Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
+ * Copyright (C) 2002, 2003, 2004 Paul Mundt
+ * Copyright (C) 2002 M. R. Brown <mrbrown@linux-sh.org>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <asm/clock.h>
+#include <asm/freq.h>
+#include <asm/io.h>
+
+static int md_table[] = { 1, 2, 3, 4, 6, 8, 12 };
+
+static void master_clk_init(struct clk *clk)
+{
+ clk->rate *= md_table[__raw_readw(FRQCR) & 0x0007];
+}
+
+static struct sh_clk_ops sh7710_master_clk_ops = {
+ .init = master_clk_init,
+};
+
+static unsigned long module_clk_recalc(struct clk *clk)
+{
+ int idx = (__raw_readw(FRQCR) & 0x0007);
+ return clk->parent->rate / md_table[idx];
+}
+
+static struct sh_clk_ops sh7710_module_clk_ops = {
+ .recalc = module_clk_recalc,
+};
+
+static unsigned long bus_clk_recalc(struct clk *clk)
+{
+ int idx = (__raw_readw(FRQCR) & 0x0700) >> 8;
+ return clk->parent->rate / md_table[idx];
+}
+
+static struct sh_clk_ops sh7710_bus_clk_ops = {
+ .recalc = bus_clk_recalc,
+};
+
+static unsigned long cpu_clk_recalc(struct clk *clk)
+{
+ int idx = (__raw_readw(FRQCR) & 0x0070) >> 4;
+ return clk->parent->rate / md_table[idx];
+}
+
+static struct sh_clk_ops sh7710_cpu_clk_ops = {
+ .recalc = cpu_clk_recalc,
+};
+
+static struct sh_clk_ops *sh7710_clk_ops[] = {
+ &sh7710_master_clk_ops,
+ &sh7710_module_clk_ops,
+ &sh7710_bus_clk_ops,
+ &sh7710_cpu_clk_ops,
+};
+
+void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
+{
+ if (idx < ARRAY_SIZE(sh7710_clk_ops))
+ *ops = sh7710_clk_ops[idx];
+}
+
diff --git a/arch/sh/kernel/cpu/sh3/clock-sh7712.c b/arch/sh/kernel/cpu/sh3/clock-sh7712.c
new file mode 100644
index 00000000..21438a9a
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh3/clock-sh7712.c
@@ -0,0 +1,71 @@
+/*
+ * arch/sh/kernel/cpu/sh3/clock-sh7712.c
+ *
+ * SH7712 support for the clock framework
+ *
+ * Copyright (C) 2007 Andrew Murray <amurray@mpc-data.co.uk>
+ *
+ * Based on arch/sh/kernel/cpu/sh3/clock-sh3.c
+ * Copyright (C) 2005 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <asm/clock.h>
+#include <asm/freq.h>
+#include <asm/io.h>
+
+static int multipliers[] = { 1, 2, 3 };
+static int divisors[] = { 1, 2, 3, 4, 6 };
+
+static void master_clk_init(struct clk *clk)
+{
+ int frqcr = __raw_readw(FRQCR);
+ int idx = (frqcr & 0x0300) >> 8;
+
+ clk->rate *= multipliers[idx];
+}
+
+static struct sh_clk_ops sh7712_master_clk_ops = {
+ .init = master_clk_init,
+};
+
+static unsigned long module_clk_recalc(struct clk *clk)
+{
+ int frqcr = __raw_readw(FRQCR);
+ int idx = frqcr & 0x0007;
+
+ return clk->parent->rate / divisors[idx];
+}
+
+static struct sh_clk_ops sh7712_module_clk_ops = {
+ .recalc = module_clk_recalc,
+};
+
+static unsigned long cpu_clk_recalc(struct clk *clk)
+{
+ int frqcr = __raw_readw(FRQCR);
+ int idx = (frqcr & 0x0030) >> 4;
+
+ return clk->parent->rate / divisors[idx];
+}
+
+static struct sh_clk_ops sh7712_cpu_clk_ops = {
+ .recalc = cpu_clk_recalc,
+};
+
+static struct sh_clk_ops *sh7712_clk_ops[] = {
+ &sh7712_master_clk_ops,
+ &sh7712_module_clk_ops,
+ &sh7712_cpu_clk_ops,
+};
+
+void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
+{
+ if (idx < ARRAY_SIZE(sh7712_clk_ops))
+ *ops = sh7712_clk_ops[idx];
+}
+
diff --git a/arch/sh/kernel/cpu/sh3/entry.S b/arch/sh/kernel/cpu/sh3/entry.S
new file mode 100644
index 00000000..f6a389c9
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh3/entry.S
@@ -0,0 +1,512 @@
+/*
+ * arch/sh/kernel/cpu/sh3/entry.S
+ *
+ * Copyright (C) 1999, 2000, 2002 Niibe Yutaka
+ * Copyright (C) 2003 - 2006 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/sys.h>
+#include <linux/errno.h>
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+#include <asm/thread_info.h>
+#include <asm/unistd.h>
+#include <cpu/mmu_context.h>
+#include <asm/page.h>
+#include <asm/cache.h>
+
+! NOTE:
+! GNU as (as of 2.9.1) changes bf/s into bt/s and bra when the branch
+! target is too far away, but this causes an illegal slot exception.
+
+/*
+ * entry.S contains the system-call and fault low-level handling routines.
+ * This also contains the timer-interrupt handler, as well as all interrupts
+ * and faults that can result in a task-switch.
+ *
+ * NOTE: This code handles signal-recognition, which happens every time
+ * after a timer-interrupt and after each system call.
+ *
+ * NOTE: This code uses a convention that instructions in the delay slot
+ * of a transfer-control instruction are indented by an extra space, thus:
+ *
+ * jmp @k0 ! control-transfer instruction
+ * ldc k1, ssr ! delay slot
+ *
+ * Stack layout in 'ret_from_syscall':
+ * ptrace needs to have all regs on the stack.
+ * If the order here is changed, it also needs to be
+ * updated in ptrace.c and ptrace.h.
+ *
+ * r0
+ * ...
+ * r15 = stack pointer
+ * spc
+ * pr
+ * ssr
+ * gbr
+ * mach
+ * macl
+ * syscall #
+ *
+ */
+/* Offsets to the stack */
+OFF_R0 = 0 /* Return value. New ABI also arg4 */
+OFF_R1 = 4 /* New ABI: arg5 */
+OFF_R2 = 8 /* New ABI: arg6 */
+OFF_R3 = 12 /* New ABI: syscall_nr */
+OFF_R4 = 16 /* New ABI: arg0 */
+OFF_R5 = 20 /* New ABI: arg1 */
+OFF_R6 = 24 /* New ABI: arg2 */
+OFF_R7 = 28 /* New ABI: arg3 */
+OFF_SP = (15*4)
+OFF_PC = (16*4)
+OFF_SR = (16*4+8)
+OFF_TRA = (16*4+6*4)
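+/*
+ * With the stack layout described above (r0..r15, spc, pr, ssr, gbr,
+ * mach, macl, syscall #), OFF_SP lands on the saved r15 slot (15*4 = 60),
+ * OFF_PC on the saved spc (16*4 = 64), OFF_SR on the saved ssr
+ * (16*4 + 8 = 72) and OFF_TRA on the saved syscall number (16*4 + 6*4 = 88).
+ */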
+
+#define k0 r0
+#define k1 r1
+#define k2 r2
+#define k3 r3
+#define k4 r4
+
+#define g_imask r6 /* r6_bank1 */
+#define k_g_imask r6_bank /* r6_bank1 */
+#define current r7 /* r7_bank1 */
+
+#include <asm/entry-macros.S>
+
+/*
+ * Kernel mode register usage:
+ * k0 scratch
+ * k1 scratch
+ * k2 scratch (Exception code)
+ * k3 scratch (Return address)
+ * k4 scratch
+ * k5 reserved
+ * k6 Global Interrupt Mask (0--15 << 4)
+ * k7 CURRENT_THREAD_INFO (pointer to current thread info)
+ */
+
+!
+! TLB Miss / Initial Page write exception handling
+! _and_
+! TLB hits, but the access violates the protection.
+! It can be a valid access, such as stack growth and/or C-O-W.
+!
+!
+! Find the pmd/pte entry and load the TLB.
+! If it's not found, cause an address error (SEGV).
+!
+! Although this could be written in assembly language (and it'd be faster),
+! this first version depends heavily on the C implementation.
+!
+
+#if defined(CONFIG_MMU)
+ .align 2
+ENTRY(tlb_miss_load)
+ bra call_handle_tlbmiss
+ mov #0, r5
+
+ .align 2
+ENTRY(tlb_miss_store)
+ bra call_handle_tlbmiss
+ mov #1, r5
+
+ .align 2
+ENTRY(initial_page_write)
+ bra call_handle_tlbmiss
+ mov #2, r5
+
+ .align 2
+ENTRY(tlb_protection_violation_load)
+ bra call_do_page_fault
+ mov #0, r5
+
+ .align 2
+ENTRY(tlb_protection_violation_store)
+ bra call_do_page_fault
+ mov #1, r5
+
+call_handle_tlbmiss:
+ mov.l 1f, r0
+ mov r5, r8
+ mov.l @r0, r6
+ mov.l 2f, r0
+ sts pr, r10
+ jsr @r0
+ mov r15, r4
+ !
+ tst r0, r0
+ bf/s 0f
+ lds r10, pr
+ rts
+ nop
+0:
+ mov r8, r5
+call_do_page_fault:
+ mov.l 1f, r0
+ mov.l @r0, r6
+
+ mov.l 3f, r0
+ mov.l 4f, r1
+ mov r15, r4
+ jmp @r0
+ lds r1, pr
+
+ .align 2
+1: .long MMU_TEA
+2: .long handle_tlbmiss
+3: .long do_page_fault
+4: .long ret_from_exception
+
+ .align 2
+ENTRY(address_error_load)
+ bra call_dae
+ mov #0,r5 ! writeaccess = 0
+
+ .align 2
+ENTRY(address_error_store)
+ bra call_dae
+ mov #1,r5 ! writeaccess = 1
+
+ .align 2
+call_dae:
+ mov.l 1f, r0
+ mov.l @r0, r6 ! address
+ mov.l 2f, r0
+ jmp @r0
+ mov r15, r4 ! regs
+
+ .align 2
+1: .long MMU_TEA
+2: .long do_address_error
+#endif /* CONFIG_MMU */
+
+#if defined(CONFIG_SH_STANDARD_BIOS)
+ /* Unwind the stack and jmp to the debug entry */
+ENTRY(sh_bios_handler)
+ mov.l 1f, r8
+ bsr restore_regs
+ nop
+
+ lds k2, pr ! restore pr
+ mov k4, r15
+ !
+ mov.l 2f, k0
+ mov.l @k0, k0
+ jmp @k0
+ ldc k3, ssr
+ .align 2
+1: .long 0x300000f0
+2: .long gdb_vbr_vector
+#endif /* CONFIG_SH_STANDARD_BIOS */
+
+! restore_regs()
+! - restore r0, r1, r2, r3, r4, r5, r6, r7 from the stack
+! - switch bank
+! - restore r8, r9, r10, r11, r12, r13, r14, r15 from the stack
+! - restore spc, pr*, ssr, gbr, mach, macl, skip default tra
+! k2 returns original pr
+! k3 returns original sr
+! k4 returns original stack pointer
+! r8 passes SR bitmask, overwritten with restored data on return
+! r9 trashed
+! BL=0 on entry, on exit BL=1 (depending on r8).
+
+ENTRY(restore_regs)
+ mov.l @r15+, r0
+ mov.l @r15+, r1
+ mov.l @r15+, r2
+ mov.l @r15+, r3
+ mov.l @r15+, r4
+ mov.l @r15+, r5
+ mov.l @r15+, r6
+ mov.l @r15+, r7
+ !
+ stc sr, r9
+ or r8, r9
+ ldc r9, sr
+ !
+ mov.l @r15+, r8
+ mov.l @r15+, r9
+ mov.l @r15+, r10
+ mov.l @r15+, r11
+ mov.l @r15+, r12
+ mov.l @r15+, r13
+ mov.l @r15+, r14
+ mov.l @r15+, k4 ! original stack pointer
+ ldc.l @r15+, spc
+ mov.l @r15+, k2 ! original PR
+ mov.l @r15+, k3 ! original SR
+ ldc.l @r15+, gbr
+ lds.l @r15+, mach
+ lds.l @r15+, macl
+ rts
+ add #4, r15 ! Skip syscall number
+
+restore_all:
+ mov.l 7f, r8
+ bsr restore_regs
+ nop
+
+ lds k2, pr ! restore pr
+ !
+ ! Calculate new SR value
+ mov k3, k2 ! original SR value
+ mov #0xfffffff0, k1
+ extu.b k1, k1
+ not k1, k1
+ and k1, k2 ! Mask original SR value
+ !
+ mov k3, k0 ! Calculate IMASK-bits
+ shlr2 k0
+ and #0x3c, k0
+ cmp/eq #0x3c, k0
+ bt/s 6f
+ shll2 k0
+ mov g_imask, k0
+ !
+6: or k0, k2 ! Set the IMASK-bits
+ ldc k2, ssr
+ !
+ mov k4, r15
+ rte
+ nop
+
+ .align 2
+5: .long 0x00001000 ! DSP
+7: .long 0x30000000
+
+! common exception handler
+#include "../../entry-common.S"
+
+! Exception Vector Base
+!
+! Should be aligned on a page boundary.
+!
+ .balign 4096,0,4096
+ENTRY(vbr_base)
+ .long 0
+!
+! 0x100: General exception vector
+!
+ .balign 256,0,256
+general_exception:
+ bra handle_exception
+ sts pr, k3 ! save original pr value in k3
+
+! prepare_stack()
+! - roll back gRB
+! - switch to kernel stack
+! k0 returns original sp (after roll back)
+! k1 trashed
+! k2 trashed
+
+prepare_stack:
+#ifdef CONFIG_GUSA
+ ! Check for roll back gRB (User and Kernel)
+ mov r15, k0
+ shll k0
+ bf/s 1f
+ shll k0
+ bf/s 1f
+ stc spc, k1
+ stc r0_bank, k0
+ cmp/hs k0, k1 ! test k1 (saved PC) >= k0 (saved r0)
+ bt/s 2f
+ stc r1_bank, k1
+
+ add #-2, k0
+ add r15, k0
+ ldc k0, spc ! PC = saved r0 + r15 - 2
+2: mov k1, r15 ! SP = r1
+1:
+#endif
+ ! Switch to kernel stack if needed
+ stc ssr, k0 ! Is it from kernel space?
+ shll k0 ! Check MD bit (bit30) by shifting it into...
+ shll k0 ! ...the T bit
+ bt/s 1f ! It's a kernel to kernel transition.
+ mov r15, k0 ! save original stack to k0
+ /* User space to kernel */
+ mov #(THREAD_SIZE >> 10), k1
+ shll8 k1 ! k1 := THREAD_SIZE
+ shll2 k1
+ add current, k1
+ mov k1, r15 ! change to kernel stack
+ !
+1:
+ rts
+ nop
+
+!
+! 0x400: Instruction and Data TLB miss exception vector
+!
+ .balign 1024,0,1024
+tlb_miss:
+ sts pr, k3 ! save original pr value in k3
+
+handle_exception:
+ mova exception_data, k0
+
+ ! Setup stack and save DSP context (k0 contains original r15 on return)
+ bsr prepare_stack
+ PREF(k0)
+
+ ! Save registers / Switch to bank 0
+ mov.l 5f, k2 ! vector register address
+ mov.l 1f, k4 ! SR bits to clear in k4
+ bsr save_regs ! needs original pr value in k3
+ mov.l @k2, k2 ! read out vector and keep in k2
+
+handle_exception_special:
+ setup_frame_reg
+
+ ! Setup return address and jump to exception handler
+ mov.l 7f, r9 ! fetch return address
+ stc r2_bank, r0 ! k2 (vector)
+ mov.l 6f, r10
+ shlr2 r0
+ shlr r0
+ mov.l @(r0, r10), r10
+ jmp @r10
+ lds r9, pr ! put return address in pr
+
+ .align L1_CACHE_SHIFT
+
+! save_regs()
+! - save default tra, macl, mach, gbr, ssr, pr* and spc on the stack
+! - save r15*, r14, r13, r12, r11, r10, r9, r8 on the stack
+! - switch bank
+! - save r7, r6, r5, r4, r3, r2, r1, r0 on the stack
+! k0 contains original stack pointer*
+! k1 trashed
+! k3 passes original pr*
+! k4 passes SR bitmask
+! BL=1 on entry, on exit BL=0.
+
+ENTRY(save_regs)
+ mov #-1, r1
+ mov.l k1, @-r15 ! set TRA (default: -1)
+ sts.l macl, @-r15
+ sts.l mach, @-r15
+ stc.l gbr, @-r15
+ stc.l ssr, @-r15
+ mov.l k3, @-r15 ! original pr in k3
+ stc.l spc, @-r15
+
+ mov.l k0, @-r15 ! original stack pointer in k0
+ mov.l r14, @-r15
+ mov.l r13, @-r15
+ mov.l r12, @-r15
+ mov.l r11, @-r15
+ mov.l r10, @-r15
+ mov.l r9, @-r15
+ mov.l r8, @-r15
+
+ mov.l 0f, k3 ! SR bits to set in k3
+
+ ! fall-through
+
+! save_low_regs()
+! - modify SR for bank switch
+! - save r7, r6, r5, r4, r3, r2, r1, r0 on the stack
+! k3 passes bits to set in SR
+! k4 passes bits to clear in SR
+
+ENTRY(save_low_regs)
+ stc sr, r8
+ or k3, r8
+ and k4, r8
+ ldc r8, sr
+
+ mov.l r7, @-r15
+ mov.l r6, @-r15
+ mov.l r5, @-r15
+ mov.l r4, @-r15
+ mov.l r3, @-r15
+ mov.l r2, @-r15
+ mov.l r1, @-r15
+ rts
+ mov.l r0, @-r15
+
+!
+! 0x600: Interrupt / NMI vector
+!
+ .balign 512,0,512
+ENTRY(handle_interrupt)
+ sts pr, k3 ! save original pr value in k3
+ mova exception_data, k0
+
+ ! Setup stack and save DSP context (k0 contains original r15 on return)
+ bsr prepare_stack
+ PREF(k0)
+
+ ! Save registers / Switch to bank 0
+ mov.l 1f, k4 ! SR bits to clear in k4
+ bsr save_regs ! needs original pr value in k3
+ mov #-1, k2 ! default vector kept in k2
+
+ setup_frame_reg
+
+ stc sr, r0 ! get status register
+ shlr2 r0
+ and #0x3c, r0
+ cmp/eq #0x3c, r0
+ bf 9f
+ TRACE_IRQS_OFF
+9:
+
+ ! Setup return address and jump to do_IRQ
+ mov.l 4f, r9 ! fetch return address
+ lds r9, pr ! put return address in pr
+ mov.l 2f, r4
+ mov.l 3f, r9
+ mov.l @r4, r4 ! pass INTEVT vector as arg0
+
+ shlr2 r4
+ shlr r4
+ mov r4, r0 ! save vector->jmp table offset for later
+
+ shlr2 r4 ! vector to IRQ# conversion
+ add #-0x10, r4
+
+ cmp/pz r4 ! is it a valid IRQ?
+ bt 10f
+
+ /*
+ * We got here as a result of taking the INTEVT path for something
+ * that isn't a valid hard IRQ, therefore we bypass the do_IRQ()
+ * path and special case the event dispatch instead. This is the
+ * expected path for the NMI (and any other brilliantly implemented
+ * exception), which effectively wants regular exception dispatch
+ * but is unfortunately reported through INTEVT rather than
+ * EXPEVT. Grr.
+ */
+ mov.l 6f, r9
+ mov.l @(r0, r9), r9
+ jmp @r9
+ mov r15, r8 ! trap handlers take saved regs in r8
+
+10:
+ jmp @r9 ! Off to do_IRQ() we go.
+ mov r15, r5 ! pass saved registers as arg1
+
+ENTRY(exception_none)
+ rts
+ nop
+
+ .align L1_CACHE_SHIFT
+exception_data:
+0: .long 0x000080f0 ! FD=1, IMASK=15
+1: .long 0xcfffffff ! RB=0, BL=0
+2: .long INTEVT
+3: .long do_IRQ
+4: .long ret_from_irq
+5: .long EXPEVT
+6: .long exception_handling_table
+7: .long ret_from_exception
diff --git a/arch/sh/kernel/cpu/sh3/ex.S b/arch/sh/kernel/cpu/sh3/ex.S
new file mode 100644
index 00000000..99b4d020
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh3/ex.S
@@ -0,0 +1,59 @@
+/*
+ * arch/sh/kernel/cpu/sh3/ex.S
+ *
+ * The SH-3 and SH-4 exception vector table.
+ *
+ * Copyright (C) 1999, 2000, 2002 Niibe Yutaka
+ * Copyright (C) 2003 - 2008 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/linkage.h>
+
+#if !defined(CONFIG_MMU)
+#define tlb_miss_load exception_error
+#define tlb_miss_store exception_error
+#define initial_page_write exception_error
+#define tlb_protection_violation_load exception_error
+#define tlb_protection_violation_store exception_error
+#define address_error_load exception_error
+#define address_error_store exception_error
+#endif
+
+#if !defined(CONFIG_SH_FPU)
+#define fpu_error_trap_handler exception_error
+#endif
+
+#if !defined(CONFIG_KGDB)
+#define kgdb_handle_exception exception_error
+#endif
+
+ .align 2
+ .data
+
+ENTRY(exception_handling_table)
+ .long exception_error /* 000 */
+ .long exception_error
+ .long tlb_miss_load /* 040 */
+ .long tlb_miss_store
+ .long initial_page_write
+ .long tlb_protection_violation_load
+ .long tlb_protection_violation_store
+ .long address_error_load
+ .long address_error_store /* 100 */
+ .long fpu_error_trap_handler /* 120 */
+ .long exception_error /* 140 */
+ .long system_call ! Unconditional Trap /* 160 */
+ .long exception_error ! reserved_instruction (filled by trap_init) /* 180 */
+ .long exception_error ! illegal_slot_instruction (filled by trap_init) /*1A0*/
+ .long nmi_trap_handler /* 1C0 */ ! Allow trap to debugger
+ .long breakpoint_trap_handler /* 1E0 */
+
+ /*
+ * Pad the remainder of the table out; exceptions residing at far-away
+ * offsets can be manually inserted into their appropriate locations
+ * via set_exception_table_{evt,vec}().
+ */
+ .balign 4096,0,4096
diff --git a/arch/sh/kernel/cpu/sh3/pinmux-sh7720.c b/arch/sh/kernel/cpu/sh3/pinmux-sh7720.c
new file mode 100644
index 00000000..9ca15462
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh3/pinmux-sh7720.c
@@ -0,0 +1,1242 @@
+/*
+ * SH7720 Pinmux
+ *
+ * Copyright (C) 2008 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <cpu/sh7720.h>
+
+enum {
+ PINMUX_RESERVED = 0,
+
+ PINMUX_DATA_BEGIN,
+ PTA7_DATA, PTA6_DATA, PTA5_DATA, PTA4_DATA,
+ PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA,
+ PTB7_DATA, PTB6_DATA, PTB5_DATA, PTB4_DATA,
+ PTB3_DATA, PTB2_DATA, PTB1_DATA, PTB0_DATA,
+ PTC7_DATA, PTC6_DATA, PTC5_DATA, PTC4_DATA,
+ PTC3_DATA, PTC2_DATA, PTC1_DATA, PTC0_DATA,
+ PTD7_DATA, PTD6_DATA, PTD5_DATA, PTD4_DATA,
+ PTD3_DATA, PTD2_DATA, PTD1_DATA, PTD0_DATA,
+ PTE6_DATA, PTE5_DATA, PTE4_DATA,
+ PTE3_DATA, PTE2_DATA, PTE1_DATA, PTE0_DATA,
+ PTF6_DATA, PTF5_DATA, PTF4_DATA,
+ PTF3_DATA, PTF2_DATA, PTF1_DATA, PTF0_DATA,
+ PTG6_DATA, PTG5_DATA, PTG4_DATA,
+ PTG3_DATA, PTG2_DATA, PTG1_DATA, PTG0_DATA,
+ PTH6_DATA, PTH5_DATA, PTH4_DATA,
+ PTH3_DATA, PTH2_DATA, PTH1_DATA, PTH0_DATA,
+ PTJ6_DATA, PTJ5_DATA, PTJ4_DATA,
+ PTJ3_DATA, PTJ2_DATA, PTJ1_DATA, PTJ0_DATA,
+ PTK3_DATA, PTK2_DATA, PTK1_DATA, PTK0_DATA,
+ PTL7_DATA, PTL6_DATA, PTL5_DATA, PTL4_DATA, PTL3_DATA,
+ PTM7_DATA, PTM6_DATA, PTM5_DATA, PTM4_DATA,
+ PTM3_DATA, PTM2_DATA, PTM1_DATA, PTM0_DATA,
+ PTP4_DATA, PTP3_DATA, PTP2_DATA, PTP1_DATA, PTP0_DATA,
+ PTR7_DATA, PTR6_DATA, PTR5_DATA, PTR4_DATA,
+ PTR3_DATA, PTR2_DATA, PTR1_DATA, PTR0_DATA,
+ PTS4_DATA, PTS3_DATA, PTS2_DATA, PTS1_DATA, PTS0_DATA,
+ PTT4_DATA, PTT3_DATA, PTT2_DATA, PTT1_DATA, PTT0_DATA,
+ PTU4_DATA, PTU3_DATA, PTU2_DATA, PTU1_DATA, PTU0_DATA,
+ PTV4_DATA, PTV3_DATA, PTV2_DATA, PTV1_DATA, PTV0_DATA,
+ PINMUX_DATA_END,
+
+ PINMUX_INPUT_BEGIN,
+ PTA7_IN, PTA6_IN, PTA5_IN, PTA4_IN,
+ PTA3_IN, PTA2_IN, PTA1_IN, PTA0_IN,
+ PTB7_IN, PTB6_IN, PTB5_IN, PTB4_IN,
+ PTB3_IN, PTB2_IN, PTB1_IN, PTB0_IN,
+ PTC7_IN, PTC6_IN, PTC5_IN, PTC4_IN,
+ PTC3_IN, PTC2_IN, PTC1_IN, PTC0_IN,
+ PTD7_IN, PTD6_IN, PTD5_IN, PTD4_IN,
+ PTD3_IN, PTD2_IN, PTD1_IN, PTD0_IN,
+ PTE6_IN, PTE5_IN, PTE4_IN,
+ PTE3_IN, PTE2_IN, PTE1_IN, PTE0_IN,
+ PTF6_IN, PTF5_IN, PTF4_IN,
+ PTF3_IN, PTF2_IN, PTF1_IN, PTF0_IN,
+ PTG6_IN, PTG5_IN, PTG4_IN,
+ PTG3_IN, PTG2_IN, PTG1_IN, PTG0_IN,
+ PTH6_IN, PTH5_IN, PTH4_IN,
+ PTH3_IN, PTH2_IN, PTH1_IN, PTH0_IN,
+ PTJ6_IN, PTJ5_IN, PTJ4_IN,
+ PTJ3_IN, PTJ2_IN, PTJ1_IN, PTJ0_IN,
+ PTK3_IN, PTK2_IN, PTK1_IN, PTK0_IN,
+ PTL7_IN, PTL6_IN, PTL5_IN, PTL4_IN, PTL3_IN,
+ PTM7_IN, PTM6_IN, PTM5_IN, PTM4_IN,
+ PTM3_IN, PTM2_IN, PTM1_IN, PTM0_IN,
+ PTP4_IN, PTP3_IN, PTP2_IN, PTP1_IN, PTP0_IN,
+ PTR7_IN, PTR6_IN, PTR5_IN, PTR4_IN,
+ PTR3_IN, PTR2_IN, PTR1_IN, PTR0_IN,
+ PTS4_IN, PTS3_IN, PTS2_IN, PTS1_IN, PTS0_IN,
+ PTT4_IN, PTT3_IN, PTT2_IN, PTT1_IN, PTT0_IN,
+ PTU4_IN, PTU3_IN, PTU2_IN, PTU1_IN, PTU0_IN,
+ PTV4_IN, PTV3_IN, PTV2_IN, PTV1_IN, PTV0_IN,
+ PINMUX_INPUT_END,
+
+ PINMUX_INPUT_PULLUP_BEGIN,
+ PTA7_IN_PU, PTA6_IN_PU, PTA5_IN_PU, PTA4_IN_PU,
+ PTA3_IN_PU, PTA2_IN_PU, PTA1_IN_PU, PTA0_IN_PU,
+ PTB7_IN_PU, PTB6_IN_PU, PTB5_IN_PU, PTB4_IN_PU,
+ PTB3_IN_PU, PTB2_IN_PU, PTB1_IN_PU, PTB0_IN_PU,
+ PTC7_IN_PU, PTC6_IN_PU, PTC5_IN_PU, PTC4_IN_PU,
+ PTC3_IN_PU, PTC2_IN_PU, PTC1_IN_PU, PTC0_IN_PU,
+ PTD7_IN_PU, PTD6_IN_PU, PTD5_IN_PU, PTD4_IN_PU,
+ PTD3_IN_PU, PTD2_IN_PU, PTD1_IN_PU, PTD0_IN_PU,
+ PTE4_IN_PU, PTE3_IN_PU, PTE2_IN_PU, PTE1_IN_PU, PTE0_IN_PU,
+ PTF0_IN_PU,
+ PTG6_IN_PU, PTG5_IN_PU, PTG4_IN_PU,
+ PTG3_IN_PU, PTG2_IN_PU, PTG1_IN_PU, PTG0_IN_PU,
+ PTH6_IN_PU, PTH5_IN_PU, PTH4_IN_PU,
+ PTH3_IN_PU, PTH2_IN_PU, PTH1_IN_PU, PTH0_IN_PU,
+ PTJ6_IN_PU, PTJ5_IN_PU, PTJ4_IN_PU,
+ PTJ3_IN_PU, PTJ2_IN_PU, PTJ1_IN_PU, PTJ0_IN_PU,
+ PTK3_IN_PU, PTK2_IN_PU, PTK1_IN_PU, PTK0_IN_PU,
+ PTL7_IN_PU, PTL6_IN_PU, PTL5_IN_PU, PTL4_IN_PU, PTL3_IN_PU,
+ PTM7_IN_PU, PTM6_IN_PU, PTM5_IN_PU, PTM4_IN_PU,
+ PTM3_IN_PU, PTM2_IN_PU, PTM1_IN_PU, PTM0_IN_PU,
+ PTP4_IN_PU, PTP3_IN_PU, PTP2_IN_PU, PTP1_IN_PU, PTP0_IN_PU,
+ PTR7_IN_PU, PTR6_IN_PU, PTR5_IN_PU, PTR4_IN_PU,
+ PTR3_IN_PU, PTR2_IN_PU, PTR1_IN_PU, PTR0_IN_PU,
+ PTS4_IN_PU, PTS3_IN_PU, PTS2_IN_PU, PTS1_IN_PU, PTS0_IN_PU,
+ PTT4_IN_PU, PTT3_IN_PU, PTT2_IN_PU, PTT1_IN_PU, PTT0_IN_PU,
+ PTU4_IN_PU, PTU3_IN_PU, PTU2_IN_PU, PTU1_IN_PU, PTU0_IN_PU,
+ PTV4_IN_PU, PTV3_IN_PU, PTV2_IN_PU, PTV1_IN_PU, PTV0_IN_PU,
+ PINMUX_INPUT_PULLUP_END,
+
+ PINMUX_OUTPUT_BEGIN,
+ PTA7_OUT, PTA6_OUT, PTA5_OUT, PTA4_OUT,
+ PTA3_OUT, PTA2_OUT, PTA1_OUT, PTA0_OUT,
+ PTB7_OUT, PTB6_OUT, PTB5_OUT, PTB4_OUT,
+ PTB3_OUT, PTB2_OUT, PTB1_OUT, PTB0_OUT,
+ PTC7_OUT, PTC6_OUT, PTC5_OUT, PTC4_OUT,
+ PTC3_OUT, PTC2_OUT, PTC1_OUT, PTC0_OUT,
+ PTD7_OUT, PTD6_OUT, PTD5_OUT, PTD4_OUT,
+ PTD3_OUT, PTD2_OUT, PTD1_OUT, PTD0_OUT,
+ PTE4_OUT, PTE3_OUT, PTE2_OUT, PTE1_OUT, PTE0_OUT,
+ PTF0_OUT,
+ PTG6_OUT, PTG5_OUT, PTG4_OUT,
+ PTG3_OUT, PTG2_OUT, PTG1_OUT, PTG0_OUT,
+ PTH6_OUT, PTH5_OUT, PTH4_OUT,
+ PTH3_OUT, PTH2_OUT, PTH1_OUT, PTH0_OUT,
+ PTJ6_OUT, PTJ5_OUT, PTJ4_OUT,
+ PTJ3_OUT, PTJ2_OUT, PTJ1_OUT, PTJ0_OUT,
+ PTK3_OUT, PTK2_OUT, PTK1_OUT, PTK0_OUT,
+ PTL7_OUT, PTL6_OUT, PTL5_OUT, PTL4_OUT, PTL3_OUT,
+ PTM7_OUT, PTM6_OUT, PTM5_OUT, PTM4_OUT,
+ PTM3_OUT, PTM2_OUT, PTM1_OUT, PTM0_OUT,
+ PTP4_OUT, PTP3_OUT, PTP2_OUT, PTP1_OUT, PTP0_OUT,
+ PTR7_OUT, PTR6_OUT, PTR5_OUT, PTR4_OUT,
+ PTR3_OUT, PTR2_OUT, PTR1_OUT, PTR0_OUT,
+ PTS4_OUT, PTS3_OUT, PTS2_OUT, PTS1_OUT, PTS0_OUT,
+ PTT4_OUT, PTT3_OUT, PTT2_OUT, PTT1_OUT, PTT0_OUT,
+ PTU4_OUT, PTU3_OUT, PTU2_OUT, PTU1_OUT, PTU0_OUT,
+ PTV4_OUT, PTV3_OUT, PTV2_OUT, PTV1_OUT, PTV0_OUT,
+ PINMUX_OUTPUT_END,
+
+ PINMUX_FUNCTION_BEGIN,
+ PTA7_FN, PTA6_FN, PTA5_FN, PTA4_FN,
+ PTA3_FN, PTA2_FN, PTA1_FN, PTA0_FN,
+ PTB7_FN, PTB6_FN, PTB5_FN, PTB4_FN,
+ PTB3_FN, PTB2_FN, PTB1_FN, PTB0_FN,
+ PTC7_FN, PTC6_FN, PTC5_FN, PTC4_FN,
+ PTC3_FN, PTC2_FN, PTC1_FN, PTC0_FN,
+ PTD7_FN, PTD6_FN, PTD5_FN, PTD4_FN,
+ PTD3_FN, PTD2_FN, PTD1_FN, PTD0_FN,
+ PTE6_FN, PTE5_FN, PTE4_FN,
+ PTE3_FN, PTE2_FN, PTE1_FN, PTE0_FN,
+ PTF6_FN, PTF5_FN, PTF4_FN,
+ PTF3_FN, PTF2_FN, PTF1_FN, PTF0_FN,
+ PTG6_FN, PTG5_FN, PTG4_FN,
+ PTG3_FN, PTG2_FN, PTG1_FN, PTG0_FN,
+ PTH6_FN, PTH5_FN, PTH4_FN,
+ PTH3_FN, PTH2_FN, PTH1_FN, PTH0_FN,
+ PTJ6_FN, PTJ5_FN, PTJ4_FN,
+ PTJ3_FN, PTJ2_FN, PTJ1_FN, PTJ0_FN,
+ PTK3_FN, PTK2_FN, PTK1_FN, PTK0_FN,
+ PTL7_FN, PTL6_FN, PTL5_FN, PTL4_FN, PTL3_FN,
+ PTM7_FN, PTM6_FN, PTM5_FN, PTM4_FN,
+ PTM3_FN, PTM2_FN, PTM1_FN, PTM0_FN,
+ PTP4_FN, PTP3_FN, PTP2_FN, PTP1_FN, PTP0_FN,
+ PTR7_FN, PTR6_FN, PTR5_FN, PTR4_FN,
+ PTR3_FN, PTR2_FN, PTR1_FN, PTR0_FN,
+ PTS4_FN, PTS3_FN, PTS2_FN, PTS1_FN, PTS0_FN,
+ PTT4_FN, PTT3_FN, PTT2_FN, PTT1_FN, PTT0_FN,
+ PTU4_FN, PTU3_FN, PTU2_FN, PTU1_FN, PTU0_FN,
+ PTV4_FN, PTV3_FN, PTV2_FN, PTV1_FN, PTV0_FN,
+
+ PSELA_1_0_00, PSELA_1_0_01, PSELA_1_0_10,
+ PSELA_3_2_00, PSELA_3_2_01, PSELA_3_2_10, PSELA_3_2_11,
+ PSELA_5_4_00, PSELA_5_4_01, PSELA_5_4_10, PSELA_5_4_11,
+ PSELA_7_6_00, PSELA_7_6_01, PSELA_7_6_10,
+ PSELA_9_8_00, PSELA_9_8_01, PSELA_9_8_10,
+ PSELA_11_10_00, PSELA_11_10_01, PSELA_11_10_10,
+ PSELA_13_12_00, PSELA_13_12_10,
+ PSELA_15_14_00, PSELA_15_14_10,
+ PSELB_9_8_00, PSELB_9_8_11,
+ PSELB_11_10_00, PSELB_11_10_01, PSELB_11_10_10, PSELB_11_10_11,
+ PSELB_13_12_00, PSELB_13_12_01, PSELB_13_12_10, PSELB_13_12_11,
+ PSELB_15_14_00, PSELB_15_14_11,
+ PSELC_9_8_00, PSELC_9_8_10,
+ PSELC_11_10_00, PSELC_11_10_10,
+ PSELC_13_12_00, PSELC_13_12_01, PSELC_13_12_10,
+ PSELC_15_14_00, PSELC_15_14_01, PSELC_15_14_10,
+ PSELD_1_0_00, PSELD_1_0_10,
+ PSELD_11_10_00, PSELD_11_10_01,
+ PSELD_15_14_00, PSELD_15_14_01, PSELD_15_14_10,
+ PINMUX_FUNCTION_END,
+
+ PINMUX_MARK_BEGIN,
+ D31_MARK, D30_MARK, D29_MARK, D28_MARK,
+ D27_MARK, D26_MARK, D25_MARK, D24_MARK,
+ D23_MARK, D22_MARK, D21_MARK, D20_MARK,
+ D19_MARK, D18_MARK, D17_MARK, D16_MARK,
+ IOIS16_MARK, RAS_MARK, CAS_MARK, CKE_MARK,
+ CS5B_CE1A_MARK, CS6B_CE1B_MARK,
+ A25_MARK, A24_MARK, A23_MARK, A22_MARK,
+ A21_MARK, A20_MARK, A19_MARK, A0_MARK,
+ REFOUT_MARK, IRQOUT_MARK,
+ LCD_DATA15_MARK, LCD_DATA14_MARK,
+ LCD_DATA13_MARK, LCD_DATA12_MARK,
+ LCD_DATA11_MARK, LCD_DATA10_MARK,
+ LCD_DATA9_MARK, LCD_DATA8_MARK,
+ LCD_DATA7_MARK, LCD_DATA6_MARK,
+ LCD_DATA5_MARK, LCD_DATA4_MARK,
+ LCD_DATA3_MARK, LCD_DATA2_MARK,
+ LCD_DATA1_MARK, LCD_DATA0_MARK,
+ LCD_M_DISP_MARK,
+ LCD_CL1_MARK, LCD_CL2_MARK,
+ LCD_DON_MARK, LCD_FLM_MARK,
+ LCD_VEPWC_MARK, LCD_VCPWC_MARK,
+ AFE_RXIN_MARK, AFE_RDET_MARK,
+ AFE_FS_MARK, AFE_TXOUT_MARK,
+ AFE_SCLK_MARK, AFE_RLYCNT_MARK,
+ AFE_HC1_MARK,
+ IIC_SCL_MARK, IIC_SDA_MARK,
+ DA1_MARK, DA0_MARK,
+ AN3_MARK, AN2_MARK, AN1_MARK, AN0_MARK, ADTRG_MARK,
+ USB1D_RCV_MARK, USB1D_TXSE0_MARK,
+ USB1D_TXDPLS_MARK, USB1D_DMNS_MARK,
+ USB1D_DPLS_MARK, USB1D_SPEED_MARK,
+ USB1D_TXENL_MARK,
+ USB2_PWR_EN_MARK, USB1_PWR_EN_USBF_UPLUP_MARK, USB1D_SUSPEND_MARK,
+ IRQ5_MARK, IRQ4_MARK,
+ IRQ3_IRL3_MARK, IRQ2_IRL2_MARK,
+ IRQ1_IRL1_MARK, IRQ0_IRL0_MARK,
+ PCC_REG_MARK, PCC_DRV_MARK,
+ PCC_BVD2_MARK, PCC_BVD1_MARK,
+ PCC_CD2_MARK, PCC_CD1_MARK,
+ PCC_RESET_MARK, PCC_RDY_MARK,
+ PCC_VS2_MARK, PCC_VS1_MARK,
+ AUDATA3_MARK, AUDATA2_MARK, AUDATA1_MARK, AUDATA0_MARK,
+ AUDCK_MARK, AUDSYNC_MARK, ASEBRKAK_MARK, TRST_MARK,
+ TMS_MARK, TDO_MARK, TDI_MARK, TCK_MARK,
+ DACK1_MARK, DREQ1_MARK, DACK0_MARK, DREQ0_MARK,
+ TEND1_MARK, TEND0_MARK,
+ SIOF0_SYNC_MARK, SIOF0_MCLK_MARK,
+ SIOF0_TXD_MARK, SIOF0_RXD_MARK,
+ SIOF0_SCK_MARK,
+ SIOF1_SYNC_MARK, SIOF1_MCLK_MARK,
+ SIOF1_TXD_MARK, SIOF1_RXD_MARK,
+ SIOF1_SCK_MARK,
+ SCIF0_TXD_MARK, SCIF0_RXD_MARK,
+ SCIF0_RTS_MARK, SCIF0_CTS_MARK, SCIF0_SCK_MARK,
+ SCIF1_TXD_MARK, SCIF1_RXD_MARK,
+ SCIF1_RTS_MARK, SCIF1_CTS_MARK, SCIF1_SCK_MARK,
+ TPU_TO1_MARK, TPU_TO0_MARK,
+ TPU_TI3B_MARK, TPU_TI3A_MARK,
+ TPU_TI2B_MARK, TPU_TI2A_MARK,
+ TPU_TO3_MARK, TPU_TO2_MARK,
+ SIM_D_MARK, SIM_CLK_MARK, SIM_RST_MARK,
+ MMC_DAT_MARK, MMC_CMD_MARK,
+ MMC_CLK_MARK, MMC_VDDON_MARK,
+ MMC_ODMOD_MARK,
+ STATUS0_MARK, STATUS1_MARK,
+ PINMUX_MARK_END,
+};
+
+static pinmux_enum_t pinmux_data[] = {
+ /* PTA GPIO */
+ PINMUX_DATA(PTA7_DATA, PTA7_IN, PTA7_OUT, PTA7_IN_PU),
+ PINMUX_DATA(PTA6_DATA, PTA6_IN, PTA6_OUT, PTA6_IN_PU),
+ PINMUX_DATA(PTA5_DATA, PTA5_IN, PTA5_OUT, PTA5_IN_PU),
+ PINMUX_DATA(PTA4_DATA, PTA4_IN, PTA4_OUT, PTA4_IN_PU),
+ PINMUX_DATA(PTA3_DATA, PTA3_IN, PTA3_OUT, PTA3_IN_PU),
+ PINMUX_DATA(PTA2_DATA, PTA2_IN, PTA2_OUT, PTA2_IN_PU),
+ PINMUX_DATA(PTA1_DATA, PTA1_IN, PTA1_OUT, PTA1_IN_PU),
+ PINMUX_DATA(PTA0_DATA, PTA0_IN, PTA0_OUT, PTA0_IN_PU),
+
+ /* PTB GPIO */
+ PINMUX_DATA(PTB7_DATA, PTB7_IN, PTB7_OUT, PTB7_IN_PU),
+ PINMUX_DATA(PTB6_DATA, PTB6_IN, PTB6_OUT, PTB6_IN_PU),
+ PINMUX_DATA(PTB5_DATA, PTB5_IN, PTB5_OUT, PTB5_IN_PU),
+ PINMUX_DATA(PTB4_DATA, PTB4_IN, PTB4_OUT, PTB4_IN_PU),
+ PINMUX_DATA(PTB3_DATA, PTB3_IN, PTB3_OUT, PTB3_IN_PU),
+ PINMUX_DATA(PTB2_DATA, PTB2_IN, PTB2_OUT, PTB2_IN_PU),
+ PINMUX_DATA(PTB1_DATA, PTB1_IN, PTB1_OUT, PTB1_IN_PU),
+ PINMUX_DATA(PTB0_DATA, PTB0_IN, PTB0_OUT, PTB0_IN_PU),
+
+ /* PTC GPIO */
+ PINMUX_DATA(PTC7_DATA, PTC7_IN, PTC7_OUT, PTC7_IN_PU),
+ PINMUX_DATA(PTC6_DATA, PTC6_IN, PTC6_OUT, PTC6_IN_PU),
+ PINMUX_DATA(PTC5_DATA, PTC5_IN, PTC5_OUT, PTC5_IN_PU),
+ PINMUX_DATA(PTC4_DATA, PTC4_IN, PTC4_OUT, PTC4_IN_PU),
+ PINMUX_DATA(PTC3_DATA, PTC3_IN, PTC3_OUT, PTC3_IN_PU),
+ PINMUX_DATA(PTC2_DATA, PTC2_IN, PTC2_OUT, PTC2_IN_PU),
+ PINMUX_DATA(PTC1_DATA, PTC1_IN, PTC1_OUT, PTC1_IN_PU),
+ PINMUX_DATA(PTC0_DATA, PTC0_IN, PTC0_OUT, PTC0_IN_PU),
+
+ /* PTD GPIO */
+ PINMUX_DATA(PTD7_DATA, PTD7_IN, PTD7_OUT, PTD7_IN_PU),
+ PINMUX_DATA(PTD6_DATA, PTD6_IN, PTD6_OUT, PTD6_IN_PU),
+ PINMUX_DATA(PTD5_DATA, PTD5_IN, PTD5_OUT, PTD5_IN_PU),
+ PINMUX_DATA(PTD4_DATA, PTD4_IN, PTD4_OUT, PTD4_IN_PU),
+ PINMUX_DATA(PTD3_DATA, PTD3_IN, PTD3_OUT, PTD3_IN_PU),
+ PINMUX_DATA(PTD2_DATA, PTD2_IN, PTD2_OUT, PTD2_IN_PU),
+ PINMUX_DATA(PTD1_DATA, PTD1_IN, PTD1_OUT, PTD1_IN_PU),
+ PINMUX_DATA(PTD0_DATA, PTD0_IN, PTD0_OUT, PTD0_IN_PU),
+
+ /* PTE GPIO */
+ PINMUX_DATA(PTE6_DATA, PTE6_IN),
+ PINMUX_DATA(PTE5_DATA, PTE5_IN),
+ PINMUX_DATA(PTE4_DATA, PTE4_IN, PTE4_OUT, PTE4_IN_PU),
+ PINMUX_DATA(PTE3_DATA, PTE3_IN, PTE3_OUT, PTE3_IN_PU),
+ PINMUX_DATA(PTE2_DATA, PTE2_IN, PTE2_OUT, PTE2_IN_PU),
+ PINMUX_DATA(PTE1_DATA, PTE1_IN, PTE1_OUT, PTE1_IN_PU),
+ PINMUX_DATA(PTE0_DATA, PTE0_IN, PTE0_OUT, PTE0_IN_PU),
+
+ /* PTF GPIO */
+ PINMUX_DATA(PTF6_DATA, PTF6_IN),
+ PINMUX_DATA(PTF5_DATA, PTF5_IN),
+ PINMUX_DATA(PTF4_DATA, PTF4_IN),
+ PINMUX_DATA(PTF3_DATA, PTF3_IN),
+ PINMUX_DATA(PTF2_DATA, PTF2_IN),
+ PINMUX_DATA(PTF1_DATA, PTF1_IN),
+ PINMUX_DATA(PTF0_DATA, PTF0_IN, PTF0_OUT, PTF0_IN_PU),
+
+ /* PTG GPIO */
+ PINMUX_DATA(PTG6_DATA, PTG6_IN, PTG6_OUT, PTG6_IN_PU),
+ PINMUX_DATA(PTG5_DATA, PTG5_IN, PTG5_OUT, PTG5_IN_PU),
+ PINMUX_DATA(PTG4_DATA, PTG4_IN, PTG4_OUT, PTG4_IN_PU),
+ PINMUX_DATA(PTG3_DATA, PTG3_IN, PTG3_OUT, PTG3_IN_PU),
+ PINMUX_DATA(PTG2_DATA, PTG2_IN, PTG2_OUT, PTG2_IN_PU),
+ PINMUX_DATA(PTG1_DATA, PTG1_IN, PTG1_OUT, PTG1_IN_PU),
+ PINMUX_DATA(PTG0_DATA, PTG0_IN, PTG0_OUT, PTG0_IN_PU),
+
+ /* PTH GPIO */
+ PINMUX_DATA(PTH6_DATA, PTH6_IN, PTH6_OUT, PTH6_IN_PU),
+ PINMUX_DATA(PTH5_DATA, PTH5_IN, PTH5_OUT, PTH5_IN_PU),
+ PINMUX_DATA(PTH4_DATA, PTH4_IN, PTH4_OUT, PTH4_IN_PU),
+ PINMUX_DATA(PTH3_DATA, PTH3_IN, PTH3_OUT, PTH3_IN_PU),
+ PINMUX_DATA(PTH2_DATA, PTH2_IN, PTH2_OUT, PTH2_IN_PU),
+ PINMUX_DATA(PTH1_DATA, PTH1_IN, PTH1_OUT, PTH1_IN_PU),
+ PINMUX_DATA(PTH0_DATA, PTH0_IN, PTH0_OUT, PTH0_IN_PU),
+
+ /* PTJ GPIO */
+ PINMUX_DATA(PTJ6_DATA, PTJ6_IN, PTJ6_OUT, PTJ6_IN_PU),
+ PINMUX_DATA(PTJ5_DATA, PTJ5_IN, PTJ5_OUT, PTJ5_IN_PU),
+ PINMUX_DATA(PTJ4_DATA, PTJ4_IN, PTJ4_OUT, PTJ4_IN_PU),
+ PINMUX_DATA(PTJ3_DATA, PTJ3_IN, PTJ3_OUT, PTJ3_IN_PU),
+ PINMUX_DATA(PTJ2_DATA, PTJ2_IN, PTJ2_OUT, PTJ2_IN_PU),
+ PINMUX_DATA(PTJ1_DATA, PTJ1_IN, PTJ1_OUT, PTJ1_IN_PU),
+ PINMUX_DATA(PTJ0_DATA, PTJ0_IN, PTJ0_OUT, PTJ0_IN_PU),
+
+ /* PTK GPIO */
+ PINMUX_DATA(PTK3_DATA, PTK3_IN, PTK3_OUT, PTK3_IN_PU),
+ PINMUX_DATA(PTK2_DATA, PTK2_IN, PTK2_OUT, PTK2_IN_PU),
+ PINMUX_DATA(PTK1_DATA, PTK1_IN, PTK1_OUT, PTK1_IN_PU),
+ PINMUX_DATA(PTK0_DATA, PTK0_IN, PTK0_OUT, PTK0_IN_PU),
+
+ /* PTL GPIO */
+ PINMUX_DATA(PTL7_DATA, PTL7_IN, PTL7_OUT, PTL7_IN_PU),
+ PINMUX_DATA(PTL6_DATA, PTL6_IN, PTL6_OUT, PTL6_IN_PU),
+ PINMUX_DATA(PTL5_DATA, PTL5_IN, PTL5_OUT, PTL5_IN_PU),
+ PINMUX_DATA(PTL4_DATA, PTL4_IN, PTL4_OUT, PTL4_IN_PU),
+ PINMUX_DATA(PTL3_DATA, PTL3_IN, PTL3_OUT, PTL3_IN_PU),
+
+ /* PTM GPIO */
+ PINMUX_DATA(PTM7_DATA, PTM7_IN, PTM7_OUT, PTM7_IN_PU),
+ PINMUX_DATA(PTM6_DATA, PTM6_IN, PTM6_OUT, PTM6_IN_PU),
+ PINMUX_DATA(PTM5_DATA, PTM5_IN, PTM5_OUT, PTM5_IN_PU),
+ PINMUX_DATA(PTM4_DATA, PTM4_IN, PTM4_OUT, PTM4_IN_PU),
+ PINMUX_DATA(PTM3_DATA, PTM3_IN, PTM3_OUT, PTM3_IN_PU),
+ PINMUX_DATA(PTM2_DATA, PTM2_IN, PTM2_OUT, PTM2_IN_PU),
+ PINMUX_DATA(PTM1_DATA, PTM1_IN, PTM1_OUT, PTM1_IN_PU),
+ PINMUX_DATA(PTM0_DATA, PTM0_IN, PTM0_OUT, PTM0_IN_PU),
+
+ /* PTP GPIO */
+ PINMUX_DATA(PTP4_DATA, PTP4_IN, PTP4_OUT, PTP4_IN_PU),
+ PINMUX_DATA(PTP3_DATA, PTP3_IN, PTP3_OUT, PTP3_IN_PU),
+ PINMUX_DATA(PTP2_DATA, PTP2_IN, PTP2_OUT, PTP2_IN_PU),
+ PINMUX_DATA(PTP1_DATA, PTP1_IN, PTP1_OUT, PTP1_IN_PU),
+ PINMUX_DATA(PTP0_DATA, PTP0_IN, PTP0_OUT, PTP0_IN_PU),
+
+ /* PTR GPIO */
+ PINMUX_DATA(PTR7_DATA, PTR7_IN, PTR7_OUT, PTR7_IN_PU),
+ PINMUX_DATA(PTR6_DATA, PTR6_IN, PTR6_OUT, PTR6_IN_PU),
+ PINMUX_DATA(PTR5_DATA, PTR5_IN, PTR5_OUT, PTR5_IN_PU),
+ PINMUX_DATA(PTR4_DATA, PTR4_IN, PTR4_OUT, PTR4_IN_PU),
+ PINMUX_DATA(PTR3_DATA, PTR3_IN, PTR3_OUT, PTR3_IN_PU),
+ PINMUX_DATA(PTR2_DATA, PTR2_IN, PTR2_OUT, PTR2_IN_PU),
+ PINMUX_DATA(PTR1_DATA, PTR1_IN, PTR1_OUT, PTR1_IN_PU),
+ PINMUX_DATA(PTR0_DATA, PTR0_IN, PTR0_OUT, PTR0_IN_PU),
+
+ /* PTS GPIO */
+ PINMUX_DATA(PTS4_DATA, PTS4_IN, PTS4_OUT, PTS4_IN_PU),
+ PINMUX_DATA(PTS3_DATA, PTS3_IN, PTS3_OUT, PTS3_IN_PU),
+ PINMUX_DATA(PTS2_DATA, PTS2_IN, PTS2_OUT, PTS2_IN_PU),
+ PINMUX_DATA(PTS1_DATA, PTS1_IN, PTS1_OUT, PTS1_IN_PU),
+ PINMUX_DATA(PTS0_DATA, PTS0_IN, PTS0_OUT, PTS0_IN_PU),
+
+ /* PTT GPIO */
+ PINMUX_DATA(PTT4_DATA, PTT4_IN, PTT4_OUT, PTT4_IN_PU),
+ PINMUX_DATA(PTT3_DATA, PTT3_IN, PTT3_OUT, PTT3_IN_PU),
+ PINMUX_DATA(PTT2_DATA, PTT2_IN, PTT2_OUT, PTT2_IN_PU),
+ PINMUX_DATA(PTT1_DATA, PTT1_IN, PTT1_OUT, PTT1_IN_PU),
+ PINMUX_DATA(PTT0_DATA, PTT0_IN, PTT0_OUT, PTT0_IN_PU),
+
+ /* PTU GPIO */
+ PINMUX_DATA(PTU4_DATA, PTU4_IN, PTU4_OUT, PTU4_IN_PU),
+ PINMUX_DATA(PTU3_DATA, PTU3_IN, PTU3_OUT, PTU3_IN_PU),
+ PINMUX_DATA(PTU2_DATA, PTU2_IN, PTU2_OUT, PTU2_IN_PU),
+ PINMUX_DATA(PTU1_DATA, PTU1_IN, PTU1_OUT, PTU1_IN_PU),
+ PINMUX_DATA(PTU0_DATA, PTU0_IN, PTU0_OUT, PTU0_IN_PU),
+
+ /* PTV GPIO */
+ PINMUX_DATA(PTV4_DATA, PTV4_IN, PTV4_OUT, PTV4_IN_PU),
+ PINMUX_DATA(PTV3_DATA, PTV3_IN, PTV3_OUT, PTV3_IN_PU),
+ PINMUX_DATA(PTV2_DATA, PTV2_IN, PTV2_OUT, PTV2_IN_PU),
+ PINMUX_DATA(PTV1_DATA, PTV1_IN, PTV1_OUT, PTV1_IN_PU),
+ PINMUX_DATA(PTV0_DATA, PTV0_IN, PTV0_OUT, PTV0_IN_PU),
+
+ /* PTA FN */
+ PINMUX_DATA(D23_MARK, PTA7_FN),
+ PINMUX_DATA(D22_MARK, PTA6_FN),
+ PINMUX_DATA(D21_MARK, PTA5_FN),
+ PINMUX_DATA(D20_MARK, PTA4_FN),
+ PINMUX_DATA(D19_MARK, PTA3_FN),
+ PINMUX_DATA(D18_MARK, PTA2_FN),
+ PINMUX_DATA(D17_MARK, PTA1_FN),
+ PINMUX_DATA(D16_MARK, PTA0_FN),
+
+ /* PTB FN */
+ PINMUX_DATA(D31_MARK, PTB7_FN),
+ PINMUX_DATA(D30_MARK, PTB6_FN),
+ PINMUX_DATA(D29_MARK, PTB5_FN),
+ PINMUX_DATA(D28_MARK, PTB4_FN),
+ PINMUX_DATA(D27_MARK, PTB3_FN),
+ PINMUX_DATA(D26_MARK, PTB2_FN),
+ PINMUX_DATA(D25_MARK, PTB1_FN),
+ PINMUX_DATA(D24_MARK, PTB0_FN),
+
+ /* PTC FN */
+ PINMUX_DATA(LCD_DATA7_MARK, PTC7_FN),
+ PINMUX_DATA(LCD_DATA6_MARK, PTC6_FN),
+ PINMUX_DATA(LCD_DATA5_MARK, PTC5_FN),
+ PINMUX_DATA(LCD_DATA4_MARK, PTC4_FN),
+ PINMUX_DATA(LCD_DATA3_MARK, PTC3_FN),
+ PINMUX_DATA(LCD_DATA2_MARK, PTC2_FN),
+ PINMUX_DATA(LCD_DATA1_MARK, PTC1_FN),
+ PINMUX_DATA(LCD_DATA0_MARK, PTC0_FN),
+
+ /* PTD FN */
+ PINMUX_DATA(LCD_DATA15_MARK, PTD7_FN),
+ PINMUX_DATA(LCD_DATA14_MARK, PTD6_FN),
+ PINMUX_DATA(LCD_DATA13_MARK, PTD5_FN),
+ PINMUX_DATA(LCD_DATA12_MARK, PTD4_FN),
+ PINMUX_DATA(LCD_DATA11_MARK, PTD3_FN),
+ PINMUX_DATA(LCD_DATA10_MARK, PTD2_FN),
+ PINMUX_DATA(LCD_DATA9_MARK, PTD1_FN),
+ PINMUX_DATA(LCD_DATA8_MARK, PTD0_FN),
+
+ /* PTE FN */
+ PINMUX_DATA(IIC_SCL_MARK, PSELB_9_8_00, PTE6_FN),
+ PINMUX_DATA(AFE_RXIN_MARK, PSELB_9_8_11, PTE6_FN),
+ PINMUX_DATA(IIC_SDA_MARK, PSELB_9_8_00, PTE5_FN),
+ PINMUX_DATA(AFE_RDET_MARK, PSELB_9_8_11, PTE5_FN),
+ PINMUX_DATA(LCD_M_DISP_MARK, PTE4_FN),
+ PINMUX_DATA(LCD_CL1_MARK, PTE3_FN),
+ PINMUX_DATA(LCD_CL2_MARK, PTE2_FN),
+ PINMUX_DATA(LCD_DON_MARK, PTE1_FN),
+ PINMUX_DATA(LCD_FLM_MARK, PTE0_FN),
+
+ /* PTF FN */
+ PINMUX_DATA(DA1_MARK, PTF6_FN),
+ PINMUX_DATA(DA0_MARK, PTF5_FN),
+ PINMUX_DATA(AN3_MARK, PTF4_FN),
+ PINMUX_DATA(AN2_MARK, PTF3_FN),
+ PINMUX_DATA(AN1_MARK, PTF2_FN),
+ PINMUX_DATA(AN0_MARK, PTF1_FN),
+ PINMUX_DATA(ADTRG_MARK, PTF0_FN),
+
+ /* PTG FN */
+ PINMUX_DATA(USB1D_RCV_MARK, PSELA_3_2_00, PTG6_FN),
+ PINMUX_DATA(AFE_FS_MARK, PSELA_3_2_01, PTG6_FN),
+ PINMUX_DATA(PCC_REG_MARK, PSELA_3_2_10, PTG6_FN),
+ PINMUX_DATA(IRQ5_MARK, PSELA_3_2_11, PTG6_FN),
+ PINMUX_DATA(USB1D_TXSE0_MARK, PSELA_5_4_00, PTG5_FN),
+ PINMUX_DATA(AFE_TXOUT_MARK, PSELA_5_4_01, PTG5_FN),
+ PINMUX_DATA(PCC_DRV_MARK, PSELA_5_4_10, PTG5_FN),
+ PINMUX_DATA(IRQ4_MARK, PSELA_5_4_11, PTG5_FN),
+ PINMUX_DATA(USB1D_TXDPLS_MARK, PSELA_7_6_00, PTG4_FN),
+ PINMUX_DATA(AFE_SCLK_MARK, PSELA_7_6_01, PTG4_FN),
+ PINMUX_DATA(IOIS16_MARK, PSELA_7_6_10, PTG4_FN),
+ PINMUX_DATA(USB1D_DMNS_MARK, PSELA_9_8_00, PTG3_FN),
+ PINMUX_DATA(AFE_RLYCNT_MARK, PSELA_9_8_01, PTG3_FN),
+ PINMUX_DATA(PCC_BVD2_MARK, PSELA_9_8_10, PTG3_FN),
+ PINMUX_DATA(USB1D_DPLS_MARK, PSELA_11_10_00, PTG2_FN),
+ PINMUX_DATA(AFE_HC1_MARK, PSELA_11_10_01, PTG2_FN),
+ PINMUX_DATA(PCC_BVD1_MARK, PSELA_11_10_10, PTG2_FN),
+ PINMUX_DATA(USB1D_SPEED_MARK, PSELA_13_12_00, PTG1_FN),
+ PINMUX_DATA(PCC_CD2_MARK, PSELA_13_12_10, PTG1_FN),
+ PINMUX_DATA(USB1D_TXENL_MARK, PSELA_15_14_00, PTG0_FN),
+ PINMUX_DATA(PCC_CD1_MARK, PSELA_15_14_10, PTG0_FN),
+
+ /* PTH FN */
+ PINMUX_DATA(RAS_MARK, PTH6_FN),
+ PINMUX_DATA(CAS_MARK, PTH5_FN),
+ PINMUX_DATA(CKE_MARK, PTH4_FN),
+ PINMUX_DATA(STATUS1_MARK, PTH3_FN),
+ PINMUX_DATA(STATUS0_MARK, PTH2_FN),
+ PINMUX_DATA(USB2_PWR_EN_MARK, PTH1_FN),
+ PINMUX_DATA(USB1_PWR_EN_USBF_UPLUP_MARK, PTH0_FN),
+
+ /* PTJ FN */
+ PINMUX_DATA(AUDCK_MARK, PTJ6_FN),
+ PINMUX_DATA(ASEBRKAK_MARK, PTJ5_FN),
+ PINMUX_DATA(AUDATA3_MARK, PTJ4_FN),
+ PINMUX_DATA(AUDATA2_MARK, PTJ3_FN),
+ PINMUX_DATA(AUDATA1_MARK, PTJ2_FN),
+ PINMUX_DATA(AUDATA0_MARK, PTJ1_FN),
+ PINMUX_DATA(AUDSYNC_MARK, PTJ0_FN),
+
+ /* PTK FN */
+ PINMUX_DATA(PCC_RESET_MARK, PTK3_FN),
+ PINMUX_DATA(PCC_RDY_MARK, PTK2_FN),
+ PINMUX_DATA(PCC_VS2_MARK, PTK1_FN),
+ PINMUX_DATA(PCC_VS1_MARK, PTK0_FN),
+
+ /* PTL FN */
+ PINMUX_DATA(TRST_MARK, PTL7_FN),
+ PINMUX_DATA(TMS_MARK, PTL6_FN),
+ PINMUX_DATA(TDO_MARK, PTL5_FN),
+ PINMUX_DATA(TDI_MARK, PTL4_FN),
+ PINMUX_DATA(TCK_MARK, PTL3_FN),
+
+ /* PTM FN */
+ PINMUX_DATA(DREQ1_MARK, PTM7_FN),
+ PINMUX_DATA(DREQ0_MARK, PTM6_FN),
+ PINMUX_DATA(DACK1_MARK, PTM5_FN),
+ PINMUX_DATA(DACK0_MARK, PTM4_FN),
+ PINMUX_DATA(TEND1_MARK, PTM3_FN),
+ PINMUX_DATA(TEND0_MARK, PTM2_FN),
+ PINMUX_DATA(CS5B_CE1A_MARK, PTM1_FN),
+ PINMUX_DATA(CS6B_CE1B_MARK, PTM0_FN),
+
+ /* PTP FN */
+ PINMUX_DATA(USB1D_SUSPEND_MARK, PSELA_1_0_00, PTP4_FN),
+ PINMUX_DATA(REFOUT_MARK, PSELA_1_0_01, PTP4_FN),
+ PINMUX_DATA(IRQOUT_MARK, PSELA_1_0_10, PTP4_FN),
+ PINMUX_DATA(IRQ3_IRL3_MARK, PTP3_FN),
+ PINMUX_DATA(IRQ2_IRL2_MARK, PTP2_FN),
+ PINMUX_DATA(IRQ1_IRL1_MARK, PTP1_FN),
+ PINMUX_DATA(IRQ0_IRL0_MARK, PTP0_FN),
+
+ /* PTR FN */
+ PINMUX_DATA(A25_MARK, PTR7_FN),
+ PINMUX_DATA(A24_MARK, PTR6_FN),
+ PINMUX_DATA(A23_MARK, PTR5_FN),
+ PINMUX_DATA(A22_MARK, PTR4_FN),
+ PINMUX_DATA(A21_MARK, PTR3_FN),
+ PINMUX_DATA(A20_MARK, PTR2_FN),
+ PINMUX_DATA(A19_MARK, PTR1_FN),
+ PINMUX_DATA(A0_MARK, PTR0_FN),
+
+ /* PTS FN */
+ PINMUX_DATA(SIOF0_SYNC_MARK, PTS4_FN),
+ PINMUX_DATA(SIOF0_MCLK_MARK, PTS3_FN),
+ PINMUX_DATA(SIOF0_TXD_MARK, PTS2_FN),
+ PINMUX_DATA(SIOF0_RXD_MARK, PTS1_FN),
+ PINMUX_DATA(SIOF0_SCK_MARK, PTS0_FN),
+
+ /* PTT FN */
+ PINMUX_DATA(SCIF0_CTS_MARK, PSELB_15_14_00, PTT4_FN),
+ PINMUX_DATA(TPU_TO1_MARK, PSELB_15_14_11, PTT4_FN),
+ PINMUX_DATA(SCIF0_RTS_MARK, PSELB_15_14_00, PTT3_FN),
+ PINMUX_DATA(TPU_TO0_MARK, PSELB_15_14_11, PTT3_FN),
+ PINMUX_DATA(SCIF0_TXD_MARK, PTT2_FN),
+ PINMUX_DATA(SCIF0_RXD_MARK, PTT1_FN),
+ PINMUX_DATA(SCIF0_SCK_MARK, PTT0_FN),
+
+ /* PTU FN */
+ PINMUX_DATA(SIOF1_SYNC_MARK, PTU4_FN),
+ PINMUX_DATA(SIOF1_MCLK_MARK, PSELD_11_10_00, PTU3_FN),
+ PINMUX_DATA(TPU_TI3B_MARK, PSELD_11_10_01, PTU3_FN),
+ PINMUX_DATA(SIOF1_TXD_MARK, PSELD_15_14_00, PTU2_FN),
+ PINMUX_DATA(TPU_TI3A_MARK, PSELD_15_14_01, PTU2_FN),
+ PINMUX_DATA(MMC_DAT_MARK, PSELD_15_14_10, PTU2_FN),
+ PINMUX_DATA(SIOF1_RXD_MARK, PSELC_13_12_00, PTU1_FN),
+ PINMUX_DATA(TPU_TI2B_MARK, PSELC_13_12_01, PTU1_FN),
+ PINMUX_DATA(MMC_CMD_MARK, PSELC_13_12_10, PTU1_FN),
+ PINMUX_DATA(SIOF1_SCK_MARK, PSELC_15_14_00, PTU0_FN),
+ PINMUX_DATA(TPU_TI2A_MARK, PSELC_15_14_01, PTU0_FN),
+ PINMUX_DATA(MMC_CLK_MARK, PSELC_15_14_10, PTU0_FN),
+
+ /* PTV FN */
+ PINMUX_DATA(SCIF1_CTS_MARK, PSELB_11_10_00, PTV4_FN),
+ PINMUX_DATA(TPU_TO3_MARK, PSELB_11_10_01, PTV4_FN),
+ PINMUX_DATA(MMC_VDDON_MARK, PSELB_11_10_10, PTV4_FN),
+ PINMUX_DATA(LCD_VEPWC_MARK, PSELB_11_10_11, PTV4_FN),
+ PINMUX_DATA(SCIF1_RTS_MARK, PSELB_13_12_00, PTV3_FN),
+ PINMUX_DATA(TPU_TO2_MARK, PSELB_13_12_01, PTV3_FN),
+ PINMUX_DATA(MMC_ODMOD_MARK, PSELB_13_12_10, PTV3_FN),
+ PINMUX_DATA(LCD_VCPWC_MARK, PSELB_13_12_11, PTV3_FN),
+ PINMUX_DATA(SCIF1_TXD_MARK, PSELC_9_8_00, PTV2_FN),
+ PINMUX_DATA(SIM_D_MARK, PSELC_9_8_10, PTV2_FN),
+ PINMUX_DATA(SCIF1_RXD_MARK, PSELC_11_10_00, PTV1_FN),
+ PINMUX_DATA(SIM_RST_MARK, PSELC_11_10_10, PTV1_FN),
+ PINMUX_DATA(SCIF1_SCK_MARK, PSELD_1_0_00, PTV0_FN),
+ PINMUX_DATA(SIM_CLK_MARK, PSELD_1_0_10, PTV0_FN),
+};
+
+static struct pinmux_gpio pinmux_gpios[] = {
+ /* PTA */
+ PINMUX_GPIO(GPIO_PTA7, PTA7_DATA),
+ PINMUX_GPIO(GPIO_PTA6, PTA6_DATA),
+ PINMUX_GPIO(GPIO_PTA5, PTA5_DATA),
+ PINMUX_GPIO(GPIO_PTA4, PTA4_DATA),
+ PINMUX_GPIO(GPIO_PTA3, PTA3_DATA),
+ PINMUX_GPIO(GPIO_PTA2, PTA2_DATA),
+ PINMUX_GPIO(GPIO_PTA1, PTA1_DATA),
+ PINMUX_GPIO(GPIO_PTA0, PTA0_DATA),
+
+ /* PTB */
+ PINMUX_GPIO(GPIO_PTB7, PTB7_DATA),
+ PINMUX_GPIO(GPIO_PTB6, PTB6_DATA),
+ PINMUX_GPIO(GPIO_PTB5, PTB5_DATA),
+ PINMUX_GPIO(GPIO_PTB4, PTB4_DATA),
+ PINMUX_GPIO(GPIO_PTB3, PTB3_DATA),
+ PINMUX_GPIO(GPIO_PTB2, PTB2_DATA),
+ PINMUX_GPIO(GPIO_PTB1, PTB1_DATA),
+ PINMUX_GPIO(GPIO_PTB0, PTB0_DATA),
+
+ /* PTC */
+ PINMUX_GPIO(GPIO_PTC7, PTC7_DATA),
+ PINMUX_GPIO(GPIO_PTC6, PTC6_DATA),
+ PINMUX_GPIO(GPIO_PTC5, PTC5_DATA),
+ PINMUX_GPIO(GPIO_PTC4, PTC4_DATA),
+ PINMUX_GPIO(GPIO_PTC3, PTC3_DATA),
+ PINMUX_GPIO(GPIO_PTC2, PTC2_DATA),
+ PINMUX_GPIO(GPIO_PTC1, PTC1_DATA),
+ PINMUX_GPIO(GPIO_PTC0, PTC0_DATA),
+
+ /* PTD */
+ PINMUX_GPIO(GPIO_PTD7, PTD7_DATA),
+ PINMUX_GPIO(GPIO_PTD6, PTD6_DATA),
+ PINMUX_GPIO(GPIO_PTD5, PTD5_DATA),
+ PINMUX_GPIO(GPIO_PTD4, PTD4_DATA),
+ PINMUX_GPIO(GPIO_PTD3, PTD3_DATA),
+ PINMUX_GPIO(GPIO_PTD2, PTD2_DATA),
+ PINMUX_GPIO(GPIO_PTD1, PTD1_DATA),
+ PINMUX_GPIO(GPIO_PTD0, PTD0_DATA),
+
+ /* PTE */
+ PINMUX_GPIO(GPIO_PTE6, PTE6_DATA),
+ PINMUX_GPIO(GPIO_PTE5, PTE5_DATA),
+ PINMUX_GPIO(GPIO_PTE4, PTE4_DATA),
+ PINMUX_GPIO(GPIO_PTE3, PTE3_DATA),
+ PINMUX_GPIO(GPIO_PTE2, PTE2_DATA),
+ PINMUX_GPIO(GPIO_PTE1, PTE1_DATA),
+ PINMUX_GPIO(GPIO_PTE0, PTE0_DATA),
+
+ /* PTF */
+ PINMUX_GPIO(GPIO_PTF6, PTF6_DATA),
+ PINMUX_GPIO(GPIO_PTF5, PTF5_DATA),
+ PINMUX_GPIO(GPIO_PTF4, PTF4_DATA),
+ PINMUX_GPIO(GPIO_PTF3, PTF3_DATA),
+ PINMUX_GPIO(GPIO_PTF2, PTF2_DATA),
+ PINMUX_GPIO(GPIO_PTF1, PTF1_DATA),
+ PINMUX_GPIO(GPIO_PTF0, PTF0_DATA),
+
+ /* PTG */
+ PINMUX_GPIO(GPIO_PTG6, PTG6_DATA),
+ PINMUX_GPIO(GPIO_PTG5, PTG5_DATA),
+ PINMUX_GPIO(GPIO_PTG4, PTG4_DATA),
+ PINMUX_GPIO(GPIO_PTG3, PTG3_DATA),
+ PINMUX_GPIO(GPIO_PTG2, PTG2_DATA),
+ PINMUX_GPIO(GPIO_PTG1, PTG1_DATA),
+ PINMUX_GPIO(GPIO_PTG0, PTG0_DATA),
+
+ /* PTH */
+ PINMUX_GPIO(GPIO_PTH6, PTH6_DATA),
+ PINMUX_GPIO(GPIO_PTH5, PTH5_DATA),
+ PINMUX_GPIO(GPIO_PTH4, PTH4_DATA),
+ PINMUX_GPIO(GPIO_PTH3, PTH3_DATA),
+ PINMUX_GPIO(GPIO_PTH2, PTH2_DATA),
+ PINMUX_GPIO(GPIO_PTH1, PTH1_DATA),
+ PINMUX_GPIO(GPIO_PTH0, PTH0_DATA),
+
+ /* PTJ */
+ PINMUX_GPIO(GPIO_PTJ6, PTJ6_DATA),
+ PINMUX_GPIO(GPIO_PTJ5, PTJ5_DATA),
+ PINMUX_GPIO(GPIO_PTJ4, PTJ4_DATA),
+ PINMUX_GPIO(GPIO_PTJ3, PTJ3_DATA),
+ PINMUX_GPIO(GPIO_PTJ2, PTJ2_DATA),
+ PINMUX_GPIO(GPIO_PTJ1, PTJ1_DATA),
+ PINMUX_GPIO(GPIO_PTJ0, PTJ0_DATA),
+
+ /* PTK */
+ PINMUX_GPIO(GPIO_PTK3, PTK3_DATA),
+ PINMUX_GPIO(GPIO_PTK2, PTK2_DATA),
+ PINMUX_GPIO(GPIO_PTK1, PTK1_DATA),
+ PINMUX_GPIO(GPIO_PTK0, PTK0_DATA),
+
+ /* PTL */
+ PINMUX_GPIO(GPIO_PTL7, PTL7_DATA),
+ PINMUX_GPIO(GPIO_PTL6, PTL6_DATA),
+ PINMUX_GPIO(GPIO_PTL5, PTL5_DATA),
+ PINMUX_GPIO(GPIO_PTL4, PTL4_DATA),
+ PINMUX_GPIO(GPIO_PTL3, PTL3_DATA),
+
+ /* PTM */
+ PINMUX_GPIO(GPIO_PTM7, PTM7_DATA),
+ PINMUX_GPIO(GPIO_PTM6, PTM6_DATA),
+ PINMUX_GPIO(GPIO_PTM5, PTM5_DATA),
+ PINMUX_GPIO(GPIO_PTM4, PTM4_DATA),
+ PINMUX_GPIO(GPIO_PTM3, PTM3_DATA),
+ PINMUX_GPIO(GPIO_PTM2, PTM2_DATA),
+ PINMUX_GPIO(GPIO_PTM1, PTM1_DATA),
+ PINMUX_GPIO(GPIO_PTM0, PTM0_DATA),
+
+ /* PTP */
+ PINMUX_GPIO(GPIO_PTP4, PTP4_DATA),
+ PINMUX_GPIO(GPIO_PTP3, PTP3_DATA),
+ PINMUX_GPIO(GPIO_PTP2, PTP2_DATA),
+ PINMUX_GPIO(GPIO_PTP1, PTP1_DATA),
+ PINMUX_GPIO(GPIO_PTP0, PTP0_DATA),
+
+ /* PTR */
+ PINMUX_GPIO(GPIO_PTR7, PTR7_DATA),
+ PINMUX_GPIO(GPIO_PTR6, PTR6_DATA),
+ PINMUX_GPIO(GPIO_PTR5, PTR5_DATA),
+ PINMUX_GPIO(GPIO_PTR4, PTR4_DATA),
+ PINMUX_GPIO(GPIO_PTR3, PTR3_DATA),
+ PINMUX_GPIO(GPIO_PTR2, PTR2_DATA),
+ PINMUX_GPIO(GPIO_PTR1, PTR1_DATA),
+ PINMUX_GPIO(GPIO_PTR0, PTR0_DATA),
+
+ /* PTS */
+ PINMUX_GPIO(GPIO_PTS4, PTS4_DATA),
+ PINMUX_GPIO(GPIO_PTS3, PTS3_DATA),
+ PINMUX_GPIO(GPIO_PTS2, PTS2_DATA),
+ PINMUX_GPIO(GPIO_PTS1, PTS1_DATA),
+ PINMUX_GPIO(GPIO_PTS0, PTS0_DATA),
+
+ /* PTT */
+ PINMUX_GPIO(GPIO_PTT4, PTT4_DATA),
+ PINMUX_GPIO(GPIO_PTT3, PTT3_DATA),
+ PINMUX_GPIO(GPIO_PTT2, PTT2_DATA),
+ PINMUX_GPIO(GPIO_PTT1, PTT1_DATA),
+ PINMUX_GPIO(GPIO_PTT0, PTT0_DATA),
+
+ /* PTU */
+ PINMUX_GPIO(GPIO_PTU4, PTU4_DATA),
+ PINMUX_GPIO(GPIO_PTU3, PTU3_DATA),
+ PINMUX_GPIO(GPIO_PTU2, PTU2_DATA),
+ PINMUX_GPIO(GPIO_PTU1, PTU1_DATA),
+ PINMUX_GPIO(GPIO_PTU0, PTU0_DATA),
+
+ /* PTV */
+ PINMUX_GPIO(GPIO_PTV4, PTV4_DATA),
+ PINMUX_GPIO(GPIO_PTV3, PTV3_DATA),
+ PINMUX_GPIO(GPIO_PTV2, PTV2_DATA),
+ PINMUX_GPIO(GPIO_PTV1, PTV1_DATA),
+ PINMUX_GPIO(GPIO_PTV0, PTV0_DATA),
+
+ /* BSC */
+ PINMUX_GPIO(GPIO_FN_D31, D31_MARK),
+ PINMUX_GPIO(GPIO_FN_D30, D30_MARK),
+ PINMUX_GPIO(GPIO_FN_D29, D29_MARK),
+ PINMUX_GPIO(GPIO_FN_D28, D28_MARK),
+ PINMUX_GPIO(GPIO_FN_D27, D27_MARK),
+ PINMUX_GPIO(GPIO_FN_D26, D26_MARK),
+ PINMUX_GPIO(GPIO_FN_D25, D25_MARK),
+ PINMUX_GPIO(GPIO_FN_D24, D24_MARK),
+ PINMUX_GPIO(GPIO_FN_D23, D23_MARK),
+ PINMUX_GPIO(GPIO_FN_D22, D22_MARK),
+ PINMUX_GPIO(GPIO_FN_D21, D21_MARK),
+ PINMUX_GPIO(GPIO_FN_D20, D20_MARK),
+ PINMUX_GPIO(GPIO_FN_D19, D19_MARK),
+ PINMUX_GPIO(GPIO_FN_D18, D18_MARK),
+ PINMUX_GPIO(GPIO_FN_D17, D17_MARK),
+ PINMUX_GPIO(GPIO_FN_D16, D16_MARK),
+ PINMUX_GPIO(GPIO_FN_IOIS16, IOIS16_MARK),
+ PINMUX_GPIO(GPIO_FN_RAS, RAS_MARK),
+ PINMUX_GPIO(GPIO_FN_CAS, CAS_MARK),
+ PINMUX_GPIO(GPIO_FN_CKE, CKE_MARK),
+ PINMUX_GPIO(GPIO_FN_CS5B_CE1A, CS5B_CE1A_MARK),
+ PINMUX_GPIO(GPIO_FN_CS6B_CE1B, CS6B_CE1B_MARK),
+ PINMUX_GPIO(GPIO_FN_A25, A25_MARK),
+ PINMUX_GPIO(GPIO_FN_A24, A24_MARK),
+ PINMUX_GPIO(GPIO_FN_A23, A23_MARK),
+ PINMUX_GPIO(GPIO_FN_A22, A22_MARK),
+ PINMUX_GPIO(GPIO_FN_A21, A21_MARK),
+ PINMUX_GPIO(GPIO_FN_A20, A20_MARK),
+ PINMUX_GPIO(GPIO_FN_A19, A19_MARK),
+ PINMUX_GPIO(GPIO_FN_A0, A0_MARK),
+ PINMUX_GPIO(GPIO_FN_REFOUT, REFOUT_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQOUT, IRQOUT_MARK),
+
+ /* LCDC */
+ PINMUX_GPIO(GPIO_FN_LCD_DATA15, LCD_DATA15_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA14, LCD_DATA14_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA13, LCD_DATA13_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA12, LCD_DATA12_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA11, LCD_DATA11_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA10, LCD_DATA10_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA9, LCD_DATA9_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA8, LCD_DATA8_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA7, LCD_DATA7_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA6, LCD_DATA6_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA5, LCD_DATA5_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA4, LCD_DATA4_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA3, LCD_DATA3_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA2, LCD_DATA2_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA1, LCD_DATA1_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA0, LCD_DATA0_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_M_DISP, LCD_M_DISP_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_CL1, LCD_CL1_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_CL2, LCD_CL2_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DON, LCD_DON_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_FLM, LCD_FLM_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_VEPWC, LCD_VEPWC_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_VCPWC, LCD_VCPWC_MARK),
+
+ /* AFEIF */
+ PINMUX_GPIO(GPIO_FN_AFE_RXIN, AFE_RXIN_MARK),
+ PINMUX_GPIO(GPIO_FN_AFE_RDET, AFE_RDET_MARK),
+ PINMUX_GPIO(GPIO_FN_AFE_FS, AFE_FS_MARK),
+ PINMUX_GPIO(GPIO_FN_AFE_TXOUT, AFE_TXOUT_MARK),
+ PINMUX_GPIO(GPIO_FN_AFE_SCLK, AFE_SCLK_MARK),
+ PINMUX_GPIO(GPIO_FN_AFE_RLYCNT, AFE_RLYCNT_MARK),
+ PINMUX_GPIO(GPIO_FN_AFE_HC1, AFE_HC1_MARK),
+
+ /* IIC */
+ PINMUX_GPIO(GPIO_FN_IIC_SCL, IIC_SCL_MARK),
+ PINMUX_GPIO(GPIO_FN_IIC_SDA, IIC_SDA_MARK),
+
+ /* DAC */
+ PINMUX_GPIO(GPIO_FN_DA1, DA1_MARK),
+ PINMUX_GPIO(GPIO_FN_DA0, DA0_MARK),
+
+ /* ADC */
+ PINMUX_GPIO(GPIO_FN_AN3, AN3_MARK),
+ PINMUX_GPIO(GPIO_FN_AN2, AN2_MARK),
+ PINMUX_GPIO(GPIO_FN_AN1, AN1_MARK),
+ PINMUX_GPIO(GPIO_FN_AN0, AN0_MARK),
+ PINMUX_GPIO(GPIO_FN_ADTRG, ADTRG_MARK),
+
+ /* USB */
+ PINMUX_GPIO(GPIO_FN_USB1D_RCV, USB1D_RCV_MARK),
+ PINMUX_GPIO(GPIO_FN_USB1D_TXSE0, USB1D_TXSE0_MARK),
+ PINMUX_GPIO(GPIO_FN_USB1D_TXDPLS, USB1D_TXDPLS_MARK),
+ PINMUX_GPIO(GPIO_FN_USB1D_DMNS, USB1D_DMNS_MARK),
+ PINMUX_GPIO(GPIO_FN_USB1D_DPLS, USB1D_DPLS_MARK),
+ PINMUX_GPIO(GPIO_FN_USB1D_SPEED, USB1D_SPEED_MARK),
+ PINMUX_GPIO(GPIO_FN_USB1D_TXENL, USB1D_TXENL_MARK),
+
+ PINMUX_GPIO(GPIO_FN_USB2_PWR_EN, USB2_PWR_EN_MARK),
+ PINMUX_GPIO(GPIO_FN_USB1_PWR_EN_USBF_UPLUP,
+ USB1_PWR_EN_USBF_UPLUP_MARK),
+ PINMUX_GPIO(GPIO_FN_USB1D_SUSPEND, USB1D_SUSPEND_MARK),
+
+ /* INTC */
+ PINMUX_GPIO(GPIO_FN_IRQ5, IRQ5_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ4, IRQ4_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ3_IRL3, IRQ3_IRL3_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ2_IRL2, IRQ2_IRL2_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ1_IRL1, IRQ1_IRL1_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ0_IRL0, IRQ0_IRL0_MARK),
+
+ /* PCC */
+ PINMUX_GPIO(GPIO_FN_PCC_REG, PCC_REG_MARK),
+ PINMUX_GPIO(GPIO_FN_PCC_DRV, PCC_DRV_MARK),
+ PINMUX_GPIO(GPIO_FN_PCC_BVD2, PCC_BVD2_MARK),
+ PINMUX_GPIO(GPIO_FN_PCC_BVD1, PCC_BVD1_MARK),
+ PINMUX_GPIO(GPIO_FN_PCC_CD2, PCC_CD2_MARK),
+ PINMUX_GPIO(GPIO_FN_PCC_CD1, PCC_CD1_MARK),
+ PINMUX_GPIO(GPIO_FN_PCC_RESET, PCC_RESET_MARK),
+ PINMUX_GPIO(GPIO_FN_PCC_RDY, PCC_RDY_MARK),
+ PINMUX_GPIO(GPIO_FN_PCC_VS2, PCC_VS2_MARK),
+ PINMUX_GPIO(GPIO_FN_PCC_VS1, PCC_VS1_MARK),
+
+ /* HUDI */
+ PINMUX_GPIO(GPIO_FN_AUDATA3, AUDATA3_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDATA2, AUDATA2_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDATA1, AUDATA1_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDATA0, AUDATA0_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDCK, AUDCK_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDSYNC, AUDSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_ASEBRKAK, ASEBRKAK_MARK),
+ PINMUX_GPIO(GPIO_FN_TRST, TRST_MARK),
+ PINMUX_GPIO(GPIO_FN_TMS, TMS_MARK),
+ PINMUX_GPIO(GPIO_FN_TDO, TDO_MARK),
+ PINMUX_GPIO(GPIO_FN_TDI, TDI_MARK),
+ PINMUX_GPIO(GPIO_FN_TCK, TCK_MARK),
+
+ /* DMAC */
+ PINMUX_GPIO(GPIO_FN_DACK1, DACK1_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ1, DREQ1_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK0, DACK0_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ0, DREQ0_MARK),
+ PINMUX_GPIO(GPIO_FN_TEND1, TEND1_MARK),
+ PINMUX_GPIO(GPIO_FN_TEND0, TEND0_MARK),
+
+ /* SIOF0 */
+ PINMUX_GPIO(GPIO_FN_SIOF0_SYNC, SIOF0_SYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF0_MCLK, SIOF0_MCLK_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF0_TXD, SIOF0_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF0_RXD, SIOF0_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF0_SCK, SIOF0_SCK_MARK),
+
+ /* SIOF1 */
+ PINMUX_GPIO(GPIO_FN_SIOF1_SYNC, SIOF1_SYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF1_MCLK, SIOF1_MCLK_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF1_TXD, SIOF1_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF1_RXD, SIOF1_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF1_SCK, SIOF1_SCK_MARK),
+
+ /* SCIF0 */
+ PINMUX_GPIO(GPIO_FN_SCIF0_TXD, SCIF0_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_RXD, SCIF0_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_RTS, SCIF0_RTS_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_CTS, SCIF0_CTS_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_SCK, SCIF0_SCK_MARK),
+
+ /* SCIF1 */
+ PINMUX_GPIO(GPIO_FN_SCIF1_TXD, SCIF1_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF1_RXD, SCIF1_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF1_RTS, SCIF1_RTS_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF1_CTS, SCIF1_CTS_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF1_SCK, SCIF1_SCK_MARK),
+
+ /* TPU */
+ PINMUX_GPIO(GPIO_FN_TPU_TO1, TPU_TO1_MARK),
+ PINMUX_GPIO(GPIO_FN_TPU_TO0, TPU_TO0_MARK),
+ PINMUX_GPIO(GPIO_FN_TPU_TI3B, TPU_TI3B_MARK),
+ PINMUX_GPIO(GPIO_FN_TPU_TI3A, TPU_TI3A_MARK),
+ PINMUX_GPIO(GPIO_FN_TPU_TI2B, TPU_TI2B_MARK),
+ PINMUX_GPIO(GPIO_FN_TPU_TI2A, TPU_TI2A_MARK),
+ PINMUX_GPIO(GPIO_FN_TPU_TO3, TPU_TO3_MARK),
+ PINMUX_GPIO(GPIO_FN_TPU_TO2, TPU_TO2_MARK),
+
+ /* SIM */
+ PINMUX_GPIO(GPIO_FN_SIM_D, SIM_D_MARK),
+ PINMUX_GPIO(GPIO_FN_SIM_CLK, SIM_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_SIM_RST, SIM_RST_MARK),
+
+ /* MMC */
+ PINMUX_GPIO(GPIO_FN_MMC_DAT, MMC_DAT_MARK),
+ PINMUX_GPIO(GPIO_FN_MMC_CMD, MMC_CMD_MARK),
+ PINMUX_GPIO(GPIO_FN_MMC_CLK, MMC_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_MMC_VDDON, MMC_VDDON_MARK),
+ PINMUX_GPIO(GPIO_FN_MMC_ODMOD, MMC_ODMOD_MARK),
+
+ /* SYSC */
+ PINMUX_GPIO(GPIO_FN_STATUS0, STATUS0_MARK),
+ PINMUX_GPIO(GPIO_FN_STATUS1, STATUS1_MARK),
+};
+
+static struct pinmux_cfg_reg pinmux_config_regs[] = {
+ { PINMUX_CFG_REG("PACR", 0xa4050100, 16, 2) {
+ PTA7_FN, PTA7_OUT, PTA7_IN_PU, PTA7_IN,
+ PTA6_FN, PTA6_OUT, PTA6_IN_PU, PTA6_IN,
+ PTA5_FN, PTA5_OUT, PTA5_IN_PU, PTA5_IN,
+ PTA4_FN, PTA4_OUT, PTA4_IN_PU, PTA4_IN,
+ PTA3_FN, PTA3_OUT, PTA3_IN_PU, PTA3_IN,
+ PTA2_FN, PTA2_OUT, PTA2_IN_PU, PTA2_IN,
+ PTA1_FN, PTA1_OUT, PTA1_IN_PU, PTA1_IN,
+ PTA0_FN, PTA0_OUT, PTA0_IN_PU, PTA0_IN }
+ },
+ { PINMUX_CFG_REG("PBCR", 0xa4050102, 16, 2) {
+ PTB7_FN, PTB7_OUT, PTB7_IN_PU, PTB7_IN,
+ PTB6_FN, PTB6_OUT, PTB6_IN_PU, PTB6_IN,
+ PTB5_FN, PTB5_OUT, PTB5_IN_PU, PTB5_IN,
+ PTB4_FN, PTB4_OUT, PTB4_IN_PU, PTB4_IN,
+ PTB3_FN, PTB3_OUT, PTB3_IN_PU, PTB3_IN,
+ PTB2_FN, PTB2_OUT, PTB2_IN_PU, PTB2_IN,
+ PTB1_FN, PTB1_OUT, PTB1_IN_PU, PTB1_IN,
+ PTB0_FN, PTB0_OUT, PTB0_IN_PU, PTB0_IN }
+ },
+ { PINMUX_CFG_REG("PCCR", 0xa4050104, 16, 2) {
+ PTC7_FN, PTC7_OUT, PTC7_IN_PU, PTC7_IN,
+ PTC6_FN, PTC6_OUT, PTC6_IN_PU, PTC6_IN,
+ PTC5_FN, PTC5_OUT, PTC5_IN_PU, PTC5_IN,
+ PTC4_FN, PTC4_OUT, PTC4_IN_PU, PTC4_IN,
+ PTC3_FN, PTC3_OUT, PTC3_IN_PU, PTC3_IN,
+ PTC2_FN, PTC2_OUT, PTC2_IN_PU, PTC2_IN,
+ PTC1_FN, PTC1_OUT, PTC1_IN_PU, PTC1_IN,
+ PTC0_FN, PTC0_OUT, PTC0_IN_PU, PTC0_IN }
+ },
+ { PINMUX_CFG_REG("PDCR", 0xa4050106, 16, 2) {
+ PTD7_FN, PTD7_OUT, PTD7_IN_PU, PTD7_IN,
+ PTD6_FN, PTD6_OUT, PTD6_IN_PU, PTD6_IN,
+ PTD5_FN, PTD5_OUT, PTD5_IN_PU, PTD5_IN,
+ PTD4_FN, PTD4_OUT, PTD4_IN_PU, PTD4_IN,
+ PTD3_FN, PTD3_OUT, PTD3_IN_PU, PTD3_IN,
+ PTD2_FN, PTD2_OUT, PTD2_IN_PU, PTD2_IN,
+ PTD1_FN, PTD1_OUT, PTD1_IN_PU, PTD1_IN,
+ PTD0_FN, PTD0_OUT, PTD0_IN_PU, PTD0_IN }
+ },
+ { PINMUX_CFG_REG("PECR", 0xa4050108, 16, 2) {
+ 0, 0, 0, 0,
+ PTE6_FN, 0, 0, PTE6_IN,
+ PTE5_FN, 0, 0, PTE5_IN,
+ PTE4_FN, PTE4_OUT, PTE4_IN_PU, PTE4_IN,
+ PTE3_FN, PTE3_OUT, PTE3_IN_PU, PTE3_IN,
+ PTE2_FN, PTE2_OUT, PTE2_IN_PU, PTE2_IN,
+ PTE1_FN, PTE1_OUT, PTE1_IN_PU, PTE1_IN,
+ PTE0_FN, PTE0_OUT, PTE0_IN_PU, PTE0_IN }
+ },
+ { PINMUX_CFG_REG("PFCR", 0xa405010a, 16, 2) {
+ 0, 0, 0, 0,
+ PTF6_FN, 0, 0, PTF6_IN,
+ PTF5_FN, 0, 0, PTF5_IN,
+ PTF4_FN, 0, 0, PTF4_IN,
+ PTF3_FN, 0, 0, PTF3_IN,
+ PTF2_FN, 0, 0, PTF2_IN,
+ PTF1_FN, 0, 0, PTF1_IN,
+ PTF0_FN, 0, 0, PTF0_IN }
+ },
+ { PINMUX_CFG_REG("PGCR", 0xa405010c, 16, 2) {
+ 0, 0, 0, 0,
+ PTG6_FN, PTG6_OUT, PTG6_IN_PU, PTG6_IN,
+ PTG5_FN, PTG5_OUT, PTG5_IN_PU, PTG5_IN,
+ PTG4_FN, PTG4_OUT, PTG4_IN_PU, PTG4_IN,
+ PTG3_FN, PTG3_OUT, PTG3_IN_PU, PTG3_IN,
+ PTG2_FN, PTG2_OUT, PTG2_IN_PU, PTG2_IN,
+ PTG1_FN, PTG1_OUT, PTG1_IN_PU, PTG1_IN,
+ PTG0_FN, PTG0_OUT, PTG0_IN_PU, PTG0_IN }
+ },
+ { PINMUX_CFG_REG("PHCR", 0xa405010e, 16, 2) {
+ 0, 0, 0, 0,
+ PTH6_FN, PTH6_OUT, PTH6_IN_PU, PTH6_IN,
+ PTH5_FN, PTH5_OUT, PTH5_IN_PU, PTH5_IN,
+ PTH4_FN, PTH4_OUT, PTH4_IN_PU, PTH4_IN,
+ PTH3_FN, PTH3_OUT, PTH3_IN_PU, PTH3_IN,
+ PTH2_FN, PTH2_OUT, PTH2_IN_PU, PTH2_IN,
+ PTH1_FN, PTH1_OUT, PTH1_IN_PU, PTH1_IN,
+ PTH0_FN, PTH0_OUT, PTH0_IN_PU, PTH0_IN }
+ },
+ { PINMUX_CFG_REG("PJCR", 0xa4050110, 16, 2) {
+ 0, 0, 0, 0,
+ PTJ6_FN, PTJ6_OUT, PTJ6_IN_PU, PTJ6_IN,
+ PTJ5_FN, PTJ5_OUT, PTJ5_IN_PU, PTJ5_IN,
+ PTJ4_FN, PTJ4_OUT, PTJ4_IN_PU, PTJ4_IN,
+ PTJ3_FN, PTJ3_OUT, PTJ3_IN_PU, PTJ3_IN,
+ PTJ2_FN, PTJ2_OUT, PTJ2_IN_PU, PTJ2_IN,
+ PTJ1_FN, PTJ1_OUT, PTJ1_IN_PU, PTJ1_IN,
+ PTJ0_FN, PTJ0_OUT, PTJ0_IN_PU, PTJ0_IN }
+ },
+ { PINMUX_CFG_REG("PKCR", 0xa4050112, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PTK3_FN, PTK3_OUT, PTK3_IN_PU, PTK3_IN,
+ PTK2_FN, PTK2_OUT, PTK2_IN_PU, PTK2_IN,
+ PTK1_FN, PTK1_OUT, PTK1_IN_PU, PTK1_IN,
+ PTK0_FN, PTK0_OUT, PTK0_IN_PU, PTK0_IN }
+ },
+ { PINMUX_CFG_REG("PLCR", 0xa4050114, 16, 2) {
+ PTL7_FN, PTL7_OUT, PTL7_IN_PU, PTL7_IN,
+ PTL6_FN, PTL6_OUT, PTL6_IN_PU, PTL6_IN,
+ PTL5_FN, PTL5_OUT, PTL5_IN_PU, PTL5_IN,
+ PTL4_FN, PTL4_OUT, PTL4_IN_PU, PTL4_IN,
+ PTL3_FN, PTL3_OUT, PTL3_IN_PU, PTL3_IN,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PMCR", 0xa4050116, 16, 2) {
+ PTM7_FN, PTM7_OUT, PTM7_IN_PU, PTM7_IN,
+ PTM6_FN, PTM6_OUT, PTM6_IN_PU, PTM6_IN,
+ PTM5_FN, PTM5_OUT, PTM5_IN_PU, PTM5_IN,
+ PTM4_FN, PTM4_OUT, PTM4_IN_PU, PTM4_IN,
+ PTM3_FN, PTM3_OUT, PTM3_IN_PU, PTM3_IN,
+ PTM2_FN, PTM2_OUT, PTM2_IN_PU, PTM2_IN,
+ PTM1_FN, PTM1_OUT, PTM1_IN_PU, PTM1_IN,
+ PTM0_FN, PTM0_OUT, PTM0_IN_PU, PTM0_IN }
+ },
+ { PINMUX_CFG_REG("PPCR", 0xa4050118, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PTP4_FN, PTP4_OUT, PTP4_IN_PU, PTP4_IN,
+ PTP3_FN, PTP3_OUT, PTP3_IN_PU, PTP3_IN,
+ PTP2_FN, PTP2_OUT, PTP2_IN_PU, PTP2_IN,
+ PTP1_FN, PTP1_OUT, PTP1_IN_PU, PTP1_IN,
+ PTP0_FN, PTP0_OUT, PTP0_IN_PU, PTP0_IN }
+ },
+ { PINMUX_CFG_REG("PRCR", 0xa405011a, 16, 2) {
+ PTR7_FN, PTR7_OUT, PTR7_IN_PU, PTR7_IN,
+ PTR6_FN, PTR6_OUT, PTR6_IN_PU, PTR6_IN,
+ PTR5_FN, PTR5_OUT, PTR5_IN_PU, PTR5_IN,
+ PTR4_FN, PTR4_OUT, PTR4_IN_PU, PTR4_IN,
+ PTR3_FN, PTR3_OUT, PTR3_IN_PU, PTR3_IN,
+ PTR2_FN, PTR2_OUT, PTR2_IN_PU, PTR2_IN,
+ PTR1_FN, PTR1_OUT, PTR1_IN_PU, PTR1_IN,
+ PTR0_FN, PTR0_OUT, PTR0_IN_PU, PTR0_IN }
+ },
+ { PINMUX_CFG_REG("PSCR", 0xa405011c, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PTS4_FN, PTS4_OUT, PTS4_IN_PU, PTS4_IN,
+ PTS3_FN, PTS3_OUT, PTS3_IN_PU, PTS3_IN,
+ PTS2_FN, PTS2_OUT, PTS2_IN_PU, PTS2_IN,
+ PTS1_FN, PTS1_OUT, PTS1_IN_PU, PTS1_IN,
+ PTS0_FN, PTS0_OUT, PTS0_IN_PU, PTS0_IN }
+ },
+ { PINMUX_CFG_REG("PTCR", 0xa405011e, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PTT4_FN, PTT4_OUT, PTT4_IN_PU, PTT4_IN,
+ PTT3_FN, PTT3_OUT, PTT3_IN_PU, PTT3_IN,
+ PTT2_FN, PTT2_OUT, PTT2_IN_PU, PTT2_IN,
+ PTT1_FN, PTT1_OUT, PTT1_IN_PU, PTT1_IN,
+ PTT0_FN, PTT0_OUT, PTT0_IN_PU, PTT0_IN }
+ },
+ { PINMUX_CFG_REG("PUCR", 0xa4050120, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PTU4_FN, PTU4_OUT, PTU4_IN_PU, PTU4_IN,
+ PTU3_FN, PTU3_OUT, PTU3_IN_PU, PTU3_IN,
+ PTU2_FN, PTU2_OUT, PTU2_IN_PU, PTU2_IN,
+ PTU1_FN, PTU1_OUT, PTU1_IN_PU, PTU1_IN,
+ PTU0_FN, PTU0_OUT, PTU0_IN_PU, PTU0_IN }
+ },
+ { PINMUX_CFG_REG("PVCR", 0xa4050122, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PTV4_FN, PTV4_OUT, PTV4_IN_PU, PTV4_IN,
+ PTV3_FN, PTV3_OUT, PTV3_IN_PU, PTV3_IN,
+ PTV2_FN, PTV2_OUT, PTV2_IN_PU, PTV2_IN,
+ PTV1_FN, PTV1_OUT, PTV1_IN_PU, PTV1_IN,
+ PTV0_FN, PTV0_OUT, PTV0_IN_PU, PTV0_IN }
+ },
+ {}
+};
+
+static struct pinmux_data_reg pinmux_data_regs[] = {
+ { PINMUX_DATA_REG("PADR", 0xa4050140, 8) {
+ PTA7_DATA, PTA6_DATA, PTA5_DATA, PTA4_DATA,
+ PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA }
+ },
+ { PINMUX_DATA_REG("PBDR", 0xa4050142, 8) {
+ PTB7_DATA, PTB6_DATA, PTB5_DATA, PTB4_DATA,
+ PTB3_DATA, PTB2_DATA, PTB1_DATA, PTB0_DATA }
+ },
+ { PINMUX_DATA_REG("PCDR", 0xa4050144, 8) {
+ PTC7_DATA, PTC6_DATA, PTC5_DATA, PTC4_DATA,
+ PTC3_DATA, PTC2_DATA, PTC1_DATA, PTC0_DATA }
+ },
+ { PINMUX_DATA_REG("PDDR", 0xa4050126, 8) {
+ PTD7_DATA, PTD6_DATA, PTD5_DATA, PTD4_DATA,
+ PTD3_DATA, PTD2_DATA, PTD1_DATA, PTD0_DATA }
+ },
+ { PINMUX_DATA_REG("PEDR", 0xa4050148, 8) {
+ 0, PTE6_DATA, PTE5_DATA, PTE4_DATA,
+ PTE3_DATA, PTE2_DATA, PTE1_DATA, PTE0_DATA }
+ },
+ { PINMUX_DATA_REG("PFDR", 0xa405014a, 8) {
+ 0, PTF6_DATA, PTF5_DATA, PTF4_DATA,
+ PTF3_DATA, PTF2_DATA, PTF1_DATA, PTF0_DATA }
+ },
+ { PINMUX_DATA_REG("PGDR", 0xa405014c, 8) {
+ 0, PTG6_DATA, PTG5_DATA, PTG4_DATA,
+ PTG3_DATA, PTG2_DATA, PTG1_DATA, PTG0_DATA }
+ },
+ { PINMUX_DATA_REG("PHDR", 0xa405014e, 8) {
+ 0, PTH6_DATA, PTH5_DATA, PTH4_DATA,
+ PTH3_DATA, PTH2_DATA, PTH1_DATA, PTH0_DATA }
+ },
+ { PINMUX_DATA_REG("PJDR", 0xa4050150, 8) {
+ 0, PTJ6_DATA, PTJ5_DATA, PTJ4_DATA,
+ PTJ3_DATA, PTJ2_DATA, PTJ1_DATA, PTJ0_DATA }
+ },
+ { PINMUX_DATA_REG("PKDR", 0xa4050152, 8) {
+ 0, 0, 0, 0,
+ PTK3_DATA, PTK2_DATA, PTK1_DATA, PTK0_DATA }
+ },
+ { PINMUX_DATA_REG("PLDR", 0xa4050154, 8) {
+ PTL7_DATA, PTL6_DATA, PTL5_DATA, PTL4_DATA,
+ PTL3_DATA, 0, 0, 0 }
+ },
+ { PINMUX_DATA_REG("PMDR", 0xa4050156, 8) {
+ PTM7_DATA, PTM6_DATA, PTM5_DATA, PTM4_DATA,
+ PTM3_DATA, PTM2_DATA, PTM1_DATA, PTM0_DATA }
+ },
+ { PINMUX_DATA_REG("PPDR", 0xa4050158, 8) {
+ 0, 0, 0, PTP4_DATA,
+ PTP3_DATA, PTP2_DATA, PTP1_DATA, PTP0_DATA }
+ },
+ { PINMUX_DATA_REG("PRDR", 0xa405015a, 8) {
+ PTR7_DATA, PTR6_DATA, PTR5_DATA, PTR4_DATA,
+ PTR3_DATA, PTR2_DATA, PTR1_DATA, PTR0_DATA }
+ },
+ { PINMUX_DATA_REG("PSDR", 0xa405015c, 8) {
+ 0, 0, 0, PTS4_DATA,
+ PTS3_DATA, PTS2_DATA, PTS1_DATA, PTS0_DATA }
+ },
+ { PINMUX_DATA_REG("PTDR", 0xa405015e, 8) {
+ 0, 0, 0, PTT4_DATA,
+ PTT3_DATA, PTT2_DATA, PTT1_DATA, PTT0_DATA }
+ },
+ { PINMUX_DATA_REG("PUDR", 0xa4050160, 8) {
+ 0, 0, 0, PTU4_DATA,
+ PTU3_DATA, PTU2_DATA, PTU1_DATA, PTU0_DATA }
+ },
+ { PINMUX_DATA_REG("PVDR", 0xa4050162, 8) {
+ 0, 0, 0, PTV4_DATA,
+ PTV3_DATA, PTV2_DATA, PTV1_DATA, PTV0_DATA }
+ },
+ { },
+};
+
+static struct pinmux_info sh7720_pinmux_info = {
+ .name = "sh7720_pfc",
+ .reserved_id = PINMUX_RESERVED,
+ .data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END },
+ .input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END },
+ .input_pu = { PINMUX_INPUT_PULLUP_BEGIN, PINMUX_INPUT_PULLUP_END },
+ .output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END },
+ .mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END },
+ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+ .first_gpio = GPIO_PTA7,
+ .last_gpio = GPIO_FN_STATUS1,
+
+ .gpios = pinmux_gpios,
+ .cfg_regs = pinmux_config_regs,
+ .data_regs = pinmux_data_regs,
+
+ .gpio_data = pinmux_data,
+ .gpio_data_size = ARRAY_SIZE(pinmux_data),
+};
+
+static int __init plat_pinmux_setup(void)
+{
+ return register_pinmux(&sh7720_pinmux_info);
+}
+
+arch_initcall(plat_pinmux_setup);
diff --git a/arch/sh/kernel/cpu/sh3/probe.c b/arch/sh/kernel/cpu/sh3/probe.c
new file mode 100644
index 00000000..bf23c322
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh3/probe.c
@@ -0,0 +1,111 @@
+/*
+ * arch/sh/kernel/cpu/sh3/probe.c
+ *
+ * CPU Subtype Probing for SH-3.
+ *
+ * Copyright (C) 1999, 2000 Niibe Yutaka
+ * Copyright (C) 2002 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/init.h>
+#include <asm/processor.h>
+#include <asm/cache.h>
+#include <asm/io.h>
+
+void __cpuinit cpu_probe(void)
+{
+ unsigned long addr0, addr1, data0, data1, data2, data3;
+
+ jump_to_uncached();
+ /*
+ * Check whether the cache address array entries shadow one another.
+ * When they do, this is a 128-entry system; otherwise it is a
+ * 256-entry system.
+ */
+ addr0 = CACHE_OC_ADDRESS_ARRAY + (3 << 12);
+ addr1 = CACHE_OC_ADDRESS_ARRAY + (1 << 12);
+
+ /* First, write back & invalidate */
+ data0 = __raw_readl(addr0);
+ __raw_writel(data0&~(SH_CACHE_VALID|SH_CACHE_UPDATED), addr0);
+ data1 = __raw_readl(addr1);
+ __raw_writel(data1&~(SH_CACHE_VALID|SH_CACHE_UPDATED), addr1);
+
+ /* Next, check if there's shadow or not */
+ data0 = __raw_readl(addr0);
+ data0 ^= SH_CACHE_VALID;
+ __raw_writel(data0, addr0);
+ data1 = __raw_readl(addr1);
+ data2 = data1 ^ SH_CACHE_VALID;
+ __raw_writel(data2, addr1);
+ data3 = __raw_readl(addr0);
+
+ /* Lastly, invalidate them. */
+ __raw_writel(data0&~SH_CACHE_VALID, addr0);
+ __raw_writel(data2&~SH_CACHE_VALID, addr1);
+
+ back_to_cached();
+
+ boot_cpu_data.dcache.ways = 4;
+ boot_cpu_data.dcache.entry_shift = 4;
+ boot_cpu_data.dcache.linesz = L1_CACHE_BYTES;
+ boot_cpu_data.dcache.flags = 0;
+
+ /*
+ * The 7709A/7729 have a 16K cache (256 entries), while the 7702 has
+ * only a 2K direct-mapped cache; the 7702 is not supported (yet).
+ */
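+ /*
+ * If the two address array writes above aliased on to the same entry,
+ * the values read back match and the array is shadowed (128 entries);
+ * otherwise the two entries are distinct (256 entries).
+ */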
+ if (data0 == data1 && data2 == data3) { /* Shadow */
+ boot_cpu_data.dcache.way_incr = (1 << 11);
+ boot_cpu_data.dcache.entry_mask = 0x7f0;
+ boot_cpu_data.dcache.sets = 128;
+ boot_cpu_data.type = CPU_SH7708;
+
+ boot_cpu_data.flags |= CPU_HAS_MMU_PAGE_ASSOC;
+ } else { /* 7709A or 7729 */
+ boot_cpu_data.dcache.way_incr = (1 << 12);
+ boot_cpu_data.dcache.entry_mask = 0xff0;
+ boot_cpu_data.dcache.sets = 256;
+ boot_cpu_data.type = CPU_SH7729;
+
+#if defined(CONFIG_CPU_SUBTYPE_SH7706)
+ boot_cpu_data.type = CPU_SH7706;
+#endif
+#if defined(CONFIG_CPU_SUBTYPE_SH7710)
+ boot_cpu_data.type = CPU_SH7710;
+#endif
+#if defined(CONFIG_CPU_SUBTYPE_SH7712)
+ boot_cpu_data.type = CPU_SH7712;
+#endif
+#if defined(CONFIG_CPU_SUBTYPE_SH7720)
+ boot_cpu_data.type = CPU_SH7720;
+#endif
+#if defined(CONFIG_CPU_SUBTYPE_SH7721)
+ boot_cpu_data.type = CPU_SH7721;
+#endif
+#if defined(CONFIG_CPU_SUBTYPE_SH7705)
+ boot_cpu_data.type = CPU_SH7705;
+
+#if defined(CONFIG_SH7705_CACHE_32KB)
+ boot_cpu_data.dcache.way_incr = (1 << 13);
+ boot_cpu_data.dcache.entry_mask = 0x1ff0;
+ boot_cpu_data.dcache.sets = 512;
+ __raw_writel(CCR_CACHE_32KB, CCR3_REG);
+#else
+ __raw_writel(CCR_CACHE_16KB, CCR3_REG);
+#endif
+#endif
+ }
+
+ /*
+ * SH-3 doesn't have separate caches
+ */
+ boot_cpu_data.dcache.flags |= SH_CACHE_COMBINED;
+ boot_cpu_data.icache = boot_cpu_data.dcache;
+
+ boot_cpu_data.family = CPU_FAMILY_SH3;
+}
diff --git a/arch/sh/kernel/cpu/sh3/serial-sh770x.c b/arch/sh/kernel/cpu/sh3/serial-sh770x.c
new file mode 100644
index 00000000..4f7242c6
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh3/serial-sh770x.c
@@ -0,0 +1,33 @@
+#include <linux/serial_sci.h>
+#include <linux/serial_core.h>
+#include <linux/io.h>
+#include <cpu/serial.h>
+
+#define SCPCR 0xA4000116
+#define SCPDR 0xA4000136
+
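+/*
+ * SCPCR/SCPDR control the SCI/SCIF pin functions: each pin has a
+ * two-bit mode field in SCPCR and a data bit in SCPDR.
+ */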
+static void sh770x_sci_init_pins(struct uart_port *port, unsigned int cflag)
+{
+ unsigned short data;
+
+ /* We need to set SCPCR to enable RTS/CTS */
+ data = __raw_readw(SCPCR);
+ /* Clear out SCP7MD1,0, SCP6MD1,0, SCP4MD1,0 */
+ __raw_writew(data & 0x0fcf, SCPCR);
+
+ if (!(cflag & CRTSCTS)) {
+ /* RTS/CTS flow control is not used: drive /RTS2 manually as a port output */
+ data = __raw_readw(SCPCR);
+ /* Clear out SCP7MD1,0, SCP4MD1,0,
+ * set SCP6MD1,0 = {01} (output) */
+ __raw_writew((data & 0x0fcf) | 0x1000, SCPCR);
+
+ data = __raw_readb(SCPDR);
+ /* Set /RTS2 (bit6) = 0 */
+ __raw_writeb(data & 0xbf, SCPDR);
+ }
+}
+
+struct plat_sci_port_ops sh770x_sci_port_ops = {
+ .init_pins = sh770x_sci_init_pins,
+};
diff --git a/arch/sh/kernel/cpu/sh3/serial-sh7710.c b/arch/sh/kernel/cpu/sh3/serial-sh7710.c
new file mode 100644
index 00000000..42190ef6
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh3/serial-sh7710.c
@@ -0,0 +1,20 @@
+#include <linux/serial_sci.h>
+#include <linux/serial_core.h>
+#include <linux/io.h>
+#include <cpu/serial.h>
+
+#define PACR 0xa4050100
+#define PBCR 0xa4050102
+
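+/*
+ * Route the SCIF pins to the serial function by clearing the relevant
+ * two-bit mode fields in the port control registers: the SCIF0 pins
+ * live in PACR and PBCR, the SCIF1 pins in PBCR only.
+ */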
+static void sh7710_sci_init_pins(struct uart_port *port, unsigned int cflag)
+{
+ if (port->mapbase == 0xA4400000) {
+ __raw_writew(__raw_readw(PACR) & 0xffc0, PACR);
+ __raw_writew(__raw_readw(PBCR) & 0x0fff, PBCR);
+ } else if (port->mapbase == 0xA4410000)
+ __raw_writew(__raw_readw(PBCR) & 0xf003, PBCR);
+}
+
+struct plat_sci_port_ops sh7710_sci_port_ops = {
+ .init_pins = sh7710_sci_init_pins,
+};
diff --git a/arch/sh/kernel/cpu/sh3/serial-sh7720.c b/arch/sh/kernel/cpu/sh3/serial-sh7720.c
new file mode 100644
index 00000000..8832c526
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh3/serial-sh7720.c
@@ -0,0 +1,37 @@
+#include <linux/serial_sci.h>
+#include <linux/serial_core.h>
+#include <linux/io.h>
+#include <cpu/serial.h>
+#include <asm/gpio.h>
+
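+/*
+ * The SCIF0/SCIF1 modem control lines are multiplexed with GPIOs on
+ * ports T and V (see pinmux-sh7720.c), so flow control is enabled or
+ * disabled by changing the two-bit mode fields in PTCR/PVCR.
+ */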
+static void sh7720_sci_init_pins(struct uart_port *port, unsigned int cflag)
+{
+ unsigned short data;
+
+ if (cflag & CRTSCTS) {
+ /* enable RTS/CTS */
+ if (port->mapbase == 0xa4430000) { /* SCIF0 */
+ /* Clear PTCR bits 9-2; enable all SCIF pins but SCK */
+ data = __raw_readw(PORT_PTCR);
+ __raw_writew((data & 0xfc03), PORT_PTCR);
+ } else if (port->mapbase == 0xa4438000) { /* SCIF1 */
+ /* Clear PVCR bits 9-2 */
+ data = __raw_readw(PORT_PVCR);
+ __raw_writew((data & 0xfc03), PORT_PVCR);
+ }
+ } else {
+ if (port->mapbase == 0xa4430000) { /* SCIF0 */
+ /* Clear PTCR bits 5-2; enable only TXD and RXD */
+ data = __raw_readw(PORT_PTCR);
+ __raw_writew((data & 0xffc3), PORT_PTCR);
+ } else if (port->mapbase == 0xa4438000) { /* SCIF1 */
+ /* Clear PVCR bits 5-2 */
+ data = __raw_readw(PORT_PVCR);
+ __raw_writew((data & 0xffc3), PORT_PVCR);
+ }
+ }
+}
+
+struct plat_sci_port_ops sh7720_sci_port_ops = {
+ .init_pins = sh7720_sci_init_pins,
+};
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh3.c b/arch/sh/kernel/cpu/sh3/setup-sh3.c
new file mode 100644
index 00000000..53be70b9
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh3/setup-sh3.c
@@ -0,0 +1,71 @@
+/*
+ * Shared SH3 Setup code
+ *
+ * Copyright (C) 2008 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+
+/* All SH3 devices are equipped with IRQ0->5 (except sh7708) */
+
+enum {
+ UNUSED = 0,
+
+ /* interrupt sources */
+ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5,
+};
+
+static struct intc_vect vectors_irq0123[] __initdata = {
+ INTC_VECT(IRQ0, 0x600), INTC_VECT(IRQ1, 0x620),
+ INTC_VECT(IRQ2, 0x640), INTC_VECT(IRQ3, 0x660),
+};
+
+static struct intc_vect vectors_irq45[] __initdata = {
+ INTC_VECT(IRQ4, 0x680), INTC_VECT(IRQ5, 0x6a0),
+};
+
+static struct intc_prio_reg prio_registers[] __initdata = {
+ { 0xa4000016, 0, 16, 4, /* IPRC */ { IRQ3, IRQ2, IRQ1, IRQ0 } },
+ { 0xa4000018, 0, 16, 4, /* IPRD */ { 0, 0, IRQ5, IRQ4 } },
+};
+
+static struct intc_mask_reg ack_registers[] __initdata = {
+ { 0xa4000004, 0, 8, /* IRR0 */
+ { 0, 0, IRQ5, IRQ4, IRQ3, IRQ2, IRQ1, IRQ0 } },
+};
+
+static struct intc_sense_reg sense_registers[] __initdata = {
+ { 0xa4000010, 16, 2, { 0, 0, IRQ5, IRQ4, IRQ3, IRQ2, IRQ1, IRQ0 } },
+};
+
+static DECLARE_INTC_DESC_ACK(intc_desc_irq0123, "sh3-irq0123",
+ vectors_irq0123, NULL, NULL,
+ prio_registers, sense_registers, ack_registers);
+
+static DECLARE_INTC_DESC_ACK(intc_desc_irq45, "sh3-irq45",
+ vectors_irq45, NULL, NULL,
+ prio_registers, sense_registers, ack_registers);
+
+#define INTC_ICR1 0xa4000010UL
+#define INTC_ICR1_IRQLVL (1<<14)
+
+void __init plat_irq_setup_pins(int mode)
+{
+ if (mode == IRQ_MODE_IRQ) {
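+ /*
+ * Clearing ICR1.IRQLVL takes IRQ0-3 out of the encoded IRL level
+ * mode so they act as four independent interrupt request pins.
+ */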
+ __raw_writew(__raw_readw(INTC_ICR1) & ~INTC_ICR1_IRQLVL, INTC_ICR1);
+ register_intc_controller(&intc_desc_irq0123);
+ return;
+ }
+ BUG();
+}
+
+void __init plat_irq_setup_sh3(void)
+{
+ register_intc_controller(&intc_desc_irq45);
+}
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7705.c b/arch/sh/kernel/cpu/sh3/setup-sh7705.c
new file mode 100644
index 00000000..2309618c
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh3/setup-sh7705.c
@@ -0,0 +1,253 @@
+/*
+ * SH7705 Setup
+ *
+ * Copyright (C) 2006 - 2009 Paul Mundt
+ * Copyright (C) 2007 Nobuhiro Iwamatsu
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/serial.h>
+#include <linux/serial_sci.h>
+#include <linux/sh_timer.h>
+#include <asm/rtc.h>
+#include <cpu/serial.h>
+
+enum {
+ UNUSED = 0,
+
+ /* interrupt sources */
+ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5,
+ PINT07, PINT815,
+
+ DMAC, SCIF0, SCIF2, ADC_ADI, USB,
+
+ TPU0, TPU1, TPU2, TPU3,
+ TMU0, TMU1, TMU2,
+
+ RTC, WDT, REF_RCMI,
+};
+
+static struct intc_vect vectors[] __initdata = {
+ /* IRQ0->5 are handled in setup-sh3.c */
+ INTC_VECT(PINT07, 0x700), INTC_VECT(PINT815, 0x720),
+ INTC_VECT(DMAC, 0x800), INTC_VECT(DMAC, 0x820),
+ INTC_VECT(DMAC, 0x840), INTC_VECT(DMAC, 0x860),
+ INTC_VECT(SCIF0, 0x880), INTC_VECT(SCIF0, 0x8a0),
+ INTC_VECT(SCIF0, 0x8e0),
+ INTC_VECT(SCIF2, 0x900), INTC_VECT(SCIF2, 0x920),
+ INTC_VECT(SCIF2, 0x960),
+ INTC_VECT(ADC_ADI, 0x980),
+ INTC_VECT(USB, 0xa20), INTC_VECT(USB, 0xa40),
+ INTC_VECT(TPU0, 0xc00), INTC_VECT(TPU1, 0xc20),
+ INTC_VECT(TPU2, 0xc80), INTC_VECT(TPU3, 0xca0),
+ INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420),
+ INTC_VECT(TMU2, 0x440), INTC_VECT(TMU2, 0x460),
+ INTC_VECT(RTC, 0x480), INTC_VECT(RTC, 0x4a0),
+ INTC_VECT(RTC, 0x4c0),
+ INTC_VECT(WDT, 0x560),
+ INTC_VECT(REF_RCMI, 0x580),
+};
+
+static struct intc_prio_reg prio_registers[] __initdata = {
+ { 0xfffffee2, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2, RTC } },
+ { 0xfffffee4, 0, 16, 4, /* IPRB */ { WDT, REF_RCMI, 0, 0 } },
+ { 0xa4000016, 0, 16, 4, /* IPRC */ { IRQ3, IRQ2, IRQ1, IRQ0 } },
+ { 0xa4000018, 0, 16, 4, /* IPRD */ { PINT07, PINT815, IRQ5, IRQ4 } },
+ { 0xa400001a, 0, 16, 4, /* IPRE */ { DMAC, SCIF0, SCIF2, ADC_ADI } },
+ { 0xa4080000, 0, 16, 4, /* IPRF */ { 0, 0, USB } },
+ { 0xa4080002, 0, 16, 4, /* IPRG */ { TPU0, TPU1 } },
+ { 0xa4080004, 0, 16, 4, /* IPRH */ { TPU2, TPU3 } },
+};
+
+static DECLARE_INTC_DESC(intc_desc, "sh7705", vectors, NULL,
+ NULL, prio_registers, NULL);
+
+static struct plat_sci_port scif0_platform_data = {
+ .mapbase = 0xa4410000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_TIE | SCSCR_RIE | SCSCR_TE |
+ SCSCR_RE | SCSCR_CKE1 | SCSCR_CKE0,
+ .scbrr_algo_id = SCBRR_ALGO_4,
+ .type = PORT_SCIF,
+ .irqs = { 56, 56, 56 },
+ .ops = &sh770x_sci_port_ops,
+ .regtype = SCIx_SH7705_SCIF_REGTYPE,
+};
+
+static struct platform_device scif0_device = {
+ .name = "sh-sci",
+ .id = 0,
+ .dev = {
+ .platform_data = &scif0_platform_data,
+ },
+};
+
+static struct plat_sci_port scif1_platform_data = {
+ .mapbase = 0xa4400000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_TIE | SCSCR_RIE | SCSCR_TE | SCSCR_RE,
+ .scbrr_algo_id = SCBRR_ALGO_4,
+ .type = PORT_SCIF,
+ .irqs = { 52, 52, 52 },
+ .ops = &sh770x_sci_port_ops,
+ .regtype = SCIx_SH7705_SCIF_REGTYPE,
+};
+
+static struct platform_device scif1_device = {
+ .name = "sh-sci",
+ .id = 1,
+ .dev = {
+ .platform_data = &scif1_platform_data,
+ },
+};
+
+static struct resource rtc_resources[] = {
+ [0] = {
+ .start = 0xfffffec0,
+ .end = 0xfffffec0 + 0x1e,
+ .flags = IORESOURCE_IO,
+ },
+ [1] = {
+ .start = 20,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct sh_rtc_platform_info rtc_info = {
+ .capabilities = RTC_CAP_4_DIGIT_YEAR,
+};
+
+static struct platform_device rtc_device = {
+ .name = "sh-rtc",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(rtc_resources),
+ .resource = rtc_resources,
+ .dev = {
+ .platform_data = &rtc_info,
+ },
+};
+
+static struct sh_timer_config tmu0_platform_data = {
+ .channel_offset = 0x02,
+ .timer_bit = 0,
+ .clockevent_rating = 200,
+};
+
+static struct resource tmu0_resources[] = {
+ [0] = {
+ .start = 0xfffffe94,
+ .end = 0xfffffe9f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 16,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu0_device = {
+ .name = "sh_tmu",
+ .id = 0,
+ .dev = {
+ .platform_data = &tmu0_platform_data,
+ },
+ .resource = tmu0_resources,
+ .num_resources = ARRAY_SIZE(tmu0_resources),
+};
+
+static struct sh_timer_config tmu1_platform_data = {
+ .channel_offset = 0xe,
+ .timer_bit = 1,
+ .clocksource_rating = 200,
+};
+
+static struct resource tmu1_resources[] = {
+ [0] = {
+ .start = 0xfffffea0,
+ .end = 0xfffffeab,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 17,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu1_device = {
+ .name = "sh_tmu",
+ .id = 1,
+ .dev = {
+ .platform_data = &tmu1_platform_data,
+ },
+ .resource = tmu1_resources,
+ .num_resources = ARRAY_SIZE(tmu1_resources),
+};
+
+static struct sh_timer_config tmu2_platform_data = {
+ .channel_offset = 0x1a,
+ .timer_bit = 2,
+};
+
+static struct resource tmu2_resources[] = {
+ [0] = {
+ .start = 0xfffffeac,
+ .end = 0xfffffebb,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 18,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu2_device = {
+ .name = "sh_tmu",
+ .id = 2,
+ .dev = {
+ .platform_data = &tmu2_platform_data,
+ },
+ .resource = tmu2_resources,
+ .num_resources = ARRAY_SIZE(tmu2_resources),
+};
+
+static struct platform_device *sh7705_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &tmu0_device,
+ &tmu1_device,
+ &tmu2_device,
+ &rtc_device,
+};
+
+static int __init sh7705_devices_setup(void)
+{
+ return platform_add_devices(sh7705_devices,
+ ARRAY_SIZE(sh7705_devices));
+}
+arch_initcall(sh7705_devices_setup);
+
+static struct platform_device *sh7705_early_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &tmu0_device,
+ &tmu1_device,
+ &tmu2_device,
+};
+
+void __init plat_early_device_setup(void)
+{
+ early_platform_add_devices(sh7705_early_devices,
+ ARRAY_SIZE(sh7705_early_devices));
+}
+
+void __init plat_irq_setup(void)
+{
+ register_intc_controller(&intc_desc);
+ plat_irq_setup_sh3();
+}
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh770x.c b/arch/sh/kernel/cpu/sh3/setup-sh770x.c
new file mode 100644
index 00000000..3f3d5fe5
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh3/setup-sh770x.c
@@ -0,0 +1,311 @@
+/*
+ * SH3 Setup code for SH7706, SH7707, SH7708, SH7709
+ *
+ * Copyright (C) 2007 Magnus Damm
+ * Copyright (C) 2009 Paul Mundt
+ *
+ * Based on setup-sh7709.c
+ *
+ * Copyright (C) 2006 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+#include <linux/serial.h>
+#include <linux/serial_sci.h>
+#include <linux/sh_timer.h>
+#include <cpu/serial.h>
+
+enum {
+ UNUSED = 0,
+
+ /* interrupt sources */
+ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5,
+ PINT07, PINT815,
+ DMAC, SCIF0, SCIF2, SCI, ADC_ADI,
+ LCDC, PCC0, PCC1,
+ TMU0, TMU1, TMU2,
+ RTC, WDT, REF,
+};
+
+static struct intc_vect vectors[] __initdata = {
+ INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420),
+ INTC_VECT(TMU2, 0x440), INTC_VECT(TMU2, 0x460),
+ INTC_VECT(RTC, 0x480), INTC_VECT(RTC, 0x4a0),
+ INTC_VECT(RTC, 0x4c0),
+ INTC_VECT(SCI, 0x4e0), INTC_VECT(SCI, 0x500),
+ INTC_VECT(SCI, 0x520), INTC_VECT(SCI, 0x540),
+ INTC_VECT(WDT, 0x560),
+ INTC_VECT(REF, 0x580),
+ INTC_VECT(REF, 0x5a0),
+#if defined(CONFIG_CPU_SUBTYPE_SH7706) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7707) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7709)
+ /* IRQ0->5 are handled in setup-sh3.c */
+ INTC_VECT(DMAC, 0x800), INTC_VECT(DMAC, 0x820),
+ INTC_VECT(DMAC, 0x840), INTC_VECT(DMAC, 0x860),
+ INTC_VECT(ADC_ADI, 0x980),
+ INTC_VECT(SCIF2, 0x900), INTC_VECT(SCIF2, 0x920),
+ INTC_VECT(SCIF2, 0x940), INTC_VECT(SCIF2, 0x960),
+#endif
+#if defined(CONFIG_CPU_SUBTYPE_SH7707) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7709)
+ INTC_VECT(PINT07, 0x700), INTC_VECT(PINT815, 0x720),
+ INTC_VECT(SCIF0, 0x880), INTC_VECT(SCIF0, 0x8a0),
+ INTC_VECT(SCIF0, 0x8c0), INTC_VECT(SCIF0, 0x8e0),
+#endif
+#if defined(CONFIG_CPU_SUBTYPE_SH7707)
+ INTC_VECT(LCDC, 0x9a0),
+ INTC_VECT(PCC0, 0x9c0), INTC_VECT(PCC1, 0x9e0),
+#endif
+};
+
+static struct intc_prio_reg prio_registers[] __initdata = {
+ { 0xfffffee2, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2, RTC } },
+ { 0xfffffee4, 0, 16, 4, /* IPRB */ { WDT, REF, SCI, 0 } },
+#if defined(CONFIG_CPU_SUBTYPE_SH7706) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7707) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7709)
+ { 0xa4000016, 0, 16, 4, /* IPRC */ { IRQ3, IRQ2, IRQ1, IRQ0 } },
+ { 0xa4000018, 0, 16, 4, /* IPRD */ { 0, 0, IRQ5, IRQ4 } },
+ { 0xa400001a, 0, 16, 4, /* IPRE */ { DMAC, 0, SCIF2, ADC_ADI } },
+#endif
+#if defined(CONFIG_CPU_SUBTYPE_SH7707) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7709)
+ { 0xa4000018, 0, 16, 4, /* IPRD */ { PINT07, PINT815, } },
+ { 0xa400001a, 0, 16, 4, /* IPRE */ { 0, SCIF0 } },
+#endif
+#if defined(CONFIG_CPU_SUBTYPE_SH7707)
+ { 0xa400001c, 0, 16, 4, /* IPRF */ { 0, LCDC, PCC0, PCC1, } },
+#endif
+};
+
+static DECLARE_INTC_DESC(intc_desc, "sh770x", vectors, NULL,
+ NULL, prio_registers, NULL);
+
+static struct resource rtc_resources[] = {
+ [0] = {
+ .start = 0xfffffec0,
+ .end = 0xfffffec0 + 0x1e,
+ .flags = IORESOURCE_IO,
+ },
+ [1] = {
+ .start = 20,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device rtc_device = {
+ .name = "sh-rtc",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(rtc_resources),
+ .resource = rtc_resources,
+};
+
+static struct plat_sci_port scif0_platform_data = {
+ .mapbase = 0xfffffe80,
+ .port_reg = 0xa4000136,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_TE | SCSCR_RE,
+ .scbrr_algo_id = SCBRR_ALGO_2,
+ .type = PORT_SCI,
+ .irqs = { 23, 23, 23, 0 },
+ .ops = &sh770x_sci_port_ops,
+ .regshift = 1,
+};
+
+static struct platform_device scif0_device = {
+ .name = "sh-sci",
+ .id = 0,
+ .dev = {
+ .platform_data = &scif0_platform_data,
+ },
+};
+#if defined(CONFIG_CPU_SUBTYPE_SH7706) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7707) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7709)
+static struct plat_sci_port scif1_platform_data = {
+ .mapbase = 0xa4000150,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_TE | SCSCR_RE,
+ .scbrr_algo_id = SCBRR_ALGO_2,
+ .type = PORT_SCIF,
+ .irqs = { 56, 56, 56, 56 },
+ .ops = &sh770x_sci_port_ops,
+ .regtype = SCIx_SH3_SCIF_REGTYPE,
+};
+
+static struct platform_device scif1_device = {
+ .name = "sh-sci",
+ .id = 1,
+ .dev = {
+ .platform_data = &scif1_platform_data,
+ },
+};
+#endif
+#if defined(CONFIG_CPU_SUBTYPE_SH7707) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7709)
+static struct plat_sci_port scif2_platform_data = {
+ .mapbase = 0xa4000140,
+ .port_reg = SCIx_NOT_SUPPORTED,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_TE | SCSCR_RE,
+ .scbrr_algo_id = SCBRR_ALGO_2,
+ .type = PORT_IRDA,
+ .irqs = { 52, 52, 52, 52 },
+ .ops = &sh770x_sci_port_ops,
+ .regshift = 1,
+};
+
+static struct platform_device scif2_device = {
+ .name = "sh-sci",
+ .id = 2,
+ .dev = {
+ .platform_data = &scif2_platform_data,
+ },
+};
+#endif
+
+static struct sh_timer_config tmu0_platform_data = {
+ .channel_offset = 0x02,
+ .timer_bit = 0,
+ .clockevent_rating = 200,
+};
+
+static struct resource tmu0_resources[] = {
+ [0] = {
+ .start = 0xfffffe94,
+ .end = 0xfffffe9f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 16,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu0_device = {
+ .name = "sh_tmu",
+ .id = 0,
+ .dev = {
+ .platform_data = &tmu0_platform_data,
+ },
+ .resource = tmu0_resources,
+ .num_resources = ARRAY_SIZE(tmu0_resources),
+};
+
+static struct sh_timer_config tmu1_platform_data = {
+ .channel_offset = 0xe,
+ .timer_bit = 1,
+ .clocksource_rating = 200,
+};
+
+static struct resource tmu1_resources[] = {
+ [0] = {
+ .start = 0xfffffea0,
+ .end = 0xfffffeab,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 17,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu1_device = {
+ .name = "sh_tmu",
+ .id = 1,
+ .dev = {
+ .platform_data = &tmu1_platform_data,
+ },
+ .resource = tmu1_resources,
+ .num_resources = ARRAY_SIZE(tmu1_resources),
+};
+
+static struct sh_timer_config tmu2_platform_data = {
+ .channel_offset = 0x1a,
+ .timer_bit = 2,
+};
+
+static struct resource tmu2_resources[] = {
+ [0] = {
+ .start = 0xfffffeac,
+ .end = 0xfffffebb,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 18,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu2_device = {
+ .name = "sh_tmu",
+ .id = 2,
+ .dev = {
+ .platform_data = &tmu2_platform_data,
+ },
+ .resource = tmu2_resources,
+ .num_resources = ARRAY_SIZE(tmu2_resources),
+};
+
+static struct platform_device *sh770x_devices[] __initdata = {
+ &scif0_device,
+#if defined(CONFIG_CPU_SUBTYPE_SH7706) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7707) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7709)
+ &scif1_device,
+#endif
+#if defined(CONFIG_CPU_SUBTYPE_SH7707) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7709)
+ &scif2_device,
+#endif
+ &tmu0_device,
+ &tmu1_device,
+ &tmu2_device,
+ &rtc_device,
+};
+
+static int __init sh770x_devices_setup(void)
+{
+ return platform_add_devices(sh770x_devices,
+ ARRAY_SIZE(sh770x_devices));
+}
+arch_initcall(sh770x_devices_setup);
+
+static struct platform_device *sh770x_early_devices[] __initdata = {
+ &scif0_device,
+#if defined(CONFIG_CPU_SUBTYPE_SH7706) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7707) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7709)
+ &scif1_device,
+#endif
+#if defined(CONFIG_CPU_SUBTYPE_SH7707) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7709)
+ &scif2_device,
+#endif
+ &tmu0_device,
+ &tmu1_device,
+ &tmu2_device,
+};
+
+void __init plat_early_device_setup(void)
+{
+ early_platform_add_devices(sh770x_early_devices,
+ ARRAY_SIZE(sh770x_early_devices));
+}
+
+void __init plat_irq_setup(void)
+{
+ register_intc_controller(&intc_desc);
+#if defined(CONFIG_CPU_SUBTYPE_SH7706) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7707) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7709)
+ plat_irq_setup_sh3();
+#endif
+}
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7710.c b/arch/sh/kernel/cpu/sh3/setup-sh7710.c
new file mode 100644
index 00000000..78f6b01d
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh3/setup-sh7710.c
@@ -0,0 +1,252 @@
+/*
+ * SH3 Setup code for SH7710, SH7712
+ *
+ * Copyright (C) 2006 - 2009 Paul Mundt
+ * Copyright (C) 2007 Nobuhiro Iwamatsu
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/serial.h>
+#include <linux/serial_sci.h>
+#include <linux/sh_timer.h>
+#include <asm/rtc.h>
+
+enum {
+ UNUSED = 0,
+
+ /* interrupt sources */
+ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5,
+ DMAC1, SCIF0, SCIF1, DMAC2, IPSEC,
+ EDMAC0, EDMAC1, EDMAC2,
+ SIOF0, SIOF1,
+
+ TMU0, TMU1, TMU2,
+ RTC, WDT, REF,
+};
+
+static struct intc_vect vectors[] __initdata = {
+ /* IRQ0->5 are handled in setup-sh3.c */
+ INTC_VECT(DMAC1, 0x800), INTC_VECT(DMAC1, 0x820),
+ INTC_VECT(DMAC1, 0x840), INTC_VECT(DMAC1, 0x860),
+ INTC_VECT(SCIF0, 0x880), INTC_VECT(SCIF0, 0x8a0),
+ INTC_VECT(SCIF0, 0x8c0), INTC_VECT(SCIF0, 0x8e0),
+ INTC_VECT(SCIF1, 0x900), INTC_VECT(SCIF1, 0x920),
+ INTC_VECT(SCIF1, 0x940), INTC_VECT(SCIF1, 0x960),
+ INTC_VECT(DMAC2, 0xb80), INTC_VECT(DMAC2, 0xba0),
+#ifdef CONFIG_CPU_SUBTYPE_SH7710
+ INTC_VECT(IPSEC, 0xbe0),
+#endif
+ INTC_VECT(EDMAC0, 0xc00), INTC_VECT(EDMAC1, 0xc20),
+ INTC_VECT(EDMAC2, 0xc40),
+ INTC_VECT(SIOF0, 0xe00), INTC_VECT(SIOF0, 0xe20),
+ INTC_VECT(SIOF0, 0xe40), INTC_VECT(SIOF0, 0xe60),
+ INTC_VECT(SIOF1, 0xe80), INTC_VECT(SIOF1, 0xea0),
+ INTC_VECT(SIOF1, 0xec0), INTC_VECT(SIOF1, 0xee0),
+ INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420),
+ INTC_VECT(TMU2, 0x440),
+ INTC_VECT(RTC, 0x480), INTC_VECT(RTC, 0x4a0),
+ INTC_VECT(RTC, 0x4c0),
+ INTC_VECT(WDT, 0x560),
+ INTC_VECT(REF, 0x580),
+};
+
+static struct intc_prio_reg prio_registers[] __initdata = {
+ { 0xfffffee2, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2, RTC } },
+ { 0xfffffee4, 0, 16, 4, /* IPRB */ { WDT, REF, 0, 0 } },
+ { 0xa4000016, 0, 16, 4, /* IPRC */ { IRQ3, IRQ2, IRQ1, IRQ0 } },
+ { 0xa4000018, 0, 16, 4, /* IPRD */ { 0, 0, IRQ5, IRQ4 } },
+ { 0xa400001a, 0, 16, 4, /* IPRE */ { DMAC1, SCIF0, SCIF1 } },
+ { 0xa4080000, 0, 16, 4, /* IPRF */ { IPSEC, DMAC2 } },
+ { 0xa4080002, 0, 16, 4, /* IPRG */ { EDMAC0, EDMAC1, EDMAC2 } },
+ { 0xa4080004, 0, 16, 4, /* IPRH */ { 0, 0, 0, SIOF0 } },
+ { 0xa4080006, 0, 16, 4, /* IPRI */ { 0, 0, SIOF1 } },
+};
+
+static DECLARE_INTC_DESC(intc_desc, "sh7710", vectors, NULL,
+ NULL, prio_registers, NULL);
+
+static struct resource rtc_resources[] = {
+ [0] = {
+ .start = 0xa413fec0,
+ .end = 0xa413fec0 + 0x1e,
+ .flags = IORESOURCE_IO,
+ },
+ [1] = {
+ .start = 20,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct sh_rtc_platform_info rtc_info = {
+ .capabilities = RTC_CAP_4_DIGIT_YEAR,
+};
+
+static struct platform_device rtc_device = {
+ .name = "sh-rtc",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(rtc_resources),
+ .resource = rtc_resources,
+ .dev = {
+ .platform_data = &rtc_info,
+ },
+};
+
+static struct plat_sci_port scif0_platform_data = {
+ .mapbase = 0xa4400000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_TE | SCSCR_RE | SCSCR_REIE |
+ SCSCR_CKE1 | SCSCR_CKE0,
+ .scbrr_algo_id = SCBRR_ALGO_2,
+ .type = PORT_SCIF,
+ .irqs = { 52, 52, 52, 52 },
+};
+
+static struct platform_device scif0_device = {
+ .name = "sh-sci",
+ .id = 0,
+ .dev = {
+ .platform_data = &scif0_platform_data,
+ },
+};
+
+static struct plat_sci_port scif1_platform_data = {
+ .mapbase = 0xa4410000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_TE | SCSCR_RE | SCSCR_REIE |
+ SCSCR_CKE1 | SCSCR_CKE0,
+ .scbrr_algo_id = SCBRR_ALGO_2,
+ .type = PORT_SCIF,
+ .irqs = { 56, 56, 56, 56 },
+};
+
+static struct platform_device scif1_device = {
+ .name = "sh-sci",
+ .id = 1,
+ .dev = {
+ .platform_data = &scif1_platform_data,
+ },
+};
+
+static struct sh_timer_config tmu0_platform_data = {
+ .channel_offset = 0x02,
+ .timer_bit = 0,
+ .clockevent_rating = 200,
+};
+
+static struct resource tmu0_resources[] = {
+ [0] = {
+ .start = 0xa412fe94,
+ .end = 0xa412fe9f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 16,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu0_device = {
+ .name = "sh_tmu",
+ .id = 0,
+ .dev = {
+ .platform_data = &tmu0_platform_data,
+ },
+ .resource = tmu0_resources,
+ .num_resources = ARRAY_SIZE(tmu0_resources),
+};
+
+static struct sh_timer_config tmu1_platform_data = {
+ .channel_offset = 0xe,
+ .timer_bit = 1,
+ .clocksource_rating = 200,
+};
+
+static struct resource tmu1_resources[] = {
+ [0] = {
+ .start = 0xa412fea0,
+ .end = 0xa412feab,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 17,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu1_device = {
+ .name = "sh_tmu",
+ .id = 1,
+ .dev = {
+ .platform_data = &tmu1_platform_data,
+ },
+ .resource = tmu1_resources,
+ .num_resources = ARRAY_SIZE(tmu1_resources),
+};
+
+static struct sh_timer_config tmu2_platform_data = {
+ .channel_offset = 0x1a,
+ .timer_bit = 2,
+};
+
+static struct resource tmu2_resources[] = {
+ [0] = {
+ .start = 0xa412feac,
+ .end = 0xa412feb5,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 18,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu2_device = {
+ .name = "sh_tmu",
+ .id = 2,
+ .dev = {
+ .platform_data = &tmu2_platform_data,
+ },
+ .resource = tmu2_resources,
+ .num_resources = ARRAY_SIZE(tmu2_resources),
+};
+
+static struct platform_device *sh7710_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &tmu0_device,
+ &tmu1_device,
+ &tmu2_device,
+ &rtc_device,
+};
+
+static int __init sh7710_devices_setup(void)
+{
+ return platform_add_devices(sh7710_devices,
+ ARRAY_SIZE(sh7710_devices));
+}
+arch_initcall(sh7710_devices_setup);
+
+static struct platform_device *sh7710_early_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &tmu0_device,
+ &tmu1_device,
+ &tmu2_device,
+};
+
+void __init plat_early_device_setup(void)
+{
+ early_platform_add_devices(sh7710_early_devices,
+ ARRAY_SIZE(sh7710_early_devices));
+}
+
+void __init plat_irq_setup(void)
+{
+ register_intc_controller(&intc_desc);
+ plat_irq_setup_sh3();
+}
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7720.c b/arch/sh/kernel/cpu/sh3/setup-sh7720.c
new file mode 100644
index 00000000..94920345
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh3/setup-sh7720.c
@@ -0,0 +1,470 @@
+/*
+ * Setup code for SH7720, SH7721.
+ *
+ * Copyright (C) 2007 Markus Brunner, Mark Jonas
+ * Copyright (C) 2009 Paul Mundt
+ *
+ * Based on arch/sh/kernel/cpu/sh4/setup-sh7750.c:
+ *
+ * Copyright (C) 2006 Paul Mundt
+ * Copyright (C) 2006 Jamie Lenehan
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/serial.h>
+#include <linux/io.h>
+#include <linux/serial_sci.h>
+#include <linux/sh_timer.h>
+#include <asm/rtc.h>
+#include <cpu/serial.h>
+
+static struct resource rtc_resources[] = {
+ [0] = {
+ .start = 0xa413fec0,
+ .end = 0xa413fec0 + 0x28 - 1,
+ .flags = IORESOURCE_IO,
+ },
+ [1] = {
+ /* Shared Period/Carry/Alarm IRQ */
+ .start = 20,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct sh_rtc_platform_info rtc_info = {
+ .capabilities = RTC_CAP_4_DIGIT_YEAR,
+};
+
+static struct platform_device rtc_device = {
+ .name = "sh-rtc",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(rtc_resources),
+ .resource = rtc_resources,
+ .dev = {
+ .platform_data = &rtc_info,
+ },
+};
+
+static struct plat_sci_port scif0_platform_data = {
+ .mapbase = 0xa4430000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE,
+ .scbrr_algo_id = SCBRR_ALGO_4,
+ .type = PORT_SCIF,
+ .irqs = { 80, 80, 80, 80 },
+ .ops = &sh7720_sci_port_ops,
+ .regtype = SCIx_SH7705_SCIF_REGTYPE,
+};
+
+static struct platform_device scif0_device = {
+ .name = "sh-sci",
+ .id = 0,
+ .dev = {
+ .platform_data = &scif0_platform_data,
+ },
+};
+
+static struct plat_sci_port scif1_platform_data = {
+ .mapbase = 0xa4438000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE,
+ .scbrr_algo_id = SCBRR_ALGO_4,
+ .type = PORT_SCIF,
+ .irqs = { 81, 81, 81, 81 },
+ .ops = &sh7720_sci_port_ops,
+ .regtype = SCIx_SH7705_SCIF_REGTYPE,
+};
+
+static struct platform_device scif1_device = {
+ .name = "sh-sci",
+ .id = 1,
+ .dev = {
+ .platform_data = &scif1_platform_data,
+ },
+};
+
+static struct resource usb_ohci_resources[] = {
+ [0] = {
+ .start = 0xA4428000,
+ .end = 0xA44280FF,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 67,
+ .end = 67,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static u64 usb_ohci_dma_mask = 0xffffffffUL;
+static struct platform_device usb_ohci_device = {
+ .name = "sh_ohci",
+ .id = -1,
+ .dev = {
+ .dma_mask = &usb_ohci_dma_mask,
+ .coherent_dma_mask = 0xffffffff,
+ },
+ .num_resources = ARRAY_SIZE(usb_ohci_resources),
+ .resource = usb_ohci_resources,
+};
+
+static struct resource usbf_resources[] = {
+ [0] = {
+ .name = "sh_udc",
+ .start = 0xA4420000,
+ .end = 0xA44200FF,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .name = "sh_udc",
+ .start = 65,
+ .end = 65,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device usbf_device = {
+ .name = "sh_udc",
+ .id = -1,
+ .dev = {
+ .dma_mask = NULL,
+ .coherent_dma_mask = 0xffffffff,
+ },
+ .num_resources = ARRAY_SIZE(usbf_resources),
+ .resource = usbf_resources,
+};
+
+static struct sh_timer_config cmt0_platform_data = {
+ .channel_offset = 0x10,
+ .timer_bit = 0,
+ .clockevent_rating = 125,
+ .clocksource_rating = 125,
+};
+
+static struct resource cmt0_resources[] = {
+ [0] = {
+ .start = 0x044a0010,
+ .end = 0x044a001b,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 104,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device cmt0_device = {
+ .name = "sh_cmt",
+ .id = 0,
+ .dev = {
+ .platform_data = &cmt0_platform_data,
+ },
+ .resource = cmt0_resources,
+ .num_resources = ARRAY_SIZE(cmt0_resources),
+};
+
+static struct sh_timer_config cmt1_platform_data = {
+ .channel_offset = 0x20,
+ .timer_bit = 1,
+};
+
+static struct resource cmt1_resources[] = {
+ [0] = {
+ .start = 0x044a0020,
+ .end = 0x044a002b,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 104,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device cmt1_device = {
+ .name = "sh_cmt",
+ .id = 1,
+ .dev = {
+ .platform_data = &cmt1_platform_data,
+ },
+ .resource = cmt1_resources,
+ .num_resources = ARRAY_SIZE(cmt1_resources),
+};
+
+static struct sh_timer_config cmt2_platform_data = {
+ .channel_offset = 0x30,
+ .timer_bit = 2,
+};
+
+static struct resource cmt2_resources[] = {
+ [0] = {
+ .start = 0x044a0030,
+ .end = 0x044a003b,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 104,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device cmt2_device = {
+ .name = "sh_cmt",
+ .id = 2,
+ .dev = {
+ .platform_data = &cmt2_platform_data,
+ },
+ .resource = cmt2_resources,
+ .num_resources = ARRAY_SIZE(cmt2_resources),
+};
+
+static struct sh_timer_config cmt3_platform_data = {
+ .channel_offset = 0x40,
+ .timer_bit = 3,
+};
+
+static struct resource cmt3_resources[] = {
+ [0] = {
+ .start = 0x044a0040,
+ .end = 0x044a004b,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 104,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device cmt3_device = {
+ .name = "sh_cmt",
+ .id = 3,
+ .dev = {
+ .platform_data = &cmt3_platform_data,
+ },
+ .resource = cmt3_resources,
+ .num_resources = ARRAY_SIZE(cmt3_resources),
+};
+
+static struct sh_timer_config cmt4_platform_data = {
+ .channel_offset = 0x50,
+ .timer_bit = 4,
+};
+
+static struct resource cmt4_resources[] = {
+ [0] = {
+ .start = 0x044a0050,
+ .end = 0x044a005b,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 104,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device cmt4_device = {
+ .name = "sh_cmt",
+ .id = 4,
+ .dev = {
+ .platform_data = &cmt4_platform_data,
+ },
+ .resource = cmt4_resources,
+ .num_resources = ARRAY_SIZE(cmt4_resources),
+};
+
+static struct sh_timer_config tmu0_platform_data = {
+ .channel_offset = 0x02,
+ .timer_bit = 0,
+ .clockevent_rating = 200,
+};
+
+static struct resource tmu0_resources[] = {
+ [0] = {
+ .start = 0xa412fe94,
+ .end = 0xa412fe9f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 16,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu0_device = {
+ .name = "sh_tmu",
+ .id = 0,
+ .dev = {
+ .platform_data = &tmu0_platform_data,
+ },
+ .resource = tmu0_resources,
+ .num_resources = ARRAY_SIZE(tmu0_resources),
+};
+
+static struct sh_timer_config tmu1_platform_data = {
+ .channel_offset = 0xe,
+ .timer_bit = 1,
+ .clocksource_rating = 200,
+};
+
+static struct resource tmu1_resources[] = {
+ [0] = {
+ .start = 0xa412fea0,
+ .end = 0xa412feab,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 17,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu1_device = {
+ .name = "sh_tmu",
+ .id = 1,
+ .dev = {
+ .platform_data = &tmu1_platform_data,
+ },
+ .resource = tmu1_resources,
+ .num_resources = ARRAY_SIZE(tmu1_resources),
+};
+
+static struct sh_timer_config tmu2_platform_data = {
+ .channel_offset = 0x1a,
+ .timer_bit = 2,
+};
+
+static struct resource tmu2_resources[] = {
+ [0] = {
+ .start = 0xa412feac,
+ .end = 0xa412feb5,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 18,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu2_device = {
+ .name = "sh_tmu",
+ .id = 2,
+ .dev = {
+ .platform_data = &tmu2_platform_data,
+ },
+ .resource = tmu2_resources,
+ .num_resources = ARRAY_SIZE(tmu2_resources),
+};
+
+static struct platform_device *sh7720_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &cmt0_device,
+ &cmt1_device,
+ &cmt2_device,
+ &cmt3_device,
+ &cmt4_device,
+ &tmu0_device,
+ &tmu1_device,
+ &tmu2_device,
+ &rtc_device,
+ &usb_ohci_device,
+ &usbf_device,
+};
+
+static int __init sh7720_devices_setup(void)
+{
+ return platform_add_devices(sh7720_devices,
+ ARRAY_SIZE(sh7720_devices));
+}
+arch_initcall(sh7720_devices_setup);
+
+static struct platform_device *sh7720_early_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &cmt0_device,
+ &cmt1_device,
+ &cmt2_device,
+ &cmt3_device,
+ &cmt4_device,
+ &tmu0_device,
+ &tmu1_device,
+ &tmu2_device,
+};
+
+void __init plat_early_device_setup(void)
+{
+ early_platform_add_devices(sh7720_early_devices,
+ ARRAY_SIZE(sh7720_early_devices));
+}
+
+enum {
+ UNUSED = 0,
+
+ /* interrupt sources */
+ TMU0, TMU1, TMU2, RTC,
+ WDT, REF_RCMI, SIM,
+ IRQ0, IRQ1, IRQ2, IRQ3,
+ USBF_SPD, TMU_SUNI, IRQ5, IRQ4,
+ DMAC1, LCDC, SSL,
+ ADC, DMAC2, USBFI, CMT,
+ SCIF0, SCIF1,
+ PINT07, PINT815, TPU, IIC,
+ SIOF0, SIOF1, MMC, PCC,
+ USBHI, AFEIF,
+ H_UDI,
+};
+
+static struct intc_vect vectors[] __initdata = {
+ /* IRQ0->5 are handled in setup-sh3.c */
+ INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420),
+ INTC_VECT(TMU2, 0x440), INTC_VECT(RTC, 0x480),
+ INTC_VECT(RTC, 0x4a0), INTC_VECT(RTC, 0x4c0),
+ INTC_VECT(SIM, 0x4e0), INTC_VECT(SIM, 0x500),
+ INTC_VECT(SIM, 0x520), INTC_VECT(SIM, 0x540),
+ INTC_VECT(WDT, 0x560), INTC_VECT(REF_RCMI, 0x580),
+ /* H_UDI cannot be masked */ INTC_VECT(TMU_SUNI, 0x6c0),
+ INTC_VECT(USBF_SPD, 0x6e0), INTC_VECT(DMAC1, 0x800),
+ INTC_VECT(DMAC1, 0x820), INTC_VECT(DMAC1, 0x840),
+ INTC_VECT(DMAC1, 0x860), INTC_VECT(LCDC, 0x900),
+#if defined(CONFIG_CPU_SUBTYPE_SH7720)
+ INTC_VECT(SSL, 0x980),
+#endif
+ INTC_VECT(USBFI, 0xa20), INTC_VECT(USBFI, 0xa40),
+ INTC_VECT(USBHI, 0xa60),
+ INTC_VECT(DMAC2, 0xb80), INTC_VECT(DMAC2, 0xba0),
+ INTC_VECT(ADC, 0xbe0), INTC_VECT(SCIF0, 0xc00),
+ INTC_VECT(SCIF1, 0xc20), INTC_VECT(PINT07, 0xc80),
+ INTC_VECT(PINT815, 0xca0), INTC_VECT(SIOF0, 0xd00),
+ INTC_VECT(SIOF1, 0xd20), INTC_VECT(TPU, 0xd80),
+ INTC_VECT(TPU, 0xda0), INTC_VECT(TPU, 0xdc0),
+ INTC_VECT(TPU, 0xde0), INTC_VECT(IIC, 0xe00),
+ INTC_VECT(MMC, 0xe80), INTC_VECT(MMC, 0xea0),
+ INTC_VECT(MMC, 0xec0), INTC_VECT(MMC, 0xee0),
+ INTC_VECT(CMT, 0xf00), INTC_VECT(PCC, 0xf60),
+ INTC_VECT(AFEIF, 0xfe0),
+};
+
+static struct intc_prio_reg prio_registers[] __initdata = {
+ { 0xA414FEE2UL, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2, RTC } },
+ { 0xA414FEE4UL, 0, 16, 4, /* IPRB */ { WDT, REF_RCMI, SIM, 0 } },
+ { 0xA4140016UL, 0, 16, 4, /* IPRC */ { IRQ3, IRQ2, IRQ1, IRQ0 } },
+ { 0xA4140018UL, 0, 16, 4, /* IPRD */ { USBF_SPD, TMU_SUNI, IRQ5, IRQ4 } },
+ { 0xA414001AUL, 0, 16, 4, /* IPRE */ { DMAC1, 0, LCDC, SSL } },
+ { 0xA4080000UL, 0, 16, 4, /* IPRF */ { ADC, DMAC2, USBFI, CMT } },
+ { 0xA4080002UL, 0, 16, 4, /* IPRG */ { SCIF0, SCIF1, 0, 0 } },
+ { 0xA4080004UL, 0, 16, 4, /* IPRH */ { PINT07, PINT815, TPU, IIC } },
+ { 0xA4080006UL, 0, 16, 4, /* IPRI */ { SIOF0, SIOF1, MMC, PCC } },
+ { 0xA4080008UL, 0, 16, 4, /* IPRJ */ { 0, USBHI, 0, AFEIF } },
+};
+
+static DECLARE_INTC_DESC(intc_desc, "sh7720", vectors, NULL,
+ NULL, prio_registers, NULL);
+
+void __init plat_irq_setup(void)
+{
+ register_intc_controller(&intc_desc);
+ plat_irq_setup_sh3();
+}
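
(Note on the file above: the sh7720_early_devices array is handed to early_platform_add_devices() so the CMT/TMU channels — and the SCIFs, for an early console — can be probed by the early platform code before regular initcalls run; the same platform_device structs are then registered on the platform bus normally through sh7720_devices_setup().)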
diff --git a/arch/sh/kernel/cpu/sh3/swsusp.S b/arch/sh/kernel/cpu/sh3/swsusp.S
new file mode 100644
index 00000000..01145426
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh3/swsusp.S
@@ -0,0 +1,147 @@
+/*
+ * arch/sh/kernel/cpu/sh3/swsusp.S
+ *
+ * Copyright (C) 2009 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/sys.h>
+#include <linux/errno.h>
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+#include <asm/page.h>
+
+#define k0 r0
+#define k1 r1
+#define k2 r2
+#define k3 r3
+#define k4 r4
+
+! swsusp_arch_resume()
+! - copy restore_pblist pages
+! - restore registers from swsusp_arch_regs_cpu0
+
+ENTRY(swsusp_arch_resume)
+ mov.l 1f, r15
+ mov.l 2f, r4
+ mov.l @r4, r4
+
+swsusp_copy_loop:
+ mov r4, r0
+ cmp/eq #0, r0
+ bt swsusp_restore_regs
+
+ mov.l @(PBE_ADDRESS, r4), r2
+ mov.l @(PBE_ORIG_ADDRESS, r4), r5
+
+ mov #(PAGE_SIZE >> 10), r3
+ shll8 r3
+ shlr2 r3 /* PAGE_SIZE / 16 */
+swsusp_copy_page:
+ dt r3
+ mov.l @r2+,r1 /* 16n+0 */
+ mov.l r1,@r5
+ add #4,r5
+ mov.l @r2+,r1 /* 16n+4 */
+ mov.l r1,@r5
+ add #4,r5
+ mov.l @r2+,r1 /* 16n+8 */
+ mov.l r1,@r5
+ add #4,r5
+ mov.l @r2+,r1 /* 16n+12 */
+ mov.l r1,@r5
+ bf/s swsusp_copy_page
+ add #4,r5
+
+ bra swsusp_copy_loop
+ mov.l @(PBE_NEXT, r4), r4
+
+swsusp_restore_regs:
+ ! BL=0: R7->R0 is bank0
+ mov.l 3f, r8
+ mov.l 4f, r5
+ jsr @r5
+ nop
+
+ ! BL=1: R7->R0 is bank1
+ lds k2, pr
+ ldc k3, ssr
+
+ mov.l @r15+, r0
+ mov.l @r15+, r1
+ mov.l @r15+, r2
+ mov.l @r15+, r3
+ mov.l @r15+, r4
+ mov.l @r15+, r5
+ mov.l @r15+, r6
+ mov.l @r15+, r7
+
+ rte
+ nop
+ ! BL=0: R7->R0 is bank0
+
+ .align 2
+1: .long swsusp_arch_regs_cpu0
+2: .long restore_pblist
+3: .long 0x20000000 ! RB=1
+4: .long restore_regs
+
+! swsusp_arch_suspend()
+! - prepare pc for resume, return from function without swsusp_save on resume
+! - save registers in swsusp_arch_regs_cpu0
+! - call swsusp_save write suspend image
+
+ENTRY(swsusp_arch_suspend)
+ sts pr, r0 ! save pr in r0
+ mov r15, r2 ! save sp in r2
+ mov r8, r5 ! save r8 in r5
+ stc sr, r1
+ ldc r1, ssr ! save sr in ssr
+ mov.l 1f, r1
+ ldc r1, spc ! setup pc value for resuming
+ mov.l 5f, r15 ! use swsusp_arch_regs_cpu0 as stack
+ mov.l 6f, r3
+ add r3, r15 ! save from top of structure
+
+ ! BL=0: R7->R0 is bank0
+ mov.l 2f, r3 ! get new SR value for bank1
+ mov #0, r4
+ mov.l 7f, r1
+ jsr @r1 ! switch to bank1 and save bank1 r7->r0
+ not r4, r4
+
+ ! BL=1: R7->R0 is bank1
+ stc r2_bank, k0 ! fetch old sp from r2_bank0
+ mov.l 3f, k4 ! SR bits to clear in k4
+ mov.l 8f, k1
+ jsr @k1 ! switch to bank0 and save all regs
+ stc r0_bank, k3 ! fetch old pr from r0_bank0
+
+ ! BL=0: R7->R0 is bank0
+ mov r2, r15 ! restore old sp
+ mov r5, r8 ! restore old r8
+ stc ssr, r1
+ ldc r1, sr ! restore old sr
+ lds r0, pr ! restore old pr
+ mov.l 4f, r0
+ jmp @r0
+ nop
+
+swsusp_call_save:
+ mov r2, r15 ! restore old sp
+ mov r5, r8 ! restore old r8
+ lds r0, pr ! restore old pr
+ rts
+ mov #0, r0
+
+ .align 2
+1: .long swsusp_call_save
+2: .long 0x20000000 ! RB=1
+3: .long 0xdfffffff ! RB=0
+4: .long swsusp_save
+5: .long swsusp_arch_regs_cpu0
+6: .long SWSUSP_ARCH_REGS_SIZE
+7: .long save_low_regs
+8: .long save_regs
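
For reference, a rough C equivalent of the swsusp_copy_loop above (an editor's sketch, not part of the patch; the pbe field names are taken from include/linux/suspend.h):

	struct pbe *p;

	/* Copy each saved page image back over the page it came from. */
	for (p = restore_pblist; p != NULL; p = p->next)
		memcpy(p->orig_address, p->address, PAGE_SIZE);

That is, each backup page (pbe->address) is written back to its original location (pbe->orig_address) before the saved register state is reloaded from swsusp_arch_regs_cpu0.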
diff --git a/arch/sh/kernel/cpu/sh4/Makefile b/arch/sh/kernel/cpu/sh4/Makefile
new file mode 100644
index 00000000..3a1dbc70
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4/Makefile
@@ -0,0 +1,36 @@
+#
+# Makefile for the Linux/SuperH SH-4 backends.
+#
+
+obj-y := probe.o common.o
+common-y += $(addprefix ../sh3/, entry.o ex.o)
+
+obj-$(CONFIG_HIBERNATION) += $(addprefix ../sh3/, swsusp.o)
+obj-$(CONFIG_SH_FPU) += fpu.o softfloat.o
+obj-$(CONFIG_SH_STORE_QUEUES) += sq.o
+
+# Perf events
+perf-$(CONFIG_CPU_SUBTYPE_SH7750) := perf_event.o
+perf-$(CONFIG_CPU_SUBTYPE_SH7750S) := perf_event.o
+perf-$(CONFIG_CPU_SUBTYPE_SH7091) := perf_event.o
+
+# CPU subtype setup
+obj-$(CONFIG_CPU_SUBTYPE_SH7750) += setup-sh7750.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7750R) += setup-sh7750.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7750S) += setup-sh7750.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7091) += setup-sh7750.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7751) += setup-sh7750.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7751R) += setup-sh7750.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7760) += setup-sh7760.o
+obj-$(CONFIG_CPU_SUBTYPE_SH4_202) += setup-sh4-202.o
+
+# Primary on-chip clocks (common)
+ifndef CONFIG_CPU_SH4A
+clock-$(CONFIG_CPU_SH4) := clock-sh4.o
+endif
+
+# Additional clocks by subtype
+clock-$(CONFIG_CPU_SUBTYPE_SH4_202) += clock-sh4-202.o
+
+obj-y += $(clock-y)
+obj-$(CONFIG_PERF_EVENTS) += $(perf-y)
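
A brief note on the composite object above: $(addprefix ../sh3/, entry.o ex.o) expands to ../sh3/entry.o ../sh3/ex.o, so common.o is linked from the SH-3 low-level exception entry and vector code, which the SH-4 family reuses rather than duplicating.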
diff --git a/arch/sh/kernel/cpu/sh4/clock-sh4-202.c b/arch/sh/kernel/cpu/sh4/clock-sh4-202.c
new file mode 100644
index 00000000..4b5bab5f
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4/clock-sh4-202.c
@@ -0,0 +1,177 @@
+/*
+ * arch/sh/kernel/cpu/sh4/clock-sh4-202.c
+ *
+ * Additional SH4-202 support for the clock framework
+ *
+ * Copyright (C) 2005 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/clkdev.h>
+#include <asm/clock.h>
+#include <asm/freq.h>
+
+#define CPG2_FRQCR3 0xfe0a0018
+
+static int frqcr3_divisors[] = { 1, 2, 3, 4, 6, 8, 16 };
+static int frqcr3_values[] = { 0, 1, 2, 3, 4, 5, 6 };
+
+static unsigned long emi_clk_recalc(struct clk *clk)
+{
+ int idx = __raw_readl(CPG2_FRQCR3) & 0x0007;
+ return clk->parent->rate / frqcr3_divisors[idx];
+}
+
+static inline int frqcr3_lookup(struct clk *clk, unsigned long rate)
+{
+ int divisor = clk->parent->rate / rate;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(frqcr3_divisors); i++)
+ if (frqcr3_divisors[i] == divisor)
+ return frqcr3_values[i];
+
+ /* Safe fallback */
+ return 5;
+}
+
+static struct sh_clk_ops sh4202_emi_clk_ops = {
+ .recalc = emi_clk_recalc,
+};
+
+static struct clk sh4202_emi_clk = {
+ .flags = CLK_ENABLE_ON_INIT,
+ .ops = &sh4202_emi_clk_ops,
+};
+
+static unsigned long femi_clk_recalc(struct clk *clk)
+{
+ int idx = (__raw_readl(CPG2_FRQCR3) >> 3) & 0x0007;
+ return clk->parent->rate / frqcr3_divisors[idx];
+}
+
+static struct sh_clk_ops sh4202_femi_clk_ops = {
+ .recalc = femi_clk_recalc,
+};
+
+static struct clk sh4202_femi_clk = {
+ .flags = CLK_ENABLE_ON_INIT,
+ .ops = &sh4202_femi_clk_ops,
+};
+
+static void shoc_clk_init(struct clk *clk)
+{
+ int i;
+
+ /*
+ * For some reason, the shoc_clk seems to be set to some really
+ * insane value at boot (values outside of the allowable frequency
+ * range for instance). We deal with this by scaling it back down
+ * to something sensible just in case.
+ *
+ * Start scaling from the high end down until we find something
+ * that passes rate verification..
+ */
+ for (i = 0; i < ARRAY_SIZE(frqcr3_divisors); i++) {
+ int divisor = frqcr3_divisors[i];
+
+ if (clk->ops->set_rate(clk, clk->parent->rate / divisor) == 0)
+ break;
+ }
+
+ WARN_ON(i == ARRAY_SIZE(frqcr3_divisors)); /* Undefined clock */
+}
+
+static unsigned long shoc_clk_recalc(struct clk *clk)
+{
+ int idx = (__raw_readl(CPG2_FRQCR3) >> 6) & 0x0007;
+ return clk->parent->rate / frqcr3_divisors[idx];
+}
+
+static int shoc_clk_verify_rate(struct clk *clk, unsigned long rate)
+{
+ struct clk *bclk = clk_get(NULL, "bus_clk");
+ unsigned long bclk_rate = clk_get_rate(bclk);
+
+ clk_put(bclk);
+
+ if (rate > bclk_rate)
+ return 1;
+ if (rate > 66000000)
+ return 1;
+
+ return 0;
+}
+
+static int shoc_clk_set_rate(struct clk *clk, unsigned long rate)
+{
+ unsigned long frqcr3;
+ unsigned int tmp;
+
+ /* Make sure we have something sensible to switch to */
+ if (shoc_clk_verify_rate(clk, rate) != 0)
+ return -EINVAL;
+
+ tmp = frqcr3_lookup(clk, rate);
+
+ frqcr3 = __raw_readl(CPG2_FRQCR3);
+ frqcr3 &= ~(0x0007 << 6);
+ frqcr3 |= tmp << 6;
+ __raw_writel(frqcr3, CPG2_FRQCR3);
+
+ clk->rate = clk->parent->rate / frqcr3_divisors[tmp];
+
+ return 0;
+}
+
+static struct sh_clk_ops sh4202_shoc_clk_ops = {
+ .init = shoc_clk_init,
+ .recalc = shoc_clk_recalc,
+ .set_rate = shoc_clk_set_rate,
+};
+
+static struct clk sh4202_shoc_clk = {
+ .flags = CLK_ENABLE_ON_INIT,
+ .ops = &sh4202_shoc_clk_ops,
+};
+
+static struct clk *sh4202_onchip_clocks[] = {
+ &sh4202_emi_clk,
+ &sh4202_femi_clk,
+ &sh4202_shoc_clk,
+};
+
+static struct clk_lookup lookups[] = {
+ /* main clocks */
+ CLKDEV_CON_ID("emi_clk", &sh4202_emi_clk),
+ CLKDEV_CON_ID("femi_clk", &sh4202_femi_clk),
+ CLKDEV_CON_ID("shoc_clk", &sh4202_shoc_clk),
+};
+
+int __init arch_clk_init(void)
+{
+ struct clk *clk;
+ int i, ret = 0;
+
+ cpg_clk_init();
+
+ clk = clk_get(NULL, "master_clk");
+ for (i = 0; i < ARRAY_SIZE(sh4202_onchip_clocks); i++) {
+ struct clk *clkp = sh4202_onchip_clocks[i];
+
+ clkp->parent = clk;
+ ret |= clk_register(clkp);
+ }
+
+ clk_put(clk);
+
+ clkdev_add_table(lookups, ARRAY_SIZE(lookups));
+
+ return ret;
+}
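
With the clkdev table above in place, consumers can look these clocks up by connection id. A minimal usage sketch (hypothetical caller, standard clk API):

	struct clk *shoc = clk_get(NULL, "shoc_clk");

	if (!IS_ERR(shoc)) {
		/* shoc_clk_set_rate() rejects rates above bus_clk or 66 MHz
		 * and programs the matching FRQCR3 divisor, falling back to
		 * divide-by-8 when there is no exact match.
		 */
		clk_set_rate(shoc, 33000000);
		clk_put(shoc);
	}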
diff --git a/arch/sh/kernel/cpu/sh4/clock-sh4.c b/arch/sh/kernel/cpu/sh4/clock-sh4.c
new file mode 100644
index 00000000..99e5ec8b
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4/clock-sh4.c
@@ -0,0 +1,80 @@
+/*
+ * arch/sh/kernel/cpu/sh4/clock-sh4.c
+ *
+ * Generic SH-4 support for the clock framework
+ *
+ * Copyright (C) 2005 Paul Mundt
+ *
+ * FRQCR parsing hacked out of arch/sh/kernel/time.c
+ *
+ * Copyright (C) 1999 Tetsuya Okada & Niibe Yutaka
+ * Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
+ * Copyright (C) 2002, 2003, 2004 Paul Mundt
+ * Copyright (C) 2002 M. R. Brown <mrbrown@linux-sh.org>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <asm/clock.h>
+#include <asm/freq.h>
+#include <asm/io.h>
+
+static int ifc_divisors[] = { 1, 2, 3, 4, 6, 8, 1, 1 };
+#define bfc_divisors ifc_divisors /* Same */
+static int pfc_divisors[] = { 2, 3, 4, 6, 8, 2, 2, 2 };
+
+static void master_clk_init(struct clk *clk)
+{
+ clk->rate *= pfc_divisors[__raw_readw(FRQCR) & 0x0007];
+}
+
+static struct sh_clk_ops sh4_master_clk_ops = {
+ .init = master_clk_init,
+};
+
+static unsigned long module_clk_recalc(struct clk *clk)
+{
+ int idx = (__raw_readw(FRQCR) & 0x0007);
+ return clk->parent->rate / pfc_divisors[idx];
+}
+
+static struct sh_clk_ops sh4_module_clk_ops = {
+ .recalc = module_clk_recalc,
+};
+
+static unsigned long bus_clk_recalc(struct clk *clk)
+{
+ int idx = (__raw_readw(FRQCR) >> 3) & 0x0007;
+ return clk->parent->rate / bfc_divisors[idx];
+}
+
+static struct sh_clk_ops sh4_bus_clk_ops = {
+ .recalc = bus_clk_recalc,
+};
+
+static unsigned long cpu_clk_recalc(struct clk *clk)
+{
+ int idx = (__raw_readw(FRQCR) >> 6) & 0x0007;
+ return clk->parent->rate / ifc_divisors[idx];
+}
+
+static struct sh_clk_ops sh4_cpu_clk_ops = {
+ .recalc = cpu_clk_recalc,
+};
+
+static struct sh_clk_ops *sh4_clk_ops[] = {
+ &sh4_master_clk_ops,
+ &sh4_module_clk_ops,
+ &sh4_bus_clk_ops,
+ &sh4_cpu_clk_ops,
+};
+
+void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
+{
+ if (idx < ARRAY_SIZE(sh4_clk_ops))
+ *ops = sh4_clk_ops[idx];
+}
+
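As a worked example of the recalc hooks above (hypothetical numbers): with a 132 MHz master clock, an IFC field of 0 selects divide-by-1 so cpu_clk_recalc() returns 132 MHz, a BFC field of 1 selects divide-by-2 for a 66 MHz bus clock, and a PFC field of 2 selects divide-by-4 for a 33 MHz peripheral (module) clock. master_clk_init() runs the same table in reverse, scaling the boot-time peripheral rate up by the PFC divisor to recover the master clock.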
diff --git a/arch/sh/kernel/cpu/sh4/fpu.c b/arch/sh/kernel/cpu/sh4/fpu.c
new file mode 100644
index 00000000..69ab4d3c
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4/fpu.c
@@ -0,0 +1,429 @@
+/*
+ * Save/restore floating point context for signal handlers.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
+ * Copyright (C) 2006 ST Microelectronics Ltd. (denorm support)
+ *
+ * FIXME! These routines have not been tested for big endian case.
+ */
+#include <linux/sched.h>
+#include <linux/signal.h>
+#include <linux/io.h>
+#include <cpu/fpu.h>
+#include <asm/processor.h>
+#include <asm/fpu.h>
+#include <asm/traps.h>
+
+/* The PR (precision) bit in the FP Status Register must be clear when
+ * an frchg instruction is executed, otherwise the instruction is undefined.
+ * Executing frchg with PR set causes a trap on some SH4 implementations.
+ */
+
+#define FPSCR_RCHG 0x00000000
+extern unsigned long long float64_div(unsigned long long a,
+ unsigned long long b);
+extern unsigned long int float32_div(unsigned long int a, unsigned long int b);
+extern unsigned long long float64_mul(unsigned long long a,
+ unsigned long long b);
+extern unsigned long int float32_mul(unsigned long int a, unsigned long int b);
+extern unsigned long long float64_add(unsigned long long a,
+ unsigned long long b);
+extern unsigned long int float32_add(unsigned long int a, unsigned long int b);
+extern unsigned long long float64_sub(unsigned long long a,
+ unsigned long long b);
+extern unsigned long int float32_sub(unsigned long int a, unsigned long int b);
+extern unsigned long int float64_to_float32(unsigned long long a);
+static unsigned int fpu_exception_flags;
+
+/*
+ * Save FPU registers onto task structure.
+ */
+void save_fpu(struct task_struct *tsk)
+{
+ unsigned long dummy;
+
+ enable_fpu();
+ asm volatile ("sts.l fpul, @-%0\n\t"
+ "sts.l fpscr, @-%0\n\t"
+ "lds %2, fpscr\n\t"
+ "frchg\n\t"
+ "fmov.s fr15, @-%0\n\t"
+ "fmov.s fr14, @-%0\n\t"
+ "fmov.s fr13, @-%0\n\t"
+ "fmov.s fr12, @-%0\n\t"
+ "fmov.s fr11, @-%0\n\t"
+ "fmov.s fr10, @-%0\n\t"
+ "fmov.s fr9, @-%0\n\t"
+ "fmov.s fr8, @-%0\n\t"
+ "fmov.s fr7, @-%0\n\t"
+ "fmov.s fr6, @-%0\n\t"
+ "fmov.s fr5, @-%0\n\t"
+ "fmov.s fr4, @-%0\n\t"
+ "fmov.s fr3, @-%0\n\t"
+ "fmov.s fr2, @-%0\n\t"
+ "fmov.s fr1, @-%0\n\t"
+ "fmov.s fr0, @-%0\n\t"
+ "frchg\n\t"
+ "fmov.s fr15, @-%0\n\t"
+ "fmov.s fr14, @-%0\n\t"
+ "fmov.s fr13, @-%0\n\t"
+ "fmov.s fr12, @-%0\n\t"
+ "fmov.s fr11, @-%0\n\t"
+ "fmov.s fr10, @-%0\n\t"
+ "fmov.s fr9, @-%0\n\t"
+ "fmov.s fr8, @-%0\n\t"
+ "fmov.s fr7, @-%0\n\t"
+ "fmov.s fr6, @-%0\n\t"
+ "fmov.s fr5, @-%0\n\t"
+ "fmov.s fr4, @-%0\n\t"
+ "fmov.s fr3, @-%0\n\t"
+ "fmov.s fr2, @-%0\n\t"
+ "fmov.s fr1, @-%0\n\t"
+ "fmov.s fr0, @-%0\n\t"
+ "lds %3, fpscr\n\t":"=r" (dummy)
+ :"0"((char *)(&tsk->thread.xstate->hardfpu.status)),
+ "r"(FPSCR_RCHG), "r"(FPSCR_INIT)
+ :"memory");
+
+ disable_fpu();
+}
+
+void restore_fpu(struct task_struct *tsk)
+{
+ unsigned long dummy;
+
+ enable_fpu();
+ asm volatile ("lds %2, fpscr\n\t"
+ "fmov.s @%0+, fr0\n\t"
+ "fmov.s @%0+, fr1\n\t"
+ "fmov.s @%0+, fr2\n\t"
+ "fmov.s @%0+, fr3\n\t"
+ "fmov.s @%0+, fr4\n\t"
+ "fmov.s @%0+, fr5\n\t"
+ "fmov.s @%0+, fr6\n\t"
+ "fmov.s @%0+, fr7\n\t"
+ "fmov.s @%0+, fr8\n\t"
+ "fmov.s @%0+, fr9\n\t"
+ "fmov.s @%0+, fr10\n\t"
+ "fmov.s @%0+, fr11\n\t"
+ "fmov.s @%0+, fr12\n\t"
+ "fmov.s @%0+, fr13\n\t"
+ "fmov.s @%0+, fr14\n\t"
+ "fmov.s @%0+, fr15\n\t"
+ "frchg\n\t"
+ "fmov.s @%0+, fr0\n\t"
+ "fmov.s @%0+, fr1\n\t"
+ "fmov.s @%0+, fr2\n\t"
+ "fmov.s @%0+, fr3\n\t"
+ "fmov.s @%0+, fr4\n\t"
+ "fmov.s @%0+, fr5\n\t"
+ "fmov.s @%0+, fr6\n\t"
+ "fmov.s @%0+, fr7\n\t"
+ "fmov.s @%0+, fr8\n\t"
+ "fmov.s @%0+, fr9\n\t"
+ "fmov.s @%0+, fr10\n\t"
+ "fmov.s @%0+, fr11\n\t"
+ "fmov.s @%0+, fr12\n\t"
+ "fmov.s @%0+, fr13\n\t"
+ "fmov.s @%0+, fr14\n\t"
+ "fmov.s @%0+, fr15\n\t"
+ "frchg\n\t"
+ "lds.l @%0+, fpscr\n\t"
+ "lds.l @%0+, fpul\n\t"
+ :"=r" (dummy)
+ :"0" (tsk->thread.xstate), "r" (FPSCR_RCHG)
+ :"memory");
+ disable_fpu();
+}
+
+/**
+ * denormal_to_double - Given a denormalized single-precision value,
+ *                      store the equivalent double-precision bit pattern
+ *
+ * @fpu: Pointer to sh_fpu_hard structure
+ * @n: Index to FP register
+ */
+static void denormal_to_double(struct sh_fpu_hard_struct *fpu, int n)
+{
+ unsigned long du, dl;
+ unsigned long x = fpu->fpul;
+ int exp = 1023 - 126;
+
+ if (x != 0 && (x & 0x7f800000) == 0) {
+ du = (x & 0x80000000);
+ while ((x & 0x00800000) == 0) {
+ x <<= 1;
+ exp--;
+ }
+ x &= 0x007fffff;
+ du |= (exp << 20) | (x >> 3);
+ dl = x << 29;
+
+ fpu->fp_regs[n] = du;
+ fpu->fp_regs[n + 1] = dl;
+ }
+}
+
+/**
+ * ieee_fpe_handler - Handle denormalized number exception
+ *
+ * @regs: Pointer to register structure
+ *
+ * Returns 1 when it's handled (should not cause exception).
+ */
+static int ieee_fpe_handler(struct pt_regs *regs)
+{
+ unsigned short insn = *(unsigned short *)regs->pc;
+ unsigned short finsn;
+ unsigned long nextpc;
+ int nib[4] = {
+ (insn >> 12) & 0xf,
+ (insn >> 8) & 0xf,
+ (insn >> 4) & 0xf,
+ insn & 0xf
+ };
+
+ if (nib[0] == 0xb || (nib[0] == 0x4 && nib[2] == 0x0 && nib[3] == 0xb))
+ regs->pr = regs->pc + 4; /* bsr & jsr */
+
+ if (nib[0] == 0xa || nib[0] == 0xb) {
+ /* bra & bsr */
+ nextpc = regs->pc + 4 + ((short)((insn & 0xfff) << 4) >> 3);
+ finsn = *(unsigned short *)(regs->pc + 2);
+ } else if (nib[0] == 0x8 && nib[1] == 0xd) {
+ /* bt/s */
+ if (regs->sr & 1)
+ nextpc = regs->pc + 4 + ((char)(insn & 0xff) << 1);
+ else
+ nextpc = regs->pc + 4;
+ finsn = *(unsigned short *)(regs->pc + 2);
+ } else if (nib[0] == 0x8 && nib[1] == 0xf) {
+ /* bf/s */
+ if (regs->sr & 1)
+ nextpc = regs->pc + 4;
+ else
+ nextpc = regs->pc + 4 + ((char)(insn & 0xff) << 1);
+ finsn = *(unsigned short *)(regs->pc + 2);
+ } else if (nib[0] == 0x4 && nib[3] == 0xb &&
+ (nib[2] == 0x0 || nib[2] == 0x2)) {
+ /* jmp & jsr */
+ nextpc = regs->regs[nib[1]];
+ finsn = *(unsigned short *)(regs->pc + 2);
+ } else if (nib[0] == 0x0 && nib[3] == 0x3 &&
+ (nib[2] == 0x0 || nib[2] == 0x2)) {
+ /* braf & bsrf */
+ nextpc = regs->pc + 4 + regs->regs[nib[1]];
+ finsn = *(unsigned short *)(regs->pc + 2);
+ } else if (insn == 0x000b) {
+ /* rts */
+ nextpc = regs->pr;
+ finsn = *(unsigned short *)(regs->pc + 2);
+ } else {
+ nextpc = regs->pc + instruction_size(insn);
+ finsn = insn;
+ }
+
+ if ((finsn & 0xf1ff) == 0xf0ad) {
+ /* fcnvsd */
+ struct task_struct *tsk = current;
+
+ if ((tsk->thread.xstate->hardfpu.fpscr & FPSCR_CAUSE_ERROR))
+ /* FPU error */
+ denormal_to_double(&tsk->thread.xstate->hardfpu,
+ (finsn >> 8) & 0xf);
+ else
+ return 0;
+
+ regs->pc = nextpc;
+ return 1;
+ } else if ((finsn & 0xf00f) == 0xf002) {
+ /* fmul */
+ struct task_struct *tsk = current;
+ int fpscr;
+ int n, m, prec;
+ unsigned int hx, hy;
+
+ n = (finsn >> 8) & 0xf;
+ m = (finsn >> 4) & 0xf;
+ hx = tsk->thread.xstate->hardfpu.fp_regs[n];
+ hy = tsk->thread.xstate->hardfpu.fp_regs[m];
+ fpscr = tsk->thread.xstate->hardfpu.fpscr;
+ prec = fpscr & FPSCR_DBL_PRECISION;
+
+ if ((fpscr & FPSCR_CAUSE_ERROR)
+ && (prec && ((hx & 0x7fffffff) < 0x00100000
+ || (hy & 0x7fffffff) < 0x00100000))) {
+ long long llx, lly;
+
+ /* FPU error because of denormal (doubles) */
+ llx = ((long long)hx << 32)
+ | tsk->thread.xstate->hardfpu.fp_regs[n + 1];
+ lly = ((long long)hy << 32)
+ | tsk->thread.xstate->hardfpu.fp_regs[m + 1];
+ llx = float64_mul(llx, lly);
+ tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32;
+ tsk->thread.xstate->hardfpu.fp_regs[n + 1] = llx & 0xffffffff;
+ } else if ((fpscr & FPSCR_CAUSE_ERROR)
+ && (!prec && ((hx & 0x7fffffff) < 0x00800000
+ || (hy & 0x7fffffff) < 0x00800000))) {
+ /* FPU error because of denormal (floats) */
+ hx = float32_mul(hx, hy);
+ tsk->thread.xstate->hardfpu.fp_regs[n] = hx;
+ } else
+ return 0;
+
+ regs->pc = nextpc;
+ return 1;
+ } else if ((finsn & 0xf00e) == 0xf000) {
+ /* fadd, fsub */
+ struct task_struct *tsk = current;
+ int fpscr;
+ int n, m, prec;
+ unsigned int hx, hy;
+
+ n = (finsn >> 8) & 0xf;
+ m = (finsn >> 4) & 0xf;
+ hx = tsk->thread.xstate->hardfpu.fp_regs[n];
+ hy = tsk->thread.xstate->hardfpu.fp_regs[m];
+ fpscr = tsk->thread.xstate->hardfpu.fpscr;
+ prec = fpscr & FPSCR_DBL_PRECISION;
+
+ if ((fpscr & FPSCR_CAUSE_ERROR)
+ && (prec && ((hx & 0x7fffffff) < 0x00100000
+ || (hy & 0x7fffffff) < 0x00100000))) {
+ long long llx, lly;
+
+ /* FPU error because of denormal (doubles) */
+ llx = ((long long)hx << 32)
+ | tsk->thread.xstate->hardfpu.fp_regs[n + 1];
+ lly = ((long long)hy << 32)
+ | tsk->thread.xstate->hardfpu.fp_regs[m + 1];
+ if ((finsn & 0xf00f) == 0xf000)
+ llx = float64_add(llx, lly);
+ else
+ llx = float64_sub(llx, lly);
+ tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32;
+ tsk->thread.xstate->hardfpu.fp_regs[n + 1] = llx & 0xffffffff;
+ } else if ((fpscr & FPSCR_CAUSE_ERROR)
+ && (!prec && ((hx & 0x7fffffff) < 0x00800000
+ || (hy & 0x7fffffff) < 0x00800000))) {
+ /* FPU error because of denormal (floats) */
+ if ((finsn & 0xf00f) == 0xf000)
+ hx = float32_add(hx, hy);
+ else
+ hx = float32_sub(hx, hy);
+ tsk->thread.xstate->hardfpu.fp_regs[n] = hx;
+ } else
+ return 0;
+
+ regs->pc = nextpc;
+ return 1;
+ } else if ((finsn & 0xf003) == 0xf003) {
+ /* fdiv */
+ struct task_struct *tsk = current;
+ int fpscr;
+ int n, m, prec;
+ unsigned int hx, hy;
+
+ n = (finsn >> 8) & 0xf;
+ m = (finsn >> 4) & 0xf;
+ hx = tsk->thread.xstate->hardfpu.fp_regs[n];
+ hy = tsk->thread.xstate->hardfpu.fp_regs[m];
+ fpscr = tsk->thread.xstate->hardfpu.fpscr;
+ prec = fpscr & FPSCR_DBL_PRECISION;
+
+ if ((fpscr & FPSCR_CAUSE_ERROR)
+ && (prec && ((hx & 0x7fffffff) < 0x00100000
+ || (hy & 0x7fffffff) < 0x00100000))) {
+ long long llx, lly;
+
+ /* FPU error because of denormal (doubles) */
+ llx = ((long long)hx << 32)
+ | tsk->thread.xstate->hardfpu.fp_regs[n + 1];
+ lly = ((long long)hy << 32)
+ | tsk->thread.xstate->hardfpu.fp_regs[m + 1];
+
+ llx = float64_div(llx, lly);
+
+ tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32;
+ tsk->thread.xstate->hardfpu.fp_regs[n + 1] = llx & 0xffffffff;
+ } else if ((fpscr & FPSCR_CAUSE_ERROR)
+ && (!prec && ((hx & 0x7fffffff) < 0x00800000
+ || (hy & 0x7fffffff) < 0x00800000))) {
+ /* FPU error because of denormal (floats) */
+ hx = float32_div(hx, hy);
+ tsk->thread.xstate->hardfpu.fp_regs[n] = hx;
+ } else
+ return 0;
+
+ regs->pc = nextpc;
+ return 1;
+ } else if ((finsn & 0xf0bd) == 0xf0bd) {
+ /* fcnvds - double to single precision convert */
+ struct task_struct *tsk = current;
+ int m;
+ unsigned int hx;
+
+ m = (finsn >> 8) & 0x7;
+ hx = tsk->thread.xstate->hardfpu.fp_regs[m];
+
+ if ((tsk->thread.xstate->hardfpu.fpscr & FPSCR_CAUSE_ERROR)
+ && ((hx & 0x7fffffff) < 0x00100000)) {
+ /* subnormal double to float conversion */
+ long long llx;
+
+ llx = ((long long)tsk->thread.xstate->hardfpu.fp_regs[m] << 32)
+ | tsk->thread.xstate->hardfpu.fp_regs[m + 1];
+
+ tsk->thread.xstate->hardfpu.fpul = float64_to_float32(llx);
+ } else
+ return 0;
+
+ regs->pc = nextpc;
+ return 1;
+ }
+
+ return 0;
+}
+
+void float_raise(unsigned int flags)
+{
+ fpu_exception_flags |= flags;
+}
+
+int float_rounding_mode(void)
+{
+ struct task_struct *tsk = current;
+ int roundingMode = FPSCR_ROUNDING_MODE(tsk->thread.xstate->hardfpu.fpscr);
+ return roundingMode;
+}
+
+BUILD_TRAP_HANDLER(fpu_error)
+{
+ struct task_struct *tsk = current;
+ TRAP_HANDLER_DECL;
+
+ __unlazy_fpu(tsk, regs);
+ fpu_exception_flags = 0;
+ if (ieee_fpe_handler(regs)) {
+ tsk->thread.xstate->hardfpu.fpscr &=
+ ~(FPSCR_CAUSE_MASK | FPSCR_FLAG_MASK);
+ tsk->thread.xstate->hardfpu.fpscr |= fpu_exception_flags;
+ /* Set the FPSCR flag as well as cause bits - simply
+ * replicate the cause */
+ tsk->thread.xstate->hardfpu.fpscr |= (fpu_exception_flags >> 10);
+ grab_fpu(regs);
+ restore_fpu(tsk);
+ task_thread_info(tsk)->status |= TS_USEDFPU;
+ if ((((tsk->thread.xstate->hardfpu.fpscr & FPSCR_ENABLE_MASK) >> 7) &
+ (fpu_exception_flags >> 2)) == 0) {
+ return;
+ }
+ }
+
+ force_sig(SIGFPE, tsk);
+}
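
To make the denormal_to_double() bit manipulation above concrete (editor's worked example): the smallest positive single-precision denormal, bit pattern 0x00000001 (2^-149), needs 23 left shifts before bit 23 is set, so exp ends up as (1023 - 126) - 23 = 874. The resulting double words are du = 0x36a00000 and dl = 0x00000000, i.e. the IEEE-754 double encoding of 2^-149 (biased exponent 874 = -149 + 1023), which is then stored into the register pair.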
diff --git a/arch/sh/kernel/cpu/sh4/perf_event.c b/arch/sh/kernel/cpu/sh4/perf_event.c
new file mode 100644
index 00000000..fa4f724b
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4/perf_event.c
@@ -0,0 +1,268 @@
+/*
+ * Performance events support for SH7750-style performance counters
+ *
+ * Copyright (C) 2009 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/perf_event.h>
+#include <asm/processor.h>
+
+#define PM_CR_BASE 0xff000084 /* 16-bit */
+#define PM_CTR_BASE 0xff100004 /* 32-bit */
+
+#define PMCR(n) (PM_CR_BASE + ((n) * 0x04))
+#define PMCTRH(n) (PM_CTR_BASE + 0x00 + ((n) * 0x08))
+#define PMCTRL(n) (PM_CTR_BASE + 0x04 + ((n) * 0x08))
+
+#define PMCR_PMM_MASK 0x0000003f
+
+#define PMCR_CLKF 0x00000100
+#define PMCR_PMCLR 0x00002000
+#define PMCR_PMST 0x00004000
+#define PMCR_PMEN 0x00008000
+
+static struct sh_pmu sh7750_pmu;
+
+/*
+ * There are a number of events supported by each counter (33 in total).
+ * Since we have 2 counters, each counter will take the event code as it
+ * corresponds to the PMCR PMM setting. Each counter can be configured
+ * independently.
+ *
+ * Event Code Description
+ * ---------- -----------
+ *
+ * 0x01 Operand read access
+ * 0x02 Operand write access
+ * 0x03 UTLB miss
+ * 0x04 Operand cache read miss
+ * 0x05 Operand cache write miss
+ * 0x06 Instruction fetch (w/ cache)
+ * 0x07 Instruction TLB miss
+ * 0x08 Instruction cache miss
+ * 0x09 All operand accesses
+ * 0x0a All instruction accesses
+ * 0x0b OC RAM operand access
+ * 0x0d On-chip I/O space access
+ * 0x0e Operand access (r/w)
+ * 0x0f Operand cache miss (r/w)
+ * 0x10 Branch instruction
+ * 0x11 Branch taken
+ * 0x12 BSR/BSRF/JSR
+ * 0x13 Instruction execution
+ * 0x14 Instruction execution in parallel
+ * 0x15 FPU Instruction execution
+ * 0x16 Interrupt
+ * 0x17 NMI
+ * 0x18 trapa instruction execution
+ * 0x19 UBCA match
+ * 0x1a UBCB match
+ * 0x21 Instruction cache fill
+ * 0x22 Operand cache fill
+ * 0x23 Elapsed time
+ * 0x24 Pipeline freeze by I-cache miss
+ * 0x25 Pipeline freeze by D-cache miss
+ * 0x27 Pipeline freeze by branch instruction
+ * 0x28 Pipeline freeze by CPU register
+ * 0x29 Pipeline freeze by FPU
+ */
+
+static const int sh7750_general_events[] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = 0x0023,
+ [PERF_COUNT_HW_INSTRUCTIONS] = 0x000a,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0006, /* I-cache */
+ [PERF_COUNT_HW_CACHE_MISSES] = 0x0008, /* I-cache */
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x0010,
+ [PERF_COUNT_HW_BRANCH_MISSES] = -1,
+ [PERF_COUNT_HW_BUS_CYCLES] = -1,
+};
+
+#define C(x) PERF_COUNT_HW_CACHE_##x
+
+static const int sh7750_cache_events
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] =
+{
+ [ C(L1D) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0001,
+ [ C(RESULT_MISS) ] = 0x0004,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0002,
+ [ C(RESULT_MISS) ] = 0x0005,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ },
+
+ [ C(L1I) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0006,
+ [ C(RESULT_MISS) ] = 0x0008,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ },
+
+ [ C(LL) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ },
+
+ [ C(DTLB) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0x0003,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ },
+
+ [ C(ITLB) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0x0007,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+
+ [ C(BPU) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+
+ [ C(NODE) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+};
+
+static int sh7750_event_map(int event)
+{
+ return sh7750_general_events[event];
+}
+
+static u64 sh7750_pmu_read(int idx)
+{
+ return (u64)((u64)(__raw_readl(PMCTRH(idx)) & 0xffff) << 32) |
+ __raw_readl(PMCTRL(idx));
+}
+
+static void sh7750_pmu_disable(struct hw_perf_event *hwc, int idx)
+{
+ unsigned int tmp;
+
+ tmp = __raw_readw(PMCR(idx));
+ tmp &= ~(PMCR_PMM_MASK | PMCR_PMEN);
+ __raw_writew(tmp, PMCR(idx));
+}
+
+static void sh7750_pmu_enable(struct hw_perf_event *hwc, int idx)
+{
+ __raw_writew(__raw_readw(PMCR(idx)) | PMCR_PMCLR, PMCR(idx));
+ __raw_writew(hwc->config | PMCR_PMEN | PMCR_PMST, PMCR(idx));
+}
+
+static void sh7750_pmu_disable_all(void)
+{
+ int i;
+
+ for (i = 0; i < sh7750_pmu.num_events; i++)
+ __raw_writew(__raw_readw(PMCR(i)) & ~PMCR_PMEN, PMCR(i));
+}
+
+static void sh7750_pmu_enable_all(void)
+{
+ int i;
+
+ for (i = 0; i < sh7750_pmu.num_events; i++)
+ __raw_writew(__raw_readw(PMCR(i)) | PMCR_PMEN, PMCR(i));
+}
+
+static struct sh_pmu sh7750_pmu = {
+ .name = "sh7750",
+ .num_events = 2,
+ .event_map = sh7750_event_map,
+ .max_events = ARRAY_SIZE(sh7750_general_events),
+ .raw_event_mask = PMCR_PMM_MASK,
+ .cache_events = &sh7750_cache_events,
+ .read = sh7750_pmu_read,
+ .disable = sh7750_pmu_disable,
+ .enable = sh7750_pmu_enable,
+ .disable_all = sh7750_pmu_disable_all,
+ .enable_all = sh7750_pmu_enable_all,
+};
+
+static int __init sh7750_pmu_init(void)
+{
+ /*
+ * Make sure this CPU actually has perf counters.
+ */
+ if (!(boot_cpu_data.flags & CPU_HAS_PERF_COUNTER)) {
+ pr_notice("HW perf events unsupported, software events only.\n");
+ return -ENODEV;
+ }
+
+ return register_sh_pmu(&sh7750_pmu);
+}
+early_initcall(sh7750_pmu_init);
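
The counters behind sh7750_pmu_read() above are 48 bits wide, split across two registers: PMCTRH holds the upper 16 bits and PMCTRL the lower 32. A small sketch of the same combination with hypothetical raw values:

	u32 high = 0x0001;		/* PMCTRH(idx) & 0xffff */
	u32 low  = 0x00000010;		/* PMCTRL(idx) */
	u64 count = ((u64)high << 32) | low;	/* 0x100000010 = 4294967312 */

which is exactly what the (u64) casts in the read routine compute.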
diff --git a/arch/sh/kernel/cpu/sh4/probe.c b/arch/sh/kernel/cpu/sh4/probe.c
new file mode 100644
index 00000000..971cf0fc
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4/probe.c
@@ -0,0 +1,260 @@
+/*
+ * arch/sh/kernel/cpu/sh4/probe.c
+ *
+ * CPU Subtype Probing for SH-4.
+ *
+ * Copyright (C) 2001 - 2007 Paul Mundt
+ * Copyright (C) 2003 Richard Curnow
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/io.h>
+#include <asm/processor.h>
+#include <asm/cache.h>
+
+void __cpuinit cpu_probe(void)
+{
+ unsigned long pvr, prr, cvr;
+ unsigned long size;
+
+ static unsigned long sizes[16] = {
+ [1] = (1 << 12),
+ [2] = (1 << 13),
+ [4] = (1 << 14),
+ [8] = (1 << 15),
+ [9] = (1 << 16)
+ };
+
+ pvr = (__raw_readl(CCN_PVR) >> 8) & 0xffffff;
+ prr = (__raw_readl(CCN_PRR) >> 4) & 0xff;
+ cvr = (__raw_readl(CCN_CVR));
+
+ /*
+ * Setup some sane SH-4 defaults for the icache
+ */
+ boot_cpu_data.icache.way_incr = (1 << 13);
+ boot_cpu_data.icache.entry_shift = 5;
+ boot_cpu_data.icache.sets = 256;
+ boot_cpu_data.icache.ways = 1;
+ boot_cpu_data.icache.linesz = L1_CACHE_BYTES;
+
+ /*
+ * And again for the dcache ..
+ */
+ boot_cpu_data.dcache.way_incr = (1 << 14);
+ boot_cpu_data.dcache.entry_shift = 5;
+ boot_cpu_data.dcache.sets = 512;
+ boot_cpu_data.dcache.ways = 1;
+ boot_cpu_data.dcache.linesz = L1_CACHE_BYTES;
+
+ /* We don't know the chip cut */
+ boot_cpu_data.cut_major = boot_cpu_data.cut_minor = -1;
+
+ /*
+ * Setup some generic flags we can probe on SH-4A parts
+ */
+ if (((pvr >> 16) & 0xff) == 0x10) {
+ boot_cpu_data.family = CPU_FAMILY_SH4A;
+
+ if ((cvr & 0x10000000) == 0) {
+ boot_cpu_data.flags |= CPU_HAS_DSP;
+ boot_cpu_data.family = CPU_FAMILY_SH4AL_DSP;
+ }
+
+ boot_cpu_data.flags |= CPU_HAS_LLSC | CPU_HAS_PERF_COUNTER;
+ boot_cpu_data.cut_major = pvr & 0x7f;
+
+ boot_cpu_data.icache.ways = 4;
+ boot_cpu_data.dcache.ways = 4;
+ } else {
+ /* And some SH-4 defaults.. */
+ boot_cpu_data.flags |= CPU_HAS_PTEA | CPU_HAS_FPU;
+ boot_cpu_data.family = CPU_FAMILY_SH4;
+ }
+
+ /* FPU detection works for almost everyone */
+ if ((cvr & 0x20000000))
+ boot_cpu_data.flags |= CPU_HAS_FPU;
+
+ /* Mask off the upper chip ID */
+ pvr &= 0xffff;
+
+ /*
+ * Probe the underlying processor version/revision and
+ * adjust cpu_data setup accordingly.
+ */
+ switch (pvr) {
+ case 0x205:
+ boot_cpu_data.type = CPU_SH7750;
+ boot_cpu_data.flags |= CPU_HAS_P2_FLUSH_BUG |
+ CPU_HAS_PERF_COUNTER;
+ break;
+ case 0x206:
+ boot_cpu_data.type = CPU_SH7750S;
+ boot_cpu_data.flags |= CPU_HAS_P2_FLUSH_BUG |
+ CPU_HAS_PERF_COUNTER;
+ break;
+ case 0x1100:
+ boot_cpu_data.type = CPU_SH7751;
+ break;
+ case 0x2001:
+ case 0x2004:
+ boot_cpu_data.type = CPU_SH7770;
+ break;
+ case 0x2006:
+ case 0x200A:
+ if (prr == 0x61)
+ boot_cpu_data.type = CPU_SH7781;
+ else if (prr == 0xa1)
+ boot_cpu_data.type = CPU_SH7763;
+ else
+ boot_cpu_data.type = CPU_SH7780;
+
+ break;
+ case 0x3000:
+ case 0x3003:
+ case 0x3009:
+ boot_cpu_data.type = CPU_SH7343;
+ break;
+ case 0x3004:
+ case 0x3007:
+ boot_cpu_data.type = CPU_SH7785;
+ break;
+ case 0x4004:
+ case 0x4005:
+ boot_cpu_data.type = CPU_SH7786;
+ boot_cpu_data.flags |= CPU_HAS_PTEAEX | CPU_HAS_L2_CACHE;
+ break;
+ case 0x3008:
+ switch (prr) {
+ case 0x50:
+ case 0x51:
+ boot_cpu_data.type = CPU_SH7723;
+ boot_cpu_data.flags |= CPU_HAS_L2_CACHE;
+ break;
+ case 0x70:
+ boot_cpu_data.type = CPU_SH7366;
+ break;
+ case 0xa0:
+ case 0xa1:
+ boot_cpu_data.type = CPU_SH7722;
+ break;
+ }
+ break;
+ case 0x300b:
+ switch (prr) {
+ case 0x20:
+ boot_cpu_data.type = CPU_SH7724;
+ boot_cpu_data.flags |= CPU_HAS_L2_CACHE;
+ break;
+ case 0x10:
+ case 0x11:
+ boot_cpu_data.type = CPU_SH7757;
+ break;
+ case 0xd0:
+ case 0x40: /* yon-ten-go */
+ boot_cpu_data.type = CPU_SH7372;
+ break;
+
+ }
+ break;
+ case 0x4000: /* 1st cut */
+ case 0x4001: /* 2nd cut */
+ boot_cpu_data.type = CPU_SHX3;
+ break;
+ case 0x700:
+ boot_cpu_data.type = CPU_SH4_501;
+ boot_cpu_data.flags &= ~CPU_HAS_FPU;
+ boot_cpu_data.icache.ways = 2;
+ boot_cpu_data.dcache.ways = 2;
+ break;
+ case 0x600:
+ boot_cpu_data.type = CPU_SH4_202;
+ boot_cpu_data.icache.ways = 2;
+ boot_cpu_data.dcache.ways = 2;
+ break;
+ case 0x500 ... 0x501:
+ switch (prr) {
+ case 0x10:
+ boot_cpu_data.type = CPU_SH7750R;
+ break;
+ case 0x11:
+ boot_cpu_data.type = CPU_SH7751R;
+ break;
+ case 0x50 ... 0x5f:
+ boot_cpu_data.type = CPU_SH7760;
+ break;
+ }
+
+ boot_cpu_data.icache.ways = 2;
+ boot_cpu_data.dcache.ways = 2;
+
+ break;
+ }
+
+ /*
+ * On anything that's not a direct-mapped cache, look to the CVR
+ * for I/D-cache specifics.
+ */
+ if (boot_cpu_data.icache.ways > 1) {
+ size = sizes[(cvr >> 20) & 0xf];
+ boot_cpu_data.icache.way_incr = (size >> 1);
+ boot_cpu_data.icache.sets = (size >> 6);
+
+ }
+
+ /* And the rest of the D-cache */
+ if (boot_cpu_data.dcache.ways > 1) {
+ size = sizes[(cvr >> 16) & 0xf];
+ boot_cpu_data.dcache.way_incr = (size >> 1);
+ boot_cpu_data.dcache.sets = (size >> 6);
+ }
+
+ /*
+ * SH-4A's have an optional PIPT L2.
+ */
+ if (boot_cpu_data.flags & CPU_HAS_L2_CACHE) {
+ /*
+ * Verify that it really has something hooked up, this
+ * is the safety net for CPUs that have optional L2
+ * support yet do not implement it.
+ */
+ if ((cvr & 0xf) == 0)
+ boot_cpu_data.flags &= ~CPU_HAS_L2_CACHE;
+ else {
+ /*
+ * Silicon and specifications have clearly never
+ * met..
+ */
+ cvr ^= 0xf;
+
+ /*
+ * Size calculation is much more sensible
+ * than it is for the L1.
+ *
+ * Sizes are 128KB, 256KB, 512KB, and 1MB.
+ */
+ size = (cvr & 0xf) << 17;
+
+ boot_cpu_data.scache.way_incr = (1 << 16);
+ boot_cpu_data.scache.entry_shift = 5;
+ boot_cpu_data.scache.ways = 4;
+ boot_cpu_data.scache.linesz = L1_CACHE_BYTES;
+
+ boot_cpu_data.scache.entry_mask =
+ (boot_cpu_data.scache.way_incr -
+ boot_cpu_data.scache.linesz);
+
+ boot_cpu_data.scache.sets = size /
+ (boot_cpu_data.scache.linesz *
+ boot_cpu_data.scache.ways);
+
+ boot_cpu_data.scache.way_size =
+ (boot_cpu_data.scache.sets *
+ boot_cpu_data.scache.linesz);
+ }
+ }
+}
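
A worked example of the CVR cache decode above (hypothetical CVR nibble): if (cvr >> 16) & 0xf reads 8, sizes[8] gives a 32 KB data cache, so way_incr becomes 16 KB (size >> 1) and sets becomes 512 (size >> 6) — consistent with the 2-way, 32-byte-line geometry set up for the SH7750R/SH7751R/SH7760 cases, since 512 sets x 2 ways x 32 bytes = 32 KB.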
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh4-202.c b/arch/sh/kernel/cpu/sh4/setup-sh4-202.c
new file mode 100644
index 00000000..5b283315
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4/setup-sh4-202.c
@@ -0,0 +1,200 @@
+/*
+ * SH4-202 Setup
+ *
+ * Copyright (C) 2006 Paul Mundt
+ * Copyright (C) 2009 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/serial.h>
+#include <linux/serial_sci.h>
+#include <linux/sh_timer.h>
+#include <linux/io.h>
+
+static struct plat_sci_port scif0_platform_data = {
+ .mapbase = 0xffe80000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .scbrr_algo_id = SCBRR_ALGO_2,
+ .type = PORT_SCIF,
+ .irqs = { 40, 41, 43, 42 },
+};
+
+static struct platform_device scif0_device = {
+ .name = "sh-sci",
+ .id = 0,
+ .dev = {
+ .platform_data = &scif0_platform_data,
+ },
+};
+
+static struct sh_timer_config tmu0_platform_data = {
+ .channel_offset = 0x04,
+ .timer_bit = 0,
+ .clockevent_rating = 200,
+};
+
+static struct resource tmu0_resources[] = {
+ [0] = {
+ .start = 0xffd80008,
+ .end = 0xffd80013,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 16,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu0_device = {
+ .name = "sh_tmu",
+ .id = 0,
+ .dev = {
+ .platform_data = &tmu0_platform_data,
+ },
+ .resource = tmu0_resources,
+ .num_resources = ARRAY_SIZE(tmu0_resources),
+};
+
+static struct sh_timer_config tmu1_platform_data = {
+ .channel_offset = 0x10,
+ .timer_bit = 1,
+ .clocksource_rating = 200,
+};
+
+static struct resource tmu1_resources[] = {
+ [0] = {
+ .start = 0xffd80014,
+ .end = 0xffd8001f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 17,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu1_device = {
+ .name = "sh_tmu",
+ .id = 1,
+ .dev = {
+ .platform_data = &tmu1_platform_data,
+ },
+ .resource = tmu1_resources,
+ .num_resources = ARRAY_SIZE(tmu1_resources),
+};
+
+static struct sh_timer_config tmu2_platform_data = {
+ .channel_offset = 0x1c,
+ .timer_bit = 2,
+};
+
+static struct resource tmu2_resources[] = {
+ [0] = {
+ .start = 0xffd80020,
+ .end = 0xffd8002f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 18,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu2_device = {
+ .name = "sh_tmu",
+ .id = 2,
+ .dev = {
+ .platform_data = &tmu2_platform_data,
+ },
+ .resource = tmu2_resources,
+ .num_resources = ARRAY_SIZE(tmu2_resources),
+};
+
+static struct platform_device *sh4202_devices[] __initdata = {
+ &scif0_device,
+ &tmu0_device,
+ &tmu1_device,
+ &tmu2_device,
+};
+
+static int __init sh4202_devices_setup(void)
+{
+ return platform_add_devices(sh4202_devices,
+ ARRAY_SIZE(sh4202_devices));
+}
+arch_initcall(sh4202_devices_setup);
+
+static struct platform_device *sh4202_early_devices[] __initdata = {
+ &scif0_device,
+ &tmu0_device,
+ &tmu1_device,
+ &tmu2_device,
+};
+
+void __init plat_early_device_setup(void)
+{
+ early_platform_add_devices(sh4202_early_devices,
+ ARRAY_SIZE(sh4202_early_devices));
+}
+
+enum {
+ UNUSED = 0,
+
+ /* interrupt sources */
+ IRL0, IRL1, IRL2, IRL3, /* only IRLM mode supported */
+ HUDI, TMU0, TMU1, TMU2, RTC, SCIF, WDT,
+};
+
+static struct intc_vect vectors[] __initdata = {
+ INTC_VECT(HUDI, 0x600),
+ INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420),
+ INTC_VECT(TMU2, 0x440), INTC_VECT(TMU2, 0x460),
+ INTC_VECT(RTC, 0x480), INTC_VECT(RTC, 0x4a0),
+ INTC_VECT(RTC, 0x4c0),
+ INTC_VECT(SCIF, 0x700), INTC_VECT(SCIF, 0x720),
+ INTC_VECT(SCIF, 0x740), INTC_VECT(SCIF, 0x760),
+ INTC_VECT(WDT, 0x560),
+};
+
+static struct intc_prio_reg prio_registers[] __initdata = {
+ { 0xffd00004, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2, RTC } },
+ { 0xffd00008, 0, 16, 4, /* IPRB */ { WDT, 0, 0, 0 } },
+ { 0xffd0000c, 0, 16, 4, /* IPRC */ { 0, 0, SCIF, HUDI } },
+ { 0xffd00010, 0, 16, 4, /* IPRD */ { IRL0, IRL1, IRL2, IRL3 } },
+};
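+
+/*
+ * Reading the table above: each entry describes a 16-bit priority register
+ * split into 4-bit fields, listed most-significant field first. For IPRA at
+ * 0xffd00004 that should place TMU0 in bits 15-12, TMU1 in bits 11-8, TMU2
+ * in bits 7-4 and RTC in bits 3-0, which matches the usual SH-4 IPRA layout
+ * (an illustrative reading, not a datasheet quote).
+ */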
+
+static DECLARE_INTC_DESC(intc_desc, "sh4-202", vectors, NULL,
+ NULL, prio_registers, NULL);
+
+static struct intc_vect vectors_irlm[] __initdata = {
+ INTC_VECT(IRL0, 0x240), INTC_VECT(IRL1, 0x2a0),
+ INTC_VECT(IRL2, 0x300), INTC_VECT(IRL3, 0x360),
+};
+
+static DECLARE_INTC_DESC(intc_desc_irlm, "sh4-202_irlm", vectors_irlm, NULL,
+ NULL, prio_registers, NULL);
+
+void __init plat_irq_setup(void)
+{
+ register_intc_controller(&intc_desc);
+}
+
+#define INTC_ICR 0xffd00000UL
+#define INTC_ICR_IRLM (1<<7)
+
+void __init plat_irq_setup_pins(int mode)
+{
+ switch (mode) {
+ case IRQ_MODE_IRQ: /* individual interrupt mode for IRL3-0 */
+ __raw_writew(__raw_readw(INTC_ICR) | INTC_ICR_IRLM, INTC_ICR);
+ register_intc_controller(&intc_desc_irlm);
+ break;
+ default:
+ BUG();
+ }
+}
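plat_irq_setup_pins() above is normally invoked from board code rather than from common setup. A minimal sketch of such a caller, assuming a hypothetical board whose init_irq hook wants the IRL3-0 pins in individual interrupt mode (the board and function names are illustrative, not part of this patch):

	static void __init myboard_init_irq(void)
	{
		/* switch IRL3-0 from encoded IRL mode to individual (IRLM) mode */
		plat_irq_setup_pins(IRQ_MODE_IRQ);
	}

Passing any other mode trips the BUG() above, since the SH4-202 setup only wires up IRLM.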
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7750.c b/arch/sh/kernel/cpu/sh4/setup-sh7750.c
new file mode 100644
index 00000000..98cc0c79
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4/setup-sh7750.c
@@ -0,0 +1,459 @@
+/*
+ * SH7091/SH7750/SH7750S/SH7750R/SH7751/SH7751R Setup
+ *
+ * Copyright (C) 2006 Paul Mundt
+ * Copyright (C) 2006 Jamie Lenehan
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/serial.h>
+#include <linux/io.h>
+#include <linux/sh_timer.h>
+#include <linux/serial_sci.h>
+#include <generated/machtypes.h>
+
+static struct resource rtc_resources[] = {
+ [0] = {
+ .start = 0xffc80000,
+ .end = 0xffc80000 + 0x58 - 1,
+ .flags = IORESOURCE_IO,
+ },
+ [1] = {
+ /* Shared Period/Carry/Alarm IRQ */
+ .start = 20,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device rtc_device = {
+ .name = "sh-rtc",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(rtc_resources),
+ .resource = rtc_resources,
+};
+
+static struct plat_sci_port sci_platform_data = {
+ .mapbase = 0xffe00000,
+ .port_reg = 0xffe0001C,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_TE | SCSCR_RE,
+ .scbrr_algo_id = SCBRR_ALGO_2,
+ .type = PORT_SCI,
+ .irqs = { 23, 23, 23, 0 },
+ .regshift = 2,
+};
+
+static struct platform_device sci_device = {
+ .name = "sh-sci",
+ .id = 0,
+ .dev = {
+ .platform_data = &sci_platform_data,
+ },
+};
+
+static struct plat_sci_port scif_platform_data = {
+ .mapbase = 0xffe80000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_TE | SCSCR_RE | SCSCR_REIE,
+ .scbrr_algo_id = SCBRR_ALGO_2,
+ .type = PORT_SCIF,
+ .irqs = { 40, 40, 40, 40 },
+};
+
+static struct platform_device scif_device = {
+ .name = "sh-sci",
+ .id = 1,
+ .dev = {
+ .platform_data = &scif_platform_data,
+ },
+};
+
+static struct sh_timer_config tmu0_platform_data = {
+ .channel_offset = 0x04,
+ .timer_bit = 0,
+ .clockevent_rating = 200,
+};
+
+static struct resource tmu0_resources[] = {
+ [0] = {
+ .start = 0xffd80008,
+ .end = 0xffd80013,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 16,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu0_device = {
+ .name = "sh_tmu",
+ .id = 0,
+ .dev = {
+ .platform_data = &tmu0_platform_data,
+ },
+ .resource = tmu0_resources,
+ .num_resources = ARRAY_SIZE(tmu0_resources),
+};
+
+static struct sh_timer_config tmu1_platform_data = {
+ .channel_offset = 0x10,
+ .timer_bit = 1,
+ .clocksource_rating = 200,
+};
+
+static struct resource tmu1_resources[] = {
+ [0] = {
+ .start = 0xffd80014,
+ .end = 0xffd8001f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 17,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu1_device = {
+ .name = "sh_tmu",
+ .id = 1,
+ .dev = {
+ .platform_data = &tmu1_platform_data,
+ },
+ .resource = tmu1_resources,
+ .num_resources = ARRAY_SIZE(tmu1_resources),
+};
+
+static struct sh_timer_config tmu2_platform_data = {
+ .channel_offset = 0x1c,
+ .timer_bit = 2,
+};
+
+static struct resource tmu2_resources[] = {
+ [0] = {
+ .start = 0xffd80020,
+ .end = 0xffd8002f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 18,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu2_device = {
+ .name = "sh_tmu",
+ .id = 2,
+ .dev = {
+ .platform_data = &tmu2_platform_data,
+ },
+ .resource = tmu2_resources,
+ .num_resources = ARRAY_SIZE(tmu2_resources),
+};
+
+/* SH7750R, SH7751 and SH7751R all have two extra timer channels */
+#if defined(CONFIG_CPU_SUBTYPE_SH7750R) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7751) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7751R)
+
+static struct sh_timer_config tmu3_platform_data = {
+ .channel_offset = 0x04,
+ .timer_bit = 0,
+};
+
+static struct resource tmu3_resources[] = {
+ [0] = {
+ .start = 0xfe100008,
+ .end = 0xfe100013,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 72,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu3_device = {
+ .name = "sh_tmu",
+ .id = 3,
+ .dev = {
+ .platform_data = &tmu3_platform_data,
+ },
+ .resource = tmu3_resources,
+ .num_resources = ARRAY_SIZE(tmu3_resources),
+};
+
+static struct sh_timer_config tmu4_platform_data = {
+ .channel_offset = 0x10,
+ .timer_bit = 1,
+};
+
+static struct resource tmu4_resources[] = {
+ [0] = {
+ .start = 0xfe100014,
+ .end = 0xfe10001f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 76,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu4_device = {
+ .name = "sh_tmu",
+ .id = 4,
+ .dev = {
+ .platform_data = &tmu4_platform_data,
+ },
+ .resource = tmu4_resources,
+ .num_resources = ARRAY_SIZE(tmu4_resources),
+};
+
+#endif
+
+static struct platform_device *sh7750_devices[] __initdata = {
+ &rtc_device,
+ &tmu0_device,
+ &tmu1_device,
+ &tmu2_device,
+#if defined(CONFIG_CPU_SUBTYPE_SH7750R) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7751) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7751R)
+ &tmu3_device,
+ &tmu4_device,
+#endif
+};
+
+static int __init sh7750_devices_setup(void)
+{
+ if (mach_is_rts7751r2d()) {
+ platform_device_register(&scif_device);
+ } else {
+ platform_device_register(&sci_device);
+ platform_device_register(&scif_device);
+ }
+
+ return platform_add_devices(sh7750_devices,
+ ARRAY_SIZE(sh7750_devices));
+}
+arch_initcall(sh7750_devices_setup);
+
+static struct platform_device *sh7750_early_devices[] __initdata = {
+ &tmu0_device,
+ &tmu1_device,
+ &tmu2_device,
+#if defined(CONFIG_CPU_SUBTYPE_SH7750R) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7751) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7751R)
+ &tmu3_device,
+ &tmu4_device,
+#endif
+};
+
+void __init plat_early_device_setup(void)
+{
+ struct platform_device *dev[1];
+
+ if (mach_is_rts7751r2d()) {
+ scif_platform_data.scscr |= SCSCR_CKE1;
+ dev[0] = &scif_device;
+ early_platform_add_devices(dev, 1);
+ } else {
+ dev[0] = &sci_device;
+ early_platform_add_devices(dev, 1);
+ dev[0] = &scif_device;
+ early_platform_add_devices(dev, 1);
+ }
+
+ early_platform_add_devices(sh7750_early_devices,
+ ARRAY_SIZE(sh7750_early_devices));
+}
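+
+/*
+ * The one-element dev[] array above is a convenience for handing single
+ * devices to the array-based early_platform_add_devices() interface; it
+ * also lets the RTS7751R2D case patch scif_platform_data (adding
+ * SCSCR_CKE1, presumably selecting an external serial clock) before the
+ * port is registered. The remaining early devices are then registered in
+ * one call from the sh7750_early_devices[] array.
+ */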
+
+enum {
+ UNUSED = 0,
+
+ /* interrupt sources */
+ IRL0, IRL1, IRL2, IRL3, /* only IRLM mode supported */
+ HUDI, GPIOI, DMAC,
+ PCIC0_PCISERR, PCIC1_PCIERR, PCIC1_PCIPWDWN, PCIC1_PCIPWON,
+ PCIC1_PCIDMA0, PCIC1_PCIDMA1, PCIC1_PCIDMA2, PCIC1_PCIDMA3,
+ TMU3, TMU4, TMU0, TMU1, TMU2, RTC, SCI1, SCIF, WDT, REF,
+
+ /* interrupt groups */
+ PCIC1,
+};
+
+static struct intc_vect vectors[] __initdata = {
+ INTC_VECT(HUDI, 0x600), INTC_VECT(GPIOI, 0x620),
+ INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420),
+ INTC_VECT(TMU2, 0x440), INTC_VECT(TMU2, 0x460),
+ INTC_VECT(RTC, 0x480), INTC_VECT(RTC, 0x4a0),
+ INTC_VECT(RTC, 0x4c0),
+ INTC_VECT(SCI1, 0x4e0), INTC_VECT(SCI1, 0x500),
+ INTC_VECT(SCI1, 0x520), INTC_VECT(SCI1, 0x540),
+ INTC_VECT(SCIF, 0x700), INTC_VECT(SCIF, 0x720),
+ INTC_VECT(SCIF, 0x740), INTC_VECT(SCIF, 0x760),
+ INTC_VECT(WDT, 0x560),
+ INTC_VECT(REF, 0x580), INTC_VECT(REF, 0x5a0),
+};
+
+static struct intc_prio_reg prio_registers[] __initdata = {
+ { 0xffd00004, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2, RTC } },
+ { 0xffd00008, 0, 16, 4, /* IPRB */ { WDT, REF, SCI1, 0 } },
+ { 0xffd0000c, 0, 16, 4, /* IPRC */ { GPIOI, DMAC, SCIF, HUDI } },
+ { 0xffd00010, 0, 16, 4, /* IPRD */ { IRL0, IRL1, IRL2, IRL3 } },
+ { 0xfe080000, 0, 32, 4, /* INTPRI00 */ { 0, 0, 0, 0,
+ TMU4, TMU3,
+ PCIC1, PCIC0_PCISERR } },
+};
+
+static DECLARE_INTC_DESC(intc_desc, "sh7750", vectors, NULL,
+ NULL, prio_registers, NULL);
+
+/* SH7750, SH7750S, SH7751 and SH7091 all have 4-channel DMA controllers */
+#if defined(CONFIG_CPU_SUBTYPE_SH7750) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7750S) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7751) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7091)
+static struct intc_vect vectors_dma4[] __initdata = {
+ INTC_VECT(DMAC, 0x640), INTC_VECT(DMAC, 0x660),
+ INTC_VECT(DMAC, 0x680), INTC_VECT(DMAC, 0x6a0),
+ INTC_VECT(DMAC, 0x6c0),
+};
+
+static DECLARE_INTC_DESC(intc_desc_dma4, "sh7750_dma4",
+ vectors_dma4, NULL,
+ NULL, prio_registers, NULL);
+#endif
+
+/* SH7750R and SH7751R both have 8-channel DMA controllers */
+#if defined(CONFIG_CPU_SUBTYPE_SH7750R) || defined(CONFIG_CPU_SUBTYPE_SH7751R)
+static struct intc_vect vectors_dma8[] __initdata = {
+ INTC_VECT(DMAC, 0x640), INTC_VECT(DMAC, 0x660),
+ INTC_VECT(DMAC, 0x680), INTC_VECT(DMAC, 0x6a0),
+ INTC_VECT(DMAC, 0x780), INTC_VECT(DMAC, 0x7a0),
+ INTC_VECT(DMAC, 0x7c0), INTC_VECT(DMAC, 0x7e0),
+ INTC_VECT(DMAC, 0x6c0),
+};
+
+static DECLARE_INTC_DESC(intc_desc_dma8, "sh7750_dma8",
+ vectors_dma8, NULL,
+ NULL, prio_registers, NULL);
+#endif
+
+/* SH7750R, SH7751 and SH7751R all have two extra timer channels */
+#if defined(CONFIG_CPU_SUBTYPE_SH7750R) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7751) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7751R)
+static struct intc_vect vectors_tmu34[] __initdata = {
+ INTC_VECT(TMU3, 0xb00), INTC_VECT(TMU4, 0xb80),
+};
+
+static struct intc_mask_reg mask_registers[] __initdata = {
+ { 0xfe080040, 0xfe080060, 32, /* INTMSK00 / INTMSKCLR00 */
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, TMU4, TMU3,
+ PCIC1_PCIERR, PCIC1_PCIPWDWN, PCIC1_PCIPWON,
+ PCIC1_PCIDMA0, PCIC1_PCIDMA1, PCIC1_PCIDMA2,
+ PCIC1_PCIDMA3, PCIC0_PCISERR } },
+};
+
+static DECLARE_INTC_DESC(intc_desc_tmu34, "sh7750_tmu34",
+ vectors_tmu34, NULL,
+ mask_registers, prio_registers, NULL);
+#endif
+
+/* SH7750S, SH7750R, SH7751 and SH7751R all have IRLM priority registers */
+static struct intc_vect vectors_irlm[] __initdata = {
+ INTC_VECT(IRL0, 0x240), INTC_VECT(IRL1, 0x2a0),
+ INTC_VECT(IRL2, 0x300), INTC_VECT(IRL3, 0x360),
+};
+
+static DECLARE_INTC_DESC(intc_desc_irlm, "sh7750_irlm", vectors_irlm, NULL,
+ NULL, prio_registers, NULL);
+
+/* SH7751 and SH7751R both have PCI */
+#if defined(CONFIG_CPU_SUBTYPE_SH7751) || defined(CONFIG_CPU_SUBTYPE_SH7751R)
+static struct intc_vect vectors_pci[] __initdata = {
+ INTC_VECT(PCIC0_PCISERR, 0xa00), INTC_VECT(PCIC1_PCIERR, 0xae0),
+ INTC_VECT(PCIC1_PCIPWDWN, 0xac0), INTC_VECT(PCIC1_PCIPWON, 0xaa0),
+ INTC_VECT(PCIC1_PCIDMA0, 0xa80), INTC_VECT(PCIC1_PCIDMA1, 0xa60),
+ INTC_VECT(PCIC1_PCIDMA2, 0xa40), INTC_VECT(PCIC1_PCIDMA3, 0xa20),
+};
+
+static struct intc_group groups_pci[] __initdata = {
+ INTC_GROUP(PCIC1, PCIC1_PCIERR, PCIC1_PCIPWDWN, PCIC1_PCIPWON,
+ PCIC1_PCIDMA0, PCIC1_PCIDMA1, PCIC1_PCIDMA2, PCIC1_PCIDMA3),
+};
+
+static DECLARE_INTC_DESC(intc_desc_pci, "sh7750_pci", vectors_pci, groups_pci,
+ mask_registers, prio_registers, NULL);
+#endif
+
+#if defined(CONFIG_CPU_SUBTYPE_SH7750) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7750S) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7091)
+void __init plat_irq_setup(void)
+{
+ /*
+ * same vectors for SH7750, SH7750S and SH7091 except for IRLM,
+ * see below..
+ */
+ register_intc_controller(&intc_desc);
+ register_intc_controller(&intc_desc_dma4);
+}
+#endif
+
+#if defined(CONFIG_CPU_SUBTYPE_SH7750R)
+void __init plat_irq_setup(void)
+{
+ register_intc_controller(&intc_desc);
+ register_intc_controller(&intc_desc_dma8);
+ register_intc_controller(&intc_desc_tmu34);
+}
+#endif
+
+#if defined(CONFIG_CPU_SUBTYPE_SH7751)
+void __init plat_irq_setup(void)
+{
+ register_intc_controller(&intc_desc);
+ register_intc_controller(&intc_desc_dma4);
+ register_intc_controller(&intc_desc_tmu34);
+ register_intc_controller(&intc_desc_pci);
+}
+#endif
+
+#if defined(CONFIG_CPU_SUBTYPE_SH7751R)
+void __init plat_irq_setup(void)
+{
+ register_intc_controller(&intc_desc);
+ register_intc_controller(&intc_desc_dma8);
+ register_intc_controller(&intc_desc_tmu34);
+ register_intc_controller(&intc_desc_pci);
+}
+#endif
+
+#define INTC_ICR 0xffd00000UL
+#define INTC_ICR_IRLM (1<<7)
+
+void __init plat_irq_setup_pins(int mode)
+{
+#if defined(CONFIG_CPU_SUBTYPE_SH7750) || defined(CONFIG_CPU_SUBTYPE_SH7091)
+ BUG(); /* impossible to mask interrupts on SH7750 and SH7091 */
+ return;
+#endif
+
+ switch (mode) {
+ case IRQ_MODE_IRQ: /* individual interrupt mode for IRL3-0 */
+ __raw_writew(__raw_readw(INTC_ICR) | INTC_ICR_IRLM, INTC_ICR);
+ register_intc_controller(&intc_desc_irlm);
+ break;
+ default:
+ BUG();
+ }
+}
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7760.c b/arch/sh/kernel/cpu/sh4/setup-sh7760.c
new file mode 100644
index 00000000..c0b4c774
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4/setup-sh7760.c
@@ -0,0 +1,336 @@
+/*
+ * SH7760 Setup
+ *
+ * Copyright (C) 2006 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/serial.h>
+#include <linux/sh_timer.h>
+#include <linux/serial_sci.h>
+#include <linux/io.h>
+
+enum {
+ UNUSED = 0,
+
+ /* interrupt sources */
+ IRL0, IRL1, IRL2, IRL3,
+ HUDI, GPIOI, DMAC,
+ IRQ4, IRQ5, IRQ6, IRQ7,
+ HCAN20, HCAN21,
+ SSI0, SSI1,
+ HAC0, HAC1,
+ I2C0, I2C1,
+ USB, LCDC,
+ DMABRG0, DMABRG1, DMABRG2,
+ SCIF0_ERI, SCIF0_RXI, SCIF0_BRI, SCIF0_TXI,
+ SCIF1_ERI, SCIF1_RXI, SCIF1_BRI, SCIF1_TXI,
+ SCIF2_ERI, SCIF2_RXI, SCIF2_BRI, SCIF2_TXI,
+ SIM_ERI, SIM_RXI, SIM_TXI, SIM_TEI,
+ HSPI,
+ MMCIF0, MMCIF1, MMCIF2, MMCIF3,
+ MFI, ADC, CMT,
+ TMU0, TMU1, TMU2,
+ WDT, REF,
+
+ /* interrupt groups */
+ DMABRG, SCIF0, SCIF1, SCIF2, SIM, MMCIF,
+};
+
+static struct intc_vect vectors[] __initdata = {
+ INTC_VECT(HUDI, 0x600), INTC_VECT(GPIOI, 0x620),
+ INTC_VECT(DMAC, 0x640), INTC_VECT(DMAC, 0x660),
+ INTC_VECT(DMAC, 0x680), INTC_VECT(DMAC, 0x6a0),
+ INTC_VECT(DMAC, 0x780), INTC_VECT(DMAC, 0x7a0),
+ INTC_VECT(DMAC, 0x7c0), INTC_VECT(DMAC, 0x7e0),
+ INTC_VECT(DMAC, 0x6c0),
+ INTC_VECT(IRQ4, 0x800), INTC_VECT(IRQ5, 0x820),
+ INTC_VECT(IRQ6, 0x840), INTC_VECT(IRQ7, 0x860),
+ INTC_VECT(HCAN20, 0x900), INTC_VECT(HCAN21, 0x920),
+ INTC_VECT(SSI0, 0x940), INTC_VECT(SSI1, 0x960),
+ INTC_VECT(HAC0, 0x980), INTC_VECT(HAC1, 0x9a0),
+ INTC_VECT(I2C0, 0x9c0), INTC_VECT(I2C1, 0x9e0),
+ INTC_VECT(USB, 0xa00), INTC_VECT(LCDC, 0xa20),
+ INTC_VECT(DMABRG0, 0xa80), INTC_VECT(DMABRG1, 0xaa0),
+ INTC_VECT(DMABRG2, 0xac0),
+ INTC_VECT(SCIF0_ERI, 0x880), INTC_VECT(SCIF0_RXI, 0x8a0),
+ INTC_VECT(SCIF0_BRI, 0x8c0), INTC_VECT(SCIF0_TXI, 0x8e0),
+ INTC_VECT(SCIF1_ERI, 0xb00), INTC_VECT(SCIF1_RXI, 0xb20),
+ INTC_VECT(SCIF1_BRI, 0xb40), INTC_VECT(SCIF1_TXI, 0xb60),
+ INTC_VECT(SCIF2_ERI, 0xb80), INTC_VECT(SCIF2_RXI, 0xba0),
+ INTC_VECT(SCIF2_BRI, 0xbc0), INTC_VECT(SCIF2_TXI, 0xbe0),
+ INTC_VECT(SIM_ERI, 0xc00), INTC_VECT(SIM_RXI, 0xc20),
+ INTC_VECT(SIM_TXI, 0xc40), INTC_VECT(SIM_TEI, 0xc60),
+ INTC_VECT(HSPI, 0xc80),
+ INTC_VECT(MMCIF0, 0xd00), INTC_VECT(MMCIF1, 0xd20),
+ INTC_VECT(MMCIF2, 0xd40), INTC_VECT(MMCIF3, 0xd60),
+ INTC_VECT(MFI, 0xe80), /* 0xf80 according to data sheet */
+ INTC_VECT(ADC, 0xf80), INTC_VECT(CMT, 0xfa0),
+ INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420),
+ INTC_VECT(TMU2, 0x440), INTC_VECT(TMU2, 0x460),
+ INTC_VECT(WDT, 0x560),
+ INTC_VECT(REF, 0x580), INTC_VECT(REF, 0x5a0),
+};
+
+static struct intc_group groups[] __initdata = {
+ INTC_GROUP(DMABRG, DMABRG0, DMABRG1, DMABRG2),
+ INTC_GROUP(SCIF0, SCIF0_ERI, SCIF0_RXI, SCIF0_BRI, SCIF0_TXI),
+ INTC_GROUP(SCIF1, SCIF1_ERI, SCIF1_RXI, SCIF1_BRI, SCIF1_TXI),
+ INTC_GROUP(SCIF2, SCIF2_ERI, SCIF2_RXI, SCIF2_BRI, SCIF2_TXI),
+ INTC_GROUP(SIM, SIM_ERI, SIM_RXI, SIM_TXI, SIM_TEI),
+ INTC_GROUP(MMCIF, MMCIF0, MMCIF1, MMCIF2, MMCIF3),
+};
+
+static struct intc_mask_reg mask_registers[] __initdata = {
+ { 0xfe080040, 0xfe080060, 32, /* INTMSK00 / INTMSKCLR00 */
+ { IRQ4, IRQ5, IRQ6, IRQ7, 0, 0, HCAN20, HCAN21,
+ SSI0, SSI1, HAC0, HAC1, I2C0, I2C1, USB, LCDC,
+ 0, DMABRG0, DMABRG1, DMABRG2,
+ SCIF0_ERI, SCIF0_RXI, SCIF0_BRI, SCIF0_TXI,
+ SCIF1_ERI, SCIF1_RXI, SCIF1_BRI, SCIF1_TXI,
+ SCIF2_ERI, SCIF2_RXI, SCIF2_BRI, SCIF2_TXI, } },
+ { 0xfe080044, 0xfe080064, 32, /* INTMSK04 / INTMSKCLR04 */
+ { 0, 0, 0, 0, 0, 0, 0, 0,
+ SIM_ERI, SIM_RXI, SIM_TXI, SIM_TEI,
+ HSPI, MMCIF0, MMCIF1, MMCIF2,
+ MMCIF3, 0, 0, 0, 0, 0, 0, 0,
+ 0, MFI, 0, 0, 0, 0, ADC, CMT, } },
+};
+
+static struct intc_prio_reg prio_registers[] __initdata = {
+ { 0xffd00004, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2 } },
+ { 0xffd00008, 0, 16, 4, /* IPRB */ { WDT, REF, 0, 0 } },
+ { 0xffd0000c, 0, 16, 4, /* IPRC */ { GPIOI, DMAC, 0, HUDI } },
+ { 0xffd00010, 0, 16, 4, /* IPRD */ { IRL0, IRL1, IRL2, IRL3 } },
+ { 0xfe080000, 0, 32, 4, /* INTPRI00 */ { IRQ4, IRQ5, IRQ6, IRQ7 } },
+ { 0xfe080004, 0, 32, 4, /* INTPRI04 */ { HCAN20, HCAN21, SSI0, SSI1,
+ HAC0, HAC1, I2C0, I2C1 } },
+ { 0xfe080008, 0, 32, 4, /* INTPRI08 */ { USB, LCDC, DMABRG, SCIF0,
+ SCIF1, SCIF2, SIM, HSPI } },
+ { 0xfe08000c, 0, 32, 4, /* INTPRI0C */ { 0, 0, MMCIF, 0,
+ MFI, 0, ADC, CMT } },
+};
+
+static DECLARE_INTC_DESC(intc_desc, "sh7760", vectors, groups,
+ mask_registers, prio_registers, NULL);
+
+static struct intc_vect vectors_irq[] __initdata = {
+ INTC_VECT(IRL0, 0x240), INTC_VECT(IRL1, 0x2a0),
+ INTC_VECT(IRL2, 0x300), INTC_VECT(IRL3, 0x360),
+};
+
+static DECLARE_INTC_DESC(intc_desc_irq, "sh7760-irq", vectors_irq, groups,
+ mask_registers, prio_registers, NULL);
+
+static struct plat_sci_port scif0_platform_data = {
+ .mapbase = 0xfe600000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .scbrr_algo_id = SCBRR_ALGO_2,
+ .type = PORT_SCIF,
+ .irqs = { 52, 53, 55, 54 },
+ .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
+};
+
+static struct platform_device scif0_device = {
+ .name = "sh-sci",
+ .id = 0,
+ .dev = {
+ .platform_data = &scif0_platform_data,
+ },
+};
+
+static struct plat_sci_port scif1_platform_data = {
+ .mapbase = 0xfe610000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .scbrr_algo_id = SCBRR_ALGO_2,
+ .irqs = { 72, 73, 75, 74 },
+ .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
+};
+
+static struct platform_device scif1_device = {
+ .name = "sh-sci",
+ .id = 1,
+ .dev = {
+ .platform_data = &scif1_platform_data,
+ },
+};
+
+static struct plat_sci_port scif2_platform_data = {
+ .mapbase = 0xfe620000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .scbrr_algo_id = SCBRR_ALGO_2,
+ .type = PORT_SCIF,
+ .irqs = { 76, 77, 79, 78 },
+ .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
+};
+
+static struct platform_device scif2_device = {
+ .name = "sh-sci",
+ .id = 2,
+ .dev = {
+ .platform_data = &scif2_platform_data,
+ },
+};
+
+static struct plat_sci_port scif3_platform_data = {
+ .mapbase = 0xfe480000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .scbrr_algo_id = SCBRR_ALGO_2,
+ .type = PORT_SCI,
+ .irqs = { 80, 81, 82, 0 },
+ .regshift = 2,
+};
+
+static struct platform_device scif3_device = {
+ .name = "sh-sci",
+ .id = 3,
+ .dev = {
+ .platform_data = &scif3_platform_data,
+ },
+};
+
+static struct sh_timer_config tmu0_platform_data = {
+ .channel_offset = 0x04,
+ .timer_bit = 0,
+ .clockevent_rating = 200,
+};
+
+static struct resource tmu0_resources[] = {
+ [0] = {
+ .start = 0xffd80008,
+ .end = 0xffd80013,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 16,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu0_device = {
+ .name = "sh_tmu",
+ .id = 0,
+ .dev = {
+ .platform_data = &tmu0_platform_data,
+ },
+ .resource = tmu0_resources,
+ .num_resources = ARRAY_SIZE(tmu0_resources),
+};
+
+static struct sh_timer_config tmu1_platform_data = {
+ .channel_offset = 0x10,
+ .timer_bit = 1,
+ .clocksource_rating = 200,
+};
+
+static struct resource tmu1_resources[] = {
+ [0] = {
+ .start = 0xffd80014,
+ .end = 0xffd8001f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 17,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu1_device = {
+ .name = "sh_tmu",
+ .id = 1,
+ .dev = {
+ .platform_data = &tmu1_platform_data,
+ },
+ .resource = tmu1_resources,
+ .num_resources = ARRAY_SIZE(tmu1_resources),
+};
+
+static struct sh_timer_config tmu2_platform_data = {
+ .channel_offset = 0x1c,
+ .timer_bit = 2,
+};
+
+static struct resource tmu2_resources[] = {
+ [0] = {
+ .start = 0xffd80020,
+ .end = 0xffd8002f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 18,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu2_device = {
+ .name = "sh_tmu",
+ .id = 2,
+ .dev = {
+ .platform_data = &tmu2_platform_data,
+ },
+ .resource = tmu2_resources,
+ .num_resources = ARRAY_SIZE(tmu2_resources),
+};
+
+
+static struct platform_device *sh7760_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &scif3_device,
+ &tmu0_device,
+ &tmu1_device,
+ &tmu2_device,
+};
+
+static int __init sh7760_devices_setup(void)
+{
+ return platform_add_devices(sh7760_devices,
+ ARRAY_SIZE(sh7760_devices));
+}
+arch_initcall(sh7760_devices_setup);
+
+static struct platform_device *sh7760_early_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &scif3_device,
+ &tmu0_device,
+ &tmu1_device,
+ &tmu2_device,
+};
+
+void __init plat_early_device_setup(void)
+{
+ early_platform_add_devices(sh7760_early_devices,
+ ARRAY_SIZE(sh7760_early_devices));
+}
+
+#define INTC_ICR 0xffd00000UL
+#define INTC_ICR_IRLM (1 << 7)
+
+void __init plat_irq_setup_pins(int mode)
+{
+ switch (mode) {
+ case IRQ_MODE_IRQ:
+ __raw_writew(__raw_readw(INTC_ICR) | INTC_ICR_IRLM, INTC_ICR);
+ register_intc_controller(&intc_desc_irq);
+ break;
+ default:
+ BUG();
+ }
+}
+
+void __init plat_irq_setup(void)
+{
+ register_intc_controller(&intc_desc);
+}
diff --git a/arch/sh/kernel/cpu/sh4/softfloat.c b/arch/sh/kernel/cpu/sh4/softfloat.c
new file mode 100644
index 00000000..42edf2e5
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4/softfloat.c
@@ -0,0 +1,930 @@
+/*
+ * Floating point emulation support for subnormalised numbers on the SH4
+ * architecture. This file is derived from the SoftFloat IEC/IEEE
+ * Floating-point Arithmetic Package, Release 2, the original license of
+ * which is reproduced below.
+ *
+ * ========================================================================
+ *
+ * This C source file is part of the SoftFloat IEC/IEEE Floating-point
+ * Arithmetic Package, Release 2.
+ *
+ * Written by John R. Hauser. This work was made possible in part by the
+ * International Computer Science Institute, located at Suite 600, 1947 Center
+ * Street, Berkeley, California 94704. Funding was partially provided by the
+ * National Science Foundation under grant MIP-9311980. The original version
+ * of this code was written as part of a project to build a fixed-point vector
+ * processor in collaboration with the University of California at Berkeley,
+ * overseen by Profs. Nelson Morgan and John Wawrzynek. More information
+ * is available through the web page `http://HTTP.CS.Berkeley.EDU/~jhauser/
+ * arithmetic/softfloat.html'.
+ *
+ * THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort
+ * has been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT
+ * TIMES RESULT IN INCORRECT BEHAVIOR. USE OF THIS SOFTWARE IS RESTRICTED TO
+ * PERSONS AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ANY
+ * AND ALL LOSSES, COSTS, OR OTHER PROBLEMS ARISING FROM ITS USE.
+ *
+ * Derivative works are acceptable, even for commercial purposes, so long as
+ * (1) they include prominent notice that the work is derivative, and (2) they
+ * include prominent notice akin to these three paragraphs for those parts of
+ * this code that are retained.
+ *
+ * ========================================================================
+ *
+ * SH4 modifications by Ismail Dhaoui <ismail.dhaoui@st.com>
+ * and Kamel Khelifi <kamel.khelifi@st.com>
+ */
+#include <linux/kernel.h>
+#include <cpu/fpu.h>
+#include <asm/div64.h>
+
+#define LIT64( a ) a##LL
+
+typedef char flag;
+typedef unsigned char uint8;
+typedef signed char int8;
+typedef int uint16;
+typedef int int16;
+typedef unsigned int uint32;
+typedef signed int int32;
+
+typedef unsigned long long int bits64;
+typedef signed long long int sbits64;
+
+typedef unsigned char bits8;
+typedef signed char sbits8;
+typedef unsigned short int bits16;
+typedef signed short int sbits16;
+typedef unsigned int bits32;
+typedef signed int sbits32;
+
+typedef unsigned long long int uint64;
+typedef signed long long int int64;
+
+typedef unsigned long int float32;
+typedef unsigned long long float64;
+
+extern void float_raise(unsigned int flags); /* in fpu.c */
+extern int float_rounding_mode(void); /* in fpu.c */
+
+bits64 extractFloat64Frac(float64 a);
+flag extractFloat64Sign(float64 a);
+int16 extractFloat64Exp(float64 a);
+int16 extractFloat32Exp(float32 a);
+flag extractFloat32Sign(float32 a);
+bits32 extractFloat32Frac(float32 a);
+float64 packFloat64(flag zSign, int16 zExp, bits64 zSig);
+void shift64RightJamming(bits64 a, int16 count, bits64 * zPtr);
+float32 packFloat32(flag zSign, int16 zExp, bits32 zSig);
+void shift32RightJamming(bits32 a, int16 count, bits32 * zPtr);
+float64 float64_sub(float64 a, float64 b);
+float32 float32_sub(float32 a, float32 b);
+float32 float32_add(float32 a, float32 b);
+float64 float64_add(float64 a, float64 b);
+float64 float64_div(float64 a, float64 b);
+float32 float32_div(float32 a, float32 b);
+float32 float32_mul(float32 a, float32 b);
+float64 float64_mul(float64 a, float64 b);
+float32 float64_to_float32(float64 a);
+void add128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr,
+ bits64 * z1Ptr);
+void sub128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr,
+ bits64 * z1Ptr);
+void mul64To128(bits64 a, bits64 b, bits64 * z0Ptr, bits64 * z1Ptr);
+
+static int8 countLeadingZeros32(bits32 a);
+static int8 countLeadingZeros64(bits64 a);
+static float64 normalizeRoundAndPackFloat64(flag zSign, int16 zExp,
+ bits64 zSig);
+static float64 subFloat64Sigs(float64 a, float64 b, flag zSign);
+static float64 addFloat64Sigs(float64 a, float64 b, flag zSign);
+static float32 roundAndPackFloat32(flag zSign, int16 zExp, bits32 zSig);
+static float32 normalizeRoundAndPackFloat32(flag zSign, int16 zExp,
+ bits32 zSig);
+static float64 roundAndPackFloat64(flag zSign, int16 zExp, bits64 zSig);
+static float32 subFloat32Sigs(float32 a, float32 b, flag zSign);
+static float32 addFloat32Sigs(float32 a, float32 b, flag zSign);
+static void normalizeFloat64Subnormal(bits64 aSig, int16 * zExpPtr,
+ bits64 * zSigPtr);
+static bits64 estimateDiv128To64(bits64 a0, bits64 a1, bits64 b);
+static void normalizeFloat32Subnormal(bits32 aSig, int16 * zExpPtr,
+ bits32 * zSigPtr);
+
+bits64 extractFloat64Frac(float64 a)
+{
+ return a & LIT64(0x000FFFFFFFFFFFFF);
+}
+
+flag extractFloat64Sign(float64 a)
+{
+ return a >> 63;
+}
+
+int16 extractFloat64Exp(float64 a)
+{
+ return (a >> 52) & 0x7FF;
+}
+
+int16 extractFloat32Exp(float32 a)
+{
+ return (a >> 23) & 0xFF;
+}
+
+flag extractFloat32Sign(float32 a)
+{
+ return a >> 31;
+}
+
+bits32 extractFloat32Frac(float32 a)
+{
+ return a & 0x007FFFFF;
+}
+
+float64 packFloat64(flag zSign, int16 zExp, bits64 zSig)
+{
+ return (((bits64) zSign) << 63) + (((bits64) zExp) << 52) + zSig;
+}
+
+void shift64RightJamming(bits64 a, int16 count, bits64 * zPtr)
+{
+ bits64 z;
+
+ if (count == 0) {
+ z = a;
+ } else if (count < 64) {
+ z = (a >> count) | ((a << ((-count) & 63)) != 0);
+ } else {
+ z = (a != 0);
+ }
+ *zPtr = z;
+}
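+
+/*
+ * Worked example (illustrative): shift64RightJamming(0x5, 3, &z) shifts
+ * 0b101 right by three places. The plain shift would give 0, but because
+ * non-zero bits fall off the end the result is "jammed" to z = 1, keeping
+ * a sticky record of the discarded bits for later rounding.
+ */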
+
+static int8 countLeadingZeros32(bits32 a)
+{
+ static const int8 countLeadingZerosHigh[] = {
+ 8, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ };
+ int8 shiftCount;
+
+ shiftCount = 0;
+ if (a < 0x10000) {
+ shiftCount += 16;
+ a <<= 16;
+ }
+ if (a < 0x1000000) {
+ shiftCount += 8;
+ a <<= 8;
+ }
+ shiftCount += countLeadingZerosHigh[a >> 24];
+ return shiftCount;
+
+}
+
+static int8 countLeadingZeros64(bits64 a)
+{
+ int8 shiftCount;
+
+ shiftCount = 0;
+ if (a < ((bits64) 1) << 32) {
+ shiftCount += 32;
+ } else {
+ a >>= 32;
+ }
+ shiftCount += countLeadingZeros32(a);
+ return shiftCount;
+
+}
+
+static float64 normalizeRoundAndPackFloat64(flag zSign, int16 zExp, bits64 zSig)
+{
+ int8 shiftCount;
+
+ shiftCount = countLeadingZeros64(zSig) - 1;
+ return roundAndPackFloat64(zSign, zExp - shiftCount,
+ zSig << shiftCount);
+
+}
+
+static float64 subFloat64Sigs(float64 a, float64 b, flag zSign)
+{
+ int16 aExp, bExp, zExp;
+ bits64 aSig, bSig, zSig;
+ int16 expDiff;
+
+ aSig = extractFloat64Frac(a);
+ aExp = extractFloat64Exp(a);
+ bSig = extractFloat64Frac(b);
+ bExp = extractFloat64Exp(b);
+ expDiff = aExp - bExp;
+ aSig <<= 10;
+ bSig <<= 10;
+ if (0 < expDiff)
+ goto aExpBigger;
+ if (expDiff < 0)
+ goto bExpBigger;
+ if (aExp == 0) {
+ aExp = 1;
+ bExp = 1;
+ }
+ if (bSig < aSig)
+ goto aBigger;
+ if (aSig < bSig)
+ goto bBigger;
+ return packFloat64(float_rounding_mode() == FPSCR_RM_ZERO, 0, 0);
+ bExpBigger:
+ if (bExp == 0x7FF) {
+ return packFloat64(zSign ^ 1, 0x7FF, 0);
+ }
+ if (aExp == 0) {
+ ++expDiff;
+ } else {
+ aSig |= LIT64(0x4000000000000000);
+ }
+ shift64RightJamming(aSig, -expDiff, &aSig);
+ bSig |= LIT64(0x4000000000000000);
+ bBigger:
+ zSig = bSig - aSig;
+ zExp = bExp;
+ zSign ^= 1;
+ goto normalizeRoundAndPack;
+ aExpBigger:
+ if (aExp == 0x7FF) {
+ return a;
+ }
+ if (bExp == 0) {
+ --expDiff;
+ } else {
+ bSig |= LIT64(0x4000000000000000);
+ }
+ shift64RightJamming(bSig, expDiff, &bSig);
+ aSig |= LIT64(0x4000000000000000);
+ aBigger:
+ zSig = aSig - bSig;
+ zExp = aExp;
+ normalizeRoundAndPack:
+ --zExp;
+ return normalizeRoundAndPackFloat64(zSign, zExp, zSig);
+
+}
+static float64 addFloat64Sigs(float64 a, float64 b, flag zSign)
+{
+ int16 aExp, bExp, zExp;
+ bits64 aSig, bSig, zSig;
+ int16 expDiff;
+
+ aSig = extractFloat64Frac(a);
+ aExp = extractFloat64Exp(a);
+ bSig = extractFloat64Frac(b);
+ bExp = extractFloat64Exp(b);
+ expDiff = aExp - bExp;
+ aSig <<= 9;
+ bSig <<= 9;
+ if (0 < expDiff) {
+ if (aExp == 0x7FF) {
+ return a;
+ }
+ if (bExp == 0) {
+ --expDiff;
+ } else {
+ bSig |= LIT64(0x2000000000000000);
+ }
+ shift64RightJamming(bSig, expDiff, &bSig);
+ zExp = aExp;
+ } else if (expDiff < 0) {
+ if (bExp == 0x7FF) {
+ return packFloat64(zSign, 0x7FF, 0);
+ }
+ if (aExp == 0) {
+ ++expDiff;
+ } else {
+ aSig |= LIT64(0x2000000000000000);
+ }
+ shift64RightJamming(aSig, -expDiff, &aSig);
+ zExp = bExp;
+ } else {
+ if (aExp == 0x7FF) {
+ return a;
+ }
+ if (aExp == 0)
+ return packFloat64(zSign, 0, (aSig + bSig) >> 9);
+ zSig = LIT64(0x4000000000000000) + aSig + bSig;
+ zExp = aExp;
+ goto roundAndPack;
+ }
+ aSig |= LIT64(0x2000000000000000);
+ zSig = (aSig + bSig) << 1;
+ --zExp;
+ if ((sbits64) zSig < 0) {
+ zSig = aSig + bSig;
+ ++zExp;
+ }
+ roundAndPack:
+ return roundAndPackFloat64(zSign, zExp, zSig);
+
+}
+
+float32 packFloat32(flag zSign, int16 zExp, bits32 zSig)
+{
+ return (((bits32) zSign) << 31) + (((bits32) zExp) << 23) + zSig;
+}
+
+void shift32RightJamming(bits32 a, int16 count, bits32 * zPtr)
+{
+ bits32 z;
+ if (count == 0) {
+ z = a;
+ } else if (count < 32) {
+ z = (a >> count) | ((a << ((-count) & 31)) != 0);
+ } else {
+ z = (a != 0);
+ }
+ *zPtr = z;
+}
+
+static float32 roundAndPackFloat32(flag zSign, int16 zExp, bits32 zSig)
+{
+ flag roundNearestEven;
+ int8 roundIncrement, roundBits;
+ flag isTiny;
+
+ /* SH4 has only 2 rounding modes - round to nearest and round to zero */
+ roundNearestEven = (float_rounding_mode() == FPSCR_RM_NEAREST);
+ roundIncrement = 0x40;
+ if (!roundNearestEven) {
+ roundIncrement = 0;
+ }
+ roundBits = zSig & 0x7F;
+ if (0xFD <= (bits16) zExp) {
+ if ((0xFD < zExp)
+ || ((zExp == 0xFD)
+ && ((sbits32) (zSig + roundIncrement) < 0))
+ ) {
+ float_raise(FPSCR_CAUSE_OVERFLOW | FPSCR_CAUSE_INEXACT);
+ return packFloat32(zSign, 0xFF,
+ 0) - (roundIncrement == 0);
+ }
+ if (zExp < 0) {
+ isTiny = (zExp < -1)
+ || (zSig + roundIncrement < 0x80000000);
+ shift32RightJamming(zSig, -zExp, &zSig);
+ zExp = 0;
+ roundBits = zSig & 0x7F;
+ if (isTiny && roundBits)
+ float_raise(FPSCR_CAUSE_UNDERFLOW);
+ }
+ }
+ if (roundBits)
+ float_raise(FPSCR_CAUSE_INEXACT);
+ zSig = (zSig + roundIncrement) >> 7;
+ zSig &= ~(((roundBits ^ 0x40) == 0) & roundNearestEven);
+ if (zSig == 0)
+ zExp = 0;
+ return packFloat32(zSign, zExp, zSig);
+
+}
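+
+/*
+ * Worked example of the nearest-even path above, with illustrative numbers:
+ * for zSig = 0x40000040 the low seven bits give roundBits = 0x40, an exact
+ * tie. Adding roundIncrement (0x40) and shifting right by 7 yields
+ * 0x00800001, and the final mask then clears the low bit, producing
+ * 0x00800000, the even neighbour, as round-to-nearest-even requires.
+ */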
+
+static float32 normalizeRoundAndPackFloat32(flag zSign, int16 zExp, bits32 zSig)
+{
+ int8 shiftCount;
+
+ shiftCount = countLeadingZeros32(zSig) - 1;
+ return roundAndPackFloat32(zSign, zExp - shiftCount,
+ zSig << shiftCount);
+}
+
+static float64 roundAndPackFloat64(flag zSign, int16 zExp, bits64 zSig)
+{
+ flag roundNearestEven;
+ int16 roundIncrement, roundBits;
+ flag isTiny;
+
+ /* SH4 has only 2 rounding modes - round to nearest and round to zero */
+ roundNearestEven = (float_rounding_mode() == FPSCR_RM_NEAREST);
+ roundIncrement = 0x200;
+ if (!roundNearestEven) {
+ roundIncrement = 0;
+ }
+ roundBits = zSig & 0x3FF;
+ if (0x7FD <= (bits16) zExp) {
+ if ((0x7FD < zExp)
+ || ((zExp == 0x7FD)
+ && ((sbits64) (zSig + roundIncrement) < 0))
+ ) {
+ float_raise(FPSCR_CAUSE_OVERFLOW | FPSCR_CAUSE_INEXACT);
+ return packFloat64(zSign, 0x7FF,
+ 0) - (roundIncrement == 0);
+ }
+ if (zExp < 0) {
+ isTiny = (zExp < -1)
+ || (zSig + roundIncrement <
+ LIT64(0x8000000000000000));
+ shift64RightJamming(zSig, -zExp, &zSig);
+ zExp = 0;
+ roundBits = zSig & 0x3FF;
+ if (isTiny && roundBits)
+ float_raise(FPSCR_CAUSE_UNDERFLOW);
+ }
+ }
+ if (roundBits)
+ float_raise(FPSCR_CAUSE_INEXACT);
+ zSig = (zSig + roundIncrement) >> 10;
+ zSig &= ~(((roundBits ^ 0x200) == 0) & roundNearestEven);
+ if (zSig == 0)
+ zExp = 0;
+ return packFloat64(zSign, zExp, zSig);
+
+}
+
+static float32 subFloat32Sigs(float32 a, float32 b, flag zSign)
+{
+ int16 aExp, bExp, zExp;
+ bits32 aSig, bSig, zSig;
+ int16 expDiff;
+
+ aSig = extractFloat32Frac(a);
+ aExp = extractFloat32Exp(a);
+ bSig = extractFloat32Frac(b);
+ bExp = extractFloat32Exp(b);
+ expDiff = aExp - bExp;
+ aSig <<= 7;
+ bSig <<= 7;
+ if (0 < expDiff)
+ goto aExpBigger;
+ if (expDiff < 0)
+ goto bExpBigger;
+ if (aExp == 0) {
+ aExp = 1;
+ bExp = 1;
+ }
+ if (bSig < aSig)
+ goto aBigger;
+ if (aSig < bSig)
+ goto bBigger;
+ return packFloat32(float_rounding_mode() == FPSCR_RM_ZERO, 0, 0);
+ bExpBigger:
+ if (bExp == 0xFF) {
+ return packFloat32(zSign ^ 1, 0xFF, 0);
+ }
+ if (aExp == 0) {
+ ++expDiff;
+ } else {
+ aSig |= 0x40000000;
+ }
+ shift32RightJamming(aSig, -expDiff, &aSig);
+ bSig |= 0x40000000;
+ bBigger:
+ zSig = bSig - aSig;
+ zExp = bExp;
+ zSign ^= 1;
+ goto normalizeRoundAndPack;
+ aExpBigger:
+ if (aExp == 0xFF) {
+ return a;
+ }
+ if (bExp == 0) {
+ --expDiff;
+ } else {
+ bSig |= 0x40000000;
+ }
+ shift32RightJamming(bSig, expDiff, &bSig);
+ aSig |= 0x40000000;
+ aBigger:
+ zSig = aSig - bSig;
+ zExp = aExp;
+ normalizeRoundAndPack:
+ --zExp;
+ return normalizeRoundAndPackFloat32(zSign, zExp, zSig);
+
+}
+
+static float32 addFloat32Sigs(float32 a, float32 b, flag zSign)
+{
+ int16 aExp, bExp, zExp;
+ bits32 aSig, bSig, zSig;
+ int16 expDiff;
+
+ aSig = extractFloat32Frac(a);
+ aExp = extractFloat32Exp(a);
+ bSig = extractFloat32Frac(b);
+ bExp = extractFloat32Exp(b);
+ expDiff = aExp - bExp;
+ aSig <<= 6;
+ bSig <<= 6;
+ if (0 < expDiff) {
+ if (aExp == 0xFF) {
+ return a;
+ }
+ if (bExp == 0) {
+ --expDiff;
+ } else {
+ bSig |= 0x20000000;
+ }
+ shift32RightJamming(bSig, expDiff, &bSig);
+ zExp = aExp;
+ } else if (expDiff < 0) {
+ if (bExp == 0xFF) {
+ return packFloat32(zSign, 0xFF, 0);
+ }
+ if (aExp == 0) {
+ ++expDiff;
+ } else {
+ aSig |= 0x20000000;
+ }
+ shift32RightJamming(aSig, -expDiff, &aSig);
+ zExp = bExp;
+ } else {
+ if (aExp == 0xFF) {
+ return a;
+ }
+ if (aExp == 0)
+ return packFloat32(zSign, 0, (aSig + bSig) >> 6);
+ zSig = 0x40000000 + aSig + bSig;
+ zExp = aExp;
+ goto roundAndPack;
+ }
+ aSig |= 0x20000000;
+ zSig = (aSig + bSig) << 1;
+ --zExp;
+ if ((sbits32) zSig < 0) {
+ zSig = aSig + bSig;
+ ++zExp;
+ }
+ roundAndPack:
+ return roundAndPackFloat32(zSign, zExp, zSig);
+
+}
+
+float64 float64_sub(float64 a, float64 b)
+{
+ flag aSign, bSign;
+
+ aSign = extractFloat64Sign(a);
+ bSign = extractFloat64Sign(b);
+ if (aSign == bSign) {
+ return subFloat64Sigs(a, b, aSign);
+ } else {
+ return addFloat64Sigs(a, b, aSign);
+ }
+
+}
+
+float32 float32_sub(float32 a, float32 b)
+{
+ flag aSign, bSign;
+
+ aSign = extractFloat32Sign(a);
+ bSign = extractFloat32Sign(b);
+ if (aSign == bSign) {
+ return subFloat32Sigs(a, b, aSign);
+ } else {
+ return addFloat32Sigs(a, b, aSign);
+ }
+
+}
+
+float32 float32_add(float32 a, float32 b)
+{
+ flag aSign, bSign;
+
+ aSign = extractFloat32Sign(a);
+ bSign = extractFloat32Sign(b);
+ if (aSign == bSign) {
+ return addFloat32Sigs(a, b, aSign);
+ } else {
+ return subFloat32Sigs(a, b, aSign);
+ }
+
+}
+
+float64 float64_add(float64 a, float64 b)
+{
+ flag aSign, bSign;
+
+ aSign = extractFloat64Sign(a);
+ bSign = extractFloat64Sign(b);
+ if (aSign == bSign) {
+ return addFloat64Sigs(a, b, aSign);
+ } else {
+ return subFloat64Sigs(a, b, aSign);
+ }
+}
+
+static void
+normalizeFloat64Subnormal(bits64 aSig, int16 * zExpPtr, bits64 * zSigPtr)
+{
+ int8 shiftCount;
+
+ shiftCount = countLeadingZeros64(aSig) - 11;
+ *zSigPtr = aSig << shiftCount;
+ *zExpPtr = 1 - shiftCount;
+}
+
+void add128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr,
+ bits64 * z1Ptr)
+{
+ bits64 z1;
+
+ z1 = a1 + b1;
+ *z1Ptr = z1;
+ *z0Ptr = a0 + b0 + (z1 < a1);
+}
+
+void
+sub128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr,
+ bits64 * z1Ptr)
+{
+ *z1Ptr = a1 - b1;
+ *z0Ptr = a0 - b0 - (a1 < b1);
+}
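+
+/*
+ * Quick illustration: add128(0, 0xFFFFFFFFFFFFFFFF, 0, 1, &z0, &z1) wraps
+ * the low word to 0 and, since z1 < a1, carries one into the high word,
+ * giving the 128-bit value {1, 0}; sub128() mirrors this with a borrow
+ * whenever a1 < b1.
+ */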
+
+static bits64 estimateDiv128To64(bits64 a0, bits64 a1, bits64 b)
+{
+ bits64 b0, b1;
+ bits64 rem0, rem1, term0, term1;
+ bits64 z, tmp;
+ if (b <= a0)
+ return LIT64(0xFFFFFFFFFFFFFFFF);
+ b0 = b >> 32;
+ tmp = a0;
+ do_div(tmp, b0);
+
+ z = (b0 << 32 <= a0) ? LIT64(0xFFFFFFFF00000000) : tmp << 32;
+ mul64To128(b, z, &term0, &term1);
+ sub128(a0, a1, term0, term1, &rem0, &rem1);
+ while (((sbits64) rem0) < 0) {
+ z -= LIT64(0x100000000);
+ b1 = b << 32;
+ add128(rem0, rem1, b0, b1, &rem0, &rem1);
+ }
+ rem0 = (rem0 << 32) | (rem1 >> 32);
+ tmp = rem0;
+ do_div(tmp, b0);
+ z |= (b0 << 32 <= rem0) ? 0xFFFFFFFF : tmp;
+ return z;
+}
+
+void mul64To128(bits64 a, bits64 b, bits64 * z0Ptr, bits64 * z1Ptr)
+{
+ bits32 aHigh, aLow, bHigh, bLow;
+ bits64 z0, zMiddleA, zMiddleB, z1;
+
+ aLow = a;
+ aHigh = a >> 32;
+ bLow = b;
+ bHigh = b >> 32;
+ z1 = ((bits64) aLow) * bLow;
+ zMiddleA = ((bits64) aLow) * bHigh;
+ zMiddleB = ((bits64) aHigh) * bLow;
+ z0 = ((bits64) aHigh) * bHigh;
+ zMiddleA += zMiddleB;
+ z0 += (((bits64) (zMiddleA < zMiddleB)) << 32) + (zMiddleA >> 32);
+ zMiddleA <<= 32;
+ z1 += zMiddleA;
+ z0 += (z1 < zMiddleA);
+ *z1Ptr = z1;
+ *z0Ptr = z0;
+
+}
+
+static void normalizeFloat32Subnormal(bits32 aSig, int16 * zExpPtr,
+ bits32 * zSigPtr)
+{
+ int8 shiftCount;
+
+ shiftCount = countLeadingZeros32(aSig) - 8;
+ *zSigPtr = aSig << shiftCount;
+ *zExpPtr = 1 - shiftCount;
+
+}
+
+float64 float64_div(float64 a, float64 b)
+{
+ flag aSign, bSign, zSign;
+ int16 aExp, bExp, zExp;
+ bits64 aSig, bSig, zSig;
+ bits64 rem0, rem1;
+ bits64 term0, term1;
+
+ aSig = extractFloat64Frac(a);
+ aExp = extractFloat64Exp(a);
+ aSign = extractFloat64Sign(a);
+ bSig = extractFloat64Frac(b);
+ bExp = extractFloat64Exp(b);
+ bSign = extractFloat64Sign(b);
+ zSign = aSign ^ bSign;
+ if (aExp == 0x7FF) {
+ if (bExp == 0x7FF) {
+ }
+ return packFloat64(zSign, 0x7FF, 0);
+ }
+ if (bExp == 0x7FF) {
+ return packFloat64(zSign, 0, 0);
+ }
+ if (bExp == 0) {
+ if (bSig == 0) {
+ if ((aExp | aSig) == 0) {
+ float_raise(FPSCR_CAUSE_INVALID);
+ }
+ return packFloat64(zSign, 0x7FF, 0);
+ }
+ normalizeFloat64Subnormal(bSig, &bExp, &bSig);
+ }
+ if (aExp == 0) {
+ if (aSig == 0)
+ return packFloat64(zSign, 0, 0);
+ normalizeFloat64Subnormal(aSig, &aExp, &aSig);
+ }
+ zExp = aExp - bExp + 0x3FD;
+ aSig = (aSig | LIT64(0x0010000000000000)) << 10;
+ bSig = (bSig | LIT64(0x0010000000000000)) << 11;
+ if (bSig <= (aSig + aSig)) {
+ aSig >>= 1;
+ ++zExp;
+ }
+ zSig = estimateDiv128To64(aSig, 0, bSig);
+ if ((zSig & 0x1FF) <= 2) {
+ mul64To128(bSig, zSig, &term0, &term1);
+ sub128(aSig, 0, term0, term1, &rem0, &rem1);
+ while ((sbits64) rem0 < 0) {
+ --zSig;
+ add128(rem0, rem1, 0, bSig, &rem0, &rem1);
+ }
+ zSig |= (rem1 != 0);
+ }
+ return roundAndPackFloat64(zSign, zExp, zSig);
+
+}
+
+float32 float32_div(float32 a, float32 b)
+{
+ flag aSign, bSign, zSign;
+ int16 aExp, bExp, zExp;
+ bits32 aSig, bSig;
+ uint64_t zSig;
+
+ aSig = extractFloat32Frac(a);
+ aExp = extractFloat32Exp(a);
+ aSign = extractFloat32Sign(a);
+ bSig = extractFloat32Frac(b);
+ bExp = extractFloat32Exp(b);
+ bSign = extractFloat32Sign(b);
+ zSign = aSign ^ bSign;
+ if (aExp == 0xFF) {
+ if (bExp == 0xFF) {
+ }
+ return packFloat32(zSign, 0xFF, 0);
+ }
+ if (bExp == 0xFF) {
+ return packFloat32(zSign, 0, 0);
+ }
+ if (bExp == 0) {
+ if (bSig == 0) {
+ return packFloat32(zSign, 0xFF, 0);
+ }
+ normalizeFloat32Subnormal(bSig, &bExp, &bSig);
+ }
+ if (aExp == 0) {
+ if (aSig == 0)
+ return packFloat32(zSign, 0, 0);
+ normalizeFloat32Subnormal(aSig, &aExp, &aSig);
+ }
+ zExp = aExp - bExp + 0x7D;
+ aSig = (aSig | 0x00800000) << 7;
+ bSig = (bSig | 0x00800000) << 8;
+ if (bSig <= (aSig + aSig)) {
+ aSig >>= 1;
+ ++zExp;
+ }
+ zSig = (((bits64) aSig) << 32);
+ do_div(zSig, bSig);
+
+ if ((zSig & 0x3F) == 0) {
+ zSig |= (((bits64) bSig) * zSig != ((bits64) aSig) << 32);
+ }
+ return roundAndPackFloat32(zSign, zExp, (bits32)zSig);
+
+}
+
+float32 float32_mul(float32 a, float32 b)
+{
+ char aSign, bSign, zSign;
+ int aExp, bExp, zExp;
+ unsigned int aSig, bSig;
+ unsigned long long zSig64;
+ unsigned int zSig;
+
+ aSig = extractFloat32Frac(a);
+ aExp = extractFloat32Exp(a);
+ aSign = extractFloat32Sign(a);
+ bSig = extractFloat32Frac(b);
+ bExp = extractFloat32Exp(b);
+ bSign = extractFloat32Sign(b);
+ zSign = aSign ^ bSign;
+ if (aExp == 0) {
+ if (aSig == 0)
+ return packFloat32(zSign, 0, 0);
+ normalizeFloat32Subnormal(aSig, &aExp, &aSig);
+ }
+ if (bExp == 0) {
+ if (bSig == 0)
+ return packFloat32(zSign, 0, 0);
+ normalizeFloat32Subnormal(bSig, &bExp, &bSig);
+ }
+ if ((bExp == 0xff && bSig == 0) || (aExp == 0xff && aSig == 0))
+ return roundAndPackFloat32(zSign, 0xff, 0);
+
+ zExp = aExp + bExp - 0x7F;
+ aSig = (aSig | 0x00800000) << 7;
+ bSig = (bSig | 0x00800000) << 8;
+ shift64RightJamming(((unsigned long long)aSig) * bSig, 32, &zSig64);
+ zSig = zSig64;
+ if (0 <= (signed int)(zSig << 1)) {
+ zSig <<= 1;
+ --zExp;
+ }
+ return roundAndPackFloat32(zSign, zExp, zSig);
+
+}
+
+float64 float64_mul(float64 a, float64 b)
+{
+ char aSign, bSign, zSign;
+ int aExp, bExp, zExp;
+ unsigned long long int aSig, bSig, zSig0, zSig1;
+
+ aSig = extractFloat64Frac(a);
+ aExp = extractFloat64Exp(a);
+ aSign = extractFloat64Sign(a);
+ bSig = extractFloat64Frac(b);
+ bExp = extractFloat64Exp(b);
+ bSign = extractFloat64Sign(b);
+ zSign = aSign ^ bSign;
+
+ if (aExp == 0) {
+ if (aSig == 0)
+ return packFloat64(zSign, 0, 0);
+ normalizeFloat64Subnormal(aSig, &aExp, &aSig);
+ }
+ if (bExp == 0) {
+ if (bSig == 0)
+ return packFloat64(zSign, 0, 0);
+ normalizeFloat64Subnormal(bSig, &bExp, &bSig);
+ }
+ if ((aExp == 0x7ff && aSig == 0) || (bExp == 0x7ff && bSig == 0))
+ return roundAndPackFloat64(zSign, 0x7ff, 0);
+
+ zExp = aExp + bExp - 0x3FF;
+ aSig = (aSig | 0x0010000000000000LL) << 10;
+ bSig = (bSig | 0x0010000000000000LL) << 11;
+ mul64To128(aSig, bSig, &zSig0, &zSig1);
+ zSig0 |= (zSig1 != 0);
+ if (0 <= (signed long long int)(zSig0 << 1)) {
+ zSig0 <<= 1;
+ --zExp;
+ }
+ return roundAndPackFloat64(zSign, zExp, zSig0);
+}
+
+/*
+ * -------------------------------------------------------------------------------
+ * Returns the result of converting the double-precision floating-point value
+ * `a' to the single-precision floating-point format. The conversion is
+ * performed according to the IEC/IEEE Standard for Binary Floating-point
+ * Arithmetic.
+ * -------------------------------------------------------------------------------
+ */
+float32 float64_to_float32(float64 a)
+{
+ flag aSign;
+ int16 aExp;
+ bits64 aSig;
+ bits32 zSig;
+
+ aSig = extractFloat64Frac( a );
+ aExp = extractFloat64Exp( a );
+ aSign = extractFloat64Sign( a );
+
+ shift64RightJamming( aSig, 22, &aSig );
+ zSig = aSig;
+ if ( aExp || zSig ) {
+ zSig |= 0x40000000;
+ aExp -= 0x381;
+ }
+ return roundAndPackFloat32(aSign, aExp, zSig);
+}
diff --git a/arch/sh/kernel/cpu/sh4/sq.c b/arch/sh/kernel/cpu/sh4/sq.c
new file mode 100644
index 00000000..0a47bd3e
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4/sq.c
@@ -0,0 +1,416 @@
+/*
+ * arch/sh/kernel/cpu/sh4/sq.c
+ *
+ * General management API for SH-4 integrated Store Queues
+ *
+ * Copyright (C) 2001 - 2006 Paul Mundt
+ * Copyright (C) 2001, 2002 M. R. Brown
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/cpu.h>
+#include <linux/bitmap.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include <linux/io.h>
+#include <linux/prefetch.h>
+#include <asm/page.h>
+#include <asm/cacheflush.h>
+#include <cpu/sq.h>
+
+struct sq_mapping;
+
+struct sq_mapping {
+ const char *name;
+
+ unsigned long sq_addr;
+ unsigned long addr;
+ unsigned int size;
+
+ struct sq_mapping *next;
+};
+
+static struct sq_mapping *sq_mapping_list;
+static DEFINE_SPINLOCK(sq_mapping_lock);
+static struct kmem_cache *sq_cache;
+static unsigned long *sq_bitmap;
+
+#define store_queue_barrier() \
+do { \
+ (void)__raw_readl(P4SEG_STORE_QUE); \
+ __raw_writel(0, P4SEG_STORE_QUE + 0); \
+ __raw_writel(0, P4SEG_STORE_QUE + 8); \
+} while (0);
+
+/**
+ * sq_flush_range - Flush (prefetch) a specific SQ range
+ * @start: the store queue address to start flushing from
+ * @len: the length to flush
+ *
+ * Flushes the store queue cache from @start to @start + @len in a
+ * linear fashion.
+ */
+void sq_flush_range(unsigned long start, unsigned int len)
+{
+ unsigned long *sq = (unsigned long *)start;
+
+ /* Flush the queues */
+ for (len >>= 5; len--; sq += 8)
+ prefetchw(sq);
+
+ /* Wait for completion */
+ store_queue_barrier();
+}
+EXPORT_SYMBOL(sq_flush_range);
+
+static inline void sq_mapping_list_add(struct sq_mapping *map)
+{
+ struct sq_mapping **p, *tmp;
+
+ spin_lock_irq(&sq_mapping_lock);
+
+ p = &sq_mapping_list;
+ while ((tmp = *p) != NULL)
+ p = &tmp->next;
+
+ map->next = tmp;
+ *p = map;
+
+ spin_unlock_irq(&sq_mapping_lock);
+}
+
+static inline void sq_mapping_list_del(struct sq_mapping *map)
+{
+ struct sq_mapping **p, *tmp;
+
+ spin_lock_irq(&sq_mapping_lock);
+
+ for (p = &sq_mapping_list; (tmp = *p); p = &tmp->next)
+ if (tmp == map) {
+ *p = tmp->next;
+ break;
+ }
+
+ spin_unlock_irq(&sq_mapping_lock);
+}
+
+static int __sq_remap(struct sq_mapping *map, pgprot_t prot)
+{
+#if defined(CONFIG_MMU)
+ struct vm_struct *vma;
+
+ vma = __get_vm_area(map->size, VM_ALLOC, map->sq_addr, SQ_ADDRMAX);
+ if (!vma)
+ return -ENOMEM;
+
+ vma->phys_addr = map->addr;
+
+ if (ioremap_page_range((unsigned long)vma->addr,
+ (unsigned long)vma->addr + map->size,
+ vma->phys_addr, prot)) {
+ vunmap(vma->addr);
+ return -EAGAIN;
+ }
+#else
+ /*
+ * Without an MMU (or with it turned off), this is much more
+ * straightforward, as we can just load up each queue's QACR with
+ * the physical address appropriately masked.
+ */
+ __raw_writel(((map->addr >> 26) << 2) & 0x1c, SQ_QACR0);
+ __raw_writel(((map->addr >> 26) << 2) & 0x1c, SQ_QACR1);
+#endif
+
+ return 0;
+}
+
+/**
+ * sq_remap - Map a physical address through the Store Queues
+ * @phys: Physical address of mapping.
+ * @size: Length of mapping.
+ * @name: User invoking mapping.
+ * @prot: Protection bits.
+ *
+ * Remaps the physical address @phys through the next available store queue
+ * address of @size length. @name is logged at boot time as well as through
+ * the sysfs interface.
+ */
+unsigned long sq_remap(unsigned long phys, unsigned int size,
+ const char *name, pgprot_t prot)
+{
+ struct sq_mapping *map;
+ unsigned long end;
+ unsigned int psz;
+ int ret, page;
+
+ /* Don't allow wraparound or zero size */
+ end = phys + size - 1;
+ if (unlikely(!size || end < phys))
+ return -EINVAL;
+ /* Don't allow anyone to remap normal memory.. */
+ if (unlikely(phys < virt_to_phys(high_memory)))
+ return -EINVAL;
+
+ phys &= PAGE_MASK;
+ size = PAGE_ALIGN(end + 1) - phys;
+
+ map = kmem_cache_alloc(sq_cache, GFP_KERNEL);
+ if (unlikely(!map))
+ return -ENOMEM;
+
+ map->addr = phys;
+ map->size = size;
+ map->name = name;
+
+ page = bitmap_find_free_region(sq_bitmap, 0x04000000 >> PAGE_SHIFT,
+ get_order(map->size));
+ if (unlikely(page < 0)) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ map->sq_addr = P4SEG_STORE_QUE + (page << PAGE_SHIFT);
+
+ ret = __sq_remap(map, prot);
+ if (unlikely(ret != 0))
+ goto out;
+
+ psz = (size + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+ pr_info("sqremap: %15s [%4d page%s] va 0x%08lx pa 0x%08lx\n",
+ likely(map->name) ? map->name : "???",
+ psz, psz == 1 ? " " : "s",
+ map->sq_addr, map->addr);
+
+ sq_mapping_list_add(map);
+
+ return map->sq_addr;
+
+out:
+ kmem_cache_free(sq_cache, map);
+ return ret;
+}
+EXPORT_SYMBOL(sq_remap);
+
+/**
+ * sq_unmap - Unmap a Store Queue allocation
+ * @vaddr: Pre-allocated Store Queue mapping.
+ *
+ * Unmaps the store queue allocation @map that was previously created by
+ * sq_remap(). Also frees up the pte that was previously inserted into
+ * the kernel page table and discards the UTLB translation.
+ */
+void sq_unmap(unsigned long vaddr)
+{
+ struct sq_mapping **p, *map;
+ int page;
+
+ for (p = &sq_mapping_list; (map = *p); p = &map->next)
+ if (map->sq_addr == vaddr)
+ break;
+
+ if (unlikely(!map)) {
+ printk("%s: bad store queue address 0x%08lx\n",
+ __func__, vaddr);
+ return;
+ }
+
+ page = (map->sq_addr - P4SEG_STORE_QUE) >> PAGE_SHIFT;
+ bitmap_release_region(sq_bitmap, page, get_order(map->size));
+
+#ifdef CONFIG_MMU
+ {
+ /*
+ * Tear down the VMA in the MMU case.
+ */
+ struct vm_struct *vma;
+
+ vma = remove_vm_area((void *)(map->sq_addr & PAGE_MASK));
+ if (!vma) {
+ printk(KERN_ERR "%s: bad address 0x%08lx\n",
+ __func__, map->sq_addr);
+ return;
+ }
+ }
+#endif
+
+ sq_mapping_list_del(map);
+
+ kmem_cache_free(sq_cache, map);
+}
+EXPORT_SYMBOL(sq_unmap);
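+
+/*
+ * A hedged usage sketch for the exported API above; the device address,
+ * length and buffer names are hypothetical and not part of this file. A
+ * driver maps its target region through the store queue window, stages
+ * writes through it, flushes, and tears the mapping down again:
+ *
+ *	unsigned long sq = sq_remap(dev_phys, dev_len, "mydev", PAGE_SHARED);
+ *
+ *	if (!IS_ERR_VALUE(sq)) {
+ *		memcpy((void *)sq, buf, dev_len);
+ *		sq_flush_range(sq, dev_len);
+ *		sq_unmap(sq);
+ *	}
+ */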
+
+/*
+ * Needlessly complex sysfs interface. Unfortunately it doesn't seem like
+ * there is any other easy way to add things on a per-cpu basis without
+ * putting the directory entries somewhere stupid and having to create
+ * links in sysfs by hand back into the per-cpu directories.
+ *
+ * Some day we may want to have an additional abstraction per store
+ * queue, but considering the kobject hell we already have to deal with,
+ * it's simply not worth the trouble.
+ */
+static struct kobject *sq_kobject[NR_CPUS];
+
+struct sq_sysfs_attr {
+ struct attribute attr;
+ ssize_t (*show)(char *buf);
+ ssize_t (*store)(const char *buf, size_t count);
+};
+
+#define to_sq_sysfs_attr(a) container_of(a, struct sq_sysfs_attr, attr)
+
+static ssize_t sq_sysfs_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct sq_sysfs_attr *sattr = to_sq_sysfs_attr(attr);
+
+ if (likely(sattr->show))
+ return sattr->show(buf);
+
+ return -EIO;
+}
+
+static ssize_t sq_sysfs_store(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t count)
+{
+ struct sq_sysfs_attr *sattr = to_sq_sysfs_attr(attr);
+
+ if (likely(sattr->store))
+ return sattr->store(buf, count);
+
+ return -EIO;
+}
+
+static ssize_t mapping_show(char *buf)
+{
+ struct sq_mapping **list, *entry;
+ char *p = buf;
+
+ for (list = &sq_mapping_list; (entry = *list); list = &entry->next)
+ p += sprintf(p, "%08lx-%08lx [%08lx]: %s\n",
+ entry->sq_addr, entry->sq_addr + entry->size,
+ entry->addr, entry->name);
+
+ return p - buf;
+}
+
+static ssize_t mapping_store(const char *buf, size_t count)
+{
+ unsigned long base = 0, len = 0;
+
+ sscanf(buf, "%lx %lx", &base, &len);
+ if (!base)
+ return -EIO;
+
+ if (likely(len)) {
+ int ret = sq_remap(base, len, "Userspace", PAGE_SHARED);
+ if (ret < 0)
+ return ret;
+ } else
+ sq_unmap(base);
+
+ return count;
+}
+
+static struct sq_sysfs_attr mapping_attr =
+ __ATTR(mapping, 0644, mapping_show, mapping_store);
+
+static struct attribute *sq_sysfs_attrs[] = {
+ &mapping_attr.attr,
+ NULL,
+};
+
+static const struct sysfs_ops sq_sysfs_ops = {
+ .show = sq_sysfs_show,
+ .store = sq_sysfs_store,
+};
+
+static struct kobj_type ktype_percpu_entry = {
+ .sysfs_ops = &sq_sysfs_ops,
+ .default_attrs = sq_sysfs_attrs,
+};
+
+static int sq_dev_add(struct device *dev, struct subsys_interface *sif)
+{
+ unsigned int cpu = dev->id;
+ struct kobject *kobj;
+ int error;
+
+ sq_kobject[cpu] = kzalloc(sizeof(struct kobject), GFP_KERNEL);
+ if (unlikely(!sq_kobject[cpu]))
+ return -ENOMEM;
+
+ kobj = sq_kobject[cpu];
+ error = kobject_init_and_add(kobj, &ktype_percpu_entry, &dev->kobj,
+ "%s", "sq");
+ if (!error)
+ kobject_uevent(kobj, KOBJ_ADD);
+ return error;
+}
+
+static int sq_dev_remove(struct device *dev, struct subsys_interface *sif)
+{
+ unsigned int cpu = dev->id;
+ struct kobject *kobj = sq_kobject[cpu];
+
+ kobject_put(kobj);
+ return 0;
+}
+
+static struct subsys_interface sq_interface = {
+ .name = "sq",
+ .subsys = &cpu_subsys,
+ .add_dev = sq_dev_add,
+ .remove_dev = sq_dev_remove,
+};
+
+static int __init sq_api_init(void)
+{
+ unsigned int nr_pages = 0x04000000 >> PAGE_SHIFT;
+ unsigned int size = (nr_pages + (BITS_PER_LONG - 1)) / BITS_PER_LONG;
+ int ret = -ENOMEM;
+
+ printk(KERN_NOTICE "sq: Registering store queue API.\n");
+
+ sq_cache = kmem_cache_create("store_queue_cache",
+ sizeof(struct sq_mapping), 0, 0, NULL);
+ if (unlikely(!sq_cache))
+ return ret;
+
+ sq_bitmap = kzalloc(size, GFP_KERNEL);
+ if (unlikely(!sq_bitmap))
+ goto out;
+
+ ret = subsys_interface_register(&sq_interface);
+ if (unlikely(ret != 0))
+ goto out;
+
+ return 0;
+
+out:
+ kfree(sq_bitmap);
+ kmem_cache_destroy(sq_cache);
+
+ return ret;
+}
+
+static void __exit sq_api_exit(void)
+{
+ subsys_interface_unregister(&sq_interface);
+ kfree(sq_bitmap);
+ kmem_cache_destroy(sq_cache);
+}
+
+module_init(sq_api_init);
+module_exit(sq_api_exit);
+
+MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>, M. R. Brown <mrbrown@0xd6.org>");
+MODULE_DESCRIPTION("Simple API for SH-4 integrated Store Queues");
+MODULE_LICENSE("GPL");
diff --git a/arch/sh/kernel/cpu/sh4a/Makefile b/arch/sh/kernel/cpu/sh4a/Makefile
new file mode 100644
index 00000000..0b22d108
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/Makefile
@@ -0,0 +1,49 @@
+#
+# Makefile for the Linux/SuperH SH-4 backends.
+#
+
+# CPU subtype setup
+obj-$(CONFIG_CPU_SUBTYPE_SH7757) += setup-sh7757.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7763) += setup-sh7763.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7770) += setup-sh7770.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7780) += setup-sh7780.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7785) += setup-sh7785.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7786) += setup-sh7786.o intc-shx3.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7343) += setup-sh7343.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7722) += setup-sh7722.o serial-sh7722.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7723) += setup-sh7723.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7724) += setup-sh7724.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7366) += setup-sh7366.o
+obj-$(CONFIG_CPU_SUBTYPE_SHX3) += setup-shx3.o intc-shx3.o
+
+# SMP setup
+smp-$(CONFIG_CPU_SHX3) := smp-shx3.o
+
+# Primary on-chip clocks (common)
+clock-$(CONFIG_CPU_SUBTYPE_SH7757) := clock-sh7757.o
+clock-$(CONFIG_CPU_SUBTYPE_SH7763) := clock-sh7763.o
+clock-$(CONFIG_CPU_SUBTYPE_SH7770) := clock-sh7770.o
+clock-$(CONFIG_CPU_SUBTYPE_SH7780) := clock-sh7780.o
+clock-$(CONFIG_CPU_SUBTYPE_SH7785) := clock-sh7785.o
+clock-$(CONFIG_CPU_SUBTYPE_SH7786) := clock-sh7786.o
+clock-$(CONFIG_CPU_SUBTYPE_SH7343) := clock-sh7343.o
+clock-$(CONFIG_CPU_SUBTYPE_SH7722) := clock-sh7722.o
+clock-$(CONFIG_CPU_SUBTYPE_SH7723) := clock-sh7723.o
+clock-$(CONFIG_CPU_SUBTYPE_SH7724) := clock-sh7724.o
+clock-$(CONFIG_CPU_SUBTYPE_SH7366) := clock-sh7366.o
+clock-$(CONFIG_CPU_SUBTYPE_SHX3) := clock-shx3.o
+
+# Pinmux setup
+pinmux-$(CONFIG_CPU_SUBTYPE_SH7722) := pinmux-sh7722.o
+pinmux-$(CONFIG_CPU_SUBTYPE_SH7723) := pinmux-sh7723.o
+pinmux-$(CONFIG_CPU_SUBTYPE_SH7724) := pinmux-sh7724.o
+pinmux-$(CONFIG_CPU_SUBTYPE_SH7757) := pinmux-sh7757.o
+pinmux-$(CONFIG_CPU_SUBTYPE_SH7785) := pinmux-sh7785.o
+pinmux-$(CONFIG_CPU_SUBTYPE_SH7786) := pinmux-sh7786.o
+pinmux-$(CONFIG_CPU_SUBTYPE_SHX3) := pinmux-shx3.o
+
+obj-y += $(clock-y)
+obj-$(CONFIG_SMP) += $(smp-y)
+obj-$(CONFIG_GENERIC_GPIO) += $(pinmux-y)
+obj-$(CONFIG_PERF_EVENTS) += perf_event.o
+obj-$(CONFIG_HAVE_HW_BREAKPOINT) += ubc.o
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7343.c b/arch/sh/kernel/cpu/sh4a/clock-sh7343.c
new file mode 100644
index 00000000..ea01a72f
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7343.c
@@ -0,0 +1,289 @@
+/*
+ * arch/sh/kernel/cpu/sh4a/clock-sh7343.c
+ *
+ * SH7343 clock framework support
+ *
+ * Copyright (C) 2009 Magnus Damm
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/clkdev.h>
+#include <asm/clock.h>
+
+/* SH7343 registers */
+#define FRQCR 0xa4150000
+#define VCLKCR 0xa4150004
+#define SCLKACR 0xa4150008
+#define SCLKBCR 0xa415000c
+#define PLLCR 0xa4150024
+#define MSTPCR0 0xa4150030
+#define MSTPCR1 0xa4150034
+#define MSTPCR2 0xa4150038
+#define DLLFRQ 0xa4150050
+
+/* Fixed 32 KHz root clock for RTC and Power Management purposes */
+static struct clk r_clk = {
+ .rate = 32768,
+};
+
+/*
+ * Default rate for the root input clock, reset this with clk_set_rate()
+ * from the platform code.
+ */
+struct clk extal_clk = {
+ .rate = 33333333,
+};
+
+/* The dll block multiplies the 32 kHz r_clk and may be used instead of extal */
+static unsigned long dll_recalc(struct clk *clk)
+{
+ unsigned long mult;
+
+ if (__raw_readl(PLLCR) & 0x1000)
+ mult = __raw_readl(DLLFRQ);
+ else
+ mult = 0;
+
+ return clk->parent->rate * mult;
+}
+
+static struct sh_clk_ops dll_clk_ops = {
+ .recalc = dll_recalc,
+};
+
+static struct clk dll_clk = {
+ .ops = &dll_clk_ops,
+ .parent = &r_clk,
+ .flags = CLK_ENABLE_ON_INIT,
+};
+
+static unsigned long pll_recalc(struct clk *clk)
+{
+ unsigned long mult = 1;
+
+ if (__raw_readl(PLLCR) & 0x4000)
+ mult = (((__raw_readl(FRQCR) >> 24) & 0x1f) + 1);
+
+ return clk->parent->rate * mult;
+}
+
+static struct sh_clk_ops pll_clk_ops = {
+ .recalc = pll_recalc,
+};
+
+static struct clk pll_clk = {
+ .ops = &pll_clk_ops,
+ .flags = CLK_ENABLE_ON_INIT,
+};
+
+struct clk *main_clks[] = {
+ &r_clk,
+ &extal_clk,
+ &dll_clk,
+ &pll_clk,
+};
+
+static int multipliers[] = { 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
+static int divisors[] = { 1, 3, 2, 5, 3, 4, 5, 6, 8, 10, 12, 16, 20 };
+
+static struct clk_div_mult_table div4_div_mult_table = {
+ .divisors = divisors,
+ .nr_divisors = ARRAY_SIZE(divisors),
+ .multipliers = multipliers,
+ .nr_multipliers = ARRAY_SIZE(multipliers),
+};
+
+static struct clk_div4_table div4_table = {
+ .div_mult_table = &div4_div_mult_table,
+};
+
+enum { DIV4_I, DIV4_U, DIV4_SH, DIV4_B, DIV4_B3, DIV4_P,
+ DIV4_SIUA, DIV4_SIUB, DIV4_NR };
+
+#define DIV4(_reg, _bit, _mask, _flags) \
+ SH_CLK_DIV4(&pll_clk, _reg, _bit, _mask, _flags)
+
+struct clk div4_clks[DIV4_NR] = {
+ [DIV4_I] = DIV4(FRQCR, 20, 0x1fff, CLK_ENABLE_ON_INIT),
+ [DIV4_U] = DIV4(FRQCR, 16, 0x1fff, CLK_ENABLE_ON_INIT),
+ [DIV4_SH] = DIV4(FRQCR, 12, 0x1fff, CLK_ENABLE_ON_INIT),
+ [DIV4_B] = DIV4(FRQCR, 8, 0x1fff, CLK_ENABLE_ON_INIT),
+ [DIV4_B3] = DIV4(FRQCR, 4, 0x1fff, CLK_ENABLE_ON_INIT),
+ [DIV4_P] = DIV4(FRQCR, 0, 0x1fff, 0),
+ [DIV4_SIUA] = DIV4(SCLKACR, 0, 0x1fff, 0),
+ [DIV4_SIUB] = DIV4(SCLKBCR, 0, 0x1fff, 0),
+};
+
+enum { DIV6_V, DIV6_NR };
+
+struct clk div6_clks[DIV6_NR] = {
+ [DIV6_V] = SH_CLK_DIV6(&pll_clk, VCLKCR, 0),
+};
+
+#define MSTP(_parent, _reg, _bit, _flags) \
+ SH_CLK_MSTP32(_parent, _reg, _bit, _flags)
+
+enum { MSTP031, MSTP030, MSTP029, MSTP028, MSTP026,
+ MSTP023, MSTP022, MSTP021, MSTP020, MSTP019, MSTP018, MSTP017, MSTP016,
+ MSTP015, MSTP014, MSTP013, MSTP012, MSTP011, MSTP010,
+ MSTP007, MSTP006, MSTP005, MSTP004, MSTP003, MSTP002, MSTP001,
+ MSTP109, MSTP108, MSTP100,
+ MSTP225, MSTP224, MSTP218, MSTP217, MSTP216,
+ MSTP214, MSTP213, MSTP212, MSTP211, MSTP208,
+ MSTP206, MSTP205, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200,
+ MSTP_NR };
+
+static struct clk mstp_clks[MSTP_NR] = {
+ [MSTP031] = MSTP(&div4_clks[DIV4_I], MSTPCR0, 31, CLK_ENABLE_ON_INIT),
+ [MSTP030] = MSTP(&div4_clks[DIV4_I], MSTPCR0, 30, CLK_ENABLE_ON_INIT),
+ [MSTP029] = MSTP(&div4_clks[DIV4_I], MSTPCR0, 29, CLK_ENABLE_ON_INIT),
+ [MSTP028] = MSTP(&div4_clks[DIV4_U], MSTPCR0, 28, CLK_ENABLE_ON_INIT),
+ [MSTP026] = MSTP(&div4_clks[DIV4_B], MSTPCR0, 26, CLK_ENABLE_ON_INIT),
+ [MSTP023] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 23, 0),
+ [MSTP022] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 22, 0),
+ [MSTP021] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 21, 0),
+ [MSTP020] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 20, 0),
+ [MSTP019] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 19, 0),
+ [MSTP017] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 17, 0),
+ [MSTP015] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 15, 0),
+ [MSTP014] = MSTP(&r_clk, MSTPCR0, 14, 0),
+ [MSTP013] = MSTP(&r_clk, MSTPCR0, 13, 0),
+ [MSTP011] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 11, 0),
+ [MSTP010] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 10, 0),
+ [MSTP007] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 7, 0),
+ [MSTP006] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 6, 0),
+ [MSTP005] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 5, 0),
+ [MSTP004] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 4, 0),
+ [MSTP003] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 3, 0),
+ [MSTP002] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 2, 0),
+ [MSTP001] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 1, 0),
+
+ [MSTP109] = MSTP(&div4_clks[DIV4_P], MSTPCR1, 9, 0),
+ [MSTP108] = MSTP(&div4_clks[DIV4_P], MSTPCR1, 8, 0),
+
+ [MSTP225] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 25, 0),
+ [MSTP224] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 24, 0),
+ [MSTP218] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 18, 0),
+ [MSTP217] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 17, 0),
+ [MSTP216] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 16, 0),
+ [MSTP214] = MSTP(&r_clk, MSTPCR2, 14, 0),
+ [MSTP213] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 13, 0),
+ [MSTP212] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 12, 0),
+ [MSTP211] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 11, 0),
+ [MSTP208] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 8, 0),
+ [MSTP206] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 6, CLK_ENABLE_ON_INIT),
+ [MSTP205] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 5, 0),
+ [MSTP204] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 4, 0),
+ [MSTP203] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 3, 0),
+ [MSTP202] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 2, CLK_ENABLE_ON_INIT),
+ [MSTP201] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 1, CLK_ENABLE_ON_INIT),
+ [MSTP200] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 0, 0),
+};
+
+static struct clk_lookup lookups[] = {
+ /* main clocks */
+ CLKDEV_CON_ID("rclk", &r_clk),
+ CLKDEV_CON_ID("extal", &extal_clk),
+ CLKDEV_CON_ID("dll_clk", &dll_clk),
+ CLKDEV_CON_ID("pll_clk", &pll_clk),
+
+ /* DIV4 clocks */
+ CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]),
+ CLKDEV_CON_ID("umem_clk", &div4_clks[DIV4_U]),
+ CLKDEV_CON_ID("shyway_clk", &div4_clks[DIV4_SH]),
+ CLKDEV_CON_ID("bus_clk", &div4_clks[DIV4_B]),
+ CLKDEV_CON_ID("b3_clk", &div4_clks[DIV4_B3]),
+ CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]),
+ CLKDEV_CON_ID("siua_clk", &div4_clks[DIV4_SIUA]),
+ CLKDEV_CON_ID("siub_clk", &div4_clks[DIV4_SIUB]),
+
+ /* DIV6 clocks */
+ CLKDEV_CON_ID("video_clk", &div6_clks[DIV6_V]),
+
+ /* MSTP32 clocks */
+ CLKDEV_CON_ID("tlb0", &mstp_clks[MSTP031]),
+ CLKDEV_CON_ID("ic0", &mstp_clks[MSTP030]),
+ CLKDEV_CON_ID("oc0", &mstp_clks[MSTP029]),
+ CLKDEV_CON_ID("uram0", &mstp_clks[MSTP028]),
+ CLKDEV_CON_ID("xymem0", &mstp_clks[MSTP026]),
+ CLKDEV_CON_ID("intc3", &mstp_clks[MSTP023]),
+ CLKDEV_CON_ID("intc0", &mstp_clks[MSTP022]),
+ CLKDEV_CON_ID("dmac0", &mstp_clks[MSTP021]),
+ CLKDEV_CON_ID("sh0", &mstp_clks[MSTP020]),
+ CLKDEV_CON_ID("hudi0", &mstp_clks[MSTP019]),
+ CLKDEV_CON_ID("ubc0", &mstp_clks[MSTP017]),
+ CLKDEV_CON_ID("tmu_fck", &mstp_clks[MSTP015]),
+ CLKDEV_CON_ID("cmt_fck", &mstp_clks[MSTP014]),
+ CLKDEV_CON_ID("rwdt0", &mstp_clks[MSTP013]),
+ CLKDEV_CON_ID("mfi0", &mstp_clks[MSTP011]),
+ CLKDEV_CON_ID("flctl0", &mstp_clks[MSTP010]),
+
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.0", &mstp_clks[MSTP007]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.1", &mstp_clks[MSTP006]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.2", &mstp_clks[MSTP005]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.3", &mstp_clks[MSTP004]),
+
+ CLKDEV_CON_ID("sio0", &mstp_clks[MSTP003]),
+ CLKDEV_CON_ID("siof0", &mstp_clks[MSTP002]),
+ CLKDEV_CON_ID("siof1", &mstp_clks[MSTP001]),
+ CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP109]),
+ CLKDEV_DEV_ID("i2c-sh_mobile.1", &mstp_clks[MSTP108]),
+ CLKDEV_CON_ID("tpu0", &mstp_clks[MSTP225]),
+ CLKDEV_CON_ID("irda0", &mstp_clks[MSTP224]),
+ CLKDEV_CON_ID("sdhi0", &mstp_clks[MSTP218]),
+ CLKDEV_CON_ID("mmcif0", &mstp_clks[MSTP217]),
+ CLKDEV_CON_ID("sim0", &mstp_clks[MSTP216]),
+ CLKDEV_CON_ID("keysc0", &mstp_clks[MSTP214]),
+ CLKDEV_CON_ID("tsif0", &mstp_clks[MSTP213]),
+ CLKDEV_CON_ID("s3d40", &mstp_clks[MSTP212]),
+ CLKDEV_CON_ID("usbf0", &mstp_clks[MSTP211]),
+ CLKDEV_CON_ID("siu0", &mstp_clks[MSTP208]),
+ CLKDEV_CON_ID("jpu0", &mstp_clks[MSTP206]),
+ CLKDEV_CON_ID("vou0", &mstp_clks[MSTP205]),
+ CLKDEV_CON_ID("beu0", &mstp_clks[MSTP204]),
+ CLKDEV_CON_ID("ceu0", &mstp_clks[MSTP203]),
+ CLKDEV_CON_ID("veu0", &mstp_clks[MSTP202]),
+ CLKDEV_CON_ID("vpu0", &mstp_clks[MSTP201]),
+ CLKDEV_CON_ID("lcdc0", &mstp_clks[MSTP200]),
+};
+
+int __init arch_clk_init(void)
+{
+ int k, ret = 0;
+
+ /* autodetect extal or dll configuration */
+ if (__raw_readl(PLLCR) & 0x1000)
+ pll_clk.parent = &dll_clk;
+ else
+ pll_clk.parent = &extal_clk;
+
+ for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++)
+ ret = clk_register(main_clks[k]);
+
+ clkdev_add_table(lookups, ARRAY_SIZE(lookups));
+
+ if (!ret)
+ ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table);
+
+ if (!ret)
+ ret = sh_clk_div6_register(div6_clks, DIV6_NR);
+
+ if (!ret)
+ ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR);
+
+ return ret;
+}
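As an aside (illustrative, not part of the patch): the clkdev table above is consumed through the normal clk API. A CLKDEV_CON_ID entry matches on the connection id regardless of device, so a rough consumer sketch looks like the following; the "vpu0" choice and the caller keeping the clk reference are assumptions for the example.

	#include <linux/clk.h>
	#include <linux/err.h>
	#include <linux/kernel.h>

	static struct clk *example_vpu_clk;

	/* Sketch only: enable the VPU clock via its CLKDEV_CON_ID("vpu0", ...) entry */
	static int example_enable_vpu(void)
	{
		int ret;

		example_vpu_clk = clk_get(NULL, "vpu0");	/* con_id lookup, no device */
		if (IS_ERR(example_vpu_clk))
			return PTR_ERR(example_vpu_clk);

		ret = clk_enable(example_vpu_clk);		/* clears the MSTP201 stop bit */
		if (!ret)
			pr_info("vpu0 at %lu Hz\n", clk_get_rate(example_vpu_clk));

		return ret;
	}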
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7366.c b/arch/sh/kernel/cpu/sh4a/clock-sh7366.c
new file mode 100644
index 00000000..7ac07b4f
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7366.c
@@ -0,0 +1,282 @@
+/*
+ * arch/sh/kernel/cpu/sh4a/clock-sh7366.c
+ *
+ * SH7366 clock framework support
+ *
+ * Copyright (C) 2009 Magnus Damm
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/clkdev.h>
+#include <asm/clock.h>
+
+/* SH7366 registers */
+#define FRQCR 0xa4150000
+#define VCLKCR 0xa4150004
+#define SCLKACR 0xa4150008
+#define SCLKBCR 0xa415000c
+#define PLLCR 0xa4150024
+#define MSTPCR0 0xa4150030
+#define MSTPCR1 0xa4150034
+#define MSTPCR2 0xa4150038
+#define DLLFRQ 0xa4150050
+
+/* Fixed 32 KHz root clock for RTC and Power Management purposes */
+static struct clk r_clk = {
+ .rate = 32768,
+};
+
+/*
+ * Default rate for the root input clock, reset this with clk_set_rate()
+ * from the platform code.
+ */
+struct clk extal_clk = {
+ .rate = 33333333,
+};
+
+/* The dll block multiplies the 32 kHz r_clk and may be used instead of extal */
+static unsigned long dll_recalc(struct clk *clk)
+{
+ unsigned long mult;
+
+ if (__raw_readl(PLLCR) & 0x1000)
+ mult = __raw_readl(DLLFRQ);
+ else
+ mult = 0;
+
+ return clk->parent->rate * mult;
+}
+
+static struct sh_clk_ops dll_clk_ops = {
+ .recalc = dll_recalc,
+};
+
+static struct clk dll_clk = {
+ .ops = &dll_clk_ops,
+ .parent = &r_clk,
+ .flags = CLK_ENABLE_ON_INIT,
+};
+
+static unsigned long pll_recalc(struct clk *clk)
+{
+ unsigned long mult = 1;
+ unsigned long div = 1;
+
+ if (__raw_readl(PLLCR) & 0x4000)
+ mult = (((__raw_readl(FRQCR) >> 24) & 0x1f) + 1);
+ else
+ div = 2;
+
+ return (clk->parent->rate * mult) / div;
+}
+
+static struct sh_clk_ops pll_clk_ops = {
+ .recalc = pll_recalc,
+};
+
+static struct clk pll_clk = {
+ .ops = &pll_clk_ops,
+ .flags = CLK_ENABLE_ON_INIT,
+};
+
+struct clk *main_clks[] = {
+ &r_clk,
+ &extal_clk,
+ &dll_clk,
+ &pll_clk,
+};
+
+static int multipliers[] = { 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
+static int divisors[] = { 1, 3, 2, 5, 3, 4, 5, 6, 8, 10, 12, 16, 20 };
+
+static struct clk_div_mult_table div4_div_mult_table = {
+ .divisors = divisors,
+ .nr_divisors = ARRAY_SIZE(divisors),
+ .multipliers = multipliers,
+ .nr_multipliers = ARRAY_SIZE(multipliers),
+};
+
+static struct clk_div4_table div4_table = {
+ .div_mult_table = &div4_div_mult_table,
+};
+
+enum { DIV4_I, DIV4_U, DIV4_SH, DIV4_B, DIV4_B3, DIV4_P,
+ DIV4_SIUA, DIV4_SIUB, DIV4_NR };
+
+#define DIV4(_reg, _bit, _mask, _flags) \
+ SH_CLK_DIV4(&pll_clk, _reg, _bit, _mask, _flags)
+
+struct clk div4_clks[DIV4_NR] = {
+ [DIV4_I] = DIV4(FRQCR, 20, 0x1fef, CLK_ENABLE_ON_INIT),
+ [DIV4_U] = DIV4(FRQCR, 16, 0x1fff, CLK_ENABLE_ON_INIT),
+ [DIV4_SH] = DIV4(FRQCR, 12, 0x1fff, CLK_ENABLE_ON_INIT),
+ [DIV4_B] = DIV4(FRQCR, 8, 0x1fff, CLK_ENABLE_ON_INIT),
+ [DIV4_B3] = DIV4(FRQCR, 4, 0x1fff, CLK_ENABLE_ON_INIT),
+ [DIV4_P] = DIV4(FRQCR, 0, 0x1fff, 0),
+ [DIV4_SIUA] = DIV4(SCLKACR, 0, 0x1fff, 0),
+ [DIV4_SIUB] = DIV4(SCLKBCR, 0, 0x1fff, 0),
+};
+
+enum { DIV6_V, DIV6_NR };
+
+struct clk div6_clks[DIV6_NR] = {
+ [DIV6_V] = SH_CLK_DIV6(&pll_clk, VCLKCR, 0),
+};
+
+#define MSTP(_parent, _reg, _bit, _flags) \
+ SH_CLK_MSTP32(_parent, _reg, _bit, _flags)
+
+enum { MSTP031, MSTP030, MSTP029, MSTP028, MSTP026,
+ MSTP023, MSTP022, MSTP021, MSTP020, MSTP019, MSTP018, MSTP017, MSTP016,
+ MSTP015, MSTP014, MSTP013, MSTP012, MSTP011, MSTP010,
+ MSTP007, MSTP006, MSTP005, MSTP002, MSTP001,
+ MSTP109, MSTP100,
+ MSTP227, MSTP226, MSTP224, MSTP223, MSTP222, MSTP218, MSTP217,
+ MSTP211, MSTP207, MSTP205, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200,
+ MSTP_NR };
+
+static struct clk mstp_clks[MSTP_NR] = {
+ /* See page 52 of Datasheet V0.40: Overview -> Block Diagram */
+ [MSTP031] = MSTP(&div4_clks[DIV4_I], MSTPCR0, 31, CLK_ENABLE_ON_INIT),
+ [MSTP030] = MSTP(&div4_clks[DIV4_I], MSTPCR0, 30, CLK_ENABLE_ON_INIT),
+ [MSTP029] = MSTP(&div4_clks[DIV4_I], MSTPCR0, 29, CLK_ENABLE_ON_INIT),
+ [MSTP028] = MSTP(&div4_clks[DIV4_SH], MSTPCR0, 28, CLK_ENABLE_ON_INIT),
+ [MSTP026] = MSTP(&div4_clks[DIV4_B], MSTPCR0, 26, CLK_ENABLE_ON_INIT),
+ [MSTP023] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 23, 0),
+ [MSTP022] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 22, 0),
+ [MSTP021] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 21, 0),
+ [MSTP020] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 20, 0),
+ [MSTP019] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 19, 0),
+ [MSTP017] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 17, 0),
+ [MSTP015] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 15, 0),
+ [MSTP014] = MSTP(&r_clk, MSTPCR0, 14, 0),
+ [MSTP013] = MSTP(&r_clk, MSTPCR0, 13, 0),
+ [MSTP011] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 11, 0),
+ [MSTP010] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 10, 0),
+ [MSTP007] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 7, 0),
+ [MSTP006] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 6, 0),
+ [MSTP005] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 5, 0),
+ [MSTP002] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 2, 0),
+ [MSTP001] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 1, 0),
+
+ [MSTP109] = MSTP(&div4_clks[DIV4_P], MSTPCR1, 9, 0),
+
+ [MSTP227] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 27, 0),
+ [MSTP226] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 26, 0),
+ [MSTP224] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 24, 0),
+ [MSTP223] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 23, 0),
+ [MSTP222] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 22, 0),
+ [MSTP218] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 18, 0),
+ [MSTP217] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 17, 0),
+ [MSTP211] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 11, 0),
+ [MSTP207] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 7, CLK_ENABLE_ON_INIT),
+ [MSTP205] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 5, 0),
+ [MSTP204] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 4, 0),
+ [MSTP203] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 3, 0),
+ [MSTP202] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 2, CLK_ENABLE_ON_INIT),
+ [MSTP201] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 1, CLK_ENABLE_ON_INIT),
+ [MSTP200] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 0, 0),
+};
+
+static struct clk_lookup lookups[] = {
+ /* main clocks */
+ CLKDEV_CON_ID("rclk", &r_clk),
+ CLKDEV_CON_ID("extal", &extal_clk),
+ CLKDEV_CON_ID("dll_clk", &dll_clk),
+ CLKDEV_CON_ID("pll_clk", &pll_clk),
+
+ /* DIV4 clocks */
+ CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]),
+ CLKDEV_CON_ID("umem_clk", &div4_clks[DIV4_U]),
+ CLKDEV_CON_ID("shyway_clk", &div4_clks[DIV4_SH]),
+ CLKDEV_CON_ID("bus_clk", &div4_clks[DIV4_B]),
+ CLKDEV_CON_ID("b3_clk", &div4_clks[DIV4_B3]),
+ CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]),
+ CLKDEV_CON_ID("siua_clk", &div4_clks[DIV4_SIUA]),
+ CLKDEV_CON_ID("siub_clk", &div4_clks[DIV4_SIUB]),
+
+ /* DIV6 clocks */
+ CLKDEV_CON_ID("video_clk", &div6_clks[DIV6_V]),
+
+ /* MSTP32 clocks */
+ CLKDEV_CON_ID("tlb0", &mstp_clks[MSTP031]),
+ CLKDEV_CON_ID("ic0", &mstp_clks[MSTP030]),
+ CLKDEV_CON_ID("oc0", &mstp_clks[MSTP029]),
+ CLKDEV_CON_ID("rsmem0", &mstp_clks[MSTP028]),
+ CLKDEV_CON_ID("xymem0", &mstp_clks[MSTP026]),
+ CLKDEV_CON_ID("intc3", &mstp_clks[MSTP023]),
+ CLKDEV_CON_ID("intc0", &mstp_clks[MSTP022]),
+ CLKDEV_CON_ID("dmac0", &mstp_clks[MSTP021]),
+ CLKDEV_CON_ID("sh0", &mstp_clks[MSTP020]),
+ CLKDEV_CON_ID("hudi0", &mstp_clks[MSTP019]),
+ CLKDEV_CON_ID("ubc0", &mstp_clks[MSTP017]),
+ CLKDEV_CON_ID("tmu_fck", &mstp_clks[MSTP015]),
+ CLKDEV_CON_ID("cmt_fck", &mstp_clks[MSTP014]),
+ CLKDEV_CON_ID("rwdt0", &mstp_clks[MSTP013]),
+ CLKDEV_CON_ID("mfi0", &mstp_clks[MSTP011]),
+ CLKDEV_CON_ID("flctl0", &mstp_clks[MSTP010]),
+
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.0", &mstp_clks[MSTP007]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.1", &mstp_clks[MSTP006]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.2", &mstp_clks[MSTP005]),
+
+ CLKDEV_CON_ID("msiof0", &mstp_clks[MSTP002]),
+ CLKDEV_CON_ID("sbr0", &mstp_clks[MSTP001]),
+ CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP109]),
+ CLKDEV_CON_ID("icb0", &mstp_clks[MSTP227]),
+ CLKDEV_CON_ID("meram0", &mstp_clks[MSTP226]),
+ CLKDEV_CON_ID("dacy1", &mstp_clks[MSTP224]),
+ CLKDEV_CON_ID("dacy0", &mstp_clks[MSTP223]),
+ CLKDEV_CON_ID("tsif0", &mstp_clks[MSTP222]),
+ CLKDEV_CON_ID("sdhi0", &mstp_clks[MSTP218]),
+ CLKDEV_CON_ID("mmcif0", &mstp_clks[MSTP217]),
+ CLKDEV_CON_ID("usbf0", &mstp_clks[MSTP211]),
+ CLKDEV_CON_ID("veu1", &mstp_clks[MSTP207]),
+ CLKDEV_CON_ID("vou0", &mstp_clks[MSTP205]),
+ CLKDEV_CON_ID("beu0", &mstp_clks[MSTP204]),
+ CLKDEV_CON_ID("ceu0", &mstp_clks[MSTP203]),
+ CLKDEV_CON_ID("veu0", &mstp_clks[MSTP202]),
+ CLKDEV_CON_ID("vpu0", &mstp_clks[MSTP201]),
+ CLKDEV_CON_ID("lcdc0", &mstp_clks[MSTP200]),
+};
+
+int __init arch_clk_init(void)
+{
+ int k, ret = 0;
+
+ /* autodetect extal or dll configuration */
+ if (__raw_readl(PLLCR) & 0x1000)
+ pll_clk.parent = &dll_clk;
+ else
+ pll_clk.parent = &extal_clk;
+
+ for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++)
+ ret = clk_register(main_clks[k]);
+
+ clkdev_add_table(lookups, ARRAY_SIZE(lookups));
+
+ if (!ret)
+ ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table);
+
+ if (!ret)
+ ret = sh_clk_div6_register(div6_clks, DIV6_NR);
+
+ if (!ret)
+ ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR);
+
+ return ret;
+}
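A quick worked example of pll_recalc() above may help during board bring-up; the FRQCR multiplier field value is hypothetical and the extal rate is the default from this file.

	#include <linux/kernel.h>

	/* Worked example only; the FRQCR[28:24] value of 11 is an assumption. */
	static void example_pll_rates(void)
	{
		unsigned long parent = 33333333;		/* extal default above */
		unsigned long mult   = 11 + 1;			/* ((FRQCR >> 24) & 0x1f) + 1 */

		pr_info("PLL on:  %lu Hz\n", parent * mult);	/* ~400 MHz  */
		pr_info("PLL off: %lu Hz\n", parent / 2);	/* ~16.7 MHz */
	}

With PLLCR bit 14 set the divider branch is skipped and the rate is parent * mult; with it clear, mult stays 1 and the parent is halved.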
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7722.c b/arch/sh/kernel/cpu/sh4a/clock-sh7722.c
new file mode 100644
index 00000000..8e1f9701
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7722.c
@@ -0,0 +1,267 @@
+/*
+ * arch/sh/kernel/cpu/sh4a/clock-sh7722.c
+ *
+ * SH7722 clock framework support
+ *
+ * Copyright (C) 2009 Magnus Damm
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/clkdev.h>
+#include <linux/sh_clk.h>
+#include <asm/clock.h>
+#include <cpu/sh7722.h>
+
+/* SH7722 registers */
+#define FRQCR 0xa4150000
+#define VCLKCR 0xa4150004
+#define SCLKACR 0xa4150008
+#define SCLKBCR 0xa415000c
+#define IRDACLKCR 0xa4150018
+#define PLLCR 0xa4150024
+#define MSTPCR0 0xa4150030
+#define MSTPCR1 0xa4150034
+#define MSTPCR2 0xa4150038
+#define DLLFRQ 0xa4150050
+
+/* Fixed 32 KHz root clock for RTC and Power Management purposes */
+static struct clk r_clk = {
+ .rate = 32768,
+};
+
+/*
+ * Default rate for the root input clock, reset this with clk_set_rate()
+ * from the platform code.
+ */
+struct clk extal_clk = {
+ .rate = 33333333,
+};
+
+/* The dll block multiplies the 32 kHz r_clk and may be used instead of extal */
+static unsigned long dll_recalc(struct clk *clk)
+{
+ unsigned long mult;
+
+ if (__raw_readl(PLLCR) & 0x1000)
+ mult = __raw_readl(DLLFRQ);
+ else
+ mult = 0;
+
+ return clk->parent->rate * mult;
+}
+
+static struct sh_clk_ops dll_clk_ops = {
+ .recalc = dll_recalc,
+};
+
+static struct clk dll_clk = {
+ .ops = &dll_clk_ops,
+ .parent = &r_clk,
+ .flags = CLK_ENABLE_ON_INIT,
+};
+
+static unsigned long pll_recalc(struct clk *clk)
+{
+ unsigned long mult = 1;
+ unsigned long div = 1;
+
+ if (__raw_readl(PLLCR) & 0x4000)
+ mult = (((__raw_readl(FRQCR) >> 24) & 0x1f) + 1);
+ else
+ div = 2;
+
+ return (clk->parent->rate * mult) / div;
+}
+
+static struct sh_clk_ops pll_clk_ops = {
+ .recalc = pll_recalc,
+};
+
+static struct clk pll_clk = {
+ .ops = &pll_clk_ops,
+ .flags = CLK_ENABLE_ON_INIT,
+};
+
+struct clk *main_clks[] = {
+ &r_clk,
+ &extal_clk,
+ &dll_clk,
+ &pll_clk,
+};
+
+static int multipliers[] = { 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
+static int divisors[] = { 1, 3, 2, 5, 3, 4, 5, 6, 8, 10, 12, 16, 20 };
+
+static struct clk_div_mult_table div4_div_mult_table = {
+ .divisors = divisors,
+ .nr_divisors = ARRAY_SIZE(divisors),
+ .multipliers = multipliers,
+ .nr_multipliers = ARRAY_SIZE(multipliers),
+};
+
+static struct clk_div4_table div4_table = {
+ .div_mult_table = &div4_div_mult_table,
+};
+
+#define DIV4(_reg, _bit, _mask, _flags) \
+ SH_CLK_DIV4(&pll_clk, _reg, _bit, _mask, _flags)
+
+enum { DIV4_I, DIV4_U, DIV4_SH, DIV4_B, DIV4_B3, DIV4_P, DIV4_NR };
+
+struct clk div4_clks[DIV4_NR] = {
+ [DIV4_I] = DIV4(FRQCR, 20, 0x1fef, CLK_ENABLE_ON_INIT),
+ [DIV4_U] = DIV4(FRQCR, 16, 0x1fff, CLK_ENABLE_ON_INIT),
+ [DIV4_SH] = DIV4(FRQCR, 12, 0x1fff, CLK_ENABLE_ON_INIT),
+ [DIV4_B] = DIV4(FRQCR, 8, 0x1fff, CLK_ENABLE_ON_INIT),
+ [DIV4_B3] = DIV4(FRQCR, 4, 0x1fff, CLK_ENABLE_ON_INIT),
+ [DIV4_P] = DIV4(FRQCR, 0, 0x1fff, 0),
+};
+
+enum { DIV4_IRDA, DIV4_ENABLE_NR };
+
+struct clk div4_enable_clks[DIV4_ENABLE_NR] = {
+ [DIV4_IRDA] = DIV4(IRDACLKCR, 0, 0x1fff, 0),
+};
+
+enum { DIV4_SIUA, DIV4_SIUB, DIV4_REPARENT_NR };
+
+struct clk div4_reparent_clks[DIV4_REPARENT_NR] = {
+ [DIV4_SIUA] = DIV4(SCLKACR, 0, 0x1fff, 0),
+ [DIV4_SIUB] = DIV4(SCLKBCR, 0, 0x1fff, 0),
+};
+
+enum { DIV6_V, DIV6_NR };
+
+struct clk div6_clks[DIV6_NR] = {
+ [DIV6_V] = SH_CLK_DIV6(&pll_clk, VCLKCR, 0),
+};
+
+static struct clk mstp_clks[HWBLK_NR] = {
+ [HWBLK_URAM] = SH_CLK_MSTP32(&div4_clks[DIV4_U], MSTPCR0, 28, CLK_ENABLE_ON_INIT),
+ [HWBLK_XYMEM] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR0, 26, CLK_ENABLE_ON_INIT),
+ [HWBLK_TMU] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 15, 0),
+ [HWBLK_CMT] = SH_CLK_MSTP32(&r_clk, MSTPCR0, 14, 0),
+ [HWBLK_RWDT] = SH_CLK_MSTP32(&r_clk, MSTPCR0, 13, 0),
+ [HWBLK_FLCTL] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 10, 0),
+ [HWBLK_SCIF0] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 7, 0),
+ [HWBLK_SCIF1] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 6, 0),
+ [HWBLK_SCIF2] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 5, 0),
+
+ [HWBLK_IIC] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 9, 0),
+ [HWBLK_RTC] = SH_CLK_MSTP32(&r_clk, MSTPCR1, 8, 0),
+
+ [HWBLK_SDHI] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR2, 18, 0),
+ [HWBLK_KEYSC] = SH_CLK_MSTP32(&r_clk, MSTPCR2, 14, 0),
+ [HWBLK_USBF] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR2, 11, 0),
+ [HWBLK_2DG] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 9, 0),
+ [HWBLK_SIU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 8, 0),
+ [HWBLK_JPU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 6, 0),
+ [HWBLK_VOU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 5, 0),
+ [HWBLK_BEU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 4, 0),
+ [HWBLK_CEU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 3, 0),
+ [HWBLK_VEU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 2, 0),
+ [HWBLK_VPU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 1, 0),
+ [HWBLK_LCDC] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR2, 0, 0),
+};
+
+static struct clk_lookup lookups[] = {
+ /* main clocks */
+ CLKDEV_CON_ID("rclk", &r_clk),
+ CLKDEV_CON_ID("extal", &extal_clk),
+ CLKDEV_CON_ID("dll_clk", &dll_clk),
+ CLKDEV_CON_ID("pll_clk", &pll_clk),
+
+ /* DIV4 clocks */
+ CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]),
+ CLKDEV_CON_ID("umem_clk", &div4_clks[DIV4_U]),
+ CLKDEV_CON_ID("shyway_clk", &div4_clks[DIV4_SH]),
+ CLKDEV_CON_ID("bus_clk", &div4_clks[DIV4_B]),
+ CLKDEV_CON_ID("b3_clk", &div4_clks[DIV4_B3]),
+ CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]),
+ CLKDEV_CON_ID("irda_clk", &div4_enable_clks[DIV4_IRDA]),
+ CLKDEV_CON_ID("siua_clk", &div4_reparent_clks[DIV4_SIUA]),
+ CLKDEV_CON_ID("siub_clk", &div4_reparent_clks[DIV4_SIUB]),
+
+ /* DIV6 clocks */
+ CLKDEV_CON_ID("video_clk", &div6_clks[DIV6_V]),
+
+ /* MSTP clocks */
+ CLKDEV_CON_ID("uram0", &mstp_clks[HWBLK_URAM]),
+ CLKDEV_CON_ID("xymem0", &mstp_clks[HWBLK_XYMEM]),
+
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.0", &mstp_clks[HWBLK_TMU]),
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.1", &mstp_clks[HWBLK_TMU]),
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.2", &mstp_clks[HWBLK_TMU]),
+
+ CLKDEV_CON_ID("cmt_fck", &mstp_clks[HWBLK_CMT]),
+ CLKDEV_DEV_ID("sh-wdt.0", &mstp_clks[HWBLK_RWDT]),
+ CLKDEV_CON_ID("flctl0", &mstp_clks[HWBLK_FLCTL]),
+
+ CLKDEV_DEV_ID("sh-sci.0", &mstp_clks[HWBLK_SCIF0]),
+ CLKDEV_DEV_ID("sh-sci.1", &mstp_clks[HWBLK_SCIF1]),
+ CLKDEV_DEV_ID("sh-sci.2", &mstp_clks[HWBLK_SCIF2]),
+
+ CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[HWBLK_IIC]),
+ CLKDEV_CON_ID("rtc0", &mstp_clks[HWBLK_RTC]),
+ CLKDEV_DEV_ID("sh_mobile_sdhi.0", &mstp_clks[HWBLK_SDHI]),
+ CLKDEV_DEV_ID("sh_keysc.0", &mstp_clks[HWBLK_KEYSC]),
+ CLKDEV_CON_ID("usbf0", &mstp_clks[HWBLK_USBF]),
+ CLKDEV_CON_ID("2dg0", &mstp_clks[HWBLK_2DG]),
+ CLKDEV_DEV_ID("siu-pcm-audio", &mstp_clks[HWBLK_SIU]),
+ CLKDEV_DEV_ID("sh-vou.0", &mstp_clks[HWBLK_VOU]),
+ CLKDEV_CON_ID("jpu0", &mstp_clks[HWBLK_JPU]),
+ CLKDEV_CON_ID("beu0", &mstp_clks[HWBLK_BEU]),
+ CLKDEV_DEV_ID("sh_mobile_ceu.0", &mstp_clks[HWBLK_CEU]),
+ CLKDEV_CON_ID("veu0", &mstp_clks[HWBLK_VEU]),
+ CLKDEV_CON_ID("vpu0", &mstp_clks[HWBLK_VPU]),
+ CLKDEV_DEV_ID("sh_mobile_lcdc_fb.0", &mstp_clks[HWBLK_LCDC]),
+};
+
+int __init arch_clk_init(void)
+{
+ int k, ret = 0;
+
+ /* autodetect extal or dll configuration */
+ if (__raw_readl(PLLCR) & 0x1000)
+ pll_clk.parent = &dll_clk;
+ else
+ pll_clk.parent = &extal_clk;
+
+ for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++)
+ ret = clk_register(main_clks[k]);
+
+ clkdev_add_table(lookups, ARRAY_SIZE(lookups));
+
+ if (!ret)
+ ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table);
+
+ if (!ret)
+ ret = sh_clk_div4_enable_register(div4_enable_clks,
+ DIV4_ENABLE_NR, &div4_table);
+
+ if (!ret)
+ ret = sh_clk_div4_reparent_register(div4_reparent_clks,
+ DIV4_REPARENT_NR, &div4_table);
+
+ if (!ret)
+ ret = sh_clk_div6_register(div6_clks, DIV6_NR);
+
+ if (!ret)
+ ret = sh_clk_mstp32_register(mstp_clks, HWBLK_NR);
+
+ return ret;
+}
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7723.c b/arch/sh/kernel/cpu/sh4a/clock-sh7723.c
new file mode 100644
index 00000000..35f75cf0
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7723.c
@@ -0,0 +1,317 @@
+/*
+ * arch/sh/kernel/cpu/sh4a/clock-sh7723.c
+ *
+ * SH7723 clock framework support
+ *
+ * Copyright (C) 2009 Magnus Damm
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/sh_clk.h>
+#include <asm/clock.h>
+#include <cpu/sh7723.h>
+
+/* SH7723 registers */
+#define FRQCR 0xa4150000
+#define VCLKCR 0xa4150004
+#define SCLKACR 0xa4150008
+#define SCLKBCR 0xa415000c
+#define IRDACLKCR 0xa4150018
+#define PLLCR 0xa4150024
+#define MSTPCR0 0xa4150030
+#define MSTPCR1 0xa4150034
+#define MSTPCR2 0xa4150038
+#define DLLFRQ 0xa4150050
+
+/* Fixed 32 KHz root clock for RTC and Power Management purposes */
+static struct clk r_clk = {
+ .rate = 32768,
+};
+
+/*
+ * Default rate for the root input clock, reset this with clk_set_rate()
+ * from the platform code.
+ */
+struct clk extal_clk = {
+ .rate = 33333333,
+};
+
+/* The dll multiplies the 32 kHz r_clk and may be used instead of extal */
+static unsigned long dll_recalc(struct clk *clk)
+{
+ unsigned long mult;
+
+ if (__raw_readl(PLLCR) & 0x1000)
+ mult = __raw_readl(DLLFRQ);
+ else
+ mult = 0;
+
+ return clk->parent->rate * mult;
+}
+
+static struct sh_clk_ops dll_clk_ops = {
+ .recalc = dll_recalc,
+};
+
+static struct clk dll_clk = {
+ .ops = &dll_clk_ops,
+ .parent = &r_clk,
+ .flags = CLK_ENABLE_ON_INIT,
+};
+
+static unsigned long pll_recalc(struct clk *clk)
+{
+ unsigned long mult = 1;
+ unsigned long div = 1;
+
+ if (__raw_readl(PLLCR) & 0x4000)
+ mult = (((__raw_readl(FRQCR) >> 24) & 0x1f) + 1);
+ else
+ div = 2;
+
+ return (clk->parent->rate * mult) / div;
+}
+
+static struct sh_clk_ops pll_clk_ops = {
+ .recalc = pll_recalc,
+};
+
+static struct clk pll_clk = {
+ .ops = &pll_clk_ops,
+ .flags = CLK_ENABLE_ON_INIT,
+};
+
+struct clk *main_clks[] = {
+ &r_clk,
+ &extal_clk,
+ &dll_clk,
+ &pll_clk,
+};
+
+static int multipliers[] = { 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
+static int divisors[] = { 1, 3, 2, 5, 3, 4, 5, 6, 8, 10, 12, 16, 20 };
+
+static struct clk_div_mult_table div4_div_mult_table = {
+ .divisors = divisors,
+ .nr_divisors = ARRAY_SIZE(divisors),
+ .multipliers = multipliers,
+ .nr_multipliers = ARRAY_SIZE(multipliers),
+};
+
+static struct clk_div4_table div4_table = {
+ .div_mult_table = &div4_div_mult_table,
+};
+
+enum { DIV4_I, DIV4_U, DIV4_SH, DIV4_B, DIV4_B3, DIV4_P, DIV4_NR };
+
+#define DIV4(_reg, _bit, _mask, _flags) \
+ SH_CLK_DIV4(&pll_clk, _reg, _bit, _mask, _flags)
+
+struct clk div4_clks[DIV4_NR] = {
+ [DIV4_I] = DIV4(FRQCR, 20, 0x0dbf, CLK_ENABLE_ON_INIT),
+ [DIV4_U] = DIV4(FRQCR, 16, 0x0dbf, CLK_ENABLE_ON_INIT),
+ [DIV4_SH] = DIV4(FRQCR, 12, 0x0dbf, CLK_ENABLE_ON_INIT),
+ [DIV4_B] = DIV4(FRQCR, 8, 0x0dbf, CLK_ENABLE_ON_INIT),
+ [DIV4_B3] = DIV4(FRQCR, 4, 0x0db4, CLK_ENABLE_ON_INIT),
+ [DIV4_P] = DIV4(FRQCR, 0, 0x0dbf, 0),
+};
+
+enum { DIV4_IRDA, DIV4_ENABLE_NR };
+
+struct clk div4_enable_clks[DIV4_ENABLE_NR] = {
+ [DIV4_IRDA] = DIV4(IRDACLKCR, 0, 0x0dbf, 0),
+};
+
+enum { DIV4_SIUA, DIV4_SIUB, DIV4_REPARENT_NR };
+
+struct clk div4_reparent_clks[DIV4_REPARENT_NR] = {
+ [DIV4_SIUA] = DIV4(SCLKACR, 0, 0x0dbf, 0),
+ [DIV4_SIUB] = DIV4(SCLKBCR, 0, 0x0dbf, 0),
+};
+enum { DIV6_V, DIV6_NR };
+
+struct clk div6_clks[DIV6_NR] = {
+ [DIV6_V] = SH_CLK_DIV6(&pll_clk, VCLKCR, 0),
+};
+
+static struct clk mstp_clks[HWBLK_NR] = {
+ /* See page 60 of Datasheet V1.0: Overview -> Block Diagram */
+ [HWBLK_TLB] = SH_CLK_MSTP32(&div4_clks[DIV4_I], MSTPCR0, 31, CLK_ENABLE_ON_INIT),
+ [HWBLK_IC] = SH_CLK_MSTP32(&div4_clks[DIV4_I], MSTPCR0, 30, CLK_ENABLE_ON_INIT),
+ [HWBLK_OC] = SH_CLK_MSTP32(&div4_clks[DIV4_I], MSTPCR0, 29, CLK_ENABLE_ON_INIT),
+ [HWBLK_L2C] = SH_CLK_MSTP32(&div4_clks[DIV4_SH], MSTPCR0, 28, CLK_ENABLE_ON_INIT),
+ [HWBLK_ILMEM] = SH_CLK_MSTP32(&div4_clks[DIV4_I], MSTPCR0, 27, CLK_ENABLE_ON_INIT),
+ [HWBLK_FPU] = SH_CLK_MSTP32(&div4_clks[DIV4_I], MSTPCR0, 24, CLK_ENABLE_ON_INIT),
+ [HWBLK_INTC] = SH_CLK_MSTP32(&div4_clks[DIV4_I], MSTPCR0, 22, CLK_ENABLE_ON_INIT),
+ [HWBLK_DMAC0] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR0, 21, 0),
+ [HWBLK_SHYWAY] = SH_CLK_MSTP32(&div4_clks[DIV4_SH], MSTPCR0, 20, CLK_ENABLE_ON_INIT),
+ [HWBLK_HUDI] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 19, 0),
+ [HWBLK_UBC] = SH_CLK_MSTP32(&div4_clks[DIV4_I], MSTPCR0, 17, 0),
+ [HWBLK_TMU0] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 15, 0),
+ [HWBLK_CMT] = SH_CLK_MSTP32(&r_clk, MSTPCR0, 14, 0),
+ [HWBLK_RWDT] = SH_CLK_MSTP32(&r_clk, MSTPCR0, 13, 0),
+ [HWBLK_DMAC1] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR0, 12, 0),
+ [HWBLK_TMU1] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 11, 0),
+ [HWBLK_FLCTL] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 10, 0),
+ [HWBLK_SCIF0] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 9, 0),
+ [HWBLK_SCIF1] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 8, 0),
+ [HWBLK_SCIF2] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 7, 0),
+ [HWBLK_SCIF3] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR0, 6, 0),
+ [HWBLK_SCIF4] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR0, 5, 0),
+ [HWBLK_SCIF5] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR0, 4, 0),
+ [HWBLK_MSIOF0] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR0, 2, 0),
+ [HWBLK_MSIOF1] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR0, 1, 0),
+ [HWBLK_MERAM] = SH_CLK_MSTP32(&div4_clks[DIV4_SH], MSTPCR0, 0, 0),
+
+ [HWBLK_IIC] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 9, 0),
+ [HWBLK_RTC] = SH_CLK_MSTP32(&r_clk, MSTPCR1, 8, 0),
+
+ [HWBLK_ATAPI] = SH_CLK_MSTP32(&div4_clks[DIV4_SH], MSTPCR2, 28, 0),
+ [HWBLK_ADC] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR2, 27, 0),
+ [HWBLK_TPU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 25, 0),
+ [HWBLK_IRDA] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR2, 24, 0),
+ [HWBLK_TSIF] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 22, 0),
+ [HWBLK_ICB] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 21, CLK_ENABLE_ON_INIT),
+ [HWBLK_SDHI0] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 18, 0),
+ [HWBLK_SDHI1] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 17, 0),
+ [HWBLK_KEYSC] = SH_CLK_MSTP32(&r_clk, MSTPCR2, 14, 0),
+ [HWBLK_USB] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 11, 0),
+ [HWBLK_2DG] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 10, 0),
+ [HWBLK_SIU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 8, 0),
+ [HWBLK_VEU2H1] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 6, 0),
+ [HWBLK_VOU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 5, 0),
+ [HWBLK_BEU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 4, 0),
+ [HWBLK_CEU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 3, 0),
+ [HWBLK_VEU2H0] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 2, 0),
+ [HWBLK_VPU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 1, 0),
+ [HWBLK_LCDC] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 0, 0),
+};
+
+static struct clk_lookup lookups[] = {
+ /* main clocks */
+ CLKDEV_CON_ID("rclk", &r_clk),
+ CLKDEV_CON_ID("extal", &extal_clk),
+ CLKDEV_CON_ID("dll_clk", &dll_clk),
+ CLKDEV_CON_ID("pll_clk", &pll_clk),
+
+ /* DIV4 clocks */
+ CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]),
+ CLKDEV_CON_ID("umem_clk", &div4_clks[DIV4_U]),
+ CLKDEV_CON_ID("shyway_clk", &div4_clks[DIV4_SH]),
+ CLKDEV_CON_ID("bus_clk", &div4_clks[DIV4_B]),
+ CLKDEV_CON_ID("b3_clk", &div4_clks[DIV4_B3]),
+ CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]),
+ CLKDEV_CON_ID("irda_clk", &div4_enable_clks[DIV4_IRDA]),
+ CLKDEV_CON_ID("siua_clk", &div4_reparent_clks[DIV4_SIUA]),
+ CLKDEV_CON_ID("siub_clk", &div4_reparent_clks[DIV4_SIUB]),
+
+ /* DIV6 clocks */
+ CLKDEV_CON_ID("video_clk", &div6_clks[DIV6_V]),
+
+ /* MSTP clocks */
+ CLKDEV_CON_ID("tlb0", &mstp_clks[HWBLK_TLB]),
+ CLKDEV_CON_ID("ic0", &mstp_clks[HWBLK_IC]),
+ CLKDEV_CON_ID("oc0", &mstp_clks[HWBLK_OC]),
+ CLKDEV_CON_ID("l2c0", &mstp_clks[HWBLK_L2C]),
+ CLKDEV_CON_ID("ilmem0", &mstp_clks[HWBLK_ILMEM]),
+ CLKDEV_CON_ID("fpu0", &mstp_clks[HWBLK_FPU]),
+ CLKDEV_CON_ID("intc0", &mstp_clks[HWBLK_INTC]),
+ CLKDEV_DEV_ID("sh-dma-engine.0", &mstp_clks[HWBLK_DMAC0]),
+ CLKDEV_CON_ID("sh0", &mstp_clks[HWBLK_SHYWAY]),
+ CLKDEV_CON_ID("hudi0", &mstp_clks[HWBLK_HUDI]),
+ CLKDEV_CON_ID("ubc0", &mstp_clks[HWBLK_UBC]),
+ CLKDEV_CON_ID("cmt_fck", &mstp_clks[HWBLK_CMT]),
+ CLKDEV_DEV_ID("sh-wdt.0", &mstp_clks[HWBLK_RWDT]),
+ CLKDEV_DEV_ID("sh-dma-engine.1", &mstp_clks[HWBLK_DMAC1]),
+ CLKDEV_CON_ID("flctl0", &mstp_clks[HWBLK_FLCTL]),
+ CLKDEV_DEV_ID("spi_sh_msiof.0", &mstp_clks[HWBLK_MSIOF0]),
+ CLKDEV_DEV_ID("spi_sh_msiof.1", &mstp_clks[HWBLK_MSIOF1]),
+ CLKDEV_DEV_ID("sh_mobile_meram.0", &mstp_clks[HWBLK_MERAM]),
+ CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[HWBLK_IIC]),
+ CLKDEV_CON_ID("rtc0", &mstp_clks[HWBLK_RTC]),
+ CLKDEV_CON_ID("atapi0", &mstp_clks[HWBLK_ATAPI]),
+ CLKDEV_CON_ID("adc0", &mstp_clks[HWBLK_ADC]),
+ CLKDEV_CON_ID("tpu0", &mstp_clks[HWBLK_TPU]),
+ CLKDEV_CON_ID("irda0", &mstp_clks[HWBLK_IRDA]),
+ CLKDEV_CON_ID("tsif0", &mstp_clks[HWBLK_TSIF]),
+ CLKDEV_CON_ID("icb0", &mstp_clks[HWBLK_ICB]),
+ CLKDEV_DEV_ID("sh_mobile_sdhi.0", &mstp_clks[HWBLK_SDHI0]),
+ CLKDEV_DEV_ID("sh_mobile_sdhi.1", &mstp_clks[HWBLK_SDHI1]),
+ CLKDEV_DEV_ID("sh_keysc.0", &mstp_clks[HWBLK_KEYSC]),
+ CLKDEV_CON_ID("usb0", &mstp_clks[HWBLK_USB]),
+ CLKDEV_CON_ID("2dg0", &mstp_clks[HWBLK_2DG]),
+ CLKDEV_DEV_ID("siu-pcm-audio", &mstp_clks[HWBLK_SIU]),
+ CLKDEV_CON_ID("veu1", &mstp_clks[HWBLK_VEU2H1]),
+ CLKDEV_DEV_ID("sh-vou.0", &mstp_clks[HWBLK_VOU]),
+ CLKDEV_CON_ID("beu0", &mstp_clks[HWBLK_BEU]),
+ CLKDEV_DEV_ID("sh_mobile_ceu.0", &mstp_clks[HWBLK_CEU]),
+ CLKDEV_CON_ID("veu0", &mstp_clks[HWBLK_VEU2H0]),
+ CLKDEV_CON_ID("vpu0", &mstp_clks[HWBLK_VPU]),
+
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.0", &mstp_clks[HWBLK_TMU0]),
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.1", &mstp_clks[HWBLK_TMU0]),
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.2", &mstp_clks[HWBLK_TMU0]),
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.3", &mstp_clks[HWBLK_TMU1]),
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.4", &mstp_clks[HWBLK_TMU1]),
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.5", &mstp_clks[HWBLK_TMU1]),
+
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.0", &mstp_clks[HWBLK_SCIF0]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.1", &mstp_clks[HWBLK_SCIF1]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.2", &mstp_clks[HWBLK_SCIF2]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.3", &mstp_clks[HWBLK_SCIF3]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.4", &mstp_clks[HWBLK_SCIF4]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.5", &mstp_clks[HWBLK_SCIF5]),
+
+ CLKDEV_DEV_ID("sh_mobile_lcdc_fb.0", &mstp_clks[HWBLK_LCDC]),
+};
+
+int __init arch_clk_init(void)
+{
+ int k, ret = 0;
+
+ /* autodetect extal or dll configuration */
+ if (__raw_readl(PLLCR) & 0x1000)
+ pll_clk.parent = &dll_clk;
+ else
+ pll_clk.parent = &extal_clk;
+
+ for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++)
+ ret = clk_register(main_clks[k]);
+
+ clkdev_add_table(lookups, ARRAY_SIZE(lookups));
+
+ if (!ret)
+ ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table);
+
+ if (!ret)
+ ret = sh_clk_div4_enable_register(div4_enable_clks,
+ DIV4_ENABLE_NR, &div4_table);
+
+ if (!ret)
+ ret = sh_clk_div4_reparent_register(div4_reparent_clks,
+ DIV4_REPARENT_NR, &div4_table);
+
+ if (!ret)
+ ret = sh_clk_div6_register(div6_clks, DIV6_NR);
+
+ if (!ret)
+ ret = sh_clk_mstp32_register(mstp_clks, HWBLK_NR);
+
+ return ret;
+}
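One more usage note, illustrative and not part of the patch: the CLKDEV_ICK_ID entries above bind a clock to a specific device instance as well as a connection id, so the consumer passes both. A rough sketch from a driver probe path, assuming the platform device is the "sh-sci.0" instance named in the table:

	#include <linux/clk.h>
	#include <linux/err.h>
	#include <linux/platform_device.h>

	/* Sketch only: resolves CLKDEV_ICK_ID("sci_fck", "sh-sci.0", ...) above */
	static int example_get_scif0_clock(struct platform_device *pdev)
	{
		struct clk *clk = clk_get(&pdev->dev, "sci_fck");

		if (IS_ERR(clk))
			return PTR_ERR(clk);

		clk_put(clk);	/* a real driver would keep and enable it */
		return 0;
	}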
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c
new file mode 100644
index 00000000..2a879016
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c
@@ -0,0 +1,381 @@
+/*
+ * arch/sh/kernel/cpu/sh4a/clock-sh7724.c
+ *
+ * SH7724 clock framework support
+ *
+ * Copyright (C) 2009 Magnus Damm
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/sh_clk.h>
+#include <asm/clock.h>
+#include <cpu/sh7724.h>
+
+/* SH7724 registers */
+#define FRQCRA 0xa4150000
+#define FRQCRB 0xa4150004
+#define VCLKCR 0xa4150048
+#define FCLKACR 0xa4150008
+#define FCLKBCR 0xa415000c
+#define IRDACLKCR 0xa4150018
+#define PLLCR 0xa4150024
+#define MSTPCR0 0xa4150030
+#define MSTPCR1 0xa4150034
+#define MSTPCR2 0xa4150038
+#define SPUCLKCR 0xa415003c
+#define FLLFRQ 0xa4150050
+#define LSTATS 0xa4150060
+
+/* Fixed 32 KHz root clock for RTC and Power Management purposes */
+static struct clk r_clk = {
+ .rate = 32768,
+};
+
+/*
+ * Default rate for the root input clock, reset this with clk_set_rate()
+ * from the platform code.
+ */
+static struct clk extal_clk = {
+ .rate = 33333333,
+};
+
+/* The fll multiplies the 32 kHz r_clk and may be used instead of extal */
+static unsigned long fll_recalc(struct clk *clk)
+{
+ unsigned long mult = 0;
+ unsigned long div = 1;
+
+ if (__raw_readl(PLLCR) & 0x1000)
+ mult = __raw_readl(FLLFRQ) & 0x3ff;
+
+ if (__raw_readl(FLLFRQ) & 0x4000)
+ div = 2;
+
+ return (clk->parent->rate * mult) / div;
+}
+
+static struct sh_clk_ops fll_clk_ops = {
+ .recalc = fll_recalc,
+};
+
+static struct clk fll_clk = {
+ .ops = &fll_clk_ops,
+ .parent = &r_clk,
+ .flags = CLK_ENABLE_ON_INIT,
+};
+
+static unsigned long pll_recalc(struct clk *clk)
+{
+ unsigned long mult = 1;
+
+ if (__raw_readl(PLLCR) & 0x4000)
+ mult = (((__raw_readl(FRQCRA) >> 24) & 0x3f) + 1) * 2;
+
+ return clk->parent->rate * mult;
+}
+
+static struct sh_clk_ops pll_clk_ops = {
+ .recalc = pll_recalc,
+};
+
+static struct clk pll_clk = {
+ .ops = &pll_clk_ops,
+ .flags = CLK_ENABLE_ON_INIT,
+};
+
+/* A fixed divide-by-3 block used by the div6 clocks */
+static unsigned long div3_recalc(struct clk *clk)
+{
+ return clk->parent->rate / 3;
+}
+
+static struct sh_clk_ops div3_clk_ops = {
+ .recalc = div3_recalc,
+};
+
+static struct clk div3_clk = {
+ .ops = &div3_clk_ops,
+ .parent = &pll_clk,
+};
+
+/* External input clocks (pin names: FSIMCKA/FSIMCKB/DV_CLKI) */
+struct clk sh7724_fsimcka_clk = {
+};
+
+struct clk sh7724_fsimckb_clk = {
+};
+
+struct clk sh7724_dv_clki = {
+};
+
+static struct clk *main_clks[] = {
+ &r_clk,
+ &extal_clk,
+ &fll_clk,
+ &pll_clk,
+ &div3_clk,
+ &sh7724_fsimcka_clk,
+ &sh7724_fsimckb_clk,
+ &sh7724_dv_clki,
+};
+
+static void div4_kick(struct clk *clk)
+{
+ unsigned long value;
+
+ /* set KICK bit in FRQCRA to update hardware setting */
+ value = __raw_readl(FRQCRA);
+ value |= (1 << 31);
+ __raw_writel(value, FRQCRA);
+}
+
+static int divisors[] = { 2, 3, 4, 6, 8, 12, 16, 0, 24, 32, 36, 48, 0, 72 };
+
+static struct clk_div_mult_table div4_div_mult_table = {
+ .divisors = divisors,
+ .nr_divisors = ARRAY_SIZE(divisors),
+};
+
+static struct clk_div4_table div4_table = {
+ .div_mult_table = &div4_div_mult_table,
+ .kick = div4_kick,
+};
+
+enum { DIV4_I, DIV4_SH, DIV4_B, DIV4_P, DIV4_M1, DIV4_NR };
+
+#define DIV4(_reg, _bit, _mask, _flags) \
+ SH_CLK_DIV4(&pll_clk, _reg, _bit, _mask, _flags)
+
+struct clk div4_clks[DIV4_NR] = {
+ [DIV4_I] = DIV4(FRQCRA, 20, 0x2f7d, CLK_ENABLE_ON_INIT),
+ [DIV4_SH] = DIV4(FRQCRA, 12, 0x2f7c, CLK_ENABLE_ON_INIT),
+ [DIV4_B] = DIV4(FRQCRA, 8, 0x2f7c, CLK_ENABLE_ON_INIT),
+ [DIV4_P] = DIV4(FRQCRA, 0, 0x2f7c, 0),
+ [DIV4_M1] = DIV4(FRQCRB, 4, 0x2f7c, CLK_ENABLE_ON_INIT),
+};
+
+enum { DIV6_V, DIV6_I, DIV6_S, DIV6_FA, DIV6_FB, DIV6_NR };
+
+/* Indices are important - they are the actual source-select field values */
+static struct clk *common_parent[] = {
+ [0] = &div3_clk,
+ [1] = NULL,
+};
+
+static struct clk *vclkcr_parent[8] = {
+ [0] = &div3_clk,
+ [2] = &sh7724_dv_clki,
+ [4] = &extal_clk,
+};
+
+static struct clk *fclkacr_parent[] = {
+ [0] = &div3_clk,
+ [1] = NULL,
+ [2] = &sh7724_fsimcka_clk,
+ [3] = NULL,
+};
+
+static struct clk *fclkbcr_parent[] = {
+ [0] = &div3_clk,
+ [1] = NULL,
+ [2] = &sh7724_fsimckb_clk,
+ [3] = NULL,
+};
+
+static struct clk div6_clks[DIV6_NR] = {
+ [DIV6_V] = SH_CLK_DIV6_EXT(VCLKCR, 0,
+ vclkcr_parent, ARRAY_SIZE(vclkcr_parent), 12, 3),
+ [DIV6_I] = SH_CLK_DIV6_EXT(IRDACLKCR, 0,
+ common_parent, ARRAY_SIZE(common_parent), 6, 1),
+ [DIV6_S] = SH_CLK_DIV6_EXT(SPUCLKCR, CLK_ENABLE_ON_INIT,
+ common_parent, ARRAY_SIZE(common_parent), 6, 1),
+ [DIV6_FA] = SH_CLK_DIV6_EXT(FCLKACR, 0,
+ fclkacr_parent, ARRAY_SIZE(fclkacr_parent), 6, 2),
+ [DIV6_FB] = SH_CLK_DIV6_EXT(FCLKBCR, 0,
+ fclkbcr_parent, ARRAY_SIZE(fclkbcr_parent), 6, 2),
+};
+
+static struct clk mstp_clks[HWBLK_NR] = {
+ [HWBLK_TLB] = SH_CLK_MSTP32(&div4_clks[DIV4_I], MSTPCR0, 31, CLK_ENABLE_ON_INIT),
+ [HWBLK_IC] = SH_CLK_MSTP32(&div4_clks[DIV4_I], MSTPCR0, 30, CLK_ENABLE_ON_INIT),
+ [HWBLK_OC] = SH_CLK_MSTP32(&div4_clks[DIV4_I], MSTPCR0, 29, CLK_ENABLE_ON_INIT),
+ [HWBLK_RSMEM] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR0, 28, CLK_ENABLE_ON_INIT),
+ [HWBLK_ILMEM] = SH_CLK_MSTP32(&div4_clks[DIV4_I], MSTPCR0, 27, CLK_ENABLE_ON_INIT),
+ [HWBLK_L2C] = SH_CLK_MSTP32(&div4_clks[DIV4_SH], MSTPCR0, 26, CLK_ENABLE_ON_INIT),
+ [HWBLK_FPU] = SH_CLK_MSTP32(&div4_clks[DIV4_I], MSTPCR0, 24, CLK_ENABLE_ON_INIT),
+ [HWBLK_INTC] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 22, CLK_ENABLE_ON_INIT),
+ [HWBLK_DMAC0] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR0, 21, 0),
+ [HWBLK_SHYWAY] = SH_CLK_MSTP32(&div4_clks[DIV4_SH], MSTPCR0, 20, CLK_ENABLE_ON_INIT),
+ [HWBLK_HUDI] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 19, 0),
+ [HWBLK_UBC] = SH_CLK_MSTP32(&div4_clks[DIV4_I], MSTPCR0, 17, 0),
+ [HWBLK_TMU0] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 15, 0),
+ [HWBLK_CMT] = SH_CLK_MSTP32(&r_clk, MSTPCR0, 14, 0),
+ [HWBLK_RWDT] = SH_CLK_MSTP32(&r_clk, MSTPCR0, 13, 0),
+ [HWBLK_DMAC1] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR0, 12, 0),
+ [HWBLK_TMU1] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 10, 0),
+ [HWBLK_SCIF0] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 9, 0),
+ [HWBLK_SCIF1] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 8, 0),
+ [HWBLK_SCIF2] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 7, 0),
+ [HWBLK_SCIF3] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR0, 6, 0),
+ [HWBLK_SCIF4] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR0, 5, 0),
+ [HWBLK_SCIF5] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR0, 4, 0),
+ [HWBLK_MSIOF0] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR0, 2, 0),
+ [HWBLK_MSIOF1] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR0, 1, 0),
+
+ [HWBLK_KEYSC] = SH_CLK_MSTP32(&r_clk, MSTPCR1, 12, 0),
+ [HWBLK_RTC] = SH_CLK_MSTP32(&r_clk, MSTPCR1, 11, 0),
+ [HWBLK_IIC0] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 9, 0),
+ [HWBLK_IIC1] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 8, 0),
+
+ [HWBLK_MMC] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 29, 0),
+ [HWBLK_ETHER] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 28, 0),
+ [HWBLK_ATAPI] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 26, 0),
+ [HWBLK_TPU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 25, 0),
+ [HWBLK_IRDA] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR2, 24, 0),
+ [HWBLK_TSIF] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 22, 0),
+ [HWBLK_USB1] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 21, 0),
+ [HWBLK_USB0] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 20, 0),
+ [HWBLK_2DG] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 19, 0),
+ [HWBLK_SDHI0] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 18, 0),
+ [HWBLK_SDHI1] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 17, 0),
+ [HWBLK_VEU1] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 15, 0),
+ [HWBLK_CEU1] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 13, 0),
+ [HWBLK_BEU1] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 12, 0),
+ [HWBLK_2DDMAC] = SH_CLK_MSTP32(&div4_clks[DIV4_SH], MSTPCR2, 10, 0),
+ [HWBLK_SPU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 9, 0),
+ [HWBLK_JPU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 6, 0),
+ [HWBLK_VOU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 5, 0),
+ [HWBLK_BEU0] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 4, 0),
+ [HWBLK_CEU0] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 3, 0),
+ [HWBLK_VEU0] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 2, 0),
+ [HWBLK_VPU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 1, 0),
+ [HWBLK_LCDC] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 0, 0),
+};
+
+static struct clk_lookup lookups[] = {
+ /* main clocks */
+ CLKDEV_CON_ID("rclk", &r_clk),
+ CLKDEV_CON_ID("extal", &extal_clk),
+ CLKDEV_CON_ID("fll_clk", &fll_clk),
+ CLKDEV_CON_ID("pll_clk", &pll_clk),
+ CLKDEV_CON_ID("div3_clk", &div3_clk),
+
+ /* DIV4 clocks */
+ CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]),
+ CLKDEV_CON_ID("shyway_clk", &div4_clks[DIV4_SH]),
+ CLKDEV_CON_ID("bus_clk", &div4_clks[DIV4_B]),
+ CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]),
+ CLKDEV_CON_ID("vpu_clk", &div4_clks[DIV4_M1]),
+
+ /* DIV6 clocks */
+ CLKDEV_CON_ID("video_clk", &div6_clks[DIV6_V]),
+ CLKDEV_CON_ID("fsia_clk", &div6_clks[DIV6_FA]),
+ CLKDEV_CON_ID("fsib_clk", &div6_clks[DIV6_FB]),
+ CLKDEV_CON_ID("irda_clk", &div6_clks[DIV6_I]),
+ CLKDEV_CON_ID("spu_clk", &div6_clks[DIV6_S]),
+
+ /* MSTP clocks */
+ CLKDEV_CON_ID("tlb0", &mstp_clks[HWBLK_TLB]),
+ CLKDEV_CON_ID("ic0", &mstp_clks[HWBLK_IC]),
+ CLKDEV_CON_ID("oc0", &mstp_clks[HWBLK_OC]),
+ CLKDEV_CON_ID("rs0", &mstp_clks[HWBLK_RSMEM]),
+ CLKDEV_CON_ID("ilmem0", &mstp_clks[HWBLK_ILMEM]),
+ CLKDEV_CON_ID("l2c0", &mstp_clks[HWBLK_L2C]),
+ CLKDEV_CON_ID("fpu0", &mstp_clks[HWBLK_FPU]),
+ CLKDEV_CON_ID("intc0", &mstp_clks[HWBLK_INTC]),
+ CLKDEV_DEV_ID("sh-dma-engine.0", &mstp_clks[HWBLK_DMAC0]),
+ CLKDEV_CON_ID("sh0", &mstp_clks[HWBLK_SHYWAY]),
+ CLKDEV_CON_ID("hudi0", &mstp_clks[HWBLK_HUDI]),
+ CLKDEV_CON_ID("ubc0", &mstp_clks[HWBLK_UBC]),
+
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.0", &mstp_clks[HWBLK_TMU0]),
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.1", &mstp_clks[HWBLK_TMU0]),
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.2", &mstp_clks[HWBLK_TMU0]),
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.3", &mstp_clks[HWBLK_TMU1]),
+
+ CLKDEV_CON_ID("cmt_fck", &mstp_clks[HWBLK_CMT]),
+ CLKDEV_DEV_ID("sh-wdt.0", &mstp_clks[HWBLK_RWDT]),
+ CLKDEV_DEV_ID("sh-dma-engine.1", &mstp_clks[HWBLK_DMAC1]),
+
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.4", &mstp_clks[HWBLK_TMU1]),
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.5", &mstp_clks[HWBLK_TMU1]),
+ CLKDEV_DEV_ID("sh-sci.0", &mstp_clks[HWBLK_SCIF0]),
+ CLKDEV_DEV_ID("sh-sci.1", &mstp_clks[HWBLK_SCIF1]),
+ CLKDEV_DEV_ID("sh-sci.2", &mstp_clks[HWBLK_SCIF2]),
+ CLKDEV_DEV_ID("sh-sci.3", &mstp_clks[HWBLK_SCIF3]),
+ CLKDEV_DEV_ID("sh-sci.4", &mstp_clks[HWBLK_SCIF4]),
+ CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[HWBLK_SCIF5]),
+
+ CLKDEV_DEV_ID("spi_sh_msiof.0", &mstp_clks[HWBLK_MSIOF0]),
+ CLKDEV_DEV_ID("spi_sh_msiof.1", &mstp_clks[HWBLK_MSIOF1]),
+ CLKDEV_DEV_ID("sh_keysc.0", &mstp_clks[HWBLK_KEYSC]),
+ CLKDEV_CON_ID("rtc0", &mstp_clks[HWBLK_RTC]),
+ CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[HWBLK_IIC0]),
+ CLKDEV_DEV_ID("i2c-sh_mobile.1", &mstp_clks[HWBLK_IIC1]),
+ CLKDEV_DEV_ID("sh_mmcif.0", &mstp_clks[HWBLK_MMC]),
+ CLKDEV_DEV_ID("sh-eth.0", &mstp_clks[HWBLK_ETHER]),
+ CLKDEV_CON_ID("atapi0", &mstp_clks[HWBLK_ATAPI]),
+ CLKDEV_CON_ID("tpu0", &mstp_clks[HWBLK_TPU]),
+ CLKDEV_CON_ID("irda0", &mstp_clks[HWBLK_IRDA]),
+ CLKDEV_CON_ID("tsif0", &mstp_clks[HWBLK_TSIF]),
+ CLKDEV_CON_ID("usb1", &mstp_clks[HWBLK_USB1]),
+ CLKDEV_CON_ID("usb0", &mstp_clks[HWBLK_USB0]),
+ CLKDEV_CON_ID("2dg0", &mstp_clks[HWBLK_2DG]),
+ CLKDEV_DEV_ID("sh_mobile_sdhi.0", &mstp_clks[HWBLK_SDHI0]),
+ CLKDEV_DEV_ID("sh_mobile_sdhi.1", &mstp_clks[HWBLK_SDHI1]),
+ CLKDEV_CON_ID("veu1", &mstp_clks[HWBLK_VEU1]),
+ CLKDEV_DEV_ID("sh_mobile_ceu.1", &mstp_clks[HWBLK_CEU1]),
+ CLKDEV_CON_ID("beu1", &mstp_clks[HWBLK_BEU1]),
+ CLKDEV_CON_ID("2ddmac0", &mstp_clks[HWBLK_2DDMAC]),
+ CLKDEV_DEV_ID("sh_fsi.0", &mstp_clks[HWBLK_SPU]),
+ CLKDEV_CON_ID("jpu0", &mstp_clks[HWBLK_JPU]),
+ CLKDEV_DEV_ID("sh-vou.0", &mstp_clks[HWBLK_VOU]),
+ CLKDEV_CON_ID("beu0", &mstp_clks[HWBLK_BEU0]),
+ CLKDEV_DEV_ID("sh_mobile_ceu.0", &mstp_clks[HWBLK_CEU0]),
+ CLKDEV_CON_ID("veu0", &mstp_clks[HWBLK_VEU0]),
+ CLKDEV_CON_ID("vpu0", &mstp_clks[HWBLK_VPU]),
+ CLKDEV_DEV_ID("sh_mobile_lcdc_fb.0", &mstp_clks[HWBLK_LCDC]),
+};
+
+int __init arch_clk_init(void)
+{
+ int k, ret = 0;
+
+ /* autodetect extal or fll configuration */
+ if (__raw_readl(PLLCR) & 0x1000)
+ pll_clk.parent = &fll_clk;
+ else
+ pll_clk.parent = &extal_clk;
+
+ for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++)
+ ret = clk_register(main_clks[k]);
+
+ clkdev_add_table(lookups, ARRAY_SIZE(lookups));
+
+ if (!ret)
+ ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table);
+
+ if (!ret)
+ ret = sh_clk_div6_reparent_register(div6_clks, DIV6_NR);
+
+ if (!ret)
+ ret = sh_clk_mstp32_register(mstp_clks, HWBLK_NR);
+
+ return ret;
+}
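Finally, an illustrative note on div4_kick() above (not part of the patch): the DIV4 set_rate path in the CPG helpers is expected to call the div4_table.kick hook after rewriting the divider field, so a plain clk_set_rate() from board code is enough to latch the new value. A hedged sketch:

	#include <linux/clk.h>
	#include <linux/err.h>

	/* Sketch only: retune the peripheral clock; div4_kick() sets the KICK bit */
	static int example_set_peripheral_rate(unsigned long hz)
	{
		struct clk *clk = clk_get(NULL, "peripheral_clk");
		int ret;

		if (IS_ERR(clk))
			return PTR_ERR(clk);

		ret = clk_set_rate(clk, hz);
		clk_put(clk);

		return ret;
	}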
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7757.c b/arch/sh/kernel/cpu/sh4a/clock-sh7757.c
new file mode 100644
index 00000000..04ab5aea
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7757.c
@@ -0,0 +1,155 @@
+/*
+ * arch/sh/kernel/cpu/sh4a/clock-sh7757.c
+ *
+ * SH7757 support for the clock framework
+ *
+ * Copyright (C) 2009-2010 Renesas Solutions Corp.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/clkdev.h>
+#include <asm/clock.h>
+#include <asm/freq.h>
+
+/*
+ * Default rate for the root input clock; reset this with clk_set_rate()
+ * from the platform code.
+ */
+static struct clk extal_clk = {
+ .rate = 48000000,
+};
+
+static unsigned long pll_recalc(struct clk *clk)
+{
+ int multiplier;
+
+ multiplier = test_mode_pin(MODE_PIN0) ? 24 : 16;
+
+ return clk->parent->rate * multiplier;
+}
+
+static struct sh_clk_ops pll_clk_ops = {
+ .recalc = pll_recalc,
+};
+
+static struct clk pll_clk = {
+ .ops = &pll_clk_ops,
+ .parent = &extal_clk,
+ .flags = CLK_ENABLE_ON_INIT,
+};
+
+static struct clk *clks[] = {
+ &extal_clk,
+ &pll_clk,
+};
+
+static unsigned int div2[] = { 1, 1, 2, 1, 1, 4, 1, 6,
+ 1, 1, 1, 16, 1, 24, 1, 1 };
+
+static struct clk_div_mult_table div4_div_mult_table = {
+ .divisors = div2,
+ .nr_divisors = ARRAY_SIZE(div2),
+};
+
+static struct clk_div4_table div4_table = {
+ .div_mult_table = &div4_div_mult_table,
+};
+
+enum { DIV4_I, DIV4_SH, DIV4_P, DIV4_NR };
+
+#define DIV4(_bit, _mask, _flags) \
+ SH_CLK_DIV4(&pll_clk, FRQCR, _bit, _mask, _flags)
+
+struct clk div4_clks[DIV4_NR] = {
+ /*
+	 * The P clock is always enabled, because some P clock modules are
+	 * used by the host PC.
+ */
+ [DIV4_P] = DIV4(0, 0x2800, CLK_ENABLE_ON_INIT),
+ [DIV4_SH] = DIV4(12, 0x00a0, CLK_ENABLE_ON_INIT),
+ [DIV4_I] = DIV4(20, 0x0004, CLK_ENABLE_ON_INIT),
+};
+
+#define MSTPCR0 0xffc80030
+#define MSTPCR1 0xffc80034
+#define MSTPCR2 0xffc10028
+
+enum { MSTP004, MSTP000, MSTP127, MSTP114, MSTP113, MSTP112,
+ MSTP111, MSTP110, MSTP103, MSTP102, MSTP220,
+ MSTP_NR };
+
+static struct clk mstp_clks[MSTP_NR] = {
+ /* MSTPCR0 */
+ [MSTP004] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 4, 0),
+ [MSTP000] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 0, 0),
+
+ /* MSTPCR1 */
+ [MSTP127] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 27, 0),
+ [MSTP114] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 14, 0),
+ [MSTP113] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 13, 0),
+ [MSTP112] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 12, 0),
+ [MSTP111] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 11, 0),
+ [MSTP110] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 10, 0),
+ [MSTP103] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 3, 0),
+ [MSTP102] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 2, 0),
+
+ /* MSTPCR2 */
+ [MSTP220] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR2, 20, 0),
+};
+
+static struct clk_lookup lookups[] = {
+ /* main clocks */
+ CLKDEV_CON_ID("extal", &extal_clk),
+ CLKDEV_CON_ID("pll_clk", &pll_clk),
+
+ /* DIV4 clocks */
+ CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]),
+ CLKDEV_CON_ID("shyway_clk", &div4_clks[DIV4_SH]),
+ CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]),
+
+ /* MSTP32 clocks */
+ CLKDEV_DEV_ID("sh_mobile_sdhi.0", &mstp_clks[MSTP004]),
+ CLKDEV_CON_ID("riic0", &mstp_clks[MSTP000]),
+ CLKDEV_CON_ID("riic1", &mstp_clks[MSTP000]),
+ CLKDEV_CON_ID("riic2", &mstp_clks[MSTP000]),
+ CLKDEV_CON_ID("riic3", &mstp_clks[MSTP000]),
+ CLKDEV_CON_ID("riic4", &mstp_clks[MSTP000]),
+ CLKDEV_CON_ID("riic5", &mstp_clks[MSTP000]),
+ CLKDEV_CON_ID("riic6", &mstp_clks[MSTP000]),
+ CLKDEV_CON_ID("riic7", &mstp_clks[MSTP000]),
+
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.0", &mstp_clks[MSTP113]),
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.1", &mstp_clks[MSTP114]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.2", &mstp_clks[MSTP112]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.1", &mstp_clks[MSTP111]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.0", &mstp_clks[MSTP110]),
+
+ CLKDEV_CON_ID("usb_fck", &mstp_clks[MSTP103]),
+ CLKDEV_DEV_ID("renesas_usbhs.0", &mstp_clks[MSTP102]),
+ CLKDEV_CON_ID("mmc0", &mstp_clks[MSTP220]),
+ CLKDEV_CON_ID("rspi2", &mstp_clks[MSTP127]),
+};
+
+int __init arch_clk_init(void)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < ARRAY_SIZE(clks); i++)
+ ret |= clk_register(clks[i]);
+ for (i = 0; i < ARRAY_SIZE(lookups); i++)
+ clkdev_add(&lookups[i]);
+
+ if (!ret)
+ ret = sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks),
+ &div4_table);
+ if (!ret)
+ ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR);
+
+ return ret;
+}
+
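
As the comment at the top of this file says, the 48 MHz extal_clk rate above is only a default; platform code is expected to reset it with clk_set_rate() when the board uses a different input clock. A minimal sketch of such board code (hypothetical board, illustrative rate):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/init.h>

static void __init example_board_clk_init(void)
{
	/* "extal" resolves through the CLKDEV_CON_ID("extal", ...) lookup */
	struct clk *clk = clk_get(NULL, "extal");

	if (!IS_ERR(clk)) {
		clk_set_rate(clk, 33333333);	/* hypothetical 33.33 MHz crystal */
		clk_put(clk);
	}
}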
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7763.c b/arch/sh/kernel/cpu/sh4a/clock-sh7763.c
new file mode 100644
index 00000000..7707e35a
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7763.c
@@ -0,0 +1,119 @@
+/*
+ * arch/sh/kernel/cpu/sh4a/clock-sh7763.c
+ *
+ * SH7763 support for the clock framework
+ *
+ * Copyright (C) 2005 Paul Mundt
+ * Copyright (C) 2007 Yoshihiro Shimoda
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/clkdev.h>
+#include <asm/clock.h>
+#include <asm/freq.h>
+#include <asm/io.h>
+
+static int bfc_divisors[] = { 1, 1, 1, 8, 1, 1, 1, 1 };
+static int p0fc_divisors[] = { 1, 1, 1, 8, 1, 1, 1, 1 };
+static int cfc_divisors[] = { 1, 1, 4, 1, 1, 1, 1, 1 };
+
+static void master_clk_init(struct clk *clk)
+{
+ clk->rate *= p0fc_divisors[(__raw_readl(FRQCR) >> 4) & 0x07];
+}
+
+static struct sh_clk_ops sh7763_master_clk_ops = {
+ .init = master_clk_init,
+};
+
+static unsigned long module_clk_recalc(struct clk *clk)
+{
+ int idx = ((__raw_readl(FRQCR) >> 4) & 0x07);
+ return clk->parent->rate / p0fc_divisors[idx];
+}
+
+static struct sh_clk_ops sh7763_module_clk_ops = {
+ .recalc = module_clk_recalc,
+};
+
+static unsigned long bus_clk_recalc(struct clk *clk)
+{
+ int idx = ((__raw_readl(FRQCR) >> 16) & 0x07);
+ return clk->parent->rate / bfc_divisors[idx];
+}
+
+static struct sh_clk_ops sh7763_bus_clk_ops = {
+ .recalc = bus_clk_recalc,
+};
+
+static struct sh_clk_ops sh7763_cpu_clk_ops = {
+ .recalc = followparent_recalc,
+};
+
+static struct sh_clk_ops *sh7763_clk_ops[] = {
+ &sh7763_master_clk_ops,
+ &sh7763_module_clk_ops,
+ &sh7763_bus_clk_ops,
+ &sh7763_cpu_clk_ops,
+};
+
+void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
+{
+ if (idx < ARRAY_SIZE(sh7763_clk_ops))
+ *ops = sh7763_clk_ops[idx];
+}
+
+static unsigned long shyway_clk_recalc(struct clk *clk)
+{
+ int idx = ((__raw_readl(FRQCR) >> 20) & 0x07);
+ return clk->parent->rate / cfc_divisors[idx];
+}
+
+static struct sh_clk_ops sh7763_shyway_clk_ops = {
+ .recalc = shyway_clk_recalc,
+};
+
+static struct clk sh7763_shyway_clk = {
+ .flags = CLK_ENABLE_ON_INIT,
+ .ops = &sh7763_shyway_clk_ops,
+};
+
+/*
+ * Additional SH7763-specific on-chip clocks that aren't already part of the
+ * clock framework
+ */
+static struct clk *sh7763_onchip_clocks[] = {
+ &sh7763_shyway_clk,
+};
+
+static struct clk_lookup lookups[] = {
+ /* main clocks */
+ CLKDEV_CON_ID("shyway_clk", &sh7763_shyway_clk),
+};
+
+int __init arch_clk_init(void)
+{
+ struct clk *clk;
+ int i, ret = 0;
+
+ cpg_clk_init();
+
+ clk = clk_get(NULL, "master_clk");
+ for (i = 0; i < ARRAY_SIZE(sh7763_onchip_clocks); i++) {
+ struct clk *clkp = sh7763_onchip_clocks[i];
+
+ clkp->parent = clk;
+ ret |= clk_register(clkp);
+ }
+
+ clk_put(clk);
+
+ clkdev_add_table(lookups, ARRAY_SIZE(lookups));
+
+ return ret;
+}
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7770.c b/arch/sh/kernel/cpu/sh4a/clock-sh7770.c
new file mode 100644
index 00000000..5d36f334
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7770.c
@@ -0,0 +1,73 @@
+/*
+ * arch/sh/kernel/cpu/sh4a/clock-sh7770.c
+ *
+ * SH7770 support for the clock framework
+ *
+ * Copyright (C) 2005 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <asm/clock.h>
+#include <asm/freq.h>
+#include <asm/io.h>
+
+static int ifc_divisors[] = { 1, 1, 1, 1, 1, 1, 1, 1 };
+static int bfc_divisors[] = { 1, 1, 1, 1, 1, 8, 12, 1 };
+static int pfc_divisors[] = { 1, 8, 1, 10, 12, 16, 1, 1 };
+
+static void master_clk_init(struct clk *clk)
+{
+ clk->rate *= pfc_divisors[(__raw_readl(FRQCR) >> 28) & 0x000f];
+}
+
+static struct sh_clk_ops sh7770_master_clk_ops = {
+ .init = master_clk_init,
+};
+
+static unsigned long module_clk_recalc(struct clk *clk)
+{
+ int idx = ((__raw_readl(FRQCR) >> 28) & 0x000f);
+ return clk->parent->rate / pfc_divisors[idx];
+}
+
+static struct sh_clk_ops sh7770_module_clk_ops = {
+ .recalc = module_clk_recalc,
+};
+
+static unsigned long bus_clk_recalc(struct clk *clk)
+{
+ int idx = (__raw_readl(FRQCR) & 0x000f);
+ return clk->parent->rate / bfc_divisors[idx];
+}
+
+static struct sh_clk_ops sh7770_bus_clk_ops = {
+ .recalc = bus_clk_recalc,
+};
+
+static unsigned long cpu_clk_recalc(struct clk *clk)
+{
+ int idx = ((__raw_readl(FRQCR) >> 24) & 0x000f);
+ return clk->parent->rate / ifc_divisors[idx];
+}
+
+static struct sh_clk_ops sh7770_cpu_clk_ops = {
+ .recalc = cpu_clk_recalc,
+};
+
+static struct sh_clk_ops *sh7770_clk_ops[] = {
+ &sh7770_master_clk_ops,
+ &sh7770_module_clk_ops,
+ &sh7770_bus_clk_ops,
+ &sh7770_cpu_clk_ops,
+};
+
+void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
+{
+ if (idx < ARRAY_SIZE(sh7770_clk_ops))
+ *ops = sh7770_clk_ops[idx];
+}
+
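This file is a legacy-style CPG implementation: it only installs recalc ops, and the clock objects themselves come from the common CPG core, which calls arch_init_clk_ops() with indices 0..3 for the master, module, bus and CPU clocks, matching the order of sh7770_clk_ops[]. A rough, paraphrased sketch of that dispatch (the real code lives in arch/sh/kernel/cpu/clock-cpg.c; the names below are illustrative, not a verbatim copy):

#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/clock.h>

static struct clk master_clk, peripheral_clk, bus_clk, cpu_clk;	/* CPG core */

static struct clk *onchip_clocks[] = {
	&master_clk,		/* idx 0 -> sh7770_master_clk_ops */
	&peripheral_clk,	/* idx 1 -> sh7770_module_clk_ops */
	&bus_clk,		/* idx 2 -> sh7770_bus_clk_ops */
	&cpu_clk,		/* idx 3 -> sh7770_cpu_clk_ops */
};

static int __init cpg_clk_init_sketch(void)
{
	int i, ret = 0;

	for (i = 0; i < ARRAY_SIZE(onchip_clocks); i++) {
		struct clk *clk = onchip_clocks[i];

		arch_init_clk_ops(&clk->ops, i);
		if (clk->ops)
			ret |= clk_register(clk);
	}
	return ret;
}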
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7780.c b/arch/sh/kernel/cpu/sh4a/clock-sh7780.c
new file mode 100644
index 00000000..793dae42
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7780.c
@@ -0,0 +1,125 @@
+/*
+ * arch/sh/kernel/cpu/sh4a/clock-sh7780.c
+ *
+ * SH7780 support for the clock framework
+ *
+ * Copyright (C) 2005 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/clkdev.h>
+#include <asm/clock.h>
+#include <asm/freq.h>
+#include <asm/io.h>
+
+static int ifc_divisors[] = { 2, 4 };
+static int bfc_divisors[] = { 1, 1, 1, 8, 12, 16, 24, 1 };
+static int pfc_divisors[] = { 1, 24, 24, 1 };
+static int cfc_divisors[] = { 1, 1, 4, 1, 6, 1, 1, 1 };
+
+static void master_clk_init(struct clk *clk)
+{
+ clk->rate *= pfc_divisors[__raw_readl(FRQCR) & 0x0003];
+}
+
+static struct sh_clk_ops sh7780_master_clk_ops = {
+ .init = master_clk_init,
+};
+
+static unsigned long module_clk_recalc(struct clk *clk)
+{
+ int idx = (__raw_readl(FRQCR) & 0x0003);
+ return clk->parent->rate / pfc_divisors[idx];
+}
+
+static struct sh_clk_ops sh7780_module_clk_ops = {
+ .recalc = module_clk_recalc,
+};
+
+static unsigned long bus_clk_recalc(struct clk *clk)
+{
+ int idx = ((__raw_readl(FRQCR) >> 16) & 0x0007);
+ return clk->parent->rate / bfc_divisors[idx];
+}
+
+static struct sh_clk_ops sh7780_bus_clk_ops = {
+ .recalc = bus_clk_recalc,
+};
+
+static unsigned long cpu_clk_recalc(struct clk *clk)
+{
+ int idx = ((__raw_readl(FRQCR) >> 24) & 0x0001);
+ return clk->parent->rate / ifc_divisors[idx];
+}
+
+static struct sh_clk_ops sh7780_cpu_clk_ops = {
+ .recalc = cpu_clk_recalc,
+};
+
+static struct sh_clk_ops *sh7780_clk_ops[] = {
+ &sh7780_master_clk_ops,
+ &sh7780_module_clk_ops,
+ &sh7780_bus_clk_ops,
+ &sh7780_cpu_clk_ops,
+};
+
+void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
+{
+ if (idx < ARRAY_SIZE(sh7780_clk_ops))
+ *ops = sh7780_clk_ops[idx];
+}
+
+static unsigned long shyway_clk_recalc(struct clk *clk)
+{
+ int idx = ((__raw_readl(FRQCR) >> 20) & 0x0007);
+ return clk->parent->rate / cfc_divisors[idx];
+}
+
+static struct sh_clk_ops sh7780_shyway_clk_ops = {
+ .recalc = shyway_clk_recalc,
+};
+
+static struct clk sh7780_shyway_clk = {
+ .flags = CLK_ENABLE_ON_INIT,
+ .ops = &sh7780_shyway_clk_ops,
+};
+
+/*
+ * Additional SH7780-specific on-chip clocks that aren't already part of the
+ * clock framework
+ */
+static struct clk *sh7780_onchip_clocks[] = {
+ &sh7780_shyway_clk,
+};
+
+static struct clk_lookup lookups[] = {
+ /* main clocks */
+ CLKDEV_CON_ID("shyway_clk", &sh7780_shyway_clk),
+};
+
+int __init arch_clk_init(void)
+{
+ struct clk *clk;
+ int i, ret = 0;
+
+ cpg_clk_init();
+
+ clk = clk_get(NULL, "master_clk");
+ for (i = 0; i < ARRAY_SIZE(sh7780_onchip_clocks); i++) {
+ struct clk *clkp = sh7780_onchip_clocks[i];
+
+ clkp->parent = clk;
+ ret |= clk_register(clkp);
+ }
+
+ clk_put(clk);
+
+ clkdev_add_table(lookups, ARRAY_SIZE(lookups));
+
+ return ret;
+}
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7785.c b/arch/sh/kernel/cpu/sh4a/clock-sh7785.c
new file mode 100644
index 00000000..ab1c58f2
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7785.c
@@ -0,0 +1,181 @@
+/*
+ * arch/sh/kernel/cpu/sh4a/clock-sh7785.c
+ *
+ * SH7785 support for the clock framework
+ *
+ * Copyright (C) 2007 - 2010 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/cpufreq.h>
+#include <linux/clkdev.h>
+#include <asm/clock.h>
+#include <asm/freq.h>
+#include <cpu/sh7785.h>
+
+/*
+ * Default rate for the root input clock; reset this with clk_set_rate()
+ * from the platform code.
+ */
+static struct clk extal_clk = {
+ .rate = 33333333,
+};
+
+static unsigned long pll_recalc(struct clk *clk)
+{
+ int multiplier;
+
+ multiplier = test_mode_pin(MODE_PIN4) ? 36 : 72;
+
+ return clk->parent->rate * multiplier;
+}
+
+static struct sh_clk_ops pll_clk_ops = {
+ .recalc = pll_recalc,
+};
+
+static struct clk pll_clk = {
+ .ops = &pll_clk_ops,
+ .parent = &extal_clk,
+ .flags = CLK_ENABLE_ON_INIT,
+};
+
+static struct clk *clks[] = {
+ &extal_clk,
+ &pll_clk,
+};
+
+static unsigned int div2[] = { 1, 2, 4, 6, 8, 12, 16, 18,
+ 24, 32, 36, 48 };
+
+static struct clk_div_mult_table div4_div_mult_table = {
+ .divisors = div2,
+ .nr_divisors = ARRAY_SIZE(div2),
+};
+
+static struct clk_div4_table div4_table = {
+ .div_mult_table = &div4_div_mult_table,
+};
+
+enum { DIV4_I, DIV4_U, DIV4_SH, DIV4_B, DIV4_DDR, DIV4_GA,
+ DIV4_DU, DIV4_P, DIV4_NR };
+
+#define DIV4(_bit, _mask, _flags) \
+ SH_CLK_DIV4(&pll_clk, FRQMR1, _bit, _mask, _flags)
+
+struct clk div4_clks[DIV4_NR] = {
+ [DIV4_P] = DIV4(0, 0x0f80, 0),
+ [DIV4_DU] = DIV4(4, 0x0ff0, 0),
+ [DIV4_GA] = DIV4(8, 0x0030, 0),
+ [DIV4_DDR] = DIV4(12, 0x000c, CLK_ENABLE_ON_INIT),
+ [DIV4_B] = DIV4(16, 0x0fe0, CLK_ENABLE_ON_INIT),
+ [DIV4_SH] = DIV4(20, 0x000c, CLK_ENABLE_ON_INIT),
+ [DIV4_U] = DIV4(24, 0x000c, CLK_ENABLE_ON_INIT),
+ [DIV4_I] = DIV4(28, 0x000e, CLK_ENABLE_ON_INIT),
+};
+
+#define MSTPCR0 0xffc80030
+#define MSTPCR1 0xffc80034
+
+enum { MSTP029, MSTP028, MSTP027, MSTP026, MSTP025, MSTP024,
+ MSTP021, MSTP020, MSTP017, MSTP016,
+ MSTP013, MSTP012, MSTP009, MSTP008, MSTP003, MSTP002,
+ MSTP119, MSTP117, MSTP105, MSTP104, MSTP100,
+ MSTP_NR };
+
+static struct clk mstp_clks[MSTP_NR] = {
+ /* MSTPCR0 */
+ [MSTP029] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 29, 0),
+ [MSTP028] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 28, 0),
+ [MSTP027] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 27, 0),
+ [MSTP026] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 26, 0),
+ [MSTP025] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 25, 0),
+ [MSTP024] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 24, 0),
+ [MSTP021] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 21, 0),
+ [MSTP020] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 20, 0),
+ [MSTP017] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 17, 0),
+ [MSTP016] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 16, 0),
+ [MSTP013] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 13, 0),
+ [MSTP012] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 12, 0),
+ [MSTP009] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 9, 0),
+ [MSTP008] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 8, 0),
+ [MSTP003] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 3, 0),
+ [MSTP002] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 2, 0),
+
+ /* MSTPCR1 */
+ [MSTP119] = SH_CLK_MSTP32(NULL, MSTPCR1, 19, 0),
+ [MSTP117] = SH_CLK_MSTP32(NULL, MSTPCR1, 17, 0),
+ [MSTP105] = SH_CLK_MSTP32(NULL, MSTPCR1, 5, 0),
+ [MSTP104] = SH_CLK_MSTP32(NULL, MSTPCR1, 4, 0),
+ [MSTP100] = SH_CLK_MSTP32(NULL, MSTPCR1, 0, 0),
+};
+
+static struct clk_lookup lookups[] = {
+ /* main clocks */
+ CLKDEV_CON_ID("extal", &extal_clk),
+ CLKDEV_CON_ID("pll_clk", &pll_clk),
+
+ /* DIV4 clocks */
+ CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]),
+ CLKDEV_CON_ID("du_clk", &div4_clks[DIV4_DU]),
+ CLKDEV_CON_ID("ga_clk", &div4_clks[DIV4_GA]),
+ CLKDEV_CON_ID("ddr_clk", &div4_clks[DIV4_DDR]),
+ CLKDEV_CON_ID("bus_clk", &div4_clks[DIV4_B]),
+ CLKDEV_CON_ID("shyway_clk", &div4_clks[DIV4_SH]),
+ CLKDEV_CON_ID("umem_clk", &div4_clks[DIV4_U]),
+ CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]),
+
+ /* MSTP32 clocks */
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.5", &mstp_clks[MSTP029]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.4", &mstp_clks[MSTP028]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.3", &mstp_clks[MSTP027]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.2", &mstp_clks[MSTP026]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.1", &mstp_clks[MSTP025]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.0", &mstp_clks[MSTP024]),
+
+ CLKDEV_CON_ID("ssi1_fck", &mstp_clks[MSTP021]),
+ CLKDEV_CON_ID("ssi0_fck", &mstp_clks[MSTP020]),
+ CLKDEV_CON_ID("hac1_fck", &mstp_clks[MSTP017]),
+ CLKDEV_CON_ID("hac0_fck", &mstp_clks[MSTP016]),
+ CLKDEV_CON_ID("mmcif_fck", &mstp_clks[MSTP013]),
+ CLKDEV_CON_ID("flctl_fck", &mstp_clks[MSTP012]),
+
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.0", &mstp_clks[MSTP008]),
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.1", &mstp_clks[MSTP008]),
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.2", &mstp_clks[MSTP008]),
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.3", &mstp_clks[MSTP009]),
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.4", &mstp_clks[MSTP009]),
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.5", &mstp_clks[MSTP009]),
+
+ CLKDEV_CON_ID("siof_fck", &mstp_clks[MSTP003]),
+ CLKDEV_CON_ID("hspi_fck", &mstp_clks[MSTP002]),
+ CLKDEV_CON_ID("hudi_fck", &mstp_clks[MSTP119]),
+ CLKDEV_CON_ID("ubc0", &mstp_clks[MSTP117]),
+ CLKDEV_CON_ID("dmac_11_6_fck", &mstp_clks[MSTP105]),
+ CLKDEV_CON_ID("dmac_5_0_fck", &mstp_clks[MSTP104]),
+ CLKDEV_CON_ID("gdta_fck", &mstp_clks[MSTP100]),
+};
+
+int __init arch_clk_init(void)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < ARRAY_SIZE(clks); i++)
+ ret |= clk_register(clks[i]);
+ for (i = 0; i < ARRAY_SIZE(lookups); i++)
+ clkdev_add(&lookups[i]);
+
+ if (!ret)
+ ret = sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks),
+ &div4_table);
+ if (!ret)
+ ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR);
+
+ return ret;
+}
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7786.c b/arch/sh/kernel/cpu/sh4a/clock-sh7786.c
new file mode 100644
index 00000000..49170948
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7786.c
@@ -0,0 +1,200 @@
+/*
+ * arch/sh/kernel/cpu/sh4a/clock-sh7786.c
+ *
+ * SH7786 support for the clock framework
+ *
+ * Copyright (C) 2010 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/clkdev.h>
+#include <asm/clock.h>
+#include <asm/freq.h>
+
+/*
+ * Default rate for the root input clock; reset this with clk_set_rate()
+ * from the platform code.
+ */
+static struct clk extal_clk = {
+ .rate = 33333333,
+};
+
+static unsigned long pll_recalc(struct clk *clk)
+{
+ int multiplier;
+
+ /*
+ * Clock modes 0, 1, and 2 use an x64 multiplier against PLL1,
+ * while modes 3, 4, and 5 use an x32.
+ */
+ multiplier = (sh_mv.mv_mode_pins() & 0xf) < 3 ? 64 : 32;
+
+ return clk->parent->rate * multiplier;
+}
+
+static struct sh_clk_ops pll_clk_ops = {
+ .recalc = pll_recalc,
+};
+
+static struct clk pll_clk = {
+ .ops = &pll_clk_ops,
+ .parent = &extal_clk,
+ .flags = CLK_ENABLE_ON_INIT,
+};
+
+static struct clk *clks[] = {
+ &extal_clk,
+ &pll_clk,
+};
+
+static unsigned int div2[] = { 1, 2, 4, 6, 8, 12, 16, 18,
+ 24, 32, 36, 48 };
+
+static struct clk_div_mult_table div4_div_mult_table = {
+ .divisors = div2,
+ .nr_divisors = ARRAY_SIZE(div2),
+};
+
+static struct clk_div4_table div4_table = {
+ .div_mult_table = &div4_div_mult_table,
+};
+
+enum { DIV4_I, DIV4_SH, DIV4_B, DIV4_DDR, DIV4_DU, DIV4_P, DIV4_NR };
+
+#define DIV4(_bit, _mask, _flags) \
+ SH_CLK_DIV4(&pll_clk, FRQMR1, _bit, _mask, _flags)
+
+struct clk div4_clks[DIV4_NR] = {
+ [DIV4_P] = DIV4(0, 0x0b40, 0),
+ [DIV4_DU] = DIV4(4, 0x0010, 0),
+ [DIV4_DDR] = DIV4(12, 0x0002, CLK_ENABLE_ON_INIT),
+ [DIV4_B] = DIV4(16, 0x0360, CLK_ENABLE_ON_INIT),
+ [DIV4_SH] = DIV4(20, 0x0002, CLK_ENABLE_ON_INIT),
+ [DIV4_I] = DIV4(28, 0x0006, CLK_ENABLE_ON_INIT),
+};
+
+#define MSTPCR0 0xffc40030
+#define MSTPCR1 0xffc40034
+
+enum { MSTP029, MSTP028, MSTP027, MSTP026, MSTP025, MSTP024,
+ MSTP023, MSTP022, MSTP021, MSTP020, MSTP017, MSTP016,
+ MSTP015, MSTP014, MSTP011, MSTP010, MSTP009, MSTP008,
+ MSTP005, MSTP004, MSTP002,
+ MSTP112, MSTP110, MSTP109, MSTP108,
+ MSTP105, MSTP104, MSTP103, MSTP102,
+ MSTP_NR };
+
+static struct clk mstp_clks[MSTP_NR] = {
+ /* MSTPCR0 */
+ [MSTP029] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 29, 0),
+ [MSTP028] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 28, 0),
+ [MSTP027] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 27, 0),
+ [MSTP026] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 26, 0),
+ [MSTP025] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 25, 0),
+ [MSTP024] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 24, 0),
+ [MSTP023] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 23, 0),
+ [MSTP022] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 22, 0),
+ [MSTP021] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 21, 0),
+ [MSTP020] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 20, 0),
+ [MSTP017] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 17, 0),
+ [MSTP016] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 16, 0),
+ [MSTP015] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 15, 0),
+ [MSTP014] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 14, 0),
+ [MSTP011] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 11, 0),
+ [MSTP010] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 10, 0),
+ [MSTP009] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 9, 0),
+ [MSTP008] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 8, 0),
+ [MSTP005] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 5, 0),
+ [MSTP004] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 4, 0),
+ [MSTP002] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 2, 0),
+
+ /* MSTPCR1 */
+ [MSTP112] = SH_CLK_MSTP32(NULL, MSTPCR1, 12, 0),
+ [MSTP110] = SH_CLK_MSTP32(NULL, MSTPCR1, 10, 0),
+ [MSTP109] = SH_CLK_MSTP32(NULL, MSTPCR1, 9, 0),
+ [MSTP108] = SH_CLK_MSTP32(NULL, MSTPCR1, 8, 0),
+ [MSTP105] = SH_CLK_MSTP32(NULL, MSTPCR1, 5, 0),
+ [MSTP104] = SH_CLK_MSTP32(NULL, MSTPCR1, 4, 0),
+ [MSTP103] = SH_CLK_MSTP32(NULL, MSTPCR1, 3, 0),
+ [MSTP102] = SH_CLK_MSTP32(NULL, MSTPCR1, 2, 0),
+};
+
+static struct clk_lookup lookups[] = {
+ /* main clocks */
+ CLKDEV_CON_ID("extal", &extal_clk),
+ CLKDEV_CON_ID("pll_clk", &pll_clk),
+
+ /* DIV4 clocks */
+ CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]),
+ CLKDEV_CON_ID("du_clk", &div4_clks[DIV4_DU]),
+ CLKDEV_CON_ID("ddr_clk", &div4_clks[DIV4_DDR]),
+ CLKDEV_CON_ID("bus_clk", &div4_clks[DIV4_B]),
+ CLKDEV_CON_ID("shyway_clk", &div4_clks[DIV4_SH]),
+ CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]),
+
+ /* MSTP32 clocks */
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.5", &mstp_clks[MSTP029]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.4", &mstp_clks[MSTP028]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.3", &mstp_clks[MSTP027]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.2", &mstp_clks[MSTP026]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.1", &mstp_clks[MSTP025]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.0", &mstp_clks[MSTP024]),
+
+ CLKDEV_CON_ID("ssi3_fck", &mstp_clks[MSTP023]),
+ CLKDEV_CON_ID("ssi2_fck", &mstp_clks[MSTP022]),
+ CLKDEV_CON_ID("ssi1_fck", &mstp_clks[MSTP021]),
+ CLKDEV_CON_ID("ssi0_fck", &mstp_clks[MSTP020]),
+ CLKDEV_CON_ID("hac1_fck", &mstp_clks[MSTP017]),
+ CLKDEV_CON_ID("hac0_fck", &mstp_clks[MSTP016]),
+ CLKDEV_CON_ID("i2c1_fck", &mstp_clks[MSTP015]),
+ CLKDEV_CON_ID("i2c0_fck", &mstp_clks[MSTP014]),
+
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.0", &mstp_clks[MSTP008]),
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.1", &mstp_clks[MSTP008]),
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.2", &mstp_clks[MSTP008]),
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.3", &mstp_clks[MSTP009]),
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.4", &mstp_clks[MSTP009]),
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.5", &mstp_clks[MSTP009]),
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.6", &mstp_clks[MSTP010]),
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.7", &mstp_clks[MSTP010]),
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.8", &mstp_clks[MSTP010]),
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.9", &mstp_clks[MSTP011]),
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.10", &mstp_clks[MSTP011]),
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.11", &mstp_clks[MSTP011]),
+
+ CLKDEV_CON_ID("sdif1_fck", &mstp_clks[MSTP005]),
+ CLKDEV_CON_ID("sdif0_fck", &mstp_clks[MSTP004]),
+ CLKDEV_CON_ID("hspi_fck", &mstp_clks[MSTP002]),
+ CLKDEV_CON_ID("usb_fck", &mstp_clks[MSTP112]),
+ CLKDEV_CON_ID("pcie2_fck", &mstp_clks[MSTP110]),
+ CLKDEV_CON_ID("pcie1_fck", &mstp_clks[MSTP109]),
+ CLKDEV_CON_ID("pcie0_fck", &mstp_clks[MSTP108]),
+ CLKDEV_CON_ID("dmac_11_6_fck", &mstp_clks[MSTP105]),
+ CLKDEV_CON_ID("dmac_5_0_fck", &mstp_clks[MSTP104]),
+ CLKDEV_CON_ID("du_fck", &mstp_clks[MSTP103]),
+ CLKDEV_CON_ID("ether_fck", &mstp_clks[MSTP102]),
+};
+
+int __init arch_clk_init(void)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < ARRAY_SIZE(clks); i++)
+ ret |= clk_register(clks[i]);
+ for (i = 0; i < ARRAY_SIZE(lookups); i++)
+ clkdev_add(&lookups[i]);
+
+ if (!ret)
+ ret = sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks),
+ &div4_table);
+ if (!ret)
+ ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR);
+
+ return ret;
+}
diff --git a/arch/sh/kernel/cpu/sh4a/clock-shx3.c b/arch/sh/kernel/cpu/sh4a/clock-shx3.c
new file mode 100644
index 00000000..0f11b392
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/clock-shx3.c
@@ -0,0 +1,155 @@
+/*
+ * arch/sh/kernel/cpu/sh4a/clock-shx3.c
+ *
+ * SH-X3 support for the clock framework
+ *
+ * Copyright (C) 2006-2007 Renesas Technology Corp.
+ * Copyright (C) 2006-2007 Renesas Solutions Corp.
+ * Copyright (C) 2006-2010 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/clkdev.h>
+#include <asm/clock.h>
+#include <asm/freq.h>
+
+/*
+ * Default rate for the root input clock; reset this with clk_set_rate()
+ * from the platform code.
+ */
+static struct clk extal_clk = {
+ .rate = 16666666,
+};
+
+static unsigned long pll_recalc(struct clk *clk)
+{
+ /* PLL1 has a fixed x72 multiplier. */
+ return clk->parent->rate * 72;
+}
+
+static struct sh_clk_ops pll_clk_ops = {
+ .recalc = pll_recalc,
+};
+
+static struct clk pll_clk = {
+ .ops = &pll_clk_ops,
+ .parent = &extal_clk,
+ .flags = CLK_ENABLE_ON_INIT,
+};
+
+static struct clk *clks[] = {
+ &extal_clk,
+ &pll_clk,
+};
+
+static unsigned int div2[] = { 1, 2, 4, 6, 8, 12, 16, 18,
+ 24, 32, 36, 48 };
+
+static struct clk_div_mult_table div4_div_mult_table = {
+ .divisors = div2,
+ .nr_divisors = ARRAY_SIZE(div2),
+};
+
+static struct clk_div4_table div4_table = {
+ .div_mult_table = &div4_div_mult_table,
+};
+
+enum { DIV4_I, DIV4_SH, DIV4_B, DIV4_DDR, DIV4_SHA, DIV4_P, DIV4_NR };
+
+#define DIV4(_bit, _mask, _flags) \
+ SH_CLK_DIV4(&pll_clk, FRQMR1, _bit, _mask, _flags)
+
+struct clk div4_clks[DIV4_NR] = {
+ [DIV4_P] = DIV4(0, 0x0f80, 0),
+ [DIV4_SHA] = DIV4(4, 0x0ff0, 0),
+ [DIV4_DDR] = DIV4(12, 0x000c, CLK_ENABLE_ON_INIT),
+ [DIV4_B] = DIV4(16, 0x0fe0, CLK_ENABLE_ON_INIT),
+ [DIV4_SH] = DIV4(20, 0x000c, CLK_ENABLE_ON_INIT),
+ [DIV4_I] = DIV4(28, 0x000e, CLK_ENABLE_ON_INIT),
+};
+
+#define MSTPCR0 0xffc00030
+#define MSTPCR1 0xffc00034
+
+enum { MSTP027, MSTP026, MSTP025, MSTP024,
+ MSTP009, MSTP008, MSTP003, MSTP002,
+ MSTP001, MSTP000, MSTP119, MSTP105,
+ MSTP104, MSTP_NR };
+
+static struct clk mstp_clks[MSTP_NR] = {
+ /* MSTPCR0 */
+ [MSTP027] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 27, 0),
+ [MSTP026] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 26, 0),
+ [MSTP025] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 25, 0),
+ [MSTP024] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 24, 0),
+ [MSTP009] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 9, 0),
+ [MSTP008] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 8, 0),
+ [MSTP003] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 3, 0),
+ [MSTP002] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 2, 0),
+ [MSTP001] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 1, 0),
+ [MSTP000] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 0, 0),
+
+ /* MSTPCR1 */
+ [MSTP119] = SH_CLK_MSTP32(NULL, MSTPCR1, 19, 0),
+ [MSTP105] = SH_CLK_MSTP32(NULL, MSTPCR1, 5, 0),
+ [MSTP104] = SH_CLK_MSTP32(NULL, MSTPCR1, 4, 0),
+};
+
+static struct clk_lookup lookups[] = {
+ /* main clocks */
+ CLKDEV_CON_ID("extal", &extal_clk),
+ CLKDEV_CON_ID("pll_clk", &pll_clk),
+
+ /* DIV4 clocks */
+ CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]),
+ CLKDEV_CON_ID("shywaya_clk", &div4_clks[DIV4_SHA]),
+ CLKDEV_CON_ID("ddr_clk", &div4_clks[DIV4_DDR]),
+ CLKDEV_CON_ID("bus_clk", &div4_clks[DIV4_B]),
+ CLKDEV_CON_ID("shyway_clk", &div4_clks[DIV4_SH]),
+ CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]),
+
+ /* MSTP32 clocks */
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.3", &mstp_clks[MSTP027]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.2", &mstp_clks[MSTP026]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.1", &mstp_clks[MSTP025]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.0", &mstp_clks[MSTP024]),
+
+ CLKDEV_CON_ID("h8ex_fck", &mstp_clks[MSTP003]),
+ CLKDEV_CON_ID("csm_fck", &mstp_clks[MSTP002]),
+ CLKDEV_CON_ID("fe1_fck", &mstp_clks[MSTP001]),
+ CLKDEV_CON_ID("fe0_fck", &mstp_clks[MSTP000]),
+
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.0", &mstp_clks[MSTP008]),
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.1", &mstp_clks[MSTP008]),
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.2", &mstp_clks[MSTP008]),
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.3", &mstp_clks[MSTP009]),
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.4", &mstp_clks[MSTP009]),
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.5", &mstp_clks[MSTP009]),
+
+ CLKDEV_CON_ID("hudi_fck", &mstp_clks[MSTP119]),
+ CLKDEV_CON_ID("dmac_11_6_fck", &mstp_clks[MSTP105]),
+ CLKDEV_CON_ID("dmac_5_0_fck", &mstp_clks[MSTP104]),
+};
+
+int __init arch_clk_init(void)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < ARRAY_SIZE(clks); i++)
+ ret |= clk_register(clks[i]);
+ for (i = 0; i < ARRAY_SIZE(lookups); i++)
+ clkdev_add(&lookups[i]);
+
+ if (!ret)
+ ret = sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks),
+ &div4_table);
+ if (!ret)
+ ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR);
+
+ return ret;
+}
diff --git a/arch/sh/kernel/cpu/sh4a/intc-shx3.c b/arch/sh/kernel/cpu/sh4a/intc-shx3.c
new file mode 100644
index 00000000..78c97148
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/intc-shx3.c
@@ -0,0 +1,34 @@
+/*
+ * Shared support for SH-X3 interrupt controllers.
+ *
+ * Copyright (C) 2009 - 2010 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/init.h>
+
+#define INTACK 0xfe4100b8
+#define INTACKCLR 0xfe4100bc
+#define INTC_USERIMASK 0xfe411000
+
+#ifdef CONFIG_INTC_BALANCING
+unsigned int irq_lookup(unsigned int irq)
+{
+ return __raw_readl(INTACK) & 1 ? irq : NO_IRQ_IGNORE;
+}
+
+void irq_finish(unsigned int irq)
+{
+ __raw_writel(irq2evt(irq), INTACKCLR);
+}
+#endif
+
+static int __init shx3_irq_setup(void)
+{
+ return register_intc_userimask(INTC_USERIMASK);
+}
+arch_initcall(shx3_irq_setup);
diff --git a/arch/sh/kernel/cpu/sh4a/perf_event.c b/arch/sh/kernel/cpu/sh4a/perf_event.c
new file mode 100644
index 00000000..84a2c396
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/perf_event.c
@@ -0,0 +1,302 @@
+/*
+ * Performance events support for SH-4A performance counters
+ *
+ * Copyright (C) 2009, 2010 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/perf_event.h>
+#include <asm/processor.h>
+
+#define PPC_CCBR(idx) (0xff200800 + (sizeof(u32) * idx))
+#define PPC_PMCTR(idx) (0xfc100000 + (sizeof(u32) * idx))
+
+#define CCBR_CIT_MASK (0x7ff << 6)
+#define CCBR_DUC (1 << 3)
+#define CCBR_CMDS (1 << 1)
+#define CCBR_PPCE (1 << 0)
+
+#ifdef CONFIG_CPU_SHX3
+/*
+ * The PMCAT location for SH-X3 CPUs was quietly moved, while the CCBR
+ * and PMCTR locations remain tentatively constant. This change remains
+ * wholly undocumented, and was simply found through trial and error.
+ *
+ * Early cuts of SH-X3 still appear to use the SH-X/SH-X2 locations, and
+ * it's unclear when this ceased to be the case. For now we always use
+ * the new location (if future parts keep up with this trend then
+ * scanning for them at runtime also remains a viable option).
+ *
+ * The gap in the register space also suggests that there are other
+ * undocumented counters, so this will need to be revisited at a later
+ * point in time.
+ */
+#define PPC_PMCAT 0xfc100240
+#else
+#define PPC_PMCAT 0xfc100080
+#endif
+
+#define PMCAT_OVF3 (1 << 27)
+#define PMCAT_CNN3 (1 << 26)
+#define PMCAT_CLR3 (1 << 25)
+#define PMCAT_OVF2 (1 << 19)
+#define PMCAT_CLR2 (1 << 17)
+#define PMCAT_OVF1 (1 << 11)
+#define PMCAT_CNN1 (1 << 10)
+#define PMCAT_CLR1 (1 << 9)
+#define PMCAT_OVF0 (1 << 3)
+#define PMCAT_CLR0 (1 << 1)
+
+static struct sh_pmu sh4a_pmu;
+
+/*
+ * Supported raw event codes:
+ *
+ * Event Code Description
+ * ---------- -----------
+ *
+ * 0x0000 number of elapsed cycles
+ * 0x0200 number of elapsed cycles in privileged mode
+ * 0x0280 number of elapsed cycles while SR.BL is asserted
+ * 0x0202 instruction execution
+ * 0x0203 instruction execution in parallel
+ * 0x0204 number of unconditional branches
+ * 0x0208 number of exceptions
+ * 0x0209 number of interrupts
+ * 0x0220 UTLB miss caused by instruction fetch
+ * 0x0222 UTLB miss caused by operand access
+ * 0x02a0 number of ITLB misses
+ * 0x0028 number of accesses to instruction memories
+ * 0x0029 number of accesses to instruction cache
+ * 0x002a instruction cache miss
+ *	0x022e	number of accesses to instruction X/Y memory
+ * 0x0030 number of reads to operand memories
+ * 0x0038 number of writes to operand memories
+ * 0x0031 number of operand cache read accesses
+ * 0x0039 number of operand cache write accesses
+ * 0x0032 operand cache read miss
+ * 0x003a operand cache write miss
+ * 0x0236 number of reads to operand X/Y memory
+ * 0x023e number of writes to operand X/Y memory
+ * 0x0237 number of reads to operand U memory
+ * 0x023f number of writes to operand U memory
+ * 0x0337 number of U memory read buffer misses
+ * 0x02b4 number of wait cycles due to operand read access
+ * 0x02bc number of wait cycles due to operand write access
+ * 0x0033 number of wait cycles due to operand cache read miss
+ * 0x003b number of wait cycles due to operand cache write miss
+ */
+
+/*
+ * Special reserved bits used by hardware emulators, read values will
+ * vary, but writes must always be 0.
+ */
+#define PMCAT_EMU_CLR_MASK ((1 << 24) | (1 << 16) | (1 << 8) | (1 << 0))
+
+static const int sh4a_general_events[] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = 0x0000,
+ [PERF_COUNT_HW_INSTRUCTIONS] = 0x0202,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0029, /* I-cache */
+ [PERF_COUNT_HW_CACHE_MISSES] = 0x002a, /* I-cache */
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x0204,
+ [PERF_COUNT_HW_BRANCH_MISSES] = -1,
+ [PERF_COUNT_HW_BUS_CYCLES] = -1,
+};
+
+#define C(x) PERF_COUNT_HW_CACHE_##x
+
+static const int sh4a_cache_events
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] =
+{
+ [ C(L1D) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0031,
+ [ C(RESULT_MISS) ] = 0x0032,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0039,
+ [ C(RESULT_MISS) ] = 0x003a,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ },
+
+ [ C(L1I) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0029,
+ [ C(RESULT_MISS) ] = 0x002a,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ },
+
+ [ C(LL) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0030,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0038,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ },
+
+ [ C(DTLB) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0222,
+ [ C(RESULT_MISS) ] = 0x0220,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ },
+
+ [ C(ITLB) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0x02a0,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+
+ [ C(BPU) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+
+ [ C(NODE) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+};
+
+static int sh4a_event_map(int event)
+{
+ return sh4a_general_events[event];
+}
+
+static u64 sh4a_pmu_read(int idx)
+{
+ return __raw_readl(PPC_PMCTR(idx));
+}
+
+static void sh4a_pmu_disable(struct hw_perf_event *hwc, int idx)
+{
+ unsigned int tmp;
+
+ tmp = __raw_readl(PPC_CCBR(idx));
+ tmp &= ~(CCBR_CIT_MASK | CCBR_DUC);
+ __raw_writel(tmp, PPC_CCBR(idx));
+}
+
+static void sh4a_pmu_enable(struct hw_perf_event *hwc, int idx)
+{
+ unsigned int tmp;
+
+ tmp = __raw_readl(PPC_PMCAT);
+ tmp &= ~PMCAT_EMU_CLR_MASK;
+ tmp |= idx ? PMCAT_CLR1 : PMCAT_CLR0;
+ __raw_writel(tmp, PPC_PMCAT);
+
+ tmp = __raw_readl(PPC_CCBR(idx));
+ tmp |= (hwc->config << 6) | CCBR_CMDS | CCBR_PPCE;
+ __raw_writel(tmp, PPC_CCBR(idx));
+
+ __raw_writel(__raw_readl(PPC_CCBR(idx)) | CCBR_DUC, PPC_CCBR(idx));
+}
+
+static void sh4a_pmu_disable_all(void)
+{
+ int i;
+
+ for (i = 0; i < sh4a_pmu.num_events; i++)
+ __raw_writel(__raw_readl(PPC_CCBR(i)) & ~CCBR_DUC, PPC_CCBR(i));
+}
+
+static void sh4a_pmu_enable_all(void)
+{
+ int i;
+
+ for (i = 0; i < sh4a_pmu.num_events; i++)
+ __raw_writel(__raw_readl(PPC_CCBR(i)) | CCBR_DUC, PPC_CCBR(i));
+}
+
+static struct sh_pmu sh4a_pmu = {
+ .name = "sh4a",
+ .num_events = 2,
+ .event_map = sh4a_event_map,
+ .max_events = ARRAY_SIZE(sh4a_general_events),
+ .raw_event_mask = 0x3ff,
+ .cache_events = &sh4a_cache_events,
+ .read = sh4a_pmu_read,
+ .disable = sh4a_pmu_disable,
+ .enable = sh4a_pmu_enable,
+ .disable_all = sh4a_pmu_disable_all,
+ .enable_all = sh4a_pmu_enable_all,
+};
+
+static int __init sh4a_pmu_init(void)
+{
+ /*
+ * Make sure this CPU actually has perf counters.
+ */
+ if (!(boot_cpu_data.flags & CPU_HAS_PERF_COUNTER)) {
+ pr_notice("HW perf events unsupported, software events only.\n");
+ return -ENODEV;
+ }
+
+ return register_sh_pmu(&sh4a_pmu);
+}
+early_initcall(sh4a_pmu_init);
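For context (not part of this patch): the raw event codes listed in the large comment above are usable from userspace through the standard perf interface, e.g. "perf stat -e r202 <cmd>", or directly via perf_event_open(). A minimal userspace sketch counting event 0x0202 (instruction execution); the event code comes from the table above, everything else is generic perf API usage:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int main(void)
{
	struct perf_event_attr attr;
	long long count = 0;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;	/* raw codes are masked with raw_event_mask */
	attr.config = 0x0202;		/* instruction execution */
	attr.disabled = 1;

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return 1;

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... run the workload being measured ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	read(fd, &count, sizeof(count));
	printf("instructions: %lld\n", count);
	close(fd);
	return 0;
}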
diff --git a/arch/sh/kernel/cpu/sh4a/pinmux-sh7722.c b/arch/sh/kernel/cpu/sh4a/pinmux-sh7722.c
new file mode 100644
index 00000000..0688a750
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/pinmux-sh7722.c
@@ -0,0 +1,1784 @@
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <cpu/sh7722.h>
+
+enum {
+ PINMUX_RESERVED = 0,
+
+ PINMUX_DATA_BEGIN,
+ PTA7_DATA, PTA6_DATA, PTA5_DATA, PTA4_DATA,
+ PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA,
+ PTB7_DATA, PTB6_DATA, PTB5_DATA, PTB4_DATA,
+ PTB3_DATA, PTB2_DATA, PTB1_DATA, PTB0_DATA,
+ PTC7_DATA, PTC5_DATA, PTC4_DATA, PTC3_DATA, PTC2_DATA, PTC0_DATA,
+ PTD7_DATA, PTD6_DATA, PTD5_DATA, PTD4_DATA,
+ PTD3_DATA, PTD2_DATA, PTD1_DATA, PTD0_DATA,
+ PTE7_DATA, PTE6_DATA, PTE5_DATA, PTE4_DATA, PTE1_DATA, PTE0_DATA,
+ PTF6_DATA, PTF5_DATA, PTF4_DATA,
+ PTF3_DATA, PTF2_DATA, PTF1_DATA, PTF0_DATA,
+ PTG4_DATA, PTG3_DATA, PTG2_DATA, PTG1_DATA, PTG0_DATA,
+ PTH7_DATA, PTH6_DATA, PTH5_DATA, PTH4_DATA,
+ PTH3_DATA, PTH2_DATA, PTH1_DATA, PTH0_DATA,
+ PTJ7_DATA, PTJ6_DATA, PTJ5_DATA, PTJ1_DATA, PTJ0_DATA,
+ PTK6_DATA, PTK5_DATA, PTK4_DATA,
+ PTK3_DATA, PTK2_DATA, PTK1_DATA, PTK0_DATA,
+ PTL7_DATA, PTL6_DATA, PTL5_DATA, PTL4_DATA,
+ PTL3_DATA, PTL2_DATA, PTL1_DATA, PTL0_DATA,
+ PTM7_DATA, PTM6_DATA, PTM5_DATA, PTM4_DATA,
+ PTM3_DATA, PTM2_DATA, PTM1_DATA, PTM0_DATA,
+ PTN7_DATA, PTN6_DATA, PTN5_DATA, PTN4_DATA,
+ PTN3_DATA, PTN2_DATA, PTN1_DATA, PTN0_DATA,
+ PTQ6_DATA, PTQ5_DATA, PTQ4_DATA,
+ PTQ3_DATA, PTQ2_DATA, PTQ1_DATA, PTQ0_DATA,
+ PTR4_DATA, PTR3_DATA, PTR2_DATA, PTR1_DATA, PTR0_DATA,
+ PTS4_DATA, PTS3_DATA, PTS2_DATA, PTS1_DATA, PTS0_DATA,
+ PTT4_DATA, PTT3_DATA, PTT2_DATA, PTT1_DATA, PTT0_DATA,
+ PTU4_DATA, PTU3_DATA, PTU2_DATA, PTU1_DATA, PTU0_DATA,
+ PTV4_DATA, PTV3_DATA, PTV2_DATA, PTV1_DATA, PTV0_DATA,
+ PTW6_DATA, PTW5_DATA, PTW4_DATA,
+ PTW3_DATA, PTW2_DATA, PTW1_DATA, PTW0_DATA,
+ PTX6_DATA, PTX5_DATA, PTX4_DATA,
+ PTX3_DATA, PTX2_DATA, PTX1_DATA, PTX0_DATA,
+ PTY6_DATA, PTY5_DATA, PTY4_DATA,
+ PTY3_DATA, PTY2_DATA, PTY1_DATA, PTY0_DATA,
+ PTZ5_DATA, PTZ4_DATA, PTZ3_DATA, PTZ2_DATA, PTZ1_DATA, PTZ0_DATA,
+ PINMUX_DATA_END,
+
+ PINMUX_INPUT_BEGIN,
+ PTA7_IN, PTA6_IN, PTA5_IN, PTA4_IN,
+ PTA3_IN, PTA2_IN, PTA1_IN, PTA0_IN,
+ PTB7_IN, PTB6_IN, PTB5_IN, PTB4_IN,
+ PTB3_IN, PTB2_IN, PTB1_IN, PTB0_IN,
+ PTC7_IN, PTC5_IN, PTC4_IN, PTC3_IN, PTC2_IN, PTC0_IN,
+ PTD7_IN, PTD6_IN, PTD5_IN, PTD4_IN, PTD3_IN, PTD2_IN, PTD1_IN,
+ PTE7_IN, PTE6_IN, PTE5_IN, PTE4_IN, PTE1_IN, PTE0_IN,
+ PTF6_IN, PTF5_IN, PTF4_IN, PTF3_IN, PTF2_IN, PTF1_IN,
+ PTH6_IN, PTH5_IN, PTH1_IN, PTH0_IN,
+ PTJ1_IN, PTJ0_IN,
+ PTK6_IN, PTK5_IN, PTK4_IN, PTK3_IN, PTK2_IN, PTK0_IN,
+ PTL7_IN, PTL6_IN, PTL5_IN, PTL4_IN,
+ PTL3_IN, PTL2_IN, PTL1_IN, PTL0_IN,
+ PTM7_IN, PTM6_IN, PTM5_IN, PTM4_IN,
+ PTM3_IN, PTM2_IN, PTM1_IN, PTM0_IN,
+ PTN7_IN, PTN6_IN, PTN5_IN, PTN4_IN,
+ PTN3_IN, PTN2_IN, PTN1_IN, PTN0_IN,
+ PTQ5_IN, PTQ4_IN, PTQ3_IN, PTQ2_IN, PTQ0_IN,
+ PTR2_IN,
+ PTS4_IN, PTS2_IN, PTS1_IN,
+ PTT4_IN, PTT3_IN, PTT2_IN, PTT1_IN,
+ PTU4_IN, PTU3_IN, PTU2_IN, PTU1_IN, PTU0_IN,
+ PTV4_IN, PTV3_IN, PTV2_IN, PTV1_IN, PTV0_IN,
+ PTW6_IN, PTW4_IN, PTW3_IN, PTW2_IN, PTW1_IN, PTW0_IN,
+ PTX6_IN, PTX5_IN, PTX4_IN, PTX3_IN, PTX2_IN, PTX1_IN, PTX0_IN,
+ PTY5_IN, PTY4_IN, PTY3_IN, PTY2_IN, PTY0_IN,
+ PTZ5_IN, PTZ4_IN, PTZ3_IN, PTZ2_IN, PTZ1_IN,
+ PINMUX_INPUT_END,
+
+ PINMUX_INPUT_PULLDOWN_BEGIN,
+ PTA7_IN_PD, PTA6_IN_PD, PTA5_IN_PD, PTA4_IN_PD,
+ PTA3_IN_PD, PTA2_IN_PD, PTA1_IN_PD, PTA0_IN_PD,
+ PTE7_IN_PD, PTE6_IN_PD, PTE5_IN_PD, PTE4_IN_PD, PTE1_IN_PD, PTE0_IN_PD,
+ PTF6_IN_PD, PTF5_IN_PD, PTF4_IN_PD, PTF3_IN_PD, PTF2_IN_PD, PTF1_IN_PD,
+ PTH6_IN_PD, PTH5_IN_PD, PTH1_IN_PD, PTH0_IN_PD,
+ PTK6_IN_PD, PTK5_IN_PD, PTK4_IN_PD, PTK3_IN_PD, PTK2_IN_PD, PTK0_IN_PD,
+ PTL7_IN_PD, PTL6_IN_PD, PTL5_IN_PD, PTL4_IN_PD,
+ PTL3_IN_PD, PTL2_IN_PD, PTL1_IN_PD, PTL0_IN_PD,
+ PTM7_IN_PD, PTM6_IN_PD, PTM5_IN_PD, PTM4_IN_PD,
+ PTM3_IN_PD, PTM2_IN_PD, PTM1_IN_PD, PTM0_IN_PD,
+ PTQ5_IN_PD, PTQ4_IN_PD, PTQ3_IN_PD, PTQ2_IN_PD,
+ PTS4_IN_PD, PTS2_IN_PD, PTS1_IN_PD,
+ PTT4_IN_PD, PTT3_IN_PD, PTT2_IN_PD, PTT1_IN_PD,
+ PTU4_IN_PD, PTU3_IN_PD, PTU2_IN_PD, PTU1_IN_PD, PTU0_IN_PD,
+ PTV4_IN_PD, PTV3_IN_PD, PTV2_IN_PD, PTV1_IN_PD, PTV0_IN_PD,
+ PTW6_IN_PD, PTW4_IN_PD, PTW3_IN_PD, PTW2_IN_PD, PTW1_IN_PD, PTW0_IN_PD,
+ PTX6_IN_PD, PTX5_IN_PD, PTX4_IN_PD,
+ PTX3_IN_PD, PTX2_IN_PD, PTX1_IN_PD, PTX0_IN_PD,
+ PINMUX_INPUT_PULLDOWN_END,
+
+ PINMUX_INPUT_PULLUP_BEGIN,
+ PTC7_IN_PU, PTC5_IN_PU,
+ PTD7_IN_PU, PTD6_IN_PU, PTD5_IN_PU, PTD4_IN_PU,
+ PTD3_IN_PU, PTD2_IN_PU, PTD1_IN_PU,
+ PTJ1_IN_PU, PTJ0_IN_PU,
+ PTQ0_IN_PU,
+ PTR2_IN_PU,
+ PTX6_IN_PU,
+ PTY5_IN_PU, PTY4_IN_PU, PTY3_IN_PU, PTY2_IN_PU, PTY0_IN_PU,
+ PTZ5_IN_PU, PTZ4_IN_PU, PTZ3_IN_PU, PTZ2_IN_PU, PTZ1_IN_PU,
+ PINMUX_INPUT_PULLUP_END,
+
+ PINMUX_OUTPUT_BEGIN,
+ PTA7_OUT, PTA5_OUT,
+ PTB7_OUT, PTB6_OUT, PTB5_OUT, PTB4_OUT,
+ PTB3_OUT, PTB2_OUT, PTB1_OUT, PTB0_OUT,
+ PTC4_OUT, PTC3_OUT, PTC2_OUT, PTC0_OUT,
+ PTD6_OUT, PTD5_OUT, PTD4_OUT,
+ PTD3_OUT, PTD2_OUT, PTD1_OUT, PTD0_OUT,
+ PTE7_OUT, PTE6_OUT, PTE5_OUT, PTE4_OUT, PTE1_OUT, PTE0_OUT,
+ PTF6_OUT, PTF5_OUT, PTF4_OUT, PTF3_OUT, PTF2_OUT, PTF0_OUT,
+ PTG4_OUT, PTG3_OUT, PTG2_OUT, PTG1_OUT, PTG0_OUT,
+ PTH7_OUT, PTH6_OUT, PTH5_OUT, PTH4_OUT,
+ PTH3_OUT, PTH2_OUT, PTH1_OUT, PTH0_OUT,
+ PTJ7_OUT, PTJ6_OUT, PTJ5_OUT, PTJ1_OUT, PTJ0_OUT,
+ PTK6_OUT, PTK5_OUT, PTK4_OUT, PTK3_OUT, PTK1_OUT, PTK0_OUT,
+ PTL7_OUT, PTL6_OUT, PTL5_OUT, PTL4_OUT,
+ PTL3_OUT, PTL2_OUT, PTL1_OUT, PTL0_OUT,
+ PTM7_OUT, PTM6_OUT, PTM5_OUT, PTM4_OUT,
+ PTM3_OUT, PTM2_OUT, PTM1_OUT, PTM0_OUT,
+ PTN7_OUT, PTN6_OUT, PTN5_OUT, PTN4_OUT,
+ PTN3_OUT, PTN2_OUT, PTN1_OUT, PTN0_OUT, PTQ6_OUT, PTQ5_OUT, PTQ4_OUT,
+ PTQ3_OUT, PTQ2_OUT, PTQ1_OUT, PTQ0_OUT,
+ PTR4_OUT, PTR3_OUT, PTR1_OUT, PTR0_OUT,
+ PTS3_OUT, PTS2_OUT, PTS0_OUT,
+ PTT4_OUT, PTT3_OUT, PTT2_OUT, PTT0_OUT,
+ PTU4_OUT, PTU3_OUT, PTU2_OUT, PTU0_OUT,
+ PTV4_OUT, PTV3_OUT, PTV2_OUT, PTV1_OUT, PTV0_OUT,
+ PTW5_OUT, PTW4_OUT, PTW3_OUT, PTW2_OUT, PTW1_OUT, PTW0_OUT,
+ PTX6_OUT, PTX5_OUT, PTX4_OUT, PTX3_OUT, PTX2_OUT, PTX1_OUT, PTX0_OUT,
+ PTY5_OUT, PTY4_OUT, PTY3_OUT, PTY2_OUT, PTY1_OUT, PTY0_OUT,
+ PINMUX_OUTPUT_END,
+
+ PINMUX_MARK_BEGIN,
+ SCIF0_TXD_MARK, SCIF0_RXD_MARK,
+ SCIF0_RTS_MARK, SCIF0_CTS_MARK, SCIF0_SCK_MARK,
+ SCIF1_TXD_MARK, SCIF1_RXD_MARK,
+ SCIF1_RTS_MARK, SCIF1_CTS_MARK, SCIF1_SCK_MARK,
+ SCIF2_TXD_MARK, SCIF2_RXD_MARK,
+ SCIF2_RTS_MARK, SCIF2_CTS_MARK, SCIF2_SCK_MARK,
+ SIOTXD_MARK, SIORXD_MARK,
+ SIOD_MARK, SIOSTRB0_MARK, SIOSTRB1_MARK,
+ SIOSCK_MARK, SIOMCK_MARK,
+ VIO_D15_MARK, VIO_D14_MARK, VIO_D13_MARK, VIO_D12_MARK,
+ VIO_D11_MARK, VIO_D10_MARK, VIO_D9_MARK, VIO_D8_MARK,
+ VIO_D7_MARK, VIO_D6_MARK, VIO_D5_MARK, VIO_D4_MARK,
+ VIO_D3_MARK, VIO_D2_MARK, VIO_D1_MARK, VIO_D0_MARK,
+ VIO_CLK_MARK, VIO_VD_MARK, VIO_HD_MARK, VIO_FLD_MARK,
+ VIO_CKO_MARK, VIO_STEX_MARK, VIO_STEM_MARK, VIO_VD2_MARK,
+ VIO_HD2_MARK, VIO_CLK2_MARK,
+ LCDD23_MARK, LCDD22_MARK, LCDD21_MARK, LCDD20_MARK,
+ LCDD19_MARK, LCDD18_MARK, LCDD17_MARK, LCDD16_MARK,
+ LCDD15_MARK, LCDD14_MARK, LCDD13_MARK, LCDD12_MARK,
+ LCDD11_MARK, LCDD10_MARK, LCDD9_MARK, LCDD8_MARK,
+ LCDD7_MARK, LCDD6_MARK, LCDD5_MARK, LCDD4_MARK,
+ LCDD3_MARK, LCDD2_MARK, LCDD1_MARK, LCDD0_MARK,
+ LCDLCLK_MARK, LCDDON_MARK, LCDVCPWC_MARK, LCDVEPWC_MARK,
+ LCDVSYN_MARK, LCDDCK_MARK, LCDHSYN_MARK, LCDDISP_MARK,
+ LCDRS_MARK, LCDCS_MARK, LCDWR_MARK, LCDRD_MARK,
+ LCDDON2_MARK, LCDVCPWC2_MARK, LCDVEPWC2_MARK, LCDVSYN2_MARK,
+ LCDCS2_MARK,
+ IOIS16_MARK, A25_MARK, A24_MARK, A23_MARK, A22_MARK,
+ BS_MARK, CS6B_CE1B_MARK, WAIT_MARK, CS6A_CE2B_MARK,
+ HPD63_MARK, HPD62_MARK, HPD61_MARK, HPD60_MARK,
+ HPD59_MARK, HPD58_MARK, HPD57_MARK, HPD56_MARK,
+ HPD55_MARK, HPD54_MARK, HPD53_MARK, HPD52_MARK,
+ HPD51_MARK, HPD50_MARK, HPD49_MARK, HPD48_MARK,
+ HPDQM7_MARK, HPDQM6_MARK, HPDQM5_MARK, HPDQM4_MARK,
+ IRQ0_MARK, IRQ1_MARK, IRQ2_MARK, IRQ3_MARK,
+ IRQ4_MARK, IRQ5_MARK, IRQ6_MARK, IRQ7_MARK,
+ SDHICD_MARK, SDHIWP_MARK, SDHID3_MARK, SDHID2_MARK,
+ SDHID1_MARK, SDHID0_MARK, SDHICMD_MARK, SDHICLK_MARK,
+ SIUAOLR_MARK, SIUAOBT_MARK, SIUAISLD_MARK, SIUAILR_MARK,
+ SIUAIBT_MARK, SIUAOSLD_MARK, SIUMCKA_MARK, SIUFCKA_MARK,
+ SIUBOLR_MARK, SIUBOBT_MARK, SIUBISLD_MARK, SIUBILR_MARK,
+ SIUBIBT_MARK, SIUBOSLD_MARK, SIUMCKB_MARK, SIUFCKB_MARK,
+ AUDSYNC_MARK, AUDATA3_MARK, AUDATA2_MARK, AUDATA1_MARK, AUDATA0_MARK,
+ DACK_MARK, DREQ0_MARK,
+ DV_CLKI_MARK, DV_CLK_MARK, DV_HSYNC_MARK, DV_VSYNC_MARK,
+ DV_D15_MARK, DV_D14_MARK, DV_D13_MARK, DV_D12_MARK,
+ DV_D11_MARK, DV_D10_MARK, DV_D9_MARK, DV_D8_MARK,
+ DV_D7_MARK, DV_D6_MARK, DV_D5_MARK, DV_D4_MARK,
+ DV_D3_MARK, DV_D2_MARK, DV_D1_MARK, DV_D0_MARK,
+ STATUS0_MARK, PDSTATUS_MARK,
+ SIOF0_MCK_MARK, SIOF0_SCK_MARK,
+ SIOF0_SYNC_MARK, SIOF0_SS1_MARK, SIOF0_SS2_MARK,
+ SIOF0_TXD_MARK, SIOF0_RXD_MARK,
+ SIOF1_MCK_MARK, SIOF1_SCK_MARK,
+ SIOF1_SYNC_MARK, SIOF1_SS1_MARK, SIOF1_SS2_MARK,
+ SIOF1_TXD_MARK, SIOF1_RXD_MARK,
+ SIM_D_MARK, SIM_CLK_MARK, SIM_RST_MARK,
+ TS_SDAT_MARK, TS_SCK_MARK, TS_SDEN_MARK, TS_SPSYNC_MARK,
+ IRDA_IN_MARK, IRDA_OUT_MARK,
+ TPUTO_MARK,
+ FCE_MARK, NAF7_MARK, NAF6_MARK, NAF5_MARK, NAF4_MARK,
+ NAF3_MARK, NAF2_MARK, NAF1_MARK, NAF0_MARK, FCDE_MARK,
+ FOE_MARK, FSC_MARK, FWE_MARK, FRB_MARK,
+ KEYIN0_MARK, KEYIN1_MARK, KEYIN2_MARK, KEYIN3_MARK, KEYIN4_MARK,
+ KEYOUT0_MARK, KEYOUT1_MARK, KEYOUT2_MARK, KEYOUT3_MARK,
+ KEYOUT4_IN6_MARK, KEYOUT5_IN5_MARK,
+ PINMUX_MARK_END,
+
+ PINMUX_FUNCTION_BEGIN,
+ VIO_D7_SCIF1_SCK, VIO_D6_SCIF1_RXD, VIO_D5_SCIF1_TXD, VIO_D4,
+ VIO_D3, VIO_D2, VIO_D1, VIO_D0_LCDLCLK,
+ HPD55, HPD54, HPD53, HPD52, HPD51, HPD50, HPD49, HPD48,
+ IOIS16, HPDQM7, HPDQM6, HPDQM5, HPDQM4,
+ SDHICD, SDHIWP, SDHID3, IRQ2_SDHID2, SDHID1, SDHID0, SDHICMD, SDHICLK,
+ A25, A24, A23, A22, IRQ5, IRQ4_BS,
+ PTF6, SIOSCK_SIUBOBT, SIOSTRB1_SIUBOLR,
+ SIOSTRB0_SIUBIBT, SIOD_SIUBILR, SIORXD_SIUBISLD, SIOTXD_SIUBOSLD,
+ AUDSYNC, AUDATA3, AUDATA2, AUDATA1, AUDATA0,
+ LCDVCPWC_LCDVCPWC2, LCDVSYN2_DACK, LCDVSYN, LCDDISP_LCDRS,
+ LCDHSYN_LCDCS, LCDDON_LCDDON2, LCDD17_DV_HSYNC, LCDD16_DV_VSYNC,
+ STATUS0, PDSTATUS, IRQ1, IRQ0,
+ SIUAILR_SIOF1_SS2, SIUAIBT_SIOF1_SS1, SIUAOLR_SIOF1_SYNC,
+ SIUAOBT_SIOF1_SCK, SIUAISLD_SIOF1_RXD, SIUAOSLD_SIOF1_TXD, PTK0,
+ LCDD15_DV_D15, LCDD14_DV_D14, LCDD13_DV_D13, LCDD12_DV_D12,
+ LCDD11_DV_D11, LCDD10_DV_D10, LCDD9_DV_D9, LCDD8_DV_D8,
+ LCDD7_DV_D7, LCDD6_DV_D6, LCDD5_DV_D5, LCDD4_DV_D4,
+ LCDD3_DV_D3, LCDD2_DV_D2, LCDD1_DV_D1, LCDD0_DV_D0,
+ HPD63, HPD62, HPD61, HPD60, HPD59, HPD58, HPD57, HPD56,
+ SIOF0_SS2_SIM_RST, SIOF0_SS1_TS_SPSYNC, SIOF0_SYNC_TS_SDEN,
+ SIOF0_SCK_TS_SCK, PTQ2, PTQ1, PTQ0,
+ LCDRD, CS6B_CE1B_LCDCS2, WAIT, LCDDCK_LCDWR, LCDVEPWC_LCDVEPWC2,
+ SCIF0_CTS_SIUAISPD, SCIF0_RTS_SIUAOSPD,
+ SCIF0_SCK_TPUTO, SCIF0_RXD, SCIF0_TXD,
+ FOE_VIO_VD2, FWE, FSC, DREQ0, FCDE,
+ NAF2_VIO_D10, NAF1_VIO_D9, NAF0_VIO_D8,
+ FRB_VIO_CLK2, FCE_VIO_HD2,
+ NAF7_VIO_D15, NAF6_VIO_D14, NAF5_VIO_D13, NAF4_VIO_D12, NAF3_VIO_D11,
+ VIO_FLD_SCIF2_CTS, VIO_CKO_SCIF2_RTS, VIO_STEX_SCIF2_SCK,
+ VIO_STEM_SCIF2_TXD, VIO_HD_SCIF2_RXD,
+ VIO_VD_SCIF1_CTS, VIO_CLK_SCIF1_RTS,
+ CS6A_CE2B, LCDD23, LCDD22, LCDD21, LCDD20,
+ LCDD19_DV_CLKI, LCDD18_DV_CLK,
+ KEYOUT5_IN5, KEYOUT4_IN6, KEYOUT3, KEYOUT2, KEYOUT1, KEYOUT0,
+ KEYIN4_IRQ7, KEYIN3, KEYIN2, KEYIN1, KEYIN0_IRQ6,
+
+ PSA15_KEYIN0, PSA15_IRQ6, PSA14_KEYIN4, PSA14_IRQ7,
+ PSA9_IRQ4, PSA9_BS, PSA4_IRQ2, PSA4_SDHID2,
+ PSB15_SIOTXD, PSB15_SIUBOSLD, PSB14_SIORXD, PSB14_SIUBISLD,
+ PSB13_SIOD, PSB13_SIUBILR, PSB12_SIOSTRB0, PSB12_SIUBIBT,
+ PSB11_SIOSTRB1, PSB11_SIUBOLR, PSB10_SIOSCK, PSB10_SIUBOBT,
+ PSB9_SIOMCK, PSB9_SIUMCKB, PSB8_SIOF0_MCK, PSB8_IRQ3,
+ PSB7_SIOF0_TXD, PSB7_IRDA_OUT, PSB6_SIOF0_RXD, PSB6_IRDA_IN,
+ PSB5_SIOF0_SCK, PSB5_TS_SCK, PSB4_SIOF0_SYNC, PSB4_TS_SDEN,
+ PSB3_SIOF0_SS1, PSB3_TS_SPSYNC, PSB2_SIOF0_SS2, PSB2_SIM_RST,
+ PSB1_SIUMCKA, PSB1_SIOF1_MCK, PSB0_SIUAOSLD, PSB0_SIOF1_TXD,
+ PSC15_SIUAISLD, PSC15_SIOF1_RXD, PSC14_SIUAOBT, PSC14_SIOF1_SCK,
+ PSC13_SIUAOLR, PSC13_SIOF1_SYNC, PSC12_SIUAIBT, PSC12_SIOF1_SS1,
+ PSC11_SIUAILR, PSC11_SIOF1_SS2, PSC0_NAF, PSC0_VIO,
+ PSD13_VIO, PSD13_SCIF2, PSD12_VIO, PSD12_SCIF1,
+ PSD11_VIO, PSD11_SCIF1, PSD10_VIO_D0, PSD10_LCDLCLK,
+ PSD9_SIOMCK_SIUMCKB, PSD9_SIUFCKB, PSD8_SCIF0_SCK, PSD8_TPUTO,
+ PSD7_SCIF0_RTS, PSD7_SIUAOSPD, PSD6_SCIF0_CTS, PSD6_SIUAISPD,
+ PSD5_CS6B_CE1B, PSD5_LCDCS2,
+ PSD3_LCDVEPWC_LCDVCPWC, PSD3_LCDVEPWC2_LCDVCPWC2,
+ PSD2_LCDDON, PSD2_LCDDON2, PSD0_LCDD19_LCDD0, PSD0_DV,
+ PSE15_SIOF0_MCK_IRQ3, PSE15_SIM_D,
+ PSE14_SIOF0_TXD_IRDA_OUT, PSE14_SIM_CLK,
+ PSE13_SIOF0_RXD_IRDA_IN, PSE13_TS_SDAT, PSE12_LCDVSYN2, PSE12_DACK,
+ PSE11_SIUMCKA_SIOF1_MCK, PSE11_SIUFCKA,
+ PSE3_FLCTL, PSE3_VIO, PSE2_NAF2, PSE2_VIO_D10,
+ PSE1_NAF1, PSE1_VIO_D9, PSE0_NAF0, PSE0_VIO_D8,
+
+ HIZA14_KEYSC, HIZA14_HIZ,
+ HIZA10_NAF, HIZA10_HIZ,
+ HIZA9_VIO, HIZA9_HIZ,
+ HIZA8_LCDC, HIZA8_HIZ,
+ HIZA7_LCDC, HIZA7_HIZ,
+ HIZA6_LCDC, HIZA6_HIZ,
+ HIZB4_SIUA, HIZB4_HIZ,
+ HIZB1_VIO, HIZB1_HIZ,
+ HIZB0_VIO, HIZB0_HIZ,
+ HIZC15_IRQ7, HIZC15_HIZ,
+ HIZC14_IRQ6, HIZC14_HIZ,
+ HIZC13_IRQ5, HIZC13_HIZ,
+ HIZC12_IRQ4, HIZC12_HIZ,
+ HIZC11_IRQ3, HIZC11_HIZ,
+ HIZC10_IRQ2, HIZC10_HIZ,
+ HIZC9_IRQ1, HIZC9_HIZ,
+ HIZC8_IRQ0, HIZC8_HIZ,
+ MSELB9_VIO, MSELB9_VIO2,
+ MSELB8_RGB, MSELB8_SYS,
+ PINMUX_FUNCTION_END,
+};
+
+static pinmux_enum_t pinmux_data[] = {
+ /* PTA */
+ PINMUX_DATA(PTA7_DATA, PTA7_IN, PTA7_IN_PD, PTA7_OUT),
+ PINMUX_DATA(PTA6_DATA, PTA6_IN, PTA6_IN_PD),
+ PINMUX_DATA(PTA5_DATA, PTA5_IN, PTA5_IN_PD, PTA5_OUT),
+ PINMUX_DATA(PTA4_DATA, PTA4_IN, PTA4_IN_PD),
+ PINMUX_DATA(PTA3_DATA, PTA3_IN, PTA3_IN_PD),
+ PINMUX_DATA(PTA2_DATA, PTA2_IN, PTA2_IN_PD),
+ PINMUX_DATA(PTA1_DATA, PTA1_IN, PTA1_IN_PD),
+ PINMUX_DATA(PTA0_DATA, PTA0_IN, PTA0_IN_PD),
+
+ /* PTB */
+ PINMUX_DATA(PTB7_DATA, PTB7_IN, PTB7_OUT),
+ PINMUX_DATA(PTB6_DATA, PTB6_IN, PTB6_OUT),
+ PINMUX_DATA(PTB5_DATA, PTB5_IN, PTB5_OUT),
+ PINMUX_DATA(PTB4_DATA, PTB4_IN, PTB4_OUT),
+ PINMUX_DATA(PTB3_DATA, PTB3_IN, PTB3_OUT),
+ PINMUX_DATA(PTB2_DATA, PTB2_IN, PTB2_OUT),
+ PINMUX_DATA(PTB1_DATA, PTB1_IN, PTB1_OUT),
+ PINMUX_DATA(PTB0_DATA, PTB0_IN, PTB0_OUT),
+
+ /* PTC */
+ PINMUX_DATA(PTC7_DATA, PTC7_IN, PTC7_IN_PU),
+ PINMUX_DATA(PTC5_DATA, PTC5_IN, PTC5_IN_PU),
+ PINMUX_DATA(PTC4_DATA, PTC4_IN, PTC4_OUT),
+ PINMUX_DATA(PTC3_DATA, PTC3_IN, PTC3_OUT),
+ PINMUX_DATA(PTC2_DATA, PTC2_IN, PTC2_OUT),
+ PINMUX_DATA(PTC0_DATA, PTC0_IN, PTC0_OUT),
+
+ /* PTD */
+ PINMUX_DATA(PTD7_DATA, PTD7_IN, PTD7_IN_PU),
+ PINMUX_DATA(PTD6_DATA, PTD6_OUT, PTD6_IN, PTD6_IN_PU),
+ PINMUX_DATA(PTD5_DATA, PTD5_OUT, PTD5_IN, PTD5_IN_PU),
+ PINMUX_DATA(PTD4_DATA, PTD4_OUT, PTD4_IN, PTD4_IN_PU),
+ PINMUX_DATA(PTD3_DATA, PTD3_OUT, PTD3_IN, PTD3_IN_PU),
+ PINMUX_DATA(PTD2_DATA, PTD2_OUT, PTD2_IN, PTD2_IN_PU),
+ PINMUX_DATA(PTD1_DATA, PTD1_OUT, PTD1_IN, PTD1_IN_PU),
+ PINMUX_DATA(PTD0_DATA, PTD0_OUT),
+
+ /* PTE */
+ PINMUX_DATA(PTE7_DATA, PTE7_OUT, PTE7_IN, PTE7_IN_PD),
+ PINMUX_DATA(PTE6_DATA, PTE6_OUT, PTE6_IN, PTE6_IN_PD),
+ PINMUX_DATA(PTE5_DATA, PTE5_OUT, PTE5_IN, PTE5_IN_PD),
+ PINMUX_DATA(PTE4_DATA, PTE4_OUT, PTE4_IN, PTE4_IN_PD),
+ PINMUX_DATA(PTE1_DATA, PTE1_OUT, PTE1_IN, PTE1_IN_PD),
+ PINMUX_DATA(PTE0_DATA, PTE0_OUT, PTE0_IN, PTE0_IN_PD),
+
+ /* PTF */
+ PINMUX_DATA(PTF6_DATA, PTF6_OUT, PTF6_IN, PTF6_IN_PD),
+ PINMUX_DATA(PTF5_DATA, PTF5_OUT, PTF5_IN, PTF5_IN_PD),
+ PINMUX_DATA(PTF4_DATA, PTF4_OUT, PTF4_IN, PTF4_IN_PD),
+ PINMUX_DATA(PTF3_DATA, PTF3_OUT, PTF3_IN, PTF3_IN_PD),
+ PINMUX_DATA(PTF2_DATA, PTF2_OUT, PTF2_IN, PTF2_IN_PD),
+ PINMUX_DATA(PTF1_DATA, PTF1_IN, PTF1_IN_PD),
+ PINMUX_DATA(PTF0_DATA, PTF0_OUT),
+
+ /* PTG */
+ PINMUX_DATA(PTG4_DATA, PTG4_OUT),
+ PINMUX_DATA(PTG3_DATA, PTG3_OUT),
+ PINMUX_DATA(PTG2_DATA, PTG2_OUT),
+ PINMUX_DATA(PTG1_DATA, PTG1_OUT),
+ PINMUX_DATA(PTG0_DATA, PTG0_OUT),
+
+ /* PTH */
+ PINMUX_DATA(PTH7_DATA, PTH7_OUT),
+ PINMUX_DATA(PTH6_DATA, PTH6_OUT, PTH6_IN, PTH6_IN_PD),
+ PINMUX_DATA(PTH5_DATA, PTH5_OUT, PTH5_IN, PTH5_IN_PD),
+ PINMUX_DATA(PTH4_DATA, PTH4_OUT),
+ PINMUX_DATA(PTH3_DATA, PTH3_OUT),
+ PINMUX_DATA(PTH2_DATA, PTH2_OUT),
+ PINMUX_DATA(PTH1_DATA, PTH1_OUT, PTH1_IN, PTH1_IN_PD),
+ PINMUX_DATA(PTH0_DATA, PTH0_OUT, PTH0_IN, PTH0_IN_PD),
+
+ /* PTJ */
+ PINMUX_DATA(PTJ7_DATA, PTJ7_OUT),
+ PINMUX_DATA(PTJ6_DATA, PTJ6_OUT),
+ PINMUX_DATA(PTJ5_DATA, PTJ5_OUT),
+ PINMUX_DATA(PTJ1_DATA, PTJ1_OUT, PTJ1_IN, PTJ1_IN_PU),
+ PINMUX_DATA(PTJ0_DATA, PTJ0_OUT, PTJ0_IN, PTJ0_IN_PU),
+
+ /* PTK */
+ PINMUX_DATA(PTK6_DATA, PTK6_OUT, PTK6_IN, PTK6_IN_PD),
+ PINMUX_DATA(PTK5_DATA, PTK5_OUT, PTK5_IN, PTK5_IN_PD),
+ PINMUX_DATA(PTK4_DATA, PTK4_OUT, PTK4_IN, PTK4_IN_PD),
+ PINMUX_DATA(PTK3_DATA, PTK3_OUT, PTK3_IN, PTK3_IN_PD),
+ PINMUX_DATA(PTK2_DATA, PTK2_IN, PTK2_IN_PD),
+ PINMUX_DATA(PTK1_DATA, PTK1_OUT),
+ PINMUX_DATA(PTK0_DATA, PTK0_OUT, PTK0_IN, PTK0_IN_PD),
+
+ /* PTL */
+ PINMUX_DATA(PTL7_DATA, PTL7_OUT, PTL7_IN, PTL7_IN_PD),
+ PINMUX_DATA(PTL6_DATA, PTL6_OUT, PTL6_IN, PTL6_IN_PD),
+ PINMUX_DATA(PTL5_DATA, PTL5_OUT, PTL5_IN, PTL5_IN_PD),
+ PINMUX_DATA(PTL4_DATA, PTL4_OUT, PTL4_IN, PTL4_IN_PD),
+ PINMUX_DATA(PTL3_DATA, PTL3_OUT, PTL3_IN, PTL3_IN_PD),
+ PINMUX_DATA(PTL2_DATA, PTL2_OUT, PTL2_IN, PTL2_IN_PD),
+ PINMUX_DATA(PTL1_DATA, PTL1_OUT, PTL1_IN, PTL1_IN_PD),
+ PINMUX_DATA(PTL0_DATA, PTL0_OUT, PTL0_IN, PTL0_IN_PD),
+
+ /* PTM */
+ PINMUX_DATA(PTM7_DATA, PTM7_OUT, PTM7_IN, PTM7_IN_PD),
+ PINMUX_DATA(PTM6_DATA, PTM6_OUT, PTM6_IN, PTM6_IN_PD),
+ PINMUX_DATA(PTM5_DATA, PTM5_OUT, PTM5_IN, PTM5_IN_PD),
+ PINMUX_DATA(PTM4_DATA, PTM4_OUT, PTM4_IN, PTM4_IN_PD),
+ PINMUX_DATA(PTM3_DATA, PTM3_OUT, PTM3_IN, PTM3_IN_PD),
+ PINMUX_DATA(PTM2_DATA, PTM2_OUT, PTM2_IN, PTM2_IN_PD),
+ PINMUX_DATA(PTM1_DATA, PTM1_OUT, PTM1_IN, PTM1_IN_PD),
+ PINMUX_DATA(PTM0_DATA, PTM0_OUT, PTM0_IN, PTM0_IN_PD),
+
+ /* PTN */
+ PINMUX_DATA(PTN7_DATA, PTN7_OUT, PTN7_IN),
+ PINMUX_DATA(PTN6_DATA, PTN6_OUT, PTN6_IN),
+ PINMUX_DATA(PTN5_DATA, PTN5_OUT, PTN5_IN),
+ PINMUX_DATA(PTN4_DATA, PTN4_OUT, PTN4_IN),
+ PINMUX_DATA(PTN3_DATA, PTN3_OUT, PTN3_IN),
+ PINMUX_DATA(PTN2_DATA, PTN2_OUT, PTN2_IN),
+ PINMUX_DATA(PTN1_DATA, PTN1_OUT, PTN1_IN),
+ PINMUX_DATA(PTN0_DATA, PTN0_OUT, PTN0_IN),
+
+ /* PTQ */
+ PINMUX_DATA(PTQ6_DATA, PTQ6_OUT),
+ PINMUX_DATA(PTQ5_DATA, PTQ5_OUT, PTQ5_IN, PTQ5_IN_PD),
+ PINMUX_DATA(PTQ4_DATA, PTQ4_OUT, PTQ4_IN, PTQ4_IN_PD),
+ PINMUX_DATA(PTQ3_DATA, PTQ3_OUT, PTQ3_IN, PTQ3_IN_PD),
+ PINMUX_DATA(PTQ2_DATA, PTQ2_IN, PTQ2_IN_PD),
+ PINMUX_DATA(PTQ1_DATA, PTQ1_OUT),
+ PINMUX_DATA(PTQ0_DATA, PTQ0_OUT, PTQ0_IN, PTQ0_IN_PU),
+
+ /* PTR */
+ PINMUX_DATA(PTR4_DATA, PTR4_OUT),
+ PINMUX_DATA(PTR3_DATA, PTR3_OUT),
+ PINMUX_DATA(PTR2_DATA, PTR2_IN, PTR2_IN_PU),
+ PINMUX_DATA(PTR1_DATA, PTR1_OUT),
+ PINMUX_DATA(PTR0_DATA, PTR0_OUT),
+
+ /* PTS */
+ PINMUX_DATA(PTS4_DATA, PTS4_IN, PTS4_IN_PD),
+ PINMUX_DATA(PTS3_DATA, PTS3_OUT),
+ PINMUX_DATA(PTS2_DATA, PTS2_OUT, PTS2_IN, PTS2_IN_PD),
+ PINMUX_DATA(PTS1_DATA, PTS1_IN, PTS1_IN_PD),
+ PINMUX_DATA(PTS0_DATA, PTS0_OUT),
+
+ /* PTT */
+ PINMUX_DATA(PTT4_DATA, PTT4_OUT, PTT4_IN, PTT4_IN_PD),
+ PINMUX_DATA(PTT3_DATA, PTT3_OUT, PTT3_IN, PTT3_IN_PD),
+ PINMUX_DATA(PTT2_DATA, PTT2_OUT, PTT2_IN, PTT2_IN_PD),
+ PINMUX_DATA(PTT1_DATA, PTT1_IN, PTT1_IN_PD),
+ PINMUX_DATA(PTT0_DATA, PTT0_OUT),
+
+ /* PTU */
+ PINMUX_DATA(PTU4_DATA, PTU4_OUT, PTU4_IN, PTU4_IN_PD),
+ PINMUX_DATA(PTU3_DATA, PTU3_OUT, PTU3_IN, PTU3_IN_PD),
+ PINMUX_DATA(PTU2_DATA, PTU2_OUT, PTU2_IN, PTU2_IN_PD),
+ PINMUX_DATA(PTU1_DATA, PTU1_IN, PTU1_IN_PD),
+ PINMUX_DATA(PTU0_DATA, PTU0_OUT, PTU0_IN, PTU0_IN_PD),
+
+ /* PTV */
+ PINMUX_DATA(PTV4_DATA, PTV4_OUT, PTV4_IN, PTV4_IN_PD),
+ PINMUX_DATA(PTV3_DATA, PTV3_OUT, PTV3_IN, PTV3_IN_PD),
+ PINMUX_DATA(PTV2_DATA, PTV2_OUT, PTV2_IN, PTV2_IN_PD),
+ PINMUX_DATA(PTV1_DATA, PTV1_OUT, PTV1_IN, PTV1_IN_PD),
+ PINMUX_DATA(PTV0_DATA, PTV0_OUT, PTV0_IN, PTV0_IN_PD),
+
+ /* PTW */
+ PINMUX_DATA(PTW6_DATA, PTW6_IN, PTW6_IN_PD),
+ PINMUX_DATA(PTW5_DATA, PTW5_OUT),
+ PINMUX_DATA(PTW4_DATA, PTW4_OUT, PTW4_IN, PTW4_IN_PD),
+ PINMUX_DATA(PTW3_DATA, PTW3_OUT, PTW3_IN, PTW3_IN_PD),
+ PINMUX_DATA(PTW2_DATA, PTW2_OUT, PTW2_IN, PTW2_IN_PD),
+ PINMUX_DATA(PTW1_DATA, PTW1_OUT, PTW1_IN, PTW1_IN_PD),
+ PINMUX_DATA(PTW0_DATA, PTW0_OUT, PTW0_IN, PTW0_IN_PD),
+
+ /* PTX */
+ PINMUX_DATA(PTX6_DATA, PTX6_OUT, PTX6_IN, PTX6_IN_PD),
+ PINMUX_DATA(PTX5_DATA, PTX5_OUT, PTX5_IN, PTX5_IN_PD),
+ PINMUX_DATA(PTX4_DATA, PTX4_OUT, PTX4_IN, PTX4_IN_PD),
+ PINMUX_DATA(PTX3_DATA, PTX3_OUT, PTX3_IN, PTX3_IN_PD),
+ PINMUX_DATA(PTX2_DATA, PTX2_OUT, PTX2_IN, PTX2_IN_PD),
+ PINMUX_DATA(PTX1_DATA, PTX1_OUT, PTX1_IN, PTX1_IN_PD),
+ PINMUX_DATA(PTX0_DATA, PTX0_OUT, PTX0_IN, PTX0_IN_PD),
+
+ /* PTY */
+ PINMUX_DATA(PTY5_DATA, PTY5_OUT, PTY5_IN, PTY5_IN_PU),
+ PINMUX_DATA(PTY4_DATA, PTY4_OUT, PTY4_IN, PTY4_IN_PU),
+ PINMUX_DATA(PTY3_DATA, PTY3_OUT, PTY3_IN, PTY3_IN_PU),
+ PINMUX_DATA(PTY2_DATA, PTY2_OUT, PTY2_IN, PTY2_IN_PU),
+ PINMUX_DATA(PTY1_DATA, PTY1_OUT),
+ PINMUX_DATA(PTY0_DATA, PTY0_OUT, PTY0_IN, PTY0_IN_PU),
+
+ /* PTZ */
+ PINMUX_DATA(PTZ5_DATA, PTZ5_IN, PTZ5_IN_PU),
+ PINMUX_DATA(PTZ4_DATA, PTZ4_IN, PTZ4_IN_PU),
+ PINMUX_DATA(PTZ3_DATA, PTZ3_IN, PTZ3_IN_PU),
+ PINMUX_DATA(PTZ2_DATA, PTZ2_IN, PTZ2_IN_PU),
+ PINMUX_DATA(PTZ1_DATA, PTZ1_IN, PTZ1_IN_PU),
+
+ /* SCIF0 */
+ PINMUX_DATA(SCIF0_TXD_MARK, SCIF0_TXD),
+ PINMUX_DATA(SCIF0_RXD_MARK, SCIF0_RXD),
+ PINMUX_DATA(SCIF0_RTS_MARK, PSD7_SCIF0_RTS, SCIF0_RTS_SIUAOSPD),
+ PINMUX_DATA(SCIF0_CTS_MARK, PSD6_SCIF0_CTS, SCIF0_CTS_SIUAISPD),
+ PINMUX_DATA(SCIF0_SCK_MARK, PSD8_SCIF0_SCK, SCIF0_SCK_TPUTO),
+
+ /* SCIF1 */
+ PINMUX_DATA(SCIF1_TXD_MARK, PSD11_SCIF1, VIO_D5_SCIF1_TXD),
+ PINMUX_DATA(SCIF1_RXD_MARK, PSD11_SCIF1, VIO_D6_SCIF1_RXD),
+ PINMUX_DATA(SCIF1_RTS_MARK, PSD12_SCIF1, VIO_CLK_SCIF1_RTS),
+ PINMUX_DATA(SCIF1_CTS_MARK, PSD12_SCIF1, VIO_VD_SCIF1_CTS),
+ PINMUX_DATA(SCIF1_SCK_MARK, PSD11_SCIF1, VIO_D7_SCIF1_SCK),
+
+ /* SCIF2 */
+ PINMUX_DATA(SCIF2_TXD_MARK, PSD13_SCIF2, VIO_STEM_SCIF2_TXD),
+ PINMUX_DATA(SCIF2_RXD_MARK, PSD13_SCIF2, VIO_HD_SCIF2_RXD),
+ PINMUX_DATA(SCIF2_RTS_MARK, PSD13_SCIF2, VIO_CKO_SCIF2_RTS),
+ PINMUX_DATA(SCIF2_CTS_MARK, PSD13_SCIF2, VIO_FLD_SCIF2_CTS),
+ PINMUX_DATA(SCIF2_SCK_MARK, PSD13_SCIF2, VIO_STEX_SCIF2_SCK),
+
+ /* SIO */
+ PINMUX_DATA(SIOTXD_MARK, PSB15_SIOTXD, SIOTXD_SIUBOSLD),
+ PINMUX_DATA(SIORXD_MARK, PSB14_SIORXD, SIORXD_SIUBISLD),
+ PINMUX_DATA(SIOD_MARK, PSB13_SIOD, SIOD_SIUBILR),
+ PINMUX_DATA(SIOSTRB0_MARK, PSB12_SIOSTRB0, SIOSTRB0_SIUBIBT),
+ PINMUX_DATA(SIOSTRB1_MARK, PSB11_SIOSTRB1, SIOSTRB1_SIUBOLR),
+ PINMUX_DATA(SIOSCK_MARK, PSB10_SIOSCK, SIOSCK_SIUBOBT),
+ PINMUX_DATA(SIOMCK_MARK, PSD9_SIOMCK_SIUMCKB, PSB9_SIOMCK, PTF6),
+
+ /* CEU */
+ PINMUX_DATA(VIO_D15_MARK, PSC0_VIO, HIZA10_NAF, NAF7_VIO_D15),
+ PINMUX_DATA(VIO_D14_MARK, PSC0_VIO, HIZA10_NAF, NAF6_VIO_D14),
+ PINMUX_DATA(VIO_D13_MARK, PSC0_VIO, HIZA10_NAF, NAF5_VIO_D13),
+ PINMUX_DATA(VIO_D12_MARK, PSC0_VIO, HIZA10_NAF, NAF4_VIO_D12),
+ PINMUX_DATA(VIO_D11_MARK, PSC0_VIO, HIZA10_NAF, NAF3_VIO_D11),
+ PINMUX_DATA(VIO_D10_MARK, PSE2_VIO_D10, HIZB0_VIO, NAF2_VIO_D10),
+ PINMUX_DATA(VIO_D9_MARK, PSE1_VIO_D9, HIZB0_VIO, NAF1_VIO_D9),
+ PINMUX_DATA(VIO_D8_MARK, PSE0_VIO_D8, HIZB0_VIO, NAF0_VIO_D8),
+ PINMUX_DATA(VIO_D7_MARK, PSD11_VIO, VIO_D7_SCIF1_SCK),
+ PINMUX_DATA(VIO_D6_MARK, PSD11_VIO, VIO_D6_SCIF1_RXD),
+ PINMUX_DATA(VIO_D5_MARK, PSD11_VIO, VIO_D5_SCIF1_TXD),
+ PINMUX_DATA(VIO_D4_MARK, VIO_D4),
+ PINMUX_DATA(VIO_D3_MARK, VIO_D3),
+ PINMUX_DATA(VIO_D2_MARK, VIO_D2),
+ PINMUX_DATA(VIO_D1_MARK, VIO_D1),
+ PINMUX_DATA(VIO_D0_MARK, PSD10_VIO_D0, VIO_D0_LCDLCLK),
+ PINMUX_DATA(VIO_CLK_MARK, PSD12_VIO, MSELB9_VIO, VIO_CLK_SCIF1_RTS),
+ PINMUX_DATA(VIO_VD_MARK, PSD12_VIO, MSELB9_VIO, VIO_VD_SCIF1_CTS),
+ PINMUX_DATA(VIO_HD_MARK, PSD13_VIO, MSELB9_VIO, VIO_HD_SCIF2_RXD),
+ PINMUX_DATA(VIO_FLD_MARK, PSD13_VIO, HIZA9_VIO, VIO_FLD_SCIF2_CTS),
+ PINMUX_DATA(VIO_CKO_MARK, PSD13_VIO, HIZA9_VIO, VIO_CKO_SCIF2_RTS),
+ PINMUX_DATA(VIO_STEX_MARK, PSD13_VIO, HIZA9_VIO, VIO_STEX_SCIF2_SCK),
+ PINMUX_DATA(VIO_STEM_MARK, PSD13_VIO, HIZA9_VIO, VIO_STEM_SCIF2_TXD),
+ PINMUX_DATA(VIO_VD2_MARK, PSE3_VIO, MSELB9_VIO2,
+ HIZB0_VIO, FOE_VIO_VD2),
+ PINMUX_DATA(VIO_HD2_MARK, PSE3_VIO, MSELB9_VIO2,
+ HIZB1_VIO, FCE_VIO_HD2),
+ PINMUX_DATA(VIO_CLK2_MARK, PSE3_VIO, MSELB9_VIO2,
+ HIZB1_VIO, FRB_VIO_CLK2),
+
+ /* LCDC */
+ PINMUX_DATA(LCDD23_MARK, HIZA8_LCDC, LCDD23),
+ PINMUX_DATA(LCDD22_MARK, HIZA8_LCDC, LCDD22),
+ PINMUX_DATA(LCDD21_MARK, HIZA8_LCDC, LCDD21),
+ PINMUX_DATA(LCDD20_MARK, HIZA8_LCDC, LCDD20),
+ PINMUX_DATA(LCDD19_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD19_DV_CLKI),
+ PINMUX_DATA(LCDD18_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD18_DV_CLK),
+ PINMUX_DATA(LCDD17_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC,
+ LCDD17_DV_HSYNC),
+ PINMUX_DATA(LCDD16_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC,
+ LCDD16_DV_VSYNC),
+ PINMUX_DATA(LCDD15_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD15_DV_D15),
+ PINMUX_DATA(LCDD14_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD14_DV_D14),
+ PINMUX_DATA(LCDD13_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD13_DV_D13),
+ PINMUX_DATA(LCDD12_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD12_DV_D12),
+ PINMUX_DATA(LCDD11_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD11_DV_D11),
+ PINMUX_DATA(LCDD10_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD10_DV_D10),
+ PINMUX_DATA(LCDD9_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD9_DV_D9),
+ PINMUX_DATA(LCDD8_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD8_DV_D8),
+ PINMUX_DATA(LCDD7_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD7_DV_D7),
+ PINMUX_DATA(LCDD6_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD6_DV_D6),
+ PINMUX_DATA(LCDD5_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD5_DV_D5),
+ PINMUX_DATA(LCDD4_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD4_DV_D4),
+ PINMUX_DATA(LCDD3_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD3_DV_D3),
+ PINMUX_DATA(LCDD2_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD2_DV_D2),
+ PINMUX_DATA(LCDD1_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD1_DV_D1),
+ PINMUX_DATA(LCDD0_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD0_DV_D0),
+ PINMUX_DATA(LCDLCLK_MARK, PSD10_LCDLCLK, VIO_D0_LCDLCLK),
+ /* Main LCD */
+ PINMUX_DATA(LCDDON_MARK, PSD2_LCDDON, HIZA7_LCDC, LCDDON_LCDDON2),
+ PINMUX_DATA(LCDVCPWC_MARK, PSD3_LCDVEPWC_LCDVCPWC,
+ HIZA6_LCDC, LCDVCPWC_LCDVCPWC2),
+ PINMUX_DATA(LCDVEPWC_MARK, PSD3_LCDVEPWC_LCDVCPWC,
+ HIZA6_LCDC, LCDVEPWC_LCDVEPWC2),
+ PINMUX_DATA(LCDVSYN_MARK, HIZA7_LCDC, LCDVSYN),
+ /* Main LCD - RGB Mode */
+ PINMUX_DATA(LCDDCK_MARK, MSELB8_RGB, HIZA8_LCDC, LCDDCK_LCDWR),
+ PINMUX_DATA(LCDHSYN_MARK, MSELB8_RGB, HIZA7_LCDC, LCDHSYN_LCDCS),
+ PINMUX_DATA(LCDDISP_MARK, MSELB8_RGB, HIZA7_LCDC, LCDDISP_LCDRS),
+ /* Main LCD - SYS Mode */
+ PINMUX_DATA(LCDRS_MARK, MSELB8_SYS, HIZA7_LCDC, LCDDISP_LCDRS),
+ PINMUX_DATA(LCDCS_MARK, MSELB8_SYS, HIZA7_LCDC, LCDHSYN_LCDCS),
+ PINMUX_DATA(LCDWR_MARK, MSELB8_SYS, HIZA8_LCDC, LCDDCK_LCDWR),
+ PINMUX_DATA(LCDRD_MARK, HIZA7_LCDC, LCDRD),
+ /* Sub LCD - SYS Mode */
+ PINMUX_DATA(LCDDON2_MARK, PSD2_LCDDON2, HIZA7_LCDC, LCDDON_LCDDON2),
+ PINMUX_DATA(LCDVCPWC2_MARK, PSD3_LCDVEPWC2_LCDVCPWC2,
+ HIZA6_LCDC, LCDVCPWC_LCDVCPWC2),
+ PINMUX_DATA(LCDVEPWC2_MARK, PSD3_LCDVEPWC2_LCDVCPWC2,
+ HIZA6_LCDC, LCDVEPWC_LCDVEPWC2),
+ PINMUX_DATA(LCDVSYN2_MARK, PSE12_LCDVSYN2, HIZA8_LCDC, LCDVSYN2_DACK),
+ PINMUX_DATA(LCDCS2_MARK, PSD5_LCDCS2, CS6B_CE1B_LCDCS2),
+
+ /* BSC */
+ PINMUX_DATA(IOIS16_MARK, IOIS16),
+ PINMUX_DATA(A25_MARK, A25),
+ PINMUX_DATA(A24_MARK, A24),
+ PINMUX_DATA(A23_MARK, A23),
+ PINMUX_DATA(A22_MARK, A22),
+ PINMUX_DATA(BS_MARK, PSA9_BS, IRQ4_BS),
+ PINMUX_DATA(CS6B_CE1B_MARK, PSD5_CS6B_CE1B, CS6B_CE1B_LCDCS2),
+ PINMUX_DATA(WAIT_MARK, WAIT),
+ PINMUX_DATA(CS6A_CE2B_MARK, CS6A_CE2B),
+
+ /* SBSC */
+ PINMUX_DATA(HPD63_MARK, HPD63),
+ PINMUX_DATA(HPD62_MARK, HPD62),
+ PINMUX_DATA(HPD61_MARK, HPD61),
+ PINMUX_DATA(HPD60_MARK, HPD60),
+ PINMUX_DATA(HPD59_MARK, HPD59),
+ PINMUX_DATA(HPD58_MARK, HPD58),
+ PINMUX_DATA(HPD57_MARK, HPD57),
+ PINMUX_DATA(HPD56_MARK, HPD56),
+ PINMUX_DATA(HPD55_MARK, HPD55),
+ PINMUX_DATA(HPD54_MARK, HPD54),
+ PINMUX_DATA(HPD53_MARK, HPD53),
+ PINMUX_DATA(HPD52_MARK, HPD52),
+ PINMUX_DATA(HPD51_MARK, HPD51),
+ PINMUX_DATA(HPD50_MARK, HPD50),
+ PINMUX_DATA(HPD49_MARK, HPD49),
+ PINMUX_DATA(HPD48_MARK, HPD48),
+ PINMUX_DATA(HPDQM7_MARK, HPDQM7),
+ PINMUX_DATA(HPDQM6_MARK, HPDQM6),
+ PINMUX_DATA(HPDQM5_MARK, HPDQM5),
+ PINMUX_DATA(HPDQM4_MARK, HPDQM4),
+
+ /* IRQ */
+ PINMUX_DATA(IRQ0_MARK, HIZC8_IRQ0, IRQ0),
+ PINMUX_DATA(IRQ1_MARK, HIZC9_IRQ1, IRQ1),
+ PINMUX_DATA(IRQ2_MARK, PSA4_IRQ2, HIZC10_IRQ2, IRQ2_SDHID2),
+ PINMUX_DATA(IRQ3_MARK, PSE15_SIOF0_MCK_IRQ3, PSB8_IRQ3,
+ HIZC11_IRQ3, PTQ0),
+ PINMUX_DATA(IRQ4_MARK, PSA9_IRQ4, HIZC12_IRQ4, IRQ4_BS),
+ PINMUX_DATA(IRQ5_MARK, HIZC13_IRQ5, IRQ5),
+ PINMUX_DATA(IRQ6_MARK, PSA15_IRQ6, HIZC14_IRQ6, KEYIN0_IRQ6),
+ PINMUX_DATA(IRQ7_MARK, PSA14_IRQ7, HIZC15_IRQ7, KEYIN4_IRQ7),
+
+ /* SDHI */
+ PINMUX_DATA(SDHICD_MARK, SDHICD),
+ PINMUX_DATA(SDHIWP_MARK, SDHIWP),
+ PINMUX_DATA(SDHID3_MARK, SDHID3),
+ PINMUX_DATA(SDHID2_MARK, PSA4_SDHID2, IRQ2_SDHID2),
+ PINMUX_DATA(SDHID1_MARK, SDHID1),
+ PINMUX_DATA(SDHID0_MARK, SDHID0),
+ PINMUX_DATA(SDHICMD_MARK, SDHICMD),
+ PINMUX_DATA(SDHICLK_MARK, SDHICLK),
+
+ /* SIU - Port A */
+ PINMUX_DATA(SIUAOLR_MARK, PSC13_SIUAOLR, HIZB4_SIUA, SIUAOLR_SIOF1_SYNC),
+ PINMUX_DATA(SIUAOBT_MARK, PSC14_SIUAOBT, HIZB4_SIUA, SIUAOBT_SIOF1_SCK),
+ PINMUX_DATA(SIUAISLD_MARK, PSC15_SIUAISLD, HIZB4_SIUA, SIUAISLD_SIOF1_RXD),
+ PINMUX_DATA(SIUAILR_MARK, PSC11_SIUAILR, HIZB4_SIUA, SIUAILR_SIOF1_SS2),
+ PINMUX_DATA(SIUAIBT_MARK, PSC12_SIUAIBT, HIZB4_SIUA, SIUAIBT_SIOF1_SS1),
+ PINMUX_DATA(SIUAOSLD_MARK, PSB0_SIUAOSLD, HIZB4_SIUA, SIUAOSLD_SIOF1_TXD),
+ PINMUX_DATA(SIUMCKA_MARK, PSE11_SIUMCKA_SIOF1_MCK, HIZB4_SIUA, PSB1_SIUMCKA, PTK0),
+ PINMUX_DATA(SIUFCKA_MARK, PSE11_SIUFCKA, HIZB4_SIUA, PTK0),
+
+ /* SIU - Port B */
+ PINMUX_DATA(SIUBOLR_MARK, PSB11_SIUBOLR, SIOSTRB1_SIUBOLR),
+ PINMUX_DATA(SIUBOBT_MARK, PSB10_SIUBOBT, SIOSCK_SIUBOBT),
+ PINMUX_DATA(SIUBISLD_MARK, PSB14_SIUBISLD, SIORXD_SIUBISLD),
+ PINMUX_DATA(SIUBILR_MARK, PSB13_SIUBILR, SIOD_SIUBILR),
+ PINMUX_DATA(SIUBIBT_MARK, PSB12_SIUBIBT, SIOSTRB0_SIUBIBT),
+ PINMUX_DATA(SIUBOSLD_MARK, PSB15_SIUBOSLD, SIOTXD_SIUBOSLD),
+ PINMUX_DATA(SIUMCKB_MARK, PSD9_SIOMCK_SIUMCKB, PSB9_SIUMCKB, PTF6),
+ PINMUX_DATA(SIUFCKB_MARK, PSD9_SIUFCKB, PTF6),
+
+ /* AUD */
+ PINMUX_DATA(AUDSYNC_MARK, AUDSYNC),
+ PINMUX_DATA(AUDATA3_MARK, AUDATA3),
+ PINMUX_DATA(AUDATA2_MARK, AUDATA2),
+ PINMUX_DATA(AUDATA1_MARK, AUDATA1),
+ PINMUX_DATA(AUDATA0_MARK, AUDATA0),
+
+ /* DMAC */
+ PINMUX_DATA(DACK_MARK, PSE12_DACK, LCDVSYN2_DACK),
+ PINMUX_DATA(DREQ0_MARK, DREQ0),
+
+ /* VOU */
+ PINMUX_DATA(DV_CLKI_MARK, PSD0_DV, LCDD19_DV_CLKI),
+ PINMUX_DATA(DV_CLK_MARK, PSD0_DV, LCDD18_DV_CLK),
+ PINMUX_DATA(DV_HSYNC_MARK, PSD0_DV, LCDD17_DV_HSYNC),
+ PINMUX_DATA(DV_VSYNC_MARK, PSD0_DV, LCDD16_DV_VSYNC),
+ PINMUX_DATA(DV_D15_MARK, PSD0_DV, LCDD15_DV_D15),
+ PINMUX_DATA(DV_D14_MARK, PSD0_DV, LCDD14_DV_D14),
+ PINMUX_DATA(DV_D13_MARK, PSD0_DV, LCDD13_DV_D13),
+ PINMUX_DATA(DV_D12_MARK, PSD0_DV, LCDD12_DV_D12),
+ PINMUX_DATA(DV_D11_MARK, PSD0_DV, LCDD11_DV_D11),
+ PINMUX_DATA(DV_D10_MARK, PSD0_DV, LCDD10_DV_D10),
+ PINMUX_DATA(DV_D9_MARK, PSD0_DV, LCDD9_DV_D9),
+ PINMUX_DATA(DV_D8_MARK, PSD0_DV, LCDD8_DV_D8),
+ PINMUX_DATA(DV_D7_MARK, PSD0_DV, LCDD7_DV_D7),
+ PINMUX_DATA(DV_D6_MARK, PSD0_DV, LCDD6_DV_D6),
+ PINMUX_DATA(DV_D5_MARK, PSD0_DV, LCDD5_DV_D5),
+ PINMUX_DATA(DV_D4_MARK, PSD0_DV, LCDD4_DV_D4),
+ PINMUX_DATA(DV_D3_MARK, PSD0_DV, LCDD3_DV_D3),
+ PINMUX_DATA(DV_D2_MARK, PSD0_DV, LCDD2_DV_D2),
+ PINMUX_DATA(DV_D1_MARK, PSD0_DV, LCDD1_DV_D1),
+ PINMUX_DATA(DV_D0_MARK, PSD0_DV, LCDD0_DV_D0),
+
+ /* CPG */
+ PINMUX_DATA(STATUS0_MARK, STATUS0),
+ PINMUX_DATA(PDSTATUS_MARK, PDSTATUS),
+
+ /* SIOF0 */
+ PINMUX_DATA(SIOF0_MCK_MARK, PSE15_SIOF0_MCK_IRQ3, PSB8_SIOF0_MCK, PTQ0),
+ PINMUX_DATA(SIOF0_SCK_MARK, PSB5_SIOF0_SCK, SIOF0_SCK_TS_SCK),
+ PINMUX_DATA(SIOF0_SYNC_MARK, PSB4_SIOF0_SYNC, SIOF0_SYNC_TS_SDEN),
+ PINMUX_DATA(SIOF0_SS1_MARK, PSB3_SIOF0_SS1, SIOF0_SS1_TS_SPSYNC),
+ PINMUX_DATA(SIOF0_SS2_MARK, PSB2_SIOF0_SS2, SIOF0_SS2_SIM_RST),
+ PINMUX_DATA(SIOF0_TXD_MARK, PSE14_SIOF0_TXD_IRDA_OUT,
+ PSB7_SIOF0_TXD, PTQ1),
+ PINMUX_DATA(SIOF0_RXD_MARK, PSE13_SIOF0_RXD_IRDA_IN,
+ PSB6_SIOF0_RXD, PTQ2),
+
+ /* SIOF1 */
+ PINMUX_DATA(SIOF1_MCK_MARK, PSE11_SIUMCKA_SIOF1_MCK,
+ PSB1_SIOF1_MCK, PTK0),
+ PINMUX_DATA(SIOF1_SCK_MARK, PSC14_SIOF1_SCK, SIUAOBT_SIOF1_SCK),
+ PINMUX_DATA(SIOF1_SYNC_MARK, PSC13_SIOF1_SYNC, SIUAOLR_SIOF1_SYNC),
+ PINMUX_DATA(SIOF1_SS1_MARK, PSC12_SIOF1_SS1, SIUAIBT_SIOF1_SS1),
+ PINMUX_DATA(SIOF1_SS2_MARK, PSC11_SIOF1_SS2, SIUAILR_SIOF1_SS2),
+ PINMUX_DATA(SIOF1_TXD_MARK, PSB0_SIOF1_TXD, SIUAOSLD_SIOF1_TXD),
+ PINMUX_DATA(SIOF1_RXD_MARK, PSC15_SIOF1_RXD, SIUAISLD_SIOF1_RXD),
+
+ /* SIM */
+ PINMUX_DATA(SIM_D_MARK, PSE15_SIM_D, PTQ0),
+ PINMUX_DATA(SIM_CLK_MARK, PSE14_SIM_CLK, PTQ1),
+ PINMUX_DATA(SIM_RST_MARK, PSB2_SIM_RST, SIOF0_SS2_SIM_RST),
+
+ /* TSIF */
+ PINMUX_DATA(TS_SDAT_MARK, PSE13_TS_SDAT, PTQ2),
+ PINMUX_DATA(TS_SCK_MARK, PSB5_TS_SCK, SIOF0_SCK_TS_SCK),
+ PINMUX_DATA(TS_SDEN_MARK, PSB4_TS_SDEN, SIOF0_SYNC_TS_SDEN),
+ PINMUX_DATA(TS_SPSYNC_MARK, PSB3_TS_SPSYNC, SIOF0_SS1_TS_SPSYNC),
+
+ /* IRDA */
+ PINMUX_DATA(IRDA_IN_MARK, PSE13_SIOF0_RXD_IRDA_IN, PSB6_IRDA_IN, PTQ2),
+ PINMUX_DATA(IRDA_OUT_MARK, PSE14_SIOF0_TXD_IRDA_OUT,
+ PSB7_IRDA_OUT, PTQ1),
+
+ /* TPU */
+ PINMUX_DATA(TPUTO_MARK, PSD8_TPUTO, SCIF0_SCK_TPUTO),
+
+ /* FLCTL */
+ PINMUX_DATA(FCE_MARK, PSE3_FLCTL, FCE_VIO_HD2),
+ PINMUX_DATA(NAF7_MARK, PSC0_NAF, HIZA10_NAF, NAF7_VIO_D15),
+ PINMUX_DATA(NAF6_MARK, PSC0_NAF, HIZA10_NAF, NAF6_VIO_D14),
+ PINMUX_DATA(NAF5_MARK, PSC0_NAF, HIZA10_NAF, NAF5_VIO_D13),
+ PINMUX_DATA(NAF4_MARK, PSC0_NAF, HIZA10_NAF, NAF4_VIO_D12),
+ PINMUX_DATA(NAF3_MARK, PSC0_NAF, HIZA10_NAF, NAF3_VIO_D11),
+ PINMUX_DATA(NAF2_MARK, PSE2_NAF2, HIZB0_VIO, NAF2_VIO_D10),
+ PINMUX_DATA(NAF1_MARK, PSE1_NAF1, HIZB0_VIO, NAF1_VIO_D9),
+ PINMUX_DATA(NAF0_MARK, PSE0_NAF0, HIZB0_VIO, NAF0_VIO_D8),
+ PINMUX_DATA(FCDE_MARK, FCDE),
+ PINMUX_DATA(FOE_MARK, PSE3_FLCTL, HIZB0_VIO, FOE_VIO_VD2),
+ PINMUX_DATA(FSC_MARK, FSC),
+ PINMUX_DATA(FWE_MARK, FWE),
+ PINMUX_DATA(FRB_MARK, PSE3_FLCTL, FRB_VIO_CLK2),
+
+ /* KEYSC */
+ PINMUX_DATA(KEYIN0_MARK, PSA15_KEYIN0, HIZC14_IRQ6, KEYIN0_IRQ6),
+ PINMUX_DATA(KEYIN1_MARK, HIZA14_KEYSC, KEYIN1),
+ PINMUX_DATA(KEYIN2_MARK, HIZA14_KEYSC, KEYIN2),
+ PINMUX_DATA(KEYIN3_MARK, HIZA14_KEYSC, KEYIN3),
+ PINMUX_DATA(KEYIN4_MARK, PSA14_KEYIN4, HIZC15_IRQ7, KEYIN4_IRQ7),
+ PINMUX_DATA(KEYOUT0_MARK, HIZA14_KEYSC, KEYOUT0),
+ PINMUX_DATA(KEYOUT1_MARK, HIZA14_KEYSC, KEYOUT1),
+ PINMUX_DATA(KEYOUT2_MARK, HIZA14_KEYSC, KEYOUT2),
+ PINMUX_DATA(KEYOUT3_MARK, HIZA14_KEYSC, KEYOUT3),
+ PINMUX_DATA(KEYOUT4_IN6_MARK, HIZA14_KEYSC, KEYOUT4_IN6),
+ PINMUX_DATA(KEYOUT5_IN5_MARK, HIZA14_KEYSC, KEYOUT5_IN5),
+};
+
+static struct pinmux_gpio pinmux_gpios[] = {
+ /* PTA */
+ PINMUX_GPIO(GPIO_PTA7, PTA7_DATA),
+ PINMUX_GPIO(GPIO_PTA6, PTA6_DATA),
+ PINMUX_GPIO(GPIO_PTA5, PTA5_DATA),
+ PINMUX_GPIO(GPIO_PTA4, PTA4_DATA),
+ PINMUX_GPIO(GPIO_PTA3, PTA3_DATA),
+ PINMUX_GPIO(GPIO_PTA2, PTA2_DATA),
+ PINMUX_GPIO(GPIO_PTA1, PTA1_DATA),
+ PINMUX_GPIO(GPIO_PTA0, PTA0_DATA),
+
+ /* PTB */
+ PINMUX_GPIO(GPIO_PTB7, PTB7_DATA),
+ PINMUX_GPIO(GPIO_PTB6, PTB6_DATA),
+ PINMUX_GPIO(GPIO_PTB5, PTB5_DATA),
+ PINMUX_GPIO(GPIO_PTB4, PTB4_DATA),
+ PINMUX_GPIO(GPIO_PTB3, PTB3_DATA),
+ PINMUX_GPIO(GPIO_PTB2, PTB2_DATA),
+ PINMUX_GPIO(GPIO_PTB1, PTB1_DATA),
+ PINMUX_GPIO(GPIO_PTB0, PTB0_DATA),
+
+ /* PTC */
+ PINMUX_GPIO(GPIO_PTC7, PTC7_DATA),
+ PINMUX_GPIO(GPIO_PTC5, PTC5_DATA),
+ PINMUX_GPIO(GPIO_PTC4, PTC4_DATA),
+ PINMUX_GPIO(GPIO_PTC3, PTC3_DATA),
+ PINMUX_GPIO(GPIO_PTC2, PTC2_DATA),
+ PINMUX_GPIO(GPIO_PTC0, PTC0_DATA),
+
+ /* PTD */
+ PINMUX_GPIO(GPIO_PTD7, PTD7_DATA),
+ PINMUX_GPIO(GPIO_PTD6, PTD6_DATA),
+ PINMUX_GPIO(GPIO_PTD5, PTD5_DATA),
+ PINMUX_GPIO(GPIO_PTD4, PTD4_DATA),
+ PINMUX_GPIO(GPIO_PTD3, PTD3_DATA),
+ PINMUX_GPIO(GPIO_PTD2, PTD2_DATA),
+ PINMUX_GPIO(GPIO_PTD1, PTD1_DATA),
+ PINMUX_GPIO(GPIO_PTD0, PTD0_DATA),
+
+ /* PTE */
+ PINMUX_GPIO(GPIO_PTE7, PTE7_DATA),
+ PINMUX_GPIO(GPIO_PTE6, PTE6_DATA),
+ PINMUX_GPIO(GPIO_PTE5, PTE5_DATA),
+ PINMUX_GPIO(GPIO_PTE4, PTE4_DATA),
+ PINMUX_GPIO(GPIO_PTE1, PTE1_DATA),
+ PINMUX_GPIO(GPIO_PTE0, PTE0_DATA),
+
+ /* PTF */
+ PINMUX_GPIO(GPIO_PTF6, PTF6_DATA),
+ PINMUX_GPIO(GPIO_PTF5, PTF5_DATA),
+ PINMUX_GPIO(GPIO_PTF4, PTF4_DATA),
+ PINMUX_GPIO(GPIO_PTF3, PTF3_DATA),
+ PINMUX_GPIO(GPIO_PTF2, PTF2_DATA),
+ PINMUX_GPIO(GPIO_PTF1, PTF1_DATA),
+ PINMUX_GPIO(GPIO_PTF0, PTF0_DATA),
+
+ /* PTG */
+ PINMUX_GPIO(GPIO_PTG4, PTG4_DATA),
+ PINMUX_GPIO(GPIO_PTG3, PTG3_DATA),
+ PINMUX_GPIO(GPIO_PTG2, PTG2_DATA),
+ PINMUX_GPIO(GPIO_PTG1, PTG1_DATA),
+ PINMUX_GPIO(GPIO_PTG0, PTG0_DATA),
+
+ /* PTH */
+ PINMUX_GPIO(GPIO_PTH7, PTH7_DATA),
+ PINMUX_GPIO(GPIO_PTH6, PTH6_DATA),
+ PINMUX_GPIO(GPIO_PTH5, PTH5_DATA),
+ PINMUX_GPIO(GPIO_PTH4, PTH4_DATA),
+ PINMUX_GPIO(GPIO_PTH3, PTH3_DATA),
+ PINMUX_GPIO(GPIO_PTH2, PTH2_DATA),
+ PINMUX_GPIO(GPIO_PTH1, PTH1_DATA),
+ PINMUX_GPIO(GPIO_PTH0, PTH0_DATA),
+
+ /* PTJ */
+ PINMUX_GPIO(GPIO_PTJ7, PTJ7_DATA),
+ PINMUX_GPIO(GPIO_PTJ6, PTJ6_DATA),
+ PINMUX_GPIO(GPIO_PTJ5, PTJ5_DATA),
+ PINMUX_GPIO(GPIO_PTJ1, PTJ1_DATA),
+ PINMUX_GPIO(GPIO_PTJ0, PTJ0_DATA),
+
+ /* PTK */
+ PINMUX_GPIO(GPIO_PTK6, PTK6_DATA),
+ PINMUX_GPIO(GPIO_PTK5, PTK5_DATA),
+ PINMUX_GPIO(GPIO_PTK4, PTK4_DATA),
+ PINMUX_GPIO(GPIO_PTK3, PTK3_DATA),
+ PINMUX_GPIO(GPIO_PTK2, PTK2_DATA),
+ PINMUX_GPIO(GPIO_PTK1, PTK1_DATA),
+ PINMUX_GPIO(GPIO_PTK0, PTK0_DATA),
+
+ /* PTL */
+ PINMUX_GPIO(GPIO_PTL7, PTL7_DATA),
+ PINMUX_GPIO(GPIO_PTL6, PTL6_DATA),
+ PINMUX_GPIO(GPIO_PTL5, PTL5_DATA),
+ PINMUX_GPIO(GPIO_PTL4, PTL4_DATA),
+ PINMUX_GPIO(GPIO_PTL3, PTL3_DATA),
+ PINMUX_GPIO(GPIO_PTL2, PTL2_DATA),
+ PINMUX_GPIO(GPIO_PTL1, PTL1_DATA),
+ PINMUX_GPIO(GPIO_PTL0, PTL0_DATA),
+
+ /* PTM */
+ PINMUX_GPIO(GPIO_PTM7, PTM7_DATA),
+ PINMUX_GPIO(GPIO_PTM6, PTM6_DATA),
+ PINMUX_GPIO(GPIO_PTM5, PTM5_DATA),
+ PINMUX_GPIO(GPIO_PTM4, PTM4_DATA),
+ PINMUX_GPIO(GPIO_PTM3, PTM3_DATA),
+ PINMUX_GPIO(GPIO_PTM2, PTM2_DATA),
+ PINMUX_GPIO(GPIO_PTM1, PTM1_DATA),
+ PINMUX_GPIO(GPIO_PTM0, PTM0_DATA),
+
+ /* PTN */
+ PINMUX_GPIO(GPIO_PTN7, PTN7_DATA),
+ PINMUX_GPIO(GPIO_PTN6, PTN6_DATA),
+ PINMUX_GPIO(GPIO_PTN5, PTN5_DATA),
+ PINMUX_GPIO(GPIO_PTN4, PTN4_DATA),
+ PINMUX_GPIO(GPIO_PTN3, PTN3_DATA),
+ PINMUX_GPIO(GPIO_PTN2, PTN2_DATA),
+ PINMUX_GPIO(GPIO_PTN1, PTN1_DATA),
+ PINMUX_GPIO(GPIO_PTN0, PTN0_DATA),
+
+ /* PTQ */
+ PINMUX_GPIO(GPIO_PTQ6, PTQ6_DATA),
+ PINMUX_GPIO(GPIO_PTQ5, PTQ5_DATA),
+ PINMUX_GPIO(GPIO_PTQ4, PTQ4_DATA),
+ PINMUX_GPIO(GPIO_PTQ3, PTQ3_DATA),
+ PINMUX_GPIO(GPIO_PTQ2, PTQ2_DATA),
+ PINMUX_GPIO(GPIO_PTQ1, PTQ1_DATA),
+ PINMUX_GPIO(GPIO_PTQ0, PTQ0_DATA),
+
+ /* PTR */
+ PINMUX_GPIO(GPIO_PTR4, PTR4_DATA),
+ PINMUX_GPIO(GPIO_PTR3, PTR3_DATA),
+ PINMUX_GPIO(GPIO_PTR2, PTR2_DATA),
+ PINMUX_GPIO(GPIO_PTR1, PTR1_DATA),
+ PINMUX_GPIO(GPIO_PTR0, PTR0_DATA),
+
+ /* PTS */
+ PINMUX_GPIO(GPIO_PTS4, PTS4_DATA),
+ PINMUX_GPIO(GPIO_PTS3, PTS3_DATA),
+ PINMUX_GPIO(GPIO_PTS2, PTS2_DATA),
+ PINMUX_GPIO(GPIO_PTS1, PTS1_DATA),
+ PINMUX_GPIO(GPIO_PTS0, PTS0_DATA),
+
+ /* PTT */
+ PINMUX_GPIO(GPIO_PTT4, PTT4_DATA),
+ PINMUX_GPIO(GPIO_PTT3, PTT3_DATA),
+ PINMUX_GPIO(GPIO_PTT2, PTT2_DATA),
+ PINMUX_GPIO(GPIO_PTT1, PTT1_DATA),
+ PINMUX_GPIO(GPIO_PTT0, PTT0_DATA),
+
+ /* PTU */
+ PINMUX_GPIO(GPIO_PTU4, PTU4_DATA),
+ PINMUX_GPIO(GPIO_PTU3, PTU3_DATA),
+ PINMUX_GPIO(GPIO_PTU2, PTU2_DATA),
+ PINMUX_GPIO(GPIO_PTU1, PTU1_DATA),
+ PINMUX_GPIO(GPIO_PTU0, PTU0_DATA),
+
+ /* PTV */
+ PINMUX_GPIO(GPIO_PTV4, PTV4_DATA),
+ PINMUX_GPIO(GPIO_PTV3, PTV3_DATA),
+ PINMUX_GPIO(GPIO_PTV2, PTV2_DATA),
+ PINMUX_GPIO(GPIO_PTV1, PTV1_DATA),
+ PINMUX_GPIO(GPIO_PTV0, PTV0_DATA),
+
+ /* PTW */
+ PINMUX_GPIO(GPIO_PTW6, PTW6_DATA),
+ PINMUX_GPIO(GPIO_PTW5, PTW5_DATA),
+ PINMUX_GPIO(GPIO_PTW4, PTW4_DATA),
+ PINMUX_GPIO(GPIO_PTW3, PTW3_DATA),
+ PINMUX_GPIO(GPIO_PTW2, PTW2_DATA),
+ PINMUX_GPIO(GPIO_PTW1, PTW1_DATA),
+ PINMUX_GPIO(GPIO_PTW0, PTW0_DATA),
+
+ /* PTX */
+ PINMUX_GPIO(GPIO_PTX6, PTX6_DATA),
+ PINMUX_GPIO(GPIO_PTX5, PTX5_DATA),
+ PINMUX_GPIO(GPIO_PTX4, PTX4_DATA),
+ PINMUX_GPIO(GPIO_PTX3, PTX3_DATA),
+ PINMUX_GPIO(GPIO_PTX2, PTX2_DATA),
+ PINMUX_GPIO(GPIO_PTX1, PTX1_DATA),
+ PINMUX_GPIO(GPIO_PTX0, PTX0_DATA),
+
+ /* PTY */
+ PINMUX_GPIO(GPIO_PTY5, PTY5_DATA),
+ PINMUX_GPIO(GPIO_PTY4, PTY4_DATA),
+ PINMUX_GPIO(GPIO_PTY3, PTY3_DATA),
+ PINMUX_GPIO(GPIO_PTY2, PTY2_DATA),
+ PINMUX_GPIO(GPIO_PTY1, PTY1_DATA),
+ PINMUX_GPIO(GPIO_PTY0, PTY0_DATA),
+
+ /* PTZ */
+ PINMUX_GPIO(GPIO_PTZ5, PTZ5_DATA),
+ PINMUX_GPIO(GPIO_PTZ4, PTZ4_DATA),
+ PINMUX_GPIO(GPIO_PTZ3, PTZ3_DATA),
+ PINMUX_GPIO(GPIO_PTZ2, PTZ2_DATA),
+ PINMUX_GPIO(GPIO_PTZ1, PTZ1_DATA),
+
+ /* SCIF0 */
+ PINMUX_GPIO(GPIO_FN_SCIF0_TXD, SCIF0_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_RXD, SCIF0_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_RTS, SCIF0_RTS_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_CTS, SCIF0_CTS_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_SCK, SCIF0_SCK_MARK),
+
+ /* SCIF1 */
+ PINMUX_GPIO(GPIO_FN_SCIF1_TXD, SCIF1_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF1_RXD, SCIF1_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF1_RTS, SCIF1_RTS_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF1_CTS, SCIF1_CTS_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF1_SCK, SCIF1_SCK_MARK),
+
+ /* SCIF2 */
+ PINMUX_GPIO(GPIO_FN_SCIF2_TXD, SCIF2_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF2_RXD, SCIF2_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF2_RTS, SCIF2_RTS_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF2_CTS, SCIF2_CTS_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF2_SCK, SCIF2_SCK_MARK),
+
+ /* SIO */
+ PINMUX_GPIO(GPIO_FN_SIOTXD, SIOTXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SIORXD, SIORXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOD, SIOD_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOSTRB0, SIOSTRB0_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOSTRB1, SIOSTRB1_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOSCK, SIOSCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOMCK, SIOMCK_MARK),
+
+ /* CEU */
+ PINMUX_GPIO(GPIO_FN_VIO_D15, VIO_D15_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D14, VIO_D14_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D13, VIO_D13_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D12, VIO_D12_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D11, VIO_D11_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D10, VIO_D10_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D9, VIO_D9_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D8, VIO_D8_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D7, VIO_D7_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D6, VIO_D6_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D5, VIO_D5_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D4, VIO_D4_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D3, VIO_D3_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D2, VIO_D2_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D1, VIO_D1_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D0, VIO_D0_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_CLK, VIO_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_VD, VIO_VD_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_HD, VIO_HD_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_FLD, VIO_FLD_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_CKO, VIO_CKO_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_STEX, VIO_STEX_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_STEM, VIO_STEM_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_VD2, VIO_VD2_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_HD2, VIO_HD2_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_CLK2, VIO_CLK2_MARK),
+
+ /* LCDC */
+ PINMUX_GPIO(GPIO_FN_LCDD23, LCDD23_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD22, LCDD22_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD21, LCDD21_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD20, LCDD20_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD19, LCDD19_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD18, LCDD18_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD17, LCDD17_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD16, LCDD16_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD15, LCDD15_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD14, LCDD14_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD13, LCDD13_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD12, LCDD12_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD11, LCDD11_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD10, LCDD10_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD9, LCDD9_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD8, LCDD8_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD7, LCDD7_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD6, LCDD6_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD5, LCDD5_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD4, LCDD4_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD3, LCDD3_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD2, LCDD2_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD1, LCDD1_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD0, LCDD0_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDLCLK, LCDLCLK_MARK),
+ /* Main LCD */
+ PINMUX_GPIO(GPIO_FN_LCDDON, LCDDON_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDVCPWC, LCDVCPWC_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDVEPWC, LCDVEPWC_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDVSYN, LCDVSYN_MARK),
+ /* Main LCD - RGB Mode */
+ PINMUX_GPIO(GPIO_FN_LCDDCK, LCDDCK_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDHSYN, LCDHSYN_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDDISP, LCDDISP_MARK),
+ /* Main LCD - SYS Mode */
+ PINMUX_GPIO(GPIO_FN_LCDRS, LCDRS_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDCS, LCDCS_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDWR, LCDWR_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDRD, LCDRD_MARK),
+ /* Sub LCD - SYS Mode */
+ PINMUX_GPIO(GPIO_FN_LCDDON2, LCDDON2_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDVCPWC2, LCDVCPWC2_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDVEPWC2, LCDVEPWC2_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDVSYN2, LCDVSYN2_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDCS2, LCDCS2_MARK),
+
+ /* BSC */
+ PINMUX_GPIO(GPIO_FN_IOIS16, IOIS16_MARK),
+ PINMUX_GPIO(GPIO_FN_A25, A25_MARK),
+ PINMUX_GPIO(GPIO_FN_A24, A24_MARK),
+ PINMUX_GPIO(GPIO_FN_A23, A23_MARK),
+ PINMUX_GPIO(GPIO_FN_A22, A22_MARK),
+ PINMUX_GPIO(GPIO_FN_BS, BS_MARK),
+ PINMUX_GPIO(GPIO_FN_CS6B_CE1B, CS6B_CE1B_MARK),
+ PINMUX_GPIO(GPIO_FN_WAIT, WAIT_MARK),
+ PINMUX_GPIO(GPIO_FN_CS6A_CE2B, CS6A_CE2B_MARK),
+
+ /* SBSC */
+ PINMUX_GPIO(GPIO_FN_HPD63, HPD63_MARK),
+ PINMUX_GPIO(GPIO_FN_HPD62, HPD62_MARK),
+ PINMUX_GPIO(GPIO_FN_HPD61, HPD61_MARK),
+ PINMUX_GPIO(GPIO_FN_HPD60, HPD60_MARK),
+ PINMUX_GPIO(GPIO_FN_HPD59, HPD59_MARK),
+ PINMUX_GPIO(GPIO_FN_HPD58, HPD58_MARK),
+ PINMUX_GPIO(GPIO_FN_HPD57, HPD57_MARK),
+ PINMUX_GPIO(GPIO_FN_HPD56, HPD56_MARK),
+ PINMUX_GPIO(GPIO_FN_HPD55, HPD55_MARK),
+ PINMUX_GPIO(GPIO_FN_HPD54, HPD54_MARK),
+ PINMUX_GPIO(GPIO_FN_HPD53, HPD53_MARK),
+ PINMUX_GPIO(GPIO_FN_HPD52, HPD52_MARK),
+ PINMUX_GPIO(GPIO_FN_HPD51, HPD51_MARK),
+ PINMUX_GPIO(GPIO_FN_HPD50, HPD50_MARK),
+ PINMUX_GPIO(GPIO_FN_HPD49, HPD49_MARK),
+ PINMUX_GPIO(GPIO_FN_HPD48, HPD48_MARK),
+ PINMUX_GPIO(GPIO_FN_HPDQM7, HPDQM7_MARK),
+ PINMUX_GPIO(GPIO_FN_HPDQM6, HPDQM6_MARK),
+ PINMUX_GPIO(GPIO_FN_HPDQM5, HPDQM5_MARK),
+ PINMUX_GPIO(GPIO_FN_HPDQM4, HPDQM4_MARK),
+
+ /* IRQ */
+ PINMUX_GPIO(GPIO_FN_IRQ0, IRQ0_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ1, IRQ1_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ2, IRQ2_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ3, IRQ3_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ4, IRQ4_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ5, IRQ5_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ6, IRQ6_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ7, IRQ7_MARK),
+
+ /* SDHI */
+ PINMUX_GPIO(GPIO_FN_SDHICD, SDHICD_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHIWP, SDHIWP_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHID3, SDHID3_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHID2, SDHID2_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHID1, SDHID1_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHID0, SDHID0_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHICMD, SDHICMD_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHICLK, SDHICLK_MARK),
+
+ /* SIU - Port A */
+ PINMUX_GPIO(GPIO_FN_SIUAOLR, SIUAOLR_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUAOBT, SIUAOBT_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUAISLD, SIUAISLD_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUAILR, SIUAILR_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUAIBT, SIUAIBT_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUAOSLD, SIUAOSLD_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUMCKA, SIUMCKA_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUFCKA, SIUFCKA_MARK),
+
+ /* SIU - Port B */
+ PINMUX_GPIO(GPIO_FN_SIUBOLR, SIUBOLR_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUBOBT, SIUBOBT_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUBISLD, SIUBISLD_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUBILR, SIUBILR_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUBIBT, SIUBIBT_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUBOSLD, SIUBOSLD_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUMCKB, SIUMCKB_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUFCKB, SIUFCKB_MARK),
+
+ /* AUD */
+ PINMUX_GPIO(GPIO_FN_AUDSYNC, AUDSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDATA3, AUDATA3_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDATA2, AUDATA2_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDATA1, AUDATA1_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDATA0, AUDATA0_MARK),
+
+ /* DMAC */
+ PINMUX_GPIO(GPIO_FN_DACK, DACK_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ0, DREQ0_MARK),
+
+ /* VOU */
+ PINMUX_GPIO(GPIO_FN_DV_CLKI, DV_CLKI_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_CLK, DV_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_HSYNC, DV_HSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_VSYNC, DV_VSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D15, DV_D15_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D14, DV_D14_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D13, DV_D13_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D12, DV_D12_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D11, DV_D11_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D10, DV_D10_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D9, DV_D9_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D8, DV_D8_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D7, DV_D7_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D6, DV_D6_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D5, DV_D5_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D4, DV_D4_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D3, DV_D3_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D2, DV_D2_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D1, DV_D1_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D0, DV_D0_MARK),
+
+ /* CPG */
+ PINMUX_GPIO(GPIO_FN_STATUS0, STATUS0_MARK),
+ PINMUX_GPIO(GPIO_FN_PDSTATUS, PDSTATUS_MARK),
+
+ /* SIOF0 */
+ PINMUX_GPIO(GPIO_FN_SIOF0_MCK, SIOF0_MCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF0_SCK, SIOF0_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF0_SYNC, SIOF0_SYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF0_SS1, SIOF0_SS1_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF0_SS2, SIOF0_SS2_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF0_TXD, SIOF0_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF0_RXD, SIOF0_RXD_MARK),
+
+ /* SIOF1 */
+ PINMUX_GPIO(GPIO_FN_SIOF1_MCK, SIOF1_MCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF1_SCK, SIOF1_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF1_SYNC, SIOF1_SYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF1_SS1, SIOF1_SS1_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF1_SS2, SIOF1_SS2_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF1_TXD, SIOF1_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF1_RXD, SIOF1_RXD_MARK),
+
+ /* SIM */
+ PINMUX_GPIO(GPIO_FN_SIM_D, SIM_D_MARK),
+ PINMUX_GPIO(GPIO_FN_SIM_CLK, SIM_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_SIM_RST, SIM_RST_MARK),
+
+ /* TSIF */
+ PINMUX_GPIO(GPIO_FN_TS_SDAT, TS_SDAT_MARK),
+ PINMUX_GPIO(GPIO_FN_TS_SCK, TS_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_TS_SDEN, TS_SDEN_MARK),
+ PINMUX_GPIO(GPIO_FN_TS_SPSYNC, TS_SPSYNC_MARK),
+
+ /* IRDA */
+ PINMUX_GPIO(GPIO_FN_IRDA_IN, IRDA_IN_MARK),
+ PINMUX_GPIO(GPIO_FN_IRDA_OUT, IRDA_OUT_MARK),
+
+ /* TPU */
+ PINMUX_GPIO(GPIO_FN_TPUTO, TPUTO_MARK),
+
+ /* FLCTL */
+ PINMUX_GPIO(GPIO_FN_FCE, FCE_MARK),
+ PINMUX_GPIO(GPIO_FN_NAF7, NAF7_MARK),
+ PINMUX_GPIO(GPIO_FN_NAF6, NAF6_MARK),
+ PINMUX_GPIO(GPIO_FN_NAF5, NAF5_MARK),
+ PINMUX_GPIO(GPIO_FN_NAF4, NAF4_MARK),
+ PINMUX_GPIO(GPIO_FN_NAF3, NAF3_MARK),
+ PINMUX_GPIO(GPIO_FN_NAF2, NAF2_MARK),
+ PINMUX_GPIO(GPIO_FN_NAF1, NAF1_MARK),
+ PINMUX_GPIO(GPIO_FN_NAF0, NAF0_MARK),
+ PINMUX_GPIO(GPIO_FN_FCDE, FCDE_MARK),
+ PINMUX_GPIO(GPIO_FN_FOE, FOE_MARK),
+ PINMUX_GPIO(GPIO_FN_FSC, FSC_MARK),
+ PINMUX_GPIO(GPIO_FN_FWE, FWE_MARK),
+ PINMUX_GPIO(GPIO_FN_FRB, FRB_MARK),
+
+ /* KEYSC */
+ PINMUX_GPIO(GPIO_FN_KEYIN0, KEYIN0_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYIN1, KEYIN1_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYIN2, KEYIN2_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYIN3, KEYIN3_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYIN4, KEYIN4_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYOUT0, KEYOUT0_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYOUT1, KEYOUT1_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYOUT2, KEYOUT2_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYOUT3, KEYOUT3_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYOUT4_IN6, KEYOUT4_IN6_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYOUT5_IN5, KEYOUT5_IN5_MARK),
+};
+
+static struct pinmux_cfg_reg pinmux_config_regs[] = {
+ { PINMUX_CFG_REG("PACR", 0xa4050100, 16, 2) {
+ VIO_D7_SCIF1_SCK, PTA7_OUT, PTA7_IN_PD, PTA7_IN,
+ VIO_D6_SCIF1_RXD, 0, PTA6_IN_PD, PTA6_IN,
+ VIO_D5_SCIF1_TXD, PTA5_OUT, PTA5_IN_PD, PTA5_IN,
+ VIO_D4, 0, PTA4_IN_PD, PTA4_IN,
+ VIO_D3, 0, PTA3_IN_PD, PTA3_IN,
+ VIO_D2, 0, PTA2_IN_PD, PTA2_IN,
+ VIO_D1, 0, PTA1_IN_PD, PTA1_IN,
+ VIO_D0_LCDLCLK, 0, PTA0_IN_PD, PTA0_IN }
+ },
+ { PINMUX_CFG_REG("PBCR", 0xa4050102, 16, 2) {
+ HPD55, PTB7_OUT, 0, PTB7_IN,
+ HPD54, PTB6_OUT, 0, PTB6_IN,
+ HPD53, PTB5_OUT, 0, PTB5_IN,
+ HPD52, PTB4_OUT, 0, PTB4_IN,
+ HPD51, PTB3_OUT, 0, PTB3_IN,
+ HPD50, PTB2_OUT, 0, PTB2_IN,
+ HPD49, PTB1_OUT, 0, PTB1_IN,
+ HPD48, PTB0_OUT, 0, PTB0_IN }
+ },
+ { PINMUX_CFG_REG("PCCR", 0xa4050104, 16, 2) {
+ 0, 0, PTC7_IN_PU, PTC7_IN,
+ 0, 0, 0, 0,
+ IOIS16, 0, PTC5_IN_PU, PTC5_IN,
+ HPDQM7, PTC4_OUT, 0, PTC4_IN,
+ HPDQM6, PTC3_OUT, 0, PTC3_IN,
+ HPDQM5, PTC2_OUT, 0, PTC2_IN,
+ 0, 0, 0, 0,
+ HPDQM4, PTC0_OUT, 0, PTC0_IN }
+ },
+ { PINMUX_CFG_REG("PDCR", 0xa4050106, 16, 2) {
+ SDHICD, 0, PTD7_IN_PU, PTD7_IN,
+ SDHIWP, PTD6_OUT, PTD6_IN_PU, PTD6_IN,
+ SDHID3, PTD5_OUT, PTD5_IN_PU, PTD5_IN,
+ IRQ2_SDHID2, PTD4_OUT, PTD4_IN_PU, PTD4_IN,
+ SDHID1, PTD3_OUT, PTD3_IN_PU, PTD3_IN,
+ SDHID0, PTD2_OUT, PTD2_IN_PU, PTD2_IN,
+ SDHICMD, PTD1_OUT, PTD1_IN_PU, PTD1_IN,
+ SDHICLK, PTD0_OUT, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PECR", 0xa4050108, 16, 2) {
+ A25, PTE7_OUT, PTE7_IN_PD, PTE7_IN,
+ A24, PTE6_OUT, PTE6_IN_PD, PTE6_IN,
+ A23, PTE5_OUT, PTE5_IN_PD, PTE5_IN,
+ A22, PTE4_OUT, PTE4_IN_PD, PTE4_IN,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ IRQ5, PTE1_OUT, PTE1_IN_PD, PTE1_IN,
+ IRQ4_BS, PTE0_OUT, PTE0_IN_PD, PTE0_IN }
+ },
+ { PINMUX_CFG_REG("PFCR", 0xa405010a, 16, 2) {
+ 0, 0, 0, 0,
+ PTF6, PTF6_OUT, PTF6_IN_PD, PTF6_IN,
+ SIOSCK_SIUBOBT, PTF5_OUT, PTF5_IN_PD, PTF5_IN,
+ SIOSTRB1_SIUBOLR, PTF4_OUT, PTF4_IN_PD, PTF4_IN,
+ SIOSTRB0_SIUBIBT, PTF3_OUT, PTF3_IN_PD, PTF3_IN,
+ SIOD_SIUBILR, PTF2_OUT, PTF2_IN_PD, PTF2_IN,
+ SIORXD_SIUBISLD, 0, PTF1_IN_PD, PTF1_IN,
+ SIOTXD_SIUBOSLD, PTF0_OUT, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PGCR", 0xa405010c, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ AUDSYNC, PTG4_OUT, 0, 0,
+ AUDATA3, PTG3_OUT, 0, 0,
+ AUDATA2, PTG2_OUT, 0, 0,
+ AUDATA1, PTG1_OUT, 0, 0,
+ AUDATA0, PTG0_OUT, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PHCR", 0xa405010e, 16, 2) {
+ LCDVCPWC_LCDVCPWC2, PTH7_OUT, 0, 0,
+ LCDVSYN2_DACK, PTH6_OUT, PTH6_IN_PD, PTH6_IN,
+ LCDVSYN, PTH5_OUT, PTH5_IN_PD, PTH5_IN,
+ LCDDISP_LCDRS, PTH4_OUT, 0, 0,
+ LCDHSYN_LCDCS, PTH3_OUT, 0, 0,
+ LCDDON_LCDDON2, PTH2_OUT, 0, 0,
+ LCDD17_DV_HSYNC, PTH1_OUT, PTH1_IN_PD, PTH1_IN,
+ LCDD16_DV_VSYNC, PTH0_OUT, PTH0_IN_PD, PTH0_IN }
+ },
+ { PINMUX_CFG_REG("PJCR", 0xa4050110, 16, 2) {
+ STATUS0, PTJ7_OUT, 0, 0,
+ 0, PTJ6_OUT, 0, 0,
+ PDSTATUS, PTJ5_OUT, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ IRQ1, PTJ1_OUT, PTJ1_IN_PU, PTJ1_IN,
+ IRQ0, PTJ0_OUT, PTJ0_IN_PU, PTJ0_IN }
+ },
+ { PINMUX_CFG_REG("PKCR", 0xa4050112, 16, 2) {
+ 0, 0, 0, 0,
+ SIUAILR_SIOF1_SS2, PTK6_OUT, PTK6_IN_PD, PTK6_IN,
+ SIUAIBT_SIOF1_SS1, PTK5_OUT, PTK5_IN_PD, PTK5_IN,
+ SIUAOLR_SIOF1_SYNC, PTK4_OUT, PTK4_IN_PD, PTK4_IN,
+ SIUAOBT_SIOF1_SCK, PTK3_OUT, PTK3_IN_PD, PTK3_IN,
+ SIUAISLD_SIOF1_RXD, 0, PTK2_IN_PD, PTK2_IN,
+ SIUAOSLD_SIOF1_TXD, PTK1_OUT, 0, 0,
+ PTK0, PTK0_OUT, PTK0_IN_PD, PTK0_IN }
+ },
+ { PINMUX_CFG_REG("PLCR", 0xa4050114, 16, 2) {
+ LCDD15_DV_D15, PTL7_OUT, PTL7_IN_PD, PTL7_IN,
+ LCDD14_DV_D14, PTL6_OUT, PTL6_IN_PD, PTL6_IN,
+ LCDD13_DV_D13, PTL5_OUT, PTL5_IN_PD, PTL5_IN,
+ LCDD12_DV_D12, PTL4_OUT, PTL4_IN_PD, PTL4_IN,
+ LCDD11_DV_D11, PTL3_OUT, PTL3_IN_PD, PTL3_IN,
+ LCDD10_DV_D10, PTL2_OUT, PTL2_IN_PD, PTL2_IN,
+ LCDD9_DV_D9, PTL1_OUT, PTL1_IN_PD, PTL1_IN,
+ LCDD8_DV_D8, PTL0_OUT, PTL0_IN_PD, PTL0_IN }
+ },
+ { PINMUX_CFG_REG("PMCR", 0xa4050116, 16, 2) {
+ LCDD7_DV_D7, PTM7_OUT, PTM7_IN_PD, PTM7_IN,
+ LCDD6_DV_D6, PTM6_OUT, PTM6_IN_PD, PTM6_IN,
+ LCDD5_DV_D5, PTM5_OUT, PTM5_IN_PD, PTM5_IN,
+ LCDD4_DV_D4, PTM4_OUT, PTM4_IN_PD, PTM4_IN,
+ LCDD3_DV_D3, PTM3_OUT, PTM3_IN_PD, PTM3_IN,
+ LCDD2_DV_D2, PTM2_OUT, PTM2_IN_PD, PTM2_IN,
+ LCDD1_DV_D1, PTM1_OUT, PTM1_IN_PD, PTM1_IN,
+ LCDD0_DV_D0, PTM0_OUT, PTM0_IN_PD, PTM0_IN }
+ },
+ { PINMUX_CFG_REG("PNCR", 0xa4050118, 16, 2) {
+ HPD63, PTN7_OUT, 0, PTN7_IN,
+ HPD62, PTN6_OUT, 0, PTN6_IN,
+ HPD61, PTN5_OUT, 0, PTN5_IN,
+ HPD60, PTN4_OUT, 0, PTN4_IN,
+ HPD59, PTN3_OUT, 0, PTN3_IN,
+ HPD58, PTN2_OUT, 0, PTN2_IN,
+ HPD57, PTN1_OUT, 0, PTN1_IN,
+ HPD56, PTN0_OUT, 0, PTN0_IN }
+ },
+ { PINMUX_CFG_REG("PQCR", 0xa405011a, 16, 2) {
+ 0, 0, 0, 0,
+ SIOF0_SS2_SIM_RST, PTQ6_OUT, 0, 0,
+ SIOF0_SS1_TS_SPSYNC, PTQ5_OUT, PTQ5_IN_PD, PTQ5_IN,
+ SIOF0_SYNC_TS_SDEN, PTQ4_OUT, PTQ4_IN_PD, PTQ4_IN,
+ SIOF0_SCK_TS_SCK, PTQ3_OUT, PTQ3_IN_PD, PTQ3_IN,
+ PTQ2, 0, PTQ2_IN_PD, PTQ2_IN,
+ PTQ1, PTQ1_OUT, 0, 0,
+ PTQ0, PTQ0_OUT, PTQ0_IN_PU, PTQ0_IN }
+ },
+ { PINMUX_CFG_REG("PRCR", 0xa405011c, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ LCDRD, PTR4_OUT, 0, 0,
+ CS6B_CE1B_LCDCS2, PTR3_OUT, 0, 0,
+ WAIT, 0, PTR2_IN_PU, PTR2_IN,
+ LCDDCK_LCDWR, PTR1_OUT, 0, 0,
+ LCDVEPWC_LCDVEPWC2, PTR0_OUT, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PSCR", 0xa405011e, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ SCIF0_CTS_SIUAISPD, 0, PTS4_IN_PD, PTS4_IN,
+ SCIF0_RTS_SIUAOSPD, PTS3_OUT, 0, 0,
+ SCIF0_SCK_TPUTO, PTS2_OUT, PTS2_IN_PD, PTS2_IN,
+ SCIF0_RXD, 0, PTS1_IN_PD, PTS1_IN,
+ SCIF0_TXD, PTS0_OUT, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PTCR", 0xa4050140, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ FOE_VIO_VD2, PTT4_OUT, PTT4_IN_PD, PTT4_IN,
+ FWE, PTT3_OUT, PTT3_IN_PD, PTT3_IN,
+ FSC, PTT2_OUT, PTT2_IN_PD, PTT2_IN,
+ DREQ0, 0, PTT1_IN_PD, PTT1_IN,
+ FCDE, PTT0_OUT, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PUCR", 0xa4050142, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ NAF2_VIO_D10, PTU4_OUT, PTU4_IN_PD, PTU4_IN,
+ NAF1_VIO_D9, PTU3_OUT, PTU3_IN_PD, PTU3_IN,
+ NAF0_VIO_D8, PTU2_OUT, PTU2_IN_PD, PTU2_IN,
+ FRB_VIO_CLK2, 0, PTU1_IN_PD, PTU1_IN,
+ FCE_VIO_HD2, PTU0_OUT, PTU0_IN_PD, PTU0_IN }
+ },
+ { PINMUX_CFG_REG("PVCR", 0xa4050144, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ NAF7_VIO_D15, PTV4_OUT, PTV4_IN_PD, PTV4_IN,
+ NAF6_VIO_D14, PTV3_OUT, PTV3_IN_PD, PTV3_IN,
+ NAF5_VIO_D13, PTV2_OUT, PTV2_IN_PD, PTV2_IN,
+ NAF4_VIO_D12, PTV1_OUT, PTV1_IN_PD, PTV1_IN,
+ NAF3_VIO_D11, PTV0_OUT, PTV0_IN_PD, PTV0_IN }
+ },
+ { PINMUX_CFG_REG("PWCR", 0xa4050146, 16, 2) {
+ 0, 0, 0, 0,
+ VIO_FLD_SCIF2_CTS, 0, PTW6_IN_PD, PTW6_IN,
+ VIO_CKO_SCIF2_RTS, PTW5_OUT, 0, 0,
+ VIO_STEX_SCIF2_SCK, PTW4_OUT, PTW4_IN_PD, PTW4_IN,
+ VIO_STEM_SCIF2_TXD, PTW3_OUT, PTW3_IN_PD, PTW3_IN,
+ VIO_HD_SCIF2_RXD, PTW2_OUT, PTW2_IN_PD, PTW2_IN,
+ VIO_VD_SCIF1_CTS, PTW1_OUT, PTW1_IN_PD, PTW1_IN,
+ VIO_CLK_SCIF1_RTS, PTW0_OUT, PTW0_IN_PD, PTW0_IN }
+ },
+ { PINMUX_CFG_REG("PXCR", 0xa4050148, 16, 2) {
+ 0, 0, 0, 0,
+ CS6A_CE2B, PTX6_OUT, PTX6_IN_PU, PTX6_IN,
+ LCDD23, PTX5_OUT, PTX5_IN_PD, PTX5_IN,
+ LCDD22, PTX4_OUT, PTX4_IN_PD, PTX4_IN,
+ LCDD21, PTX3_OUT, PTX3_IN_PD, PTX3_IN,
+ LCDD20, PTX2_OUT, PTX2_IN_PD, PTX2_IN,
+ LCDD19_DV_CLKI, PTX1_OUT, PTX1_IN_PD, PTX1_IN,
+ LCDD18_DV_CLK, PTX0_OUT, PTX0_IN_PD, PTX0_IN }
+ },
+ { PINMUX_CFG_REG("PYCR", 0xa405014a, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ KEYOUT5_IN5, PTY5_OUT, PTY5_IN_PU, PTY5_IN,
+ KEYOUT4_IN6, PTY4_OUT, PTY4_IN_PU, PTY4_IN,
+ KEYOUT3, PTY3_OUT, PTY3_IN_PU, PTY3_IN,
+ KEYOUT2, PTY2_OUT, PTY2_IN_PU, PTY2_IN,
+ KEYOUT1, PTY1_OUT, 0, 0,
+ KEYOUT0, PTY0_OUT, PTY0_IN_PU, PTY0_IN }
+ },
+ { PINMUX_CFG_REG("PZCR", 0xa405014c, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ KEYIN4_IRQ7, 0, PTZ5_IN_PU, PTZ5_IN,
+ KEYIN3, 0, PTZ4_IN_PU, PTZ4_IN,
+ KEYIN2, 0, PTZ3_IN_PU, PTZ3_IN,
+ KEYIN1, 0, PTZ2_IN_PU, PTZ2_IN,
+ KEYIN0_IRQ6, 0, PTZ1_IN_PU, PTZ1_IN,
+ 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PSELA", 0xa405014e, 16, 1) {
+ PSA15_KEYIN0, PSA15_IRQ6,
+ PSA14_KEYIN4, PSA14_IRQ7,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ PSA9_IRQ4, PSA9_BS,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ PSA4_IRQ2, PSA4_SDHID2,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0 }
+ },
+ { PINMUX_CFG_REG("PSELB", 0xa4050150, 16, 1) {
+ PSB15_SIOTXD, PSB15_SIUBOSLD,
+ PSB14_SIORXD, PSB14_SIUBISLD,
+ PSB13_SIOD, PSB13_SIUBILR,
+ PSB12_SIOSTRB0, PSB12_SIUBIBT,
+ PSB11_SIOSTRB1, PSB11_SIUBOLR,
+ PSB10_SIOSCK, PSB10_SIUBOBT,
+ PSB9_SIOMCK, PSB9_SIUMCKB,
+ PSB8_SIOF0_MCK, PSB8_IRQ3,
+ PSB7_SIOF0_TXD, PSB7_IRDA_OUT,
+ PSB6_SIOF0_RXD, PSB6_IRDA_IN,
+ PSB5_SIOF0_SCK, PSB5_TS_SCK,
+ PSB4_SIOF0_SYNC, PSB4_TS_SDEN,
+ PSB3_SIOF0_SS1, PSB3_TS_SPSYNC,
+ PSB2_SIOF0_SS2, PSB2_SIM_RST,
+ PSB1_SIUMCKA, PSB1_SIOF1_MCK,
+ PSB0_SIUAOSLD, PSB0_SIOF1_TXD }
+ },
+ { PINMUX_CFG_REG("PSELC", 0xa4050152, 16, 1) {
+ PSC15_SIUAISLD, PSC15_SIOF1_RXD,
+ PSC14_SIUAOBT, PSC14_SIOF1_SCK,
+ PSC13_SIUAOLR, PSC13_SIOF1_SYNC,
+ PSC12_SIUAIBT, PSC12_SIOF1_SS1,
+ PSC11_SIUAILR, PSC11_SIOF1_SS2,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ PSC0_NAF, PSC0_VIO }
+ },
+ { PINMUX_CFG_REG("PSELD", 0xa4050154, 16, 1) {
+ 0, 0,
+ 0, 0,
+ PSD13_VIO, PSD13_SCIF2,
+ PSD12_VIO, PSD12_SCIF1,
+ PSD11_VIO, PSD11_SCIF1,
+ PSD10_VIO_D0, PSD10_LCDLCLK,
+ PSD9_SIOMCK_SIUMCKB, PSD9_SIUFCKB,
+ PSD8_SCIF0_SCK, PSD8_TPUTO,
+ PSD7_SCIF0_RTS, PSD7_SIUAOSPD,
+ PSD6_SCIF0_CTS, PSD6_SIUAISPD,
+ PSD5_CS6B_CE1B, PSD5_LCDCS2,
+ 0, 0,
+ PSD3_LCDVEPWC_LCDVCPWC, PSD3_LCDVEPWC2_LCDVCPWC2,
+ PSD2_LCDDON, PSD2_LCDDON2,
+ 0, 0,
+ PSD0_LCDD19_LCDD0, PSD0_DV }
+ },
+ { PINMUX_CFG_REG("PSELE", 0xa4050156, 16, 1) {
+ PSE15_SIOF0_MCK_IRQ3, PSE15_SIM_D,
+ PSE14_SIOF0_TXD_IRDA_OUT, PSE14_SIM_CLK,
+ PSE13_SIOF0_RXD_IRDA_IN, PSE13_TS_SDAT,
+ PSE12_LCDVSYN2, PSE12_DACK,
+ PSE11_SIUMCKA_SIOF1_MCK, PSE11_SIUFCKA,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ PSE3_FLCTL, PSE3_VIO,
+ PSE2_NAF2, PSE2_VIO_D10,
+ PSE1_NAF1, PSE1_VIO_D9,
+ PSE0_NAF0, PSE0_VIO_D8 }
+ },
+ { PINMUX_CFG_REG("HIZCRA", 0xa4050158, 16, 1) {
+ 0, 0,
+ HIZA14_KEYSC, HIZA14_HIZ,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ HIZA10_NAF, HIZA10_HIZ,
+ HIZA9_VIO, HIZA9_HIZ,
+ HIZA8_LCDC, HIZA8_HIZ,
+ HIZA7_LCDC, HIZA7_HIZ,
+ HIZA6_LCDC, HIZA6_HIZ,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0 }
+ },
+ { PINMUX_CFG_REG("HIZCRB", 0xa405015a, 16, 1) {
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ HIZB4_SIUA, HIZB4_HIZ,
+ 0, 0,
+ 0, 0,
+ HIZB1_VIO, HIZB1_HIZ,
+ HIZB0_VIO, HIZB0_HIZ }
+ },
+ { PINMUX_CFG_REG("HIZCRC", 0xa405015c, 16, 1) {
+ HIZC15_IRQ7, HIZC15_HIZ,
+ HIZC14_IRQ6, HIZC14_HIZ,
+ HIZC13_IRQ5, HIZC13_HIZ,
+ HIZC12_IRQ4, HIZC12_HIZ,
+ HIZC11_IRQ3, HIZC11_HIZ,
+ HIZC10_IRQ2, HIZC10_HIZ,
+ HIZC9_IRQ1, HIZC9_HIZ,
+ HIZC8_IRQ0, HIZC8_HIZ,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0 }
+ },
+ { PINMUX_CFG_REG("MSELCRB", 0xa4050182, 16, 1) {
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ MSELB9_VIO, MSELB9_VIO2,
+ MSELB8_RGB, MSELB8_SYS,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0 }
+ },
+ {}
+};
+
+static struct pinmux_data_reg pinmux_data_regs[] = {
+ { PINMUX_DATA_REG("PADR", 0xa4050120, 8) {
+ PTA7_DATA, PTA6_DATA, PTA5_DATA, PTA4_DATA,
+ PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA }
+ },
+ { PINMUX_DATA_REG("PBDR", 0xa4050122, 8) {
+ PTB7_DATA, PTB6_DATA, PTB5_DATA, PTB4_DATA,
+ PTB3_DATA, PTB2_DATA, PTB1_DATA, PTB0_DATA }
+ },
+ { PINMUX_DATA_REG("PCDR", 0xa4050124, 8) {
+ PTC7_DATA, 0, PTC5_DATA, PTC4_DATA,
+ PTC3_DATA, PTC2_DATA, 0, PTC0_DATA }
+ },
+ { PINMUX_DATA_REG("PDDR", 0xa4050126, 8) {
+ PTD7_DATA, PTD6_DATA, PTD5_DATA, PTD4_DATA,
+ PTD3_DATA, PTD2_DATA, PTD1_DATA, PTD0_DATA }
+ },
+ { PINMUX_DATA_REG("PEDR", 0xa4050128, 8) {
+ PTE7_DATA, PTE6_DATA, PTE5_DATA, PTE4_DATA,
+ 0, 0, PTE1_DATA, PTE0_DATA }
+ },
+ { PINMUX_DATA_REG("PFDR", 0xa405012a, 8) {
+ 0, PTF6_DATA, PTF5_DATA, PTF4_DATA,
+ PTF3_DATA, PTF2_DATA, PTF1_DATA, PTF0_DATA }
+ },
+ { PINMUX_DATA_REG("PGDR", 0xa405012c, 8) {
+ 0, 0, 0, PTG4_DATA,
+ PTG3_DATA, PTG2_DATA, PTG1_DATA, PTG0_DATA }
+ },
+ { PINMUX_DATA_REG("PHDR", 0xa405012e, 8) {
+ PTH7_DATA, PTH6_DATA, PTH5_DATA, PTH4_DATA,
+ PTH3_DATA, PTH2_DATA, PTH1_DATA, PTH0_DATA }
+ },
+ { PINMUX_DATA_REG("PJDR", 0xa4050130, 8) {
+ PTJ7_DATA, PTJ6_DATA, PTJ5_DATA, 0,
+ 0, 0, PTJ1_DATA, PTJ0_DATA }
+ },
+ { PINMUX_DATA_REG("PKDR", 0xa4050132, 8) {
+ 0, PTK6_DATA, PTK5_DATA, PTK4_DATA,
+ PTK3_DATA, PTK2_DATA, PTK1_DATA, PTK0_DATA }
+ },
+ { PINMUX_DATA_REG("PLDR", 0xa4050134, 8) {
+ PTL7_DATA, PTL6_DATA, PTL5_DATA, PTL4_DATA,
+ PTL3_DATA, PTL2_DATA, PTL1_DATA, PTL0_DATA }
+ },
+ { PINMUX_DATA_REG("PMDR", 0xa4050136, 8) {
+ PTM7_DATA, PTM6_DATA, PTM5_DATA, PTM4_DATA,
+ PTM3_DATA, PTM2_DATA, PTM1_DATA, PTM0_DATA }
+ },
+ { PINMUX_DATA_REG("PNDR", 0xa4050138, 8) {
+ PTN7_DATA, PTN6_DATA, PTN5_DATA, PTN4_DATA,
+ PTN3_DATA, PTN2_DATA, PTN1_DATA, PTN0_DATA }
+ },
+ { PINMUX_DATA_REG("PQDR", 0xa405013a, 8) {
+ 0, PTQ6_DATA, PTQ5_DATA, PTQ4_DATA,
+ PTQ3_DATA, PTQ2_DATA, PTQ1_DATA, PTQ0_DATA }
+ },
+ { PINMUX_DATA_REG("PRDR", 0xa405013c, 8) {
+ 0, 0, 0, PTR4_DATA,
+ PTR3_DATA, PTR2_DATA, PTR1_DATA, PTR0_DATA }
+ },
+ { PINMUX_DATA_REG("PSDR", 0xa405013e, 8) {
+ 0, 0, 0, PTS4_DATA,
+ PTS3_DATA, PTS2_DATA, PTS1_DATA, PTS0_DATA }
+ },
+ { PINMUX_DATA_REG("PTDR", 0xa4050160, 8) {
+ 0, 0, 0, PTT4_DATA,
+ PTT3_DATA, PTT2_DATA, PTT1_DATA, PTT0_DATA }
+ },
+ { PINMUX_DATA_REG("PUDR", 0xa4050162, 8) {
+ 0, 0, 0, PTU4_DATA,
+ PTU3_DATA, PTU2_DATA, PTU1_DATA, PTU0_DATA }
+ },
+ { PINMUX_DATA_REG("PVDR", 0xa4050164, 8) {
+ 0, 0, 0, PTV4_DATA,
+ PTV3_DATA, PTV2_DATA, PTV1_DATA, PTV0_DATA }
+ },
+ { PINMUX_DATA_REG("PWDR", 0xa4050166, 8) {
+ 0, PTW6_DATA, PTW5_DATA, PTW4_DATA,
+ PTW3_DATA, PTW2_DATA, PTW1_DATA, PTW0_DATA }
+ },
+ { PINMUX_DATA_REG("PXDR", 0xa4050168, 8) {
+ 0, PTX6_DATA, PTX5_DATA, PTX4_DATA,
+ PTX3_DATA, PTX2_DATA, PTX1_DATA, PTX0_DATA }
+ },
+ { PINMUX_DATA_REG("PYDR", 0xa405016a, 8) {
+ 0, PTY6_DATA, PTY5_DATA, PTY4_DATA,
+ PTY3_DATA, PTY2_DATA, PTY1_DATA, PTY0_DATA }
+ },
+ { PINMUX_DATA_REG("PZDR", 0xa405016c, 8) {
+ 0, 0, PTZ5_DATA, PTZ4_DATA,
+ PTZ3_DATA, PTZ2_DATA, PTZ1_DATA, PTZ0_DATA }
+ },
+ { },
+};
+
+static struct pinmux_info sh7722_pinmux_info = {
+ .name = "sh7722_pfc",
+ .reserved_id = PINMUX_RESERVED,
+ .data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END },
+ .input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END },
+ .input_pd = { PINMUX_INPUT_PULLDOWN_BEGIN, PINMUX_INPUT_PULLDOWN_END },
+ .input_pu = { PINMUX_INPUT_PULLUP_BEGIN, PINMUX_INPUT_PULLUP_END },
+ .output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END },
+ .mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END },
+ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+ .first_gpio = GPIO_PTA7,
+ .last_gpio = GPIO_FN_KEYOUT5_IN5,
+
+ .gpios = pinmux_gpios,
+ .cfg_regs = pinmux_config_regs,
+ .data_regs = pinmux_data_regs,
+
+ .gpio_data = pinmux_data,
+ .gpio_data_size = ARRAY_SIZE(pinmux_data),
+};
+
+static int __init plat_pinmux_setup(void)
+{
+ return register_pinmux(&sh7722_pinmux_info);
+}
+
+arch_initcall(plat_pinmux_setup);
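
A minimal usage sketch (not part of this patch, assuming the gpiolib-based arch/sh pinmux of this era): once register_pinmux() has run, a board setup file can select one of the function enums defined in the tables above with gpio_request(), or claim a plain port pin and drive it directly. The example_board_pinmux() helper and the specific pins chosen here are illustrative only.

	/* Hypothetical board-setup sketch; GPIO_FN_SCIF0_TXD, GPIO_FN_SCIF0_RXD
	 * and GPIO_PTA7 are the enums provided by the sh7722 tables above. */
	#include <linux/init.h>
	#include <linux/gpio.h>

	static void __init example_board_pinmux(void)
	{
		gpio_request(GPIO_FN_SCIF0_TXD, NULL);	/* route SCIF0 TXD to its pin */
		gpio_request(GPIO_FN_SCIF0_RXD, NULL);	/* route SCIF0 RXD to its pin */

		gpio_request(GPIO_PTA7, NULL);		/* use PTA7 as an ordinary GPIO */
		gpio_direction_output(GPIO_PTA7, 1);	/* drive it high */
	}
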
diff --git a/arch/sh/kernel/cpu/sh4a/pinmux-sh7723.c b/arch/sh/kernel/cpu/sh4a/pinmux-sh7723.c
new file mode 100644
index 00000000..88bf5ecd
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/pinmux-sh7723.c
@@ -0,0 +1,1909 @@
+/*
+ * SH7723 Pinmux
+ *
+ * Copyright (C) 2008 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <cpu/sh7723.h>
+
+enum {
+ PINMUX_RESERVED = 0,
+
+ PINMUX_DATA_BEGIN,
+ PTA7_DATA, PTA6_DATA, PTA5_DATA, PTA4_DATA,
+ PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA,
+ PTB7_DATA, PTB6_DATA, PTB5_DATA, PTB4_DATA,
+ PTB3_DATA, PTB2_DATA, PTB1_DATA, PTB0_DATA,
+ PTC7_DATA, PTC6_DATA, PTC5_DATA, PTC4_DATA,
+ PTC3_DATA, PTC2_DATA, PTC1_DATA, PTC0_DATA,
+ PTD7_DATA, PTD6_DATA, PTD5_DATA, PTD4_DATA,
+ PTD3_DATA, PTD2_DATA, PTD1_DATA, PTD0_DATA,
+ PTE5_DATA, PTE4_DATA, PTE3_DATA, PTE2_DATA, PTE1_DATA, PTE0_DATA,
+ PTF7_DATA, PTF6_DATA, PTF5_DATA, PTF4_DATA,
+ PTF3_DATA, PTF2_DATA, PTF1_DATA, PTF0_DATA,
+ PTG5_DATA, PTG4_DATA, PTG3_DATA, PTG2_DATA, PTG1_DATA, PTG0_DATA,
+ PTH7_DATA, PTH6_DATA, PTH5_DATA, PTH4_DATA,
+ PTH3_DATA, PTH2_DATA, PTH1_DATA, PTH0_DATA,
+ PTJ7_DATA, PTJ5_DATA, PTJ3_DATA, PTJ2_DATA, PTJ1_DATA, PTJ0_DATA,
+ PTK7_DATA, PTK6_DATA, PTK5_DATA, PTK4_DATA,
+ PTK3_DATA, PTK2_DATA, PTK1_DATA, PTK0_DATA,
+ PTL7_DATA, PTL6_DATA, PTL5_DATA, PTL4_DATA,
+ PTL3_DATA, PTL2_DATA, PTL1_DATA, PTL0_DATA,
+ PTM7_DATA, PTM6_DATA, PTM5_DATA, PTM4_DATA,
+ PTM3_DATA, PTM2_DATA, PTM1_DATA, PTM0_DATA,
+ PTN7_DATA, PTN6_DATA, PTN5_DATA, PTN4_DATA,
+ PTN3_DATA, PTN2_DATA, PTN1_DATA, PTN0_DATA,
+ PTQ3_DATA, PTQ2_DATA, PTQ1_DATA, PTQ0_DATA,
+ PTR7_DATA, PTR6_DATA, PTR5_DATA, PTR4_DATA,
+ PTR3_DATA, PTR2_DATA, PTR1_DATA, PTR0_DATA,
+ PTS7_DATA, PTS6_DATA, PTS5_DATA, PTS4_DATA,
+ PTS3_DATA, PTS2_DATA, PTS1_DATA, PTS0_DATA,
+ PTT5_DATA, PTT4_DATA, PTT3_DATA, PTT2_DATA, PTT1_DATA, PTT0_DATA,
+ PTU5_DATA, PTU4_DATA, PTU3_DATA, PTU2_DATA, PTU1_DATA, PTU0_DATA,
+ PTV7_DATA, PTV6_DATA, PTV5_DATA, PTV4_DATA,
+ PTV3_DATA, PTV2_DATA, PTV1_DATA, PTV0_DATA,
+ PTW7_DATA, PTW6_DATA, PTW5_DATA, PTW4_DATA,
+ PTW3_DATA, PTW2_DATA, PTW1_DATA, PTW0_DATA,
+ PTX7_DATA, PTX6_DATA, PTX5_DATA, PTX4_DATA,
+ PTX3_DATA, PTX2_DATA, PTX1_DATA, PTX0_DATA,
+ PTY7_DATA, PTY6_DATA, PTY5_DATA, PTY4_DATA,
+ PTY3_DATA, PTY2_DATA, PTY1_DATA, PTY0_DATA,
+ PTZ7_DATA, PTZ6_DATA, PTZ5_DATA, PTZ4_DATA,
+ PTZ3_DATA, PTZ2_DATA, PTZ1_DATA, PTZ0_DATA,
+ PINMUX_DATA_END,
+
+ PINMUX_INPUT_BEGIN,
+ PTA7_IN, PTA6_IN, PTA5_IN, PTA4_IN,
+ PTA3_IN, PTA2_IN, PTA1_IN, PTA0_IN,
+ PTB7_IN, PTB6_IN, PTB5_IN, PTB4_IN,
+ PTB3_IN, PTB2_IN, PTB1_IN, PTB0_IN,
+ PTC7_IN, PTC6_IN, PTC5_IN, PTC4_IN,
+ PTC3_IN, PTC2_IN, PTC1_IN, PTC0_IN,
+ PTD7_IN, PTD6_IN, PTD5_IN, PTD4_IN,
+ PTD3_IN, PTD2_IN, PTD1_IN, PTD0_IN,
+ PTE5_IN, PTE4_IN, PTE3_IN, PTE2_IN, PTE1_IN, PTE0_IN,
+ PTF7_IN, PTF6_IN, PTF5_IN, PTF4_IN,
+ PTF3_IN, PTF2_IN, PTF1_IN, PTF0_IN,
+ PTH7_IN, PTH6_IN, PTH5_IN, PTH4_IN,
+ PTH3_IN, PTH2_IN, PTH1_IN, PTH0_IN,
+ PTJ3_IN, PTJ2_IN, PTJ1_IN, PTJ0_IN,
+ PTK7_IN, PTK6_IN, PTK5_IN, PTK4_IN,
+ PTK3_IN, PTK2_IN, PTK1_IN, PTK0_IN,
+ PTL7_IN, PTL6_IN, PTL5_IN, PTL4_IN,
+ PTL3_IN, PTL2_IN, PTL1_IN, PTL0_IN,
+ PTM7_IN, PTM6_IN, PTM5_IN, PTM4_IN,
+ PTM3_IN, PTM2_IN, PTM1_IN, PTM0_IN,
+ PTN7_IN, PTN6_IN, PTN5_IN, PTN4_IN,
+ PTN3_IN, PTN2_IN, PTN1_IN, PTN0_IN,
+ PTQ3_IN, PTQ2_IN, PTQ1_IN, PTQ0_IN,
+ PTR7_IN, PTR6_IN, PTR5_IN, PTR4_IN,
+ PTR3_IN, PTR2_IN, PTR1_IN, PTR0_IN,
+ PTS7_IN, PTS6_IN, PTS5_IN, PTS4_IN,
+ PTS3_IN, PTS2_IN, PTS1_IN, PTS0_IN,
+ PTT5_IN, PTT4_IN, PTT3_IN, PTT2_IN, PTT1_IN, PTT0_IN,
+ PTU5_IN, PTU4_IN, PTU3_IN, PTU2_IN, PTU1_IN, PTU0_IN,
+ PTV7_IN, PTV6_IN, PTV5_IN, PTV4_IN,
+ PTV3_IN, PTV2_IN, PTV1_IN, PTV0_IN,
+ PTW7_IN, PTW6_IN, PTW5_IN, PTW4_IN,
+ PTW3_IN, PTW2_IN, PTW1_IN, PTW0_IN,
+ PTX7_IN, PTX6_IN, PTX5_IN, PTX4_IN,
+ PTX3_IN, PTX2_IN, PTX1_IN, PTX0_IN,
+ PTY7_IN, PTY6_IN, PTY5_IN, PTY4_IN,
+ PTY3_IN, PTY2_IN, PTY1_IN, PTY0_IN,
+ PTZ7_IN, PTZ6_IN, PTZ5_IN, PTZ4_IN,
+ PTZ3_IN, PTZ2_IN, PTZ1_IN, PTZ0_IN,
+ PINMUX_INPUT_END,
+
+ PINMUX_INPUT_PULLUP_BEGIN,
+ PTA4_IN_PU, PTA3_IN_PU, PTA2_IN_PU, PTA1_IN_PU, PTA0_IN_PU,
+ PTB2_IN_PU, PTB1_IN_PU,
+ PTR2_IN_PU,
+ PINMUX_INPUT_PULLUP_END,
+
+ PINMUX_OUTPUT_BEGIN,
+ PTA7_OUT, PTA6_OUT, PTA5_OUT, PTA4_OUT,
+ PTA3_OUT, PTA2_OUT, PTA1_OUT, PTA0_OUT,
+ PTB7_OUT, PTB6_OUT, PTB5_OUT, PTB4_OUT,
+ PTB3_OUT, PTB2_OUT, PTB1_OUT, PTB0_OUT,
+ PTC7_OUT, PTC6_OUT, PTC5_OUT, PTC4_OUT,
+ PTC3_OUT, PTC2_OUT, PTC1_OUT, PTC0_OUT,
+ PTD7_OUT, PTD6_OUT, PTD5_OUT, PTD4_OUT,
+ PTD3_OUT, PTD2_OUT, PTD1_OUT, PTD0_OUT,
+ PTE5_OUT, PTE4_OUT, PTE3_OUT, PTE2_OUT, PTE1_OUT, PTE0_OUT,
+ PTF7_OUT, PTF6_OUT, PTF5_OUT, PTF4_OUT,
+ PTF3_OUT, PTF2_OUT, PTF1_OUT, PTF0_OUT,
+ PTG5_OUT, PTG4_OUT, PTG3_OUT, PTG2_OUT, PTG1_OUT, PTG0_OUT,
+ PTH7_OUT, PTH6_OUT, PTH5_OUT, PTH4_OUT,
+ PTH3_OUT, PTH2_OUT, PTH1_OUT, PTH0_OUT,
+ PTJ7_OUT, PTJ5_OUT, PTJ3_OUT, PTJ2_OUT, PTJ1_OUT, PTJ0_OUT,
+ PTK7_OUT, PTK6_OUT, PTK5_OUT, PTK4_OUT,
+ PTK3_OUT, PTK2_OUT, PTK1_OUT, PTK0_OUT,
+ PTL7_OUT, PTL6_OUT, PTL5_OUT, PTL4_OUT,
+ PTL3_OUT, PTL2_OUT, PTL1_OUT, PTL0_OUT,
+ PTM7_OUT, PTM6_OUT, PTM5_OUT, PTM4_OUT,
+ PTM3_OUT, PTM2_OUT, PTM1_OUT, PTM0_OUT,
+ PTN7_OUT, PTN6_OUT, PTN5_OUT, PTN4_OUT,
+ PTN3_OUT, PTN2_OUT, PTN1_OUT, PTN0_OUT,
+ PTR7_OUT, PTR6_OUT, PTR5_OUT, PTR4_OUT,
+ PTR1_OUT, PTR0_OUT,
+ PTS7_OUT, PTS6_OUT, PTS5_OUT, PTS4_OUT,
+ PTS3_OUT, PTS2_OUT, PTS1_OUT, PTS0_OUT,
+ PTT5_OUT, PTT4_OUT, PTT3_OUT, PTT2_OUT, PTT1_OUT, PTT0_OUT,
+ PTU5_OUT, PTU4_OUT, PTU3_OUT, PTU2_OUT, PTU1_OUT, PTU0_OUT,
+ PTV7_OUT, PTV6_OUT, PTV5_OUT, PTV4_OUT,
+ PTV3_OUT, PTV2_OUT, PTV1_OUT, PTV0_OUT,
+ PTW7_OUT, PTW6_OUT, PTW5_OUT, PTW4_OUT,
+ PTW3_OUT, PTW2_OUT, PTW1_OUT, PTW0_OUT,
+ PTX7_OUT, PTX6_OUT, PTX5_OUT, PTX4_OUT,
+ PTX3_OUT, PTX2_OUT, PTX1_OUT, PTX0_OUT,
+ PTY7_OUT, PTY6_OUT, PTY5_OUT, PTY4_OUT,
+ PTY3_OUT, PTY2_OUT, PTY1_OUT, PTY0_OUT,
+ PTZ7_OUT, PTZ6_OUT, PTZ5_OUT, PTZ4_OUT,
+ PTZ3_OUT, PTZ2_OUT, PTZ1_OUT, PTZ0_OUT,
+ PINMUX_OUTPUT_END,
+
+ PINMUX_FUNCTION_BEGIN,
+ PTA7_FN, PTA6_FN, PTA5_FN, PTA4_FN,
+ PTA3_FN, PTA2_FN, PTA1_FN, PTA0_FN,
+ PTB7_FN, PTB6_FN, PTB5_FN, PTB4_FN,
+ PTB3_FN, PTB2_FN, PTB1_FN, PTB0_FN,
+ PTC7_FN, PTC6_FN, PTC5_FN, PTC4_FN,
+ PTC3_FN, PTC2_FN, PTC1_FN, PTC0_FN,
+ PTD7_FN, PTD6_FN, PTD5_FN, PTD4_FN,
+ PTD3_FN, PTD2_FN, PTD1_FN, PTD0_FN,
+ PTE5_FN, PTE4_FN, PTE3_FN, PTE2_FN, PTE1_FN, PTE0_FN,
+ PTF7_FN, PTF6_FN, PTF5_FN, PTF4_FN,
+ PTF3_FN, PTF2_FN, PTF1_FN, PTF0_FN,
+ PTG5_FN, PTG4_FN, PTG3_FN, PTG2_FN, PTG1_FN, PTG0_FN,
+ PTH7_FN, PTH6_FN, PTH5_FN, PTH4_FN,
+ PTH3_FN, PTH2_FN, PTH1_FN, PTH0_FN,
+ PTJ7_FN, PTJ5_FN, PTJ3_FN, PTJ2_FN, PTJ1_FN, PTJ0_FN,
+ PTK7_FN, PTK6_FN, PTK5_FN, PTK4_FN,
+ PTK3_FN, PTK2_FN, PTK1_FN, PTK0_FN,
+ PTL7_FN, PTL6_FN, PTL5_FN, PTL4_FN,
+ PTL3_FN, PTL2_FN, PTL1_FN, PTL0_FN,
+ PTM7_FN, PTM6_FN, PTM5_FN, PTM4_FN,
+ PTM3_FN, PTM2_FN, PTM1_FN, PTM0_FN,
+ PTN7_FN, PTN6_FN, PTN5_FN, PTN4_FN,
+ PTN3_FN, PTN2_FN, PTN1_FN, PTN0_FN,
+ PTQ3_FN, PTQ2_FN, PTQ1_FN, PTQ0_FN,
+ PTR7_FN, PTR6_FN, PTR5_FN, PTR4_FN,
+ PTR3_FN, PTR2_FN, PTR1_FN, PTR0_FN,
+ PTS7_FN, PTS6_FN, PTS5_FN, PTS4_FN,
+ PTS3_FN, PTS2_FN, PTS1_FN, PTS0_FN,
+ PTT5_FN, PTT4_FN, PTT3_FN, PTT2_FN, PTT1_FN, PTT0_FN,
+ PTU5_FN, PTU4_FN, PTU3_FN, PTU2_FN, PTU1_FN, PTU0_FN,
+ PTV7_FN, PTV6_FN, PTV5_FN, PTV4_FN,
+ PTV3_FN, PTV2_FN, PTV1_FN, PTV0_FN,
+ PTW7_FN, PTW6_FN, PTW5_FN, PTW4_FN,
+ PTW3_FN, PTW2_FN, PTW1_FN, PTW0_FN,
+ PTX7_FN, PTX6_FN, PTX5_FN, PTX4_FN,
+ PTX3_FN, PTX2_FN, PTX1_FN, PTX0_FN,
+ PTY7_FN, PTY6_FN, PTY5_FN, PTY4_FN,
+ PTY3_FN, PTY2_FN, PTY1_FN, PTY0_FN,
+ PTZ7_FN, PTZ6_FN, PTZ5_FN, PTZ4_FN,
+ PTZ3_FN, PTZ2_FN, PTZ1_FN, PTZ0_FN,
+
+
+ PSA15_PSA14_FN1, PSA15_PSA14_FN2,
+ PSA13_PSA12_FN1, PSA13_PSA12_FN2,
+ PSA11_PSA10_FN1, PSA11_PSA10_FN2,
+ PSA5_PSA4_FN1, PSA5_PSA4_FN2, PSA5_PSA4_FN3,
+ PSA3_PSA2_FN1, PSA3_PSA2_FN2,
+ PSB15_PSB14_FN1, PSB15_PSB14_FN2,
+ PSB13_PSB12_LCDC_RGB, PSB13_PSB12_LCDC_SYS,
+ PSB9_PSB8_FN1, PSB9_PSB8_FN2, PSB9_PSB8_FN3,
+ PSB7_PSB6_FN1, PSB7_PSB6_FN2,
+ PSB5_PSB4_FN1, PSB5_PSB4_FN2,
+ PSB3_PSB2_FN1, PSB3_PSB2_FN2,
+ PSC15_PSC14_FN1, PSC15_PSC14_FN2,
+ PSC13_PSC12_FN1, PSC13_PSC12_FN2,
+ PSC11_PSC10_FN1, PSC11_PSC10_FN2, PSC11_PSC10_FN3,
+ PSC9_PSC8_FN1, PSC9_PSC8_FN2,
+ PSC7_PSC6_FN1, PSC7_PSC6_FN2, PSC7_PSC6_FN3,
+ PSD15_PSD14_FN1, PSD15_PSD14_FN2,
+ PSD13_PSD12_FN1, PSD13_PSD12_FN2,
+ PSD11_PSD10_FN1, PSD11_PSD10_FN2, PSD11_PSD10_FN3,
+ PSD9_PSD8_FN1, PSD9_PSD8_FN2,
+ PSD7_PSD6_FN1, PSD7_PSD6_FN2,
+ PSD5_PSD4_FN1, PSD5_PSD4_FN2,
+ PSD3_PSD2_FN1, PSD3_PSD2_FN2,
+ PSD1_PSD0_FN1, PSD1_PSD0_FN2,
+ PINMUX_FUNCTION_END,
+
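+ /* Peripheral signal names (marks), grouped by on-chip module */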
+ PINMUX_MARK_BEGIN,
+ SCIF0_PTT_TXD_MARK, SCIF0_PTT_RXD_MARK,
+ SCIF0_PTT_SCK_MARK, SCIF0_PTU_TXD_MARK,
+ SCIF0_PTU_RXD_MARK, SCIF0_PTU_SCK_MARK,
+
+ SCIF1_PTS_TXD_MARK, SCIF1_PTS_RXD_MARK,
+ SCIF1_PTS_SCK_MARK, SCIF1_PTV_TXD_MARK,
+ SCIF1_PTV_RXD_MARK, SCIF1_PTV_SCK_MARK,
+
+ SCIF2_PTT_TXD_MARK, SCIF2_PTT_RXD_MARK,
+ SCIF2_PTT_SCK_MARK, SCIF2_PTU_TXD_MARK,
+ SCIF2_PTU_RXD_MARK, SCIF2_PTU_SCK_MARK,
+
+ SCIF3_PTS_TXD_MARK, SCIF3_PTS_RXD_MARK,
+ SCIF3_PTS_SCK_MARK, SCIF3_PTS_RTS_MARK,
+ SCIF3_PTS_CTS_MARK, SCIF3_PTV_TXD_MARK,
+ SCIF3_PTV_RXD_MARK, SCIF3_PTV_SCK_MARK,
+ SCIF3_PTV_RTS_MARK, SCIF3_PTV_CTS_MARK,
+
+ SCIF4_PTE_TXD_MARK, SCIF4_PTE_RXD_MARK,
+ SCIF4_PTE_SCK_MARK, SCIF4_PTN_TXD_MARK,
+ SCIF4_PTN_RXD_MARK, SCIF4_PTN_SCK_MARK,
+
+ SCIF5_PTE_TXD_MARK, SCIF5_PTE_RXD_MARK,
+ SCIF5_PTE_SCK_MARK, SCIF5_PTN_TXD_MARK,
+ SCIF5_PTN_RXD_MARK, SCIF5_PTN_SCK_MARK,
+
+ VIO_D15_MARK, VIO_D14_MARK, VIO_D13_MARK, VIO_D12_MARK,
+ VIO_D11_MARK, VIO_D10_MARK, VIO_D9_MARK, VIO_D8_MARK,
+ VIO_D7_MARK, VIO_D6_MARK, VIO_D5_MARK, VIO_D4_MARK,
+ VIO_D3_MARK, VIO_D2_MARK, VIO_D1_MARK, VIO_D0_MARK,
+ VIO_FLD_MARK, VIO_CKO_MARK,
+ VIO_VD1_MARK, VIO_HD1_MARK, VIO_CLK1_MARK,
+ VIO_HD2_MARK, VIO_VD2_MARK, VIO_CLK2_MARK,
+
+ LCDD23_MARK, LCDD22_MARK, LCDD21_MARK, LCDD20_MARK,
+ LCDD19_MARK, LCDD18_MARK, LCDD17_MARK, LCDD16_MARK,
+ LCDD15_MARK, LCDD14_MARK, LCDD13_MARK, LCDD12_MARK,
+ LCDD11_MARK, LCDD10_MARK, LCDD9_MARK, LCDD8_MARK,
+ LCDD7_MARK, LCDD6_MARK, LCDD5_MARK, LCDD4_MARK,
+ LCDD3_MARK, LCDD2_MARK, LCDD1_MARK, LCDD0_MARK,
+ LCDDON_MARK, LCDVCPWC_MARK, LCDVEPWC_MARK,
+ LCDVSYN_MARK, LCDDCK_MARK, LCDHSYN_MARK, LCDDISP_MARK,
+ LCDRS_MARK, LCDCS_MARK, LCDWR_MARK, LCDRD_MARK,
+ LCDLCLK_PTR_MARK, LCDLCLK_PTW_MARK,
+
+ IRQ0_MARK, IRQ1_MARK, IRQ2_MARK, IRQ3_MARK,
+ IRQ4_MARK, IRQ5_MARK, IRQ6_MARK, IRQ7_MARK,
+
+ AUDATA3_MARK, AUDATA2_MARK, AUDATA1_MARK, AUDATA0_MARK,
+ AUDCK_MARK, AUDSYNC_MARK,
+
+ SDHI0CD_PTD_MARK, SDHI0WP_PTD_MARK,
+ SDHI0D3_PTD_MARK, SDHI0D2_PTD_MARK,
+ SDHI0D1_PTD_MARK, SDHI0D0_PTD_MARK,
+ SDHI0CMD_PTD_MARK, SDHI0CLK_PTD_MARK,
+
+ SDHI0CD_PTS_MARK, SDHI0WP_PTS_MARK,
+ SDHI0D3_PTS_MARK, SDHI0D2_PTS_MARK,
+ SDHI0D1_PTS_MARK, SDHI0D0_PTS_MARK,
+ SDHI0CMD_PTS_MARK, SDHI0CLK_PTS_MARK,
+
+ SDHI1CD_MARK, SDHI1WP_MARK, SDHI1D3_MARK, SDHI1D2_MARK,
+ SDHI1D1_MARK, SDHI1D0_MARK, SDHI1CMD_MARK, SDHI1CLK_MARK,
+
+ SIUAFCK_MARK, SIUAILR_MARK, SIUAIBT_MARK, SIUAISLD_MARK,
+ SIUAOLR_MARK, SIUAOBT_MARK, SIUAOSLD_MARK, SIUAMCK_MARK,
+ SIUAISPD_MARK, SIUAOSPD_MARK,
+
+ SIUBFCK_MARK, SIUBILR_MARK, SIUBIBT_MARK, SIUBISLD_MARK,
+ SIUBOLR_MARK, SIUBOBT_MARK, SIUBOSLD_MARK, SIUBMCK_MARK,
+
+ IRDA_IN_MARK, IRDA_OUT_MARK,
+
+ DV_CLKI_MARK, DV_CLK_MARK, DV_HSYNC_MARK, DV_VSYNC_MARK,
+ DV_D15_MARK, DV_D14_MARK, DV_D13_MARK, DV_D12_MARK,
+ DV_D11_MARK, DV_D10_MARK, DV_D9_MARK, DV_D8_MARK,
+ DV_D7_MARK, DV_D6_MARK, DV_D5_MARK, DV_D4_MARK,
+ DV_D3_MARK, DV_D2_MARK, DV_D1_MARK, DV_D0_MARK,
+
+ KEYIN0_MARK, KEYIN1_MARK, KEYIN2_MARK, KEYIN3_MARK, KEYIN4_MARK,
+ KEYOUT0_MARK, KEYOUT1_MARK, KEYOUT2_MARK, KEYOUT3_MARK,
+ KEYOUT4_IN6_MARK, KEYOUT5_IN5_MARK,
+
+ MSIOF0_PTF_TXD_MARK, MSIOF0_PTF_RXD_MARK, MSIOF0_PTF_MCK_MARK,
+ MSIOF0_PTF_TSYNC_MARK, MSIOF0_PTF_TSCK_MARK, MSIOF0_PTF_RSYNC_MARK,
+ MSIOF0_PTF_RSCK_MARK, MSIOF0_PTF_SS1_MARK, MSIOF0_PTF_SS2_MARK,
+
+ MSIOF0_PTT_TXD_MARK, MSIOF0_PTT_RXD_MARK, MSIOF0_PTX_MCK_MARK,
+ MSIOF0_PTT_TSYNC_MARK, MSIOF0_PTT_TSCK_MARK, MSIOF0_PTT_RSYNC_MARK,
+ MSIOF0_PTT_RSCK_MARK, MSIOF0_PTT_SS1_MARK, MSIOF0_PTT_SS2_MARK,
+
+ MSIOF1_TXD_MARK, MSIOF1_RXD_MARK, MSIOF1_MCK_MARK,
+ MSIOF1_TSYNC_MARK, MSIOF1_TSCK_MARK, MSIOF1_RSYNC_MARK,
+ MSIOF1_RSCK_MARK, MSIOF1_SS1_MARK, MSIOF1_SS2_MARK,
+
+ TS0_SDAT_MARK, TS0_SCK_MARK, TS0_SDEN_MARK, TS0_SPSYNC_MARK,
+
+ FCE_MARK, NAF7_MARK, NAF6_MARK, NAF5_MARK, NAF4_MARK,
+ NAF3_MARK, NAF2_MARK, NAF1_MARK, NAF0_MARK, FCDE_MARK,
+ FOE_MARK, FSC_MARK, FWE_MARK, FRB_MARK,
+
+ DACK1_MARK, DREQ1_MARK, DACK0_MARK, DREQ0_MARK,
+
+ AN3_MARK, AN2_MARK, AN1_MARK, AN0_MARK, ADTRG_MARK,
+
+ STATUS0_MARK, PDSTATUS_MARK,
+
+ TPUTO3_MARK, TPUTO2_MARK, TPUTO1_MARK, TPUTO0_MARK,
+
+ D31_MARK, D30_MARK, D29_MARK, D28_MARK,
+ D27_MARK, D26_MARK, D25_MARK, D24_MARK,
+ D23_MARK, D22_MARK, D21_MARK, D20_MARK,
+ D19_MARK, D18_MARK, D17_MARK, D16_MARK,
+ IOIS16_MARK, WAIT_MARK, BS_MARK,
+ A25_MARK, A24_MARK, A23_MARK, A22_MARK,
+ CS6B_CE1B_MARK, CS6A_CE2B_MARK,
+ CS5B_CE1A_MARK, CS5A_CE2A_MARK,
+ WE3_ICIOWR_MARK, WE2_ICIORD_MARK,
+
+ IDED15_MARK, IDED14_MARK, IDED13_MARK, IDED12_MARK,
+ IDED11_MARK, IDED10_MARK, IDED9_MARK, IDED8_MARK,
+ IDED7_MARK, IDED6_MARK, IDED5_MARK, IDED4_MARK,
+ IDED3_MARK, IDED2_MARK, IDED1_MARK, IDED0_MARK,
+ DIRECTION_MARK, EXBUF_ENB_MARK, IDERST_MARK, IODACK_MARK,
+ IODREQ_MARK, IDEIORDY_MARK, IDEINT_MARK, IDEIOWR_MARK,
+ IDEIORD_MARK, IDECS1_MARK, IDECS0_MARK, IDEA2_MARK,
+ IDEA1_MARK, IDEA0_MARK,
+ PINMUX_MARK_END,
+};
+
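+ /*
+ * Pin mux data: each PINMUX_DATA() entry ties a pin's _DATA identifier
+ * (or a peripheral _MARK) to the enum IDs that select that state - the
+ * port control setting (_IN/_OUT/_FN, plus _IN_PU where a pull-up
+ * exists) and, for peripheral functions, the relevant PSEL value.
+ */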
+static pinmux_enum_t pinmux_data[] = {
+ /* PTA GPIO */
+ PINMUX_DATA(PTA7_DATA, PTA7_IN, PTA7_OUT),
+ PINMUX_DATA(PTA6_DATA, PTA6_IN, PTA6_OUT),
+ PINMUX_DATA(PTA5_DATA, PTA5_IN, PTA5_OUT),
+ PINMUX_DATA(PTA4_DATA, PTA4_IN, PTA4_OUT, PTA4_IN_PU),
+ PINMUX_DATA(PTA3_DATA, PTA3_IN, PTA3_OUT, PTA3_IN_PU),
+ PINMUX_DATA(PTA2_DATA, PTA2_IN, PTA2_OUT, PTA2_IN_PU),
+ PINMUX_DATA(PTA1_DATA, PTA1_IN, PTA1_OUT, PTA1_IN_PU),
+ PINMUX_DATA(PTA0_DATA, PTA0_IN, PTA0_OUT, PTA0_IN_PU),
+
+ /* PTB GPIO */
+ PINMUX_DATA(PTB7_DATA, PTB7_IN, PTB7_OUT),
+ PINMUX_DATA(PTB6_DATA, PTB6_IN, PTB6_OUT),
+ PINMUX_DATA(PTB5_DATA, PTB5_IN, PTB5_OUT),
+ PINMUX_DATA(PTB4_DATA, PTB4_IN, PTB4_OUT),
+ PINMUX_DATA(PTB3_DATA, PTB3_IN, PTB3_OUT),
+ PINMUX_DATA(PTB2_DATA, PTB2_IN, PTB2_OUT, PTB2_IN_PU),
+ PINMUX_DATA(PTB1_DATA, PTB1_IN, PTB1_OUT, PTB1_IN_PU),
+ PINMUX_DATA(PTB0_DATA, PTB0_IN, PTB0_OUT),
+
+ /* PTC GPIO */
+ PINMUX_DATA(PTC7_DATA, PTC7_IN, PTC7_OUT),
+ PINMUX_DATA(PTC6_DATA, PTC6_IN, PTC6_OUT),
+ PINMUX_DATA(PTC5_DATA, PTC5_IN, PTC5_OUT),
+ PINMUX_DATA(PTC4_DATA, PTC4_IN, PTC4_OUT),
+ PINMUX_DATA(PTC3_DATA, PTC3_IN, PTC3_OUT),
+ PINMUX_DATA(PTC2_DATA, PTC2_IN, PTC2_OUT),
+ PINMUX_DATA(PTC1_DATA, PTC1_IN, PTC1_OUT),
+ PINMUX_DATA(PTC0_DATA, PTC0_IN, PTC0_OUT),
+
+ /* PTD GPIO */
+ PINMUX_DATA(PTD7_DATA, PTD7_IN, PTD7_OUT),
+ PINMUX_DATA(PTD6_DATA, PTD6_IN, PTD6_OUT),
+ PINMUX_DATA(PTD5_DATA, PTD5_IN, PTD5_OUT),
+ PINMUX_DATA(PTD4_DATA, PTD4_IN, PTD4_OUT),
+ PINMUX_DATA(PTD3_DATA, PTD3_IN, PTD3_OUT),
+ PINMUX_DATA(PTD2_DATA, PTD2_IN, PTD2_OUT),
+ PINMUX_DATA(PTD1_DATA, PTD1_IN, PTD1_OUT),
+ PINMUX_DATA(PTD0_DATA, PTD0_IN, PTD0_OUT),
+
+ /* PTE GPIO */
+ PINMUX_DATA(PTE5_DATA, PTE5_IN, PTE5_OUT),
+ PINMUX_DATA(PTE4_DATA, PTE4_IN, PTE4_OUT),
+ PINMUX_DATA(PTE3_DATA, PTE3_IN, PTE3_OUT),
+ PINMUX_DATA(PTE2_DATA, PTE2_IN, PTE2_OUT),
+ PINMUX_DATA(PTE1_DATA, PTE1_IN, PTE1_OUT),
+ PINMUX_DATA(PTE0_DATA, PTE0_IN, PTE0_OUT),
+
+ /* PTF GPIO */
+ PINMUX_DATA(PTF7_DATA, PTF7_IN, PTF7_OUT),
+ PINMUX_DATA(PTF6_DATA, PTF6_IN, PTF6_OUT),
+ PINMUX_DATA(PTF5_DATA, PTF5_IN, PTF5_OUT),
+ PINMUX_DATA(PTF4_DATA, PTF4_IN, PTF4_OUT),
+ PINMUX_DATA(PTF3_DATA, PTF3_IN, PTF3_OUT),
+ PINMUX_DATA(PTF2_DATA, PTF2_IN, PTF2_OUT),
+ PINMUX_DATA(PTF1_DATA, PTF1_IN, PTF1_OUT),
+ PINMUX_DATA(PTF0_DATA, PTF0_IN, PTF0_OUT),
+
+ /* PTG GPIO */
+ PINMUX_DATA(PTG5_DATA, PTG5_OUT),
+ PINMUX_DATA(PTG4_DATA, PTG4_OUT),
+ PINMUX_DATA(PTG3_DATA, PTG3_OUT),
+ PINMUX_DATA(PTG2_DATA, PTG2_OUT),
+ PINMUX_DATA(PTG1_DATA, PTG1_OUT),
+ PINMUX_DATA(PTG0_DATA, PTG0_OUT),
+
+ /* PTH GPIO */
+ PINMUX_DATA(PTH7_DATA, PTH7_IN, PTH7_OUT),
+ PINMUX_DATA(PTH6_DATA, PTH6_IN, PTH6_OUT),
+ PINMUX_DATA(PTH5_DATA, PTH5_IN, PTH5_OUT),
+ PINMUX_DATA(PTH4_DATA, PTH4_IN, PTH4_OUT),
+ PINMUX_DATA(PTH3_DATA, PTH3_IN, PTH3_OUT),
+ PINMUX_DATA(PTH2_DATA, PTH2_IN, PTH2_OUT),
+ PINMUX_DATA(PTH1_DATA, PTH1_IN, PTH1_OUT),
+ PINMUX_DATA(PTH0_DATA, PTH0_IN, PTH0_OUT),
+
+ /* PTJ GPIO */
+ PINMUX_DATA(PTJ7_DATA, PTJ7_OUT),
+ PINMUX_DATA(PTJ5_DATA, PTJ5_OUT),
+ PINMUX_DATA(PTJ3_DATA, PTJ3_IN, PTJ3_OUT),
+ PINMUX_DATA(PTJ2_DATA, PTJ2_IN, PTJ2_OUT),
+ PINMUX_DATA(PTJ1_DATA, PTJ1_IN, PTJ1_OUT),
+ PINMUX_DATA(PTJ0_DATA, PTJ0_IN, PTJ0_OUT),
+
+ /* PTK GPIO */
+ PINMUX_DATA(PTK7_DATA, PTK7_IN, PTK7_OUT),
+ PINMUX_DATA(PTK6_DATA, PTK6_IN, PTK6_OUT),
+ PINMUX_DATA(PTK5_DATA, PTK5_IN, PTK5_OUT),
+ PINMUX_DATA(PTK4_DATA, PTK4_IN, PTK4_OUT),
+ PINMUX_DATA(PTK3_DATA, PTK3_IN, PTK3_OUT),
+ PINMUX_DATA(PTK2_DATA, PTK2_IN, PTK2_OUT),
+ PINMUX_DATA(PTK1_DATA, PTK1_IN, PTK1_OUT),
+ PINMUX_DATA(PTK0_DATA, PTK0_IN, PTK0_OUT),
+
+ /* PTL GPIO */
+ PINMUX_DATA(PTL7_DATA, PTL7_IN, PTL7_OUT),
+ PINMUX_DATA(PTL6_DATA, PTL6_IN, PTL6_OUT),
+ PINMUX_DATA(PTL5_DATA, PTL5_IN, PTL5_OUT),
+ PINMUX_DATA(PTL4_DATA, PTL4_IN, PTL4_OUT),
+ PINMUX_DATA(PTL3_DATA, PTL3_IN, PTL3_OUT),
+ PINMUX_DATA(PTL2_DATA, PTL2_IN, PTL2_OUT),
+ PINMUX_DATA(PTL1_DATA, PTL1_IN, PTL1_OUT),
+ PINMUX_DATA(PTL0_DATA, PTL0_IN, PTL0_OUT),
+
+ /* PTM GPIO */
+ PINMUX_DATA(PTM7_DATA, PTM7_IN, PTM7_OUT),
+ PINMUX_DATA(PTM6_DATA, PTM6_IN, PTM6_OUT),
+ PINMUX_DATA(PTM5_DATA, PTM5_IN, PTM5_OUT),
+ PINMUX_DATA(PTM4_DATA, PTM4_IN, PTM4_OUT),
+ PINMUX_DATA(PTM3_DATA, PTM3_IN, PTM3_OUT),
+ PINMUX_DATA(PTM2_DATA, PTM2_IN, PTM2_OUT),
+ PINMUX_DATA(PTM1_DATA, PTM1_IN, PTM1_OUT),
+ PINMUX_DATA(PTM0_DATA, PTM0_IN, PTM0_OUT),
+
+ /* PTN GPIO */
+ PINMUX_DATA(PTN7_DATA, PTN7_IN, PTN7_OUT),
+ PINMUX_DATA(PTN6_DATA, PTN6_IN, PTN6_OUT),
+ PINMUX_DATA(PTN5_DATA, PTN5_IN, PTN5_OUT),
+ PINMUX_DATA(PTN4_DATA, PTN4_IN, PTN4_OUT),
+ PINMUX_DATA(PTN3_DATA, PTN3_IN, PTN3_OUT),
+ PINMUX_DATA(PTN2_DATA, PTN2_IN, PTN2_OUT),
+ PINMUX_DATA(PTN1_DATA, PTN1_IN, PTN1_OUT),
+ PINMUX_DATA(PTN0_DATA, PTN0_IN, PTN0_OUT),
+
+ /* PTQ GPIO */
+ PINMUX_DATA(PTQ3_DATA, PTQ3_IN),
+ PINMUX_DATA(PTQ2_DATA, PTQ2_IN),
+ PINMUX_DATA(PTQ1_DATA, PTQ1_IN),
+ PINMUX_DATA(PTQ0_DATA, PTQ0_IN),
+
+ /* PTR GPIO */
+ PINMUX_DATA(PTR7_DATA, PTR7_IN, PTR7_OUT),
+ PINMUX_DATA(PTR6_DATA, PTR6_IN, PTR6_OUT),
+ PINMUX_DATA(PTR5_DATA, PTR5_IN, PTR5_OUT),
+ PINMUX_DATA(PTR4_DATA, PTR4_IN, PTR4_OUT),
+ PINMUX_DATA(PTR3_DATA, PTR3_IN),
+ PINMUX_DATA(PTR2_DATA, PTR2_IN, PTR2_IN_PU),
+ PINMUX_DATA(PTR1_DATA, PTR1_IN, PTR1_OUT),
+ PINMUX_DATA(PTR0_DATA, PTR0_IN, PTR0_OUT),
+
+ /* PTS GPIO */
+ PINMUX_DATA(PTS7_DATA, PTS7_IN, PTS7_OUT),
+ PINMUX_DATA(PTS6_DATA, PTS6_IN, PTS6_OUT),
+ PINMUX_DATA(PTS5_DATA, PTS5_IN, PTS5_OUT),
+ PINMUX_DATA(PTS4_DATA, PTS4_IN, PTS4_OUT),
+ PINMUX_DATA(PTS3_DATA, PTS3_IN, PTS3_OUT),
+ PINMUX_DATA(PTS2_DATA, PTS2_IN, PTS2_OUT),
+ PINMUX_DATA(PTS1_DATA, PTS1_IN, PTS1_OUT),
+ PINMUX_DATA(PTS0_DATA, PTS0_IN, PTS0_OUT),
+
+ /* PTT GPIO */
+ PINMUX_DATA(PTT5_DATA, PTT5_IN, PTT5_OUT),
+ PINMUX_DATA(PTT4_DATA, PTT4_IN, PTT4_OUT),
+ PINMUX_DATA(PTT3_DATA, PTT3_IN, PTT3_OUT),
+ PINMUX_DATA(PTT2_DATA, PTT2_IN, PTT2_OUT),
+ PINMUX_DATA(PTT1_DATA, PTT1_IN, PTT1_OUT),
+ PINMUX_DATA(PTT0_DATA, PTT0_IN, PTT0_OUT),
+
+ /* PTU GPIO */
+ PINMUX_DATA(PTU5_DATA, PTU5_IN, PTU5_OUT),
+ PINMUX_DATA(PTU4_DATA, PTU4_IN, PTU4_OUT),
+ PINMUX_DATA(PTU3_DATA, PTU3_IN, PTU3_OUT),
+ PINMUX_DATA(PTU2_DATA, PTU2_IN, PTU2_OUT),
+ PINMUX_DATA(PTU1_DATA, PTU1_IN, PTU1_OUT),
+ PINMUX_DATA(PTU0_DATA, PTU0_IN, PTU0_OUT),
+
+ /* PTV GPIO */
+ PINMUX_DATA(PTV7_DATA, PTV7_IN, PTV7_OUT),
+ PINMUX_DATA(PTV6_DATA, PTV6_IN, PTV6_OUT),
+ PINMUX_DATA(PTV5_DATA, PTV5_IN, PTV5_OUT),
+ PINMUX_DATA(PTV4_DATA, PTV4_IN, PTV4_OUT),
+ PINMUX_DATA(PTV3_DATA, PTV3_IN, PTV3_OUT),
+ PINMUX_DATA(PTV2_DATA, PTV2_IN, PTV2_OUT),
+ PINMUX_DATA(PTV1_DATA, PTV1_IN, PTV1_OUT),
+ PINMUX_DATA(PTV0_DATA, PTV0_IN, PTV0_OUT),
+
+ /* PTW GPIO */
+ PINMUX_DATA(PTW7_DATA, PTW7_IN, PTW7_OUT),
+ PINMUX_DATA(PTW6_DATA, PTW6_IN, PTW6_OUT),
+ PINMUX_DATA(PTW5_DATA, PTW5_IN, PTW5_OUT),
+ PINMUX_DATA(PTW4_DATA, PTW4_IN, PTW4_OUT),
+ PINMUX_DATA(PTW3_DATA, PTW3_IN, PTW3_OUT),
+ PINMUX_DATA(PTW2_DATA, PTW2_IN, PTW2_OUT),
+ PINMUX_DATA(PTW1_DATA, PTW1_IN, PTW1_OUT),
+ PINMUX_DATA(PTW0_DATA, PTW0_IN, PTW0_OUT),
+
+ /* PTX GPIO */
+ PINMUX_DATA(PTX7_DATA, PTX7_IN, PTX7_OUT),
+ PINMUX_DATA(PTX6_DATA, PTX6_IN, PTX6_OUT),
+ PINMUX_DATA(PTX5_DATA, PTX5_IN, PTX5_OUT),
+ PINMUX_DATA(PTX4_DATA, PTX4_IN, PTX4_OUT),
+ PINMUX_DATA(PTX3_DATA, PTX3_IN, PTX3_OUT),
+ PINMUX_DATA(PTX2_DATA, PTX2_IN, PTX2_OUT),
+ PINMUX_DATA(PTX1_DATA, PTX1_IN, PTX1_OUT),
+ PINMUX_DATA(PTX0_DATA, PTX0_IN, PTX0_OUT),
+
+ /* PTY GPIO */
+ PINMUX_DATA(PTY7_DATA, PTY7_IN, PTY7_OUT),
+ PINMUX_DATA(PTY6_DATA, PTY6_IN, PTY6_OUT),
+ PINMUX_DATA(PTY5_DATA, PTY5_IN, PTY5_OUT),
+ PINMUX_DATA(PTY4_DATA, PTY4_IN, PTY4_OUT),
+ PINMUX_DATA(PTY3_DATA, PTY3_IN, PTY3_OUT),
+ PINMUX_DATA(PTY2_DATA, PTY2_IN, PTY2_OUT),
+ PINMUX_DATA(PTY1_DATA, PTY1_IN, PTY1_OUT),
+ PINMUX_DATA(PTY0_DATA, PTY0_IN, PTY0_OUT),
+
+ /* PTZ GPIO */
+ PINMUX_DATA(PTZ7_DATA, PTZ7_IN, PTZ7_OUT),
+ PINMUX_DATA(PTZ6_DATA, PTZ6_IN, PTZ6_OUT),
+ PINMUX_DATA(PTZ5_DATA, PTZ5_IN, PTZ5_OUT),
+ PINMUX_DATA(PTZ4_DATA, PTZ4_IN, PTZ4_OUT),
+ PINMUX_DATA(PTZ3_DATA, PTZ3_IN, PTZ3_OUT),
+ PINMUX_DATA(PTZ2_DATA, PTZ2_IN, PTZ2_OUT),
+ PINMUX_DATA(PTZ1_DATA, PTZ1_IN, PTZ1_OUT),
+ PINMUX_DATA(PTZ0_DATA, PTZ0_IN, PTZ0_OUT),
+
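+ /*
+ * Peripheral functions: a mark is reached with the pin in its _FN state
+ * together with the PSEL setting that routes the signal, so a pin shared
+ * between two or three peripherals appears once per PSEL alternative.
+ */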
+ /* PTA FN */
+ PINMUX_DATA(D23_MARK, PSA15_PSA14_FN1, PTA7_FN),
+ PINMUX_DATA(KEYOUT2_MARK, PSA15_PSA14_FN2, PTA7_FN),
+ PINMUX_DATA(D22_MARK, PSA15_PSA14_FN1, PTA6_FN),
+ PINMUX_DATA(KEYOUT1_MARK, PSA15_PSA14_FN2, PTA6_FN),
+ PINMUX_DATA(D21_MARK, PSA15_PSA14_FN1, PTA5_FN),
+ PINMUX_DATA(KEYOUT0_MARK, PSA15_PSA14_FN2, PTA5_FN),
+ PINMUX_DATA(D20_MARK, PSA15_PSA14_FN1, PTA4_FN),
+ PINMUX_DATA(KEYIN4_MARK, PSA15_PSA14_FN2, PTA4_FN),
+ PINMUX_DATA(D19_MARK, PSA15_PSA14_FN1, PTA3_FN),
+ PINMUX_DATA(KEYIN3_MARK, PSA15_PSA14_FN2, PTA3_FN),
+ PINMUX_DATA(D18_MARK, PSA15_PSA14_FN1, PTA2_FN),
+ PINMUX_DATA(KEYIN2_MARK, PSA15_PSA14_FN2, PTA2_FN),
+ PINMUX_DATA(D17_MARK, PSA15_PSA14_FN1, PTA1_FN),
+ PINMUX_DATA(KEYIN1_MARK, PSA15_PSA14_FN2, PTA1_FN),
+ PINMUX_DATA(D16_MARK, PSA15_PSA14_FN1, PTA0_FN),
+ PINMUX_DATA(KEYIN0_MARK, PSA15_PSA14_FN2, PTA0_FN),
+
+ /* PTB FN */
+ PINMUX_DATA(D31_MARK, PTB7_FN),
+ PINMUX_DATA(D30_MARK, PTB6_FN),
+ PINMUX_DATA(D29_MARK, PTB5_FN),
+ PINMUX_DATA(D28_MARK, PTB4_FN),
+ PINMUX_DATA(D27_MARK, PTB3_FN),
+ PINMUX_DATA(D26_MARK, PSA15_PSA14_FN1, PTB2_FN),
+ PINMUX_DATA(KEYOUT5_IN5_MARK, PSA15_PSA14_FN2, PTB2_FN),
+ PINMUX_DATA(D25_MARK, PSA15_PSA14_FN1, PTB1_FN),
+ PINMUX_DATA(KEYOUT4_IN6_MARK, PSA15_PSA14_FN2, PTB1_FN),
+ PINMUX_DATA(D24_MARK, PSA15_PSA14_FN1, PTB0_FN),
+ PINMUX_DATA(KEYOUT3_MARK, PSA15_PSA14_FN2, PTB0_FN),
+
+ /* PTC FN */
+ PINMUX_DATA(IDED15_MARK, PSA11_PSA10_FN1, PTC7_FN),
+ PINMUX_DATA(SDHI1CD_MARK, PSA11_PSA10_FN2, PTC7_FN),
+ PINMUX_DATA(IDED14_MARK, PSA11_PSA10_FN1, PTC6_FN),
+ PINMUX_DATA(SDHI1WP_MARK, PSA11_PSA10_FN2, PTC6_FN),
+ PINMUX_DATA(IDED13_MARK, PSA11_PSA10_FN1, PTC5_FN),
+ PINMUX_DATA(SDHI1D3_MARK, PSA11_PSA10_FN2, PTC5_FN),
+ PINMUX_DATA(IDED12_MARK, PSA11_PSA10_FN1, PTC4_FN),
+ PINMUX_DATA(SDHI1D2_MARK, PSA11_PSA10_FN2, PTC4_FN),
+ PINMUX_DATA(IDED11_MARK, PSA11_PSA10_FN1, PTC3_FN),
+ PINMUX_DATA(SDHI1D1_MARK, PSA11_PSA10_FN2, PTC3_FN),
+ PINMUX_DATA(IDED10_MARK, PSA11_PSA10_FN1, PTC2_FN),
+ PINMUX_DATA(SDHI1D0_MARK, PSA11_PSA10_FN2, PTC2_FN),
+ PINMUX_DATA(IDED9_MARK, PSA11_PSA10_FN1, PTC1_FN),
+ PINMUX_DATA(SDHI1CMD_MARK, PSA11_PSA10_FN2, PTC1_FN),
+ PINMUX_DATA(IDED8_MARK, PSA11_PSA10_FN1, PTC0_FN),
+ PINMUX_DATA(SDHI1CLK_MARK, PSA11_PSA10_FN2, PTC0_FN),
+
+ /* PTD FN */
+ PINMUX_DATA(IDED7_MARK, PSA11_PSA10_FN1, PTD7_FN),
+ PINMUX_DATA(SDHI0CD_PTD_MARK, PSA11_PSA10_FN2, PTD7_FN),
+ PINMUX_DATA(IDED6_MARK, PSA11_PSA10_FN1, PTD6_FN),
+ PINMUX_DATA(SDHI0WP_PTD_MARK, PSA11_PSA10_FN2, PTD6_FN),
+ PINMUX_DATA(IDED5_MARK, PSA11_PSA10_FN1, PTD5_FN),
+ PINMUX_DATA(SDHI0D3_PTD_MARK, PSA11_PSA10_FN2, PTD5_FN),
+ PINMUX_DATA(IDED4_MARK, PSA11_PSA10_FN1, PTD4_FN),
+ PINMUX_DATA(SDHI0D2_PTD_MARK, PSA11_PSA10_FN2, PTD4_FN),
+ PINMUX_DATA(IDED3_MARK, PSA11_PSA10_FN1, PTD3_FN),
+ PINMUX_DATA(SDHI0D1_PTD_MARK, PSA11_PSA10_FN2, PTD3_FN),
+ PINMUX_DATA(IDED2_MARK, PSA11_PSA10_FN1, PTD2_FN),
+ PINMUX_DATA(SDHI0D0_PTD_MARK, PSA11_PSA10_FN2, PTD2_FN),
+ PINMUX_DATA(IDED1_MARK, PSA11_PSA10_FN1, PTD1_FN),
+ PINMUX_DATA(SDHI0CMD_PTD_MARK, PSA11_PSA10_FN2, PTD1_FN),
+ PINMUX_DATA(IDED0_MARK, PSA11_PSA10_FN1, PTD0_FN),
+ PINMUX_DATA(SDHI0CLK_PTD_MARK, PSA11_PSA10_FN2, PTD0_FN),
+
+ /* PTE FN */
+ PINMUX_DATA(DIRECTION_MARK, PSA11_PSA10_FN1, PTE5_FN),
+ PINMUX_DATA(SCIF5_PTE_SCK_MARK, PSA11_PSA10_FN2, PTE5_FN),
+ PINMUX_DATA(EXBUF_ENB_MARK, PSA11_PSA10_FN1, PTE4_FN),
+ PINMUX_DATA(SCIF5_PTE_RXD_MARK, PSA11_PSA10_FN2, PTE4_FN),
+ PINMUX_DATA(IDERST_MARK, PSA11_PSA10_FN1, PTE3_FN),
+ PINMUX_DATA(SCIF5_PTE_TXD_MARK, PSA11_PSA10_FN2, PTE3_FN),
+ PINMUX_DATA(IODACK_MARK, PSA11_PSA10_FN1, PTE2_FN),
+ PINMUX_DATA(SCIF4_PTE_SCK_MARK, PSA11_PSA10_FN2, PTE2_FN),
+ PINMUX_DATA(IODREQ_MARK, PSA11_PSA10_FN1, PTE1_FN),
+ PINMUX_DATA(SCIF4_PTE_RXD_MARK, PSA11_PSA10_FN2, PTE1_FN),
+ PINMUX_DATA(IDEIORDY_MARK, PSA11_PSA10_FN1, PTE0_FN),
+ PINMUX_DATA(SCIF4_PTE_TXD_MARK, PSA11_PSA10_FN2, PTE0_FN),
+
+ /* PTF FN */
+ PINMUX_DATA(IDEINT_MARK, PTF7_FN),
+ PINMUX_DATA(IDEIOWR_MARK, PSA5_PSA4_FN1, PTF6_FN),
+ PINMUX_DATA(MSIOF0_PTF_SS2_MARK, PSA5_PSA4_FN2, PTF6_FN),
+ PINMUX_DATA(MSIOF0_PTF_RSYNC_MARK, PSA5_PSA4_FN3, PTF6_FN),
+ PINMUX_DATA(IDEIORD_MARK, PSA5_PSA4_FN1, PTF5_FN),
+ PINMUX_DATA(MSIOF0_PTF_SS1_MARK, PSA5_PSA4_FN2, PTF5_FN),
+ PINMUX_DATA(MSIOF0_PTF_RSCK_MARK, PSA5_PSA4_FN3, PTF5_FN),
+ PINMUX_DATA(IDECS1_MARK, PSA11_PSA10_FN1, PTF4_FN),
+ PINMUX_DATA(MSIOF0_PTF_TSYNC_MARK, PSA11_PSA10_FN2, PTF4_FN),
+ PINMUX_DATA(IDECS0_MARK, PSA11_PSA10_FN1, PTF3_FN),
+ PINMUX_DATA(MSIOF0_PTF_TSCK_MARK, PSA11_PSA10_FN2, PTF3_FN),
+ PINMUX_DATA(IDEA2_MARK, PSA11_PSA10_FN1, PTF2_FN),
+ PINMUX_DATA(MSIOF0_PTF_RXD_MARK, PSA11_PSA10_FN2, PTF2_FN),
+ PINMUX_DATA(IDEA1_MARK, PSA11_PSA10_FN1, PTF1_FN),
+ PINMUX_DATA(MSIOF0_PTF_TXD_MARK, PSA11_PSA10_FN2, PTF1_FN),
+ PINMUX_DATA(IDEA0_MARK, PSA11_PSA10_FN1, PTF0_FN),
+ PINMUX_DATA(MSIOF0_PTF_MCK_MARK, PSA11_PSA10_FN2, PTF0_FN),
+
+ /* PTG FN */
+ PINMUX_DATA(AUDCK_MARK, PTG5_FN),
+ PINMUX_DATA(AUDSYNC_MARK, PTG4_FN),
+ PINMUX_DATA(AUDATA3_MARK, PSA3_PSA2_FN1, PTG3_FN),
+ PINMUX_DATA(TPUTO3_MARK, PSA3_PSA2_FN2, PTG3_FN),
+ PINMUX_DATA(AUDATA2_MARK, PSA3_PSA2_FN1, PTG2_FN),
+ PINMUX_DATA(TPUTO2_MARK, PSA3_PSA2_FN2, PTG2_FN),
+ PINMUX_DATA(AUDATA1_MARK, PSA3_PSA2_FN1, PTG1_FN),
+ PINMUX_DATA(TPUTO1_MARK, PSA3_PSA2_FN2, PTG1_FN),
+ PINMUX_DATA(AUDATA0_MARK, PSA3_PSA2_FN1, PTG0_FN),
+ PINMUX_DATA(TPUTO0_MARK, PSA3_PSA2_FN2, PTG0_FN),
+
+ /* PTH FN */
+ PINMUX_DATA(LCDVCPWC_MARK, PTH7_FN),
+ PINMUX_DATA(LCDRD_MARK, PSB15_PSB14_FN1, PTH6_FN),
+ PINMUX_DATA(DV_CLKI_MARK, PSB15_PSB14_FN2, PTH6_FN),
+ PINMUX_DATA(LCDVSYN_MARK, PSB15_PSB14_FN1, PTH5_FN),
+ PINMUX_DATA(DV_CLK_MARK, PSB15_PSB14_FN2, PTH5_FN),
+ PINMUX_DATA(LCDDISP_MARK, PSB13_PSB12_LCDC_RGB, PTH4_FN),
+ PINMUX_DATA(LCDRS_MARK, PSB13_PSB12_LCDC_SYS, PTH4_FN),
+ PINMUX_DATA(LCDHSYN_MARK, PSB13_PSB12_LCDC_RGB, PTH3_FN),
+ PINMUX_DATA(LCDCS_MARK, PSB13_PSB12_LCDC_SYS, PTH3_FN),
+ PINMUX_DATA(LCDDON_MARK, PTH2_FN),
+ PINMUX_DATA(LCDDCK_MARK, PSB13_PSB12_LCDC_RGB, PTH1_FN),
+ PINMUX_DATA(LCDWR_MARK, PSB13_PSB12_LCDC_SYS, PTH1_FN),
+ PINMUX_DATA(LCDVEPWC_MARK, PTH0_FN),
+
+ /* PTJ FN */
+ PINMUX_DATA(STATUS0_MARK, PTJ7_FN),
+ PINMUX_DATA(PDSTATUS_MARK, PTJ5_FN),
+ PINMUX_DATA(A25_MARK, PTJ3_FN),
+ PINMUX_DATA(A24_MARK, PTJ2_FN),
+ PINMUX_DATA(A23_MARK, PTJ1_FN),
+ PINMUX_DATA(A22_MARK, PTJ0_FN),
+
+ /* PTK FN */
+ PINMUX_DATA(SIUAFCK_MARK, PTK7_FN),
+ PINMUX_DATA(SIUAILR_MARK, PSB9_PSB8_FN1, PTK6_FN),
+ PINMUX_DATA(MSIOF1_SS2_MARK, PSB9_PSB8_FN2, PTK6_FN),
+ PINMUX_DATA(MSIOF1_RSYNC_MARK, PSB9_PSB8_FN3, PTK6_FN),
+ PINMUX_DATA(SIUAIBT_MARK, PSB9_PSB8_FN1, PTK5_FN),
+ PINMUX_DATA(MSIOF1_SS1_MARK, PSB9_PSB8_FN2, PTK5_FN),
+ PINMUX_DATA(MSIOF1_RSCK_MARK, PSB9_PSB8_FN3, PTK5_FN),
+ PINMUX_DATA(SIUAISLD_MARK, PSB7_PSB6_FN1, PTK4_FN),
+ PINMUX_DATA(MSIOF1_RXD_MARK, PSB7_PSB6_FN2, PTK4_FN),
+ PINMUX_DATA(SIUAOLR_MARK, PSB7_PSB6_FN1, PTK3_FN),
+ PINMUX_DATA(MSIOF1_TSYNC_MARK, PSB7_PSB6_FN2, PTK3_FN),
+ PINMUX_DATA(SIUAOBT_MARK, PSB7_PSB6_FN1, PTK2_FN),
+ PINMUX_DATA(MSIOF1_TSCK_MARK, PSB7_PSB6_FN2, PTK2_FN),
+ PINMUX_DATA(SIUAOSLD_MARK, PSB7_PSB6_FN1, PTK1_FN),
+ PINMUX_DATA(MSIOF1_TXD_MARK, PSB7_PSB6_FN2, PTK1_FN),
+ PINMUX_DATA(SIUAMCK_MARK, PSB7_PSB6_FN1, PTK0_FN),
+ PINMUX_DATA(MSIOF1_MCK_MARK, PSB7_PSB6_FN2, PTK0_FN),
+
+ /* PTL FN */
+ PINMUX_DATA(LCDD15_MARK, PSB5_PSB4_FN1, PTL7_FN),
+ PINMUX_DATA(DV_D15_MARK, PSB5_PSB4_FN2, PTL7_FN),
+ PINMUX_DATA(LCDD14_MARK, PSB5_PSB4_FN1, PTL6_FN),
+ PINMUX_DATA(DV_D14_MARK, PSB5_PSB4_FN2, PTL6_FN),
+ PINMUX_DATA(LCDD13_MARK, PSB5_PSB4_FN1, PTL5_FN),
+ PINMUX_DATA(DV_D13_MARK, PSB5_PSB4_FN2, PTL5_FN),
+ PINMUX_DATA(LCDD12_MARK, PSB5_PSB4_FN1, PTL4_FN),
+ PINMUX_DATA(DV_D12_MARK, PSB5_PSB4_FN2, PTL4_FN),
+ PINMUX_DATA(LCDD11_MARK, PSB5_PSB4_FN1, PTL3_FN),
+ PINMUX_DATA(DV_D11_MARK, PSB5_PSB4_FN2, PTL3_FN),
+ PINMUX_DATA(LCDD10_MARK, PSB5_PSB4_FN1, PTL2_FN),
+ PINMUX_DATA(DV_D10_MARK, PSB5_PSB4_FN2, PTL2_FN),
+ PINMUX_DATA(LCDD9_MARK, PSB5_PSB4_FN1, PTL1_FN),
+ PINMUX_DATA(DV_D9_MARK, PSB5_PSB4_FN2, PTL1_FN),
+ PINMUX_DATA(LCDD8_MARK, PSB5_PSB4_FN1, PTL0_FN),
+ PINMUX_DATA(DV_D8_MARK, PSB5_PSB4_FN2, PTL0_FN),
+
+ /* PTM FN */
+ PINMUX_DATA(LCDD7_MARK, PSB5_PSB4_FN1, PTM7_FN),
+ PINMUX_DATA(DV_D7_MARK, PSB5_PSB4_FN2, PTM7_FN),
+ PINMUX_DATA(LCDD6_MARK, PSB5_PSB4_FN1, PTM6_FN),
+ PINMUX_DATA(DV_D6_MARK, PSB5_PSB4_FN2, PTM6_FN),
+ PINMUX_DATA(LCDD5_MARK, PSB5_PSB4_FN1, PTM5_FN),
+ PINMUX_DATA(DV_D5_MARK, PSB5_PSB4_FN2, PTM5_FN),
+ PINMUX_DATA(LCDD4_MARK, PSB5_PSB4_FN1, PTM4_FN),
+ PINMUX_DATA(DV_D4_MARK, PSB5_PSB4_FN2, PTM4_FN),
+ PINMUX_DATA(LCDD3_MARK, PSB5_PSB4_FN1, PTM3_FN),
+ PINMUX_DATA(DV_D3_MARK, PSB5_PSB4_FN2, PTM3_FN),
+ PINMUX_DATA(LCDD2_MARK, PSB5_PSB4_FN1, PTM2_FN),
+ PINMUX_DATA(DV_D2_MARK, PSB5_PSB4_FN2, PTM2_FN),
+ PINMUX_DATA(LCDD1_MARK, PSB5_PSB4_FN1, PTM1_FN),
+ PINMUX_DATA(DV_D1_MARK, PSB5_PSB4_FN2, PTM1_FN),
+ PINMUX_DATA(LCDD0_MARK, PSB5_PSB4_FN1, PTM0_FN),
+ PINMUX_DATA(DV_D0_MARK, PSB5_PSB4_FN2, PTM0_FN),
+
+ /* PTN FN */
+ PINMUX_DATA(LCDD23_MARK, PSB3_PSB2_FN1, PTN7_FN),
+ PINMUX_DATA(SCIF5_PTN_SCK_MARK, PSB3_PSB2_FN2, PTN7_FN),
+ PINMUX_DATA(LCDD22_MARK, PSB3_PSB2_FN1, PTN6_FN),
+ PINMUX_DATA(SCIF5_PTN_RXD_MARK, PSB3_PSB2_FN2, PTN6_FN),
+ PINMUX_DATA(LCDD21_MARK, PSB3_PSB2_FN1, PTN5_FN),
+ PINMUX_DATA(SCIF5_PTN_TXD_MARK, PSB3_PSB2_FN2, PTN5_FN),
+ PINMUX_DATA(LCDD20_MARK, PSB3_PSB2_FN1, PTN4_FN),
+ PINMUX_DATA(SCIF4_PTN_SCK_MARK, PSB3_PSB2_FN2, PTN4_FN),
+ PINMUX_DATA(LCDD19_MARK, PSB3_PSB2_FN1, PTN3_FN),
+ PINMUX_DATA(SCIF4_PTN_RXD_MARK, PSB3_PSB2_FN2, PTN3_FN),
+ PINMUX_DATA(LCDD18_MARK, PSB3_PSB2_FN1, PTN2_FN),
+ PINMUX_DATA(SCIF4_PTN_TXD_MARK, PSB3_PSB2_FN2, PTN2_FN),
+ PINMUX_DATA(LCDD17_MARK, PSB5_PSB4_FN1, PTN1_FN),
+ PINMUX_DATA(DV_VSYNC_MARK, PSB5_PSB4_FN2, PTN1_FN),
+ PINMUX_DATA(LCDD16_MARK, PSB5_PSB4_FN1, PTN0_FN),
+ PINMUX_DATA(DV_HSYNC_MARK, PSB5_PSB4_FN2, PTN0_FN),
+
+ /* PTQ FN */
+ PINMUX_DATA(AN3_MARK, PTQ3_FN),
+ PINMUX_DATA(AN2_MARK, PTQ2_FN),
+ PINMUX_DATA(AN1_MARK, PTQ1_FN),
+ PINMUX_DATA(AN0_MARK, PTQ0_FN),
+
+ /* PTR FN */
+ PINMUX_DATA(CS6B_CE1B_MARK, PTR7_FN),
+ PINMUX_DATA(CS6A_CE2B_MARK, PTR6_FN),
+ PINMUX_DATA(CS5B_CE1A_MARK, PTR5_FN),
+ PINMUX_DATA(CS5A_CE2A_MARK, PTR4_FN),
+ PINMUX_DATA(IOIS16_MARK, PSA13_PSA12_FN1, PTR3_FN),
+ PINMUX_DATA(LCDLCLK_PTR_MARK, PSA13_PSA12_FN2, PTR3_FN),
+ PINMUX_DATA(WAIT_MARK, PTR2_FN),
+ PINMUX_DATA(WE3_ICIOWR_MARK, PTR1_FN),
+ PINMUX_DATA(WE2_ICIORD_MARK, PTR0_FN),
+
+ /* PTS FN */
+ PINMUX_DATA(SCIF1_PTS_SCK_MARK, PSC15_PSC14_FN1, PTS7_FN),
+ PINMUX_DATA(SDHI0CD_PTS_MARK, PSC15_PSC14_FN2, PTS7_FN),
+ PINMUX_DATA(SCIF1_PTS_RXD_MARK, PSC15_PSC14_FN1, PTS6_FN),
+ PINMUX_DATA(SDHI0WP_PTS_MARK, PSC15_PSC14_FN2, PTS6_FN),
+ PINMUX_DATA(SCIF1_PTS_TXD_MARK, PSC15_PSC14_FN1, PTS5_FN),
+ PINMUX_DATA(SDHI0D3_PTS_MARK, PSC15_PSC14_FN2, PTS5_FN),
+ PINMUX_DATA(SCIF3_PTS_CTS_MARK, PSC15_PSC14_FN1, PTS4_FN),
+ PINMUX_DATA(SDHI0D2_PTS_MARK, PSC15_PSC14_FN2, PTS4_FN),
+ PINMUX_DATA(SCIF3_PTS_RTS_MARK, PSC15_PSC14_FN1, PTS3_FN),
+ PINMUX_DATA(SDHI0D1_PTS_MARK, PSC15_PSC14_FN2, PTS3_FN),
+ PINMUX_DATA(SCIF3_PTS_SCK_MARK, PSC15_PSC14_FN1, PTS2_FN),
+ PINMUX_DATA(SDHI0D0_PTS_MARK, PSC15_PSC14_FN2, PTS2_FN),
+ PINMUX_DATA(SCIF3_PTS_RXD_MARK, PSC15_PSC14_FN1, PTS1_FN),
+ PINMUX_DATA(SDHI0CMD_PTS_MARK, PSC15_PSC14_FN2, PTS1_FN),
+ PINMUX_DATA(SCIF3_PTS_TXD_MARK, PSC15_PSC14_FN1, PTS0_FN),
+ PINMUX_DATA(SDHI0CLK_PTS_MARK, PSC15_PSC14_FN2, PTS0_FN),
+
+ /* PTT FN */
+ PINMUX_DATA(SCIF0_PTT_SCK_MARK, PSC13_PSC12_FN1, PTT5_FN),
+ PINMUX_DATA(MSIOF0_PTT_TSCK_MARK, PSC13_PSC12_FN2, PTT5_FN),
+ PINMUX_DATA(SCIF0_PTT_RXD_MARK, PSC13_PSC12_FN1, PTT4_FN),
+ PINMUX_DATA(MSIOF0_PTT_RXD_MARK, PSC13_PSC12_FN2, PTT4_FN),
+ PINMUX_DATA(SCIF0_PTT_TXD_MARK, PSC13_PSC12_FN1, PTT3_FN),
+ PINMUX_DATA(MSIOF0_PTT_TXD_MARK, PSC13_PSC12_FN2, PTT3_FN),
+ PINMUX_DATA(SCIF2_PTT_SCK_MARK, PSC11_PSC10_FN1, PTT2_FN),
+ PINMUX_DATA(MSIOF0_PTT_TSYNC_MARK, PSC11_PSC10_FN2, PTT2_FN),
+ PINMUX_DATA(SCIF2_PTT_RXD_MARK, PSC11_PSC10_FN1, PTT1_FN),
+ PINMUX_DATA(MSIOF0_PTT_SS1_MARK, PSC11_PSC10_FN2, PTT1_FN),
+ PINMUX_DATA(MSIOF0_PTT_RSCK_MARK, PSC11_PSC10_FN3, PTT1_FN),
+ PINMUX_DATA(SCIF2_PTT_TXD_MARK, PSC11_PSC10_FN1, PTT0_FN),
+ PINMUX_DATA(MSIOF0_PTT_SS2_MARK, PSC11_PSC10_FN2, PTT0_FN),
+ PINMUX_DATA(MSIOF0_PTT_RSYNC_MARK, PSC11_PSC10_FN3, PTT0_FN),
+
+ /* PTU FN */
+ PINMUX_DATA(FCDE_MARK, PSC9_PSC8_FN1, PTU5_FN),
+ PINMUX_DATA(SCIF0_PTU_SCK_MARK, PSC9_PSC8_FN2, PTU5_FN),
+ PINMUX_DATA(FSC_MARK, PSC9_PSC8_FN1, PTU4_FN),
+ PINMUX_DATA(SCIF0_PTU_RXD_MARK, PSC9_PSC8_FN2, PTU4_FN),
+ PINMUX_DATA(FWE_MARK, PSC9_PSC8_FN1, PTU3_FN),
+ PINMUX_DATA(SCIF0_PTU_TXD_MARK, PSC9_PSC8_FN2, PTU3_FN),
+ PINMUX_DATA(FOE_MARK, PSC7_PSC6_FN1, PTU2_FN),
+ PINMUX_DATA(SCIF2_PTU_SCK_MARK, PSC7_PSC6_FN2, PTU2_FN),
+ PINMUX_DATA(VIO_VD2_MARK, PSC7_PSC6_FN3, PTU2_FN),
+ PINMUX_DATA(FRB_MARK, PSC7_PSC6_FN1, PTU1_FN),
+ PINMUX_DATA(SCIF2_PTU_RXD_MARK, PSC7_PSC6_FN2, PTU1_FN),
+ PINMUX_DATA(VIO_CLK2_MARK, PSC7_PSC6_FN3, PTU1_FN),
+ PINMUX_DATA(FCE_MARK, PSC7_PSC6_FN1, PTU0_FN),
+ PINMUX_DATA(SCIF2_PTU_TXD_MARK, PSC7_PSC6_FN2, PTU0_FN),
+ PINMUX_DATA(VIO_HD2_MARK, PSC7_PSC6_FN3, PTU0_FN),
+
+ /* PTV FN */
+ PINMUX_DATA(NAF7_MARK, PSC7_PSC6_FN1, PTV7_FN),
+ PINMUX_DATA(SCIF1_PTV_SCK_MARK, PSC7_PSC6_FN2, PTV7_FN),
+ PINMUX_DATA(VIO_D15_MARK, PSC7_PSC6_FN3, PTV7_FN),
+ PINMUX_DATA(NAF6_MARK, PSC7_PSC6_FN1, PTV6_FN),
+ PINMUX_DATA(SCIF1_PTV_RXD_MARK, PSC7_PSC6_FN2, PTV6_FN),
+ PINMUX_DATA(VIO_D14_MARK, PSC7_PSC6_FN3, PTV6_FN),
+ PINMUX_DATA(NAF5_MARK, PSC7_PSC6_FN1, PTV5_FN),
+ PINMUX_DATA(SCIF1_PTV_TXD_MARK, PSC7_PSC6_FN2, PTV5_FN),
+ PINMUX_DATA(VIO_D13_MARK, PSC7_PSC6_FN3, PTV5_FN),
+ PINMUX_DATA(NAF4_MARK, PSC7_PSC6_FN1, PTV4_FN),
+ PINMUX_DATA(SCIF3_PTV_CTS_MARK, PSC7_PSC6_FN2, PTV4_FN),
+ PINMUX_DATA(VIO_D12_MARK, PSC7_PSC6_FN3, PTV4_FN),
+ PINMUX_DATA(NAF3_MARK, PSC7_PSC6_FN1, PTV3_FN),
+ PINMUX_DATA(SCIF3_PTV_RTS_MARK, PSC7_PSC6_FN2, PTV3_FN),
+ PINMUX_DATA(VIO_D11_MARK, PSC7_PSC6_FN3, PTV3_FN),
+ PINMUX_DATA(NAF2_MARK, PSC7_PSC6_FN1, PTV2_FN),
+ PINMUX_DATA(SCIF3_PTV_SCK_MARK, PSC7_PSC6_FN2, PTV2_FN),
+ PINMUX_DATA(VIO_D10_MARK, PSC7_PSC6_FN3, PTV2_FN),
+ PINMUX_DATA(NAF1_MARK, PSC7_PSC6_FN1, PTV1_FN),
+ PINMUX_DATA(SCIF3_PTV_RXD_MARK, PSC7_PSC6_FN2, PTV1_FN),
+ PINMUX_DATA(VIO_D9_MARK, PSC7_PSC6_FN3, PTV1_FN),
+ PINMUX_DATA(NAF0_MARK, PSC7_PSC6_FN1, PTV0_FN),
+ PINMUX_DATA(SCIF3_PTV_TXD_MARK, PSC7_PSC6_FN2, PTV0_FN),
+ PINMUX_DATA(VIO_D8_MARK, PSC7_PSC6_FN3, PTV0_FN),
+
+ /* PTW FN */
+ PINMUX_DATA(IRQ7_MARK, PTW7_FN),
+ PINMUX_DATA(IRQ6_MARK, PTW6_FN),
+ PINMUX_DATA(IRQ5_MARK, PTW5_FN),
+ PINMUX_DATA(IRQ4_MARK, PSD15_PSD14_FN1, PTW4_FN),
+ PINMUX_DATA(LCDLCLK_PTW_MARK, PSD15_PSD14_FN2, PTW4_FN),
+ PINMUX_DATA(IRQ3_MARK, PSD13_PSD12_FN1, PTW3_FN),
+ PINMUX_DATA(ADTRG_MARK, PSD13_PSD12_FN2, PTW3_FN),
+ PINMUX_DATA(IRQ2_MARK, PSD11_PSD10_FN1, PTW2_FN),
+ PINMUX_DATA(BS_MARK, PSD11_PSD10_FN2, PTW2_FN),
+ PINMUX_DATA(VIO_CKO_MARK, PSD11_PSD10_FN3, PTW2_FN),
+ PINMUX_DATA(IRQ1_MARK, PSD9_PSD8_FN1, PTW1_FN),
+ PINMUX_DATA(SIUAISPD_MARK, PSD9_PSD8_FN2, PTW1_FN),
+ PINMUX_DATA(IRQ0_MARK, PSD7_PSD6_FN1, PTW0_FN),
+ PINMUX_DATA(SIUAOSPD_MARK, PSD7_PSD6_FN2, PTW0_FN),
+
+ /* PTX FN */
+ PINMUX_DATA(DACK1_MARK, PTX7_FN),
+ PINMUX_DATA(DREQ1_MARK, PSD3_PSD2_FN1, PTX6_FN),
+ PINMUX_DATA(MSIOF0_PTX_MCK_MARK, PSD3_PSD2_FN2, PTX6_FN),
+ PINMUX_DATA(DACK0_MARK, PTX5_FN),
+ PINMUX_DATA(IRDA_OUT_MARK, PSD5_PSD4_FN2, PTX5_FN),
+ PINMUX_DATA(DREQ0_MARK, PTX4_FN),
+ PINMUX_DATA(IRDA_IN_MARK, PSD5_PSD4_FN2, PTX4_FN),
+ PINMUX_DATA(TS0_SDAT_MARK, PTX3_FN),
+ PINMUX_DATA(TS0_SCK_MARK, PTX2_FN),
+ PINMUX_DATA(TS0_SDEN_MARK, PTX1_FN),
+ PINMUX_DATA(TS0_SPSYNC_MARK, PTX0_FN),
+
+ /* PTY FN */
+ PINMUX_DATA(VIO_D7_MARK, PTY7_FN),
+ PINMUX_DATA(VIO_D6_MARK, PTY6_FN),
+ PINMUX_DATA(VIO_D5_MARK, PTY5_FN),
+ PINMUX_DATA(VIO_D4_MARK, PTY4_FN),
+ PINMUX_DATA(VIO_D3_MARK, PTY3_FN),
+ PINMUX_DATA(VIO_D2_MARK, PTY2_FN),
+ PINMUX_DATA(VIO_D1_MARK, PTY1_FN),
+ PINMUX_DATA(VIO_D0_MARK, PTY0_FN),
+
+ /* PTZ FN */
+ PINMUX_DATA(SIUBOBT_MARK, PTZ7_FN),
+ PINMUX_DATA(SIUBOLR_MARK, PTZ6_FN),
+ PINMUX_DATA(SIUBOSLD_MARK, PTZ5_FN),
+ PINMUX_DATA(SIUBMCK_MARK, PTZ4_FN),
+ PINMUX_DATA(VIO_FLD_MARK, PSD1_PSD0_FN1, PTZ3_FN),
+ PINMUX_DATA(SIUBFCK_MARK, PSD1_PSD0_FN2, PTZ3_FN),
+ PINMUX_DATA(VIO_HD1_MARK, PSD1_PSD0_FN1, PTZ2_FN),
+ PINMUX_DATA(SIUBILR_MARK, PSD1_PSD0_FN2, PTZ2_FN),
+ PINMUX_DATA(VIO_VD1_MARK, PSD1_PSD0_FN1, PTZ1_FN),
+ PINMUX_DATA(SIUBIBT_MARK, PSD1_PSD0_FN2, PTZ1_FN),
+ PINMUX_DATA(VIO_CLK1_MARK, PSD1_PSD0_FN1, PTZ0_FN),
+ PINMUX_DATA(SIUBISLD_MARK, PSD1_PSD0_FN2, PTZ0_FN),
+};
+
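+ /*
+ * GPIO table: PINMUX_GPIO() maps each public GPIO_PTxn and GPIO_FN_xxx
+ * number to the corresponding _DATA or _MARK enum ID above; board code
+ * selects a peripheral function by requesting the GPIO_FN_* number.
+ */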
+static struct pinmux_gpio pinmux_gpios[] = {
+ /* PTA */
+ PINMUX_GPIO(GPIO_PTA7, PTA7_DATA),
+ PINMUX_GPIO(GPIO_PTA6, PTA6_DATA),
+ PINMUX_GPIO(GPIO_PTA5, PTA5_DATA),
+ PINMUX_GPIO(GPIO_PTA4, PTA4_DATA),
+ PINMUX_GPIO(GPIO_PTA3, PTA3_DATA),
+ PINMUX_GPIO(GPIO_PTA2, PTA2_DATA),
+ PINMUX_GPIO(GPIO_PTA1, PTA1_DATA),
+ PINMUX_GPIO(GPIO_PTA0, PTA0_DATA),
+
+ /* PTB */
+ PINMUX_GPIO(GPIO_PTB7, PTB7_DATA),
+ PINMUX_GPIO(GPIO_PTB6, PTB6_DATA),
+ PINMUX_GPIO(GPIO_PTB5, PTB5_DATA),
+ PINMUX_GPIO(GPIO_PTB4, PTB4_DATA),
+ PINMUX_GPIO(GPIO_PTB3, PTB3_DATA),
+ PINMUX_GPIO(GPIO_PTB2, PTB2_DATA),
+ PINMUX_GPIO(GPIO_PTB1, PTB1_DATA),
+ PINMUX_GPIO(GPIO_PTB0, PTB0_DATA),
+
+ /* PTC */
+ PINMUX_GPIO(GPIO_PTC7, PTC7_DATA),
+ PINMUX_GPIO(GPIO_PTC6, PTC6_DATA),
+ PINMUX_GPIO(GPIO_PTC5, PTC5_DATA),
+ PINMUX_GPIO(GPIO_PTC4, PTC4_DATA),
+ PINMUX_GPIO(GPIO_PTC3, PTC3_DATA),
+ PINMUX_GPIO(GPIO_PTC2, PTC2_DATA),
+ PINMUX_GPIO(GPIO_PTC1, PTC1_DATA),
+ PINMUX_GPIO(GPIO_PTC0, PTC0_DATA),
+
+ /* PTD */
+ PINMUX_GPIO(GPIO_PTD7, PTD7_DATA),
+ PINMUX_GPIO(GPIO_PTD6, PTD6_DATA),
+ PINMUX_GPIO(GPIO_PTD5, PTD5_DATA),
+ PINMUX_GPIO(GPIO_PTD4, PTD4_DATA),
+ PINMUX_GPIO(GPIO_PTD3, PTD3_DATA),
+ PINMUX_GPIO(GPIO_PTD2, PTD2_DATA),
+ PINMUX_GPIO(GPIO_PTD1, PTD1_DATA),
+ PINMUX_GPIO(GPIO_PTD0, PTD0_DATA),
+
+ /* PTE */
+ PINMUX_GPIO(GPIO_PTE5, PTE5_DATA),
+ PINMUX_GPIO(GPIO_PTE4, PTE4_DATA),
+ PINMUX_GPIO(GPIO_PTE3, PTE3_DATA),
+ PINMUX_GPIO(GPIO_PTE2, PTE2_DATA),
+ PINMUX_GPIO(GPIO_PTE1, PTE1_DATA),
+ PINMUX_GPIO(GPIO_PTE0, PTE0_DATA),
+
+ /* PTF */
+ PINMUX_GPIO(GPIO_PTF7, PTF7_DATA),
+ PINMUX_GPIO(GPIO_PTF6, PTF6_DATA),
+ PINMUX_GPIO(GPIO_PTF5, PTF5_DATA),
+ PINMUX_GPIO(GPIO_PTF4, PTF4_DATA),
+ PINMUX_GPIO(GPIO_PTF3, PTF3_DATA),
+ PINMUX_GPIO(GPIO_PTF2, PTF2_DATA),
+ PINMUX_GPIO(GPIO_PTF1, PTF1_DATA),
+ PINMUX_GPIO(GPIO_PTF0, PTF0_DATA),
+
+ /* PTG */
+ PINMUX_GPIO(GPIO_PTG5, PTG5_DATA),
+ PINMUX_GPIO(GPIO_PTG4, PTG4_DATA),
+ PINMUX_GPIO(GPIO_PTG3, PTG3_DATA),
+ PINMUX_GPIO(GPIO_PTG2, PTG2_DATA),
+ PINMUX_GPIO(GPIO_PTG1, PTG1_DATA),
+ PINMUX_GPIO(GPIO_PTG0, PTG0_DATA),
+
+ /* PTH */
+ PINMUX_GPIO(GPIO_PTH7, PTH7_DATA),
+ PINMUX_GPIO(GPIO_PTH6, PTH6_DATA),
+ PINMUX_GPIO(GPIO_PTH5, PTH5_DATA),
+ PINMUX_GPIO(GPIO_PTH4, PTH4_DATA),
+ PINMUX_GPIO(GPIO_PTH3, PTH3_DATA),
+ PINMUX_GPIO(GPIO_PTH2, PTH2_DATA),
+ PINMUX_GPIO(GPIO_PTH1, PTH1_DATA),
+ PINMUX_GPIO(GPIO_PTH0, PTH0_DATA),
+
+ /* PTJ */
+ PINMUX_GPIO(GPIO_PTJ7, PTJ7_DATA),
+ PINMUX_GPIO(GPIO_PTJ5, PTJ5_DATA),
+ PINMUX_GPIO(GPIO_PTJ3, PTJ3_DATA),
+ PINMUX_GPIO(GPIO_PTJ2, PTJ2_DATA),
+ PINMUX_GPIO(GPIO_PTJ1, PTJ1_DATA),
+ PINMUX_GPIO(GPIO_PTJ0, PTJ0_DATA),
+
+ /* PTK */
+ PINMUX_GPIO(GPIO_PTK7, PTK7_DATA),
+ PINMUX_GPIO(GPIO_PTK6, PTK6_DATA),
+ PINMUX_GPIO(GPIO_PTK5, PTK5_DATA),
+ PINMUX_GPIO(GPIO_PTK4, PTK4_DATA),
+ PINMUX_GPIO(GPIO_PTK3, PTK3_DATA),
+ PINMUX_GPIO(GPIO_PTK2, PTK2_DATA),
+ PINMUX_GPIO(GPIO_PTK1, PTK1_DATA),
+ PINMUX_GPIO(GPIO_PTK0, PTK0_DATA),
+
+ /* PTL */
+ PINMUX_GPIO(GPIO_PTL7, PTL7_DATA),
+ PINMUX_GPIO(GPIO_PTL6, PTL6_DATA),
+ PINMUX_GPIO(GPIO_PTL5, PTL5_DATA),
+ PINMUX_GPIO(GPIO_PTL4, PTL4_DATA),
+ PINMUX_GPIO(GPIO_PTL3, PTL3_DATA),
+ PINMUX_GPIO(GPIO_PTL2, PTL2_DATA),
+ PINMUX_GPIO(GPIO_PTL1, PTL1_DATA),
+ PINMUX_GPIO(GPIO_PTL0, PTL0_DATA),
+
+ /* PTM */
+ PINMUX_GPIO(GPIO_PTM7, PTM7_DATA),
+ PINMUX_GPIO(GPIO_PTM6, PTM6_DATA),
+ PINMUX_GPIO(GPIO_PTM5, PTM5_DATA),
+ PINMUX_GPIO(GPIO_PTM4, PTM4_DATA),
+ PINMUX_GPIO(GPIO_PTM3, PTM3_DATA),
+ PINMUX_GPIO(GPIO_PTM2, PTM2_DATA),
+ PINMUX_GPIO(GPIO_PTM1, PTM1_DATA),
+ PINMUX_GPIO(GPIO_PTM0, PTM0_DATA),
+
+ /* PTN */
+ PINMUX_GPIO(GPIO_PTN7, PTN7_DATA),
+ PINMUX_GPIO(GPIO_PTN6, PTN6_DATA),
+ PINMUX_GPIO(GPIO_PTN5, PTN5_DATA),
+ PINMUX_GPIO(GPIO_PTN4, PTN4_DATA),
+ PINMUX_GPIO(GPIO_PTN3, PTN3_DATA),
+ PINMUX_GPIO(GPIO_PTN2, PTN2_DATA),
+ PINMUX_GPIO(GPIO_PTN1, PTN1_DATA),
+ PINMUX_GPIO(GPIO_PTN0, PTN0_DATA),
+
+ /* PTQ */
+ PINMUX_GPIO(GPIO_PTQ3, PTQ3_DATA),
+ PINMUX_GPIO(GPIO_PTQ2, PTQ2_DATA),
+ PINMUX_GPIO(GPIO_PTQ1, PTQ1_DATA),
+ PINMUX_GPIO(GPIO_PTQ0, PTQ0_DATA),
+
+ /* PTR */
+ PINMUX_GPIO(GPIO_PTR7, PTR7_DATA),
+ PINMUX_GPIO(GPIO_PTR6, PTR6_DATA),
+ PINMUX_GPIO(GPIO_PTR5, PTR5_DATA),
+ PINMUX_GPIO(GPIO_PTR4, PTR4_DATA),
+ PINMUX_GPIO(GPIO_PTR3, PTR3_DATA),
+ PINMUX_GPIO(GPIO_PTR2, PTR2_DATA),
+ PINMUX_GPIO(GPIO_PTR1, PTR1_DATA),
+ PINMUX_GPIO(GPIO_PTR0, PTR0_DATA),
+
+ /* PTS */
+ PINMUX_GPIO(GPIO_PTS7, PTS7_DATA),
+ PINMUX_GPIO(GPIO_PTS6, PTS6_DATA),
+ PINMUX_GPIO(GPIO_PTS5, PTS5_DATA),
+ PINMUX_GPIO(GPIO_PTS4, PTS4_DATA),
+ PINMUX_GPIO(GPIO_PTS3, PTS3_DATA),
+ PINMUX_GPIO(GPIO_PTS2, PTS2_DATA),
+ PINMUX_GPIO(GPIO_PTS1, PTS1_DATA),
+ PINMUX_GPIO(GPIO_PTS0, PTS0_DATA),
+
+ /* PTT */
+ PINMUX_GPIO(GPIO_PTT5, PTT5_DATA),
+ PINMUX_GPIO(GPIO_PTT4, PTT4_DATA),
+ PINMUX_GPIO(GPIO_PTT3, PTT3_DATA),
+ PINMUX_GPIO(GPIO_PTT2, PTT2_DATA),
+ PINMUX_GPIO(GPIO_PTT1, PTT1_DATA),
+ PINMUX_GPIO(GPIO_PTT0, PTT0_DATA),
+
+ /* PTU */
+ PINMUX_GPIO(GPIO_PTU5, PTU5_DATA),
+ PINMUX_GPIO(GPIO_PTU4, PTU4_DATA),
+ PINMUX_GPIO(GPIO_PTU3, PTU3_DATA),
+ PINMUX_GPIO(GPIO_PTU2, PTU2_DATA),
+ PINMUX_GPIO(GPIO_PTU1, PTU1_DATA),
+ PINMUX_GPIO(GPIO_PTU0, PTU0_DATA),
+
+ /* PTV */
+ PINMUX_GPIO(GPIO_PTV7, PTV7_DATA),
+ PINMUX_GPIO(GPIO_PTV6, PTV6_DATA),
+ PINMUX_GPIO(GPIO_PTV5, PTV5_DATA),
+ PINMUX_GPIO(GPIO_PTV4, PTV4_DATA),
+ PINMUX_GPIO(GPIO_PTV3, PTV3_DATA),
+ PINMUX_GPIO(GPIO_PTV2, PTV2_DATA),
+ PINMUX_GPIO(GPIO_PTV1, PTV1_DATA),
+ PINMUX_GPIO(GPIO_PTV0, PTV0_DATA),
+
+ /* PTW */
+ PINMUX_GPIO(GPIO_PTW7, PTW7_DATA),
+ PINMUX_GPIO(GPIO_PTW6, PTW6_DATA),
+ PINMUX_GPIO(GPIO_PTW5, PTW5_DATA),
+ PINMUX_GPIO(GPIO_PTW4, PTW4_DATA),
+ PINMUX_GPIO(GPIO_PTW3, PTW3_DATA),
+ PINMUX_GPIO(GPIO_PTW2, PTW2_DATA),
+ PINMUX_GPIO(GPIO_PTW1, PTW1_DATA),
+ PINMUX_GPIO(GPIO_PTW0, PTW0_DATA),
+
+ /* PTX */
+ PINMUX_GPIO(GPIO_PTX7, PTX7_DATA),
+ PINMUX_GPIO(GPIO_PTX6, PTX6_DATA),
+ PINMUX_GPIO(GPIO_PTX5, PTX5_DATA),
+ PINMUX_GPIO(GPIO_PTX4, PTX4_DATA),
+ PINMUX_GPIO(GPIO_PTX3, PTX3_DATA),
+ PINMUX_GPIO(GPIO_PTX2, PTX2_DATA),
+ PINMUX_GPIO(GPIO_PTX1, PTX1_DATA),
+ PINMUX_GPIO(GPIO_PTX0, PTX0_DATA),
+
+ /* PTY */
+ PINMUX_GPIO(GPIO_PTY7, PTY7_DATA),
+ PINMUX_GPIO(GPIO_PTY6, PTY6_DATA),
+ PINMUX_GPIO(GPIO_PTY5, PTY5_DATA),
+ PINMUX_GPIO(GPIO_PTY4, PTY4_DATA),
+ PINMUX_GPIO(GPIO_PTY3, PTY3_DATA),
+ PINMUX_GPIO(GPIO_PTY2, PTY2_DATA),
+ PINMUX_GPIO(GPIO_PTY1, PTY1_DATA),
+ PINMUX_GPIO(GPIO_PTY0, PTY0_DATA),
+
+ /* PTZ */
+ PINMUX_GPIO(GPIO_PTZ7, PTZ7_DATA),
+ PINMUX_GPIO(GPIO_PTZ6, PTZ6_DATA),
+ PINMUX_GPIO(GPIO_PTZ5, PTZ5_DATA),
+ PINMUX_GPIO(GPIO_PTZ4, PTZ4_DATA),
+ PINMUX_GPIO(GPIO_PTZ3, PTZ3_DATA),
+ PINMUX_GPIO(GPIO_PTZ2, PTZ2_DATA),
+ PINMUX_GPIO(GPIO_PTZ1, PTZ1_DATA),
+ PINMUX_GPIO(GPIO_PTZ0, PTZ0_DATA),
+
+ /* SCIF0 */
+ PINMUX_GPIO(GPIO_FN_SCIF0_PTT_TXD, SCIF0_PTT_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_PTT_RXD, SCIF0_PTT_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_PTT_SCK, SCIF0_PTT_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_PTU_TXD, SCIF0_PTU_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_PTU_RXD, SCIF0_PTU_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_PTU_SCK, SCIF0_PTU_SCK_MARK),
+
+ /* SCIF1 */
+ PINMUX_GPIO(GPIO_FN_SCIF1_PTS_TXD, SCIF1_PTS_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF1_PTS_RXD, SCIF1_PTS_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF1_PTS_SCK, SCIF1_PTS_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF1_PTV_TXD, SCIF1_PTV_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF1_PTV_RXD, SCIF1_PTV_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF1_PTV_SCK, SCIF1_PTV_SCK_MARK),
+
+ /* SCIF2 */
+ PINMUX_GPIO(GPIO_FN_SCIF2_PTT_TXD, SCIF2_PTT_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF2_PTT_RXD, SCIF2_PTT_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF2_PTT_SCK, SCIF2_PTT_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF2_PTU_TXD, SCIF2_PTU_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF2_PTU_RXD, SCIF2_PTU_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF2_PTU_SCK, SCIF2_PTU_SCK_MARK),
+
+ /* SCIF3 */
+ PINMUX_GPIO(GPIO_FN_SCIF3_PTS_TXD, SCIF3_PTS_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF3_PTS_RXD, SCIF3_PTS_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF3_PTS_SCK, SCIF3_PTS_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF3_PTS_RTS, SCIF3_PTS_RTS_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF3_PTS_CTS, SCIF3_PTS_CTS_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF3_PTV_TXD, SCIF3_PTV_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF3_PTV_RXD, SCIF3_PTV_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF3_PTV_SCK, SCIF3_PTV_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF3_PTV_RTS, SCIF3_PTV_RTS_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF3_PTV_CTS, SCIF3_PTV_CTS_MARK),
+
+ /* SCIF4 */
+ PINMUX_GPIO(GPIO_FN_SCIF4_PTE_TXD, SCIF4_PTE_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF4_PTE_RXD, SCIF4_PTE_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF4_PTE_SCK, SCIF4_PTE_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF4_PTN_TXD, SCIF4_PTN_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF4_PTN_RXD, SCIF4_PTN_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF4_PTN_SCK, SCIF4_PTN_SCK_MARK),
+
+ /* SCIF5 */
+ PINMUX_GPIO(GPIO_FN_SCIF5_PTE_TXD, SCIF5_PTE_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF5_PTE_RXD, SCIF5_PTE_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF5_PTE_SCK, SCIF5_PTE_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF5_PTN_TXD, SCIF5_PTN_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF5_PTN_RXD, SCIF5_PTN_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF5_PTN_SCK, SCIF5_PTN_SCK_MARK),
+
+ /* CEU */
+ PINMUX_GPIO(GPIO_FN_VIO_D15, VIO_D15_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D14, VIO_D14_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D13, VIO_D13_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D12, VIO_D12_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D11, VIO_D11_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D10, VIO_D10_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D9, VIO_D9_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D8, VIO_D8_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D7, VIO_D7_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D6, VIO_D6_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D5, VIO_D5_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D4, VIO_D4_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D3, VIO_D3_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D2, VIO_D2_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D1, VIO_D1_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D0, VIO_D0_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_CLK1, VIO_CLK1_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_VD1, VIO_VD1_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_HD1, VIO_HD1_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_FLD, VIO_FLD_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_CKO, VIO_CKO_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_VD2, VIO_VD2_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_HD2, VIO_HD2_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_CLK2, VIO_CLK2_MARK),
+
+ /* LCDC */
+ PINMUX_GPIO(GPIO_FN_LCDD23, LCDD23_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD22, LCDD22_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD21, LCDD21_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD20, LCDD20_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD19, LCDD19_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD18, LCDD18_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD17, LCDD17_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD16, LCDD16_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD15, LCDD15_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD14, LCDD14_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD13, LCDD13_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD12, LCDD12_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD11, LCDD11_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD10, LCDD10_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD9, LCDD9_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD8, LCDD8_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD7, LCDD7_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD6, LCDD6_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD5, LCDD5_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD4, LCDD4_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD3, LCDD3_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD2, LCDD2_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD1, LCDD1_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD0, LCDD0_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDLCLK_PTR, LCDLCLK_PTR_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDLCLK_PTW, LCDLCLK_PTW_MARK),
+ /* Main LCD */
+ PINMUX_GPIO(GPIO_FN_LCDDON, LCDDON_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDVCPWC, LCDVCPWC_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDVEPWC, LCDVEPWC_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDVSYN, LCDVSYN_MARK),
+ /* Main LCD - RGB Mode */
+ PINMUX_GPIO(GPIO_FN_LCDDCK, LCDDCK_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDHSYN, LCDHSYN_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDDISP, LCDDISP_MARK),
+ /* Main LCD - SYS Mode */
+ PINMUX_GPIO(GPIO_FN_LCDRS, LCDRS_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDCS, LCDCS_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDWR, LCDWR_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDRD, LCDRD_MARK),
+
+ /* IRQ */
+ PINMUX_GPIO(GPIO_FN_IRQ0, IRQ0_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ1, IRQ1_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ2, IRQ2_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ3, IRQ3_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ4, IRQ4_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ5, IRQ5_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ6, IRQ6_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ7, IRQ7_MARK),
+
+ /* AUD */
+ PINMUX_GPIO(GPIO_FN_AUDCK, AUDCK_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDSYNC, AUDSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDATA3, AUDATA3_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDATA2, AUDATA2_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDATA1, AUDATA1_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDATA0, AUDATA0_MARK),
+
+ /* SDHI0 (PTD) */
+ PINMUX_GPIO(GPIO_FN_SDHI0CD_PTD, SDHI0CD_PTD_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI0WP_PTD, SDHI0WP_PTD_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI0D3_PTD, SDHI0D3_PTD_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI0D2_PTD, SDHI0D2_PTD_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI0D1_PTD, SDHI0D1_PTD_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI0D0_PTD, SDHI0D0_PTD_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI0CMD_PTD, SDHI0CMD_PTD_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI0CLK_PTD, SDHI0CLK_PTD_MARK),
+
+ /* SDHI0 (PTS) */
+ PINMUX_GPIO(GPIO_FN_SDHI0CD_PTS, SDHI0CD_PTS_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI0WP_PTS, SDHI0WP_PTS_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI0D3_PTS, SDHI0D3_PTS_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI0D2_PTS, SDHI0D2_PTS_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI0D1_PTS, SDHI0D1_PTS_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI0D0_PTS, SDHI0D0_PTS_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI0CMD_PTS, SDHI0CMD_PTS_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI0CLK_PTS, SDHI0CLK_PTS_MARK),
+
+ /* SDHI1 */
+ PINMUX_GPIO(GPIO_FN_SDHI1CD, SDHI1CD_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI1WP, SDHI1WP_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI1D3, SDHI1D3_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI1D2, SDHI1D2_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI1D1, SDHI1D1_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI1D0, SDHI1D0_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI1CMD, SDHI1CMD_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI1CLK, SDHI1CLK_MARK),
+
+ /* SIUA */
+ PINMUX_GPIO(GPIO_FN_SIUAFCK, SIUAFCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUAILR, SIUAILR_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUAIBT, SIUAIBT_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUAISLD, SIUAISLD_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUAOLR, SIUAOLR_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUAOBT, SIUAOBT_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUAOSLD, SIUAOSLD_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUAMCK, SIUAMCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUAISPD, SIUAISPD_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUAOSPD, SIUAOSPD_MARK),
+
+ /* SIUB */
+ PINMUX_GPIO(GPIO_FN_SIUBFCK, SIUBFCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUBILR, SIUBILR_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUBIBT, SIUBIBT_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUBISLD, SIUBISLD_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUBOLR, SIUBOLR_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUBOBT, SIUBOBT_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUBOSLD, SIUBOSLD_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUBMCK, SIUBMCK_MARK),
+
+ /* IRDA */
+ PINMUX_GPIO(GPIO_FN_IRDA_IN, IRDA_IN_MARK),
+ PINMUX_GPIO(GPIO_FN_IRDA_OUT, IRDA_OUT_MARK),
+
+ /* VOU */
+ PINMUX_GPIO(GPIO_FN_DV_CLKI, DV_CLKI_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_CLK, DV_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_HSYNC, DV_HSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_VSYNC, DV_VSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D15, DV_D15_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D14, DV_D14_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D13, DV_D13_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D12, DV_D12_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D11, DV_D11_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D10, DV_D10_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D9, DV_D9_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D8, DV_D8_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D7, DV_D7_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D6, DV_D6_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D5, DV_D5_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D4, DV_D4_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D3, DV_D3_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D2, DV_D2_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D1, DV_D1_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D0, DV_D0_MARK),
+
+ /* KEYSC */
+ PINMUX_GPIO(GPIO_FN_KEYIN0, KEYIN0_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYIN1, KEYIN1_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYIN2, KEYIN2_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYIN3, KEYIN3_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYIN4, KEYIN4_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYOUT0, KEYOUT0_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYOUT1, KEYOUT1_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYOUT2, KEYOUT2_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYOUT3, KEYOUT3_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYOUT4_IN6, KEYOUT4_IN6_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYOUT5_IN5, KEYOUT5_IN5_MARK),
+
+ /* MSIOF0 (PTF) */
+ PINMUX_GPIO(GPIO_FN_MSIOF0_PTF_TXD, MSIOF0_PTF_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF0_PTF_RXD, MSIOF0_PTF_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF0_PTF_MCK, MSIOF0_PTF_MCK_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF0_PTF_TSYNC, MSIOF0_PTF_TSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF0_PTF_TSCK, MSIOF0_PTF_TSCK_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF0_PTF_RSYNC, MSIOF0_PTF_RSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF0_PTF_RSCK, MSIOF0_PTF_RSCK_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF0_PTF_SS1, MSIOF0_PTF_SS1_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF0_PTF_SS2, MSIOF0_PTF_SS2_MARK),
+
+ /* MSIOF0 (PTT+PTX) */
+ PINMUX_GPIO(GPIO_FN_MSIOF0_PTT_TXD, MSIOF0_PTT_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF0_PTT_RXD, MSIOF0_PTT_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF0_PTX_MCK, MSIOF0_PTX_MCK_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF0_PTT_TSYNC, MSIOF0_PTT_TSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF0_PTT_TSCK, MSIOF0_PTT_TSCK_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF0_PTT_RSYNC, MSIOF0_PTT_RSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF0_PTT_RSCK, MSIOF0_PTT_RSCK_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF0_PTT_SS1, MSIOF0_PTT_SS1_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF0_PTT_SS2, MSIOF0_PTT_SS2_MARK),
+
+ /* MSIOF1 */
+ PINMUX_GPIO(GPIO_FN_MSIOF1_TXD, MSIOF1_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF1_RXD, MSIOF1_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF1_MCK, MSIOF1_MCK_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF1_TSYNC, MSIOF1_TSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF1_TSCK, MSIOF1_TSCK_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF1_RSYNC, MSIOF1_RSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF1_RSCK, MSIOF1_RSCK_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF1_SS1, MSIOF1_SS1_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF1_SS2, MSIOF1_SS2_MARK),
+
+ /* TSIF */
+ PINMUX_GPIO(GPIO_FN_TS0_SDAT, TS0_SDAT_MARK),
+ PINMUX_GPIO(GPIO_FN_TS0_SCK, TS0_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_TS0_SDEN, TS0_SDEN_MARK),
+ PINMUX_GPIO(GPIO_FN_TS0_SPSYNC, TS0_SPSYNC_MARK),
+
+ /* FLCTL */
+ PINMUX_GPIO(GPIO_FN_FCE, FCE_MARK),
+ PINMUX_GPIO(GPIO_FN_NAF7, NAF7_MARK),
+ PINMUX_GPIO(GPIO_FN_NAF6, NAF6_MARK),
+ PINMUX_GPIO(GPIO_FN_NAF5, NAF5_MARK),
+ PINMUX_GPIO(GPIO_FN_NAF4, NAF4_MARK),
+ PINMUX_GPIO(GPIO_FN_NAF3, NAF3_MARK),
+ PINMUX_GPIO(GPIO_FN_NAF2, NAF2_MARK),
+ PINMUX_GPIO(GPIO_FN_NAF1, NAF1_MARK),
+ PINMUX_GPIO(GPIO_FN_NAF0, NAF0_MARK),
+ PINMUX_GPIO(GPIO_FN_FCDE, FCDE_MARK),
+ PINMUX_GPIO(GPIO_FN_FOE, FOE_MARK),
+ PINMUX_GPIO(GPIO_FN_FSC, FSC_MARK),
+ PINMUX_GPIO(GPIO_FN_FWE, FWE_MARK),
+ PINMUX_GPIO(GPIO_FN_FRB, FRB_MARK),
+
+ /* DMAC */
+ PINMUX_GPIO(GPIO_FN_DACK1, DACK1_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ1, DREQ1_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK0, DACK0_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ0, DREQ0_MARK),
+
+ /* ADC */
+ PINMUX_GPIO(GPIO_FN_AN3, AN3_MARK),
+ PINMUX_GPIO(GPIO_FN_AN2, AN2_MARK),
+ PINMUX_GPIO(GPIO_FN_AN1, AN1_MARK),
+ PINMUX_GPIO(GPIO_FN_AN0, AN0_MARK),
+ PINMUX_GPIO(GPIO_FN_ADTRG, ADTRG_MARK),
+
+ /* CPG */
+ PINMUX_GPIO(GPIO_FN_STATUS0, STATUS0_MARK),
+ PINMUX_GPIO(GPIO_FN_PDSTATUS, PDSTATUS_MARK),
+
+ /* TPU */
+ PINMUX_GPIO(GPIO_FN_TPUTO0, TPUTO0_MARK),
+ PINMUX_GPIO(GPIO_FN_TPUTO1, TPUTO1_MARK),
+ PINMUX_GPIO(GPIO_FN_TPUTO2, TPUTO2_MARK),
+ PINMUX_GPIO(GPIO_FN_TPUTO3, TPUTO3_MARK),
+
+ /* BSC */
+ PINMUX_GPIO(GPIO_FN_D31, D31_MARK),
+ PINMUX_GPIO(GPIO_FN_D30, D30_MARK),
+ PINMUX_GPIO(GPIO_FN_D29, D29_MARK),
+ PINMUX_GPIO(GPIO_FN_D28, D28_MARK),
+ PINMUX_GPIO(GPIO_FN_D27, D27_MARK),
+ PINMUX_GPIO(GPIO_FN_D26, D26_MARK),
+ PINMUX_GPIO(GPIO_FN_D25, D25_MARK),
+ PINMUX_GPIO(GPIO_FN_D24, D24_MARK),
+ PINMUX_GPIO(GPIO_FN_D23, D23_MARK),
+ PINMUX_GPIO(GPIO_FN_D22, D22_MARK),
+ PINMUX_GPIO(GPIO_FN_D21, D21_MARK),
+ PINMUX_GPIO(GPIO_FN_D20, D20_MARK),
+ PINMUX_GPIO(GPIO_FN_D19, D19_MARK),
+ PINMUX_GPIO(GPIO_FN_D18, D18_MARK),
+ PINMUX_GPIO(GPIO_FN_D17, D17_MARK),
+ PINMUX_GPIO(GPIO_FN_D16, D16_MARK),
+ PINMUX_GPIO(GPIO_FN_IOIS16, IOIS16_MARK),
+ PINMUX_GPIO(GPIO_FN_WAIT, WAIT_MARK),
+ PINMUX_GPIO(GPIO_FN_BS, BS_MARK),
+ PINMUX_GPIO(GPIO_FN_A25, A25_MARK),
+ PINMUX_GPIO(GPIO_FN_A24, A24_MARK),
+ PINMUX_GPIO(GPIO_FN_A23, A23_MARK),
+ PINMUX_GPIO(GPIO_FN_A22, A22_MARK),
+ PINMUX_GPIO(GPIO_FN_CS6B_CE1B, CS6B_CE1B_MARK),
+ PINMUX_GPIO(GPIO_FN_CS6A_CE2B, CS6A_CE2B_MARK),
+ PINMUX_GPIO(GPIO_FN_CS5B_CE1A, CS5B_CE1A_MARK),
+ PINMUX_GPIO(GPIO_FN_CS5A_CE2A, CS5A_CE2A_MARK),
+ PINMUX_GPIO(GPIO_FN_WE3_ICIOWR, WE3_ICIOWR_MARK),
+ PINMUX_GPIO(GPIO_FN_WE2_ICIORD, WE2_ICIORD_MARK),
+
+ /* ATAPI */
+ PINMUX_GPIO(GPIO_FN_IDED15, IDED15_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED14, IDED14_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED13, IDED13_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED12, IDED12_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED11, IDED11_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED10, IDED10_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED9, IDED9_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED8, IDED8_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED7, IDED7_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED6, IDED6_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED5, IDED5_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED4, IDED4_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED3, IDED3_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED2, IDED2_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED1, IDED1_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED0, IDED0_MARK),
+ PINMUX_GPIO(GPIO_FN_DIRECTION, DIRECTION_MARK),
+ PINMUX_GPIO(GPIO_FN_EXBUF_ENB, EXBUF_ENB_MARK),
+ PINMUX_GPIO(GPIO_FN_IDERST, IDERST_MARK),
+ PINMUX_GPIO(GPIO_FN_IODACK, IODACK_MARK),
+ PINMUX_GPIO(GPIO_FN_IODREQ, IODREQ_MARK),
+ PINMUX_GPIO(GPIO_FN_IDEIORDY, IDEIORDY_MARK),
+ PINMUX_GPIO(GPIO_FN_IDEINT, IDEINT_MARK),
+ PINMUX_GPIO(GPIO_FN_IDEIOWR, IDEIOWR_MARK),
+ PINMUX_GPIO(GPIO_FN_IDEIORD, IDEIORD_MARK),
+ PINMUX_GPIO(GPIO_FN_IDECS1, IDECS1_MARK),
+ PINMUX_GPIO(GPIO_FN_IDECS0, IDECS0_MARK),
+ PINMUX_GPIO(GPIO_FN_IDEA2, IDEA2_MARK),
+ PINMUX_GPIO(GPIO_FN_IDEA1, IDEA1_MARK),
+ PINMUX_GPIO(GPIO_FN_IDEA0, IDEA0_MARK),
+};
+
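+ /*
+ * Port control registers: each PINMUX_CFG_REG() entry describes one
+ * 16-bit register with a 2-bit field per pin, listed from the highest
+ * numbered pin down.  The four IDs per field name the state selected by
+ * each field value (function, output, input with pull-up, input); a 0
+ * marks a setting the pin does not support.
+ */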
+static struct pinmux_cfg_reg pinmux_config_regs[] = {
+ { PINMUX_CFG_REG("PACR", 0xa4050100, 16, 2) {
+ PTA7_FN, PTA7_OUT, 0, PTA7_IN,
+ PTA6_FN, PTA6_OUT, 0, PTA6_IN,
+ PTA5_FN, PTA5_OUT, 0, PTA5_IN,
+ PTA4_FN, PTA4_OUT, PTA4_IN_PU, PTA4_IN,
+ PTA3_FN, PTA3_OUT, PTA3_IN_PU, PTA3_IN,
+ PTA2_FN, PTA2_OUT, PTA2_IN_PU, PTA2_IN,
+ PTA1_FN, PTA1_OUT, PTA1_IN_PU, PTA1_IN,
+ PTA0_FN, PTA0_OUT, PTA0_IN_PU, PTA0_IN }
+ },
+ { PINMUX_CFG_REG("PBCR", 0xa4050102, 16, 2) {
+ PTB7_FN, PTB7_OUT, 0, PTB7_IN,
+ PTB6_FN, PTB6_OUT, 0, PTB6_IN,
+ PTB5_FN, PTB5_OUT, 0, PTB5_IN,
+ PTB4_FN, PTB4_OUT, 0, PTB4_IN,
+ PTB3_FN, PTB3_OUT, 0, PTB3_IN,
+ PTB2_FN, PTB2_OUT, PTB2_IN_PU, PTB2_IN,
+ PTB1_FN, PTB1_OUT, PTB1_IN_PU, PTB1_IN,
+ PTB0_FN, PTB0_OUT, 0, PTB0_IN }
+ },
+ { PINMUX_CFG_REG("PCCR", 0xa4050104, 16, 2) {
+ PTC7_FN, PTC7_OUT, 0, PTC7_IN,
+ PTC6_FN, PTC6_OUT, 0, PTC6_IN,
+ PTC5_FN, PTC5_OUT, 0, PTC5_IN,
+ PTC4_FN, PTC4_OUT, 0, PTC4_IN,
+ PTC3_FN, PTC3_OUT, 0, PTC3_IN,
+ PTC2_FN, PTC2_OUT, 0, PTC2_IN,
+ PTC1_FN, PTC1_OUT, 0, PTC1_IN,
+ PTC0_FN, PTC0_OUT, 0, PTC0_IN }
+ },
+ { PINMUX_CFG_REG("PDCR", 0xa4050106, 16, 2) {
+ PTD7_FN, PTD7_OUT, 0, PTD7_IN,
+ PTD6_FN, PTD6_OUT, 0, PTD6_IN,
+ PTD5_FN, PTD5_OUT, 0, PTD5_IN,
+ PTD4_FN, PTD4_OUT, 0, PTD4_IN,
+ PTD3_FN, PTD3_OUT, 0, PTD3_IN,
+ PTD2_FN, PTD2_OUT, 0, PTD2_IN,
+ PTD1_FN, PTD1_OUT, 0, PTD1_IN,
+ PTD0_FN, PTD0_OUT, 0, PTD0_IN }
+ },
+ { PINMUX_CFG_REG("PECR", 0xa4050108, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PTE5_FN, PTE5_OUT, 0, PTE5_IN,
+ PTE4_FN, PTE4_OUT, 0, PTE4_IN,
+ PTE3_FN, PTE3_OUT, 0, PTE3_IN,
+ PTE2_FN, PTE2_OUT, 0, PTE2_IN,
+ PTE1_FN, PTE1_OUT, 0, PTE1_IN,
+ PTE0_FN, PTE0_OUT, 0, PTE0_IN }
+ },
+ { PINMUX_CFG_REG("PFCR", 0xa405010a, 16, 2) {
+ PTF7_FN, PTF7_OUT, 0, PTF7_IN,
+ PTF6_FN, PTF6_OUT, 0, PTF6_IN,
+ PTF5_FN, PTF5_OUT, 0, PTF5_IN,
+ PTF4_FN, PTF4_OUT, 0, PTF4_IN,
+ PTF3_FN, PTF3_OUT, 0, PTF3_IN,
+ PTF2_FN, PTF2_OUT, 0, PTF2_IN,
+ PTF1_FN, PTF1_OUT, 0, PTF1_IN,
+ PTF0_FN, PTF0_OUT, 0, PTF0_IN }
+ },
+ { PINMUX_CFG_REG("PGCR", 0xa405010c, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PTG5_FN, PTG5_OUT, 0, 0,
+ PTG4_FN, PTG4_OUT, 0, 0,
+ PTG3_FN, PTG3_OUT, 0, 0,
+ PTG2_FN, PTG2_OUT, 0, 0,
+ PTG1_FN, PTG1_OUT, 0, 0,
+ PTG0_FN, PTG0_OUT, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PHCR", 0xa405010e, 16, 2) {
+ PTH7_FN, PTH7_OUT, 0, PTH7_IN,
+ PTH6_FN, PTH6_OUT, 0, PTH6_IN,
+ PTH5_FN, PTH5_OUT, 0, PTH5_IN,
+ PTH4_FN, PTH4_OUT, 0, PTH4_IN,
+ PTH3_FN, PTH3_OUT, 0, PTH3_IN,
+ PTH2_FN, PTH2_OUT, 0, PTH2_IN,
+ PTH1_FN, PTH1_OUT, 0, PTH1_IN,
+ PTH0_FN, PTH0_OUT, 0, PTH0_IN }
+ },
+ { PINMUX_CFG_REG("PJCR", 0xa4050110, 16, 2) {
+ PTJ7_FN, PTJ7_OUT, 0, 0,
+ 0, 0, 0, 0,
+ PTJ5_FN, PTJ5_OUT, 0, 0,
+ 0, 0, 0, 0,
+ PTJ3_FN, PTJ3_OUT, 0, PTJ3_IN,
+ PTJ2_FN, PTJ2_OUT, 0, PTJ2_IN,
+ PTJ1_FN, PTJ1_OUT, 0, PTJ1_IN,
+ PTJ0_FN, PTJ0_OUT, 0, PTJ0_IN }
+ },
+ { PINMUX_CFG_REG("PKCR", 0xa4050112, 16, 2) {
+ PTK7_FN, PTK7_OUT, 0, PTK7_IN,
+ PTK6_FN, PTK6_OUT, 0, PTK6_IN,
+ PTK5_FN, PTK5_OUT, 0, PTK5_IN,
+ PTK4_FN, PTK4_OUT, 0, PTK4_IN,
+ PTK3_FN, PTK3_OUT, 0, PTK3_IN,
+ PTK2_FN, PTK2_OUT, 0, PTK2_IN,
+ PTK1_FN, PTK1_OUT, 0, PTK1_IN,
+ PTK0_FN, PTK0_OUT, 0, PTK0_IN }
+ },
+ { PINMUX_CFG_REG("PLCR", 0xa4050114, 16, 2) {
+ PTL7_FN, PTL7_OUT, 0, PTL7_IN,
+ PTL6_FN, PTL6_OUT, 0, PTL6_IN,
+ PTL5_FN, PTL5_OUT, 0, PTL5_IN,
+ PTL4_FN, PTL4_OUT, 0, PTL4_IN,
+ PTL3_FN, PTL3_OUT, 0, PTL3_IN,
+ PTL2_FN, PTL2_OUT, 0, PTL2_IN,
+ PTL1_FN, PTL1_OUT, 0, PTL1_IN,
+ PTL0_FN, PTL0_OUT, 0, PTL0_IN }
+ },
+ { PINMUX_CFG_REG("PMCR", 0xa4050116, 16, 2) {
+ PTM7_FN, PTM7_OUT, 0, PTM7_IN,
+ PTM6_FN, PTM6_OUT, 0, PTM6_IN,
+ PTM5_FN, PTM5_OUT, 0, PTM5_IN,
+ PTM4_FN, PTM4_OUT, 0, PTM4_IN,
+ PTM3_FN, PTM3_OUT, 0, PTM3_IN,
+ PTM2_FN, PTM2_OUT, 0, PTM2_IN,
+ PTM1_FN, PTM1_OUT, 0, PTM1_IN,
+ PTM0_FN, PTM0_OUT, 0, PTM0_IN }
+ },
+ { PINMUX_CFG_REG("PNCR", 0xa4050118, 16, 2) {
+ PTN7_FN, PTN7_OUT, 0, PTN7_IN,
+ PTN6_FN, PTN6_OUT, 0, PTN6_IN,
+ PTN5_FN, PTN5_OUT, 0, PTN5_IN,
+ PTN4_FN, PTN4_OUT, 0, PTN4_IN,
+ PTN3_FN, PTN3_OUT, 0, PTN3_IN,
+ PTN2_FN, PTN2_OUT, 0, PTN2_IN,
+ PTN1_FN, PTN1_OUT, 0, PTN1_IN,
+ PTN0_FN, PTN0_OUT, 0, PTN0_IN }
+ },
+ { PINMUX_CFG_REG("PQCR", 0xa405011a, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PTQ3_FN, 0, 0, PTQ3_IN,
+ PTQ2_FN, 0, 0, PTQ2_IN,
+ PTQ1_FN, 0, 0, PTQ1_IN,
+ PTQ0_FN, 0, 0, PTQ0_IN }
+ },
+ { PINMUX_CFG_REG("PRCR", 0xa405011c, 16, 2) {
+ PTR7_FN, PTR7_OUT, 0, PTR7_IN,
+ PTR6_FN, PTR6_OUT, 0, PTR6_IN,
+ PTR5_FN, PTR5_OUT, 0, PTR5_IN,
+ PTR4_FN, PTR4_OUT, 0, PTR4_IN,
+ PTR3_FN, 0, 0, PTR3_IN,
+ PTR2_FN, 0, PTR2_IN_PU, PTR2_IN,
+ PTR1_FN, PTR1_OUT, 0, PTR1_IN,
+ PTR0_FN, PTR0_OUT, 0, PTR0_IN }
+ },
+ { PINMUX_CFG_REG("PSCR", 0xa405011e, 16, 2) {
+ PTS7_FN, PTS7_OUT, 0, PTS7_IN,
+ PTS6_FN, PTS6_OUT, 0, PTS6_IN,
+ PTS5_FN, PTS5_OUT, 0, PTS5_IN,
+ PTS4_FN, PTS4_OUT, 0, PTS4_IN,
+ PTS3_FN, PTS3_OUT, 0, PTS3_IN,
+ PTS2_FN, PTS2_OUT, 0, PTS2_IN,
+ PTS1_FN, PTS1_OUT, 0, PTS1_IN,
+ PTS0_FN, PTS0_OUT, 0, PTS0_IN }
+ },
+ { PINMUX_CFG_REG("PTCR", 0xa4050140, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PTT5_FN, PTT5_OUT, 0, PTT5_IN,
+ PTT4_FN, PTT4_OUT, 0, PTT4_IN,
+ PTT3_FN, PTT3_OUT, 0, PTT3_IN,
+ PTT2_FN, PTT2_OUT, 0, PTT2_IN,
+ PTT1_FN, PTT1_OUT, 0, PTT1_IN,
+ PTT0_FN, PTT0_OUT, 0, PTT0_IN }
+ },
+ { PINMUX_CFG_REG("PUCR", 0xa4050142, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PTU5_FN, PTU5_OUT, 0, PTU5_IN,
+ PTU4_FN, PTU4_OUT, 0, PTU4_IN,
+ PTU3_FN, PTU3_OUT, 0, PTU3_IN,
+ PTU2_FN, PTU2_OUT, 0, PTU2_IN,
+ PTU1_FN, PTU1_OUT, 0, PTU1_IN,
+ PTU0_FN, PTU0_OUT, 0, PTU0_IN }
+ },
+ { PINMUX_CFG_REG("PVCR", 0xa4050144, 16, 2) {
+ PTV7_FN, PTV7_OUT, 0, PTV7_IN,
+ PTV6_FN, PTV6_OUT, 0, PTV6_IN,
+ PTV5_FN, PTV5_OUT, 0, PTV5_IN,
+ PTV4_FN, PTV4_OUT, 0, PTV4_IN,
+ PTV3_FN, PTV3_OUT, 0, PTV3_IN,
+ PTV2_FN, PTV2_OUT, 0, PTV2_IN,
+ PTV1_FN, PTV1_OUT, 0, PTV1_IN,
+ PTV0_FN, PTV0_OUT, 0, PTV0_IN }
+ },
+ { PINMUX_CFG_REG("PWCR", 0xa4050146, 16, 2) {
+ PTW7_FN, PTW7_OUT, 0, PTW7_IN,
+ PTW6_FN, PTW6_OUT, 0, PTW6_IN,
+ PTW5_FN, PTW5_OUT, 0, PTW5_IN,
+ PTW4_FN, PTW4_OUT, 0, PTW4_IN,
+ PTW3_FN, PTW3_OUT, 0, PTW3_IN,
+ PTW2_FN, PTW2_OUT, 0, PTW2_IN,
+ PTW1_FN, PTW1_OUT, 0, PTW1_IN,
+ PTW0_FN, PTW0_OUT, 0, PTW0_IN }
+ },
+ { PINMUX_CFG_REG("PXCR", 0xa4050148, 16, 2) {
+ PTX7_FN, PTX7_OUT, 0, PTX7_IN,
+ PTX6_FN, PTX6_OUT, 0, PTX6_IN,
+ PTX5_FN, PTX5_OUT, 0, PTX5_IN,
+ PTX4_FN, PTX4_OUT, 0, PTX4_IN,
+ PTX3_FN, PTX3_OUT, 0, PTX3_IN,
+ PTX2_FN, PTX2_OUT, 0, PTX2_IN,
+ PTX1_FN, PTX1_OUT, 0, PTX1_IN,
+ PTX0_FN, PTX0_OUT, 0, PTX0_IN }
+ },
+ { PINMUX_CFG_REG("PYCR", 0xa405014a, 16, 2) {
+ PTY7_FN, PTY7_OUT, 0, PTY7_IN,
+ PTY6_FN, PTY6_OUT, 0, PTY6_IN,
+ PTY5_FN, PTY5_OUT, 0, PTY5_IN,
+ PTY4_FN, PTY4_OUT, 0, PTY4_IN,
+ PTY3_FN, PTY3_OUT, 0, PTY3_IN,
+ PTY2_FN, PTY2_OUT, 0, PTY2_IN,
+ PTY1_FN, PTY1_OUT, 0, PTY1_IN,
+ PTY0_FN, PTY0_OUT, 0, PTY0_IN }
+ },
+ { PINMUX_CFG_REG("PZCR", 0xa405014c, 16, 2) {
+ PTZ7_FN, PTZ7_OUT, 0, PTZ7_IN,
+ PTZ6_FN, PTZ6_OUT, 0, PTZ6_IN,
+ PTZ5_FN, PTZ5_OUT, 0, PTZ5_IN,
+ PTZ4_FN, PTZ4_OUT, 0, PTZ4_IN,
+ PTZ3_FN, PTZ3_OUT, 0, PTZ3_IN,
+ PTZ2_FN, PTZ2_OUT, 0, PTZ2_IN,
+ PTZ1_FN, PTZ1_OUT, 0, PTZ1_IN,
+ PTZ0_FN, PTZ0_OUT, 0, PTZ0_IN }
+ },
+ { PINMUX_CFG_REG("PSELA", 0xa405014e, 16, 2) {
+ PSA15_PSA14_FN1, PSA15_PSA14_FN2, 0, 0,
+ PSA13_PSA12_FN1, PSA13_PSA12_FN2, 0, 0,
+ PSA11_PSA10_FN1, PSA11_PSA10_FN2, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PSA5_PSA4_FN1, PSA5_PSA4_FN2, PSA5_PSA4_FN3, 0,
+ PSA3_PSA2_FN1, PSA3_PSA2_FN2, 0, 0,
+ 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PSELB", 0xa4050150, 16, 2) {
+ PSB15_PSB14_FN1, PSB15_PSB14_FN2, 0, 0,
+ PSB13_PSB12_LCDC_RGB, PSB13_PSB12_LCDC_SYS, 0, 0,
+ 0, 0, 0, 0,
+ PSB9_PSB8_FN1, PSB9_PSB8_FN2, PSB9_PSB8_FN3, 0,
+ PSB7_PSB6_FN1, PSB7_PSB6_FN2, 0, 0,
+ PSB5_PSB4_FN1, PSB5_PSB4_FN2, 0, 0,
+ PSB3_PSB2_FN1, PSB3_PSB2_FN2, 0, 0,
+ 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PSELC", 0xa4050152, 16, 2) {
+ PSC15_PSC14_FN1, PSC15_PSC14_FN2, 0, 0,
+ PSC13_PSC12_FN1, PSC13_PSC12_FN2, 0, 0,
+ PSC11_PSC10_FN1, PSC11_PSC10_FN2, PSC11_PSC10_FN3, 0,
+ PSC9_PSC8_FN1, PSC9_PSC8_FN2, 0, 0,
+ PSC7_PSC6_FN1, PSC7_PSC6_FN2, PSC7_PSC6_FN3, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PSELD", 0xa4050154, 16, 2) {
+ PSD15_PSD14_FN1, PSD15_PSD14_FN2, 0, 0,
+ PSD13_PSD12_FN1, PSD13_PSD12_FN2, 0, 0,
+ PSD11_PSD10_FN1, PSD11_PSD10_FN2, PSD11_PSD10_FN3, 0,
+ PSD9_PSD8_FN1, PSD9_PSD8_FN2, 0, 0,
+ PSD7_PSD6_FN1, PSD7_PSD6_FN2, 0, 0,
+ PSD5_PSD4_FN1, PSD5_PSD4_FN2, 0, 0,
+ PSD3_PSD2_FN1, PSD3_PSD2_FN2, 0, 0,
+ PSD1_PSD0_FN1, PSD1_PSD0_FN2, 0, 0 }
+ },
+ {}
+};
+
+static struct pinmux_data_reg pinmux_data_regs[] = {
+ { PINMUX_DATA_REG("PADR", 0xa4050120, 8) {
+ PTA7_DATA, PTA6_DATA, PTA5_DATA, PTA4_DATA,
+ PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA }
+ },
+ { PINMUX_DATA_REG("PBDR", 0xa4050122, 8) {
+ PTB7_DATA, PTB6_DATA, PTB5_DATA, PTB4_DATA,
+ PTB3_DATA, PTB2_DATA, PTB1_DATA, PTB0_DATA }
+ },
+ { PINMUX_DATA_REG("PCDR", 0xa4050124, 8) {
+ PTC7_DATA, PTC6_DATA, PTC5_DATA, PTC4_DATA,
+ PTC3_DATA, PTC2_DATA, PTC1_DATA, PTC0_DATA }
+ },
+ { PINMUX_DATA_REG("PDDR", 0xa4050126, 8) {
+ PTD7_DATA, PTD6_DATA, PTD5_DATA, PTD4_DATA,
+ PTD3_DATA, PTD2_DATA, PTD1_DATA, PTD0_DATA }
+ },
+ { PINMUX_DATA_REG("PEDR", 0xa4050128, 8) {
+ 0, 0, PTE5_DATA, PTE4_DATA,
+ PTE3_DATA, PTE2_DATA, PTE1_DATA, PTE0_DATA }
+ },
+ { PINMUX_DATA_REG("PFDR", 0xa405012a, 8) {
+ PTF7_DATA, PTF6_DATA, PTF5_DATA, PTF4_DATA,
+ PTF3_DATA, PTF2_DATA, PTF1_DATA, PTF0_DATA }
+ },
+ { PINMUX_DATA_REG("PGDR", 0xa405012c, 8) {
+ 0, 0, PTG5_DATA, PTG4_DATA,
+ PTG3_DATA, PTG2_DATA, PTG1_DATA, PTG0_DATA }
+ },
+ { PINMUX_DATA_REG("PHDR", 0xa405012e, 8) {
+ PTH7_DATA, PTH6_DATA, PTH5_DATA, PTH4_DATA,
+ PTH3_DATA, PTH2_DATA, PTH1_DATA, PTH0_DATA }
+ },
+ { PINMUX_DATA_REG("PJDR", 0xa4050130, 8) {
+ PTJ7_DATA, 0, PTJ5_DATA, 0,
+ PTJ3_DATA, PTJ2_DATA, PTJ1_DATA, PTJ0_DATA }
+ },
+ { PINMUX_DATA_REG("PKDR", 0xa4050132, 8) {
+ PTK7_DATA, PTK6_DATA, PTK5_DATA, PTK4_DATA,
+ PTK3_DATA, PTK2_DATA, PTK1_DATA, PTK0_DATA }
+ },
+ { PINMUX_DATA_REG("PLDR", 0xa4050134, 8) {
+ PTL7_DATA, PTL6_DATA, PTL5_DATA, PTL4_DATA,
+ PTL3_DATA, PTL2_DATA, PTL1_DATA, PTL0_DATA }
+ },
+ { PINMUX_DATA_REG("PMDR", 0xa4050136, 8) {
+ PTM7_DATA, PTM6_DATA, PTM5_DATA, PTM4_DATA,
+ PTM3_DATA, PTM2_DATA, PTM1_DATA, PTM0_DATA }
+ },
+ { PINMUX_DATA_REG("PNDR", 0xa4050138, 8) {
+ PTN7_DATA, PTN6_DATA, PTN5_DATA, PTN4_DATA,
+ PTN3_DATA, PTN2_DATA, PTN1_DATA, PTN0_DATA }
+ },
+ { PINMUX_DATA_REG("PQDR", 0xa405013a, 8) {
+ 0, 0, 0, 0,
+ PTQ3_DATA, PTQ2_DATA, PTQ1_DATA, PTQ0_DATA }
+ },
+ { PINMUX_DATA_REG("PRDR", 0xa405013c, 8) {
+ PTR7_DATA, PTR6_DATA, PTR5_DATA, PTR4_DATA,
+ PTR3_DATA, PTR2_DATA, PTR1_DATA, PTR0_DATA }
+ },
+ { PINMUX_DATA_REG("PSDR", 0xa405013e, 8) {
+ PTS7_DATA, PTS6_DATA, PTS5_DATA, PTS4_DATA,
+ PTS3_DATA, PTS2_DATA, PTS1_DATA, PTS0_DATA }
+ },
+ { PINMUX_DATA_REG("PTDR", 0xa4050160, 8) {
+ 0, 0, PTT5_DATA, PTT4_DATA,
+ PTT3_DATA, PTT2_DATA, PTT1_DATA, PTT0_DATA }
+ },
+ { PINMUX_DATA_REG("PUDR", 0xa4050162, 8) {
+ 0, 0, PTU5_DATA, PTU4_DATA,
+ PTU3_DATA, PTU2_DATA, PTU1_DATA, PTU0_DATA }
+ },
+ { PINMUX_DATA_REG("PVDR", 0xa4050164, 8) {
+ PTV7_DATA, PTV6_DATA, PTV5_DATA, PTV4_DATA,
+ PTV3_DATA, PTV2_DATA, PTV1_DATA, PTV0_DATA }
+ },
+ { PINMUX_DATA_REG("PWDR", 0xa4050166, 8) {
+ PTW7_DATA, PTW6_DATA, PTW5_DATA, PTW4_DATA,
+ PTW3_DATA, PTW2_DATA, PTW1_DATA, PTW0_DATA }
+ },
+ { PINMUX_DATA_REG("PXDR", 0xa4050168, 8) {
+ PTX7_DATA, PTX6_DATA, PTX5_DATA, PTX4_DATA,
+ PTX3_DATA, PTX2_DATA, PTX1_DATA, PTX0_DATA }
+ },
+ { PINMUX_DATA_REG("PYDR", 0xa405016a, 8) {
+ PTY7_DATA, PTY6_DATA, PTY5_DATA, PTY4_DATA,
+ PTY3_DATA, PTY2_DATA, PTY1_DATA, PTY0_DATA }
+ },
+ { PINMUX_DATA_REG("PZDR", 0xa405016c, 8) {
+ PTZ7_DATA, PTZ6_DATA, PTZ5_DATA, PTZ4_DATA,
+ PTZ3_DATA, PTZ2_DATA, PTZ1_DATA, PTZ0_DATA }
+ },
+ { },
+};
+
+static struct pinmux_info sh7723_pinmux_info = {
+ .name = "sh7723_pfc",
+ .reserved_id = PINMUX_RESERVED,
+ .data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END },
+ .input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END },
+ .input_pu = { PINMUX_INPUT_PULLUP_BEGIN, PINMUX_INPUT_PULLUP_END },
+ .output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END },
+ .mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END },
+ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+ .first_gpio = GPIO_PTA7,
+ .last_gpio = GPIO_FN_IDEA0,
+
+ .gpios = pinmux_gpios,
+ .cfg_regs = pinmux_config_regs,
+ .data_regs = pinmux_data_regs,
+
+ .gpio_data = pinmux_data,
+ .gpio_data_size = ARRAY_SIZE(pinmux_data),
+};
+
+static int __init plat_pinmux_setup(void)
+{
+ return register_pinmux(&sh7723_pinmux_info);
+}
+
+arch_initcall(plat_pinmux_setup);
diff --git a/arch/sh/kernel/cpu/sh4a/pinmux-sh7724.c b/arch/sh/kernel/cpu/sh4a/pinmux-sh7724.c
new file mode 100644
index 00000000..1af0f958
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/pinmux-sh7724.c
@@ -0,0 +1,2230 @@
+/*
+ * SH7724 Pinmux
+ *
+ * Copyright (C) 2009 Renesas Solutions Corp.
+ *
+ * Kuninori Morimoto <morimoto.kuninori@renesas.com>
+ *
+ * Based on SH7723 Pinmux
+ * Copyright (C) 2008 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <cpu/sh7724.h>
+
+enum {
+ PINMUX_RESERVED = 0,
+
+ PINMUX_DATA_BEGIN,
+ PTA7_DATA, PTA6_DATA, PTA5_DATA, PTA4_DATA,
+ PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA,
+ PTB7_DATA, PTB6_DATA, PTB5_DATA, PTB4_DATA,
+ PTB3_DATA, PTB2_DATA, PTB1_DATA, PTB0_DATA,
+ PTC7_DATA, PTC6_DATA, PTC5_DATA, PTC4_DATA,
+ PTC3_DATA, PTC2_DATA, PTC1_DATA, PTC0_DATA,
+ PTD7_DATA, PTD6_DATA, PTD5_DATA, PTD4_DATA,
+ PTD3_DATA, PTD2_DATA, PTD1_DATA, PTD0_DATA,
+ PTE7_DATA, PTE6_DATA, PTE5_DATA, PTE4_DATA,
+ PTE3_DATA, PTE2_DATA, PTE1_DATA, PTE0_DATA,
+ PTF7_DATA, PTF6_DATA, PTF5_DATA, PTF4_DATA,
+ PTF3_DATA, PTF2_DATA, PTF1_DATA, PTF0_DATA,
+ PTG5_DATA, PTG4_DATA,
+ PTG3_DATA, PTG2_DATA, PTG1_DATA, PTG0_DATA,
+ PTH7_DATA, PTH6_DATA, PTH5_DATA, PTH4_DATA,
+ PTH3_DATA, PTH2_DATA, PTH1_DATA, PTH0_DATA,
+ PTJ7_DATA, PTJ6_DATA, PTJ5_DATA,
+ PTJ3_DATA, PTJ2_DATA, PTJ1_DATA, PTJ0_DATA,
+ PTK7_DATA, PTK6_DATA, PTK5_DATA, PTK4_DATA,
+ PTK3_DATA, PTK2_DATA, PTK1_DATA, PTK0_DATA,
+ PTL7_DATA, PTL6_DATA, PTL5_DATA, PTL4_DATA,
+ PTL3_DATA, PTL2_DATA, PTL1_DATA, PTL0_DATA,
+ PTM7_DATA, PTM6_DATA, PTM5_DATA, PTM4_DATA,
+ PTM3_DATA, PTM2_DATA, PTM1_DATA, PTM0_DATA,
+ PTN7_DATA, PTN6_DATA, PTN5_DATA, PTN4_DATA,
+ PTN3_DATA, PTN2_DATA, PTN1_DATA, PTN0_DATA,
+ PTQ7_DATA, PTQ6_DATA, PTQ5_DATA, PTQ4_DATA,
+ PTQ3_DATA, PTQ2_DATA, PTQ1_DATA, PTQ0_DATA,
+ PTR7_DATA, PTR6_DATA, PTR5_DATA, PTR4_DATA,
+ PTR3_DATA, PTR2_DATA, PTR1_DATA, PTR0_DATA,
+ PTS6_DATA, PTS5_DATA, PTS4_DATA,
+ PTS3_DATA, PTS2_DATA, PTS1_DATA, PTS0_DATA,
+ PTT7_DATA, PTT6_DATA, PTT5_DATA, PTT4_DATA,
+ PTT3_DATA, PTT2_DATA, PTT1_DATA, PTT0_DATA,
+ PTU7_DATA, PTU6_DATA, PTU5_DATA, PTU4_DATA,
+ PTU3_DATA, PTU2_DATA, PTU1_DATA, PTU0_DATA,
+ PTV7_DATA, PTV6_DATA, PTV5_DATA, PTV4_DATA,
+ PTV3_DATA, PTV2_DATA, PTV1_DATA, PTV0_DATA,
+ PTW7_DATA, PTW6_DATA, PTW5_DATA, PTW4_DATA,
+ PTW3_DATA, PTW2_DATA, PTW1_DATA, PTW0_DATA,
+ PTX7_DATA, PTX6_DATA, PTX5_DATA, PTX4_DATA,
+ PTX3_DATA, PTX2_DATA, PTX1_DATA, PTX0_DATA,
+ PTY7_DATA, PTY6_DATA, PTY5_DATA, PTY4_DATA,
+ PTY3_DATA, PTY2_DATA, PTY1_DATA, PTY0_DATA,
+ PTZ7_DATA, PTZ6_DATA, PTZ5_DATA, PTZ4_DATA,
+ PTZ3_DATA, PTZ2_DATA, PTZ1_DATA, PTZ0_DATA,
+ PINMUX_DATA_END,
+
+ PINMUX_INPUT_BEGIN,
+ PTA7_IN, PTA6_IN, PTA5_IN, PTA4_IN,
+ PTA3_IN, PTA2_IN, PTA1_IN, PTA0_IN,
+ PTB7_IN, PTB6_IN, PTB5_IN, PTB4_IN,
+ PTB3_IN, PTB2_IN, PTB1_IN, PTB0_IN,
+ PTC7_IN, PTC6_IN, PTC5_IN, PTC4_IN,
+ PTC3_IN, PTC2_IN, PTC1_IN, PTC0_IN,
+ PTD7_IN, PTD6_IN, PTD5_IN, PTD4_IN,
+ PTD3_IN, PTD2_IN, PTD1_IN, PTD0_IN,
+ PTE7_IN, PTE6_IN, PTE5_IN, PTE4_IN,
+ PTE3_IN, PTE2_IN, PTE1_IN, PTE0_IN,
+ PTF7_IN, PTF6_IN, PTF5_IN, PTF4_IN,
+ PTF3_IN, PTF2_IN, PTF1_IN, PTF0_IN,
+ PTH7_IN, PTH6_IN, PTH5_IN, PTH4_IN,
+ PTH3_IN, PTH2_IN, PTH1_IN, PTH0_IN,
+ PTJ3_IN, PTJ2_IN, PTJ1_IN, PTJ0_IN,
+ PTK7_IN, PTK6_IN, PTK5_IN, PTK4_IN,
+ PTK3_IN, PTK2_IN, PTK1_IN, PTK0_IN,
+ PTL7_IN, PTL6_IN, PTL5_IN, PTL4_IN,
+ PTL3_IN, PTL2_IN, PTL1_IN, PTL0_IN,
+ PTM7_IN, PTM6_IN, PTM5_IN, PTM4_IN,
+ PTM3_IN, PTM2_IN, PTM1_IN, PTM0_IN,
+ PTN7_IN, PTN6_IN, PTN5_IN, PTN4_IN,
+ PTN3_IN, PTN2_IN, PTN1_IN, PTN0_IN,
+ PTQ7_IN, PTQ6_IN, PTQ5_IN, PTQ4_IN,
+ PTQ3_IN, PTQ2_IN, PTQ1_IN, PTQ0_IN,
+ PTR7_IN, PTR6_IN, PTR5_IN, PTR4_IN,
+ PTR3_IN, PTR2_IN, PTR1_IN, PTR0_IN,
+ PTS6_IN, PTS5_IN, PTS4_IN,
+ PTS3_IN, PTS2_IN, PTS1_IN, PTS0_IN,
+ PTT7_IN, PTT6_IN, PTT5_IN, PTT4_IN,
+ PTT3_IN, PTT2_IN, PTT1_IN, PTT0_IN,
+ PTU7_IN, PTU6_IN, PTU5_IN, PTU4_IN,
+ PTU3_IN, PTU2_IN, PTU1_IN, PTU0_IN,
+ PTV7_IN, PTV6_IN, PTV5_IN, PTV4_IN,
+ PTV3_IN, PTV2_IN, PTV1_IN, PTV0_IN,
+ PTW7_IN, PTW6_IN, PTW5_IN, PTW4_IN,
+ PTW3_IN, PTW2_IN, PTW1_IN, PTW0_IN,
+ PTX7_IN, PTX6_IN, PTX5_IN, PTX4_IN,
+ PTX3_IN, PTX2_IN, PTX1_IN, PTX0_IN,
+ PTY7_IN, PTY6_IN, PTY5_IN, PTY4_IN,
+ PTY3_IN, PTY2_IN, PTY1_IN, PTY0_IN,
+ PTZ7_IN, PTZ6_IN, PTZ5_IN, PTZ4_IN,
+ PTZ3_IN, PTZ2_IN, PTZ1_IN, PTZ0_IN,
+ PINMUX_INPUT_END,
+
+ PINMUX_INPUT_PULLUP_BEGIN,
+ PTA7_IN_PU, PTA6_IN_PU, PTA5_IN_PU, PTA4_IN_PU,
+ PTA3_IN_PU, PTA2_IN_PU, PTA1_IN_PU, PTA0_IN_PU,
+ PTB7_IN_PU, PTB6_IN_PU, PTB5_IN_PU, PTB4_IN_PU,
+ PTB3_IN_PU, PTB2_IN_PU, PTB1_IN_PU, PTB0_IN_PU,
+ PTC7_IN_PU, PTC6_IN_PU, PTC5_IN_PU, PTC4_IN_PU,
+ PTC3_IN_PU, PTC2_IN_PU, PTC1_IN_PU, PTC0_IN_PU,
+ PTD7_IN_PU, PTD6_IN_PU, PTD5_IN_PU, PTD4_IN_PU,
+ PTD3_IN_PU, PTD2_IN_PU, PTD1_IN_PU, PTD0_IN_PU,
+ PTE7_IN_PU, PTE6_IN_PU, PTE5_IN_PU, PTE4_IN_PU,
+ PTE3_IN_PU, PTE2_IN_PU, PTE1_IN_PU, PTE0_IN_PU,
+ PTF7_IN_PU, PTF6_IN_PU, PTF5_IN_PU, PTF4_IN_PU,
+ PTF3_IN_PU, PTF2_IN_PU, PTF1_IN_PU, PTF0_IN_PU,
+ PTH7_IN_PU, PTH6_IN_PU, PTH5_IN_PU, PTH4_IN_PU,
+ PTH3_IN_PU, PTH2_IN_PU, PTH1_IN_PU, PTH0_IN_PU,
+ PTJ3_IN_PU, PTJ2_IN_PU, PTJ1_IN_PU, PTJ0_IN_PU,
+ PTK7_IN_PU, PTK6_IN_PU, PTK5_IN_PU, PTK4_IN_PU,
+ PTK3_IN_PU, PTK2_IN_PU, PTK1_IN_PU, PTK0_IN_PU,
+ PTL7_IN_PU, PTL6_IN_PU, PTL5_IN_PU, PTL4_IN_PU,
+ PTL3_IN_PU, PTL2_IN_PU, PTL1_IN_PU, PTL0_IN_PU,
+ PTM7_IN_PU, PTM6_IN_PU, PTM5_IN_PU, PTM4_IN_PU,
+ PTM3_IN_PU, PTM2_IN_PU, PTM1_IN_PU, PTM0_IN_PU,
+ PTN7_IN_PU, PTN6_IN_PU, PTN5_IN_PU, PTN4_IN_PU,
+ PTN3_IN_PU, PTN2_IN_PU, PTN1_IN_PU, PTN0_IN_PU,
+ PTQ7_IN_PU, PTQ6_IN_PU, PTQ5_IN_PU, PTQ4_IN_PU,
+ PTQ3_IN_PU, PTQ2_IN_PU, PTQ1_IN_PU, PTQ0_IN_PU,
+ PTR7_IN_PU, PTR6_IN_PU, PTR5_IN_PU, PTR4_IN_PU,
+ PTR3_IN_PU, PTR2_IN_PU, PTR1_IN_PU, PTR0_IN_PU,
+ PTS6_IN_PU, PTS5_IN_PU, PTS4_IN_PU,
+ PTS3_IN_PU, PTS2_IN_PU, PTS1_IN_PU, PTS0_IN_PU,
+ PTT7_IN_PU, PTT6_IN_PU, PTT5_IN_PU, PTT4_IN_PU,
+ PTT3_IN_PU, PTT2_IN_PU, PTT1_IN_PU, PTT0_IN_PU,
+ PTU7_IN_PU, PTU6_IN_PU, PTU5_IN_PU, PTU4_IN_PU,
+ PTU3_IN_PU, PTU2_IN_PU, PTU1_IN_PU, PTU0_IN_PU,
+ PTV7_IN_PU, PTV6_IN_PU, PTV5_IN_PU, PTV4_IN_PU,
+ PTV3_IN_PU, PTV2_IN_PU, PTV1_IN_PU, PTV0_IN_PU,
+ PTW7_IN_PU, PTW6_IN_PU, PTW5_IN_PU, PTW4_IN_PU,
+ PTW3_IN_PU, PTW2_IN_PU, PTW1_IN_PU, PTW0_IN_PU,
+ PTX7_IN_PU, PTX6_IN_PU, PTX5_IN_PU, PTX4_IN_PU,
+ PTX3_IN_PU, PTX2_IN_PU, PTX1_IN_PU, PTX0_IN_PU,
+ PTY7_IN_PU, PTY6_IN_PU, PTY5_IN_PU, PTY4_IN_PU,
+ PTY3_IN_PU, PTY2_IN_PU, PTY1_IN_PU, PTY0_IN_PU,
+ PTZ7_IN_PU, PTZ6_IN_PU, PTZ5_IN_PU, PTZ4_IN_PU,
+ PTZ3_IN_PU, PTZ2_IN_PU, PTZ1_IN_PU, PTZ0_IN_PU,
+ PINMUX_INPUT_PULLUP_END,
+
+ PINMUX_OUTPUT_BEGIN,
+ PTA7_OUT, PTA6_OUT, PTA5_OUT, PTA4_OUT,
+ PTA3_OUT, PTA2_OUT, PTA1_OUT, PTA0_OUT,
+ PTB7_OUT, PTB6_OUT, PTB5_OUT, PTB4_OUT,
+ PTB3_OUT, PTB2_OUT, PTB1_OUT, PTB0_OUT,
+ PTC7_OUT, PTC6_OUT, PTC5_OUT, PTC4_OUT,
+ PTC3_OUT, PTC2_OUT, PTC1_OUT, PTC0_OUT,
+ PTD7_OUT, PTD6_OUT, PTD5_OUT, PTD4_OUT,
+ PTD3_OUT, PTD2_OUT, PTD1_OUT, PTD0_OUT,
+ PTE7_OUT, PTE6_OUT, PTE5_OUT, PTE4_OUT,
+ PTE3_OUT, PTE2_OUT, PTE1_OUT, PTE0_OUT,
+ PTF7_OUT, PTF6_OUT, PTF5_OUT, PTF4_OUT,
+ PTF3_OUT, PTF2_OUT, PTF1_OUT, PTF0_OUT,
+ PTG5_OUT, PTG4_OUT,
+ PTG3_OUT, PTG2_OUT, PTG1_OUT, PTG0_OUT,
+ PTH7_OUT, PTH6_OUT, PTH5_OUT, PTH4_OUT,
+ PTH3_OUT, PTH2_OUT, PTH1_OUT, PTH0_OUT,
+ PTJ7_OUT, PTJ6_OUT, PTJ5_OUT,
+ PTJ3_OUT, PTJ2_OUT, PTJ1_OUT, PTJ0_OUT,
+ PTK7_OUT, PTK6_OUT, PTK5_OUT, PTK4_OUT,
+ PTK3_OUT, PTK2_OUT, PTK1_OUT, PTK0_OUT,
+ PTL7_OUT, PTL6_OUT, PTL5_OUT, PTL4_OUT,
+ PTL3_OUT, PTL2_OUT, PTL1_OUT, PTL0_OUT,
+ PTM7_OUT, PTM6_OUT, PTM5_OUT, PTM4_OUT,
+ PTM3_OUT, PTM2_OUT, PTM1_OUT, PTM0_OUT,
+ PTN7_OUT, PTN6_OUT, PTN5_OUT, PTN4_OUT,
+ PTN3_OUT, PTN2_OUT, PTN1_OUT, PTN0_OUT,
+ PTQ7_OUT, PTQ6_OUT, PTQ5_OUT, PTQ4_OUT,
+ PTQ3_OUT, PTQ2_OUT, PTQ1_OUT, PTQ0_OUT,
+ PTR7_OUT, PTR6_OUT, PTR5_OUT, PTR4_OUT,
+ PTR1_OUT, PTR0_OUT,
+ PTS6_OUT, PTS5_OUT, PTS4_OUT,
+ PTS3_OUT, PTS2_OUT, PTS1_OUT, PTS0_OUT,
+ PTT7_OUT, PTT6_OUT, PTT5_OUT, PTT4_OUT,
+ PTT3_OUT, PTT2_OUT, PTT1_OUT, PTT0_OUT,
+ PTU7_OUT, PTU6_OUT, PTU5_OUT, PTU4_OUT,
+ PTU3_OUT, PTU2_OUT, PTU1_OUT, PTU0_OUT,
+ PTV7_OUT, PTV6_OUT, PTV5_OUT, PTV4_OUT,
+ PTV3_OUT, PTV2_OUT, PTV1_OUT, PTV0_OUT,
+ PTW7_OUT, PTW6_OUT, PTW5_OUT, PTW4_OUT,
+ PTW3_OUT, PTW2_OUT, PTW1_OUT, PTW0_OUT,
+ PTX7_OUT, PTX6_OUT, PTX5_OUT, PTX4_OUT,
+ PTX3_OUT, PTX2_OUT, PTX1_OUT, PTX0_OUT,
+ PTY7_OUT, PTY6_OUT, PTY5_OUT, PTY4_OUT,
+ PTY3_OUT, PTY2_OUT, PTY1_OUT, PTY0_OUT,
+ PTZ7_OUT, PTZ6_OUT, PTZ5_OUT, PTZ4_OUT,
+ PTZ3_OUT, PTZ2_OUT, PTZ1_OUT, PTZ0_OUT,
+ PINMUX_OUTPUT_END,
+
+ PINMUX_FUNCTION_BEGIN,
+ PTA7_FN, PTA6_FN, PTA5_FN, PTA4_FN,
+ PTA3_FN, PTA2_FN, PTA1_FN, PTA0_FN,
+ PTB7_FN, PTB6_FN, PTB5_FN, PTB4_FN,
+ PTB3_FN, PTB2_FN, PTB1_FN, PTB0_FN,
+ PTC7_FN, PTC6_FN, PTC5_FN, PTC4_FN,
+ PTC3_FN, PTC2_FN, PTC1_FN, PTC0_FN,
+ PTD7_FN, PTD6_FN, PTD5_FN, PTD4_FN,
+ PTD3_FN, PTD2_FN, PTD1_FN, PTD0_FN,
+ PTE7_FN, PTE6_FN, PTE5_FN, PTE4_FN,
+ PTE3_FN, PTE2_FN, PTE1_FN, PTE0_FN,
+ PTF7_FN, PTF6_FN, PTF5_FN, PTF4_FN,
+ PTF3_FN, PTF2_FN, PTF1_FN, PTF0_FN,
+ PTG5_FN, PTG4_FN,
+ PTG3_FN, PTG2_FN, PTG1_FN, PTG0_FN,
+ PTH7_FN, PTH6_FN, PTH5_FN, PTH4_FN,
+ PTH3_FN, PTH2_FN, PTH1_FN, PTH0_FN,
+ PTJ7_FN, PTJ6_FN, PTJ5_FN,
+ PTJ3_FN, PTJ2_FN, PTJ1_FN, PTJ0_FN,
+ PTK7_FN, PTK6_FN, PTK5_FN, PTK4_FN,
+ PTK3_FN, PTK2_FN, PTK1_FN, PTK0_FN,
+ PTL7_FN, PTL6_FN, PTL5_FN, PTL4_FN,
+ PTL3_FN, PTL2_FN, PTL1_FN, PTL0_FN,
+ PTM7_FN, PTM6_FN, PTM5_FN, PTM4_FN,
+ PTM3_FN, PTM2_FN, PTM1_FN, PTM0_FN,
+ PTN7_FN, PTN6_FN, PTN5_FN, PTN4_FN,
+ PTN3_FN, PTN2_FN, PTN1_FN, PTN0_FN,
+ PTQ7_FN, PTQ6_FN, PTQ5_FN, PTQ4_FN,
+ PTQ3_FN, PTQ2_FN, PTQ1_FN, PTQ0_FN,
+ PTR7_FN, PTR6_FN, PTR5_FN, PTR4_FN,
+ PTR3_FN, PTR2_FN, PTR1_FN, PTR0_FN,
+ PTS6_FN, PTS5_FN, PTS4_FN,
+ PTS3_FN, PTS2_FN, PTS1_FN, PTS0_FN,
+ PTT7_FN, PTT6_FN, PTT5_FN, PTT4_FN,
+ PTT3_FN, PTT2_FN, PTT1_FN, PTT0_FN,
+ PTU7_FN, PTU6_FN, PTU5_FN, PTU4_FN,
+ PTU3_FN, PTU2_FN, PTU1_FN, PTU0_FN,
+ PTV7_FN, PTV6_FN, PTV5_FN, PTV4_FN,
+ PTV3_FN, PTV2_FN, PTV1_FN, PTV0_FN,
+ PTW7_FN, PTW6_FN, PTW5_FN, PTW4_FN,
+ PTW3_FN, PTW2_FN, PTW1_FN, PTW0_FN,
+ PTX7_FN, PTX6_FN, PTX5_FN, PTX4_FN,
+ PTX3_FN, PTX2_FN, PTX1_FN, PTX0_FN,
+ PTY7_FN, PTY6_FN, PTY5_FN, PTY4_FN,
+ PTY3_FN, PTY2_FN, PTY1_FN, PTY0_FN,
+ PTZ7_FN, PTZ6_FN, PTZ5_FN, PTZ4_FN,
+ PTZ3_FN, PTZ2_FN, PTZ1_FN, PTZ0_FN,
+
+
+ PSA15_0, PSA15_1,
+ PSA14_0, PSA14_1,
+ PSA13_0, PSA13_1,
+ PSA12_0, PSA12_1,
+ PSA10_0, PSA10_1,
+ PSA9_0, PSA9_1,
+ PSA8_0, PSA8_1,
+ PSA7_0, PSA7_1,
+ PSA6_0, PSA6_1,
+ PSA5_0, PSA5_1,
+ PSA3_0, PSA3_1,
+ PSA2_0, PSA2_1,
+ PSA1_0, PSA1_1,
+ PSA0_0, PSA0_1,
+
+ PSB14_0, PSB14_1,
+ PSB13_0, PSB13_1,
+ PSB12_0, PSB12_1,
+ PSB11_0, PSB11_1,
+ PSB10_0, PSB10_1,
+ PSB9_0, PSB9_1,
+ PSB8_0, PSB8_1,
+ PSB7_0, PSB7_1,
+ PSB6_0, PSB6_1,
+ PSB5_0, PSB5_1,
+ PSB4_0, PSB4_1,
+ PSB3_0, PSB3_1,
+ PSB2_0, PSB2_1,
+ PSB1_0, PSB1_1,
+ PSB0_0, PSB0_1,
+
+ PSC15_0, PSC15_1,
+ PSC14_0, PSC14_1,
+ PSC13_0, PSC13_1,
+ PSC12_0, PSC12_1,
+ PSC11_0, PSC11_1,
+ PSC10_0, PSC10_1,
+ PSC9_0, PSC9_1,
+ PSC8_0, PSC8_1,
+ PSC7_0, PSC7_1,
+ PSC6_0, PSC6_1,
+ PSC5_0, PSC5_1,
+ PSC4_0, PSC4_1,
+ PSC2_0, PSC2_1,
+ PSC1_0, PSC1_1,
+ PSC0_0, PSC0_1,
+
+ PSD15_0, PSD15_1,
+ PSD14_0, PSD14_1,
+ PSD13_0, PSD13_1,
+ PSD12_0, PSD12_1,
+ PSD11_0, PSD11_1,
+ PSD10_0, PSD10_1,
+ PSD9_0, PSD9_1,
+ PSD8_0, PSD8_1,
+ PSD7_0, PSD7_1,
+ PSD6_0, PSD6_1,
+ PSD5_0, PSD5_1,
+ PSD4_0, PSD4_1,
+ PSD3_0, PSD3_1,
+ PSD2_0, PSD2_1,
+ PSD1_0, PSD1_1,
+ PSD0_0, PSD0_1,
+
+ PSE15_0, PSE15_1,
+ PSE14_0, PSE14_1,
+ PSE13_0, PSE13_1,
+ PSE12_0, PSE12_1,
+ PSE11_0, PSE11_1,
+ PSE10_0, PSE10_1,
+ PSE9_0, PSE9_1,
+ PSE8_0, PSE8_1,
+ PSE7_0, PSE7_1,
+ PSE6_0, PSE6_1,
+ PSE5_0, PSE5_1,
+ PSE4_0, PSE4_1,
+ PSE3_0, PSE3_1,
+ PSE2_0, PSE2_1,
+ PSE1_0, PSE1_1,
+ PSE0_0, PSE0_1,
+ PINMUX_FUNCTION_END,
+
+ PINMUX_MARK_BEGIN,
+ /*PTA*/
+ D23_MARK, KEYOUT2_MARK, IDED15_MARK,
+ D22_MARK, KEYOUT1_MARK, IDED14_MARK,
+ D21_MARK, KEYOUT0_MARK, IDED13_MARK,
+ D20_MARK, KEYIN4_MARK, IDED12_MARK,
+ D19_MARK, KEYIN3_MARK, IDED11_MARK,
+ D18_MARK, KEYIN2_MARK, IDED10_MARK,
+ D17_MARK, KEYIN1_MARK, IDED9_MARK,
+ D16_MARK, KEYIN0_MARK, IDED8_MARK,
+
+ /*PTB*/
+ D31_MARK, TPUTO1_MARK, IDEA1_MARK,
+ D30_MARK, TPUTO0_MARK, IDEA0_MARK,
+ D29_MARK, IODREQ_MARK,
+ D28_MARK, IDECS0_MARK,
+ D27_MARK, IDECS1_MARK,
+ D26_MARK, KEYOUT5_IN5_MARK, IDEIORD_MARK,
+ D25_MARK, KEYOUT4_IN6_MARK, IDEIOWR_MARK,
+ D24_MARK, KEYOUT3_MARK, IDEINT_MARK,
+
+ /*PTC*/
+ LCDD7_MARK,
+ LCDD6_MARK,
+ LCDD5_MARK,
+ LCDD4_MARK,
+ LCDD3_MARK,
+ LCDD2_MARK,
+ LCDD1_MARK,
+ LCDD0_MARK,
+
+ /*PTD*/
+ LCDD15_MARK,
+ LCDD14_MARK,
+ LCDD13_MARK,
+ LCDD12_MARK,
+ LCDD11_MARK,
+ LCDD10_MARK,
+ LCDD9_MARK,
+ LCDD8_MARK,
+
+ /*PTE*/
+ FSIMCKB_MARK,
+ FSIMCKA_MARK,
+ LCDD21_MARK, SCIF2_L_TXD_MARK,
+ LCDD20_MARK, SCIF4_SCK_MARK,
+ LCDD19_MARK, SCIF4_RXD_MARK,
+ LCDD18_MARK, SCIF4_TXD_MARK,
+ LCDD17_MARK,
+ LCDD16_MARK,
+
+ /*PTF*/
+ LCDVSYN_MARK,
+ LCDDISP_MARK, LCDRS_MARK,
+ LCDHSYN_MARK, LCDCS_MARK,
+ LCDDON_MARK,
+ LCDDCK_MARK, LCDWR_MARK,
+ LCDVEPWC_MARK, SCIF0_TXD_MARK,
+ LCDD23_MARK, SCIF2_L_SCK_MARK,
+ LCDD22_MARK, SCIF2_L_RXD_MARK,
+
+ /*PTG*/
+ AUDCK_MARK,
+ AUDSYNC_MARK,
+ AUDATA3_MARK,
+ AUDATA2_MARK,
+ AUDATA1_MARK,
+ AUDATA0_MARK,
+
+ /*PTH*/
+ VIO0_VD_MARK,
+ VIO0_CLK_MARK,
+ VIO0_D7_MARK,
+ VIO0_D6_MARK,
+ VIO0_D5_MARK,
+ VIO0_D4_MARK,
+ VIO0_D3_MARK,
+ VIO0_D2_MARK,
+
+ /*PTJ*/
+ PDSTATUS_MARK,
+ STATUS2_MARK,
+ STATUS0_MARK,
+ A25_MARK, BS_MARK,
+ A24_MARK,
+ A23_MARK,
+ A22_MARK,
+
+ /*PTK*/
+ VIO1_D5_MARK, VIO0_D13_MARK, IDED5_MARK,
+ VIO1_D4_MARK, VIO0_D12_MARK, IDED4_MARK,
+ VIO1_D3_MARK, VIO0_D11_MARK, IDED3_MARK,
+ VIO1_D2_MARK, VIO0_D10_MARK, IDED2_MARK,
+ VIO1_D1_MARK, VIO0_D9_MARK, IDED1_MARK,
+ VIO1_D0_MARK, VIO0_D8_MARK, IDED0_MARK,
+ VIO0_FLD_MARK,
+ VIO0_HD_MARK,
+
+ /*PTL*/
+ DV_D5_MARK, SCIF3_V_SCK_MARK, RMII_RXD0_MARK,
+ DV_D4_MARK, SCIF3_V_RXD_MARK, RMII_RXD1_MARK,
+ DV_D3_MARK, SCIF3_V_TXD_MARK, RMII_REF_CLK_MARK,
+ DV_D2_MARK, SCIF1_SCK_MARK, RMII_TX_EN_MARK,
+ DV_D1_MARK, SCIF1_RXD_MARK, RMII_TXD0_MARK,
+ DV_D0_MARK, SCIF1_TXD_MARK, RMII_TXD1_MARK,
+ DV_D15_MARK,
+ DV_D14_MARK, MSIOF0_MCK_MARK,
+
+ /*PTM*/
+ DV_D13_MARK, MSIOF0_TSCK_MARK,
+ DV_D12_MARK, MSIOF0_RXD_MARK,
+ DV_D11_MARK, MSIOF0_TXD_MARK,
+ DV_D10_MARK, MSIOF0_TSYNC_MARK,
+ DV_D9_MARK, MSIOF0_SS1_MARK, MSIOF0_RSCK_MARK,
+ DV_D8_MARK, MSIOF0_SS2_MARK, MSIOF0_RSYNC_MARK,
+ LCDVCPWC_MARK, SCIF0_RXD_MARK,
+ LCDRD_MARK, SCIF0_SCK_MARK,
+
+ /*PTN*/
+ VIO0_D1_MARK,
+ VIO0_D0_MARK,
+ DV_CLKI_MARK,
+ DV_CLK_MARK, SCIF2_V_SCK_MARK,
+ DV_VSYNC_MARK, SCIF2_V_RXD_MARK,
+ DV_HSYNC_MARK, SCIF2_V_TXD_MARK,
+ DV_D7_MARK, SCIF3_V_CTS_MARK, RMII_RX_ER_MARK,
+ DV_D6_MARK, SCIF3_V_RTS_MARK, RMII_CRS_DV_MARK,
+
+ /*PTQ*/
+ D7_MARK,
+ D6_MARK,
+ D5_MARK,
+ D4_MARK,
+ D3_MARK,
+ D2_MARK,
+ D1_MARK,
+ D0_MARK,
+
+ /*PTR*/
+ CS6B_CE1B_MARK,
+ CS6A_CE2B_MARK,
+ CS5B_CE1A_MARK,
+ CS5A_CE2A_MARK,
+ IOIS16_MARK, LCDLCLK_MARK,
+ WAIT_MARK,
+ WE3_ICIOWR_MARK, TPUTO3_MARK, TPUTI3_MARK,
+ WE2_ICIORD_MARK, TPUTO2_MARK, IDEA2_MARK,
+
+ /*PTS*/
+ VIO_CKO_MARK,
+ VIO1_FLD_MARK, TPUTI2_MARK, IDEIORDY_MARK,
+ VIO1_HD_MARK, SCIF5_SCK_MARK,
+ VIO1_VD_MARK, SCIF5_RXD_MARK,
+ VIO1_CLK_MARK, SCIF5_TXD_MARK,
+ VIO1_D7_MARK, VIO0_D15_MARK, IDED7_MARK,
+ VIO1_D6_MARK, VIO0_D14_MARK, IDED6_MARK,
+
+ /*PTT*/
+ D15_MARK,
+ D14_MARK,
+ D13_MARK,
+ D12_MARK,
+ D11_MARK,
+ D10_MARK,
+ D9_MARK,
+ D8_MARK,
+
+ /*PTU*/
+ DMAC_DACK0_MARK,
+ DMAC_DREQ0_MARK,
+ FSIOASD_MARK,
+ FSIIABCK_MARK,
+ FSIIALRCK_MARK,
+ FSIOABCK_MARK,
+ FSIOALRCK_MARK,
+ CLKAUDIOAO_MARK,
+
+ /*PTV*/
+ FSIIBSD_MARK, MSIOF1_SS2_MARK, MSIOF1_RSYNC_MARK,
+ FSIOBSD_MARK, MSIOF1_SS1_MARK, MSIOF1_RSCK_MARK,
+ FSIIBBCK_MARK, MSIOF1_RXD_MARK,
+ FSIIBLRCK_MARK, MSIOF1_TSYNC_MARK,
+ FSIOBBCK_MARK, MSIOF1_TSCK_MARK,
+ FSIOBLRCK_MARK, MSIOF1_TXD_MARK,
+ CLKAUDIOBO_MARK, MSIOF1_MCK_MARK,
+ FSIIASD_MARK,
+
+ /*PTW*/
+ MMC_D7_MARK, SDHI1CD_MARK, IODACK_MARK,
+ MMC_D6_MARK, SDHI1WP_MARK, IDERST_MARK,
+ MMC_D5_MARK, SDHI1D3_MARK, EXBUF_ENB_MARK,
+ MMC_D4_MARK, SDHI1D2_MARK, DIRECTION_MARK,
+ MMC_D3_MARK, SDHI1D1_MARK,
+ MMC_D2_MARK, SDHI1D0_MARK,
+ MMC_D1_MARK, SDHI1CMD_MARK,
+ MMC_D0_MARK, SDHI1CLK_MARK,
+
+ /*PTX*/
+ DMAC_DACK1_MARK, IRDA_OUT_MARK,
+ DMAC_DREQ1_MARK, IRDA_IN_MARK,
+ TSIF_TS0_SDAT_MARK, LNKSTA_MARK,
+ TSIF_TS0_SCK_MARK, MDIO_MARK,
+ TSIF_TS0_SDEN_MARK, MDC_MARK,
+ TSIF_TS0_SPSYNC_MARK,
+ MMC_CLK_MARK,
+ MMC_CMD_MARK,
+
+ /*PTY*/
+ SDHI0CD_MARK,
+ SDHI0WP_MARK,
+ SDHI0D3_MARK,
+ SDHI0D2_MARK,
+ SDHI0D1_MARK,
+ SDHI0D0_MARK,
+ SDHI0CMD_MARK,
+ SDHI0CLK_MARK,
+
+ /*PTZ*/
+ INTC_IRQ7_MARK, SCIF3_I_CTS_MARK,
+ INTC_IRQ6_MARK, SCIF3_I_RTS_MARK,
+ INTC_IRQ5_MARK, SCIF3_I_SCK_MARK,
+ INTC_IRQ4_MARK, SCIF3_I_RXD_MARK,
+ INTC_IRQ3_MARK, SCIF3_I_TXD_MARK,
+ INTC_IRQ2_MARK,
+ INTC_IRQ1_MARK,
+ INTC_IRQ0_MARK,
+ PINMUX_MARK_END,
+};
+
+static pinmux_enum_t pinmux_data[] = {
+ /* PTA GPIO */
+ PINMUX_DATA(PTA7_DATA, PTA7_IN, PTA7_OUT, PTA7_IN_PU),
+ PINMUX_DATA(PTA6_DATA, PTA6_IN, PTA6_OUT, PTA6_IN_PU),
+ PINMUX_DATA(PTA5_DATA, PTA5_IN, PTA5_OUT, PTA5_IN_PU),
+ PINMUX_DATA(PTA4_DATA, PTA4_IN, PTA4_OUT, PTA4_IN_PU),
+ PINMUX_DATA(PTA3_DATA, PTA3_IN, PTA3_OUT, PTA3_IN_PU),
+ PINMUX_DATA(PTA2_DATA, PTA2_IN, PTA2_OUT, PTA2_IN_PU),
+ PINMUX_DATA(PTA1_DATA, PTA1_IN, PTA1_OUT, PTA1_IN_PU),
+ PINMUX_DATA(PTA0_DATA, PTA0_IN, PTA0_OUT, PTA0_IN_PU),
+
+ /* PTB GPIO */
+ PINMUX_DATA(PTB7_DATA, PTB7_IN, PTB7_OUT, PTB7_IN_PU),
+ PINMUX_DATA(PTB6_DATA, PTB6_IN, PTB6_OUT, PTB6_IN_PU),
+ PINMUX_DATA(PTB5_DATA, PTB5_IN, PTB5_OUT, PTB5_IN_PU),
+ PINMUX_DATA(PTB4_DATA, PTB4_IN, PTB4_OUT, PTB4_IN_PU),
+ PINMUX_DATA(PTB3_DATA, PTB3_IN, PTB3_OUT, PTB3_IN_PU),
+ PINMUX_DATA(PTB2_DATA, PTB2_IN, PTB2_OUT, PTB2_IN_PU),
+ PINMUX_DATA(PTB1_DATA, PTB1_IN, PTB1_OUT, PTB1_IN_PU),
+ PINMUX_DATA(PTB0_DATA, PTB0_IN, PTB0_OUT, PTB0_IN_PU),
+
+ /* PTC GPIO */
+ PINMUX_DATA(PTC7_DATA, PTC7_IN, PTC7_OUT, PTC7_IN_PU),
+ PINMUX_DATA(PTC6_DATA, PTC6_IN, PTC6_OUT, PTC6_IN_PU),
+ PINMUX_DATA(PTC5_DATA, PTC5_IN, PTC5_OUT, PTC5_IN_PU),
+ PINMUX_DATA(PTC4_DATA, PTC4_IN, PTC4_OUT, PTC4_IN_PU),
+ PINMUX_DATA(PTC3_DATA, PTC3_IN, PTC3_OUT, PTC3_IN_PU),
+ PINMUX_DATA(PTC2_DATA, PTC2_IN, PTC2_OUT, PTC2_IN_PU),
+ PINMUX_DATA(PTC1_DATA, PTC1_IN, PTC1_OUT, PTC1_IN_PU),
+ PINMUX_DATA(PTC0_DATA, PTC0_IN, PTC0_OUT, PTC0_IN_PU),
+
+ /* PTD GPIO */
+ PINMUX_DATA(PTD7_DATA, PTD7_IN, PTD7_OUT, PTD7_IN_PU),
+ PINMUX_DATA(PTD6_DATA, PTD6_IN, PTD6_OUT, PTD6_IN_PU),
+ PINMUX_DATA(PTD5_DATA, PTD5_IN, PTD5_OUT, PTD5_IN_PU),
+ PINMUX_DATA(PTD4_DATA, PTD4_IN, PTD4_OUT, PTD4_IN_PU),
+ PINMUX_DATA(PTD3_DATA, PTD3_IN, PTD3_OUT, PTD3_IN_PU),
+ PINMUX_DATA(PTD2_DATA, PTD2_IN, PTD2_OUT, PTD2_IN_PU),
+ PINMUX_DATA(PTD1_DATA, PTD1_IN, PTD1_OUT, PTD1_IN_PU),
+ PINMUX_DATA(PTD0_DATA, PTD0_IN, PTD0_OUT, PTD0_IN_PU),
+
+ /* PTE GPIO */
+ PINMUX_DATA(PTE7_DATA, PTE7_IN, PTE7_OUT, PTE7_IN_PU),
+ PINMUX_DATA(PTE6_DATA, PTE6_IN, PTE6_OUT, PTE6_IN_PU),
+ PINMUX_DATA(PTE5_DATA, PTE5_IN, PTE5_OUT, PTE5_IN_PU),
+ PINMUX_DATA(PTE4_DATA, PTE4_IN, PTE4_OUT, PTE4_IN_PU),
+ PINMUX_DATA(PTE3_DATA, PTE3_IN, PTE3_OUT, PTE3_IN_PU),
+ PINMUX_DATA(PTE2_DATA, PTE2_IN, PTE2_OUT, PTE2_IN_PU),
+ PINMUX_DATA(PTE1_DATA, PTE1_IN, PTE1_OUT, PTE1_IN_PU),
+ PINMUX_DATA(PTE0_DATA, PTE0_IN, PTE0_OUT, PTE0_IN_PU),
+
+ /* PTF GPIO */
+ PINMUX_DATA(PTF7_DATA, PTF7_IN, PTF7_OUT, PTF7_IN_PU),
+ PINMUX_DATA(PTF6_DATA, PTF6_IN, PTF6_OUT, PTF6_IN_PU),
+ PINMUX_DATA(PTF5_DATA, PTF5_IN, PTF5_OUT, PTF5_IN_PU),
+ PINMUX_DATA(PTF4_DATA, PTF4_IN, PTF4_OUT, PTF4_IN_PU),
+ PINMUX_DATA(PTF3_DATA, PTF3_IN, PTF3_OUT, PTF3_IN_PU),
+ PINMUX_DATA(PTF2_DATA, PTF2_IN, PTF2_OUT, PTF2_IN_PU),
+ PINMUX_DATA(PTF1_DATA, PTF1_IN, PTF1_OUT, PTF1_IN_PU),
+ PINMUX_DATA(PTF0_DATA, PTF0_IN, PTF0_OUT, PTF0_IN_PU),
+
+ /* PTG GPIO */
+ PINMUX_DATA(PTG5_DATA, PTG5_OUT),
+ PINMUX_DATA(PTG4_DATA, PTG4_OUT),
+ PINMUX_DATA(PTG3_DATA, PTG3_OUT),
+ PINMUX_DATA(PTG2_DATA, PTG2_OUT),
+ PINMUX_DATA(PTG1_DATA, PTG1_OUT),
+ PINMUX_DATA(PTG0_DATA, PTG0_OUT),
+
+ /* PTH GPIO */
+ PINMUX_DATA(PTH7_DATA, PTH7_IN, PTH7_OUT, PTH7_IN_PU),
+ PINMUX_DATA(PTH6_DATA, PTH6_IN, PTH6_OUT, PTH6_IN_PU),
+ PINMUX_DATA(PTH5_DATA, PTH5_IN, PTH5_OUT, PTH5_IN_PU),
+ PINMUX_DATA(PTH4_DATA, PTH4_IN, PTH4_OUT, PTH4_IN_PU),
+ PINMUX_DATA(PTH3_DATA, PTH3_IN, PTH3_OUT, PTH3_IN_PU),
+ PINMUX_DATA(PTH2_DATA, PTH2_IN, PTH2_OUT, PTH2_IN_PU),
+ PINMUX_DATA(PTH1_DATA, PTH1_IN, PTH1_OUT, PTH1_IN_PU),
+ PINMUX_DATA(PTH0_DATA, PTH0_IN, PTH0_OUT, PTH0_IN_PU),
+
+ /* PTJ GPIO */
+ PINMUX_DATA(PTJ7_DATA, PTJ7_OUT),
+ PINMUX_DATA(PTJ6_DATA, PTJ6_OUT),
+ PINMUX_DATA(PTJ5_DATA, PTJ5_OUT),
+ PINMUX_DATA(PTJ3_DATA, PTJ3_IN, PTJ3_OUT, PTJ3_IN_PU),
+ PINMUX_DATA(PTJ2_DATA, PTJ2_IN, PTJ2_OUT, PTJ2_IN_PU),
+ PINMUX_DATA(PTJ1_DATA, PTJ1_IN, PTJ1_OUT, PTJ1_IN_PU),
+ PINMUX_DATA(PTJ0_DATA, PTJ0_IN, PTJ0_OUT, PTJ0_IN_PU),
+
+ /* PTK GPIO */
+ PINMUX_DATA(PTK7_DATA, PTK7_IN, PTK7_OUT, PTK7_IN_PU),
+ PINMUX_DATA(PTK6_DATA, PTK6_IN, PTK6_OUT, PTK6_IN_PU),
+ PINMUX_DATA(PTK5_DATA, PTK5_IN, PTK5_OUT, PTK5_IN_PU),
+ PINMUX_DATA(PTK4_DATA, PTK4_IN, PTK4_OUT, PTK4_IN_PU),
+ PINMUX_DATA(PTK3_DATA, PTK3_IN, PTK3_OUT, PTK3_IN_PU),
+ PINMUX_DATA(PTK2_DATA, PTK2_IN, PTK2_OUT, PTK2_IN_PU),
+ PINMUX_DATA(PTK1_DATA, PTK1_IN, PTK1_OUT, PTK1_IN_PU),
+ PINMUX_DATA(PTK0_DATA, PTK0_IN, PTK0_OUT, PTK0_IN_PU),
+
+ /* PTL GPIO */
+ PINMUX_DATA(PTL7_DATA, PTL7_IN, PTL7_OUT, PTL7_IN_PU),
+ PINMUX_DATA(PTL6_DATA, PTL6_IN, PTL6_OUT, PTL6_IN_PU),
+ PINMUX_DATA(PTL5_DATA, PTL5_IN, PTL5_OUT, PTL5_IN_PU),
+ PINMUX_DATA(PTL4_DATA, PTL4_IN, PTL4_OUT, PTL4_IN_PU),
+ PINMUX_DATA(PTL3_DATA, PTL3_IN, PTL3_OUT, PTL3_IN_PU),
+ PINMUX_DATA(PTL2_DATA, PTL2_IN, PTL2_OUT, PTL2_IN_PU),
+ PINMUX_DATA(PTL1_DATA, PTL1_IN, PTL1_OUT, PTL1_IN_PU),
+ PINMUX_DATA(PTL0_DATA, PTL0_IN, PTL0_OUT, PTL0_IN_PU),
+
+ /* PTM GPIO */
+ PINMUX_DATA(PTM7_DATA, PTM7_IN, PTM7_OUT, PTM7_IN_PU),
+ PINMUX_DATA(PTM6_DATA, PTM6_IN, PTM6_OUT, PTM6_IN_PU),
+ PINMUX_DATA(PTM5_DATA, PTM5_IN, PTM5_OUT, PTM5_IN_PU),
+ PINMUX_DATA(PTM4_DATA, PTM4_IN, PTM4_OUT, PTM4_IN_PU),
+ PINMUX_DATA(PTM3_DATA, PTM3_IN, PTM3_OUT, PTM3_IN_PU),
+ PINMUX_DATA(PTM2_DATA, PTM2_IN, PTM2_OUT, PTM2_IN_PU),
+ PINMUX_DATA(PTM1_DATA, PTM1_IN, PTM1_OUT, PTM1_IN_PU),
+ PINMUX_DATA(PTM0_DATA, PTM0_IN, PTM0_OUT, PTM0_IN_PU),
+
+ /* PTN GPIO */
+ PINMUX_DATA(PTN7_DATA, PTN7_IN, PTN7_OUT, PTN7_IN_PU),
+ PINMUX_DATA(PTN6_DATA, PTN6_IN, PTN6_OUT, PTN6_IN_PU),
+ PINMUX_DATA(PTN5_DATA, PTN5_IN, PTN5_OUT, PTN5_IN_PU),
+ PINMUX_DATA(PTN4_DATA, PTN4_IN, PTN4_OUT, PTN4_IN_PU),
+ PINMUX_DATA(PTN3_DATA, PTN3_IN, PTN3_OUT, PTN3_IN_PU),
+ PINMUX_DATA(PTN2_DATA, PTN2_IN, PTN2_OUT, PTN2_IN_PU),
+ PINMUX_DATA(PTN1_DATA, PTN1_IN, PTN1_OUT, PTN1_IN_PU),
+ PINMUX_DATA(PTN0_DATA, PTN0_IN, PTN0_OUT, PTN0_IN_PU),
+
+ /* PTQ GPIO */
+ PINMUX_DATA(PTQ7_DATA, PTQ7_IN, PTQ7_OUT, PTQ7_IN_PU),
+ PINMUX_DATA(PTQ6_DATA, PTQ6_IN, PTQ6_OUT, PTQ6_IN_PU),
+ PINMUX_DATA(PTQ5_DATA, PTQ5_IN, PTQ5_OUT, PTQ5_IN_PU),
+ PINMUX_DATA(PTQ4_DATA, PTQ4_IN, PTQ4_OUT, PTQ4_IN_PU),
+ PINMUX_DATA(PTQ3_DATA, PTQ3_IN, PTQ3_OUT, PTQ3_IN_PU),
+ PINMUX_DATA(PTQ2_DATA, PTQ2_IN, PTQ2_OUT, PTQ2_IN_PU),
+ PINMUX_DATA(PTQ1_DATA, PTQ1_IN, PTQ1_OUT, PTQ1_IN_PU),
+ PINMUX_DATA(PTQ0_DATA, PTQ0_IN, PTQ0_OUT, PTQ0_IN_PU),
+
+ /* PTR GPIO */
+ PINMUX_DATA(PTR7_DATA, PTR7_IN, PTR7_OUT, PTR7_IN_PU),
+ PINMUX_DATA(PTR6_DATA, PTR6_IN, PTR6_OUT, PTR6_IN_PU),
+ PINMUX_DATA(PTR5_DATA, PTR5_IN, PTR5_OUT, PTR5_IN_PU),
+ PINMUX_DATA(PTR4_DATA, PTR4_IN, PTR4_OUT, PTR4_IN_PU),
+ PINMUX_DATA(PTR3_DATA, PTR3_IN, PTR3_IN_PU),
+ PINMUX_DATA(PTR2_DATA, PTR2_IN, PTR2_IN_PU),
+ PINMUX_DATA(PTR1_DATA, PTR1_IN, PTR1_OUT, PTR1_IN_PU),
+ PINMUX_DATA(PTR0_DATA, PTR0_IN, PTR0_OUT, PTR0_IN_PU),
+
+ /* PTS GPIO */
+ PINMUX_DATA(PTS6_DATA, PTS6_IN, PTS6_OUT, PTS6_IN_PU),
+ PINMUX_DATA(PTS5_DATA, PTS5_IN, PTS5_OUT, PTS5_IN_PU),
+ PINMUX_DATA(PTS4_DATA, PTS4_IN, PTS4_OUT, PTS4_IN_PU),
+ PINMUX_DATA(PTS3_DATA, PTS3_IN, PTS3_OUT, PTS3_IN_PU),
+ PINMUX_DATA(PTS2_DATA, PTS2_IN, PTS2_OUT, PTS2_IN_PU),
+ PINMUX_DATA(PTS1_DATA, PTS1_IN, PTS1_OUT, PTS1_IN_PU),
+ PINMUX_DATA(PTS0_DATA, PTS0_IN, PTS0_OUT, PTS0_IN_PU),
+
+ /* PTT GPIO */
+ PINMUX_DATA(PTT7_DATA, PTT7_IN, PTT7_OUT, PTT7_IN_PU),
+ PINMUX_DATA(PTT6_DATA, PTT6_IN, PTT6_OUT, PTT6_IN_PU),
+ PINMUX_DATA(PTT5_DATA, PTT5_IN, PTT5_OUT, PTT5_IN_PU),
+ PINMUX_DATA(PTT4_DATA, PTT4_IN, PTT4_OUT, PTT4_IN_PU),
+ PINMUX_DATA(PTT3_DATA, PTT3_IN, PTT3_OUT, PTT3_IN_PU),
+ PINMUX_DATA(PTT2_DATA, PTT2_IN, PTT2_OUT, PTT2_IN_PU),
+ PINMUX_DATA(PTT1_DATA, PTT1_IN, PTT1_OUT, PTT1_IN_PU),
+ PINMUX_DATA(PTT0_DATA, PTT0_IN, PTT0_OUT, PTT0_IN_PU),
+
+ /* PTU GPIO */
+ PINMUX_DATA(PTU7_DATA, PTU7_IN, PTU7_OUT, PTU7_IN_PU),
+ PINMUX_DATA(PTU6_DATA, PTU6_IN, PTU6_OUT, PTU6_IN_PU),
+ PINMUX_DATA(PTU5_DATA, PTU5_IN, PTU5_OUT, PTU5_IN_PU),
+ PINMUX_DATA(PTU4_DATA, PTU4_IN, PTU4_OUT, PTU4_IN_PU),
+ PINMUX_DATA(PTU3_DATA, PTU3_IN, PTU3_OUT, PTU3_IN_PU),
+ PINMUX_DATA(PTU2_DATA, PTU2_IN, PTU2_OUT, PTU2_IN_PU),
+ PINMUX_DATA(PTU1_DATA, PTU1_IN, PTU1_OUT, PTU1_IN_PU),
+ PINMUX_DATA(PTU0_DATA, PTU0_IN, PTU0_OUT, PTU0_IN_PU),
+
+ /* PTV GPIO */
+ PINMUX_DATA(PTV7_DATA, PTV7_IN, PTV7_OUT, PTV7_IN_PU),
+ PINMUX_DATA(PTV6_DATA, PTV6_IN, PTV6_OUT, PTV6_IN_PU),
+ PINMUX_DATA(PTV5_DATA, PTV5_IN, PTV5_OUT, PTV5_IN_PU),
+ PINMUX_DATA(PTV4_DATA, PTV4_IN, PTV4_OUT, PTV4_IN_PU),
+ PINMUX_DATA(PTV3_DATA, PTV3_IN, PTV3_OUT, PTV3_IN_PU),
+ PINMUX_DATA(PTV2_DATA, PTV2_IN, PTV2_OUT, PTV2_IN_PU),
+ PINMUX_DATA(PTV1_DATA, PTV1_IN, PTV1_OUT, PTV1_IN_PU),
+ PINMUX_DATA(PTV0_DATA, PTV0_IN, PTV0_OUT, PTV0_IN_PU),
+
+ /* PTW GPIO */
+ PINMUX_DATA(PTW7_DATA, PTW7_IN, PTW7_OUT, PTW7_IN_PU),
+ PINMUX_DATA(PTW6_DATA, PTW6_IN, PTW6_OUT, PTW6_IN_PU),
+ PINMUX_DATA(PTW5_DATA, PTW5_IN, PTW5_OUT, PTW5_IN_PU),
+ PINMUX_DATA(PTW4_DATA, PTW4_IN, PTW4_OUT, PTW4_IN_PU),
+ PINMUX_DATA(PTW3_DATA, PTW3_IN, PTW3_OUT, PTW3_IN_PU),
+ PINMUX_DATA(PTW2_DATA, PTW2_IN, PTW2_OUT, PTW2_IN_PU),
+ PINMUX_DATA(PTW1_DATA, PTW1_IN, PTW1_OUT, PTW1_IN_PU),
+ PINMUX_DATA(PTW0_DATA, PTW0_IN, PTW0_OUT, PTW0_IN_PU),
+
+ /* PTX GPIO */
+ PINMUX_DATA(PTX7_DATA, PTX7_IN, PTX7_OUT, PTX7_IN_PU),
+ PINMUX_DATA(PTX6_DATA, PTX6_IN, PTX6_OUT, PTX6_IN_PU),
+ PINMUX_DATA(PTX5_DATA, PTX5_IN, PTX5_OUT, PTX5_IN_PU),
+ PINMUX_DATA(PTX4_DATA, PTX4_IN, PTX4_OUT, PTX4_IN_PU),
+ PINMUX_DATA(PTX3_DATA, PTX3_IN, PTX3_OUT, PTX3_IN_PU),
+ PINMUX_DATA(PTX2_DATA, PTX2_IN, PTX2_OUT, PTX2_IN_PU),
+ PINMUX_DATA(PTX1_DATA, PTX1_IN, PTX1_OUT, PTX1_IN_PU),
+ PINMUX_DATA(PTX0_DATA, PTX0_IN, PTX0_OUT, PTX0_IN_PU),
+
+ /* PTY GPIO */
+ PINMUX_DATA(PTY7_DATA, PTY7_IN, PTY7_OUT, PTY7_IN_PU),
+ PINMUX_DATA(PTY6_DATA, PTY6_IN, PTY6_OUT, PTY6_IN_PU),
+ PINMUX_DATA(PTY5_DATA, PTY5_IN, PTY5_OUT, PTY5_IN_PU),
+ PINMUX_DATA(PTY4_DATA, PTY4_IN, PTY4_OUT, PTY4_IN_PU),
+ PINMUX_DATA(PTY3_DATA, PTY3_IN, PTY3_OUT, PTY3_IN_PU),
+ PINMUX_DATA(PTY2_DATA, PTY2_IN, PTY2_OUT, PTY2_IN_PU),
+ PINMUX_DATA(PTY1_DATA, PTY1_IN, PTY1_OUT, PTY1_IN_PU),
+ PINMUX_DATA(PTY0_DATA, PTY0_IN, PTY0_OUT, PTY0_IN_PU),
+
+ /* PTZ GPIO */
+ PINMUX_DATA(PTZ7_DATA, PTZ7_IN, PTZ7_OUT, PTZ7_IN_PU),
+ PINMUX_DATA(PTZ6_DATA, PTZ6_IN, PTZ6_OUT, PTZ6_IN_PU),
+ PINMUX_DATA(PTZ5_DATA, PTZ5_IN, PTZ5_OUT, PTZ5_IN_PU),
+ PINMUX_DATA(PTZ4_DATA, PTZ4_IN, PTZ4_OUT, PTZ4_IN_PU),
+ PINMUX_DATA(PTZ3_DATA, PTZ3_IN, PTZ3_OUT, PTZ3_IN_PU),
+ PINMUX_DATA(PTZ2_DATA, PTZ2_IN, PTZ2_OUT, PTZ2_IN_PU),
+ PINMUX_DATA(PTZ1_DATA, PTZ1_IN, PTZ1_OUT, PTZ1_IN_PU),
+ PINMUX_DATA(PTZ0_DATA, PTZ0_IN, PTZ0_OUT, PTZ0_IN_PU),
+
+ /* PTA FN */
+ PINMUX_DATA(D23_MARK, PSA15_0, PSA14_0, PTA7_FN),
+ PINMUX_DATA(D22_MARK, PSA15_0, PSA14_0, PTA6_FN),
+ PINMUX_DATA(D21_MARK, PSA15_0, PSA14_0, PTA5_FN),
+ PINMUX_DATA(D20_MARK, PSA15_0, PSA14_0, PTA4_FN),
+ PINMUX_DATA(D19_MARK, PSA15_0, PSA14_0, PTA3_FN),
+ PINMUX_DATA(D18_MARK, PSA15_0, PSA14_0, PTA2_FN),
+ PINMUX_DATA(D17_MARK, PSA15_0, PSA14_0, PTA1_FN),
+ PINMUX_DATA(D16_MARK, PSA15_0, PSA14_0, PTA0_FN),
+
+ PINMUX_DATA(KEYOUT2_MARK, PSA15_0, PSA14_1, PTA7_FN),
+ PINMUX_DATA(KEYOUT1_MARK, PSA15_0, PSA14_1, PTA6_FN),
+ PINMUX_DATA(KEYOUT0_MARK, PSA15_0, PSA14_1, PTA5_FN),
+ PINMUX_DATA(KEYIN4_MARK, PSA15_0, PSA14_1, PTA4_FN),
+ PINMUX_DATA(KEYIN3_MARK, PSA15_0, PSA14_1, PTA3_FN),
+ PINMUX_DATA(KEYIN2_MARK, PSA15_0, PSA14_1, PTA2_FN),
+ PINMUX_DATA(KEYIN1_MARK, PSA15_0, PSA14_1, PTA1_FN),
+ PINMUX_DATA(KEYIN0_MARK, PSA15_0, PSA14_1, PTA0_FN),
+
+ PINMUX_DATA(IDED15_MARK, PSA15_1, PSA14_0, PTA7_FN),
+ PINMUX_DATA(IDED14_MARK, PSA15_1, PSA14_0, PTA6_FN),
+ PINMUX_DATA(IDED13_MARK, PSA15_1, PSA14_0, PTA5_FN),
+ PINMUX_DATA(IDED12_MARK, PSA15_1, PSA14_0, PTA4_FN),
+ PINMUX_DATA(IDED11_MARK, PSA15_1, PSA14_0, PTA3_FN),
+ PINMUX_DATA(IDED10_MARK, PSA15_1, PSA14_0, PTA2_FN),
+ PINMUX_DATA(IDED9_MARK, PSA15_1, PSA14_0, PTA1_FN),
+ PINMUX_DATA(IDED8_MARK, PSA15_1, PSA14_0, PTA0_FN),
+
+ /* PTB FN */
+ PINMUX_DATA(D31_MARK, PSE15_0, PSE14_0, PTB7_FN),
+ PINMUX_DATA(D30_MARK, PSE15_0, PSE14_0, PTB6_FN),
+ PINMUX_DATA(D29_MARK, PSE11_0, PTB5_FN),
+ PINMUX_DATA(D28_MARK, PSE11_0, PTB4_FN),
+ PINMUX_DATA(D27_MARK, PSE11_0, PTB3_FN),
+ PINMUX_DATA(D26_MARK, PSA15_0, PSA14_0, PTB2_FN),
+ PINMUX_DATA(D25_MARK, PSA15_0, PSA14_0, PTB1_FN),
+ PINMUX_DATA(D24_MARK, PSA15_0, PSA14_0, PTB0_FN),
+
+ PINMUX_DATA(IDEA1_MARK, PSE15_1, PSE14_0, PTB7_FN),
+ PINMUX_DATA(IDEA0_MARK, PSE15_1, PSE14_0, PTB6_FN),
+ PINMUX_DATA(IODREQ_MARK, PSE11_1, PTB5_FN),
+ PINMUX_DATA(IDECS0_MARK, PSE11_1, PTB4_FN),
+ PINMUX_DATA(IDECS1_MARK, PSE11_1, PTB3_FN),
+ PINMUX_DATA(IDEIORD_MARK, PSA15_1, PSA14_0, PTB2_FN),
+ PINMUX_DATA(IDEIOWR_MARK, PSA15_1, PSA14_0, PTB1_FN),
+ PINMUX_DATA(IDEINT_MARK, PSA15_1, PSA14_0, PTB0_FN),
+
+ PINMUX_DATA(TPUTO1_MARK, PSE15_0, PSE14_1, PTB7_FN),
+ PINMUX_DATA(TPUTO0_MARK, PSE15_0, PSE14_1, PTB6_FN),
+
+ PINMUX_DATA(KEYOUT5_IN5_MARK, PSA15_0, PSA14_1, PTB2_FN),
+ PINMUX_DATA(KEYOUT4_IN6_MARK, PSA15_0, PSA14_1, PTB1_FN),
+ PINMUX_DATA(KEYOUT3_MARK, PSA15_0, PSA14_1, PTB0_FN),
+
+ /* PTC FN */
+ PINMUX_DATA(LCDD7_MARK, PSD5_0, PTC7_FN),
+ PINMUX_DATA(LCDD6_MARK, PSD5_0, PTC6_FN),
+ PINMUX_DATA(LCDD5_MARK, PSD5_0, PTC5_FN),
+ PINMUX_DATA(LCDD4_MARK, PSD5_0, PTC4_FN),
+ PINMUX_DATA(LCDD3_MARK, PSD5_0, PTC3_FN),
+ PINMUX_DATA(LCDD2_MARK, PSD5_0, PTC2_FN),
+ PINMUX_DATA(LCDD1_MARK, PSD5_0, PTC1_FN),
+ PINMUX_DATA(LCDD0_MARK, PSD5_0, PTC0_FN),
+
+ /* PTD FN */
+ PINMUX_DATA(LCDD15_MARK, PSD5_0, PTD7_FN),
+ PINMUX_DATA(LCDD14_MARK, PSD5_0, PTD6_FN),
+ PINMUX_DATA(LCDD13_MARK, PSD5_0, PTD5_FN),
+ PINMUX_DATA(LCDD12_MARK, PSD5_0, PTD4_FN),
+ PINMUX_DATA(LCDD11_MARK, PSD5_0, PTD3_FN),
+ PINMUX_DATA(LCDD10_MARK, PSD5_0, PTD2_FN),
+ PINMUX_DATA(LCDD9_MARK, PSD5_0, PTD1_FN),
+ PINMUX_DATA(LCDD8_MARK, PSD5_0, PTD0_FN),
+
+ /* PTE FN */
+ PINMUX_DATA(FSIMCKB_MARK, PTE7_FN),
+ PINMUX_DATA(FSIMCKA_MARK, PTE6_FN),
+
+ PINMUX_DATA(LCDD21_MARK, PSC5_0, PSC4_0, PTE5_FN),
+ PINMUX_DATA(LCDD20_MARK, PSD3_0, PSD2_0, PTE4_FN),
+ PINMUX_DATA(LCDD19_MARK, PSA3_0, PSA2_0, PTE3_FN),
+ PINMUX_DATA(LCDD18_MARK, PSA3_0, PSA2_0, PTE2_FN),
+ PINMUX_DATA(LCDD17_MARK, PSD5_0, PTE1_FN),
+ PINMUX_DATA(LCDD16_MARK, PSD5_0, PTE0_FN),
+
+ PINMUX_DATA(SCIF2_L_TXD_MARK, PSC5_0, PSC4_1, PTE5_FN),
+ PINMUX_DATA(SCIF4_SCK_MARK, PSD3_0, PSD2_1, PTE4_FN),
+ PINMUX_DATA(SCIF4_RXD_MARK, PSA3_0, PSA2_1, PTE3_FN),
+ PINMUX_DATA(SCIF4_TXD_MARK, PSA3_0, PSA2_1, PTE2_FN),
+
+ /* PTF FN */
+ PINMUX_DATA(LCDVSYN_MARK, PSD8_0, PTF7_FN),
+ PINMUX_DATA(LCDDISP_MARK, PSD10_0, PSD9_0, PTF6_FN),
+ PINMUX_DATA(LCDHSYN_MARK, PSD10_0, PSD9_0, PTF5_FN),
+ PINMUX_DATA(LCDDON_MARK, PSD8_0, PTF4_FN),
+ PINMUX_DATA(LCDDCK_MARK, PSD10_0, PSD9_0, PTF3_FN),
+ PINMUX_DATA(LCDVEPWC_MARK, PSA6_0, PTF2_FN),
+ PINMUX_DATA(LCDD23_MARK, PSC7_0, PSC6_0, PTF1_FN),
+ PINMUX_DATA(LCDD22_MARK, PSC5_0, PSC4_0, PTF0_FN),
+
+ PINMUX_DATA(LCDRS_MARK, PSD10_0, PSD9_1, PTF6_FN),
+ PINMUX_DATA(LCDCS_MARK, PSD10_0, PSD9_1, PTF5_FN),
+ PINMUX_DATA(LCDWR_MARK, PSD10_0, PSD9_1, PTF3_FN),
+
+ PINMUX_DATA(SCIF0_TXD_MARK, PSA6_1, PTF2_FN),
+ PINMUX_DATA(SCIF2_L_SCK_MARK, PSC7_0, PSC6_1, PTF1_FN),
+ PINMUX_DATA(SCIF2_L_RXD_MARK, PSC5_0, PSC4_1, PTF0_FN),
+
+ /* PTG FN */
+ PINMUX_DATA(AUDCK_MARK, PTG5_FN),
+ PINMUX_DATA(AUDSYNC_MARK, PTG4_FN),
+ PINMUX_DATA(AUDATA3_MARK, PTG3_FN),
+ PINMUX_DATA(AUDATA2_MARK, PTG2_FN),
+ PINMUX_DATA(AUDATA1_MARK, PTG1_FN),
+ PINMUX_DATA(AUDATA0_MARK, PTG0_FN),
+
+ /* PTH FN */
+ PINMUX_DATA(VIO0_VD_MARK, PTH7_FN),
+ PINMUX_DATA(VIO0_CLK_MARK, PTH6_FN),
+ PINMUX_DATA(VIO0_D7_MARK, PTH5_FN),
+ PINMUX_DATA(VIO0_D6_MARK, PTH4_FN),
+ PINMUX_DATA(VIO0_D5_MARK, PTH3_FN),
+ PINMUX_DATA(VIO0_D4_MARK, PTH2_FN),
+ PINMUX_DATA(VIO0_D3_MARK, PTH1_FN),
+ PINMUX_DATA(VIO0_D2_MARK, PTH0_FN),
+
+ /* PTJ FN */
+ PINMUX_DATA(PDSTATUS_MARK, PTJ7_FN),
+ PINMUX_DATA(STATUS2_MARK, PTJ6_FN),
+ PINMUX_DATA(STATUS0_MARK, PTJ5_FN),
+ PINMUX_DATA(A25_MARK, PSA8_0, PTJ3_FN),
+ PINMUX_DATA(BS_MARK, PSA8_1, PTJ3_FN),
+ PINMUX_DATA(A24_MARK, PTJ2_FN),
+ PINMUX_DATA(A23_MARK, PTJ1_FN),
+ PINMUX_DATA(A22_MARK, PTJ0_FN),
+
+ /* PTK FN */
+ PINMUX_DATA(VIO1_D5_MARK, PSB7_0, PSB6_0, PTK7_FN),
+ PINMUX_DATA(VIO1_D4_MARK, PSB7_0, PSB6_0, PTK6_FN),
+ PINMUX_DATA(VIO1_D3_MARK, PSB7_0, PSB6_0, PTK5_FN),
+ PINMUX_DATA(VIO1_D2_MARK, PSB7_0, PSB6_0, PTK4_FN),
+ PINMUX_DATA(VIO1_D1_MARK, PSB7_0, PSB6_0, PTK3_FN),
+ PINMUX_DATA(VIO1_D0_MARK, PSB7_0, PSB6_0, PTK2_FN),
+
+ PINMUX_DATA(VIO0_D13_MARK, PSB7_0, PSB6_1, PTK7_FN),
+ PINMUX_DATA(VIO0_D12_MARK, PSB7_0, PSB6_1, PTK6_FN),
+ PINMUX_DATA(VIO0_D11_MARK, PSB7_0, PSB6_1, PTK5_FN),
+ PINMUX_DATA(VIO0_D10_MARK, PSB7_0, PSB6_1, PTK4_FN),
+ PINMUX_DATA(VIO0_D9_MARK, PSB7_0, PSB6_1, PTK3_FN),
+ PINMUX_DATA(VIO0_D8_MARK, PSB7_0, PSB6_1, PTK2_FN),
+
+ PINMUX_DATA(IDED5_MARK, PSB7_1, PSB6_0, PTK7_FN),
+ PINMUX_DATA(IDED4_MARK, PSB7_1, PSB6_0, PTK6_FN),
+ PINMUX_DATA(IDED3_MARK, PSB7_1, PSB6_0, PTK5_FN),
+ PINMUX_DATA(IDED2_MARK, PSB7_1, PSB6_0, PTK4_FN),
+ PINMUX_DATA(IDED1_MARK, PSB7_1, PSB6_0, PTK3_FN),
+ PINMUX_DATA(IDED0_MARK, PSB7_1, PSB6_0, PTK2_FN),
+
+ PINMUX_DATA(VIO0_FLD_MARK, PTK1_FN),
+ PINMUX_DATA(VIO0_HD_MARK, PTK0_FN),
+
+ /* PTL FN */
+ PINMUX_DATA(DV_D5_MARK, PSB9_0, PSB8_0, PTL7_FN),
+ PINMUX_DATA(DV_D4_MARK, PSB9_0, PSB8_0, PTL6_FN),
+ PINMUX_DATA(DV_D3_MARK, PSE7_0, PSE6_0, PTL5_FN),
+ PINMUX_DATA(DV_D2_MARK, PSC9_0, PSC8_0, PTL4_FN),
+ PINMUX_DATA(DV_D1_MARK, PSC9_0, PSC8_0, PTL3_FN),
+ PINMUX_DATA(DV_D0_MARK, PSC9_0, PSC8_0, PTL2_FN),
+ PINMUX_DATA(DV_D15_MARK, PSD4_0, PTL1_FN),
+ PINMUX_DATA(DV_D14_MARK, PSE5_0, PSE4_0, PTL0_FN),
+
+ PINMUX_DATA(SCIF3_V_SCK_MARK, PSB9_0, PSB8_1, PTL7_FN),
+ PINMUX_DATA(SCIF3_V_RXD_MARK, PSB9_0, PSB8_1, PTL6_FN),
+ PINMUX_DATA(SCIF3_V_TXD_MARK, PSE7_0, PSE6_1, PTL5_FN),
+ PINMUX_DATA(SCIF1_SCK_MARK, PSC9_0, PSC8_1, PTL4_FN),
+ PINMUX_DATA(SCIF1_RXD_MARK, PSC9_0, PSC8_1, PTL3_FN),
+ PINMUX_DATA(SCIF1_TXD_MARK, PSC9_0, PSC8_1, PTL2_FN),
+
+ PINMUX_DATA(RMII_RXD0_MARK, PSB9_1, PSB8_0, PTL7_FN),
+ PINMUX_DATA(RMII_RXD1_MARK, PSB9_1, PSB8_0, PTL6_FN),
+ PINMUX_DATA(RMII_REF_CLK_MARK, PSE7_1, PSE6_0, PTL5_FN),
+ PINMUX_DATA(RMII_TX_EN_MARK, PSC9_1, PSC8_0, PTL4_FN),
+ PINMUX_DATA(RMII_TXD0_MARK, PSC9_1, PSC8_0, PTL3_FN),
+ PINMUX_DATA(RMII_TXD1_MARK, PSC9_1, PSC8_0, PTL2_FN),
+
+ PINMUX_DATA(MSIOF0_MCK_MARK, PSE5_0, PSE4_1, PTL0_FN),
+
+ /* PTM FN */
+ PINMUX_DATA(DV_D13_MARK, PSC13_0, PSC12_0, PTM7_FN),
+ PINMUX_DATA(DV_D12_MARK, PSC13_0, PSC12_0, PTM6_FN),
+ PINMUX_DATA(DV_D11_MARK, PSC13_0, PSC12_0, PTM5_FN),
+ PINMUX_DATA(DV_D10_MARK, PSC13_0, PSC12_0, PTM4_FN),
+ PINMUX_DATA(DV_D9_MARK, PSC11_0, PSC10_0, PTM3_FN),
+ PINMUX_DATA(DV_D8_MARK, PSC11_0, PSC10_0, PTM2_FN),
+
+ PINMUX_DATA(MSIOF0_TSCK_MARK, PSC13_0, PSC12_1, PTM7_FN),
+ PINMUX_DATA(MSIOF0_RXD_MARK, PSC13_0, PSC12_1, PTM6_FN),
+ PINMUX_DATA(MSIOF0_TXD_MARK, PSC13_0, PSC12_1, PTM5_FN),
+ PINMUX_DATA(MSIOF0_TSYNC_MARK, PSC13_0, PSC12_1, PTM4_FN),
+ PINMUX_DATA(MSIOF0_SS1_MARK, PSC11_0, PSC10_1, PTM3_FN),
+ PINMUX_DATA(MSIOF0_RSCK_MARK, PSC11_1, PSC10_0, PTM3_FN),
+ PINMUX_DATA(MSIOF0_SS2_MARK, PSC11_0, PSC10_1, PTM2_FN),
+ PINMUX_DATA(MSIOF0_RSYNC_MARK, PSC11_1, PSC10_0, PTM2_FN),
+
+ PINMUX_DATA(LCDVCPWC_MARK, PSA6_0, PTM1_FN),
+ PINMUX_DATA(LCDRD_MARK, PSA7_0, PTM0_FN),
+
+ PINMUX_DATA(SCIF0_RXD_MARK, PSA6_1, PTM1_FN),
+ PINMUX_DATA(SCIF0_SCK_MARK, PSA7_1, PTM0_FN),
+
+ /* PTN FN */
+ PINMUX_DATA(VIO0_D1_MARK, PTN7_FN),
+ PINMUX_DATA(VIO0_D0_MARK, PTN6_FN),
+
+ PINMUX_DATA(DV_CLKI_MARK, PSD11_0, PTN5_FN),
+ PINMUX_DATA(DV_CLK_MARK, PSD13_0, PSD12_0, PTN4_FN),
+ PINMUX_DATA(DV_VSYNC_MARK, PSD15_0, PSD14_0, PTN3_FN),
+ PINMUX_DATA(DV_HSYNC_MARK, PSB5_0, PSB4_0, PTN2_FN),
+ PINMUX_DATA(DV_D7_MARK, PSB3_0, PSB2_0, PTN1_FN),
+ PINMUX_DATA(DV_D6_MARK, PSB1_0, PSB0_0, PTN0_FN),
+
+ PINMUX_DATA(SCIF2_V_SCK_MARK, PSD13_0, PSD12_1, PTN4_FN),
+ PINMUX_DATA(SCIF2_V_RXD_MARK, PSD15_0, PSD14_1, PTN3_FN),
+ PINMUX_DATA(SCIF2_V_TXD_MARK, PSB5_0, PSB4_1, PTN2_FN),
+ PINMUX_DATA(SCIF3_V_CTS_MARK, PSB3_0, PSB2_1, PTN1_FN),
+ PINMUX_DATA(SCIF3_V_RTS_MARK, PSB1_0, PSB0_1, PTN0_FN),
+
+ PINMUX_DATA(RMII_RX_ER_MARK, PSB3_1, PSB2_0, PTN1_FN),
+ PINMUX_DATA(RMII_CRS_DV_MARK, PSB1_1, PSB0_0, PTN0_FN),
+
+ /* PTQ FN */
+ PINMUX_DATA(D7_MARK, PTQ7_FN),
+ PINMUX_DATA(D6_MARK, PTQ6_FN),
+ PINMUX_DATA(D5_MARK, PTQ5_FN),
+ PINMUX_DATA(D4_MARK, PTQ4_FN),
+ PINMUX_DATA(D3_MARK, PTQ3_FN),
+ PINMUX_DATA(D2_MARK, PTQ2_FN),
+ PINMUX_DATA(D1_MARK, PTQ1_FN),
+ PINMUX_DATA(D0_MARK, PTQ0_FN),
+
+ /* PTR FN */
+ PINMUX_DATA(CS6B_CE1B_MARK, PTR7_FN),
+ PINMUX_DATA(CS6A_CE2B_MARK, PTR6_FN),
+ PINMUX_DATA(CS5B_CE1A_MARK, PTR5_FN),
+ PINMUX_DATA(CS5A_CE2A_MARK, PTR4_FN),
+ PINMUX_DATA(IOIS16_MARK, PSA5_0, PTR3_FN),
+ PINMUX_DATA(WAIT_MARK, PTR2_FN),
+ PINMUX_DATA(WE3_ICIOWR_MARK, PSA1_0, PSA0_0, PTR1_FN),
+ PINMUX_DATA(WE2_ICIORD_MARK, PSD1_0, PSD0_0, PTR0_FN),
+
+ PINMUX_DATA(LCDLCLK_MARK, PSA5_1, PTR3_FN),
+
+ PINMUX_DATA(IDEA2_MARK, PSD1_1, PSD0_0, PTR0_FN),
+
+ PINMUX_DATA(TPUTO3_MARK, PSA1_0, PSA0_1, PTR1_FN),
+ PINMUX_DATA(TPUTI3_MARK, PSA1_1, PSA0_0, PTR1_FN),
+ PINMUX_DATA(TPUTO2_MARK, PSD1_0, PSD0_1, PTR0_FN),
+
+ /* PTS FN */
+ PINMUX_DATA(VIO_CKO_MARK, PTS6_FN),
+
+ PINMUX_DATA(TPUTI2_MARK, PSE9_0, PSE8_1, PTS5_FN),
+
+ PINMUX_DATA(IDEIORDY_MARK, PSE9_1, PSE8_0, PTS5_FN),
+
+ PINMUX_DATA(VIO1_FLD_MARK, PSE9_0, PSE8_0, PTS5_FN),
+ PINMUX_DATA(VIO1_HD_MARK, PSA10_0, PTS4_FN),
+ PINMUX_DATA(VIO1_VD_MARK, PSA9_0, PTS3_FN),
+ PINMUX_DATA(VIO1_CLK_MARK, PSA9_0, PTS2_FN),
+ PINMUX_DATA(VIO1_D7_MARK, PSB7_0, PSB6_0, PTS1_FN),
+ PINMUX_DATA(VIO1_D6_MARK, PSB7_0, PSB6_0, PTS0_FN),
+
+ PINMUX_DATA(SCIF5_SCK_MARK, PSA10_1, PTS4_FN),
+ PINMUX_DATA(SCIF5_RXD_MARK, PSA9_1, PTS3_FN),
+ PINMUX_DATA(SCIF5_TXD_MARK, PSA9_1, PTS2_FN),
+
+ PINMUX_DATA(VIO0_D15_MARK, PSB7_0, PSB6_1, PTS1_FN),
+ PINMUX_DATA(VIO0_D14_MARK, PSB7_0, PSB6_1, PTS0_FN),
+
+ PINMUX_DATA(IDED7_MARK, PSB7_1, PSB6_0, PTS1_FN),
+ PINMUX_DATA(IDED6_MARK, PSB7_1, PSB6_0, PTS0_FN),
+
+ /* PTT FN */
+ PINMUX_DATA(D15_MARK, PTT7_FN),
+ PINMUX_DATA(D14_MARK, PTT6_FN),
+ PINMUX_DATA(D13_MARK, PTT5_FN),
+ PINMUX_DATA(D12_MARK, PTT4_FN),
+ PINMUX_DATA(D11_MARK, PTT3_FN),
+ PINMUX_DATA(D10_MARK, PTT2_FN),
+ PINMUX_DATA(D9_MARK, PTT1_FN),
+ PINMUX_DATA(D8_MARK, PTT0_FN),
+
+ /* PTU FN */
+ PINMUX_DATA(DMAC_DACK0_MARK, PTU7_FN),
+ PINMUX_DATA(DMAC_DREQ0_MARK, PTU6_FN),
+
+ PINMUX_DATA(FSIOASD_MARK, PSE1_0, PTU5_FN),
+ PINMUX_DATA(FSIIABCK_MARK, PSE1_0, PTU4_FN),
+ PINMUX_DATA(FSIIALRCK_MARK, PSE1_0, PTU3_FN),
+ PINMUX_DATA(FSIOABCK_MARK, PSE1_0, PTU2_FN),
+ PINMUX_DATA(FSIOALRCK_MARK, PSE1_0, PTU1_FN),
+ PINMUX_DATA(CLKAUDIOAO_MARK, PSE0_0, PTU0_FN),
+
+ /* PTV FN */
+ PINMUX_DATA(FSIIBSD_MARK, PSD7_0, PSD6_0, PTV7_FN),
+ PINMUX_DATA(FSIOBSD_MARK, PSD7_0, PSD6_0, PTV6_FN),
+ PINMUX_DATA(FSIIBBCK_MARK, PSC15_0, PSC14_0, PTV5_FN),
+ PINMUX_DATA(FSIIBLRCK_MARK, PSC15_0, PSC14_0, PTV4_FN),
+ PINMUX_DATA(FSIOBBCK_MARK, PSC15_0, PSC14_0, PTV3_FN),
+ PINMUX_DATA(FSIOBLRCK_MARK, PSC15_0, PSC14_0, PTV2_FN),
+ PINMUX_DATA(CLKAUDIOBO_MARK, PSE3_0, PSE2_0, PTV1_FN),
+ PINMUX_DATA(FSIIASD_MARK, PSE10_0, PTV0_FN),
+
+ PINMUX_DATA(MSIOF1_SS2_MARK, PSD7_0, PSD6_1, PTV7_FN),
+ PINMUX_DATA(MSIOF1_RSYNC_MARK, PSD7_1, PSD6_0, PTV7_FN),
+ PINMUX_DATA(MSIOF1_SS1_MARK, PSD7_0, PSD6_1, PTV6_FN),
+ PINMUX_DATA(MSIOF1_RSCK_MARK, PSD7_1, PSD6_0, PTV6_FN),
+ PINMUX_DATA(MSIOF1_RXD_MARK, PSC15_0, PSC14_1, PTV5_FN),
+ PINMUX_DATA(MSIOF1_TSYNC_MARK, PSC15_0, PSC14_1, PTV4_FN),
+ PINMUX_DATA(MSIOF1_TSCK_MARK, PSC15_0, PSC14_1, PTV3_FN),
+ PINMUX_DATA(MSIOF1_TXD_MARK, PSC15_0, PSC14_1, PTV2_FN),
+ PINMUX_DATA(MSIOF1_MCK_MARK, PSE3_0, PSE2_1, PTV1_FN),
+
+ /* PTW FN */
+ PINMUX_DATA(MMC_D7_MARK, PSE13_0, PSE12_0, PTW7_FN),
+ PINMUX_DATA(MMC_D6_MARK, PSE13_0, PSE12_0, PTW6_FN),
+ PINMUX_DATA(MMC_D5_MARK, PSE13_0, PSE12_0, PTW5_FN),
+ PINMUX_DATA(MMC_D4_MARK, PSE13_0, PSE12_0, PTW4_FN),
+ PINMUX_DATA(MMC_D3_MARK, PSA13_0, PTW3_FN),
+ PINMUX_DATA(MMC_D2_MARK, PSA13_0, PTW2_FN),
+ PINMUX_DATA(MMC_D1_MARK, PSA13_0, PTW1_FN),
+ PINMUX_DATA(MMC_D0_MARK, PSA13_0, PTW0_FN),
+
+ PINMUX_DATA(SDHI1CD_MARK, PSE13_0, PSE12_1, PTW7_FN),
+ PINMUX_DATA(SDHI1WP_MARK, PSE13_0, PSE12_1, PTW6_FN),
+ PINMUX_DATA(SDHI1D3_MARK, PSE13_0, PSE12_1, PTW5_FN),
+ PINMUX_DATA(SDHI1D2_MARK, PSE13_0, PSE12_1, PTW4_FN),
+ PINMUX_DATA(SDHI1D1_MARK, PSA13_1, PTW3_FN),
+ PINMUX_DATA(SDHI1D0_MARK, PSA13_1, PTW2_FN),
+ PINMUX_DATA(SDHI1CMD_MARK, PSA13_1, PTW1_FN),
+ PINMUX_DATA(SDHI1CLK_MARK, PSA13_1, PTW0_FN),
+
+ PINMUX_DATA(IODACK_MARK, PSE13_1, PSE12_0, PTW7_FN),
+ PINMUX_DATA(IDERST_MARK, PSE13_1, PSE12_0, PTW6_FN),
+ PINMUX_DATA(EXBUF_ENB_MARK, PSE13_1, PSE12_0, PTW5_FN),
+ PINMUX_DATA(DIRECTION_MARK, PSE13_1, PSE12_0, PTW4_FN),
+
+ /* PTX FN */
+ PINMUX_DATA(DMAC_DACK1_MARK, PSA12_0, PTX7_FN),
+ PINMUX_DATA(DMAC_DREQ1_MARK, PSA12_0, PTX6_FN),
+
+ PINMUX_DATA(IRDA_OUT_MARK, PSA12_1, PTX7_FN),
+ PINMUX_DATA(IRDA_IN_MARK, PSA12_1, PTX6_FN),
+
+ PINMUX_DATA(TSIF_TS0_SDAT_MARK, PSC0_0, PTX5_FN),
+ PINMUX_DATA(TSIF_TS0_SCK_MARK, PSC1_0, PTX4_FN),
+ PINMUX_DATA(TSIF_TS0_SDEN_MARK, PSC2_0, PTX3_FN),
+ PINMUX_DATA(TSIF_TS0_SPSYNC_MARK, PTX2_FN),
+
+ PINMUX_DATA(LNKSTA_MARK, PSC0_1, PTX5_FN),
+ PINMUX_DATA(MDIO_MARK, PSC1_1, PTX4_FN),
+ PINMUX_DATA(MDC_MARK, PSC2_1, PTX3_FN),
+
+ PINMUX_DATA(MMC_CLK_MARK, PTX1_FN),
+ PINMUX_DATA(MMC_CMD_MARK, PTX0_FN),
+
+ /* PTY FN */
+ PINMUX_DATA(SDHI0CD_MARK, PTY7_FN),
+ PINMUX_DATA(SDHI0WP_MARK, PTY6_FN),
+ PINMUX_DATA(SDHI0D3_MARK, PTY5_FN),
+ PINMUX_DATA(SDHI0D2_MARK, PTY4_FN),
+ PINMUX_DATA(SDHI0D1_MARK, PTY3_FN),
+ PINMUX_DATA(SDHI0D0_MARK, PTY2_FN),
+ PINMUX_DATA(SDHI0CMD_MARK, PTY1_FN),
+ PINMUX_DATA(SDHI0CLK_MARK, PTY0_FN),
+
+ /* PTZ FN */
+ PINMUX_DATA(INTC_IRQ7_MARK, PSB10_0, PTZ7_FN),
+ PINMUX_DATA(INTC_IRQ6_MARK, PSB11_0, PTZ6_FN),
+ PINMUX_DATA(INTC_IRQ5_MARK, PSB12_0, PTZ5_FN),
+ PINMUX_DATA(INTC_IRQ4_MARK, PSB13_0, PTZ4_FN),
+ PINMUX_DATA(INTC_IRQ3_MARK, PSB14_0, PTZ3_FN),
+ PINMUX_DATA(INTC_IRQ2_MARK, PTZ2_FN),
+ PINMUX_DATA(INTC_IRQ1_MARK, PTZ1_FN),
+ PINMUX_DATA(INTC_IRQ0_MARK, PTZ0_FN),
+
+ PINMUX_DATA(SCIF3_I_CTS_MARK, PSB10_1, PTZ7_FN),
+ PINMUX_DATA(SCIF3_I_RTS_MARK, PSB11_1, PTZ6_FN),
+ PINMUX_DATA(SCIF3_I_SCK_MARK, PSB12_1, PTZ5_FN),
+ PINMUX_DATA(SCIF3_I_RXD_MARK, PSB13_1, PTZ4_FN),
+ PINMUX_DATA(SCIF3_I_TXD_MARK, PSB14_1, PTZ3_FN),
+};
+
+static struct pinmux_gpio pinmux_gpios[] = {
+ /* PTA */
+ PINMUX_GPIO(GPIO_PTA7, PTA7_DATA),
+ PINMUX_GPIO(GPIO_PTA6, PTA6_DATA),
+ PINMUX_GPIO(GPIO_PTA5, PTA5_DATA),
+ PINMUX_GPIO(GPIO_PTA4, PTA4_DATA),
+ PINMUX_GPIO(GPIO_PTA3, PTA3_DATA),
+ PINMUX_GPIO(GPIO_PTA2, PTA2_DATA),
+ PINMUX_GPIO(GPIO_PTA1, PTA1_DATA),
+ PINMUX_GPIO(GPIO_PTA0, PTA0_DATA),
+
+ /* PTB */
+ PINMUX_GPIO(GPIO_PTB7, PTB7_DATA),
+ PINMUX_GPIO(GPIO_PTB6, PTB6_DATA),
+ PINMUX_GPIO(GPIO_PTB5, PTB5_DATA),
+ PINMUX_GPIO(GPIO_PTB4, PTB4_DATA),
+ PINMUX_GPIO(GPIO_PTB3, PTB3_DATA),
+ PINMUX_GPIO(GPIO_PTB2, PTB2_DATA),
+ PINMUX_GPIO(GPIO_PTB1, PTB1_DATA),
+ PINMUX_GPIO(GPIO_PTB0, PTB0_DATA),
+
+ /* PTC */
+ PINMUX_GPIO(GPIO_PTC7, PTC7_DATA),
+ PINMUX_GPIO(GPIO_PTC6, PTC6_DATA),
+ PINMUX_GPIO(GPIO_PTC5, PTC5_DATA),
+ PINMUX_GPIO(GPIO_PTC4, PTC4_DATA),
+ PINMUX_GPIO(GPIO_PTC3, PTC3_DATA),
+ PINMUX_GPIO(GPIO_PTC2, PTC2_DATA),
+ PINMUX_GPIO(GPIO_PTC1, PTC1_DATA),
+ PINMUX_GPIO(GPIO_PTC0, PTC0_DATA),
+
+ /* PTD */
+ PINMUX_GPIO(GPIO_PTD7, PTD7_DATA),
+ PINMUX_GPIO(GPIO_PTD6, PTD6_DATA),
+ PINMUX_GPIO(GPIO_PTD5, PTD5_DATA),
+ PINMUX_GPIO(GPIO_PTD4, PTD4_DATA),
+ PINMUX_GPIO(GPIO_PTD3, PTD3_DATA),
+ PINMUX_GPIO(GPIO_PTD2, PTD2_DATA),
+ PINMUX_GPIO(GPIO_PTD1, PTD1_DATA),
+ PINMUX_GPIO(GPIO_PTD0, PTD0_DATA),
+
+ /* PTE */
+ PINMUX_GPIO(GPIO_PTE7, PTE7_DATA),
+ PINMUX_GPIO(GPIO_PTE6, PTE6_DATA),
+ PINMUX_GPIO(GPIO_PTE5, PTE5_DATA),
+ PINMUX_GPIO(GPIO_PTE4, PTE4_DATA),
+ PINMUX_GPIO(GPIO_PTE3, PTE3_DATA),
+ PINMUX_GPIO(GPIO_PTE2, PTE2_DATA),
+ PINMUX_GPIO(GPIO_PTE1, PTE1_DATA),
+ PINMUX_GPIO(GPIO_PTE0, PTE0_DATA),
+
+ /* PTF */
+ PINMUX_GPIO(GPIO_PTF7, PTF7_DATA),
+ PINMUX_GPIO(GPIO_PTF6, PTF6_DATA),
+ PINMUX_GPIO(GPIO_PTF5, PTF5_DATA),
+ PINMUX_GPIO(GPIO_PTF4, PTF4_DATA),
+ PINMUX_GPIO(GPIO_PTF3, PTF3_DATA),
+ PINMUX_GPIO(GPIO_PTF2, PTF2_DATA),
+ PINMUX_GPIO(GPIO_PTF1, PTF1_DATA),
+ PINMUX_GPIO(GPIO_PTF0, PTF0_DATA),
+
+ /* PTG */
+ PINMUX_GPIO(GPIO_PTG5, PTG5_DATA),
+ PINMUX_GPIO(GPIO_PTG4, PTG4_DATA),
+ PINMUX_GPIO(GPIO_PTG3, PTG3_DATA),
+ PINMUX_GPIO(GPIO_PTG2, PTG2_DATA),
+ PINMUX_GPIO(GPIO_PTG1, PTG1_DATA),
+ PINMUX_GPIO(GPIO_PTG0, PTG0_DATA),
+
+ /* PTH */
+ PINMUX_GPIO(GPIO_PTH7, PTH7_DATA),
+ PINMUX_GPIO(GPIO_PTH6, PTH6_DATA),
+ PINMUX_GPIO(GPIO_PTH5, PTH5_DATA),
+ PINMUX_GPIO(GPIO_PTH4, PTH4_DATA),
+ PINMUX_GPIO(GPIO_PTH3, PTH3_DATA),
+ PINMUX_GPIO(GPIO_PTH2, PTH2_DATA),
+ PINMUX_GPIO(GPIO_PTH1, PTH1_DATA),
+ PINMUX_GPIO(GPIO_PTH0, PTH0_DATA),
+
+ /* PTJ */
+ PINMUX_GPIO(GPIO_PTJ7, PTJ7_DATA),
+ PINMUX_GPIO(GPIO_PTJ6, PTJ6_DATA),
+ PINMUX_GPIO(GPIO_PTJ5, PTJ5_DATA),
+ PINMUX_GPIO(GPIO_PTJ3, PTJ3_DATA),
+ PINMUX_GPIO(GPIO_PTJ2, PTJ2_DATA),
+ PINMUX_GPIO(GPIO_PTJ1, PTJ1_DATA),
+ PINMUX_GPIO(GPIO_PTJ0, PTJ0_DATA),
+
+ /* PTK */
+ PINMUX_GPIO(GPIO_PTK7, PTK7_DATA),
+ PINMUX_GPIO(GPIO_PTK6, PTK6_DATA),
+ PINMUX_GPIO(GPIO_PTK5, PTK5_DATA),
+ PINMUX_GPIO(GPIO_PTK4, PTK4_DATA),
+ PINMUX_GPIO(GPIO_PTK3, PTK3_DATA),
+ PINMUX_GPIO(GPIO_PTK2, PTK2_DATA),
+ PINMUX_GPIO(GPIO_PTK1, PTK1_DATA),
+ PINMUX_GPIO(GPIO_PTK0, PTK0_DATA),
+
+ /* PTL */
+ PINMUX_GPIO(GPIO_PTL7, PTL7_DATA),
+ PINMUX_GPIO(GPIO_PTL6, PTL6_DATA),
+ PINMUX_GPIO(GPIO_PTL5, PTL5_DATA),
+ PINMUX_GPIO(GPIO_PTL4, PTL4_DATA),
+ PINMUX_GPIO(GPIO_PTL3, PTL3_DATA),
+ PINMUX_GPIO(GPIO_PTL2, PTL2_DATA),
+ PINMUX_GPIO(GPIO_PTL1, PTL1_DATA),
+ PINMUX_GPIO(GPIO_PTL0, PTL0_DATA),
+
+ /* PTM */
+ PINMUX_GPIO(GPIO_PTM7, PTM7_DATA),
+ PINMUX_GPIO(GPIO_PTM6, PTM6_DATA),
+ PINMUX_GPIO(GPIO_PTM5, PTM5_DATA),
+ PINMUX_GPIO(GPIO_PTM4, PTM4_DATA),
+ PINMUX_GPIO(GPIO_PTM3, PTM3_DATA),
+ PINMUX_GPIO(GPIO_PTM2, PTM2_DATA),
+ PINMUX_GPIO(GPIO_PTM1, PTM1_DATA),
+ PINMUX_GPIO(GPIO_PTM0, PTM0_DATA),
+
+ /* PTN */
+ PINMUX_GPIO(GPIO_PTN7, PTN7_DATA),
+ PINMUX_GPIO(GPIO_PTN6, PTN6_DATA),
+ PINMUX_GPIO(GPIO_PTN5, PTN5_DATA),
+ PINMUX_GPIO(GPIO_PTN4, PTN4_DATA),
+ PINMUX_GPIO(GPIO_PTN3, PTN3_DATA),
+ PINMUX_GPIO(GPIO_PTN2, PTN2_DATA),
+ PINMUX_GPIO(GPIO_PTN1, PTN1_DATA),
+ PINMUX_GPIO(GPIO_PTN0, PTN0_DATA),
+
+ /* PTQ */
+ PINMUX_GPIO(GPIO_PTQ7, PTQ7_DATA),
+ PINMUX_GPIO(GPIO_PTQ6, PTQ6_DATA),
+ PINMUX_GPIO(GPIO_PTQ5, PTQ5_DATA),
+ PINMUX_GPIO(GPIO_PTQ4, PTQ4_DATA),
+ PINMUX_GPIO(GPIO_PTQ3, PTQ3_DATA),
+ PINMUX_GPIO(GPIO_PTQ2, PTQ2_DATA),
+ PINMUX_GPIO(GPIO_PTQ1, PTQ1_DATA),
+ PINMUX_GPIO(GPIO_PTQ0, PTQ0_DATA),
+
+ /* PTR */
+ PINMUX_GPIO(GPIO_PTR7, PTR7_DATA),
+ PINMUX_GPIO(GPIO_PTR6, PTR6_DATA),
+ PINMUX_GPIO(GPIO_PTR5, PTR5_DATA),
+ PINMUX_GPIO(GPIO_PTR4, PTR4_DATA),
+ PINMUX_GPIO(GPIO_PTR3, PTR3_DATA),
+ PINMUX_GPIO(GPIO_PTR2, PTR2_DATA),
+ PINMUX_GPIO(GPIO_PTR1, PTR1_DATA),
+ PINMUX_GPIO(GPIO_PTR0, PTR0_DATA),
+
+ /* PTS */
+ PINMUX_GPIO(GPIO_PTS6, PTS6_DATA),
+ PINMUX_GPIO(GPIO_PTS5, PTS5_DATA),
+ PINMUX_GPIO(GPIO_PTS4, PTS4_DATA),
+ PINMUX_GPIO(GPIO_PTS3, PTS3_DATA),
+ PINMUX_GPIO(GPIO_PTS2, PTS2_DATA),
+ PINMUX_GPIO(GPIO_PTS1, PTS1_DATA),
+ PINMUX_GPIO(GPIO_PTS0, PTS0_DATA),
+
+ /* PTT */
+ PINMUX_GPIO(GPIO_PTT7, PTT7_DATA),
+ PINMUX_GPIO(GPIO_PTT6, PTT6_DATA),
+ PINMUX_GPIO(GPIO_PTT5, PTT5_DATA),
+ PINMUX_GPIO(GPIO_PTT4, PTT4_DATA),
+ PINMUX_GPIO(GPIO_PTT3, PTT3_DATA),
+ PINMUX_GPIO(GPIO_PTT2, PTT2_DATA),
+ PINMUX_GPIO(GPIO_PTT1, PTT1_DATA),
+ PINMUX_GPIO(GPIO_PTT0, PTT0_DATA),
+
+ /* PTU */
+ PINMUX_GPIO(GPIO_PTU7, PTU7_DATA),
+ PINMUX_GPIO(GPIO_PTU6, PTU6_DATA),
+ PINMUX_GPIO(GPIO_PTU5, PTU5_DATA),
+ PINMUX_GPIO(GPIO_PTU4, PTU4_DATA),
+ PINMUX_GPIO(GPIO_PTU3, PTU3_DATA),
+ PINMUX_GPIO(GPIO_PTU2, PTU2_DATA),
+ PINMUX_GPIO(GPIO_PTU1, PTU1_DATA),
+ PINMUX_GPIO(GPIO_PTU0, PTU0_DATA),
+
+ /* PTV */
+ PINMUX_GPIO(GPIO_PTV7, PTV7_DATA),
+ PINMUX_GPIO(GPIO_PTV6, PTV6_DATA),
+ PINMUX_GPIO(GPIO_PTV5, PTV5_DATA),
+ PINMUX_GPIO(GPIO_PTV4, PTV4_DATA),
+ PINMUX_GPIO(GPIO_PTV3, PTV3_DATA),
+ PINMUX_GPIO(GPIO_PTV2, PTV2_DATA),
+ PINMUX_GPIO(GPIO_PTV1, PTV1_DATA),
+ PINMUX_GPIO(GPIO_PTV0, PTV0_DATA),
+
+ /* PTW */
+ PINMUX_GPIO(GPIO_PTW7, PTW7_DATA),
+ PINMUX_GPIO(GPIO_PTW6, PTW6_DATA),
+ PINMUX_GPIO(GPIO_PTW5, PTW5_DATA),
+ PINMUX_GPIO(GPIO_PTW4, PTW4_DATA),
+ PINMUX_GPIO(GPIO_PTW3, PTW3_DATA),
+ PINMUX_GPIO(GPIO_PTW2, PTW2_DATA),
+ PINMUX_GPIO(GPIO_PTW1, PTW1_DATA),
+ PINMUX_GPIO(GPIO_PTW0, PTW0_DATA),
+
+ /* PTX */
+ PINMUX_GPIO(GPIO_PTX7, PTX7_DATA),
+ PINMUX_GPIO(GPIO_PTX6, PTX6_DATA),
+ PINMUX_GPIO(GPIO_PTX5, PTX5_DATA),
+ PINMUX_GPIO(GPIO_PTX4, PTX4_DATA),
+ PINMUX_GPIO(GPIO_PTX3, PTX3_DATA),
+ PINMUX_GPIO(GPIO_PTX2, PTX2_DATA),
+ PINMUX_GPIO(GPIO_PTX1, PTX1_DATA),
+ PINMUX_GPIO(GPIO_PTX0, PTX0_DATA),
+
+ /* PTY */
+ PINMUX_GPIO(GPIO_PTY7, PTY7_DATA),
+ PINMUX_GPIO(GPIO_PTY6, PTY6_DATA),
+ PINMUX_GPIO(GPIO_PTY5, PTY5_DATA),
+ PINMUX_GPIO(GPIO_PTY4, PTY4_DATA),
+ PINMUX_GPIO(GPIO_PTY3, PTY3_DATA),
+ PINMUX_GPIO(GPIO_PTY2, PTY2_DATA),
+ PINMUX_GPIO(GPIO_PTY1, PTY1_DATA),
+ PINMUX_GPIO(GPIO_PTY0, PTY0_DATA),
+
+ /* PTZ */
+ PINMUX_GPIO(GPIO_PTZ7, PTZ7_DATA),
+ PINMUX_GPIO(GPIO_PTZ6, PTZ6_DATA),
+ PINMUX_GPIO(GPIO_PTZ5, PTZ5_DATA),
+ PINMUX_GPIO(GPIO_PTZ4, PTZ4_DATA),
+ PINMUX_GPIO(GPIO_PTZ3, PTZ3_DATA),
+ PINMUX_GPIO(GPIO_PTZ2, PTZ2_DATA),
+ PINMUX_GPIO(GPIO_PTZ1, PTZ1_DATA),
+ PINMUX_GPIO(GPIO_PTZ0, PTZ0_DATA),
+
+ /* BSC */
+ PINMUX_GPIO(GPIO_FN_D31, D31_MARK),
+ PINMUX_GPIO(GPIO_FN_D30, D30_MARK),
+ PINMUX_GPIO(GPIO_FN_D29, D29_MARK),
+ PINMUX_GPIO(GPIO_FN_D28, D28_MARK),
+ PINMUX_GPIO(GPIO_FN_D27, D27_MARK),
+ PINMUX_GPIO(GPIO_FN_D26, D26_MARK),
+ PINMUX_GPIO(GPIO_FN_D25, D25_MARK),
+ PINMUX_GPIO(GPIO_FN_D24, D24_MARK),
+ PINMUX_GPIO(GPIO_FN_D23, D23_MARK),
+ PINMUX_GPIO(GPIO_FN_D22, D22_MARK),
+ PINMUX_GPIO(GPIO_FN_D21, D21_MARK),
+ PINMUX_GPIO(GPIO_FN_D20, D20_MARK),
+ PINMUX_GPIO(GPIO_FN_D19, D19_MARK),
+ PINMUX_GPIO(GPIO_FN_D18, D18_MARK),
+ PINMUX_GPIO(GPIO_FN_D17, D17_MARK),
+ PINMUX_GPIO(GPIO_FN_D16, D16_MARK),
+ PINMUX_GPIO(GPIO_FN_D15, D15_MARK),
+ PINMUX_GPIO(GPIO_FN_D14, D14_MARK),
+ PINMUX_GPIO(GPIO_FN_D13, D13_MARK),
+ PINMUX_GPIO(GPIO_FN_D12, D12_MARK),
+ PINMUX_GPIO(GPIO_FN_D11, D11_MARK),
+ PINMUX_GPIO(GPIO_FN_D10, D10_MARK),
+ PINMUX_GPIO(GPIO_FN_D9, D9_MARK),
+ PINMUX_GPIO(GPIO_FN_D8, D8_MARK),
+ PINMUX_GPIO(GPIO_FN_D7, D7_MARK),
+ PINMUX_GPIO(GPIO_FN_D6, D6_MARK),
+ PINMUX_GPIO(GPIO_FN_D5, D5_MARK),
+ PINMUX_GPIO(GPIO_FN_D4, D4_MARK),
+ PINMUX_GPIO(GPIO_FN_D3, D3_MARK),
+ PINMUX_GPIO(GPIO_FN_D2, D2_MARK),
+ PINMUX_GPIO(GPIO_FN_D1, D1_MARK),
+ PINMUX_GPIO(GPIO_FN_D0, D0_MARK),
+ PINMUX_GPIO(GPIO_FN_A25, A25_MARK),
+ PINMUX_GPIO(GPIO_FN_A24, A24_MARK),
+ PINMUX_GPIO(GPIO_FN_A23, A23_MARK),
+ PINMUX_GPIO(GPIO_FN_A22, A22_MARK),
+ PINMUX_GPIO(GPIO_FN_CS6B_CE1B, CS6B_CE1B_MARK),
+ PINMUX_GPIO(GPIO_FN_CS6A_CE2B, CS6A_CE2B_MARK),
+ PINMUX_GPIO(GPIO_FN_CS5B_CE1A, CS5B_CE1A_MARK),
+ PINMUX_GPIO(GPIO_FN_CS5A_CE2A, CS5A_CE2A_MARK),
+ PINMUX_GPIO(GPIO_FN_WE3_ICIOWR, WE3_ICIOWR_MARK),
+ PINMUX_GPIO(GPIO_FN_WE2_ICIORD, WE2_ICIORD_MARK),
+ PINMUX_GPIO(GPIO_FN_IOIS16, IOIS16_MARK),
+ PINMUX_GPIO(GPIO_FN_WAIT, WAIT_MARK),
+ PINMUX_GPIO(GPIO_FN_BS, BS_MARK),
+
+ /* KEYSC */
+ PINMUX_GPIO(GPIO_FN_KEYOUT5_IN5, KEYOUT5_IN5_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYOUT4_IN6, KEYOUT4_IN6_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYIN4, KEYIN4_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYIN3, KEYIN3_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYIN2, KEYIN2_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYIN1, KEYIN1_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYIN0, KEYIN0_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYOUT3, KEYOUT3_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYOUT2, KEYOUT2_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYOUT1, KEYOUT1_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYOUT0, KEYOUT0_MARK),
+
+ /* ATAPI */
+ PINMUX_GPIO(GPIO_FN_IDED15, IDED15_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED14, IDED14_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED13, IDED13_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED12, IDED12_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED11, IDED11_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED10, IDED10_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED9, IDED9_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED8, IDED8_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED7, IDED7_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED6, IDED6_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED5, IDED5_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED4, IDED4_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED3, IDED3_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED2, IDED2_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED1, IDED1_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED0, IDED0_MARK),
+ PINMUX_GPIO(GPIO_FN_IDEA2, IDEA2_MARK),
+ PINMUX_GPIO(GPIO_FN_IDEA1, IDEA1_MARK),
+ PINMUX_GPIO(GPIO_FN_IDEA0, IDEA0_MARK),
+ PINMUX_GPIO(GPIO_FN_IDEIOWR, IDEIOWR_MARK),
+ PINMUX_GPIO(GPIO_FN_IODREQ, IODREQ_MARK),
+ PINMUX_GPIO(GPIO_FN_IDECS0, IDECS0_MARK),
+ PINMUX_GPIO(GPIO_FN_IDECS1, IDECS1_MARK),
+ PINMUX_GPIO(GPIO_FN_IDEIORD, IDEIORD_MARK),
+ PINMUX_GPIO(GPIO_FN_DIRECTION, DIRECTION_MARK),
+ PINMUX_GPIO(GPIO_FN_EXBUF_ENB, EXBUF_ENB_MARK),
+ PINMUX_GPIO(GPIO_FN_IDERST, IDERST_MARK),
+ PINMUX_GPIO(GPIO_FN_IODACK, IODACK_MARK),
+ PINMUX_GPIO(GPIO_FN_IDEINT, IDEINT_MARK),
+ PINMUX_GPIO(GPIO_FN_IDEIORDY, IDEIORDY_MARK),
+
+ /* TPU */
+ PINMUX_GPIO(GPIO_FN_TPUTO3, TPUTO3_MARK),
+ PINMUX_GPIO(GPIO_FN_TPUTO2, TPUTO2_MARK),
+ PINMUX_GPIO(GPIO_FN_TPUTO1, TPUTO1_MARK),
+ PINMUX_GPIO(GPIO_FN_TPUTO0, TPUTO0_MARK),
+ PINMUX_GPIO(GPIO_FN_TPUTI3, TPUTI3_MARK),
+ PINMUX_GPIO(GPIO_FN_TPUTI2, TPUTI2_MARK),
+
+ /* LCDC */
+ PINMUX_GPIO(GPIO_FN_LCDD23, LCDD23_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD22, LCDD22_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD21, LCDD21_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD20, LCDD20_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD19, LCDD19_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD18, LCDD18_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD17, LCDD17_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD16, LCDD16_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD15, LCDD15_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD14, LCDD14_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD13, LCDD13_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD12, LCDD12_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD11, LCDD11_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD10, LCDD10_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD9, LCDD9_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD8, LCDD8_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD7, LCDD7_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD6, LCDD6_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD5, LCDD5_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD4, LCDD4_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD3, LCDD3_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD2, LCDD2_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD1, LCDD1_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD0, LCDD0_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDVSYN, LCDVSYN_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDDISP, LCDDISP_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDRS, LCDRS_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDHSYN, LCDHSYN_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDCS, LCDCS_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDDON, LCDDON_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDDCK, LCDDCK_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDWR, LCDWR_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDVEPWC, LCDVEPWC_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDVCPWC, LCDVCPWC_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDRD, LCDRD_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDLCLK, LCDLCLK_MARK),
+
+ /* SCIF0 */
+ PINMUX_GPIO(GPIO_FN_SCIF0_TXD, SCIF0_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_RXD, SCIF0_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_SCK, SCIF0_SCK_MARK),
+
+ /* SCIF1 */
+ PINMUX_GPIO(GPIO_FN_SCIF1_SCK, SCIF1_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF1_RXD, SCIF1_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF1_TXD, SCIF1_TXD_MARK),
+
+ /* SCIF2 */
+ PINMUX_GPIO(GPIO_FN_SCIF2_L_TXD, SCIF2_L_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF2_L_SCK, SCIF2_L_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF2_L_RXD, SCIF2_L_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF2_V_TXD, SCIF2_V_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF2_V_SCK, SCIF2_V_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF2_V_RXD, SCIF2_V_RXD_MARK),
+
+ /* SCIF3 */
+ PINMUX_GPIO(GPIO_FN_SCIF3_V_SCK, SCIF3_V_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF3_V_RXD, SCIF3_V_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF3_V_TXD, SCIF3_V_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF3_V_CTS, SCIF3_V_CTS_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF3_V_RTS, SCIF3_V_RTS_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF3_I_SCK, SCIF3_I_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF3_I_RXD, SCIF3_I_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF3_I_TXD, SCIF3_I_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF3_I_CTS, SCIF3_I_CTS_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF3_I_RTS, SCIF3_I_RTS_MARK),
+
+ /* SCIF4 */
+ PINMUX_GPIO(GPIO_FN_SCIF4_SCK, SCIF4_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF4_RXD, SCIF4_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF4_TXD, SCIF4_TXD_MARK),
+
+ /* SCIF5 */
+ PINMUX_GPIO(GPIO_FN_SCIF5_SCK, SCIF5_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF5_RXD, SCIF5_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF5_TXD, SCIF5_TXD_MARK),
+
+ /* FSI */
+ PINMUX_GPIO(GPIO_FN_FSIMCKB, FSIMCKB_MARK),
+ PINMUX_GPIO(GPIO_FN_FSIMCKA, FSIMCKA_MARK),
+ PINMUX_GPIO(GPIO_FN_FSIOASD, FSIOASD_MARK),
+ PINMUX_GPIO(GPIO_FN_FSIIABCK, FSIIABCK_MARK),
+ PINMUX_GPIO(GPIO_FN_FSIIALRCK, FSIIALRCK_MARK),
+ PINMUX_GPIO(GPIO_FN_FSIOABCK, FSIOABCK_MARK),
+ PINMUX_GPIO(GPIO_FN_FSIOALRCK, FSIOALRCK_MARK),
+ PINMUX_GPIO(GPIO_FN_CLKAUDIOAO, CLKAUDIOAO_MARK),
+ PINMUX_GPIO(GPIO_FN_FSIIBSD, FSIIBSD_MARK),
+ PINMUX_GPIO(GPIO_FN_FSIOBSD, FSIOBSD_MARK),
+ PINMUX_GPIO(GPIO_FN_FSIIBBCK, FSIIBBCK_MARK),
+ PINMUX_GPIO(GPIO_FN_FSIIBLRCK, FSIIBLRCK_MARK),
+ PINMUX_GPIO(GPIO_FN_FSIOBBCK, FSIOBBCK_MARK),
+ PINMUX_GPIO(GPIO_FN_FSIOBLRCK, FSIOBLRCK_MARK),
+ PINMUX_GPIO(GPIO_FN_CLKAUDIOBO, CLKAUDIOBO_MARK),
+ PINMUX_GPIO(GPIO_FN_FSIIASD, FSIIASD_MARK),
+
+ /* AUD */
+ PINMUX_GPIO(GPIO_FN_AUDCK, AUDCK_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDSYNC, AUDSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDATA3, AUDATA3_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDATA2, AUDATA2_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDATA1, AUDATA1_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDATA0, AUDATA0_MARK),
+
+ /* VIO */
+ PINMUX_GPIO(GPIO_FN_VIO_CKO, VIO_CKO_MARK),
+
+ /* VIO0 */
+ PINMUX_GPIO(GPIO_FN_VIO0_D15, VIO0_D15_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO0_D14, VIO0_D14_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO0_D13, VIO0_D13_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO0_D12, VIO0_D12_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO0_D11, VIO0_D11_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO0_D10, VIO0_D10_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO0_D9, VIO0_D9_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO0_D8, VIO0_D8_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO0_D7, VIO0_D7_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO0_D6, VIO0_D6_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO0_D5, VIO0_D5_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO0_D4, VIO0_D4_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO0_D3, VIO0_D3_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO0_D2, VIO0_D2_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO0_D1, VIO0_D1_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO0_D0, VIO0_D0_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO0_VD, VIO0_VD_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO0_CLK, VIO0_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO0_FLD, VIO0_FLD_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO0_HD, VIO0_HD_MARK),
+
+ /* VIO1 */
+ PINMUX_GPIO(GPIO_FN_VIO1_D7, VIO1_D7_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO1_D6, VIO1_D6_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO1_D5, VIO1_D5_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO1_D4, VIO1_D4_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO1_D3, VIO1_D3_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO1_D2, VIO1_D2_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO1_D1, VIO1_D1_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO1_D0, VIO1_D0_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO1_FLD, VIO1_FLD_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO1_HD, VIO1_HD_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO1_VD, VIO1_VD_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO1_CLK, VIO1_CLK_MARK),
+
+ /* Eth */
+ PINMUX_GPIO(GPIO_FN_RMII_RXD0, RMII_RXD0_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII_RXD1, RMII_RXD1_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII_TXD0, RMII_TXD0_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII_TXD1, RMII_TXD1_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII_REF_CLK, RMII_REF_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII_TX_EN, RMII_TX_EN_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII_RX_ER, RMII_RX_ER_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII_CRS_DV, RMII_CRS_DV_MARK),
+ PINMUX_GPIO(GPIO_FN_LNKSTA, LNKSTA_MARK),
+ PINMUX_GPIO(GPIO_FN_MDIO, MDIO_MARK),
+ PINMUX_GPIO(GPIO_FN_MDC, MDC_MARK),
+
+ /* System */
+ PINMUX_GPIO(GPIO_FN_PDSTATUS, PDSTATUS_MARK),
+ PINMUX_GPIO(GPIO_FN_STATUS2, STATUS2_MARK),
+ PINMUX_GPIO(GPIO_FN_STATUS0, STATUS0_MARK),
+
+ /* VOU */
+ PINMUX_GPIO(GPIO_FN_DV_D15, DV_D15_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D14, DV_D14_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D13, DV_D13_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D12, DV_D12_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D11, DV_D11_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D10, DV_D10_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D9, DV_D9_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D8, DV_D8_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D7, DV_D7_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D6, DV_D6_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D5, DV_D5_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D4, DV_D4_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D3, DV_D3_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D2, DV_D2_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D1, DV_D1_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D0, DV_D0_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_CLKI, DV_CLKI_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_CLK, DV_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_VSYNC, DV_VSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_HSYNC, DV_HSYNC_MARK),
+
+ /* MSIOF0 */
+ PINMUX_GPIO(GPIO_FN_MSIOF0_RXD, MSIOF0_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF0_TXD, MSIOF0_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF0_MCK, MSIOF0_MCK_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF0_TSCK, MSIOF0_TSCK_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF0_SS1, MSIOF0_SS1_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF0_SS2, MSIOF0_SS2_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF0_TSYNC, MSIOF0_TSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF0_RSCK, MSIOF0_RSCK_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF0_RSYNC, MSIOF0_RSYNC_MARK),
+
+ /* MSIOF1 */
+ PINMUX_GPIO(GPIO_FN_MSIOF1_RXD, MSIOF1_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF1_TXD, MSIOF1_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF1_MCK, MSIOF1_MCK_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF1_TSCK, MSIOF1_TSCK_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF1_SS1, MSIOF1_SS1_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF1_SS2, MSIOF1_SS2_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF1_TSYNC, MSIOF1_TSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF1_RSCK, MSIOF1_RSCK_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF1_RSYNC, MSIOF1_RSYNC_MARK),
+
+ /* DMAC */
+ PINMUX_GPIO(GPIO_FN_DMAC_DACK0, DMAC_DACK0_MARK),
+ PINMUX_GPIO(GPIO_FN_DMAC_DREQ0, DMAC_DREQ0_MARK),
+ PINMUX_GPIO(GPIO_FN_DMAC_DACK1, DMAC_DACK1_MARK),
+ PINMUX_GPIO(GPIO_FN_DMAC_DREQ1, DMAC_DREQ1_MARK),
+
+ /* SDHI0 */
+ PINMUX_GPIO(GPIO_FN_SDHI0CD, SDHI0CD_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI0WP, SDHI0WP_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI0CMD, SDHI0CMD_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI0CLK, SDHI0CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI0D3, SDHI0D3_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI0D2, SDHI0D2_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI0D1, SDHI0D1_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI0D0, SDHI0D0_MARK),
+
+ /* SDHI1 */
+ PINMUX_GPIO(GPIO_FN_SDHI1CD, SDHI1CD_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI1WP, SDHI1WP_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI1CMD, SDHI1CMD_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI1CLK, SDHI1CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI1D3, SDHI1D3_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI1D2, SDHI1D2_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI1D1, SDHI1D1_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI1D0, SDHI1D0_MARK),
+
+ /* MMC */
+ PINMUX_GPIO(GPIO_FN_MMC_D7, MMC_D7_MARK),
+ PINMUX_GPIO(GPIO_FN_MMC_D6, MMC_D6_MARK),
+ PINMUX_GPIO(GPIO_FN_MMC_D5, MMC_D5_MARK),
+ PINMUX_GPIO(GPIO_FN_MMC_D4, MMC_D4_MARK),
+ PINMUX_GPIO(GPIO_FN_MMC_D3, MMC_D3_MARK),
+ PINMUX_GPIO(GPIO_FN_MMC_D2, MMC_D2_MARK),
+ PINMUX_GPIO(GPIO_FN_MMC_D1, MMC_D1_MARK),
+ PINMUX_GPIO(GPIO_FN_MMC_D0, MMC_D0_MARK),
+ PINMUX_GPIO(GPIO_FN_MMC_CLK, MMC_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_MMC_CMD, MMC_CMD_MARK),
+
+ /* IrDA */
+ PINMUX_GPIO(GPIO_FN_IRDA_OUT, IRDA_OUT_MARK),
+ PINMUX_GPIO(GPIO_FN_IRDA_IN, IRDA_IN_MARK),
+
+ /* TSIF */
+ PINMUX_GPIO(GPIO_FN_TSIF_TS0_SDAT, TSIF_TS0_SDAT_MARK),
+ PINMUX_GPIO(GPIO_FN_TSIF_TS0_SCK, TSIF_TS0_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_TSIF_TS0_SDEN, TSIF_TS0_SDEN_MARK),
+ PINMUX_GPIO(GPIO_FN_TSIF_TS0_SPSYNC, TSIF_TS0_SPSYNC_MARK),
+
+ /* IRQ */
+ PINMUX_GPIO(GPIO_FN_INTC_IRQ7, INTC_IRQ7_MARK),
+ PINMUX_GPIO(GPIO_FN_INTC_IRQ6, INTC_IRQ6_MARK),
+ PINMUX_GPIO(GPIO_FN_INTC_IRQ5, INTC_IRQ5_MARK),
+ PINMUX_GPIO(GPIO_FN_INTC_IRQ4, INTC_IRQ4_MARK),
+ PINMUX_GPIO(GPIO_FN_INTC_IRQ3, INTC_IRQ3_MARK),
+ PINMUX_GPIO(GPIO_FN_INTC_IRQ2, INTC_IRQ2_MARK),
+ PINMUX_GPIO(GPIO_FN_INTC_IRQ1, INTC_IRQ1_MARK),
+ PINMUX_GPIO(GPIO_FN_INTC_IRQ0, INTC_IRQ0_MARK),
+ };
+
+static struct pinmux_cfg_reg pinmux_config_regs[] = {
+ { PINMUX_CFG_REG("PACR", 0xa4050100, 16, 2) {
+ PTA7_FN, PTA7_OUT, PTA7_IN_PU, PTA7_IN,
+ PTA6_FN, PTA6_OUT, PTA6_IN_PU, PTA6_IN,
+ PTA5_FN, PTA5_OUT, PTA5_IN_PU, PTA5_IN,
+ PTA4_FN, PTA4_OUT, PTA4_IN_PU, PTA4_IN,
+ PTA3_FN, PTA3_OUT, PTA3_IN_PU, PTA3_IN,
+ PTA2_FN, PTA2_OUT, PTA2_IN_PU, PTA2_IN,
+ PTA1_FN, PTA1_OUT, PTA1_IN_PU, PTA1_IN,
+ PTA0_FN, PTA0_OUT, PTA0_IN_PU, PTA0_IN }
+ },
+ { PINMUX_CFG_REG("PBCR", 0xa4050102, 16, 2) {
+ PTB7_FN, PTB7_OUT, PTB7_IN_PU, PTB7_IN,
+ PTB6_FN, PTB6_OUT, PTB6_IN_PU, PTB6_IN,
+ PTB5_FN, PTB5_OUT, PTB5_IN_PU, PTB5_IN,
+ PTB4_FN, PTB4_OUT, PTB4_IN_PU, PTB4_IN,
+ PTB3_FN, PTB3_OUT, PTB3_IN_PU, PTB3_IN,
+ PTB2_FN, PTB2_OUT, PTB2_IN_PU, PTB2_IN,
+ PTB1_FN, PTB1_OUT, PTB1_IN_PU, PTB1_IN,
+ PTB0_FN, PTB0_OUT, PTB0_IN_PU, PTB0_IN }
+ },
+ { PINMUX_CFG_REG("PCCR", 0xa4050104, 16, 2) {
+ PTC7_FN, PTC7_OUT, PTC7_IN_PU, PTC7_IN,
+ PTC6_FN, PTC6_OUT, PTC6_IN_PU, PTC6_IN,
+ PTC5_FN, PTC5_OUT, PTC5_IN_PU, PTC5_IN,
+ PTC4_FN, PTC4_OUT, PTC4_IN_PU, PTC4_IN,
+ PTC3_FN, PTC3_OUT, PTC3_IN_PU, PTC3_IN,
+ PTC2_FN, PTC2_OUT, PTC2_IN_PU, PTC2_IN,
+ PTC1_FN, PTC1_OUT, PTC1_IN_PU, PTC1_IN,
+ PTC0_FN, PTC0_OUT, PTC0_IN_PU, PTC0_IN }
+ },
+ { PINMUX_CFG_REG("PDCR", 0xa4050106, 16, 2) {
+ PTD7_FN, PTD7_OUT, PTD7_IN_PU, PTD7_IN,
+ PTD6_FN, PTD6_OUT, PTD6_IN_PU, PTD6_IN,
+ PTD5_FN, PTD5_OUT, PTD5_IN_PU, PTD5_IN,
+ PTD4_FN, PTD4_OUT, PTD4_IN_PU, PTD4_IN,
+ PTD3_FN, PTD3_OUT, PTD3_IN_PU, PTD3_IN,
+ PTD2_FN, PTD2_OUT, PTD2_IN_PU, PTD2_IN,
+ PTD1_FN, PTD1_OUT, PTD1_IN_PU, PTD1_IN,
+ PTD0_FN, PTD0_OUT, PTD0_IN_PU, PTD0_IN }
+ },
+ { PINMUX_CFG_REG("PECR", 0xa4050108, 16, 2) {
+ PTE7_FN, PTE7_OUT, PTE7_IN_PU, PTE7_IN,
+ PTE6_FN, PTE6_OUT, PTE6_IN_PU, PTE6_IN,
+ PTE5_FN, PTE5_OUT, PTE5_IN_PU, PTE5_IN,
+ PTE4_FN, PTE4_OUT, PTE4_IN_PU, PTE4_IN,
+ PTE3_FN, PTE3_OUT, PTE3_IN_PU, PTE3_IN,
+ PTE2_FN, PTE2_OUT, PTE2_IN_PU, PTE2_IN,
+ PTE1_FN, PTE1_OUT, PTE1_IN_PU, PTE1_IN,
+ PTE0_FN, PTE0_OUT, PTE0_IN_PU, PTE0_IN }
+ },
+ { PINMUX_CFG_REG("PFCR", 0xa405010a, 16, 2) {
+ PTF7_FN, PTF7_OUT, PTF7_IN_PU, PTF7_IN,
+ PTF6_FN, PTF6_OUT, PTF6_IN_PU, PTF6_IN,
+ PTF5_FN, PTF5_OUT, PTF5_IN_PU, PTF5_IN,
+ PTF4_FN, PTF4_OUT, PTF4_IN_PU, PTF4_IN,
+ PTF3_FN, PTF3_OUT, PTF3_IN_PU, PTF3_IN,
+ PTF2_FN, PTF2_OUT, PTF2_IN_PU, PTF2_IN,
+ PTF1_FN, PTF1_OUT, PTF1_IN_PU, PTF1_IN,
+ PTF0_FN, PTF0_OUT, PTF0_IN_PU, PTF0_IN }
+ },
+ { PINMUX_CFG_REG("PGCR", 0xa405010c, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PTG5_FN, PTG5_OUT, 0, 0,
+ PTG4_FN, PTG4_OUT, 0, 0,
+ PTG3_FN, PTG3_OUT, 0, 0,
+ PTG2_FN, PTG2_OUT, 0, 0,
+ PTG1_FN, PTG1_OUT, 0, 0,
+ PTG0_FN, PTG0_OUT, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PHCR", 0xa405010e, 16, 2) {
+ PTH7_FN, PTH7_OUT, PTH7_IN_PU, PTH7_IN,
+ PTH6_FN, PTH6_OUT, PTH6_IN_PU, PTH6_IN,
+ PTH5_FN, PTH5_OUT, PTH5_IN_PU, PTH5_IN,
+ PTH4_FN, PTH4_OUT, PTH4_IN_PU, PTH4_IN,
+ PTH3_FN, PTH3_OUT, PTH3_IN_PU, PTH3_IN,
+ PTH2_FN, PTH2_OUT, PTH2_IN_PU, PTH2_IN,
+ PTH1_FN, PTH1_OUT, PTH1_IN_PU, PTH1_IN,
+ PTH0_FN, PTH0_OUT, PTH0_IN_PU, PTH0_IN }
+ },
+ { PINMUX_CFG_REG("PJCR", 0xa4050110, 16, 2) {
+ PTJ7_FN, PTJ7_OUT, 0, 0,
+ PTJ6_FN, PTJ6_OUT, 0, 0,
+ PTJ5_FN, PTJ5_OUT, 0, 0,
+ 0, 0, 0, 0,
+ PTJ3_FN, PTJ3_OUT, PTJ3_IN_PU, PTJ3_IN,
+ PTJ2_FN, PTJ2_OUT, PTJ2_IN_PU, PTJ2_IN,
+ PTJ1_FN, PTJ1_OUT, PTJ1_IN_PU, PTJ1_IN,
+ PTJ0_FN, PTJ0_OUT, PTJ0_IN_PU, PTJ0_IN }
+ },
+ { PINMUX_CFG_REG("PKCR", 0xa4050112, 16, 2) {
+ PTK7_FN, PTK7_OUT, PTK7_IN_PU, PTK7_IN,
+ PTK6_FN, PTK6_OUT, PTK6_IN_PU, PTK6_IN,
+ PTK5_FN, PTK5_OUT, PTK5_IN_PU, PTK5_IN,
+ PTK4_FN, PTK4_OUT, PTK4_IN_PU, PTK4_IN,
+ PTK3_FN, PTK3_OUT, PTK3_IN_PU, PTK3_IN,
+ PTK2_FN, PTK2_OUT, PTK2_IN_PU, PTK2_IN,
+ PTK1_FN, PTK1_OUT, PTK1_IN_PU, PTK1_IN,
+ PTK0_FN, PTK0_OUT, PTK0_IN_PU, PTK0_IN }
+ },
+ { PINMUX_CFG_REG("PLCR", 0xa4050114, 16, 2) {
+ PTL7_FN, PTL7_OUT, PTL7_IN_PU, PTL7_IN,
+ PTL6_FN, PTL6_OUT, PTL6_IN_PU, PTL6_IN,
+ PTL5_FN, PTL5_OUT, PTL5_IN_PU, PTL5_IN,
+ PTL4_FN, PTL4_OUT, PTL4_IN_PU, PTL4_IN,
+ PTL3_FN, PTL3_OUT, PTL3_IN_PU, PTL3_IN,
+ PTL2_FN, PTL2_OUT, PTL2_IN_PU, PTL2_IN,
+ PTL1_FN, PTL1_OUT, PTL1_IN_PU, PTL1_IN,
+ PTL0_FN, PTL0_OUT, PTL0_IN_PU, PTL0_IN }
+ },
+ { PINMUX_CFG_REG("PMCR", 0xa4050116, 16, 2) {
+ PTM7_FN, PTM7_OUT, PTM7_IN_PU, PTM7_IN,
+ PTM6_FN, PTM6_OUT, PTM6_IN_PU, PTM6_IN,
+ PTM5_FN, PTM5_OUT, PTM5_IN_PU, PTM5_IN,
+ PTM4_FN, PTM4_OUT, PTM4_IN_PU, PTM4_IN,
+ PTM3_FN, PTM3_OUT, PTM3_IN_PU, PTM3_IN,
+ PTM2_FN, PTM2_OUT, PTM2_IN_PU, PTM2_IN,
+ PTM1_FN, PTM1_OUT, PTM1_IN_PU, PTM1_IN,
+ PTM0_FN, PTM0_OUT, PTM0_IN_PU, PTM0_IN }
+ },
+ { PINMUX_CFG_REG("PNCR", 0xa4050118, 16, 2) {
+ PTN7_FN, PTN7_OUT, PTN7_IN_PU, PTN7_IN,
+ PTN6_FN, PTN6_OUT, PTN6_IN_PU, PTN6_IN,
+ PTN5_FN, PTN5_OUT, PTN5_IN_PU, PTN5_IN,
+ PTN4_FN, PTN4_OUT, PTN4_IN_PU, PTN4_IN,
+ PTN3_FN, PTN3_OUT, PTN3_IN_PU, PTN3_IN,
+ PTN2_FN, PTN2_OUT, PTN2_IN_PU, PTN2_IN,
+ PTN1_FN, PTN1_OUT, PTN1_IN_PU, PTN1_IN,
+ PTN0_FN, PTN0_OUT, PTN0_IN_PU, PTN0_IN }
+ },
+ { PINMUX_CFG_REG("PQCR", 0xa405011a, 16, 2) {
+ PTQ7_FN, PTQ7_OUT, PTQ7_IN_PU, PTQ7_IN,
+ PTQ6_FN, PTQ6_OUT, PTQ6_IN_PU, PTQ6_IN,
+ PTQ5_FN, PTQ5_OUT, PTQ5_IN_PU, PTQ5_IN,
+ PTQ4_FN, PTQ4_OUT, PTQ4_IN_PU, PTQ4_IN,
+ PTQ3_FN, PTQ3_OUT, PTQ3_IN_PU, PTQ3_IN,
+ PTQ2_FN, PTQ2_OUT, PTQ2_IN_PU, PTQ2_IN,
+ PTQ1_FN, PTQ1_OUT, PTQ1_IN_PU, PTQ1_IN,
+ PTQ0_FN, PTQ0_OUT, PTQ0_IN_PU, PTQ0_IN }
+ },
+ { PINMUX_CFG_REG("PRCR", 0xa405011c, 16, 2) {
+ PTR7_FN, PTR7_OUT, PTR7_IN_PU, PTR7_IN,
+ PTR6_FN, PTR6_OUT, PTR6_IN_PU, PTR6_IN,
+ PTR5_FN, PTR5_OUT, PTR5_IN_PU, PTR5_IN,
+ PTR4_FN, PTR4_OUT, PTR4_IN_PU, PTR4_IN,
+ PTR3_FN, 0, PTR3_IN_PU, PTR3_IN,
+ PTR2_FN, 0, PTR2_IN_PU, PTR2_IN,
+ PTR1_FN, PTR1_OUT, PTR1_IN_PU, PTR1_IN,
+ PTR0_FN, PTR0_OUT, PTR0_IN_PU, PTR0_IN }
+ },
+ { PINMUX_CFG_REG("PSCR", 0xa405011e, 16, 2) {
+ 0, 0, 0, 0,
+ PTS6_FN, PTS6_OUT, PTS6_IN_PU, PTS6_IN,
+ PTS5_FN, PTS5_OUT, PTS5_IN_PU, PTS5_IN,
+ PTS4_FN, PTS4_OUT, PTS4_IN_PU, PTS4_IN,
+ PTS3_FN, PTS3_OUT, PTS3_IN_PU, PTS3_IN,
+ PTS2_FN, PTS2_OUT, PTS2_IN_PU, PTS2_IN,
+ PTS1_FN, PTS1_OUT, PTS1_IN_PU, PTS1_IN,
+ PTS0_FN, PTS0_OUT, PTS0_IN_PU, PTS0_IN }
+ },
+ { PINMUX_CFG_REG("PTCR", 0xa4050140, 16, 2) {
+ PTT7_FN, PTT7_OUT, PTT7_IN_PU, PTT7_IN,
+ PTT6_FN, PTT6_OUT, PTT6_IN_PU, PTT6_IN,
+ PTT5_FN, PTT5_OUT, PTT5_IN_PU, PTT5_IN,
+ PTT4_FN, PTT4_OUT, PTT4_IN_PU, PTT4_IN,
+ PTT3_FN, PTT3_OUT, PTT3_IN_PU, PTT3_IN,
+ PTT2_FN, PTT2_OUT, PTT2_IN_PU, PTT2_IN,
+ PTT1_FN, PTT1_OUT, PTT1_IN_PU, PTT1_IN,
+ PTT0_FN, PTT0_OUT, PTT0_IN_PU, PTT0_IN }
+ },
+ { PINMUX_CFG_REG("PUCR", 0xa4050142, 16, 2) {
+ PTU7_FN, PTU7_OUT, PTU7_IN_PU, PTU7_IN,
+ PTU6_FN, PTU6_OUT, PTU6_IN_PU, PTU6_IN,
+ PTU5_FN, PTU5_OUT, PTU5_IN_PU, PTU5_IN,
+ PTU4_FN, PTU4_OUT, PTU4_IN_PU, PTU4_IN,
+ PTU3_FN, PTU3_OUT, PTU3_IN_PU, PTU3_IN,
+ PTU2_FN, PTU2_OUT, PTU2_IN_PU, PTU2_IN,
+ PTU1_FN, PTU1_OUT, PTU1_IN_PU, PTU1_IN,
+ PTU0_FN, PTU0_OUT, PTU0_IN_PU, PTU0_IN }
+ },
+ { PINMUX_CFG_REG("PVCR", 0xa4050144, 16, 2) {
+ PTV7_FN, PTV7_OUT, PTV7_IN_PU, PTV7_IN,
+ PTV6_FN, PTV6_OUT, PTV6_IN_PU, PTV6_IN,
+ PTV5_FN, PTV5_OUT, PTV5_IN_PU, PTV5_IN,
+ PTV4_FN, PTV4_OUT, PTV4_IN_PU, PTV4_IN,
+ PTV3_FN, PTV3_OUT, PTV3_IN_PU, PTV3_IN,
+ PTV2_FN, PTV2_OUT, PTV2_IN_PU, PTV2_IN,
+ PTV1_FN, PTV1_OUT, PTV1_IN_PU, PTV1_IN,
+ PTV0_FN, PTV0_OUT, PTV0_IN_PU, PTV0_IN }
+ },
+ { PINMUX_CFG_REG("PWCR", 0xa4050146, 16, 2) {
+ PTW7_FN, PTW7_OUT, PTW7_IN_PU, PTW7_IN,
+ PTW6_FN, PTW6_OUT, PTW6_IN_PU, PTW6_IN,
+ PTW5_FN, PTW5_OUT, PTW5_IN_PU, PTW5_IN,
+ PTW4_FN, PTW4_OUT, PTW4_IN_PU, PTW4_IN,
+ PTW3_FN, PTW3_OUT, PTW3_IN_PU, PTW3_IN,
+ PTW2_FN, PTW2_OUT, PTW2_IN_PU, PTW2_IN,
+ PTW1_FN, PTW1_OUT, PTW1_IN_PU, PTW1_IN,
+ PTW0_FN, PTW0_OUT, PTW0_IN_PU, PTW0_IN }
+ },
+ { PINMUX_CFG_REG("PXCR", 0xa4050148, 16, 2) {
+ PTX7_FN, PTX7_OUT, PTX7_IN_PU, PTX7_IN,
+ PTX6_FN, PTX6_OUT, PTX6_IN_PU, PTX6_IN,
+ PTX5_FN, PTX5_OUT, PTX5_IN_PU, PTX5_IN,
+ PTX4_FN, PTX4_OUT, PTX4_IN_PU, PTX4_IN,
+ PTX3_FN, PTX3_OUT, PTX3_IN_PU, PTX3_IN,
+ PTX2_FN, PTX2_OUT, PTX2_IN_PU, PTX2_IN,
+ PTX1_FN, PTX1_OUT, PTX1_IN_PU, PTX1_IN,
+ PTX0_FN, PTX0_OUT, PTX0_IN_PU, PTX0_IN }
+ },
+ { PINMUX_CFG_REG("PYCR", 0xa405014a, 16, 2) {
+ PTY7_FN, PTY7_OUT, PTY7_IN_PU, PTY7_IN,
+ PTY6_FN, PTY6_OUT, PTY6_IN_PU, PTY6_IN,
+ PTY5_FN, PTY5_OUT, PTY5_IN_PU, PTY5_IN,
+ PTY4_FN, PTY4_OUT, PTY4_IN_PU, PTY4_IN,
+ PTY3_FN, PTY3_OUT, PTY3_IN_PU, PTY3_IN,
+ PTY2_FN, PTY2_OUT, PTY2_IN_PU, PTY2_IN,
+ PTY1_FN, PTY1_OUT, PTY1_IN_PU, PTY1_IN,
+ PTY0_FN, PTY0_OUT, PTY0_IN_PU, PTY0_IN }
+ },
+ { PINMUX_CFG_REG("PZCR", 0xa405014c, 16, 2) {
+ PTZ7_FN, PTZ7_OUT, PTZ7_IN_PU, PTZ7_IN,
+ PTZ6_FN, PTZ6_OUT, PTZ6_IN_PU, PTZ6_IN,
+ PTZ5_FN, PTZ5_OUT, PTZ5_IN_PU, PTZ5_IN,
+ PTZ4_FN, PTZ4_OUT, PTZ4_IN_PU, PTZ4_IN,
+ PTZ3_FN, PTZ3_OUT, PTZ3_IN_PU, PTZ3_IN,
+ PTZ2_FN, PTZ2_OUT, PTZ2_IN_PU, PTZ2_IN,
+ PTZ1_FN, PTZ1_OUT, PTZ1_IN_PU, PTZ1_IN,
+ PTZ0_FN, PTZ0_OUT, PTZ0_IN_PU, PTZ0_IN }
+ },
+ { PINMUX_CFG_REG("PSELA", 0xa405014e, 16, 1) {
+ PSA15_0, PSA15_1,
+ PSA14_0, PSA14_1,
+ PSA13_0, PSA13_1,
+ PSA12_0, PSA12_1,
+ 0, 0,
+ PSA10_0, PSA10_1,
+ PSA9_0, PSA9_1,
+ PSA8_0, PSA8_1,
+ PSA7_0, PSA7_1,
+ PSA6_0, PSA6_1,
+ PSA5_0, PSA5_1,
+ 0, 0,
+ PSA3_0, PSA3_1,
+ PSA2_0, PSA2_1,
+ PSA1_0, PSA1_1,
+ PSA0_0, PSA0_1 }
+ },
+ { PINMUX_CFG_REG("PSELB", 0xa4050150, 16, 1) {
+ 0, 0,
+ PSB14_0, PSB14_1,
+ PSB13_0, PSB13_1,
+ PSB12_0, PSB12_1,
+ PSB11_0, PSB11_1,
+ PSB10_0, PSB10_1,
+ PSB9_0, PSB9_1,
+ PSB8_0, PSB8_1,
+ PSB7_0, PSB7_1,
+ PSB6_0, PSB6_1,
+ PSB5_0, PSB5_1,
+ PSB4_0, PSB4_1,
+ PSB3_0, PSB3_1,
+ PSB2_0, PSB2_1,
+ PSB1_0, PSB1_1,
+ PSB0_0, PSB0_1 }
+ },
+ { PINMUX_CFG_REG("PSELC", 0xa4050152, 16, 1) {
+ PSC15_0, PSC15_1,
+ PSC14_0, PSC14_1,
+ PSC13_0, PSC13_1,
+ PSC12_0, PSC12_1,
+ PSC11_0, PSC11_1,
+ PSC10_0, PSC10_1,
+ PSC9_0, PSC9_1,
+ PSC8_0, PSC8_1,
+ PSC7_0, PSC7_1,
+ PSC6_0, PSC6_1,
+ PSC5_0, PSC5_1,
+ PSC4_0, PSC4_1,
+ 0, 0,
+ PSC2_0, PSC2_1,
+ PSC1_0, PSC1_1,
+ PSC0_0, PSC0_1 }
+ },
+ { PINMUX_CFG_REG("PSELD", 0xa4050154, 16, 1) {
+ PSD15_0, PSD15_1,
+ PSD14_0, PSD14_1,
+ PSD13_0, PSD13_1,
+ PSD12_0, PSD12_1,
+ PSD11_0, PSD11_1,
+ PSD10_0, PSD10_1,
+ PSD9_0, PSD9_1,
+ PSD8_0, PSD8_1,
+ PSD7_0, PSD7_1,
+ PSD6_0, PSD6_1,
+ PSD5_0, PSD5_1,
+ PSD4_0, PSD4_1,
+ PSD3_0, PSD3_1,
+ PSD2_0, PSD2_1,
+ PSD1_0, PSD1_1,
+ PSD0_0, PSD0_1 }
+ },
+ { PINMUX_CFG_REG("PSELE", 0xa4050156, 16, 1) {
+ PSE15_0, PSE15_1,
+ PSE14_0, PSE14_1,
+ PSE13_0, PSE13_1,
+ PSE12_0, PSE12_1,
+ PSE11_0, PSE11_1,
+ PSE10_0, PSE10_1,
+ PSE9_0, PSE9_1,
+ PSE8_0, PSE8_1,
+ PSE7_0, PSE7_1,
+ PSE6_0, PSE6_1,
+ PSE5_0, PSE5_1,
+ PSE4_0, PSE4_1,
+ PSE3_0, PSE3_1,
+ PSE2_0, PSE2_1,
+ PSE1_0, PSE1_1,
+ PSE0_0, PSE0_1 }
+ },
+ {}
+};
+
+static struct pinmux_data_reg pinmux_data_regs[] = {
+ { PINMUX_DATA_REG("PADR", 0xa4050120, 8) {
+ PTA7_DATA, PTA6_DATA, PTA5_DATA, PTA4_DATA,
+ PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA }
+ },
+ { PINMUX_DATA_REG("PBDR", 0xa4050122, 8) {
+ PTB7_DATA, PTB6_DATA, PTB5_DATA, PTB4_DATA,
+ PTB3_DATA, PTB2_DATA, PTB1_DATA, PTB0_DATA }
+ },
+ { PINMUX_DATA_REG("PCDR", 0xa4050124, 8) {
+ PTC7_DATA, PTC6_DATA, PTC5_DATA, PTC4_DATA,
+ PTC3_DATA, PTC2_DATA, PTC1_DATA, PTC0_DATA }
+ },
+ { PINMUX_DATA_REG("PDDR", 0xa4050126, 8) {
+ PTD7_DATA, PTD6_DATA, PTD5_DATA, PTD4_DATA,
+ PTD3_DATA, PTD2_DATA, PTD1_DATA, PTD0_DATA }
+ },
+ { PINMUX_DATA_REG("PEDR", 0xa4050128, 8) {
+ PTE7_DATA, PTE6_DATA, PTE5_DATA, PTE4_DATA,
+ PTE3_DATA, PTE2_DATA, PTE1_DATA, PTE0_DATA }
+ },
+ { PINMUX_DATA_REG("PFDR", 0xa405012a, 8) {
+ PTF7_DATA, PTF6_DATA, PTF5_DATA, PTF4_DATA,
+ PTF3_DATA, PTF2_DATA, PTF1_DATA, PTF0_DATA }
+ },
+ { PINMUX_DATA_REG("PGDR", 0xa405012c, 8) {
+ 0, 0, PTG5_DATA, PTG4_DATA,
+ PTG3_DATA, PTG2_DATA, PTG1_DATA, PTG0_DATA }
+ },
+ { PINMUX_DATA_REG("PHDR", 0xa405012e, 8) {
+ PTH7_DATA, PTH6_DATA, PTH5_DATA, PTH4_DATA,
+ PTH3_DATA, PTH2_DATA, PTH1_DATA, PTH0_DATA }
+ },
+ { PINMUX_DATA_REG("PJDR", 0xa4050130, 8) {
+ PTJ7_DATA, PTJ6_DATA, PTJ5_DATA, 0,
+ PTJ3_DATA, PTJ2_DATA, PTJ1_DATA, PTJ0_DATA }
+ },
+ { PINMUX_DATA_REG("PKDR", 0xa4050132, 8) {
+ PTK7_DATA, PTK6_DATA, PTK5_DATA, PTK4_DATA,
+ PTK3_DATA, PTK2_DATA, PTK1_DATA, PTK0_DATA }
+ },
+ { PINMUX_DATA_REG("PLDR", 0xa4050134, 8) {
+ PTL7_DATA, PTL6_DATA, PTL5_DATA, PTL4_DATA,
+ PTL3_DATA, PTL2_DATA, PTL1_DATA, PTL0_DATA }
+ },
+ { PINMUX_DATA_REG("PMDR", 0xa4050136, 8) {
+ PTM7_DATA, PTM6_DATA, PTM5_DATA, PTM4_DATA,
+ PTM3_DATA, PTM2_DATA, PTM1_DATA, PTM0_DATA }
+ },
+ { PINMUX_DATA_REG("PNDR", 0xa4050138, 8) {
+ PTN7_DATA, PTN6_DATA, PTN5_DATA, PTN4_DATA,
+ PTN3_DATA, PTN2_DATA, PTN1_DATA, PTN0_DATA }
+ },
+ { PINMUX_DATA_REG("PQDR", 0xa405013a, 8) {
+ PTQ7_DATA, PTQ6_DATA, PTQ5_DATA, PTQ4_DATA,
+ PTQ3_DATA, PTQ2_DATA, PTQ1_DATA, PTQ0_DATA }
+ },
+ { PINMUX_DATA_REG("PRDR", 0xa405013c, 8) {
+ PTR7_DATA, PTR6_DATA, PTR5_DATA, PTR4_DATA,
+ PTR3_DATA, PTR2_DATA, PTR1_DATA, PTR0_DATA }
+ },
+ { PINMUX_DATA_REG("PSDR", 0xa405013e, 8) {
+ 0, PTS6_DATA, PTS5_DATA, PTS4_DATA,
+ PTS3_DATA, PTS2_DATA, PTS1_DATA, PTS0_DATA }
+ },
+ { PINMUX_DATA_REG("PTDR", 0xa4050160, 8) {
+ PTT7_DATA, PTT6_DATA, PTT5_DATA, PTT4_DATA,
+ PTT3_DATA, PTT2_DATA, PTT1_DATA, PTT0_DATA }
+ },
+ { PINMUX_DATA_REG("PUDR", 0xa4050162, 8) {
+ PTU7_DATA, PTU6_DATA, PTU5_DATA, PTU4_DATA,
+ PTU3_DATA, PTU2_DATA, PTU1_DATA, PTU0_DATA }
+ },
+ { PINMUX_DATA_REG("PVDR", 0xa4050164, 8) {
+ PTV7_DATA, PTV6_DATA, PTV5_DATA, PTV4_DATA,
+ PTV3_DATA, PTV2_DATA, PTV1_DATA, PTV0_DATA }
+ },
+ { PINMUX_DATA_REG("PWDR", 0xa4050166, 8) {
+ PTW7_DATA, PTW6_DATA, PTW5_DATA, PTW4_DATA,
+ PTW3_DATA, PTW2_DATA, PTW1_DATA, PTW0_DATA }
+ },
+ { PINMUX_DATA_REG("PXDR", 0xa4050168, 8) {
+ PTX7_DATA, PTX6_DATA, PTX5_DATA, PTX4_DATA,
+ PTX3_DATA, PTX2_DATA, PTX1_DATA, PTX0_DATA }
+ },
+ { PINMUX_DATA_REG("PYDR", 0xa405016a, 8) {
+ PTY7_DATA, PTY6_DATA, PTY5_DATA, PTY4_DATA,
+ PTY3_DATA, PTY2_DATA, PTY1_DATA, PTY0_DATA }
+ },
+ { PINMUX_DATA_REG("PZDR", 0xa405016c, 8) {
+ PTZ7_DATA, PTZ6_DATA, PTZ5_DATA, PTZ4_DATA,
+ PTZ3_DATA, PTZ2_DATA, PTZ1_DATA, PTZ0_DATA }
+ },
+ { },
+};
+
+static struct pinmux_info sh7724_pinmux_info = {
+ .name = "sh7724_pfc",
+ .reserved_id = PINMUX_RESERVED,
+ .data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END },
+ .input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END },
+ .input_pu = { PINMUX_INPUT_PULLUP_BEGIN, PINMUX_INPUT_PULLUP_END },
+ .output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END },
+ .mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END },
+ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+ .first_gpio = GPIO_PTA7,
+ .last_gpio = GPIO_FN_INTC_IRQ0,
+
+ .gpios = pinmux_gpios,
+ .cfg_regs = pinmux_config_regs,
+ .data_regs = pinmux_data_regs,
+
+ .gpio_data = pinmux_data,
+ .gpio_data_size = ARRAY_SIZE(pinmux_data),
+};
+
+static int __init plat_pinmux_setup(void)
+{
+ return register_pinmux(&sh7724_pinmux_info);
+}
+arch_initcall(plat_pinmux_setup);
diff --git a/arch/sh/kernel/cpu/sh4a/pinmux-sh7757.c b/arch/sh/kernel/cpu/sh4a/pinmux-sh7757.c
new file mode 100644
index 00000000..4c74bd04
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/pinmux-sh7757.c
@@ -0,0 +1,2287 @@
+/*
+ * SH7757 (B0 step) Pinmux
+ *
+ * Copyright (C) 2009-2010 Renesas Solutions Corp.
+ *
+ * Author : Yoshihiro Shimoda <shimoda.yoshihiro@renesas.com>
+ *
+ * Based on SH7723 Pinmux
+ * Copyright (C) 2008 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <cpu/sh7757.h>
+
+enum {
+ PINMUX_RESERVED = 0,
+
+ PINMUX_DATA_BEGIN,
+ PTA7_DATA, PTA6_DATA, PTA5_DATA, PTA4_DATA,
+ PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA,
+ PTB7_DATA, PTB6_DATA, PTB5_DATA, PTB4_DATA,
+ PTB3_DATA, PTB2_DATA, PTB1_DATA, PTB0_DATA,
+ PTC7_DATA, PTC6_DATA, PTC5_DATA, PTC4_DATA,
+ PTC3_DATA, PTC2_DATA, PTC1_DATA, PTC0_DATA,
+ PTD7_DATA, PTD6_DATA, PTD5_DATA, PTD4_DATA,
+ PTD3_DATA, PTD2_DATA, PTD1_DATA, PTD0_DATA,
+ PTE7_DATA, PTE6_DATA, PTE5_DATA, PTE4_DATA,
+ PTE3_DATA, PTE2_DATA, PTE1_DATA, PTE0_DATA,
+ PTF7_DATA, PTF6_DATA, PTF5_DATA, PTF4_DATA,
+ PTF3_DATA, PTF2_DATA, PTF1_DATA, PTF0_DATA,
+ PTG7_DATA, PTG6_DATA, PTG5_DATA, PTG4_DATA,
+ PTG3_DATA, PTG2_DATA, PTG1_DATA, PTG0_DATA,
+ PTH7_DATA, PTH6_DATA, PTH5_DATA, PTH4_DATA,
+ PTH3_DATA, PTH2_DATA, PTH1_DATA, PTH0_DATA,
+ PTI7_DATA, PTI6_DATA, PTI5_DATA, PTI4_DATA,
+ PTI3_DATA, PTI2_DATA, PTI1_DATA, PTI0_DATA,
+ PTJ6_DATA, PTJ5_DATA, PTJ4_DATA,
+ PTJ3_DATA, PTJ2_DATA, PTJ1_DATA, PTJ0_DATA,
+ PTK7_DATA, PTK6_DATA, PTK5_DATA, PTK4_DATA,
+ PTK3_DATA, PTK2_DATA, PTK1_DATA, PTK0_DATA,
+ PTL6_DATA, PTL5_DATA, PTL4_DATA,
+ PTL3_DATA, PTL2_DATA, PTL1_DATA, PTL0_DATA,
+ PTM7_DATA, PTM6_DATA, PTM5_DATA, PTM4_DATA,
+ PTM3_DATA, PTM2_DATA, PTM1_DATA, PTM0_DATA,
+ PTN6_DATA, PTN5_DATA, PTN4_DATA,
+ PTN3_DATA, PTN2_DATA, PTN1_DATA, PTN0_DATA,
+ PTO7_DATA, PTO6_DATA, PTO5_DATA, PTO4_DATA,
+ PTO3_DATA, PTO2_DATA, PTO1_DATA, PTO0_DATA,
+ PTP7_DATA, PTP6_DATA, PTP5_DATA, PTP4_DATA,
+ PTP3_DATA, PTP2_DATA, PTP1_DATA, PTP0_DATA,
+ PTQ6_DATA, PTQ5_DATA, PTQ4_DATA,
+ PTQ3_DATA, PTQ2_DATA, PTQ1_DATA, PTQ0_DATA,
+ PTR7_DATA, PTR6_DATA, PTR5_DATA, PTR4_DATA,
+ PTR3_DATA, PTR2_DATA, PTR1_DATA, PTR0_DATA,
+ PTS7_DATA, PTS6_DATA, PTS5_DATA, PTS4_DATA,
+ PTS3_DATA, PTS2_DATA, PTS1_DATA, PTS0_DATA,
+ PTT7_DATA, PTT6_DATA, PTT5_DATA, PTT4_DATA,
+ PTT3_DATA, PTT2_DATA, PTT1_DATA, PTT0_DATA,
+ PTU7_DATA, PTU6_DATA, PTU5_DATA, PTU4_DATA,
+ PTU3_DATA, PTU2_DATA, PTU1_DATA, PTU0_DATA,
+ PTV7_DATA, PTV6_DATA, PTV5_DATA, PTV4_DATA,
+ PTV3_DATA, PTV2_DATA, PTV1_DATA, PTV0_DATA,
+ PTW7_DATA, PTW6_DATA, PTW5_DATA, PTW4_DATA,
+ PTW3_DATA, PTW2_DATA, PTW1_DATA, PTW0_DATA,
+ PTX7_DATA, PTX6_DATA, PTX5_DATA, PTX4_DATA,
+ PTX3_DATA, PTX2_DATA, PTX1_DATA, PTX0_DATA,
+ PTY7_DATA, PTY6_DATA, PTY5_DATA, PTY4_DATA,
+ PTY3_DATA, PTY2_DATA, PTY1_DATA, PTY0_DATA,
+ PTZ7_DATA, PTZ6_DATA, PTZ5_DATA, PTZ4_DATA,
+ PTZ3_DATA, PTZ2_DATA, PTZ1_DATA, PTZ0_DATA,
+ PINMUX_DATA_END,
+
+ PINMUX_INPUT_BEGIN,
+ PTA7_IN, PTA6_IN, PTA5_IN, PTA4_IN,
+ PTA3_IN, PTA2_IN, PTA1_IN, PTA0_IN,
+ PTB7_IN, PTB6_IN, PTB5_IN, PTB4_IN,
+ PTB3_IN, PTB2_IN, PTB1_IN, PTB0_IN,
+ PTC7_IN, PTC6_IN, PTC5_IN, PTC4_IN,
+ PTC3_IN, PTC2_IN, PTC1_IN, PTC0_IN,
+ PTD7_IN, PTD6_IN, PTD5_IN, PTD4_IN,
+ PTD3_IN, PTD2_IN, PTD1_IN, PTD0_IN,
+ PTE7_IN, PTE6_IN, PTE5_IN, PTE4_IN,
+ PTE3_IN, PTE2_IN, PTE1_IN, PTE0_IN,
+ PTF7_IN, PTF6_IN, PTF5_IN, PTF4_IN,
+ PTF3_IN, PTF2_IN, PTF1_IN, PTF0_IN,
+ PTG7_IN, PTG6_IN, PTG5_IN, PTG4_IN,
+ PTG3_IN, PTG2_IN, PTG1_IN, PTG0_IN,
+ PTH7_IN, PTH6_IN, PTH5_IN, PTH4_IN,
+ PTH3_IN, PTH2_IN, PTH1_IN, PTH0_IN,
+ PTI7_IN, PTI6_IN, PTI5_IN, PTI4_IN,
+ PTI3_IN, PTI2_IN, PTI1_IN, PTI0_IN,
+ PTJ6_IN, PTJ5_IN, PTJ4_IN,
+ PTJ3_IN, PTJ2_IN, PTJ1_IN, PTJ0_IN,
+ PTK7_IN, PTK6_IN, PTK5_IN, PTK4_IN,
+ PTK3_IN, PTK2_IN, PTK1_IN, PTK0_IN,
+ PTL6_IN, PTL5_IN, PTL4_IN,
+ PTL3_IN, PTL2_IN, PTL1_IN, PTL0_IN,
+ PTM7_IN, PTM6_IN, PTM5_IN, PTM4_IN,
+ PTM3_IN, PTM2_IN, PTM1_IN, PTM0_IN,
+ PTN6_IN, PTN5_IN, PTN4_IN,
+ PTN3_IN, PTN2_IN, PTN1_IN, PTN0_IN,
+ PTO7_IN, PTO6_IN, PTO5_IN, PTO4_IN,
+ PTO3_IN, PTO2_IN, PTO1_IN, PTO0_IN,
+ PTP7_IN, PTP6_IN, PTP5_IN, PTP4_IN,
+ PTP3_IN, PTP2_IN, PTP1_IN, PTP0_IN,
+ PTQ6_IN, PTQ5_IN, PTQ4_IN,
+ PTQ3_IN, PTQ2_IN, PTQ1_IN, PTQ0_IN,
+ PTR7_IN, PTR6_IN, PTR5_IN, PTR4_IN,
+ PTR3_IN, PTR2_IN, PTR1_IN, PTR0_IN,
+ PTS7_IN, PTS6_IN, PTS5_IN, PTS4_IN,
+ PTS3_IN, PTS2_IN, PTS1_IN, PTS0_IN,
+ PTT7_IN, PTT6_IN, PTT5_IN, PTT4_IN,
+ PTT3_IN, PTT2_IN, PTT1_IN, PTT0_IN,
+ PTU7_IN, PTU6_IN, PTU5_IN, PTU4_IN,
+ PTU3_IN, PTU2_IN, PTU1_IN, PTU0_IN,
+ PTV7_IN, PTV6_IN, PTV5_IN, PTV4_IN,
+ PTV3_IN, PTV2_IN, PTV1_IN, PTV0_IN,
+ PTW7_IN, PTW6_IN, PTW5_IN, PTW4_IN,
+ PTW3_IN, PTW2_IN, PTW1_IN, PTW0_IN,
+ PTX7_IN, PTX6_IN, PTX5_IN, PTX4_IN,
+ PTX3_IN, PTX2_IN, PTX1_IN, PTX0_IN,
+ PTY7_IN, PTY6_IN, PTY5_IN, PTY4_IN,
+ PTY3_IN, PTY2_IN, PTY1_IN, PTY0_IN,
+ PTZ7_IN, PTZ6_IN, PTZ5_IN, PTZ4_IN,
+ PTZ3_IN, PTZ2_IN, PTZ1_IN, PTZ0_IN,
+ PINMUX_INPUT_END,
+
+ PINMUX_INPUT_PULLUP_BEGIN,
+ PTA7_IN_PU, PTA6_IN_PU, PTA5_IN_PU, PTA4_IN_PU,
+ PTA3_IN_PU, PTA2_IN_PU, PTA1_IN_PU, PTA0_IN_PU,
+ PTD7_IN_PU, PTD6_IN_PU, PTD5_IN_PU, PTD4_IN_PU,
+ PTD3_IN_PU, PTD2_IN_PU, PTD1_IN_PU, PTD0_IN_PU,
+ PTE7_IN_PU, PTE6_IN_PU, PTE5_IN_PU, PTE4_IN_PU,
+ PTE3_IN_PU, PTE2_IN_PU, PTE1_IN_PU, PTE0_IN_PU,
+ PTF7_IN_PU, PTF6_IN_PU, PTF5_IN_PU, PTF4_IN_PU,
+ PTF3_IN_PU, PTF2_IN_PU, PTF1_IN_PU, PTF0_IN_PU,
+ PTG7_IN_PU, PTG6_IN_PU, PTG4_IN_PU,
+ PTH7_IN_PU, PTH6_IN_PU, PTH5_IN_PU, PTH4_IN_PU,
+ PTH3_IN_PU, PTH2_IN_PU, PTH1_IN_PU, PTH0_IN_PU,
+ PTI7_IN_PU, PTI6_IN_PU, PTI4_IN_PU,
+ PTI3_IN_PU, PTI2_IN_PU, PTI1_IN_PU, PTI0_IN_PU,
+ PTJ6_IN_PU, PTJ5_IN_PU, PTJ4_IN_PU,
+ PTJ3_IN_PU, PTJ2_IN_PU, PTJ1_IN_PU, PTJ0_IN_PU,
+ PTK7_IN_PU, PTK6_IN_PU, PTK5_IN_PU, PTK4_IN_PU,
+ PTK3_IN_PU, PTK2_IN_PU, PTK1_IN_PU, PTK0_IN_PU,
+ PTL6_IN_PU, PTL5_IN_PU, PTL4_IN_PU,
+ PTL3_IN_PU, PTL2_IN_PU, PTL1_IN_PU, PTL0_IN_PU,
+ PTM7_IN_PU, PTM6_IN_PU, PTM5_IN_PU, PTM4_IN_PU,
+ PTN4_IN_PU,
+ PTN3_IN_PU, PTN2_IN_PU, PTN1_IN_PU, PTN0_IN_PU,
+ PTO7_IN_PU, PTO6_IN_PU, PTO5_IN_PU, PTO4_IN_PU,
+ PTO3_IN_PU, PTO2_IN_PU, PTO1_IN_PU, PTO0_IN_PU,
+ PTT7_IN_PU, PTT6_IN_PU, PTT5_IN_PU, PTT4_IN_PU,
+ PTT3_IN_PU, PTT2_IN_PU, PTT1_IN_PU, PTT0_IN_PU,
+ PTU7_IN_PU, PTU6_IN_PU, PTU5_IN_PU, PTU4_IN_PU,
+ PTU3_IN_PU, PTU2_IN_PU, PTU1_IN_PU, PTU0_IN_PU,
+ PTV7_IN_PU, PTV6_IN_PU, PTV5_IN_PU, PTV4_IN_PU,
+ PTV3_IN_PU, PTV2_IN_PU,
+ PTW1_IN_PU, PTW0_IN_PU,
+ PTX7_IN_PU, PTX6_IN_PU, PTX5_IN_PU, PTX4_IN_PU,
+ PTX3_IN_PU, PTX2_IN_PU, PTX1_IN_PU, PTX0_IN_PU,
+ PTY7_IN_PU, PTY6_IN_PU, PTY5_IN_PU, PTY4_IN_PU,
+ PTY3_IN_PU, PTY2_IN_PU, PTY1_IN_PU, PTY0_IN_PU,
+ PTZ7_IN_PU, PTZ6_IN_PU, PTZ5_IN_PU, PTZ4_IN_PU,
+ PTZ3_IN_PU, PTZ2_IN_PU, PTZ1_IN_PU, PTZ0_IN_PU,
+ PINMUX_INPUT_PULLUP_END,
+
+ PINMUX_OUTPUT_BEGIN,
+ PTA7_OUT, PTA6_OUT, PTA5_OUT, PTA4_OUT,
+ PTA3_OUT, PTA2_OUT, PTA1_OUT, PTA0_OUT,
+ PTB7_OUT, PTB6_OUT, PTB5_OUT, PTB4_OUT,
+ PTB3_OUT, PTB2_OUT, PTB1_OUT, PTB0_OUT,
+ PTC7_OUT, PTC6_OUT, PTC5_OUT, PTC4_OUT,
+ PTC3_OUT, PTC2_OUT, PTC1_OUT, PTC0_OUT,
+ PTD7_OUT, PTD6_OUT, PTD5_OUT, PTD4_OUT,
+ PTD3_OUT, PTD2_OUT, PTD1_OUT, PTD0_OUT,
+ PTE7_OUT, PTE6_OUT, PTE5_OUT, PTE4_OUT,
+ PTE3_OUT, PTE2_OUT, PTE1_OUT, PTE0_OUT,
+ PTF7_OUT, PTF6_OUT, PTF5_OUT, PTF4_OUT,
+ PTF3_OUT, PTF2_OUT, PTF1_OUT, PTF0_OUT,
+ PTG7_OUT, PTG6_OUT, PTG5_OUT, PTG4_OUT,
+ PTG3_OUT, PTG2_OUT, PTG1_OUT, PTG0_OUT,
+ PTH7_OUT, PTH6_OUT, PTH5_OUT, PTH4_OUT,
+ PTH3_OUT, PTH2_OUT, PTH1_OUT, PTH0_OUT,
+ PTI7_OUT, PTI6_OUT, PTI5_OUT, PTI4_OUT,
+ PTI3_OUT, PTI2_OUT, PTI1_OUT, PTI0_OUT,
+ PTJ6_OUT, PTJ5_OUT, PTJ4_OUT,
+ PTJ3_OUT, PTJ2_OUT, PTJ1_OUT, PTJ0_OUT,
+ PTK7_OUT, PTK6_OUT, PTK5_OUT, PTK4_OUT,
+ PTK3_OUT, PTK2_OUT, PTK1_OUT, PTK0_OUT,
+ PTL6_OUT, PTL5_OUT, PTL4_OUT,
+ PTL3_OUT, PTL2_OUT, PTL1_OUT, PTL0_OUT,
+ PTM7_OUT, PTM6_OUT, PTM5_OUT, PTM4_OUT,
+ PTM3_OUT, PTM2_OUT, PTM1_OUT, PTM0_OUT,
+ PTN6_OUT, PTN5_OUT, PTN4_OUT,
+ PTN3_OUT, PTN2_OUT, PTN1_OUT, PTN0_OUT,
+ PTO7_OUT, PTO6_OUT, PTO5_OUT, PTO4_OUT,
+ PTO3_OUT, PTO2_OUT, PTO1_OUT, PTO0_OUT,
+ PTP7_OUT, PTP6_OUT, PTP5_OUT, PTP4_OUT,
+ PTP3_OUT, PTP2_OUT, PTP1_OUT, PTP0_OUT,
+ PTQ6_OUT, PTQ5_OUT, PTQ4_OUT,
+ PTQ3_OUT, PTQ2_OUT, PTQ1_OUT, PTQ0_OUT,
+ PTR7_OUT, PTR6_OUT, PTR5_OUT, PTR4_OUT,
+ PTR3_OUT, PTR2_OUT, PTR1_OUT, PTR0_OUT,
+ PTS7_OUT, PTS6_OUT, PTS5_OUT, PTS4_OUT,
+ PTS3_OUT, PTS2_OUT, PTS1_OUT, PTS0_OUT,
+ PTT7_OUT, PTT6_OUT, PTT5_OUT, PTT4_OUT,
+ PTT3_OUT, PTT2_OUT, PTT1_OUT, PTT0_OUT,
+ PTU7_OUT, PTU6_OUT, PTU5_OUT, PTU4_OUT,
+ PTU3_OUT, PTU2_OUT, PTU1_OUT, PTU0_OUT,
+ PTV7_OUT, PTV6_OUT, PTV5_OUT, PTV4_OUT,
+ PTV3_OUT, PTV2_OUT, PTV1_OUT, PTV0_OUT,
+ PTW7_OUT, PTW6_OUT, PTW5_OUT, PTW4_OUT,
+ PTW3_OUT, PTW2_OUT, PTW1_OUT, PTW0_OUT,
+ PTX7_OUT, PTX6_OUT, PTX5_OUT, PTX4_OUT,
+ PTX3_OUT, PTX2_OUT, PTX1_OUT, PTX0_OUT,
+ PTY7_OUT, PTY6_OUT, PTY5_OUT, PTY4_OUT,
+ PTY3_OUT, PTY2_OUT, PTY1_OUT, PTY0_OUT,
+ PTZ7_OUT, PTZ6_OUT, PTZ5_OUT, PTZ4_OUT,
+ PTZ3_OUT, PTZ2_OUT, PTZ1_OUT, PTZ0_OUT,
+ PINMUX_OUTPUT_END,
+
+ PINMUX_FUNCTION_BEGIN,
+ PTA7_FN, PTA6_FN, PTA5_FN, PTA4_FN,
+ PTA3_FN, PTA2_FN, PTA1_FN, PTA0_FN,
+ PTB7_FN, PTB6_FN, PTB5_FN, PTB4_FN,
+ PTB3_FN, PTB2_FN, PTB1_FN, PTB0_FN,
+ PTC7_FN, PTC6_FN, PTC5_FN, PTC4_FN,
+ PTC3_FN, PTC2_FN, PTC1_FN, PTC0_FN,
+ PTD7_FN, PTD6_FN, PTD5_FN, PTD4_FN,
+ PTD3_FN, PTD2_FN, PTD1_FN, PTD0_FN,
+ PTE7_FN, PTE6_FN, PTE5_FN, PTE4_FN,
+ PTE3_FN, PTE2_FN, PTE1_FN, PTE0_FN,
+ PTF7_FN, PTF6_FN, PTF5_FN, PTF4_FN,
+ PTF3_FN, PTF2_FN, PTF1_FN, PTF0_FN,
+ PTG7_FN, PTG6_FN, PTG5_FN, PTG4_FN,
+ PTG3_FN, PTG2_FN, PTG1_FN, PTG0_FN,
+ PTH7_FN, PTH6_FN, PTH5_FN, PTH4_FN,
+ PTH3_FN, PTH2_FN, PTH1_FN, PTH0_FN,
+ PTI7_FN, PTI6_FN, PTI5_FN, PTI4_FN,
+ PTI3_FN, PTI2_FN, PTI1_FN, PTI0_FN,
+ PTJ6_FN, PTJ5_FN, PTJ4_FN,
+ PTJ3_FN, PTJ2_FN, PTJ1_FN, PTJ0_FN,
+ PTK7_FN, PTK6_FN, PTK5_FN, PTK4_FN,
+ PTK3_FN, PTK2_FN, PTK1_FN, PTK0_FN,
+ PTL6_FN, PTL5_FN, PTL4_FN,
+ PTL3_FN, PTL2_FN, PTL1_FN, PTL0_FN,
+ PTM7_FN, PTM6_FN, PTM5_FN, PTM4_FN,
+ PTM3_FN, PTM2_FN, PTM1_FN, PTM0_FN,
+ PTN6_FN, PTN5_FN, PTN4_FN,
+ PTN3_FN, PTN2_FN, PTN1_FN, PTN0_FN,
+ PTO7_FN, PTO6_FN, PTO5_FN, PTO4_FN,
+ PTO3_FN, PTO2_FN, PTO1_FN, PTO0_FN,
+ PTP7_FN, PTP6_FN, PTP5_FN, PTP4_FN,
+ PTP3_FN, PTP2_FN, PTP1_FN, PTP0_FN,
+ PTQ6_FN, PTQ5_FN, PTQ4_FN,
+ PTQ3_FN, PTQ2_FN, PTQ1_FN, PTQ0_FN,
+ PTR7_FN, PTR6_FN, PTR5_FN, PTR4_FN,
+ PTR3_FN, PTR2_FN, PTR1_FN, PTR0_FN,
+ PTS7_FN, PTS6_FN, PTS5_FN, PTS4_FN,
+ PTS3_FN, PTS2_FN, PTS1_FN, PTS0_FN,
+ PTT7_FN, PTT6_FN, PTT5_FN, PTT4_FN,
+ PTT3_FN, PTT2_FN, PTT1_FN, PTT0_FN,
+ PTU7_FN, PTU6_FN, PTU5_FN, PTU4_FN,
+ PTU3_FN, PTU2_FN, PTU1_FN, PTU0_FN,
+ PTV7_FN, PTV6_FN, PTV5_FN, PTV4_FN,
+ PTV3_FN, PTV2_FN, PTV1_FN, PTV0_FN,
+ PTW7_FN, PTW6_FN, PTW5_FN, PTW4_FN,
+ PTW3_FN, PTW2_FN, PTW1_FN, PTW0_FN,
+ PTX7_FN, PTX6_FN, PTX5_FN, PTX4_FN,
+ PTX3_FN, PTX2_FN, PTX1_FN, PTX0_FN,
+ PTY7_FN, PTY6_FN, PTY5_FN, PTY4_FN,
+ PTY3_FN, PTY2_FN, PTY1_FN, PTY0_FN,
+ PTZ7_FN, PTZ6_FN, PTZ5_FN, PTZ4_FN,
+ PTZ3_FN, PTZ2_FN, PTZ1_FN, PTZ0_FN,
+
+ PS0_15_FN1, PS0_15_FN2,
+ PS0_14_FN1, PS0_14_FN2,
+ PS0_13_FN1, PS0_13_FN2,
+ PS0_12_FN1, PS0_12_FN2,
+ PS0_11_FN1, PS0_11_FN2,
+ PS0_10_FN1, PS0_10_FN2,
+ PS0_9_FN1, PS0_9_FN2,
+ PS0_8_FN1, PS0_8_FN2,
+ PS0_7_FN1, PS0_7_FN2,
+ PS0_6_FN1, PS0_6_FN2,
+ PS0_5_FN1, PS0_5_FN2,
+ PS0_4_FN1, PS0_4_FN2,
+ PS0_3_FN1, PS0_3_FN2,
+ PS0_2_FN1, PS0_2_FN2,
+
+ PS1_10_FN1, PS1_10_FN2,
+ PS1_9_FN1, PS1_9_FN2,
+ PS1_8_FN1, PS1_8_FN2,
+ PS1_2_FN1, PS1_2_FN2,
+
+ PS2_13_FN1, PS2_13_FN2,
+ PS2_12_FN1, PS2_12_FN2,
+ PS2_7_FN1, PS2_7_FN2,
+ PS2_6_FN1, PS2_6_FN2,
+ PS2_5_FN1, PS2_5_FN2,
+ PS2_4_FN1, PS2_4_FN2,
+ PS2_2_FN1, PS2_2_FN2,
+
+ PS3_15_FN1, PS3_15_FN2,
+ PS3_14_FN1, PS3_14_FN2,
+ PS3_13_FN1, PS3_13_FN2,
+ PS3_12_FN1, PS3_12_FN2,
+ PS3_11_FN1, PS3_11_FN2,
+ PS3_10_FN1, PS3_10_FN2,
+ PS3_9_FN1, PS3_9_FN2,
+ PS3_8_FN1, PS3_8_FN2,
+ PS3_7_FN1, PS3_7_FN2,
+ PS3_2_FN1, PS3_2_FN2,
+ PS3_1_FN1, PS3_1_FN2,
+
+ PS4_14_FN1, PS4_14_FN2,
+ PS4_13_FN1, PS4_13_FN2,
+ PS4_12_FN1, PS4_12_FN2,
+ PS4_10_FN1, PS4_10_FN2,
+ PS4_9_FN1, PS4_9_FN2,
+ PS4_8_FN1, PS4_8_FN2,
+ PS4_4_FN1, PS4_4_FN2,
+ PS4_3_FN1, PS4_3_FN2,
+ PS4_2_FN1, PS4_2_FN2,
+ PS4_1_FN1, PS4_1_FN2,
+ PS4_0_FN1, PS4_0_FN2,
+
+ PS5_11_FN1, PS5_11_FN2,
+ PS5_10_FN1, PS5_10_FN2,
+ PS5_9_FN1, PS5_9_FN2,
+ PS5_8_FN1, PS5_8_FN2,
+ PS5_7_FN1, PS5_7_FN2,
+ PS5_6_FN1, PS5_6_FN2,
+ PS5_5_FN1, PS5_5_FN2,
+ PS5_4_FN1, PS5_4_FN2,
+ PS5_3_FN1, PS5_3_FN2,
+ PS5_2_FN1, PS5_2_FN2,
+
+ PS6_15_FN1, PS6_15_FN2,
+ PS6_14_FN1, PS6_14_FN2,
+ PS6_13_FN1, PS6_13_FN2,
+ PS6_12_FN1, PS6_12_FN2,
+ PS6_11_FN1, PS6_11_FN2,
+ PS6_10_FN1, PS6_10_FN2,
+ PS6_9_FN1, PS6_9_FN2,
+ PS6_8_FN1, PS6_8_FN2,
+ PS6_7_FN1, PS6_7_FN2,
+ PS6_6_FN1, PS6_6_FN2,
+ PS6_5_FN1, PS6_5_FN2,
+ PS6_4_FN1, PS6_4_FN2,
+ PS6_3_FN1, PS6_3_FN2,
+ PS6_2_FN1, PS6_2_FN2,
+ PS6_1_FN1, PS6_1_FN2,
+ PS6_0_FN1, PS6_0_FN2,
+
+ PS7_15_FN1, PS7_15_FN2,
+ PS7_14_FN1, PS7_14_FN2,
+ PS7_13_FN1, PS7_13_FN2,
+ PS7_12_FN1, PS7_12_FN2,
+ PS7_11_FN1, PS7_11_FN2,
+ PS7_10_FN1, PS7_10_FN2,
+ PS7_9_FN1, PS7_9_FN2,
+ PS7_8_FN1, PS7_8_FN2,
+ PS7_7_FN1, PS7_7_FN2,
+ PS7_6_FN1, PS7_6_FN2,
+ PS7_5_FN1, PS7_5_FN2,
+ PS7_4_FN1, PS7_4_FN2,
+
+ PS8_15_FN1, PS8_15_FN2,
+ PS8_14_FN1, PS8_14_FN2,
+ PS8_13_FN1, PS8_13_FN2,
+ PS8_12_FN1, PS8_12_FN2,
+ PS8_11_FN1, PS8_11_FN2,
+ PS8_10_FN1, PS8_10_FN2,
+ PS8_9_FN1, PS8_9_FN2,
+ PS8_8_FN1, PS8_8_FN2,
+ PINMUX_FUNCTION_END,
+
+ PINMUX_MARK_BEGIN,
+ /* PTA (module: LBSC, RGMII) */
+ BS_MARK, RDWR_MARK, WE1_MARK, RDY_MARK,
+ ET0_MDC_MARK, ET0_MDIO_MARK, ET1_MDC_MARK, ET1_MDIO_MARK,
+
+ /* PTB (module: INTC, ONFI, TMU) */
+ IRQ15_MARK, IRQ14_MARK, IRQ13_MARK, IRQ12_MARK,
+ IRQ11_MARK, IRQ10_MARK, IRQ9_MARK, IRQ8_MARK,
+ ON_NRE_MARK, ON_NWE_MARK, ON_NWP_MARK, ON_NCE0_MARK,
+ ON_R_B0_MARK, ON_ALE_MARK, ON_CLE_MARK, TCLK_MARK,
+
+ /* PTC (module: IRQ, PWMU) */
+ IRQ7_MARK, IRQ6_MARK, IRQ5_MARK, IRQ4_MARK,
+ IRQ3_MARK, IRQ2_MARK, IRQ1_MARK, IRQ0_MARK,
+ PWMU0_MARK, PWMU1_MARK, PWMU2_MARK, PWMU3_MARK,
+ PWMU4_MARK, PWMU5_MARK,
+
+ /* PTD (module: SPI0, DMAC) */
+ SP0_MOSI_MARK, SP0_MISO_MARK, SP0_SCK_MARK, SP0_SCK_FB_MARK,
+ SP0_SS0_MARK, SP0_SS1_MARK, SP0_SS2_MARK, SP0_SS3_MARK,
+ DREQ0_MARK, DACK0_MARK, TEND0_MARK,
+
+ /* PTE (module: RMII) */
+ RMII0_CRS_DV_MARK, RMII0_TXD1_MARK,
+ RMII0_TXD0_MARK, RMII0_TXEN_MARK,
+ RMII0_REFCLK_MARK, RMII0_RXD1_MARK,
+ RMII0_RXD0_MARK, RMII0_RX_ER_MARK,
+
+ /* PTF (module: RMII, SerMux) */
+ RMII1_CRS_DV_MARK, RMII1_TXD1_MARK,
+ RMII1_TXD0_MARK, RMII1_TXEN_MARK,
+ RMII1_REFCLK_MARK, RMII1_RXD1_MARK,
+ RMII1_RXD0_MARK, RMII1_RX_ER_MARK,
+ RAC_RI_MARK,
+
+ /* PTG (module: system, LBSC, LPC, WDT, LPC, eMMC) */
+ BOOTFMS_MARK, BOOTWP_MARK, A25_MARK, A24_MARK,
+ SERIRQ_MARK, WDTOVF_MARK, LPCPD_MARK, LDRQ_MARK,
+ MMCCLK_MARK, MMCCMD_MARK,
+
+ /* PTH (module: SPI1, LPC, DMAC, ADC) */
+ SP1_MOSI_MARK, SP1_MISO_MARK, SP1_SCK_MARK, SP1_SCK_FB_MARK,
+ SP1_SS0_MARK, SP1_SS1_MARK, WP_MARK, FMS0_MARK,
+ TEND1_MARK, DREQ1_MARK, DACK1_MARK, ADTRG1_MARK,
+ ADTRG0_MARK,
+
+ /* PTI (module: LBSC, SDHI) */
+ D15_MARK, D14_MARK, D13_MARK, D12_MARK,
+ D11_MARK, D10_MARK, D9_MARK, D8_MARK,
+ SD_WP_MARK, SD_CD_MARK, SD_CLK_MARK, SD_CMD_MARK,
+ SD_D3_MARK, SD_D2_MARK, SD_D1_MARK, SD_D0_MARK,
+
+ /* PTJ (module: SCIF234) */
+ RTS3_MARK, CTS3_MARK, TXD3_MARK, RXD3_MARK,
+ RTS4_MARK, RXD4_MARK, TXD4_MARK,
+
+ /* PTK (module: SERMUX, LBSC, SCIF) */
+ COM2_TXD_MARK, COM2_RXD_MARK, COM2_RTS_MARK, COM2_CTS_MARK,
+ COM2_DTR_MARK, COM2_DSR_MARK, COM2_DCD_MARK, CLKOUT_MARK,
+ SCK2_MARK, SCK4_MARK, SCK3_MARK,
+
+ /* PTL (module: SERMUX, SCIF, LBSC, AUD) */
+ RAC_RXD_MARK, RAC_RTS_MARK, RAC_CTS_MARK, RAC_DTR_MARK,
+ RAC_DSR_MARK, RAC_DCD_MARK, RAC_TXD_MARK, RXD2_MARK,
+ CS5_MARK, CS6_MARK, AUDSYNC_MARK, AUDCK_MARK,
+ TXD2_MARK,
+
+ /* PTM (module: LBSC, IIC) */
+ CS4_MARK, RD_MARK, WE0_MARK, CS0_MARK,
+ SDA6_MARK, SCL6_MARK, SDA7_MARK, SCL7_MARK,
+
+ /* PTN (module: USB, JMC, SGPIO, WDT) */
+ VBUS_EN_MARK, VBUS_OC_MARK, JMCTCK_MARK, JMCTMS_MARK,
+ JMCTDO_MARK, JMCTDI_MARK, JMCTRST_MARK,
+ SGPIO1_CLK_MARK, SGPIO1_LOAD_MARK, SGPIO1_DI_MARK,
+ SGPIO1_DO_MARK, SUB_CLKIN_MARK,
+
+ /* PTO (module: SGPIO, SerMux) */
+ SGPIO0_CLK_MARK, SGPIO0_LOAD_MARK, SGPIO0_DI_MARK,
+ SGPIO0_DO_MARK, SGPIO2_CLK_MARK, SGPIO2_LOAD_MARK,
+ SGPIO2_DI_MARK, SGPIO2_DO_MARK,
+ COM1_TXD_MARK, COM1_RXD_MARK, COM1_RTS_MARK, COM1_CTS_MARK,
+
+ /* PTQ (module: LPC) */
+ LAD3_MARK, LAD2_MARK, LAD1_MARK, LAD0_MARK,
+ LFRAME_MARK, LRESET_MARK, LCLK_MARK,
+
+ /* PTR (module: GRA, IIC) */
+ DDC3_MARK, DDC2_MARK, SDA2_MARK, SCL2_MARK,
+ SDA1_MARK, SCL1_MARK, SDA0_MARK, SCL0_MARK,
+ SDA8_MARK, SCL8_MARK,
+
+ /* PTS (module: GRA, IIC) */
+ DDC1_MARK, DDC0_MARK, SDA5_MARK, SCL5_MARK,
+ SDA4_MARK, SCL4_MARK, SDA3_MARK, SCL3_MARK,
+ SDA9_MARK, SCL9_MARK,
+
+ /* PTT (module: PWMX, AUD) */
+ PWMX7_MARK, PWMX6_MARK, PWMX5_MARK, PWMX4_MARK,
+ PWMX3_MARK, PWMX2_MARK, PWMX1_MARK, PWMX0_MARK,
+ AUDATA3_MARK, AUDATA2_MARK, AUDATA1_MARK, AUDATA0_MARK,
+ STATUS1_MARK, STATUS0_MARK,
+
+ /* PTU (module: LPC, APM) */
+ LGPIO7_MARK, LGPIO6_MARK, LGPIO5_MARK, LGPIO4_MARK,
+ LGPIO3_MARK, LGPIO2_MARK, LGPIO1_MARK, LGPIO0_MARK,
+ APMONCTL_O_MARK, APMPWBTOUT_O_MARK, APMSCI_O_MARK,
+ APMVDDON_MARK, APMSLPBTN_MARK, APMPWRBTN_MARK, APMS5N_MARK,
+ APMS3N_MARK,
+
+ /* PTV (module: LBSC, SerMux, R-SPI, EVC, GRA) */
+ A23_MARK, A22_MARK, A21_MARK, A20_MARK,
+ A19_MARK, A18_MARK, A17_MARK, A16_MARK,
+ COM2_RI_MARK, R_SPI_MOSI_MARK, R_SPI_MISO_MARK,
+ R_SPI_RSPCK_MARK, R_SPI_SSL0_MARK, R_SPI_SSL1_MARK,
+ EVENT7_MARK, EVENT6_MARK, VBIOS_DI_MARK, VBIOS_DO_MARK,
+ VBIOS_CLK_MARK, VBIOS_CS_MARK,
+
+ /* PTW (module: LBSC, EVC, SCIF) */
+ A15_MARK, A14_MARK, A13_MARK, A12_MARK,
+ A11_MARK, A10_MARK, A9_MARK, A8_MARK,
+ EVENT5_MARK, EVENT4_MARK, EVENT3_MARK, EVENT2_MARK,
+ EVENT1_MARK, EVENT0_MARK, CTS4_MARK, CTS2_MARK,
+
+ /* PTX (module: LBSC, SCIF, SIM) */
+ A7_MARK, A6_MARK, A5_MARK, A4_MARK,
+ A3_MARK, A2_MARK, A1_MARK, A0_MARK,
+ RTS2_MARK, SIM_D_MARK, SIM_CLK_MARK, SIM_RST_MARK,
+
+ /* PTY (module: LBSC) */
+ D7_MARK, D6_MARK, D5_MARK, D4_MARK,
+ D3_MARK, D2_MARK, D1_MARK, D0_MARK,
+
+ /* PTZ (module: eMMC, ONFI) */
+ MMCDAT7_MARK, MMCDAT6_MARK, MMCDAT5_MARK, MMCDAT4_MARK,
+ MMCDAT3_MARK, MMCDAT2_MARK, MMCDAT1_MARK, MMCDAT0_MARK,
+ ON_DQ7_MARK, ON_DQ6_MARK, ON_DQ5_MARK, ON_DQ4_MARK,
+ ON_DQ3_MARK, ON_DQ2_MARK, ON_DQ1_MARK, ON_DQ0_MARK,
+
+ PINMUX_MARK_END,
+};
+
+static pinmux_enum_t pinmux_data[] = {
+ /* PTA GPIO */
+ PINMUX_DATA(PTA7_DATA, PTA7_IN, PTA7_OUT),
+ PINMUX_DATA(PTA6_DATA, PTA6_IN, PTA6_OUT),
+ PINMUX_DATA(PTA5_DATA, PTA5_IN, PTA5_OUT),
+ PINMUX_DATA(PTA4_DATA, PTA4_IN, PTA4_OUT),
+ PINMUX_DATA(PTA3_DATA, PTA3_IN, PTA3_OUT),
+ PINMUX_DATA(PTA2_DATA, PTA2_IN, PTA2_OUT),
+ PINMUX_DATA(PTA1_DATA, PTA1_IN, PTA1_OUT),
+ PINMUX_DATA(PTA0_DATA, PTA0_IN, PTA0_OUT),
+
+ /* PTB GPIO */
+ PINMUX_DATA(PTB7_DATA, PTB7_IN, PTB7_OUT),
+ PINMUX_DATA(PTB6_DATA, PTB6_IN, PTB6_OUT),
+ PINMUX_DATA(PTB5_DATA, PTB5_IN, PTB5_OUT),
+ PINMUX_DATA(PTB4_DATA, PTB4_IN, PTB4_OUT),
+ PINMUX_DATA(PTB3_DATA, PTB3_IN, PTB3_OUT),
+ PINMUX_DATA(PTB2_DATA, PTB2_IN, PTB2_OUT),
+ PINMUX_DATA(PTB1_DATA, PTB1_IN, PTB1_OUT),
+ PINMUX_DATA(PTB0_DATA, PTB0_IN, PTB0_OUT),
+
+ /* PTC GPIO */
+ PINMUX_DATA(PTC7_DATA, PTC7_IN, PTC7_OUT),
+ PINMUX_DATA(PTC6_DATA, PTC6_IN, PTC6_OUT),
+ PINMUX_DATA(PTC5_DATA, PTC5_IN, PTC5_OUT),
+ PINMUX_DATA(PTC4_DATA, PTC4_IN, PTC4_OUT),
+ PINMUX_DATA(PTC3_DATA, PTC3_IN, PTC3_OUT),
+ PINMUX_DATA(PTC2_DATA, PTC2_IN, PTC2_OUT),
+ PINMUX_DATA(PTC1_DATA, PTC1_IN, PTC1_OUT),
+ PINMUX_DATA(PTC0_DATA, PTC0_IN, PTC0_OUT),
+
+ /* PTD GPIO */
+ PINMUX_DATA(PTD7_DATA, PTD7_IN, PTD7_OUT),
+ PINMUX_DATA(PTD6_DATA, PTD6_IN, PTD6_OUT),
+ PINMUX_DATA(PTD5_DATA, PTD5_IN, PTD5_OUT),
+ PINMUX_DATA(PTD4_DATA, PTD4_IN, PTD4_OUT),
+ PINMUX_DATA(PTD3_DATA, PTD3_IN, PTD3_OUT),
+ PINMUX_DATA(PTD2_DATA, PTD2_IN, PTD2_OUT),
+ PINMUX_DATA(PTD1_DATA, PTD1_IN, PTD1_OUT),
+ PINMUX_DATA(PTD0_DATA, PTD0_IN, PTD0_OUT),
+
+ /* PTE GPIO */
+ PINMUX_DATA(PTE7_DATA, PTE7_IN, PTE7_OUT),
+ PINMUX_DATA(PTE6_DATA, PTE6_IN, PTE6_OUT),
+ PINMUX_DATA(PTE5_DATA, PTE5_IN, PTE5_OUT),
+ PINMUX_DATA(PTE4_DATA, PTE4_IN, PTE4_OUT),
+ PINMUX_DATA(PTE3_DATA, PTE3_IN, PTE3_OUT),
+ PINMUX_DATA(PTE2_DATA, PTE2_IN, PTE2_OUT),
+ PINMUX_DATA(PTE1_DATA, PTE1_IN, PTE1_OUT),
+ PINMUX_DATA(PTE0_DATA, PTE0_IN, PTE0_OUT),
+
+ /* PTF GPIO */
+ PINMUX_DATA(PTF7_DATA, PTF7_IN, PTF7_OUT),
+ PINMUX_DATA(PTF6_DATA, PTF6_IN, PTF6_OUT),
+ PINMUX_DATA(PTF5_DATA, PTF5_IN, PTF5_OUT),
+ PINMUX_DATA(PTF4_DATA, PTF4_IN, PTF4_OUT),
+ PINMUX_DATA(PTF3_DATA, PTF3_IN, PTF3_OUT),
+ PINMUX_DATA(PTF2_DATA, PTF2_IN, PTF2_OUT),
+ PINMUX_DATA(PTF1_DATA, PTF1_IN, PTF1_OUT),
+ PINMUX_DATA(PTF0_DATA, PTF0_IN, PTF0_OUT),
+
+ /* PTG GPIO */
+ PINMUX_DATA(PTG7_DATA, PTG7_IN, PTG7_OUT),
+ PINMUX_DATA(PTG6_DATA, PTG6_IN, PTG6_OUT),
+ PINMUX_DATA(PTG5_DATA, PTG5_IN, PTG5_OUT),
+ PINMUX_DATA(PTG4_DATA, PTG4_IN, PTG4_OUT),
+ PINMUX_DATA(PTG3_DATA, PTG3_IN, PTG3_OUT),
+ PINMUX_DATA(PTG2_DATA, PTG2_IN, PTG2_OUT),
+ PINMUX_DATA(PTG1_DATA, PTG1_IN, PTG1_OUT),
+ PINMUX_DATA(PTG0_DATA, PTG0_IN, PTG0_OUT),
+
+ /* PTH GPIO */
+ PINMUX_DATA(PTH7_DATA, PTH7_IN, PTH7_OUT),
+ PINMUX_DATA(PTH6_DATA, PTH6_IN, PTH6_OUT),
+ PINMUX_DATA(PTH5_DATA, PTH5_IN, PTH5_OUT),
+ PINMUX_DATA(PTH4_DATA, PTH4_IN, PTH4_OUT),
+ PINMUX_DATA(PTH3_DATA, PTH3_IN, PTH3_OUT),
+ PINMUX_DATA(PTH2_DATA, PTH2_IN, PTH2_OUT),
+ PINMUX_DATA(PTH1_DATA, PTH1_IN, PTH1_OUT),
+ PINMUX_DATA(PTH0_DATA, PTH0_IN, PTH0_OUT),
+
+ /* PTI GPIO */
+ PINMUX_DATA(PTI7_DATA, PTI7_IN, PTI7_OUT),
+ PINMUX_DATA(PTI6_DATA, PTI6_IN, PTI6_OUT),
+ PINMUX_DATA(PTI5_DATA, PTI5_IN, PTI5_OUT),
+ PINMUX_DATA(PTI4_DATA, PTI4_IN, PTI4_OUT),
+ PINMUX_DATA(PTI3_DATA, PTI3_IN, PTI3_OUT),
+ PINMUX_DATA(PTI2_DATA, PTI2_IN, PTI2_OUT),
+ PINMUX_DATA(PTI1_DATA, PTI1_IN, PTI1_OUT),
+ PINMUX_DATA(PTI0_DATA, PTI0_IN, PTI0_OUT),
+
+ /* PTJ GPIO */
+ PINMUX_DATA(PTJ6_DATA, PTJ6_IN, PTJ6_OUT),
+ PINMUX_DATA(PTJ5_DATA, PTJ5_IN, PTJ5_OUT),
+ PINMUX_DATA(PTJ4_DATA, PTJ4_IN, PTJ4_OUT),
+ PINMUX_DATA(PTJ3_DATA, PTJ3_IN, PTJ3_OUT),
+ PINMUX_DATA(PTJ2_DATA, PTJ2_IN, PTJ2_OUT),
+ PINMUX_DATA(PTJ1_DATA, PTJ1_IN, PTJ1_OUT),
+ PINMUX_DATA(PTJ0_DATA, PTJ0_IN, PTJ0_OUT),
+
+ /* PTK GPIO */
+ PINMUX_DATA(PTK7_DATA, PTK7_IN, PTK7_OUT),
+ PINMUX_DATA(PTK6_DATA, PTK6_IN, PTK6_OUT),
+ PINMUX_DATA(PTK5_DATA, PTK5_IN, PTK5_OUT),
+ PINMUX_DATA(PTK4_DATA, PTK4_IN, PTK4_OUT),
+ PINMUX_DATA(PTK3_DATA, PTK3_IN, PTK3_OUT),
+ PINMUX_DATA(PTK2_DATA, PTK2_IN, PTK2_OUT),
+ PINMUX_DATA(PTK1_DATA, PTK1_IN, PTK1_OUT),
+ PINMUX_DATA(PTK0_DATA, PTK0_IN, PTK0_OUT),
+
+ /* PTL GPIO */
+ PINMUX_DATA(PTL6_DATA, PTL6_IN, PTL6_OUT),
+ PINMUX_DATA(PTL5_DATA, PTL5_IN, PTL5_OUT),
+ PINMUX_DATA(PTL4_DATA, PTL4_IN, PTL4_OUT),
+ PINMUX_DATA(PTL3_DATA, PTL3_IN, PTL3_OUT),
+ PINMUX_DATA(PTL2_DATA, PTL2_IN, PTL2_OUT),
+ PINMUX_DATA(PTL1_DATA, PTL1_IN, PTL1_OUT),
+ PINMUX_DATA(PTL0_DATA, PTL0_IN, PTL0_OUT),
+
+ /* PTM GPIO */
+ PINMUX_DATA(PTM6_DATA, PTM6_IN, PTM6_OUT),
+ PINMUX_DATA(PTM5_DATA, PTM5_IN, PTM5_OUT),
+ PINMUX_DATA(PTM4_DATA, PTM4_IN, PTM4_OUT),
+ PINMUX_DATA(PTM3_DATA, PTM3_IN, PTM3_OUT),
+ PINMUX_DATA(PTM2_DATA, PTM2_IN, PTM2_OUT),
+ PINMUX_DATA(PTM1_DATA, PTM1_IN, PTM1_OUT),
+ PINMUX_DATA(PTM0_DATA, PTM0_IN, PTM0_OUT),
+
+ /* PTN GPIO */
+ PINMUX_DATA(PTN6_DATA, PTN6_IN, PTN6_OUT),
+ PINMUX_DATA(PTN5_DATA, PTN5_IN, PTN5_OUT),
+ PINMUX_DATA(PTN4_DATA, PTN4_IN, PTN4_OUT),
+ PINMUX_DATA(PTN3_DATA, PTN3_IN, PTN3_OUT),
+ PINMUX_DATA(PTN2_DATA, PTN2_IN, PTN2_OUT),
+ PINMUX_DATA(PTN1_DATA, PTN1_IN, PTN1_OUT),
+ PINMUX_DATA(PTN0_DATA, PTN0_IN, PTN0_OUT),
+
+ /* PTO GPIO */
+ PINMUX_DATA(PTO7_DATA, PTO7_IN, PTO7_OUT),
+ PINMUX_DATA(PTO6_DATA, PTO6_IN, PTO6_OUT),
+ PINMUX_DATA(PTO5_DATA, PTO5_IN, PTO5_OUT),
+ PINMUX_DATA(PTO4_DATA, PTO4_IN, PTO4_OUT),
+ PINMUX_DATA(PTO3_DATA, PTO3_IN, PTO3_OUT),
+ PINMUX_DATA(PTO2_DATA, PTO2_IN, PTO2_OUT),
+ PINMUX_DATA(PTO1_DATA, PTO1_IN, PTO1_OUT),
+ PINMUX_DATA(PTO0_DATA, PTO0_IN, PTO0_OUT),
+
+ /* PTQ GPIO */
+ PINMUX_DATA(PTQ6_DATA, PTQ6_IN, PTQ6_OUT),
+ PINMUX_DATA(PTQ5_DATA, PTQ5_IN, PTQ5_OUT),
+ PINMUX_DATA(PTQ4_DATA, PTQ4_IN, PTQ4_OUT),
+ PINMUX_DATA(PTQ3_DATA, PTQ3_IN, PTQ3_OUT),
+ PINMUX_DATA(PTQ2_DATA, PTQ2_IN, PTQ2_OUT),
+ PINMUX_DATA(PTQ1_DATA, PTQ1_IN, PTQ1_OUT),
+ PINMUX_DATA(PTQ0_DATA, PTQ0_IN, PTQ0_OUT),
+
+ /* PTR GPIO */
+ PINMUX_DATA(PTR7_DATA, PTR7_IN, PTR7_OUT),
+ PINMUX_DATA(PTR6_DATA, PTR6_IN, PTR6_OUT),
+ PINMUX_DATA(PTR5_DATA, PTR5_IN, PTR5_OUT),
+ PINMUX_DATA(PTR4_DATA, PTR4_IN, PTR4_OUT),
+ PINMUX_DATA(PTR3_DATA, PTR3_IN, PTR3_OUT),
+ PINMUX_DATA(PTR2_DATA, PTR2_IN, PTR2_OUT),
+ PINMUX_DATA(PTR1_DATA, PTR1_IN, PTR1_OUT),
+ PINMUX_DATA(PTR0_DATA, PTR0_IN, PTR0_OUT),
+
+ /* PTS GPIO */
+ PINMUX_DATA(PTS7_DATA, PTS7_IN, PTS7_OUT),
+ PINMUX_DATA(PTS6_DATA, PTS6_IN, PTS6_OUT),
+ PINMUX_DATA(PTS5_DATA, PTS5_IN, PTS5_OUT),
+ PINMUX_DATA(PTS4_DATA, PTS4_IN, PTS4_OUT),
+ PINMUX_DATA(PTS3_DATA, PTS3_IN, PTS3_OUT),
+ PINMUX_DATA(PTS2_DATA, PTS2_IN, PTS2_OUT),
+ PINMUX_DATA(PTS1_DATA, PTS1_IN, PTS1_OUT),
+ PINMUX_DATA(PTS0_DATA, PTS0_IN, PTS0_OUT),
+
+ /* PTT GPIO */
+ PINMUX_DATA(PTT7_DATA, PTT7_IN, PTT7_OUT),
+ PINMUX_DATA(PTT6_DATA, PTT6_IN, PTT6_OUT),
+ PINMUX_DATA(PTT5_DATA, PTT5_IN, PTT5_OUT),
+ PINMUX_DATA(PTT4_DATA, PTT4_IN, PTT4_OUT),
+ PINMUX_DATA(PTT3_DATA, PTT3_IN, PTT3_OUT),
+ PINMUX_DATA(PTT2_DATA, PTT2_IN, PTT2_OUT),
+ PINMUX_DATA(PTT1_DATA, PTT1_IN, PTT1_OUT),
+ PINMUX_DATA(PTT0_DATA, PTT0_IN, PTT0_OUT),
+
+ /* PTU GPIO */
+ PINMUX_DATA(PTU7_DATA, PTU7_IN, PTU7_OUT),
+ PINMUX_DATA(PTU6_DATA, PTU6_IN, PTU6_OUT),
+ PINMUX_DATA(PTU5_DATA, PTU5_IN, PTU5_OUT),
+ PINMUX_DATA(PTU4_DATA, PTU4_IN, PTU4_OUT),
+ PINMUX_DATA(PTU3_DATA, PTU3_IN, PTU3_OUT),
+ PINMUX_DATA(PTU2_DATA, PTU2_IN, PTU2_OUT),
+ PINMUX_DATA(PTU1_DATA, PTU1_IN, PTU1_OUT),
+ PINMUX_DATA(PTU0_DATA, PTU0_IN, PTU0_OUT),
+
+ /* PTV GPIO */
+ PINMUX_DATA(PTV7_DATA, PTV7_IN, PTV7_OUT),
+ PINMUX_DATA(PTV6_DATA, PTV6_IN, PTV6_OUT),
+ PINMUX_DATA(PTV5_DATA, PTV5_IN, PTV5_OUT),
+ PINMUX_DATA(PTV4_DATA, PTV4_IN, PTV4_OUT),
+ PINMUX_DATA(PTV3_DATA, PTV3_IN, PTV3_OUT),
+ PINMUX_DATA(PTV2_DATA, PTV2_IN, PTV2_OUT),
+ PINMUX_DATA(PTV1_DATA, PTV1_IN, PTV1_OUT),
+ PINMUX_DATA(PTV0_DATA, PTV0_IN, PTV0_OUT),
+
+ /* PTW GPIO */
+ PINMUX_DATA(PTW7_DATA, PTW7_IN, PTW7_OUT),
+ PINMUX_DATA(PTW6_DATA, PTW6_IN, PTW6_OUT),
+ PINMUX_DATA(PTW5_DATA, PTW5_IN, PTW5_OUT),
+ PINMUX_DATA(PTW4_DATA, PTW4_IN, PTW4_OUT),
+ PINMUX_DATA(PTW3_DATA, PTW3_IN, PTW3_OUT),
+ PINMUX_DATA(PTW2_DATA, PTW2_IN, PTW2_OUT),
+ PINMUX_DATA(PTW1_DATA, PTW1_IN, PTW1_OUT),
+ PINMUX_DATA(PTW0_DATA, PTW0_IN, PTW0_OUT),
+
+ /* PTX GPIO */
+ PINMUX_DATA(PTX7_DATA, PTX7_IN, PTX7_OUT),
+ PINMUX_DATA(PTX6_DATA, PTX6_IN, PTX6_OUT),
+ PINMUX_DATA(PTX5_DATA, PTX5_IN, PTX5_OUT),
+ PINMUX_DATA(PTX4_DATA, PTX4_IN, PTX4_OUT),
+ PINMUX_DATA(PTX3_DATA, PTX3_IN, PTX3_OUT),
+ PINMUX_DATA(PTX2_DATA, PTX2_IN, PTX2_OUT),
+ PINMUX_DATA(PTX1_DATA, PTX1_IN, PTX1_OUT),
+ PINMUX_DATA(PTX0_DATA, PTX0_IN, PTX0_OUT),
+
+ /* PTY GPIO */
+ PINMUX_DATA(PTY7_DATA, PTY7_IN, PTY7_OUT),
+ PINMUX_DATA(PTY6_DATA, PTY6_IN, PTY6_OUT),
+ PINMUX_DATA(PTY5_DATA, PTY5_IN, PTY5_OUT),
+ PINMUX_DATA(PTY4_DATA, PTY4_IN, PTY4_OUT),
+ PINMUX_DATA(PTY3_DATA, PTY3_IN, PTY3_OUT),
+ PINMUX_DATA(PTY2_DATA, PTY2_IN, PTY2_OUT),
+ PINMUX_DATA(PTY1_DATA, PTY1_IN, PTY1_OUT),
+ PINMUX_DATA(PTY0_DATA, PTY0_IN, PTY0_OUT),
+
+ /* PTZ GPIO */
+ PINMUX_DATA(PTZ7_DATA, PTZ7_IN, PTZ7_OUT),
+ PINMUX_DATA(PTZ6_DATA, PTZ6_IN, PTZ6_OUT),
+ PINMUX_DATA(PTZ5_DATA, PTZ5_IN, PTZ5_OUT),
+ PINMUX_DATA(PTZ4_DATA, PTZ4_IN, PTZ4_OUT),
+ PINMUX_DATA(PTZ3_DATA, PTZ3_IN, PTZ3_OUT),
+ PINMUX_DATA(PTZ2_DATA, PTZ2_IN, PTZ2_OUT),
+ PINMUX_DATA(PTZ1_DATA, PTZ1_IN, PTZ1_OUT),
+ PINMUX_DATA(PTZ0_DATA, PTZ0_IN, PTZ0_OUT),
+
+ /* PTA FN */
+ PINMUX_DATA(BS_MARK, PTA7_FN),
+ PINMUX_DATA(RDWR_MARK, PTA6_FN),
+ PINMUX_DATA(WE1_MARK, PTA5_FN),
+ PINMUX_DATA(RDY_MARK, PTA4_FN),
+ PINMUX_DATA(ET0_MDC_MARK, PTA3_FN),
+ PINMUX_DATA(ET0_MDIO_MARK, PTA2_FN),
+ PINMUX_DATA(ET1_MDC_MARK, PTA1_FN),
+ PINMUX_DATA(ET1_MDIO_MARK, PTA0_FN),
+
+ /* PTB FN */
+ PINMUX_DATA(IRQ15_MARK, PS0_15_FN1, PTB7_FN),
+ PINMUX_DATA(ON_NRE_MARK, PS0_15_FN2, PTB7_FN),
+ PINMUX_DATA(IRQ14_MARK, PS0_14_FN1, PTB6_FN),
+ PINMUX_DATA(ON_NWE_MARK, PS0_14_FN2, PTB6_FN),
+ PINMUX_DATA(IRQ13_MARK, PS0_13_FN1, PTB5_FN),
+ PINMUX_DATA(ON_NWP_MARK, PS0_13_FN2, PTB5_FN),
+ PINMUX_DATA(IRQ12_MARK, PS0_12_FN1, PTB4_FN),
+ PINMUX_DATA(ON_NCE0_MARK, PS0_12_FN2, PTB4_FN),
+ PINMUX_DATA(IRQ11_MARK, PS0_11_FN1, PTB3_FN),
+ PINMUX_DATA(ON_R_B0_MARK, PS0_11_FN2, PTB3_FN),
+ PINMUX_DATA(IRQ10_MARK, PS0_10_FN1, PTB2_FN),
+ PINMUX_DATA(ON_ALE_MARK, PS0_10_FN2, PTB2_FN),
+ PINMUX_DATA(IRQ9_MARK, PS0_9_FN1, PTB1_FN),
+ PINMUX_DATA(ON_CLE_MARK, PS0_9_FN2, PTB1_FN),
+ PINMUX_DATA(IRQ8_MARK, PS0_8_FN1, PTB0_FN),
+ PINMUX_DATA(TCLK_MARK, PS0_8_FN2, PTB0_FN),
+
+ /* PTC FN */
+ PINMUX_DATA(IRQ7_MARK, PS0_7_FN1, PTC7_FN),
+ PINMUX_DATA(PWMU0_MARK, PS0_7_FN2, PTC7_FN),
+ PINMUX_DATA(IRQ6_MARK, PS0_6_FN1, PTC6_FN),
+ PINMUX_DATA(PWMU1_MARK, PS0_6_FN2, PTC6_FN),
+ PINMUX_DATA(IRQ5_MARK, PS0_5_FN1, PTC5_FN),
+ PINMUX_DATA(PWMU2_MARK, PS0_5_FN2, PTC5_FN),
+ PINMUX_DATA(IRQ4_MARK, PS0_4_FN1, PTC4_FN),
+ PINMUX_DATA(PWMU3_MARK, PS0_4_FN2, PTC4_FN),
+ PINMUX_DATA(IRQ3_MARK, PS0_3_FN1, PTC3_FN),
+ PINMUX_DATA(PWMU4_MARK, PS0_3_FN2, PTC3_FN),
+ PINMUX_DATA(IRQ2_MARK, PS0_2_FN1, PTC2_FN),
+ PINMUX_DATA(PWMU5_MARK, PS0_2_FN2, PTC2_FN),
+ PINMUX_DATA(IRQ1_MARK, PTC1_FN),
+ PINMUX_DATA(IRQ0_MARK, PTC0_FN),
+
+ /* PTD FN */
+ PINMUX_DATA(SP0_MOSI_MARK, PTD7_FN),
+ PINMUX_DATA(SP0_MISO_MARK, PTD6_FN),
+ PINMUX_DATA(SP0_SCK_MARK, PTD5_FN),
+ PINMUX_DATA(SP0_SCK_FB_MARK, PTD4_FN),
+ PINMUX_DATA(SP0_SS0_MARK, PTD3_FN),
+ PINMUX_DATA(SP0_SS1_MARK, PS1_10_FN1, PTD2_FN),
+ PINMUX_DATA(DREQ0_MARK, PS1_10_FN2, PTD2_FN),
+ PINMUX_DATA(SP0_SS2_MARK, PS1_9_FN1, PTD1_FN),
+ PINMUX_DATA(DACK0_MARK, PS1_9_FN2, PTD1_FN),
+ PINMUX_DATA(SP0_SS3_MARK, PS1_8_FN1, PTD0_FN),
+ PINMUX_DATA(TEND0_MARK, PS1_8_FN2, PTD0_FN),
+
+ /* PTE FN */
+ PINMUX_DATA(RMII0_CRS_DV_MARK, PTE7_FN),
+ PINMUX_DATA(RMII0_TXD1_MARK, PTE6_FN),
+ PINMUX_DATA(RMII0_TXD0_MARK, PTE5_FN),
+ PINMUX_DATA(RMII0_TXEN_MARK, PTE4_FN),
+ PINMUX_DATA(RMII0_REFCLK_MARK, PTE3_FN),
+ PINMUX_DATA(RMII0_RXD1_MARK, PTE2_FN),
+ PINMUX_DATA(RMII0_RXD0_MARK, PTE1_FN),
+ PINMUX_DATA(RMII0_RX_ER_MARK, PTE0_FN),
+
+ /* PTF FN */
+ PINMUX_DATA(RMII1_CRS_DV_MARK, PTF7_FN),
+ PINMUX_DATA(RMII1_TXD1_MARK, PTF6_FN),
+ PINMUX_DATA(RMII1_TXD0_MARK, PTF5_FN),
+ PINMUX_DATA(RMII1_TXEN_MARK, PTF4_FN),
+ PINMUX_DATA(RMII1_REFCLK_MARK, PTF3_FN),
+ PINMUX_DATA(RMII1_RXD1_MARK, PS1_2_FN1, PTF2_FN),
+ PINMUX_DATA(RAC_RI_MARK, PS1_2_FN2, PTF2_FN),
+ PINMUX_DATA(RMII1_RXD0_MARK, PTF1_FN),
+ PINMUX_DATA(RMII1_RX_ER_MARK, PTF0_FN),
+
+ /* PTG FN */
+ PINMUX_DATA(BOOTFMS_MARK, PTG7_FN),
+ PINMUX_DATA(BOOTWP_MARK, PTG6_FN),
+ PINMUX_DATA(A25_MARK, PS2_13_FN1, PTG5_FN),
+ PINMUX_DATA(MMCCLK_MARK, PS2_13_FN2, PTG5_FN),
+ PINMUX_DATA(A24_MARK, PS2_12_FN1, PTG4_FN),
+ PINMUX_DATA(MMCCMD_MARK, PS2_12_FN2, PTG4_FN),
+ PINMUX_DATA(SERIRQ_MARK, PTG3_FN),
+ PINMUX_DATA(WDTOVF_MARK, PTG2_FN),
+ PINMUX_DATA(LPCPD_MARK, PTG1_FN),
+ PINMUX_DATA(LDRQ_MARK, PTG0_FN),
+
+ /* PTH FN */
+ PINMUX_DATA(SP1_MOSI_MARK, PS2_7_FN1, PTH7_FN),
+ PINMUX_DATA(TEND1_MARK, PS2_7_FN2, PTH7_FN),
+ PINMUX_DATA(SP1_MISO_MARK, PS2_6_FN1, PTH6_FN),
+ PINMUX_DATA(DREQ1_MARK, PS2_6_FN2, PTH6_FN),
+ PINMUX_DATA(SP1_SCK_MARK, PS2_5_FN1, PTH5_FN),
+ PINMUX_DATA(DACK1_MARK, PS2_5_FN2, PTH5_FN),
+ PINMUX_DATA(SP1_SCK_FB_MARK, PS2_4_FN1, PTH4_FN),
+ PINMUX_DATA(ADTRG1_MARK, PS2_4_FN2, PTH4_FN),
+ PINMUX_DATA(SP1_SS0_MARK, PTH3_FN),
+ PINMUX_DATA(SP1_SS1_MARK, PS2_2_FN1, PTH2_FN),
+ PINMUX_DATA(ADTRG0_MARK, PS2_2_FN2, PTH2_FN),
+ PINMUX_DATA(WP_MARK, PTH1_FN),
+ PINMUX_DATA(FMS0_MARK, PTH0_FN),
+
+ /* PTI FN */
+ PINMUX_DATA(D15_MARK, PS3_15_FN1, PTI7_FN),
+ PINMUX_DATA(SD_WP_MARK, PS3_15_FN2, PTI7_FN),
+ PINMUX_DATA(D14_MARK, PS3_14_FN1, PTI6_FN),
+ PINMUX_DATA(SD_CD_MARK, PS3_14_FN2, PTI6_FN),
+ PINMUX_DATA(D13_MARK, PS3_13_FN1, PTI5_FN),
+ PINMUX_DATA(SD_CLK_MARK, PS3_13_FN2, PTI5_FN),
+ PINMUX_DATA(D12_MARK, PS3_12_FN1, PTI4_FN),
+ PINMUX_DATA(SD_CMD_MARK, PS3_12_FN2, PTI4_FN),
+ PINMUX_DATA(D11_MARK, PS3_11_FN1, PTI3_FN),
+ PINMUX_DATA(SD_D3_MARK, PS3_11_FN2, PTI3_FN),
+ PINMUX_DATA(D10_MARK, PS3_10_FN1, PTI2_FN),
+ PINMUX_DATA(SD_D2_MARK, PS3_10_FN2, PTI2_FN),
+ PINMUX_DATA(D9_MARK, PS3_9_FN1, PTI1_FN),
+ PINMUX_DATA(SD_D1_MARK, PS3_9_FN2, PTI1_FN),
+ PINMUX_DATA(D8_MARK, PS3_8_FN1, PTI0_FN),
+ PINMUX_DATA(SD_D0_MARK, PS3_8_FN2, PTI0_FN),
+
+ /* PTJ FN */
+ PINMUX_DATA(RTS3_MARK, PTJ6_FN),
+ PINMUX_DATA(CTS3_MARK, PTJ5_FN),
+ PINMUX_DATA(TXD3_MARK, PTJ4_FN),
+ PINMUX_DATA(RXD3_MARK, PTJ3_FN),
+ PINMUX_DATA(RTS4_MARK, PTJ2_FN),
+ PINMUX_DATA(RXD4_MARK, PTJ1_FN),
+ PINMUX_DATA(TXD4_MARK, PTJ0_FN),
+
+ /* PTK FN */
+ PINMUX_DATA(COM2_TXD_MARK, PS3_7_FN1, PTK7_FN),
+ PINMUX_DATA(SCK2_MARK, PS3_7_FN2, PTK7_FN),
+ PINMUX_DATA(COM2_RXD_MARK, PTK6_FN),
+ PINMUX_DATA(COM2_RTS_MARK, PTK5_FN),
+ PINMUX_DATA(COM2_CTS_MARK, PTK4_FN),
+ PINMUX_DATA(COM2_DTR_MARK, PTK3_FN),
+ PINMUX_DATA(COM2_DSR_MARK, PS3_2_FN1, PTK2_FN),
+ PINMUX_DATA(SCK4_MARK, PS3_2_FN2, PTK2_FN),
+ PINMUX_DATA(COM2_DCD_MARK, PS3_1_FN1, PTK1_FN),
+ PINMUX_DATA(SCK3_MARK, PS3_1_FN2, PTK1_FN),
+ PINMUX_DATA(CLKOUT_MARK, PTK0_FN),
+
+ /* PTL FN */
+ PINMUX_DATA(RAC_RXD_MARK, PS4_14_FN1, PTL6_FN),
+ PINMUX_DATA(RXD2_MARK, PS4_14_FN2, PTL6_FN),
+ PINMUX_DATA(RAC_RTS_MARK, PS4_13_FN1, PTL5_FN),
+ PINMUX_DATA(CS5_MARK, PS4_13_FN2, PTL5_FN),
+ PINMUX_DATA(RAC_CTS_MARK, PS4_12_FN1, PTL4_FN),
+ PINMUX_DATA(CS6_MARK, PS4_12_FN2, PTL4_FN),
+ PINMUX_DATA(RAC_DTR_MARK, PTL3_FN),
+ PINMUX_DATA(RAC_DSR_MARK, PS4_10_FN1, PTL2_FN),
+ PINMUX_DATA(AUDSYNC_MARK, PS4_10_FN2, PTL2_FN),
+ PINMUX_DATA(RAC_DCD_MARK, PS4_9_FN1, PTL1_FN),
+ PINMUX_DATA(AUDCK_MARK, PS4_9_FN2, PTL1_FN),
+ PINMUX_DATA(RAC_TXD_MARK, PS4_8_FN1, PTL0_FN),
+ PINMUX_DATA(TXD2_MARK, PS4_8_FN2, PTL0_FN),
+
+ /* PTM FN */
+ PINMUX_DATA(CS4_MARK, PTM7_FN),
+ PINMUX_DATA(RD_MARK, PTM6_FN),
+ PINMUX_DATA(WE0_MARK, PTM5_FN),
+ PINMUX_DATA(CS0_MARK, PTM4_FN),
+ PINMUX_DATA(SDA6_MARK, PTM3_FN),
+ PINMUX_DATA(SCL6_MARK, PTM2_FN),
+ PINMUX_DATA(SDA7_MARK, PTM1_FN),
+ PINMUX_DATA(SCL7_MARK, PTM0_FN),
+
+ /* PTN FN */
+ PINMUX_DATA(VBUS_EN_MARK, PTN6_FN),
+ PINMUX_DATA(VBUS_OC_MARK, PTN5_FN),
+ PINMUX_DATA(JMCTCK_MARK, PS4_4_FN1, PTN4_FN),
+ PINMUX_DATA(SGPIO1_CLK_MARK, PS4_4_FN2, PTN4_FN),
+ PINMUX_DATA(JMCTMS_MARK, PS4_3_FN1, PTN3_FN),
+ PINMUX_DATA(SGPIO1_LOAD_MARK, PS4_3_FN2, PTN3_FN),
+ PINMUX_DATA(JMCTDO_MARK, PS4_2_FN1, PTN2_FN),
+ PINMUX_DATA(SGPIO1_DO_MARK, PS4_2_FN2, PTN2_FN),
+ PINMUX_DATA(JMCTDI_MARK, PS4_1_FN1, PTN1_FN),
+ PINMUX_DATA(SGPIO1_DI_MARK, PS4_1_FN2, PTN1_FN),
+ PINMUX_DATA(JMCTRST_MARK, PS4_0_FN1, PTN0_FN),
+ PINMUX_DATA(SUB_CLKIN_MARK, PS4_0_FN2, PTN0_FN),
+
+ /* PTO FN */
+ PINMUX_DATA(SGPIO0_CLK_MARK, PTO7_FN),
+ PINMUX_DATA(SGPIO0_LOAD_MARK, PTO6_FN),
+ PINMUX_DATA(SGPIO0_DI_MARK, PTO5_FN),
+ PINMUX_DATA(SGPIO0_DO_MARK, PTO4_FN),
+ PINMUX_DATA(SGPIO2_CLK_MARK, PS5_11_FN1, PTO3_FN),
+ PINMUX_DATA(COM1_TXD_MARK, PS5_11_FN2, PTO3_FN),
+ PINMUX_DATA(SGPIO2_LOAD_MARK, PS5_10_FN1, PTO2_FN),
+ PINMUX_DATA(COM1_RXD_MARK, PS5_10_FN2, PTO2_FN),
+ PINMUX_DATA(SGPIO2_DI_MARK, PS5_9_FN1, PTO1_FN),
+ PINMUX_DATA(COM1_RTS_MARK, PS5_9_FN2, PTO1_FN),
+ PINMUX_DATA(SGPIO2_DO_MARK, PS5_8_FN1, PTO0_FN),
+ PINMUX_DATA(COM1_CTS_MARK, PS5_8_FN2, PTO0_FN),
+
+ /* PTP FN */
+
+ /* PTQ FN */
+ PINMUX_DATA(LAD3_MARK, PTQ6_FN),
+ PINMUX_DATA(LAD2_MARK, PTQ5_FN),
+ PINMUX_DATA(LAD1_MARK, PTQ4_FN),
+ PINMUX_DATA(LAD0_MARK, PTQ3_FN),
+ PINMUX_DATA(LFRAME_MARK, PTQ2_FN),
+ PINMUX_DATA(LRESET_MARK, PTQ1_FN),
+ PINMUX_DATA(LCLK_MARK, PTQ0_FN),
+
+ /* PTR FN */
+ PINMUX_DATA(SDA8_MARK, PTR7_FN), /* DDC3? */
+ PINMUX_DATA(SCL8_MARK, PTR6_FN), /* DDC2? */
+ PINMUX_DATA(SDA2_MARK, PTR5_FN),
+ PINMUX_DATA(SCL2_MARK, PTR4_FN),
+ PINMUX_DATA(SDA1_MARK, PTR3_FN),
+ PINMUX_DATA(SCL1_MARK, PTR2_FN),
+ PINMUX_DATA(SDA0_MARK, PTR1_FN),
+ PINMUX_DATA(SCL0_MARK, PTR0_FN),
+
+ /* PTS FN */
+ PINMUX_DATA(SDA9_MARK, PTS7_FN), /* DDC1? */
+ PINMUX_DATA(SCL9_MARK, PTS6_FN), /* DDC0? */
+ PINMUX_DATA(SDA5_MARK, PTS5_FN),
+ PINMUX_DATA(SCL5_MARK, PTS4_FN),
+ PINMUX_DATA(SDA4_MARK, PTS3_FN),
+ PINMUX_DATA(SCL4_MARK, PTS2_FN),
+ PINMUX_DATA(SDA3_MARK, PTS1_FN),
+ PINMUX_DATA(SCL3_MARK, PTS0_FN),
+
+ /* PTT FN */
+ PINMUX_DATA(PWMX7_MARK, PS5_7_FN1, PTT7_FN),
+ PINMUX_DATA(AUDATA3_MARK, PS5_7_FN2, PTT7_FN),
+ PINMUX_DATA(PWMX6_MARK, PS5_6_FN1, PTT6_FN),
+ PINMUX_DATA(AUDATA2_MARK, PS5_6_FN2, PTT6_FN),
+ PINMUX_DATA(PWMX5_MARK, PS5_5_FN1, PTT5_FN),
+ PINMUX_DATA(AUDATA1_MARK, PS5_5_FN2, PTT5_FN),
+ PINMUX_DATA(PWMX4_MARK, PS5_4_FN1, PTT4_FN),
+ PINMUX_DATA(AUDATA0_MARK, PS5_4_FN2, PTT4_FN),
+ PINMUX_DATA(PWMX3_MARK, PS5_3_FN1, PTT3_FN),
+ PINMUX_DATA(STATUS1_MARK, PS5_3_FN2, PTT3_FN),
+ PINMUX_DATA(PWMX2_MARK, PS5_2_FN1, PTT2_FN),
+ PINMUX_DATA(STATUS0_MARK, PS5_2_FN2, PTT2_FN),
+ PINMUX_DATA(PWMX1_MARK, PTT1_FN),
+ PINMUX_DATA(PWMX0_MARK, PTT0_FN),
+
+ /* PTU FN */
+ PINMUX_DATA(LGPIO7_MARK, PS6_15_FN1, PTU7_FN),
+ PINMUX_DATA(APMONCTL_O_MARK, PS6_15_FN2, PTU7_FN),
+ PINMUX_DATA(LGPIO6_MARK, PS6_14_FN1, PTU6_FN),
+ PINMUX_DATA(APMPWBTOUT_O_MARK, PS6_14_FN2, PTU6_FN),
+ PINMUX_DATA(LGPIO5_MARK, PS6_13_FN1, PTU5_FN),
+ PINMUX_DATA(APMSCI_O_MARK, PS6_13_FN2, PTU5_FN),
+ PINMUX_DATA(LGPIO4_MARK, PS6_12_FN1, PTU4_FN),
+ PINMUX_DATA(APMVDDON_MARK, PS6_12_FN2, PTU4_FN),
+ PINMUX_DATA(LGPIO3_MARK, PS6_11_FN1, PTU3_FN),
+ PINMUX_DATA(APMSLPBTN_MARK, PS6_11_FN2, PTU3_FN),
+ PINMUX_DATA(LGPIO2_MARK, PS6_10_FN1, PTU2_FN),
+ PINMUX_DATA(APMPWRBTN_MARK, PS6_10_FN2, PTU2_FN),
+ PINMUX_DATA(LGPIO1_MARK, PS6_9_FN1, PTU1_FN),
+ PINMUX_DATA(APMS5N_MARK, PS6_9_FN2, PTU1_FN),
+ PINMUX_DATA(LGPIO0_MARK, PS6_8_FN1, PTU0_FN),
+ PINMUX_DATA(APMS3N_MARK, PS6_8_FN2, PTU0_FN),
+
+ /* PTV FN */
+ PINMUX_DATA(A23_MARK, PS6_7_FN1, PTV7_FN),
+ PINMUX_DATA(COM2_RI_MARK, PS6_7_FN2, PTV7_FN),
+ PINMUX_DATA(A22_MARK, PS6_6_FN1, PTV6_FN),
+ PINMUX_DATA(R_SPI_MOSI_MARK, PS6_6_FN2, PTV6_FN),
+ PINMUX_DATA(A21_MARK, PS6_5_FN1, PTV5_FN),
+ PINMUX_DATA(R_SPI_MISO_MARK, PS6_5_FN2, PTV5_FN),
+ PINMUX_DATA(A20_MARK, PS6_4_FN1, PTV4_FN),
+ PINMUX_DATA(R_SPI_RSPCK_MARK, PS6_4_FN2, PTV4_FN),
+ PINMUX_DATA(A19_MARK, PS6_3_FN1, PTV3_FN),
+ PINMUX_DATA(R_SPI_SSL0_MARK, PS6_3_FN2, PTV3_FN),
+ PINMUX_DATA(A18_MARK, PS6_2_FN1, PTV2_FN),
+ PINMUX_DATA(R_SPI_SSL1_MARK, PS6_2_FN2, PTV2_FN),
+ PINMUX_DATA(A17_MARK, PS6_1_FN1, PTV1_FN),
+ PINMUX_DATA(EVENT7_MARK, PS6_1_FN2, PTV1_FN),
+ PINMUX_DATA(A16_MARK, PS6_0_FN1, PTV0_FN),
+ PINMUX_DATA(EVENT6_MARK, PS6_0_FN2, PTV0_FN),
+
+ /* PTW FN */
+ PINMUX_DATA(A15_MARK, PS7_15_FN1, PTW7_FN),
+ PINMUX_DATA(EVENT5_MARK, PS7_15_FN2, PTW7_FN),
+ PINMUX_DATA(A14_MARK, PS7_14_FN1, PTW6_FN),
+ PINMUX_DATA(EVENT4_MARK, PS7_14_FN2, PTW6_FN),
+ PINMUX_DATA(A13_MARK, PS7_13_FN1, PTW5_FN),
+ PINMUX_DATA(EVENT3_MARK, PS7_13_FN2, PTW5_FN),
+ PINMUX_DATA(A12_MARK, PS7_12_FN1, PTW4_FN),
+ PINMUX_DATA(EVENT2_MARK, PS7_12_FN2, PTW4_FN),
+ PINMUX_DATA(A11_MARK, PS7_11_FN1, PTW3_FN),
+ PINMUX_DATA(EVENT1_MARK, PS7_11_FN2, PTW3_FN),
+ PINMUX_DATA(A10_MARK, PS7_10_FN1, PTW2_FN),
+ PINMUX_DATA(EVENT0_MARK, PS7_10_FN2, PTW2_FN),
+ PINMUX_DATA(A9_MARK, PS7_9_FN1, PTW1_FN),
+ PINMUX_DATA(CTS4_MARK, PS7_9_FN2, PTW1_FN),
+ PINMUX_DATA(A8_MARK, PS7_8_FN1, PTW0_FN),
+ PINMUX_DATA(CTS2_MARK, PS7_8_FN2, PTW0_FN),
+
+ /* PTX FN */
+ PINMUX_DATA(A7_MARK, PS7_7_FN1, PTX7_FN),
+ PINMUX_DATA(RTS2_MARK, PS7_7_FN2, PTX7_FN),
+ PINMUX_DATA(A6_MARK, PS7_6_FN1, PTX6_FN),
+ PINMUX_DATA(SIM_D_MARK, PS7_6_FN2, PTX6_FN),
+ PINMUX_DATA(A5_MARK, PS7_5_FN1, PTX5_FN),
+ PINMUX_DATA(SIM_CLK_MARK, PS7_5_FN2, PTX5_FN),
+ PINMUX_DATA(A4_MARK, PS7_4_FN1, PTX4_FN),
+ PINMUX_DATA(SIM_RST_MARK, PS7_4_FN2, PTX4_FN),
+ PINMUX_DATA(A3_MARK, PTX3_FN),
+ PINMUX_DATA(A2_MARK, PTX2_FN),
+ PINMUX_DATA(A1_MARK, PTX1_FN),
+ PINMUX_DATA(A0_MARK, PTX0_FN),
+
+ /* PTY FN */
+ PINMUX_DATA(D7_MARK, PTY7_FN),
+ PINMUX_DATA(D6_MARK, PTY6_FN),
+ PINMUX_DATA(D5_MARK, PTY5_FN),
+ PINMUX_DATA(D4_MARK, PTY4_FN),
+ PINMUX_DATA(D3_MARK, PTY3_FN),
+ PINMUX_DATA(D2_MARK, PTY2_FN),
+ PINMUX_DATA(D1_MARK, PTY1_FN),
+ PINMUX_DATA(D0_MARK, PTY0_FN),
+
+ /* PTZ FN */
+ PINMUX_DATA(MMCDAT7_MARK, PS8_15_FN1, PTZ7_FN),
+ PINMUX_DATA(ON_DQ7_MARK, PS8_15_FN2, PTZ7_FN),
+ PINMUX_DATA(MMCDAT6_MARK, PS8_14_FN1, PTZ6_FN),
+ PINMUX_DATA(ON_DQ6_MARK, PS8_14_FN2, PTZ6_FN),
+ PINMUX_DATA(MMCDAT5_MARK, PS8_13_FN1, PTZ5_FN),
+ PINMUX_DATA(ON_DQ5_MARK, PS8_13_FN2, PTZ5_FN),
+ PINMUX_DATA(MMCDAT4_MARK, PS8_12_FN1, PTZ4_FN),
+ PINMUX_DATA(ON_DQ4_MARK, PS8_12_FN2, PTZ4_FN),
+ PINMUX_DATA(MMCDAT3_MARK, PS8_11_FN1, PTZ3_FN),
+ PINMUX_DATA(ON_DQ3_MARK, PS8_11_FN2, PTZ3_FN),
+ PINMUX_DATA(MMCDAT2_MARK, PS8_10_FN1, PTZ2_FN),
+ PINMUX_DATA(ON_DQ2_MARK, PS8_10_FN2, PTZ2_FN),
+ PINMUX_DATA(MMCDAT1_MARK, PS8_9_FN1, PTZ1_FN),
+ PINMUX_DATA(ON_DQ1_MARK, PS8_9_FN2, PTZ1_FN),
+ PINMUX_DATA(MMCDAT0_MARK, PS8_8_FN1, PTZ0_FN),
+ PINMUX_DATA(ON_DQ0_MARK, PS8_8_FN2, PTZ0_FN),
+};
+
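+/*
+ * Map the public GPIO_PTxn / GPIO_FN_* numbers from the CPU header onto the
+ * pin-data and function-mark enum IDs above, so the common SH pinmux code
+ * can resolve gpio_request() calls against these tables.
+ */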
+static struct pinmux_gpio pinmux_gpios[] = {
+ /* PTA */
+ PINMUX_GPIO(GPIO_PTA7, PTA7_DATA),
+ PINMUX_GPIO(GPIO_PTA6, PTA6_DATA),
+ PINMUX_GPIO(GPIO_PTA5, PTA5_DATA),
+ PINMUX_GPIO(GPIO_PTA4, PTA4_DATA),
+ PINMUX_GPIO(GPIO_PTA3, PTA3_DATA),
+ PINMUX_GPIO(GPIO_PTA2, PTA2_DATA),
+ PINMUX_GPIO(GPIO_PTA1, PTA1_DATA),
+ PINMUX_GPIO(GPIO_PTA0, PTA0_DATA),
+
+ /* PTB */
+ PINMUX_GPIO(GPIO_PTB7, PTB7_DATA),
+ PINMUX_GPIO(GPIO_PTB6, PTB6_DATA),
+ PINMUX_GPIO(GPIO_PTB5, PTB5_DATA),
+ PINMUX_GPIO(GPIO_PTB4, PTB4_DATA),
+ PINMUX_GPIO(GPIO_PTB3, PTB3_DATA),
+ PINMUX_GPIO(GPIO_PTB2, PTB2_DATA),
+ PINMUX_GPIO(GPIO_PTB1, PTB1_DATA),
+ PINMUX_GPIO(GPIO_PTB0, PTB0_DATA),
+
+ /* PTC */
+ PINMUX_GPIO(GPIO_PTC7, PTC7_DATA),
+ PINMUX_GPIO(GPIO_PTC6, PTC6_DATA),
+ PINMUX_GPIO(GPIO_PTC5, PTC5_DATA),
+ PINMUX_GPIO(GPIO_PTC4, PTC4_DATA),
+ PINMUX_GPIO(GPIO_PTC3, PTC3_DATA),
+ PINMUX_GPIO(GPIO_PTC2, PTC2_DATA),
+ PINMUX_GPIO(GPIO_PTC1, PTC1_DATA),
+ PINMUX_GPIO(GPIO_PTC0, PTC0_DATA),
+
+ /* PTD */
+ PINMUX_GPIO(GPIO_PTD7, PTD7_DATA),
+ PINMUX_GPIO(GPIO_PTD6, PTD6_DATA),
+ PINMUX_GPIO(GPIO_PTD5, PTD5_DATA),
+ PINMUX_GPIO(GPIO_PTD4, PTD4_DATA),
+ PINMUX_GPIO(GPIO_PTD3, PTD3_DATA),
+ PINMUX_GPIO(GPIO_PTD2, PTD2_DATA),
+ PINMUX_GPIO(GPIO_PTD1, PTD1_DATA),
+ PINMUX_GPIO(GPIO_PTD0, PTD0_DATA),
+
+ /* PTE */
+ PINMUX_GPIO(GPIO_PTE7, PTE7_DATA),
+ PINMUX_GPIO(GPIO_PTE6, PTE6_DATA),
+ PINMUX_GPIO(GPIO_PTE5, PTE5_DATA),
+ PINMUX_GPIO(GPIO_PTE4, PTE4_DATA),
+ PINMUX_GPIO(GPIO_PTE3, PTE3_DATA),
+ PINMUX_GPIO(GPIO_PTE2, PTE2_DATA),
+ PINMUX_GPIO(GPIO_PTE1, PTE1_DATA),
+ PINMUX_GPIO(GPIO_PTE0, PTE0_DATA),
+
+ /* PTF */
+ PINMUX_GPIO(GPIO_PTF7, PTF7_DATA),
+ PINMUX_GPIO(GPIO_PTF6, PTF6_DATA),
+ PINMUX_GPIO(GPIO_PTF5, PTF5_DATA),
+ PINMUX_GPIO(GPIO_PTF4, PTF4_DATA),
+ PINMUX_GPIO(GPIO_PTF3, PTF3_DATA),
+ PINMUX_GPIO(GPIO_PTF2, PTF2_DATA),
+ PINMUX_GPIO(GPIO_PTF1, PTF1_DATA),
+ PINMUX_GPIO(GPIO_PTF0, PTF0_DATA),
+
+ /* PTG */
+ PINMUX_GPIO(GPIO_PTG7, PTG7_DATA),
+ PINMUX_GPIO(GPIO_PTG6, PTG6_DATA),
+ PINMUX_GPIO(GPIO_PTG5, PTG5_DATA),
+ PINMUX_GPIO(GPIO_PTG4, PTG4_DATA),
+ PINMUX_GPIO(GPIO_PTG3, PTG3_DATA),
+ PINMUX_GPIO(GPIO_PTG2, PTG2_DATA),
+ PINMUX_GPIO(GPIO_PTG1, PTG1_DATA),
+ PINMUX_GPIO(GPIO_PTG0, PTG0_DATA),
+
+ /* PTH */
+ PINMUX_GPIO(GPIO_PTH7, PTH7_DATA),
+ PINMUX_GPIO(GPIO_PTH6, PTH6_DATA),
+ PINMUX_GPIO(GPIO_PTH5, PTH5_DATA),
+ PINMUX_GPIO(GPIO_PTH4, PTH4_DATA),
+ PINMUX_GPIO(GPIO_PTH3, PTH3_DATA),
+ PINMUX_GPIO(GPIO_PTH2, PTH2_DATA),
+ PINMUX_GPIO(GPIO_PTH1, PTH1_DATA),
+ PINMUX_GPIO(GPIO_PTH0, PTH0_DATA),
+
+ /* PTI */
+ PINMUX_GPIO(GPIO_PTI7, PTI7_DATA),
+ PINMUX_GPIO(GPIO_PTI6, PTI6_DATA),
+ PINMUX_GPIO(GPIO_PTI5, PTI5_DATA),
+ PINMUX_GPIO(GPIO_PTI4, PTI4_DATA),
+ PINMUX_GPIO(GPIO_PTI3, PTI3_DATA),
+ PINMUX_GPIO(GPIO_PTI2, PTI2_DATA),
+ PINMUX_GPIO(GPIO_PTI1, PTI1_DATA),
+ PINMUX_GPIO(GPIO_PTI0, PTI0_DATA),
+
+ /* PTJ */
+ PINMUX_GPIO(GPIO_PTJ6, PTJ6_DATA),
+ PINMUX_GPIO(GPIO_PTJ5, PTJ5_DATA),
+ PINMUX_GPIO(GPIO_PTJ4, PTJ4_DATA),
+ PINMUX_GPIO(GPIO_PTJ3, PTJ3_DATA),
+ PINMUX_GPIO(GPIO_PTJ2, PTJ2_DATA),
+ PINMUX_GPIO(GPIO_PTJ1, PTJ1_DATA),
+ PINMUX_GPIO(GPIO_PTJ0, PTJ0_DATA),
+
+ /* PTK */
+ PINMUX_GPIO(GPIO_PTK7, PTK7_DATA),
+ PINMUX_GPIO(GPIO_PTK6, PTK6_DATA),
+ PINMUX_GPIO(GPIO_PTK5, PTK5_DATA),
+ PINMUX_GPIO(GPIO_PTK4, PTK4_DATA),
+ PINMUX_GPIO(GPIO_PTK3, PTK3_DATA),
+ PINMUX_GPIO(GPIO_PTK2, PTK2_DATA),
+ PINMUX_GPIO(GPIO_PTK1, PTK1_DATA),
+ PINMUX_GPIO(GPIO_PTK0, PTK0_DATA),
+
+ /* PTL */
+ PINMUX_GPIO(GPIO_PTL6, PTL6_DATA),
+ PINMUX_GPIO(GPIO_PTL5, PTL5_DATA),
+ PINMUX_GPIO(GPIO_PTL4, PTL4_DATA),
+ PINMUX_GPIO(GPIO_PTL3, PTL3_DATA),
+ PINMUX_GPIO(GPIO_PTL2, PTL2_DATA),
+ PINMUX_GPIO(GPIO_PTL1, PTL1_DATA),
+ PINMUX_GPIO(GPIO_PTL0, PTL0_DATA),
+
+ /* PTM */
+ PINMUX_GPIO(GPIO_PTM7, PTM7_DATA),
+ PINMUX_GPIO(GPIO_PTM6, PTM6_DATA),
+ PINMUX_GPIO(GPIO_PTM5, PTM5_DATA),
+ PINMUX_GPIO(GPIO_PTM4, PTM4_DATA),
+ PINMUX_GPIO(GPIO_PTM3, PTM3_DATA),
+ PINMUX_GPIO(GPIO_PTM2, PTM2_DATA),
+ PINMUX_GPIO(GPIO_PTM1, PTM1_DATA),
+ PINMUX_GPIO(GPIO_PTM0, PTM0_DATA),
+
+ /* PTN */
+ PINMUX_GPIO(GPIO_PTN6, PTN6_DATA),
+ PINMUX_GPIO(GPIO_PTN5, PTN5_DATA),
+ PINMUX_GPIO(GPIO_PTN4, PTN4_DATA),
+ PINMUX_GPIO(GPIO_PTN3, PTN3_DATA),
+ PINMUX_GPIO(GPIO_PTN2, PTN2_DATA),
+ PINMUX_GPIO(GPIO_PTN1, PTN1_DATA),
+ PINMUX_GPIO(GPIO_PTN0, PTN0_DATA),
+
+ /* PTO */
+ PINMUX_GPIO(GPIO_PTO7, PTO7_DATA),
+ PINMUX_GPIO(GPIO_PTO6, PTO6_DATA),
+ PINMUX_GPIO(GPIO_PTO5, PTO5_DATA),
+ PINMUX_GPIO(GPIO_PTO4, PTO4_DATA),
+ PINMUX_GPIO(GPIO_PTO3, PTO3_DATA),
+ PINMUX_GPIO(GPIO_PTO2, PTO2_DATA),
+ PINMUX_GPIO(GPIO_PTO1, PTO1_DATA),
+ PINMUX_GPIO(GPIO_PTO0, PTO0_DATA),
+
+ /* PTP */
+ PINMUX_GPIO(GPIO_PTP7, PTP7_DATA),
+ PINMUX_GPIO(GPIO_PTP6, PTP6_DATA),
+ PINMUX_GPIO(GPIO_PTP5, PTP5_DATA),
+ PINMUX_GPIO(GPIO_PTP4, PTP4_DATA),
+ PINMUX_GPIO(GPIO_PTP3, PTP3_DATA),
+ PINMUX_GPIO(GPIO_PTP2, PTP2_DATA),
+ PINMUX_GPIO(GPIO_PTP1, PTP1_DATA),
+ PINMUX_GPIO(GPIO_PTP0, PTP0_DATA),
+
+ /* PTQ */
+ PINMUX_GPIO(GPIO_PTQ6, PTQ6_DATA),
+ PINMUX_GPIO(GPIO_PTQ5, PTQ5_DATA),
+ PINMUX_GPIO(GPIO_PTQ4, PTQ4_DATA),
+ PINMUX_GPIO(GPIO_PTQ3, PTQ3_DATA),
+ PINMUX_GPIO(GPIO_PTQ2, PTQ2_DATA),
+ PINMUX_GPIO(GPIO_PTQ1, PTQ1_DATA),
+ PINMUX_GPIO(GPIO_PTQ0, PTQ0_DATA),
+
+ /* PTR */
+ PINMUX_GPIO(GPIO_PTR7, PTR7_DATA),
+ PINMUX_GPIO(GPIO_PTR6, PTR6_DATA),
+ PINMUX_GPIO(GPIO_PTR5, PTR5_DATA),
+ PINMUX_GPIO(GPIO_PTR4, PTR4_DATA),
+ PINMUX_GPIO(GPIO_PTR3, PTR3_DATA),
+ PINMUX_GPIO(GPIO_PTR2, PTR2_DATA),
+ PINMUX_GPIO(GPIO_PTR1, PTR1_DATA),
+ PINMUX_GPIO(GPIO_PTR0, PTR0_DATA),
+
+ /* PTS */
+ PINMUX_GPIO(GPIO_PTS7, PTS7_DATA),
+ PINMUX_GPIO(GPIO_PTS6, PTS6_DATA),
+ PINMUX_GPIO(GPIO_PTS5, PTS5_DATA),
+ PINMUX_GPIO(GPIO_PTS4, PTS4_DATA),
+ PINMUX_GPIO(GPIO_PTS3, PTS3_DATA),
+ PINMUX_GPIO(GPIO_PTS2, PTS2_DATA),
+ PINMUX_GPIO(GPIO_PTS1, PTS1_DATA),
+ PINMUX_GPIO(GPIO_PTS0, PTS0_DATA),
+
+ /* PTT */
+ PINMUX_GPIO(GPIO_PTT7, PTT7_DATA),
+ PINMUX_GPIO(GPIO_PTT6, PTT6_DATA),
+ PINMUX_GPIO(GPIO_PTT5, PTT5_DATA),
+ PINMUX_GPIO(GPIO_PTT4, PTT4_DATA),
+ PINMUX_GPIO(GPIO_PTT3, PTT3_DATA),
+ PINMUX_GPIO(GPIO_PTT2, PTT2_DATA),
+ PINMUX_GPIO(GPIO_PTT1, PTT1_DATA),
+ PINMUX_GPIO(GPIO_PTT0, PTT0_DATA),
+
+ /* PTU */
+ PINMUX_GPIO(GPIO_PTU7, PTU7_DATA),
+ PINMUX_GPIO(GPIO_PTU6, PTU6_DATA),
+ PINMUX_GPIO(GPIO_PTU5, PTU5_DATA),
+ PINMUX_GPIO(GPIO_PTU4, PTU4_DATA),
+ PINMUX_GPIO(GPIO_PTU3, PTU3_DATA),
+ PINMUX_GPIO(GPIO_PTU2, PTU2_DATA),
+ PINMUX_GPIO(GPIO_PTU1, PTU1_DATA),
+ PINMUX_GPIO(GPIO_PTU0, PTU0_DATA),
+
+ /* PTV */
+ PINMUX_GPIO(GPIO_PTV7, PTV7_DATA),
+ PINMUX_GPIO(GPIO_PTV6, PTV6_DATA),
+ PINMUX_GPIO(GPIO_PTV5, PTV5_DATA),
+ PINMUX_GPIO(GPIO_PTV4, PTV4_DATA),
+ PINMUX_GPIO(GPIO_PTV3, PTV3_DATA),
+ PINMUX_GPIO(GPIO_PTV2, PTV2_DATA),
+ PINMUX_GPIO(GPIO_PTV1, PTV1_DATA),
+ PINMUX_GPIO(GPIO_PTV0, PTV0_DATA),
+
+ /* PTW */
+ PINMUX_GPIO(GPIO_PTW7, PTW7_DATA),
+ PINMUX_GPIO(GPIO_PTW6, PTW6_DATA),
+ PINMUX_GPIO(GPIO_PTW5, PTW5_DATA),
+ PINMUX_GPIO(GPIO_PTW4, PTW4_DATA),
+ PINMUX_GPIO(GPIO_PTW3, PTW3_DATA),
+ PINMUX_GPIO(GPIO_PTW2, PTW2_DATA),
+ PINMUX_GPIO(GPIO_PTW1, PTW1_DATA),
+ PINMUX_GPIO(GPIO_PTW0, PTW0_DATA),
+
+ /* PTX */
+ PINMUX_GPIO(GPIO_PTX7, PTX7_DATA),
+ PINMUX_GPIO(GPIO_PTX6, PTX6_DATA),
+ PINMUX_GPIO(GPIO_PTX5, PTX5_DATA),
+ PINMUX_GPIO(GPIO_PTX4, PTX4_DATA),
+ PINMUX_GPIO(GPIO_PTX3, PTX3_DATA),
+ PINMUX_GPIO(GPIO_PTX2, PTX2_DATA),
+ PINMUX_GPIO(GPIO_PTX1, PTX1_DATA),
+ PINMUX_GPIO(GPIO_PTX0, PTX0_DATA),
+
+ /* PTY */
+ PINMUX_GPIO(GPIO_PTY7, PTY7_DATA),
+ PINMUX_GPIO(GPIO_PTY6, PTY6_DATA),
+ PINMUX_GPIO(GPIO_PTY5, PTY5_DATA),
+ PINMUX_GPIO(GPIO_PTY4, PTY4_DATA),
+ PINMUX_GPIO(GPIO_PTY3, PTY3_DATA),
+ PINMUX_GPIO(GPIO_PTY2, PTY2_DATA),
+ PINMUX_GPIO(GPIO_PTY1, PTY1_DATA),
+ PINMUX_GPIO(GPIO_PTY0, PTY0_DATA),
+
+ /* PTZ */
+ PINMUX_GPIO(GPIO_PTZ7, PTZ7_DATA),
+ PINMUX_GPIO(GPIO_PTZ6, PTZ6_DATA),
+ PINMUX_GPIO(GPIO_PTZ5, PTZ5_DATA),
+ PINMUX_GPIO(GPIO_PTZ4, PTZ4_DATA),
+ PINMUX_GPIO(GPIO_PTZ3, PTZ3_DATA),
+ PINMUX_GPIO(GPIO_PTZ2, PTZ2_DATA),
+ PINMUX_GPIO(GPIO_PTZ1, PTZ1_DATA),
+ PINMUX_GPIO(GPIO_PTZ0, PTZ0_DATA),
+
+ /* PTA (module: LBSC, RGMII) */
+ PINMUX_GPIO(GPIO_FN_BS, BS_MARK),
+ PINMUX_GPIO(GPIO_FN_RDWR, RDWR_MARK),
+ PINMUX_GPIO(GPIO_FN_WE1, WE1_MARK),
+ PINMUX_GPIO(GPIO_FN_RDY, RDY_MARK),
+ PINMUX_GPIO(GPIO_FN_ET0_MDC, ET0_MDC_MARK),
+ PINMUX_GPIO(GPIO_FN_ET0_MDIO, ET0_MDIO_MARK),
+ PINMUX_GPIO(GPIO_FN_ET1_MDC, ET1_MDC_MARK),
+ PINMUX_GPIO(GPIO_FN_ET1_MDIO, ET1_MDIO_MARK),
+
+ /* PTB (module: INTC, ONFI, TMU) */
+ PINMUX_GPIO(GPIO_FN_IRQ15, IRQ15_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ14, IRQ14_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ13, IRQ13_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ12, IRQ12_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ11, IRQ11_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ10, IRQ10_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ9, IRQ9_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ8, IRQ8_MARK),
+ PINMUX_GPIO(GPIO_FN_ON_NRE, ON_NRE_MARK),
+ PINMUX_GPIO(GPIO_FN_ON_NWE, ON_NWE_MARK),
+ PINMUX_GPIO(GPIO_FN_ON_NWP, ON_NWP_MARK),
+ PINMUX_GPIO(GPIO_FN_ON_NCE0, ON_NCE0_MARK),
+ PINMUX_GPIO(GPIO_FN_ON_R_B0, ON_R_B0_MARK),
+ PINMUX_GPIO(GPIO_FN_ON_ALE, ON_ALE_MARK),
+ PINMUX_GPIO(GPIO_FN_ON_CLE, ON_CLE_MARK),
+ PINMUX_GPIO(GPIO_FN_TCLK, TCLK_MARK),
+
+ /* PTC (module: IRQ, PWMU) */
+ PINMUX_GPIO(GPIO_FN_IRQ7, IRQ7_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ6, IRQ6_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ5, IRQ5_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ4, IRQ4_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ3, IRQ3_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ2, IRQ2_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ1, IRQ1_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ0, IRQ0_MARK),
+ PINMUX_GPIO(GPIO_FN_PWMU0, PWMU0_MARK),
+ PINMUX_GPIO(GPIO_FN_PWMU1, PWMU1_MARK),
+ PINMUX_GPIO(GPIO_FN_PWMU2, PWMU2_MARK),
+ PINMUX_GPIO(GPIO_FN_PWMU3, PWMU3_MARK),
+ PINMUX_GPIO(GPIO_FN_PWMU4, PWMU4_MARK),
+ PINMUX_GPIO(GPIO_FN_PWMU5, PWMU5_MARK),
+
+ /* PTD (module: SPI0, DMAC) */
+ PINMUX_GPIO(GPIO_FN_SP0_MOSI, SP0_MOSI_MARK),
+ PINMUX_GPIO(GPIO_FN_SP0_MISO, SP0_MISO_MARK),
+ PINMUX_GPIO(GPIO_FN_SP0_SCK, SP0_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SP0_SCK_FB, SP0_SCK_FB_MARK),
+ PINMUX_GPIO(GPIO_FN_SP0_SS0, SP0_SS0_MARK),
+ PINMUX_GPIO(GPIO_FN_SP0_SS1, SP0_SS1_MARK),
+ PINMUX_GPIO(GPIO_FN_SP0_SS2, SP0_SS2_MARK),
+ PINMUX_GPIO(GPIO_FN_SP0_SS3, SP0_SS3_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ0, DREQ0_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK0, DACK0_MARK),
+ PINMUX_GPIO(GPIO_FN_TEND0, TEND0_MARK),
+
+ /* PTE (module: RMII) */
+ PINMUX_GPIO(GPIO_FN_RMII0_CRS_DV, RMII0_CRS_DV_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII0_TXD1, RMII0_TXD1_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII0_TXD0, RMII0_TXD0_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII0_TXEN, RMII0_TXEN_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII0_REFCLK, RMII0_REFCLK_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII0_RXD1, RMII0_RXD1_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII0_RXD0, RMII0_RXD0_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII0_RX_ER, RMII0_RX_ER_MARK),
+
+ /* PTF (module: RMII, SerMux) */
+ PINMUX_GPIO(GPIO_FN_RMII1_CRS_DV, RMII1_CRS_DV_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII1_TXD1, RMII1_TXD1_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII1_TXD0, RMII1_TXD0_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII1_TXEN, RMII1_TXEN_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII1_REFCLK, RMII1_REFCLK_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII1_RXD1, RMII1_RXD1_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII1_RXD0, RMII1_RXD0_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII1_RX_ER, RMII1_RX_ER_MARK),
+ PINMUX_GPIO(GPIO_FN_RAC_RI, RAC_RI_MARK),
+
+ /* PTG (module: system, LBSC, LPC, WDT, eMMC) */
+ PINMUX_GPIO(GPIO_FN_BOOTFMS, BOOTFMS_MARK),
+ PINMUX_GPIO(GPIO_FN_BOOTWP, BOOTWP_MARK),
+ PINMUX_GPIO(GPIO_FN_A25, A25_MARK),
+ PINMUX_GPIO(GPIO_FN_A24, A24_MARK),
+ PINMUX_GPIO(GPIO_FN_SERIRQ, SERIRQ_MARK),
+ PINMUX_GPIO(GPIO_FN_WDTOVF, WDTOVF_MARK),
+ PINMUX_GPIO(GPIO_FN_LPCPD, LPCPD_MARK),
+ PINMUX_GPIO(GPIO_FN_LDRQ, LDRQ_MARK),
+ PINMUX_GPIO(GPIO_FN_MMCCLK, MMCCLK_MARK),
+ PINMUX_GPIO(GPIO_FN_MMCCMD, MMCCMD_MARK),
+
+ /* PTH (module: SPI1, LPC, DMAC, ADC) */
+ PINMUX_GPIO(GPIO_FN_SP1_MOSI, SP1_MOSI_MARK),
+ PINMUX_GPIO(GPIO_FN_SP1_MISO, SP1_MISO_MARK),
+ PINMUX_GPIO(GPIO_FN_SP1_SCK, SP1_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SP1_SCK_FB, SP1_SCK_FB_MARK),
+ PINMUX_GPIO(GPIO_FN_SP1_SS0, SP1_SS0_MARK),
+ PINMUX_GPIO(GPIO_FN_SP1_SS1, SP1_SS1_MARK),
+ PINMUX_GPIO(GPIO_FN_WP, WP_MARK),
+ PINMUX_GPIO(GPIO_FN_FMS0, FMS0_MARK),
+ PINMUX_GPIO(GPIO_FN_TEND1, TEND1_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ1, DREQ1_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK1, DACK1_MARK),
+ PINMUX_GPIO(GPIO_FN_ADTRG1, ADTRG1_MARK),
+ PINMUX_GPIO(GPIO_FN_ADTRG0, ADTRG0_MARK),
+
+ /* PTI (module: LBSC, SDHI) */
+ PINMUX_GPIO(GPIO_FN_D15, D15_MARK),
+ PINMUX_GPIO(GPIO_FN_D14, D14_MARK),
+ PINMUX_GPIO(GPIO_FN_D13, D13_MARK),
+ PINMUX_GPIO(GPIO_FN_D12, D12_MARK),
+ PINMUX_GPIO(GPIO_FN_D11, D11_MARK),
+ PINMUX_GPIO(GPIO_FN_D10, D10_MARK),
+ PINMUX_GPIO(GPIO_FN_D9, D9_MARK),
+ PINMUX_GPIO(GPIO_FN_D8, D8_MARK),
+ PINMUX_GPIO(GPIO_FN_SD_WP, SD_WP_MARK),
+ PINMUX_GPIO(GPIO_FN_SD_CD, SD_CD_MARK),
+ PINMUX_GPIO(GPIO_FN_SD_CLK, SD_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_SD_CMD, SD_CMD_MARK),
+ PINMUX_GPIO(GPIO_FN_SD_D3, SD_D3_MARK),
+ PINMUX_GPIO(GPIO_FN_SD_D2, SD_D2_MARK),
+ PINMUX_GPIO(GPIO_FN_SD_D1, SD_D1_MARK),
+ PINMUX_GPIO(GPIO_FN_SD_D0, SD_D0_MARK),
+
+ /* PTJ (module: SCIF234, SERMUX) */
+ PINMUX_GPIO(GPIO_FN_RTS3, RTS3_MARK),
+ PINMUX_GPIO(GPIO_FN_CTS3, CTS3_MARK),
+ PINMUX_GPIO(GPIO_FN_TXD3, TXD3_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD3, RXD3_MARK),
+ PINMUX_GPIO(GPIO_FN_RTS4, RTS4_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD4, RXD4_MARK),
+ PINMUX_GPIO(GPIO_FN_TXD4, TXD4_MARK),
+
+ /* PTK (module: SERMUX, LBSC, SCIF) */
+ PINMUX_GPIO(GPIO_FN_COM2_TXD, COM2_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_COM2_RXD, COM2_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_COM2_RTS, COM2_RTS_MARK),
+ PINMUX_GPIO(GPIO_FN_COM2_CTS, COM2_CTS_MARK),
+ PINMUX_GPIO(GPIO_FN_COM2_DTR, COM2_DTR_MARK),
+ PINMUX_GPIO(GPIO_FN_COM2_DSR, COM2_DSR_MARK),
+ PINMUX_GPIO(GPIO_FN_COM2_DCD, COM2_DCD_MARK),
+ PINMUX_GPIO(GPIO_FN_CLKOUT, CLKOUT_MARK),
+ PINMUX_GPIO(GPIO_FN_SCK2, SCK2_MARK),
+ PINMUX_GPIO(GPIO_FN_SCK4, SCK4_MARK),
+ PINMUX_GPIO(GPIO_FN_SCK3, SCK3_MARK),
+
+ /* PTL (module: SERMUX, SCIF, LBSC, AUD) */
+ PINMUX_GPIO(GPIO_FN_RAC_RXD, RAC_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_RAC_RTS, RAC_RTS_MARK),
+ PINMUX_GPIO(GPIO_FN_RAC_CTS, RAC_CTS_MARK),
+ PINMUX_GPIO(GPIO_FN_RAC_DTR, RAC_DTR_MARK),
+ PINMUX_GPIO(GPIO_FN_RAC_DSR, RAC_DSR_MARK),
+ PINMUX_GPIO(GPIO_FN_RAC_DCD, RAC_DCD_MARK),
+ PINMUX_GPIO(GPIO_FN_RAC_TXD, RAC_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD2, RXD2_MARK),
+ PINMUX_GPIO(GPIO_FN_CS5, CS5_MARK),
+ PINMUX_GPIO(GPIO_FN_CS6, CS6_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDSYNC, AUDSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDCK, AUDCK_MARK),
+ PINMUX_GPIO(GPIO_FN_TXD2, TXD2_MARK),
+
+ /* PTM (module: LBSC, IIC) */
+ PINMUX_GPIO(GPIO_FN_CS4, CS4_MARK),
+ PINMUX_GPIO(GPIO_FN_RD, RD_MARK),
+ PINMUX_GPIO(GPIO_FN_WE0, WE0_MARK),
+ PINMUX_GPIO(GPIO_FN_CS0, CS0_MARK),
+ PINMUX_GPIO(GPIO_FN_SDA6, SDA6_MARK),
+ PINMUX_GPIO(GPIO_FN_SCL6, SCL6_MARK),
+ PINMUX_GPIO(GPIO_FN_SDA7, SDA7_MARK),
+ PINMUX_GPIO(GPIO_FN_SCL7, SCL7_MARK),
+
+ /* PTN (module: USB, JMC, SGPIO, WDT) */
+ PINMUX_GPIO(GPIO_FN_VBUS_EN, VBUS_EN_MARK),
+ PINMUX_GPIO(GPIO_FN_VBUS_OC, VBUS_OC_MARK),
+ PINMUX_GPIO(GPIO_FN_JMCTCK, JMCTCK_MARK),
+ PINMUX_GPIO(GPIO_FN_JMCTMS, JMCTMS_MARK),
+ PINMUX_GPIO(GPIO_FN_JMCTDO, JMCTDO_MARK),
+ PINMUX_GPIO(GPIO_FN_JMCTDI, JMCTDI_MARK),
+ PINMUX_GPIO(GPIO_FN_JMCTRST, JMCTRST_MARK),
+ PINMUX_GPIO(GPIO_FN_SGPIO1_CLK, SGPIO1_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_SGPIO1_LOAD, SGPIO1_LOAD_MARK),
+ PINMUX_GPIO(GPIO_FN_SGPIO1_DI, SGPIO1_DI_MARK),
+ PINMUX_GPIO(GPIO_FN_SGPIO1_DO, SGPIO1_DO_MARK),
+ PINMUX_GPIO(GPIO_FN_SUB_CLKIN, SUB_CLKIN_MARK),
+
+ /* PTO (module: SGPIO, SerMux) */
+ PINMUX_GPIO(GPIO_FN_SGPIO0_CLK, SGPIO0_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_SGPIO0_LOAD, SGPIO0_LOAD_MARK),
+ PINMUX_GPIO(GPIO_FN_SGPIO0_DI, SGPIO0_DI_MARK),
+ PINMUX_GPIO(GPIO_FN_SGPIO0_DO, SGPIO0_DO_MARK),
+ PINMUX_GPIO(GPIO_FN_SGPIO2_CLK, SGPIO2_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_SGPIO2_LOAD, SGPIO2_LOAD_MARK),
+ PINMUX_GPIO(GPIO_FN_SGPIO2_DI, SGPIO2_DI_MARK),
+ PINMUX_GPIO(GPIO_FN_SGPIO2_DO, SGPIO2_DO_MARK),
+ PINMUX_GPIO(GPIO_FN_COM1_TXD, COM1_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_COM1_RXD, COM1_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_COM1_RTS, COM1_RTS_MARK),
+ PINMUX_GPIO(GPIO_FN_COM1_CTS, COM1_CTS_MARK),
+
+ /* PTP (module: EVC, ADC) */
+
+ /* PTQ (module: LPC) */
+ PINMUX_GPIO(GPIO_FN_LAD3, LAD3_MARK),
+ PINMUX_GPIO(GPIO_FN_LAD2, LAD2_MARK),
+ PINMUX_GPIO(GPIO_FN_LAD1, LAD1_MARK),
+ PINMUX_GPIO(GPIO_FN_LAD0, LAD0_MARK),
+ PINMUX_GPIO(GPIO_FN_LFRAME, LFRAME_MARK),
+ PINMUX_GPIO(GPIO_FN_LRESET, LRESET_MARK),
+ PINMUX_GPIO(GPIO_FN_LCLK, LCLK_MARK),
+
+ /* PTR (module: GRA, IIC) */
+ PINMUX_GPIO(GPIO_FN_DDC3, DDC3_MARK),
+ PINMUX_GPIO(GPIO_FN_DDC2, DDC2_MARK),
+ PINMUX_GPIO(GPIO_FN_SDA8, SDA8_MARK),
+ PINMUX_GPIO(GPIO_FN_SCL8, SCL8_MARK),
+ PINMUX_GPIO(GPIO_FN_SDA2, SDA2_MARK),
+ PINMUX_GPIO(GPIO_FN_SCL2, SCL2_MARK),
+ PINMUX_GPIO(GPIO_FN_SDA1, SDA1_MARK),
+ PINMUX_GPIO(GPIO_FN_SCL1, SCL1_MARK),
+ PINMUX_GPIO(GPIO_FN_SDA0, SDA0_MARK),
+ PINMUX_GPIO(GPIO_FN_SCL0, SCL0_MARK),
+
+ /* PTS (module: GRA, IIC) */
+ PINMUX_GPIO(GPIO_FN_DDC1, DDC1_MARK),
+ PINMUX_GPIO(GPIO_FN_DDC0, DDC0_MARK),
+ PINMUX_GPIO(GPIO_FN_SDA9, SDA9_MARK),
+ PINMUX_GPIO(GPIO_FN_SCL9, SCL9_MARK),
+ PINMUX_GPIO(GPIO_FN_SDA5, SDA5_MARK),
+ PINMUX_GPIO(GPIO_FN_SCL5, SCL5_MARK),
+ PINMUX_GPIO(GPIO_FN_SDA4, SDA4_MARK),
+ PINMUX_GPIO(GPIO_FN_SCL4, SCL4_MARK),
+ PINMUX_GPIO(GPIO_FN_SDA3, SDA3_MARK),
+ PINMUX_GPIO(GPIO_FN_SCL3, SCL3_MARK),
+
+ /* PTT (module: PWMX, AUD) */
+ PINMUX_GPIO(GPIO_FN_PWMX7, PWMX7_MARK),
+ PINMUX_GPIO(GPIO_FN_PWMX6, PWMX6_MARK),
+ PINMUX_GPIO(GPIO_FN_PWMX5, PWMX5_MARK),
+ PINMUX_GPIO(GPIO_FN_PWMX4, PWMX4_MARK),
+ PINMUX_GPIO(GPIO_FN_PWMX3, PWMX3_MARK),
+ PINMUX_GPIO(GPIO_FN_PWMX2, PWMX2_MARK),
+ PINMUX_GPIO(GPIO_FN_PWMX1, PWMX1_MARK),
+ PINMUX_GPIO(GPIO_FN_PWMX0, PWMX0_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDATA3, AUDATA3_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDATA2, AUDATA2_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDATA1, AUDATA1_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDATA0, AUDATA0_MARK),
+ PINMUX_GPIO(GPIO_FN_STATUS1, STATUS1_MARK),
+ PINMUX_GPIO(GPIO_FN_STATUS0, STATUS0_MARK),
+
+ /* PTU (module: LPC, APM) */
+ PINMUX_GPIO(GPIO_FN_LGPIO7, LGPIO7_MARK),
+ PINMUX_GPIO(GPIO_FN_LGPIO6, LGPIO6_MARK),
+ PINMUX_GPIO(GPIO_FN_LGPIO5, LGPIO5_MARK),
+ PINMUX_GPIO(GPIO_FN_LGPIO4, LGPIO4_MARK),
+ PINMUX_GPIO(GPIO_FN_LGPIO3, LGPIO3_MARK),
+ PINMUX_GPIO(GPIO_FN_LGPIO2, LGPIO2_MARK),
+ PINMUX_GPIO(GPIO_FN_LGPIO1, LGPIO1_MARK),
+ PINMUX_GPIO(GPIO_FN_LGPIO0, LGPIO0_MARK),
+ PINMUX_GPIO(GPIO_FN_APMONCTL_O, APMONCTL_O_MARK),
+ PINMUX_GPIO(GPIO_FN_APMPWBTOUT_O, APMPWBTOUT_O_MARK),
+ PINMUX_GPIO(GPIO_FN_APMSCI_O, APMSCI_O_MARK),
+ PINMUX_GPIO(GPIO_FN_APMVDDON, APMVDDON_MARK),
+ PINMUX_GPIO(GPIO_FN_APMSLPBTN, APMSLPBTN_MARK),
+ PINMUX_GPIO(GPIO_FN_APMPWRBTN, APMPWRBTN_MARK),
+ PINMUX_GPIO(GPIO_FN_APMS5N, APMS5N_MARK),
+ PINMUX_GPIO(GPIO_FN_APMS3N, APMS3N_MARK),
+
+ /* PTV (module: LBSC, SerMux, R-SPI, EVC, GRA) */
+ PINMUX_GPIO(GPIO_FN_A23, A23_MARK),
+ PINMUX_GPIO(GPIO_FN_A22, A22_MARK),
+ PINMUX_GPIO(GPIO_FN_A21, A21_MARK),
+ PINMUX_GPIO(GPIO_FN_A20, A20_MARK),
+ PINMUX_GPIO(GPIO_FN_A19, A19_MARK),
+ PINMUX_GPIO(GPIO_FN_A18, A18_MARK),
+ PINMUX_GPIO(GPIO_FN_A17, A17_MARK),
+ PINMUX_GPIO(GPIO_FN_A16, A16_MARK),
+ PINMUX_GPIO(GPIO_FN_COM2_RI, COM2_RI_MARK),
+ PINMUX_GPIO(GPIO_FN_R_SPI_MOSI, R_SPI_MOSI_MARK),
+ PINMUX_GPIO(GPIO_FN_R_SPI_MISO, R_SPI_MISO_MARK),
+ PINMUX_GPIO(GPIO_FN_R_SPI_RSPCK, R_SPI_RSPCK_MARK),
+ PINMUX_GPIO(GPIO_FN_R_SPI_SSL0, R_SPI_SSL0_MARK),
+ PINMUX_GPIO(GPIO_FN_R_SPI_SSL1, R_SPI_SSL1_MARK),
+ PINMUX_GPIO(GPIO_FN_EVENT7, EVENT7_MARK),
+ PINMUX_GPIO(GPIO_FN_EVENT6, EVENT6_MARK),
+ PINMUX_GPIO(GPIO_FN_VBIOS_DI, VBIOS_DI_MARK),
+ PINMUX_GPIO(GPIO_FN_VBIOS_DO, VBIOS_DO_MARK),
+ PINMUX_GPIO(GPIO_FN_VBIOS_CLK, VBIOS_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_VBIOS_CS, VBIOS_CS_MARK),
+
+ /* PTW (module: LBSC, EVC, SCIF) */
+ PINMUX_GPIO(GPIO_FN_A15, A15_MARK),
+ PINMUX_GPIO(GPIO_FN_A14, A14_MARK),
+ PINMUX_GPIO(GPIO_FN_A13, A13_MARK),
+ PINMUX_GPIO(GPIO_FN_A12, A12_MARK),
+ PINMUX_GPIO(GPIO_FN_A11, A11_MARK),
+ PINMUX_GPIO(GPIO_FN_A10, A10_MARK),
+ PINMUX_GPIO(GPIO_FN_A9, A9_MARK),
+ PINMUX_GPIO(GPIO_FN_A8, A8_MARK),
+ PINMUX_GPIO(GPIO_FN_EVENT5, EVENT5_MARK),
+ PINMUX_GPIO(GPIO_FN_EVENT4, EVENT4_MARK),
+ PINMUX_GPIO(GPIO_FN_EVENT3, EVENT3_MARK),
+ PINMUX_GPIO(GPIO_FN_EVENT2, EVENT2_MARK),
+ PINMUX_GPIO(GPIO_FN_EVENT1, EVENT1_MARK),
+ PINMUX_GPIO(GPIO_FN_EVENT0, EVENT0_MARK),
+ PINMUX_GPIO(GPIO_FN_CTS4, CTS4_MARK),
+ PINMUX_GPIO(GPIO_FN_CTS2, CTS2_MARK),
+
+ /* PTX (module: LBSC) */
+ PINMUX_GPIO(GPIO_FN_A7, A7_MARK),
+ PINMUX_GPIO(GPIO_FN_A6, A6_MARK),
+ PINMUX_GPIO(GPIO_FN_A5, A5_MARK),
+ PINMUX_GPIO(GPIO_FN_A4, A4_MARK),
+ PINMUX_GPIO(GPIO_FN_A3, A3_MARK),
+ PINMUX_GPIO(GPIO_FN_A2, A2_MARK),
+ PINMUX_GPIO(GPIO_FN_A1, A1_MARK),
+ PINMUX_GPIO(GPIO_FN_A0, A0_MARK),
+ PINMUX_GPIO(GPIO_FN_RTS2, RTS2_MARK),
+ PINMUX_GPIO(GPIO_FN_SIM_D, SIM_D_MARK),
+ PINMUX_GPIO(GPIO_FN_SIM_CLK, SIM_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_SIM_RST, SIM_RST_MARK),
+
+ /* PTY (module: LBSC) */
+ PINMUX_GPIO(GPIO_FN_D7, D7_MARK),
+ PINMUX_GPIO(GPIO_FN_D6, D6_MARK),
+ PINMUX_GPIO(GPIO_FN_D5, D5_MARK),
+ PINMUX_GPIO(GPIO_FN_D4, D4_MARK),
+ PINMUX_GPIO(GPIO_FN_D3, D3_MARK),
+ PINMUX_GPIO(GPIO_FN_D2, D2_MARK),
+ PINMUX_GPIO(GPIO_FN_D1, D1_MARK),
+ PINMUX_GPIO(GPIO_FN_D0, D0_MARK),
+
+ /* PTZ (module: eMMC, ONFI) */
+ PINMUX_GPIO(GPIO_FN_MMCDAT7, MMCDAT7_MARK),
+ PINMUX_GPIO(GPIO_FN_MMCDAT6, MMCDAT6_MARK),
+ PINMUX_GPIO(GPIO_FN_MMCDAT5, MMCDAT5_MARK),
+ PINMUX_GPIO(GPIO_FN_MMCDAT4, MMCDAT4_MARK),
+ PINMUX_GPIO(GPIO_FN_MMCDAT3, MMCDAT3_MARK),
+ PINMUX_GPIO(GPIO_FN_MMCDAT2, MMCDAT2_MARK),
+ PINMUX_GPIO(GPIO_FN_MMCDAT1, MMCDAT1_MARK),
+ PINMUX_GPIO(GPIO_FN_MMCDAT0, MMCDAT0_MARK),
+ PINMUX_GPIO(GPIO_FN_ON_DQ7, ON_DQ7_MARK),
+ PINMUX_GPIO(GPIO_FN_ON_DQ6, ON_DQ6_MARK),
+ PINMUX_GPIO(GPIO_FN_ON_DQ5, ON_DQ5_MARK),
+ PINMUX_GPIO(GPIO_FN_ON_DQ4, ON_DQ4_MARK),
+ PINMUX_GPIO(GPIO_FN_ON_DQ3, ON_DQ3_MARK),
+ PINMUX_GPIO(GPIO_FN_ON_DQ2, ON_DQ2_MARK),
+ PINMUX_GPIO(GPIO_FN_ON_DQ1, ON_DQ1_MARK),
+ PINMUX_GPIO(GPIO_FN_ON_DQ0, ON_DQ0_MARK),
+};
+
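+/*
+ * Port control registers: each PINMUX_CFG_REG() below describes a 16-bit
+ * register made up of 2-bit fields, one per pin (pin 7 in the top bits down
+ * to pin 0).  The four enum IDs listed per pin give, in field-value order,
+ * the function, output, input and input-with-pull-up settings; a 0 marks a
+ * setting that pin does not provide.
+ */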
+static struct pinmux_cfg_reg pinmux_config_regs[] = {
+ { PINMUX_CFG_REG("PACR", 0xffec0000, 16, 2) {
+ PTA7_FN, PTA7_OUT, PTA7_IN, PTA7_IN_PU,
+ PTA6_FN, PTA6_OUT, PTA6_IN, PTA6_IN_PU,
+ PTA5_FN, PTA5_OUT, PTA5_IN, PTA5_IN_PU,
+ PTA4_FN, PTA4_OUT, PTA4_IN, PTA4_IN_PU,
+ PTA3_FN, PTA3_OUT, PTA3_IN, PTA3_IN_PU,
+ PTA2_FN, PTA2_OUT, PTA2_IN, PTA2_IN_PU,
+ PTA1_FN, PTA1_OUT, PTA1_IN, PTA1_IN_PU,
+ PTA0_FN, PTA0_OUT, PTA0_IN, PTA0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PBCR", 0xffec0002, 16, 2) {
+ PTB7_FN, PTB7_OUT, PTB7_IN, 0,
+ PTB6_FN, PTB6_OUT, PTB6_IN, 0,
+ PTB5_FN, PTB5_OUT, PTB5_IN, 0,
+ PTB4_FN, PTB4_OUT, PTB4_IN, 0,
+ PTB3_FN, PTB3_OUT, PTB3_IN, 0,
+ PTB2_FN, PTB2_OUT, PTB2_IN, 0,
+ PTB1_FN, PTB1_OUT, PTB1_IN, 0,
+ PTB0_FN, PTB0_OUT, PTB0_IN, 0 }
+ },
+ { PINMUX_CFG_REG("PCCR", 0xffec0004, 16, 2) {
+ PTC7_FN, PTC7_OUT, PTC7_IN, 0,
+ PTC6_FN, PTC6_OUT, PTC6_IN, 0,
+ PTC5_FN, PTC5_OUT, PTC5_IN, 0,
+ PTC4_FN, PTC4_OUT, PTC4_IN, 0,
+ PTC3_FN, PTC3_OUT, PTC3_IN, 0,
+ PTC2_FN, PTC2_OUT, PTC2_IN, 0,
+ PTC1_FN, PTC1_OUT, PTC1_IN, 0,
+ PTC0_FN, PTC0_OUT, PTC0_IN, 0 }
+ },
+ { PINMUX_CFG_REG("PDCR", 0xffec0006, 16, 2) {
+ PTD7_FN, PTD7_OUT, PTD7_IN, PTD7_IN_PU,
+ PTD6_FN, PTD6_OUT, PTD6_IN, PTD6_IN_PU,
+ PTD5_FN, PTD5_OUT, PTD5_IN, PTD5_IN_PU,
+ PTD4_FN, PTD4_OUT, PTD4_IN, PTD4_IN_PU,
+ PTD3_FN, PTD3_OUT, PTD3_IN, PTD3_IN_PU,
+ PTD2_FN, PTD2_OUT, PTD2_IN, PTD2_IN_PU,
+ PTD1_FN, PTD1_OUT, PTD1_IN, PTD1_IN_PU,
+ PTD0_FN, PTD0_OUT, PTD0_IN, PTD0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PECR", 0xffec0008, 16, 2) {
+ PTE7_FN, PTE7_OUT, PTE7_IN, PTE7_IN_PU,
+ PTE6_FN, PTE6_OUT, PTE6_IN, PTE6_IN_PU,
+ PTE5_FN, PTE5_OUT, PTE5_IN, PTE5_IN_PU,
+ PTE4_FN, PTE4_OUT, PTE4_IN, PTE4_IN_PU,
+ PTE3_FN, PTE3_OUT, PTE3_IN, PTE3_IN_PU,
+ PTE2_FN, PTE2_OUT, PTE2_IN, PTE2_IN_PU,
+ PTE1_FN, PTE1_OUT, PTE1_IN, PTE1_IN_PU,
+ PTE0_FN, PTE0_OUT, PTE0_IN, PTE0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PFCR", 0xffec000a, 16, 2) {
+ PTF7_FN, PTF7_OUT, PTF7_IN, PTF7_IN_PU,
+ PTF6_FN, PTF6_OUT, PTF6_IN, PTF6_IN_PU,
+ PTF5_FN, PTF5_OUT, PTF5_IN, PTF5_IN_PU,
+ PTF4_FN, PTF4_OUT, PTF4_IN, PTF4_IN_PU,
+ PTF3_FN, PTF3_OUT, PTF3_IN, PTF3_IN_PU,
+ PTF2_FN, PTF2_OUT, PTF2_IN, PTF2_IN_PU,
+ PTF1_FN, PTF1_OUT, PTF1_IN, PTF1_IN_PU,
+ PTF0_FN, PTF0_OUT, PTF0_IN, PTF0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PGCR", 0xffec000c, 16, 2) {
+ PTG7_FN, PTG7_OUT, PTG7_IN, PTG7_IN_PU,
+ PTG6_FN, PTG6_OUT, PTG6_IN, PTG6_IN_PU,
+ PTG5_FN, PTG5_OUT, PTG5_IN, 0,
+ PTG4_FN, PTG4_OUT, PTG4_IN, PTG4_IN_PU,
+ PTG3_FN, PTG3_OUT, PTG3_IN, 0,
+ PTG2_FN, PTG2_OUT, PTG2_IN, 0,
+ PTG1_FN, PTG1_OUT, PTG1_IN, 0,
+ PTG0_FN, PTG0_OUT, PTG0_IN, 0 }
+ },
+ { PINMUX_CFG_REG("PHCR", 0xffec000e, 16, 2) {
+ PTH7_FN, PTH7_OUT, PTH7_IN, PTH7_IN_PU,
+ PTH6_FN, PTH6_OUT, PTH6_IN, PTH6_IN_PU,
+ PTH5_FN, PTH5_OUT, PTH5_IN, PTH5_IN_PU,
+ PTH4_FN, PTH4_OUT, PTH4_IN, PTH4_IN_PU,
+ PTH3_FN, PTH3_OUT, PTH3_IN, PTH3_IN_PU,
+ PTH2_FN, PTH2_OUT, PTH2_IN, PTH2_IN_PU,
+ PTH1_FN, PTH1_OUT, PTH1_IN, PTH1_IN_PU,
+ PTH0_FN, PTH0_OUT, PTH0_IN, PTH0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PICR", 0xffec0010, 16, 2) {
+ PTI7_FN, PTI7_OUT, PTI7_IN, PTI7_IN_PU,
+ PTI6_FN, PTI6_OUT, PTI6_IN, PTI6_IN_PU,
+ PTI5_FN, PTI5_OUT, PTI5_IN, 0,
+ PTI4_FN, PTI4_OUT, PTI4_IN, PTI4_IN_PU,
+ PTI3_FN, PTI3_OUT, PTI3_IN, PTI3_IN_PU,
+ PTI2_FN, PTI2_OUT, PTI2_IN, PTI2_IN_PU,
+ PTI1_FN, PTI1_OUT, PTI1_IN, PTI1_IN_PU,
+ PTI0_FN, PTI0_OUT, PTI0_IN, PTI0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PJCR", 0xffec0012, 16, 2) {
+ 0, 0, 0, 0, /* reserved: always set to 1 */
+ PTJ6_FN, PTJ6_OUT, PTJ6_IN, PTJ6_IN_PU,
+ PTJ5_FN, PTJ5_OUT, PTJ5_IN, PTJ5_IN_PU,
+ PTJ4_FN, PTJ4_OUT, PTJ4_IN, PTJ4_IN_PU,
+ PTJ3_FN, PTJ3_OUT, PTJ3_IN, PTJ3_IN_PU,
+ PTJ2_FN, PTJ2_OUT, PTJ2_IN, PTJ2_IN_PU,
+ PTJ1_FN, PTJ1_OUT, PTJ1_IN, PTJ1_IN_PU,
+ PTJ0_FN, PTJ0_OUT, PTJ0_IN, PTJ0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PKCR", 0xffec0014, 16, 2) {
+ PTK7_FN, PTK7_OUT, PTK7_IN, PTK7_IN_PU,
+ PTK6_FN, PTK6_OUT, PTK6_IN, PTK6_IN_PU,
+ PTK5_FN, PTK5_OUT, PTK5_IN, PTK5_IN_PU,
+ PTK4_FN, PTK4_OUT, PTK4_IN, PTK4_IN_PU,
+ PTK3_FN, PTK3_OUT, PTK3_IN, PTK3_IN_PU,
+ PTK2_FN, PTK2_OUT, PTK2_IN, PTK2_IN_PU,
+ PTK1_FN, PTK1_OUT, PTK1_IN, PTK1_IN_PU,
+ PTK0_FN, PTK0_OUT, PTK0_IN, PTK0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PLCR", 0xffec0016, 16, 2) {
+ 0, 0, 0, 0, /* reserved: always set to 1 */
+ PTL6_FN, PTL6_OUT, PTL6_IN, PTL6_IN_PU,
+ PTL5_FN, PTL5_OUT, PTL5_IN, PTL5_IN_PU,
+ PTL4_FN, PTL4_OUT, PTL4_IN, PTL4_IN_PU,
+ PTL3_FN, PTL3_OUT, PTL3_IN, PTL3_IN_PU,
+ PTL2_FN, PTL2_OUT, PTL2_IN, PTL2_IN_PU,
+ PTL1_FN, PTL1_OUT, PTL1_IN, PTL1_IN_PU,
+ PTL0_FN, PTL0_OUT, PTL0_IN, PTL0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PMCR", 0xffec0018, 16, 2) {
+ PTM7_FN, PTM7_OUT, PTM7_IN, PTM7_IN_PU,
+ PTM6_FN, PTM6_OUT, PTM6_IN, PTM6_IN_PU,
+ PTM5_FN, PTM5_OUT, PTM5_IN, PTM5_IN_PU,
+ PTM4_FN, PTM4_OUT, PTM4_IN, PTM4_IN_PU,
+ PTM3_FN, PTM3_OUT, PTM3_IN, 0,
+ PTM2_FN, PTM2_OUT, PTM2_IN, 0,
+ PTM1_FN, PTM1_OUT, PTM1_IN, 0,
+ PTM0_FN, PTM0_OUT, PTM0_IN, 0 }
+ },
+ { PINMUX_CFG_REG("PNCR", 0xffec001a, 16, 2) {
+ 0, 0, 0, 0, /* reserved: always set to 1 */
+ PTN6_FN, PTN6_OUT, PTN6_IN, 0,
+ PTN5_FN, PTN5_OUT, PTN5_IN, 0,
+ PTN4_FN, PTN4_OUT, PTN4_IN, PTN4_IN_PU,
+ PTN3_FN, PTN3_OUT, PTN3_IN, PTN3_IN_PU,
+ PTN2_FN, PTN2_OUT, PTN2_IN, PTN2_IN_PU,
+ PTN1_FN, PTN1_OUT, PTN1_IN, PTN1_IN_PU,
+ PTN0_FN, PTN0_OUT, PTN0_IN, PTN0_IN_PU }
+ },
+ { PINMUX_CFG_REG("POCR", 0xffec001c, 16, 2) {
+ PTO7_FN, PTO7_OUT, PTO7_IN, PTO7_IN_PU,
+ PTO6_FN, PTO6_OUT, PTO6_IN, PTO6_IN_PU,
+ PTO5_FN, PTO5_OUT, PTO5_IN, PTO5_IN_PU,
+ PTO4_FN, PTO4_OUT, PTO4_IN, PTO4_IN_PU,
+ PTO3_FN, PTO3_OUT, PTO3_IN, PTO3_IN_PU,
+ PTO2_FN, PTO2_OUT, PTO2_IN, PTO2_IN_PU,
+ PTO1_FN, PTO1_OUT, PTO1_IN, PTO1_IN_PU,
+ PTO0_FN, PTO0_OUT, PTO0_IN, PTO0_IN_PU }
+ },
+#if 0 /* FIXME: Remove it? */
+ { PINMUX_CFG_REG("PPCR", 0xffec001e, 16, 2) {
+ 0, 0, 0, 0, /* reserved: always set to 1 */
+ PTP6_FN, PTP6_OUT, PTP6_IN, 0,
+ PTP5_FN, PTP5_OUT, PTP5_IN, 0,
+ PTP4_FN, PTP4_OUT, PTP4_IN, 0,
+ PTP3_FN, PTP3_OUT, PTP3_IN, 0,
+ PTP2_FN, PTP2_OUT, PTP2_IN, 0,
+ PTP1_FN, PTP1_OUT, PTP1_IN, 0,
+ PTP0_FN, PTP0_OUT, PTP0_IN, 0 }
+ },
+#endif
+ { PINMUX_CFG_REG("PQCR", 0xffec0020, 16, 2) {
+ 0, 0, 0, 0, /* reserved: always set to 1 */
+ PTQ6_FN, PTQ6_OUT, PTQ6_IN, 0,
+ PTQ5_FN, PTQ5_OUT, PTQ5_IN, 0,
+ PTQ4_FN, PTQ4_OUT, PTQ4_IN, 0,
+ PTQ3_FN, PTQ3_OUT, PTQ3_IN, 0,
+ PTQ2_FN, PTQ2_OUT, PTQ2_IN, 0,
+ PTQ1_FN, PTQ1_OUT, PTQ1_IN, 0,
+ PTQ0_FN, PTQ0_OUT, PTQ0_IN, 0 }
+ },
+ { PINMUX_CFG_REG("PRCR", 0xffec0022, 16, 2) {
+ PTR7_FN, PTR7_OUT, PTR7_IN, 0,
+ PTR6_FN, PTR6_OUT, PTR6_IN, 0,
+ PTR5_FN, PTR5_OUT, PTR5_IN, 0,
+ PTR4_FN, PTR4_OUT, PTR4_IN, 0,
+ PTR3_FN, PTR3_OUT, PTR3_IN, 0,
+ PTR2_FN, PTR2_OUT, PTR2_IN, 0,
+ PTR1_FN, PTR1_OUT, PTR1_IN, 0,
+ PTR0_FN, PTR0_OUT, PTR0_IN, 0 }
+ },
+ { PINMUX_CFG_REG("PSCR", 0xffec0024, 16, 2) {
+ PTS7_FN, PTS7_OUT, PTS7_IN, 0,
+ PTS6_FN, PTS6_OUT, PTS6_IN, 0,
+ PTS5_FN, PTS5_OUT, PTS5_IN, 0,
+ PTS4_FN, PTS4_OUT, PTS4_IN, 0,
+ PTS3_FN, PTS3_OUT, PTS3_IN, 0,
+ PTS2_FN, PTS2_OUT, PTS2_IN, 0,
+ PTS1_FN, PTS1_OUT, PTS1_IN, 0,
+ PTS0_FN, PTS0_OUT, PTS0_IN, 0 }
+ },
+ { PINMUX_CFG_REG("PTCR", 0xffec0026, 16, 2) {
+ PTT7_FN, PTT7_OUT, PTT7_IN, PTO7_IN_PU,
+ PTT6_FN, PTT6_OUT, PTT6_IN, PTO6_IN_PU,
+ PTT5_FN, PTT5_OUT, PTT5_IN, PTO5_IN_PU,
+ PTT4_FN, PTT4_OUT, PTT4_IN, PTO4_IN_PU,
+ PTT3_FN, PTT3_OUT, PTT3_IN, PTO3_IN_PU,
+ PTT2_FN, PTT2_OUT, PTT2_IN, PTO2_IN_PU,
+ PTT1_FN, PTT1_OUT, PTT1_IN, PTO1_IN_PU,
+ PTT0_FN, PTT0_OUT, PTT0_IN, PTO0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PUCR", 0xffec0028, 16, 2) {
+ PTU7_FN, PTU7_OUT, PTU7_IN, PTU7_IN_PU,
+ PTU6_FN, PTU6_OUT, PTU6_IN, PTU6_IN_PU,
+ PTU5_FN, PTU5_OUT, PTU5_IN, PTU5_IN_PU,
+ PTU4_FN, PTU4_OUT, PTU4_IN, PTU4_IN_PU,
+ PTU3_FN, PTU3_OUT, PTU3_IN, PTU3_IN_PU,
+ PTU2_FN, PTU2_OUT, PTU2_IN, PTU2_IN_PU,
+ PTU1_FN, PTU1_OUT, PTU1_IN, PTU1_IN_PU,
+ PTU0_FN, PTU0_OUT, PTU0_IN, PTU0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PVCR", 0xffec002a, 16, 2) {
+ PTV7_FN, PTV7_OUT, PTV7_IN, PTV7_IN_PU,
+ PTV6_FN, PTV6_OUT, PTV6_IN, PTV6_IN_PU,
+ PTV5_FN, PTV5_OUT, PTV5_IN, PTV5_IN_PU,
+ PTV4_FN, PTV4_OUT, PTV4_IN, PTV4_IN_PU,
+ PTV3_FN, PTV3_OUT, PTV3_IN, PTV3_IN_PU,
+ PTV2_FN, PTV2_OUT, PTV2_IN, PTV2_IN_PU,
+ PTV1_FN, PTV1_OUT, PTV1_IN, 0,
+ PTV0_FN, PTV0_OUT, PTV0_IN, 0 }
+ },
+ { PINMUX_CFG_REG("PWCR", 0xffec002c, 16, 2) {
+ PTW7_FN, PTW7_OUT, PTW7_IN, 0,
+ PTW6_FN, PTW6_OUT, PTW6_IN, 0,
+ PTW5_FN, PTW5_OUT, PTW5_IN, 0,
+ PTW4_FN, PTW4_OUT, PTW4_IN, 0,
+ PTW3_FN, PTW3_OUT, PTW3_IN, 0,
+ PTW2_FN, PTW2_OUT, PTW2_IN, 0,
+ PTW1_FN, PTW1_OUT, PTW1_IN, PTW1_IN_PU,
+ PTW0_FN, PTW0_OUT, PTW0_IN, PTW0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PXCR", 0xffec002e, 16, 2) {
+ PTX7_FN, PTX7_OUT, PTX7_IN, PTX7_IN_PU,
+ PTX6_FN, PTX6_OUT, PTX6_IN, PTX6_IN_PU,
+ PTX5_FN, PTX5_OUT, PTX5_IN, PTX5_IN_PU,
+ PTX4_FN, PTX4_OUT, PTX4_IN, PTX4_IN_PU,
+ PTX3_FN, PTX3_OUT, PTX3_IN, PTX3_IN_PU,
+ PTX2_FN, PTX2_OUT, PTX2_IN, PTX2_IN_PU,
+ PTX1_FN, PTX1_OUT, PTX1_IN, PTX1_IN_PU,
+ PTX0_FN, PTX0_OUT, PTX0_IN, PTX0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PYCR", 0xffec0030, 16, 2) {
+ PTY7_FN, PTY7_OUT, PTY7_IN, PTY7_IN_PU,
+ PTY6_FN, PTY6_OUT, PTY6_IN, PTY6_IN_PU,
+ PTY5_FN, PTY5_OUT, PTY5_IN, PTY5_IN_PU,
+ PTY4_FN, PTY4_OUT, PTY4_IN, PTY4_IN_PU,
+ PTY3_FN, PTY3_OUT, PTY3_IN, PTY3_IN_PU,
+ PTY2_FN, PTY2_OUT, PTY2_IN, PTY2_IN_PU,
+ PTY1_FN, PTY1_OUT, PTY1_IN, PTY1_IN_PU,
+ PTY0_FN, PTY0_OUT, PTY0_IN, PTY0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PZCR", 0xffec0032, 16, 2) {
+ PTZ7_FN, PTZ7_OUT, PTZ7_IN, 0,
+ PTZ6_FN, PTZ6_OUT, PTZ6_IN, 0,
+ PTZ5_FN, PTZ5_OUT, PTZ5_IN, 0,
+ PTZ4_FN, PTZ4_OUT, PTZ4_IN, 0,
+ PTZ3_FN, PTZ3_OUT, PTZ3_IN, 0,
+ PTZ2_FN, PTZ2_OUT, PTZ2_IN, 0,
+ PTZ1_FN, PTZ1_OUT, PTZ1_IN, 0,
+ PTZ0_FN, PTZ0_OUT, PTZ0_IN, 0 }
+ },
+
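+/*
+ * Function-select registers: 1-bit fields, each choosing between the first
+ * (..._FN1) and second (..._FN2) peripheral function of a multiplexed pin.
+ * Bits without a second function are left as 0.
+ */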
+ { PINMUX_CFG_REG("PSEL0", 0xffec0070, 16, 1) {
+ PS0_15_FN1, PS0_15_FN2,
+ PS0_14_FN1, PS0_14_FN2,
+ PS0_13_FN1, PS0_13_FN2,
+ PS0_12_FN1, PS0_12_FN2,
+ PS0_11_FN1, PS0_11_FN2,
+ PS0_10_FN1, PS0_10_FN2,
+ PS0_9_FN1, PS0_9_FN2,
+ PS0_8_FN1, PS0_8_FN2,
+ PS0_7_FN1, PS0_7_FN2,
+ PS0_6_FN1, PS0_6_FN2,
+ PS0_5_FN1, PS0_5_FN2,
+ PS0_4_FN1, PS0_4_FN2,
+ PS0_3_FN1, PS0_3_FN2,
+ PS0_2_FN1, PS0_2_FN2,
+ 0, 0,
+ 0, 0, }
+ },
+ { PINMUX_CFG_REG("PSEL1", 0xffec0072, 16, 1) {
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ PS1_10_FN1, PS1_10_FN2,
+ PS1_9_FN1, PS1_9_FN2,
+ PS1_8_FN1, PS1_8_FN2,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ PS1_2_FN1, PS1_2_FN2,
+ 0, 0,
+ 0, 0, }
+ },
+ { PINMUX_CFG_REG("PSEL2", 0xffec0074, 16, 1) {
+ 0, 0,
+ 0, 0,
+ PS2_13_FN1, PS2_13_FN2,
+ PS2_12_FN1, PS2_12_FN2,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ PS2_7_FN1, PS2_7_FN2,
+ PS2_6_FN1, PS2_6_FN2,
+ PS2_5_FN1, PS2_5_FN2,
+ PS2_4_FN1, PS2_4_FN2,
+ 0, 0,
+ PS2_2_FN1, PS2_2_FN2,
+ 0, 0,
+ 0, 0, }
+ },
+ { PINMUX_CFG_REG("PSEL3", 0xffec0076, 16, 1) {
+ PS3_15_FN1, PS3_15_FN2,
+ PS3_14_FN1, PS3_14_FN2,
+ PS3_13_FN1, PS3_13_FN2,
+ PS3_12_FN1, PS3_12_FN2,
+ PS3_11_FN1, PS3_11_FN2,
+ PS3_10_FN1, PS3_10_FN2,
+ PS3_9_FN1, PS3_9_FN2,
+ PS3_8_FN1, PS3_8_FN2,
+ PS3_7_FN1, PS3_7_FN2,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ PS3_2_FN1, PS3_2_FN2,
+ PS3_1_FN1, PS3_1_FN2,
+ 0, 0, }
+ },
+
+ { PINMUX_CFG_REG("PSEL4", 0xffec0078, 16, 1) {
+ 0, 0,
+ PS4_14_FN1, PS4_14_FN2,
+ PS4_13_FN1, PS4_13_FN2,
+ PS4_12_FN1, PS4_12_FN2,
+ 0, 0,
+ PS4_10_FN1, PS4_10_FN2,
+ PS4_9_FN1, PS4_9_FN2,
+ PS4_8_FN1, PS4_8_FN2,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ PS4_4_FN1, PS4_4_FN2,
+ PS4_3_FN1, PS4_3_FN2,
+ PS4_2_FN1, PS4_2_FN2,
+ PS4_1_FN1, PS4_1_FN2,
+ PS4_0_FN1, PS4_0_FN2, }
+ },
+ { PINMUX_CFG_REG("PSEL5", 0xffec007a, 16, 1) {
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ PS5_11_FN1, PS5_11_FN2,
+ PS5_10_FN1, PS5_10_FN2,
+ PS5_9_FN1, PS5_9_FN2,
+ PS5_8_FN1, PS5_8_FN2,
+ PS5_7_FN1, PS5_7_FN2,
+ PS5_6_FN1, PS5_6_FN2,
+ PS5_5_FN1, PS5_5_FN2,
+ PS5_4_FN1, PS5_4_FN2,
+ PS5_3_FN1, PS5_3_FN2,
+ PS5_2_FN1, PS5_2_FN2,
+ 0, 0,
+ 0, 0, }
+ },
+ { PINMUX_CFG_REG("PSEL6", 0xffec007c, 16, 1) {
+ PS6_15_FN1, PS6_15_FN2,
+ PS6_14_FN1, PS6_14_FN2,
+ PS6_13_FN1, PS6_13_FN2,
+ PS6_12_FN1, PS6_12_FN2,
+ PS6_11_FN1, PS6_11_FN2,
+ PS6_10_FN1, PS6_10_FN2,
+ PS6_9_FN1, PS6_9_FN2,
+ PS6_8_FN1, PS6_8_FN2,
+ PS6_7_FN1, PS6_7_FN2,
+ PS6_6_FN1, PS6_6_FN2,
+ PS6_5_FN1, PS6_5_FN2,
+ PS6_4_FN1, PS6_4_FN2,
+ PS6_3_FN1, PS6_3_FN2,
+ PS6_2_FN1, PS6_2_FN2,
+ PS6_1_FN1, PS6_1_FN2,
+ PS6_0_FN1, PS6_0_FN2, }
+ },
+ { PINMUX_CFG_REG("PSEL7", 0xffec0082, 16, 1) {
+ PS7_15_FN1, PS7_15_FN2,
+ PS7_14_FN1, PS7_14_FN2,
+ PS7_13_FN1, PS7_13_FN2,
+ PS7_12_FN1, PS7_12_FN2,
+ PS7_11_FN1, PS7_11_FN2,
+ PS7_10_FN1, PS7_10_FN2,
+ PS7_9_FN1, PS7_9_FN2,
+ PS7_8_FN1, PS7_8_FN2,
+ PS7_7_FN1, PS7_7_FN2,
+ PS7_6_FN1, PS7_6_FN2,
+ PS7_5_FN1, PS7_5_FN2,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0, }
+ },
+ { PINMUX_CFG_REG("PSEL8", 0xffec0084, 16, 1) {
+ PS8_15_FN1, PS8_15_FN2,
+ PS8_14_FN1, PS8_14_FN2,
+ PS8_13_FN1, PS8_13_FN2,
+ PS8_12_FN1, PS8_12_FN2,
+ PS8_11_FN1, PS8_11_FN2,
+ PS8_10_FN1, PS8_10_FN2,
+ PS8_9_FN1, PS8_9_FN2,
+ PS8_8_FN1, PS8_8_FN2,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0, }
+ },
+ {}
+};
+
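+/*
+ * Port data registers: 8-bit registers holding the pin levels, listed MSB
+ * first (pin 7 down to pin 0); a 0 entry means that bit has no pin behind it.
+ */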
+static struct pinmux_data_reg pinmux_data_regs[] = {
+ { PINMUX_DATA_REG("PADR", 0xffec0034, 8) {
+ PTA7_DATA, PTA6_DATA, PTA5_DATA, PTA4_DATA,
+ PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA }
+ },
+ { PINMUX_DATA_REG("PBDR", 0xffec0036, 8) {
+ PTB7_DATA, PTB6_DATA, PTB5_DATA, PTB4_DATA,
+ PTB3_DATA, PTB2_DATA, PTB1_DATA, PTB0_DATA }
+ },
+ { PINMUX_DATA_REG("PCDR", 0xffec0038, 8) {
+ PTC7_DATA, PTC6_DATA, PTC5_DATA, PTC4_DATA,
+ PTC3_DATA, PTC2_DATA, PTC1_DATA, PTC0_DATA }
+ },
+ { PINMUX_DATA_REG("PDDR", 0xffec003a, 8) {
+ PTD7_DATA, PTD6_DATA, PTD5_DATA, PTD4_DATA,
+ PTD3_DATA, PTD2_DATA, PTD1_DATA, PTD0_DATA }
+ },
+ { PINMUX_DATA_REG("PEDR", 0xffec003c, 8) {
+ PTE7_DATA, PTE6_DATA, PTE5_DATA, PTE4_DATA,
+ PTE3_DATA, PTE2_DATA, PTE1_DATA, PTE0_DATA }
+ },
+ { PINMUX_DATA_REG("PFDR", 0xffec003e, 8) {
+ PTF7_DATA, PTF6_DATA, PTF5_DATA, PTF4_DATA,
+ PTF3_DATA, PTF2_DATA, PTF1_DATA, PTF0_DATA }
+ },
+ { PINMUX_DATA_REG("PGDR", 0xffec0040, 8) {
+ PTG7_DATA, PTG6_DATA, PTG5_DATA, PTG4_DATA,
+ PTG3_DATA, PTG2_DATA, PTG1_DATA, PTG0_DATA }
+ },
+ { PINMUX_DATA_REG("PHDR", 0xffec0042, 8) {
+ PTH7_DATA, PTH6_DATA, PTH5_DATA, PTH4_DATA,
+ PTH3_DATA, PTH2_DATA, PTH1_DATA, PTH0_DATA }
+ },
+ { PINMUX_DATA_REG("PIDR", 0xffec0044, 8) {
+ PTI7_DATA, PTI6_DATA, PTI5_DATA, PTI4_DATA,
+ PTI3_DATA, PTI2_DATA, PTI1_DATA, PTI0_DATA }
+ },
+ { PINMUX_DATA_REG("PJDR", 0xffec0046, 8) {
+ 0, PTJ6_DATA, PTJ5_DATA, PTJ4_DATA,
+ PTJ3_DATA, PTJ2_DATA, PTJ1_DATA, PTJ0_DATA }
+ },
+ { PINMUX_DATA_REG("PKDR", 0xffec0048, 8) {
+ PTK7_DATA, PTK6_DATA, PTK5_DATA, PTK4_DATA,
+ PTK3_DATA, PTK2_DATA, PTK1_DATA, PTK0_DATA }
+ },
+ { PINMUX_DATA_REG("PLDR", 0xffec004a, 8) {
+ 0, PTL6_DATA, PTL5_DATA, PTL4_DATA,
+ PTL3_DATA, PTL2_DATA, PTL1_DATA, PTL0_DATA }
+ },
+ { PINMUX_DATA_REG("PMDR", 0xffec004c, 8) {
+ PTM7_DATA, PTM6_DATA, PTM5_DATA, PTM4_DATA,
+ PTM3_DATA, PTM2_DATA, PTM1_DATA, PTM0_DATA }
+ },
+ { PINMUX_DATA_REG("PNDR", 0xffec004e, 8) {
+ 0, PTN6_DATA, PTN5_DATA, PTN4_DATA,
+ PTN3_DATA, PTN2_DATA, PTN1_DATA, PTN0_DATA }
+ },
+ { PINMUX_DATA_REG("PODR", 0xffec0050, 8) {
+ PTO7_DATA, PTO6_DATA, PTO5_DATA, PTO4_DATA,
+ PTO3_DATA, PTO2_DATA, PTO1_DATA, PTO0_DATA }
+ },
+ { PINMUX_DATA_REG("PPDR", 0xffec0052, 8) {
+ PTP7_DATA, PTP6_DATA, PTP5_DATA, PTP4_DATA,
+ PTP3_DATA, PTP2_DATA, PTP1_DATA, PTP0_DATA }
+ },
+ { PINMUX_DATA_REG("PQDR", 0xffec0054, 8) {
+ 0, PTQ6_DATA, PTQ5_DATA, PTQ4_DATA,
+ PTQ3_DATA, PTQ2_DATA, PTQ1_DATA, PTQ0_DATA }
+ },
+ { PINMUX_DATA_REG("PRDR", 0xffec0056, 8) {
+ PTR7_DATA, PTR6_DATA, PTR5_DATA, PTR4_DATA,
+ PTR3_DATA, PTR2_DATA, PTR1_DATA, PTR0_DATA }
+ },
+ { PINMUX_DATA_REG("PSDR", 0xffec0058, 8) {
+ PTS7_DATA, PTS6_DATA, PTS5_DATA, PTS4_DATA,
+ PTS3_DATA, PTS2_DATA, PTS1_DATA, PTS0_DATA }
+ },
+ { PINMUX_DATA_REG("PTDR", 0xffec005a, 8) {
+ PTT7_DATA, PTT6_DATA, PTT5_DATA, PTT4_DATA,
+ PTT3_DATA, PTT2_DATA, PTT1_DATA, PTT0_DATA }
+ },
+ { PINMUX_DATA_REG("PUDR", 0xffec005c, 8) {
+ PTU7_DATA, PTU6_DATA, PTU5_DATA, PTU4_DATA,
+ PTU3_DATA, PTU2_DATA, PTU1_DATA, PTU0_DATA }
+ },
+ { PINMUX_DATA_REG("PVDR", 0xffec005e, 8) {
+ PTV7_DATA, PTV6_DATA, PTV5_DATA, PTV4_DATA,
+ PTV3_DATA, PTV2_DATA, PTV1_DATA, PTV0_DATA }
+ },
+ { PINMUX_DATA_REG("PWDR", 0xffec0060, 8) {
+ PTW7_DATA, PTW6_DATA, PTW5_DATA, PTW4_DATA,
+ PTW3_DATA, PTW2_DATA, PTW1_DATA, PTW0_DATA }
+ },
+ { PINMUX_DATA_REG("PXDR", 0xffec0062, 8) {
+ PTX7_DATA, PTX6_DATA, PTX5_DATA, PTX4_DATA,
+ PTX3_DATA, PTX2_DATA, PTX1_DATA, PTX0_DATA }
+ },
+ { PINMUX_DATA_REG("PYDR", 0xffec0064, 8) {
+ PTY7_DATA, PTY6_DATA, PTY5_DATA, PTY4_DATA,
+ PTY3_DATA, PTY2_DATA, PTY1_DATA, PTY0_DATA }
+ },
+ { PINMUX_DATA_REG("PZDR", 0xffec0066, 8) {
+ PTZ7_DATA, PTZ6_DATA, PTZ5_DATA, PTZ4_DATA,
+ PTZ3_DATA, PTZ2_DATA, PTZ1_DATA, PTZ0_DATA }
+ },
+ { },
+};
+
+static struct pinmux_info sh7757_pinmux_info = {
+ .name = "sh7757_pfc",
+ .reserved_id = PINMUX_RESERVED,
+ .data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END },
+ .input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END },
+ .input_pu = { PINMUX_INPUT_PULLUP_BEGIN, PINMUX_INPUT_PULLUP_END },
+ .output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END },
+ .mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END },
+ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+ .first_gpio = GPIO_PTA0,
+ .last_gpio = GPIO_FN_ON_DQ0,
+
+ .gpios = pinmux_gpios,
+ .cfg_regs = pinmux_config_regs,
+ .data_regs = pinmux_data_regs,
+
+ .gpio_data = pinmux_data,
+ .gpio_data_size = ARRAY_SIZE(pinmux_data),
+};
+
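+/*
+ * Register the tables with the common SH pinmux core at arch_initcall time,
+ * so the pins are reachable through the generic GPIO API before boards and
+ * drivers start requesting them.
+ */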
+static int __init plat_pinmux_setup(void)
+{
+ return register_pinmux(&sh7757_pinmux_info);
+}
+arch_initcall(plat_pinmux_setup);
diff --git a/arch/sh/kernel/cpu/sh4a/pinmux-sh7785.c b/arch/sh/kernel/cpu/sh4a/pinmux-sh7785.c
new file mode 100644
index 00000000..5ebc25fd
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/pinmux-sh7785.c
@@ -0,0 +1,1310 @@
+/*
+ * SH7785 Pinmux
+ *
+ * Copyright (C) 2008 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <cpu/sh7785.h>
+
+enum {
+ PINMUX_RESERVED = 0,
+
+ PINMUX_DATA_BEGIN,
+ PA7_DATA, PA6_DATA, PA5_DATA, PA4_DATA,
+ PA3_DATA, PA2_DATA, PA1_DATA, PA0_DATA,
+ PB7_DATA, PB6_DATA, PB5_DATA, PB4_DATA,
+ PB3_DATA, PB2_DATA, PB1_DATA, PB0_DATA,
+ PC7_DATA, PC6_DATA, PC5_DATA, PC4_DATA,
+ PC3_DATA, PC2_DATA, PC1_DATA, PC0_DATA,
+ PD7_DATA, PD6_DATA, PD5_DATA, PD4_DATA,
+ PD3_DATA, PD2_DATA, PD1_DATA, PD0_DATA,
+ PE5_DATA, PE4_DATA, PE3_DATA, PE2_DATA, PE1_DATA, PE0_DATA,
+ PF7_DATA, PF6_DATA, PF5_DATA, PF4_DATA,
+ PF3_DATA, PF2_DATA, PF1_DATA, PF0_DATA,
+ PG7_DATA, PG6_DATA, PG5_DATA, PG4_DATA,
+ PG3_DATA, PG2_DATA, PG1_DATA, PG0_DATA,
+ PH7_DATA, PH6_DATA, PH5_DATA, PH4_DATA,
+ PH3_DATA, PH2_DATA, PH1_DATA, PH0_DATA,
+ PJ7_DATA, PJ6_DATA, PJ5_DATA, PJ4_DATA,
+ PJ3_DATA, PJ2_DATA, PJ1_DATA, PJ0_DATA,
+ PK7_DATA, PK6_DATA, PK5_DATA, PK4_DATA,
+ PK3_DATA, PK2_DATA, PK1_DATA, PK0_DATA,
+ PL7_DATA, PL6_DATA, PL5_DATA, PL4_DATA,
+ PL3_DATA, PL2_DATA, PL1_DATA, PL0_DATA,
+ PM1_DATA, PM0_DATA,
+ PN7_DATA, PN6_DATA, PN5_DATA, PN4_DATA,
+ PN3_DATA, PN2_DATA, PN1_DATA, PN0_DATA,
+ PP5_DATA, PP4_DATA, PP3_DATA, PP2_DATA, PP1_DATA, PP0_DATA,
+ PQ4_DATA, PQ3_DATA, PQ2_DATA, PQ1_DATA, PQ0_DATA,
+ PR3_DATA, PR2_DATA, PR1_DATA, PR0_DATA,
+ PINMUX_DATA_END,
+
+ PINMUX_INPUT_BEGIN,
+ PA7_IN, PA6_IN, PA5_IN, PA4_IN,
+ PA3_IN, PA2_IN, PA1_IN, PA0_IN,
+ PB7_IN, PB6_IN, PB5_IN, PB4_IN,
+ PB3_IN, PB2_IN, PB1_IN, PB0_IN,
+ PC7_IN, PC6_IN, PC5_IN, PC4_IN,
+ PC3_IN, PC2_IN, PC1_IN, PC0_IN,
+ PD7_IN, PD6_IN, PD5_IN, PD4_IN,
+ PD3_IN, PD2_IN, PD1_IN, PD0_IN,
+ PE5_IN, PE4_IN, PE3_IN, PE2_IN, PE1_IN, PE0_IN,
+ PF7_IN, PF6_IN, PF5_IN, PF4_IN,
+ PF3_IN, PF2_IN, PF1_IN, PF0_IN,
+ PG7_IN, PG6_IN, PG5_IN, PG4_IN,
+ PG3_IN, PG2_IN, PG1_IN, PG0_IN,
+ PH7_IN, PH6_IN, PH5_IN, PH4_IN,
+ PH3_IN, PH2_IN, PH1_IN, PH0_IN,
+ PJ7_IN, PJ6_IN, PJ5_IN, PJ4_IN,
+ PJ3_IN, PJ2_IN, PJ1_IN, PJ0_IN,
+ PK7_IN, PK6_IN, PK5_IN, PK4_IN,
+ PK3_IN, PK2_IN, PK1_IN, PK0_IN,
+ PL7_IN, PL6_IN, PL5_IN, PL4_IN,
+ PL3_IN, PL2_IN, PL1_IN, PL0_IN,
+ PM1_IN, PM0_IN,
+ PN7_IN, PN6_IN, PN5_IN, PN4_IN,
+ PN3_IN, PN2_IN, PN1_IN, PN0_IN,
+ PP5_IN, PP4_IN, PP3_IN, PP2_IN, PP1_IN, PP0_IN,
+ PQ4_IN, PQ3_IN, PQ2_IN, PQ1_IN, PQ0_IN,
+ PR3_IN, PR2_IN, PR1_IN, PR0_IN,
+ PINMUX_INPUT_END,
+
+ PINMUX_INPUT_PULLUP_BEGIN,
+ PA7_IN_PU, PA6_IN_PU, PA5_IN_PU, PA4_IN_PU,
+ PA3_IN_PU, PA2_IN_PU, PA1_IN_PU, PA0_IN_PU,
+ PB7_IN_PU, PB6_IN_PU, PB5_IN_PU, PB4_IN_PU,
+ PB3_IN_PU, PB2_IN_PU, PB1_IN_PU, PB0_IN_PU,
+ PC7_IN_PU, PC6_IN_PU, PC5_IN_PU, PC4_IN_PU,
+ PC3_IN_PU, PC2_IN_PU, PC1_IN_PU, PC0_IN_PU,
+ PD7_IN_PU, PD6_IN_PU, PD5_IN_PU, PD4_IN_PU,
+ PD3_IN_PU, PD2_IN_PU, PD1_IN_PU, PD0_IN_PU,
+ PE5_IN_PU, PE4_IN_PU, PE3_IN_PU, PE2_IN_PU, PE1_IN_PU, PE0_IN_PU,
+ PF7_IN_PU, PF6_IN_PU, PF5_IN_PU, PF4_IN_PU,
+ PF3_IN_PU, PF2_IN_PU, PF1_IN_PU, PF0_IN_PU,
+ PG7_IN_PU, PG6_IN_PU, PG5_IN_PU, PG4_IN_PU,
+ PG3_IN_PU, PG2_IN_PU, PG1_IN_PU, PG0_IN_PU,
+ PH7_IN_PU, PH6_IN_PU, PH5_IN_PU, PH4_IN_PU,
+ PH3_IN_PU, PH2_IN_PU, PH1_IN_PU, PH0_IN_PU,
+ PJ7_IN_PU, PJ6_IN_PU, PJ5_IN_PU, PJ4_IN_PU,
+ PJ3_IN_PU, PJ2_IN_PU, PJ1_IN_PU, PJ0_IN_PU,
+ PK7_IN_PU, PK6_IN_PU, PK5_IN_PU, PK4_IN_PU,
+ PK3_IN_PU, PK2_IN_PU, PK1_IN_PU, PK0_IN_PU,
+ PL7_IN_PU, PL6_IN_PU, PL5_IN_PU, PL4_IN_PU,
+ PL3_IN_PU, PL2_IN_PU, PL1_IN_PU, PL0_IN_PU,
+ PM1_IN_PU, PM0_IN_PU,
+ PN7_IN_PU, PN6_IN_PU, PN5_IN_PU, PN4_IN_PU,
+ PN3_IN_PU, PN2_IN_PU, PN1_IN_PU, PN0_IN_PU,
+ PP5_IN_PU, PP4_IN_PU, PP3_IN_PU, PP2_IN_PU, PP1_IN_PU, PP0_IN_PU,
+ PQ4_IN_PU, PQ3_IN_PU, PQ2_IN_PU, PQ1_IN_PU, PQ0_IN_PU,
+ PR3_IN_PU, PR2_IN_PU, PR1_IN_PU, PR0_IN_PU,
+ PINMUX_INPUT_PULLUP_END,
+
+ PINMUX_OUTPUT_BEGIN,
+ PA7_OUT, PA6_OUT, PA5_OUT, PA4_OUT,
+ PA3_OUT, PA2_OUT, PA1_OUT, PA0_OUT,
+ PB7_OUT, PB6_OUT, PB5_OUT, PB4_OUT,
+ PB3_OUT, PB2_OUT, PB1_OUT, PB0_OUT,
+ PC7_OUT, PC6_OUT, PC5_OUT, PC4_OUT,
+ PC3_OUT, PC2_OUT, PC1_OUT, PC0_OUT,
+ PD7_OUT, PD6_OUT, PD5_OUT, PD4_OUT,
+ PD3_OUT, PD2_OUT, PD1_OUT, PD0_OUT,
+ PE5_OUT, PE4_OUT, PE3_OUT, PE2_OUT, PE1_OUT, PE0_OUT,
+ PF7_OUT, PF6_OUT, PF5_OUT, PF4_OUT,
+ PF3_OUT, PF2_OUT, PF1_OUT, PF0_OUT,
+ PG7_OUT, PG6_OUT, PG5_OUT, PG4_OUT,
+ PG3_OUT, PG2_OUT, PG1_OUT, PG0_OUT,
+ PH7_OUT, PH6_OUT, PH5_OUT, PH4_OUT,
+ PH3_OUT, PH2_OUT, PH1_OUT, PH0_OUT,
+ PJ7_OUT, PJ6_OUT, PJ5_OUT, PJ4_OUT,
+ PJ3_OUT, PJ2_OUT, PJ1_OUT, PJ0_OUT,
+ PK7_OUT, PK6_OUT, PK5_OUT, PK4_OUT,
+ PK3_OUT, PK2_OUT, PK1_OUT, PK0_OUT,
+ PL7_OUT, PL6_OUT, PL5_OUT, PL4_OUT,
+ PL3_OUT, PL2_OUT, PL1_OUT, PL0_OUT,
+ PM1_OUT, PM0_OUT,
+ PN7_OUT, PN6_OUT, PN5_OUT, PN4_OUT,
+ PN3_OUT, PN2_OUT, PN1_OUT, PN0_OUT,
+ PP5_OUT, PP4_OUT, PP3_OUT, PP2_OUT, PP1_OUT, PP0_OUT,
+ PQ4_OUT, PQ3_OUT, PQ2_OUT, PQ1_OUT, PQ0_OUT,
+ PR3_OUT, PR2_OUT, PR1_OUT, PR0_OUT,
+ PINMUX_OUTPUT_END,
+
+ PINMUX_FUNCTION_BEGIN,
+ PA7_FN, PA6_FN, PA5_FN, PA4_FN,
+ PA3_FN, PA2_FN, PA1_FN, PA0_FN,
+ PB7_FN, PB6_FN, PB5_FN, PB4_FN,
+ PB3_FN, PB2_FN, PB1_FN, PB0_FN,
+ PC7_FN, PC6_FN, PC5_FN, PC4_FN,
+ PC3_FN, PC2_FN, PC1_FN, PC0_FN,
+ PD7_FN, PD6_FN, PD5_FN, PD4_FN,
+ PD3_FN, PD2_FN, PD1_FN, PD0_FN,
+ PE5_FN, PE4_FN, PE3_FN, PE2_FN, PE1_FN, PE0_FN,
+ PF7_FN, PF6_FN, PF5_FN, PF4_FN,
+ PF3_FN, PF2_FN, PF1_FN, PF0_FN,
+ PG7_FN, PG6_FN, PG5_FN, PG4_FN,
+ PG3_FN, PG2_FN, PG1_FN, PG0_FN,
+ PH7_FN, PH6_FN, PH5_FN, PH4_FN,
+ PH3_FN, PH2_FN, PH1_FN, PH0_FN,
+ PJ7_FN, PJ6_FN, PJ5_FN, PJ4_FN,
+ PJ3_FN, PJ2_FN, PJ1_FN, PJ0_FN,
+ PK7_FN, PK6_FN, PK5_FN, PK4_FN,
+ PK3_FN, PK2_FN, PK1_FN, PK0_FN,
+ PL7_FN, PL6_FN, PL5_FN, PL4_FN,
+ PL3_FN, PL2_FN, PL1_FN, PL0_FN,
+ PM1_FN, PM0_FN,
+ PN7_FN, PN6_FN, PN5_FN, PN4_FN,
+ PN3_FN, PN2_FN, PN1_FN, PN0_FN,
+ PP5_FN, PP4_FN, PP3_FN, PP2_FN, PP1_FN, PP0_FN,
+ PQ4_FN, PQ3_FN, PQ2_FN, PQ1_FN, PQ0_FN,
+ PR3_FN, PR2_FN, PR1_FN, PR0_FN,
+ P1MSEL15_0, P1MSEL15_1,
+ P1MSEL14_0, P1MSEL14_1,
+ P1MSEL13_0, P1MSEL13_1,
+ P1MSEL12_0, P1MSEL12_1,
+ P1MSEL11_0, P1MSEL11_1,
+ P1MSEL10_0, P1MSEL10_1,
+ P1MSEL9_0, P1MSEL9_1,
+ P1MSEL8_0, P1MSEL8_1,
+ P1MSEL7_0, P1MSEL7_1,
+ P1MSEL6_0, P1MSEL6_1,
+ P1MSEL5_0,
+ P1MSEL4_0, P1MSEL4_1,
+ P1MSEL3_0, P1MSEL3_1,
+ P1MSEL2_0, P1MSEL2_1,
+ P1MSEL1_0, P1MSEL1_1,
+ P1MSEL0_0, P1MSEL0_1,
+ P2MSEL2_0, P2MSEL2_1,
+ P2MSEL1_0, P2MSEL1_1,
+ P2MSEL0_0, P2MSEL0_1,
+ PINMUX_FUNCTION_END,
+
+ PINMUX_MARK_BEGIN,
+ D63_AD31_MARK,
+ D62_AD30_MARK,
+ D61_AD29_MARK,
+ D60_AD28_MARK,
+ D59_AD27_MARK,
+ D58_AD26_MARK,
+ D57_AD25_MARK,
+ D56_AD24_MARK,
+ D55_AD23_MARK,
+ D54_AD22_MARK,
+ D53_AD21_MARK,
+ D52_AD20_MARK,
+ D51_AD19_MARK,
+ D50_AD18_MARK,
+ D49_AD17_DB5_MARK,
+ D48_AD16_DB4_MARK,
+ D47_AD15_DB3_MARK,
+ D46_AD14_DB2_MARK,
+ D45_AD13_DB1_MARK,
+ D44_AD12_DB0_MARK,
+ D43_AD11_DG5_MARK,
+ D42_AD10_DG4_MARK,
+ D41_AD9_DG3_MARK,
+ D40_AD8_DG2_MARK,
+ D39_AD7_DG1_MARK,
+ D38_AD6_DG0_MARK,
+ D37_AD5_DR5_MARK,
+ D36_AD4_DR4_MARK,
+ D35_AD3_DR3_MARK,
+ D34_AD2_DR2_MARK,
+ D33_AD1_DR1_MARK,
+ D32_AD0_DR0_MARK,
+ REQ1_MARK,
+ REQ2_MARK,
+ REQ3_MARK,
+ GNT1_MARK,
+ GNT2_MARK,
+ GNT3_MARK,
+ MMCCLK_MARK,
+ D31_MARK,
+ D30_MARK,
+ D29_MARK,
+ D28_MARK,
+ D27_MARK,
+ D26_MARK,
+ D25_MARK,
+ D24_MARK,
+ D23_MARK,
+ D22_MARK,
+ D21_MARK,
+ D20_MARK,
+ D19_MARK,
+ D18_MARK,
+ D17_MARK,
+ D16_MARK,
+ SCIF1_SCK_MARK,
+ SCIF1_RXD_MARK,
+ SCIF1_TXD_MARK,
+ SCIF0_CTS_MARK,
+ INTD_MARK,
+ FCE_MARK,
+ SCIF0_RTS_MARK,
+ HSPI_CS_MARK,
+ FSE_MARK,
+ SCIF0_SCK_MARK,
+ HSPI_CLK_MARK,
+ FRE_MARK,
+ SCIF0_RXD_MARK,
+ HSPI_RX_MARK,
+ FRB_MARK,
+ SCIF0_TXD_MARK,
+ HSPI_TX_MARK,
+ FWE_MARK,
+ SCIF5_TXD_MARK,
+ HAC1_SYNC_MARK,
+ SSI1_WS_MARK,
+ SIOF_TXD_PJ_MARK,
+ HAC0_SDOUT_MARK,
+ SSI0_SDATA_MARK,
+ SIOF_RXD_PJ_MARK,
+ HAC0_SDIN_MARK,
+ SSI0_SCK_MARK,
+ SIOF_SYNC_PJ_MARK,
+ HAC0_SYNC_MARK,
+ SSI0_WS_MARK,
+ SIOF_MCLK_PJ_MARK,
+ HAC_RES_MARK,
+ SIOF_SCK_PJ_MARK,
+ HAC0_BITCLK_MARK,
+ SSI0_CLK_MARK,
+ HAC1_BITCLK_MARK,
+ SSI1_CLK_MARK,
+ TCLK_MARK,
+ IOIS16_MARK,
+ STATUS0_MARK,
+ DRAK0_PK3_MARK,
+ STATUS1_MARK,
+ DRAK1_PK2_MARK,
+ DACK2_MARK,
+ SCIF2_TXD_MARK,
+ MMCCMD_MARK,
+ SIOF_TXD_PK_MARK,
+ DACK3_MARK,
+ SCIF2_SCK_MARK,
+ MMCDAT_MARK,
+ SIOF_SCK_PK_MARK,
+ DREQ0_MARK,
+ DREQ1_MARK,
+ DRAK0_PK1_MARK,
+ DRAK1_PK0_MARK,
+ DREQ2_MARK,
+ INTB_MARK,
+ DREQ3_MARK,
+ INTC_MARK,
+ DRAK2_MARK,
+ CE2A_MARK,
+ IRL4_MARK,
+ FD4_MARK,
+ IRL5_MARK,
+ FD5_MARK,
+ IRL6_MARK,
+ FD6_MARK,
+ IRL7_MARK,
+ FD7_MARK,
+ DRAK3_MARK,
+ CE2B_MARK,
+ BREQ_BSACK_MARK,
+ BACK_BSREQ_MARK,
+ SCIF5_RXD_MARK,
+ HAC1_SDIN_MARK,
+ SSI1_SCK_MARK,
+ SCIF5_SCK_MARK,
+ HAC1_SDOUT_MARK,
+ SSI1_SDATA_MARK,
+ SCIF3_TXD_MARK,
+ FCLE_MARK,
+ SCIF3_RXD_MARK,
+ FALE_MARK,
+ SCIF3_SCK_MARK,
+ FD0_MARK,
+ SCIF4_TXD_MARK,
+ FD1_MARK,
+ SCIF4_RXD_MARK,
+ FD2_MARK,
+ SCIF4_SCK_MARK,
+ FD3_MARK,
+ DEVSEL_DCLKOUT_MARK,
+ STOP_CDE_MARK,
+ LOCK_ODDF_MARK,
+ TRDY_DISPL_MARK,
+ IRDY_HSYNC_MARK,
+ PCIFRAME_VSYNC_MARK,
+ INTA_MARK,
+ GNT0_GNTIN_MARK,
+ REQ0_REQOUT_MARK,
+ PERR_MARK,
+ SERR_MARK,
+ WE7_CBE3_MARK,
+ WE6_CBE2_MARK,
+ WE5_CBE1_MARK,
+ WE4_CBE0_MARK,
+ SCIF2_RXD_MARK,
+ SIOF_RXD_MARK,
+ MRESETOUT_MARK,
+ IRQOUT_MARK,
+ PINMUX_MARK_END,
+};
+
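+/*
+ * Pin function data: each PINMUX_DATA() entry either ties a pin's DATA
+ * enum to its input/output/pull-up states, or ties a function MARK to
+ * the MSEL select bits and port function enum that route it to the pin.
+ */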
+static pinmux_enum_t pinmux_data[] = {
+
+ /* PA GPIO */
+ PINMUX_DATA(PA7_DATA, PA7_IN, PA7_OUT, PA7_IN_PU),
+ PINMUX_DATA(PA6_DATA, PA6_IN, PA6_OUT, PA6_IN_PU),
+ PINMUX_DATA(PA5_DATA, PA5_IN, PA5_OUT, PA5_IN_PU),
+ PINMUX_DATA(PA4_DATA, PA4_IN, PA4_OUT, PA4_IN_PU),
+ PINMUX_DATA(PA3_DATA, PA3_IN, PA3_OUT, PA3_IN_PU),
+ PINMUX_DATA(PA2_DATA, PA2_IN, PA2_OUT, PA2_IN_PU),
+ PINMUX_DATA(PA1_DATA, PA1_IN, PA1_OUT, PA1_IN_PU),
+ PINMUX_DATA(PA0_DATA, PA0_IN, PA0_OUT, PA0_IN_PU),
+
+ /* PB GPIO */
+ PINMUX_DATA(PB7_DATA, PB7_IN, PB7_OUT, PB7_IN_PU),
+ PINMUX_DATA(PB6_DATA, PB6_IN, PB6_OUT, PB6_IN_PU),
+ PINMUX_DATA(PB5_DATA, PB5_IN, PB5_OUT, PB5_IN_PU),
+ PINMUX_DATA(PB4_DATA, PB4_IN, PB4_OUT, PB4_IN_PU),
+ PINMUX_DATA(PB3_DATA, PB3_IN, PB3_OUT, PB3_IN_PU),
+ PINMUX_DATA(PB2_DATA, PB2_IN, PB2_OUT, PB2_IN_PU),
+ PINMUX_DATA(PB1_DATA, PB1_IN, PB1_OUT, PB1_IN_PU),
+ PINMUX_DATA(PB0_DATA, PB0_IN, PB0_OUT, PB0_IN_PU),
+
+ /* PC GPIO */
+ PINMUX_DATA(PC7_DATA, PC7_IN, PC7_OUT, PC7_IN_PU),
+ PINMUX_DATA(PC6_DATA, PC6_IN, PC6_OUT, PC6_IN_PU),
+ PINMUX_DATA(PC5_DATA, PC5_IN, PC5_OUT, PC5_IN_PU),
+ PINMUX_DATA(PC4_DATA, PC4_IN, PC4_OUT, PC4_IN_PU),
+ PINMUX_DATA(PC3_DATA, PC3_IN, PC3_OUT, PC3_IN_PU),
+ PINMUX_DATA(PC2_DATA, PC2_IN, PC2_OUT, PC2_IN_PU),
+ PINMUX_DATA(PC1_DATA, PC1_IN, PC1_OUT, PC1_IN_PU),
+ PINMUX_DATA(PC0_DATA, PC0_IN, PC0_OUT, PC0_IN_PU),
+
+ /* PD GPIO */
+ PINMUX_DATA(PD7_DATA, PD7_IN, PD7_OUT, PD7_IN_PU),
+ PINMUX_DATA(PD6_DATA, PD6_IN, PD6_OUT, PD6_IN_PU),
+ PINMUX_DATA(PD5_DATA, PD5_IN, PD5_OUT, PD5_IN_PU),
+ PINMUX_DATA(PD4_DATA, PD4_IN, PD4_OUT, PD4_IN_PU),
+ PINMUX_DATA(PD3_DATA, PD3_IN, PD3_OUT, PD3_IN_PU),
+ PINMUX_DATA(PD2_DATA, PD2_IN, PD2_OUT, PD2_IN_PU),
+ PINMUX_DATA(PD1_DATA, PD1_IN, PD1_OUT, PD1_IN_PU),
+ PINMUX_DATA(PD0_DATA, PD0_IN, PD0_OUT, PD0_IN_PU),
+
+ /* PE GPIO */
+ PINMUX_DATA(PE5_DATA, PE5_IN, PE5_OUT, PE5_IN_PU),
+ PINMUX_DATA(PE4_DATA, PE4_IN, PE4_OUT, PE4_IN_PU),
+ PINMUX_DATA(PE3_DATA, PE3_IN, PE3_OUT, PE3_IN_PU),
+ PINMUX_DATA(PE2_DATA, PE2_IN, PE2_OUT, PE2_IN_PU),
+ PINMUX_DATA(PE1_DATA, PE1_IN, PE1_OUT, PE1_IN_PU),
+ PINMUX_DATA(PE0_DATA, PE0_IN, PE0_OUT, PE0_IN_PU),
+
+ /* PF GPIO */
+ PINMUX_DATA(PF7_DATA, PF7_IN, PF7_OUT, PF7_IN_PU),
+ PINMUX_DATA(PF6_DATA, PF6_IN, PF6_OUT, PF6_IN_PU),
+ PINMUX_DATA(PF5_DATA, PF5_IN, PF5_OUT, PF5_IN_PU),
+ PINMUX_DATA(PF4_DATA, PF4_IN, PF4_OUT, PF4_IN_PU),
+ PINMUX_DATA(PF3_DATA, PF3_IN, PF3_OUT, PF3_IN_PU),
+ PINMUX_DATA(PF2_DATA, PF2_IN, PF2_OUT, PF2_IN_PU),
+ PINMUX_DATA(PF1_DATA, PF1_IN, PF1_OUT, PF1_IN_PU),
+ PINMUX_DATA(PF0_DATA, PF0_IN, PF0_OUT, PF0_IN_PU),
+
+ /* PG GPIO */
+ PINMUX_DATA(PG7_DATA, PG7_IN, PG7_OUT, PG7_IN_PU),
+ PINMUX_DATA(PG6_DATA, PG6_IN, PG6_OUT, PG6_IN_PU),
+ PINMUX_DATA(PG5_DATA, PG5_IN, PG5_OUT, PG5_IN_PU),
+ PINMUX_DATA(PG4_DATA, PG4_IN, PG4_OUT, PG4_IN_PU),
+ PINMUX_DATA(PG3_DATA, PG3_IN, PG3_OUT, PG3_IN_PU),
+ PINMUX_DATA(PG2_DATA, PG2_IN, PG2_OUT, PG2_IN_PU),
+ PINMUX_DATA(PG1_DATA, PG1_IN, PG1_OUT, PG1_IN_PU),
+ PINMUX_DATA(PG0_DATA, PG0_IN, PG0_OUT, PG0_IN_PU),
+
+ /* PH GPIO */
+ PINMUX_DATA(PH7_DATA, PH7_IN, PH7_OUT, PH7_IN_PU),
+ PINMUX_DATA(PH6_DATA, PH6_IN, PH6_OUT, PH6_IN_PU),
+ PINMUX_DATA(PH5_DATA, PH5_IN, PH5_OUT, PH5_IN_PU),
+ PINMUX_DATA(PH4_DATA, PH4_IN, PH4_OUT, PH4_IN_PU),
+ PINMUX_DATA(PH3_DATA, PH3_IN, PH3_OUT, PH3_IN_PU),
+ PINMUX_DATA(PH2_DATA, PH2_IN, PH2_OUT, PH2_IN_PU),
+ PINMUX_DATA(PH1_DATA, PH1_IN, PH1_OUT, PH1_IN_PU),
+ PINMUX_DATA(PH0_DATA, PH0_IN, PH0_OUT, PH0_IN_PU),
+
+ /* PJ GPIO */
+ PINMUX_DATA(PJ7_DATA, PJ7_IN, PJ7_OUT, PJ7_IN_PU),
+ PINMUX_DATA(PJ6_DATA, PJ6_IN, PJ6_OUT, PJ6_IN_PU),
+ PINMUX_DATA(PJ5_DATA, PJ5_IN, PJ5_OUT, PJ5_IN_PU),
+ PINMUX_DATA(PJ4_DATA, PJ4_IN, PJ4_OUT, PJ4_IN_PU),
+ PINMUX_DATA(PJ3_DATA, PJ3_IN, PJ3_OUT, PJ3_IN_PU),
+ PINMUX_DATA(PJ2_DATA, PJ2_IN, PJ2_OUT, PJ2_IN_PU),
+ PINMUX_DATA(PJ1_DATA, PJ1_IN, PJ1_OUT, PJ1_IN_PU),
+ PINMUX_DATA(PJ0_DATA, PJ0_IN, PJ0_OUT, PJ0_IN_PU),
+
+ /* PK GPIO */
+ PINMUX_DATA(PK7_DATA, PK7_IN, PK7_OUT, PK7_IN_PU),
+ PINMUX_DATA(PK6_DATA, PK6_IN, PK6_OUT, PK6_IN_PU),
+ PINMUX_DATA(PK5_DATA, PK5_IN, PK5_OUT, PK5_IN_PU),
+ PINMUX_DATA(PK4_DATA, PK4_IN, PK4_OUT, PK4_IN_PU),
+ PINMUX_DATA(PK3_DATA, PK3_IN, PK3_OUT, PK3_IN_PU),
+ PINMUX_DATA(PK2_DATA, PK2_IN, PK2_OUT, PK2_IN_PU),
+ PINMUX_DATA(PK1_DATA, PK1_IN, PK1_OUT, PK1_IN_PU),
+ PINMUX_DATA(PK0_DATA, PK0_IN, PK0_OUT, PK0_IN_PU),
+
+ /* PL GPIO */
+ PINMUX_DATA(PL7_DATA, PL7_IN, PL7_OUT, PL7_IN_PU),
+ PINMUX_DATA(PL6_DATA, PL6_IN, PL6_OUT, PL6_IN_PU),
+ PINMUX_DATA(PL5_DATA, PL5_IN, PL5_OUT, PL5_IN_PU),
+ PINMUX_DATA(PL4_DATA, PL4_IN, PL4_OUT, PL4_IN_PU),
+ PINMUX_DATA(PL3_DATA, PL3_IN, PL3_OUT, PL3_IN_PU),
+ PINMUX_DATA(PL2_DATA, PL2_IN, PL2_OUT, PL2_IN_PU),
+ PINMUX_DATA(PL1_DATA, PL1_IN, PL1_OUT, PL1_IN_PU),
+ PINMUX_DATA(PL0_DATA, PL0_IN, PL0_OUT, PL0_IN_PU),
+
+ /* PM GPIO */
+ PINMUX_DATA(PM1_DATA, PM1_IN, PM1_OUT, PM1_IN_PU),
+ PINMUX_DATA(PM0_DATA, PM0_IN, PM0_OUT, PM0_IN_PU),
+
+ /* PN GPIO */
+ PINMUX_DATA(PN7_DATA, PN7_IN, PN7_OUT, PN7_IN_PU),
+ PINMUX_DATA(PN6_DATA, PN6_IN, PN6_OUT, PN6_IN_PU),
+ PINMUX_DATA(PN5_DATA, PN5_IN, PN5_OUT, PN5_IN_PU),
+ PINMUX_DATA(PN4_DATA, PN4_IN, PN4_OUT, PN4_IN_PU),
+ PINMUX_DATA(PN3_DATA, PN3_IN, PN3_OUT, PN3_IN_PU),
+ PINMUX_DATA(PN2_DATA, PN2_IN, PN2_OUT, PN2_IN_PU),
+ PINMUX_DATA(PN1_DATA, PN1_IN, PN1_OUT, PN1_IN_PU),
+ PINMUX_DATA(PN0_DATA, PN0_IN, PN0_OUT, PN0_IN_PU),
+
+ /* PP GPIO */
+ PINMUX_DATA(PP5_DATA, PP5_IN, PP5_OUT, PP5_IN_PU),
+ PINMUX_DATA(PP4_DATA, PP4_IN, PP4_OUT, PP4_IN_PU),
+ PINMUX_DATA(PP3_DATA, PP3_IN, PP3_OUT, PP3_IN_PU),
+ PINMUX_DATA(PP2_DATA, PP2_IN, PP2_OUT, PP2_IN_PU),
+ PINMUX_DATA(PP1_DATA, PP1_IN, PP1_OUT, PP1_IN_PU),
+ PINMUX_DATA(PP0_DATA, PP0_IN, PP0_OUT, PP0_IN_PU),
+
+ /* PQ GPIO */
+ PINMUX_DATA(PQ4_DATA, PQ4_IN, PQ4_OUT, PQ4_IN_PU),
+ PINMUX_DATA(PQ3_DATA, PQ3_IN, PQ3_OUT, PQ3_IN_PU),
+ PINMUX_DATA(PQ2_DATA, PQ2_IN, PQ2_OUT, PQ2_IN_PU),
+ PINMUX_DATA(PQ1_DATA, PQ1_IN, PQ1_OUT, PQ1_IN_PU),
+ PINMUX_DATA(PQ0_DATA, PQ0_IN, PQ0_OUT, PQ0_IN_PU),
+
+ /* PR GPIO */
+ PINMUX_DATA(PR3_DATA, PR3_IN, PR3_OUT, PR3_IN_PU),
+ PINMUX_DATA(PR2_DATA, PR2_IN, PR2_OUT, PR2_IN_PU),
+ PINMUX_DATA(PR1_DATA, PR1_IN, PR1_OUT, PR1_IN_PU),
+ PINMUX_DATA(PR0_DATA, PR0_IN, PR0_OUT, PR0_IN_PU),
+
+ /* PA FN */
+ PINMUX_DATA(D63_AD31_MARK, PA7_FN),
+ PINMUX_DATA(D62_AD30_MARK, PA6_FN),
+ PINMUX_DATA(D61_AD29_MARK, PA5_FN),
+ PINMUX_DATA(D60_AD28_MARK, PA4_FN),
+ PINMUX_DATA(D59_AD27_MARK, PA3_FN),
+ PINMUX_DATA(D58_AD26_MARK, PA2_FN),
+ PINMUX_DATA(D57_AD25_MARK, PA1_FN),
+ PINMUX_DATA(D56_AD24_MARK, PA0_FN),
+
+ /* PB FN */
+ PINMUX_DATA(D55_AD23_MARK, PB7_FN),
+ PINMUX_DATA(D54_AD22_MARK, PB6_FN),
+ PINMUX_DATA(D53_AD21_MARK, PB5_FN),
+ PINMUX_DATA(D52_AD20_MARK, PB4_FN),
+ PINMUX_DATA(D51_AD19_MARK, PB3_FN),
+ PINMUX_DATA(D50_AD18_MARK, PB2_FN),
+ PINMUX_DATA(D49_AD17_DB5_MARK, PB1_FN),
+ PINMUX_DATA(D48_AD16_DB4_MARK, PB0_FN),
+
+ /* PC FN */
+ PINMUX_DATA(D47_AD15_DB3_MARK, PC7_FN),
+ PINMUX_DATA(D46_AD14_DB2_MARK, PC6_FN),
+ PINMUX_DATA(D45_AD13_DB1_MARK, PC5_FN),
+ PINMUX_DATA(D44_AD12_DB0_MARK, PC4_FN),
+ PINMUX_DATA(D43_AD11_DG5_MARK, PC3_FN),
+ PINMUX_DATA(D42_AD10_DG4_MARK, PC2_FN),
+ PINMUX_DATA(D41_AD9_DG3_MARK, PC1_FN),
+ PINMUX_DATA(D40_AD8_DG2_MARK, PC0_FN),
+
+ /* PD FN */
+ PINMUX_DATA(D39_AD7_DG1_MARK, PD7_FN),
+ PINMUX_DATA(D38_AD6_DG0_MARK, PD6_FN),
+ PINMUX_DATA(D37_AD5_DR5_MARK, PD5_FN),
+ PINMUX_DATA(D36_AD4_DR4_MARK, PD4_FN),
+ PINMUX_DATA(D35_AD3_DR3_MARK, PD3_FN),
+ PINMUX_DATA(D34_AD2_DR2_MARK, PD2_FN),
+ PINMUX_DATA(D33_AD1_DR1_MARK, PD1_FN),
+ PINMUX_DATA(D32_AD0_DR0_MARK, PD0_FN),
+
+ /* PE FN */
+ PINMUX_DATA(REQ1_MARK, PE5_FN),
+ PINMUX_DATA(REQ2_MARK, PE4_FN),
+ PINMUX_DATA(REQ3_MARK, P2MSEL0_0, PE3_FN),
+ PINMUX_DATA(GNT1_MARK, PE2_FN),
+ PINMUX_DATA(GNT2_MARK, PE1_FN),
+ PINMUX_DATA(GNT3_MARK, P2MSEL0_0, PE0_FN),
+ PINMUX_DATA(MMCCLK_MARK, P2MSEL0_1, PE0_FN),
+
+ /* PF FN */
+ PINMUX_DATA(D31_MARK, PF7_FN),
+ PINMUX_DATA(D30_MARK, PF6_FN),
+ PINMUX_DATA(D29_MARK, PF5_FN),
+ PINMUX_DATA(D28_MARK, PF4_FN),
+ PINMUX_DATA(D27_MARK, PF3_FN),
+ PINMUX_DATA(D26_MARK, PF2_FN),
+ PINMUX_DATA(D25_MARK, PF1_FN),
+ PINMUX_DATA(D24_MARK, PF0_FN),
+
+ /* PG FN */
+ PINMUX_DATA(D23_MARK, PG7_FN),
+ PINMUX_DATA(D22_MARK, PG6_FN),
+ PINMUX_DATA(D21_MARK, PG5_FN),
+ PINMUX_DATA(D20_MARK, PG4_FN),
+ PINMUX_DATA(D19_MARK, PG3_FN),
+ PINMUX_DATA(D18_MARK, PG2_FN),
+ PINMUX_DATA(D17_MARK, PG1_FN),
+ PINMUX_DATA(D16_MARK, PG0_FN),
+
+ /* PH FN */
+ PINMUX_DATA(SCIF1_SCK_MARK, PH7_FN),
+ PINMUX_DATA(SCIF1_RXD_MARK, PH6_FN),
+ PINMUX_DATA(SCIF1_TXD_MARK, PH5_FN),
+ PINMUX_DATA(SCIF0_CTS_MARK, PH4_FN),
+ PINMUX_DATA(INTD_MARK, P1MSEL7_1, PH4_FN),
+ PINMUX_DATA(FCE_MARK, P1MSEL8_1, P1MSEL7_0, PH4_FN),
+ PINMUX_DATA(SCIF0_RTS_MARK, P1MSEL8_0, P1MSEL7_0, PH3_FN),
+ PINMUX_DATA(HSPI_CS_MARK, P1MSEL8_0, P1MSEL7_1, PH3_FN),
+ PINMUX_DATA(FSE_MARK, P1MSEL8_1, P1MSEL7_0, PH3_FN),
+ PINMUX_DATA(SCIF0_SCK_MARK, P1MSEL8_0, P1MSEL7_0, PH2_FN),
+ PINMUX_DATA(HSPI_CLK_MARK, P1MSEL8_0, P1MSEL7_1, PH2_FN),
+ PINMUX_DATA(FRE_MARK, P1MSEL8_1, P1MSEL7_0, PH2_FN),
+ PINMUX_DATA(SCIF0_RXD_MARK, P1MSEL8_0, P1MSEL7_0, PH1_FN),
+ PINMUX_DATA(HSPI_RX_MARK, P1MSEL8_0, P1MSEL7_1, PH1_FN),
+ PINMUX_DATA(FRB_MARK, P1MSEL8_1, P1MSEL7_0, PH1_FN),
+ PINMUX_DATA(SCIF0_TXD_MARK, P1MSEL8_0, P1MSEL7_0, PH0_FN),
+ PINMUX_DATA(HSPI_TX_MARK, P1MSEL8_0, P1MSEL7_1, PH0_FN),
+ PINMUX_DATA(FWE_MARK, P1MSEL8_1, P1MSEL7_0, PH0_FN),
+
+ /* PJ FN */
+ PINMUX_DATA(SCIF5_TXD_MARK, P1MSEL2_0, P1MSEL1_0, PJ7_FN),
+ PINMUX_DATA(HAC1_SYNC_MARK, P1MSEL2_0, P1MSEL1_1, PJ7_FN),
+ PINMUX_DATA(SSI1_WS_MARK, P1MSEL2_1, P1MSEL1_0, PJ7_FN),
+ PINMUX_DATA(SIOF_TXD_PJ_MARK, P2MSEL1_0, P1MSEL4_0, P1MSEL3_0, PJ6_FN),
+ PINMUX_DATA(HAC0_SDOUT_MARK, P1MSEL4_0, P1MSEL3_1, PJ6_FN),
+ PINMUX_DATA(SSI0_SDATA_MARK, P1MSEL4_1, P1MSEL3_0, PJ6_FN),
+ PINMUX_DATA(SIOF_RXD_PJ_MARK, P2MSEL1_0, P1MSEL4_0, P1MSEL3_0, PJ5_FN),
+ PINMUX_DATA(HAC0_SDIN_MARK, P1MSEL4_0, P1MSEL3_1, PJ5_FN),
+ PINMUX_DATA(SSI0_SCK_MARK, P1MSEL4_1, P1MSEL3_0, PJ5_FN),
+ PINMUX_DATA(SIOF_SYNC_PJ_MARK, P2MSEL1_0, P1MSEL4_0, P1MSEL3_0, PJ4_FN),
+ PINMUX_DATA(HAC0_SYNC_MARK, P1MSEL4_0, P1MSEL3_1, PJ4_FN),
+ PINMUX_DATA(SSI0_WS_MARK, P1MSEL4_1, P1MSEL3_0, PJ4_FN),
+ PINMUX_DATA(SIOF_MCLK_PJ_MARK, P2MSEL1_0, P1MSEL4_0, P1MSEL3_0, PJ3_FN),
+ PINMUX_DATA(HAC_RES_MARK, P1MSEL4_0, P1MSEL3_1, PJ3_FN),
+ PINMUX_DATA(SIOF_SCK_PJ_MARK, P2MSEL1_0, P1MSEL4_0, P1MSEL3_0, PJ2_FN),
+ PINMUX_DATA(HAC0_BITCLK_MARK, P1MSEL4_0, P1MSEL3_1, PJ2_FN),
+ PINMUX_DATA(SSI0_CLK_MARK, P1MSEL4_1, P1MSEL3_0, PJ2_FN),
+ PINMUX_DATA(HAC1_BITCLK_MARK, P1MSEL2_0, PJ1_FN),
+ PINMUX_DATA(SSI1_CLK_MARK, P1MSEL2_1, P1MSEL1_0, PJ1_FN),
+ PINMUX_DATA(TCLK_MARK, P1MSEL9_0, PJ0_FN),
+ PINMUX_DATA(IOIS16_MARK, P1MSEL9_1, PJ0_FN),
+
+ /* PK FN */
+ PINMUX_DATA(STATUS0_MARK, P1MSEL15_0, PK7_FN),
+ PINMUX_DATA(DRAK0_PK3_MARK, P1MSEL15_1, PK7_FN),
+ PINMUX_DATA(STATUS1_MARK, P1MSEL15_0, PK6_FN),
+ PINMUX_DATA(DRAK1_PK2_MARK, P1MSEL15_1, PK6_FN),
+ PINMUX_DATA(DACK2_MARK, P1MSEL12_0, P1MSEL11_0, PK5_FN),
+ PINMUX_DATA(SCIF2_TXD_MARK, P1MSEL12_1, P1MSEL11_0, PK5_FN),
+ PINMUX_DATA(MMCCMD_MARK, P1MSEL12_1, P1MSEL11_1, PK5_FN),
+ PINMUX_DATA(SIOF_TXD_PK_MARK, P2MSEL1_1,
+ P1MSEL12_0, P1MSEL11_1, PK5_FN),
+ PINMUX_DATA(DACK3_MARK, P1MSEL12_0, P1MSEL11_0, PK4_FN),
+ PINMUX_DATA(SCIF2_SCK_MARK, P1MSEL12_1, P1MSEL11_0, PK4_FN),
+ PINMUX_DATA(MMCDAT_MARK, P1MSEL12_1, P1MSEL11_1, PK4_FN),
+ PINMUX_DATA(SIOF_SCK_PK_MARK, P2MSEL1_1,
+ P1MSEL12_0, P1MSEL11_1, PK4_FN),
+ PINMUX_DATA(DREQ0_MARK, PK3_FN),
+ PINMUX_DATA(DREQ1_MARK, PK2_FN),
+ PINMUX_DATA(DRAK0_PK1_MARK, PK1_FN),
+ PINMUX_DATA(DRAK1_PK0_MARK, PK0_FN),
+
+ /* PL FN */
+ PINMUX_DATA(DREQ2_MARK, P1MSEL13_0, PL7_FN),
+ PINMUX_DATA(INTB_MARK, P1MSEL13_1, PL7_FN),
+ PINMUX_DATA(DREQ3_MARK, P1MSEL13_0, PL6_FN),
+ PINMUX_DATA(INTC_MARK, P1MSEL13_1, PL6_FN),
+ PINMUX_DATA(DRAK2_MARK, P1MSEL10_0, PL5_FN),
+ PINMUX_DATA(CE2A_MARK, P1MSEL10_1, PL5_FN),
+ PINMUX_DATA(IRL4_MARK, P1MSEL14_0, PL4_FN),
+ PINMUX_DATA(FD4_MARK, P1MSEL14_1, PL4_FN),
+ PINMUX_DATA(IRL5_MARK, P1MSEL14_0, PL3_FN),
+ PINMUX_DATA(FD5_MARK, P1MSEL14_1, PL3_FN),
+ PINMUX_DATA(IRL6_MARK, P1MSEL14_0, PL2_FN),
+ PINMUX_DATA(FD6_MARK, P1MSEL14_1, PL2_FN),
+ PINMUX_DATA(IRL7_MARK, P1MSEL14_0, PL1_FN),
+ PINMUX_DATA(FD7_MARK, P1MSEL14_1, PL1_FN),
+ PINMUX_DATA(DRAK3_MARK, P1MSEL10_0, PL0_FN),
+ PINMUX_DATA(CE2B_MARK, P1MSEL10_1, PL0_FN),
+
+ /* PM FN */
+ PINMUX_DATA(BREQ_BSACK_MARK, PM1_FN),
+ PINMUX_DATA(BACK_BSREQ_MARK, PM0_FN),
+
+ /* PN FN */
+ PINMUX_DATA(SCIF5_RXD_MARK, P1MSEL2_0, P1MSEL1_0, PN7_FN),
+ PINMUX_DATA(HAC1_SDIN_MARK, P1MSEL2_0, P1MSEL1_1, PN7_FN),
+ PINMUX_DATA(SSI1_SCK_MARK, P1MSEL2_1, P1MSEL1_0, PN7_FN),
+ PINMUX_DATA(SCIF5_SCK_MARK, P1MSEL2_0, P1MSEL1_0, PN6_FN),
+ PINMUX_DATA(HAC1_SDOUT_MARK, P1MSEL2_0, P1MSEL1_1, PN6_FN),
+ PINMUX_DATA(SSI1_SDATA_MARK, P1MSEL2_1, P1MSEL1_0, PN6_FN),
+ PINMUX_DATA(SCIF3_TXD_MARK, P1MSEL0_0, PN5_FN),
+ PINMUX_DATA(FCLE_MARK, P1MSEL0_1, PN5_FN),
+ PINMUX_DATA(SCIF3_RXD_MARK, P1MSEL0_0, PN4_FN),
+ PINMUX_DATA(FALE_MARK, P1MSEL0_1, PN4_FN),
+ PINMUX_DATA(SCIF3_SCK_MARK, P1MSEL0_0, PN3_FN),
+ PINMUX_DATA(FD0_MARK, P1MSEL0_1, PN3_FN),
+ PINMUX_DATA(SCIF4_TXD_MARK, P1MSEL0_0, PN2_FN),
+ PINMUX_DATA(FD1_MARK, P1MSEL0_1, PN2_FN),
+ PINMUX_DATA(SCIF4_RXD_MARK, P1MSEL0_0, PN1_FN),
+ PINMUX_DATA(FD2_MARK, P1MSEL0_1, PN1_FN),
+ PINMUX_DATA(SCIF4_SCK_MARK, P1MSEL0_0, PN0_FN),
+ PINMUX_DATA(FD3_MARK, P1MSEL0_1, PN0_FN),
+
+ /* PP FN */
+ PINMUX_DATA(DEVSEL_DCLKOUT_MARK, PP5_FN),
+ PINMUX_DATA(STOP_CDE_MARK, PP4_FN),
+ PINMUX_DATA(LOCK_ODDF_MARK, PP3_FN),
+ PINMUX_DATA(TRDY_DISPL_MARK, PP2_FN),
+ PINMUX_DATA(IRDY_HSYNC_MARK, PP1_FN),
+ PINMUX_DATA(PCIFRAME_VSYNC_MARK, PP0_FN),
+
+ /* PQ FN */
+ PINMUX_DATA(INTA_MARK, PQ4_FN),
+ PINMUX_DATA(GNT0_GNTIN_MARK, PQ3_FN),
+ PINMUX_DATA(REQ0_REQOUT_MARK, PQ2_FN),
+ PINMUX_DATA(PERR_MARK, PQ1_FN),
+ PINMUX_DATA(SERR_MARK, PQ0_FN),
+
+ /* PR FN */
+ PINMUX_DATA(WE7_CBE3_MARK, PR3_FN),
+ PINMUX_DATA(WE6_CBE2_MARK, PR2_FN),
+ PINMUX_DATA(WE5_CBE1_MARK, PR1_FN),
+ PINMUX_DATA(WE4_CBE0_MARK, PR0_FN),
+
+ /* MISC FN */
+ PINMUX_DATA(SCIF2_RXD_MARK, P1MSEL6_0, P1MSEL5_0),
+ PINMUX_DATA(SIOF_RXD_MARK, P2MSEL1_1, P1MSEL6_1, P1MSEL5_0),
+ PINMUX_DATA(MRESETOUT_MARK, P2MSEL2_0),
+ PINMUX_DATA(IRQOUT_MARK, P2MSEL2_1),
+};
+
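+/* GPIO table: maps each GPIO_* number to its pin data enum or function mark */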
+static struct pinmux_gpio pinmux_gpios[] = {
+ /* PA */
+ PINMUX_GPIO(GPIO_PA7, PA7_DATA),
+ PINMUX_GPIO(GPIO_PA6, PA6_DATA),
+ PINMUX_GPIO(GPIO_PA5, PA5_DATA),
+ PINMUX_GPIO(GPIO_PA4, PA4_DATA),
+ PINMUX_GPIO(GPIO_PA3, PA3_DATA),
+ PINMUX_GPIO(GPIO_PA2, PA2_DATA),
+ PINMUX_GPIO(GPIO_PA1, PA1_DATA),
+ PINMUX_GPIO(GPIO_PA0, PA0_DATA),
+
+ /* PB */
+ PINMUX_GPIO(GPIO_PB7, PB7_DATA),
+ PINMUX_GPIO(GPIO_PB6, PB6_DATA),
+ PINMUX_GPIO(GPIO_PB5, PB5_DATA),
+ PINMUX_GPIO(GPIO_PB4, PB4_DATA),
+ PINMUX_GPIO(GPIO_PB3, PB3_DATA),
+ PINMUX_GPIO(GPIO_PB2, PB2_DATA),
+ PINMUX_GPIO(GPIO_PB1, PB1_DATA),
+ PINMUX_GPIO(GPIO_PB0, PB0_DATA),
+
+ /* PC */
+ PINMUX_GPIO(GPIO_PC7, PC7_DATA),
+ PINMUX_GPIO(GPIO_PC6, PC6_DATA),
+ PINMUX_GPIO(GPIO_PC5, PC5_DATA),
+ PINMUX_GPIO(GPIO_PC4, PC4_DATA),
+ PINMUX_GPIO(GPIO_PC3, PC3_DATA),
+ PINMUX_GPIO(GPIO_PC2, PC2_DATA),
+ PINMUX_GPIO(GPIO_PC1, PC1_DATA),
+ PINMUX_GPIO(GPIO_PC0, PC0_DATA),
+
+ /* PD */
+ PINMUX_GPIO(GPIO_PD7, PD7_DATA),
+ PINMUX_GPIO(GPIO_PD6, PD6_DATA),
+ PINMUX_GPIO(GPIO_PD5, PD5_DATA),
+ PINMUX_GPIO(GPIO_PD4, PD4_DATA),
+ PINMUX_GPIO(GPIO_PD3, PD3_DATA),
+ PINMUX_GPIO(GPIO_PD2, PD2_DATA),
+ PINMUX_GPIO(GPIO_PD1, PD1_DATA),
+ PINMUX_GPIO(GPIO_PD0, PD0_DATA),
+
+ /* PE */
+ PINMUX_GPIO(GPIO_PE5, PE5_DATA),
+ PINMUX_GPIO(GPIO_PE4, PE4_DATA),
+ PINMUX_GPIO(GPIO_PE3, PE3_DATA),
+ PINMUX_GPIO(GPIO_PE2, PE2_DATA),
+ PINMUX_GPIO(GPIO_PE1, PE1_DATA),
+ PINMUX_GPIO(GPIO_PE0, PE0_DATA),
+
+ /* PF */
+ PINMUX_GPIO(GPIO_PF7, PF7_DATA),
+ PINMUX_GPIO(GPIO_PF6, PF6_DATA),
+ PINMUX_GPIO(GPIO_PF5, PF5_DATA),
+ PINMUX_GPIO(GPIO_PF4, PF4_DATA),
+ PINMUX_GPIO(GPIO_PF3, PF3_DATA),
+ PINMUX_GPIO(GPIO_PF2, PF2_DATA),
+ PINMUX_GPIO(GPIO_PF1, PF1_DATA),
+ PINMUX_GPIO(GPIO_PF0, PF0_DATA),
+
+ /* PG */
+ PINMUX_GPIO(GPIO_PG7, PG7_DATA),
+ PINMUX_GPIO(GPIO_PG6, PG6_DATA),
+ PINMUX_GPIO(GPIO_PG5, PG5_DATA),
+ PINMUX_GPIO(GPIO_PG4, PG4_DATA),
+ PINMUX_GPIO(GPIO_PG3, PG3_DATA),
+ PINMUX_GPIO(GPIO_PG2, PG2_DATA),
+ PINMUX_GPIO(GPIO_PG1, PG1_DATA),
+ PINMUX_GPIO(GPIO_PG0, PG0_DATA),
+
+ /* PH */
+ PINMUX_GPIO(GPIO_PH7, PH7_DATA),
+ PINMUX_GPIO(GPIO_PH6, PH6_DATA),
+ PINMUX_GPIO(GPIO_PH5, PH5_DATA),
+ PINMUX_GPIO(GPIO_PH4, PH4_DATA),
+ PINMUX_GPIO(GPIO_PH3, PH3_DATA),
+ PINMUX_GPIO(GPIO_PH2, PH2_DATA),
+ PINMUX_GPIO(GPIO_PH1, PH1_DATA),
+ PINMUX_GPIO(GPIO_PH0, PH0_DATA),
+
+ /* PJ */
+ PINMUX_GPIO(GPIO_PJ7, PJ7_DATA),
+ PINMUX_GPIO(GPIO_PJ6, PJ6_DATA),
+ PINMUX_GPIO(GPIO_PJ5, PJ5_DATA),
+ PINMUX_GPIO(GPIO_PJ4, PJ4_DATA),
+ PINMUX_GPIO(GPIO_PJ3, PJ3_DATA),
+ PINMUX_GPIO(GPIO_PJ2, PJ2_DATA),
+ PINMUX_GPIO(GPIO_PJ1, PJ1_DATA),
+ PINMUX_GPIO(GPIO_PJ0, PJ0_DATA),
+
+ /* PK */
+ PINMUX_GPIO(GPIO_PK7, PK7_DATA),
+ PINMUX_GPIO(GPIO_PK6, PK6_DATA),
+ PINMUX_GPIO(GPIO_PK5, PK5_DATA),
+ PINMUX_GPIO(GPIO_PK4, PK4_DATA),
+ PINMUX_GPIO(GPIO_PK3, PK3_DATA),
+ PINMUX_GPIO(GPIO_PK2, PK2_DATA),
+ PINMUX_GPIO(GPIO_PK1, PK1_DATA),
+ PINMUX_GPIO(GPIO_PK0, PK0_DATA),
+
+ /* PL */
+ PINMUX_GPIO(GPIO_PL7, PL7_DATA),
+ PINMUX_GPIO(GPIO_PL6, PL6_DATA),
+ PINMUX_GPIO(GPIO_PL5, PL5_DATA),
+ PINMUX_GPIO(GPIO_PL4, PL4_DATA),
+ PINMUX_GPIO(GPIO_PL3, PL3_DATA),
+ PINMUX_GPIO(GPIO_PL2, PL2_DATA),
+ PINMUX_GPIO(GPIO_PL1, PL1_DATA),
+ PINMUX_GPIO(GPIO_PL0, PL0_DATA),
+
+ /* PM */
+ PINMUX_GPIO(GPIO_PM1, PM1_DATA),
+ PINMUX_GPIO(GPIO_PM0, PM0_DATA),
+
+ /* PN */
+ PINMUX_GPIO(GPIO_PN7, PN7_DATA),
+ PINMUX_GPIO(GPIO_PN6, PN6_DATA),
+ PINMUX_GPIO(GPIO_PN5, PN5_DATA),
+ PINMUX_GPIO(GPIO_PN4, PN4_DATA),
+ PINMUX_GPIO(GPIO_PN3, PN3_DATA),
+ PINMUX_GPIO(GPIO_PN2, PN2_DATA),
+ PINMUX_GPIO(GPIO_PN1, PN1_DATA),
+ PINMUX_GPIO(GPIO_PN0, PN0_DATA),
+
+ /* PP */
+ PINMUX_GPIO(GPIO_PP5, PP5_DATA),
+ PINMUX_GPIO(GPIO_PP4, PP4_DATA),
+ PINMUX_GPIO(GPIO_PP3, PP3_DATA),
+ PINMUX_GPIO(GPIO_PP2, PP2_DATA),
+ PINMUX_GPIO(GPIO_PP1, PP1_DATA),
+ PINMUX_GPIO(GPIO_PP0, PP0_DATA),
+
+ /* PQ */
+ PINMUX_GPIO(GPIO_PQ4, PQ4_DATA),
+ PINMUX_GPIO(GPIO_PQ3, PQ3_DATA),
+ PINMUX_GPIO(GPIO_PQ2, PQ2_DATA),
+ PINMUX_GPIO(GPIO_PQ1, PQ1_DATA),
+ PINMUX_GPIO(GPIO_PQ0, PQ0_DATA),
+
+ /* PR */
+ PINMUX_GPIO(GPIO_PR3, PR3_DATA),
+ PINMUX_GPIO(GPIO_PR2, PR2_DATA),
+ PINMUX_GPIO(GPIO_PR1, PR1_DATA),
+ PINMUX_GPIO(GPIO_PR0, PR0_DATA),
+
+ /* FN */
+ PINMUX_GPIO(GPIO_FN_D63_AD31, D63_AD31_MARK),
+ PINMUX_GPIO(GPIO_FN_D62_AD30, D62_AD30_MARK),
+ PINMUX_GPIO(GPIO_FN_D61_AD29, D61_AD29_MARK),
+ PINMUX_GPIO(GPIO_FN_D60_AD28, D60_AD28_MARK),
+ PINMUX_GPIO(GPIO_FN_D59_AD27, D59_AD27_MARK),
+ PINMUX_GPIO(GPIO_FN_D58_AD26, D58_AD26_MARK),
+ PINMUX_GPIO(GPIO_FN_D57_AD25, D57_AD25_MARK),
+ PINMUX_GPIO(GPIO_FN_D56_AD24, D56_AD24_MARK),
+ PINMUX_GPIO(GPIO_FN_D55_AD23, D55_AD23_MARK),
+ PINMUX_GPIO(GPIO_FN_D54_AD22, D54_AD22_MARK),
+ PINMUX_GPIO(GPIO_FN_D53_AD21, D53_AD21_MARK),
+ PINMUX_GPIO(GPIO_FN_D52_AD20, D52_AD20_MARK),
+ PINMUX_GPIO(GPIO_FN_D51_AD19, D51_AD19_MARK),
+ PINMUX_GPIO(GPIO_FN_D50_AD18, D50_AD18_MARK),
+ PINMUX_GPIO(GPIO_FN_D49_AD17_DB5, D49_AD17_DB5_MARK),
+ PINMUX_GPIO(GPIO_FN_D48_AD16_DB4, D48_AD16_DB4_MARK),
+ PINMUX_GPIO(GPIO_FN_D47_AD15_DB3, D47_AD15_DB3_MARK),
+ PINMUX_GPIO(GPIO_FN_D46_AD14_DB2, D46_AD14_DB2_MARK),
+ PINMUX_GPIO(GPIO_FN_D45_AD13_DB1, D45_AD13_DB1_MARK),
+ PINMUX_GPIO(GPIO_FN_D44_AD12_DB0, D44_AD12_DB0_MARK),
+ PINMUX_GPIO(GPIO_FN_D43_AD11_DG5, D43_AD11_DG5_MARK),
+ PINMUX_GPIO(GPIO_FN_D42_AD10_DG4, D42_AD10_DG4_MARK),
+ PINMUX_GPIO(GPIO_FN_D41_AD9_DG3, D41_AD9_DG3_MARK),
+ PINMUX_GPIO(GPIO_FN_D40_AD8_DG2, D40_AD8_DG2_MARK),
+ PINMUX_GPIO(GPIO_FN_D39_AD7_DG1, D39_AD7_DG1_MARK),
+ PINMUX_GPIO(GPIO_FN_D38_AD6_DG0, D38_AD6_DG0_MARK),
+ PINMUX_GPIO(GPIO_FN_D37_AD5_DR5, D37_AD5_DR5_MARK),
+ PINMUX_GPIO(GPIO_FN_D36_AD4_DR4, D36_AD4_DR4_MARK),
+ PINMUX_GPIO(GPIO_FN_D35_AD3_DR3, D35_AD3_DR3_MARK),
+ PINMUX_GPIO(GPIO_FN_D34_AD2_DR2, D34_AD2_DR2_MARK),
+ PINMUX_GPIO(GPIO_FN_D33_AD1_DR1, D33_AD1_DR1_MARK),
+ PINMUX_GPIO(GPIO_FN_D32_AD0_DR0, D32_AD0_DR0_MARK),
+ PINMUX_GPIO(GPIO_FN_REQ1, REQ1_MARK),
+ PINMUX_GPIO(GPIO_FN_REQ2, REQ2_MARK),
+ PINMUX_GPIO(GPIO_FN_REQ3, REQ3_MARK),
+ PINMUX_GPIO(GPIO_FN_GNT1, GNT1_MARK),
+ PINMUX_GPIO(GPIO_FN_GNT2, GNT2_MARK),
+ PINMUX_GPIO(GPIO_FN_GNT3, GNT3_MARK),
+ PINMUX_GPIO(GPIO_FN_MMCCLK, MMCCLK_MARK),
+ PINMUX_GPIO(GPIO_FN_D31, D31_MARK),
+ PINMUX_GPIO(GPIO_FN_D30, D30_MARK),
+ PINMUX_GPIO(GPIO_FN_D29, D29_MARK),
+ PINMUX_GPIO(GPIO_FN_D28, D28_MARK),
+ PINMUX_GPIO(GPIO_FN_D27, D27_MARK),
+ PINMUX_GPIO(GPIO_FN_D26, D26_MARK),
+ PINMUX_GPIO(GPIO_FN_D25, D25_MARK),
+ PINMUX_GPIO(GPIO_FN_D24, D24_MARK),
+ PINMUX_GPIO(GPIO_FN_D23, D23_MARK),
+ PINMUX_GPIO(GPIO_FN_D22, D22_MARK),
+ PINMUX_GPIO(GPIO_FN_D21, D21_MARK),
+ PINMUX_GPIO(GPIO_FN_D20, D20_MARK),
+ PINMUX_GPIO(GPIO_FN_D19, D19_MARK),
+ PINMUX_GPIO(GPIO_FN_D18, D18_MARK),
+ PINMUX_GPIO(GPIO_FN_D17, D17_MARK),
+ PINMUX_GPIO(GPIO_FN_D16, D16_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF1_SCK, SCIF1_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF1_RXD, SCIF1_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF1_TXD, SCIF1_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_CTS, SCIF0_CTS_MARK),
+ PINMUX_GPIO(GPIO_FN_INTD, INTD_MARK),
+ PINMUX_GPIO(GPIO_FN_FCE, FCE_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_RTS, SCIF0_RTS_MARK),
+ PINMUX_GPIO(GPIO_FN_HSPI_CS, HSPI_CS_MARK),
+ PINMUX_GPIO(GPIO_FN_FSE, FSE_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_SCK, SCIF0_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_HSPI_CLK, HSPI_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_FRE, FRE_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_RXD, SCIF0_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_HSPI_RX, HSPI_RX_MARK),
+ PINMUX_GPIO(GPIO_FN_FRB, FRB_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_TXD, SCIF0_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_HSPI_TX, HSPI_TX_MARK),
+ PINMUX_GPIO(GPIO_FN_FWE, FWE_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF5_TXD, SCIF5_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_HAC1_SYNC, HAC1_SYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI1_WS, SSI1_WS_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF_TXD_PJ, SIOF_TXD_PJ_MARK),
+ PINMUX_GPIO(GPIO_FN_HAC0_SDOUT, HAC0_SDOUT_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI0_SDATA, SSI0_SDATA_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF_RXD_PJ, SIOF_RXD_PJ_MARK),
+ PINMUX_GPIO(GPIO_FN_HAC0_SDIN, HAC0_SDIN_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI0_SCK, SSI0_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF_SYNC_PJ, SIOF_SYNC_PJ_MARK),
+ PINMUX_GPIO(GPIO_FN_HAC0_SYNC, HAC0_SYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI0_WS, SSI0_WS_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF_MCLK_PJ, SIOF_MCLK_PJ_MARK),
+ PINMUX_GPIO(GPIO_FN_HAC_RES, HAC_RES_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF_SCK_PJ, SIOF_SCK_PJ_MARK),
+ PINMUX_GPIO(GPIO_FN_HAC0_BITCLK, HAC0_BITCLK_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI0_CLK, SSI0_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_HAC1_BITCLK, HAC1_BITCLK_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI1_CLK, SSI1_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_TCLK, TCLK_MARK),
+ PINMUX_GPIO(GPIO_FN_IOIS16, IOIS16_MARK),
+ PINMUX_GPIO(GPIO_FN_STATUS0, STATUS0_MARK),
+ PINMUX_GPIO(GPIO_FN_DRAK0_PK3, DRAK0_PK3_MARK),
+ PINMUX_GPIO(GPIO_FN_STATUS1, STATUS1_MARK),
+ PINMUX_GPIO(GPIO_FN_DRAK1_PK2, DRAK1_PK2_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK2, DACK2_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF2_TXD, SCIF2_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_MMCCMD, MMCCMD_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF_TXD_PK, SIOF_TXD_PK_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK3, DACK3_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF2_SCK, SCIF2_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_MMCDAT, MMCDAT_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF_SCK_PK, SIOF_SCK_PK_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ0, DREQ0_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ1, DREQ1_MARK),
+ PINMUX_GPIO(GPIO_FN_DRAK0_PK1, DRAK0_PK1_MARK),
+ PINMUX_GPIO(GPIO_FN_DRAK1_PK0, DRAK1_PK0_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ2, DREQ2_MARK),
+ PINMUX_GPIO(GPIO_FN_INTB, INTB_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ3, DREQ3_MARK),
+ PINMUX_GPIO(GPIO_FN_INTC, INTC_MARK),
+ PINMUX_GPIO(GPIO_FN_DRAK2, DRAK2_MARK),
+ PINMUX_GPIO(GPIO_FN_CE2A, CE2A_MARK),
+ PINMUX_GPIO(GPIO_FN_IRL4, IRL4_MARK),
+ PINMUX_GPIO(GPIO_FN_FD4, FD4_MARK),
+ PINMUX_GPIO(GPIO_FN_IRL5, IRL5_MARK),
+ PINMUX_GPIO(GPIO_FN_FD5, FD5_MARK),
+ PINMUX_GPIO(GPIO_FN_IRL6, IRL6_MARK),
+ PINMUX_GPIO(GPIO_FN_FD6, FD6_MARK),
+ PINMUX_GPIO(GPIO_FN_IRL7, IRL7_MARK),
+ PINMUX_GPIO(GPIO_FN_FD7, FD7_MARK),
+ PINMUX_GPIO(GPIO_FN_DRAK3, DRAK3_MARK),
+ PINMUX_GPIO(GPIO_FN_CE2B, CE2B_MARK),
+ PINMUX_GPIO(GPIO_FN_BREQ_BSACK, BREQ_BSACK_MARK),
+ PINMUX_GPIO(GPIO_FN_BACK_BSREQ, BACK_BSREQ_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF5_RXD, SCIF5_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_HAC1_SDIN, HAC1_SDIN_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI1_SCK, SSI1_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF5_SCK, SCIF5_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_HAC1_SDOUT, HAC1_SDOUT_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI1_SDATA, SSI1_SDATA_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF3_TXD, SCIF3_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_FCLE, FCLE_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF3_RXD, SCIF3_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_FALE, FALE_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF3_SCK, SCIF3_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_FD0, FD0_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF4_TXD, SCIF4_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_FD1, FD1_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF4_RXD, SCIF4_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_FD2, FD2_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF4_SCK, SCIF4_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_FD3, FD3_MARK),
+ PINMUX_GPIO(GPIO_FN_DEVSEL_DCLKOUT, DEVSEL_DCLKOUT_MARK),
+ PINMUX_GPIO(GPIO_FN_STOP_CDE, STOP_CDE_MARK),
+ PINMUX_GPIO(GPIO_FN_LOCK_ODDF, LOCK_ODDF_MARK),
+ PINMUX_GPIO(GPIO_FN_TRDY_DISPL, TRDY_DISPL_MARK),
+ PINMUX_GPIO(GPIO_FN_IRDY_HSYNC, IRDY_HSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_PCIFRAME_VSYNC, PCIFRAME_VSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_INTA, INTA_MARK),
+ PINMUX_GPIO(GPIO_FN_GNT0_GNTIN, GNT0_GNTIN_MARK),
+ PINMUX_GPIO(GPIO_FN_REQ0_REQOUT, REQ0_REQOUT_MARK),
+ PINMUX_GPIO(GPIO_FN_PERR, PERR_MARK),
+ PINMUX_GPIO(GPIO_FN_SERR, SERR_MARK),
+ PINMUX_GPIO(GPIO_FN_WE7_CBE3, WE7_CBE3_MARK),
+ PINMUX_GPIO(GPIO_FN_WE6_CBE2, WE6_CBE2_MARK),
+ PINMUX_GPIO(GPIO_FN_WE5_CBE1, WE5_CBE1_MARK),
+ PINMUX_GPIO(GPIO_FN_WE4_CBE0, WE4_CBE0_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF2_RXD, SCIF2_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF_RXD, SIOF_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_MRESETOUT, MRESETOUT_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQOUT, IRQOUT_MARK),
+};
+
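+/*
+ * Port control registers: the PxCR registers use a 2-bit field per pin
+ * (function/output/input/input with pull-up), while P1MSELR/P2MSELR use
+ * 1-bit fields to choose between multiplexed peripheral functions.
+ */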
+static struct pinmux_cfg_reg pinmux_config_regs[] = {
+ { PINMUX_CFG_REG("PACR", 0xffe70000, 16, 2) {
+ PA7_FN, PA7_OUT, PA7_IN, PA7_IN_PU,
+ PA6_FN, PA6_OUT, PA6_IN, PA6_IN_PU,
+ PA5_FN, PA5_OUT, PA5_IN, PA5_IN_PU,
+ PA4_FN, PA4_OUT, PA4_IN, PA4_IN_PU,
+ PA3_FN, PA3_OUT, PA3_IN, PA3_IN_PU,
+ PA2_FN, PA2_OUT, PA2_IN, PA2_IN_PU,
+ PA1_FN, PA1_OUT, PA1_IN, PA1_IN_PU,
+ PA0_FN, PA0_OUT, PA0_IN, PA0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PBCR", 0xffe70002, 16, 2) {
+ PB7_FN, PB7_OUT, PB7_IN, PB7_IN_PU,
+ PB6_FN, PB6_OUT, PB6_IN, PB6_IN_PU,
+ PB5_FN, PB5_OUT, PB5_IN, PB5_IN_PU,
+ PB4_FN, PB4_OUT, PB4_IN, PB4_IN_PU,
+ PB3_FN, PB3_OUT, PB3_IN, PB3_IN_PU,
+ PB2_FN, PB2_OUT, PB2_IN, PB2_IN_PU,
+ PB1_FN, PB1_OUT, PB1_IN, PB1_IN_PU,
+ PB0_FN, PB0_OUT, PB0_IN, PB0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PCCR", 0xffe70004, 16, 2) {
+ PC7_FN, PC7_OUT, PC7_IN, PC7_IN_PU,
+ PC6_FN, PC6_OUT, PC6_IN, PC6_IN_PU,
+ PC5_FN, PC5_OUT, PC5_IN, PC5_IN_PU,
+ PC4_FN, PC4_OUT, PC4_IN, PC4_IN_PU,
+ PC3_FN, PC3_OUT, PC3_IN, PC3_IN_PU,
+ PC2_FN, PC2_OUT, PC2_IN, PC2_IN_PU,
+ PC1_FN, PC1_OUT, PC1_IN, PC1_IN_PU,
+ PC0_FN, PC0_OUT, PC0_IN, PC0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PDCR", 0xffe70006, 16, 2) {
+ PD7_FN, PD7_OUT, PD7_IN, PD7_IN_PU,
+ PD6_FN, PD6_OUT, PD6_IN, PD6_IN_PU,
+ PD5_FN, PD5_OUT, PD5_IN, PD5_IN_PU,
+ PD4_FN, PD4_OUT, PD4_IN, PD4_IN_PU,
+ PD3_FN, PD3_OUT, PD3_IN, PD3_IN_PU,
+ PD2_FN, PD2_OUT, PD2_IN, PD2_IN_PU,
+ PD1_FN, PD1_OUT, PD1_IN, PD1_IN_PU,
+ PD0_FN, PD0_OUT, PD0_IN, PD0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PECR", 0xffe70008, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PE5_FN, PE5_OUT, PE5_IN, PE5_IN_PU,
+ PE4_FN, PE4_OUT, PE4_IN, PE4_IN_PU,
+ PE3_FN, PE3_OUT, PE3_IN, PE3_IN_PU,
+ PE2_FN, PE2_OUT, PE2_IN, PE2_IN_PU,
+ PE1_FN, PE1_OUT, PE1_IN, PE1_IN_PU,
+ PE0_FN, PE0_OUT, PE0_IN, PE0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PFCR", 0xffe7000a, 16, 2) {
+ PF7_FN, PF7_OUT, PF7_IN, PF7_IN_PU,
+ PF6_FN, PF6_OUT, PF6_IN, PF6_IN_PU,
+ PF5_FN, PF5_OUT, PF5_IN, PF5_IN_PU,
+ PF4_FN, PF4_OUT, PF4_IN, PF4_IN_PU,
+ PF3_FN, PF3_OUT, PF3_IN, PF3_IN_PU,
+ PF2_FN, PF2_OUT, PF2_IN, PF2_IN_PU,
+ PF1_FN, PF1_OUT, PF1_IN, PF1_IN_PU,
+ PF0_FN, PF0_OUT, PF0_IN, PF0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PGCR", 0xffe7000c, 16, 2) {
+ PG7_FN, PG7_OUT, PG7_IN, PG7_IN_PU,
+ PG6_FN, PG6_OUT, PG6_IN, PG6_IN_PU,
+ PG5_FN, PG5_OUT, PG5_IN, PG5_IN_PU,
+ PG4_FN, PG4_OUT, PG4_IN, PG4_IN_PU,
+ PG3_FN, PG3_OUT, PG3_IN, PG3_IN_PU,
+ PG2_FN, PG2_OUT, PG2_IN, PG2_IN_PU,
+ PG1_FN, PG1_OUT, PG1_IN, PG1_IN_PU,
+ PG0_FN, PG0_OUT, PG0_IN, PG0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PHCR", 0xffe7000e, 16, 2) {
+ PH7_FN, PH7_OUT, PH7_IN, PH7_IN_PU,
+ PH6_FN, PH6_OUT, PH6_IN, PH6_IN_PU,
+ PH5_FN, PH5_OUT, PH5_IN, PH5_IN_PU,
+ PH4_FN, PH4_OUT, PH4_IN, PH4_IN_PU,
+ PH3_FN, PH3_OUT, PH3_IN, PH3_IN_PU,
+ PH2_FN, PH2_OUT, PH2_IN, PH2_IN_PU,
+ PH1_FN, PH1_OUT, PH1_IN, PH1_IN_PU,
+ PH0_FN, PH0_OUT, PH0_IN, PH0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PJCR", 0xffe70010, 16, 2) {
+ PJ7_FN, PJ7_OUT, PJ7_IN, PJ7_IN_PU,
+ PJ6_FN, PJ6_OUT, PJ6_IN, PJ6_IN_PU,
+ PJ5_FN, PJ5_OUT, PJ5_IN, PJ5_IN_PU,
+ PJ4_FN, PJ4_OUT, PJ4_IN, PJ4_IN_PU,
+ PJ3_FN, PJ3_OUT, PJ3_IN, PJ3_IN_PU,
+ PJ2_FN, PJ2_OUT, PJ2_IN, PJ2_IN_PU,
+ PJ1_FN, PJ1_OUT, PJ1_IN, PJ1_IN_PU,
+ PJ0_FN, PJ0_OUT, PJ0_IN, PJ0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PKCR", 0xffe70012, 16, 2) {
+ PK7_FN, PK7_OUT, PK7_IN, PK7_IN_PU,
+ PK6_FN, PK6_OUT, PK6_IN, PK6_IN_PU,
+ PK5_FN, PK5_OUT, PK5_IN, PK5_IN_PU,
+ PK4_FN, PK4_OUT, PK4_IN, PK4_IN_PU,
+ PK3_FN, PK3_OUT, PK3_IN, PK3_IN_PU,
+ PK2_FN, PK2_OUT, PK2_IN, PK2_IN_PU,
+ PK1_FN, PK1_OUT, PK1_IN, PK1_IN_PU,
+ PK0_FN, PK0_OUT, PK0_IN, PK0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PLCR", 0xffe70014, 16, 2) {
+ PL7_FN, PL7_OUT, PL7_IN, PL7_IN_PU,
+ PL6_FN, PL6_OUT, PL6_IN, PL6_IN_PU,
+ PL5_FN, PL5_OUT, PL5_IN, PL5_IN_PU,
+ PL4_FN, PL4_OUT, PL4_IN, PL4_IN_PU,
+ PL3_FN, PL3_OUT, PL3_IN, PL3_IN_PU,
+ PL2_FN, PL2_OUT, PL2_IN, PL2_IN_PU,
+ PL1_FN, PL1_OUT, PL1_IN, PL1_IN_PU,
+ PL0_FN, PL0_OUT, PL0_IN, PL0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PMCR", 0xffe70016, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PM1_FN, PM1_OUT, PM1_IN, PM1_IN_PU,
+ PM0_FN, PM0_OUT, PM0_IN, PM0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PNCR", 0xffe70018, 16, 2) {
+ PN7_FN, PN7_OUT, PN7_IN, PN7_IN_PU,
+ PN6_FN, PN6_OUT, PN6_IN, PN6_IN_PU,
+ PN5_FN, PN5_OUT, PN5_IN, PN5_IN_PU,
+ PN4_FN, PN4_OUT, PN4_IN, PN4_IN_PU,
+ PN3_FN, PN3_OUT, PN3_IN, PN3_IN_PU,
+ PN2_FN, PN2_OUT, PN2_IN, PN2_IN_PU,
+ PN1_FN, PN1_OUT, PN1_IN, PN1_IN_PU,
+ PN0_FN, PN0_OUT, PN0_IN, PN0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PPCR", 0xffe7001a, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PP5_FN, PP5_OUT, PP5_IN, PP5_IN_PU,
+ PP4_FN, PP4_OUT, PP4_IN, PP4_IN_PU,
+ PP3_FN, PP3_OUT, PP3_IN, PP3_IN_PU,
+ PP2_FN, PP2_OUT, PP2_IN, PP2_IN_PU,
+ PP1_FN, PP1_OUT, PP1_IN, PP1_IN_PU,
+ PP0_FN, PP0_OUT, PP0_IN, PP0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PQCR", 0xffe7001c, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PQ4_FN, PQ4_OUT, PQ4_IN, PQ4_IN_PU,
+ PQ3_FN, PQ3_OUT, PQ3_IN, PQ3_IN_PU,
+ PQ2_FN, PQ2_OUT, PQ2_IN, PQ2_IN_PU,
+ PQ1_FN, PQ1_OUT, PQ1_IN, PQ1_IN_PU,
+ PQ0_FN, PQ0_OUT, PQ0_IN, PQ0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PRCR", 0xffe7001e, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PR3_FN, PR3_OUT, PR3_IN, PR3_IN_PU,
+ PR2_FN, PR2_OUT, PR2_IN, PR2_IN_PU,
+ PR1_FN, PR1_OUT, PR1_IN, PR1_IN_PU,
+ PR0_FN, PR0_OUT, PR0_IN, PR0_IN_PU }
+ },
+ { PINMUX_CFG_REG("P1MSELR", 0xffe70080, 16, 1) {
+ P1MSEL15_0, P1MSEL15_1,
+ P1MSEL14_0, P1MSEL14_1,
+ P1MSEL13_0, P1MSEL13_1,
+ P1MSEL12_0, P1MSEL12_1,
+ P1MSEL11_0, P1MSEL11_1,
+ P1MSEL10_0, P1MSEL10_1,
+ P1MSEL9_0, P1MSEL9_1,
+ P1MSEL8_0, P1MSEL8_1,
+ P1MSEL7_0, P1MSEL7_1,
+ P1MSEL6_0, P1MSEL6_1,
+ P1MSEL5_0, 0,
+ P1MSEL4_0, P1MSEL4_1,
+ P1MSEL3_0, P1MSEL3_1,
+ P1MSEL2_0, P1MSEL2_1,
+ P1MSEL1_0, P1MSEL1_1,
+ P1MSEL0_0, P1MSEL0_1 }
+ },
+ { PINMUX_CFG_REG("P2MSELR", 0xffe70082, 16, 1) {
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ P2MSEL2_0, P2MSEL2_1,
+ P2MSEL1_0, P2MSEL1_1,
+ P2MSEL0_0, P2MSEL0_1 }
+ },
+ {}
+};
+
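+/* Port data registers: one PxDR bit per pin, with unused bit positions left as 0 */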
+static struct pinmux_data_reg pinmux_data_regs[] = {
+ { PINMUX_DATA_REG("PADR", 0xffe70020, 8) {
+ PA7_DATA, PA6_DATA, PA5_DATA, PA4_DATA,
+ PA3_DATA, PA2_DATA, PA1_DATA, PA0_DATA }
+ },
+ { PINMUX_DATA_REG("PBDR", 0xffe70022, 8) {
+ PB7_DATA, PB6_DATA, PB5_DATA, PB4_DATA,
+ PB3_DATA, PB2_DATA, PB1_DATA, PB0_DATA }
+ },
+ { PINMUX_DATA_REG("PCDR", 0xffe70024, 8) {
+ PC7_DATA, PC6_DATA, PC5_DATA, PC4_DATA,
+ PC3_DATA, PC2_DATA, PC1_DATA, PC0_DATA }
+ },
+ { PINMUX_DATA_REG("PDDR", 0xffe70026, 8) {
+ PD7_DATA, PD6_DATA, PD5_DATA, PD4_DATA,
+ PD3_DATA, PD2_DATA, PD1_DATA, PD0_DATA }
+ },
+ { PINMUX_DATA_REG("PEDR", 0xffe70028, 8) {
+ 0, 0, PE5_DATA, PE4_DATA,
+ PE3_DATA, PE2_DATA, PE1_DATA, PE0_DATA }
+ },
+ { PINMUX_DATA_REG("PFDR", 0xffe7002a, 8) {
+ PF7_DATA, PF6_DATA, PF5_DATA, PF4_DATA,
+ PF3_DATA, PF2_DATA, PF1_DATA, PF0_DATA }
+ },
+ { PINMUX_DATA_REG("PGDR", 0xffe7002c, 8) {
+ PG7_DATA, PG6_DATA, PG5_DATA, PG4_DATA,
+ PG3_DATA, PG2_DATA, PG1_DATA, PG0_DATA }
+ },
+ { PINMUX_DATA_REG("PHDR", 0xffe7002e, 8) {
+ PH7_DATA, PH6_DATA, PH5_DATA, PH4_DATA,
+ PH3_DATA, PH2_DATA, PH1_DATA, PH0_DATA }
+ },
+ { PINMUX_DATA_REG("PJDR", 0xffe70030, 8) {
+ PJ7_DATA, PJ6_DATA, PJ5_DATA, PJ4_DATA,
+ PJ3_DATA, PJ2_DATA, PJ1_DATA, PJ0_DATA }
+ },
+ { PINMUX_DATA_REG("PKDR", 0xffe70032, 8) {
+ PK7_DATA, PK6_DATA, PK5_DATA, PK4_DATA,
+ PK3_DATA, PK2_DATA, PK1_DATA, PK0_DATA }
+ },
+ { PINMUX_DATA_REG("PLDR", 0xffe70034, 8) {
+ PL7_DATA, PL6_DATA, PL5_DATA, PL4_DATA,
+ PL3_DATA, PL2_DATA, PL1_DATA, PL0_DATA }
+ },
+ { PINMUX_DATA_REG("PMDR", 0xffe70036, 8) {
+ 0, 0, 0, 0,
+ 0, 0, PM1_DATA, PM0_DATA }
+ },
+ { PINMUX_DATA_REG("PNDR", 0xffe70038, 8) {
+ PN7_DATA, PN6_DATA, PN5_DATA, PN4_DATA,
+ PN3_DATA, PN2_DATA, PN1_DATA, PN0_DATA }
+ },
+ { PINMUX_DATA_REG("PPDR", 0xffe7003a, 8) {
+ 0, 0, PP5_DATA, PP4_DATA,
+ PP3_DATA, PP2_DATA, PP1_DATA, PP0_DATA }
+ },
+ { PINMUX_DATA_REG("PQDR", 0xffe7003c, 8) {
+ 0, 0, 0, PQ4_DATA,
+ PQ3_DATA, PQ2_DATA, PQ1_DATA, PQ0_DATA }
+ },
+ { PINMUX_DATA_REG("PRDR", 0xffe7003e, 8) {
+ 0, 0, 0, 0,
+ PR3_DATA, PR2_DATA, PR1_DATA, PR0_DATA }
+ },
+ { },
+};
+
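+/*
+ * Top-level pin function controller description: enum ranges plus the
+ * GPIO, config register and data register tables handed to register_pinmux().
+ */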
+static struct pinmux_info sh7785_pinmux_info = {
+ .name = "sh7785_pfc",
+ .reserved_id = PINMUX_RESERVED,
+ .data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END },
+ .input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END },
+ .input_pu = { PINMUX_INPUT_PULLUP_BEGIN, PINMUX_INPUT_PULLUP_END },
+ .output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END },
+ .mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END },
+ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+ .first_gpio = GPIO_PA7,
+ .last_gpio = GPIO_FN_IRQOUT,
+
+ .gpios = pinmux_gpios,
+ .cfg_regs = pinmux_config_regs,
+ .data_regs = pinmux_data_regs,
+
+ .gpio_data = pinmux_data,
+ .gpio_data_size = ARRAY_SIZE(pinmux_data),
+};
+
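+/* Register the SH7785 pin function controller at arch_initcall time */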
+static int __init plat_pinmux_setup(void)
+{
+ return register_pinmux(&sh7785_pinmux_info);
+}
+
+arch_initcall(plat_pinmux_setup);
diff --git a/arch/sh/kernel/cpu/sh4a/pinmux-sh7786.c b/arch/sh/kernel/cpu/sh4a/pinmux-sh7786.c
new file mode 100644
index 00000000..4229e072
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/pinmux-sh7786.c
@@ -0,0 +1,843 @@
+/*
+ * SH7786 Pinmux
+ *
+ * Copyright (C) 2008, 2009 Renesas Solutions Corp.
+ * Kuninori Morimoto <morimoto.kuninori@renesas.com>
+ *
+ * Based on SH7785 pinmux
+ *
+ * Copyright (C) 2008 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <cpu/sh7786.h>
+
+enum {
+ PINMUX_RESERVED = 0,
+
+ PINMUX_DATA_BEGIN,
+ PA7_DATA, PA6_DATA, PA5_DATA, PA4_DATA,
+ PA3_DATA, PA2_DATA, PA1_DATA, PA0_DATA,
+ PB7_DATA, PB6_DATA, PB5_DATA, PB4_DATA,
+ PB3_DATA, PB2_DATA, PB1_DATA, PB0_DATA,
+ PC7_DATA, PC6_DATA, PC5_DATA, PC4_DATA,
+ PC3_DATA, PC2_DATA, PC1_DATA, PC0_DATA,
+ PD7_DATA, PD6_DATA, PD5_DATA, PD4_DATA,
+ PD3_DATA, PD2_DATA, PD1_DATA, PD0_DATA,
+ PE7_DATA, PE6_DATA,
+ PF7_DATA, PF6_DATA, PF5_DATA, PF4_DATA,
+ PF3_DATA, PF2_DATA, PF1_DATA, PF0_DATA,
+ PG7_DATA, PG6_DATA, PG5_DATA,
+ PH7_DATA, PH6_DATA, PH5_DATA, PH4_DATA,
+ PH3_DATA, PH2_DATA, PH1_DATA, PH0_DATA,
+ PJ7_DATA, PJ6_DATA, PJ5_DATA, PJ4_DATA,
+ PJ3_DATA, PJ2_DATA, PJ1_DATA,
+ PINMUX_DATA_END,
+
+ PINMUX_INPUT_BEGIN,
+ PA7_IN, PA6_IN, PA5_IN, PA4_IN,
+ PA3_IN, PA2_IN, PA1_IN, PA0_IN,
+ PB7_IN, PB6_IN, PB5_IN, PB4_IN,
+ PB3_IN, PB2_IN, PB1_IN, PB0_IN,
+ PC7_IN, PC6_IN, PC5_IN, PC4_IN,
+ PC3_IN, PC2_IN, PC1_IN, PC0_IN,
+ PD7_IN, PD6_IN, PD5_IN, PD4_IN,
+ PD3_IN, PD2_IN, PD1_IN, PD0_IN,
+ PE7_IN, PE6_IN,
+ PF7_IN, PF6_IN, PF5_IN, PF4_IN,
+ PF3_IN, PF2_IN, PF1_IN, PF0_IN,
+ PG7_IN, PG6_IN, PG5_IN,
+ PH7_IN, PH6_IN, PH5_IN, PH4_IN,
+ PH3_IN, PH2_IN, PH1_IN, PH0_IN,
+ PJ7_IN, PJ6_IN, PJ5_IN, PJ4_IN,
+ PJ3_IN, PJ2_IN, PJ1_IN,
+ PINMUX_INPUT_END,
+
+ PINMUX_INPUT_PULLUP_BEGIN,
+ PA7_IN_PU, PA6_IN_PU, PA5_IN_PU, PA4_IN_PU,
+ PA3_IN_PU, PA2_IN_PU, PA1_IN_PU, PA0_IN_PU,
+ PB7_IN_PU, PB6_IN_PU, PB5_IN_PU, PB4_IN_PU,
+ PB3_IN_PU, PB2_IN_PU, PB1_IN_PU, PB0_IN_PU,
+ PC7_IN_PU, PC6_IN_PU, PC5_IN_PU, PC4_IN_PU,
+ PC3_IN_PU, PC2_IN_PU, PC1_IN_PU, PC0_IN_PU,
+ PD7_IN_PU, PD6_IN_PU, PD5_IN_PU, PD4_IN_PU,
+ PD3_IN_PU, PD2_IN_PU, PD1_IN_PU, PD0_IN_PU,
+ PE7_IN_PU, PE6_IN_PU,
+ PF7_IN_PU, PF6_IN_PU, PF5_IN_PU, PF4_IN_PU,
+ PF3_IN_PU, PF2_IN_PU, PF1_IN_PU, PF0_IN_PU,
+ PG7_IN_PU, PG6_IN_PU, PG5_IN_PU,
+ PH7_IN_PU, PH6_IN_PU, PH5_IN_PU, PH4_IN_PU,
+ PH3_IN_PU, PH2_IN_PU, PH1_IN_PU, PH0_IN_PU,
+ PJ7_IN_PU, PJ6_IN_PU, PJ5_IN_PU, PJ4_IN_PU,
+ PJ3_IN_PU, PJ2_IN_PU, PJ1_IN_PU,
+ PINMUX_INPUT_PULLUP_END,
+
+ PINMUX_OUTPUT_BEGIN,
+ PA7_OUT, PA6_OUT, PA5_OUT, PA4_OUT,
+ PA3_OUT, PA2_OUT, PA1_OUT, PA0_OUT,
+ PB7_OUT, PB6_OUT, PB5_OUT, PB4_OUT,
+ PB3_OUT, PB2_OUT, PB1_OUT, PB0_OUT,
+ PC7_OUT, PC6_OUT, PC5_OUT, PC4_OUT,
+ PC3_OUT, PC2_OUT, PC1_OUT, PC0_OUT,
+ PD7_OUT, PD6_OUT, PD5_OUT, PD4_OUT,
+ PD3_OUT, PD2_OUT, PD1_OUT, PD0_OUT,
+ PE7_OUT, PE6_OUT,
+ PF7_OUT, PF6_OUT, PF5_OUT, PF4_OUT,
+ PF3_OUT, PF2_OUT, PF1_OUT, PF0_OUT,
+ PG7_OUT, PG6_OUT, PG5_OUT,
+ PH7_OUT, PH6_OUT, PH5_OUT, PH4_OUT,
+ PH3_OUT, PH2_OUT, PH1_OUT, PH0_OUT,
+ PJ7_OUT, PJ6_OUT, PJ5_OUT, PJ4_OUT,
+ PJ3_OUT, PJ2_OUT, PJ1_OUT,
+ PINMUX_OUTPUT_END,
+
+ PINMUX_FUNCTION_BEGIN,
+ PA7_FN, PA6_FN, PA5_FN, PA4_FN,
+ PA3_FN, PA2_FN, PA1_FN, PA0_FN,
+ PB7_FN, PB6_FN, PB5_FN, PB4_FN,
+ PB3_FN, PB2_FN, PB1_FN, PB0_FN,
+ PC7_FN, PC6_FN, PC5_FN, PC4_FN,
+ PC3_FN, PC2_FN, PC1_FN, PC0_FN,
+ PD7_FN, PD6_FN, PD5_FN, PD4_FN,
+ PD3_FN, PD2_FN, PD1_FN, PD0_FN,
+ PE7_FN, PE6_FN,
+ PF7_FN, PF6_FN, PF5_FN, PF4_FN,
+ PF3_FN, PF2_FN, PF1_FN, PF0_FN,
+ PG7_FN, PG6_FN, PG5_FN,
+ PH7_FN, PH6_FN, PH5_FN, PH4_FN,
+ PH3_FN, PH2_FN, PH1_FN, PH0_FN,
+ PJ7_FN, PJ6_FN, PJ5_FN, PJ4_FN,
+ PJ3_FN, PJ2_FN, PJ1_FN,
+ P1MSEL14_0, P1MSEL14_1,
+ P1MSEL13_0, P1MSEL13_1,
+ P1MSEL12_0, P1MSEL12_1,
+ P1MSEL11_0, P1MSEL11_1,
+ P1MSEL10_0, P1MSEL10_1,
+ P1MSEL9_0, P1MSEL9_1,
+ P1MSEL8_0, P1MSEL8_1,
+ P1MSEL7_0, P1MSEL7_1,
+ P1MSEL6_0, P1MSEL6_1,
+ P1MSEL5_0, P1MSEL5_1,
+ P1MSEL4_0, P1MSEL4_1,
+ P1MSEL3_0, P1MSEL3_1,
+ P1MSEL2_0, P1MSEL2_1,
+ P1MSEL1_0, P1MSEL1_1,
+ P1MSEL0_0, P1MSEL0_1,
+
+ P2MSEL15_0, P2MSEL15_1,
+ P2MSEL14_0, P2MSEL14_1,
+ P2MSEL13_0, P2MSEL13_1,
+ P2MSEL12_0, P2MSEL12_1,
+ P2MSEL11_0, P2MSEL11_1,
+ P2MSEL10_0, P2MSEL10_1,
+ P2MSEL9_0, P2MSEL9_1,
+ P2MSEL8_0, P2MSEL8_1,
+ P2MSEL7_0, P2MSEL7_1,
+ P2MSEL6_0, P2MSEL6_1,
+ P2MSEL5_0, P2MSEL5_1,
+ P2MSEL4_0, P2MSEL4_1,
+ P2MSEL3_0, P2MSEL3_1,
+ P2MSEL2_0, P2MSEL2_1,
+ P2MSEL1_0, P2MSEL1_1,
+ P2MSEL0_0, P2MSEL0_1,
+ PINMUX_FUNCTION_END,
+
+ PINMUX_MARK_BEGIN,
+ DCLKIN_MARK, DCLKOUT_MARK, ODDF_MARK,
+ VSYNC_MARK, HSYNC_MARK, CDE_MARK, DISP_MARK,
+ DR0_MARK, DR1_MARK, DR2_MARK, DR3_MARK, DR4_MARK, DR5_MARK,
+ DG0_MARK, DG1_MARK, DG2_MARK, DG3_MARK, DG4_MARK, DG5_MARK,
+ DB0_MARK, DB1_MARK, DB2_MARK, DB3_MARK, DB4_MARK, DB5_MARK,
+ ETH_MAGIC_MARK, ETH_LINK_MARK, ETH_TX_ER_MARK, ETH_TX_EN_MARK,
+ ETH_MDIO_MARK, ETH_RX_CLK_MARK, ETH_MDC_MARK, ETH_COL_MARK,
+ ETH_TX_CLK_MARK, ETH_CRS_MARK, ETH_RX_DV_MARK, ETH_RX_ER_MARK,
+ ETH_TXD3_MARK, ETH_TXD2_MARK, ETH_TXD1_MARK, ETH_TXD0_MARK,
+ ETH_RXD3_MARK, ETH_RXD2_MARK, ETH_RXD1_MARK, ETH_RXD0_MARK,
+ HSPI_CLK_MARK, HSPI_CS_MARK, HSPI_RX_MARK, HSPI_TX_MARK,
+ SCIF0_CTS_MARK, SCIF0_RTS_MARK,
+ SCIF0_SCK_MARK, SCIF0_RXD_MARK, SCIF0_TXD_MARK,
+ SCIF1_SCK_MARK, SCIF1_RXD_MARK, SCIF1_TXD_MARK,
+ SCIF3_SCK_MARK, SCIF3_RXD_MARK, SCIF3_TXD_MARK,
+ SCIF4_SCK_MARK, SCIF4_RXD_MARK, SCIF4_TXD_MARK,
+ SCIF5_SCK_MARK, SCIF5_RXD_MARK, SCIF5_TXD_MARK,
+ BREQ_MARK, IOIS16_MARK, CE2B_MARK, CE2A_MARK, BACK_MARK,
+ FALE_MARK, FRB_MARK, FSTATUS_MARK,
+ FSE_MARK, FCLE_MARK,
+ DACK0_MARK, DACK1_MARK, DACK2_MARK, DACK3_MARK,
+ DREQ0_MARK, DREQ1_MARK, DREQ2_MARK, DREQ3_MARK,
+ DRAK0_MARK, DRAK1_MARK, DRAK2_MARK, DRAK3_MARK,
+ USB_OVC1_MARK, USB_OVC0_MARK,
+ USB_PENC1_MARK, USB_PENC0_MARK,
+ HAC_RES_MARK,
+ HAC1_SDOUT_MARK, HAC1_SDIN_MARK, HAC1_SYNC_MARK, HAC1_BITCLK_MARK,
+ HAC0_SDOUT_MARK, HAC0_SDIN_MARK, HAC0_SYNC_MARK, HAC0_BITCLK_MARK,
+ SSI0_SDATA_MARK, SSI0_SCK_MARK, SSI0_WS_MARK, SSI0_CLK_MARK,
+ SSI1_SDATA_MARK, SSI1_SCK_MARK, SSI1_WS_MARK, SSI1_CLK_MARK,
+ SSI2_SDATA_MARK, SSI2_SCK_MARK, SSI2_WS_MARK,
+ SSI3_SDATA_MARK, SSI3_SCK_MARK, SSI3_WS_MARK,
+ SDIF1CMD_MARK, SDIF1CD_MARK, SDIF1WP_MARK, SDIF1CLK_MARK,
+ SDIF1D3_MARK, SDIF1D2_MARK, SDIF1D1_MARK, SDIF1D0_MARK,
+ SDIF0CMD_MARK, SDIF0CD_MARK, SDIF0WP_MARK, SDIF0CLK_MARK,
+ SDIF0D3_MARK, SDIF0D2_MARK, SDIF0D1_MARK, SDIF0D0_MARK,
+ TCLK_MARK,
+ IRL7_MARK, IRL6_MARK, IRL5_MARK, IRL4_MARK,
+ PINMUX_MARK_END,
+};
+
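+/* Pin function data for the SH7786; same layout as the SH7785 table above */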
+static pinmux_enum_t pinmux_data[] = {
+
+ /* PA GPIO */
+ PINMUX_DATA(PA7_DATA, PA7_IN, PA7_OUT, PA7_IN_PU),
+ PINMUX_DATA(PA6_DATA, PA6_IN, PA6_OUT, PA6_IN_PU),
+ PINMUX_DATA(PA5_DATA, PA5_IN, PA5_OUT, PA5_IN_PU),
+ PINMUX_DATA(PA4_DATA, PA4_IN, PA4_OUT, PA4_IN_PU),
+ PINMUX_DATA(PA3_DATA, PA3_IN, PA3_OUT, PA3_IN_PU),
+ PINMUX_DATA(PA2_DATA, PA2_IN, PA2_OUT, PA2_IN_PU),
+ PINMUX_DATA(PA1_DATA, PA1_IN, PA1_OUT, PA1_IN_PU),
+ PINMUX_DATA(PA0_DATA, PA0_IN, PA0_OUT, PA0_IN_PU),
+
+ /* PB GPIO */
+ PINMUX_DATA(PB7_DATA, PB7_IN, PB7_OUT, PB7_IN_PU),
+ PINMUX_DATA(PB6_DATA, PB6_IN, PB6_OUT, PB6_IN_PU),
+ PINMUX_DATA(PB5_DATA, PB5_IN, PB5_OUT, PB5_IN_PU),
+ PINMUX_DATA(PB4_DATA, PB4_IN, PB4_OUT, PB4_IN_PU),
+ PINMUX_DATA(PB3_DATA, PB3_IN, PB3_OUT, PB3_IN_PU),
+ PINMUX_DATA(PB2_DATA, PB2_IN, PB2_OUT, PB2_IN_PU),
+ PINMUX_DATA(PB1_DATA, PB1_IN, PB1_OUT, PB1_IN_PU),
+ PINMUX_DATA(PB0_DATA, PB0_IN, PB0_OUT, PB0_IN_PU),
+
+ /* PC GPIO */
+ PINMUX_DATA(PC7_DATA, PC7_IN, PC7_OUT, PC7_IN_PU),
+ PINMUX_DATA(PC6_DATA, PC6_IN, PC6_OUT, PC6_IN_PU),
+ PINMUX_DATA(PC5_DATA, PC5_IN, PC5_OUT, PC5_IN_PU),
+ PINMUX_DATA(PC4_DATA, PC4_IN, PC4_OUT, PC4_IN_PU),
+ PINMUX_DATA(PC3_DATA, PC3_IN, PC3_OUT, PC3_IN_PU),
+ PINMUX_DATA(PC2_DATA, PC2_IN, PC2_OUT, PC2_IN_PU),
+ PINMUX_DATA(PC1_DATA, PC1_IN, PC1_OUT, PC1_IN_PU),
+ PINMUX_DATA(PC0_DATA, PC0_IN, PC0_OUT, PC0_IN_PU),
+
+ /* PD GPIO */
+ PINMUX_DATA(PD7_DATA, PD7_IN, PD7_OUT, PD7_IN_PU),
+ PINMUX_DATA(PD6_DATA, PD6_IN, PD6_OUT, PD6_IN_PU),
+ PINMUX_DATA(PD5_DATA, PD5_IN, PD5_OUT, PD5_IN_PU),
+ PINMUX_DATA(PD4_DATA, PD4_IN, PD4_OUT, PD4_IN_PU),
+ PINMUX_DATA(PD3_DATA, PD3_IN, PD3_OUT, PD3_IN_PU),
+ PINMUX_DATA(PD2_DATA, PD2_IN, PD2_OUT, PD2_IN_PU),
+ PINMUX_DATA(PD1_DATA, PD1_IN, PD1_OUT, PD1_IN_PU),
+ PINMUX_DATA(PD0_DATA, PD0_IN, PD0_OUT, PD0_IN_PU),
+
+ /* PE GPIO */
+ PINMUX_DATA(PE7_DATA, PE7_IN, PE7_OUT, PE7_IN_PU),
+ PINMUX_DATA(PE6_DATA, PE6_IN, PE6_OUT, PE6_IN_PU),
+
+ /* PF GPIO */
+ PINMUX_DATA(PF7_DATA, PF7_IN, PF7_OUT, PF7_IN_PU),
+ PINMUX_DATA(PF6_DATA, PF6_IN, PF6_OUT, PF6_IN_PU),
+ PINMUX_DATA(PF5_DATA, PF5_IN, PF5_OUT, PF5_IN_PU),
+ PINMUX_DATA(PF4_DATA, PF4_IN, PF4_OUT, PF4_IN_PU),
+ PINMUX_DATA(PF3_DATA, PF3_IN, PF3_OUT, PF3_IN_PU),
+ PINMUX_DATA(PF2_DATA, PF2_IN, PF2_OUT, PF2_IN_PU),
+ PINMUX_DATA(PF1_DATA, PF1_IN, PF1_OUT, PF1_IN_PU),
+ PINMUX_DATA(PF0_DATA, PF0_IN, PF0_OUT, PF0_IN_PU),
+
+ /* PG GPIO */
+ PINMUX_DATA(PG7_DATA, PG7_IN, PG7_OUT, PG7_IN_PU),
+ PINMUX_DATA(PG6_DATA, PG6_IN, PG6_OUT, PG6_IN_PU),
+ PINMUX_DATA(PG5_DATA, PG5_IN, PG5_OUT, PG5_IN_PU),
+
+ /* PH GPIO */
+ PINMUX_DATA(PH7_DATA, PH7_IN, PH7_OUT, PH7_IN_PU),
+ PINMUX_DATA(PH6_DATA, PH6_IN, PH6_OUT, PH6_IN_PU),
+ PINMUX_DATA(PH5_DATA, PH5_IN, PH5_OUT, PH5_IN_PU),
+ PINMUX_DATA(PH4_DATA, PH4_IN, PH4_OUT, PH4_IN_PU),
+ PINMUX_DATA(PH3_DATA, PH3_IN, PH3_OUT, PH3_IN_PU),
+ PINMUX_DATA(PH2_DATA, PH2_IN, PH2_OUT, PH2_IN_PU),
+ PINMUX_DATA(PH1_DATA, PH1_IN, PH1_OUT, PH1_IN_PU),
+ PINMUX_DATA(PH0_DATA, PH0_IN, PH0_OUT, PH0_IN_PU),
+
+ /* PJ GPIO */
+ PINMUX_DATA(PJ7_DATA, PJ7_IN, PJ7_OUT, PJ7_IN_PU),
+ PINMUX_DATA(PJ6_DATA, PJ6_IN, PJ6_OUT, PJ6_IN_PU),
+ PINMUX_DATA(PJ5_DATA, PJ5_IN, PJ5_OUT, PJ5_IN_PU),
+ PINMUX_DATA(PJ4_DATA, PJ4_IN, PJ4_OUT, PJ4_IN_PU),
+ PINMUX_DATA(PJ3_DATA, PJ3_IN, PJ3_OUT, PJ3_IN_PU),
+ PINMUX_DATA(PJ2_DATA, PJ2_IN, PJ2_OUT, PJ2_IN_PU),
+ PINMUX_DATA(PJ1_DATA, PJ1_IN, PJ1_OUT, PJ1_IN_PU),
+
+ /* PA FN */
+ PINMUX_DATA(CDE_MARK, P1MSEL2_0, PA7_FN),
+ PINMUX_DATA(DISP_MARK, P1MSEL2_0, PA6_FN),
+ PINMUX_DATA(DR5_MARK, P1MSEL2_0, PA5_FN),
+ PINMUX_DATA(DR4_MARK, P1MSEL2_0, PA4_FN),
+ PINMUX_DATA(DR3_MARK, P1MSEL2_0, PA3_FN),
+ PINMUX_DATA(DR2_MARK, P1MSEL2_0, PA2_FN),
+ PINMUX_DATA(DR1_MARK, P1MSEL2_0, PA1_FN),
+ PINMUX_DATA(DR0_MARK, P1MSEL2_0, PA0_FN),
+ PINMUX_DATA(ETH_MAGIC_MARK, P1MSEL2_1, PA7_FN),
+ PINMUX_DATA(ETH_LINK_MARK, P1MSEL2_1, PA6_FN),
+ PINMUX_DATA(ETH_TX_ER_MARK, P1MSEL2_1, PA5_FN),
+ PINMUX_DATA(ETH_TX_EN_MARK, P1MSEL2_1, PA4_FN),
+ PINMUX_DATA(ETH_TXD3_MARK, P1MSEL2_1, PA3_FN),
+ PINMUX_DATA(ETH_TXD2_MARK, P1MSEL2_1, PA2_FN),
+ PINMUX_DATA(ETH_TXD1_MARK, P1MSEL2_1, PA1_FN),
+ PINMUX_DATA(ETH_TXD0_MARK, P1MSEL2_1, PA0_FN),
+
+ /* PB FN */
+ PINMUX_DATA(VSYNC_MARK, P1MSEL3_0, PB7_FN),
+ PINMUX_DATA(ODDF_MARK, P1MSEL3_0, PB6_FN),
+ PINMUX_DATA(DG5_MARK, P1MSEL2_0, PB5_FN),
+ PINMUX_DATA(DG4_MARK, P1MSEL2_0, PB4_FN),
+ PINMUX_DATA(DG3_MARK, P1MSEL2_0, PB3_FN),
+ PINMUX_DATA(DG2_MARK, P1MSEL2_0, PB2_FN),
+ PINMUX_DATA(DG1_MARK, P1MSEL2_0, PB1_FN),
+ PINMUX_DATA(DG0_MARK, P1MSEL2_0, PB0_FN),
+ PINMUX_DATA(HSPI_CLK_MARK, P1MSEL3_1, PB7_FN),
+ PINMUX_DATA(HSPI_CS_MARK, P1MSEL3_1, PB6_FN),
+ PINMUX_DATA(ETH_MDIO_MARK, P1MSEL2_1, PB5_FN),
+ PINMUX_DATA(ETH_RX_CLK_MARK, P1MSEL2_1, PB4_FN),
+ PINMUX_DATA(ETH_MDC_MARK, P1MSEL2_1, PB3_FN),
+ PINMUX_DATA(ETH_COL_MARK, P1MSEL2_1, PB2_FN),
+ PINMUX_DATA(ETH_TX_CLK_MARK, P1MSEL2_1, PB1_FN),
+ PINMUX_DATA(ETH_CRS_MARK, P1MSEL2_1, PB0_FN),
+
+ /* PC FN */
+ PINMUX_DATA(DCLKIN_MARK, P1MSEL3_0, PC7_FN),
+ PINMUX_DATA(HSYNC_MARK, P1MSEL3_0, PC6_FN),
+ PINMUX_DATA(DB5_MARK, P1MSEL2_0, PC5_FN),
+ PINMUX_DATA(DB4_MARK, P1MSEL2_0, PC4_FN),
+ PINMUX_DATA(DB3_MARK, P1MSEL2_0, PC3_FN),
+ PINMUX_DATA(DB2_MARK, P1MSEL2_0, PC2_FN),
+ PINMUX_DATA(DB1_MARK, P1MSEL2_0, PC1_FN),
+ PINMUX_DATA(DB0_MARK, P1MSEL2_0, PC0_FN),
+
+ PINMUX_DATA(HSPI_RX_MARK, P1MSEL3_1, PC7_FN),
+ PINMUX_DATA(HSPI_TX_MARK, P1MSEL3_1, PC6_FN),
+ PINMUX_DATA(ETH_RXD3_MARK, P1MSEL2_1, PC5_FN),
+ PINMUX_DATA(ETH_RXD2_MARK, P1MSEL2_1, PC4_FN),
+ PINMUX_DATA(ETH_RXD1_MARK, P1MSEL2_1, PC3_FN),
+ PINMUX_DATA(ETH_RXD0_MARK, P1MSEL2_1, PC2_FN),
+ PINMUX_DATA(ETH_RX_DV_MARK, P1MSEL2_1, PC1_FN),
+ PINMUX_DATA(ETH_RX_ER_MARK, P1MSEL2_1, PC0_FN),
+
+ /* PD FN */
+ PINMUX_DATA(DCLKOUT_MARK, PD7_FN),
+ PINMUX_DATA(SCIF1_SCK_MARK, PD6_FN),
+ PINMUX_DATA(SCIF1_RXD_MARK, PD5_FN),
+ PINMUX_DATA(SCIF1_TXD_MARK, PD4_FN),
+ PINMUX_DATA(DACK1_MARK, P1MSEL13_1, P1MSEL12_0, PD3_FN),
+ PINMUX_DATA(BACK_MARK, P1MSEL13_0, P1MSEL12_1, PD3_FN),
+ PINMUX_DATA(FALE_MARK, P1MSEL13_0, P1MSEL12_0, PD3_FN),
+ PINMUX_DATA(DACK0_MARK, P1MSEL14_1, PD2_FN),
+ PINMUX_DATA(FCLE_MARK, P1MSEL14_0, PD2_FN),
+ PINMUX_DATA(DREQ1_MARK, P1MSEL10_0, P1MSEL9_1, PD1_FN),
+ PINMUX_DATA(BREQ_MARK, P1MSEL10_1, P1MSEL9_0, PD1_FN),
+ PINMUX_DATA(USB_OVC1_MARK, P1MSEL10_0, P1MSEL9_0, PD1_FN),
+ PINMUX_DATA(DREQ0_MARK, P1MSEL11_1, PD0_FN),
+ PINMUX_DATA(USB_OVC0_MARK, P1MSEL11_0, PD0_FN),
+
+ /* PE FN */
+ PINMUX_DATA(USB_PENC1_MARK, PE7_FN),
+ PINMUX_DATA(USB_PENC0_MARK, PE6_FN),
+
+ /* PF FN */
+ PINMUX_DATA(HAC1_SDOUT_MARK, P2MSEL15_0, P2MSEL14_0, PF7_FN),
+ PINMUX_DATA(HAC1_SDIN_MARK, P2MSEL15_0, P2MSEL14_0, PF6_FN),
+ PINMUX_DATA(HAC1_SYNC_MARK, P2MSEL15_0, P2MSEL14_0, PF5_FN),
+ PINMUX_DATA(HAC1_BITCLK_MARK, P2MSEL15_0, P2MSEL14_0, PF4_FN),
+ PINMUX_DATA(HAC0_SDOUT_MARK, P2MSEL13_0, P2MSEL12_0, PF3_FN),
+ PINMUX_DATA(HAC0_SDIN_MARK, P2MSEL13_0, P2MSEL12_0, PF2_FN),
+ PINMUX_DATA(HAC0_SYNC_MARK, P2MSEL13_0, P2MSEL12_0, PF1_FN),
+ PINMUX_DATA(HAC0_BITCLK_MARK, P2MSEL13_0, P2MSEL12_0, PF0_FN),
+ PINMUX_DATA(SSI1_SDATA_MARK, P2MSEL15_0, P2MSEL14_1, PF7_FN),
+ PINMUX_DATA(SSI1_SCK_MARK, P2MSEL15_0, P2MSEL14_1, PF6_FN),
+ PINMUX_DATA(SSI1_WS_MARK, P2MSEL15_0, P2MSEL14_1, PF5_FN),
+ PINMUX_DATA(SSI1_CLK_MARK, P2MSEL15_0, P2MSEL14_1, PF4_FN),
+ PINMUX_DATA(SSI0_SDATA_MARK, P2MSEL13_0, P2MSEL12_1, PF3_FN),
+ PINMUX_DATA(SSI0_SCK_MARK, P2MSEL13_0, P2MSEL12_1, PF2_FN),
+ PINMUX_DATA(SSI0_WS_MARK, P2MSEL13_0, P2MSEL12_1, PF1_FN),
+ PINMUX_DATA(SSI0_CLK_MARK, P2MSEL13_0, P2MSEL12_1, PF0_FN),
+ PINMUX_DATA(SDIF1CMD_MARK, P2MSEL15_1, P2MSEL14_0, PF7_FN),
+ PINMUX_DATA(SDIF1CD_MARK, P2MSEL15_1, P2MSEL14_0, PF6_FN),
+ PINMUX_DATA(SDIF1WP_MARK, P2MSEL15_1, P2MSEL14_0, PF5_FN),
+ PINMUX_DATA(SDIF1CLK_MARK, P2MSEL15_1, P2MSEL14_0, PF4_FN),
+ PINMUX_DATA(SDIF1D3_MARK, P2MSEL13_1, P2MSEL12_0, PF3_FN),
+ PINMUX_DATA(SDIF1D2_MARK, P2MSEL13_1, P2MSEL12_0, PF2_FN),
+ PINMUX_DATA(SDIF1D1_MARK, P2MSEL13_1, P2MSEL12_0, PF1_FN),
+ PINMUX_DATA(SDIF1D0_MARK, P2MSEL13_1, P2MSEL12_0, PF0_FN),
+
+ /* PG FN */
+ PINMUX_DATA(SCIF3_SCK_MARK, P1MSEL8_0, PG7_FN),
+ PINMUX_DATA(SSI2_SDATA_MARK, P1MSEL8_1, PG7_FN),
+ PINMUX_DATA(SCIF3_RXD_MARK, P1MSEL7_0, P1MSEL6_0, PG6_FN),
+ PINMUX_DATA(SSI2_SCK_MARK, P1MSEL7_1, P1MSEL6_0, PG6_FN),
+ PINMUX_DATA(TCLK_MARK, P1MSEL7_0, P1MSEL6_1, PG6_FN),
+ PINMUX_DATA(SCIF3_TXD_MARK, P1MSEL5_0, P1MSEL4_0, PG5_FN),
+ PINMUX_DATA(SSI2_WS_MARK, P1MSEL5_1, P1MSEL4_0, PG5_FN),
+ PINMUX_DATA(HAC_RES_MARK, P1MSEL5_0, P1MSEL4_1, PG5_FN),
+
+ /* PH FN */
+ PINMUX_DATA(DACK3_MARK, P2MSEL4_0, PH7_FN),
+ PINMUX_DATA(SDIF0CMD_MARK, P2MSEL4_1, PH7_FN),
+ PINMUX_DATA(DACK2_MARK, P2MSEL4_0, PH6_FN),
+ PINMUX_DATA(SDIF0CD_MARK, P2MSEL4_1, PH6_FN),
+ PINMUX_DATA(DREQ3_MARK, P2MSEL4_0, PH5_FN),
+ PINMUX_DATA(SDIF0WP_MARK, P2MSEL4_1, PH5_FN),
+ PINMUX_DATA(DREQ2_MARK, P2MSEL3_0, P2MSEL2_1, PH4_FN),
+ PINMUX_DATA(SDIF0CLK_MARK, P2MSEL3_1, P2MSEL2_0, PH4_FN),
+ PINMUX_DATA(SCIF0_CTS_MARK, P2MSEL3_0, P2MSEL2_0, PH4_FN),
+ PINMUX_DATA(SDIF0D3_MARK, P2MSEL1_1, P2MSEL0_0, PH3_FN),
+ PINMUX_DATA(SCIF0_RTS_MARK, P2MSEL1_0, P2MSEL0_0, PH3_FN),
+ PINMUX_DATA(IRL7_MARK, P2MSEL1_0, P2MSEL0_1, PH3_FN),
+ PINMUX_DATA(SDIF0D2_MARK, P2MSEL1_1, P2MSEL0_0, PH2_FN),
+ PINMUX_DATA(SCIF0_SCK_MARK, P2MSEL1_0, P2MSEL0_0, PH2_FN),
+ PINMUX_DATA(IRL6_MARK, P2MSEL1_0, P2MSEL0_1, PH2_FN),
+ PINMUX_DATA(SDIF0D1_MARK, P2MSEL1_1, P2MSEL0_0, PH1_FN),
+ PINMUX_DATA(SCIF0_RXD_MARK, P2MSEL1_0, P2MSEL0_0, PH1_FN),
+ PINMUX_DATA(IRL5_MARK, P2MSEL1_0, P2MSEL0_1, PH1_FN),
+ PINMUX_DATA(SDIF0D0_MARK, P2MSEL1_1, P2MSEL0_0, PH0_FN),
+ PINMUX_DATA(SCIF0_TXD_MARK, P2MSEL1_0, P2MSEL0_0, PH0_FN),
+ PINMUX_DATA(IRL4_MARK, P2MSEL1_0, P2MSEL0_1, PH0_FN),
+
+ /* PJ FN */
+ PINMUX_DATA(SCIF5_SCK_MARK, P2MSEL11_1, PJ7_FN),
+ PINMUX_DATA(FRB_MARK, P2MSEL11_0, PJ7_FN),
+ PINMUX_DATA(SCIF5_RXD_MARK, P2MSEL10_0, PJ6_FN),
+ PINMUX_DATA(IOIS16_MARK, P2MSEL10_1, PJ6_FN),
+ PINMUX_DATA(SCIF5_TXD_MARK, P2MSEL10_0, PJ5_FN),
+ PINMUX_DATA(CE2B_MARK, P2MSEL10_1, PJ5_FN),
+ PINMUX_DATA(DRAK3_MARK, P2MSEL7_0, PJ4_FN),
+ PINMUX_DATA(CE2A_MARK, P2MSEL7_1, PJ4_FN),
+ PINMUX_DATA(SCIF4_SCK_MARK, P2MSEL9_0, P2MSEL8_0, PJ3_FN),
+ PINMUX_DATA(DRAK2_MARK, P2MSEL9_0, P2MSEL8_1, PJ3_FN),
+ PINMUX_DATA(SSI3_WS_MARK, P2MSEL9_1, P2MSEL8_0, PJ3_FN),
+ PINMUX_DATA(SCIF4_RXD_MARK, P2MSEL6_1, P2MSEL5_0, PJ2_FN),
+ PINMUX_DATA(DRAK1_MARK, P2MSEL6_0, P2MSEL5_1, PJ2_FN),
+ PINMUX_DATA(FSTATUS_MARK, P2MSEL6_0, P2MSEL5_0, PJ2_FN),
+ PINMUX_DATA(SSI3_SDATA_MARK, P2MSEL6_1, P2MSEL5_1, PJ2_FN),
+ PINMUX_DATA(SCIF4_TXD_MARK, P2MSEL6_1, P2MSEL5_0, PJ1_FN),
+ PINMUX_DATA(DRAK0_MARK, P2MSEL6_0, P2MSEL5_1, PJ1_FN),
+ PINMUX_DATA(FSE_MARK, P2MSEL6_0, P2MSEL5_0, PJ1_FN),
+ PINMUX_DATA(SSI3_SCK_MARK, P2MSEL6_1, P2MSEL5_1, PJ1_FN),
+};
+
+static struct pinmux_gpio pinmux_gpios[] = {
+ /* PA */
+ PINMUX_GPIO(GPIO_PA7, PA7_DATA),
+ PINMUX_GPIO(GPIO_PA6, PA6_DATA),
+ PINMUX_GPIO(GPIO_PA5, PA5_DATA),
+ PINMUX_GPIO(GPIO_PA4, PA4_DATA),
+ PINMUX_GPIO(GPIO_PA3, PA3_DATA),
+ PINMUX_GPIO(GPIO_PA2, PA2_DATA),
+ PINMUX_GPIO(GPIO_PA1, PA1_DATA),
+ PINMUX_GPIO(GPIO_PA0, PA0_DATA),
+
+ /* PB */
+ PINMUX_GPIO(GPIO_PB7, PB7_DATA),
+ PINMUX_GPIO(GPIO_PB6, PB6_DATA),
+ PINMUX_GPIO(GPIO_PB5, PB5_DATA),
+ PINMUX_GPIO(GPIO_PB4, PB4_DATA),
+ PINMUX_GPIO(GPIO_PB3, PB3_DATA),
+ PINMUX_GPIO(GPIO_PB2, PB2_DATA),
+ PINMUX_GPIO(GPIO_PB1, PB1_DATA),
+ PINMUX_GPIO(GPIO_PB0, PB0_DATA),
+
+ /* PC */
+ PINMUX_GPIO(GPIO_PC7, PC7_DATA),
+ PINMUX_GPIO(GPIO_PC6, PC6_DATA),
+ PINMUX_GPIO(GPIO_PC5, PC5_DATA),
+ PINMUX_GPIO(GPIO_PC4, PC4_DATA),
+ PINMUX_GPIO(GPIO_PC3, PC3_DATA),
+ PINMUX_GPIO(GPIO_PC2, PC2_DATA),
+ PINMUX_GPIO(GPIO_PC1, PC1_DATA),
+ PINMUX_GPIO(GPIO_PC0, PC0_DATA),
+
+ /* PD */
+ PINMUX_GPIO(GPIO_PD7, PD7_DATA),
+ PINMUX_GPIO(GPIO_PD6, PD6_DATA),
+ PINMUX_GPIO(GPIO_PD5, PD5_DATA),
+ PINMUX_GPIO(GPIO_PD4, PD4_DATA),
+ PINMUX_GPIO(GPIO_PD3, PD3_DATA),
+ PINMUX_GPIO(GPIO_PD2, PD2_DATA),
+ PINMUX_GPIO(GPIO_PD1, PD1_DATA),
+ PINMUX_GPIO(GPIO_PD0, PD0_DATA),
+
+ /* PE */
+ PINMUX_GPIO(GPIO_PE5, PE7_DATA),
+ PINMUX_GPIO(GPIO_PE4, PE6_DATA),
+
+ /* PF */
+ PINMUX_GPIO(GPIO_PF7, PF7_DATA),
+ PINMUX_GPIO(GPIO_PF6, PF6_DATA),
+ PINMUX_GPIO(GPIO_PF5, PF5_DATA),
+ PINMUX_GPIO(GPIO_PF4, PF4_DATA),
+ PINMUX_GPIO(GPIO_PF3, PF3_DATA),
+ PINMUX_GPIO(GPIO_PF2, PF2_DATA),
+ PINMUX_GPIO(GPIO_PF1, PF1_DATA),
+ PINMUX_GPIO(GPIO_PF0, PF0_DATA),
+
+ /* PG */
+ PINMUX_GPIO(GPIO_PG7, PG7_DATA),
+ PINMUX_GPIO(GPIO_PG6, PG6_DATA),
+ PINMUX_GPIO(GPIO_PG5, PG5_DATA),
+
+ /* PH */
+ PINMUX_GPIO(GPIO_PH7, PH7_DATA),
+ PINMUX_GPIO(GPIO_PH6, PH6_DATA),
+ PINMUX_GPIO(GPIO_PH5, PH5_DATA),
+ PINMUX_GPIO(GPIO_PH4, PH4_DATA),
+ PINMUX_GPIO(GPIO_PH3, PH3_DATA),
+ PINMUX_GPIO(GPIO_PH2, PH2_DATA),
+ PINMUX_GPIO(GPIO_PH1, PH1_DATA),
+ PINMUX_GPIO(GPIO_PH0, PH0_DATA),
+
+ /* PJ */
+ PINMUX_GPIO(GPIO_PJ7, PJ7_DATA),
+ PINMUX_GPIO(GPIO_PJ6, PJ6_DATA),
+ PINMUX_GPIO(GPIO_PJ5, PJ5_DATA),
+ PINMUX_GPIO(GPIO_PJ4, PJ4_DATA),
+ PINMUX_GPIO(GPIO_PJ3, PJ3_DATA),
+ PINMUX_GPIO(GPIO_PJ2, PJ2_DATA),
+ PINMUX_GPIO(GPIO_PJ1, PJ1_DATA),
+
+ /* FN */
+ PINMUX_GPIO(GPIO_FN_CDE, CDE_MARK),
+ PINMUX_GPIO(GPIO_FN_ETH_MAGIC, ETH_MAGIC_MARK),
+ PINMUX_GPIO(GPIO_FN_DISP, DISP_MARK),
+ PINMUX_GPIO(GPIO_FN_ETH_LINK, ETH_LINK_MARK),
+ PINMUX_GPIO(GPIO_FN_DR5, DR5_MARK),
+ PINMUX_GPIO(GPIO_FN_ETH_TX_ER, ETH_TX_ER_MARK),
+ PINMUX_GPIO(GPIO_FN_DR4, DR4_MARK),
+ PINMUX_GPIO(GPIO_FN_ETH_TX_EN, ETH_TX_EN_MARK),
+ PINMUX_GPIO(GPIO_FN_DR3, DR3_MARK),
+ PINMUX_GPIO(GPIO_FN_ETH_TXD3, ETH_TXD3_MARK),
+ PINMUX_GPIO(GPIO_FN_DR2, DR2_MARK),
+ PINMUX_GPIO(GPIO_FN_ETH_TXD2, ETH_TXD2_MARK),
+ PINMUX_GPIO(GPIO_FN_DR1, DR1_MARK),
+ PINMUX_GPIO(GPIO_FN_ETH_TXD1, ETH_TXD1_MARK),
+ PINMUX_GPIO(GPIO_FN_DR0, DR0_MARK),
+ PINMUX_GPIO(GPIO_FN_ETH_TXD0, ETH_TXD0_MARK),
+ PINMUX_GPIO(GPIO_FN_VSYNC, VSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_HSPI_CLK, HSPI_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_ODDF, ODDF_MARK),
+ PINMUX_GPIO(GPIO_FN_HSPI_CS, HSPI_CS_MARK),
+ PINMUX_GPIO(GPIO_FN_DG5, DG5_MARK),
+ PINMUX_GPIO(GPIO_FN_ETH_MDIO, ETH_MDIO_MARK),
+ PINMUX_GPIO(GPIO_FN_DG4, DG4_MARK),
+ PINMUX_GPIO(GPIO_FN_ETH_RX_CLK, ETH_RX_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_DG3, DG3_MARK),
+ PINMUX_GPIO(GPIO_FN_ETH_MDC, ETH_MDC_MARK),
+ PINMUX_GPIO(GPIO_FN_DG2, DG2_MARK),
+ PINMUX_GPIO(GPIO_FN_ETH_COL, ETH_COL_MARK),
+ PINMUX_GPIO(GPIO_FN_DG1, DG1_MARK),
+ PINMUX_GPIO(GPIO_FN_ETH_TX_CLK, ETH_TX_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_DG0, DG0_MARK),
+ PINMUX_GPIO(GPIO_FN_ETH_CRS, ETH_CRS_MARK),
+ PINMUX_GPIO(GPIO_FN_DCLKIN, DCLKIN_MARK),
+ PINMUX_GPIO(GPIO_FN_HSPI_RX, HSPI_RX_MARK),
+ PINMUX_GPIO(GPIO_FN_HSYNC, HSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_HSPI_TX, HSPI_TX_MARK),
+ PINMUX_GPIO(GPIO_FN_DB5, DB5_MARK),
+ PINMUX_GPIO(GPIO_FN_ETH_RXD3, ETH_RXD3_MARK),
+ PINMUX_GPIO(GPIO_FN_DB4, DB4_MARK),
+ PINMUX_GPIO(GPIO_FN_ETH_RXD2, ETH_RXD2_MARK),
+ PINMUX_GPIO(GPIO_FN_DB3, DB3_MARK),
+ PINMUX_GPIO(GPIO_FN_ETH_RXD1, ETH_RXD1_MARK),
+ PINMUX_GPIO(GPIO_FN_DB2, DB2_MARK),
+ PINMUX_GPIO(GPIO_FN_ETH_RXD0, ETH_RXD0_MARK),
+ PINMUX_GPIO(GPIO_FN_DB1, DB1_MARK),
+ PINMUX_GPIO(GPIO_FN_ETH_RX_DV, ETH_RX_DV_MARK),
+ PINMUX_GPIO(GPIO_FN_DB0, DB0_MARK),
+ PINMUX_GPIO(GPIO_FN_ETH_RX_ER, ETH_RX_ER_MARK),
+ PINMUX_GPIO(GPIO_FN_DCLKOUT, DCLKOUT_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF1_SCK, SCIF1_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF1_RXD, SCIF1_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF1_TXD, SCIF1_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK1, DACK1_MARK),
+ PINMUX_GPIO(GPIO_FN_BACK, BACK_MARK),
+ PINMUX_GPIO(GPIO_FN_FALE, FALE_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK0, DACK0_MARK),
+ PINMUX_GPIO(GPIO_FN_FCLE, FCLE_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ1, DREQ1_MARK),
+ PINMUX_GPIO(GPIO_FN_BREQ, BREQ_MARK),
+ PINMUX_GPIO(GPIO_FN_USB_OVC1, USB_OVC1_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ0, DREQ0_MARK),
+ PINMUX_GPIO(GPIO_FN_USB_OVC0, USB_OVC0_MARK),
+ PINMUX_GPIO(GPIO_FN_USB_PENC1, USB_PENC1_MARK),
+ PINMUX_GPIO(GPIO_FN_USB_PENC0, USB_PENC0_MARK),
+ PINMUX_GPIO(GPIO_FN_HAC1_SDOUT, HAC1_SDOUT_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI1_SDATA, SSI1_SDATA_MARK),
+ PINMUX_GPIO(GPIO_FN_SDIF1CMD, SDIF1CMD_MARK),
+ PINMUX_GPIO(GPIO_FN_HAC1_SDIN, HAC1_SDIN_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI1_SCK, SSI1_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SDIF1CD, SDIF1CD_MARK),
+ PINMUX_GPIO(GPIO_FN_HAC1_SYNC, HAC1_SYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI1_WS, SSI1_WS_MARK),
+ PINMUX_GPIO(GPIO_FN_SDIF1WP, SDIF1WP_MARK),
+ PINMUX_GPIO(GPIO_FN_HAC1_BITCLK, HAC1_BITCLK_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI1_CLK, SSI1_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_SDIF1CLK, SDIF1CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_HAC0_SDOUT, HAC0_SDOUT_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI0_SDATA, SSI0_SDATA_MARK),
+ PINMUX_GPIO(GPIO_FN_SDIF1D3, SDIF1D3_MARK),
+ PINMUX_GPIO(GPIO_FN_HAC0_SDIN, HAC0_SDIN_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI0_SCK, SSI0_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SDIF1D2, SDIF1D2_MARK),
+ PINMUX_GPIO(GPIO_FN_HAC0_SYNC, HAC0_SYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI0_WS, SSI0_WS_MARK),
+ PINMUX_GPIO(GPIO_FN_SDIF1D1, SDIF1D1_MARK),
+ PINMUX_GPIO(GPIO_FN_HAC0_BITCLK, HAC0_BITCLK_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI0_CLK, SSI0_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_SDIF1D0, SDIF1D0_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF3_SCK, SCIF3_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI2_SDATA, SSI2_SDATA_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF3_RXD, SCIF3_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_TCLK, TCLK_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI2_SCK, SSI2_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF3_TXD, SCIF3_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_HAC_RES, HAC_RES_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI2_WS, SSI2_WS_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK3, DACK3_MARK),
+ PINMUX_GPIO(GPIO_FN_SDIF0CMD, SDIF0CMD_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK2, DACK2_MARK),
+ PINMUX_GPIO(GPIO_FN_SDIF0CD, SDIF0CD_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ3, DREQ3_MARK),
+ PINMUX_GPIO(GPIO_FN_SDIF0WP, SDIF0WP_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_CTS, SCIF0_CTS_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ2, DREQ2_MARK),
+ PINMUX_GPIO(GPIO_FN_SDIF0CLK, SDIF0CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_RTS, SCIF0_RTS_MARK),
+ PINMUX_GPIO(GPIO_FN_IRL7, IRL7_MARK),
+ PINMUX_GPIO(GPIO_FN_SDIF0D3, SDIF0D3_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_SCK, SCIF0_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_IRL6, IRL6_MARK),
+ PINMUX_GPIO(GPIO_FN_SDIF0D2, SDIF0D2_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_RXD, SCIF0_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_IRL5, IRL5_MARK),
+ PINMUX_GPIO(GPIO_FN_SDIF0D1, SDIF0D1_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_TXD, SCIF0_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_IRL4, IRL4_MARK),
+ PINMUX_GPIO(GPIO_FN_SDIF0D0, SDIF0D0_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF5_SCK, SCIF5_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_FRB, FRB_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF5_RXD, SCIF5_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_IOIS16, IOIS16_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF5_TXD, SCIF5_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_CE2B, CE2B_MARK),
+ PINMUX_GPIO(GPIO_FN_DRAK3, DRAK3_MARK),
+ PINMUX_GPIO(GPIO_FN_CE2A, CE2A_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF4_SCK, SCIF4_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_DRAK2, DRAK2_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI3_WS, SSI3_WS_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF4_RXD, SCIF4_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_DRAK1, DRAK1_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI3_SDATA, SSI3_SDATA_MARK),
+ PINMUX_GPIO(GPIO_FN_FSTATUS, FSTATUS_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF4_TXD, SCIF4_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_DRAK0, DRAK0_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI3_SCK, SSI3_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_FSE, FSE_MARK),
+};
+
+static struct pinmux_cfg_reg pinmux_config_regs[] = {
+ { PINMUX_CFG_REG("PACR", 0xffcc0000, 16, 2) {
+ PA7_FN, PA7_OUT, PA7_IN, PA7_IN_PU,
+ PA6_FN, PA6_OUT, PA6_IN, PA6_IN_PU,
+ PA5_FN, PA5_OUT, PA5_IN, PA5_IN_PU,
+ PA4_FN, PA4_OUT, PA4_IN, PA4_IN_PU,
+ PA3_FN, PA3_OUT, PA3_IN, PA3_IN_PU,
+ PA2_FN, PA2_OUT, PA2_IN, PA2_IN_PU,
+ PA1_FN, PA1_OUT, PA1_IN, PA1_IN_PU,
+ PA0_FN, PA0_OUT, PA0_IN, PA0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PBCR", 0xffcc0002, 16, 2) {
+ PB7_FN, PB7_OUT, PB7_IN, PB7_IN_PU,
+ PB6_FN, PB6_OUT, PB6_IN, PB6_IN_PU,
+ PB5_FN, PB5_OUT, PB5_IN, PB5_IN_PU,
+ PB4_FN, PB4_OUT, PB4_IN, PB4_IN_PU,
+ PB3_FN, PB3_OUT, PB3_IN, PB3_IN_PU,
+ PB2_FN, PB2_OUT, PB2_IN, PB2_IN_PU,
+ PB1_FN, PB1_OUT, PB1_IN, PB1_IN_PU,
+ PB0_FN, PB0_OUT, PB0_IN, PB0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PCCR", 0xffcc0004, 16, 2) {
+ PC7_FN, PC7_OUT, PC7_IN, PC7_IN_PU,
+ PC6_FN, PC6_OUT, PC6_IN, PC6_IN_PU,
+ PC5_FN, PC5_OUT, PC5_IN, PC5_IN_PU,
+ PC4_FN, PC4_OUT, PC4_IN, PC4_IN_PU,
+ PC3_FN, PC3_OUT, PC3_IN, PC3_IN_PU,
+ PC2_FN, PC2_OUT, PC2_IN, PC2_IN_PU,
+ PC1_FN, PC1_OUT, PC1_IN, PC1_IN_PU,
+ PC0_FN, PC0_OUT, PC0_IN, PC0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PDCR", 0xffcc0006, 16, 2) {
+ PD7_FN, PD7_OUT, PD7_IN, PD7_IN_PU,
+ PD6_FN, PD6_OUT, PD6_IN, PD6_IN_PU,
+ PD5_FN, PD5_OUT, PD5_IN, PD5_IN_PU,
+ PD4_FN, PD4_OUT, PD4_IN, PD4_IN_PU,
+ PD3_FN, PD3_OUT, PD3_IN, PD3_IN_PU,
+ PD2_FN, PD2_OUT, PD2_IN, PD2_IN_PU,
+ PD1_FN, PD1_OUT, PD1_IN, PD1_IN_PU,
+ PD0_FN, PD0_OUT, PD0_IN, PD0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PECR", 0xffcc0008, 16, 2) {
+ PE7_FN, PE7_OUT, PE7_IN, PE7_IN_PU,
+ PE6_FN, PE6_OUT, PE6_IN, PE6_IN_PU,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0, }
+ },
+ { PINMUX_CFG_REG("PFCR", 0xffcc000a, 16, 2) {
+ PF7_FN, PF7_OUT, PF7_IN, PF7_IN_PU,
+ PF6_FN, PF6_OUT, PF6_IN, PF6_IN_PU,
+ PF5_FN, PF5_OUT, PF5_IN, PF5_IN_PU,
+ PF4_FN, PF4_OUT, PF4_IN, PF4_IN_PU,
+ PF3_FN, PF3_OUT, PF3_IN, PF3_IN_PU,
+ PF2_FN, PF2_OUT, PF2_IN, PF2_IN_PU,
+ PF1_FN, PF1_OUT, PF1_IN, PF1_IN_PU,
+ PF0_FN, PF0_OUT, PF0_IN, PF0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PGCR", 0xffcc000c, 16, 2) {
+ PG7_FN, PG7_OUT, PG7_IN, PG7_IN_PU,
+ PG6_FN, PG6_OUT, PG6_IN, PG6_IN_PU,
+ PG5_FN, PG5_OUT, PG5_IN, PG5_IN_PU,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0, }
+ },
+ { PINMUX_CFG_REG("PHCR", 0xffcc000e, 16, 2) {
+ PH7_FN, PH7_OUT, PH7_IN, PH7_IN_PU,
+ PH6_FN, PH6_OUT, PH6_IN, PH6_IN_PU,
+ PH5_FN, PH5_OUT, PH5_IN, PH5_IN_PU,
+ PH4_FN, PH4_OUT, PH4_IN, PH4_IN_PU,
+ PH3_FN, PH3_OUT, PH3_IN, PH3_IN_PU,
+ PH2_FN, PH2_OUT, PH2_IN, PH2_IN_PU,
+ PH1_FN, PH1_OUT, PH1_IN, PH1_IN_PU,
+ PH0_FN, PH0_OUT, PH0_IN, PH0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PJCR", 0xffcc0010, 16, 2) {
+ PJ7_FN, PJ7_OUT, PJ7_IN, PJ7_IN_PU,
+ PJ6_FN, PJ6_OUT, PJ6_IN, PJ6_IN_PU,
+ PJ5_FN, PJ5_OUT, PJ5_IN, PJ5_IN_PU,
+ PJ4_FN, PJ4_OUT, PJ4_IN, PJ4_IN_PU,
+ PJ3_FN, PJ3_OUT, PJ3_IN, PJ3_IN_PU,
+ PJ2_FN, PJ2_OUT, PJ2_IN, PJ2_IN_PU,
+ PJ1_FN, PJ1_OUT, PJ1_IN, PJ1_IN_PU,
+ 0, 0, 0, 0, }
+ },
+ { PINMUX_CFG_REG("P1MSELR", 0xffcc0080, 16, 1) {
+ 0, 0,
+ P1MSEL14_0, P1MSEL14_1,
+ P1MSEL13_0, P1MSEL13_1,
+ P1MSEL12_0, P1MSEL12_1,
+ P1MSEL11_0, P1MSEL11_1,
+ P1MSEL10_0, P1MSEL10_1,
+ P1MSEL9_0, P1MSEL9_1,
+ P1MSEL8_0, P1MSEL8_1,
+ P1MSEL7_0, P1MSEL7_1,
+ P1MSEL6_0, P1MSEL6_1,
+ P1MSEL5_0, P1MSEL5_1,
+ P1MSEL4_0, P1MSEL4_1,
+ P1MSEL3_0, P1MSEL3_1,
+ P1MSEL2_0, P1MSEL2_1,
+ P1MSEL1_0, P1MSEL1_1,
+ P1MSEL0_0, P1MSEL0_1 }
+ },
+ { PINMUX_CFG_REG("P2MSELR", 0xffcc0082, 16, 1) {
+ P2MSEL15_0, P2MSEL15_1,
+ P2MSEL14_0, P2MSEL14_1,
+ P2MSEL13_0, P2MSEL13_1,
+ P2MSEL12_0, P2MSEL12_1,
+ P2MSEL11_0, P2MSEL11_1,
+ P2MSEL10_0, P2MSEL10_1,
+ P2MSEL9_0, P2MSEL9_1,
+ P2MSEL8_0, P2MSEL8_1,
+ P2MSEL7_0, P2MSEL7_1,
+ P2MSEL6_0, P2MSEL6_1,
+ P2MSEL5_0, P2MSEL5_1,
+ P2MSEL4_0, P2MSEL4_1,
+ P2MSEL3_0, P2MSEL3_1,
+ P2MSEL2_0, P2MSEL2_1,
+ P2MSEL1_0, P2MSEL1_1,
+ P2MSEL0_0, P2MSEL0_1 }
+ },
+ {}
+};
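+
+/*
+ * Each PINMUX_CFG_REG() above describes one 16-bit control register,
+ * listing its fields from the most significant bits down.  The port
+ * control registers (PACR through PJCR) carry a 2-bit field per pin whose
+ * four values are given in order -- peripheral function, output, input,
+ * and input with pull-up -- while P1MSELR/P2MSELR use 1-bit fields to
+ * select between the alternate functions multiplexed onto a pin.
+ * All-zero rows pad register bits that have no pin behind them.
+ */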
+
+static struct pinmux_data_reg pinmux_data_regs[] = {
+ { PINMUX_DATA_REG("PADR", 0xffcc0020, 8) {
+ PA7_DATA, PA6_DATA, PA5_DATA, PA4_DATA,
+ PA3_DATA, PA2_DATA, PA1_DATA, PA0_DATA }
+ },
+ { PINMUX_DATA_REG("PBDR", 0xffcc0022, 8) {
+ PB7_DATA, PB6_DATA, PB5_DATA, PB4_DATA,
+ PB3_DATA, PB2_DATA, PB1_DATA, PB0_DATA }
+ },
+ { PINMUX_DATA_REG("PCDR", 0xffcc0024, 8) {
+ PC7_DATA, PC6_DATA, PC5_DATA, PC4_DATA,
+ PC3_DATA, PC2_DATA, PC1_DATA, PC0_DATA }
+ },
+ { PINMUX_DATA_REG("PDDR", 0xffcc0026, 8) {
+ PD7_DATA, PD6_DATA, PD5_DATA, PD4_DATA,
+ PD3_DATA, PD2_DATA, PD1_DATA, PD0_DATA }
+ },
+ { PINMUX_DATA_REG("PEDR", 0xffcc0028, 8) {
+ PE7_DATA, PE6_DATA,
+ 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_DATA_REG("PFDR", 0xffcc002a, 8) {
+ PF7_DATA, PF6_DATA, PF5_DATA, PF4_DATA,
+ PF3_DATA, PF2_DATA, PF1_DATA, PF0_DATA }
+ },
+ { PINMUX_DATA_REG("PGDR", 0xffcc002c, 8) {
+ PG7_DATA, PG6_DATA, PG5_DATA, 0,
+ 0, 0, 0, 0 }
+ },
+ { PINMUX_DATA_REG("PHDR", 0xffcc002e, 8) {
+ PH7_DATA, PH6_DATA, PH5_DATA, PH4_DATA,
+ PH3_DATA, PH2_DATA, PH1_DATA, PH0_DATA }
+ },
+ { PINMUX_DATA_REG("PJDR", 0xffcc0030, 8) {
+ PJ7_DATA, PJ6_DATA, PJ5_DATA, PJ4_DATA,
+ PJ3_DATA, PJ2_DATA, PJ1_DATA, 0 }
+ },
+ { },
+};
+
+static struct pinmux_info sh7786_pinmux_info = {
+ .name = "sh7786_pfc",
+ .reserved_id = PINMUX_RESERVED,
+ .data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END },
+ .input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END },
+ .input_pu = { PINMUX_INPUT_PULLUP_BEGIN, PINMUX_INPUT_PULLUP_END },
+ .output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END },
+ .mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END },
+ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+ .first_gpio = GPIO_PA7,
+ .last_gpio = GPIO_FN_FSE,
+
+ .gpios = pinmux_gpios,
+ .cfg_regs = pinmux_config_regs,
+ .data_regs = pinmux_data_regs,
+
+ .gpio_data = pinmux_data,
+ .gpio_data_size = ARRAY_SIZE(pinmux_data),
+};
+
+static int __init plat_pinmux_setup(void)
+{
+ return register_pinmux(&sh7786_pinmux_info);
+}
+
+arch_initcall(plat_pinmux_setup);
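+
+/*
+ * With the PFC registered, pins are claimed through the generic GPIO API
+ * using the GPIO_Pxx and GPIO_FN_xxx enumerations above.  Purely as an
+ * illustration, a board file would do something like:
+ *
+ *	gpio_request(GPIO_FN_SCIF0_TXD, NULL);	   pin becomes SCIF0 TxD
+ *	gpio_request(GPIO_PA0, NULL);		   plain GPIO, port A bit 0
+ *	gpio_direction_input(GPIO_PA0);
+ */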
diff --git a/arch/sh/kernel/cpu/sh4a/pinmux-shx3.c b/arch/sh/kernel/cpu/sh4a/pinmux-shx3.c
new file mode 100644
index 00000000..aaa5338a
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/pinmux-shx3.c
@@ -0,0 +1,587 @@
+/*
+ * SH-X3 prototype CPU pinmux
+ *
+ * Copyright (C) 2010 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <cpu/shx3.h>
+
+enum {
+ PINMUX_RESERVED = 0,
+
+ PINMUX_DATA_BEGIN,
+ PA7_DATA, PA6_DATA, PA5_DATA, PA4_DATA,
+ PA3_DATA, PA2_DATA, PA1_DATA, PA0_DATA,
+ PB7_DATA, PB6_DATA, PB5_DATA, PB4_DATA,
+ PB3_DATA, PB2_DATA, PB1_DATA, PB0_DATA,
+ PC7_DATA, PC6_DATA, PC5_DATA, PC4_DATA,
+ PC3_DATA, PC2_DATA, PC1_DATA, PC0_DATA,
+ PD7_DATA, PD6_DATA, PD5_DATA, PD4_DATA,
+ PD3_DATA, PD2_DATA, PD1_DATA, PD0_DATA,
+ PE7_DATA, PE6_DATA, PE5_DATA, PE4_DATA,
+ PE3_DATA, PE2_DATA, PE1_DATA, PE0_DATA,
+ PF7_DATA, PF6_DATA, PF5_DATA, PF4_DATA,
+ PF3_DATA, PF2_DATA, PF1_DATA, PF0_DATA,
+ PG7_DATA, PG6_DATA, PG5_DATA, PG4_DATA,
+ PG3_DATA, PG2_DATA, PG1_DATA, PG0_DATA,
+
+ PH5_DATA, PH4_DATA,
+ PH3_DATA, PH2_DATA, PH1_DATA, PH0_DATA,
+ PINMUX_DATA_END,
+
+ PINMUX_INPUT_BEGIN,
+ PA7_IN, PA6_IN, PA5_IN, PA4_IN,
+ PA3_IN, PA2_IN, PA1_IN, PA0_IN,
+ PB7_IN, PB6_IN, PB5_IN, PB4_IN,
+ PB3_IN, PB2_IN, PB1_IN, PB0_IN,
+ PC7_IN, PC6_IN, PC5_IN, PC4_IN,
+ PC3_IN, PC2_IN, PC1_IN, PC0_IN,
+ PD7_IN, PD6_IN, PD5_IN, PD4_IN,
+ PD3_IN, PD2_IN, PD1_IN, PD0_IN,
+ PE7_IN, PE6_IN, PE5_IN, PE4_IN,
+ PE3_IN, PE2_IN, PE1_IN, PE0_IN,
+ PF7_IN, PF6_IN, PF5_IN, PF4_IN,
+ PF3_IN, PF2_IN, PF1_IN, PF0_IN,
+ PG7_IN, PG6_IN, PG5_IN, PG4_IN,
+ PG3_IN, PG2_IN, PG1_IN, PG0_IN,
+
+ PH5_IN, PH4_IN,
+ PH3_IN, PH2_IN, PH1_IN, PH0_IN,
+ PINMUX_INPUT_END,
+
+ PINMUX_INPUT_PULLUP_BEGIN,
+ PA7_IN_PU, PA6_IN_PU, PA5_IN_PU, PA4_IN_PU,
+ PA3_IN_PU, PA2_IN_PU, PA1_IN_PU, PA0_IN_PU,
+ PB7_IN_PU, PB6_IN_PU, PB5_IN_PU, PB4_IN_PU,
+ PB3_IN_PU, PB2_IN_PU, PB1_IN_PU, PB0_IN_PU,
+ PC7_IN_PU, PC6_IN_PU, PC5_IN_PU, PC4_IN_PU,
+ PC3_IN_PU, PC2_IN_PU, PC1_IN_PU, PC0_IN_PU,
+ PD7_IN_PU, PD6_IN_PU, PD5_IN_PU, PD4_IN_PU,
+ PD3_IN_PU, PD2_IN_PU, PD1_IN_PU, PD0_IN_PU,
+ PE7_IN_PU, PE6_IN_PU, PE5_IN_PU, PE4_IN_PU,
+ PE3_IN_PU, PE2_IN_PU, PE1_IN_PU, PE0_IN_PU,
+ PF7_IN_PU, PF6_IN_PU, PF5_IN_PU, PF4_IN_PU,
+ PF3_IN_PU, PF2_IN_PU, PF1_IN_PU, PF0_IN_PU,
+ PG7_IN_PU, PG6_IN_PU, PG5_IN_PU, PG4_IN_PU,
+ PG3_IN_PU, PG2_IN_PU, PG1_IN_PU, PG0_IN_PU,
+
+ PH5_IN_PU, PH4_IN_PU,
+ PH3_IN_PU, PH2_IN_PU, PH1_IN_PU, PH0_IN_PU,
+ PINMUX_INPUT_PULLUP_END,
+
+ PINMUX_OUTPUT_BEGIN,
+ PA7_OUT, PA6_OUT, PA5_OUT, PA4_OUT,
+ PA3_OUT, PA2_OUT, PA1_OUT, PA0_OUT,
+ PB7_OUT, PB6_OUT, PB5_OUT, PB4_OUT,
+ PB3_OUT, PB2_OUT, PB1_OUT, PB0_OUT,
+ PC7_OUT, PC6_OUT, PC5_OUT, PC4_OUT,
+ PC3_OUT, PC2_OUT, PC1_OUT, PC0_OUT,
+ PD7_OUT, PD6_OUT, PD5_OUT, PD4_OUT,
+ PD3_OUT, PD2_OUT, PD1_OUT, PD0_OUT,
+ PE7_OUT, PE6_OUT, PE5_OUT, PE4_OUT,
+ PE3_OUT, PE2_OUT, PE1_OUT, PE0_OUT,
+ PF7_OUT, PF6_OUT, PF5_OUT, PF4_OUT,
+ PF3_OUT, PF2_OUT, PF1_OUT, PF0_OUT,
+ PG7_OUT, PG6_OUT, PG5_OUT, PG4_OUT,
+ PG3_OUT, PG2_OUT, PG1_OUT, PG0_OUT,
+
+ PH5_OUT, PH4_OUT,
+ PH3_OUT, PH2_OUT, PH1_OUT, PH0_OUT,
+ PINMUX_OUTPUT_END,
+
+ PINMUX_FUNCTION_BEGIN,
+ PA7_FN, PA6_FN, PA5_FN, PA4_FN,
+ PA3_FN, PA2_FN, PA1_FN, PA0_FN,
+ PB7_FN, PB6_FN, PB5_FN, PB4_FN,
+ PB3_FN, PB2_FN, PB1_FN, PB0_FN,
+ PC7_FN, PC6_FN, PC5_FN, PC4_FN,
+ PC3_FN, PC2_FN, PC1_FN, PC0_FN,
+ PD7_FN, PD6_FN, PD5_FN, PD4_FN,
+ PD3_FN, PD2_FN, PD1_FN, PD0_FN,
+ PE7_FN, PE6_FN, PE5_FN, PE4_FN,
+ PE3_FN, PE2_FN, PE1_FN, PE0_FN,
+ PF7_FN, PF6_FN, PF5_FN, PF4_FN,
+ PF3_FN, PF2_FN, PF1_FN, PF0_FN,
+ PG7_FN, PG6_FN, PG5_FN, PG4_FN,
+ PG3_FN, PG2_FN, PG1_FN, PG0_FN,
+
+ PH5_FN, PH4_FN,
+ PH3_FN, PH2_FN, PH1_FN, PH0_FN,
+ PINMUX_FUNCTION_END,
+
+ PINMUX_MARK_BEGIN,
+
+ D31_MARK, D30_MARK, D29_MARK, D28_MARK, D27_MARK, D26_MARK,
+ D25_MARK, D24_MARK, D23_MARK, D22_MARK, D21_MARK, D20_MARK,
+ D19_MARK, D18_MARK, D17_MARK, D16_MARK,
+
+ BACK_MARK, BREQ_MARK,
+ WE3_MARK, WE2_MARK,
+ CS6_MARK, CS5_MARK, CS4_MARK,
+ CLKOUTENB_MARK,
+
+ DACK3_MARK, DACK2_MARK, DACK1_MARK, DACK0_MARK,
+ DREQ3_MARK, DREQ2_MARK, DREQ1_MARK, DREQ0_MARK,
+
+ IRQ3_MARK, IRQ2_MARK, IRQ1_MARK, IRQ0_MARK,
+
+ DRAK3_MARK, DRAK2_MARK, DRAK1_MARK, DRAK0_MARK,
+
+ SCK3_MARK, SCK2_MARK, SCK1_MARK, SCK0_MARK,
+ IRL3_MARK, IRL2_MARK, IRL1_MARK, IRL0_MARK,
+ TXD3_MARK, TXD2_MARK, TXD1_MARK, TXD0_MARK,
+ RXD3_MARK, RXD2_MARK, RXD1_MARK, RXD0_MARK,
+
+ CE2B_MARK, CE2A_MARK, IOIS16_MARK,
+ STATUS1_MARK, STATUS0_MARK,
+
+ IRQOUT_MARK,
+
+ PINMUX_MARK_END,
+};
+
+static pinmux_enum_t shx3_pinmux_data[] = {
+
+ /* PA GPIO */
+ PINMUX_DATA(PA7_DATA, PA7_IN, PA7_OUT, PA7_IN_PU),
+ PINMUX_DATA(PA6_DATA, PA6_IN, PA6_OUT, PA6_IN_PU),
+ PINMUX_DATA(PA5_DATA, PA5_IN, PA5_OUT, PA5_IN_PU),
+ PINMUX_DATA(PA4_DATA, PA4_IN, PA4_OUT, PA4_IN_PU),
+ PINMUX_DATA(PA3_DATA, PA3_IN, PA3_OUT, PA3_IN_PU),
+ PINMUX_DATA(PA2_DATA, PA2_IN, PA2_OUT, PA2_IN_PU),
+ PINMUX_DATA(PA1_DATA, PA1_IN, PA1_OUT, PA1_IN_PU),
+ PINMUX_DATA(PA0_DATA, PA0_IN, PA0_OUT, PA0_IN_PU),
+
+ /* PB GPIO */
+ PINMUX_DATA(PB7_DATA, PB7_IN, PB7_OUT, PB7_IN_PU),
+ PINMUX_DATA(PB6_DATA, PB6_IN, PB6_OUT, PB6_IN_PU),
+ PINMUX_DATA(PB5_DATA, PB5_IN, PB5_OUT, PB5_IN_PU),
+ PINMUX_DATA(PB4_DATA, PB4_IN, PB4_OUT, PB4_IN_PU),
+ PINMUX_DATA(PB3_DATA, PB3_IN, PB3_OUT, PB3_IN_PU),
+ PINMUX_DATA(PB2_DATA, PB2_IN, PB2_OUT, PB2_IN_PU),
+ PINMUX_DATA(PB1_DATA, PB1_IN, PB1_OUT, PB1_IN_PU),
+ PINMUX_DATA(PB0_DATA, PB0_IN, PB0_OUT, PB0_IN_PU),
+
+ /* PC GPIO */
+ PINMUX_DATA(PC7_DATA, PC7_IN, PC7_OUT, PC7_IN_PU),
+ PINMUX_DATA(PC6_DATA, PC6_IN, PC6_OUT, PC6_IN_PU),
+ PINMUX_DATA(PC5_DATA, PC5_IN, PC5_OUT, PC5_IN_PU),
+ PINMUX_DATA(PC4_DATA, PC4_IN, PC4_OUT, PC4_IN_PU),
+ PINMUX_DATA(PC3_DATA, PC3_IN, PC3_OUT, PC3_IN_PU),
+ PINMUX_DATA(PC2_DATA, PC2_IN, PC2_OUT, PC2_IN_PU),
+ PINMUX_DATA(PC1_DATA, PC1_IN, PC1_OUT, PC1_IN_PU),
+ PINMUX_DATA(PC0_DATA, PC0_IN, PC0_OUT, PC0_IN_PU),
+
+ /* PD GPIO */
+ PINMUX_DATA(PD7_DATA, PD7_IN, PD7_OUT, PD7_IN_PU),
+ PINMUX_DATA(PD6_DATA, PD6_IN, PD6_OUT, PD6_IN_PU),
+ PINMUX_DATA(PD5_DATA, PD5_IN, PD5_OUT, PD5_IN_PU),
+ PINMUX_DATA(PD4_DATA, PD4_IN, PD4_OUT, PD4_IN_PU),
+ PINMUX_DATA(PD3_DATA, PD3_IN, PD3_OUT, PD3_IN_PU),
+ PINMUX_DATA(PD2_DATA, PD2_IN, PD2_OUT, PD2_IN_PU),
+ PINMUX_DATA(PD1_DATA, PD1_IN, PD1_OUT, PD1_IN_PU),
+ PINMUX_DATA(PD0_DATA, PD0_IN, PD0_OUT, PD0_IN_PU),
+
+ /* PE GPIO */
+ PINMUX_DATA(PE7_DATA, PE7_IN, PE7_OUT, PE7_IN_PU),
+ PINMUX_DATA(PE6_DATA, PE6_IN, PE6_OUT, PE6_IN_PU),
+ PINMUX_DATA(PE5_DATA, PE5_IN, PE5_OUT, PE5_IN_PU),
+ PINMUX_DATA(PE4_DATA, PE4_IN, PE4_OUT, PE4_IN_PU),
+ PINMUX_DATA(PE3_DATA, PE3_IN, PE3_OUT, PE3_IN_PU),
+ PINMUX_DATA(PE2_DATA, PE2_IN, PE2_OUT, PE2_IN_PU),
+ PINMUX_DATA(PE1_DATA, PE1_IN, PE1_OUT, PE1_IN_PU),
+ PINMUX_DATA(PE0_DATA, PE0_IN, PE0_OUT, PE0_IN_PU),
+
+ /* PF GPIO */
+ PINMUX_DATA(PF7_DATA, PF7_IN, PF7_OUT, PF7_IN_PU),
+ PINMUX_DATA(PF6_DATA, PF6_IN, PF6_OUT, PF6_IN_PU),
+ PINMUX_DATA(PF5_DATA, PF5_IN, PF5_OUT, PF5_IN_PU),
+ PINMUX_DATA(PF4_DATA, PF4_IN, PF4_OUT, PF4_IN_PU),
+ PINMUX_DATA(PF3_DATA, PF3_IN, PF3_OUT, PF3_IN_PU),
+ PINMUX_DATA(PF2_DATA, PF2_IN, PF2_OUT, PF2_IN_PU),
+ PINMUX_DATA(PF1_DATA, PF1_IN, PF1_OUT, PF1_IN_PU),
+ PINMUX_DATA(PF0_DATA, PF0_IN, PF0_OUT, PF0_IN_PU),
+
+ /* PG GPIO */
+ PINMUX_DATA(PG7_DATA, PG7_IN, PG7_OUT, PG7_IN_PU),
+ PINMUX_DATA(PG6_DATA, PG6_IN, PG6_OUT, PG6_IN_PU),
+ PINMUX_DATA(PG5_DATA, PG5_IN, PG5_OUT, PG5_IN_PU),
+ PINMUX_DATA(PG4_DATA, PG4_IN, PG4_OUT, PG4_IN_PU),
+ PINMUX_DATA(PG3_DATA, PG3_IN, PG3_OUT, PG3_IN_PU),
+ PINMUX_DATA(PG2_DATA, PG2_IN, PG2_OUT, PG2_IN_PU),
+ PINMUX_DATA(PG1_DATA, PG1_IN, PG1_OUT, PG1_IN_PU),
+ PINMUX_DATA(PG0_DATA, PG0_IN, PG0_OUT, PG0_IN_PU),
+
+ /* PH GPIO */
+ PINMUX_DATA(PH5_DATA, PH5_IN, PH5_OUT, PH5_IN_PU),
+ PINMUX_DATA(PH4_DATA, PH4_IN, PH4_OUT, PH4_IN_PU),
+ PINMUX_DATA(PH3_DATA, PH3_IN, PH3_OUT, PH3_IN_PU),
+ PINMUX_DATA(PH2_DATA, PH2_IN, PH2_OUT, PH2_IN_PU),
+ PINMUX_DATA(PH1_DATA, PH1_IN, PH1_OUT, PH1_IN_PU),
+ PINMUX_DATA(PH0_DATA, PH0_IN, PH0_OUT, PH0_IN_PU),
+
+ /* PA FN */
+ PINMUX_DATA(D31_MARK, PA7_FN),
+ PINMUX_DATA(D30_MARK, PA6_FN),
+ PINMUX_DATA(D29_MARK, PA5_FN),
+ PINMUX_DATA(D28_MARK, PA4_FN),
+ PINMUX_DATA(D27_MARK, PA3_FN),
+ PINMUX_DATA(D26_MARK, PA2_FN),
+ PINMUX_DATA(D25_MARK, PA1_FN),
+ PINMUX_DATA(D24_MARK, PA0_FN),
+
+ /* PB FN */
+ PINMUX_DATA(D23_MARK, PB7_FN),
+ PINMUX_DATA(D22_MARK, PB6_FN),
+ PINMUX_DATA(D21_MARK, PB5_FN),
+ PINMUX_DATA(D20_MARK, PB4_FN),
+ PINMUX_DATA(D19_MARK, PB3_FN),
+ PINMUX_DATA(D18_MARK, PB2_FN),
+ PINMUX_DATA(D17_MARK, PB1_FN),
+ PINMUX_DATA(D16_MARK, PB0_FN),
+
+ /* PC FN */
+ PINMUX_DATA(BACK_MARK, PC7_FN),
+ PINMUX_DATA(BREQ_MARK, PC6_FN),
+ PINMUX_DATA(WE3_MARK, PC5_FN),
+ PINMUX_DATA(WE2_MARK, PC4_FN),
+ PINMUX_DATA(CS6_MARK, PC3_FN),
+ PINMUX_DATA(CS5_MARK, PC2_FN),
+ PINMUX_DATA(CS4_MARK, PC1_FN),
+ PINMUX_DATA(CLKOUTENB_MARK, PC0_FN),
+
+ /* PD FN */
+ PINMUX_DATA(DACK3_MARK, PD7_FN),
+ PINMUX_DATA(DACK2_MARK, PD6_FN),
+ PINMUX_DATA(DACK1_MARK, PD5_FN),
+ PINMUX_DATA(DACK0_MARK, PD4_FN),
+ PINMUX_DATA(DREQ3_MARK, PD3_FN),
+ PINMUX_DATA(DREQ2_MARK, PD2_FN),
+ PINMUX_DATA(DREQ1_MARK, PD1_FN),
+ PINMUX_DATA(DREQ0_MARK, PD0_FN),
+
+ /* PE FN */
+ PINMUX_DATA(IRQ3_MARK, PE7_FN),
+ PINMUX_DATA(IRQ2_MARK, PE6_FN),
+ PINMUX_DATA(IRQ1_MARK, PE5_FN),
+ PINMUX_DATA(IRQ0_MARK, PE4_FN),
+ PINMUX_DATA(DRAK3_MARK, PE3_FN),
+ PINMUX_DATA(DRAK2_MARK, PE2_FN),
+ PINMUX_DATA(DRAK1_MARK, PE1_FN),
+ PINMUX_DATA(DRAK0_MARK, PE0_FN),
+
+ /* PF FN */
+ PINMUX_DATA(SCK3_MARK, PF7_FN),
+ PINMUX_DATA(SCK2_MARK, PF6_FN),
+ PINMUX_DATA(SCK1_MARK, PF5_FN),
+ PINMUX_DATA(SCK0_MARK, PF4_FN),
+ PINMUX_DATA(IRL3_MARK, PF3_FN),
+ PINMUX_DATA(IRL2_MARK, PF2_FN),
+ PINMUX_DATA(IRL1_MARK, PF1_FN),
+ PINMUX_DATA(IRL0_MARK, PF0_FN),
+
+ /* PG FN */
+ PINMUX_DATA(TXD3_MARK, PG7_FN),
+ PINMUX_DATA(TXD2_MARK, PG6_FN),
+ PINMUX_DATA(TXD1_MARK, PG5_FN),
+ PINMUX_DATA(TXD0_MARK, PG4_FN),
+ PINMUX_DATA(RXD3_MARK, PG3_FN),
+ PINMUX_DATA(RXD2_MARK, PG2_FN),
+ PINMUX_DATA(RXD1_MARK, PG1_FN),
+ PINMUX_DATA(RXD0_MARK, PG0_FN),
+
+ /* PH FN */
+ PINMUX_DATA(CE2B_MARK, PH5_FN),
+ PINMUX_DATA(CE2A_MARK, PH4_FN),
+ PINMUX_DATA(IOIS16_MARK, PH3_FN),
+ PINMUX_DATA(STATUS1_MARK, PH2_FN),
+ PINMUX_DATA(STATUS0_MARK, PH1_FN),
+ PINMUX_DATA(IRQOUT_MARK, PH0_FN),
+};
+
+static struct pinmux_gpio shx3_pinmux_gpios[] = {
+ /* PA */
+ PINMUX_GPIO(GPIO_PA7, PA7_DATA),
+ PINMUX_GPIO(GPIO_PA6, PA6_DATA),
+ PINMUX_GPIO(GPIO_PA5, PA5_DATA),
+ PINMUX_GPIO(GPIO_PA4, PA4_DATA),
+ PINMUX_GPIO(GPIO_PA3, PA3_DATA),
+ PINMUX_GPIO(GPIO_PA2, PA2_DATA),
+ PINMUX_GPIO(GPIO_PA1, PA1_DATA),
+ PINMUX_GPIO(GPIO_PA0, PA0_DATA),
+
+ /* PB */
+ PINMUX_GPIO(GPIO_PB7, PB7_DATA),
+ PINMUX_GPIO(GPIO_PB6, PB6_DATA),
+ PINMUX_GPIO(GPIO_PB5, PB5_DATA),
+ PINMUX_GPIO(GPIO_PB4, PB4_DATA),
+ PINMUX_GPIO(GPIO_PB3, PB3_DATA),
+ PINMUX_GPIO(GPIO_PB2, PB2_DATA),
+ PINMUX_GPIO(GPIO_PB1, PB1_DATA),
+ PINMUX_GPIO(GPIO_PB0, PB0_DATA),
+
+ /* PC */
+ PINMUX_GPIO(GPIO_PC7, PC7_DATA),
+ PINMUX_GPIO(GPIO_PC6, PC6_DATA),
+ PINMUX_GPIO(GPIO_PC5, PC5_DATA),
+ PINMUX_GPIO(GPIO_PC4, PC4_DATA),
+ PINMUX_GPIO(GPIO_PC3, PC3_DATA),
+ PINMUX_GPIO(GPIO_PC2, PC2_DATA),
+ PINMUX_GPIO(GPIO_PC1, PC1_DATA),
+ PINMUX_GPIO(GPIO_PC0, PC0_DATA),
+
+ /* PD */
+ PINMUX_GPIO(GPIO_PD7, PD7_DATA),
+ PINMUX_GPIO(GPIO_PD6, PD6_DATA),
+ PINMUX_GPIO(GPIO_PD5, PD5_DATA),
+ PINMUX_GPIO(GPIO_PD4, PD4_DATA),
+ PINMUX_GPIO(GPIO_PD3, PD3_DATA),
+ PINMUX_GPIO(GPIO_PD2, PD2_DATA),
+ PINMUX_GPIO(GPIO_PD1, PD1_DATA),
+ PINMUX_GPIO(GPIO_PD0, PD0_DATA),
+
+ /* PE */
+ PINMUX_GPIO(GPIO_PE7, PE7_DATA),
+ PINMUX_GPIO(GPIO_PE6, PE6_DATA),
+ PINMUX_GPIO(GPIO_PE5, PE5_DATA),
+ PINMUX_GPIO(GPIO_PE4, PE4_DATA),
+ PINMUX_GPIO(GPIO_PE3, PE3_DATA),
+ PINMUX_GPIO(GPIO_PE2, PE2_DATA),
+ PINMUX_GPIO(GPIO_PE1, PE1_DATA),
+ PINMUX_GPIO(GPIO_PE0, PE0_DATA),
+
+ /* PF */
+ PINMUX_GPIO(GPIO_PF7, PF7_DATA),
+ PINMUX_GPIO(GPIO_PF6, PF6_DATA),
+ PINMUX_GPIO(GPIO_PF5, PF5_DATA),
+ PINMUX_GPIO(GPIO_PF4, PF4_DATA),
+ PINMUX_GPIO(GPIO_PF3, PF3_DATA),
+ PINMUX_GPIO(GPIO_PF2, PF2_DATA),
+ PINMUX_GPIO(GPIO_PF1, PF1_DATA),
+ PINMUX_GPIO(GPIO_PF0, PF0_DATA),
+
+ /* PG */
+ PINMUX_GPIO(GPIO_PG7, PG7_DATA),
+ PINMUX_GPIO(GPIO_PG6, PG6_DATA),
+ PINMUX_GPIO(GPIO_PG5, PG5_DATA),
+ PINMUX_GPIO(GPIO_PG4, PG4_DATA),
+ PINMUX_GPIO(GPIO_PG3, PG3_DATA),
+ PINMUX_GPIO(GPIO_PG2, PG2_DATA),
+ PINMUX_GPIO(GPIO_PG1, PG1_DATA),
+ PINMUX_GPIO(GPIO_PG0, PG0_DATA),
+
+ /* PH */
+ PINMUX_GPIO(GPIO_PH5, PH5_DATA),
+ PINMUX_GPIO(GPIO_PH4, PH4_DATA),
+ PINMUX_GPIO(GPIO_PH3, PH3_DATA),
+ PINMUX_GPIO(GPIO_PH2, PH2_DATA),
+ PINMUX_GPIO(GPIO_PH1, PH1_DATA),
+ PINMUX_GPIO(GPIO_PH0, PH0_DATA),
+
+ /* FN */
+ PINMUX_GPIO(GPIO_FN_D31, D31_MARK),
+ PINMUX_GPIO(GPIO_FN_D30, D30_MARK),
+ PINMUX_GPIO(GPIO_FN_D29, D29_MARK),
+ PINMUX_GPIO(GPIO_FN_D28, D28_MARK),
+ PINMUX_GPIO(GPIO_FN_D27, D27_MARK),
+ PINMUX_GPIO(GPIO_FN_D26, D26_MARK),
+ PINMUX_GPIO(GPIO_FN_D25, D25_MARK),
+ PINMUX_GPIO(GPIO_FN_D24, D24_MARK),
+ PINMUX_GPIO(GPIO_FN_D23, D23_MARK),
+ PINMUX_GPIO(GPIO_FN_D22, D22_MARK),
+ PINMUX_GPIO(GPIO_FN_D21, D21_MARK),
+ PINMUX_GPIO(GPIO_FN_D20, D20_MARK),
+ PINMUX_GPIO(GPIO_FN_D19, D19_MARK),
+ PINMUX_GPIO(GPIO_FN_D18, D18_MARK),
+ PINMUX_GPIO(GPIO_FN_D17, D17_MARK),
+ PINMUX_GPIO(GPIO_FN_D16, D16_MARK),
+ PINMUX_GPIO(GPIO_FN_BACK, BACK_MARK),
+ PINMUX_GPIO(GPIO_FN_BREQ, BREQ_MARK),
+ PINMUX_GPIO(GPIO_FN_WE3, WE3_MARK),
+ PINMUX_GPIO(GPIO_FN_WE2, WE2_MARK),
+ PINMUX_GPIO(GPIO_FN_CS6, CS6_MARK),
+ PINMUX_GPIO(GPIO_FN_CS5, CS5_MARK),
+ PINMUX_GPIO(GPIO_FN_CS4, CS4_MARK),
+ PINMUX_GPIO(GPIO_FN_CLKOUTENB, CLKOUTENB_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK3, DACK3_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK2, DACK2_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK1, DACK1_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK0, DACK0_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ3, DREQ3_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ2, DREQ2_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ1, DREQ1_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ0, DREQ0_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ3, IRQ3_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ2, IRQ2_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ1, IRQ1_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ0, IRQ0_MARK),
+ PINMUX_GPIO(GPIO_FN_DRAK3, DRAK3_MARK),
+ PINMUX_GPIO(GPIO_FN_DRAK2, DRAK2_MARK),
+ PINMUX_GPIO(GPIO_FN_DRAK1, DRAK1_MARK),
+ PINMUX_GPIO(GPIO_FN_DRAK0, DRAK0_MARK),
+ PINMUX_GPIO(GPIO_FN_SCK3, SCK3_MARK),
+ PINMUX_GPIO(GPIO_FN_SCK2, SCK2_MARK),
+ PINMUX_GPIO(GPIO_FN_SCK1, SCK1_MARK),
+ PINMUX_GPIO(GPIO_FN_SCK0, SCK0_MARK),
+ PINMUX_GPIO(GPIO_FN_IRL3, IRL3_MARK),
+ PINMUX_GPIO(GPIO_FN_IRL2, IRL2_MARK),
+ PINMUX_GPIO(GPIO_FN_IRL1, IRL1_MARK),
+ PINMUX_GPIO(GPIO_FN_IRL0, IRL0_MARK),
+ PINMUX_GPIO(GPIO_FN_TXD3, TXD3_MARK),
+ PINMUX_GPIO(GPIO_FN_TXD2, TXD2_MARK),
+ PINMUX_GPIO(GPIO_FN_TXD1, TXD1_MARK),
+ PINMUX_GPIO(GPIO_FN_TXD0, TXD0_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD3, RXD3_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD2, RXD2_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD1, RXD1_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD0, RXD0_MARK),
+ PINMUX_GPIO(GPIO_FN_CE2B, CE2B_MARK),
+ PINMUX_GPIO(GPIO_FN_CE2A, CE2A_MARK),
+ PINMUX_GPIO(GPIO_FN_IOIS16, IOIS16_MARK),
+ PINMUX_GPIO(GPIO_FN_STATUS1, STATUS1_MARK),
+ PINMUX_GPIO(GPIO_FN_STATUS0, STATUS0_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQOUT, IRQOUT_MARK),
+};
+
+static struct pinmux_cfg_reg shx3_pinmux_config_regs[] = {
+ { PINMUX_CFG_REG("PABCR", 0xffc70000, 32, 2) {
+ PA7_FN, PA7_OUT, PA7_IN, PA7_IN_PU,
+ PA6_FN, PA6_OUT, PA6_IN, PA6_IN_PU,
+ PA5_FN, PA5_OUT, PA5_IN, PA5_IN_PU,
+ PA4_FN, PA4_OUT, PA4_IN, PA4_IN_PU,
+ PA3_FN, PA3_OUT, PA3_IN, PA3_IN_PU,
+ PA2_FN, PA2_OUT, PA2_IN, PA2_IN_PU,
+ PA1_FN, PA1_OUT, PA1_IN, PA1_IN_PU,
+ PA0_FN, PA0_OUT, PA0_IN, PA0_IN_PU,
+ PB7_FN, PB7_OUT, PB7_IN, PB7_IN_PU,
+ PB6_FN, PB6_OUT, PB6_IN, PB6_IN_PU,
+ PB5_FN, PB5_OUT, PB5_IN, PB5_IN_PU,
+ PB4_FN, PB4_OUT, PB4_IN, PB4_IN_PU,
+ PB3_FN, PB3_OUT, PB3_IN, PB3_IN_PU,
+ PB2_FN, PB2_OUT, PB2_IN, PB2_IN_PU,
+ PB1_FN, PB1_OUT, PB1_IN, PB1_IN_PU,
+ PB0_FN, PB0_OUT, PB0_IN, PB0_IN_PU, },
+ },
+ { PINMUX_CFG_REG("PCDCR", 0xffc70004, 32, 2) {
+ PC7_FN, PC7_OUT, PC7_IN, PC7_IN_PU,
+ PC6_FN, PC6_OUT, PC6_IN, PC6_IN_PU,
+ PC5_FN, PC5_OUT, PC5_IN, PC5_IN_PU,
+ PC4_FN, PC4_OUT, PC4_IN, PC4_IN_PU,
+ PC3_FN, PC3_OUT, PC3_IN, PC3_IN_PU,
+ PC2_FN, PC2_OUT, PC2_IN, PC2_IN_PU,
+ PC1_FN, PC1_OUT, PC1_IN, PC1_IN_PU,
+ PC0_FN, PC0_OUT, PC0_IN, PC0_IN_PU,
+ PD7_FN, PD7_OUT, PD7_IN, PD7_IN_PU,
+ PD6_FN, PD6_OUT, PD6_IN, PD6_IN_PU,
+ PD5_FN, PD5_OUT, PD5_IN, PD5_IN_PU,
+ PD4_FN, PD4_OUT, PD4_IN, PD4_IN_PU,
+ PD3_FN, PD3_OUT, PD3_IN, PD3_IN_PU,
+ PD2_FN, PD2_OUT, PD2_IN, PD2_IN_PU,
+ PD1_FN, PD1_OUT, PD1_IN, PD1_IN_PU,
+ PD0_FN, PD0_OUT, PD0_IN, PD0_IN_PU, },
+ },
+ { PINMUX_CFG_REG("PEFCR", 0xffc70008, 32, 2) {
+ PE7_FN, PE7_OUT, PE7_IN, PE7_IN_PU,
+ PE6_FN, PE6_OUT, PE6_IN, PE6_IN_PU,
+ PE5_FN, PE5_OUT, PE5_IN, PE5_IN_PU,
+ PE4_FN, PE4_OUT, PE4_IN, PE4_IN_PU,
+ PE3_FN, PE3_OUT, PE3_IN, PE3_IN_PU,
+ PE2_FN, PE2_OUT, PE2_IN, PE2_IN_PU,
+ PE1_FN, PE1_OUT, PE1_IN, PE1_IN_PU,
+ PE0_FN, PE0_OUT, PE0_IN, PE0_IN_PU,
+ PF7_FN, PF7_OUT, PF7_IN, PF7_IN_PU,
+ PF6_FN, PF6_OUT, PF6_IN, PF6_IN_PU,
+ PF5_FN, PF5_OUT, PF5_IN, PF5_IN_PU,
+ PF4_FN, PF4_OUT, PF4_IN, PF4_IN_PU,
+ PF3_FN, PF3_OUT, PF3_IN, PF3_IN_PU,
+ PF2_FN, PF2_OUT, PF2_IN, PF2_IN_PU,
+ PF1_FN, PF1_OUT, PF1_IN, PF1_IN_PU,
+ PF0_FN, PF0_OUT, PF0_IN, PF0_IN_PU, },
+ },
+ { PINMUX_CFG_REG("PGHCR", 0xffc7000c, 32, 2) {
+ PG7_FN, PG7_OUT, PG7_IN, PG7_IN_PU,
+ PG6_FN, PG6_OUT, PG6_IN, PG6_IN_PU,
+ PG5_FN, PG5_OUT, PG5_IN, PG5_IN_PU,
+ PG4_FN, PG4_OUT, PG4_IN, PG4_IN_PU,
+ PG3_FN, PG3_OUT, PG3_IN, PG3_IN_PU,
+ PG2_FN, PG2_OUT, PG2_IN, PG2_IN_PU,
+ PG1_FN, PG1_OUT, PG1_IN, PG1_IN_PU,
+ PG0_FN, PG0_OUT, PG0_IN, PG0_IN_PU,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PH5_FN, PH5_OUT, PH5_IN, PH5_IN_PU,
+ PH4_FN, PH4_OUT, PH4_IN, PH4_IN_PU,
+ PH3_FN, PH3_OUT, PH3_IN, PH3_IN_PU,
+ PH2_FN, PH2_OUT, PH2_IN, PH2_IN_PU,
+ PH1_FN, PH1_OUT, PH1_IN, PH1_IN_PU,
+ PH0_FN, PH0_OUT, PH0_IN, PH0_IN_PU, },
+ },
+ { },
+};
+
+static struct pinmux_data_reg shx3_pinmux_data_regs[] = {
+ { PINMUX_DATA_REG("PABDR", 0xffc70010, 32) {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PA7_DATA, PA6_DATA, PA5_DATA, PA4_DATA,
+ PA3_DATA, PA2_DATA, PA1_DATA, PA0_DATA,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PB7_DATA, PB6_DATA, PB5_DATA, PB4_DATA,
+ PB3_DATA, PB2_DATA, PB1_DATA, PB0_DATA, },
+ },
+ { PINMUX_DATA_REG("PCDDR", 0xffc70014, 32) {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PC7_DATA, PC6_DATA, PC5_DATA, PC4_DATA,
+ PC3_DATA, PC2_DATA, PC1_DATA, PC0_DATA,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PD7_DATA, PD6_DATA, PD5_DATA, PD4_DATA,
+ PD3_DATA, PD2_DATA, PD1_DATA, PD0_DATA, },
+ },
+ { PINMUX_DATA_REG("PEFDR", 0xffc70018, 32) {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PE7_DATA, PE6_DATA, PE5_DATA, PE4_DATA,
+ PE3_DATA, PE2_DATA, PE1_DATA, PE0_DATA,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PF7_DATA, PF6_DATA, PF5_DATA, PF4_DATA,
+ PF3_DATA, PF2_DATA, PF1_DATA, PF0_DATA, },
+ },
+ { PINMUX_DATA_REG("PGHDR", 0xffc7001c, 32) {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PG7_DATA, PG6_DATA, PG5_DATA, PG4_DATA,
+ PG3_DATA, PG2_DATA, PG1_DATA, PG0_DATA,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, PH5_DATA, PH4_DATA,
+ PH3_DATA, PH2_DATA, PH1_DATA, PH0_DATA, },
+ },
+ { },
+};
+
+static struct pinmux_info shx3_pinmux_info = {
+ .name = "shx3_pfc",
+ .reserved_id = PINMUX_RESERVED,
+ .data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END },
+ .input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END },
+ .input_pu = { PINMUX_INPUT_PULLUP_BEGIN,
+ PINMUX_INPUT_PULLUP_END },
+ .output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END },
+ .mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END },
+ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+ .first_gpio = GPIO_PA7,
+ .last_gpio = GPIO_FN_IRQOUT,
+ .gpios = shx3_pinmux_gpios,
+ .gpio_data = shx3_pinmux_data,
+ .gpio_data_size = ARRAY_SIZE(shx3_pinmux_data),
+ .cfg_regs = shx3_pinmux_config_regs,
+ .data_regs = shx3_pinmux_data_regs,
+};
+
+static int __init shx3_pinmux_setup(void)
+{
+ return register_pinmux(&shx3_pinmux_info);
+}
+arch_initcall(shx3_pinmux_setup);
diff --git a/arch/sh/kernel/cpu/sh4a/serial-sh7722.c b/arch/sh/kernel/cpu/sh4a/serial-sh7722.c
new file mode 100644
index 00000000..59bc3a72
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/serial-sh7722.c
@@ -0,0 +1,23 @@
+#include <linux/serial_sci.h>
+#include <linux/serial_core.h>
+#include <linux/io.h>
+
+#define PSCR 0xA405011E
+
+static void sh7722_sci_init_pins(struct uart_port *port, unsigned int cflag)
+{
+ unsigned short data;
+
+ if (port->mapbase == 0xffe00000) {
+ data = __raw_readw(PSCR);
+ data &= ~0x03cf;
+ if (!(cflag & CRTSCTS))
+ data |= 0x0340;
+
+ __raw_writew(data, PSCR);
+ }
+}
+
+struct plat_sci_port_ops sh7722_sci_port_ops = {
+ .init_pins = sh7722_sci_init_pins,
+};
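+
+/*
+ * These ops are intended to be hooked up through the SCIF platform data
+ * in setup-sh7722.c, so that the serial core calls back into
+ * sh7722_sci_init_pins() and updates the PSCR pin-function bits whenever
+ * hardware flow control is switched on or off for the port at 0xffe00000.
+ */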
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7343.c b/arch/sh/kernel/cpu/sh4a/setup-sh7343.c
new file mode 100644
index 00000000..1b884831
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7343.c
@@ -0,0 +1,509 @@
+/*
+ * SH7343 Setup
+ *
+ * Copyright (C) 2006 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/serial.h>
+#include <linux/serial_sci.h>
+#include <linux/uio_driver.h>
+#include <linux/sh_timer.h>
+#include <asm/clock.h>
+
+/* Serial */
+static struct plat_sci_port scif0_platform_data = {
+ .mapbase = 0xffe00000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_CKE1,
+ .scbrr_algo_id = SCBRR_ALGO_2,
+ .type = PORT_SCIF,
+ .irqs = { 80, 80, 80, 80 },
+};
+
+static struct platform_device scif0_device = {
+ .name = "sh-sci",
+ .id = 0,
+ .dev = {
+ .platform_data = &scif0_platform_data,
+ },
+};
+
+static struct plat_sci_port scif1_platform_data = {
+ .mapbase = 0xffe10000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_CKE1,
+ .scbrr_algo_id = SCBRR_ALGO_2,
+ .type = PORT_SCIF,
+ .irqs = { 81, 81, 81, 81 },
+};
+
+static struct platform_device scif1_device = {
+ .name = "sh-sci",
+ .id = 1,
+ .dev = {
+ .platform_data = &scif1_platform_data,
+ },
+};
+
+static struct plat_sci_port scif2_platform_data = {
+ .mapbase = 0xffe20000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_CKE1,
+ .scbrr_algo_id = SCBRR_ALGO_2,
+ .type = PORT_SCIF,
+ .irqs = { 82, 82, 82, 82 },
+};
+
+static struct platform_device scif2_device = {
+ .name = "sh-sci",
+ .id = 2,
+ .dev = {
+ .platform_data = &scif2_platform_data,
+ },
+};
+
+static struct plat_sci_port scif3_platform_data = {
+ .mapbase = 0xffe30000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_CKE1,
+ .scbrr_algo_id = SCBRR_ALGO_2,
+ .type = PORT_SCIF,
+ .irqs = { 83, 83, 83, 83 },
+};
+
+static struct platform_device scif3_device = {
+ .name = "sh-sci",
+ .id = 3,
+ .dev = {
+ .platform_data = &scif3_platform_data,
+ },
+};
+
+static struct resource iic0_resources[] = {
+ [0] = {
+ .name = "IIC0",
+ .start = 0x04470000,
+ .end = 0x04470017,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 96,
+ .end = 99,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device iic0_device = {
+ .name = "i2c-sh_mobile",
+ .id = 0, /* "i2c0" clock */
+ .num_resources = ARRAY_SIZE(iic0_resources),
+ .resource = iic0_resources,
+};
+
+static struct resource iic1_resources[] = {
+ [0] = {
+ .name = "IIC1",
+ .start = 0x04750000,
+ .end = 0x04750017,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 44,
+ .end = 47,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device iic1_device = {
+ .name = "i2c-sh_mobile",
+ .id = 1, /* "i2c1" clock */
+ .num_resources = ARRAY_SIZE(iic1_resources),
+ .resource = iic1_resources,
+};
+
+static struct uio_info vpu_platform_data = {
+ .name = "VPU4",
+ .version = "0",
+ .irq = 60,
+};
+
+static struct resource vpu_resources[] = {
+ [0] = {
+ .name = "VPU",
+ .start = 0xfe900000,
+ .end = 0xfe9022eb,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ /* placeholder for contiguous memory */
+ },
+};
+
+static struct platform_device vpu_device = {
+ .name = "uio_pdrv_genirq",
+ .id = 0,
+ .dev = {
+ .platform_data = &vpu_platform_data,
+ },
+ .resource = vpu_resources,
+ .num_resources = ARRAY_SIZE(vpu_resources),
+};
+
+static struct uio_info veu_platform_data = {
+ .name = "VEU",
+ .version = "0",
+ .irq = 54,
+};
+
+static struct resource veu_resources[] = {
+ [0] = {
+ .name = "VEU",
+ .start = 0xfe920000,
+ .end = 0xfe9200b7,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ /* placeholder for contiguous memory */
+ },
+};
+
+static struct platform_device veu_device = {
+ .name = "uio_pdrv_genirq",
+ .id = 1,
+ .dev = {
+ .platform_data = &veu_platform_data,
+ },
+ .resource = veu_resources,
+ .num_resources = ARRAY_SIZE(veu_resources),
+};
+
+static struct uio_info jpu_platform_data = {
+ .name = "JPU",
+ .version = "0",
+ .irq = 27,
+};
+
+static struct resource jpu_resources[] = {
+ [0] = {
+ .name = "JPU",
+ .start = 0xfea00000,
+ .end = 0xfea102d3,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ /* placeholder for contiguous memory */
+ },
+};
+
+static struct platform_device jpu_device = {
+ .name = "uio_pdrv_genirq",
+ .id = 2,
+ .dev = {
+ .platform_data = &jpu_platform_data,
+ },
+ .resource = jpu_resources,
+ .num_resources = ARRAY_SIZE(jpu_resources),
+};
+
+static struct sh_timer_config cmt_platform_data = {
+ .channel_offset = 0x60,
+ .timer_bit = 5,
+ .clockevent_rating = 125,
+ .clocksource_rating = 200,
+};
+
+static struct resource cmt_resources[] = {
+ [0] = {
+ .start = 0x044a0060,
+ .end = 0x044a006b,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 104,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device cmt_device = {
+ .name = "sh_cmt",
+ .id = 0,
+ .dev = {
+ .platform_data = &cmt_platform_data,
+ },
+ .resource = cmt_resources,
+ .num_resources = ARRAY_SIZE(cmt_resources),
+};
+
+static struct sh_timer_config tmu0_platform_data = {
+ .channel_offset = 0x04,
+ .timer_bit = 0,
+ .clockevent_rating = 200,
+};
+
+static struct resource tmu0_resources[] = {
+ [0] = {
+ .start = 0xffd80008,
+ .end = 0xffd80013,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 16,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu0_device = {
+ .name = "sh_tmu",
+ .id = 0,
+ .dev = {
+ .platform_data = &tmu0_platform_data,
+ },
+ .resource = tmu0_resources,
+ .num_resources = ARRAY_SIZE(tmu0_resources),
+};
+
+static struct sh_timer_config tmu1_platform_data = {
+ .channel_offset = 0x10,
+ .timer_bit = 1,
+ .clocksource_rating = 200,
+};
+
+static struct resource tmu1_resources[] = {
+ [0] = {
+ .start = 0xffd80014,
+ .end = 0xffd8001f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 17,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu1_device = {
+ .name = "sh_tmu",
+ .id = 1,
+ .dev = {
+ .platform_data = &tmu1_platform_data,
+ },
+ .resource = tmu1_resources,
+ .num_resources = ARRAY_SIZE(tmu1_resources),
+};
+
+static struct sh_timer_config tmu2_platform_data = {
+ .channel_offset = 0x1c,
+ .timer_bit = 2,
+};
+
+static struct resource tmu2_resources[] = {
+ [0] = {
+ .start = 0xffd80020,
+ .end = 0xffd8002b,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 18,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu2_device = {
+ .name = "sh_tmu",
+ .id = 2,
+ .dev = {
+ .platform_data = &tmu2_platform_data,
+ },
+ .resource = tmu2_resources,
+ .num_resources = ARRAY_SIZE(tmu2_resources),
+};
+
+static struct platform_device *sh7343_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &scif3_device,
+ &cmt_device,
+ &tmu0_device,
+ &tmu1_device,
+ &tmu2_device,
+ &iic0_device,
+ &iic1_device,
+ &vpu_device,
+ &veu_device,
+ &jpu_device,
+};
+
+static int __init sh7343_devices_setup(void)
+{
+ platform_resource_setup_memory(&vpu_device, "vpu", 1 << 20);
+ platform_resource_setup_memory(&veu_device, "veu", 2 << 20);
+ platform_resource_setup_memory(&jpu_device, "jpu", 2 << 20);
+
+ return platform_add_devices(sh7343_devices,
+ ARRAY_SIZE(sh7343_devices));
+}
+arch_initcall(sh7343_devices_setup);
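+
+/*
+ * platform_resource_setup_memory() fills the empty placeholder memory
+ * resource of each UIO device above with a physically contiguous buffer
+ * -- 1 MiB (1 << 20) for the VPU, 2 MiB for the VEU and JPU -- which
+ * userspace then maps through uio_pdrv_genirq.
+ */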
+
+static struct platform_device *sh7343_early_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &scif3_device,
+ &cmt_device,
+ &tmu0_device,
+ &tmu1_device,
+ &tmu2_device,
+};
+
+void __init plat_early_device_setup(void)
+{
+ early_platform_add_devices(sh7343_early_devices,
+ ARRAY_SIZE(sh7343_early_devices));
+}
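+
+/*
+ * The early list is a subset of sh7343_devices: the SCIFs and timers are
+ * additionally registered as early platform devices so the early serial
+ * console and the early timer (clockevent/clocksource) can be brought up
+ * before the regular driver model initializes.
+ */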
+
+enum {
+ UNUSED = 0,
+ ENABLED,
+ DISABLED,
+
+ /* interrupt sources */
+ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
+ DMAC0, DMAC1, DMAC2, DMAC3,
+ VIO_CEUI, VIO_BEUI, VIO_VEUI, VOU,
+ MFI, VPU, TPU, Z3D4, USBI0, USBI1,
+ MMC_ERR, MMC_TRAN, MMC_FSTAT, MMC_FRDY,
+ DMAC4, DMAC5, DMAC_DADERR,
+ KEYSC,
+ SCIF, SCIF1, SCIF2, SCIF3,
+ SIOF0, SIOF1, SIO,
+ FLCTL_FLSTEI, FLCTL_FLENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I,
+ I2C0_ALI, I2C0_TACKI, I2C0_WAITI, I2C0_DTEI,
+ I2C1_ALI, I2C1_TACKI, I2C1_WAITI, I2C1_DTEI,
+ SIM_TEI, SIM_TXI, SIM_RXI, SIM_ERI,
+ IRDA, SDHI, CMT, TSIF, SIU,
+ TMU0, TMU1, TMU2,
+ JPU, LCDC,
+
+ /* interrupt groups */
+
+ DMAC0123, VIOVOU, MMC, DMAC45, FLCTL, I2C0, I2C1, SIM, USB,
+};
+
+static struct intc_vect vectors[] __initdata = {
+ INTC_VECT(IRQ0, 0x600), INTC_VECT(IRQ1, 0x620),
+ INTC_VECT(IRQ2, 0x640), INTC_VECT(IRQ3, 0x660),
+ INTC_VECT(IRQ4, 0x680), INTC_VECT(IRQ5, 0x6a0),
+ INTC_VECT(IRQ6, 0x6c0), INTC_VECT(IRQ7, 0x6e0),
+ INTC_VECT(I2C1_ALI, 0x780), INTC_VECT(I2C1_TACKI, 0x7a0),
+ INTC_VECT(I2C1_WAITI, 0x7c0), INTC_VECT(I2C1_DTEI, 0x7e0),
+ INTC_VECT(DMAC0, 0x800), INTC_VECT(DMAC1, 0x820),
+ INTC_VECT(DMAC2, 0x840), INTC_VECT(DMAC3, 0x860),
+ INTC_VECT(VIO_CEUI, 0x880), INTC_VECT(VIO_BEUI, 0x8a0),
+ INTC_VECT(VIO_VEUI, 0x8c0), INTC_VECT(VOU, 0x8e0),
+ INTC_VECT(MFI, 0x900), INTC_VECT(VPU, 0x980),
+ INTC_VECT(TPU, 0x9a0), INTC_VECT(Z3D4, 0x9e0),
+ INTC_VECT(USBI0, 0xa20), INTC_VECT(USBI1, 0xa40),
+ INTC_VECT(MMC_ERR, 0xb00), INTC_VECT(MMC_TRAN, 0xb20),
+ INTC_VECT(MMC_FSTAT, 0xb40), INTC_VECT(MMC_FRDY, 0xb60),
+ INTC_VECT(DMAC4, 0xb80), INTC_VECT(DMAC5, 0xba0),
+ INTC_VECT(DMAC_DADERR, 0xbc0), INTC_VECT(KEYSC, 0xbe0),
+ INTC_VECT(SCIF, 0xc00), INTC_VECT(SCIF1, 0xc20),
+ INTC_VECT(SCIF2, 0xc40), INTC_VECT(SCIF3, 0xc60),
+ INTC_VECT(SIOF0, 0xc80), INTC_VECT(SIOF1, 0xca0),
+ INTC_VECT(SIO, 0xd00),
+ INTC_VECT(FLCTL_FLSTEI, 0xd80), INTC_VECT(FLCTL_FLENDI, 0xda0),
+ INTC_VECT(FLCTL_FLTREQ0I, 0xdc0), INTC_VECT(FLCTL_FLTREQ1I, 0xde0),
+ INTC_VECT(I2C0_ALI, 0xe00), INTC_VECT(I2C0_TACKI, 0xe20),
+ INTC_VECT(I2C0_WAITI, 0xe40), INTC_VECT(I2C0_DTEI, 0xe60),
+ INTC_VECT(SDHI, 0xe80), INTC_VECT(SDHI, 0xea0),
+ INTC_VECT(SDHI, 0xec0), INTC_VECT(SDHI, 0xee0),
+ INTC_VECT(CMT, 0xf00), INTC_VECT(TSIF, 0xf20),
+ INTC_VECT(SIU, 0xf80),
+ INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420),
+ INTC_VECT(TMU2, 0x440),
+ INTC_VECT(JPU, 0x560), INTC_VECT(LCDC, 0x580),
+};
+
+static struct intc_group groups[] __initdata = {
+ INTC_GROUP(DMAC0123, DMAC0, DMAC1, DMAC2, DMAC3),
+ INTC_GROUP(VIOVOU, VIO_CEUI, VIO_BEUI, VIO_VEUI, VOU),
+ INTC_GROUP(MMC, MMC_FRDY, MMC_FSTAT, MMC_TRAN, MMC_ERR),
+ INTC_GROUP(DMAC45, DMAC4, DMAC5, DMAC_DADERR),
+ INTC_GROUP(FLCTL, FLCTL_FLSTEI, FLCTL_FLENDI,
+ FLCTL_FLTREQ0I, FLCTL_FLTREQ1I),
+ INTC_GROUP(I2C0, I2C0_ALI, I2C0_TACKI, I2C0_WAITI, I2C0_DTEI),
+ INTC_GROUP(I2C1, I2C1_ALI, I2C1_TACKI, I2C1_WAITI, I2C1_DTEI),
+ INTC_GROUP(SIM, SIM_TEI, SIM_TXI, SIM_RXI, SIM_ERI),
+ INTC_GROUP(USB, USBI0, USBI1),
+};
+
+static struct intc_mask_reg mask_registers[] __initdata = {
+ { 0xa4080084, 0xa40800c4, 8, /* IMR1 / IMCR1 */
+ { VOU, VIO_VEUI, VIO_BEUI, VIO_CEUI, DMAC3, DMAC2, DMAC1, DMAC0 } },
+ { 0xa4080088, 0xa40800c8, 8, /* IMR2 / IMCR2 */
+ { 0, 0, 0, VPU, 0, 0, 0, MFI } },
+ { 0xa408008c, 0xa40800cc, 8, /* IMR3 / IMCR3 */
+ { SIM_TEI, SIM_TXI, SIM_RXI, SIM_ERI, 0, 0, 0, IRDA } },
+ { 0xa4080090, 0xa40800d0, 8, /* IMR4 / IMCR4 */
+ { 0, TMU2, TMU1, TMU0, JPU, 0, 0, LCDC } },
+ { 0xa4080094, 0xa40800d4, 8, /* IMR5 / IMCR5 */
+ { KEYSC, DMAC_DADERR, DMAC5, DMAC4, SCIF3, SCIF2, SCIF1, SCIF } },
+ { 0xa4080098, 0xa40800d8, 8, /* IMR6 / IMCR6 */
+ { 0, 0, 0, SIO, Z3D4, 0, SIOF1, SIOF0 } },
+ { 0xa408009c, 0xa40800dc, 8, /* IMR7 / IMCR7 */
+ { I2C0_DTEI, I2C0_WAITI, I2C0_TACKI, I2C0_ALI,
+ FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLENDI, FLCTL_FLSTEI } },
+ { 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */
+ { DISABLED, ENABLED, ENABLED, ENABLED, 0, 0, 0, SIU } },
+ { 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */
+ { 0, 0, 0, CMT, 0, USBI1, USBI0 } },
+ { 0xa40800a8, 0xa40800e8, 8, /* IMR10 / IMCR10 */
+ { MMC_FRDY, MMC_FSTAT, MMC_TRAN, MMC_ERR } },
+ { 0xa40800ac, 0xa40800ec, 8, /* IMR11 / IMCR11 */
+ { I2C1_DTEI, I2C1_WAITI, I2C1_TACKI, I2C1_ALI, TPU, 0, 0, TSIF } },
+ { 0xa4140044, 0xa4140064, 8, /* INTMSK00 / INTMSKCLR00 */
+ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
+};
+
+static struct intc_prio_reg prio_registers[] __initdata = {
+ { 0xa4080000, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2 } },
+ { 0xa4080004, 0, 16, 4, /* IPRB */ { JPU, LCDC, SIM } },
+ { 0xa4080010, 0, 16, 4, /* IPRE */ { DMAC0123, VIOVOU, MFI, VPU } },
+ { 0xa4080014, 0, 16, 4, /* IPRF */ { KEYSC, DMAC45, USB, CMT } },
+ { 0xa4080018, 0, 16, 4, /* IPRG */ { SCIF, SCIF1, SCIF2, SCIF3 } },
+ { 0xa408001c, 0, 16, 4, /* IPRH */ { SIOF0, SIOF1, FLCTL, I2C0 } },
+ { 0xa4080020, 0, 16, 4, /* IPRI */ { SIO, 0, TSIF, I2C1 } },
+ { 0xa4080024, 0, 16, 4, /* IPRJ */ { Z3D4, 0, SIU } },
+ { 0xa4080028, 0, 16, 4, /* IPRK */ { 0, MMC, 0, SDHI } },
+ { 0xa408002c, 0, 16, 4, /* IPRL */ { 0, 0, TPU } },
+ { 0xa4140010, 0, 32, 4, /* INTPRI00 */
+ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
+};
+
+static struct intc_sense_reg sense_registers[] __initdata = {
+ { 0xa414001c, 16, 2, /* ICR1 */
+ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
+};
+
+static struct intc_mask_reg ack_registers[] __initdata = {
+ { 0xa4140024, 0, 8, /* INTREQ00 */
+ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
+};
+
+static struct intc_desc intc_desc __initdata = {
+ .name = "sh7343",
+ .force_enable = ENABLED,
+ .force_disable = DISABLED,
+ .hw = INTC_HW_DESC(vectors, groups, mask_registers,
+ prio_registers, sense_registers, ack_registers),
+};
+
+void __init plat_irq_setup(void)
+{
+ register_intc_controller(&intc_desc);
+}
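+
+/*
+ * The INTC_VECT() exception vectors above correspond to the IRQ numbers
+ * used by the platform devices earlier in this file via
+ * irq = (vector >> 5) - 16 (evt2irq()): TMU0 at 0x400 is IRQ 16, SCIF at
+ * 0xc00 is IRQ 80 and CMT at 0xf00 is IRQ 104, matching the .irqs and
+ * resource .start values above.
+ */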
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7366.c b/arch/sh/kernel/cpu/sh4a/setup-sh7366.c
new file mode 100644
index 00000000..87773869
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7366.c
@@ -0,0 +1,463 @@
+/*
+ * SH7366 Setup
+ *
+ * Copyright (C) 2008 Renesas Solutions
+ *
+ * Based on linux/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/serial.h>
+#include <linux/serial_sci.h>
+#include <linux/uio_driver.h>
+#include <linux/sh_timer.h>
+#include <linux/usb/r8a66597.h>
+#include <asm/clock.h>
+
+static struct plat_sci_port scif0_platform_data = {
+ .mapbase = 0xffe00000,
+ .port_reg = 0xa405013e,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .scbrr_algo_id = SCBRR_ALGO_2,
+ .type = PORT_SCIF,
+ .irqs = { 80, 80, 80, 80 },
+};
+
+static struct platform_device scif0_device = {
+ .name = "sh-sci",
+ .id = 0,
+ .dev = {
+ .platform_data = &scif0_platform_data,
+ },
+};
+
+static struct resource iic_resources[] = {
+ [0] = {
+ .name = "IIC",
+ .start = 0x04470000,
+ .end = 0x04470017,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 96,
+ .end = 99,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device iic_device = {
+ .name = "i2c-sh_mobile",
+ .id = 0, /* "i2c0" clock */
+ .num_resources = ARRAY_SIZE(iic_resources),
+ .resource = iic_resources,
+};
+
+static struct r8a66597_platdata r8a66597_data = {
+ .on_chip = 1,
+};
+
+static struct resource usb_host_resources[] = {
+ [0] = {
+ .start = 0xa4d80000,
+ .end = 0xa4d800ff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 65,
+ .end = 65,
+ .flags = IORESOURCE_IRQ | IRQF_TRIGGER_LOW,
+ },
+};
+
+static struct platform_device usb_host_device = {
+ .name = "r8a66597_hcd",
+ .id = -1,
+ .dev = {
+ .dma_mask = NULL,
+ .coherent_dma_mask = 0xffffffff,
+ .platform_data = &r8a66597_data,
+ },
+ .num_resources = ARRAY_SIZE(usb_host_resources),
+ .resource = usb_host_resources,
+};
+
+static struct uio_info vpu_platform_data = {
+ .name = "VPU5",
+ .version = "0",
+ .irq = 60,
+};
+
+static struct resource vpu_resources[] = {
+ [0] = {
+ .name = "VPU",
+ .start = 0xfe900000,
+ .end = 0xfe902807,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ /* placeholder for contiguous memory */
+ },
+};
+
+static struct platform_device vpu_device = {
+ .name = "uio_pdrv_genirq",
+ .id = 0,
+ .dev = {
+ .platform_data = &vpu_platform_data,
+ },
+ .resource = vpu_resources,
+ .num_resources = ARRAY_SIZE(vpu_resources),
+};
+
+static struct uio_info veu0_platform_data = {
+ .name = "VEU",
+ .version = "0",
+ .irq = 54,
+};
+
+static struct resource veu0_resources[] = {
+ [0] = {
+ .name = "VEU(1)",
+ .start = 0xfe920000,
+ .end = 0xfe9200b7,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ /* placeholder for contiguous memory */
+ },
+};
+
+static struct platform_device veu0_device = {
+ .name = "uio_pdrv_genirq",
+ .id = 1,
+ .dev = {
+ .platform_data = &veu0_platform_data,
+ },
+ .resource = veu0_resources,
+ .num_resources = ARRAY_SIZE(veu0_resources),
+};
+
+static struct uio_info veu1_platform_data = {
+ .name = "VEU",
+ .version = "0",
+ .irq = 27,
+};
+
+static struct resource veu1_resources[] = {
+ [0] = {
+ .name = "VEU(2)",
+ .start = 0xfe924000,
+ .end = 0xfe9240b7,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ /* placeholder for contiguous memory */
+ },
+};
+
+static struct platform_device veu1_device = {
+ .name = "uio_pdrv_genirq",
+ .id = 2,
+ .dev = {
+ .platform_data = &veu1_platform_data,
+ },
+ .resource = veu1_resources,
+ .num_resources = ARRAY_SIZE(veu1_resources),
+};
+
+static struct sh_timer_config cmt_platform_data = {
+ .channel_offset = 0x60,
+ .timer_bit = 5,
+ .clockevent_rating = 125,
+ .clocksource_rating = 200,
+};
+
+static struct resource cmt_resources[] = {
+ [0] = {
+ .start = 0x044a0060,
+ .end = 0x044a006b,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 104,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device cmt_device = {
+ .name = "sh_cmt",
+ .id = 0,
+ .dev = {
+ .platform_data = &cmt_platform_data,
+ },
+ .resource = cmt_resources,
+ .num_resources = ARRAY_SIZE(cmt_resources),
+};
+
+static struct sh_timer_config tmu0_platform_data = {
+ .channel_offset = 0x04,
+ .timer_bit = 0,
+ .clockevent_rating = 200,
+};
+
+static struct resource tmu0_resources[] = {
+ [0] = {
+ .start = 0xffd80008,
+ .end = 0xffd80013,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 16,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu0_device = {
+ .name = "sh_tmu",
+ .id = 0,
+ .dev = {
+ .platform_data = &tmu0_platform_data,
+ },
+ .resource = tmu0_resources,
+ .num_resources = ARRAY_SIZE(tmu0_resources),
+};
+
+static struct sh_timer_config tmu1_platform_data = {
+ .channel_offset = 0x10,
+ .timer_bit = 1,
+ .clocksource_rating = 200,
+};
+
+static struct resource tmu1_resources[] = {
+ [0] = {
+ .start = 0xffd80014,
+ .end = 0xffd8001f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 17,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu1_device = {
+ .name = "sh_tmu",
+ .id = 1,
+ .dev = {
+ .platform_data = &tmu1_platform_data,
+ },
+ .resource = tmu1_resources,
+ .num_resources = ARRAY_SIZE(tmu1_resources),
+};
+
+static struct sh_timer_config tmu2_platform_data = {
+ .channel_offset = 0x1c,
+ .timer_bit = 2,
+};
+
+static struct resource tmu2_resources[] = {
+ [0] = {
+ .start = 0xffd80020,
+ .end = 0xffd8002b,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 18,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu2_device = {
+ .name = "sh_tmu",
+ .id = 2,
+ .dev = {
+ .platform_data = &tmu2_platform_data,
+ },
+ .resource = tmu2_resources,
+ .num_resources = ARRAY_SIZE(tmu2_resources),
+};
+
+static struct platform_device *sh7366_devices[] __initdata = {
+ &scif0_device,
+ &cmt_device,
+ &tmu0_device,
+ &tmu1_device,
+ &tmu2_device,
+ &iic_device,
+ &usb_host_device,
+ &vpu_device,
+ &veu0_device,
+ &veu1_device,
+};
+
+static int __init sh7366_devices_setup(void)
+{
+ platform_resource_setup_memory(&vpu_device, "vpu", 2 << 20);
+ platform_resource_setup_memory(&veu0_device, "veu0", 2 << 20);
+ platform_resource_setup_memory(&veu1_device, "veu1", 2 << 20);
+
+ return platform_add_devices(sh7366_devices,
+ ARRAY_SIZE(sh7366_devices));
+}
+arch_initcall(sh7366_devices_setup);
+
+static struct platform_device *sh7366_early_devices[] __initdata = {
+ &scif0_device,
+ &cmt_device,
+ &tmu0_device,
+ &tmu1_device,
+ &tmu2_device,
+};
+
+void __init plat_early_device_setup(void)
+{
+ early_platform_add_devices(sh7366_early_devices,
+ ARRAY_SIZE(sh7366_early_devices));
+}
+
+enum {
+ UNUSED = 0,
+ ENABLED,
+ DISABLED,
+
+ /* interrupt sources */
+ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
+ ICB,
+ DMAC0, DMAC1, DMAC2, DMAC3,
+ VIO_CEUI, VIO_BEUI, VIO_VEUI, VOU,
+ MFI, VPU, USB,
+ MMC_MMC1I, MMC_MMC2I, MMC_MMC3I,
+ DMAC4, DMAC5, DMAC_DADERR,
+ SCIF, SCIFA1, SCIFA2,
+ DENC, MSIOF,
+ FLCTL_FLSTEI, FLCTL_FLENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I,
+ I2C_ALI, I2C_TACKI, I2C_WAITI, I2C_DTEI,
+ SDHI, CMT, TSIF, SIU,
+ TMU0, TMU1, TMU2,
+ VEU2, LCDC,
+
+ /* interrupt groups */
+
+ DMAC0123, VIOVOU, MMC, DMAC45, FLCTL, I2C,
+};
+
+static struct intc_vect vectors[] __initdata = {
+ INTC_VECT(IRQ0, 0x600), INTC_VECT(IRQ1, 0x620),
+ INTC_VECT(IRQ2, 0x640), INTC_VECT(IRQ3, 0x660),
+ INTC_VECT(IRQ4, 0x680), INTC_VECT(IRQ5, 0x6a0),
+ INTC_VECT(IRQ6, 0x6c0), INTC_VECT(IRQ7, 0x6e0),
+ INTC_VECT(ICB, 0x700),
+ INTC_VECT(DMAC0, 0x800), INTC_VECT(DMAC1, 0x820),
+ INTC_VECT(DMAC2, 0x840), INTC_VECT(DMAC3, 0x860),
+ INTC_VECT(VIO_CEUI, 0x880), INTC_VECT(VIO_BEUI, 0x8a0),
+ INTC_VECT(VIO_VEUI, 0x8c0), INTC_VECT(VOU, 0x8e0),
+ INTC_VECT(MFI, 0x900), INTC_VECT(VPU, 0x980), INTC_VECT(USB, 0xa20),
+ INTC_VECT(MMC_MMC1I, 0xb00), INTC_VECT(MMC_MMC2I, 0xb20),
+ INTC_VECT(MMC_MMC3I, 0xb40),
+ INTC_VECT(DMAC4, 0xb80), INTC_VECT(DMAC5, 0xba0),
+ INTC_VECT(DMAC_DADERR, 0xbc0),
+ INTC_VECT(SCIF, 0xc00), INTC_VECT(SCIFA1, 0xc20),
+ INTC_VECT(SCIFA2, 0xc40),
+ INTC_VECT(DENC, 0xc60), INTC_VECT(MSIOF, 0xc80),
+ INTC_VECT(FLCTL_FLSTEI, 0xd80), INTC_VECT(FLCTL_FLENDI, 0xda0),
+ INTC_VECT(FLCTL_FLTREQ0I, 0xdc0), INTC_VECT(FLCTL_FLTREQ1I, 0xde0),
+ INTC_VECT(I2C_ALI, 0xe00), INTC_VECT(I2C_TACKI, 0xe20),
+ INTC_VECT(I2C_WAITI, 0xe40), INTC_VECT(I2C_DTEI, 0xe60),
+ INTC_VECT(SDHI, 0xe80), INTC_VECT(SDHI, 0xea0),
+ INTC_VECT(SDHI, 0xec0), INTC_VECT(SDHI, 0xee0),
+ INTC_VECT(CMT, 0xf00), INTC_VECT(TSIF, 0xf20),
+ INTC_VECT(SIU, 0xf80),
+ INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420),
+ INTC_VECT(TMU2, 0x440),
+ INTC_VECT(VEU2, 0x560), INTC_VECT(LCDC, 0x580),
+};
+
+static struct intc_group groups[] __initdata = {
+ INTC_GROUP(DMAC0123, DMAC0, DMAC1, DMAC2, DMAC3),
+ INTC_GROUP(VIOVOU, VIO_CEUI, VIO_BEUI, VIO_VEUI, VOU),
+ INTC_GROUP(MMC, MMC_MMC1I, MMC_MMC2I, MMC_MMC3I),
+ INTC_GROUP(DMAC45, DMAC4, DMAC5, DMAC_DADERR),
+ INTC_GROUP(FLCTL, FLCTL_FLSTEI, FLCTL_FLENDI,
+ FLCTL_FLTREQ0I, FLCTL_FLTREQ1I),
+ INTC_GROUP(I2C, I2C_ALI, I2C_TACKI, I2C_WAITI, I2C_DTEI),
+};
+
+static struct intc_mask_reg mask_registers[] __initdata = {
+ { 0xa4080080, 0xa40800c0, 8, /* IMR0 / IMCR0 */
+ { } },
+ { 0xa4080084, 0xa40800c4, 8, /* IMR1 / IMCR1 */
+ { VOU, VIO_VEUI, VIO_BEUI, VIO_CEUI, DMAC3, DMAC2, DMAC1, DMAC0 } },
+ { 0xa4080088, 0xa40800c8, 8, /* IMR2 / IMCR2 */
+ { 0, 0, 0, VPU, 0, 0, 0, MFI } },
+ { 0xa408008c, 0xa40800cc, 8, /* IMR3 / IMCR3 */
+ { 0, 0, 0, ICB } },
+ { 0xa4080090, 0xa40800d0, 8, /* IMR4 / IMCR4 */
+ { 0, TMU2, TMU1, TMU0, VEU2, 0, 0, LCDC } },
+ { 0xa4080094, 0xa40800d4, 8, /* IMR5 / IMCR5 */
+ { 0, DMAC_DADERR, DMAC5, DMAC4, DENC, SCIFA2, SCIFA1, SCIF } },
+ { 0xa4080098, 0xa40800d8, 8, /* IMR6 / IMCR6 */
+ { 0, 0, 0, 0, 0, 0, 0, MSIOF } },
+ { 0xa408009c, 0xa40800dc, 8, /* IMR7 / IMCR7 */
+ { I2C_DTEI, I2C_WAITI, I2C_TACKI, I2C_ALI,
+ FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLENDI, FLCTL_FLSTEI } },
+ { 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */
+ { DISABLED, ENABLED, ENABLED, ENABLED, 0, 0, 0, SIU } },
+ { 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */
+ { 0, 0, 0, CMT, 0, USB, } },
+ { 0xa40800a8, 0xa40800e8, 8, /* IMR10 / IMCR10 */
+ { 0, MMC_MMC3I, MMC_MMC2I, MMC_MMC1I } },
+ { 0xa40800ac, 0xa40800ec, 8, /* IMR11 / IMCR11 */
+ { 0, 0, 0, 0, 0, 0, 0, TSIF } },
+ { 0xa4140044, 0xa4140064, 8, /* INTMSK00 / INTMSKCLR00 */
+ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
+};
+
+static struct intc_prio_reg prio_registers[] __initdata = {
+ { 0xa4080000, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2 } },
+ { 0xa4080004, 0, 16, 4, /* IPRB */ { VEU2, LCDC, ICB } },
+ { 0xa4080008, 0, 16, 4, /* IPRC */ { } },
+ { 0xa408000c, 0, 16, 4, /* IPRD */ { } },
+ { 0xa4080010, 0, 16, 4, /* IPRE */ { DMAC0123, VIOVOU, MFI, VPU } },
+ { 0xa4080014, 0, 16, 4, /* IPRF */ { 0, DMAC45, USB, CMT } },
+ { 0xa4080018, 0, 16, 4, /* IPRG */ { SCIF, SCIFA1, SCIFA2, DENC } },
+ { 0xa408001c, 0, 16, 4, /* IPRH */ { MSIOF, 0, FLCTL, I2C } },
+ { 0xa4080020, 0, 16, 4, /* IPRI */ { 0, 0, TSIF, } },
+ { 0xa4080024, 0, 16, 4, /* IPRJ */ { 0, 0, SIU } },
+ { 0xa4080028, 0, 16, 4, /* IPRK */ { 0, MMC, 0, SDHI } },
+ { 0xa408002c, 0, 16, 4, /* IPRL */ { } },
+ { 0xa4140010, 0, 32, 4, /* INTPRI00 */
+ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
+};
+
+static struct intc_sense_reg sense_registers[] __initdata = {
+ { 0xa414001c, 16, 2, /* ICR1 */
+ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
+};
+
+static struct intc_mask_reg ack_registers[] __initdata = {
+ { 0xa4140024, 0, 8, /* INTREQ00 */
+ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
+};
+
+static struct intc_desc intc_desc __initdata = {
+ .name = "sh7366",
+ .force_enable = ENABLED,
+ .force_disable = DISABLED,
+ .hw = INTC_HW_DESC(vectors, groups, mask_registers,
+ prio_registers, sense_registers, ack_registers),
+};
+
+void __init plat_irq_setup(void)
+{
+ register_intc_controller(&intc_desc);
+}
+
+void __init plat_mem_setup(void)
+{
+ /* TODO: Register Node 1 */
+}
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
new file mode 100644
index 00000000..8420d4bc
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
@@ -0,0 +1,734 @@
+/*
+ * SH7722 Setup
+ *
+ * Copyright (C) 2006 - 2008 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/platform_device.h>
+#include <linux/serial.h>
+#include <linux/serial_sci.h>
+#include <linux/sh_timer.h>
+#include <linux/uio_driver.h>
+#include <linux/usb/m66592.h>
+
+#include <asm/clock.h>
+#include <asm/mmzone.h>
+#include <asm/siu.h>
+
+#include <cpu/dma-register.h>
+#include <cpu/sh7722.h>
+#include <cpu/serial.h>
+
+static const struct sh_dmae_slave_config sh7722_dmae_slaves[] = {
+ {
+ .slave_id = SHDMA_SLAVE_SCIF0_TX,
+ .addr = 0xffe0000c,
+ .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x21,
+ }, {
+ .slave_id = SHDMA_SLAVE_SCIF0_RX,
+ .addr = 0xffe00014,
+ .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x22,
+ }, {
+ .slave_id = SHDMA_SLAVE_SCIF1_TX,
+ .addr = 0xffe1000c,
+ .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x25,
+ }, {
+ .slave_id = SHDMA_SLAVE_SCIF1_RX,
+ .addr = 0xffe10014,
+ .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x26,
+ }, {
+ .slave_id = SHDMA_SLAVE_SCIF2_TX,
+ .addr = 0xffe2000c,
+ .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x29,
+ }, {
+ .slave_id = SHDMA_SLAVE_SCIF2_RX,
+ .addr = 0xffe20014,
+ .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x2a,
+ }, {
+ .slave_id = SHDMA_SLAVE_SIUA_TX,
+ .addr = 0xa454c098,
+ .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+ .mid_rid = 0xb1,
+ }, {
+ .slave_id = SHDMA_SLAVE_SIUA_RX,
+ .addr = 0xa454c090,
+ .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+ .mid_rid = 0xb2,
+ }, {
+ .slave_id = SHDMA_SLAVE_SIUB_TX,
+ .addr = 0xa454c09c,
+ .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+ .mid_rid = 0xb5,
+ }, {
+ .slave_id = SHDMA_SLAVE_SIUB_RX,
+ .addr = 0xa454c094,
+ .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+ .mid_rid = 0xb6,
+ }, {
+ .slave_id = SHDMA_SLAVE_SDHI0_TX,
+ .addr = 0x04ce0030,
+ .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_16BIT),
+ .mid_rid = 0xc1,
+ }, {
+ .slave_id = SHDMA_SLAVE_SDHI0_RX,
+ .addr = 0x04ce0030,
+ .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_16BIT),
+ .mid_rid = 0xc2,
+ },
+};
+
+static const struct sh_dmae_channel sh7722_dmae_channels[] = {
+ {
+ .offset = 0,
+ .dmars = 0,
+ .dmars_bit = 0,
+ }, {
+ .offset = 0x10,
+ .dmars = 0,
+ .dmars_bit = 8,
+ }, {
+ .offset = 0x20,
+ .dmars = 4,
+ .dmars_bit = 0,
+ }, {
+ .offset = 0x30,
+ .dmars = 4,
+ .dmars_bit = 8,
+ }, {
+ .offset = 0x50,
+ .dmars = 8,
+ .dmars_bit = 0,
+ }, {
+ .offset = 0x60,
+ .dmars = 8,
+ .dmars_bit = 8,
+ }
+};
+
+static const unsigned int ts_shift[] = TS_SHIFT;
+
+static struct sh_dmae_pdata dma_platform_data = {
+ .slave = sh7722_dmae_slaves,
+ .slave_num = ARRAY_SIZE(sh7722_dmae_slaves),
+ .channel = sh7722_dmae_channels,
+ .channel_num = ARRAY_SIZE(sh7722_dmae_channels),
+ .ts_low_shift = CHCR_TS_LOW_SHIFT,
+ .ts_low_mask = CHCR_TS_LOW_MASK,
+ .ts_high_shift = CHCR_TS_HIGH_SHIFT,
+ .ts_high_mask = CHCR_TS_HIGH_MASK,
+ .ts_shift = ts_shift,
+ .ts_shift_num = ARRAY_SIZE(ts_shift),
+ .dmaor_init = DMAOR_INIT,
+};
+
+static struct resource sh7722_dmae_resources[] = {
+ [0] = {
+ /* Channel registers and DMAOR */
+ .start = 0xfe008020,
+ .end = 0xfe00808f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ /* DMARSx */
+ .start = 0xfe009000,
+ .end = 0xfe00900b,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "error_irq",
+ .start = 78,
+ .end = 78,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ /* IRQ for channels 0-3 */
+ .start = 48,
+ .end = 51,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ /* IRQ for channels 4-5 */
+ .start = 76,
+ .end = 77,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+struct platform_device dma_device = {
+ .name = "sh-dma-engine",
+ .id = -1,
+ .resource = sh7722_dmae_resources,
+ .num_resources = ARRAY_SIZE(sh7722_dmae_resources),
+ .dev = {
+ .platform_data = &dma_platform_data,
+ },
+};
+
+/* Serial */
+static struct plat_sci_port scif0_platform_data = {
+ .mapbase = 0xffe00000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .scbrr_algo_id = SCBRR_ALGO_2,
+ .type = PORT_SCIF,
+ .irqs = { 80, 80, 80, 80 },
+ .ops = &sh7722_sci_port_ops,
+ .regtype = SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE,
+};
+
+static struct platform_device scif0_device = {
+ .name = "sh-sci",
+ .id = 0,
+ .dev = {
+ .platform_data = &scif0_platform_data,
+ },
+};
+
+static struct plat_sci_port scif1_platform_data = {
+ .mapbase = 0xffe10000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .scbrr_algo_id = SCBRR_ALGO_2,
+ .type = PORT_SCIF,
+ .irqs = { 81, 81, 81, 81 },
+ .ops = &sh7722_sci_port_ops,
+ .regtype = SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE,
+};
+
+static struct platform_device scif1_device = {
+ .name = "sh-sci",
+ .id = 1,
+ .dev = {
+ .platform_data = &scif1_platform_data,
+ },
+};
+
+static struct plat_sci_port scif2_platform_data = {
+ .mapbase = 0xffe20000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .scbrr_algo_id = SCBRR_ALGO_2,
+ .type = PORT_SCIF,
+ .irqs = { 82, 82, 82, 82 },
+ .ops = &sh7722_sci_port_ops,
+ .regtype = SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE,
+};
+
+static struct platform_device scif2_device = {
+ .name = "sh-sci",
+ .id = 2,
+ .dev = {
+ .platform_data = &scif2_platform_data,
+ },
+};
+
+static struct resource rtc_resources[] = {
+ [0] = {
+ .start = 0xa465fec0,
+ .end = 0xa465fec0 + 0x58 - 1,
+ .flags = IORESOURCE_IO,
+ },
+ [1] = {
+ /* Period IRQ */
+ .start = 45,
+ .flags = IORESOURCE_IRQ,
+ },
+ [2] = {
+ /* Carry IRQ */
+ .start = 46,
+ .flags = IORESOURCE_IRQ,
+ },
+ [3] = {
+ /* Alarm IRQ */
+ .start = 44,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device rtc_device = {
+ .name = "sh-rtc",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(rtc_resources),
+ .resource = rtc_resources,
+};
+
+static struct m66592_platdata usbf_platdata = {
+ .on_chip = 1,
+};
+
+static struct resource usbf_resources[] = {
+ [0] = {
+ .name = "USBF",
+ .start = 0x04480000,
+ .end = 0x044800FF,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 65,
+ .end = 65,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device usbf_device = {
+ .name = "m66592_udc",
+ .id = 0, /* "usbf0" clock */
+ .dev = {
+ .dma_mask = NULL,
+ .coherent_dma_mask = 0xffffffff,
+ .platform_data = &usbf_platdata,
+ },
+ .num_resources = ARRAY_SIZE(usbf_resources),
+ .resource = usbf_resources,
+};
+
+static struct resource iic_resources[] = {
+ [0] = {
+ .name = "IIC",
+ .start = 0x04470000,
+ .end = 0x04470017,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 96,
+ .end = 99,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device iic_device = {
+ .name = "i2c-sh_mobile",
+ .id = 0, /* "i2c0" clock */
+ .num_resources = ARRAY_SIZE(iic_resources),
+ .resource = iic_resources,
+};
+
+static struct uio_info vpu_platform_data = {
+ .name = "VPU4",
+ .version = "0",
+ .irq = 60,
+};
+
+static struct resource vpu_resources[] = {
+ [0] = {
+ .name = "VPU",
+ .start = 0xfe900000,
+ .end = 0xfe9022eb,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ /* place holder for contiguous memory */
+ },
+};
+
+static struct platform_device vpu_device = {
+ .name = "uio_pdrv_genirq",
+ .id = 0,
+ .dev = {
+ .platform_data = &vpu_platform_data,
+ },
+ .resource = vpu_resources,
+ .num_resources = ARRAY_SIZE(vpu_resources),
+};
+
+static struct uio_info veu_platform_data = {
+ .name = "VEU",
+ .version = "0",
+ .irq = 54,
+};
+
+static struct resource veu_resources[] = {
+ [0] = {
+ .name = "VEU",
+ .start = 0xfe920000,
+ .end = 0xfe9200b7,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ /* place holder for contiguous memory */
+ },
+};
+
+static struct platform_device veu_device = {
+ .name = "uio_pdrv_genirq",
+ .id = 1,
+ .dev = {
+ .platform_data = &veu_platform_data,
+ },
+ .resource = veu_resources,
+ .num_resources = ARRAY_SIZE(veu_resources),
+};
+
+static struct uio_info jpu_platform_data = {
+ .name = "JPU",
+ .version = "0",
+ .irq = 27,
+};
+
+static struct resource jpu_resources[] = {
+ [0] = {
+ .name = "JPU",
+ .start = 0xfea00000,
+ .end = 0xfea102d3,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ /* place holder for contiguous memory */
+ },
+};
+
+static struct platform_device jpu_device = {
+ .name = "uio_pdrv_genirq",
+ .id = 2,
+ .dev = {
+ .platform_data = &jpu_platform_data,
+ },
+ .resource = jpu_resources,
+ .num_resources = ARRAY_SIZE(jpu_resources),
+};
+
+static struct sh_timer_config cmt_platform_data = {
+ .channel_offset = 0x60,
+ .timer_bit = 5,
+ .clockevent_rating = 125,
+ .clocksource_rating = 125,
+};
+
+static struct resource cmt_resources[] = {
+ [0] = {
+ .start = 0x044a0060,
+ .end = 0x044a006b,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 104,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device cmt_device = {
+ .name = "sh_cmt",
+ .id = 0,
+ .dev = {
+ .platform_data = &cmt_platform_data,
+ },
+ .resource = cmt_resources,
+ .num_resources = ARRAY_SIZE(cmt_resources),
+};
+
+static struct sh_timer_config tmu0_platform_data = {
+ .channel_offset = 0x04,
+ .timer_bit = 0,
+ .clockevent_rating = 200,
+};
+
+static struct resource tmu0_resources[] = {
+ [0] = {
+ .start = 0xffd80008,
+ .end = 0xffd80013,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 16,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu0_device = {
+ .name = "sh_tmu",
+ .id = 0,
+ .dev = {
+ .platform_data = &tmu0_platform_data,
+ },
+ .resource = tmu0_resources,
+ .num_resources = ARRAY_SIZE(tmu0_resources),
+};
+
+static struct sh_timer_config tmu1_platform_data = {
+ .channel_offset = 0x10,
+ .timer_bit = 1,
+ .clocksource_rating = 200,
+};
+
+static struct resource tmu1_resources[] = {
+ [0] = {
+ .start = 0xffd80014,
+ .end = 0xffd8001f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 17,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu1_device = {
+ .name = "sh_tmu",
+ .id = 1,
+ .dev = {
+ .platform_data = &tmu1_platform_data,
+ },
+ .resource = tmu1_resources,
+ .num_resources = ARRAY_SIZE(tmu1_resources),
+};
+
+static struct sh_timer_config tmu2_platform_data = {
+ .channel_offset = 0x1c,
+ .timer_bit = 2,
+};
+
+static struct resource tmu2_resources[] = {
+ [0] = {
+ .start = 0xffd80020,
+ .end = 0xffd8002b,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 18,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu2_device = {
+ .name = "sh_tmu",
+ .id = 2,
+ .dev = {
+ .platform_data = &tmu2_platform_data,
+ },
+ .resource = tmu2_resources,
+ .num_resources = ARRAY_SIZE(tmu2_resources),
+};
+
+static struct siu_platform siu_platform_data = {
+ .dma_dev = &dma_device.dev,
+ .dma_slave_tx_a = SHDMA_SLAVE_SIUA_TX,
+ .dma_slave_rx_a = SHDMA_SLAVE_SIUA_RX,
+ .dma_slave_tx_b = SHDMA_SLAVE_SIUB_TX,
+ .dma_slave_rx_b = SHDMA_SLAVE_SIUB_RX,
+};
+
+static struct resource siu_resources[] = {
+ [0] = {
+ .start = 0xa4540000,
+ .end = 0xa454c10f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 108,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device siu_device = {
+ .name = "siu-pcm-audio",
+ .id = -1,
+ .dev = {
+ .platform_data = &siu_platform_data,
+ },
+ .resource = siu_resources,
+ .num_resources = ARRAY_SIZE(siu_resources),
+};
+
+static struct platform_device *sh7722_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &cmt_device,
+ &tmu0_device,
+ &tmu1_device,
+ &tmu2_device,
+ &rtc_device,
+ &usbf_device,
+ &iic_device,
+ &vpu_device,
+ &veu_device,
+ &jpu_device,
+ &siu_device,
+ &dma_device,
+};
+
+static int __init sh7722_devices_setup(void)
+{
+ platform_resource_setup_memory(&vpu_device, "vpu", 1 << 20);
+ platform_resource_setup_memory(&veu_device, "veu", 2 << 20);
+ platform_resource_setup_memory(&jpu_device, "jpu", 2 << 20);
+
+ return platform_add_devices(sh7722_devices,
+ ARRAY_SIZE(sh7722_devices));
+}
+arch_initcall(sh7722_devices_setup);
+
+static struct platform_device *sh7722_early_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &cmt_device,
+ &tmu0_device,
+ &tmu1_device,
+ &tmu2_device,
+};
+
+void __init plat_early_device_setup(void)
+{
+ early_platform_add_devices(sh7722_early_devices,
+ ARRAY_SIZE(sh7722_early_devices));
+}
+
+enum {
+ UNUSED=0,
+ ENABLED,
+ DISABLED,
+
+ /* interrupt sources */
+ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
+ HUDI,
+ SIM_ERI, SIM_RXI, SIM_TXI, SIM_TEI,
+ RTC_ATI, RTC_PRI, RTC_CUI,
+ DMAC0, DMAC1, DMAC2, DMAC3,
+ VIO_CEUI, VIO_BEUI, VIO_VEUI, VOU,
+ VPU, TPU,
+ USB_USBI0, USB_USBI1,
+ DMAC4, DMAC5, DMAC_DADERR,
+ KEYSC,
+ SCIF0, SCIF1, SCIF2, SIOF0, SIOF1, SIO,
+ FLCTL_FLSTEI, FLCTL_FLENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I,
+ I2C_ALI, I2C_TACKI, I2C_WAITI, I2C_DTEI,
+ CMT, TSIF, SIU, TWODG,
+ TMU0, TMU1, TMU2,
+ IRDA, JPU, LCDC,
+
+ /* interrupt groups */
+ SIM, RTC, DMAC0123, VIOVOU, USB, DMAC45, FLCTL, I2C, SDHI,
+};
+
+static struct intc_vect vectors[] __initdata = {
+ INTC_VECT(IRQ0, 0x600), INTC_VECT(IRQ1, 0x620),
+ INTC_VECT(IRQ2, 0x640), INTC_VECT(IRQ3, 0x660),
+ INTC_VECT(IRQ4, 0x680), INTC_VECT(IRQ5, 0x6a0),
+ INTC_VECT(IRQ6, 0x6c0), INTC_VECT(IRQ7, 0x6e0),
+ INTC_VECT(SIM_ERI, 0x700), INTC_VECT(SIM_RXI, 0x720),
+ INTC_VECT(SIM_TXI, 0x740), INTC_VECT(SIM_TEI, 0x760),
+ INTC_VECT(RTC_ATI, 0x780), INTC_VECT(RTC_PRI, 0x7a0),
+ INTC_VECT(RTC_CUI, 0x7c0),
+ INTC_VECT(DMAC0, 0x800), INTC_VECT(DMAC1, 0x820),
+ INTC_VECT(DMAC2, 0x840), INTC_VECT(DMAC3, 0x860),
+ INTC_VECT(VIO_CEUI, 0x880), INTC_VECT(VIO_BEUI, 0x8a0),
+ INTC_VECT(VIO_VEUI, 0x8c0), INTC_VECT(VOU, 0x8e0),
+ INTC_VECT(VPU, 0x980), INTC_VECT(TPU, 0x9a0),
+ INTC_VECT(USB_USBI0, 0xa20), INTC_VECT(USB_USBI1, 0xa40),
+ INTC_VECT(DMAC4, 0xb80), INTC_VECT(DMAC5, 0xba0),
+ INTC_VECT(DMAC_DADERR, 0xbc0), INTC_VECT(KEYSC, 0xbe0),
+ INTC_VECT(SCIF0, 0xc00), INTC_VECT(SCIF1, 0xc20),
+ INTC_VECT(SCIF2, 0xc40), INTC_VECT(SIOF0, 0xc80),
+ INTC_VECT(SIOF1, 0xca0), INTC_VECT(SIO, 0xd00),
+ INTC_VECT(FLCTL_FLSTEI, 0xd80), INTC_VECT(FLCTL_FLENDI, 0xda0),
+ INTC_VECT(FLCTL_FLTREQ0I, 0xdc0), INTC_VECT(FLCTL_FLTREQ1I, 0xde0),
+ INTC_VECT(I2C_ALI, 0xe00), INTC_VECT(I2C_TACKI, 0xe20),
+ INTC_VECT(I2C_WAITI, 0xe40), INTC_VECT(I2C_DTEI, 0xe60),
+ INTC_VECT(SDHI, 0xe80), INTC_VECT(SDHI, 0xea0),
+ INTC_VECT(SDHI, 0xec0), INTC_VECT(SDHI, 0xee0),
+ INTC_VECT(CMT, 0xf00), INTC_VECT(TSIF, 0xf20),
+ INTC_VECT(SIU, 0xf80), INTC_VECT(TWODG, 0xfa0),
+ INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420),
+ INTC_VECT(TMU2, 0x440), INTC_VECT(IRDA, 0x480),
+ INTC_VECT(JPU, 0x560), INTC_VECT(LCDC, 0x580),
+};
+
+static struct intc_group groups[] __initdata = {
+ INTC_GROUP(SIM, SIM_ERI, SIM_RXI, SIM_TXI, SIM_TEI),
+ INTC_GROUP(RTC, RTC_ATI, RTC_PRI, RTC_CUI),
+ INTC_GROUP(DMAC0123, DMAC0, DMAC1, DMAC2, DMAC3),
+ INTC_GROUP(VIOVOU, VIO_CEUI, VIO_BEUI, VIO_VEUI, VOU),
+ INTC_GROUP(USB, USB_USBI0, USB_USBI1),
+ INTC_GROUP(DMAC45, DMAC4, DMAC5, DMAC_DADERR),
+ INTC_GROUP(FLCTL, FLCTL_FLSTEI, FLCTL_FLENDI,
+ FLCTL_FLTREQ0I, FLCTL_FLTREQ1I),
+ INTC_GROUP(I2C, I2C_ALI, I2C_TACKI, I2C_WAITI, I2C_DTEI),
+};
+
+static struct intc_mask_reg mask_registers[] __initdata = {
+ { 0xa4080080, 0xa40800c0, 8, /* IMR0 / IMCR0 */
+ { } },
+ { 0xa4080084, 0xa40800c4, 8, /* IMR1 / IMCR1 */
+ { VOU, VIO_VEUI, VIO_BEUI, VIO_CEUI, DMAC3, DMAC2, DMAC1, DMAC0 } },
+ { 0xa4080088, 0xa40800c8, 8, /* IMR2 / IMCR2 */
+ { 0, 0, 0, VPU, } },
+ { 0xa408008c, 0xa40800cc, 8, /* IMR3 / IMCR3 */
+ { SIM_TEI, SIM_TXI, SIM_RXI, SIM_ERI, 0, 0, 0, IRDA } },
+ { 0xa4080090, 0xa40800d0, 8, /* IMR4 / IMCR4 */
+ { 0, TMU2, TMU1, TMU0, JPU, 0, 0, LCDC } },
+ { 0xa4080094, 0xa40800d4, 8, /* IMR5 / IMCR5 */
+ { KEYSC, DMAC_DADERR, DMAC5, DMAC4, 0, SCIF2, SCIF1, SCIF0 } },
+ { 0xa4080098, 0xa40800d8, 8, /* IMR6 / IMCR6 */
+ { 0, 0, 0, SIO, 0, 0, SIOF1, SIOF0 } },
+ { 0xa408009c, 0xa40800dc, 8, /* IMR7 / IMCR7 */
+ { I2C_DTEI, I2C_WAITI, I2C_TACKI, I2C_ALI,
+ FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLENDI, FLCTL_FLSTEI } },
+ { 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */
+ { DISABLED, ENABLED, ENABLED, ENABLED, 0, 0, TWODG, SIU } },
+ { 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */
+ { 0, 0, 0, CMT, 0, USB_USBI1, USB_USBI0, } },
+ { 0xa40800a8, 0xa40800e8, 8, /* IMR10 / IMCR10 */
+ { } },
+ { 0xa40800ac, 0xa40800ec, 8, /* IMR11 / IMCR11 */
+ { 0, RTC_CUI, RTC_PRI, RTC_ATI, 0, TPU, 0, TSIF } },
+ { 0xa4140044, 0xa4140064, 8, /* INTMSK00 / INTMSKCLR00 */
+ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
+};
+
+static struct intc_prio_reg prio_registers[] __initdata = {
+ { 0xa4080000, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2, IRDA } },
+ { 0xa4080004, 0, 16, 4, /* IPRB */ { JPU, LCDC, SIM } },
+ { 0xa4080008, 0, 16, 4, /* IPRC */ { } },
+ { 0xa408000c, 0, 16, 4, /* IPRD */ { } },
+ { 0xa4080010, 0, 16, 4, /* IPRE */ { DMAC0123, VIOVOU, 0, VPU } },
+ { 0xa4080014, 0, 16, 4, /* IPRF */ { KEYSC, DMAC45, USB, CMT } },
+ { 0xa4080018, 0, 16, 4, /* IPRG */ { SCIF0, SCIF1, SCIF2 } },
+ { 0xa408001c, 0, 16, 4, /* IPRH */ { SIOF0, SIOF1, FLCTL, I2C } },
+ { 0xa4080020, 0, 16, 4, /* IPRI */ { SIO, 0, TSIF, RTC } },
+ { 0xa4080024, 0, 16, 4, /* IPRJ */ { 0, 0, SIU } },
+ { 0xa4080028, 0, 16, 4, /* IPRK */ { 0, 0, 0, SDHI } },
+ { 0xa408002c, 0, 16, 4, /* IPRL */ { TWODG, 0, TPU } },
+ { 0xa4140010, 0, 32, 4, /* INTPRI00 */
+ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
+};
+
+static struct intc_sense_reg sense_registers[] __initdata = {
+ { 0xa414001c, 16, 2, /* ICR1 */
+ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
+};
+
+static struct intc_mask_reg ack_registers[] __initdata = {
+ { 0xa4140024, 0, 8, /* INTREQ00 */
+ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
+};
+
+static struct intc_desc intc_desc __initdata = {
+ .name = "sh7722",
+ .force_enable = ENABLED,
+ .force_disable = DISABLED,
+ .hw = INTC_HW_DESC(vectors, groups, mask_registers,
+ prio_registers, sense_registers, ack_registers),
+};
+
+void __init plat_irq_setup(void)
+{
+ register_intc_controller(&intc_desc);
+}
+
+void __init plat_mem_setup(void)
+{
+ /* Register the URAM space as Node 1 */
+ setup_bootmem_node(1, 0x055f0000, 0x05610000);
+}
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7723.c b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c
new file mode 100644
index 00000000..a188c9ea
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c
@@ -0,0 +1,773 @@
+/*
+ * SH7723 Setup
+ *
+ * Copyright (C) 2008 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/serial.h>
+#include <linux/mm.h>
+#include <linux/serial_sci.h>
+#include <linux/uio_driver.h>
+#include <linux/usb/r8a66597.h>
+#include <linux/sh_timer.h>
+#include <linux/io.h>
+#include <asm/clock.h>
+#include <asm/mmzone.h>
+#include <cpu/sh7723.h>
+
+/* Serial */
+static struct plat_sci_port scif0_platform_data = {
+ .mapbase = 0xffe00000,
+ .port_reg = 0xa4050160,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .scbrr_algo_id = SCBRR_ALGO_2,
+ .type = PORT_SCIF,
+ .irqs = { 80, 80, 80, 80 },
+ .regtype = SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE,
+};
+
+static struct platform_device scif0_device = {
+ .name = "sh-sci",
+ .id = 0,
+ .dev = {
+ .platform_data = &scif0_platform_data,
+ },
+};
+
+static struct plat_sci_port scif1_platform_data = {
+ .mapbase = 0xffe10000,
+ .port_reg = SCIx_NOT_SUPPORTED,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .scbrr_algo_id = SCBRR_ALGO_2,
+ .type = PORT_SCIF,
+ .irqs = { 81, 81, 81, 81 },
+ .regtype = SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE,
+};
+
+static struct platform_device scif1_device = {
+ .name = "sh-sci",
+ .id = 1,
+ .dev = {
+ .platform_data = &scif1_platform_data,
+ },
+};
+
+static struct plat_sci_port scif2_platform_data = {
+ .mapbase = 0xffe20000,
+ .port_reg = SCIx_NOT_SUPPORTED,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .scbrr_algo_id = SCBRR_ALGO_2,
+ .type = PORT_SCIF,
+ .irqs = { 82, 82, 82, 82 },
+ .regtype = SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE,
+};
+
+static struct platform_device scif2_device = {
+ .name = "sh-sci",
+ .id = 2,
+ .dev = {
+ .platform_data = &scif2_platform_data,
+ },
+};
+
+static struct plat_sci_port scif3_platform_data = {
+ .mapbase = 0xa4e30000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .port_reg = SCIx_NOT_SUPPORTED,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .scbrr_algo_id = SCBRR_ALGO_3,
+ .type = PORT_SCIFA,
+ .irqs = { 56, 56, 56, 56 },
+};
+
+static struct platform_device scif3_device = {
+ .name = "sh-sci",
+ .id = 3,
+ .dev = {
+ .platform_data = &scif3_platform_data,
+ },
+};
+
+static struct plat_sci_port scif4_platform_data = {
+ .mapbase = 0xa4e40000,
+ .port_reg = SCIx_NOT_SUPPORTED,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .scbrr_algo_id = SCBRR_ALGO_3,
+ .type = PORT_SCIFA,
+ .irqs = { 88, 88, 88, 88 },
+};
+
+static struct platform_device scif4_device = {
+ .name = "sh-sci",
+ .id = 4,
+ .dev = {
+ .platform_data = &scif4_platform_data,
+ },
+};
+
+static struct plat_sci_port scif5_platform_data = {
+ .mapbase = 0xa4e50000,
+ .port_reg = SCIx_NOT_SUPPORTED,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .scbrr_algo_id = SCBRR_ALGO_3,
+ .type = PORT_SCIFA,
+ .irqs = { 109, 109, 109, 109 },
+};
+
+static struct platform_device scif5_device = {
+ .name = "sh-sci",
+ .id = 5,
+ .dev = {
+ .platform_data = &scif5_platform_data,
+ },
+};
+
+static struct uio_info vpu_platform_data = {
+ .name = "VPU5",
+ .version = "0",
+ .irq = 60,
+};
+
+static struct resource vpu_resources[] = {
+ [0] = {
+ .name = "VPU",
+ .start = 0xfe900000,
+ .end = 0xfe902807,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ /* place holder for contiguous memory */
+ },
+};
+
+static struct platform_device vpu_device = {
+ .name = "uio_pdrv_genirq",
+ .id = 0,
+ .dev = {
+ .platform_data = &vpu_platform_data,
+ },
+ .resource = vpu_resources,
+ .num_resources = ARRAY_SIZE(vpu_resources),
+};
+
+static struct uio_info veu0_platform_data = {
+ .name = "VEU2H",
+ .version = "0",
+ .irq = 54,
+};
+
+static struct resource veu0_resources[] = {
+ [0] = {
+ .name = "VEU2H0",
+ .start = 0xfe920000,
+ .end = 0xfe92027b,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ /* place holder for contiguous memory */
+ },
+};
+
+static struct platform_device veu0_device = {
+ .name = "uio_pdrv_genirq",
+ .id = 1,
+ .dev = {
+ .platform_data = &veu0_platform_data,
+ },
+ .resource = veu0_resources,
+ .num_resources = ARRAY_SIZE(veu0_resources),
+};
+
+static struct uio_info veu1_platform_data = {
+ .name = "VEU2H",
+ .version = "0",
+ .irq = 27,
+};
+
+static struct resource veu1_resources[] = {
+ [0] = {
+ .name = "VEU2H1",
+ .start = 0xfe924000,
+ .end = 0xfe92427b,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ /* place holder for contiguous memory */
+ },
+};
+
+static struct platform_device veu1_device = {
+ .name = "uio_pdrv_genirq",
+ .id = 2,
+ .dev = {
+ .platform_data = &veu1_platform_data,
+ },
+ .resource = veu1_resources,
+ .num_resources = ARRAY_SIZE(veu1_resources),
+};
+
+static struct sh_timer_config cmt_platform_data = {
+ .channel_offset = 0x60,
+ .timer_bit = 5,
+ .clockevent_rating = 125,
+ .clocksource_rating = 125,
+};
+
+static struct resource cmt_resources[] = {
+ [0] = {
+ .start = 0x044a0060,
+ .end = 0x044a006b,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 104,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device cmt_device = {
+ .name = "sh_cmt",
+ .id = 0,
+ .dev = {
+ .platform_data = &cmt_platform_data,
+ },
+ .resource = cmt_resources,
+ .num_resources = ARRAY_SIZE(cmt_resources),
+};
+
+static struct sh_timer_config tmu0_platform_data = {
+ .channel_offset = 0x04,
+ .timer_bit = 0,
+ .clockevent_rating = 200,
+};
+
+static struct resource tmu0_resources[] = {
+ [0] = {
+ .start = 0xffd80008,
+ .end = 0xffd80013,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 16,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu0_device = {
+ .name = "sh_tmu",
+ .id = 0,
+ .dev = {
+ .platform_data = &tmu0_platform_data,
+ },
+ .resource = tmu0_resources,
+ .num_resources = ARRAY_SIZE(tmu0_resources),
+};
+
+static struct sh_timer_config tmu1_platform_data = {
+ .channel_offset = 0x10,
+ .timer_bit = 1,
+ .clocksource_rating = 200,
+};
+
+static struct resource tmu1_resources[] = {
+ [0] = {
+ .start = 0xffd80014,
+ .end = 0xffd8001f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 17,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu1_device = {
+ .name = "sh_tmu",
+ .id = 1,
+ .dev = {
+ .platform_data = &tmu1_platform_data,
+ },
+ .resource = tmu1_resources,
+ .num_resources = ARRAY_SIZE(tmu1_resources),
+};
+
+static struct sh_timer_config tmu2_platform_data = {
+ .channel_offset = 0x1c,
+ .timer_bit = 2,
+};
+
+static struct resource tmu2_resources[] = {
+ [0] = {
+ .start = 0xffd80020,
+ .end = 0xffd8002b,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 18,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu2_device = {
+ .name = "sh_tmu",
+ .id = 2,
+ .dev = {
+ .platform_data = &tmu2_platform_data,
+ },
+ .resource = tmu2_resources,
+ .num_resources = ARRAY_SIZE(tmu2_resources),
+};
+
+static struct sh_timer_config tmu3_platform_data = {
+ .channel_offset = 0x04,
+ .timer_bit = 0,
+};
+
+static struct resource tmu3_resources[] = {
+ [0] = {
+ .start = 0xffd90008,
+ .end = 0xffd90013,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 57,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu3_device = {
+ .name = "sh_tmu",
+ .id = 3,
+ .dev = {
+ .platform_data = &tmu3_platform_data,
+ },
+ .resource = tmu3_resources,
+ .num_resources = ARRAY_SIZE(tmu3_resources),
+};
+
+static struct sh_timer_config tmu4_platform_data = {
+ .channel_offset = 0x10,
+ .timer_bit = 1,
+};
+
+static struct resource tmu4_resources[] = {
+ [0] = {
+ .start = 0xffd90014,
+ .end = 0xffd9001f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 58,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu4_device = {
+ .name = "sh_tmu",
+ .id = 4,
+ .dev = {
+ .platform_data = &tmu4_platform_data,
+ },
+ .resource = tmu4_resources,
+ .num_resources = ARRAY_SIZE(tmu4_resources),
+};
+
+static struct sh_timer_config tmu5_platform_data = {
+ .channel_offset = 0x1c,
+ .timer_bit = 2,
+};
+
+static struct resource tmu5_resources[] = {
+ [0] = {
+ .start = 0xffd90020,
+ .end = 0xffd9002b,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 57,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu5_device = {
+ .name = "sh_tmu",
+ .id = 5,
+ .dev = {
+ .platform_data = &tmu5_platform_data,
+ },
+ .resource = tmu5_resources,
+ .num_resources = ARRAY_SIZE(tmu5_resources),
+};
+
+static struct resource rtc_resources[] = {
+ [0] = {
+ .start = 0xa465fec0,
+ .end = 0xa465fec0 + 0x58 - 1,
+ .flags = IORESOURCE_IO,
+ },
+ [1] = {
+ /* Period IRQ */
+ .start = 69,
+ .flags = IORESOURCE_IRQ,
+ },
+ [2] = {
+ /* Carry IRQ */
+ .start = 70,
+ .flags = IORESOURCE_IRQ,
+ },
+ [3] = {
+ /* Alarm IRQ */
+ .start = 68,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device rtc_device = {
+ .name = "sh-rtc",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(rtc_resources),
+ .resource = rtc_resources,
+};
+
+static struct r8a66597_platdata r8a66597_data = {
+ .on_chip = 1,
+};
+
+static struct resource sh7723_usb_host_resources[] = {
+ [0] = {
+ .start = 0xa4d80000,
+ .end = 0xa4d800ff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 65,
+ .end = 65,
+ .flags = IORESOURCE_IRQ | IRQF_TRIGGER_LOW,
+ },
+};
+
+static struct platform_device sh7723_usb_host_device = {
+ .name = "r8a66597_hcd",
+ .id = 0,
+ .dev = {
+ .dma_mask = NULL, /* not using DMA */
+ .coherent_dma_mask = 0xffffffff,
+ .platform_data = &r8a66597_data,
+ },
+ .num_resources = ARRAY_SIZE(sh7723_usb_host_resources),
+ .resource = sh7723_usb_host_resources,
+};
+
+static struct resource iic_resources[] = {
+ [0] = {
+ .name = "IIC",
+ .start = 0x04470000,
+ .end = 0x04470017,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 96,
+ .end = 99,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device iic_device = {
+ .name = "i2c-sh_mobile",
+ .id = 0, /* "i2c0" clock */
+ .num_resources = ARRAY_SIZE(iic_resources),
+ .resource = iic_resources,
+};
+
+static struct platform_device *sh7723_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &scif3_device,
+ &scif4_device,
+ &scif5_device,
+ &cmt_device,
+ &tmu0_device,
+ &tmu1_device,
+ &tmu2_device,
+ &tmu3_device,
+ &tmu4_device,
+ &tmu5_device,
+ &rtc_device,
+ &iic_device,
+ &sh7723_usb_host_device,
+ &vpu_device,
+ &veu0_device,
+ &veu1_device,
+};
+
+static int __init sh7723_devices_setup(void)
+{
+ platform_resource_setup_memory(&vpu_device, "vpu", 2 << 20);
+ platform_resource_setup_memory(&veu0_device, "veu0", 2 << 20);
+ platform_resource_setup_memory(&veu1_device, "veu1", 2 << 20);
+
+ return platform_add_devices(sh7723_devices,
+ ARRAY_SIZE(sh7723_devices));
+}
+arch_initcall(sh7723_devices_setup);
+
+static struct platform_device *sh7723_early_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &scif3_device,
+ &scif4_device,
+ &scif5_device,
+ &cmt_device,
+ &tmu0_device,
+ &tmu1_device,
+ &tmu2_device,
+ &tmu3_device,
+ &tmu4_device,
+ &tmu5_device,
+};
+
+void __init plat_early_device_setup(void)
+{
+ early_platform_add_devices(sh7723_early_devices,
+ ARRAY_SIZE(sh7723_early_devices));
+}
+
+#define RAMCR_CACHE_L2FC 0x0002
+#define RAMCR_CACHE_L2E 0x0001
+#define L2_CACHE_ENABLE (RAMCR_CACHE_L2E|RAMCR_CACHE_L2FC)
+
+void l2_cache_init(void)
+{
+ /* Enable L2 cache */
+ __raw_writel(L2_CACHE_ENABLE, RAMCR);
+}
+
+enum {
+ UNUSED=0,
+ ENABLED,
+ DISABLED,
+
+ /* interrupt sources */
+ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
+ HUDI,
+ DMAC1A_DEI0,DMAC1A_DEI1,DMAC1A_DEI2,DMAC1A_DEI3,
+ _2DG_TRI,_2DG_INI,_2DG_CEI,
+ DMAC0A_DEI0,DMAC0A_DEI1,DMAC0A_DEI2,DMAC0A_DEI3,
+ VIO_CEUI,VIO_BEUI,VIO_VEU2HI,VIO_VOUI,
+ SCIFA_SCIFA0,
+ VPU_VPUI,
+ TPU_TPUI,
+ ADC_ADI,
+ USB_USI0,
+ RTC_ATI,RTC_PRI,RTC_CUI,
+ DMAC1B_DEI4,DMAC1B_DEI5,DMAC1B_DADERR,
+ DMAC0B_DEI4,DMAC0B_DEI5,DMAC0B_DADERR,
+ KEYSC_KEYI,
+ SCIF_SCIF0,SCIF_SCIF1,SCIF_SCIF2,
+ MSIOF_MSIOFI0,MSIOF_MSIOFI1,
+ SCIFA_SCIFA1,
+ FLCTL_FLSTEI,FLCTL_FLTENDI,FLCTL_FLTREQ0I,FLCTL_FLTREQ1I,
+ I2C_ALI,I2C_TACKI,I2C_WAITI,I2C_DTEI,
+ CMT_CMTI,
+ TSIF_TSIFI,
+ SIU_SIUI,
+ SCIFA_SCIFA2,
+ TMU0_TUNI0, TMU0_TUNI1, TMU0_TUNI2,
+ IRDA_IRDAI,
+ ATAPI_ATAPII,
+ VEU2H1_VEU2HI,
+ LCDC_LCDCI,
+ TMU1_TUNI0,TMU1_TUNI1,TMU1_TUNI2,
+
+ /* interrupt groups */
+ DMAC1A, DMAC0A, VIO, DMAC0B, FLCTL, I2C, _2DG,
+ SDHI1, RTC, DMAC1B, SDHI0,
+};
+
+static struct intc_vect vectors[] __initdata = {
+ INTC_VECT(IRQ0, 0x600), INTC_VECT(IRQ1, 0x620),
+ INTC_VECT(IRQ2, 0x640), INTC_VECT(IRQ3, 0x660),
+ INTC_VECT(IRQ4, 0x680), INTC_VECT(IRQ5, 0x6a0),
+ INTC_VECT(IRQ6, 0x6c0), INTC_VECT(IRQ7, 0x6e0),
+
+ INTC_VECT(DMAC1A_DEI0,0x700),
+ INTC_VECT(DMAC1A_DEI1,0x720),
+ INTC_VECT(DMAC1A_DEI2,0x740),
+ INTC_VECT(DMAC1A_DEI3,0x760),
+
+ INTC_VECT(_2DG_TRI, 0x780),
+ INTC_VECT(_2DG_INI, 0x7A0),
+ INTC_VECT(_2DG_CEI, 0x7C0),
+
+ INTC_VECT(DMAC0A_DEI0,0x800),
+ INTC_VECT(DMAC0A_DEI1,0x820),
+ INTC_VECT(DMAC0A_DEI2,0x840),
+ INTC_VECT(DMAC0A_DEI3,0x860),
+
+ INTC_VECT(VIO_CEUI,0x880),
+ INTC_VECT(VIO_BEUI,0x8A0),
+ INTC_VECT(VIO_VEU2HI,0x8C0),
+ INTC_VECT(VIO_VOUI,0x8E0),
+
+ INTC_VECT(SCIFA_SCIFA0,0x900),
+ INTC_VECT(VPU_VPUI,0x980),
+ INTC_VECT(TPU_TPUI,0x9A0),
+ INTC_VECT(ADC_ADI,0x9E0),
+ INTC_VECT(USB_USI0,0xA20),
+
+ INTC_VECT(RTC_ATI,0xA80),
+ INTC_VECT(RTC_PRI,0xAA0),
+ INTC_VECT(RTC_CUI,0xAC0),
+
+ INTC_VECT(DMAC1B_DEI4,0xB00),
+ INTC_VECT(DMAC1B_DEI5,0xB20),
+ INTC_VECT(DMAC1B_DADERR,0xB40),
+
+ INTC_VECT(DMAC0B_DEI4,0xB80),
+ INTC_VECT(DMAC0B_DEI5,0xBA0),
+ INTC_VECT(DMAC0B_DADERR,0xBC0),
+
+ INTC_VECT(KEYSC_KEYI,0xBE0),
+ INTC_VECT(SCIF_SCIF0,0xC00),
+ INTC_VECT(SCIF_SCIF1,0xC20),
+ INTC_VECT(SCIF_SCIF2,0xC40),
+ INTC_VECT(MSIOF_MSIOFI0,0xC80),
+ INTC_VECT(MSIOF_MSIOFI1,0xCA0),
+ INTC_VECT(SCIFA_SCIFA1,0xD00),
+
+ INTC_VECT(FLCTL_FLSTEI,0xD80),
+ INTC_VECT(FLCTL_FLTENDI,0xDA0),
+ INTC_VECT(FLCTL_FLTREQ0I,0xDC0),
+ INTC_VECT(FLCTL_FLTREQ1I,0xDE0),
+
+ INTC_VECT(I2C_ALI,0xE00),
+ INTC_VECT(I2C_TACKI,0xE20),
+ INTC_VECT(I2C_WAITI,0xE40),
+ INTC_VECT(I2C_DTEI,0xE60),
+
+ INTC_VECT(SDHI0, 0xE80),
+ INTC_VECT(SDHI0, 0xEA0),
+ INTC_VECT(SDHI0, 0xEC0),
+
+ INTC_VECT(CMT_CMTI,0xF00),
+ INTC_VECT(TSIF_TSIFI,0xF20),
+ INTC_VECT(SIU_SIUI,0xF80),
+ INTC_VECT(SCIFA_SCIFA2,0xFA0),
+
+ INTC_VECT(TMU0_TUNI0,0x400),
+ INTC_VECT(TMU0_TUNI1,0x420),
+ INTC_VECT(TMU0_TUNI2,0x440),
+
+ INTC_VECT(IRDA_IRDAI,0x480),
+ INTC_VECT(ATAPI_ATAPII,0x4A0),
+
+ INTC_VECT(SDHI1, 0x4E0),
+ INTC_VECT(SDHI1, 0x500),
+ INTC_VECT(SDHI1, 0x520),
+
+ INTC_VECT(VEU2H1_VEU2HI,0x560),
+ INTC_VECT(LCDC_LCDCI,0x580),
+
+ INTC_VECT(TMU1_TUNI0,0x920),
+ INTC_VECT(TMU1_TUNI1,0x940),
+ INTC_VECT(TMU1_TUNI2,0x960),
+
+};
+
+static struct intc_group groups[] __initdata = {
+ INTC_GROUP(DMAC1A,DMAC1A_DEI0,DMAC1A_DEI1,DMAC1A_DEI2,DMAC1A_DEI3),
+ INTC_GROUP(DMAC0A,DMAC0A_DEI0,DMAC0A_DEI1,DMAC0A_DEI2,DMAC0A_DEI3),
+ INTC_GROUP(VIO, VIO_CEUI,VIO_BEUI,VIO_VEU2HI,VIO_VOUI),
+ INTC_GROUP(DMAC0B, DMAC0B_DEI4,DMAC0B_DEI5,DMAC0B_DADERR),
+ INTC_GROUP(FLCTL,FLCTL_FLSTEI,FLCTL_FLTENDI,FLCTL_FLTREQ0I,FLCTL_FLTREQ1I),
+ INTC_GROUP(I2C,I2C_ALI,I2C_TACKI,I2C_WAITI,I2C_DTEI),
+ INTC_GROUP(_2DG, _2DG_TRI,_2DG_INI,_2DG_CEI),
+ INTC_GROUP(RTC, RTC_ATI,RTC_PRI,RTC_CUI),
+ INTC_GROUP(DMAC1B, DMAC1B_DEI4,DMAC1B_DEI5,DMAC1B_DADERR),
+};
+
+static struct intc_mask_reg mask_registers[] __initdata = {
+ { 0xa4080080, 0xa40800c0, 8, /* IMR0 / IMCR0 */
+ { 0, TMU1_TUNI2, TMU1_TUNI1, TMU1_TUNI0,
+ 0, ENABLED, ENABLED, ENABLED } },
+ { 0xa4080084, 0xa40800c4, 8, /* IMR1 / IMCR1 */
+ { VIO_VOUI, VIO_VEU2HI,VIO_BEUI,VIO_CEUI,DMAC0A_DEI3,DMAC0A_DEI2,DMAC0A_DEI1,DMAC0A_DEI0 } },
+ { 0xa4080088, 0xa40800c8, 8, /* IMR2 / IMCR2 */
+ { 0, 0, 0, VPU_VPUI,0,0,0,SCIFA_SCIFA0 } },
+ { 0xa408008c, 0xa40800cc, 8, /* IMR3 / IMCR3 */
+ { DMAC1A_DEI3,DMAC1A_DEI2,DMAC1A_DEI1,DMAC1A_DEI0,0,0,0,IRDA_IRDAI } },
+ { 0xa4080090, 0xa40800d0, 8, /* IMR4 / IMCR4 */
+ { 0,TMU0_TUNI2,TMU0_TUNI1,TMU0_TUNI0,VEU2H1_VEU2HI,0,0,LCDC_LCDCI } },
+ { 0xa4080094, 0xa40800d4, 8, /* IMR5 / IMCR5 */
+ { KEYSC_KEYI,DMAC0B_DADERR,DMAC0B_DEI5,DMAC0B_DEI4,0,SCIF_SCIF2,SCIF_SCIF1,SCIF_SCIF0 } },
+ { 0xa4080098, 0xa40800d8, 8, /* IMR6 / IMCR6 */
+ { 0,0,0,SCIFA_SCIFA1,ADC_ADI,0,MSIOF_MSIOFI1,MSIOF_MSIOFI0 } },
+ { 0xa408009c, 0xa40800dc, 8, /* IMR7 / IMCR7 */
+ { I2C_DTEI, I2C_WAITI, I2C_TACKI, I2C_ALI,
+ FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLTENDI, FLCTL_FLSTEI } },
+ { 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */
+ { 0, ENABLED, ENABLED, ENABLED,
+ 0, 0, SCIFA_SCIFA2, SIU_SIUI } },
+ { 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */
+ { 0, 0, 0, CMT_CMTI, 0, 0, USB_USI0,0 } },
+ { 0xa40800a8, 0xa40800e8, 8, /* IMR10 / IMCR10 */
+ { 0, DMAC1B_DADERR,DMAC1B_DEI5,DMAC1B_DEI4,0,RTC_ATI,RTC_PRI,RTC_CUI } },
+ { 0xa40800ac, 0xa40800ec, 8, /* IMR11 / IMCR11 */
+ { 0,_2DG_CEI,_2DG_INI,_2DG_TRI,0,TPU_TPUI,0,TSIF_TSIFI } },
+ { 0xa40800b0, 0xa40800f0, 8, /* IMR12 / IMCR12 */
+ { 0,0,0,0,0,0,0,ATAPI_ATAPII } },
+ { 0xa4140044, 0xa4140064, 8, /* INTMSK00 / INTMSKCLR00 */
+ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
+};
+
+static struct intc_prio_reg prio_registers[] __initdata = {
+ { 0xa4080000, 0, 16, 4, /* IPRA */ { TMU0_TUNI0, TMU0_TUNI1, TMU0_TUNI2, IRDA_IRDAI } },
+ { 0xa4080004, 0, 16, 4, /* IPRB */ { VEU2H1_VEU2HI, LCDC_LCDCI, DMAC1A, 0} },
+ { 0xa4080008, 0, 16, 4, /* IPRC */ { TMU1_TUNI0, TMU1_TUNI1, TMU1_TUNI2, 0} },
+ { 0xa408000c, 0, 16, 4, /* IPRD */ { } },
+ { 0xa4080010, 0, 16, 4, /* IPRE */ { DMAC0A, VIO, SCIFA_SCIFA0, VPU_VPUI } },
+ { 0xa4080014, 0, 16, 4, /* IPRF */ { KEYSC_KEYI, DMAC0B, USB_USI0, CMT_CMTI } },
+ { 0xa4080018, 0, 16, 4, /* IPRG */ { SCIF_SCIF0, SCIF_SCIF1, SCIF_SCIF2,0 } },
+ { 0xa408001c, 0, 16, 4, /* IPRH */ { MSIOF_MSIOFI0,MSIOF_MSIOFI1, FLCTL, I2C } },
+ { 0xa4080020, 0, 16, 4, /* IPRI */ { SCIFA_SCIFA1,0,TSIF_TSIFI,_2DG } },
+ { 0xa4080024, 0, 16, 4, /* IPRJ */ { ADC_ADI,0,SIU_SIUI,SDHI1 } },
+ { 0xa4080028, 0, 16, 4, /* IPRK */ { RTC,DMAC1B,0,SDHI0 } },
+ { 0xa408002c, 0, 16, 4, /* IPRL */ { SCIFA_SCIFA2,0,TPU_TPUI,ATAPI_ATAPII } },
+ { 0xa4140010, 0, 32, 4, /* INTPRI00 */
+ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
+};
+
+static struct intc_sense_reg sense_registers[] __initdata = {
+ { 0xa414001c, 16, 2, /* ICR1 */
+ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
+};
+
+static struct intc_mask_reg ack_registers[] __initdata = {
+ { 0xa4140024, 0, 8, /* INTREQ00 */
+ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
+};
+
+static struct intc_desc intc_desc __initdata = {
+ .name = "sh7723",
+ .force_enable = ENABLED,
+ .force_disable = DISABLED,
+ .hw = INTC_HW_DESC(vectors, groups, mask_registers,
+ prio_registers, sense_registers, ack_registers),
+};
+
+void __init plat_irq_setup(void)
+{
+ register_intc_controller(&intc_desc);
+}
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
new file mode 100644
index 00000000..4c671cfe
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
@@ -0,0 +1,1418 @@
+/*
+ * SH7724 Setup
+ *
+ * Copyright (C) 2009 Renesas Solutions Corp.
+ *
+ * Kuninori Morimoto <morimoto.kuninori@renesas.com>
+ *
+ * Based on SH7723 Setup
+ * Copyright (C) 2008 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/serial.h>
+#include <linux/mm.h>
+#include <linux/serial_sci.h>
+#include <linux/uio_driver.h>
+#include <linux/sh_dma.h>
+#include <linux/sh_timer.h>
+#include <linux/io.h>
+#include <linux/notifier.h>
+
+#include <asm/suspend.h>
+#include <asm/clock.h>
+#include <asm/mmzone.h>
+
+#include <cpu/dma-register.h>
+#include <cpu/sh7724.h>
+
+/* DMA */
+static const struct sh_dmae_slave_config sh7724_dmae_slaves[] = {
+ {
+ .slave_id = SHDMA_SLAVE_SCIF0_TX,
+ .addr = 0xffe0000c,
+ .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x21,
+ }, {
+ .slave_id = SHDMA_SLAVE_SCIF0_RX,
+ .addr = 0xffe00014,
+ .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x22,
+ }, {
+ .slave_id = SHDMA_SLAVE_SCIF1_TX,
+ .addr = 0xffe1000c,
+ .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x25,
+ }, {
+ .slave_id = SHDMA_SLAVE_SCIF1_RX,
+ .addr = 0xffe10014,
+ .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x26,
+ }, {
+ .slave_id = SHDMA_SLAVE_SCIF2_TX,
+ .addr = 0xffe2000c,
+ .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x29,
+ }, {
+ .slave_id = SHDMA_SLAVE_SCIF2_RX,
+ .addr = 0xffe20014,
+ .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x2a,
+ }, {
+ .slave_id = SHDMA_SLAVE_SCIF3_TX,
+ .addr = 0xa4e30020,
+ .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x2d,
+ }, {
+ .slave_id = SHDMA_SLAVE_SCIF3_RX,
+ .addr = 0xa4e30024,
+ .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x2e,
+ }, {
+ .slave_id = SHDMA_SLAVE_SCIF4_TX,
+ .addr = 0xa4e40020,
+ .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x31,
+ }, {
+ .slave_id = SHDMA_SLAVE_SCIF4_RX,
+ .addr = 0xa4e40024,
+ .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x32,
+ }, {
+ .slave_id = SHDMA_SLAVE_SCIF5_TX,
+ .addr = 0xa4e50020,
+ .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x35,
+ }, {
+ .slave_id = SHDMA_SLAVE_SCIF5_RX,
+ .addr = 0xa4e50024,
+ .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x36,
+ }, {
+ .slave_id = SHDMA_SLAVE_USB0D0_TX,
+ .addr = 0xA4D80100,
+ .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+ .mid_rid = 0x73,
+ }, {
+ .slave_id = SHDMA_SLAVE_USB0D0_RX,
+ .addr = 0xA4D80100,
+ .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+ .mid_rid = 0x73,
+ }, {
+ .slave_id = SHDMA_SLAVE_USB0D1_TX,
+ .addr = 0xA4D80120,
+ .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+ .mid_rid = 0x77,
+ }, {
+ .slave_id = SHDMA_SLAVE_USB0D1_RX,
+ .addr = 0xA4D80120,
+ .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+ .mid_rid = 0x77,
+ }, {
+ .slave_id = SHDMA_SLAVE_USB1D0_TX,
+ .addr = 0xA4D90100,
+ .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+ .mid_rid = 0xab,
+ }, {
+ .slave_id = SHDMA_SLAVE_USB1D0_RX,
+ .addr = 0xA4D90100,
+ .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+ .mid_rid = 0xab,
+ }, {
+ .slave_id = SHDMA_SLAVE_USB1D1_TX,
+ .addr = 0xA4D90120,
+ .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+ .mid_rid = 0xaf,
+ }, {
+ .slave_id = SHDMA_SLAVE_USB1D1_RX,
+ .addr = 0xA4D90120,
+ .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+ .mid_rid = 0xaf,
+ }, {
+ .slave_id = SHDMA_SLAVE_SDHI0_TX,
+ .addr = 0x04ce0030,
+ .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_16BIT),
+ .mid_rid = 0xc1,
+ }, {
+ .slave_id = SHDMA_SLAVE_SDHI0_RX,
+ .addr = 0x04ce0030,
+ .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_16BIT),
+ .mid_rid = 0xc2,
+ }, {
+ .slave_id = SHDMA_SLAVE_SDHI1_TX,
+ .addr = 0x04cf0030,
+ .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_16BIT),
+ .mid_rid = 0xc9,
+ }, {
+ .slave_id = SHDMA_SLAVE_SDHI1_RX,
+ .addr = 0x04cf0030,
+ .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_16BIT),
+ .mid_rid = 0xca,
+ },
+};
+
+static const struct sh_dmae_channel sh7724_dmae_channels[] = {
+ {
+ .offset = 0,
+ .dmars = 0,
+ .dmars_bit = 0,
+ }, {
+ .offset = 0x10,
+ .dmars = 0,
+ .dmars_bit = 8,
+ }, {
+ .offset = 0x20,
+ .dmars = 4,
+ .dmars_bit = 0,
+ }, {
+ .offset = 0x30,
+ .dmars = 4,
+ .dmars_bit = 8,
+ }, {
+ .offset = 0x50,
+ .dmars = 8,
+ .dmars_bit = 0,
+ }, {
+ .offset = 0x60,
+ .dmars = 8,
+ .dmars_bit = 8,
+ }
+};
+
+static const unsigned int ts_shift[] = TS_SHIFT;
+
+static struct sh_dmae_pdata dma_platform_data = {
+ .slave = sh7724_dmae_slaves,
+ .slave_num = ARRAY_SIZE(sh7724_dmae_slaves),
+ .channel = sh7724_dmae_channels,
+ .channel_num = ARRAY_SIZE(sh7724_dmae_channels),
+ .ts_low_shift = CHCR_TS_LOW_SHIFT,
+ .ts_low_mask = CHCR_TS_LOW_MASK,
+ .ts_high_shift = CHCR_TS_HIGH_SHIFT,
+ .ts_high_mask = CHCR_TS_HIGH_MASK,
+ .ts_shift = ts_shift,
+ .ts_shift_num = ARRAY_SIZE(ts_shift),
+ .dmaor_init = DMAOR_INIT,
+};
+
+/* Resource order important! */
+static struct resource sh7724_dmae0_resources[] = {
+ {
+ /* Channel registers and DMAOR */
+ .start = 0xfe008020,
+ .end = 0xfe00808f,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ /* DMARSx */
+ .start = 0xfe009000,
+ .end = 0xfe00900b,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "error_irq",
+ .start = 78,
+ .end = 78,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ /* IRQ for channels 0-3 */
+ .start = 48,
+ .end = 51,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ /* IRQ for channels 4-5 */
+ .start = 76,
+ .end = 77,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+/* Resource order important! */
+static struct resource sh7724_dmae1_resources[] = {
+ {
+ /* Channel registers and DMAOR */
+ .start = 0xfdc08020,
+ .end = 0xfdc0808f,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ /* DMARSx */
+ .start = 0xfdc09000,
+ .end = 0xfdc0900b,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "error_irq",
+ .start = 74,
+ .end = 74,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ /* IRQ for channels 0-3 */
+ .start = 40,
+ .end = 43,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ /* IRQ for channels 4-5 */
+ .start = 72,
+ .end = 73,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device dma0_device = {
+ .name = "sh-dma-engine",
+ .id = 0,
+ .resource = sh7724_dmae0_resources,
+ .num_resources = ARRAY_SIZE(sh7724_dmae0_resources),
+ .dev = {
+ .platform_data = &dma_platform_data,
+ },
+};
+
+static struct platform_device dma1_device = {
+ .name = "sh-dma-engine",
+ .id = 1,
+ .resource = sh7724_dmae1_resources,
+ .num_resources = ARRAY_SIZE(sh7724_dmae1_resources),
+ .dev = {
+ .platform_data = &dma_platform_data,
+ },
+};
+
+/* Serial */
+static struct plat_sci_port scif0_platform_data = {
+ .mapbase = 0xffe00000,
+ .port_reg = SCIx_NOT_SUPPORTED,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .scbrr_algo_id = SCBRR_ALGO_2,
+ .type = PORT_SCIF,
+ .irqs = { 80, 80, 80, 80 },
+ .regtype = SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE,
+};
+
+static struct platform_device scif0_device = {
+ .name = "sh-sci",
+ .id = 0,
+ .dev = {
+ .platform_data = &scif0_platform_data,
+ },
+};
+
+static struct plat_sci_port scif1_platform_data = {
+ .mapbase = 0xffe10000,
+ .port_reg = SCIx_NOT_SUPPORTED,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .scbrr_algo_id = SCBRR_ALGO_2,
+ .type = PORT_SCIF,
+ .irqs = { 81, 81, 81, 81 },
+ .regtype = SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE,
+};
+
+static struct platform_device scif1_device = {
+ .name = "sh-sci",
+ .id = 1,
+ .dev = {
+ .platform_data = &scif1_platform_data,
+ },
+};
+
+static struct plat_sci_port scif2_platform_data = {
+ .mapbase = 0xffe20000,
+ .port_reg = SCIx_NOT_SUPPORTED,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .scbrr_algo_id = SCBRR_ALGO_2,
+ .type = PORT_SCIF,
+ .irqs = { 82, 82, 82, 82 },
+ .regtype = SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE,
+};
+
+static struct platform_device scif2_device = {
+ .name = "sh-sci",
+ .id = 2,
+ .dev = {
+ .platform_data = &scif2_platform_data,
+ },
+};
+
+static struct plat_sci_port scif3_platform_data = {
+ .mapbase = 0xa4e30000,
+ .port_reg = SCIx_NOT_SUPPORTED,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE,
+ .scbrr_algo_id = SCBRR_ALGO_3,
+ .type = PORT_SCIFA,
+ .irqs = { 56, 56, 56, 56 },
+};
+
+static struct platform_device scif3_device = {
+ .name = "sh-sci",
+ .id = 3,
+ .dev = {
+ .platform_data = &scif3_platform_data,
+ },
+};
+
+static struct plat_sci_port scif4_platform_data = {
+ .mapbase = 0xa4e40000,
+ .port_reg = SCIx_NOT_SUPPORTED,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE,
+ .scbrr_algo_id = SCBRR_ALGO_3,
+ .type = PORT_SCIFA,
+ .irqs = { 88, 88, 88, 88 },
+};
+
+static struct platform_device scif4_device = {
+ .name = "sh-sci",
+ .id = 4,
+ .dev = {
+ .platform_data = &scif4_platform_data,
+ },
+};
+
+static struct plat_sci_port scif5_platform_data = {
+ .mapbase = 0xa4e50000,
+ .port_reg = SCIx_NOT_SUPPORTED,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE,
+ .scbrr_algo_id = SCBRR_ALGO_3,
+ .type = PORT_SCIFA,
+ .irqs = { 109, 109, 109, 109 },
+};
+
+static struct platform_device scif5_device = {
+ .name = "sh-sci",
+ .id = 5,
+ .dev = {
+ .platform_data = &scif5_platform_data,
+ },
+};
+
+/* RTC */
+static struct resource rtc_resources[] = {
+ [0] = {
+ .start = 0xa465fec0,
+ .end = 0xa465fec0 + 0x58 - 1,
+ .flags = IORESOURCE_IO,
+ },
+ [1] = {
+ /* Period IRQ */
+ .start = 69,
+ .flags = IORESOURCE_IRQ,
+ },
+ [2] = {
+ /* Carry IRQ */
+ .start = 70,
+ .flags = IORESOURCE_IRQ,
+ },
+ [3] = {
+ /* Alarm IRQ */
+ .start = 68,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device rtc_device = {
+ .name = "sh-rtc",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(rtc_resources),
+ .resource = rtc_resources,
+};
+
+/* I2C0 */
+static struct resource iic0_resources[] = {
+ [0] = {
+ .name = "IIC0",
+ .start = 0x04470000,
+ .end = 0x04470018 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 96,
+ .end = 99,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device iic0_device = {
+ .name = "i2c-sh_mobile",
+ .id = 0, /* "i2c0" clock */
+ .num_resources = ARRAY_SIZE(iic0_resources),
+ .resource = iic0_resources,
+};
+
+/* I2C1 */
+static struct resource iic1_resources[] = {
+ [0] = {
+ .name = "IIC1",
+ .start = 0x04750000,
+ .end = 0x04750018 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 92,
+ .end = 95,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device iic1_device = {
+ .name = "i2c-sh_mobile",
+ .id = 1, /* "i2c1" clock */
+ .num_resources = ARRAY_SIZE(iic1_resources),
+ .resource = iic1_resources,
+};
+
+/* VPU */
+static struct uio_info vpu_platform_data = {
+ .name = "VPU5F",
+ .version = "0",
+ .irq = 60,
+};
+
+static struct resource vpu_resources[] = {
+ [0] = {
+ .name = "VPU",
+ .start = 0xfe900000,
+ .end = 0xfe902807,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ /* place holder for contiguous memory */
+ },
+};
+
+static struct platform_device vpu_device = {
+ .name = "uio_pdrv_genirq",
+ .id = 0,
+ .dev = {
+ .platform_data = &vpu_platform_data,
+ },
+ .resource = vpu_resources,
+ .num_resources = ARRAY_SIZE(vpu_resources),
+};
+
+/* VEU0 */
+static struct uio_info veu0_platform_data = {
+ .name = "VEU3F0",
+ .version = "0",
+ .irq = 83,
+};
+
+static struct resource veu0_resources[] = {
+ [0] = {
+ .name = "VEU3F0",
+ .start = 0xfe920000,
+ .end = 0xfe9200cb,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ /* place holder for contiguous memory */
+ },
+};
+
+static struct platform_device veu0_device = {
+ .name = "uio_pdrv_genirq",
+ .id = 1,
+ .dev = {
+ .platform_data = &veu0_platform_data,
+ },
+ .resource = veu0_resources,
+ .num_resources = ARRAY_SIZE(veu0_resources),
+};
+
+/* VEU1 */
+static struct uio_info veu1_platform_data = {
+ .name = "VEU3F1",
+ .version = "0",
+ .irq = 54,
+};
+
+static struct resource veu1_resources[] = {
+ [0] = {
+ .name = "VEU3F1",
+ .start = 0xfe924000,
+ .end = 0xfe9240cb,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ /* place holder for contiguous memory */
+ },
+};
+
+static struct platform_device veu1_device = {
+ .name = "uio_pdrv_genirq",
+ .id = 2,
+ .dev = {
+ .platform_data = &veu1_platform_data,
+ },
+ .resource = veu1_resources,
+ .num_resources = ARRAY_SIZE(veu1_resources),
+};
+
+/* BEU0 */
+static struct uio_info beu0_platform_data = {
+ .name = "BEU0",
+ .version = "0",
+ .irq = evt2irq(0x8A0),
+};
+
+static struct resource beu0_resources[] = {
+ [0] = {
+ .name = "BEU0",
+ .start = 0xfe930000,
+ .end = 0xfe933400,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ /* place holder for contiguous memory */
+ },
+};
+
+static struct platform_device beu0_device = {
+ .name = "uio_pdrv_genirq",
+ .id = 6,
+ .dev = {
+ .platform_data = &beu0_platform_data,
+ },
+ .resource = beu0_resources,
+ .num_resources = ARRAY_SIZE(beu0_resources),
+};
+
+/* BEU1 */
+static struct uio_info beu1_platform_data = {
+ .name = "BEU1",
+ .version = "0",
+ .irq = evt2irq(0xA00),
+};
+
+static struct resource beu1_resources[] = {
+ [0] = {
+ .name = "BEU1",
+ .start = 0xfe940000,
+ .end = 0xfe943400,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ /* place holder for contiguous memory */
+ },
+};
+
+static struct platform_device beu1_device = {
+ .name = "uio_pdrv_genirq",
+ .id = 7,
+ .dev = {
+ .platform_data = &beu1_platform_data,
+ },
+ .resource = beu1_resources,
+ .num_resources = ARRAY_SIZE(beu1_resources),
+};
+
+static struct sh_timer_config cmt_platform_data = {
+ .channel_offset = 0x60,
+ .timer_bit = 5,
+ .clockevent_rating = 125,
+ .clocksource_rating = 200,
+};
+
+static struct resource cmt_resources[] = {
+ [0] = {
+ .start = 0x044a0060,
+ .end = 0x044a006b,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 104,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device cmt_device = {
+ .name = "sh_cmt",
+ .id = 0,
+ .dev = {
+ .platform_data = &cmt_platform_data,
+ },
+ .resource = cmt_resources,
+ .num_resources = ARRAY_SIZE(cmt_resources),
+};
+
+static struct sh_timer_config tmu0_platform_data = {
+ .channel_offset = 0x04,
+ .timer_bit = 0,
+ .clockevent_rating = 200,
+};
+
+static struct resource tmu0_resources[] = {
+ [0] = {
+ .start = 0xffd80008,
+ .end = 0xffd80013,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 16,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu0_device = {
+ .name = "sh_tmu",
+ .id = 0,
+ .dev = {
+ .platform_data = &tmu0_platform_data,
+ },
+ .resource = tmu0_resources,
+ .num_resources = ARRAY_SIZE(tmu0_resources),
+};
+
+static struct sh_timer_config tmu1_platform_data = {
+ .channel_offset = 0x10,
+ .timer_bit = 1,
+ .clocksource_rating = 200,
+};
+
+static struct resource tmu1_resources[] = {
+ [0] = {
+ .start = 0xffd80014,
+ .end = 0xffd8001f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 17,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu1_device = {
+ .name = "sh_tmu",
+ .id = 1,
+ .dev = {
+ .platform_data = &tmu1_platform_data,
+ },
+ .resource = tmu1_resources,
+ .num_resources = ARRAY_SIZE(tmu1_resources),
+};
+
+static struct sh_timer_config tmu2_platform_data = {
+ .channel_offset = 0x1c,
+ .timer_bit = 2,
+};
+
+static struct resource tmu2_resources[] = {
+ [0] = {
+ .start = 0xffd80020,
+ .end = 0xffd8002b,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 18,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu2_device = {
+ .name = "sh_tmu",
+ .id = 2,
+ .dev = {
+ .platform_data = &tmu2_platform_data,
+ },
+ .resource = tmu2_resources,
+ .num_resources = ARRAY_SIZE(tmu2_resources),
+};
+
+static struct sh_timer_config tmu3_platform_data = {
+ .channel_offset = 0x04,
+ .timer_bit = 0,
+};
+
+static struct resource tmu3_resources[] = {
+ [0] = {
+ .start = 0xffd90008,
+ .end = 0xffd90013,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 57,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu3_device = {
+ .name = "sh_tmu",
+ .id = 3,
+ .dev = {
+ .platform_data = &tmu3_platform_data,
+ },
+ .resource = tmu3_resources,
+ .num_resources = ARRAY_SIZE(tmu3_resources),
+};
+
+static struct sh_timer_config tmu4_platform_data = {
+ .channel_offset = 0x10,
+ .timer_bit = 1,
+};
+
+static struct resource tmu4_resources[] = {
+ [0] = {
+ .start = 0xffd90014,
+ .end = 0xffd9001f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 58,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu4_device = {
+ .name = "sh_tmu",
+ .id = 4,
+ .dev = {
+ .platform_data = &tmu4_platform_data,
+ },
+ .resource = tmu4_resources,
+ .num_resources = ARRAY_SIZE(tmu4_resources),
+};
+
+static struct sh_timer_config tmu5_platform_data = {
+ .channel_offset = 0x1c,
+ .timer_bit = 2,
+};
+
+static struct resource tmu5_resources[] = {
+ [0] = {
+ .start = 0xffd90020,
+ .end = 0xffd9002b,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 59,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu5_device = {
+ .name = "sh_tmu",
+ .id = 5,
+ .dev = {
+ .platform_data = &tmu5_platform_data,
+ },
+ .resource = tmu5_resources,
+ .num_resources = ARRAY_SIZE(tmu5_resources),
+};
+
+/* JPU */
+static struct uio_info jpu_platform_data = {
+ .name = "JPU",
+ .version = "0",
+ .irq = 27,
+};
+
+static struct resource jpu_resources[] = {
+ [0] = {
+ .name = "JPU",
+ .start = 0xfe980000,
+ .end = 0xfe9902d3,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ /* place holder for contiguous memory */
+ },
+};
+
+static struct platform_device jpu_device = {
+ .name = "uio_pdrv_genirq",
+ .id = 3,
+ .dev = {
+ .platform_data = &jpu_platform_data,
+ },
+ .resource = jpu_resources,
+ .num_resources = ARRAY_SIZE(jpu_resources),
+};
+
+/* SPU2DSP0 */
+static struct uio_info spu0_platform_data = {
+ .name = "SPU2DSP0",
+ .version = "0",
+ .irq = 86,
+};
+
+static struct resource spu0_resources[] = {
+ [0] = {
+ .name = "SPU2DSP0",
+ .start = 0xFE200000,
+ .end = 0xFE2FFFFF,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ /* place holder for contiguous memory */
+ },
+};
+
+static struct platform_device spu0_device = {
+ .name = "uio_pdrv_genirq",
+ .id = 4,
+ .dev = {
+ .platform_data = &spu0_platform_data,
+ },
+ .resource = spu0_resources,
+ .num_resources = ARRAY_SIZE(spu0_resources),
+};
+
+/* SPU2DSP1 */
+static struct uio_info spu1_platform_data = {
+ .name = "SPU2DSP1",
+ .version = "0",
+ .irq = 87,
+};
+
+static struct resource spu1_resources[] = {
+ [0] = {
+ .name = "SPU2DSP1",
+ .start = 0xFE300000,
+ .end = 0xFE3FFFFF,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ /* place holder for contiguous memory */
+ },
+};
+
+static struct platform_device spu1_device = {
+ .name = "uio_pdrv_genirq",
+ .id = 5,
+ .dev = {
+ .platform_data = &spu1_platform_data,
+ },
+ .resource = spu1_resources,
+ .num_resources = ARRAY_SIZE(spu1_resources),
+};
+
+static struct platform_device *sh7724_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &scif3_device,
+ &scif4_device,
+ &scif5_device,
+ &cmt_device,
+ &tmu0_device,
+ &tmu1_device,
+ &tmu2_device,
+ &tmu3_device,
+ &tmu4_device,
+ &tmu5_device,
+ &dma0_device,
+ &dma1_device,
+ &rtc_device,
+ &iic0_device,
+ &iic1_device,
+ &vpu_device,
+ &veu0_device,
+ &veu1_device,
+ &beu0_device,
+ &beu1_device,
+ &jpu_device,
+ &spu0_device,
+ &spu1_device,
+};
+
+static int __init sh7724_devices_setup(void)
+{
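+ /* Set up contiguous memory for the UIO device placeholder resources */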
+ platform_resource_setup_memory(&vpu_device, "vpu", 2 << 20);
+ platform_resource_setup_memory(&veu0_device, "veu0", 2 << 20);
+ platform_resource_setup_memory(&veu1_device, "veu1", 2 << 20);
+ platform_resource_setup_memory(&jpu_device, "jpu", 2 << 20);
+ platform_resource_setup_memory(&spu0_device, "spu0", 2 << 20);
+ platform_resource_setup_memory(&spu1_device, "spu1", 2 << 20);
+
+ return platform_add_devices(sh7724_devices,
+ ARRAY_SIZE(sh7724_devices));
+}
+arch_initcall(sh7724_devices_setup);
+
+static struct platform_device *sh7724_early_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &scif3_device,
+ &scif4_device,
+ &scif5_device,
+ &cmt_device,
+ &tmu0_device,
+ &tmu1_device,
+ &tmu2_device,
+ &tmu3_device,
+ &tmu4_device,
+ &tmu5_device,
+};
+
+void __init plat_early_device_setup(void)
+{
+ early_platform_add_devices(sh7724_early_devices,
+ ARRAY_SIZE(sh7724_early_devices));
+}
+
+#define RAMCR_CACHE_L2FC 0x0002
+#define RAMCR_CACHE_L2E 0x0001
+#define L2_CACHE_ENABLE (RAMCR_CACHE_L2E|RAMCR_CACHE_L2FC)
+
+void l2_cache_init(void)
+{
+ /* Enable L2 cache */
+ __raw_writel(L2_CACHE_ENABLE, RAMCR);
+}
+
+enum {
+ UNUSED = 0,
+ ENABLED,
+ DISABLED,
+
+ /* interrupt sources */
+ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
+ HUDI,
+ DMAC1A_DEI0, DMAC1A_DEI1, DMAC1A_DEI2, DMAC1A_DEI3,
+ _2DG_TRI, _2DG_INI, _2DG_CEI,
+ DMAC0A_DEI0, DMAC0A_DEI1, DMAC0A_DEI2, DMAC0A_DEI3,
+ VIO_CEU0, VIO_BEU0, VIO_VEU1, VIO_VOU,
+ SCIFA3,
+ VPU,
+ TPU,
+ CEU1,
+ BEU1,
+ USB0, USB1,
+ ATAPI,
+ RTC_ATI, RTC_PRI, RTC_CUI,
+ DMAC1B_DEI4, DMAC1B_DEI5, DMAC1B_DADERR,
+ DMAC0B_DEI4, DMAC0B_DEI5, DMAC0B_DADERR,
+ KEYSC,
+ SCIF_SCIF0, SCIF_SCIF1, SCIF_SCIF2,
+ VEU0,
+ MSIOF_MSIOFI0, MSIOF_MSIOFI1,
+ SPU_SPUI0, SPU_SPUI1,
+ SCIFA4,
+ ICB,
+ ETHI,
+ I2C1_ALI, I2C1_TACKI, I2C1_WAITI, I2C1_DTEI,
+ I2C0_ALI, I2C0_TACKI, I2C0_WAITI, I2C0_DTEI,
+ CMT,
+ TSIF,
+ FSI,
+ SCIFA5,
+ TMU0_TUNI0, TMU0_TUNI1, TMU0_TUNI2,
+ IRDA,
+ JPU,
+ _2DDMAC,
+ MMC_MMC2I, MMC_MMC3I,
+ LCDC,
+ TMU1_TUNI0, TMU1_TUNI1, TMU1_TUNI2,
+
+ /* interrupt groups */
+ DMAC1A, _2DG, DMAC0A, VIO, USB, RTC,
+ DMAC1B, DMAC0B, I2C0, I2C1, SDHI0, SDHI1, SPU, MMCIF,
+};
+
+static struct intc_vect vectors[] __initdata = {
+ INTC_VECT(IRQ0, 0x600), INTC_VECT(IRQ1, 0x620),
+ INTC_VECT(IRQ2, 0x640), INTC_VECT(IRQ3, 0x660),
+ INTC_VECT(IRQ4, 0x680), INTC_VECT(IRQ5, 0x6a0),
+ INTC_VECT(IRQ6, 0x6c0), INTC_VECT(IRQ7, 0x6e0),
+
+ INTC_VECT(DMAC1A_DEI0, 0x700),
+ INTC_VECT(DMAC1A_DEI1, 0x720),
+ INTC_VECT(DMAC1A_DEI2, 0x740),
+ INTC_VECT(DMAC1A_DEI3, 0x760),
+
+ INTC_VECT(_2DG_TRI, 0x780),
+ INTC_VECT(_2DG_INI, 0x7A0),
+ INTC_VECT(_2DG_CEI, 0x7C0),
+
+ INTC_VECT(DMAC0A_DEI0, 0x800),
+ INTC_VECT(DMAC0A_DEI1, 0x820),
+ INTC_VECT(DMAC0A_DEI2, 0x840),
+ INTC_VECT(DMAC0A_DEI3, 0x860),
+
+ INTC_VECT(VIO_CEU0, 0x880),
+ INTC_VECT(VIO_BEU0, 0x8A0),
+ INTC_VECT(VIO_VEU1, 0x8C0),
+ INTC_VECT(VIO_VOU, 0x8E0),
+
+ INTC_VECT(SCIFA3, 0x900),
+ INTC_VECT(VPU, 0x980),
+ INTC_VECT(TPU, 0x9A0),
+ INTC_VECT(CEU1, 0x9E0),
+ INTC_VECT(BEU1, 0xA00),
+ INTC_VECT(USB0, 0xA20),
+ INTC_VECT(USB1, 0xA40),
+ INTC_VECT(ATAPI, 0xA60),
+
+ INTC_VECT(RTC_ATI, 0xA80),
+ INTC_VECT(RTC_PRI, 0xAA0),
+ INTC_VECT(RTC_CUI, 0xAC0),
+
+ INTC_VECT(DMAC1B_DEI4, 0xB00),
+ INTC_VECT(DMAC1B_DEI5, 0xB20),
+ INTC_VECT(DMAC1B_DADERR, 0xB40),
+
+ INTC_VECT(DMAC0B_DEI4, 0xB80),
+ INTC_VECT(DMAC0B_DEI5, 0xBA0),
+ INTC_VECT(DMAC0B_DADERR, 0xBC0),
+
+ INTC_VECT(KEYSC, 0xBE0),
+ INTC_VECT(SCIF_SCIF0, 0xC00),
+ INTC_VECT(SCIF_SCIF1, 0xC20),
+ INTC_VECT(SCIF_SCIF2, 0xC40),
+ INTC_VECT(VEU0, 0xC60),
+ INTC_VECT(MSIOF_MSIOFI0, 0xC80),
+ INTC_VECT(MSIOF_MSIOFI1, 0xCA0),
+ INTC_VECT(SPU_SPUI0, 0xCC0),
+ INTC_VECT(SPU_SPUI1, 0xCE0),
+ INTC_VECT(SCIFA4, 0xD00),
+
+ INTC_VECT(ICB, 0xD20),
+ INTC_VECT(ETHI, 0xD60),
+
+ INTC_VECT(I2C1_ALI, 0xD80),
+ INTC_VECT(I2C1_TACKI, 0xDA0),
+ INTC_VECT(I2C1_WAITI, 0xDC0),
+ INTC_VECT(I2C1_DTEI, 0xDE0),
+
+ INTC_VECT(I2C0_ALI, 0xE00),
+ INTC_VECT(I2C0_TACKI, 0xE20),
+ INTC_VECT(I2C0_WAITI, 0xE40),
+ INTC_VECT(I2C0_DTEI, 0xE60),
+
+ INTC_VECT(SDHI0, 0xE80),
+ INTC_VECT(SDHI0, 0xEA0),
+ INTC_VECT(SDHI0, 0xEC0),
+ INTC_VECT(SDHI0, 0xEE0),
+
+ INTC_VECT(CMT, 0xF00),
+ INTC_VECT(TSIF, 0xF20),
+ INTC_VECT(FSI, 0xF80),
+ INTC_VECT(SCIFA5, 0xFA0),
+
+ INTC_VECT(TMU0_TUNI0, 0x400),
+ INTC_VECT(TMU0_TUNI1, 0x420),
+ INTC_VECT(TMU0_TUNI2, 0x440),
+
+ INTC_VECT(IRDA, 0x480),
+
+ INTC_VECT(SDHI1, 0x4E0),
+ INTC_VECT(SDHI1, 0x500),
+ INTC_VECT(SDHI1, 0x520),
+
+ INTC_VECT(JPU, 0x560),
+ INTC_VECT(_2DDMAC, 0x4A0),
+
+ INTC_VECT(MMC_MMC2I, 0x5A0),
+ INTC_VECT(MMC_MMC3I, 0x5C0),
+
+ INTC_VECT(LCDC, 0xF40),
+
+ INTC_VECT(TMU1_TUNI0, 0x920),
+ INTC_VECT(TMU1_TUNI1, 0x940),
+ INTC_VECT(TMU1_TUNI2, 0x960),
+};
+
+static struct intc_group groups[] __initdata = {
+ INTC_GROUP(DMAC1A, DMAC1A_DEI0, DMAC1A_DEI1, DMAC1A_DEI2, DMAC1A_DEI3),
+ INTC_GROUP(_2DG, _2DG_TRI, _2DG_INI, _2DG_CEI),
+ INTC_GROUP(DMAC0A, DMAC0A_DEI0, DMAC0A_DEI1, DMAC0A_DEI2, DMAC0A_DEI3),
+ INTC_GROUP(VIO, VIO_CEU0, VIO_BEU0, VIO_VEU1, VIO_VOU),
+ INTC_GROUP(USB, USB0, USB1),
+ INTC_GROUP(RTC, RTC_ATI, RTC_PRI, RTC_CUI),
+ INTC_GROUP(DMAC1B, DMAC1B_DEI4, DMAC1B_DEI5, DMAC1B_DADERR),
+ INTC_GROUP(DMAC0B, DMAC0B_DEI4, DMAC0B_DEI5, DMAC0B_DADERR),
+ INTC_GROUP(I2C0, I2C0_ALI, I2C0_TACKI, I2C0_WAITI, I2C0_DTEI),
+ INTC_GROUP(I2C1, I2C1_ALI, I2C1_TACKI, I2C1_WAITI, I2C1_DTEI),
+ INTC_GROUP(SPU, SPU_SPUI0, SPU_SPUI1),
+ INTC_GROUP(MMCIF, MMC_MMC2I, MMC_MMC3I),
+};
+
+static struct intc_mask_reg mask_registers[] __initdata = {
+ { 0xa4080080, 0xa40800c0, 8, /* IMR0 / IMCR0 */
+ { 0, TMU1_TUNI2, TMU1_TUNI1, TMU1_TUNI0,
+ 0, ENABLED, ENABLED, ENABLED } },
+ { 0xa4080084, 0xa40800c4, 8, /* IMR1 / IMCR1 */
+ { VIO_VOU, VIO_VEU1, VIO_BEU0, VIO_CEU0,
+ DMAC0A_DEI3, DMAC0A_DEI2, DMAC0A_DEI1, DMAC0A_DEI0 } },
+ { 0xa4080088, 0xa40800c8, 8, /* IMR2 / IMCR2 */
+ { 0, 0, 0, VPU, ATAPI, ETHI, 0, SCIFA3 } },
+ { 0xa408008c, 0xa40800cc, 8, /* IMR3 / IMCR3 */
+ { DMAC1A_DEI3, DMAC1A_DEI2, DMAC1A_DEI1, DMAC1A_DEI0,
+ SPU_SPUI1, SPU_SPUI0, BEU1, IRDA } },
+ { 0xa4080090, 0xa40800d0, 8, /* IMR4 / IMCR4 */
+ { 0, TMU0_TUNI2, TMU0_TUNI1, TMU0_TUNI0,
+ JPU, 0, 0, LCDC } },
+ { 0xa4080094, 0xa40800d4, 8, /* IMR5 / IMCR5 */
+ { KEYSC, DMAC0B_DADERR, DMAC0B_DEI5, DMAC0B_DEI4,
+ VEU0, SCIF_SCIF2, SCIF_SCIF1, SCIF_SCIF0 } },
+ { 0xa4080098, 0xa40800d8, 8, /* IMR6 / IMCR6 */
+ { 0, 0, ICB, SCIFA4,
+ CEU1, 0, MSIOF_MSIOFI1, MSIOF_MSIOFI0 } },
+ { 0xa408009c, 0xa40800dc, 8, /* IMR7 / IMCR7 */
+ { I2C0_DTEI, I2C0_WAITI, I2C0_TACKI, I2C0_ALI,
+ I2C1_DTEI, I2C1_WAITI, I2C1_TACKI, I2C1_ALI } },
+ { 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */
+ { DISABLED, ENABLED, ENABLED, ENABLED,
+ 0, 0, SCIFA5, FSI } },
+ { 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */
+ { 0, 0, 0, CMT, 0, USB1, USB0, 0 } },
+ { 0xa40800a8, 0xa40800e8, 8, /* IMR10 / IMCR10 */
+ { 0, DMAC1B_DADERR, DMAC1B_DEI5, DMAC1B_DEI4,
+ 0, RTC_CUI, RTC_PRI, RTC_ATI } },
+ { 0xa40800ac, 0xa40800ec, 8, /* IMR11 / IMCR11 */
+ { 0, _2DG_CEI, _2DG_INI, _2DG_TRI,
+ 0, TPU, 0, TSIF } },
+ { 0xa40800b0, 0xa40800f0, 8, /* IMR12 / IMCR12 */
+ { 0, 0, MMC_MMC3I, MMC_MMC2I, 0, 0, 0, _2DDMAC } },
+ { 0xa4140044, 0xa4140064, 8, /* INTMSK00 / INTMSKCLR00 */
+ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
+};
+
+static struct intc_prio_reg prio_registers[] __initdata = {
+ { 0xa4080000, 0, 16, 4, /* IPRA */ { TMU0_TUNI0, TMU0_TUNI1,
+ TMU0_TUNI2, IRDA } },
+ { 0xa4080004, 0, 16, 4, /* IPRB */ { JPU, LCDC, DMAC1A, BEU1 } },
+ { 0xa4080008, 0, 16, 4, /* IPRC */ { TMU1_TUNI0, TMU1_TUNI1,
+ TMU1_TUNI2, SPU } },
+ { 0xa408000c, 0, 16, 4, /* IPRD */ { 0, MMCIF, 0, ATAPI } },
+ { 0xa4080010, 0, 16, 4, /* IPRE */ { DMAC0A, VIO, SCIFA3, VPU } },
+ { 0xa4080014, 0, 16, 4, /* IPRF */ { KEYSC, DMAC0B, USB, CMT } },
+ { 0xa4080018, 0, 16, 4, /* IPRG */ { SCIF_SCIF0, SCIF_SCIF1,
+ SCIF_SCIF2, VEU0 } },
+ { 0xa408001c, 0, 16, 4, /* IPRH */ { MSIOF_MSIOFI0, MSIOF_MSIOFI1,
+ I2C1, I2C0 } },
+ { 0xa4080020, 0, 16, 4, /* IPRI */ { SCIFA4, ICB, TSIF, _2DG } },
+ { 0xa4080024, 0, 16, 4, /* IPRJ */ { CEU1, ETHI, FSI, SDHI1 } },
+ { 0xa4080028, 0, 16, 4, /* IPRK */ { RTC, DMAC1B, 0, SDHI0 } },
+ { 0xa408002c, 0, 16, 4, /* IPRL */ { SCIFA5, 0, TPU, _2DDMAC } },
+ { 0xa4140010, 0, 32, 4, /* INTPRI00 */
+ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
+};
+
+static struct intc_sense_reg sense_registers[] __initdata = {
+ { 0xa414001c, 16, 2, /* ICR1 */
+ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
+};
+
+static struct intc_mask_reg ack_registers[] __initdata = {
+ { 0xa4140024, 0, 8, /* INTREQ00 */
+ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
+};
+
+static struct intc_desc intc_desc __initdata = {
+ .name = "sh7724",
+ .force_enable = ENABLED,
+ .force_disable = DISABLED,
+ .hw = INTC_HW_DESC(vectors, groups, mask_registers,
+ prio_registers, sense_registers, ack_registers),
+};
+
+void __init plat_irq_setup(void)
+{
+ register_intc_controller(&intc_desc);
+}
+
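+/*
+ * Register state saved by the pre-sleep notifier and restored by the
+ * post-sleep notifier around R-standby.
+ */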
+static struct {
+ /* BSC */
+ unsigned long mmselr;
+ unsigned long cs0bcr;
+ unsigned long cs4bcr;
+ unsigned long cs5abcr;
+ unsigned long cs5bbcr;
+ unsigned long cs6abcr;
+ unsigned long cs6bbcr;
+ unsigned long cs4wcr;
+ unsigned long cs5awcr;
+ unsigned long cs5bwcr;
+ unsigned long cs6awcr;
+ unsigned long cs6bwcr;
+ /* INTC */
+ unsigned short ipra;
+ unsigned short iprb;
+ unsigned short iprc;
+ unsigned short iprd;
+ unsigned short ipre;
+ unsigned short iprf;
+ unsigned short iprg;
+ unsigned short iprh;
+ unsigned short ipri;
+ unsigned short iprj;
+ unsigned short iprk;
+ unsigned short iprl;
+ unsigned char imr0;
+ unsigned char imr1;
+ unsigned char imr2;
+ unsigned char imr3;
+ unsigned char imr4;
+ unsigned char imr5;
+ unsigned char imr6;
+ unsigned char imr7;
+ unsigned char imr8;
+ unsigned char imr9;
+ unsigned char imr10;
+ unsigned char imr11;
+ unsigned char imr12;
+ /* RWDT */
+ unsigned short rwtcnt;
+ unsigned short rwtcsr;
+ /* CPG */
+ unsigned long irdaclk;
+ unsigned long spuclk;
+} sh7724_rstandby_state;
+
+static int sh7724_pre_sleep_notifier_call(struct notifier_block *nb,
+ unsigned long flags, void *unused)
+{
+ if (!(flags & SUSP_SH_RSTANDBY))
+ return NOTIFY_DONE;
+
+ /* BCR */
+ sh7724_rstandby_state.mmselr = __raw_readl(0xff800020); /* MMSELR */
+ sh7724_rstandby_state.mmselr |= 0xa5a50000;
+ sh7724_rstandby_state.cs0bcr = __raw_readl(0xfec10004); /* CS0BCR */
+ sh7724_rstandby_state.cs4bcr = __raw_readl(0xfec10010); /* CS4BCR */
+ sh7724_rstandby_state.cs5abcr = __raw_readl(0xfec10014); /* CS5ABCR */
+ sh7724_rstandby_state.cs5bbcr = __raw_readl(0xfec10018); /* CS5BBCR */
+ sh7724_rstandby_state.cs6abcr = __raw_readl(0xfec1001c); /* CS6ABCR */
+ sh7724_rstandby_state.cs6bbcr = __raw_readl(0xfec10020); /* CS6BBCR */
+ sh7724_rstandby_state.cs4wcr = __raw_readl(0xfec10030); /* CS4WCR */
+ sh7724_rstandby_state.cs5awcr = __raw_readl(0xfec10034); /* CS5AWCR */
+ sh7724_rstandby_state.cs5bwcr = __raw_readl(0xfec10038); /* CS5BWCR */
+ sh7724_rstandby_state.cs6awcr = __raw_readl(0xfec1003c); /* CS6AWCR */
+ sh7724_rstandby_state.cs6bwcr = __raw_readl(0xfec10040); /* CS6BWCR */
+
+ /* INTC */
+ sh7724_rstandby_state.ipra = __raw_readw(0xa4080000); /* IPRA */
+ sh7724_rstandby_state.iprb = __raw_readw(0xa4080004); /* IPRB */
+ sh7724_rstandby_state.iprc = __raw_readw(0xa4080008); /* IPRC */
+ sh7724_rstandby_state.iprd = __raw_readw(0xa408000c); /* IPRD */
+ sh7724_rstandby_state.ipre = __raw_readw(0xa4080010); /* IPRE */
+ sh7724_rstandby_state.iprf = __raw_readw(0xa4080014); /* IPRF */
+ sh7724_rstandby_state.iprg = __raw_readw(0xa4080018); /* IPRG */
+ sh7724_rstandby_state.iprh = __raw_readw(0xa408001c); /* IPRH */
+ sh7724_rstandby_state.ipri = __raw_readw(0xa4080020); /* IPRI */
+ sh7724_rstandby_state.iprj = __raw_readw(0xa4080024); /* IPRJ */
+ sh7724_rstandby_state.iprk = __raw_readw(0xa4080028); /* IPRK */
+ sh7724_rstandby_state.iprl = __raw_readw(0xa408002c); /* IPRL */
+ sh7724_rstandby_state.imr0 = __raw_readb(0xa4080080); /* IMR0 */
+ sh7724_rstandby_state.imr1 = __raw_readb(0xa4080084); /* IMR1 */
+ sh7724_rstandby_state.imr2 = __raw_readb(0xa4080088); /* IMR2 */
+ sh7724_rstandby_state.imr3 = __raw_readb(0xa408008c); /* IMR3 */
+ sh7724_rstandby_state.imr4 = __raw_readb(0xa4080090); /* IMR4 */
+ sh7724_rstandby_state.imr5 = __raw_readb(0xa4080094); /* IMR5 */
+ sh7724_rstandby_state.imr6 = __raw_readb(0xa4080098); /* IMR6 */
+ sh7724_rstandby_state.imr7 = __raw_readb(0xa408009c); /* IMR7 */
+ sh7724_rstandby_state.imr8 = __raw_readb(0xa40800a0); /* IMR8 */
+ sh7724_rstandby_state.imr9 = __raw_readb(0xa40800a4); /* IMR9 */
+ sh7724_rstandby_state.imr10 = __raw_readb(0xa40800a8); /* IMR10 */
+ sh7724_rstandby_state.imr11 = __raw_readb(0xa40800ac); /* IMR11 */
+ sh7724_rstandby_state.imr12 = __raw_readb(0xa40800b0); /* IMR12 */
+
+ /* RWDT */
+ sh7724_rstandby_state.rwtcnt = __raw_readb(0xa4520000); /* RWTCNT */
+ sh7724_rstandby_state.rwtcnt |= 0x5a00;
+ sh7724_rstandby_state.rwtcsr = __raw_readb(0xa4520004); /* RWTCSR */
+ sh7724_rstandby_state.rwtcsr |= 0xa500;
+ __raw_writew(sh7724_rstandby_state.rwtcsr & 0x07, 0xa4520004);
+
+ /* CPG */
+ sh7724_rstandby_state.irdaclk = __raw_readl(0xa4150018); /* IRDACLKCR */
+ sh7724_rstandby_state.spuclk = __raw_readl(0xa415003c); /* SPUCLKCR */
+
+ return NOTIFY_DONE;
+}
+
+static int sh7724_post_sleep_notifier_call(struct notifier_block *nb,
+ unsigned long flags, void *unused)
+{
+ if (!(flags & SUSP_SH_RSTANDBY))
+ return NOTIFY_DONE;
+
+ /* BCR */
+ __raw_writel(sh7724_rstandby_state.mmselr, 0xff800020); /* MMSELR */
+ __raw_writel(sh7724_rstandby_state.cs0bcr, 0xfec10004); /* CS0BCR */
+ __raw_writel(sh7724_rstandby_state.cs4bcr, 0xfec10010); /* CS4BCR */
+ __raw_writel(sh7724_rstandby_state.cs5abcr, 0xfec10014); /* CS5ABCR */
+ __raw_writel(sh7724_rstandby_state.cs5bbcr, 0xfec10018); /* CS5BBCR */
+ __raw_writel(sh7724_rstandby_state.cs6abcr, 0xfec1001c); /* CS6ABCR */
+ __raw_writel(sh7724_rstandby_state.cs6bbcr, 0xfec10020); /* CS6BBCR */
+ __raw_writel(sh7724_rstandby_state.cs4wcr, 0xfec10030); /* CS4WCR */
+ __raw_writel(sh7724_rstandby_state.cs5awcr, 0xfec10034); /* CS5AWCR */
+ __raw_writel(sh7724_rstandby_state.cs5bwcr, 0xfec10038); /* CS5BWCR */
+ __raw_writel(sh7724_rstandby_state.cs6awcr, 0xfec1003c); /* CS6AWCR */
+ __raw_writel(sh7724_rstandby_state.cs6bwcr, 0xfec10040); /* CS6BWCR */
+
+ /* INTC */
+ __raw_writew(sh7724_rstandby_state.ipra, 0xa4080000); /* IPRA */
+ __raw_writew(sh7724_rstandby_state.iprb, 0xa4080004); /* IPRB */
+ __raw_writew(sh7724_rstandby_state.iprc, 0xa4080008); /* IPRC */
+ __raw_writew(sh7724_rstandby_state.iprd, 0xa408000c); /* IPRD */
+ __raw_writew(sh7724_rstandby_state.ipre, 0xa4080010); /* IPRE */
+ __raw_writew(sh7724_rstandby_state.iprf, 0xa4080014); /* IPRF */
+ __raw_writew(sh7724_rstandby_state.iprg, 0xa4080018); /* IPRG */
+ __raw_writew(sh7724_rstandby_state.iprh, 0xa408001c); /* IPRH */
+ __raw_writew(sh7724_rstandby_state.ipri, 0xa4080020); /* IPRI */
+ __raw_writew(sh7724_rstandby_state.iprj, 0xa4080024); /* IPRJ */
+ __raw_writew(sh7724_rstandby_state.iprk, 0xa4080028); /* IPRK */
+ __raw_writew(sh7724_rstandby_state.iprl, 0xa408002c); /* IPRL */
+ __raw_writeb(sh7724_rstandby_state.imr0, 0xa4080080); /* IMR0 */
+ __raw_writeb(sh7724_rstandby_state.imr1, 0xa4080084); /* IMR1 */
+ __raw_writeb(sh7724_rstandby_state.imr2, 0xa4080088); /* IMR2 */
+ __raw_writeb(sh7724_rstandby_state.imr3, 0xa408008c); /* IMR3 */
+ __raw_writeb(sh7724_rstandby_state.imr4, 0xa4080090); /* IMR4 */
+ __raw_writeb(sh7724_rstandby_state.imr5, 0xa4080094); /* IMR5 */
+ __raw_writeb(sh7724_rstandby_state.imr6, 0xa4080098); /* IMR6 */
+ __raw_writeb(sh7724_rstandby_state.imr7, 0xa408009c); /* IMR7 */
+ __raw_writeb(sh7724_rstandby_state.imr8, 0xa40800a0); /* IMR8 */
+ __raw_writeb(sh7724_rstandby_state.imr9, 0xa40800a4); /* IMR9 */
+ __raw_writeb(sh7724_rstandby_state.imr10, 0xa40800a8); /* IMR10 */
+ __raw_writeb(sh7724_rstandby_state.imr11, 0xa40800ac); /* IMR11 */
+ __raw_writeb(sh7724_rstandby_state.imr12, 0xa40800b0); /* IMR12 */
+
+ /* RWDT */
+ __raw_writew(sh7724_rstandby_state.rwtcnt, 0xa4520000); /* RWTCNT */
+ __raw_writew(sh7724_rstandby_state.rwtcsr, 0xa4520004); /* RWTCSR */
+
+ /* CPG */
+ __raw_writel(sh7724_rstandby_state.irdaclk, 0xa4150018); /* IRDACLKCR */
+ __raw_writel(sh7724_rstandby_state.spuclk, 0xa415003c); /* SPUCLKCR */
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block sh7724_pre_sleep_notifier = {
+ .notifier_call = sh7724_pre_sleep_notifier_call,
+ .priority = SH_MOBILE_PRE(SH_MOBILE_SLEEP_CPU),
+};
+
+static struct notifier_block sh7724_post_sleep_notifier = {
+ .notifier_call = sh7724_post_sleep_notifier_call,
+ .priority = SH_MOBILE_POST(SH_MOBILE_SLEEP_CPU),
+};
+
+static int __init sh7724_sleep_setup(void)
+{
+ atomic_notifier_chain_register(&sh_mobile_pre_sleep_notifier_list,
+ &sh7724_pre_sleep_notifier);
+
+ atomic_notifier_chain_register(&sh_mobile_post_sleep_notifier_list,
+ &sh7724_post_sleep_notifier);
+ return 0;
+}
+arch_initcall(sh7724_sleep_setup);
+
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7757.c b/arch/sh/kernel/cpu/sh4a/setup-sh7757.c
new file mode 100644
index 00000000..c8836cff
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7757.c
@@ -0,0 +1,1255 @@
+/*
+ * SH7757 Setup
+ *
+ * Copyright (C) 2009, 2011 Renesas Solutions Corp.
+ *
+ * based on setup-sh7785.c : Copyright (C) 2007 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/serial.h>
+#include <linux/serial_sci.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+#include <linux/sh_timer.h>
+#include <linux/sh_dma.h>
+
+#include <cpu/dma-register.h>
+#include <cpu/sh7757.h>
+
+static struct plat_sci_port scif2_platform_data = {
+ .mapbase = 0xfe4b0000, /* SCIF2 */
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .scbrr_algo_id = SCBRR_ALGO_2,
+ .type = PORT_SCIF,
+ .irqs = { 40, 40, 40, 40 },
+};
+
+static struct platform_device scif2_device = {
+ .name = "sh-sci",
+ .id = 0,
+ .dev = {
+ .platform_data = &scif2_platform_data,
+ },
+};
+
+static struct plat_sci_port scif3_platform_data = {
+ .mapbase = 0xfe4c0000, /* SCIF3 */
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .scbrr_algo_id = SCBRR_ALGO_2,
+ .type = PORT_SCIF,
+ .irqs = { 76, 76, 76, 76 },
+};
+
+static struct platform_device scif3_device = {
+ .name = "sh-sci",
+ .id = 1,
+ .dev = {
+ .platform_data = &scif3_platform_data,
+ },
+};
+
+static struct plat_sci_port scif4_platform_data = {
+ .mapbase = 0xfe4d0000, /* SCIF4 */
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .scbrr_algo_id = SCBRR_ALGO_2,
+ .type = PORT_SCIF,
+ .irqs = { 104, 104, 104, 104 },
+};
+
+static struct platform_device scif4_device = {
+ .name = "sh-sci",
+ .id = 2,
+ .dev = {
+ .platform_data = &scif4_platform_data,
+ },
+};
+
+static struct sh_timer_config tmu0_platform_data = {
+ .channel_offset = 0x04,
+ .timer_bit = 0,
+ .clockevent_rating = 200,
+};
+
+static struct resource tmu0_resources[] = {
+ [0] = {
+ .start = 0xfe430008,
+ .end = 0xfe430013,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 28,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu0_device = {
+ .name = "sh_tmu",
+ .id = 0,
+ .dev = {
+ .platform_data = &tmu0_platform_data,
+ },
+ .resource = tmu0_resources,
+ .num_resources = ARRAY_SIZE(tmu0_resources),
+};
+
+static struct sh_timer_config tmu1_platform_data = {
+ .channel_offset = 0x10,
+ .timer_bit = 1,
+ .clocksource_rating = 200,
+};
+
+static struct resource tmu1_resources[] = {
+ [0] = {
+ .start = 0xfe430014,
+ .end = 0xfe43001f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 29,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu1_device = {
+ .name = "sh_tmu",
+ .id = 1,
+ .dev = {
+ .platform_data = &tmu1_platform_data,
+ },
+ .resource = tmu1_resources,
+ .num_resources = ARRAY_SIZE(tmu1_resources),
+};
+
+static struct resource spi0_resources[] = {
+ [0] = {
+ .start = 0xfe002000,
+ .end = 0xfe0020ff,
+ .flags = IORESOURCE_MEM | IORESOURCE_MEM_32BIT,
+ },
+ [1] = {
+ .start = 86,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+/* DMA */
+static const struct sh_dmae_slave_config sh7757_dmae0_slaves[] = {
+ {
+ .slave_id = SHDMA_SLAVE_SDHI_TX,
+ .addr = 0x1fe50030,
+ .chcr = SM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_16BIT),
+ .mid_rid = 0xc5,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_SDHI_RX,
+ .addr = 0x1fe50030,
+ .chcr = DM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_16BIT),
+ .mid_rid = 0xc6,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_MMCIF_TX,
+ .addr = 0x1fcb0034,
+ .chcr = SM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_32BIT),
+ .mid_rid = 0xd3,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_MMCIF_RX,
+ .addr = 0x1fcb0034,
+ .chcr = DM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_32BIT),
+ .mid_rid = 0xd7,
+ },
+};
+
+static const struct sh_dmae_slave_config sh7757_dmae1_slaves[] = {
+ {
+ .slave_id = SHDMA_SLAVE_SCIF2_TX,
+ .addr = 0x1f4b000c,
+ .chcr = SM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x21,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_SCIF2_RX,
+ .addr = 0x1f4b0014,
+ .chcr = DM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x22,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_SCIF3_TX,
+ .addr = 0x1f4c000c,
+ .chcr = SM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x29,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_SCIF3_RX,
+ .addr = 0x1f4c0014,
+ .chcr = DM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x2a,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_SCIF4_TX,
+ .addr = 0x1f4d000c,
+ .chcr = SM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x41,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_SCIF4_RX,
+ .addr = 0x1f4d0014,
+ .chcr = DM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x42,
+ },
+};
+
+static const struct sh_dmae_slave_config sh7757_dmae2_slaves[] = {
+ {
+ .slave_id = SHDMA_SLAVE_RIIC0_TX,
+ .addr = 0x1e500012,
+ .chcr = SM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x21,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_RIIC0_RX,
+ .addr = 0x1e500013,
+ .chcr = DM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x22,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_RIIC1_TX,
+ .addr = 0x1e510012,
+ .chcr = SM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x29,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_RIIC1_RX,
+ .addr = 0x1e510013,
+ .chcr = DM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x2a,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_RIIC2_TX,
+ .addr = 0x1e520012,
+ .chcr = SM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0xa1,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_RIIC2_RX,
+ .addr = 0x1e520013,
+ .chcr = DM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0xa2,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_RIIC3_TX,
+ .addr = 0x1e530012,
+ .chcr = SM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0xa9,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_RIIC3_RX,
+ .addr = 0x1e530013,
+ .chcr = DM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0xaf,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_RIIC4_TX,
+ .addr = 0x1e540012,
+ .chcr = SM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0xc5,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_RIIC4_RX,
+ .addr = 0x1e540013,
+ .chcr = DM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0xc6,
+ },
+};
+
+static const struct sh_dmae_slave_config sh7757_dmae3_slaves[] = {
+ {
+ .slave_id = SHDMA_SLAVE_RIIC5_TX,
+ .addr = 0x1e550012,
+ .chcr = SM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x21,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_RIIC5_RX,
+ .addr = 0x1e550013,
+ .chcr = DM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x22,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_RIIC6_TX,
+ .addr = 0x1e560012,
+ .chcr = SM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x29,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_RIIC6_RX,
+ .addr = 0x1e560013,
+ .chcr = DM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x2a,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_RIIC7_TX,
+ .addr = 0x1e570012,
+ .chcr = SM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x41,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_RIIC7_RX,
+ .addr = 0x1e570013,
+ .chcr = DM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x42,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_RIIC8_TX,
+ .addr = 0x1e580012,
+ .chcr = SM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x45,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_RIIC8_RX,
+ .addr = 0x1e580013,
+ .chcr = DM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x46,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_RIIC9_TX,
+ .addr = 0x1e590012,
+ .chcr = SM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x51,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_RIIC9_RX,
+ .addr = 0x1e590013,
+ .chcr = DM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x52,
+ },
+};
+
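+/* Channel register layout shared by all four DMAC controllers */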
+static const struct sh_dmae_channel sh7757_dmae_channels[] = {
+ {
+ .offset = 0,
+ .dmars = 0,
+ .dmars_bit = 0,
+ }, {
+ .offset = 0x10,
+ .dmars = 0,
+ .dmars_bit = 8,
+ }, {
+ .offset = 0x20,
+ .dmars = 4,
+ .dmars_bit = 0,
+ }, {
+ .offset = 0x30,
+ .dmars = 4,
+ .dmars_bit = 8,
+ }, {
+ .offset = 0x50,
+ .dmars = 8,
+ .dmars_bit = 0,
+ }, {
+ .offset = 0x60,
+ .dmars = 8,
+ .dmars_bit = 8,
+ }
+};
+
+static const unsigned int ts_shift[] = TS_SHIFT;
+
+static struct sh_dmae_pdata dma0_platform_data = {
+ .slave = sh7757_dmae0_slaves,
+ .slave_num = ARRAY_SIZE(sh7757_dmae0_slaves),
+ .channel = sh7757_dmae_channels,
+ .channel_num = ARRAY_SIZE(sh7757_dmae_channels),
+ .ts_low_shift = CHCR_TS_LOW_SHIFT,
+ .ts_low_mask = CHCR_TS_LOW_MASK,
+ .ts_high_shift = CHCR_TS_HIGH_SHIFT,
+ .ts_high_mask = CHCR_TS_HIGH_MASK,
+ .ts_shift = ts_shift,
+ .ts_shift_num = ARRAY_SIZE(ts_shift),
+ .dmaor_init = DMAOR_INIT,
+};
+
+static struct sh_dmae_pdata dma1_platform_data = {
+ .slave = sh7757_dmae1_slaves,
+ .slave_num = ARRAY_SIZE(sh7757_dmae1_slaves),
+ .channel = sh7757_dmae_channels,
+ .channel_num = ARRAY_SIZE(sh7757_dmae_channels),
+ .ts_low_shift = CHCR_TS_LOW_SHIFT,
+ .ts_low_mask = CHCR_TS_LOW_MASK,
+ .ts_high_shift = CHCR_TS_HIGH_SHIFT,
+ .ts_high_mask = CHCR_TS_HIGH_MASK,
+ .ts_shift = ts_shift,
+ .ts_shift_num = ARRAY_SIZE(ts_shift),
+ .dmaor_init = DMAOR_INIT,
+};
+
+static struct sh_dmae_pdata dma2_platform_data = {
+ .slave = sh7757_dmae2_slaves,
+ .slave_num = ARRAY_SIZE(sh7757_dmae2_slaves),
+ .channel = sh7757_dmae_channels,
+ .channel_num = ARRAY_SIZE(sh7757_dmae_channels),
+ .ts_low_shift = CHCR_TS_LOW_SHIFT,
+ .ts_low_mask = CHCR_TS_LOW_MASK,
+ .ts_high_shift = CHCR_TS_HIGH_SHIFT,
+ .ts_high_mask = CHCR_TS_HIGH_MASK,
+ .ts_shift = ts_shift,
+ .ts_shift_num = ARRAY_SIZE(ts_shift),
+ .dmaor_init = DMAOR_INIT,
+};
+
+static struct sh_dmae_pdata dma3_platform_data = {
+ .slave = sh7757_dmae3_slaves,
+ .slave_num = ARRAY_SIZE(sh7757_dmae3_slaves),
+ .channel = sh7757_dmae_channels,
+ .channel_num = ARRAY_SIZE(sh7757_dmae_channels),
+ .ts_low_shift = CHCR_TS_LOW_SHIFT,
+ .ts_low_mask = CHCR_TS_LOW_MASK,
+ .ts_high_shift = CHCR_TS_HIGH_SHIFT,
+ .ts_high_mask = CHCR_TS_HIGH_MASK,
+ .ts_shift = ts_shift,
+ .ts_shift_num = ARRAY_SIZE(ts_shift),
+ .dmaor_init = DMAOR_INIT,
+};
+
+/* channels 0 to 5 */
+static struct resource sh7757_dmae0_resources[] = {
+ [0] = {
+ /* Channel registers and DMAOR */
+ .start = 0xff608020,
+ .end = 0xff60808f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ /* DMARSx */
+ .start = 0xff609000,
+ .end = 0xff60900b,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "error_irq",
+ .start = 34,
+ .end = 34,
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE,
+ },
+};
+
+/* channels 6 to 11 */
+static struct resource sh7757_dmae1_resources[] = {
+ [0] = {
+ /* Channel registers and DMAOR */
+ .start = 0xff618020,
+ .end = 0xff61808f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ /* DMARSx */
+ .start = 0xff619000,
+ .end = 0xff61900b,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "error_irq",
+ .start = 34,
+ .end = 34,
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE,
+ },
+ {
+ /* IRQ for channel 4 */
+ .start = 46,
+ .end = 46,
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE,
+ },
+ {
+ /* IRQ for channel 5 */
+ .start = 46,
+ .end = 46,
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE,
+ },
+ {
+ /* IRQ for channel 6 */
+ .start = 88,
+ .end = 88,
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE,
+ },
+ {
+ /* IRQ for channel 7 */
+ .start = 88,
+ .end = 88,
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE,
+ },
+ {
+ /* IRQ for channel 8 */
+ .start = 88,
+ .end = 88,
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE,
+ },
+ {
+ /* IRQ for channel 9 */
+ .start = 88,
+ .end = 88,
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE,
+ },
+ {
+ /* IRQ for channel 10 */
+ .start = 88,
+ .end = 88,
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE,
+ },
+ {
+ /* IRQ for channel 11 */
+ .start = 88,
+ .end = 88,
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE,
+ },
+};
+
+/* channels 12 to 17 */
+static struct resource sh7757_dmae2_resources[] = {
+ [0] = {
+ /* Channel registers and DMAOR */
+ .start = 0xff708020,
+ .end = 0xff70808f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ /* DMARSx */
+ .start = 0xff709000,
+ .end = 0xff70900b,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "error_irq",
+ .start = 323,
+ .end = 323,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ /* IRQ for channels 12 to 16 */
+ .start = 272,
+ .end = 276,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ /* IRQ for channel 17 */
+ .start = 279,
+ .end = 279,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+/* channels 18 to 23 */
+static struct resource sh7757_dmae3_resources[] = {
+ [0] = {
+ /* Channel registers and DMAOR */
+ .start = 0xff718020,
+ .end = 0xff71808f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ /* DMARSx */
+ .start = 0xff719000,
+ .end = 0xff71900b,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "error_irq",
+ .start = 324,
+ .end = 324,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ /* IRQ for channels 18 to 22 */
+ .start = 280,
+ .end = 284,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ /* IRQ for channel 23 */
+ .start = 288,
+ .end = 288,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device dma0_device = {
+ .name = "sh-dma-engine",
+ .id = 0,
+ .resource = sh7757_dmae0_resources,
+ .num_resources = ARRAY_SIZE(sh7757_dmae0_resources),
+ .dev = {
+ .platform_data = &dma0_platform_data,
+ },
+};
+
+static struct platform_device dma1_device = {
+ .name = "sh-dma-engine",
+ .id = 1,
+ .resource = sh7757_dmae1_resources,
+ .num_resources = ARRAY_SIZE(sh7757_dmae1_resources),
+ .dev = {
+ .platform_data = &dma1_platform_data,
+ },
+};
+
+static struct platform_device dma2_device = {
+ .name = "sh-dma-engine",
+ .id = 2,
+ .resource = sh7757_dmae2_resources,
+ .num_resources = ARRAY_SIZE(sh7757_dmae2_resources),
+ .dev = {
+ .platform_data = &dma2_platform_data,
+ },
+};
+
+static struct platform_device dma3_device = {
+ .name = "sh-dma-engine",
+ .id = 3,
+ .resource = sh7757_dmae3_resources,
+ .num_resources = ARRAY_SIZE(sh7757_dmae3_resources),
+ .dev = {
+ .platform_data = &dma3_platform_data,
+ },
+};
+
+static struct platform_device spi0_device = {
+ .name = "sh_spi",
+ .id = 0,
+ .dev = {
+ .dma_mask = NULL,
+ .coherent_dma_mask = 0xffffffff,
+ },
+ .num_resources = ARRAY_SIZE(spi0_resources),
+ .resource = spi0_resources,
+};
+
+static struct resource spi1_resources[] = {
+ {
+ .start = 0xffd8ee70,
+ .end = 0xffd8eeff,
+ .flags = IORESOURCE_MEM | IORESOURCE_MEM_8BIT,
+ },
+ {
+ .start = 54,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device spi1_device = {
+ .name = "sh_spi",
+ .id = 1,
+ .num_resources = ARRAY_SIZE(spi1_resources),
+ .resource = spi1_resources,
+};
+
+static struct resource rspi_resources[] = {
+ {
+ .start = 0xfe480000,
+ .end = 0xfe4800ff,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = 220,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device rspi_device = {
+ .name = "rspi",
+ .id = 2,
+ .num_resources = ARRAY_SIZE(rspi_resources),
+ .resource = rspi_resources,
+};
+
+static struct resource usb_ehci_resources[] = {
+ [0] = {
+ .start = 0xfe4f1000,
+ .end = 0xfe4f10ff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 57,
+ .end = 57,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device usb_ehci_device = {
+ .name = "sh_ehci",
+ .id = -1,
+ .dev = {
+ .dma_mask = &usb_ehci_device.dev.coherent_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+ .num_resources = ARRAY_SIZE(usb_ehci_resources),
+ .resource = usb_ehci_resources,
+};
+
+static struct resource usb_ohci_resources[] = {
+ [0] = {
+ .start = 0xfe4f1800,
+ .end = 0xfe4f18ff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 57,
+ .end = 57,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device usb_ohci_device = {
+ .name = "sh_ohci",
+ .id = -1,
+ .dev = {
+ .dma_mask = &usb_ohci_device.dev.coherent_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+ .num_resources = ARRAY_SIZE(usb_ohci_resources),
+ .resource = usb_ohci_resources,
+};
+
+static struct platform_device *sh7757_devices[] __initdata = {
+ &scif2_device,
+ &scif3_device,
+ &scif4_device,
+ &tmu0_device,
+ &tmu1_device,
+ &dma0_device,
+ &dma1_device,
+ &dma2_device,
+ &dma3_device,
+ &spi0_device,
+ &spi1_device,
+ &rspi_device,
+ &usb_ehci_device,
+ &usb_ohci_device,
+};
+
+static int __init sh7757_devices_setup(void)
+{
+ return platform_add_devices(sh7757_devices,
+ ARRAY_SIZE(sh7757_devices));
+}
+arch_initcall(sh7757_devices_setup);
+
+static struct platform_device *sh7757_early_devices[] __initdata = {
+ &scif2_device,
+ &scif3_device,
+ &scif4_device,
+ &tmu0_device,
+ &tmu1_device,
+};
+
+void __init plat_early_device_setup(void)
+{
+ early_platform_add_devices(sh7757_early_devices,
+ ARRAY_SIZE(sh7757_early_devices));
+}
+
+enum {
+ UNUSED = 0,
+
+ /* interrupt sources */
+
+ IRL0_LLLL, IRL0_LLLH, IRL0_LLHL, IRL0_LLHH,
+ IRL0_LHLL, IRL0_LHLH, IRL0_LHHL, IRL0_LHHH,
+ IRL0_HLLL, IRL0_HLLH, IRL0_HLHL, IRL0_HLHH,
+ IRL0_HHLL, IRL0_HHLH, IRL0_HHHL,
+
+ IRL4_LLLL, IRL4_LLLH, IRL4_LLHL, IRL4_LLHH,
+ IRL4_LHLL, IRL4_LHLH, IRL4_LHHL, IRL4_LHHH,
+ IRL4_HLLL, IRL4_HLLH, IRL4_HLHL, IRL4_HLHH,
+ IRL4_HHLL, IRL4_HHLH, IRL4_HHHL,
+ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
+
+ SDHI, DVC,
+ IRQ8, IRQ9, IRQ11, IRQ10, IRQ12, IRQ13, IRQ14, IRQ15,
+ TMU0, TMU1, TMU2, TMU2_TICPI, TMU3, TMU4, TMU5,
+ HUDI,
+ ARC4,
+ DMAC0_5, DMAC6_7, DMAC8_11,
+ SCIF0, SCIF1, SCIF2, SCIF3, SCIF4,
+ USB0, USB1,
+ JMC,
+ SPI0, SPI1,
+ TMR01, TMR23, TMR45,
+ FRT,
+ LPC, LPC5, LPC6, LPC7, LPC8,
+ PECI0, PECI1, PECI2, PECI3, PECI4, PECI5,
+ ETHERC,
+ ADC0, ADC1,
+ SIM,
+ IIC0_0, IIC0_1, IIC0_2, IIC0_3,
+ IIC1_0, IIC1_1, IIC1_2, IIC1_3,
+ IIC2_0, IIC2_1, IIC2_2, IIC2_3,
+ IIC3_0, IIC3_1, IIC3_2, IIC3_3,
+ IIC4_0, IIC4_1, IIC4_2, IIC4_3,
+ IIC5_0, IIC5_1, IIC5_2, IIC5_3,
+ IIC6_0, IIC6_1, IIC6_2, IIC6_3,
+ IIC7_0, IIC7_1, IIC7_2, IIC7_3,
+ IIC8_0, IIC8_1, IIC8_2, IIC8_3,
+ IIC9_0, IIC9_1, IIC9_2, IIC9_3,
+ ONFICTL,
+ MMC1, MMC2,
+ ECCU,
+ PCIC,
+ G200,
+ RSPI,
+ SGPIO,
+ DMINT12, DMINT13, DMINT14, DMINT15, DMINT16, DMINT17, DMINT18, DMINT19,
+ DMINT20, DMINT21, DMINT22, DMINT23,
+ DDRECC,
+ TSIP,
+ PCIE_BRIDGE,
+ WDT0B, WDT1B, WDT2B, WDT3B, WDT4B, WDT5B, WDT6B, WDT7B, WDT8B,
+ GETHER0, GETHER1, GETHER2,
+ PBIA, PBIB, PBIC,
+ DMAE2, DMAE3,
+ SERMUX2, SERMUX3,
+
+ /* interrupt groups */
+
+ TMU012, TMU345,
+};
+
+static struct intc_vect vectors[] __initdata = {
+ INTC_VECT(SDHI, 0x480), INTC_VECT(SDHI, 0x4a0),
+ INTC_VECT(SDHI, 0x4c0),
+ INTC_VECT(DVC, 0x4e0),
+ INTC_VECT(IRQ8, 0x500), INTC_VECT(IRQ9, 0x520),
+ INTC_VECT(IRQ10, 0x540),
+ INTC_VECT(TMU0, 0x580), INTC_VECT(TMU1, 0x5a0),
+ INTC_VECT(TMU2, 0x5c0), INTC_VECT(TMU2_TICPI, 0x5e0),
+ INTC_VECT(HUDI, 0x600),
+ INTC_VECT(ARC4, 0x620),
+ INTC_VECT(DMAC0_5, 0x640), INTC_VECT(DMAC0_5, 0x660),
+ INTC_VECT(DMAC0_5, 0x680), INTC_VECT(DMAC0_5, 0x6a0),
+ INTC_VECT(DMAC0_5, 0x6c0),
+ INTC_VECT(IRQ11, 0x6e0),
+ INTC_VECT(SCIF2, 0x700), INTC_VECT(SCIF2, 0x720),
+ INTC_VECT(SCIF2, 0x740), INTC_VECT(SCIF2, 0x760),
+ INTC_VECT(DMAC0_5, 0x780), INTC_VECT(DMAC0_5, 0x7a0),
+ INTC_VECT(DMAC6_7, 0x7c0), INTC_VECT(DMAC6_7, 0x7e0),
+ INTC_VECT(USB0, 0x840),
+ INTC_VECT(IRQ12, 0x880),
+ INTC_VECT(JMC, 0x8a0),
+ INTC_VECT(SPI1, 0x8c0),
+ INTC_VECT(IRQ13, 0x8e0), INTC_VECT(IRQ14, 0x900),
+ INTC_VECT(USB1, 0x920),
+ INTC_VECT(TMR01, 0xa00), INTC_VECT(TMR23, 0xa20),
+ INTC_VECT(TMR45, 0xa40),
+ INTC_VECT(FRT, 0xa80),
+ INTC_VECT(LPC, 0xaa0), INTC_VECT(LPC, 0xac0),
+ INTC_VECT(LPC, 0xae0), INTC_VECT(LPC, 0xb00),
+ INTC_VECT(LPC, 0xb20),
+ INTC_VECT(SCIF0, 0xb40), INTC_VECT(SCIF1, 0xb60),
+ INTC_VECT(SCIF3, 0xb80), INTC_VECT(SCIF3, 0xba0),
+ INTC_VECT(SCIF3, 0xbc0), INTC_VECT(SCIF3, 0xbe0),
+ INTC_VECT(PECI0, 0xc00), INTC_VECT(PECI1, 0xc20),
+ INTC_VECT(PECI2, 0xc40),
+ INTC_VECT(IRQ15, 0xc60),
+ INTC_VECT(ETHERC, 0xc80), INTC_VECT(ETHERC, 0xca0),
+ INTC_VECT(SPI0, 0xcc0),
+ INTC_VECT(ADC1, 0xce0),
+ INTC_VECT(DMAC8_11, 0xd00), INTC_VECT(DMAC8_11, 0xd20),
+ INTC_VECT(DMAC8_11, 0xd40), INTC_VECT(DMAC8_11, 0xd60),
+ INTC_VECT(SIM, 0xd80), INTC_VECT(SIM, 0xda0),
+ INTC_VECT(SIM, 0xdc0), INTC_VECT(SIM, 0xde0),
+ INTC_VECT(TMU3, 0xe00), INTC_VECT(TMU4, 0xe20),
+ INTC_VECT(TMU5, 0xe40),
+ INTC_VECT(ADC0, 0xe60),
+ INTC_VECT(SCIF4, 0xf00), INTC_VECT(SCIF4, 0xf20),
+ INTC_VECT(SCIF4, 0xf40), INTC_VECT(SCIF4, 0xf60),
+ INTC_VECT(IIC0_0, 0x1400), INTC_VECT(IIC0_1, 0x1420),
+ INTC_VECT(IIC0_2, 0x1440), INTC_VECT(IIC0_3, 0x1460),
+ INTC_VECT(IIC1_0, 0x1480), INTC_VECT(IIC1_1, 0x14e0),
+ INTC_VECT(IIC1_2, 0x1500), INTC_VECT(IIC1_3, 0x1520),
+ INTC_VECT(IIC2_0, 0x1540), INTC_VECT(IIC2_1, 0x1560),
+ INTC_VECT(IIC2_2, 0x1580), INTC_VECT(IIC2_3, 0x1600),
+ INTC_VECT(IIC3_0, 0x1620), INTC_VECT(IIC3_1, 0x1640),
+ INTC_VECT(IIC3_2, 0x16e0), INTC_VECT(IIC3_3, 0x1700),
+ INTC_VECT(IIC4_0, 0x17c0), INTC_VECT(IIC4_1, 0x1800),
+ INTC_VECT(IIC4_2, 0x1820), INTC_VECT(IIC4_3, 0x1840),
+ INTC_VECT(IIC5_0, 0x1860), INTC_VECT(IIC5_1, 0x1880),
+ INTC_VECT(IIC5_2, 0x18a0), INTC_VECT(IIC5_3, 0x18c0),
+ INTC_VECT(IIC6_0, 0x18e0), INTC_VECT(IIC6_1, 0x1900),
+ INTC_VECT(IIC6_2, 0x1920),
+ INTC_VECT(ONFICTL, 0x1960),
+ INTC_VECT(IIC6_3, 0x1980),
+ INTC_VECT(IIC7_0, 0x19a0), INTC_VECT(IIC7_1, 0x1a00),
+ INTC_VECT(IIC7_2, 0x1a20), INTC_VECT(IIC7_3, 0x1a40),
+ INTC_VECT(IIC8_0, 0x1a60), INTC_VECT(IIC8_1, 0x1a80),
+ INTC_VECT(IIC8_2, 0x1aa0), INTC_VECT(IIC8_3, 0x1b40),
+ INTC_VECT(IIC9_0, 0x1b60), INTC_VECT(IIC9_1, 0x1b80),
+ INTC_VECT(IIC9_2, 0x1c00), INTC_VECT(IIC9_3, 0x1c20),
+ INTC_VECT(MMC1, 0x1c60), INTC_VECT(MMC2, 0x1c80),
+ INTC_VECT(ECCU, 0x1cc0),
+ INTC_VECT(PCIC, 0x1ce0),
+ INTC_VECT(G200, 0x1d00),
+ INTC_VECT(RSPI, 0x1d80), INTC_VECT(RSPI, 0x1da0),
+ INTC_VECT(RSPI, 0x1dc0), INTC_VECT(RSPI, 0x1de0),
+ INTC_VECT(PECI3, 0x1ec0), INTC_VECT(PECI4, 0x1ee0),
+ INTC_VECT(PECI5, 0x1f00),
+ INTC_VECT(SGPIO, 0x1f80), INTC_VECT(SGPIO, 0x1fa0),
+ INTC_VECT(SGPIO, 0x1fc0),
+ INTC_VECT(DMINT12, 0x2400), INTC_VECT(DMINT13, 0x2420),
+ INTC_VECT(DMINT14, 0x2440), INTC_VECT(DMINT15, 0x2460),
+ INTC_VECT(DMINT16, 0x2480), INTC_VECT(DMINT17, 0x24e0),
+ INTC_VECT(DMINT18, 0x2500), INTC_VECT(DMINT19, 0x2520),
+ INTC_VECT(DMINT20, 0x2540), INTC_VECT(DMINT21, 0x2560),
+ INTC_VECT(DMINT22, 0x2580), INTC_VECT(DMINT23, 0x2600),
+ INTC_VECT(DDRECC, 0x2620),
+ INTC_VECT(TSIP, 0x2640),
+ INTC_VECT(PCIE_BRIDGE, 0x27c0),
+ INTC_VECT(WDT0B, 0x2800), INTC_VECT(WDT1B, 0x2820),
+ INTC_VECT(WDT2B, 0x2840), INTC_VECT(WDT3B, 0x2860),
+ INTC_VECT(WDT4B, 0x2880), INTC_VECT(WDT5B, 0x28a0),
+ INTC_VECT(WDT6B, 0x28c0), INTC_VECT(WDT7B, 0x28e0),
+ INTC_VECT(WDT8B, 0x2900),
+ INTC_VECT(GETHER0, 0x2960), INTC_VECT(GETHER1, 0x2980),
+ INTC_VECT(GETHER2, 0x29a0),
+ INTC_VECT(PBIA, 0x2a00), INTC_VECT(PBIB, 0x2a20),
+ INTC_VECT(PBIC, 0x2a40),
+ INTC_VECT(DMAE2, 0x2a60), INTC_VECT(DMAE3, 0x2a80),
+ INTC_VECT(SERMUX2, 0x2aa0), INTC_VECT(SERMUX3, 0x2b40),
+ INTC_VECT(LPC5, 0x2b60), INTC_VECT(LPC6, 0x2b80),
+ INTC_VECT(LPC7, 0x2c00), INTC_VECT(LPC8, 0x2c20),
+};
+
+static struct intc_group groups[] __initdata = {
+ INTC_GROUP(TMU012, TMU0, TMU1, TMU2, TMU2_TICPI),
+ INTC_GROUP(TMU345, TMU3, TMU4, TMU5),
+};
+
+static struct intc_mask_reg mask_registers[] __initdata = {
+ { 0xffd00044, 0xffd00064, 32, /* INTMSK0 / INTMSKCLR0 */
+ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
+
+ { 0xffd40080, 0xffd40084, 32, /* INTMSK2 / INTMSKCLR2 */
+ { IRL0_LLLL, IRL0_LLLH, IRL0_LLHL, IRL0_LLHH,
+ IRL0_LHLL, IRL0_LHLH, IRL0_LHHL, IRL0_LHHH,
+ IRL0_HLLL, IRL0_HLLH, IRL0_HLHL, IRL0_HLHH,
+ IRL0_HHLL, IRL0_HHLH, IRL0_HHHL, 0,
+ IRL4_LLLL, IRL4_LLLH, IRL4_LLHL, IRL4_LLHH,
+ IRL4_LHLL, IRL4_LHLH, IRL4_LHHL, IRL4_LHHH,
+ IRL4_HLLL, IRL4_HLLH, IRL4_HLHL, IRL4_HLHH,
+ IRL4_HHLL, IRL4_HHLH, IRL4_HHHL, 0, } },
+
+ { 0xffd40038, 0xffd4003c, 32, /* INT2MSKR / INT2MSKCR */
+ { 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, DMAC8_11, 0, PECI0, LPC, FRT, 0, TMR45,
+ TMR23, TMR01, 0, 0, 0, 0, 0, DMAC0_5,
+ HUDI, 0, 0, SCIF3, SCIF2, SDHI, TMU345, TMU012
+ } },
+
+ { 0xffd400d0, 0xffd400d4, 32, /* INT2MSKR1 / INT2MSKCR1 */
+ { IRQ15, IRQ14, IRQ13, IRQ12, IRQ11, IRQ10, SCIF4, ETHERC,
+ IRQ9, IRQ8, SCIF1, SCIF0, USB0, 0, 0, USB1,
+ ADC1, 0, DMAC6_7, ADC0, SPI0, SIM, PECI2, PECI1,
+ ARC4, 0, SPI1, JMC, 0, 0, 0, DVC
+ } },
+
+ { 0xffd10038, 0xffd1003c, 32, /* INT2MSKR2 / INT2MSKCR2 */
+ { IIC4_1, IIC4_2, IIC5_0, ONFICTL, 0, 0, SGPIO, 0,
+ 0, G200, 0, IIC9_2, IIC8_2, IIC8_1, IIC8_0, IIC7_3,
+ IIC7_2, IIC7_1, IIC6_3, IIC0_0, IIC0_1, IIC0_2, IIC0_3, IIC3_1,
+ IIC2_3, 0, IIC2_1, IIC9_1, IIC3_3, IIC1_0, 0, IIC2_2
+ } },
+
+ { 0xffd100d0, 0xffd100d4, 32, /* INT2MSKR3 / INT2MSKCR3 */
+ { MMC1, IIC6_1, IIC6_0, IIC5_1, IIC3_2, IIC2_0, PECI5, MMC2,
+ IIC1_3, IIC1_2, IIC9_0, IIC8_3, IIC4_3, IIC7_0, 0, IIC6_2,
+ PCIC, 0, IIC4_0, 0, ECCU, RSPI, 0, IIC9_3,
+ IIC3_0, 0, IIC5_3, IIC5_2, 0, 0, 0, IIC1_1
+ } },
+
+ { 0xffd20038, 0xffd2003c, 32, /* INT2MSKR4 / INT2MSKCR4 */
+ { WDT0B, WDT1B, WDT3B, GETHER0, 0, 0, 0, 0,
+ 0, 0, 0, LPC7, SERMUX2, DMAE3, DMAE2, PBIC,
+ PBIB, PBIA, GETHER1, DMINT12, DMINT13, DMINT14, DMINT15, TSIP,
+ DMINT23, 0, DMINT21, LPC6, 0, DMINT16, 0, DMINT22
+ } },
+
+ { 0xffd200d0, 0xffd200d4, 32, /* INT2MSKR5 / INT2MSKCR5 */
+ { 0, WDT8B, WDT7B, WDT4B, 0, DMINT20, 0, 0,
+ DMINT19, DMINT18, LPC5, SERMUX3, WDT2B, GETHER2, 0, 0,
+ 0, 0, PCIE_BRIDGE, 0, 0, 0, 0, LPC8,
+ DDRECC, 0, WDT6B, WDT5B, 0, 0, 0, DMINT17
+ } },
+};
+
+#define INTPRI 0xffd00010
+#define INT2PRI0 0xffd40000
+#define INT2PRI1 0xffd40004
+#define INT2PRI2 0xffd40008
+#define INT2PRI3 0xffd4000c
+#define INT2PRI4 0xffd40010
+#define INT2PRI5 0xffd40014
+#define INT2PRI6 0xffd40018
+#define INT2PRI7 0xffd4001c
+#define INT2PRI8 0xffd400a0
+#define INT2PRI9 0xffd400a4
+#define INT2PRI10 0xffd400a8
+#define INT2PRI11 0xffd400ac
+#define INT2PRI12 0xffd400b0
+#define INT2PRI13 0xffd400b4
+#define INT2PRI14 0xffd400b8
+#define INT2PRI15 0xffd400bc
+#define INT2PRI16 0xffd10000
+#define INT2PRI17 0xffd10004
+#define INT2PRI18 0xffd10008
+#define INT2PRI19 0xffd1000c
+#define INT2PRI20 0xffd10010
+#define INT2PRI21 0xffd10014
+#define INT2PRI22 0xffd10018
+#define INT2PRI23 0xffd1001c
+#define INT2PRI24 0xffd100a0
+#define INT2PRI25 0xffd100a4
+#define INT2PRI26 0xffd100a8
+#define INT2PRI27 0xffd100ac
+#define INT2PRI28 0xffd100b0
+#define INT2PRI29 0xffd100b4
+#define INT2PRI30 0xffd100b8
+#define INT2PRI31 0xffd100bc
+#define INT2PRI32 0xffd20000
+#define INT2PRI33 0xffd20004
+#define INT2PRI34 0xffd20008
+#define INT2PRI35 0xffd2000c
+#define INT2PRI36 0xffd20010
+#define INT2PRI37 0xffd20014
+#define INT2PRI38 0xffd20018
+#define INT2PRI39 0xffd2001c
+#define INT2PRI40 0xffd200a0
+#define INT2PRI41 0xffd200a4
+#define INT2PRI42 0xffd200a8
+#define INT2PRI43 0xffd200ac
+#define INT2PRI44 0xffd200b0
+#define INT2PRI45 0xffd200b4
+#define INT2PRI46 0xffd200b8
+#define INT2PRI47 0xffd200bc
+
+static struct intc_prio_reg prio_registers[] __initdata = {
+ { INTPRI, 0, 32, 4, { IRQ0, IRQ1, IRQ2, IRQ3,
+ IRQ4, IRQ5, IRQ6, IRQ7 } },
+
+ { INT2PRI0, 0, 32, 8, { TMU0, TMU1, TMU2, TMU2_TICPI } },
+ { INT2PRI1, 0, 32, 8, { TMU3, TMU4, TMU5, SDHI } },
+ { INT2PRI2, 0, 32, 8, { SCIF2, SCIF3, 0, IRQ8 } },
+ { INT2PRI3, 0, 32, 8, { HUDI, DMAC0_5, ADC0, IRQ9 } },
+ { INT2PRI4, 0, 32, 8, { IRQ10, 0, TMR01, TMR23 } },
+ { INT2PRI5, 0, 32, 8, { TMR45, 0, FRT, LPC } },
+ { INT2PRI6, 0, 32, 8, { PECI0, ETHERC, DMAC8_11, 0 } },
+ { INT2PRI7, 0, 32, 8, { SCIF4, 0, IRQ11, IRQ12 } },
+ { INT2PRI8, 0, 32, 8, { 0, 0, 0, DVC } },
+ { INT2PRI9, 0, 32, 8, { ARC4, 0, SPI1, JMC } },
+ { INT2PRI10, 0, 32, 8, { SPI0, SIM, PECI2, PECI1 } },
+ { INT2PRI11, 0, 32, 8, { ADC1, IRQ13, DMAC6_7, IRQ14 } },
+ { INT2PRI12, 0, 32, 8, { USB0, 0, IRQ15, USB1 } },
+ { INT2PRI13, 0, 32, 8, { 0, 0, SCIF1, SCIF0 } },
+
+ { INT2PRI16, 0, 32, 8, { IIC2_2, 0, 0, 0 } },
+ { INT2PRI17, 0, 32, 8, { 0, 0, 0, IIC1_0 } },
+ { INT2PRI18, 0, 32, 8, { IIC3_3, IIC9_1, IIC2_1, IIC1_2 } },
+ { INT2PRI19, 0, 32, 8, { IIC2_3, IIC3_1, 0, IIC1_3 } },
+ { INT2PRI20, 0, 32, 8, { IIC2_0, IIC6_3, IIC7_1, IIC7_2 } },
+ { INT2PRI21, 0, 32, 8, { IIC7_3, IIC8_0, IIC8_1, IIC8_2 } },
+ { INT2PRI22, 0, 32, 8, { IIC9_2, MMC2, G200, 0 } },
+ { INT2PRI23, 0, 32, 8, { PECI5, SGPIO, IIC3_2, IIC5_1 } },
+ { INT2PRI24, 0, 32, 8, { PECI4, PECI3, 0, IIC1_1 } },
+ { INT2PRI25, 0, 32, 8, { IIC3_0, 0, IIC5_3, IIC5_2 } },
+ { INT2PRI26, 0, 32, 8, { ECCU, RSPI, 0, IIC9_3 } },
+ { INT2PRI27, 0, 32, 8, { PCIC, IIC6_0, IIC4_0, IIC6_1 } },
+ { INT2PRI28, 0, 32, 8, { IIC4_3, IIC7_0, MMC1, IIC6_2 } },
+ { INT2PRI29, 0, 32, 8, { 0, 0, IIC9_0, IIC8_3 } },
+ { INT2PRI30, 0, 32, 8, { IIC4_1, IIC4_2, IIC5_0, ONFICTL } },
+ { INT2PRI31, 0, 32, 8, { IIC0_0, IIC0_1, IIC0_2, IIC0_3 } },
+ { INT2PRI32, 0, 32, 8, { DMINT22, 0, 0, 0 } },
+ { INT2PRI33, 0, 32, 8, { 0, 0, 0, DMINT16 } },
+ { INT2PRI34, 0, 32, 8, { 0, LPC6, DMINT21, DMINT18 } },
+ { INT2PRI35, 0, 32, 8, { DMINT23, TSIP, 0, DMINT19 } },
+ { INT2PRI36, 0, 32, 8, { DMINT20, GETHER1, PBIA, PBIB } },
+ { INT2PRI37, 0, 32, 8, { PBIC, DMAE2, DMAE3, SERMUX2 } },
+ { INT2PRI38, 0, 32, 8, { LPC7, 0, 0, 0 } },
+ { INT2PRI39, 0, 32, 8, { 0, 0, 0, WDT4B } },
+ { INT2PRI40, 0, 32, 8, { 0, 0, 0, DMINT17 } },
+ { INT2PRI41, 0, 32, 8, { DDRECC, 0, WDT6B, WDT5B } },
+ { INT2PRI42, 0, 32, 8, { 0, 0, 0, LPC8 } },
+ { INT2PRI43, 0, 32, 8, { 0, WDT7B, PCIE_BRIDGE, WDT8B } },
+ { INT2PRI44, 0, 32, 8, { WDT2B, GETHER2, 0, 0 } },
+ { INT2PRI45, 0, 32, 8, { 0, 0, LPC5, SERMUX3 } },
+ { INT2PRI46, 0, 32, 8, { WDT0B, WDT1B, WDT3B, GETHER0 } },
+ { INT2PRI47, 0, 32, 8, { DMINT12, DMINT13, DMINT14, DMINT15 } },
+};
+
+static struct intc_sense_reg sense_registers_irq8to15[] __initdata = {
+ { 0xffd100f8, 32, 2, /* ICR2 */ { IRQ15, IRQ14, IRQ13, IRQ12,
+ IRQ11, IRQ10, IRQ9, IRQ8 } },
+};
+
+static DECLARE_INTC_DESC(intc_desc, "sh7757", vectors, groups,
+ mask_registers, prio_registers,
+ sense_registers_irq8to15);
+
+/* Support for external interrupt pins in IRQ mode */
+static struct intc_vect vectors_irq0123[] __initdata = {
+ INTC_VECT(IRQ0, 0x200), INTC_VECT(IRQ1, 0x240),
+ INTC_VECT(IRQ2, 0x280), INTC_VECT(IRQ3, 0x2c0),
+};
+
+static struct intc_vect vectors_irq4567[] __initdata = {
+ INTC_VECT(IRQ4, 0x300), INTC_VECT(IRQ5, 0x340),
+ INTC_VECT(IRQ6, 0x380), INTC_VECT(IRQ7, 0x3c0),
+};
+
+static struct intc_sense_reg sense_registers[] __initdata = {
+ { 0xffd0001c, 32, 2, /* ICR1 */ { IRQ0, IRQ1, IRQ2, IRQ3,
+ IRQ4, IRQ5, IRQ6, IRQ7 } },
+};
+
+static struct intc_mask_reg ack_registers[] __initdata = {
+ { 0xffd00024, 0, 32, /* INTREQ */
+ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
+};
+
+static DECLARE_INTC_DESC_ACK(intc_desc_irq0123, "sh7757-irq0123",
+ vectors_irq0123, NULL, mask_registers,
+ prio_registers, sense_registers, ack_registers);
+
+static DECLARE_INTC_DESC_ACK(intc_desc_irq4567, "sh7757-irq4567",
+ vectors_irq4567, NULL, mask_registers,
+ prio_registers, sense_registers, ack_registers);
+
+/* External interrupt pins in IRL mode */
+static struct intc_vect vectors_irl0123[] __initdata = {
+ INTC_VECT(IRL0_LLLL, 0x200), INTC_VECT(IRL0_LLLH, 0x220),
+ INTC_VECT(IRL0_LLHL, 0x240), INTC_VECT(IRL0_LLHH, 0x260),
+ INTC_VECT(IRL0_LHLL, 0x280), INTC_VECT(IRL0_LHLH, 0x2a0),
+ INTC_VECT(IRL0_LHHL, 0x2c0), INTC_VECT(IRL0_LHHH, 0x2e0),
+ INTC_VECT(IRL0_HLLL, 0x300), INTC_VECT(IRL0_HLLH, 0x320),
+ INTC_VECT(IRL0_HLHL, 0x340), INTC_VECT(IRL0_HLHH, 0x360),
+ INTC_VECT(IRL0_HHLL, 0x380), INTC_VECT(IRL0_HHLH, 0x3a0),
+ INTC_VECT(IRL0_HHHL, 0x3c0),
+};
+
+static struct intc_vect vectors_irl4567[] __initdata = {
+ INTC_VECT(IRL4_LLLL, 0x200), INTC_VECT(IRL4_LLLH, 0x220),
+ INTC_VECT(IRL4_LLHL, 0x240), INTC_VECT(IRL4_LLHH, 0x260),
+ INTC_VECT(IRL4_LHLL, 0x280), INTC_VECT(IRL4_LHLH, 0x2a0),
+ INTC_VECT(IRL4_LHHL, 0x2c0), INTC_VECT(IRL4_LHHH, 0x2e0),
+ INTC_VECT(IRL4_HLLL, 0x300), INTC_VECT(IRL4_HLLH, 0x320),
+ INTC_VECT(IRL4_HLHL, 0x340), INTC_VECT(IRL4_HLHH, 0x360),
+ INTC_VECT(IRL4_HHLL, 0x380), INTC_VECT(IRL4_HHLH, 0x3a0),
+ INTC_VECT(IRL4_HHHL, 0x3c0),
+};
+
+static DECLARE_INTC_DESC(intc_desc_irl0123, "sh7757-irl0123", vectors_irl0123,
+ NULL, mask_registers, NULL, NULL);
+
+static DECLARE_INTC_DESC(intc_desc_irl4567, "sh7757-irl4567", vectors_irl4567,
+ NULL, mask_registers, NULL, NULL);
+
+#define INTC_ICR0 0xffd00000
+#define INTC_INTMSK0 0xffd00044
+#define INTC_INTMSK1 0xffd00048
+#define INTC_INTMSK2 0xffd40080
+#define INTC_INTMSKCLR1 0xffd00068
+#define INTC_INTMSKCLR2 0xffd40084
+
+void __init plat_irq_setup(void)
+{
+ /* disable IRQ3-0 + IRQ7-4 */
+ __raw_writel(0xff000000, INTC_INTMSK0);
+
+ /* disable IRL3-0 + IRL7-4 */
+ __raw_writel(0xc0000000, INTC_INTMSK1);
+ __raw_writel(0xfffefffe, INTC_INTMSK2);
+
+ /* select IRL mode for IRL3-0 + IRL7-4 */
+ __raw_writel(__raw_readl(INTC_ICR0) & ~0x00c00000, INTC_ICR0);
+
+ /* disable holding function, i.e. enable "SH-4 Mode" */
+ __raw_writel(__raw_readl(INTC_ICR0) | 0x00200000, INTC_ICR0);
+
+ register_intc_controller(&intc_desc);
+}
+
+void __init plat_irq_setup_pins(int mode)
+{
+ switch (mode) {
+ case IRQ_MODE_IRQ7654:
+ /* select IRQ mode for IRL7-4 */
+ __raw_writel(__raw_readl(INTC_ICR0) | 0x00400000, INTC_ICR0);
+ register_intc_controller(&intc_desc_irq4567);
+ break;
+ case IRQ_MODE_IRQ3210:
+ /* select IRQ mode for IRL3-0 */
+ __raw_writel(__raw_readl(INTC_ICR0) | 0x00800000, INTC_ICR0);
+ register_intc_controller(&intc_desc_irq0123);
+ break;
+ case IRQ_MODE_IRL7654:
+ /* enable IRL7-4 but don't provide any masking */
+ __raw_writel(0x40000000, INTC_INTMSKCLR1);
+ __raw_writel(0x0000fffe, INTC_INTMSKCLR2);
+ break;
+ case IRQ_MODE_IRL3210:
+ /* enable IRL0-3 but don't provide any masking */
+ __raw_writel(0x80000000, INTC_INTMSKCLR1);
+ __raw_writel(0xfffe0000, INTC_INTMSKCLR2);
+ break;
+ case IRQ_MODE_IRL7654_MASK:
+ /* enable IRL7-4 and mask using cpu intc controller */
+ __raw_writel(0x40000000, INTC_INTMSKCLR1);
+ register_intc_controller(&intc_desc_irl4567);
+ break;
+ case IRQ_MODE_IRL3210_MASK:
+ /* enable IRL0-3 and mask using cpu intc controller */
+ __raw_writel(0x80000000, INTC_INTMSKCLR1);
+ register_intc_controller(&intc_desc_irl0123);
+ break;
+ default:
+ BUG();
+ }
+}
+
+void __init plat_mem_setup(void)
+{
+}
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7763.c b/arch/sh/kernel/cpu/sh4a/setup-sh7763.c
new file mode 100644
index 00000000..00113515
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7763.c
@@ -0,0 +1,572 @@
+/*
+ * SH7763 Setup
+ *
+ * Copyright (C) 2006 Paul Mundt
+ * Copyright (C) 2007 Yoshihiro Shimoda
+ * Copyright (C) 2008, 2009 Nobuhiro Iwamatsu
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/serial.h>
+#include <linux/sh_timer.h>
+#include <linux/io.h>
+#include <linux/serial_sci.h>
+
+static struct plat_sci_port scif0_platform_data = {
+ .mapbase = 0xffe00000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .scbrr_algo_id = SCBRR_ALGO_2,
+ .type = PORT_SCIF,
+ .irqs = { 40, 40, 40, 40 },
+ .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
+};
+
+static struct platform_device scif0_device = {
+ .name = "sh-sci",
+ .id = 0,
+ .dev = {
+ .platform_data = &scif0_platform_data,
+ },
+};
+
+static struct plat_sci_port scif1_platform_data = {
+ .mapbase = 0xffe08000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .scbrr_algo_id = SCBRR_ALGO_2,
+ .type = PORT_SCIF,
+ .irqs = { 76, 76, 76, 76 },
+ .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
+};
+
+static struct platform_device scif1_device = {
+ .name = "sh-sci",
+ .id = 1,
+ .dev = {
+ .platform_data = &scif1_platform_data,
+ },
+};
+
+static struct plat_sci_port scif2_platform_data = {
+ .mapbase = 0xffe10000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .scbrr_algo_id = SCBRR_ALGO_2,
+ .type = PORT_SCIF,
+ .irqs = { 104, 104, 104, 104 },
+ .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
+};
+
+static struct platform_device scif2_device = {
+ .name = "sh-sci",
+ .id = 2,
+ .dev = {
+ .platform_data = &scif2_platform_data,
+ },
+};
+
+static struct resource rtc_resources[] = {
+ [0] = {
+ .start = 0xffe80000,
+ .end = 0xffe80000 + 0x58 - 1,
+ .flags = IORESOURCE_IO,
+ },
+ [1] = {
+ /* Shared Period/Carry/Alarm IRQ */
+ .start = 20,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device rtc_device = {
+ .name = "sh-rtc",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(rtc_resources),
+ .resource = rtc_resources,
+};
+
+static struct resource usb_ohci_resources[] = {
+ [0] = {
+ .start = 0xffec8000,
+ .end = 0xffec80ff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 83,
+ .end = 83,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static u64 usb_ohci_dma_mask = 0xffffffffUL;
+static struct platform_device usb_ohci_device = {
+ .name = "sh_ohci",
+ .id = -1,
+ .dev = {
+ .dma_mask = &usb_ohci_dma_mask,
+ .coherent_dma_mask = 0xffffffff,
+ },
+ .num_resources = ARRAY_SIZE(usb_ohci_resources),
+ .resource = usb_ohci_resources,
+};
+
+static struct resource usbf_resources[] = {
+ [0] = {
+ .start = 0xffec0000,
+ .end = 0xffec00ff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 84,
+ .end = 84,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device usbf_device = {
+ .name = "sh_udc",
+ .id = -1,
+ .dev = {
+ .dma_mask = NULL,
+ .coherent_dma_mask = 0xffffffff,
+ },
+ .num_resources = ARRAY_SIZE(usbf_resources),
+ .resource = usbf_resources,
+};
+
+static struct sh_timer_config tmu0_platform_data = {
+ .channel_offset = 0x04,
+ .timer_bit = 0,
+ .clockevent_rating = 200,
+};
+
+static struct resource tmu0_resources[] = {
+ [0] = {
+ .start = 0xffd80008,
+ .end = 0xffd80013,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 28,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu0_device = {
+ .name = "sh_tmu",
+ .id = 0,
+ .dev = {
+ .platform_data = &tmu0_platform_data,
+ },
+ .resource = tmu0_resources,
+ .num_resources = ARRAY_SIZE(tmu0_resources),
+};
+
+static struct sh_timer_config tmu1_platform_data = {
+ .channel_offset = 0x10,
+ .timer_bit = 1,
+ .clocksource_rating = 200,
+};
+
+static struct resource tmu1_resources[] = {
+ [0] = {
+ .start = 0xffd80014,
+ .end = 0xffd8001f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 29,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu1_device = {
+ .name = "sh_tmu",
+ .id = 1,
+ .dev = {
+ .platform_data = &tmu1_platform_data,
+ },
+ .resource = tmu1_resources,
+ .num_resources = ARRAY_SIZE(tmu1_resources),
+};
+
+static struct sh_timer_config tmu2_platform_data = {
+ .channel_offset = 0x1c,
+ .timer_bit = 2,
+};
+
+static struct resource tmu2_resources[] = {
+ [0] = {
+ .start = 0xffd80020,
+ .end = 0xffd8002f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 30,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu2_device = {
+ .name = "sh_tmu",
+ .id = 2,
+ .dev = {
+ .platform_data = &tmu2_platform_data,
+ },
+ .resource = tmu2_resources,
+ .num_resources = ARRAY_SIZE(tmu2_resources),
+};
+
+static struct sh_timer_config tmu3_platform_data = {
+ .channel_offset = 0x04,
+ .timer_bit = 0,
+};
+
+static struct resource tmu3_resources[] = {
+ [0] = {
+ .start = 0xffd88008,
+ .end = 0xffd88013,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 96,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu3_device = {
+ .name = "sh_tmu",
+ .id = 3,
+ .dev = {
+ .platform_data = &tmu3_platform_data,
+ },
+ .resource = tmu3_resources,
+ .num_resources = ARRAY_SIZE(tmu3_resources),
+};
+
+static struct sh_timer_config tmu4_platform_data = {
+ .channel_offset = 0x10,
+ .timer_bit = 1,
+};
+
+static struct resource tmu4_resources[] = {
+ [0] = {
+ .start = 0xffd88014,
+ .end = 0xffd8801f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 97,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu4_device = {
+ .name = "sh_tmu",
+ .id = 4,
+ .dev = {
+ .platform_data = &tmu4_platform_data,
+ },
+ .resource = tmu4_resources,
+ .num_resources = ARRAY_SIZE(tmu4_resources),
+};
+
+static struct sh_timer_config tmu5_platform_data = {
+ .channel_offset = 0x1c,
+ .timer_bit = 2,
+};
+
+static struct resource tmu5_resources[] = {
+ [0] = {
+ .start = 0xffd88020,
+ .end = 0xffd8802b,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 98,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu5_device = {
+ .name = "sh_tmu",
+ .id = 5,
+ .dev = {
+ .platform_data = &tmu5_platform_data,
+ },
+ .resource = tmu5_resources,
+ .num_resources = ARRAY_SIZE(tmu5_resources),
+};
+
+static struct platform_device *sh7763_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &tmu0_device,
+ &tmu1_device,
+ &tmu2_device,
+ &tmu3_device,
+ &tmu4_device,
+ &tmu5_device,
+ &rtc_device,
+ &usb_ohci_device,
+ &usbf_device,
+};
+
+static int __init sh7763_devices_setup(void)
+{
+ return platform_add_devices(sh7763_devices,
+ ARRAY_SIZE(sh7763_devices));
+}
+arch_initcall(sh7763_devices_setup);
+
+static struct platform_device *sh7763_early_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &tmu0_device,
+ &tmu1_device,
+ &tmu2_device,
+ &tmu3_device,
+ &tmu4_device,
+ &tmu5_device,
+};
+
+void __init plat_early_device_setup(void)
+{
+ early_platform_add_devices(sh7763_early_devices,
+ ARRAY_SIZE(sh7763_early_devices));
+}
+
+enum {
+ UNUSED = 0,
+
+ /* interrupt sources */
+
+ IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH,
+ IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH,
+ IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH,
+ IRL_HHLL, IRL_HHLH, IRL_HHHL,
+
+ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
+ RTC, WDT, TMU0, TMU1, TMU2, TMU2_TICPI,
+ HUDI, LCDC, DMAC, SCIF0, IIC0, IIC1, CMT, GETHER, HAC,
+ PCISERR, PCIINTA, PCIINTB, PCIINTC, PCIINTD, PCIC5,
+ STIF0, STIF1, SCIF1, SIOF0, SIOF1, SIOF2,
+ USBH, USBF, TPU, PCC, MMCIF, SIM,
+ TMU3, TMU4, TMU5, ADC, SSI0, SSI1, SSI2, SSI3,
+ SCIF2, GPIO,
+
+ /* interrupt groups */
+
+ TMU012, TMU345,
+};
+
+static struct intc_vect vectors[] __initdata = {
+ INTC_VECT(RTC, 0x480), INTC_VECT(RTC, 0x4a0),
+ INTC_VECT(RTC, 0x4c0),
+ INTC_VECT(WDT, 0x560), INTC_VECT(TMU0, 0x580),
+ INTC_VECT(TMU1, 0x5a0), INTC_VECT(TMU2, 0x5c0),
+ INTC_VECT(TMU2_TICPI, 0x5e0), INTC_VECT(HUDI, 0x600),
+ INTC_VECT(LCDC, 0x620),
+ INTC_VECT(DMAC, 0x640), INTC_VECT(DMAC, 0x660),
+ INTC_VECT(DMAC, 0x680), INTC_VECT(DMAC, 0x6a0),
+ INTC_VECT(DMAC, 0x6c0),
+ INTC_VECT(SCIF0, 0x700), INTC_VECT(SCIF0, 0x720),
+ INTC_VECT(SCIF0, 0x740), INTC_VECT(SCIF0, 0x760),
+ INTC_VECT(DMAC, 0x780), INTC_VECT(DMAC, 0x7a0),
+ INTC_VECT(IIC0, 0x8a0), INTC_VECT(IIC1, 0x8c0),
+ INTC_VECT(CMT, 0x900), INTC_VECT(GETHER, 0x920),
+ INTC_VECT(GETHER, 0x940), INTC_VECT(GETHER, 0x960),
+ INTC_VECT(HAC, 0x980),
+ INTC_VECT(PCISERR, 0xa00), INTC_VECT(PCIINTA, 0xa20),
+ INTC_VECT(PCIINTB, 0xa40), INTC_VECT(PCIINTC, 0xa60),
+ INTC_VECT(PCIINTD, 0xa80), INTC_VECT(PCIC5, 0xaa0),
+ INTC_VECT(PCIC5, 0xac0), INTC_VECT(PCIC5, 0xae0),
+ INTC_VECT(PCIC5, 0xb00), INTC_VECT(PCIC5, 0xb20),
+ INTC_VECT(STIF0, 0xb40), INTC_VECT(STIF1, 0xb60),
+ INTC_VECT(SCIF1, 0xb80), INTC_VECT(SCIF1, 0xba0),
+ INTC_VECT(SCIF1, 0xbc0), INTC_VECT(SCIF1, 0xbe0),
+ INTC_VECT(SIOF0, 0xc00), INTC_VECT(SIOF1, 0xc20),
+ INTC_VECT(USBH, 0xc60), INTC_VECT(USBF, 0xc80),
+ INTC_VECT(USBF, 0xca0),
+ INTC_VECT(TPU, 0xcc0), INTC_VECT(PCC, 0xce0),
+ INTC_VECT(MMCIF, 0xd00), INTC_VECT(MMCIF, 0xd20),
+ INTC_VECT(MMCIF, 0xd40), INTC_VECT(MMCIF, 0xd60),
+ INTC_VECT(SIM, 0xd80), INTC_VECT(SIM, 0xda0),
+ INTC_VECT(SIM, 0xdc0), INTC_VECT(SIM, 0xde0),
+ INTC_VECT(TMU3, 0xe00), INTC_VECT(TMU4, 0xe20),
+ INTC_VECT(TMU5, 0xe40), INTC_VECT(ADC, 0xe60),
+ INTC_VECT(SSI0, 0xe80), INTC_VECT(SSI1, 0xea0),
+ INTC_VECT(SSI2, 0xec0), INTC_VECT(SSI3, 0xee0),
+ INTC_VECT(SCIF2, 0xf00), INTC_VECT(SCIF2, 0xf20),
+ INTC_VECT(SCIF2, 0xf40), INTC_VECT(SCIF2, 0xf60),
+ INTC_VECT(GPIO, 0xf80), INTC_VECT(GPIO, 0xfa0),
+ INTC_VECT(GPIO, 0xfc0), INTC_VECT(GPIO, 0xfe0),
+};
+
+static struct intc_group groups[] __initdata = {
+ INTC_GROUP(TMU012, TMU0, TMU1, TMU2, TMU2_TICPI),
+ INTC_GROUP(TMU345, TMU3, TMU4, TMU5),
+};
+
+static struct intc_mask_reg mask_registers[] __initdata = {
+ { 0xffd40038, 0xffd4003c, 32, /* INT2MSKR / INT2MSKCR */
+ { 0, 0, 0, 0, 0, 0, GPIO, 0,
+ SSI0, MMCIF, 0, SIOF0, PCIC5, PCIINTD, PCIINTC, PCIINTB,
+ PCIINTA, PCISERR, HAC, CMT, 0, 0, 0, DMAC,
+ HUDI, 0, WDT, SCIF1, SCIF0, RTC, TMU345, TMU012 } },
+ { 0xffd400d0, 0xffd400d4, 32, /* INT2MSKR1 / INT2MSKCR1 */
+ { 0, 0, 0, 0, 0, 0, SCIF2, USBF,
+ 0, 0, STIF1, STIF0, 0, 0, USBH, GETHER,
+ PCC, 0, 0, ADC, TPU, SIM, SIOF2, SIOF1,
+ LCDC, 0, IIC1, IIC0, SSI3, SSI2, SSI1, 0 } },
+};
+
+static struct intc_prio_reg prio_registers[] __initdata = {
+ { 0xffd40000, 0, 32, 8, /* INT2PRI0 */ { TMU0, TMU1,
+ TMU2, TMU2_TICPI } },
+ { 0xffd40004, 0, 32, 8, /* INT2PRI1 */ { TMU3, TMU4, TMU5, RTC } },
+ { 0xffd40008, 0, 32, 8, /* INT2PRI2 */ { SCIF0, SCIF1, WDT } },
+ { 0xffd4000c, 0, 32, 8, /* INT2PRI3 */ { HUDI, DMAC, ADC } },
+ { 0xffd40010, 0, 32, 8, /* INT2PRI4 */ { CMT, HAC,
+ PCISERR, PCIINTA } },
+ { 0xffd40014, 0, 32, 8, /* INT2PRI5 */ { PCIINTB, PCIINTC,
+ PCIINTD, PCIC5 } },
+ { 0xffd40018, 0, 32, 8, /* INT2PRI6 */ { SIOF0, USBF, MMCIF, SSI0 } },
+ { 0xffd4001c, 0, 32, 8, /* INT2PRI7 */ { SCIF2, GPIO } },
+ { 0xffd400a0, 0, 32, 8, /* INT2PRI8 */ { SSI3, SSI2, SSI1, 0 } },
+ { 0xffd400a4, 0, 32, 8, /* INT2PRI9 */ { LCDC, 0, IIC1, IIC0 } },
+ { 0xffd400a8, 0, 32, 8, /* INT2PRI10 */ { TPU, SIM, SIOF2, SIOF1 } },
+ { 0xffd400ac, 0, 32, 8, /* INT2PRI11 */ { PCC } },
+ { 0xffd400b0, 0, 32, 8, /* INT2PRI12 */ { 0, 0, USBH, GETHER } },
+ { 0xffd400b4, 0, 32, 8, /* INT2PRI13 */ { 0, 0, STIF1, STIF0 } },
+};
+
+static DECLARE_INTC_DESC(intc_desc, "sh7763", vectors, groups,
+ mask_registers, prio_registers, NULL);
+
+/* Support for external interrupt pins in IRQ mode */
+static struct intc_vect irq_vectors[] __initdata = {
+ INTC_VECT(IRQ0, 0x240), INTC_VECT(IRQ1, 0x280),
+ INTC_VECT(IRQ2, 0x2c0), INTC_VECT(IRQ3, 0x300),
+ INTC_VECT(IRQ4, 0x340), INTC_VECT(IRQ5, 0x380),
+ INTC_VECT(IRQ6, 0x3c0), INTC_VECT(IRQ7, 0x200),
+};
+
+static struct intc_mask_reg irq_mask_registers[] __initdata = {
+ { 0xffd00044, 0xffd00064, 32, /* INTMSK0 / INTMSKCLR0 */
+ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
+};
+
+static struct intc_prio_reg irq_prio_registers[] __initdata = {
+ { 0xffd00010, 0, 32, 4, /* INTPRI */ { IRQ0, IRQ1, IRQ2, IRQ3,
+ IRQ4, IRQ5, IRQ6, IRQ7 } },
+};
+
+static struct intc_sense_reg irq_sense_registers[] __initdata = {
+ { 0xffd0001c, 32, 2, /* ICR1 */ { IRQ0, IRQ1, IRQ2, IRQ3,
+ IRQ4, IRQ5, IRQ6, IRQ7 } },
+};
+
+static struct intc_mask_reg irq_ack_registers[] __initdata = {
+ { 0xffd00024, 0, 32, /* INTREQ */
+ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
+};
+
+static DECLARE_INTC_DESC_ACK(intc_irq_desc, "sh7763-irq", irq_vectors,
+ NULL, irq_mask_registers, irq_prio_registers,
+ irq_sense_registers, irq_ack_registers);
+
+/* External interrupt pins in IRL mode */
+static struct intc_vect irl_vectors[] __initdata = {
+ INTC_VECT(IRL_LLLL, 0x200), INTC_VECT(IRL_LLLH, 0x220),
+ INTC_VECT(IRL_LLHL, 0x240), INTC_VECT(IRL_LLHH, 0x260),
+ INTC_VECT(IRL_LHLL, 0x280), INTC_VECT(IRL_LHLH, 0x2a0),
+ INTC_VECT(IRL_LHHL, 0x2c0), INTC_VECT(IRL_LHHH, 0x2e0),
+ INTC_VECT(IRL_HLLL, 0x300), INTC_VECT(IRL_HLLH, 0x320),
+ INTC_VECT(IRL_HLHL, 0x340), INTC_VECT(IRL_HLHH, 0x360),
+ INTC_VECT(IRL_HHLL, 0x380), INTC_VECT(IRL_HHLH, 0x3a0),
+ INTC_VECT(IRL_HHHL, 0x3c0),
+};
+
+static struct intc_mask_reg irl3210_mask_registers[] __initdata = {
+ { 0xffd40080, 0xffd40084, 32, /* INTMSK2 / INTMSKCLR2 */
+ { IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH,
+ IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH,
+ IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH,
+ IRL_HHLL, IRL_HHLH, IRL_HHHL, } },
+};
+
+static struct intc_mask_reg irl7654_mask_registers[] __initdata = {
+ { 0xffd40080, 0xffd40084, 32, /* INTMSK2 / INTMSKCLR2 */
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH,
+ IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH,
+ IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH,
+ IRL_HHLL, IRL_HHLH, IRL_HHHL, } },
+};
+
+static DECLARE_INTC_DESC(intc_irl7654_desc, "sh7763-irl7654", irl_vectors,
+ NULL, irl7654_mask_registers, NULL, NULL);
+
+static DECLARE_INTC_DESC(intc_irl3210_desc, "sh7763-irl3210", irl_vectors,
+ NULL, irl3210_mask_registers, NULL, NULL);
+
+#define INTC_ICR0 0xffd00000
+#define INTC_INTMSK0 0xffd00044
+#define INTC_INTMSK1 0xffd00048
+#define INTC_INTMSK2 0xffd40080
+#define INTC_INTMSKCLR1 0xffd00068
+#define INTC_INTMSKCLR2 0xffd40084
+
+void __init plat_irq_setup(void)
+{
+ /* disable IRQ7-0 */
+ __raw_writel(0xff000000, INTC_INTMSK0);
+
+ /* disable IRL3-0 + IRL7-4 */
+ __raw_writel(0xc0000000, INTC_INTMSK1);
+ __raw_writel(0xfffefffe, INTC_INTMSK2);
+
+ register_intc_controller(&intc_desc);
+}
+
+void __init plat_irq_setup_pins(int mode)
+{
+ switch (mode) {
+ case IRQ_MODE_IRQ:
+ /* select IRQ mode for IRL3-0 + IRL7-4 */
+ __raw_writel(__raw_readl(INTC_ICR0) | 0x00c00000, INTC_ICR0);
+ register_intc_controller(&intc_irq_desc);
+ break;
+ case IRQ_MODE_IRL7654:
+ /* enable IRL7-4 but don't provide any masking */
+ __raw_writel(0x40000000, INTC_INTMSKCLR1);
+ __raw_writel(0x0000fffe, INTC_INTMSKCLR2);
+ break;
+ case IRQ_MODE_IRL3210:
+ /* enable IRL0-3 but don't provide any masking */
+ __raw_writel(0x80000000, INTC_INTMSKCLR1);
+ __raw_writel(0xfffe0000, INTC_INTMSKCLR2);
+ break;
+ case IRQ_MODE_IRL7654_MASK:
+ /* enable IRL7-4 and mask using cpu intc controller */
+ __raw_writel(0x40000000, INTC_INTMSKCLR1);
+ register_intc_controller(&intc_irl7654_desc);
+ break;
+ case IRQ_MODE_IRL3210_MASK:
+ /* enable IRL0-3 and mask using cpu intc controller */
+ __raw_writel(0x80000000, INTC_INTMSKCLR1);
+ register_intc_controller(&intc_irl3210_desc);
+ break;
+ default:
+ BUG();
+ }
+}
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7770.c b/arch/sh/kernel/cpu/sh4a/setup-sh7770.c
new file mode 100644
index 00000000..2c6aa22c
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7770.c
@@ -0,0 +1,736 @@
+/*
+ * SH7770 Setup
+ *
+ * Copyright (C) 2006 - 2008 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/serial.h>
+#include <linux/serial_sci.h>
+#include <linux/sh_timer.h>
+#include <linux/io.h>
+
+static struct plat_sci_port scif0_platform_data = {
+ .mapbase = 0xff923000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE,
+ .scbrr_algo_id = SCBRR_ALGO_2,
+ .type = PORT_SCIF,
+ .irqs = { 61, 61, 61, 61 },
+};
+
+static struct platform_device scif0_device = {
+ .name = "sh-sci",
+ .id = 0,
+ .dev = {
+ .platform_data = &scif0_platform_data,
+ },
+};
+
+static struct plat_sci_port scif1_platform_data = {
+ .mapbase = 0xff924000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE,
+ .scbrr_algo_id = SCBRR_ALGO_2,
+ .type = PORT_SCIF,
+ .irqs = { 62, 62, 62, 62 },
+};
+
+static struct platform_device scif1_device = {
+ .name = "sh-sci",
+ .id = 1,
+ .dev = {
+ .platform_data = &scif1_platform_data,
+ },
+};
+
+static struct plat_sci_port scif2_platform_data = {
+ .mapbase = 0xff925000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE,
+ .scbrr_algo_id = SCBRR_ALGO_2,
+ .type = PORT_SCIF,
+ .irqs = { 63, 63, 63, 63 },
+};
+
+static struct platform_device scif2_device = {
+ .name = "sh-sci",
+ .id = 2,
+ .dev = {
+ .platform_data = &scif2_platform_data,
+ },
+};
+
+static struct plat_sci_port scif3_platform_data = {
+ .mapbase = 0xff926000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE,
+ .scbrr_algo_id = SCBRR_ALGO_2,
+ .type = PORT_SCIF,
+ .irqs = { 64, 64, 64, 64 },
+};
+
+static struct platform_device scif3_device = {
+ .name = "sh-sci",
+ .id = 3,
+ .dev = {
+ .platform_data = &scif3_platform_data,
+ },
+};
+
+static struct plat_sci_port scif4_platform_data = {
+ .mapbase = 0xff927000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE,
+ .scbrr_algo_id = SCBRR_ALGO_2,
+ .type = PORT_SCIF,
+ .irqs = { 65, 65, 65, 65 },
+};
+
+static struct platform_device scif4_device = {
+ .name = "sh-sci",
+ .id = 4,
+ .dev = {
+ .platform_data = &scif4_platform_data,
+ },
+};
+
+static struct plat_sci_port scif5_platform_data = {
+ .mapbase = 0xff928000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE,
+ .scbrr_algo_id = SCBRR_ALGO_2,
+ .type = PORT_SCIF,
+ .irqs = { 66, 66, 66, 66 },
+};
+
+static struct platform_device scif5_device = {
+ .name = "sh-sci",
+ .id = 5,
+ .dev = {
+ .platform_data = &scif5_platform_data,
+ },
+};
+
+static struct plat_sci_port scif6_platform_data = {
+ .mapbase = 0xff929000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE,
+ .scbrr_algo_id = SCBRR_ALGO_2,
+ .type = PORT_SCIF,
+ .irqs = { 67, 67, 67, 67 },
+};
+
+static struct platform_device scif6_device = {
+ .name = "sh-sci",
+ .id = 6,
+ .dev = {
+ .platform_data = &scif6_platform_data,
+ },
+};
+
+static struct plat_sci_port scif7_platform_data = {
+ .mapbase = 0xff92a000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE,
+ .scbrr_algo_id = SCBRR_ALGO_2,
+ .type = PORT_SCIF,
+ .irqs = { 68, 68, 68, 68 },
+};
+
+static struct platform_device scif7_device = {
+ .name = "sh-sci",
+ .id = 7,
+ .dev = {
+ .platform_data = &scif7_platform_data,
+ },
+};
+
+static struct plat_sci_port scif8_platform_data = {
+ .mapbase = 0xff92b000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE,
+ .scbrr_algo_id = SCBRR_ALGO_2,
+ .type = PORT_SCIF,
+ .irqs = { 69, 69, 69, 69 },
+};
+
+static struct platform_device scif8_device = {
+ .name = "sh-sci",
+ .id = 8,
+ .dev = {
+ .platform_data = &scif8_platform_data,
+ },
+};
+
+static struct plat_sci_port scif9_platform_data = {
+ .mapbase = 0xff92c000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE,
+ .scbrr_algo_id = SCBRR_ALGO_2,
+ .type = PORT_SCIF,
+ .irqs = { 70, 70, 70, 70 },
+};
+
+static struct platform_device scif9_device = {
+ .name = "sh-sci",
+ .id = 9,
+ .dev = {
+ .platform_data = &scif9_platform_data,
+ },
+};
+
+static struct sh_timer_config tmu0_platform_data = {
+ .channel_offset = 0x04,
+ .timer_bit = 0,
+ .clockevent_rating = 200,
+};
+
+static struct resource tmu0_resources[] = {
+ [0] = {
+ .start = 0xffd80008,
+ .end = 0xffd80013,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 16,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu0_device = {
+ .name = "sh_tmu",
+ .id = 0,
+ .dev = {
+ .platform_data = &tmu0_platform_data,
+ },
+ .resource = tmu0_resources,
+ .num_resources = ARRAY_SIZE(tmu0_resources),
+};
+
+static struct sh_timer_config tmu1_platform_data = {
+ .channel_offset = 0x10,
+ .timer_bit = 1,
+ .clocksource_rating = 200,
+};
+
+static struct resource tmu1_resources[] = {
+ [0] = {
+ .start = 0xffd80014,
+ .end = 0xffd8001f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 17,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu1_device = {
+ .name = "sh_tmu",
+ .id = 1,
+ .dev = {
+ .platform_data = &tmu1_platform_data,
+ },
+ .resource = tmu1_resources,
+ .num_resources = ARRAY_SIZE(tmu1_resources),
+};
+
+static struct sh_timer_config tmu2_platform_data = {
+ .channel_offset = 0x1c,
+ .timer_bit = 2,
+};
+
+static struct resource tmu2_resources[] = {
+ [0] = {
+ .start = 0xffd80020,
+ .end = 0xffd8002f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 18,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu2_device = {
+ .name = "sh_tmu",
+ .id = 2,
+ .dev = {
+ .platform_data = &tmu2_platform_data,
+ },
+ .resource = tmu2_resources,
+ .num_resources = ARRAY_SIZE(tmu2_resources),
+};
+
+static struct sh_timer_config tmu3_platform_data = {
+ .channel_offset = 0x04,
+ .timer_bit = 0,
+};
+
+static struct resource tmu3_resources[] = {
+ [0] = {
+ .start = 0xffd81008,
+ .end = 0xffd81013,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 19,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu3_device = {
+ .name = "sh_tmu",
+ .id = 3,
+ .dev = {
+ .platform_data = &tmu3_platform_data,
+ },
+ .resource = tmu3_resources,
+ .num_resources = ARRAY_SIZE(tmu3_resources),
+};
+
+static struct sh_timer_config tmu4_platform_data = {
+ .channel_offset = 0x10,
+ .timer_bit = 1,
+};
+
+static struct resource tmu4_resources[] = {
+ [0] = {
+ .start = 0xffd81014,
+ .end = 0xffd8101f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 20,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu4_device = {
+ .name = "sh_tmu",
+ .id = 4,
+ .dev = {
+ .platform_data = &tmu4_platform_data,
+ },
+ .resource = tmu4_resources,
+ .num_resources = ARRAY_SIZE(tmu4_resources),
+};
+
+static struct sh_timer_config tmu5_platform_data = {
+ .channel_offset = 0x1c,
+ .timer_bit = 2,
+};
+
+static struct resource tmu5_resources[] = {
+ [0] = {
+ .start = 0xffd81020,
+ .end = 0xffd8102f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 21,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu5_device = {
+ .name = "sh_tmu",
+ .id = 5,
+ .dev = {
+ .platform_data = &tmu5_platform_data,
+ },
+ .resource = tmu5_resources,
+ .num_resources = ARRAY_SIZE(tmu5_resources),
+};
+
+static struct sh_timer_config tmu6_platform_data = {
+ .channel_offset = 0x04,
+ .timer_bit = 0,
+};
+
+static struct resource tmu6_resources[] = {
+ [0] = {
+ .start = 0xffd82008,
+ .end = 0xffd82013,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 22,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu6_device = {
+ .name = "sh_tmu",
+ .id = 6,
+ .dev = {
+ .platform_data = &tmu6_platform_data,
+ },
+ .resource = tmu6_resources,
+ .num_resources = ARRAY_SIZE(tmu6_resources),
+};
+
+static struct sh_timer_config tmu7_platform_data = {
+ .channel_offset = 0x10,
+ .timer_bit = 1,
+};
+
+static struct resource tmu7_resources[] = {
+ [0] = {
+ .start = 0xffd82014,
+ .end = 0xffd8201f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 23,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu7_device = {
+ .name = "sh_tmu",
+ .id = 7,
+ .dev = {
+ .platform_data = &tmu7_platform_data,
+ },
+ .resource = tmu7_resources,
+ .num_resources = ARRAY_SIZE(tmu7_resources),
+};
+
+static struct sh_timer_config tmu8_platform_data = {
+ .channel_offset = 0x1c,
+ .timer_bit = 2,
+};
+
+static struct resource tmu8_resources[] = {
+ [0] = {
+ .start = 0xffd82020,
+ .end = 0xffd8202b,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 24,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu8_device = {
+ .name = "sh_tmu",
+ .id = 8,
+ .dev = {
+ .platform_data = &tmu8_platform_data,
+ },
+ .resource = tmu8_resources,
+ .num_resources = ARRAY_SIZE(tmu8_resources),
+};
+
+static struct platform_device *sh7770_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &scif3_device,
+ &scif4_device,
+ &scif5_device,
+ &scif6_device,
+ &scif7_device,
+ &scif8_device,
+ &scif9_device,
+ &tmu0_device,
+ &tmu1_device,
+ &tmu2_device,
+ &tmu3_device,
+ &tmu4_device,
+ &tmu5_device,
+ &tmu6_device,
+ &tmu7_device,
+ &tmu8_device,
+};
+
+static int __init sh7770_devices_setup(void)
+{
+ return platform_add_devices(sh7770_devices,
+ ARRAY_SIZE(sh7770_devices));
+}
+arch_initcall(sh7770_devices_setup);
+
+static struct platform_device *sh7770_early_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &scif3_device,
+ &scif4_device,
+ &scif5_device,
+ &scif6_device,
+ &scif7_device,
+ &scif8_device,
+ &scif9_device,
+ &tmu0_device,
+ &tmu1_device,
+ &tmu2_device,
+ &tmu3_device,
+ &tmu4_device,
+ &tmu5_device,
+ &tmu6_device,
+ &tmu7_device,
+ &tmu8_device,
+};
+
+void __init plat_early_device_setup(void)
+{
+ early_platform_add_devices(sh7770_early_devices,
+ ARRAY_SIZE(sh7770_early_devices));
+}
+
+enum {
+ UNUSED = 0,
+
+ /* interrupt sources */
+ IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH,
+ IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH,
+ IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH,
+ IRL_HHLL, IRL_HHLH, IRL_HHHL,
+
+ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5,
+
+ GPIO,
+ TMU0, TMU1, TMU2, TMU2_TICPI,
+ TMU3, TMU4, TMU5, TMU5_TICPI,
+ TMU6, TMU7, TMU8,
+ HAC, IPI, SPDIF, HUDI, I2C,
+ DMAC0_DMINT0, DMAC0_DMINT1, DMAC0_DMINT2,
+ I2S0, I2S1, I2S2, I2S3,
+ SRC_RX, SRC_TX, SRC_SPDIF,
+ DU, VIDEO_IN, REMOTE, YUV, USB, ATAPI, CAN, GPS, GFX2D,
+ GFX3D_MBX, GFX3D_DMAC,
+ EXBUS_ATA,
+ SPI0, SPI1,
+ SCIF089, SCIF1234, SCIF567,
+ ADC,
+ BBDMAC_0_3, BBDMAC_4_7, BBDMAC_8_10, BBDMAC_11_14,
+ BBDMAC_15_18, BBDMAC_19_22, BBDMAC_23_26, BBDMAC_27,
+ BBDMAC_28, BBDMAC_29, BBDMAC_30, BBDMAC_31,
+
+ /* interrupt groups */
+ TMU, DMAC, I2S, SRC, GFX3D, SPI, SCIF, BBDMAC,
+};
+
+static struct intc_vect vectors[] __initdata = {
+ INTC_VECT(GPIO, 0x3e0),
+ INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420),
+ INTC_VECT(TMU2, 0x440), INTC_VECT(TMU2_TICPI, 0x460),
+ INTC_VECT(TMU3, 0x480), INTC_VECT(TMU4, 0x4a0),
+ INTC_VECT(TMU5, 0x4c0), INTC_VECT(TMU5_TICPI, 0x4e0),
+ INTC_VECT(TMU6, 0x500), INTC_VECT(TMU7, 0x520),
+ INTC_VECT(TMU8, 0x540),
+ INTC_VECT(HAC, 0x580), INTC_VECT(IPI, 0x5c0),
+ INTC_VECT(SPDIF, 0x5e0),
+ INTC_VECT(HUDI, 0x600), INTC_VECT(I2C, 0x620),
+ INTC_VECT(DMAC0_DMINT0, 0x640), INTC_VECT(DMAC0_DMINT1, 0x660),
+ INTC_VECT(DMAC0_DMINT2, 0x680),
+ INTC_VECT(I2S0, 0x6a0), INTC_VECT(I2S1, 0x6c0),
+ INTC_VECT(I2S2, 0x6e0), INTC_VECT(I2S3, 0x700),
+ INTC_VECT(SRC_RX, 0x720), INTC_VECT(SRC_TX, 0x740),
+ INTC_VECT(SRC_SPDIF, 0x760),
+ INTC_VECT(DU, 0x780), INTC_VECT(VIDEO_IN, 0x7a0),
+ INTC_VECT(REMOTE, 0x7c0), INTC_VECT(YUV, 0x7e0),
+ INTC_VECT(USB, 0x840), INTC_VECT(ATAPI, 0x860),
+ INTC_VECT(CAN, 0x880), INTC_VECT(GPS, 0x8a0),
+ INTC_VECT(GFX2D, 0x8c0),
+ INTC_VECT(GFX3D_MBX, 0x900), INTC_VECT(GFX3D_DMAC, 0x920),
+ INTC_VECT(EXBUS_ATA, 0x940),
+ INTC_VECT(SPI0, 0x960), INTC_VECT(SPI1, 0x980),
+ INTC_VECT(SCIF089, 0x9a0), INTC_VECT(SCIF1234, 0x9c0),
+ INTC_VECT(SCIF1234, 0x9e0), INTC_VECT(SCIF1234, 0xa00),
+ INTC_VECT(SCIF1234, 0xa20), INTC_VECT(SCIF567, 0xa40),
+ INTC_VECT(SCIF567, 0xa60), INTC_VECT(SCIF567, 0xa80),
+ INTC_VECT(SCIF089, 0xaa0), INTC_VECT(SCIF089, 0xac0),
+ INTC_VECT(ADC, 0xb20),
+ INTC_VECT(BBDMAC_0_3, 0xba0), INTC_VECT(BBDMAC_0_3, 0xbc0),
+ INTC_VECT(BBDMAC_0_3, 0xbe0), INTC_VECT(BBDMAC_0_3, 0xc00),
+ INTC_VECT(BBDMAC_4_7, 0xc20), INTC_VECT(BBDMAC_4_7, 0xc40),
+ INTC_VECT(BBDMAC_4_7, 0xc60), INTC_VECT(BBDMAC_4_7, 0xc80),
+ INTC_VECT(BBDMAC_8_10, 0xca0), INTC_VECT(BBDMAC_8_10, 0xcc0),
+ INTC_VECT(BBDMAC_8_10, 0xce0), INTC_VECT(BBDMAC_11_14, 0xd00),
+ INTC_VECT(BBDMAC_11_14, 0xd20), INTC_VECT(BBDMAC_11_14, 0xd40),
+ INTC_VECT(BBDMAC_11_14, 0xd60), INTC_VECT(BBDMAC_15_18, 0xd80),
+ INTC_VECT(BBDMAC_15_18, 0xda0), INTC_VECT(BBDMAC_15_18, 0xdc0),
+ INTC_VECT(BBDMAC_15_18, 0xde0), INTC_VECT(BBDMAC_19_22, 0xe00),
+ INTC_VECT(BBDMAC_19_22, 0xe20), INTC_VECT(BBDMAC_19_22, 0xe40),
+ INTC_VECT(BBDMAC_19_22, 0xe60), INTC_VECT(BBDMAC_23_26, 0xe80),
+ INTC_VECT(BBDMAC_23_26, 0xea0), INTC_VECT(BBDMAC_23_26, 0xec0),
+ INTC_VECT(BBDMAC_23_26, 0xee0), INTC_VECT(BBDMAC_27, 0xf00),
+ INTC_VECT(BBDMAC_28, 0xf20), INTC_VECT(BBDMAC_29, 0xf40),
+ INTC_VECT(BBDMAC_30, 0xf60), INTC_VECT(BBDMAC_31, 0xf80),
+};
+
+static struct intc_group groups[] __initdata = {
+ INTC_GROUP(TMU, TMU0, TMU1, TMU2, TMU2_TICPI, TMU3, TMU4, TMU5,
+ TMU5_TICPI, TMU6, TMU7, TMU8),
+ INTC_GROUP(DMAC, DMAC0_DMINT0, DMAC0_DMINT1, DMAC0_DMINT2),
+ INTC_GROUP(I2S, I2S0, I2S1, I2S2, I2S3),
+ INTC_GROUP(SRC, SRC_RX, SRC_TX, SRC_SPDIF),
+ INTC_GROUP(GFX3D, GFX3D_MBX, GFX3D_DMAC),
+ INTC_GROUP(SPI, SPI0, SPI1),
+ INTC_GROUP(SCIF, SCIF089, SCIF1234, SCIF567),
+ INTC_GROUP(BBDMAC,
+ BBDMAC_0_3, BBDMAC_4_7, BBDMAC_8_10, BBDMAC_11_14,
+ BBDMAC_15_18, BBDMAC_19_22, BBDMAC_23_26, BBDMAC_27,
+ BBDMAC_28, BBDMAC_29, BBDMAC_30, BBDMAC_31),
+};
+
+static struct intc_mask_reg mask_registers[] __initdata = {
+ { 0xffe00040, 0xffe00044, 32, /* INT2MSKR / INT2MSKCR */
+ { 0, BBDMAC, ADC, SCIF, SPI, EXBUS_ATA, GFX3D, GFX2D,
+ GPS, CAN, ATAPI, USB, YUV, REMOTE, VIDEO_IN, DU, SRC, I2S,
+ DMAC, I2C, HUDI, SPDIF, IPI, HAC, TMU, GPIO } },
+};
+
+static struct intc_prio_reg prio_registers[] __initdata = {
+ { 0xffe00000, 0, 32, 8, /* INT2PRI0 */ { GPIO, TMU0, 0, HAC } },
+ { 0xffe00004, 0, 32, 8, /* INT2PRI1 */ { IPI, SPDIF, HUDI, I2C } },
+ { 0xffe00008, 0, 32, 8, /* INT2PRI2 */ { DMAC, I2S, SRC, DU } },
+ { 0xffe0000c, 0, 32, 8, /* INT2PRI3 */ { VIDEO_IN, REMOTE, YUV, USB } },
+ { 0xffe00010, 0, 32, 8, /* INT2PRI4 */ { ATAPI, CAN, GPS, GFX2D } },
+ { 0xffe00014, 0, 32, 8, /* INT2PRI5 */ { 0, GFX3D, EXBUS_ATA, SPI } },
+ { 0xffe00018, 0, 32, 8, /* INT2PRI6 */ { SCIF1234, SCIF567, SCIF089 } },
+ { 0xffe0001c, 0, 32, 8, /* INT2PRI7 */ { ADC, 0, 0, BBDMAC_0_3 } },
+ { 0xffe00020, 0, 32, 8, /* INT2PRI8 */
+ { BBDMAC_4_7, BBDMAC_8_10, BBDMAC_11_14, BBDMAC_15_18 } },
+ { 0xffe00024, 0, 32, 8, /* INT2PRI9 */
+ { BBDMAC_19_22, BBDMAC_23_26, BBDMAC_27, BBDMAC_28 } },
+ { 0xffe00028, 0, 32, 8, /* INT2PRI10 */
+ { BBDMAC_29, BBDMAC_30, BBDMAC_31 } },
+ { 0xffe0002c, 0, 32, 8, /* INT2PRI11 */
+ { TMU1, TMU2, TMU2_TICPI, TMU3 } },
+ { 0xffe00030, 0, 32, 8, /* INT2PRI12 */
+ { TMU4, TMU5, TMU5_TICPI, TMU6 } },
+ { 0xffe00034, 0, 32, 8, /* INT2PRI13 */
+ { TMU7, TMU8 } },
+};
+
+static DECLARE_INTC_DESC(intc_desc, "sh7770", vectors, groups,
+ mask_registers, prio_registers, NULL);
+
+/* Support for external interrupt pins in IRQ mode */
+static struct intc_vect irq_vectors[] __initdata = {
+ INTC_VECT(IRQ0, 0x240), INTC_VECT(IRQ1, 0x280),
+ INTC_VECT(IRQ2, 0x2c0), INTC_VECT(IRQ3, 0x300),
+ INTC_VECT(IRQ4, 0x340), INTC_VECT(IRQ5, 0x380),
+};
+
+static struct intc_mask_reg irq_mask_registers[] __initdata = {
+ { 0xffd00044, 0xffd00064, 32, /* INTMSK0 / INTMSKCLR0 */
+ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, } },
+};
+
+static struct intc_prio_reg irq_prio_registers[] __initdata = {
+ { 0xffd00010, 0, 32, 4, /* INTPRI */ { IRQ0, IRQ1, IRQ2, IRQ3,
+ IRQ4, IRQ5, } },
+};
+
+static struct intc_sense_reg irq_sense_registers[] __initdata = {
+ { 0xffd0001c, 32, 2, /* ICR1 */ { IRQ0, IRQ1, IRQ2, IRQ3,
+ IRQ4, IRQ5, } },
+};
+
+static DECLARE_INTC_DESC(intc_irq_desc, "sh7770-irq", irq_vectors,
+ NULL, irq_mask_registers, irq_prio_registers,
+ irq_sense_registers);
+
+/* External interrupt pins in IRL mode */
+static struct intc_vect irl_vectors[] __initdata = {
+ INTC_VECT(IRL_LLLL, 0x200), INTC_VECT(IRL_LLLH, 0x220),
+ INTC_VECT(IRL_LLHL, 0x240), INTC_VECT(IRL_LLHH, 0x260),
+ INTC_VECT(IRL_LHLL, 0x280), INTC_VECT(IRL_LHLH, 0x2a0),
+ INTC_VECT(IRL_LHHL, 0x2c0), INTC_VECT(IRL_LHHH, 0x2e0),
+ INTC_VECT(IRL_HLLL, 0x300), INTC_VECT(IRL_HLLH, 0x320),
+ INTC_VECT(IRL_HLHL, 0x340), INTC_VECT(IRL_HLHH, 0x360),
+ INTC_VECT(IRL_HHLL, 0x380), INTC_VECT(IRL_HHLH, 0x3a0),
+ INTC_VECT(IRL_HHHL, 0x3c0),
+};
+
+static struct intc_mask_reg irl3210_mask_registers[] __initdata = {
+ { 0xffd40080, 0xffd40084, 32, /* INTMSK2 / INTMSKCLR2 */
+ { IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH,
+ IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH,
+ IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH,
+ IRL_HHLL, IRL_HHLH, IRL_HHHL, } },
+};
+
+static struct intc_mask_reg irl7654_mask_registers[] __initdata = {
+ { 0xffd40080, 0xffd40084, 32, /* INTMSK2 / INTMSKCLR2 */
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH,
+ IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH,
+ IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH,
+ IRL_HHLL, IRL_HHLH, IRL_HHHL, } },
+};
+
+static DECLARE_INTC_DESC(intc_irl7654_desc, "sh7770-irl7654", irl_vectors,
+ NULL, irl7654_mask_registers, NULL, NULL);
+
+static DECLARE_INTC_DESC(intc_irl3210_desc, "sh7770-irl3210", irl_vectors,
+ NULL, irl3210_mask_registers, NULL, NULL);
+
+#define INTC_ICR0 0xffd00000
+#define INTC_INTMSK0 0xffd00044
+#define INTC_INTMSK1 0xffd00048
+#define INTC_INTMSK2 0xffd40080
+#define INTC_INTMSKCLR1 0xffd00068
+#define INTC_INTMSKCLR2 0xffd40084
+
+void __init plat_irq_setup(void)
+{
+ /* disable IRQ7-0 */
+ __raw_writel(0xff000000, INTC_INTMSK0);
+
+ /* disable IRL3-0 + IRL7-4 */
+ __raw_writel(0xc0000000, INTC_INTMSK1);
+ __raw_writel(0xfffefffe, INTC_INTMSK2);
+
+ /* select IRL mode for IRL3-0 + IRL7-4 */
+ __raw_writel(__raw_readl(INTC_ICR0) & ~0x00c00000, INTC_ICR0);
+
+ /* disable holding function, i.e. enable "SH-4 Mode" */
+ __raw_writel(__raw_readl(INTC_ICR0) | 0x00200000, INTC_ICR0);
+
+ register_intc_controller(&intc_desc);
+}
+
+void __init plat_irq_setup_pins(int mode)
+{
+ switch (mode) {
+ case IRQ_MODE_IRQ:
+ /* select IRQ mode for IRL3-0 + IRL7-4 */
+ __raw_writel(__raw_readl(INTC_ICR0) | 0x00c00000, INTC_ICR0);
+ register_intc_controller(&intc_irq_desc);
+ break;
+ case IRQ_MODE_IRL7654:
+ /* enable IRL7-4 but don't provide any masking */
+ __raw_writel(0x40000000, INTC_INTMSKCLR1);
+ __raw_writel(0x0000fffe, INTC_INTMSKCLR2);
+ break;
+ case IRQ_MODE_IRL3210:
+ /* enable IRL0-3 but don't provide any masking */
+ __raw_writel(0x80000000, INTC_INTMSKCLR1);
+ __raw_writel(0xfffe0000, INTC_INTMSKCLR2);
+ break;
+ case IRQ_MODE_IRL7654_MASK:
+ /* enable IRL7-4 and mask using cpu intc controller */
+ __raw_writel(0x40000000, INTC_INTMSKCLR1);
+ register_intc_controller(&intc_irl7654_desc);
+ break;
+ case IRQ_MODE_IRL3210_MASK:
+ /* enable IRL0-3 and mask using cpu intc controller */
+ __raw_writel(0x80000000, INTC_INTMSKCLR1);
+ register_intc_controller(&intc_irl3210_desc);
+ break;
+ default:
+ BUG();
+ }
+}
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7780.c b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c
new file mode 100644
index 00000000..d431b005
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c
@@ -0,0 +1,626 @@
+/*
+ * SH7780 Setup
+ *
+ * Copyright (C) 2006 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/serial.h>
+#include <linux/io.h>
+#include <linux/serial_sci.h>
+#include <linux/sh_dma.h>
+#include <linux/sh_timer.h>
+#include <cpu/dma-register.h>
+
+static struct plat_sci_port scif0_platform_data = {
+ .mapbase = 0xffe00000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
+ .scbrr_algo_id = SCBRR_ALGO_1,
+ .type = PORT_SCIF,
+ .irqs = { 40, 40, 40, 40 },
+ .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
+};
+
+static struct platform_device scif0_device = {
+ .name = "sh-sci",
+ .id = 0,
+ .dev = {
+ .platform_data = &scif0_platform_data,
+ },
+};
+
+static struct plat_sci_port scif1_platform_data = {
+ .mapbase = 0xffe10000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
+ .scbrr_algo_id = SCBRR_ALGO_1,
+ .type = PORT_SCIF,
+ .irqs = { 76, 76, 76, 76 },
+ .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
+};
+
+static struct platform_device scif1_device = {
+ .name = "sh-sci",
+ .id = 1,
+ .dev = {
+ .platform_data = &scif1_platform_data,
+ },
+};
+
+static struct sh_timer_config tmu0_platform_data = {
+ .channel_offset = 0x04,
+ .timer_bit = 0,
+ .clockevent_rating = 200,
+};
+
+static struct resource tmu0_resources[] = {
+ [0] = {
+ .start = 0xffd80008,
+ .end = 0xffd80013,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 28,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu0_device = {
+ .name = "sh_tmu",
+ .id = 0,
+ .dev = {
+ .platform_data = &tmu0_platform_data,
+ },
+ .resource = tmu0_resources,
+ .num_resources = ARRAY_SIZE(tmu0_resources),
+};
+
+static struct sh_timer_config tmu1_platform_data = {
+ .channel_offset = 0x10,
+ .timer_bit = 1,
+ .clocksource_rating = 200,
+};
+
+static struct resource tmu1_resources[] = {
+ [0] = {
+ .start = 0xffd80014,
+ .end = 0xffd8001f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 29,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu1_device = {
+ .name = "sh_tmu",
+ .id = 1,
+ .dev = {
+ .platform_data = &tmu1_platform_data,
+ },
+ .resource = tmu1_resources,
+ .num_resources = ARRAY_SIZE(tmu1_resources),
+};
+
+static struct sh_timer_config tmu2_platform_data = {
+ .channel_offset = 0x1c,
+ .timer_bit = 2,
+};
+
+static struct resource tmu2_resources[] = {
+ [0] = {
+ .start = 0xffd80020,
+ .end = 0xffd8002f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 30,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu2_device = {
+ .name = "sh_tmu",
+ .id = 2,
+ .dev = {
+ .platform_data = &tmu2_platform_data,
+ },
+ .resource = tmu2_resources,
+ .num_resources = ARRAY_SIZE(tmu2_resources),
+};
+
+static struct sh_timer_config tmu3_platform_data = {
+ .channel_offset = 0x04,
+ .timer_bit = 0,
+};
+
+static struct resource tmu3_resources[] = {
+ [0] = {
+ .start = 0xffdc0008,
+ .end = 0xffdc0013,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 96,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu3_device = {
+ .name = "sh_tmu",
+ .id = 3,
+ .dev = {
+ .platform_data = &tmu3_platform_data,
+ },
+ .resource = tmu3_resources,
+ .num_resources = ARRAY_SIZE(tmu3_resources),
+};
+
+static struct sh_timer_config tmu4_platform_data = {
+ .channel_offset = 0x10,
+ .timer_bit = 1,
+};
+
+static struct resource tmu4_resources[] = {
+ [0] = {
+ .start = 0xffdc0014,
+ .end = 0xffdc001f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 97,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu4_device = {
+ .name = "sh_tmu",
+ .id = 4,
+ .dev = {
+ .platform_data = &tmu4_platform_data,
+ },
+ .resource = tmu4_resources,
+ .num_resources = ARRAY_SIZE(tmu4_resources),
+};
+
+static struct sh_timer_config tmu5_platform_data = {
+ .channel_offset = 0x1c,
+ .timer_bit = 2,
+};
+
+static struct resource tmu5_resources[] = {
+ [0] = {
+ .start = 0xffdc0020,
+ .end = 0xffdc002b,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 98,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu5_device = {
+ .name = "sh_tmu",
+ .id = 5,
+ .dev = {
+ .platform_data = &tmu5_platform_data,
+ },
+ .resource = tmu5_resources,
+ .num_resources = ARRAY_SIZE(tmu5_resources),
+};
+
+static struct resource rtc_resources[] = {
+ [0] = {
+ .start = 0xffe80000,
+ .end = 0xffe80000 + 0x58 - 1,
+ .flags = IORESOURCE_IO,
+ },
+ [1] = {
+ /* Shared Period/Carry/Alarm IRQ */
+ .start = 20,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device rtc_device = {
+ .name = "sh-rtc",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(rtc_resources),
+ .resource = rtc_resources,
+};
+
+/* DMA */
+static const struct sh_dmae_channel sh7780_dmae0_channels[] = {
+ {
+ .offset = 0,
+ .dmars = 0,
+ .dmars_bit = 0,
+ }, {
+ .offset = 0x10,
+ .dmars = 0,
+ .dmars_bit = 8,
+ }, {
+ .offset = 0x20,
+ .dmars = 4,
+ .dmars_bit = 0,
+ }, {
+ .offset = 0x30,
+ .dmars = 4,
+ .dmars_bit = 8,
+ }, {
+ .offset = 0x50,
+ .dmars = 8,
+ .dmars_bit = 0,
+ }, {
+ .offset = 0x60,
+ .dmars = 8,
+ .dmars_bit = 8,
+ }
+};
+
+static const struct sh_dmae_channel sh7780_dmae1_channels[] = {
+ {
+ .offset = 0,
+ }, {
+ .offset = 0x10,
+ }, {
+ .offset = 0x20,
+ }, {
+ .offset = 0x30,
+ }, {
+ .offset = 0x50,
+ }, {
+ .offset = 0x60,
+ }
+};
+
+static const unsigned int ts_shift[] = TS_SHIFT;
+
+static struct sh_dmae_pdata dma0_platform_data = {
+ .channel = sh7780_dmae0_channels,
+ .channel_num = ARRAY_SIZE(sh7780_dmae0_channels),
+ .ts_low_shift = CHCR_TS_LOW_SHIFT,
+ .ts_low_mask = CHCR_TS_LOW_MASK,
+ .ts_high_shift = CHCR_TS_HIGH_SHIFT,
+ .ts_high_mask = CHCR_TS_HIGH_MASK,
+ .ts_shift = ts_shift,
+ .ts_shift_num = ARRAY_SIZE(ts_shift),
+ .dmaor_init = DMAOR_INIT,
+};
+
+static struct sh_dmae_pdata dma1_platform_data = {
+ .channel = sh7780_dmae1_channels,
+ .channel_num = ARRAY_SIZE(sh7780_dmae1_channels),
+ .ts_low_shift = CHCR_TS_LOW_SHIFT,
+ .ts_low_mask = CHCR_TS_LOW_MASK,
+ .ts_high_shift = CHCR_TS_HIGH_SHIFT,
+ .ts_high_mask = CHCR_TS_HIGH_MASK,
+ .ts_shift = ts_shift,
+ .ts_shift_num = ARRAY_SIZE(ts_shift),
+ .dmaor_init = DMAOR_INIT,
+};
+
+static struct resource sh7780_dmae0_resources[] = {
+ [0] = {
+ /* Channel registers and DMAOR */
+ .start = 0xfc808020,
+ .end = 0xfc80808f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ /* DMARSx */
+ .start = 0xfc809000,
+ .end = 0xfc80900b,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ /* Real DMA error IRQ is 38, and channel IRQs are 34-37, 44-45 */
+ .name = "error_irq",
+ .start = 34,
+ .end = 34,
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE,
+ },
+};
+
+static struct resource sh7780_dmae1_resources[] = {
+ [0] = {
+ /* Channel registers and DMAOR */
+ .start = 0xfc818020,
+ .end = 0xfc81808f,
+ .flags = IORESOURCE_MEM,
+ },
+ /* DMAC1 has no DMARS */
+ {
+ /* Real DMA error IRQ is 38, and channel IRQs are 46-47, 92-95 */
+ .name = "error_irq",
+ .start = 46,
+ .end = 46,
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE,
+ },
+};
+
+static struct platform_device dma0_device = {
+ .name = "sh-dma-engine",
+ .id = 0,
+ .resource = sh7780_dmae0_resources,
+ .num_resources = ARRAY_SIZE(sh7780_dmae0_resources),
+ .dev = {
+ .platform_data = &dma0_platform_data,
+ },
+};
+
+static struct platform_device dma1_device = {
+ .name = "sh-dma-engine",
+ .id = 1,
+ .resource = sh7780_dmae1_resources,
+ .num_resources = ARRAY_SIZE(sh7780_dmae1_resources),
+ .dev = {
+ .platform_data = &dma1_platform_data,
+ },
+};
+
+static struct platform_device *sh7780_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &tmu0_device,
+ &tmu1_device,
+ &tmu2_device,
+ &tmu3_device,
+ &tmu4_device,
+ &tmu5_device,
+ &rtc_device,
+ &dma0_device,
+ &dma1_device,
+};
+
+static int __init sh7780_devices_setup(void)
+{
+ return platform_add_devices(sh7780_devices,
+ ARRAY_SIZE(sh7780_devices));
+}
+arch_initcall(sh7780_devices_setup);
+
+static struct platform_device *sh7780_early_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &tmu0_device,
+ &tmu1_device,
+ &tmu2_device,
+ &tmu3_device,
+ &tmu4_device,
+ &tmu5_device,
+};
+
+void __init plat_early_device_setup(void)
+{
+ if (mach_is_sh2007()) {
+ scif0_platform_data.scscr &= ~SCSCR_CKE1;
+ scif0_platform_data.scbrr_algo_id = SCBRR_ALGO_2;
+ scif1_platform_data.scscr &= ~SCSCR_CKE1;
+ scif1_platform_data.scbrr_algo_id = SCBRR_ALGO_2;
+ }
+
+ early_platform_add_devices(sh7780_early_devices,
+ ARRAY_SIZE(sh7780_early_devices));
+}
+
+enum {
+ UNUSED = 0,
+
+ /* interrupt sources */
+
+ IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH,
+ IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH,
+ IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH,
+ IRL_HHLL, IRL_HHLH, IRL_HHHL,
+
+ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
+ RTC, WDT, TMU0, TMU1, TMU2, TMU2_TICPI,
+ HUDI, DMAC0, SCIF0, DMAC1, CMT, HAC,
+ PCISERR, PCIINTA, PCIINTB, PCIINTC, PCIINTD, PCIC5,
+ SCIF1, SIOF, HSPI, MMCIF, TMU3, TMU4, TMU5, SSI, FLCTL, GPIO,
+
+ /* interrupt groups */
+
+ TMU012, TMU345,
+};
+
+static struct intc_vect vectors[] __initdata = {
+ INTC_VECT(RTC, 0x480), INTC_VECT(RTC, 0x4a0),
+ INTC_VECT(RTC, 0x4c0),
+ INTC_VECT(WDT, 0x560),
+ INTC_VECT(TMU0, 0x580), INTC_VECT(TMU1, 0x5a0),
+ INTC_VECT(TMU2, 0x5c0), INTC_VECT(TMU2_TICPI, 0x5e0),
+ INTC_VECT(HUDI, 0x600),
+ INTC_VECT(DMAC0, 0x640), INTC_VECT(DMAC0, 0x660),
+ INTC_VECT(DMAC0, 0x680), INTC_VECT(DMAC0, 0x6a0),
+ INTC_VECT(DMAC0, 0x6c0),
+ INTC_VECT(SCIF0, 0x700), INTC_VECT(SCIF0, 0x720),
+ INTC_VECT(SCIF0, 0x740), INTC_VECT(SCIF0, 0x760),
+ INTC_VECT(DMAC0, 0x780), INTC_VECT(DMAC0, 0x7a0),
+ INTC_VECT(DMAC1, 0x7c0), INTC_VECT(DMAC1, 0x7e0),
+ INTC_VECT(CMT, 0x900), INTC_VECT(HAC, 0x980),
+ INTC_VECT(PCISERR, 0xa00), INTC_VECT(PCIINTA, 0xa20),
+ INTC_VECT(PCIINTB, 0xa40), INTC_VECT(PCIINTC, 0xa60),
+ INTC_VECT(PCIINTD, 0xa80), INTC_VECT(PCIC5, 0xaa0),
+ INTC_VECT(PCIC5, 0xac0), INTC_VECT(PCIC5, 0xae0),
+ INTC_VECT(PCIC5, 0xb00), INTC_VECT(PCIC5, 0xb20),
+ INTC_VECT(SCIF1, 0xb80), INTC_VECT(SCIF1, 0xba0),
+ INTC_VECT(SCIF1, 0xbc0), INTC_VECT(SCIF1, 0xbe0),
+ INTC_VECT(SIOF, 0xc00), INTC_VECT(HSPI, 0xc80),
+ INTC_VECT(MMCIF, 0xd00), INTC_VECT(MMCIF, 0xd20),
+ INTC_VECT(MMCIF, 0xd40), INTC_VECT(MMCIF, 0xd60),
+ INTC_VECT(DMAC1, 0xd80), INTC_VECT(DMAC1, 0xda0),
+ INTC_VECT(DMAC1, 0xdc0), INTC_VECT(DMAC1, 0xde0),
+ INTC_VECT(TMU3, 0xe00), INTC_VECT(TMU4, 0xe20),
+ INTC_VECT(TMU5, 0xe40),
+ INTC_VECT(SSI, 0xe80),
+ INTC_VECT(FLCTL, 0xf00), INTC_VECT(FLCTL, 0xf20),
+ INTC_VECT(FLCTL, 0xf40), INTC_VECT(FLCTL, 0xf60),
+ INTC_VECT(GPIO, 0xf80), INTC_VECT(GPIO, 0xfa0),
+ INTC_VECT(GPIO, 0xfc0), INTC_VECT(GPIO, 0xfe0),
+};
+
+static struct intc_group groups[] __initdata = {
+ INTC_GROUP(TMU012, TMU0, TMU1, TMU2, TMU2_TICPI),
+ INTC_GROUP(TMU345, TMU3, TMU4, TMU5),
+};
+
+static struct intc_mask_reg mask_registers[] __initdata = {
+ { 0xffd40038, 0xffd4003c, 32, /* INT2MSKR / INT2MSKCR */
+ { 0, 0, 0, 0, 0, 0, GPIO, FLCTL,
+ SSI, MMCIF, HSPI, SIOF, PCIC5, PCIINTD, PCIINTC, PCIINTB,
+ PCIINTA, PCISERR, HAC, CMT, 0, 0, DMAC1, DMAC0,
+ HUDI, 0, WDT, SCIF1, SCIF0, RTC, TMU345, TMU012 } },
+};
+
+static struct intc_prio_reg prio_registers[] __initdata = {
+ { 0xffd40000, 0, 32, 8, /* INT2PRI0 */ { TMU0, TMU1,
+ TMU2, TMU2_TICPI } },
+ { 0xffd40004, 0, 32, 8, /* INT2PRI1 */ { TMU3, TMU4, TMU5, RTC } },
+ { 0xffd40008, 0, 32, 8, /* INT2PRI2 */ { SCIF0, SCIF1, WDT } },
+ { 0xffd4000c, 0, 32, 8, /* INT2PRI3 */ { HUDI, DMAC0, DMAC1 } },
+ { 0xffd40010, 0, 32, 8, /* INT2PRI4 */ { CMT, HAC,
+ PCISERR, PCIINTA, } },
+ { 0xffd40014, 0, 32, 8, /* INT2PRI5 */ { PCIINTB, PCIINTC,
+ PCIINTD, PCIC5 } },
+ { 0xffd40018, 0, 32, 8, /* INT2PRI6 */ { SIOF, HSPI, MMCIF, SSI } },
+ { 0xffd4001c, 0, 32, 8, /* INT2PRI7 */ { FLCTL, GPIO } },
+};
+
+static DECLARE_INTC_DESC(intc_desc, "sh7780", vectors, groups,
+ mask_registers, prio_registers, NULL);
+
+/* Support for external interrupt pins in IRQ mode */
+
+static struct intc_vect irq_vectors[] __initdata = {
+ INTC_VECT(IRQ0, 0x240), INTC_VECT(IRQ1, 0x280),
+ INTC_VECT(IRQ2, 0x2c0), INTC_VECT(IRQ3, 0x300),
+ INTC_VECT(IRQ4, 0x340), INTC_VECT(IRQ5, 0x380),
+ INTC_VECT(IRQ6, 0x3c0), INTC_VECT(IRQ7, 0x200),
+};
+
+static struct intc_mask_reg irq_mask_registers[] __initdata = {
+ { 0xffd00044, 0xffd00064, 32, /* INTMSK0 / INTMSKCLR0 */
+ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
+};
+
+static struct intc_prio_reg irq_prio_registers[] __initdata = {
+ { 0xffd00010, 0, 32, 4, /* INTPRI */ { IRQ0, IRQ1, IRQ2, IRQ3,
+ IRQ4, IRQ5, IRQ6, IRQ7 } },
+};
+
+static struct intc_sense_reg irq_sense_registers[] __initdata = {
+ { 0xffd0001c, 32, 2, /* ICR1 */ { IRQ0, IRQ1, IRQ2, IRQ3,
+ IRQ4, IRQ5, IRQ6, IRQ7 } },
+};
+
+static struct intc_mask_reg irq_ack_registers[] __initdata = {
+ { 0xffd00024, 0, 32, /* INTREQ */
+ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
+};
+
+static DECLARE_INTC_DESC_ACK(intc_irq_desc, "sh7780-irq", irq_vectors,
+ NULL, irq_mask_registers, irq_prio_registers,
+ irq_sense_registers, irq_ack_registers);
+
+/* External interrupt pins in IRL mode */
+
+static struct intc_vect irl_vectors[] __initdata = {
+ INTC_VECT(IRL_LLLL, 0x200), INTC_VECT(IRL_LLLH, 0x220),
+ INTC_VECT(IRL_LLHL, 0x240), INTC_VECT(IRL_LLHH, 0x260),
+ INTC_VECT(IRL_LHLL, 0x280), INTC_VECT(IRL_LHLH, 0x2a0),
+ INTC_VECT(IRL_LHHL, 0x2c0), INTC_VECT(IRL_LHHH, 0x2e0),
+ INTC_VECT(IRL_HLLL, 0x300), INTC_VECT(IRL_HLLH, 0x320),
+ INTC_VECT(IRL_HLHL, 0x340), INTC_VECT(IRL_HLHH, 0x360),
+ INTC_VECT(IRL_HHLL, 0x380), INTC_VECT(IRL_HHLH, 0x3a0),
+ INTC_VECT(IRL_HHHL, 0x3c0),
+};
+
+static struct intc_mask_reg irl3210_mask_registers[] __initdata = {
+ { 0xffd40080, 0xffd40084, 32, /* INTMSK2 / INTMSKCLR2 */
+ { IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH,
+ IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH,
+ IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH,
+ IRL_HHLL, IRL_HHLH, IRL_HHHL, } },
+};
+
+static struct intc_mask_reg irl7654_mask_registers[] __initdata = {
+ { 0xffd40080, 0xffd40084, 32, /* INTMSK2 / INTMSKCLR2 */
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH,
+ IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH,
+ IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH,
+ IRL_HHLL, IRL_HHLH, IRL_HHHL, } },
+};
+
+static DECLARE_INTC_DESC(intc_irl7654_desc, "sh7780-irl7654", irl_vectors,
+ NULL, irl7654_mask_registers, NULL, NULL);
+
+static DECLARE_INTC_DESC(intc_irl3210_desc, "sh7780-irl3210", irl_vectors,
+ NULL, irl3210_mask_registers, NULL, NULL);
+
+#define INTC_ICR0 0xffd00000
+#define INTC_INTMSK0 0xffd00044
+#define INTC_INTMSK1 0xffd00048
+#define INTC_INTMSK2 0xffd40080
+#define INTC_INTMSKCLR1 0xffd00068
+#define INTC_INTMSKCLR2 0xffd40084
+
+void __init plat_irq_setup(void)
+{
+ /* disable IRQ7-0 */
+ __raw_writel(0xff000000, INTC_INTMSK0);
+
+ /* disable IRL3-0 + IRL7-4 */
+ __raw_writel(0xc0000000, INTC_INTMSK1);
+ __raw_writel(0xfffefffe, INTC_INTMSK2);
+
+ /* select IRL mode for IRL3-0 + IRL7-4 */
+ __raw_writel(__raw_readl(INTC_ICR0) & ~0x00c00000, INTC_ICR0);
+
+ /* disable holding function, i.e. enable "SH-4 Mode" */
+ __raw_writel(__raw_readl(INTC_ICR0) | 0x00200000, INTC_ICR0);
+
+ register_intc_controller(&intc_desc);
+}
+
+void __init plat_irq_setup_pins(int mode)
+{
+ switch (mode) {
+ case IRQ_MODE_IRQ:
+ /* select IRQ mode for IRL3-0 + IRL7-4 */
+ __raw_writel(__raw_readl(INTC_ICR0) | 0x00c00000, INTC_ICR0);
+ register_intc_controller(&intc_irq_desc);
+ break;
+ case IRQ_MODE_IRL7654:
+ /* enable IRL7-4 but don't provide any masking */
+ __raw_writel(0x40000000, INTC_INTMSKCLR1);
+ __raw_writel(0x0000fffe, INTC_INTMSKCLR2);
+ break;
+ case IRQ_MODE_IRL3210:
+ /* enable IRL0-3 but don't provide any masking */
+ __raw_writel(0x80000000, INTC_INTMSKCLR1);
+ __raw_writel(0xfffe0000, INTC_INTMSKCLR2);
+ break;
+ case IRQ_MODE_IRL7654_MASK:
+ /* enable IRL7-4 and mask using cpu intc controller */
+ __raw_writel(0x40000000, INTC_INTMSKCLR1);
+ register_intc_controller(&intc_irl7654_desc);
+ break;
+ case IRQ_MODE_IRL3210_MASK:
+ /* enable IRL0-3 and mask using cpu intc controller */
+ __raw_writel(0x80000000, INTC_INTMSKCLR1);
+ register_intc_controller(&intc_irl3210_desc);
+ break;
+ default:
+ BUG();
+ }
+}
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7785.c b/arch/sh/kernel/cpu/sh4a/setup-sh7785.c
new file mode 100644
index 00000000..81588ef1
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7785.c
@@ -0,0 +1,715 @@
+/*
+ * SH7785 Setup
+ *
+ * Copyright (C) 2007 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/serial.h>
+#include <linux/serial_sci.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/sh_dma.h>
+#include <linux/sh_timer.h>
+#include <asm/mmzone.h>
+#include <cpu/dma-register.h>
+
+static struct plat_sci_port scif0_platform_data = {
+ .mapbase = 0xffea0000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
+ .scbrr_algo_id = SCBRR_ALGO_1,
+ .type = PORT_SCIF,
+ .irqs = { 40, 40, 40, 40 },
+ .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
+};
+
+static struct platform_device scif0_device = {
+ .name = "sh-sci",
+ .id = 0,
+ .dev = {
+ .platform_data = &scif0_platform_data,
+ },
+};
+
+static struct plat_sci_port scif1_platform_data = {
+ .mapbase = 0xffeb0000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
+ .scbrr_algo_id = SCBRR_ALGO_1,
+ .type = PORT_SCIF,
+ .irqs = { 44, 44, 44, 44 },
+ .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
+};
+
+static struct platform_device scif1_device = {
+ .name = "sh-sci",
+ .id = 1,
+ .dev = {
+ .platform_data = &scif1_platform_data,
+ },
+};
+
+static struct plat_sci_port scif2_platform_data = {
+ .mapbase = 0xffec0000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
+ .scbrr_algo_id = SCBRR_ALGO_1,
+ .type = PORT_SCIF,
+ .irqs = { 60, 60, 60, 60 },
+ .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
+};
+
+static struct platform_device scif2_device = {
+ .name = "sh-sci",
+ .id = 2,
+ .dev = {
+ .platform_data = &scif2_platform_data,
+ },
+};
+
+static struct plat_sci_port scif3_platform_data = {
+ .mapbase = 0xffed0000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
+ .scbrr_algo_id = SCBRR_ALGO_1,
+ .type = PORT_SCIF,
+ .irqs = { 61, 61, 61, 61 },
+ .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
+};
+
+static struct platform_device scif3_device = {
+ .name = "sh-sci",
+ .id = 3,
+ .dev = {
+ .platform_data = &scif3_platform_data,
+ },
+};
+
+static struct plat_sci_port scif4_platform_data = {
+ .mapbase = 0xffee0000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
+ .scbrr_algo_id = SCBRR_ALGO_1,
+ .type = PORT_SCIF,
+ .irqs = { 62, 62, 62, 62 },
+ .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
+};
+
+static struct platform_device scif4_device = {
+ .name = "sh-sci",
+ .id = 4,
+ .dev = {
+ .platform_data = &scif4_platform_data,
+ },
+};
+
+static struct plat_sci_port scif5_platform_data = {
+ .mapbase = 0xffef0000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
+ .scbrr_algo_id = SCBRR_ALGO_1,
+ .type = PORT_SCIF,
+ .irqs = { 63, 63, 63, 63 },
+ .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
+};
+
+static struct platform_device scif5_device = {
+ .name = "sh-sci",
+ .id = 5,
+ .dev = {
+ .platform_data = &scif5_platform_data,
+ },
+};
+
+static struct sh_timer_config tmu0_platform_data = {
+ .channel_offset = 0x04,
+ .timer_bit = 0,
+ .clockevent_rating = 200,
+};
+
+static struct resource tmu0_resources[] = {
+ [0] = {
+ .start = 0xffd80008,
+ .end = 0xffd80013,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 28,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu0_device = {
+ .name = "sh_tmu",
+ .id = 0,
+ .dev = {
+ .platform_data = &tmu0_platform_data,
+ },
+ .resource = tmu0_resources,
+ .num_resources = ARRAY_SIZE(tmu0_resources),
+};
+
+static struct sh_timer_config tmu1_platform_data = {
+ .channel_offset = 0x10,
+ .timer_bit = 1,
+ .clocksource_rating = 200,
+};
+
+static struct resource tmu1_resources[] = {
+ [0] = {
+ .start = 0xffd80014,
+ .end = 0xffd8001f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 29,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu1_device = {
+ .name = "sh_tmu",
+ .id = 1,
+ .dev = {
+ .platform_data = &tmu1_platform_data,
+ },
+ .resource = tmu1_resources,
+ .num_resources = ARRAY_SIZE(tmu1_resources),
+};
+
+static struct sh_timer_config tmu2_platform_data = {
+ .channel_offset = 0x1c,
+ .timer_bit = 2,
+};
+
+static struct resource tmu2_resources[] = {
+ [0] = {
+ .start = 0xffd80020,
+ .end = 0xffd8002f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 30,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu2_device = {
+ .name = "sh_tmu",
+ .id = 2,
+ .dev = {
+ .platform_data = &tmu2_platform_data,
+ },
+ .resource = tmu2_resources,
+ .num_resources = ARRAY_SIZE(tmu2_resources),
+};
+
+static struct sh_timer_config tmu3_platform_data = {
+ .channel_offset = 0x04,
+ .timer_bit = 0,
+};
+
+static struct resource tmu3_resources[] = {
+ [0] = {
+ .start = 0xffdc0008,
+ .end = 0xffdc0013,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 96,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu3_device = {
+ .name = "sh_tmu",
+ .id = 3,
+ .dev = {
+ .platform_data = &tmu3_platform_data,
+ },
+ .resource = tmu3_resources,
+ .num_resources = ARRAY_SIZE(tmu3_resources),
+};
+
+static struct sh_timer_config tmu4_platform_data = {
+ .channel_offset = 0x10,
+ .timer_bit = 1,
+};
+
+static struct resource tmu4_resources[] = {
+ [0] = {
+ .start = 0xffdc0014,
+ .end = 0xffdc001f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 97,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu4_device = {
+ .name = "sh_tmu",
+ .id = 4,
+ .dev = {
+ .platform_data = &tmu4_platform_data,
+ },
+ .resource = tmu4_resources,
+ .num_resources = ARRAY_SIZE(tmu4_resources),
+};
+
+static struct sh_timer_config tmu5_platform_data = {
+ .channel_offset = 0x1c,
+ .timer_bit = 2,
+};
+
+static struct resource tmu5_resources[] = {
+ [0] = {
+ .start = 0xffdc0020,
+ .end = 0xffdc002b,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 98,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu5_device = {
+ .name = "sh_tmu",
+ .id = 5,
+ .dev = {
+ .platform_data = &tmu5_platform_data,
+ },
+ .resource = tmu5_resources,
+ .num_resources = ARRAY_SIZE(tmu5_resources),
+};
+
+/* DMA */
+static const struct sh_dmae_channel sh7785_dmae0_channels[] = {
+ {
+ .offset = 0,
+ .dmars = 0,
+ .dmars_bit = 0,
+ }, {
+ .offset = 0x10,
+ .dmars = 0,
+ .dmars_bit = 8,
+ }, {
+ .offset = 0x20,
+ .dmars = 4,
+ .dmars_bit = 0,
+ }, {
+ .offset = 0x30,
+ .dmars = 4,
+ .dmars_bit = 8,
+ }, {
+ .offset = 0x50,
+ .dmars = 8,
+ .dmars_bit = 0,
+ }, {
+ .offset = 0x60,
+ .dmars = 8,
+ .dmars_bit = 8,
+ }
+};
+
+static const struct sh_dmae_channel sh7785_dmae1_channels[] = {
+ {
+ .offset = 0,
+ }, {
+ .offset = 0x10,
+ }, {
+ .offset = 0x20,
+ }, {
+ .offset = 0x30,
+ }, {
+ .offset = 0x50,
+ }, {
+ .offset = 0x60,
+ }
+};
+
+static const unsigned int ts_shift[] = TS_SHIFT;
+
+static struct sh_dmae_pdata dma0_platform_data = {
+ .channel = sh7785_dmae0_channels,
+ .channel_num = ARRAY_SIZE(sh7785_dmae0_channels),
+ .ts_low_shift = CHCR_TS_LOW_SHIFT,
+ .ts_low_mask = CHCR_TS_LOW_MASK,
+ .ts_high_shift = CHCR_TS_HIGH_SHIFT,
+ .ts_high_mask = CHCR_TS_HIGH_MASK,
+ .ts_shift = ts_shift,
+ .ts_shift_num = ARRAY_SIZE(ts_shift),
+ .dmaor_init = DMAOR_INIT,
+};
+
+static struct sh_dmae_pdata dma1_platform_data = {
+ .channel = sh7785_dmae1_channels,
+ .channel_num = ARRAY_SIZE(sh7785_dmae1_channels),
+ .ts_low_shift = CHCR_TS_LOW_SHIFT,
+ .ts_low_mask = CHCR_TS_LOW_MASK,
+ .ts_high_shift = CHCR_TS_HIGH_SHIFT,
+ .ts_high_mask = CHCR_TS_HIGH_MASK,
+ .ts_shift = ts_shift,
+ .ts_shift_num = ARRAY_SIZE(ts_shift),
+ .dmaor_init = DMAOR_INIT,
+};
+
+static struct resource sh7785_dmae0_resources[] = {
+ [0] = {
+ /* Channel registers and DMAOR */
+ .start = 0xfc808020,
+ .end = 0xfc80808f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ /* DMARSx */
+ .start = 0xfc809000,
+ .end = 0xfc80900b,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ /* Real DMA error IRQ is 39, and channel IRQs are 33-38 */
+ .name = "error_irq",
+ .start = 33,
+ .end = 33,
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE,
+ },
+};
+
+static struct resource sh7785_dmae1_resources[] = {
+ [0] = {
+ /* Channel registers and DMAOR */
+ .start = 0xfcc08020,
+ .end = 0xfcc0808f,
+ .flags = IORESOURCE_MEM,
+ },
+ /* DMAC1 has no DMARS */
+ {
+ /* Real DMA error IRQ is 58, and channel IRQs are 52-57 */
+ .name = "error_irq",
+ .start = 52,
+ .end = 52,
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE,
+ },
+};
+
+static struct platform_device dma0_device = {
+ .name = "sh-dma-engine",
+ .id = 0,
+ .resource = sh7785_dmae0_resources,
+ .num_resources = ARRAY_SIZE(sh7785_dmae0_resources),
+ .dev = {
+ .platform_data = &dma0_platform_data,
+ },
+};
+
+static struct platform_device dma1_device = {
+ .name = "sh-dma-engine",
+ .id = 1,
+ .resource = sh7785_dmae1_resources,
+ .num_resources = ARRAY_SIZE(sh7785_dmae1_resources),
+ .dev = {
+ .platform_data = &dma1_platform_data,
+ },
+};
+
+static struct platform_device *sh7785_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &scif3_device,
+ &scif4_device,
+ &scif5_device,
+ &tmu0_device,
+ &tmu1_device,
+ &tmu2_device,
+ &tmu3_device,
+ &tmu4_device,
+ &tmu5_device,
+ &dma0_device,
+ &dma1_device,
+};
+
+static int __init sh7785_devices_setup(void)
+{
+ return platform_add_devices(sh7785_devices,
+ ARRAY_SIZE(sh7785_devices));
+}
+arch_initcall(sh7785_devices_setup);
+
+static struct platform_device *sh7785_early_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &scif3_device,
+ &scif4_device,
+ &scif5_device,
+ &tmu0_device,
+ &tmu1_device,
+ &tmu2_device,
+ &tmu3_device,
+ &tmu4_device,
+ &tmu5_device,
+};
+
+void __init plat_early_device_setup(void)
+{
+ early_platform_add_devices(sh7785_early_devices,
+ ARRAY_SIZE(sh7785_early_devices));
+}
+
+enum {
+ UNUSED = 0,
+
+ /* interrupt sources */
+
+ IRL0_LLLL, IRL0_LLLH, IRL0_LLHL, IRL0_LLHH,
+ IRL0_LHLL, IRL0_LHLH, IRL0_LHHL, IRL0_LHHH,
+ IRL0_HLLL, IRL0_HLLH, IRL0_HLHL, IRL0_HLHH,
+ IRL0_HHLL, IRL0_HHLH, IRL0_HHHL,
+
+ IRL4_LLLL, IRL4_LLLH, IRL4_LLHL, IRL4_LLHH,
+ IRL4_LHLL, IRL4_LHLH, IRL4_LHHL, IRL4_LHHH,
+ IRL4_HLLL, IRL4_HLLH, IRL4_HLHL, IRL4_HLHH,
+ IRL4_HHLL, IRL4_HHLH, IRL4_HHHL,
+
+ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
+ WDT, TMU0, TMU1, TMU2, TMU2_TICPI,
+ HUDI, DMAC0, SCIF0, SCIF1, DMAC1, HSPI,
+ SCIF2, SCIF3, SCIF4, SCIF5,
+ PCISERR, PCIINTA, PCIINTB, PCIINTC, PCIINTD, PCIC5,
+ SIOF, MMCIF, DU, GDTA,
+ TMU3, TMU4, TMU5,
+ SSI0, SSI1,
+ HAC0, HAC1,
+ FLCTL, GPIO,
+
+ /* interrupt groups */
+
+ TMU012, TMU345
+};
+
+static struct intc_vect vectors[] __initdata = {
+ INTC_VECT(WDT, 0x560),
+ INTC_VECT(TMU0, 0x580), INTC_VECT(TMU1, 0x5a0),
+ INTC_VECT(TMU2, 0x5c0), INTC_VECT(TMU2_TICPI, 0x5e0),
+ INTC_VECT(HUDI, 0x600),
+ INTC_VECT(DMAC0, 0x620), INTC_VECT(DMAC0, 0x640),
+ INTC_VECT(DMAC0, 0x660), INTC_VECT(DMAC0, 0x680),
+ INTC_VECT(DMAC0, 0x6a0), INTC_VECT(DMAC0, 0x6c0),
+ INTC_VECT(DMAC0, 0x6e0),
+ INTC_VECT(SCIF0, 0x700), INTC_VECT(SCIF0, 0x720),
+ INTC_VECT(SCIF0, 0x740), INTC_VECT(SCIF0, 0x760),
+ INTC_VECT(SCIF1, 0x780), INTC_VECT(SCIF1, 0x7a0),
+ INTC_VECT(SCIF1, 0x7c0), INTC_VECT(SCIF1, 0x7e0),
+ INTC_VECT(DMAC1, 0x880), INTC_VECT(DMAC1, 0x8a0),
+ INTC_VECT(DMAC1, 0x8c0), INTC_VECT(DMAC1, 0x8e0),
+ INTC_VECT(DMAC1, 0x900), INTC_VECT(DMAC1, 0x920),
+ INTC_VECT(DMAC1, 0x940),
+ INTC_VECT(HSPI, 0x960),
+ INTC_VECT(SCIF2, 0x980), INTC_VECT(SCIF3, 0x9a0),
+ INTC_VECT(SCIF4, 0x9c0), INTC_VECT(SCIF5, 0x9e0),
+ INTC_VECT(PCISERR, 0xa00), INTC_VECT(PCIINTA, 0xa20),
+ INTC_VECT(PCIINTB, 0xa40), INTC_VECT(PCIINTC, 0xa60),
+ INTC_VECT(PCIINTD, 0xa80), INTC_VECT(PCIC5, 0xaa0),
+ INTC_VECT(PCIC5, 0xac0), INTC_VECT(PCIC5, 0xae0),
+ INTC_VECT(PCIC5, 0xb00), INTC_VECT(PCIC5, 0xb20),
+ INTC_VECT(SIOF, 0xc00),
+ INTC_VECT(MMCIF, 0xd00), INTC_VECT(MMCIF, 0xd20),
+ INTC_VECT(MMCIF, 0xd40), INTC_VECT(MMCIF, 0xd60),
+ INTC_VECT(DU, 0xd80),
+ INTC_VECT(GDTA, 0xda0), INTC_VECT(GDTA, 0xdc0),
+ INTC_VECT(GDTA, 0xde0),
+ INTC_VECT(TMU3, 0xe00), INTC_VECT(TMU4, 0xe20),
+ INTC_VECT(TMU5, 0xe40),
+ INTC_VECT(SSI0, 0xe80), INTC_VECT(SSI1, 0xea0),
+ INTC_VECT(HAC0, 0xec0), INTC_VECT(HAC1, 0xee0),
+ INTC_VECT(FLCTL, 0xf00), INTC_VECT(FLCTL, 0xf20),
+ INTC_VECT(FLCTL, 0xf40), INTC_VECT(FLCTL, 0xf60),
+ INTC_VECT(GPIO, 0xf80), INTC_VECT(GPIO, 0xfa0),
+ INTC_VECT(GPIO, 0xfc0), INTC_VECT(GPIO, 0xfe0),
+};
+
+static struct intc_group groups[] __initdata = {
+ INTC_GROUP(TMU012, TMU0, TMU1, TMU2, TMU2_TICPI),
+ INTC_GROUP(TMU345, TMU3, TMU4, TMU5),
+};
+
+static struct intc_mask_reg mask_registers[] __initdata = {
+ { 0xffd00044, 0xffd00064, 32, /* INTMSK0 / INTMSKCLR0 */
+ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
+
+ { 0xffd40080, 0xffd40084, 32, /* INTMSK2 / INTMSKCLR2 */
+ { IRL0_LLLL, IRL0_LLLH, IRL0_LLHL, IRL0_LLHH,
+ IRL0_LHLL, IRL0_LHLH, IRL0_LHHL, IRL0_LHHH,
+ IRL0_HLLL, IRL0_HLLH, IRL0_HLHL, IRL0_HLHH,
+ IRL0_HHLL, IRL0_HHLH, IRL0_HHHL, 0,
+ IRL4_LLLL, IRL4_LLLH, IRL4_LLHL, IRL4_LLHH,
+ IRL4_LHLL, IRL4_LHLH, IRL4_LHHL, IRL4_LHHH,
+ IRL4_HLLL, IRL4_HLLH, IRL4_HLHL, IRL4_HLHH,
+ IRL4_HHLL, IRL4_HHLH, IRL4_HHHL, 0, } },
+
+ { 0xffd40038, 0xffd4003c, 32, /* INT2MSKR / INT2MSKCR */
+ { 0, 0, 0, GDTA, DU, SSI0, SSI1, GPIO,
+ FLCTL, MMCIF, HSPI, SIOF, PCIC5, PCIINTD, PCIINTC, PCIINTB,
+ PCIINTA, PCISERR, HAC1, HAC0, DMAC1, DMAC0, HUDI, WDT,
+ SCIF5, SCIF4, SCIF3, SCIF2, SCIF1, SCIF0, TMU345, TMU012 } },
+};
+
+static struct intc_prio_reg prio_registers[] __initdata = {
+ { 0xffd00010, 0, 32, 4, /* INTPRI */ { IRQ0, IRQ1, IRQ2, IRQ3,
+ IRQ4, IRQ5, IRQ6, IRQ7 } },
+ { 0xffd40000, 0, 32, 8, /* INT2PRI0 */ { TMU0, TMU1,
+ TMU2, TMU2_TICPI } },
+ { 0xffd40004, 0, 32, 8, /* INT2PRI1 */ { TMU3, TMU4, TMU5, } },
+ { 0xffd40008, 0, 32, 8, /* INT2PRI2 */ { SCIF0, SCIF1,
+ SCIF2, SCIF3 } },
+ { 0xffd4000c, 0, 32, 8, /* INT2PRI3 */ { SCIF4, SCIF5, WDT, } },
+ { 0xffd40010, 0, 32, 8, /* INT2PRI4 */ { HUDI, DMAC0, DMAC1, } },
+ { 0xffd40014, 0, 32, 8, /* INT2PRI5 */ { HAC0, HAC1,
+ PCISERR, PCIINTA } },
+ { 0xffd40018, 0, 32, 8, /* INT2PRI6 */ { PCIINTB, PCIINTC,
+ PCIINTD, PCIC5 } },
+ { 0xffd4001c, 0, 32, 8, /* INT2PRI7 */ { SIOF, HSPI, MMCIF, } },
+ { 0xffd40020, 0, 32, 8, /* INT2PRI8 */ { FLCTL, GPIO, SSI0, SSI1, } },
+ { 0xffd40024, 0, 32, 8, /* INT2PRI9 */ { DU, GDTA, } },
+};
+
+static DECLARE_INTC_DESC(intc_desc, "sh7785", vectors, groups,
+ mask_registers, prio_registers, NULL);
+
+/* Support for external interrupt pins in IRQ mode */
+
+static struct intc_vect vectors_irq0123[] __initdata = {
+ INTC_VECT(IRQ0, 0x240), INTC_VECT(IRQ1, 0x280),
+ INTC_VECT(IRQ2, 0x2c0), INTC_VECT(IRQ3, 0x300),
+};
+
+static struct intc_vect vectors_irq4567[] __initdata = {
+ INTC_VECT(IRQ4, 0x340), INTC_VECT(IRQ5, 0x380),
+ INTC_VECT(IRQ6, 0x3c0), INTC_VECT(IRQ7, 0x200),
+};
+
+static struct intc_sense_reg sense_registers[] __initdata = {
+ { 0xffd0001c, 32, 2, /* ICR1 */ { IRQ0, IRQ1, IRQ2, IRQ3,
+ IRQ4, IRQ5, IRQ6, IRQ7 } },
+};
+
+static struct intc_mask_reg ack_registers[] __initdata = {
+ { 0xffd00024, 0, 32, /* INTREQ */
+ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
+};
+
+static DECLARE_INTC_DESC_ACK(intc_desc_irq0123, "sh7785-irq0123",
+ vectors_irq0123, NULL, mask_registers,
+ prio_registers, sense_registers, ack_registers);
+
+static DECLARE_INTC_DESC_ACK(intc_desc_irq4567, "sh7785-irq4567",
+ vectors_irq4567, NULL, mask_registers,
+ prio_registers, sense_registers, ack_registers);
+
+/* External interrupt pins in IRL mode */
+
+static struct intc_vect vectors_irl0123[] __initdata = {
+ INTC_VECT(IRL0_LLLL, 0x200), INTC_VECT(IRL0_LLLH, 0x220),
+ INTC_VECT(IRL0_LLHL, 0x240), INTC_VECT(IRL0_LLHH, 0x260),
+ INTC_VECT(IRL0_LHLL, 0x280), INTC_VECT(IRL0_LHLH, 0x2a0),
+ INTC_VECT(IRL0_LHHL, 0x2c0), INTC_VECT(IRL0_LHHH, 0x2e0),
+ INTC_VECT(IRL0_HLLL, 0x300), INTC_VECT(IRL0_HLLH, 0x320),
+ INTC_VECT(IRL0_HLHL, 0x340), INTC_VECT(IRL0_HLHH, 0x360),
+ INTC_VECT(IRL0_HHLL, 0x380), INTC_VECT(IRL0_HHLH, 0x3a0),
+ INTC_VECT(IRL0_HHHL, 0x3c0),
+};
+
+static struct intc_vect vectors_irl4567[] __initdata = {
+ INTC_VECT(IRL4_LLLL, 0xb00), INTC_VECT(IRL4_LLLH, 0xb20),
+ INTC_VECT(IRL4_LLHL, 0xb40), INTC_VECT(IRL4_LLHH, 0xb60),
+ INTC_VECT(IRL4_LHLL, 0xb80), INTC_VECT(IRL4_LHLH, 0xba0),
+ INTC_VECT(IRL4_LHHL, 0xbc0), INTC_VECT(IRL4_LHHH, 0xbe0),
+ INTC_VECT(IRL4_HLLL, 0xc00), INTC_VECT(IRL4_HLLH, 0xc20),
+ INTC_VECT(IRL4_HLHL, 0xc40), INTC_VECT(IRL4_HLHH, 0xc60),
+ INTC_VECT(IRL4_HHLL, 0xc80), INTC_VECT(IRL4_HHLH, 0xca0),
+ INTC_VECT(IRL4_HHHL, 0xcc0),
+};
+
+static DECLARE_INTC_DESC(intc_desc_irl0123, "sh7785-irl0123", vectors_irl0123,
+ NULL, mask_registers, NULL, NULL);
+
+static DECLARE_INTC_DESC(intc_desc_irl4567, "sh7785-irl4567", vectors_irl4567,
+ NULL, mask_registers, NULL, NULL);
+
+#define INTC_ICR0 0xffd00000
+#define INTC_INTMSK0 0xffd00044
+#define INTC_INTMSK1 0xffd00048
+#define INTC_INTMSK2 0xffd40080
+#define INTC_INTMSKCLR1 0xffd00068
+#define INTC_INTMSKCLR2 0xffd40084
+
+void __init plat_irq_setup(void)
+{
+ /* disable IRQ3-0 + IRQ7-4 */
+ __raw_writel(0xff000000, INTC_INTMSK0);
+
+ /* disable IRL3-0 + IRL7-4 */
+ __raw_writel(0xc0000000, INTC_INTMSK1);
+ __raw_writel(0xfffefffe, INTC_INTMSK2);
+
+ /* select IRL mode for IRL3-0 + IRL7-4 */
+ __raw_writel(__raw_readl(INTC_ICR0) & ~0x00c00000, INTC_ICR0);
+
+ /* disable holding function, i.e. enable "SH-4 Mode" */
+ __raw_writel(__raw_readl(INTC_ICR0) | 0x00200000, INTC_ICR0);
+
+ register_intc_controller(&intc_desc);
+}
+
+void __init plat_irq_setup_pins(int mode)
+{
+ switch (mode) {
+ case IRQ_MODE_IRQ7654:
+ /* select IRQ mode for IRL7-4 */
+ __raw_writel(__raw_readl(INTC_ICR0) | 0x00400000, INTC_ICR0);
+ register_intc_controller(&intc_desc_irq4567);
+ break;
+ case IRQ_MODE_IRQ3210:
+ /* select IRQ mode for IRL3-0 */
+ __raw_writel(__raw_readl(INTC_ICR0) | 0x00800000, INTC_ICR0);
+ register_intc_controller(&intc_desc_irq0123);
+ break;
+ case IRQ_MODE_IRL7654:
+ /* enable IRL7-4 but don't provide any masking */
+ __raw_writel(0x40000000, INTC_INTMSKCLR1);
+ __raw_writel(0x0000fffe, INTC_INTMSKCLR2);
+ break;
+ case IRQ_MODE_IRL3210:
+ /* enable IRL0-3 but don't provide any masking */
+ __raw_writel(0x80000000, INTC_INTMSKCLR1);
+ __raw_writel(0xfffe0000, INTC_INTMSKCLR2);
+ break;
+ case IRQ_MODE_IRL7654_MASK:
+ /* enable IRL7-4 and mask using cpu intc controller */
+ __raw_writel(0x40000000, INTC_INTMSKCLR1);
+ register_intc_controller(&intc_desc_irl4567);
+ break;
+ case IRQ_MODE_IRL3210_MASK:
+ /* enable IRL0-3 and mask using cpu intc controller */
+ __raw_writel(0x80000000, INTC_INTMSKCLR1);
+ register_intc_controller(&intc_desc_irl0123);
+ break;
+ default:
+ BUG();
+ }
+}
+
+void __init plat_mem_setup(void)
+{
+ /* Register the URAM space as Node 1 */
+ setup_bootmem_node(1, 0xe55f0000, 0xe5610000);
+}
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7786.c b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
new file mode 100644
index 00000000..599022d7
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
@@ -0,0 +1,1056 @@
+/*
+ * SH7786 Setup
+ *
+ * Copyright (C) 2009 - 2011 Renesas Solutions Corp.
+ * Kuninori Morimoto <morimoto.kuninori@renesas.com>
+ * Paul Mundt <paul.mundt@renesas.com>
+ *
+ * Based on SH7785 Setup
+ *
+ * Copyright (C) 2007 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/serial.h>
+#include <linux/serial_sci.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+#include <linux/sh_timer.h>
+#include <linux/sh_dma.h>
+#include <linux/sh_intc.h>
+#include <cpu/dma-register.h>
+#include <asm/mmzone.h>
+
+static struct plat_sci_port scif0_platform_data = {
+ .mapbase = 0xffea0000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
+ .scbrr_algo_id = SCBRR_ALGO_1,
+ .type = PORT_SCIF,
+ .irqs = { 40, 41, 43, 42 },
+ .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
+};
+
+static struct platform_device scif0_device = {
+ .name = "sh-sci",
+ .id = 0,
+ .dev = {
+ .platform_data = &scif0_platform_data,
+ },
+};
+
+/*
+ * The rest of these all have multiplexed IRQs
+ */
+static struct plat_sci_port scif1_platform_data = {
+ .mapbase = 0xffeb0000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
+ .scbrr_algo_id = SCBRR_ALGO_1,
+ .type = PORT_SCIF,
+ .irqs = { 44, 44, 44, 44 },
+ .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
+};
+
+static struct platform_device scif1_device = {
+ .name = "sh-sci",
+ .id = 1,
+ .dev = {
+ .platform_data = &scif1_platform_data,
+ },
+};
+
+static struct plat_sci_port scif2_platform_data = {
+ .mapbase = 0xffec0000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
+ .scbrr_algo_id = SCBRR_ALGO_1,
+ .type = PORT_SCIF,
+ .irqs = { 50, 50, 50, 50 },
+ .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
+};
+
+static struct platform_device scif2_device = {
+ .name = "sh-sci",
+ .id = 2,
+ .dev = {
+ .platform_data = &scif2_platform_data,
+ },
+};
+
+static struct plat_sci_port scif3_platform_data = {
+ .mapbase = 0xffed0000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
+ .scbrr_algo_id = SCBRR_ALGO_1,
+ .type = PORT_SCIF,
+ .irqs = { 51, 51, 51, 51 },
+ .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
+};
+
+static struct platform_device scif3_device = {
+ .name = "sh-sci",
+ .id = 3,
+ .dev = {
+ .platform_data = &scif3_platform_data,
+ },
+};
+
+static struct plat_sci_port scif4_platform_data = {
+ .mapbase = 0xffee0000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
+ .scbrr_algo_id = SCBRR_ALGO_1,
+ .type = PORT_SCIF,
+ .irqs = { 52, 52, 52, 52 },
+ .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
+};
+
+static struct platform_device scif4_device = {
+ .name = "sh-sci",
+ .id = 4,
+ .dev = {
+ .platform_data = &scif4_platform_data,
+ },
+};
+
+static struct plat_sci_port scif5_platform_data = {
+ .mapbase = 0xffef0000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
+ .scbrr_algo_id = SCBRR_ALGO_1,
+ .type = PORT_SCIF,
+ .irqs = { 53, 53, 53, 53 },
+ .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
+};
+
+static struct platform_device scif5_device = {
+ .name = "sh-sci",
+ .id = 5,
+ .dev = {
+ .platform_data = &scif5_platform_data,
+ },
+};
+
+static struct sh_timer_config tmu0_platform_data = {
+ .channel_offset = 0x04,
+ .timer_bit = 0,
+ .clockevent_rating = 200,
+};
+
+static struct resource tmu0_resources[] = {
+ [0] = {
+ .start = 0xffd80008,
+ .end = 0xffd80013,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 16,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu0_device = {
+ .name = "sh_tmu",
+ .id = 0,
+ .dev = {
+ .platform_data = &tmu0_platform_data,
+ },
+ .resource = tmu0_resources,
+ .num_resources = ARRAY_SIZE(tmu0_resources),
+};
+
+static struct sh_timer_config tmu1_platform_data = {
+ .channel_offset = 0x10,
+ .timer_bit = 1,
+ .clocksource_rating = 200,
+};
+
+static struct resource tmu1_resources[] = {
+ [0] = {
+ .start = 0xffd80014,
+ .end = 0xffd8001f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 17,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu1_device = {
+ .name = "sh_tmu",
+ .id = 1,
+ .dev = {
+ .platform_data = &tmu1_platform_data,
+ },
+ .resource = tmu1_resources,
+ .num_resources = ARRAY_SIZE(tmu1_resources),
+};
+
+static struct sh_timer_config tmu2_platform_data = {
+ .channel_offset = 0x1c,
+ .timer_bit = 2,
+};
+
+static struct resource tmu2_resources[] = {
+ [0] = {
+ .start = 0xffd80020,
+ .end = 0xffd8002f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 18,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu2_device = {
+ .name = "sh_tmu",
+ .id = 2,
+ .dev = {
+ .platform_data = &tmu2_platform_data,
+ },
+ .resource = tmu2_resources,
+ .num_resources = ARRAY_SIZE(tmu2_resources),
+};
+
+static struct sh_timer_config tmu3_platform_data = {
+ .channel_offset = 0x04,
+ .timer_bit = 0,
+};
+
+static struct resource tmu3_resources[] = {
+ [0] = {
+ .start = 0xffda0008,
+ .end = 0xffda0013,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 20,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu3_device = {
+ .name = "sh_tmu",
+ .id = 3,
+ .dev = {
+ .platform_data = &tmu3_platform_data,
+ },
+ .resource = tmu3_resources,
+ .num_resources = ARRAY_SIZE(tmu3_resources),
+};
+
+static struct sh_timer_config tmu4_platform_data = {
+ .channel_offset = 0x10,
+ .timer_bit = 1,
+};
+
+static struct resource tmu4_resources[] = {
+ [0] = {
+ .start = 0xffda0014,
+ .end = 0xffda001f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 21,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu4_device = {
+ .name = "sh_tmu",
+ .id = 4,
+ .dev = {
+ .platform_data = &tmu4_platform_data,
+ },
+ .resource = tmu4_resources,
+ .num_resources = ARRAY_SIZE(tmu4_resources),
+};
+
+static struct sh_timer_config tmu5_platform_data = {
+ .channel_offset = 0x1c,
+ .timer_bit = 2,
+};
+
+static struct resource tmu5_resources[] = {
+ [0] = {
+ .start = 0xffda0020,
+ .end = 0xffda002b,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 22,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu5_device = {
+ .name = "sh_tmu",
+ .id = 5,
+ .dev = {
+ .platform_data = &tmu5_platform_data,
+ },
+ .resource = tmu5_resources,
+ .num_resources = ARRAY_SIZE(tmu5_resources),
+};
+
+static struct sh_timer_config tmu6_platform_data = {
+ .channel_offset = 0x04,
+ .timer_bit = 0,
+};
+
+static struct resource tmu6_resources[] = {
+ [0] = {
+ .start = 0xffdc0008,
+ .end = 0xffdc0013,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 45,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu6_device = {
+ .name = "sh_tmu",
+ .id = 6,
+ .dev = {
+ .platform_data = &tmu6_platform_data,
+ },
+ .resource = tmu6_resources,
+ .num_resources = ARRAY_SIZE(tmu6_resources),
+};
+
+static struct sh_timer_config tmu7_platform_data = {
+ .channel_offset = 0x10,
+ .timer_bit = 1,
+};
+
+static struct resource tmu7_resources[] = {
+ [0] = {
+ .start = 0xffdc0014,
+ .end = 0xffdc001f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 45,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu7_device = {
+ .name = "sh_tmu",
+ .id = 7,
+ .dev = {
+ .platform_data = &tmu7_platform_data,
+ },
+ .resource = tmu7_resources,
+ .num_resources = ARRAY_SIZE(tmu7_resources),
+};
+
+static struct sh_timer_config tmu8_platform_data = {
+ .channel_offset = 0x1c,
+ .timer_bit = 2,
+};
+
+static struct resource tmu8_resources[] = {
+ [0] = {
+ .start = 0xffdc0020,
+ .end = 0xffdc002b,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 45,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu8_device = {
+ .name = "sh_tmu",
+ .id = 8,
+ .dev = {
+ .platform_data = &tmu8_platform_data,
+ },
+ .resource = tmu8_resources,
+ .num_resources = ARRAY_SIZE(tmu8_resources),
+};
+
+static struct sh_timer_config tmu9_platform_data = {
+ .channel_offset = 0x04,
+ .timer_bit = 0,
+};
+
+static struct resource tmu9_resources[] = {
+ [0] = {
+ .start = 0xffde0008,
+ .end = 0xffde0013,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 46,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu9_device = {
+ .name = "sh_tmu",
+ .id = 9,
+ .dev = {
+ .platform_data = &tmu9_platform_data,
+ },
+ .resource = tmu9_resources,
+ .num_resources = ARRAY_SIZE(tmu9_resources),
+};
+
+static struct sh_timer_config tmu10_platform_data = {
+ .channel_offset = 0x10,
+ .timer_bit = 1,
+};
+
+static struct resource tmu10_resources[] = {
+ [0] = {
+ .start = 0xffde0014,
+ .end = 0xffde001f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 46,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu10_device = {
+ .name = "sh_tmu",
+ .id = 10,
+ .dev = {
+ .platform_data = &tmu10_platform_data,
+ },
+ .resource = tmu10_resources,
+ .num_resources = ARRAY_SIZE(tmu10_resources),
+};
+
+static struct sh_timer_config tmu11_platform_data = {
+ .channel_offset = 0x1c,
+ .timer_bit = 2,
+};
+
+static struct resource tmu11_resources[] = {
+ [0] = {
+ .start = 0xffde0020,
+ .end = 0xffde002b,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 46,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu11_device = {
+ .name = "sh_tmu",
+ .id = 11,
+ .dev = {
+ .platform_data = &tmu11_platform_data,
+ },
+ .resource = tmu11_resources,
+ .num_resources = ARRAY_SIZE(tmu11_resources),
+};
+
+static const struct sh_dmae_channel dmac0_channels[] = {
+ {
+ .offset = 0,
+ .dmars = 0,
+ .dmars_bit = 0,
+ }, {
+ .offset = 0x10,
+ .dmars = 0,
+ .dmars_bit = 8,
+ }, {
+ .offset = 0x20,
+ .dmars = 4,
+ .dmars_bit = 0,
+ }, {
+ .offset = 0x30,
+ .dmars = 4,
+ .dmars_bit = 8,
+ }, {
+ .offset = 0x50,
+ .dmars = 8,
+ .dmars_bit = 0,
+ }, {
+ .offset = 0x60,
+ .dmars = 8,
+ .dmars_bit = 8,
+ }
+};
+
+static const unsigned int ts_shift[] = TS_SHIFT;
+
+static struct sh_dmae_pdata dma0_platform_data = {
+ .channel = dmac0_channels,
+ .channel_num = ARRAY_SIZE(dmac0_channels),
+ .ts_low_shift = CHCR_TS_LOW_SHIFT,
+ .ts_low_mask = CHCR_TS_LOW_MASK,
+ .ts_high_shift = CHCR_TS_HIGH_SHIFT,
+ .ts_high_mask = CHCR_TS_HIGH_MASK,
+ .ts_shift = ts_shift,
+ .ts_shift_num = ARRAY_SIZE(ts_shift),
+ .dmaor_init = DMAOR_INIT,
+};
+
+/* Resource order important! */
+static struct resource dmac0_resources[] = {
+ {
+ /* Channel registers and DMAOR */
+ .start = 0xfe008020,
+ .end = 0xfe00808f,
+ .flags = IORESOURCE_MEM,
+ }, {
+ /* DMARSx */
+ .start = 0xfe009000,
+ .end = 0xfe00900b,
+ .flags = IORESOURCE_MEM,
+ }, {
+ .name = "error_irq",
+ .start = evt2irq(0x5c0),
+ .end = evt2irq(0x5c0),
+ .flags = IORESOURCE_IRQ,
+ }, {
+ /* IRQ for channels 0-5 */
+ .start = evt2irq(0x500),
+ .end = evt2irq(0x5a0),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device dma0_device = {
+ .name = "sh-dma-engine",
+ .id = 0,
+ .resource = dmac0_resources,
+ .num_resources = ARRAY_SIZE(dmac0_resources),
+ .dev = {
+ .platform_data = &dma0_platform_data,
+ },
+};
+
+#define USB_EHCI_START 0xffe70000
+#define USB_OHCI_START 0xffe70400
+
+static struct resource usb_ehci_resources[] = {
+ [0] = {
+ .start = USB_EHCI_START,
+ .end = USB_EHCI_START + 0x3ff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 77,
+ .end = 77,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device usb_ehci_device = {
+ .name = "sh_ehci",
+ .id = -1,
+ .dev = {
+ .dma_mask = &usb_ehci_device.dev.coherent_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+ .num_resources = ARRAY_SIZE(usb_ehci_resources),
+ .resource = usb_ehci_resources,
+};
+
+static struct resource usb_ohci_resources[] = {
+ [0] = {
+ .start = USB_OHCI_START,
+ .end = USB_OHCI_START + 0x3ff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 77,
+ .end = 77,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device usb_ohci_device = {
+ .name = "sh_ohci",
+ .id = -1,
+ .dev = {
+ .dma_mask = &usb_ohci_device.dev.coherent_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+ .num_resources = ARRAY_SIZE(usb_ohci_resources),
+ .resource = usb_ohci_resources,
+};
+
+static struct platform_device *sh7786_early_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &scif3_device,
+ &scif4_device,
+ &scif5_device,
+ &tmu0_device,
+ &tmu1_device,
+ &tmu2_device,
+ &tmu3_device,
+ &tmu4_device,
+ &tmu5_device,
+ &tmu6_device,
+ &tmu7_device,
+ &tmu8_device,
+ &tmu9_device,
+ &tmu10_device,
+ &tmu11_device,
+};
+
+static struct platform_device *sh7786_devices[] __initdata = {
+ &dma0_device,
+ &usb_ehci_device,
+ &usb_ohci_device,
+};
+
+/*
+ * Call this function from your platform board code if the board
+ * uses an external clock for USB.
+ */
+#define USBCTL0 0xffe70858
+#define CLOCK_MODE_MASK 0xffffff7f
+#define EXT_CLOCK_MODE 0x00000080
+
+void __init sh7786_usb_use_exclock(void)
+{
+ u32 val = __raw_readl(USBCTL0) & CLOCK_MODE_MASK;
+ __raw_writel(val | EXT_CLOCK_MODE, USBCTL0);
+}
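+
+/*
+ * Illustrative usage sketch (not part of the SoC support itself): a board
+ * file whose USB block runs from an external clock would call the helper
+ * above from its setup path before the EHCI/OHCI devices are registered,
+ * e.g. via a hypothetical board hook:
+ *
+ *	static void __init myboard_usb_init(void)
+ *	{
+ *		sh7786_usb_use_exclock();
+ *	}
+ */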
+
+#define USBINITREG1 0xffe70094
+#define USBINITREG2 0xffe7009c
+#define USBINITVAL1 0x00ff0040
+#define USBINITVAL2 0x00000001
+
+#define USBPCTL1 0xffe70804
+#define USBST 0xffe70808
+#define PHY_ENB 0x00000001
+#define PLL_ENB 0x00000002
+#define PHY_RST 0x00000004
+#define ACT_PLL_STATUS 0xc0000000
+
+static void __init sh7786_usb_setup(void)
+{
+ int i = 1000000;
+
+ /*
+ * USB initial settings
+ *
+ * The following settings are necessary
+ * for using the USB modules.
+ *
+ * see "USB Initial Settings" for detail
+ */
+ __raw_writel(USBINITVAL1, USBINITREG1);
+ __raw_writel(USBINITVAL2, USBINITREG2);
+
+ /*
+ * Set the PHY and PLL enable bit
+ */
+ __raw_writel(PHY_ENB | PLL_ENB, USBPCTL1);
+ while (i--) {
+ if (ACT_PLL_STATUS == (__raw_readl(USBST) & ACT_PLL_STATUS)) {
+ /* Set the PHY RST bit */
+ __raw_writel(PHY_ENB | PLL_ENB | PHY_RST, USBPCTL1);
+ printk(KERN_INFO "sh7786 usb setup done\n");
+ break;
+ }
+ cpu_relax();
+ }
+}
+
+enum {
+ UNUSED = 0,
+
+ /* interrupt sources */
+ IRL0_LLLL, IRL0_LLLH, IRL0_LLHL, IRL0_LLHH,
+ IRL0_LHLL, IRL0_LHLH, IRL0_LHHL, IRL0_LHHH,
+ IRL0_HLLL, IRL0_HLLH, IRL0_HLHL, IRL0_HLHH,
+ IRL0_HHLL, IRL0_HHLH, IRL0_HHHL,
+
+ IRL4_LLLL, IRL4_LLLH, IRL4_LLHL, IRL4_LLHH,
+ IRL4_LHLL, IRL4_LHLH, IRL4_LHHL, IRL4_LHHH,
+ IRL4_HLLL, IRL4_HLLH, IRL4_HLHL, IRL4_HLHH,
+ IRL4_HHLL, IRL4_HHLH, IRL4_HHHL,
+
+ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
+ WDT,
+ TMU0_0, TMU0_1, TMU0_2, TMU0_3,
+ TMU1_0, TMU1_1, TMU1_2,
+ DMAC0_0, DMAC0_1, DMAC0_2, DMAC0_3, DMAC0_4, DMAC0_5, DMAC0_6,
+ HUDI1, HUDI0,
+ DMAC1_0, DMAC1_1, DMAC1_2, DMAC1_3,
+ HPB_0, HPB_1, HPB_2,
+ SCIF0_0, SCIF0_1, SCIF0_2, SCIF0_3,
+ SCIF1,
+ TMU2, TMU3,
+ SCIF2, SCIF3, SCIF4, SCIF5,
+ Eth_0, Eth_1,
+ PCIeC0_0, PCIeC0_1, PCIeC0_2,
+ PCIeC1_0, PCIeC1_1, PCIeC1_2,
+ USB,
+ I2C0, I2C1,
+ DU,
+ SSI0, SSI1, SSI2, SSI3,
+ PCIeC2_0, PCIeC2_1, PCIeC2_2,
+ HAC0, HAC1,
+ FLCTL,
+ HSPI,
+ GPIO0, GPIO1,
+ Thermal,
+ INTICI0, INTICI1, INTICI2, INTICI3,
+ INTICI4, INTICI5, INTICI6, INTICI7,
+
+ /* Muxed sub-events */
+ TXI1, BRI1, RXI1, ERI1,
+};
+
+static struct intc_vect sh7786_vectors[] __initdata = {
+ INTC_VECT(WDT, 0x3e0),
+ INTC_VECT(TMU0_0, 0x400), INTC_VECT(TMU0_1, 0x420),
+ INTC_VECT(TMU0_2, 0x440), INTC_VECT(TMU0_3, 0x460),
+ INTC_VECT(TMU1_0, 0x480), INTC_VECT(TMU1_1, 0x4a0),
+ INTC_VECT(TMU1_2, 0x4c0),
+ INTC_VECT(DMAC0_0, 0x500), INTC_VECT(DMAC0_1, 0x520),
+ INTC_VECT(DMAC0_2, 0x540), INTC_VECT(DMAC0_3, 0x560),
+ INTC_VECT(DMAC0_4, 0x580), INTC_VECT(DMAC0_5, 0x5a0),
+ INTC_VECT(DMAC0_6, 0x5c0),
+ INTC_VECT(HUDI1, 0x5e0), INTC_VECT(HUDI0, 0x600),
+ INTC_VECT(DMAC1_0, 0x620), INTC_VECT(DMAC1_1, 0x640),
+ INTC_VECT(DMAC1_2, 0x660), INTC_VECT(DMAC1_3, 0x680),
+ INTC_VECT(HPB_0, 0x6a0), INTC_VECT(HPB_1, 0x6c0),
+ INTC_VECT(HPB_2, 0x6e0),
+ INTC_VECT(SCIF0_0, 0x700), INTC_VECT(SCIF0_1, 0x720),
+ INTC_VECT(SCIF0_2, 0x740), INTC_VECT(SCIF0_3, 0x760),
+ INTC_VECT(SCIF1, 0x780),
+ INTC_VECT(TMU2, 0x7a0), INTC_VECT(TMU3, 0x7c0),
+ INTC_VECT(SCIF2, 0x840), INTC_VECT(SCIF3, 0x860),
+ INTC_VECT(SCIF4, 0x880), INTC_VECT(SCIF5, 0x8a0),
+ INTC_VECT(Eth_0, 0x8c0), INTC_VECT(Eth_1, 0x8e0),
+ INTC_VECT(PCIeC0_0, 0xae0), INTC_VECT(PCIeC0_1, 0xb00),
+ INTC_VECT(PCIeC0_2, 0xb20),
+ INTC_VECT(PCIeC1_0, 0xb40), INTC_VECT(PCIeC1_1, 0xb60),
+ INTC_VECT(PCIeC1_2, 0xb80),
+ INTC_VECT(USB, 0xba0),
+ INTC_VECT(I2C0, 0xcc0), INTC_VECT(I2C1, 0xce0),
+ INTC_VECT(DU, 0xd00),
+ INTC_VECT(SSI0, 0xd20), INTC_VECT(SSI1, 0xd40),
+ INTC_VECT(SSI2, 0xd60), INTC_VECT(SSI3, 0xd80),
+ INTC_VECT(PCIeC2_0, 0xda0), INTC_VECT(PCIeC2_1, 0xdc0),
+ INTC_VECT(PCIeC2_2, 0xde0),
+ INTC_VECT(HAC0, 0xe00), INTC_VECT(HAC1, 0xe20),
+ INTC_VECT(FLCTL, 0xe40),
+ INTC_VECT(HSPI, 0xe80),
+ INTC_VECT(GPIO0, 0xea0), INTC_VECT(GPIO1, 0xec0),
+ INTC_VECT(Thermal, 0xee0),
+ INTC_VECT(INTICI0, 0xf00), INTC_VECT(INTICI1, 0xf20),
+ INTC_VECT(INTICI2, 0xf40), INTC_VECT(INTICI3, 0xf60),
+ INTC_VECT(INTICI4, 0xf80), INTC_VECT(INTICI5, 0xfa0),
+ INTC_VECT(INTICI6, 0xfc0), INTC_VECT(INTICI7, 0xfe0),
+};
+
+#define CnINTMSK0 0xfe410030
+#define CnINTMSK1 0xfe410040
+#define CnINTMSKCLR0 0xfe410050
+#define CnINTMSKCLR1 0xfe410060
+#define CnINT2MSKR0 0xfe410a20
+#define CnINT2MSKR1 0xfe410a24
+#define CnINT2MSKR2 0xfe410a28
+#define CnINT2MSKR3 0xfe410a2c
+#define CnINT2MSKCR0 0xfe410a30
+#define CnINT2MSKCR1 0xfe410a34
+#define CnINT2MSKCR2 0xfe410a38
+#define CnINT2MSKCR3 0xfe410a3c
+#define INTMSK2 0xfe410068
+#define INTMSKCLR2 0xfe41006c
+
+#define INTDISTCR0 0xfe4100b0
+#define INTDISTCR1 0xfe4100b4
+#define INT2DISTCR0 0xfe410900
+#define INT2DISTCR1 0xfe410904
+#define INT2DISTCR2 0xfe410908
+#define INT2DISTCR3 0xfe41090c
+
+static struct intc_mask_reg sh7786_mask_registers[] __initdata = {
+ { CnINTMSK0, CnINTMSKCLR0, 32,
+ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 },
+ INTC_SMP_BALANCING(INTDISTCR0) },
+ { INTMSK2, INTMSKCLR2, 32,
+ { IRL0_LLLL, IRL0_LLLH, IRL0_LLHL, IRL0_LLHH,
+ IRL0_LHLL, IRL0_LHLH, IRL0_LHHL, IRL0_LHHH,
+ IRL0_HLLL, IRL0_HLLH, IRL0_HLHL, IRL0_HLHH,
+ IRL0_HHLL, IRL0_HHLH, IRL0_HHHL, 0,
+ IRL4_LLLL, IRL4_LLLH, IRL4_LLHL, IRL4_LLHH,
+ IRL4_LHLL, IRL4_LHLH, IRL4_LHHL, IRL4_LHHH,
+ IRL4_HLLL, IRL4_HLLH, IRL4_HLHL, IRL4_HLHH,
+ IRL4_HHLL, IRL4_HHLH, IRL4_HHHL, 0, } },
+ { CnINT2MSKR0, CnINT2MSKCR0 , 32,
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, WDT },
+ INTC_SMP_BALANCING(INT2DISTCR0) },
+ { CnINT2MSKR1, CnINT2MSKCR1, 32,
+ { TMU0_0, TMU0_1, TMU0_2, TMU0_3, TMU1_0, TMU1_1, TMU1_2, 0,
+ DMAC0_0, DMAC0_1, DMAC0_2, DMAC0_3, DMAC0_4, DMAC0_5, DMAC0_6,
+ HUDI1, HUDI0,
+ DMAC1_0, DMAC1_1, DMAC1_2, DMAC1_3,
+ HPB_0, HPB_1, HPB_2,
+ SCIF0_0, SCIF0_1, SCIF0_2, SCIF0_3,
+ SCIF1,
+ TMU2, TMU3, 0, }, INTC_SMP_BALANCING(INT2DISTCR1) },
+ { CnINT2MSKR2, CnINT2MSKCR2, 32,
+ { 0, 0, SCIF2, SCIF3, SCIF4, SCIF5,
+ Eth_0, Eth_1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ PCIeC0_0, PCIeC0_1, PCIeC0_2,
+ PCIeC1_0, PCIeC1_1, PCIeC1_2,
+ USB, 0, 0 }, INTC_SMP_BALANCING(INT2DISTCR2) },
+ { CnINT2MSKR3, CnINT2MSKCR3, 32,
+ { 0, 0, 0, 0, 0, 0,
+ I2C0, I2C1,
+ DU, SSI0, SSI1, SSI2, SSI3,
+ PCIeC2_0, PCIeC2_1, PCIeC2_2,
+ HAC0, HAC1,
+ FLCTL, 0,
+ HSPI, GPIO0, GPIO1, Thermal,
+ 0, 0, 0, 0, 0, 0, 0, 0 }, INTC_SMP_BALANCING(INT2DISTCR3) },
+};
+
+static struct intc_prio_reg sh7786_prio_registers[] __initdata = {
+ { 0xfe410010, 0, 32, 4, /* INTPRI */ { IRQ0, IRQ1, IRQ2, IRQ3,
+ IRQ4, IRQ5, IRQ6, IRQ7 } },
+ { 0xfe410800, 0, 32, 8, /* INT2PRI0 */ { 0, 0, 0, WDT } },
+ { 0xfe410804, 0, 32, 8, /* INT2PRI1 */ { TMU0_0, TMU0_1,
+ TMU0_2, TMU0_3 } },
+ { 0xfe410808, 0, 32, 8, /* INT2PRI2 */ { TMU1_0, TMU1_1,
+ TMU1_2, 0 } },
+ { 0xfe41080c, 0, 32, 8, /* INT2PRI3 */ { DMAC0_0, DMAC0_1,
+ DMAC0_2, DMAC0_3 } },
+ { 0xfe410810, 0, 32, 8, /* INT2PRI4 */ { DMAC0_4, DMAC0_5,
+ DMAC0_6, HUDI1 } },
+ { 0xfe410814, 0, 32, 8, /* INT2PRI5 */ { HUDI0, DMAC1_0,
+ DMAC1_1, DMAC1_2 } },
+ { 0xfe410818, 0, 32, 8, /* INT2PRI6 */ { DMAC1_3, HPB_0,
+ HPB_1, HPB_2 } },
+ { 0xfe41081c, 0, 32, 8, /* INT2PRI7 */ { SCIF0_0, SCIF0_1,
+ SCIF0_2, SCIF0_3 } },
+ { 0xfe410820, 0, 32, 8, /* INT2PRI8 */ { SCIF1, TMU2, TMU3, 0 } },
+ { 0xfe410824, 0, 32, 8, /* INT2PRI9 */ { 0, 0, SCIF2, SCIF3 } },
+ { 0xfe410828, 0, 32, 8, /* INT2PRI10 */ { SCIF4, SCIF5,
+ Eth_0, Eth_1 } },
+ { 0xfe41082c, 0, 32, 8, /* INT2PRI11 */ { 0, 0, 0, 0 } },
+ { 0xfe410830, 0, 32, 8, /* INT2PRI12 */ { 0, 0, 0, 0 } },
+ { 0xfe410834, 0, 32, 8, /* INT2PRI13 */ { 0, 0, 0, 0 } },
+ { 0xfe410838, 0, 32, 8, /* INT2PRI14 */ { 0, 0, 0, PCIeC0_0 } },
+ { 0xfe41083c, 0, 32, 8, /* INT2PRI15 */ { PCIeC0_1, PCIeC0_2,
+ PCIeC1_0, PCIeC1_1 } },
+ { 0xfe410840, 0, 32, 8, /* INT2PRI16 */ { PCIeC1_2, USB, 0, 0 } },
+ { 0xfe410844, 0, 32, 8, /* INT2PRI17 */ { 0, 0, 0, 0 } },
+ { 0xfe410848, 0, 32, 8, /* INT2PRI18 */ { 0, 0, I2C0, I2C1 } },
+ { 0xfe41084c, 0, 32, 8, /* INT2PRI19 */ { DU, SSI0, SSI1, SSI2 } },
+ { 0xfe410850, 0, 32, 8, /* INT2PRI20 */ { SSI3, PCIeC2_0,
+ PCIeC2_1, PCIeC2_2 } },
+ { 0xfe410854, 0, 32, 8, /* INT2PRI21 */ { HAC0, HAC1, FLCTL, 0 } },
+ { 0xfe410858, 0, 32, 8, /* INT2PRI22 */ { HSPI, GPIO0,
+ GPIO1, Thermal } },
+ { 0xfe41085c, 0, 32, 8, /* INT2PRI23 */ { 0, 0, 0, 0 } },
+ { 0xfe410860, 0, 32, 8, /* INT2PRI24 */ { 0, 0, 0, 0 } },
+ { 0xfe410090, 0xfe4100a0, 32, 4, /* CnICIPRI / CnICIPRICLR */
+ { INTICI7, INTICI6, INTICI5, INTICI4,
+ INTICI3, INTICI2, INTICI1, INTICI0 }, INTC_SMP(4, 2) },
+};
+
+static struct intc_subgroup sh7786_subgroups[] __initdata = {
+ { 0xfe410c20, 32, SCIF1,
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, TXI1, BRI1, RXI1, ERI1 } },
+};
+
+static struct intc_desc sh7786_intc_desc __initdata = {
+ .name = "sh7786",
+ .hw = {
+ .vectors = sh7786_vectors,
+ .nr_vectors = ARRAY_SIZE(sh7786_vectors),
+ .mask_regs = sh7786_mask_registers,
+ .nr_mask_regs = ARRAY_SIZE(sh7786_mask_registers),
+ .subgroups = sh7786_subgroups,
+ .nr_subgroups = ARRAY_SIZE(sh7786_subgroups),
+ .prio_regs = sh7786_prio_registers,
+ .nr_prio_regs = ARRAY_SIZE(sh7786_prio_registers),
+ },
+};
+
+/* Support for external interrupt pins in IRQ mode */
+static struct intc_vect vectors_irq0123[] __initdata = {
+ INTC_VECT(IRQ0, 0x200), INTC_VECT(IRQ1, 0x240),
+ INTC_VECT(IRQ2, 0x280), INTC_VECT(IRQ3, 0x2c0),
+};
+
+static struct intc_vect vectors_irq4567[] __initdata = {
+ INTC_VECT(IRQ4, 0x300), INTC_VECT(IRQ5, 0x340),
+ INTC_VECT(IRQ6, 0x380), INTC_VECT(IRQ7, 0x3c0),
+};
+
+static struct intc_sense_reg sh7786_sense_registers[] __initdata = {
+ { 0xfe41001c, 32, 2, /* ICR1 */ { IRQ0, IRQ1, IRQ2, IRQ3,
+ IRQ4, IRQ5, IRQ6, IRQ7 } },
+};
+
+static struct intc_mask_reg sh7786_ack_registers[] __initdata = {
+ { 0xfe410024, 0, 32, /* INTREQ */
+ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
+};
+
+static DECLARE_INTC_DESC_ACK(intc_desc_irq0123, "sh7786-irq0123",
+ vectors_irq0123, NULL, sh7786_mask_registers,
+ sh7786_prio_registers, sh7786_sense_registers,
+ sh7786_ack_registers);
+
+static DECLARE_INTC_DESC_ACK(intc_desc_irq4567, "sh7786-irq4567",
+ vectors_irq4567, NULL, sh7786_mask_registers,
+ sh7786_prio_registers, sh7786_sense_registers,
+ sh7786_ack_registers);
+
+/* External interrupt pins in IRL mode */
+
+static struct intc_vect vectors_irl0123[] __initdata = {
+ INTC_VECT(IRL0_LLLL, 0x200), INTC_VECT(IRL0_LLLH, 0x220),
+ INTC_VECT(IRL0_LLHL, 0x240), INTC_VECT(IRL0_LLHH, 0x260),
+ INTC_VECT(IRL0_LHLL, 0x280), INTC_VECT(IRL0_LHLH, 0x2a0),
+ INTC_VECT(IRL0_LHHL, 0x2c0), INTC_VECT(IRL0_LHHH, 0x2e0),
+ INTC_VECT(IRL0_HLLL, 0x300), INTC_VECT(IRL0_HLLH, 0x320),
+ INTC_VECT(IRL0_HLHL, 0x340), INTC_VECT(IRL0_HLHH, 0x360),
+ INTC_VECT(IRL0_HHLL, 0x380), INTC_VECT(IRL0_HHLH, 0x3a0),
+ INTC_VECT(IRL0_HHHL, 0x3c0),
+};
+
+static struct intc_vect vectors_irl4567[] __initdata = {
+ INTC_VECT(IRL4_LLLL, 0x900), INTC_VECT(IRL4_LLLH, 0x920),
+ INTC_VECT(IRL4_LLHL, 0x940), INTC_VECT(IRL4_LLHH, 0x960),
+ INTC_VECT(IRL4_LHLL, 0x980), INTC_VECT(IRL4_LHLH, 0x9a0),
+ INTC_VECT(IRL4_LHHL, 0x9c0), INTC_VECT(IRL4_LHHH, 0x9e0),
+ INTC_VECT(IRL4_HLLL, 0xa00), INTC_VECT(IRL4_HLLH, 0xa20),
+ INTC_VECT(IRL4_HLHL, 0xa40), INTC_VECT(IRL4_HLHH, 0xa60),
+ INTC_VECT(IRL4_HHLL, 0xa80), INTC_VECT(IRL4_HHLH, 0xaa0),
+ INTC_VECT(IRL4_HHHL, 0xac0),
+};
+
+static DECLARE_INTC_DESC(intc_desc_irl0123, "sh7786-irl0123", vectors_irl0123,
+ NULL, sh7786_mask_registers, NULL, NULL);
+
+static DECLARE_INTC_DESC(intc_desc_irl4567, "sh7786-irl4567", vectors_irl4567,
+ NULL, sh7786_mask_registers, NULL, NULL);
+
+#define INTC_ICR0 0xfe410000
+#define INTC_INTMSK0 CnINTMSK0
+#define INTC_INTMSK1 CnINTMSK1
+#define INTC_INTMSK2 INTMSK2
+#define INTC_INTMSKCLR1 CnINTMSKCLR1
+#define INTC_INTMSKCLR2 INTMSKCLR2
+
+void __init plat_irq_setup(void)
+{
+ /* disable IRQ3-0 + IRQ7-4 */
+ __raw_writel(0xff000000, INTC_INTMSK0);
+
+ /* disable IRL3-0 + IRL7-4 */
+ __raw_writel(0xc0000000, INTC_INTMSK1);
+ __raw_writel(0xfffefffe, INTC_INTMSK2);
+
+ /* select IRL mode for IRL3-0 + IRL7-4 */
+ __raw_writel(__raw_readl(INTC_ICR0) & ~0x00c00000, INTC_ICR0);
+
+ register_intc_controller(&sh7786_intc_desc);
+}
+
+void __init plat_irq_setup_pins(int mode)
+{
+ switch (mode) {
+ case IRQ_MODE_IRQ7654:
+ /* select IRQ mode for IRL7-4 */
+ __raw_writel(__raw_readl(INTC_ICR0) | 0x00400000, INTC_ICR0);
+ register_intc_controller(&intc_desc_irq4567);
+ break;
+ case IRQ_MODE_IRQ3210:
+ /* select IRQ mode for IRL3-0 */
+ __raw_writel(__raw_readl(INTC_ICR0) | 0x00800000, INTC_ICR0);
+ register_intc_controller(&intc_desc_irq0123);
+ break;
+ case IRQ_MODE_IRL7654:
+ /* enable IRL7-4 but don't provide any masking */
+ __raw_writel(0x40000000, INTC_INTMSKCLR1);
+ __raw_writel(0x0000fffe, INTC_INTMSKCLR2);
+ break;
+ case IRQ_MODE_IRL3210:
+ /* enable IRL0-3 but don't provide any masking */
+ __raw_writel(0x80000000, INTC_INTMSKCLR1);
+ __raw_writel(0xfffe0000, INTC_INTMSKCLR2);
+ break;
+ case IRQ_MODE_IRL7654_MASK:
+ /* enable IRL7-4 and mask using cpu intc controller */
+ __raw_writel(0x40000000, INTC_INTMSKCLR1);
+ register_intc_controller(&intc_desc_irl4567);
+ break;
+ case IRQ_MODE_IRL3210_MASK:
+ /* enable IRL0-3 and mask using cpu intc controller */
+ __raw_writel(0x80000000, INTC_INTMSKCLR1);
+ register_intc_controller(&intc_desc_irl0123);
+ break;
+ default:
+ BUG();
+ }
+}
+
+void __init plat_mem_setup(void)
+{
+}
+
+static int __init sh7786_devices_setup(void)
+{
+ int ret, irq;
+
+ sh7786_usb_setup();
+
+ /*
+ * De-mux SCIF1 IRQs if possible
+ */
+ irq = intc_irq_lookup(sh7786_intc_desc.name, TXI1);
+ if (irq > 0) {
+ scif1_platform_data.irqs[SCIx_TXI_IRQ] = irq;
+ scif1_platform_data.irqs[SCIx_ERI_IRQ] =
+ intc_irq_lookup(sh7786_intc_desc.name, ERI1);
+ scif1_platform_data.irqs[SCIx_BRI_IRQ] =
+ intc_irq_lookup(sh7786_intc_desc.name, BRI1);
+ scif1_platform_data.irqs[SCIx_RXI_IRQ] =
+ intc_irq_lookup(sh7786_intc_desc.name, RXI1);
+ }
+
+ ret = platform_add_devices(sh7786_early_devices,
+ ARRAY_SIZE(sh7786_early_devices));
+ if (unlikely(ret != 0))
+ return ret;
+
+ return platform_add_devices(sh7786_devices,
+ ARRAY_SIZE(sh7786_devices));
+}
+arch_initcall(sh7786_devices_setup);
+
+void __init plat_early_device_setup(void)
+{
+ early_platform_add_devices(sh7786_early_devices,
+ ARRAY_SIZE(sh7786_early_devices));
+}
diff --git a/arch/sh/kernel/cpu/sh4a/setup-shx3.c b/arch/sh/kernel/cpu/sh4a/setup-shx3.c
new file mode 100644
index 00000000..bb208806
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/setup-shx3.c
@@ -0,0 +1,508 @@
+/*
+ * SH-X3 Prototype Setup
+ *
+ * Copyright (C) 2007 - 2010 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/serial.h>
+#include <linux/serial_sci.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/sh_timer.h>
+#include <cpu/shx3.h>
+#include <asm/mmzone.h>
+
+/*
+ * This intentionally only registers SCIF ports 0, 1, and 3. SCIF 2
+ * INTEVT values overlap with the FPU EXPEVT ones, requiring special
+ * demuxing in the exception dispatch path.
+ *
+ * As this overlap is something that never should have made it into
+ * silicon in the first place, we just refuse to deal with the port at
+ * all rather than adding infrastructure to hack around it.
+ */
+static struct plat_sci_port scif0_platform_data = {
+ .mapbase = 0xffc30000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .scbrr_algo_id = SCBRR_ALGO_2,
+ .type = PORT_SCIF,
+ .irqs = { 40, 41, 43, 42 },
+};
+
+static struct platform_device scif0_device = {
+ .name = "sh-sci",
+ .id = 0,
+ .dev = {
+ .platform_data = &scif0_platform_data,
+ },
+};
+
+static struct plat_sci_port scif1_platform_data = {
+ .mapbase = 0xffc40000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .scbrr_algo_id = SCBRR_ALGO_2,
+ .type = PORT_SCIF,
+ .irqs = { 44, 45, 47, 46 },
+};
+
+static struct platform_device scif1_device = {
+ .name = "sh-sci",
+ .id = 1,
+ .dev = {
+ .platform_data = &scif1_platform_data,
+ },
+};
+
+static struct plat_sci_port scif2_platform_data = {
+ .mapbase = 0xffc60000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .scbrr_algo_id = SCBRR_ALGO_2,
+ .type = PORT_SCIF,
+ .irqs = { 52, 53, 55, 54 },
+};
+
+static struct platform_device scif2_device = {
+ .name = "sh-sci",
+ .id = 2,
+ .dev = {
+ .platform_data = &scif2_platform_data,
+ },
+};
+
+static struct sh_timer_config tmu0_platform_data = {
+ .channel_offset = 0x04,
+ .timer_bit = 0,
+ .clockevent_rating = 200,
+};
+
+static struct resource tmu0_resources[] = {
+ [0] = {
+ .start = 0xffc10008,
+ .end = 0xffc10013,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 16,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu0_device = {
+ .name = "sh_tmu",
+ .id = 0,
+ .dev = {
+ .platform_data = &tmu0_platform_data,
+ },
+ .resource = tmu0_resources,
+ .num_resources = ARRAY_SIZE(tmu0_resources),
+};
+
+static struct sh_timer_config tmu1_platform_data = {
+ .channel_offset = 0x10,
+ .timer_bit = 1,
+ .clocksource_rating = 200,
+};
+
+static struct resource tmu1_resources[] = {
+ [0] = {
+ .start = 0xffc10014,
+ .end = 0xffc1001f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 17,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu1_device = {
+ .name = "sh_tmu",
+ .id = 1,
+ .dev = {
+ .platform_data = &tmu1_platform_data,
+ },
+ .resource = tmu1_resources,
+ .num_resources = ARRAY_SIZE(tmu1_resources),
+};
+
+static struct sh_timer_config tmu2_platform_data = {
+ .channel_offset = 0x1c,
+ .timer_bit = 2,
+};
+
+static struct resource tmu2_resources[] = {
+ [0] = {
+ .start = 0xffc10020,
+ .end = 0xffc1002f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 18,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu2_device = {
+ .name = "sh_tmu",
+ .id = 2,
+ .dev = {
+ .platform_data = &tmu2_platform_data,
+ },
+ .resource = tmu2_resources,
+ .num_resources = ARRAY_SIZE(tmu2_resources),
+};
+
+static struct sh_timer_config tmu3_platform_data = {
+ .channel_offset = 0x04,
+ .timer_bit = 0,
+};
+
+static struct resource tmu3_resources[] = {
+ [0] = {
+ .start = 0xffc20008,
+ .end = 0xffc20013,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 19,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu3_device = {
+ .name = "sh_tmu",
+ .id = 3,
+ .dev = {
+ .platform_data = &tmu3_platform_data,
+ },
+ .resource = tmu3_resources,
+ .num_resources = ARRAY_SIZE(tmu3_resources),
+};
+
+static struct sh_timer_config tmu4_platform_data = {
+ .channel_offset = 0x10,
+ .timer_bit = 1,
+};
+
+static struct resource tmu4_resources[] = {
+ [0] = {
+ .start = 0xffc20014,
+ .end = 0xffc2001f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 20,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu4_device = {
+ .name = "sh_tmu",
+ .id = 4,
+ .dev = {
+ .platform_data = &tmu4_platform_data,
+ },
+ .resource = tmu4_resources,
+ .num_resources = ARRAY_SIZE(tmu4_resources),
+};
+
+static struct sh_timer_config tmu5_platform_data = {
+ .channel_offset = 0x1c,
+ .timer_bit = 2,
+};
+
+static struct resource tmu5_resources[] = {
+ [0] = {
+ .start = 0xffc20020,
+ .end = 0xffc2002b,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 21,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu5_device = {
+ .name = "sh_tmu",
+ .id = 5,
+ .dev = {
+ .platform_data = &tmu5_platform_data,
+ },
+ .resource = tmu5_resources,
+ .num_resources = ARRAY_SIZE(tmu5_resources),
+};
+
+static struct platform_device *shx3_early_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &tmu0_device,
+ &tmu1_device,
+ &tmu2_device,
+ &tmu3_device,
+ &tmu4_device,
+ &tmu5_device,
+};
+
+static int __init shx3_devices_setup(void)
+{
+ return platform_add_devices(shx3_early_devices,
+ ARRAY_SIZE(shx3_early_devices));
+}
+arch_initcall(shx3_devices_setup);
+
+void __init plat_early_device_setup(void)
+{
+ early_platform_add_devices(shx3_early_devices,
+ ARRAY_SIZE(shx3_early_devices));
+}
+
+enum {
+ UNUSED = 0,
+
+ /* interrupt sources */
+ IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH,
+ IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH,
+ IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH,
+ IRL_HHLL, IRL_HHLH, IRL_HHHL,
+ IRQ0, IRQ1, IRQ2, IRQ3,
+ HUDII,
+ TMU0, TMU1, TMU2, TMU3, TMU4, TMU5,
+ PCII0, PCII1, PCII2, PCII3, PCII4,
+ PCII5, PCII6, PCII7, PCII8, PCII9,
+ SCIF0_ERI, SCIF0_RXI, SCIF0_BRI, SCIF0_TXI,
+ SCIF1_ERI, SCIF1_RXI, SCIF1_BRI, SCIF1_TXI,
+ SCIF2_ERI, SCIF2_RXI, SCIF2_BRI, SCIF2_TXI,
+ SCIF3_ERI, SCIF3_RXI, SCIF3_BRI, SCIF3_TXI,
+ DMAC0_DMINT0, DMAC0_DMINT1, DMAC0_DMINT2, DMAC0_DMINT3,
+ DMAC0_DMINT4, DMAC0_DMINT5, DMAC0_DMAE,
+ DU,
+ DMAC1_DMINT6, DMAC1_DMINT7, DMAC1_DMINT8, DMAC1_DMINT9,
+ DMAC1_DMINT10, DMAC1_DMINT11, DMAC1_DMAE,
+ IIC, VIN0, VIN1, VCORE0, ATAPI,
+ DTU0, DTU1, DTU2, DTU3,
+ FE0, FE1,
+ GPIO0, GPIO1, GPIO2, GPIO3,
+ PAM, IRM,
+ INTICI0, INTICI1, INTICI2, INTICI3,
+ INTICI4, INTICI5, INTICI6, INTICI7,
+
+ /* interrupt groups */
+ IRL, PCII56789, SCIF0, SCIF1, SCIF2, SCIF3,
+ DMAC0, DMAC1,
+};
+
+static struct intc_vect vectors[] __initdata = {
+ INTC_VECT(HUDII, 0x3e0),
+ INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420),
+ INTC_VECT(TMU2, 0x440), INTC_VECT(TMU3, 0x460),
+ INTC_VECT(TMU4, 0x480), INTC_VECT(TMU5, 0x4a0),
+ INTC_VECT(PCII0, 0x500), INTC_VECT(PCII1, 0x520),
+ INTC_VECT(PCII2, 0x540), INTC_VECT(PCII3, 0x560),
+ INTC_VECT(PCII4, 0x580), INTC_VECT(PCII5, 0x5a0),
+ INTC_VECT(PCII6, 0x5c0), INTC_VECT(PCII7, 0x5e0),
+ INTC_VECT(PCII8, 0x600), INTC_VECT(PCII9, 0x620),
+ INTC_VECT(SCIF0_ERI, 0x700), INTC_VECT(SCIF0_RXI, 0x720),
+ INTC_VECT(SCIF0_BRI, 0x740), INTC_VECT(SCIF0_TXI, 0x760),
+ INTC_VECT(SCIF1_ERI, 0x780), INTC_VECT(SCIF1_RXI, 0x7a0),
+ INTC_VECT(SCIF1_BRI, 0x7c0), INTC_VECT(SCIF1_TXI, 0x7e0),
+ INTC_VECT(SCIF3_ERI, 0x880), INTC_VECT(SCIF3_RXI, 0x8a0),
+ INTC_VECT(SCIF3_BRI, 0x8c0), INTC_VECT(SCIF3_TXI, 0x8e0),
+ INTC_VECT(DMAC0_DMINT0, 0x900), INTC_VECT(DMAC0_DMINT1, 0x920),
+ INTC_VECT(DMAC0_DMINT2, 0x940), INTC_VECT(DMAC0_DMINT3, 0x960),
+ INTC_VECT(DMAC0_DMINT4, 0x980), INTC_VECT(DMAC0_DMINT5, 0x9a0),
+ INTC_VECT(DMAC0_DMAE, 0x9c0),
+ INTC_VECT(DU, 0x9e0),
+ INTC_VECT(DMAC1_DMINT6, 0xa00), INTC_VECT(DMAC1_DMINT7, 0xa20),
+ INTC_VECT(DMAC1_DMINT8, 0xa40), INTC_VECT(DMAC1_DMINT9, 0xa60),
+ INTC_VECT(DMAC1_DMINT10, 0xa80), INTC_VECT(DMAC1_DMINT11, 0xaa0),
+ INTC_VECT(DMAC1_DMAE, 0xac0),
+ INTC_VECT(IIC, 0xae0),
+ INTC_VECT(VIN0, 0xb00), INTC_VECT(VIN1, 0xb20),
+ INTC_VECT(VCORE0, 0xb00), INTC_VECT(ATAPI, 0xb60),
+ INTC_VECT(DTU0, 0xc00), INTC_VECT(DTU0, 0xc20),
+ INTC_VECT(DTU0, 0xc40),
+ INTC_VECT(DTU1, 0xc60), INTC_VECT(DTU1, 0xc80),
+ INTC_VECT(DTU1, 0xca0),
+ INTC_VECT(DTU2, 0xcc0), INTC_VECT(DTU2, 0xce0),
+ INTC_VECT(DTU2, 0xd00),
+ INTC_VECT(DTU3, 0xd20), INTC_VECT(DTU3, 0xd40),
+ INTC_VECT(DTU3, 0xd60),
+ INTC_VECT(FE0, 0xe00), INTC_VECT(FE1, 0xe20),
+ INTC_VECT(GPIO0, 0xe40), INTC_VECT(GPIO1, 0xe60),
+ INTC_VECT(GPIO2, 0xe80), INTC_VECT(GPIO3, 0xea0),
+ INTC_VECT(PAM, 0xec0), INTC_VECT(IRM, 0xee0),
+ INTC_VECT(INTICI0, 0xf00), INTC_VECT(INTICI1, 0xf20),
+ INTC_VECT(INTICI2, 0xf40), INTC_VECT(INTICI3, 0xf60),
+ INTC_VECT(INTICI4, 0xf80), INTC_VECT(INTICI5, 0xfa0),
+ INTC_VECT(INTICI6, 0xfc0), INTC_VECT(INTICI7, 0xfe0),
+};
+
+static struct intc_group groups[] __initdata = {
+ INTC_GROUP(IRL, IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH,
+ IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH,
+ IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH,
+ IRL_HHLL, IRL_HHLH, IRL_HHHL),
+ INTC_GROUP(PCII56789, PCII5, PCII6, PCII7, PCII8, PCII9),
+ INTC_GROUP(SCIF0, SCIF0_ERI, SCIF0_RXI, SCIF0_BRI, SCIF0_TXI),
+ INTC_GROUP(SCIF1, SCIF1_ERI, SCIF1_RXI, SCIF1_BRI, SCIF1_TXI),
+ INTC_GROUP(SCIF3, SCIF3_ERI, SCIF3_RXI, SCIF3_BRI, SCIF3_TXI),
+ INTC_GROUP(DMAC0, DMAC0_DMINT0, DMAC0_DMINT1, DMAC0_DMINT2,
+ DMAC0_DMINT3, DMAC0_DMINT4, DMAC0_DMINT5, DMAC0_DMAE),
+ INTC_GROUP(DMAC1, DMAC1_DMINT6, DMAC1_DMINT7, DMAC1_DMINT8,
+ DMAC1_DMINT9, DMAC1_DMINT10, DMAC1_DMINT11),
+};
+
+#define INT2DISTCR0 0xfe4108a0
+#define INT2DISTCR1 0xfe4108a4
+#define INT2DISTCR2 0xfe4108a8
+
+static struct intc_mask_reg mask_registers[] __initdata = {
+ { 0xfe410030, 0xfe410050, 32, /* CnINTMSK0 / CnINTMSKCLR0 */
+ { IRQ0, IRQ1, IRQ2, IRQ3 } },
+ { 0xfe410040, 0xfe410060, 32, /* CnINTMSK1 / CnINTMSKCLR1 */
+ { IRL } },
+ { 0xfe410820, 0xfe410850, 32, /* CnINT2MSK0 / CnINT2MSKCLR0 */
+ { FE1, FE0, 0, ATAPI, VCORE0, VIN1, VIN0, IIC,
+ DU, GPIO3, GPIO2, GPIO1, GPIO0, PAM, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, /* HUDI bits ignored */
+ 0, TMU5, TMU4, TMU3, TMU2, TMU1, TMU0, 0, },
+ INTC_SMP_BALANCING(INT2DISTCR0) },
+ { 0xfe410830, 0xfe410860, 32, /* CnINT2MSK1 / CnINT2MSKCLR1 */
+ { 0, 0, 0, 0, DTU3, DTU2, DTU1, DTU0, /* IRM bits ignored */
+ PCII9, PCII8, PCII7, PCII6, PCII5, PCII4, PCII3, PCII2,
+ PCII1, PCII0, DMAC1_DMAE, DMAC1_DMINT11,
+ DMAC1_DMINT10, DMAC1_DMINT9, DMAC1_DMINT8, DMAC1_DMINT7,
+ DMAC1_DMINT6, DMAC0_DMAE, DMAC0_DMINT5, DMAC0_DMINT4,
+ DMAC0_DMINT3, DMAC0_DMINT2, DMAC0_DMINT1, DMAC0_DMINT0 },
+ INTC_SMP_BALANCING(INT2DISTCR1) },
+ { 0xfe410840, 0xfe410870, 32, /* CnINT2MSK2 / CnINT2MSKCLR2 */
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ SCIF3_TXI, SCIF3_BRI, SCIF3_RXI, SCIF3_ERI,
+ SCIF2_TXI, SCIF2_BRI, SCIF2_RXI, SCIF2_ERI,
+ SCIF1_TXI, SCIF1_BRI, SCIF1_RXI, SCIF1_ERI,
+ SCIF0_TXI, SCIF0_BRI, SCIF0_RXI, SCIF0_ERI },
+ INTC_SMP_BALANCING(INT2DISTCR2) },
+};
+
+static struct intc_prio_reg prio_registers[] __initdata = {
+ { 0xfe410010, 0, 32, 4, /* INTPRI */ { IRQ0, IRQ1, IRQ2, IRQ3 } },
+
+ { 0xfe410800, 0, 32, 4, /* INT2PRI0 */ { 0, HUDII, TMU5, TMU4,
+ TMU3, TMU2, TMU1, TMU0 } },
+ { 0xfe410804, 0, 32, 4, /* INT2PRI1 */ { DTU3, DTU2, DTU1, DTU0,
+ SCIF3, SCIF2,
+ SCIF1, SCIF0 } },
+ { 0xfe410808, 0, 32, 4, /* INT2PRI2 */ { DMAC1, DMAC0,
+ PCII56789, PCII4,
+ PCII3, PCII2,
+ PCII1, PCII0 } },
+ { 0xfe41080c, 0, 32, 4, /* INT2PRI3 */ { FE1, FE0, ATAPI, VCORE0,
+ VIN1, VIN0, IIC, DU} },
+ { 0xfe410810, 0, 32, 4, /* INT2PRI4 */ { 0, 0, PAM, GPIO3,
+ GPIO2, GPIO1, GPIO0, IRM } },
+ { 0xfe410090, 0xfe4100a0, 32, 4, /* CnICIPRI / CnICIPRICLR */
+ { INTICI7, INTICI6, INTICI5, INTICI4,
+ INTICI3, INTICI2, INTICI1, INTICI0 }, INTC_SMP(4, 4) },
+};
+
+static DECLARE_INTC_DESC(intc_desc, "shx3", vectors, groups,
+ mask_registers, prio_registers, NULL);
+
+/* Support for external interrupt pins in IRQ mode */
+static struct intc_vect vectors_irq[] __initdata = {
+ INTC_VECT(IRQ0, 0x240), INTC_VECT(IRQ1, 0x280),
+ INTC_VECT(IRQ2, 0x2c0), INTC_VECT(IRQ3, 0x300),
+};
+
+static struct intc_sense_reg sense_registers[] __initdata = {
+ { 0xfe41001c, 32, 2, /* ICR1 */ { IRQ0, IRQ1, IRQ2, IRQ3 } },
+};
+
+static DECLARE_INTC_DESC(intc_desc_irq, "shx3-irq", vectors_irq, groups,
+ mask_registers, prio_registers, sense_registers);
+
+/* External interrupt pins in IRL mode */
+static struct intc_vect vectors_irl[] __initdata = {
+ INTC_VECT(IRL_LLLL, 0x200), INTC_VECT(IRL_LLLH, 0x220),
+ INTC_VECT(IRL_LLHL, 0x240), INTC_VECT(IRL_LLHH, 0x260),
+ INTC_VECT(IRL_LHLL, 0x280), INTC_VECT(IRL_LHLH, 0x2a0),
+ INTC_VECT(IRL_LHHL, 0x2c0), INTC_VECT(IRL_LHHH, 0x2e0),
+ INTC_VECT(IRL_HLLL, 0x300), INTC_VECT(IRL_HLLH, 0x320),
+ INTC_VECT(IRL_HLHL, 0x340), INTC_VECT(IRL_HLHH, 0x360),
+ INTC_VECT(IRL_HHLL, 0x380), INTC_VECT(IRL_HHLH, 0x3a0),
+ INTC_VECT(IRL_HHHL, 0x3c0),
+};
+
+static DECLARE_INTC_DESC(intc_desc_irl, "shx3-irl", vectors_irl, groups,
+ mask_registers, prio_registers, NULL);
+
+void __init plat_irq_setup_pins(int mode)
+{
+ int ret = 0;
+
+ switch (mode) {
+ case IRQ_MODE_IRQ:
+ ret |= gpio_request(GPIO_FN_IRQ3, intc_desc_irq.name);
+ ret |= gpio_request(GPIO_FN_IRQ2, intc_desc_irq.name);
+ ret |= gpio_request(GPIO_FN_IRQ1, intc_desc_irq.name);
+ ret |= gpio_request(GPIO_FN_IRQ0, intc_desc_irq.name);
+
+ if (unlikely(ret)) {
+ pr_err("Failed to set IRQ mode\n");
+ return;
+ }
+
+ register_intc_controller(&intc_desc_irq);
+ break;
+ case IRQ_MODE_IRL3210:
+ ret |= gpio_request(GPIO_FN_IRL3, intc_desc_irl.name);
+ ret |= gpio_request(GPIO_FN_IRL2, intc_desc_irl.name);
+ ret |= gpio_request(GPIO_FN_IRL1, intc_desc_irl.name);
+ ret |= gpio_request(GPIO_FN_IRL0, intc_desc_irl.name);
+
+ if (unlikely(ret)) {
+ pr_err("Failed to set IRL mode\n");
+ return;
+ }
+
+ register_intc_controller(&intc_desc_irl);
+ break;
+ default:
+ BUG();
+ }
+}
+
+void __init plat_irq_setup(void)
+{
+ reserve_intc_vectors(vectors_irq, ARRAY_SIZE(vectors_irq));
+ reserve_intc_vectors(vectors_irl, ARRAY_SIZE(vectors_irl));
+
+ register_intc_controller(&intc_desc);
+}
+
+void __init plat_mem_setup(void)
+{
+ unsigned int nid = 1;
+
+ /* Register CPU#0 URAM space as Node 1 */
+ setup_bootmem_node(nid++, 0x145f0000, 0x14610000); /* CPU0 */
+
+#if 0
+ /* XXX: Not yet.. */
+ setup_bootmem_node(nid++, 0x14df0000, 0x14e10000); /* CPU1 */
+ setup_bootmem_node(nid++, 0x155f0000, 0x15610000); /* CPU2 */
+ setup_bootmem_node(nid++, 0x15df0000, 0x15e10000); /* CPU3 */
+#endif
+
+ setup_bootmem_node(nid++, 0x16000000, 0x16020000); /* CSM */
+}
diff --git a/arch/sh/kernel/cpu/sh4a/smp-shx3.c b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
new file mode 100644
index 00000000..03f2b557
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
@@ -0,0 +1,166 @@
+/*
+ * SH-X3 SMP
+ *
+ * Copyright (C) 2007 - 2010 Paul Mundt
+ * Copyright (C) 2007 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/cpumask.h>
+#include <linux/smp.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/cpu.h>
+#include <asm/sections.h>
+
+#define STBCR_REG(phys_id) (0xfe400004 | (phys_id << 12))
+#define RESET_REG(phys_id) (0xfe400008 | (phys_id << 12))
+
+#define STBCR_MSTP 0x00000001
+#define STBCR_RESET 0x00000002
+#define STBCR_SLEEP 0x00000004
+#define STBCR_LTSLP 0x80000000
+
+static irqreturn_t ipi_interrupt_handler(int irq, void *arg)
+{
+ unsigned int message = (unsigned int)(long)arg;
+ unsigned int cpu = hard_smp_processor_id();
+ unsigned int offs = 4 * cpu;
+ unsigned int x;
+
+ x = __raw_readl(0xfe410070 + offs); /* C0INITICI..CnINTICI */
+ x &= (1 << (message << 2));
+ __raw_writel(x, 0xfe410080 + offs); /* C0INTICICLR..CnINTICICLR */
+
+ smp_message_recv(message);
+
+ return IRQ_HANDLED;
+}
+
+static void shx3_smp_setup(void)
+{
+ unsigned int cpu = 0;
+ int i, num;
+
+ init_cpu_possible(cpumask_of(cpu));
+
+ /* Enable light sleep for the boot CPU */
+ __raw_writel(__raw_readl(STBCR_REG(cpu)) | STBCR_LTSLP, STBCR_REG(cpu));
+
+ __cpu_number_map[0] = 0;
+ __cpu_logical_map[0] = 0;
+
+ /*
+ * Do this stupidly for now.. we don't have an easy way to probe
+ * for the total number of cores.
+ */
+ for (i = 1, num = 0; i < NR_CPUS; i++) {
+ set_cpu_possible(i, true);
+ __cpu_number_map[i] = ++num;
+ __cpu_logical_map[num] = i;
+ }
+
+ printk(KERN_INFO "Detected %i available secondary CPU(s)\n", num);
+}
+
+static void shx3_prepare_cpus(unsigned int max_cpus)
+{
+ int i;
+
+ local_timer_setup(0);
+
+ BUILD_BUG_ON(SMP_MSG_NR >= 8);
+
+ for (i = 0; i < SMP_MSG_NR; i++)
+ request_irq(104 + i, ipi_interrupt_handler,
+ IRQF_PERCPU, "IPI", (void *)(long)i);
+
+ for (i = 0; i < max_cpus; i++)
+ set_cpu_present(i, true);
+}
+
+static void shx3_start_cpu(unsigned int cpu, unsigned long entry_point)
+{
+ if (__in_29bit_mode())
+ __raw_writel(entry_point, RESET_REG(cpu));
+ else
+ __raw_writel(virt_to_phys(entry_point), RESET_REG(cpu));
+
+ if (!(__raw_readl(STBCR_REG(cpu)) & STBCR_MSTP))
+ __raw_writel(STBCR_MSTP, STBCR_REG(cpu));
+
+ while (!(__raw_readl(STBCR_REG(cpu)) & STBCR_MSTP))
+ cpu_relax();
+
+ /* Start up secondary processor by sending a reset */
+ __raw_writel(STBCR_RESET | STBCR_LTSLP, STBCR_REG(cpu));
+}
+
+static unsigned int shx3_smp_processor_id(void)
+{
+ return __raw_readl(0xff000048); /* CPIDR */
+}
+
+static void shx3_send_ipi(unsigned int cpu, unsigned int message)
+{
+ unsigned long addr = 0xfe410070 + (cpu * 4);
+
+ BUG_ON(cpu >= 4);
+
+ __raw_writel(1 << (message << 2), addr); /* C0INTICI..CnINTICI */
+}
+
+static void shx3_update_boot_vector(unsigned int cpu)
+{
+ __raw_writel(STBCR_MSTP, STBCR_REG(cpu));
+ while (!(__raw_readl(STBCR_REG(cpu)) & STBCR_MSTP))
+ cpu_relax();
+ __raw_writel(STBCR_RESET, STBCR_REG(cpu));
+}
+
+static int __cpuinit
+shx3_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (unsigned int)hcpu;
+
+ switch (action) {
+ case CPU_UP_PREPARE:
+ shx3_update_boot_vector(cpu);
+ break;
+ case CPU_ONLINE:
+ pr_info("CPU %u is now online\n", cpu);
+ break;
+ case CPU_DEAD:
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata shx3_cpu_notifier = {
+ .notifier_call = shx3_cpu_callback,
+};
+
+static int __cpuinit register_shx3_cpu_notifier(void)
+{
+ register_hotcpu_notifier(&shx3_cpu_notifier);
+ return 0;
+}
+late_initcall(register_shx3_cpu_notifier);
+
+struct plat_smp_ops shx3_smp_ops = {
+ .smp_setup = shx3_smp_setup,
+ .prepare_cpus = shx3_prepare_cpus,
+ .start_cpu = shx3_start_cpu,
+ .smp_processor_id = shx3_smp_processor_id,
+ .send_ipi = shx3_send_ipi,
+ .cpu_die = native_cpu_die,
+ .cpu_disable = native_cpu_disable,
+ .play_dead = native_play_dead,
+};
diff --git a/arch/sh/kernel/cpu/sh4a/ubc.c b/arch/sh/kernel/cpu/sh4a/ubc.c
new file mode 100644
index 00000000..efb2745b
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/ubc.c
@@ -0,0 +1,133 @@
+/*
+ * arch/sh/kernel/cpu/sh4a/ubc.c
+ *
+ * On-chip UBC support for SH-4A CPUs.
+ *
+ * Copyright (C) 2009 - 2010 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <asm/hw_breakpoint.h>
+
+#define UBC_CBR(idx) (0xff200000 + (0x20 * idx))
+#define UBC_CRR(idx) (0xff200004 + (0x20 * idx))
+#define UBC_CAR(idx) (0xff200008 + (0x20 * idx))
+#define UBC_CAMR(idx) (0xff20000c + (0x20 * idx))
+
+#define UBC_CCMFR 0xff200600
+#define UBC_CBCR 0xff200620
+
+/* CRR */
+#define UBC_CRR_PCB (1 << 1)
+#define UBC_CRR_BIE (1 << 0)
+
+/* CBR */
+#define UBC_CBR_CE (1 << 0)
+
+static struct sh_ubc sh4a_ubc;
+
+static void sh4a_ubc_enable(struct arch_hw_breakpoint *info, int idx)
+{
+ __raw_writel(UBC_CBR_CE | info->len | info->type, UBC_CBR(idx));
+ __raw_writel(info->address, UBC_CAR(idx));
+}
+
+static void sh4a_ubc_disable(struct arch_hw_breakpoint *info, int idx)
+{
+ __raw_writel(0, UBC_CBR(idx));
+ __raw_writel(0, UBC_CAR(idx));
+}
+
+static void sh4a_ubc_enable_all(unsigned long mask)
+{
+ int i;
+
+ for (i = 0; i < sh4a_ubc.num_events; i++)
+ if (mask & (1 << i))
+ __raw_writel(__raw_readl(UBC_CBR(i)) | UBC_CBR_CE,
+ UBC_CBR(i));
+}
+
+static void sh4a_ubc_disable_all(void)
+{
+ int i;
+
+ for (i = 0; i < sh4a_ubc.num_events; i++)
+ __raw_writel(__raw_readl(UBC_CBR(i)) & ~UBC_CBR_CE,
+ UBC_CBR(i));
+}
+
+static unsigned long sh4a_ubc_active_mask(void)
+{
+ unsigned long active = 0;
+ int i;
+
+ for (i = 0; i < sh4a_ubc.num_events; i++)
+ if (__raw_readl(UBC_CBR(i)) & UBC_CBR_CE)
+ active |= (1 << i);
+
+ return active;
+}
+
+static unsigned long sh4a_ubc_triggered_mask(void)
+{
+ return __raw_readl(UBC_CCMFR);
+}
+
+static void sh4a_ubc_clear_triggered_mask(unsigned long mask)
+{
+ __raw_writel(__raw_readl(UBC_CCMFR) & ~mask, UBC_CCMFR);
+}
+
+static struct sh_ubc sh4a_ubc = {
+ .name = "SH-4A",
+ .num_events = 2,
+ .trap_nr = 0x1e0,
+ .enable = sh4a_ubc_enable,
+ .disable = sh4a_ubc_disable,
+ .enable_all = sh4a_ubc_enable_all,
+ .disable_all = sh4a_ubc_disable_all,
+ .active_mask = sh4a_ubc_active_mask,
+ .triggered_mask = sh4a_ubc_triggered_mask,
+ .clear_triggered_mask = sh4a_ubc_clear_triggered_mask,
+};
+
+static int __init sh4a_ubc_init(void)
+{
+ struct clk *ubc_iclk = clk_get(NULL, "ubc0");
+ int i;
+
+ /*
+ * The UBC MSTP bit is optional, as not all platforms will have
+ * it. Just ignore it if we can't find it.
+ */
+ if (IS_ERR(ubc_iclk))
+ ubc_iclk = NULL;
+
+ clk_enable(ubc_iclk);
+
+ __raw_writel(0, UBC_CBCR);
+
+ for (i = 0; i < sh4a_ubc.num_events; i++) {
+ __raw_writel(0, UBC_CAMR(i));
+ __raw_writel(0, UBC_CBR(i));
+
+ __raw_writel(UBC_CRR_BIE | UBC_CRR_PCB, UBC_CRR(i));
+
+ /* dummy read for write posting */
+ (void)__raw_readl(UBC_CRR(i));
+ }
+
+ clk_disable(ubc_iclk);
+
+ sh4a_ubc.clk = ubc_iclk;
+
+ return register_sh_ubc(&sh4a_ubc);
+}
+arch_initcall(sh4a_ubc_init);
diff --git a/arch/sh/kernel/cpu/sh5/Makefile b/arch/sh/kernel/cpu/sh5/Makefile
new file mode 100644
index 00000000..a184a31e
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh5/Makefile
@@ -0,0 +1,15 @@
+#
+# Makefile for the Linux/SuperH SH-5 backends.
+#
+obj-y := entry.o probe.o switchto.o
+
+obj-$(CONFIG_SH_FPU) += fpu.o
+obj-$(CONFIG_KALLSYMS) += unwind.o
+
+# CPU subtype setup
+obj-$(CONFIG_CPU_SH5) += setup-sh5.o
+
+# Primary on-chip clocks (common)
+clock-$(CONFIG_CPU_SH5) := clock-sh5.o
+
+obj-y += $(clock-y)
diff --git a/arch/sh/kernel/cpu/sh5/clock-sh5.c b/arch/sh/kernel/cpu/sh5/clock-sh5.c
new file mode 100644
index 00000000..c48b93d4
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh5/clock-sh5.c
@@ -0,0 +1,79 @@
+/*
+ * arch/sh/kernel/cpu/sh5/clock-sh5.c
+ *
+ * SH-5 support for the clock framework
+ *
+ * Copyright (C) 2008 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <asm/clock.h>
+#include <asm/io.h>
+
+static int ifc_table[] = { 2, 4, 6, 8, 10, 12, 16, 24 };
+
+/* Clock, Power and Reset Controller */
+#define CPRC_BLOCK_OFF 0x01010000
+#define CPRC_BASE (PHYS_PERIPHERAL_BLOCK + CPRC_BLOCK_OFF)
+
+static unsigned long cprc_base;
+
+static void master_clk_init(struct clk *clk)
+{
+ int idx = (__raw_readl(cprc_base + 0x00) >> 6) & 0x0007;
+ clk->rate *= ifc_table[idx];
+}
+
+static struct sh_clk_ops sh5_master_clk_ops = {
+ .init = master_clk_init,
+};
+
+static unsigned long module_clk_recalc(struct clk *clk)
+{
+ int idx = (__raw_readw(cprc_base) >> 12) & 0x0007;
+ return clk->parent->rate / ifc_table[idx];
+}
+
+static struct sh_clk_ops sh5_module_clk_ops = {
+ .recalc = module_clk_recalc,
+};
+
+static unsigned long bus_clk_recalc(struct clk *clk)
+{
+ int idx = (__raw_readw(cprc_base) >> 3) & 0x0007;
+ return clk->parent->rate / ifc_table[idx];
+}
+
+static struct sh_clk_ops sh5_bus_clk_ops = {
+ .recalc = bus_clk_recalc,
+};
+
+static unsigned long cpu_clk_recalc(struct clk *clk)
+{
+ int idx = (__raw_readw(cprc_base) & 0x0007);
+ return clk->parent->rate / ifc_table[idx];
+}
+
+static struct sh_clk_ops sh5_cpu_clk_ops = {
+ .recalc = cpu_clk_recalc,
+};
+
+static struct sh_clk_ops *sh5_clk_ops[] = {
+ &sh5_master_clk_ops,
+ &sh5_module_clk_ops,
+ &sh5_bus_clk_ops,
+ &sh5_cpu_clk_ops,
+};
+
+void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
+{
+ cprc_base = (unsigned long)ioremap_nocache(CPRC_BASE, 1024);
+ BUG_ON(!cprc_base);
+
+ if (idx < ARRAY_SIZE(sh5_clk_ops))
+ *ops = sh5_clk_ops[idx];
+}
diff --git a/arch/sh/kernel/cpu/sh5/entry.S b/arch/sh/kernel/cpu/sh5/entry.S
new file mode 100644
index 00000000..6b80295d
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh5/entry.S
@@ -0,0 +1,2068 @@
+/*
+ * arch/sh/kernel/cpu/sh5/entry.S
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ * Copyright (C) 2004 - 2008 Paul Mundt
+ * Copyright (C) 2003, 2004 Richard Curnow
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/sys.h>
+#include <cpu/registers.h>
+#include <asm/processor.h>
+#include <asm/unistd.h>
+#include <asm/thread_info.h>
+#include <asm/asm-offsets.h>
+
+/*
+ * SR fields.
+ */
+#define SR_ASID_MASK 0x00ff0000
+#define SR_FD_MASK 0x00008000
+#define SR_SS 0x08000000
+#define SR_BL 0x10000000
+#define SR_MD 0x40000000
+
+/*
+ * Event code.
+ */
+#define EVENT_INTERRUPT 0
+#define EVENT_FAULT_TLB 1
+#define EVENT_FAULT_NOT_TLB 2
+#define EVENT_DEBUG 3
+
+/* EXPEVT values */
+#define RESET_CAUSE 0x20
+#define DEBUGSS_CAUSE 0x980
+
+/*
+ * Frame layout. Quad index.
+ */
+#define FRAME_T(x) FRAME_TBASE+(x*8)
+#define FRAME_R(x) FRAME_RBASE+(x*8)
+#define FRAME_S(x) FRAME_SBASE+(x*8)
+#define FSPC 0
+#define FSSR 1
+#define FSYSCALL_ID 2
+
+/* Arrange the save frame to be a multiple of 32 bytes long */
+#define FRAME_SBASE 0
+#define FRAME_RBASE (FRAME_SBASE+(3*8)) /* SYSCALL_ID - SSR - SPC */
+#define FRAME_TBASE (FRAME_RBASE+(63*8)) /* r0 - r62 */
+#define FRAME_PBASE (FRAME_TBASE+(8*8)) /* tr0 -tr7 */
+#define FRAME_SIZE (FRAME_PBASE+(2*8)) /* pad0-pad1 */
+
+#define FP_FRAME_SIZE FP_FRAME_BASE+(33*8) /* dr0 - dr31 + fpscr */
+#define FP_FRAME_BASE 0
+
+#define SAVED_R2 0*8
+#define SAVED_R3 1*8
+#define SAVED_R4 2*8
+#define SAVED_R5 3*8
+#define SAVED_R18 4*8
+#define SAVED_R6 5*8
+#define SAVED_TR0 6*8
+
+/* These are the registers saved in the TLB path that aren't saved in the first
+ level of the normal one. */
+#define TLB_SAVED_R25 7*8
+#define TLB_SAVED_TR1 8*8
+#define TLB_SAVED_TR2 9*8
+#define TLB_SAVED_TR3 10*8
+#define TLB_SAVED_TR4 11*8
+/* Save R0/R1 : PT-migrating compiler currently dishonours -ffixed-r0 and -ffixed-r1 causing
+ breakage otherwise. */
+#define TLB_SAVED_R0 12*8
+#define TLB_SAVED_R1 13*8
+
+#define CLI() \
+ getcon SR, r6; \
+ ori r6, 0xf0, r6; \
+ putcon r6, SR;
+
+#define STI() \
+ getcon SR, r6; \
+ andi r6, ~0xf0, r6; \
+ putcon r6, SR;
+
+#ifdef CONFIG_PREEMPT
+# define preempt_stop() CLI()
+#else
+# define preempt_stop()
+# define resume_kernel restore_all
+#endif
+
+ .section .data, "aw"
+
+#define FAST_TLBMISS_STACK_CACHELINES 4
+#define FAST_TLBMISS_STACK_QUADWORDS (4*FAST_TLBMISS_STACK_CACHELINES)
+
+/* Register back-up area for all exceptions */
+ .balign 32
+ /* Allow for 16 quadwords to be pushed by fast tlbmiss handling
+ * register saves etc. */
+ .fill FAST_TLBMISS_STACK_QUADWORDS, 8, 0x0
+/* This is 32 byte aligned by construction */
+/* Register back-up area for all exceptions */
+reg_save_area:
+ .quad 0
+ .quad 0
+ .quad 0
+ .quad 0
+
+ .quad 0
+ .quad 0
+ .quad 0
+ .quad 0
+
+ .quad 0
+ .quad 0
+ .quad 0
+ .quad 0
+
+ .quad 0
+ .quad 0
+
+/* Save area for RESVEC exceptions. We cannot use reg_save_area because of
+ * reentrancy. Note this area may be accessed via physical address.
+ * Align so this fits a whole single cache line, for ease of purging.
+ */
+ .balign 32,0,32
+resvec_save_area:
+ .quad 0
+ .quad 0
+ .quad 0
+ .quad 0
+ .quad 0
+ .balign 32,0,32
+
+/* Jump table of 3rd level handlers */
+trap_jtable:
+ .long do_exception_error /* 0x000 */
+ .long do_exception_error /* 0x020 */
+#ifdef CONFIG_MMU
+ .long tlb_miss_load /* 0x040 */
+ .long tlb_miss_store /* 0x060 */
+#else
+ .long do_exception_error
+ .long do_exception_error
+#endif
+ ! ARTIFICIAL pseudo-EXPEVT setting
+ .long do_debug_interrupt /* 0x080 */
+#ifdef CONFIG_MMU
+ .long tlb_miss_load /* 0x0A0 */
+ .long tlb_miss_store /* 0x0C0 */
+#else
+ .long do_exception_error
+ .long do_exception_error
+#endif
+ .long do_address_error_load /* 0x0E0 */
+ .long do_address_error_store /* 0x100 */
+#ifdef CONFIG_SH_FPU
+ .long do_fpu_error /* 0x120 */
+#else
+ .long do_exception_error /* 0x120 */
+#endif
+ .long do_exception_error /* 0x140 */
+ .long system_call /* 0x160 */
+ .long do_reserved_inst /* 0x180 */
+ .long do_illegal_slot_inst /* 0x1A0 */
+ .long do_exception_error /* 0x1C0 - NMI */
+ .long do_exception_error /* 0x1E0 */
+ .rept 15
+ .long do_IRQ /* 0x200 - 0x3C0 */
+ .endr
+ .long do_exception_error /* 0x3E0 */
+ .rept 32
+ .long do_IRQ /* 0x400 - 0x7E0 */
+ .endr
+ .long fpu_error_or_IRQA /* 0x800 */
+ .long fpu_error_or_IRQB /* 0x820 */
+ .long do_IRQ /* 0x840 */
+ .long do_IRQ /* 0x860 */
+ .rept 6
+ .long do_exception_error /* 0x880 - 0x920 */
+ .endr
+ .long breakpoint_trap_handler /* 0x940 */
+ .long do_exception_error /* 0x960 */
+ .long do_single_step /* 0x980 */
+
+ .rept 3
+ .long do_exception_error /* 0x9A0 - 0x9E0 */
+ .endr
+ .long do_IRQ /* 0xA00 */
+ .long do_IRQ /* 0xA20 */
+#ifdef CONFIG_MMU
+ .long itlb_miss_or_IRQ /* 0xA40 */
+#else
+ .long do_IRQ
+#endif
+ .long do_IRQ /* 0xA60 */
+ .long do_IRQ /* 0xA80 */
+#ifdef CONFIG_MMU
+ .long itlb_miss_or_IRQ /* 0xAA0 */
+#else
+ .long do_IRQ
+#endif
+ .long do_exception_error /* 0xAC0 */
+ .long do_address_error_exec /* 0xAE0 */
+ .rept 8
+ .long do_exception_error /* 0xB00 - 0xBE0 */
+ .endr
+ .rept 18
+ .long do_IRQ /* 0xC00 - 0xE20 */
+ .endr
+
+ .section .text64, "ax"
+
+/*
+ * --- Exception/Interrupt/Event Handling Section
+ */
+
+/*
+ * VBR and RESVEC blocks.
+ *
+ * First level handler for VBR-based exceptions.
+ *
+ * To avoid waste of space, align to the maximum text block size.
+ * This is assumed to be at most 128 bytes or 32 instructions.
+ * DO NOT EXCEED 32 instructions on the first level handlers !
+ *
+ * Also note that RESVEC is contained within the VBR block
+ * where the room left (1KB - TEXT_SIZE) allows placing
+ * the RESVEC block (at most 512B + TEXT_SIZE).
+ *
+ * So first (and only) level handler for RESVEC-based exceptions.
+ *
+ * Where the fault/interrupt is handled (not_a_tlb_miss, tlb_miss
+ * and interrupt) we are very tight on register space until we
+ * save onto the stack frame, which is done in handle_exception().
+ *
+ */
+
+#define TEXT_SIZE 128
+#define BLOCK_SIZE 1664 /* Dynamic check, 13*128 */
+
+ .balign TEXT_SIZE
+LVBR_block:
+ .space 256, 0 /* Power-on class handler, */
+ /* not required here */
+not_a_tlb_miss:
+ synco /* TAKum03020 (but probably a good idea anyway.) */
+ /* Save original stack pointer into KCR1 */
+ putcon SP, KCR1
+
+ /* Save other original registers into reg_save_area */
+ movi reg_save_area, SP
+ st.q SP, SAVED_R2, r2
+ st.q SP, SAVED_R3, r3
+ st.q SP, SAVED_R4, r4
+ st.q SP, SAVED_R5, r5
+ st.q SP, SAVED_R6, r6
+ st.q SP, SAVED_R18, r18
+ gettr tr0, r3
+ st.q SP, SAVED_TR0, r3
+
+ /* Set args for Non-debug, Not a TLB miss class handler */
+ getcon EXPEVT, r2
+ movi ret_from_exception, r3
+ ori r3, 1, r3
+ movi EVENT_FAULT_NOT_TLB, r4
+ or SP, ZERO, r5
+ getcon KCR1, SP
+ pta handle_exception, tr0
+ blink tr0, ZERO
+
+ .balign 256
+ ! VBR+0x200
+ nop
+ .balign 256
+ ! VBR+0x300
+ nop
+ .balign 256
+ /*
+ * Instead of the natural .balign 1024 place RESVEC here
+ * respecting the final 1KB alignment.
+ */
+ .balign TEXT_SIZE
+ /*
+ * Instead of '.space 1024-TEXT_SIZE' place the RESVEC
+ * block making sure the final alignment is correct.
+ */
+#ifdef CONFIG_MMU
+tlb_miss:
+ synco /* TAKum03020 (but probably a good idea anyway.) */
+ putcon SP, KCR1
+ movi reg_save_area, SP
+ /* SP is guaranteed 32-byte aligned. */
+ st.q SP, TLB_SAVED_R0 , r0
+ st.q SP, TLB_SAVED_R1 , r1
+ st.q SP, SAVED_R2 , r2
+ st.q SP, SAVED_R3 , r3
+ st.q SP, SAVED_R4 , r4
+ st.q SP, SAVED_R5 , r5
+ st.q SP, SAVED_R6 , r6
+ st.q SP, SAVED_R18, r18
+
+ /* Save R25 for safety; as/ld may want to use it to achieve the call to
+ * the code in mm/tlbmiss.c */
+ st.q SP, TLB_SAVED_R25, r25
+ gettr tr0, r2
+ gettr tr1, r3
+ gettr tr2, r4
+ gettr tr3, r5
+ gettr tr4, r18
+ st.q SP, SAVED_TR0 , r2
+ st.q SP, TLB_SAVED_TR1 , r3
+ st.q SP, TLB_SAVED_TR2 , r4
+ st.q SP, TLB_SAVED_TR3 , r5
+ st.q SP, TLB_SAVED_TR4 , r18
+
+ pt do_fast_page_fault, tr0
+ getcon SSR, r2
+ getcon EXPEVT, r3
+ getcon TEA, r4
+ shlri r2, 30, r2
+ andi r2, 1, r2 /* r2 = SSR.MD */
+ blink tr0, LINK
+
+ pt fixup_to_invoke_general_handler, tr1
+
+ /* If the fast path handler fixed the fault, just drop through quickly
+ to the restore code right away to return to the excepting context.
+ */
+ beqi/u r2, 0, tr1
+
+fast_tlb_miss_restore:
+ ld.q SP, SAVED_TR0, r2
+ ld.q SP, TLB_SAVED_TR1, r3
+ ld.q SP, TLB_SAVED_TR2, r4
+
+ ld.q SP, TLB_SAVED_TR3, r5
+ ld.q SP, TLB_SAVED_TR4, r18
+
+ ptabs r2, tr0
+ ptabs r3, tr1
+ ptabs r4, tr2
+ ptabs r5, tr3
+ ptabs r18, tr4
+
+ ld.q SP, TLB_SAVED_R0, r0
+ ld.q SP, TLB_SAVED_R1, r1
+ ld.q SP, SAVED_R2, r2
+ ld.q SP, SAVED_R3, r3
+ ld.q SP, SAVED_R4, r4
+ ld.q SP, SAVED_R5, r5
+ ld.q SP, SAVED_R6, r6
+ ld.q SP, SAVED_R18, r18
+ ld.q SP, TLB_SAVED_R25, r25
+
+ getcon KCR1, SP
+ rte
+ nop /* for safety, in case the code is run on sh5-101 cut1.x */
+
+fixup_to_invoke_general_handler:
+
+ /* OK, new method. Restore stuff that's not expected to get saved into
+ the 'first-level' reg save area, then just fall through to setting
+ up the registers and calling the second-level handler. */
+
+ /* 2nd level expects r2,3,4,5,6,18,tr0 to be saved. So we must restore
+ r25,tr1-4 and save r6 to get into the right state. */
+
+ ld.q SP, TLB_SAVED_TR1, r3
+ ld.q SP, TLB_SAVED_TR2, r4
+ ld.q SP, TLB_SAVED_TR3, r5
+ ld.q SP, TLB_SAVED_TR4, r18
+ ld.q SP, TLB_SAVED_R25, r25
+
+ ld.q SP, TLB_SAVED_R0, r0
+ ld.q SP, TLB_SAVED_R1, r1
+
+ ptabs/u r3, tr1
+ ptabs/u r4, tr2
+ ptabs/u r5, tr3
+ ptabs/u r18, tr4
+
+ /* Set args for Non-debug, TLB miss class handler */
+ getcon EXPEVT, r2
+ movi ret_from_exception, r3
+ ori r3, 1, r3
+ movi EVENT_FAULT_TLB, r4
+ or SP, ZERO, r5
+ getcon KCR1, SP
+ pta handle_exception, tr0
+ blink tr0, ZERO
+#else /* CONFIG_MMU */
+ .balign 256
+#endif
+
+/* NB TAKE GREAT CARE HERE TO ENSURE THAT THE INTERRUPT CODE
+ DOES END UP AT VBR+0x600 */
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+
+ .balign 256
+ /* VBR + 0x600 */
+
+interrupt:
+ synco /* TAKum03020 (but probably a good idea anyway.) */
+ /* Save original stack pointer into KCR1 */
+ putcon SP, KCR1
+
+ /* Save other original registers into reg_save_area */
+ movi reg_save_area, SP
+ st.q SP, SAVED_R2, r2
+ st.q SP, SAVED_R3, r3
+ st.q SP, SAVED_R4, r4
+ st.q SP, SAVED_R5, r5
+ st.q SP, SAVED_R6, r6
+ st.q SP, SAVED_R18, r18
+ gettr tr0, r3
+ st.q SP, SAVED_TR0, r3
+
+ /* Set args for interrupt class handler */
+ getcon INTEVT, r2
+ movi ret_from_irq, r3
+ ori r3, 1, r3
+ movi EVENT_INTERRUPT, r4
+ or SP, ZERO, r5
+ getcon KCR1, SP
+ pta handle_exception, tr0
+ blink tr0, ZERO
+ .balign TEXT_SIZE /* let's waste the bare minimum */
+
+LVBR_block_end: /* Marker. Used for total checking */
+
+ .balign 256
+LRESVEC_block:
+ /* Panic handler. Called with MMU off. Possible causes/actions:
+ * - Reset: Jump to program start.
+ * - Single Step: Turn off Single Step & return.
+ * - Others: Call panic handler, passing PC as arg.
+ * (this may need to be extended...)
+ */
+reset_or_panic:
+ synco /* TAKum03020 (but probably a good idea anyway.) */
+ putcon SP, DCR
+ /* First save r0-1 and tr0, as we need to use these */
+ movi resvec_save_area-CONFIG_PAGE_OFFSET, SP
+ st.q SP, 0, r0
+ st.q SP, 8, r1
+ gettr tr0, r0
+ st.q SP, 32, r0
+
+ /* Check cause */
+ getcon EXPEVT, r0
+ movi RESET_CAUSE, r1
+ sub r1, r0, r1 /* r1=0 if reset */
+ movi _stext-CONFIG_PAGE_OFFSET, r0
+ ori r0, 1, r0
+ ptabs r0, tr0
+ beqi r1, 0, tr0 /* Jump to start address if reset */
+
+ getcon EXPEVT, r0
+ movi DEBUGSS_CAUSE, r1
+ sub r1, r0, r1 /* r1=0 if single step */
+ pta single_step_panic, tr0
+ beqi r1, 0, tr0 /* jump if single step */
+
+ /* Now jump to where we save the registers. */
+ movi panic_stash_regs-CONFIG_PAGE_OFFSET, r1
+ ptabs r1, tr0
+ blink tr0, r63
+
+single_step_panic:
+ /* We are in a handler with Single Step set. We need to resume the
+	 * handler by turning on the MMU and turning off Single Step. */
+ getcon SSR, r0
+ movi SR_MMU, r1
+ or r0, r1, r0
+ movi ~SR_SS, r1
+ and r0, r1, r0
+ putcon r0, SSR
+ /* Restore EXPEVT, as the rte won't do this */
+ getcon PEXPEVT, r0
+ putcon r0, EXPEVT
+ /* Restore regs */
+ ld.q SP, 32, r0
+ ptabs r0, tr0
+ ld.q SP, 0, r0
+ ld.q SP, 8, r1
+ getcon DCR, SP
+ synco
+ rte
+
+
+ .balign 256
+debug_exception:
+ synco /* TAKum03020 (but probably a good idea anyway.) */
+ /*
+ * Single step/software_break_point first level handler.
+ * Called with MMU off, so the first thing we do is enable it
+ * by doing an rte with appropriate SSR.
+ */
+ putcon SP, DCR
+ /* Save SSR & SPC, together with R0 & R1, as we need to use 2 regs. */
+ movi resvec_save_area-CONFIG_PAGE_OFFSET, SP
+
+ /* With the MMU off, we are bypassing the cache, so purge any
+ * data that will be made stale by the following stores.
+ */
+ ocbp SP, 0
+ synco
+
+ st.q SP, 0, r0
+ st.q SP, 8, r1
+ getcon SPC, r0
+ st.q SP, 16, r0
+ getcon SSR, r0
+ st.q SP, 24, r0
+
+ /* Enable MMU, block exceptions, set priv mode, disable single step */
+ movi SR_MMU | SR_BL | SR_MD, r1
+ or r0, r1, r0
+ movi ~SR_SS, r1
+ and r0, r1, r0
+ putcon r0, SSR
+ /* Force control to debug_exception_2 when rte is executed */
+	movi	debug_exception_2, r0
+ ori r0, 1, r0 /* force SHmedia, just in case */
+ putcon r0, SPC
+ getcon DCR, SP
+ synco
+ rte
+debug_exception_2:
+ /* Restore saved regs */
+ putcon SP, KCR1
+ movi resvec_save_area, SP
+ ld.q SP, 24, r0
+ putcon r0, SSR
+ ld.q SP, 16, r0
+ putcon r0, SPC
+ ld.q SP, 0, r0
+ ld.q SP, 8, r1
+
+ /* Save other original registers into reg_save_area */
+ movi reg_save_area, SP
+ st.q SP, SAVED_R2, r2
+ st.q SP, SAVED_R3, r3
+ st.q SP, SAVED_R4, r4
+ st.q SP, SAVED_R5, r5
+ st.q SP, SAVED_R6, r6
+ st.q SP, SAVED_R18, r18
+ gettr tr0, r3
+ st.q SP, SAVED_TR0, r3
+
+ /* Set args for debug class handler */
+ getcon EXPEVT, r2
+ movi ret_from_exception, r3
+ ori r3, 1, r3
+ movi EVENT_DEBUG, r4
+ or SP, ZERO, r5
+ getcon KCR1, SP
+ pta handle_exception, tr0
+ blink tr0, ZERO
+
+ .balign 256
+debug_interrupt:
+ /* !!! WE COME HERE IN REAL MODE !!! */
+ /* Hook-up debug interrupt to allow various debugging options to be
+ * hooked into its handler. */
+ /* Save original stack pointer into KCR1 */
+ synco
+ putcon SP, KCR1
+ movi resvec_save_area-CONFIG_PAGE_OFFSET, SP
+ ocbp SP, 0
+ ocbp SP, 32
+ synco
+
+	/* Save other original registers into reg_save_area through real addresses */
+ st.q SP, SAVED_R2, r2
+ st.q SP, SAVED_R3, r3
+ st.q SP, SAVED_R4, r4
+ st.q SP, SAVED_R5, r5
+ st.q SP, SAVED_R6, r6
+ st.q SP, SAVED_R18, r18
+ gettr tr0, r3
+ st.q SP, SAVED_TR0, r3
+
+ /* move (spc,ssr)->(pspc,pssr). The rte will shift
+ them back again, so that they look like the originals
+ as far as the real handler code is concerned. */
+ getcon spc, r6
+ putcon r6, pspc
+ getcon ssr, r6
+ putcon r6, pssr
+
+ ! construct useful SR for handle_exception
+ movi 3, r6
+ shlli r6, 30, r6
+ getcon sr, r18
+ or r18, r6, r6
+ putcon r6, ssr
+
+ ! SSR is now the current SR with the MD and MMU bits set
+ ! i.e. the rte will switch back to priv mode and put
+ ! the mmu back on
+
+ ! construct spc
+ movi handle_exception, r18
+ ori r18, 1, r18 ! for safety (do we need this?)
+ putcon r18, spc
+
+ /* Set args for Non-debug, Not a TLB miss class handler */
+
+ ! EXPEVT==0x80 is unused, so 'steal' this value to put the
+ ! debug interrupt handler in the vectoring table
+ movi 0x80, r2
+ movi ret_from_exception, r3
+ ori r3, 1, r3
+ movi EVENT_FAULT_NOT_TLB, r4
+
+ or SP, ZERO, r5
+ movi CONFIG_PAGE_OFFSET, r6
+ add r6, r5, r5
+ getcon KCR1, SP
+
+ synco ! for safety
+ rte ! -> handle_exception, switch back to priv mode again
+
+LRESVEC_block_end: /* Marker. Unused. */
+
+ .balign TEXT_SIZE
+
+/*
+ * Second level handler for VBR-based exceptions. Pre-handler.
+ * In common to all stack-frame sensitive handlers.
+ *
+ * Inputs:
+ * (KCR0) Current [current task union]
+ * (KCR1) Original SP
+ * (r2) INTEVT/EXPEVT
+ * (r3) appropriate return address
+ * (r4) Event (0 = interrupt, 1 = TLB miss fault, 2 = Not TLB miss fault, 3=debug)
+ * (r5) Pointer to reg_save_area
+ * (SP) Original SP
+ *
+ * Available registers:
+ * (r6)
+ * (r18)
+ * (tr0)
+ *
+ */
+handle_exception:
+ /* Common 2nd level handler. */
+
+ /* First thing we need an appropriate stack pointer */
+ getcon SSR, r6
+ shlri r6, 30, r6
+ andi r6, 1, r6
+ pta stack_ok, tr0
+ bne r6, ZERO, tr0 /* Original stack pointer is fine */
+
+ /* Set stack pointer for user fault */
+ getcon KCR0, SP
+ movi THREAD_SIZE, r6 /* Point to the end */
+ add SP, r6, SP
+
+stack_ok:
+
+/* DEBUG : check for underflow/overflow of the kernel stack */
+ pta no_underflow, tr0
+ getcon KCR0, r6
+ movi 1024, r18
+ add r6, r18, r6
+ bge SP, r6, tr0 ! ? below 1k from bottom of stack : danger zone
+
+/* Just panic to cause a crash. */
+bad_sp:
+ ld.b r63, 0, r6
+ nop
+
+no_underflow:
+ pta bad_sp, tr0
+ getcon kcr0, r6
+ movi THREAD_SIZE, r18
+ add r18, r6, r6
+ bgt SP, r6, tr0 ! sp above the stack
+
+ /* Make some room for the BASIC frame. */
+ movi -(FRAME_SIZE), r6
+ add SP, r6, SP
+
+/* Could do this with no stalling if we had another spare register, but the
+ code below will be OK. */
+ ld.q r5, SAVED_R2, r6
+ ld.q r5, SAVED_R3, r18
+ st.q SP, FRAME_R(2), r6
+ ld.q r5, SAVED_R4, r6
+ st.q SP, FRAME_R(3), r18
+ ld.q r5, SAVED_R5, r18
+ st.q SP, FRAME_R(4), r6
+ ld.q r5, SAVED_R6, r6
+ st.q SP, FRAME_R(5), r18
+ ld.q r5, SAVED_R18, r18
+ st.q SP, FRAME_R(6), r6
+ ld.q r5, SAVED_TR0, r6
+ st.q SP, FRAME_R(18), r18
+ st.q SP, FRAME_T(0), r6
+
+ /* Keep old SP around */
+ getcon KCR1, r6
+
+ /* Save the rest of the general purpose registers */
+ st.q SP, FRAME_R(0), r0
+ st.q SP, FRAME_R(1), r1
+ st.q SP, FRAME_R(7), r7
+ st.q SP, FRAME_R(8), r8
+ st.q SP, FRAME_R(9), r9
+ st.q SP, FRAME_R(10), r10
+ st.q SP, FRAME_R(11), r11
+ st.q SP, FRAME_R(12), r12
+ st.q SP, FRAME_R(13), r13
+ st.q SP, FRAME_R(14), r14
+
+ /* SP is somewhere else */
+ st.q SP, FRAME_R(15), r6
+
+ st.q SP, FRAME_R(16), r16
+ st.q SP, FRAME_R(17), r17
+ /* r18 is saved earlier. */
+ st.q SP, FRAME_R(19), r19
+ st.q SP, FRAME_R(20), r20
+ st.q SP, FRAME_R(21), r21
+ st.q SP, FRAME_R(22), r22
+ st.q SP, FRAME_R(23), r23
+ st.q SP, FRAME_R(24), r24
+ st.q SP, FRAME_R(25), r25
+ st.q SP, FRAME_R(26), r26
+ st.q SP, FRAME_R(27), r27
+ st.q SP, FRAME_R(28), r28
+ st.q SP, FRAME_R(29), r29
+ st.q SP, FRAME_R(30), r30
+ st.q SP, FRAME_R(31), r31
+ st.q SP, FRAME_R(32), r32
+ st.q SP, FRAME_R(33), r33
+ st.q SP, FRAME_R(34), r34
+ st.q SP, FRAME_R(35), r35
+ st.q SP, FRAME_R(36), r36
+ st.q SP, FRAME_R(37), r37
+ st.q SP, FRAME_R(38), r38
+ st.q SP, FRAME_R(39), r39
+ st.q SP, FRAME_R(40), r40
+ st.q SP, FRAME_R(41), r41
+ st.q SP, FRAME_R(42), r42
+ st.q SP, FRAME_R(43), r43
+ st.q SP, FRAME_R(44), r44
+ st.q SP, FRAME_R(45), r45
+ st.q SP, FRAME_R(46), r46
+ st.q SP, FRAME_R(47), r47
+ st.q SP, FRAME_R(48), r48
+ st.q SP, FRAME_R(49), r49
+ st.q SP, FRAME_R(50), r50
+ st.q SP, FRAME_R(51), r51
+ st.q SP, FRAME_R(52), r52
+ st.q SP, FRAME_R(53), r53
+ st.q SP, FRAME_R(54), r54
+ st.q SP, FRAME_R(55), r55
+ st.q SP, FRAME_R(56), r56
+ st.q SP, FRAME_R(57), r57
+ st.q SP, FRAME_R(58), r58
+ st.q SP, FRAME_R(59), r59
+ st.q SP, FRAME_R(60), r60
+ st.q SP, FRAME_R(61), r61
+ st.q SP, FRAME_R(62), r62
+
+ /*
+ * Save the S* registers.
+ */
+ getcon SSR, r61
+ st.q SP, FRAME_S(FSSR), r61
+ getcon SPC, r62
+ st.q SP, FRAME_S(FSPC), r62
+ movi -1, r62 /* Reset syscall_nr */
+ st.q SP, FRAME_S(FSYSCALL_ID), r62
+
+ /* Save the rest of the target registers */
+ gettr tr1, r6
+ st.q SP, FRAME_T(1), r6
+ gettr tr2, r6
+ st.q SP, FRAME_T(2), r6
+ gettr tr3, r6
+ st.q SP, FRAME_T(3), r6
+ gettr tr4, r6
+ st.q SP, FRAME_T(4), r6
+ gettr tr5, r6
+ st.q SP, FRAME_T(5), r6
+ gettr tr6, r6
+ st.q SP, FRAME_T(6), r6
+ gettr tr7, r6
+ st.q SP, FRAME_T(7), r6
+
+ ! setup FP so that unwinder can wind back through nested kernel mode
+ ! exceptions
+ add SP, ZERO, r14
+
+ /* For syscall and debug race condition, get TRA now */
+ getcon TRA, r5
+
+ /* We are in a safe position to turn SR.BL off, but set IMASK=0xf
+ * Also set FD, to catch FPU usage in the kernel.
+ *
+ * benedict.gaster@superh.com 29/07/2002
+ *
+ * On all SH5-101 revisions it is unsafe to raise the IMASK and at the
+ * same time change BL from 1->0, as any pending interrupt of a level
+ * higher than the previous value of IMASK will leak through and be
+ * taken unexpectedly.
+ *
+ * To avoid this we raise the IMASK and then issue another PUTCON to
+ * enable interrupts.
+ */
+ getcon SR, r6
+ movi SR_IMASK | SR_FD, r7
+ or r6, r7, r6
+ putcon r6, SR
+ movi SR_UNBLOCK_EXC, r7
+ and r6, r7, r6
+ putcon r6, SR
+
+
+ /* Now call the appropriate 3rd level handler */
+ or r3, ZERO, LINK
+ movi trap_jtable, r3
+ shlri r2, 3, r2
+ ldx.l r2, r3, r3
+ shlri r2, 2, r2
+ ptabs r3, tr0
+ or SP, ZERO, r3
+ blink tr0, ZERO
+
+/*
+ * Second level handler for VBR-based exceptions. Post-handlers.
+ *
+ * Post-handlers for interrupts (ret_from_irq), exceptions
+ * (ret_from_exception) and common reentrance doors (restore_all
+ * to get back to the original context, ret_from_syscall loop to
+ * check kernel exiting).
+ *
+ * ret_with_reschedule and work_notifysig are inner labels of
+ * the ret_from_syscall loop.
+ *
+ * In common to all stack-frame sensitive handlers.
+ *
+ * Inputs:
+ * (SP) struct pt_regs *, original register's frame pointer (basic)
+ *
+ */
+ .global ret_from_irq
+ret_from_irq:
+ ld.q SP, FRAME_S(FSSR), r6
+ shlri r6, 30, r6
+ andi r6, 1, r6
+ pta resume_kernel, tr0
+ bne r6, ZERO, tr0 /* no further checks */
+ STI()
+ pta ret_with_reschedule, tr0
+ blink tr0, ZERO /* Do not check softirqs */
+
+ .global ret_from_exception
+ret_from_exception:
+ preempt_stop()
+
+ ld.q SP, FRAME_S(FSSR), r6
+ shlri r6, 30, r6
+ andi r6, 1, r6
+ pta resume_kernel, tr0
+ bne r6, ZERO, tr0 /* no further checks */
+
+ /* Check softirqs */
+
+#ifdef CONFIG_PREEMPT
+ pta ret_from_syscall, tr0
+ blink tr0, ZERO
+
+resume_kernel:
+ CLI()
+
+ pta restore_all, tr0
+
+ getcon KCR0, r6
+ ld.l r6, TI_PRE_COUNT, r7
+ beq/u r7, ZERO, tr0
+
+need_resched:
+ ld.l r6, TI_FLAGS, r7
+ movi (1 << TIF_NEED_RESCHED), r8
+ and r8, r7, r8
+ bne r8, ZERO, tr0
+
+ getcon SR, r7
+ andi r7, 0xf0, r7
+ bne r7, ZERO, tr0
+
+ movi preempt_schedule_irq, r7
+ ori r7, 1, r7
+ ptabs r7, tr1
+ blink tr1, LINK
+
+ pta need_resched, tr1
+ blink tr1, ZERO
+#endif
+
+ .global ret_from_syscall
+ret_from_syscall:
+
+ret_with_reschedule:
+ getcon KCR0, r6 ! r6 contains current_thread_info
+ ld.l r6, TI_FLAGS, r7 ! r7 contains current_thread_info->flags
+
+ movi _TIF_NEED_RESCHED, r8
+ and r8, r7, r8
+ pta work_resched, tr0
+ bne r8, ZERO, tr0
+
+ pta restore_all, tr1
+
+ movi _TIF_SIGPENDING, r8
+ and r8, r7, r8
+ pta work_notifysig, tr0
+ bne r8, ZERO, tr0
+
+ blink tr1, ZERO
+
+work_resched:
+ pta ret_from_syscall, tr0
+ gettr tr0, LINK
+ movi schedule, r6
+ ptabs r6, tr0
+ blink tr0, ZERO /* Call schedule(), return on top */
+
+work_notifysig:
+ gettr tr1, LINK
+
+ movi do_notify_resume, r6
+ ptabs r6, tr0
+ or SP, ZERO, r2
+ or r7, ZERO, r3
+ blink tr0, LINK /* Call do_notify_resume(regs, current_thread_info->flags), return here */
+
+restore_all:
+ /* Do prefetches */
+
+ ld.q SP, FRAME_T(0), r6
+ ld.q SP, FRAME_T(1), r7
+ ld.q SP, FRAME_T(2), r8
+ ld.q SP, FRAME_T(3), r9
+ ptabs r6, tr0
+ ptabs r7, tr1
+ ptabs r8, tr2
+ ptabs r9, tr3
+ ld.q SP, FRAME_T(4), r6
+ ld.q SP, FRAME_T(5), r7
+ ld.q SP, FRAME_T(6), r8
+ ld.q SP, FRAME_T(7), r9
+ ptabs r6, tr4
+ ptabs r7, tr5
+ ptabs r8, tr6
+ ptabs r9, tr7
+
+ ld.q SP, FRAME_R(0), r0
+ ld.q SP, FRAME_R(1), r1
+ ld.q SP, FRAME_R(2), r2
+ ld.q SP, FRAME_R(3), r3
+ ld.q SP, FRAME_R(4), r4
+ ld.q SP, FRAME_R(5), r5
+ ld.q SP, FRAME_R(6), r6
+ ld.q SP, FRAME_R(7), r7
+ ld.q SP, FRAME_R(8), r8
+ ld.q SP, FRAME_R(9), r9
+ ld.q SP, FRAME_R(10), r10
+ ld.q SP, FRAME_R(11), r11
+ ld.q SP, FRAME_R(12), r12
+ ld.q SP, FRAME_R(13), r13
+ ld.q SP, FRAME_R(14), r14
+
+ ld.q SP, FRAME_R(16), r16
+ ld.q SP, FRAME_R(17), r17
+ ld.q SP, FRAME_R(18), r18
+ ld.q SP, FRAME_R(19), r19
+ ld.q SP, FRAME_R(20), r20
+ ld.q SP, FRAME_R(21), r21
+ ld.q SP, FRAME_R(22), r22
+ ld.q SP, FRAME_R(23), r23
+ ld.q SP, FRAME_R(24), r24
+ ld.q SP, FRAME_R(25), r25
+ ld.q SP, FRAME_R(26), r26
+ ld.q SP, FRAME_R(27), r27
+ ld.q SP, FRAME_R(28), r28
+ ld.q SP, FRAME_R(29), r29
+ ld.q SP, FRAME_R(30), r30
+ ld.q SP, FRAME_R(31), r31
+ ld.q SP, FRAME_R(32), r32
+ ld.q SP, FRAME_R(33), r33
+ ld.q SP, FRAME_R(34), r34
+ ld.q SP, FRAME_R(35), r35
+ ld.q SP, FRAME_R(36), r36
+ ld.q SP, FRAME_R(37), r37
+ ld.q SP, FRAME_R(38), r38
+ ld.q SP, FRAME_R(39), r39
+ ld.q SP, FRAME_R(40), r40
+ ld.q SP, FRAME_R(41), r41
+ ld.q SP, FRAME_R(42), r42
+ ld.q SP, FRAME_R(43), r43
+ ld.q SP, FRAME_R(44), r44
+ ld.q SP, FRAME_R(45), r45
+ ld.q SP, FRAME_R(46), r46
+ ld.q SP, FRAME_R(47), r47
+ ld.q SP, FRAME_R(48), r48
+ ld.q SP, FRAME_R(49), r49
+ ld.q SP, FRAME_R(50), r50
+ ld.q SP, FRAME_R(51), r51
+ ld.q SP, FRAME_R(52), r52
+ ld.q SP, FRAME_R(53), r53
+ ld.q SP, FRAME_R(54), r54
+ ld.q SP, FRAME_R(55), r55
+ ld.q SP, FRAME_R(56), r56
+ ld.q SP, FRAME_R(57), r57
+ ld.q SP, FRAME_R(58), r58
+
+ getcon SR, r59
+ movi SR_BLOCK_EXC, r60
+ or r59, r60, r59
+ putcon r59, SR /* SR.BL = 1, keep nesting out */
+ ld.q SP, FRAME_S(FSSR), r61
+ ld.q SP, FRAME_S(FSPC), r62
+ movi SR_ASID_MASK, r60
+ and r59, r60, r59
+ andc r61, r60, r61 /* Clear out older ASID */
+ or r59, r61, r61 /* Retain current ASID */
+ putcon r61, SSR
+ putcon r62, SPC
+
+ /* Ignore FSYSCALL_ID */
+
+ ld.q SP, FRAME_R(59), r59
+ ld.q SP, FRAME_R(60), r60
+ ld.q SP, FRAME_R(61), r61
+ ld.q SP, FRAME_R(62), r62
+
+ /* Last touch */
+ ld.q SP, FRAME_R(15), SP
+ rte
+ nop
+
+/*
+ * Third level handlers for VBR-based exceptions. Adapting args to
+ * and/or deflecting to fourth level handlers.
+ *
+ * Fourth level handlers interface.
+ * Most are C-coded handlers directly pointed by the trap_jtable.
+ * (Third = Fourth level)
+ * Inputs:
+ * (r2) fault/interrupt code, entry number (e.g. NMI = 14,
+ * IRL0-3 (0000) = 16, RTLBMISS = 2, SYSCALL = 11, etc ...)
+ * (r3) struct pt_regs *, original register's frame pointer
+ * (r4) Event (0 = interrupt, 1 = TLB miss fault, 2 = Not TLB miss fault)
+ * (r5) TRA control register (for syscall/debug benefit only)
+ * (LINK) return address
+ * (SP) = r3
+ *
+ * Kernel TLB fault handlers will get a slightly different interface.
+ * (r2) struct pt_regs *, original register's frame pointer
+ * (r3) writeaccess, whether it's a store fault as opposed to load fault
+ * (r4) execaccess, whether it's a ITLB fault as opposed to DTLB fault
+ * (r5) Effective Address of fault
+ * (LINK) return address
+ * (SP) = r2
+ *
+ * fpu_error_or_IRQ? is a helper to deflect to the right cause.
+ *
+ */
+#ifdef CONFIG_MMU
+tlb_miss_load:
+ or SP, ZERO, r2
+ or ZERO, ZERO, r3 /* Read */
+ or ZERO, ZERO, r4 /* Data */
+ getcon TEA, r5
+ pta call_do_page_fault, tr0
+ beq ZERO, ZERO, tr0
+
+tlb_miss_store:
+ or SP, ZERO, r2
+ movi 1, r3 /* Write */
+ or ZERO, ZERO, r4 /* Data */
+ getcon TEA, r5
+ pta call_do_page_fault, tr0
+ beq ZERO, ZERO, tr0
+
+itlb_miss_or_IRQ:
+ pta its_IRQ, tr0
+ beqi/u r4, EVENT_INTERRUPT, tr0
+ or SP, ZERO, r2
+ or ZERO, ZERO, r3 /* Read */
+ movi 1, r4 /* Text */
+ getcon TEA, r5
+ /* Fall through */
+
+call_do_page_fault:
+ movi do_page_fault, r6
+ ptabs r6, tr0
+ blink tr0, ZERO
+#endif /* CONFIG_MMU */
+
+fpu_error_or_IRQA:
+ pta its_IRQ, tr0
+ beqi/l r4, EVENT_INTERRUPT, tr0
+#ifdef CONFIG_SH_FPU
+ movi fpu_state_restore_trap_handler, r6
+#else
+ movi do_exception_error, r6
+#endif
+ ptabs r6, tr0
+ blink tr0, ZERO
+
+fpu_error_or_IRQB:
+ pta its_IRQ, tr0
+ beqi/l r4, EVENT_INTERRUPT, tr0
+#ifdef CONFIG_SH_FPU
+ movi fpu_state_restore_trap_handler, r6
+#else
+ movi do_exception_error, r6
+#endif
+ ptabs r6, tr0
+ blink tr0, ZERO
+
+its_IRQ:
+ movi do_IRQ, r6
+ ptabs r6, tr0
+ blink tr0, ZERO
+
+/*
+ * system_call/unknown_trap third level handler:
+ *
+ * Inputs:
+ * (r2) fault/interrupt code, entry number (TRAP = 11)
+ * (r3) struct pt_regs *, original register's frame pointer
+ * (r4) Not used. Event (0=interrupt, 1=TLB miss fault, 2=Not TLB miss fault)
+ * (r5) TRA Control Reg (0x00xyzzzz: x=1 SYSCALL, y = #args, z=nr)
+ * (SP) = r3
+ * (LINK) return address: ret_from_exception
+ * (*r3) Syscall parms: SC#, arg0, arg1, ..., arg5 in order (Saved r2/r7)
+ *
+ * Outputs:
+ * (*r3) Syscall reply (Saved r2)
+ * (LINK) In case of syscall only it can be scrapped.
+ * Common second level post handler will be ret_from_syscall.
+ * Common (non-trace) exit point to that is syscall_ret (saving
+ * result to r2). Common bad exit point is syscall_bad (returning
+ * ENOSYS then saved to r2).
+ *
+ */
+
+unknown_trap:
+ /* Unknown Trap or User Trace */
+ movi do_unknown_trapa, r6
+ ptabs r6, tr0
+ ld.q r3, FRAME_R(9), r2 /* r2 = #arg << 16 | syscall # */
+ andi r2, 0x1ff, r2 /* r2 = syscall # */
+ blink tr0, LINK
+
+ pta syscall_ret, tr0
+ blink tr0, ZERO
+
+	/* New syscall implementation */
+system_call:
+ pta unknown_trap, tr0
+ or r5, ZERO, r4 /* TRA (=r5) -> r4 */
+ shlri r4, 20, r4
+ bnei r4, 1, tr0 /* unknown_trap if not 0x1yzzzz */
+
+ /* It's a system call */
+ st.q r3, FRAME_S(FSYSCALL_ID), r5 /* ID (0x1yzzzz) -> stack */
+ andi r5, 0x1ff, r5 /* syscall # -> r5 */
+
+ STI()
+
+ pta syscall_allowed, tr0
+ movi NR_syscalls - 1, r4 /* Last valid */
+ bgeu/l r4, r5, tr0
+
+syscall_bad:
+ /* Return ENOSYS ! */
+ movi -(ENOSYS), r2 /* Fall-through */
+
+ .global syscall_ret
+syscall_ret:
+ st.q SP, FRAME_R(9), r2 /* Expecting SP back to BASIC frame */
+ ld.q SP, FRAME_S(FSPC), r2
+ addi r2, 4, r2 /* Move PC, being pre-execution event */
+ st.q SP, FRAME_S(FSPC), r2
+ pta ret_from_syscall, tr0
+ blink tr0, ZERO
+
+
+/* A different return path for ret_from_fork, because we now need
+ * to call schedule_tail with the later kernels. Since prev is
+ * loaded into r2 by switch_to(), we can call it straight away.
+ */
+
+.global ret_from_fork
+ret_from_fork:
+
+ movi schedule_tail,r5
+ ori r5, 1, r5
+ ptabs r5, tr0
+ blink tr0, LINK
+
+ ld.q SP, FRAME_S(FSPC), r2
+ addi r2, 4, r2 /* Move PC, being pre-execution event */
+ st.q SP, FRAME_S(FSPC), r2
+ pta ret_from_syscall, tr0
+ blink tr0, ZERO
+
+syscall_allowed:
+ /* Use LINK to deflect the exit point, default is syscall_ret */
+ pta syscall_ret, tr0
+ gettr tr0, LINK
+ pta syscall_notrace, tr0
+
+ getcon KCR0, r2
+ ld.l r2, TI_FLAGS, r4
+ movi _TIF_WORK_SYSCALL_MASK, r6
+ and r6, r4, r6
+ beq/l r6, ZERO, tr0
+
+ /* Trace it by calling syscall_trace before and after */
+ movi do_syscall_trace_enter, r4
+ or SP, ZERO, r2
+ ptabs r4, tr0
+ blink tr0, LINK
+
+ /* Save the retval */
+ st.q SP, FRAME_R(2), r2
+
+ /* Reload syscall number as r5 is trashed by do_syscall_trace_enter */
+ ld.q SP, FRAME_S(FSYSCALL_ID), r5
+ andi r5, 0x1ff, r5
+
+ pta syscall_ret_trace, tr0
+ gettr tr0, LINK
+
+syscall_notrace:
+ /* Now point to the appropriate 4th level syscall handler */
+ movi sys_call_table, r4
+ shlli r5, 2, r5
+ ldx.l r4, r5, r5
+ ptabs r5, tr0
+
+ /* Prepare original args */
+ ld.q SP, FRAME_R(2), r2
+ ld.q SP, FRAME_R(3), r3
+ ld.q SP, FRAME_R(4), r4
+ ld.q SP, FRAME_R(5), r5
+ ld.q SP, FRAME_R(6), r6
+ ld.q SP, FRAME_R(7), r7
+
+ /* And now the trick for those syscalls requiring regs * ! */
+ or SP, ZERO, r8
+
+ /* Call it */
+ blink tr0, ZERO /* LINK is already properly set */
+
+syscall_ret_trace:
+ /* We get back here only if under trace */
+ st.q SP, FRAME_R(9), r2 /* Save return value */
+
+ movi do_syscall_trace_leave, LINK
+ or SP, ZERO, r2
+ ptabs LINK, tr0
+ blink tr0, LINK
+
+ /* This needs to be done after any syscall tracing */
+ ld.q SP, FRAME_S(FSPC), r2
+ addi r2, 4, r2 /* Move PC, being pre-execution event */
+ st.q SP, FRAME_S(FSPC), r2
+
+ pta ret_from_syscall, tr0
+ blink tr0, ZERO /* Resume normal return sequence */
+
+/*
+ * --- Switch to running under a particular ASID and return the previous ASID value
+ * --- The caller is assumed to have done a cli before calling this.
+ *
+ * Input r2 : new ASID
+ * Output r2 : old ASID
+ */
+
+ .global switch_and_save_asid
+switch_and_save_asid:
+ getcon sr, r0
+ movi 255, r4
+ shlli r4, 16, r4 /* r4 = mask to select ASID */
+ and r0, r4, r3 /* r3 = shifted old ASID */
+ andi r2, 255, r2 /* mask down new ASID */
+ shlli r2, 16, r2 /* align new ASID against SR.ASID */
+ andc r0, r4, r0 /* efface old ASID from SR */
+ or r0, r2, r0 /* insert the new ASID */
+ putcon r0, ssr
+ movi 1f, r0
+ putcon r0, spc
+ rte
+ nop
+1:
+ ptabs LINK, tr0
+ shlri r3, 16, r2 /* r2 = old ASID */
+ blink tr0, r63
+
+ .global route_to_panic_handler
+route_to_panic_handler:
+ /* Switch to real mode, goto panic_handler, don't return. Useful for
+ last-chance debugging, e.g. if no output wants to go to the console.
+ */
+
+ movi panic_handler - CONFIG_PAGE_OFFSET, r1
+ ptabs r1, tr0
+ pta 1f, tr1
+ gettr tr1, r0
+ putcon r0, spc
+ getcon sr, r0
+ movi 1, r1
+ shlli r1, 31, r1
+ andc r0, r1, r0
+ putcon r0, ssr
+ rte
+ nop
+1: /* Now in real mode */
+ blink tr0, r63
+ nop
+
+ .global peek_real_address_q
+peek_real_address_q:
+ /* Two args:
+ r2 : real mode address to peek
+ r2(out) : result quadword
+
+ This is provided as a cheapskate way of manipulating device
+ registers for debugging (to avoid the need to ioremap the debug
+ module, and to avoid the need to ioremap the watchpoint
+ controller in a way that identity maps sufficient bits to avoid the
+ SH5-101 cut2 silicon defect).
+
+ This code is not performance critical
+ */
+
+ add.l r2, r63, r2 /* sign extend address */
+ getcon sr, r0 /* r0 = saved original SR */
+ movi 1, r1
+ shlli r1, 28, r1
+ or r0, r1, r1 /* r0 with block bit set */
+ putcon r1, sr /* now in critical section */
+ movi 1, r36
+ shlli r36, 31, r36
+ andc r1, r36, r1 /* turn sr.mmu off in real mode section */
+
+ putcon r1, ssr
+ movi .peek0 - CONFIG_PAGE_OFFSET, r36 /* real mode target address */
+ movi 1f, r37 /* virtual mode return addr */
+ putcon r36, spc
+
+ synco
+ rte
+ nop
+
+.peek0: /* come here in real mode, don't touch caches!!
+ still in critical section (sr.bl==1) */
+ putcon r0, ssr
+ putcon r37, spc
+	/* Here's the actual peek. If the address is bad, all bets are off as to
+	 * what will happen (handlers invoked in real-mode = bad news) */
+ ld.q r2, 0, r2
+ synco
+ rte /* Back to virtual mode */
+ nop
+
+1:
+ ptabs LINK, tr0
+ blink tr0, r63
+
+ .global poke_real_address_q
+poke_real_address_q:
+ /* Two args:
+ r2 : real mode address to poke
+ r3 : quadword value to write.
+
+ This is provided as a cheapskate way of manipulating device
+ registers for debugging (to avoid the need to ioremap the debug
+ module, and to avoid the need to ioremap the watchpoint
+ controller in a way that identity maps sufficient bits to avoid the
+ SH5-101 cut2 silicon defect).
+
+ This code is not performance critical
+ */
+
+ add.l r2, r63, r2 /* sign extend address */
+ getcon sr, r0 /* r0 = saved original SR */
+ movi 1, r1
+ shlli r1, 28, r1
+ or r0, r1, r1 /* r0 with block bit set */
+ putcon r1, sr /* now in critical section */
+ movi 1, r36
+ shlli r36, 31, r36
+ andc r1, r36, r1 /* turn sr.mmu off in real mode section */
+
+ putcon r1, ssr
+ movi .poke0-CONFIG_PAGE_OFFSET, r36 /* real mode target address */
+ movi 1f, r37 /* virtual mode return addr */
+ putcon r36, spc
+
+ synco
+ rte
+ nop
+
+.poke0: /* come here in real mode, don't touch caches!!
+ still in critical section (sr.bl==1) */
+ putcon r0, ssr
+ putcon r37, spc
+	/* Here's the actual poke. If the address is bad, all bets are off as to
+	 * what will happen (handlers invoked in real-mode = bad news) */
+ st.q r2, 0, r3
+ synco
+ rte /* Back to virtual mode */
+ nop
+
+1:
+ ptabs LINK, tr0
+ blink tr0, r63
+
+#ifdef CONFIG_MMU
+/*
+ * --- User Access Handling Section
+ */
+
+/*
+ * User access support. It has all moved to non-inlined assembler
+ * functions here.
+ *
+ * __kernel_size_t __copy_user(void *__to, const void *__from,
+ * __kernel_size_t __n)
+ *
+ * Inputs:
+ * (r2) target address
+ * (r3) source address
+ * (r4) size in bytes
+ *
+ * Outputs:
+ * (*r2) target data
+ * (r2) non-copied bytes
+ *
+ * If a fault occurs on the user pointer, bail out early and return the
+ * number of bytes not copied in r2.
+ * Strategy : for large blocks, call a real memcpy function which can
+ * move >1 byte at a time using unaligned ld/st instructions, and can
+ * manipulate the cache using prefetch + alloco to improve the speed
+ * further. If a fault occurs in that function, just revert to the
+ * byte-by-byte approach used for small blocks; this is rare so the
+ * performance hit for that case does not matter.
+ *
+ * For small blocks it's not worth the overhead of setting up and calling
+ * the memcpy routine; do the copy a byte at a time.
+ *
+ */
+ .global __copy_user
+__copy_user:
+ pta __copy_user_byte_by_byte, tr1
+ movi 16, r0 ! this value is a best guess, should tune it by benchmarking
+ bge/u r0, r4, tr1
+ pta copy_user_memcpy, tr0
+ addi SP, -32, SP
+ /* Save arguments in case we have to fix-up unhandled page fault */
+ st.q SP, 0, r2
+ st.q SP, 8, r3
+ st.q SP, 16, r4
+ st.q SP, 24, r35 ! r35 is callee-save
+ /* Save LINK in a register to reduce RTS time later (otherwise
+ ld SP,*,LINK;ptabs LINK;trn;blink trn,r63 becomes a critical path) */
+ ori LINK, 0, r35
+ blink tr0, LINK
+
+ /* Copy completed normally if we get back here */
+ ptabs r35, tr0
+ ld.q SP, 24, r35
+ /* don't restore r2-r4, pointless */
+ /* set result=r2 to zero as the copy must have succeeded. */
+ or r63, r63, r2
+ addi SP, 32, SP
+ blink tr0, r63 ! RTS
+
+ .global __copy_user_fixup
+__copy_user_fixup:
+ /* Restore stack frame */
+ ori r35, 0, LINK
+ ld.q SP, 24, r35
+ ld.q SP, 16, r4
+ ld.q SP, 8, r3
+ ld.q SP, 0, r2
+ addi SP, 32, SP
+ /* Fall through to original code, in the 'same' state we entered with */
+
+/* The slow byte-by-byte method is used if the fast copy traps due to a bad
+ user address. In that rare case, the speed drop can be tolerated. */
+__copy_user_byte_by_byte:
+ pta ___copy_user_exit, tr1
+ pta ___copy_user1, tr0
+ beq/u r4, r63, tr1 /* early exit for zero length copy */
+ sub r2, r3, r0
+ addi r0, -1, r0
+
+___copy_user1:
+ ld.b r3, 0, r5 /* Fault address 1 */
+
+ /* Could rewrite this to use just 1 add, but the second comes 'free'
+ due to load latency */
+ addi r3, 1, r3
+ addi r4, -1, r4 /* No real fixup required */
+___copy_user2:
+ stx.b r3, r0, r5 /* Fault address 2 */
+ bne r4, ZERO, tr0
+
+___copy_user_exit:
+ or r4, ZERO, r2
+ ptabs LINK, tr0
+ blink tr0, ZERO
+
+/*
+ * __kernel_size_t __clear_user(void *addr, __kernel_size_t size)
+ *
+ * Inputs:
+ * (r2) target address
+ * (r3) size in bytes
+ *
+ * Outputs:
+ * (*r2) zero-ed target data
+ * (r2) non-zero-ed bytes
+ */
+ .global __clear_user
+__clear_user:
+ pta ___clear_user_exit, tr1
+ pta ___clear_user1, tr0
+ beq/u r3, r63, tr1
+
+___clear_user1:
+ st.b r2, 0, ZERO /* Fault address */
+ addi r2, 1, r2
+ addi r3, -1, r3 /* No real fixup required */
+ bne r3, ZERO, tr0
+
+___clear_user_exit:
+ or r3, ZERO, r2
+ ptabs LINK, tr0
+ blink tr0, ZERO
+
+#endif /* CONFIG_MMU */
+
+/*
+ * int __strncpy_from_user(unsigned long __dest, unsigned long __src,
+ * int __count)
+ *
+ * Inputs:
+ * (r2) target address
+ * (r3) source address
+ * (r4) maximum size in bytes
+ *
+ * Outputs:
+ * (*r2) copied data
+ * (r2) -EFAULT (in case of faulting)
+ * copied data (otherwise)
+ */
+ .global __strncpy_from_user
+__strncpy_from_user:
+ pta ___strncpy_from_user1, tr0
+ pta ___strncpy_from_user_done, tr1
+ or r4, ZERO, r5 /* r5 = original count */
+ beq/u r4, r63, tr1 /* early exit if r4==0 */
+ movi -(EFAULT), r6 /* r6 = reply, no real fixup */
+ or ZERO, ZERO, r7 /* r7 = data, clear top byte of data */
+
+___strncpy_from_user1:
+ ld.b r3, 0, r7 /* Fault address: only in reading */
+ st.b r2, 0, r7
+ addi r2, 1, r2
+ addi r3, 1, r3
+ beq/u ZERO, r7, tr1
+ addi r4, -1, r4 /* return real number of copied bytes */
+ bne/l ZERO, r4, tr0
+
+___strncpy_from_user_done:
+ sub r5, r4, r6 /* If done, return copied */
+
+___strncpy_from_user_exit:
+ or r6, ZERO, r2
+ ptabs LINK, tr0
+ blink tr0, ZERO
+
+/*
+ * extern long __strnlen_user(const char *__s, long __n)
+ *
+ * Inputs:
+ * (r2) source address
+ * (r3) source size in bytes
+ *
+ * Outputs:
+ * (r2) -EFAULT (in case of faulting)
+ * string length (otherwise)
+ */
+ .global __strnlen_user
+__strnlen_user:
+ pta ___strnlen_user_set_reply, tr0
+ pta ___strnlen_user1, tr1
+ or ZERO, ZERO, r5 /* r5 = counter */
+ movi -(EFAULT), r6 /* r6 = reply, no real fixup */
+ or ZERO, ZERO, r7 /* r7 = data, clear top byte of data */
+ beq r3, ZERO, tr0
+
+___strnlen_user1:
+ ldx.b r2, r5, r7 /* Fault address: only in reading */
+ addi r3, -1, r3 /* No real fixup */
+ addi r5, 1, r5
+ beq r3, ZERO, tr0
+ bne r7, ZERO, tr1
+! The line below used to be active. This led to a junk byte lying between each pair
+! of entries in the argv & envp structures in memory. Whilst the program saw the right data
+! via the argv and envp arguments to main, it meant the 'flat' representation visible through
+! /proc/$pid/cmdline was corrupt, causing trouble with ps, for example.
+! addi r5, 1, r5 /* Include '\0' */
+
+___strnlen_user_set_reply:
+ or r5, ZERO, r6 /* If done, return counter */
+
+___strnlen_user_exit:
+ or r6, ZERO, r2
+ ptabs LINK, tr0
+ blink tr0, ZERO
+
+/*
+ * extern long __get_user_asm_?(void *val, long addr)
+ *
+ * Inputs:
+ * (r2) dest address
+ * (r3) source address (in User Space)
+ *
+ * Outputs:
+ * (r2) -EFAULT (faulting)
+ * 0 (not faulting)
+ */
+ .global __get_user_asm_b
+__get_user_asm_b:
+ or r2, ZERO, r4
+ movi -(EFAULT), r2 /* r2 = reply, no real fixup */
+
+___get_user_asm_b1:
+ ld.b r3, 0, r5 /* r5 = data */
+ st.b r4, 0, r5
+ or ZERO, ZERO, r2
+
+___get_user_asm_b_exit:
+ ptabs LINK, tr0
+ blink tr0, ZERO
+
+
+ .global __get_user_asm_w
+__get_user_asm_w:
+ or r2, ZERO, r4
+ movi -(EFAULT), r2 /* r2 = reply, no real fixup */
+
+___get_user_asm_w1:
+ ld.w r3, 0, r5 /* r5 = data */
+ st.w r4, 0, r5
+ or ZERO, ZERO, r2
+
+___get_user_asm_w_exit:
+ ptabs LINK, tr0
+ blink tr0, ZERO
+
+
+ .global __get_user_asm_l
+__get_user_asm_l:
+ or r2, ZERO, r4
+ movi -(EFAULT), r2 /* r2 = reply, no real fixup */
+
+___get_user_asm_l1:
+ ld.l r3, 0, r5 /* r5 = data */
+ st.l r4, 0, r5
+ or ZERO, ZERO, r2
+
+___get_user_asm_l_exit:
+ ptabs LINK, tr0
+ blink tr0, ZERO
+
+
+ .global __get_user_asm_q
+__get_user_asm_q:
+ or r2, ZERO, r4
+ movi -(EFAULT), r2 /* r2 = reply, no real fixup */
+
+___get_user_asm_q1:
+ ld.q r3, 0, r5 /* r5 = data */
+ st.q r4, 0, r5
+ or ZERO, ZERO, r2
+
+___get_user_asm_q_exit:
+ ptabs LINK, tr0
+ blink tr0, ZERO
+
+/*
+ * extern long __put_user_asm_?(void *pval, long addr)
+ *
+ * Inputs:
+ * (r2) kernel pointer to value
+ * (r3) dest address (in User Space)
+ *
+ * Outputs:
+ * (r2) -EFAULT (faulting)
+ * 0 (not faulting)
+ */
+ .global __put_user_asm_b
+__put_user_asm_b:
+ ld.b r2, 0, r4 /* r4 = data */
+ movi -(EFAULT), r2 /* r2 = reply, no real fixup */
+
+___put_user_asm_b1:
+ st.b r3, 0, r4
+ or ZERO, ZERO, r2
+
+___put_user_asm_b_exit:
+ ptabs LINK, tr0
+ blink tr0, ZERO
+
+
+ .global __put_user_asm_w
+__put_user_asm_w:
+ ld.w r2, 0, r4 /* r4 = data */
+ movi -(EFAULT), r2 /* r2 = reply, no real fixup */
+
+___put_user_asm_w1:
+ st.w r3, 0, r4
+ or ZERO, ZERO, r2
+
+___put_user_asm_w_exit:
+ ptabs LINK, tr0
+ blink tr0, ZERO
+
+
+ .global __put_user_asm_l
+__put_user_asm_l:
+ ld.l r2, 0, r4 /* r4 = data */
+ movi -(EFAULT), r2 /* r2 = reply, no real fixup */
+
+___put_user_asm_l1:
+ st.l r3, 0, r4
+ or ZERO, ZERO, r2
+
+___put_user_asm_l_exit:
+ ptabs LINK, tr0
+ blink tr0, ZERO
+
+
+ .global __put_user_asm_q
+__put_user_asm_q:
+ ld.q r2, 0, r4 /* r4 = data */
+ movi -(EFAULT), r2 /* r2 = reply, no real fixup */
+
+___put_user_asm_q1:
+ st.q r3, 0, r4
+ or ZERO, ZERO, r2
+
+___put_user_asm_q_exit:
+ ptabs LINK, tr0
+ blink tr0, ZERO
+
+panic_stash_regs:
+ /* The idea is : when we get an unhandled panic, we dump the registers
+	   to a known memory location, then just sit in a tight loop.
+ This allows the human to look at the memory region through the GDB
+ session (assuming the debug module's SHwy initiator isn't locked up
+ or anything), to hopefully analyze the cause of the panic. */
+
+ /* On entry, former r15 (SP) is in DCR
+	   former r0 is at resvec_save_area + 0
+	   former r1 is at resvec_save_area + 8
+	   former tr0 is at resvec_save_area + 32
+ DCR is the only register whose value is lost altogether.
+ */
+
+ movi 0xffffffff80000000, r0 ! phy of dump area
+ ld.q SP, 0x000, r1 ! former r0
+ st.q r0, 0x000, r1
+ ld.q SP, 0x008, r1 ! former r1
+ st.q r0, 0x008, r1
+ st.q r0, 0x010, r2
+ st.q r0, 0x018, r3
+ st.q r0, 0x020, r4
+ st.q r0, 0x028, r5
+ st.q r0, 0x030, r6
+ st.q r0, 0x038, r7
+ st.q r0, 0x040, r8
+ st.q r0, 0x048, r9
+ st.q r0, 0x050, r10
+ st.q r0, 0x058, r11
+ st.q r0, 0x060, r12
+ st.q r0, 0x068, r13
+ st.q r0, 0x070, r14
+ getcon dcr, r14
+ st.q r0, 0x078, r14
+ st.q r0, 0x080, r16
+ st.q r0, 0x088, r17
+ st.q r0, 0x090, r18
+ st.q r0, 0x098, r19
+ st.q r0, 0x0a0, r20
+ st.q r0, 0x0a8, r21
+ st.q r0, 0x0b0, r22
+ st.q r0, 0x0b8, r23
+ st.q r0, 0x0c0, r24
+ st.q r0, 0x0c8, r25
+ st.q r0, 0x0d0, r26
+ st.q r0, 0x0d8, r27
+ st.q r0, 0x0e0, r28
+ st.q r0, 0x0e8, r29
+ st.q r0, 0x0f0, r30
+ st.q r0, 0x0f8, r31
+ st.q r0, 0x100, r32
+ st.q r0, 0x108, r33
+ st.q r0, 0x110, r34
+ st.q r0, 0x118, r35
+ st.q r0, 0x120, r36
+ st.q r0, 0x128, r37
+ st.q r0, 0x130, r38
+ st.q r0, 0x138, r39
+ st.q r0, 0x140, r40
+ st.q r0, 0x148, r41
+ st.q r0, 0x150, r42
+ st.q r0, 0x158, r43
+ st.q r0, 0x160, r44
+ st.q r0, 0x168, r45
+ st.q r0, 0x170, r46
+ st.q r0, 0x178, r47
+ st.q r0, 0x180, r48
+ st.q r0, 0x188, r49
+ st.q r0, 0x190, r50
+ st.q r0, 0x198, r51
+ st.q r0, 0x1a0, r52
+ st.q r0, 0x1a8, r53
+ st.q r0, 0x1b0, r54
+ st.q r0, 0x1b8, r55
+ st.q r0, 0x1c0, r56
+ st.q r0, 0x1c8, r57
+ st.q r0, 0x1d0, r58
+ st.q r0, 0x1d8, r59
+ st.q r0, 0x1e0, r60
+ st.q r0, 0x1e8, r61
+ st.q r0, 0x1f0, r62
+ st.q r0, 0x1f8, r63 ! bogus, but for consistency's sake...
+
+ ld.q SP, 0x020, r1 ! former tr0
+ st.q r0, 0x200, r1
+ gettr tr1, r1
+ st.q r0, 0x208, r1
+ gettr tr2, r1
+ st.q r0, 0x210, r1
+ gettr tr3, r1
+ st.q r0, 0x218, r1
+ gettr tr4, r1
+ st.q r0, 0x220, r1
+ gettr tr5, r1
+ st.q r0, 0x228, r1
+ gettr tr6, r1
+ st.q r0, 0x230, r1
+ gettr tr7, r1
+ st.q r0, 0x238, r1
+
+ getcon sr, r1
+ getcon ssr, r2
+ getcon pssr, r3
+ getcon spc, r4
+ getcon pspc, r5
+ getcon intevt, r6
+ getcon expevt, r7
+ getcon pexpevt, r8
+ getcon tra, r9
+ getcon tea, r10
+ getcon kcr0, r11
+ getcon kcr1, r12
+ getcon vbr, r13
+ getcon resvec, r14
+
+ st.q r0, 0x240, r1
+ st.q r0, 0x248, r2
+ st.q r0, 0x250, r3
+ st.q r0, 0x258, r4
+ st.q r0, 0x260, r5
+ st.q r0, 0x268, r6
+ st.q r0, 0x270, r7
+ st.q r0, 0x278, r8
+ st.q r0, 0x280, r9
+ st.q r0, 0x288, r10
+ st.q r0, 0x290, r11
+ st.q r0, 0x298, r12
+ st.q r0, 0x2a0, r13
+ st.q r0, 0x2a8, r14
+
+ getcon SPC,r2
+ getcon SSR,r3
+ getcon EXPEVT,r4
+ /* Prepare to jump to C - physical address */
+ movi panic_handler-CONFIG_PAGE_OFFSET, r1
+ ori r1, 1, r1
+ ptabs r1, tr0
+ getcon DCR, SP
+ blink tr0, ZERO
+ nop
+ nop
+ nop
+ nop
+
+
+
+
+/*
+ * --- Signal Handling Section
+ */
+
+/*
+ * extern long long _sa_default_rt_restorer
+ * extern long long _sa_default_restorer
+ *
+ * or, better,
+ *
+ * extern void _sa_default_rt_restorer(void)
+ * extern void _sa_default_restorer(void)
+ *
+ * Code prototypes to do a sys_rt_sigreturn() or sys_sigreturn()
+ * from user space. Copied into user space by signal management.
+ * Both must be quad aligned and 2 quad long (4 instructions).
+ *
+ */
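+/*
+ * Each stub builds the trap argument in r9 in two steps: movi loads 0x10,
+ * then shori shifts r9 left by 16 bits and ORs in the low 16 bits of the
+ * syscall number, leaving (0x10 << 16) | __NR_(rt_)sigreturn in r9 before
+ * trapa is issued.  The trailing nop pads each stub to the two quads
+ * required above.
+ */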
+ .balign 8
+ .global sa_default_rt_restorer
+sa_default_rt_restorer:
+ movi 0x10, r9
+ shori __NR_rt_sigreturn, r9
+ trapa r9
+ nop
+
+ .balign 8
+ .global sa_default_restorer
+sa_default_restorer:
+ movi 0x10, r9
+ shori __NR_sigreturn, r9
+ trapa r9
+ nop
+
+/*
+ * --- __ex_table Section
+ */
+
+/*
+ * User Access Exception Table.
+ */
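+/*
+ * Each entry is an (address of a possibly-faulting access, address of its
+ * fixup) pair, roughly equivalent to:
+ *
+ *	struct exception_table_entry {
+ *		unsigned long insn, fixup;
+ *	};
+ *
+ * On a fault at one of the labelled instructions, the fault handler looks
+ * the faulting address up here and resumes at the corresponding *_exit
+ * label, which returns -EFAULT to the caller.
+ */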
+ .section __ex_table, "a"
+
+ .global asm_uaccess_start /* Just a marker */
+asm_uaccess_start:
+
+#ifdef CONFIG_MMU
+ .long ___copy_user1, ___copy_user_exit
+ .long ___copy_user2, ___copy_user_exit
+ .long ___clear_user1, ___clear_user_exit
+#endif
+ .long ___strncpy_from_user1, ___strncpy_from_user_exit
+ .long ___strnlen_user1, ___strnlen_user_exit
+ .long ___get_user_asm_b1, ___get_user_asm_b_exit
+ .long ___get_user_asm_w1, ___get_user_asm_w_exit
+ .long ___get_user_asm_l1, ___get_user_asm_l_exit
+ .long ___get_user_asm_q1, ___get_user_asm_q_exit
+ .long ___put_user_asm_b1, ___put_user_asm_b_exit
+ .long ___put_user_asm_w1, ___put_user_asm_w_exit
+ .long ___put_user_asm_l1, ___put_user_asm_l_exit
+ .long ___put_user_asm_q1, ___put_user_asm_q_exit
+
+ .global asm_uaccess_end /* Just a marker */
+asm_uaccess_end:
+
+
+
+
+/*
+ * --- .init.text Section
+ */
+
+ __INIT
+
+/*
+ * void trap_init (void)
+ *
+ */
+ .global trap_init
+trap_init:
+ addi SP, -24, SP /* Room to save r28/r29/r30 */
+ st.q SP, 0, r28
+ st.q SP, 8, r29
+ st.q SP, 16, r30
+
+ /* Set VBR and RESVEC */
+ movi LVBR_block, r19
+ andi r19, -4, r19 /* reset MMUOFF + reserved */
+ /* For RESVEC exceptions we force the MMU off, which means we need the
+ physical address. */
+ movi LRESVEC_block-CONFIG_PAGE_OFFSET, r20
+ andi r20, -4, r20 /* reset reserved */
+ ori r20, 1, r20 /* set MMUOFF */
+ putcon r19, VBR
+ putcon r20, RESVEC
+
+ /* Sanity check */
+ movi LVBR_block_end, r21
+ andi r21, -4, r21
+ movi BLOCK_SIZE, r29 /* r29 = expected size */
+ or r19, ZERO, r30
+ add r19, r29, r19
+
+ /*
+	 * Ugly, but better to loop forever now than to crash afterwards.
+ * We should print a message, but if we touch LVBR or
+ * LRESVEC blocks we should not be surprised if we get stuck
+ * in trap_init().
+ */
+ pta trap_init_loop, tr1
+ gettr tr1, r28 /* r28 = trap_init_loop */
+ sub r21, r30, r30 /* r30 = actual size */
+
+ /*
+ * VBR/RESVEC handlers overlap by being bigger than
+ * allowed. Very bad. Just loop forever.
+ * (r28) panic/loop address
+ * (r29) expected size
+ * (r30) actual size
+ */
+trap_init_loop:
+ bne r19, r21, tr1
+
+ /* Now that exception vectors are set up reset SR.BL */
+ getcon SR, r22
+ movi SR_UNBLOCK_EXC, r23
+ and r22, r23, r22
+ putcon r22, SR
+
+ addi SP, 24, SP
+ ptabs LINK, tr0
+ blink tr0, ZERO
+
diff --git a/arch/sh/kernel/cpu/sh5/fpu.c b/arch/sh/kernel/cpu/sh5/fpu.c
new file mode 100644
index 00000000..4b3bb35e
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh5/fpu.c
@@ -0,0 +1,114 @@
+/*
+ * arch/sh/kernel/cpu/sh5/fpu.c
+ *
+ * Copyright (C) 2001 Manuela Cirronis, Paolo Alberelli
+ * Copyright (C) 2002 STMicroelectronics Limited
+ * Author : Stuart Menefy
+ *
+ * Started from SH4 version:
+ * Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/sched.h>
+#include <linux/signal.h>
+#include <asm/processor.h>
+
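+/*
+ * The FPU state is moved with paired transfers: each fst.p/fld.p moves an
+ * even/odd pair of single-precision registers (fp0/fp1, fp2/fp3, ...) as one
+ * 64-bit quantity, so 32 pairs cover fr0-fr63.  FPSCR has no direct memory
+ * form, so fr63 is borrowed as a staging register via fgetscr/fputscr; the
+ * fp62 pair is therefore saved before, and restored after, the FPSCR
+ * transfer.
+ */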
+void save_fpu(struct task_struct *tsk)
+{
+ asm volatile("fst.p %0, (0*8), fp0\n\t"
+ "fst.p %0, (1*8), fp2\n\t"
+ "fst.p %0, (2*8), fp4\n\t"
+ "fst.p %0, (3*8), fp6\n\t"
+ "fst.p %0, (4*8), fp8\n\t"
+ "fst.p %0, (5*8), fp10\n\t"
+ "fst.p %0, (6*8), fp12\n\t"
+ "fst.p %0, (7*8), fp14\n\t"
+ "fst.p %0, (8*8), fp16\n\t"
+ "fst.p %0, (9*8), fp18\n\t"
+ "fst.p %0, (10*8), fp20\n\t"
+ "fst.p %0, (11*8), fp22\n\t"
+ "fst.p %0, (12*8), fp24\n\t"
+ "fst.p %0, (13*8), fp26\n\t"
+ "fst.p %0, (14*8), fp28\n\t"
+ "fst.p %0, (15*8), fp30\n\t"
+ "fst.p %0, (16*8), fp32\n\t"
+ "fst.p %0, (17*8), fp34\n\t"
+ "fst.p %0, (18*8), fp36\n\t"
+ "fst.p %0, (19*8), fp38\n\t"
+ "fst.p %0, (20*8), fp40\n\t"
+ "fst.p %0, (21*8), fp42\n\t"
+ "fst.p %0, (22*8), fp44\n\t"
+ "fst.p %0, (23*8), fp46\n\t"
+ "fst.p %0, (24*8), fp48\n\t"
+ "fst.p %0, (25*8), fp50\n\t"
+ "fst.p %0, (26*8), fp52\n\t"
+ "fst.p %0, (27*8), fp54\n\t"
+ "fst.p %0, (28*8), fp56\n\t"
+ "fst.p %0, (29*8), fp58\n\t"
+ "fst.p %0, (30*8), fp60\n\t"
+ "fst.p %0, (31*8), fp62\n\t"
+
+ "fgetscr fr63\n\t"
+ "fst.s %0, (32*8), fr63\n\t"
+ : /* no output */
+ : "r" (&tsk->thread.xstate->hardfpu)
+ : "memory");
+}
+
+void restore_fpu(struct task_struct *tsk)
+{
+ asm volatile("fld.p %0, (0*8), fp0\n\t"
+ "fld.p %0, (1*8), fp2\n\t"
+ "fld.p %0, (2*8), fp4\n\t"
+ "fld.p %0, (3*8), fp6\n\t"
+ "fld.p %0, (4*8), fp8\n\t"
+ "fld.p %0, (5*8), fp10\n\t"
+ "fld.p %0, (6*8), fp12\n\t"
+ "fld.p %0, (7*8), fp14\n\t"
+ "fld.p %0, (8*8), fp16\n\t"
+ "fld.p %0, (9*8), fp18\n\t"
+ "fld.p %0, (10*8), fp20\n\t"
+ "fld.p %0, (11*8), fp22\n\t"
+ "fld.p %0, (12*8), fp24\n\t"
+ "fld.p %0, (13*8), fp26\n\t"
+ "fld.p %0, (14*8), fp28\n\t"
+ "fld.p %0, (15*8), fp30\n\t"
+ "fld.p %0, (16*8), fp32\n\t"
+ "fld.p %0, (17*8), fp34\n\t"
+ "fld.p %0, (18*8), fp36\n\t"
+ "fld.p %0, (19*8), fp38\n\t"
+ "fld.p %0, (20*8), fp40\n\t"
+ "fld.p %0, (21*8), fp42\n\t"
+ "fld.p %0, (22*8), fp44\n\t"
+ "fld.p %0, (23*8), fp46\n\t"
+ "fld.p %0, (24*8), fp48\n\t"
+ "fld.p %0, (25*8), fp50\n\t"
+ "fld.p %0, (26*8), fp52\n\t"
+ "fld.p %0, (27*8), fp54\n\t"
+ "fld.p %0, (28*8), fp56\n\t"
+ "fld.p %0, (29*8), fp58\n\t"
+ "fld.p %0, (30*8), fp60\n\t"
+
+ "fld.s %0, (32*8), fr63\n\t"
+ "fputscr fr63\n\t"
+
+ "fld.p %0, (31*8), fp62\n\t"
+ : /* no output */
+ : "r" (&tsk->thread.xstate->hardfpu)
+ : "memory");
+}
+
+asmlinkage void do_fpu_error(unsigned long ex, struct pt_regs *regs)
+{
+ struct task_struct *tsk = current;
+
+ regs->pc += 4;
+
+ tsk->thread.trap_no = 11;
+ tsk->thread.error_code = 0;
+
+ force_sig(SIGFPE, tsk);
+}
diff --git a/arch/sh/kernel/cpu/sh5/probe.c b/arch/sh/kernel/cpu/sh5/probe.c
new file mode 100644
index 00000000..9e882409
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh5/probe.c
@@ -0,0 +1,75 @@
+/*
+ * arch/sh/kernel/cpu/sh5/probe.c
+ *
+ * CPU Subtype Probing for SH-5.
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ * Copyright (C) 2003 - 2007 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/string.h>
+#include <asm/processor.h>
+#include <asm/cache.h>
+#include <asm/tlb.h>
+
+void __cpuinit cpu_probe(void)
+{
+ unsigned long long cir;
+
+ /*
+ * Do peeks in real mode to avoid having to set up a mapping for
+ * the WPC registers. On SH5-101 cut2, such a mapping would be
+ * exposed to an address translation erratum which would make it
+ * hard to set up correctly.
+ */
+ cir = peek_real_address_q(0x0d000008);
+ if ((cir & 0xffff) == 0x5103)
+ boot_cpu_data.type = CPU_SH5_103;
+ else if (((cir >> 32) & 0xffff) == 0x51e2)
+ /* CPU.VCR aliased at CIR address on SH5-101 */
+ boot_cpu_data.type = CPU_SH5_101;
+
+ boot_cpu_data.family = CPU_FAMILY_SH5;
+
+ /*
+ * First, setup some sane values for the I-cache.
+ */
+ boot_cpu_data.icache.ways = 4;
+ boot_cpu_data.icache.sets = 256;
+ boot_cpu_data.icache.linesz = L1_CACHE_BYTES;
+ boot_cpu_data.icache.way_incr = (1 << 13);
+ boot_cpu_data.icache.entry_shift = 5;
+ boot_cpu_data.icache.way_size = boot_cpu_data.icache.sets *
+ boot_cpu_data.icache.linesz;
+ boot_cpu_data.icache.entry_mask = 0x1fe0;
+ boot_cpu_data.icache.flags = 0;
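+	/*
+	 * These values are mutually consistent: with 256 sets of 32-byte
+	 * lines (the SH-5 L1_CACHE_BYTES), each way holds 256 * 32 = 8KB,
+	 * matching way_incr (1 << 13), and entry_mask works out to
+	 * (sets - 1) << entry_shift = 255 << 5 = 0x1fe0.
+	 */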
+
+ /*
+ * Next, setup some sane values for the D-cache.
+ *
+ * On the SH5, these are pretty consistent with the I-cache settings,
+ * so we just copy over the existing definitions.. these can be fixed
+ * up later, especially if we add runtime CPU probing.
+ *
+ * Though in the meantime it saves us from having to duplicate all of
+ * the above definitions..
+ */
+ boot_cpu_data.dcache = boot_cpu_data.icache;
+
+ /*
+ * Setup any cache-related flags here
+ */
+#if defined(CONFIG_CACHE_WRITETHROUGH)
+ set_bit(SH_CACHE_MODE_WT, &(boot_cpu_data.dcache.flags));
+#elif defined(CONFIG_CACHE_WRITEBACK)
+ set_bit(SH_CACHE_MODE_WB, &(boot_cpu_data.dcache.flags));
+#endif
+
+ /* Setup some I/D TLB defaults */
+ sh64_tlb_init();
+}
diff --git a/arch/sh/kernel/cpu/sh5/setup-sh5.c b/arch/sh/kernel/cpu/sh5/setup-sh5.c
new file mode 100644
index 00000000..18419f1d
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh5/setup-sh5.c
@@ -0,0 +1,184 @@
+/*
+ * SH5-101/SH5-103 CPU Setup
+ *
+ * Copyright (C) 2009 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/serial.h>
+#include <linux/serial_sci.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/sh_timer.h>
+#include <asm/addrspace.h>
+
+static struct plat_sci_port scif0_platform_data = {
+ .mapbase = PHYS_PERIPHERAL_BLOCK + 0x01030000,
+ .flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .scbrr_algo_id = SCBRR_ALGO_2,
+ .type = PORT_SCIF,
+ .irqs = { 39, 40, 42, 0 },
+};
+
+static struct platform_device scif0_device = {
+ .name = "sh-sci",
+ .id = 0,
+ .dev = {
+ .platform_data = &scif0_platform_data,
+ },
+};
+
+static struct resource rtc_resources[] = {
+ [0] = {
+ .start = PHYS_PERIPHERAL_BLOCK + 0x01040000,
+ .end = PHYS_PERIPHERAL_BLOCK + 0x01040000 + 0x58 - 1,
+ .flags = IORESOURCE_IO,
+ },
+ [1] = {
+ /* Period IRQ */
+ .start = IRQ_PRI,
+ .flags = IORESOURCE_IRQ,
+ },
+ [2] = {
+ /* Carry IRQ */
+ .start = IRQ_CUI,
+ .flags = IORESOURCE_IRQ,
+ },
+ [3] = {
+ /* Alarm IRQ */
+ .start = IRQ_ATI,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device rtc_device = {
+ .name = "sh-rtc",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(rtc_resources),
+ .resource = rtc_resources,
+};
+
+#define TMU_BLOCK_OFF 0x01020000
+#define TMU_BASE PHYS_PERIPHERAL_BLOCK + TMU_BLOCK_OFF
+#define TMU0_BASE (TMU_BASE + 0x8 + (0xc * 0x0))
+#define TMU1_BASE (TMU_BASE + 0x8 + (0xc * 0x1))
+#define TMU2_BASE (TMU_BASE + 0x8 + (0xc * 0x2))
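+/*
+ * Each TMU channel exposes a 0xc-byte register window (TCOR, TCNT, TCR),
+ * with the channels laid out back to back starting 0x8 past the block base,
+ * hence the TMU_BASE + 0x8 + (0xc * channel) arithmetic above.
+ */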
+
+static struct sh_timer_config tmu0_platform_data = {
+ .channel_offset = 0x04,
+ .timer_bit = 0,
+ .clockevent_rating = 200,
+};
+
+static struct resource tmu0_resources[] = {
+ [0] = {
+ .start = TMU0_BASE,
+ .end = TMU0_BASE + 0xc - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = IRQ_TUNI0,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu0_device = {
+ .name = "sh_tmu",
+ .id = 0,
+ .dev = {
+ .platform_data = &tmu0_platform_data,
+ },
+ .resource = tmu0_resources,
+ .num_resources = ARRAY_SIZE(tmu0_resources),
+};
+
+static struct sh_timer_config tmu1_platform_data = {
+ .channel_offset = 0x10,
+ .timer_bit = 1,
+ .clocksource_rating = 200,
+};
+
+static struct resource tmu1_resources[] = {
+ [0] = {
+ .start = TMU1_BASE,
+ .end = TMU1_BASE + 0xc - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = IRQ_TUNI1,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu1_device = {
+ .name = "sh_tmu",
+ .id = 1,
+ .dev = {
+ .platform_data = &tmu1_platform_data,
+ },
+ .resource = tmu1_resources,
+ .num_resources = ARRAY_SIZE(tmu1_resources),
+};
+
+static struct sh_timer_config tmu2_platform_data = {
+ .channel_offset = 0x1c,
+ .timer_bit = 2,
+};
+
+static struct resource tmu2_resources[] = {
+ [0] = {
+ .start = TMU2_BASE,
+ .end = TMU2_BASE + 0xc - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = IRQ_TUNI2,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu2_device = {
+ .name = "sh_tmu",
+ .id = 2,
+ .dev = {
+ .platform_data = &tmu2_platform_data,
+ },
+ .resource = tmu2_resources,
+ .num_resources = ARRAY_SIZE(tmu2_resources),
+};
+
+static struct platform_device *sh5_early_devices[] __initdata = {
+ &scif0_device,
+ &tmu0_device,
+ &tmu1_device,
+ &tmu2_device,
+};
+
+static struct platform_device *sh5_devices[] __initdata = {
+ &rtc_device,
+};
+
+static int __init sh5_devices_setup(void)
+{
+ int ret;
+
+ ret = platform_add_devices(sh5_early_devices,
+ ARRAY_SIZE(sh5_early_devices));
+ if (unlikely(ret != 0))
+ return ret;
+
+ return platform_add_devices(sh5_devices,
+ ARRAY_SIZE(sh5_devices));
+}
+arch_initcall(sh5_devices_setup);
+
+void __init plat_early_device_setup(void)
+{
+ early_platform_add_devices(sh5_early_devices,
+ ARRAY_SIZE(sh5_early_devices));
+}
diff --git a/arch/sh/kernel/cpu/sh5/switchto.S b/arch/sh/kernel/cpu/sh5/switchto.S
new file mode 100644
index 00000000..45c351b0
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh5/switchto.S
@@ -0,0 +1,198 @@
+/*
+ * arch/sh/kernel/cpu/sh5/switchto.S
+ *
+ * sh64 context switch
+ *
+ * Copyright (C) 2004 Richard Curnow
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+*/
+
+ .section .text..SHmedia32,"ax"
+ .little
+
+ .balign 32
+
+ .type sh64_switch_to,@function
+ .global sh64_switch_to
+ .global __sh64_switch_to_end
+sh64_switch_to:
+
+/* Incoming args
+ r2 - prev
+ r3 - &prev->thread
+ r4 - next
+ r5 - &next->thread
+
+ Outgoing results
+ r2 - last (=prev) : this just stays in r2 throughout
+
+   We want to create a full (struct pt_regs) on the stack to allow backtracing
+   functions to work. However, we only need to populate the callee-save
+   register slots in this structure; since we're a function, our callers must
+   themselves have preserved all caller-saved state on the stack. This saves
+   some wasted effort since we won't need to look at the values.
+
+ In particular, all caller-save registers are immediately available for
+ scratch use.
+
+*/
+
+#define FRAME_SIZE (76*8 + 8)
+
+ movi FRAME_SIZE, r0
+ sub.l r15, r0, r15
+ ! Do normal-style register save to support backtrace
+
+ st.l r15, 0, r18 ! save link reg
+ st.l r15, 4, r14 ! save fp
+ add.l r15, r63, r14 ! setup frame pointer
+
+ ! hopefully this looks normal to the backtrace now.
+
+ addi.l r15, 8, r1 ! base of pt_regs
+ addi.l r1, 24, r0 ! base of pt_regs.regs
+ addi.l r0, (63*8), r8 ! base of pt_regs.trregs
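+	! Frame layout established above: offset 0 holds the saved link
+	! register, offset 4 the saved frame pointer, and the pt_regs area
+	! starts at offset 8 (r1), with its regs[] array 24 bytes in (r0) and
+	! tregs[] a further 63*8 bytes on (r8).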
+
+ /* Note : to be fixed?
+ struct pt_regs is really designed for holding the state on entry
+ to an exception, i.e. pc,sr,regs etc. However, for the context
+ switch state, some of this is not required. But the unwinder takes
+ struct pt_regs * as an arg so we have to build this structure
+ to allow unwinding switched tasks in show_state() */
+
+ st.q r0, ( 9*8), r9
+ st.q r0, (10*8), r10
+ st.q r0, (11*8), r11
+ st.q r0, (12*8), r12
+ st.q r0, (13*8), r13
+ st.q r0, (14*8), r14 ! for unwind, want to look as though we took a trap at
+ ! the point where the process is left in suspended animation, i.e. current
+ ! fp here, not the saved one.
+ st.q r0, (16*8), r16
+
+ st.q r0, (24*8), r24
+ st.q r0, (25*8), r25
+ st.q r0, (26*8), r26
+ st.q r0, (27*8), r27
+ st.q r0, (28*8), r28
+ st.q r0, (29*8), r29
+ st.q r0, (30*8), r30
+ st.q r0, (31*8), r31
+ st.q r0, (32*8), r32
+ st.q r0, (33*8), r33
+ st.q r0, (34*8), r34
+ st.q r0, (35*8), r35
+
+ st.q r0, (44*8), r44
+ st.q r0, (45*8), r45
+ st.q r0, (46*8), r46
+ st.q r0, (47*8), r47
+ st.q r0, (48*8), r48
+ st.q r0, (49*8), r49
+ st.q r0, (50*8), r50
+ st.q r0, (51*8), r51
+ st.q r0, (52*8), r52
+ st.q r0, (53*8), r53
+ st.q r0, (54*8), r54
+ st.q r0, (55*8), r55
+ st.q r0, (56*8), r56
+ st.q r0, (57*8), r57
+ st.q r0, (58*8), r58
+ st.q r0, (59*8), r59
+
+ ! do this early as pta->gettr has no pipeline forwarding (=> 5 cycle latency)
+	! Use a local label to avoid creating a symbol that will confuse the
+	! backtrace
+ pta .Lsave_pc, tr0
+
+ gettr tr5, r45
+ gettr tr6, r46
+ gettr tr7, r47
+ st.q r8, (5*8), r45
+ st.q r8, (6*8), r46
+ st.q r8, (7*8), r47
+
+ ! Now switch context
+ gettr tr0, r9
+ st.l r3, 0, r15 ! prev->thread.sp
+ st.l r3, 8, r1 ! prev->thread.kregs
+ st.l r3, 4, r9 ! prev->thread.pc
+ st.q r1, 0, r9 ! save prev->thread.pc into pt_regs->pc
+
+ ! Load PC for next task (init value or save_pc later)
+ ld.l r5, 4, r18 ! next->thread.pc
+ ! Switch stacks
+ ld.l r5, 0, r15 ! next->thread.sp
+ ptabs r18, tr0
+
+ ! Update current
+ ld.l r4, 4, r9 ! next->thread_info (2nd element of next task_struct)
+ putcon r9, kcr0 ! current = next->thread_info
+
+ ! go to save_pc for a reschedule, or the initial thread.pc for a new process
+ blink tr0, r63
+
+ ! Restore (when we come back to a previously saved task)
+.Lsave_pc:
+ addi.l r15, 32, r0 ! r0 = next's regs
+ addi.l r0, (63*8), r8 ! r8 = next's tr_regs
+
+ ld.q r8, (5*8), r45
+ ld.q r8, (6*8), r46
+ ld.q r8, (7*8), r47
+ ptabs r45, tr5
+ ptabs r46, tr6
+ ptabs r47, tr7
+
+ ld.q r0, ( 9*8), r9
+ ld.q r0, (10*8), r10
+ ld.q r0, (11*8), r11
+ ld.q r0, (12*8), r12
+ ld.q r0, (13*8), r13
+ ld.q r0, (14*8), r14
+ ld.q r0, (16*8), r16
+
+ ld.q r0, (24*8), r24
+ ld.q r0, (25*8), r25
+ ld.q r0, (26*8), r26
+ ld.q r0, (27*8), r27
+ ld.q r0, (28*8), r28
+ ld.q r0, (29*8), r29
+ ld.q r0, (30*8), r30
+ ld.q r0, (31*8), r31
+ ld.q r0, (32*8), r32
+ ld.q r0, (33*8), r33
+ ld.q r0, (34*8), r34
+ ld.q r0, (35*8), r35
+
+ ld.q r0, (44*8), r44
+ ld.q r0, (45*8), r45
+ ld.q r0, (46*8), r46
+ ld.q r0, (47*8), r47
+ ld.q r0, (48*8), r48
+ ld.q r0, (49*8), r49
+ ld.q r0, (50*8), r50
+ ld.q r0, (51*8), r51
+ ld.q r0, (52*8), r52
+ ld.q r0, (53*8), r53
+ ld.q r0, (54*8), r54
+ ld.q r0, (55*8), r55
+ ld.q r0, (56*8), r56
+ ld.q r0, (57*8), r57
+ ld.q r0, (58*8), r58
+ ld.q r0, (59*8), r59
+
+ ! epilogue
+ ld.l r15, 0, r18
+ ld.l r15, 4, r14
+ ptabs r18, tr0
+ movi FRAME_SIZE, r0
+ add r15, r0, r15
+ blink tr0, r63
+__sh64_switch_to_end:
+.LFE1:
+ .size sh64_switch_to,.LFE1-sh64_switch_to
+
diff --git a/arch/sh/kernel/cpu/sh5/unwind.c b/arch/sh/kernel/cpu/sh5/unwind.c
new file mode 100644
index 00000000..b205b25e
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh5/unwind.c
@@ -0,0 +1,326 @@
+/*
+ * arch/sh/kernel/cpu/sh5/unwind.c
+ *
+ * Copyright (C) 2004 Paul Mundt
+ * Copyright (C) 2004 Richard Curnow
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/kallsyms.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <asm/page.h>
+#include <asm/ptrace.h>
+#include <asm/processor.h>
+#include <asm/io.h>
+
+static u8 regcache[63];
+
+/*
+ * Finding the previous stack frame isn't as straightforward here as it is
+ * on some other platforms. In the sh64 case, we don't have "linked" stack
+ * frames, so we need to do a bit of work to determine the previous frame,
+ * and in turn, the previous r14/r18 pair.
+ *
+ * There are generally a few cases which determine where we can find out
+ * the r14/r18 values. In the general case, this can be determined by poking
+ * around the prologue of the symbol PC is in (note that we absolutely must
+ * have frame pointer support as well as the kernel symbol table mapped,
+ * otherwise we can't even get this far).
+ *
+ * In other cases, such as the interrupt/exception path, we can poke around
+ * the sp/fp.
+ *
+ * Notably, this entire approach is somewhat error prone, and in the event
+ * that the previous frame cannot be determined, that's all we can do.
+ * Either way, this still leaves us with a more correct backtrace than what
+ * we would be able to come up with by walking the stack (which is garbage
+ * for anything beyond the first frame).
+ * -- PFM.
+ */
+static int lookup_prev_stack_frame(unsigned long fp, unsigned long pc,
+ unsigned long *pprev_fp, unsigned long *pprev_pc,
+ struct pt_regs *regs)
+{
+ const char *sym;
+ char namebuf[128];
+ unsigned long offset;
+ unsigned long prologue = 0;
+ unsigned long fp_displacement = 0;
+ unsigned long fp_prev = 0;
+ unsigned long offset_r14 = 0, offset_r18 = 0;
+ int i, found_prologue_end = 0;
+
+ sym = kallsyms_lookup(pc, NULL, &offset, NULL, namebuf);
+ if (!sym)
+ return -EINVAL;
+
+ prologue = pc - offset;
+ if (!prologue)
+ return -EINVAL;
+
+ /* Validate fp, to avoid risk of dereferencing a bad pointer later.
+	   Assume 128MB since that's the amount of RAM on a Cayman. Modify
+ when there is an SH-5 board with more. */
+ if ((fp < (unsigned long) phys_to_virt(__MEMORY_START)) ||
+ (fp >= (unsigned long)(phys_to_virt(__MEMORY_START)) + 128*1024*1024) ||
+ ((fp & 7) != 0)) {
+ return -EINVAL;
+ }
+
+ /*
+ * Depth to walk, depth is completely arbitrary.
+ */
+ for (i = 0; i < 100; i++, prologue += sizeof(unsigned long)) {
+ unsigned long op;
+ u8 major, minor;
+ u8 src, dest, disp;
+
+ op = *(unsigned long *)prologue;
+
+ major = (op >> 26) & 0x3f;
+ src = (op >> 20) & 0x3f;
+ minor = (op >> 16) & 0xf;
+ disp = (op >> 10) & 0x3f;
+ dest = (op >> 4) & 0x3f;
+
+ /*
+ * Stack frame creation happens in a number of ways.. in the
+ * general case when the stack frame is less than 511 bytes,
+ * it's generally created by an addi or addi.l:
+ *
+ * addi/addi.l r15, -FRAME_SIZE, r15
+ *
+ * in the event that the frame size is bigger than this, it's
+ * typically created using a movi/sub pair as follows:
+ *
+ * movi FRAME_SIZE, rX
+ * sub r15, rX, r15
+ */
+
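+		/*
+		 * The ((s64)op >> 10) & 0x3ff expression used below isolates
+		 * the 10-bit displacement field of the instruction; shifting
+		 * it left by 54 and arithmetically back right by 54 then
+		 * sign-extends it to 64 bits before it is scaled up to a byte
+		 * offset.
+		 */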
+ switch (major) {
+ case (0x00 >> 2):
+ switch (minor) {
+ case 0x8: /* add.l */
+ case 0x9: /* add */
+ /* Look for r15, r63, r14 */
+ if (src == 15 && disp == 63 && dest == 14)
+ found_prologue_end = 1;
+
+ break;
+ case 0xa: /* sub.l */
+ case 0xb: /* sub */
+ if (src != 15 || dest != 15)
+ continue;
+
+ fp_displacement -= regcache[disp];
+ fp_prev = fp - fp_displacement;
+ break;
+ }
+ break;
+ case (0xa8 >> 2): /* st.l */
+ if (src != 15)
+ continue;
+
+ switch (dest) {
+ case 14:
+ if (offset_r14 || fp_displacement == 0)
+ continue;
+
+ offset_r14 = (u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54);
+ offset_r14 *= sizeof(unsigned long);
+ offset_r14 += fp_displacement;
+ break;
+ case 18:
+ if (offset_r18 || fp_displacement == 0)
+ continue;
+
+ offset_r18 = (u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54);
+ offset_r18 *= sizeof(unsigned long);
+ offset_r18 += fp_displacement;
+ break;
+ }
+
+ break;
+ case (0xcc >> 2): /* movi */
+ if (dest >= 63) {
+ printk(KERN_NOTICE "%s: Invalid dest reg %d "
+ "specified in movi handler. Failed "
+ "opcode was 0x%lx: ", __func__,
+ dest, op);
+
+ continue;
+ }
+
+ /* Sign extend */
+ regcache[dest] =
+ ((((s64)(u64)op >> 10) & 0xffff) << 54) >> 54;
+ break;
+ case (0xd0 >> 2): /* addi */
+ case (0xd4 >> 2): /* addi.l */
+ /* Look for r15, -FRAME_SIZE, r15 */
+ if (src != 15 || dest != 15)
+ continue;
+
+ /* Sign extended frame size.. */
+ fp_displacement +=
+ (u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54);
+ fp_prev = fp - fp_displacement;
+ break;
+ }
+
+ if (found_prologue_end && offset_r14 && (offset_r18 || *pprev_pc) && fp_prev)
+ break;
+ }
+
+ if (offset_r14 == 0 || fp_prev == 0) {
+ if (!offset_r14)
+ pr_debug("Unable to find r14 offset\n");
+ if (!fp_prev)
+ pr_debug("Unable to find previous fp\n");
+
+ return -EINVAL;
+ }
+
+	/* For the innermost leaf function, there might not be an offset_r18 */
+ if (!*pprev_pc && (offset_r18 == 0))
+ return -EINVAL;
+
+ *pprev_fp = *(unsigned long *)(fp_prev + offset_r14);
+
+ if (offset_r18)
+ *pprev_pc = *(unsigned long *)(fp_prev + offset_r18);
+
+ *pprev_pc &= ~1;
+
+ return 0;
+}
+
+/* Don't put this on the stack since we'll want to call sh64_unwind
+ * when we're close to underflowing the stack anyway. */
+static struct pt_regs here_regs;
+
+extern const char syscall_ret;
+extern const char ret_from_syscall;
+extern const char ret_from_exception;
+extern const char ret_from_irq;
+
+static void sh64_unwind_inner(struct pt_regs *regs);
+
+static void unwind_nested (unsigned long pc, unsigned long fp)
+{
+ if ((fp >= __MEMORY_START) &&
+ ((fp & 7) == 0)) {
+ sh64_unwind_inner((struct pt_regs *) fp);
+ }
+}
+
+static void sh64_unwind_inner(struct pt_regs *regs)
+{
+ unsigned long pc, fp;
+ int ofs = 0;
+ int first_pass;
+
+ pc = regs->pc & ~1;
+ fp = regs->regs[14];
+
+ first_pass = 1;
+ for (;;) {
+ int cond;
+ unsigned long next_fp, next_pc;
+
+ if (pc == ((unsigned long) &syscall_ret & ~1)) {
+ printk("SYSCALL\n");
+ unwind_nested(pc,fp);
+ return;
+ }
+
+ if (pc == ((unsigned long) &ret_from_syscall & ~1)) {
+ printk("SYSCALL (PREEMPTED)\n");
+ unwind_nested(pc,fp);
+ return;
+ }
+
+ /* In this case, the PC is discovered by lookup_prev_stack_frame but
+ it has 4 taken off it to look like the 'caller' */
+ if (pc == ((unsigned long) &ret_from_exception & ~1)) {
+ printk("EXCEPTION\n");
+ unwind_nested(pc,fp);
+ return;
+ }
+
+ if (pc == ((unsigned long) &ret_from_irq & ~1)) {
+ printk("IRQ\n");
+ unwind_nested(pc,fp);
+ return;
+ }
+
+ cond = ((pc >= __MEMORY_START) && (fp >= __MEMORY_START) &&
+ ((pc & 3) == 0) && ((fp & 7) == 0));
+
+ pc -= ofs;
+
+ printk("[<%08lx>] ", pc);
+ print_symbol("%s\n", pc);
+
+ if (first_pass) {
+ /* If the innermost frame is a leaf function, it's
+ * possible that r18 is never saved out to the stack.
+ */
+ next_pc = regs->regs[18];
+ } else {
+ next_pc = 0;
+ }
+
+ if (lookup_prev_stack_frame(fp, pc, &next_fp, &next_pc, regs) == 0) {
+ ofs = sizeof(unsigned long);
+ pc = next_pc & ~1;
+ fp = next_fp;
+ } else {
+ printk("Unable to lookup previous stack frame\n");
+ break;
+ }
+ first_pass = 0;
+ }
+
+ printk("\n");
+
+}
+
+void sh64_unwind(struct pt_regs *regs)
+{
+ if (!regs) {
+ /*
+ * Fetch current regs if we have no other saved state to back
+ * trace from.
+ */
+ regs = &here_regs;
+
+ __asm__ __volatile__ ("ori r14, 0, %0" : "=r" (regs->regs[14]));
+ __asm__ __volatile__ ("ori r15, 0, %0" : "=r" (regs->regs[15]));
+ __asm__ __volatile__ ("ori r18, 0, %0" : "=r" (regs->regs[18]));
+
+ __asm__ __volatile__ ("gettr tr0, %0" : "=r" (regs->tregs[0]));
+ __asm__ __volatile__ ("gettr tr1, %0" : "=r" (regs->tregs[1]));
+ __asm__ __volatile__ ("gettr tr2, %0" : "=r" (regs->tregs[2]));
+ __asm__ __volatile__ ("gettr tr3, %0" : "=r" (regs->tregs[3]));
+ __asm__ __volatile__ ("gettr tr4, %0" : "=r" (regs->tregs[4]));
+ __asm__ __volatile__ ("gettr tr5, %0" : "=r" (regs->tregs[5]));
+ __asm__ __volatile__ ("gettr tr6, %0" : "=r" (regs->tregs[6]));
+ __asm__ __volatile__ ("gettr tr7, %0" : "=r" (regs->tregs[7]));
+
+ __asm__ __volatile__ (
+ "pta 0f, tr0\n\t"
+ "blink tr0, %0\n\t"
+ "0: nop"
+ : "=r" (regs->pc)
+ );
+ }
+
+ printk("\nCall Trace:\n");
+ sh64_unwind_inner(regs);
+}
+
diff --git a/arch/sh/kernel/cpu/shmobile/Makefile b/arch/sh/kernel/cpu/shmobile/Makefile
new file mode 100644
index 00000000..e8a5111e
--- /dev/null
+++ b/arch/sh/kernel/cpu/shmobile/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the Linux/SuperH SH-Mobile backends.
+#
+
+# Power Management & Sleep mode
+obj-$(CONFIG_PM) += pm.o sleep.o
+obj-$(CONFIG_CPU_IDLE) += cpuidle.o
diff --git a/arch/sh/kernel/cpu/shmobile/cpuidle.c b/arch/sh/kernel/cpu/shmobile/cpuidle.c
new file mode 100644
index 00000000..1ddc876d
--- /dev/null
+++ b/arch/sh/kernel/cpu/shmobile/cpuidle.c
@@ -0,0 +1,120 @@
+/*
+ * arch/sh/kernel/cpu/shmobile/cpuidle.c
+ *
+ * Cpuidle support code for SuperH Mobile
+ *
+ * Copyright (C) 2009 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/suspend.h>
+#include <linux/cpuidle.h>
+#include <linux/export.h>
+#include <asm/suspend.h>
+#include <asm/uaccess.h>
+
+static unsigned long cpuidle_mode[] = {
+ SUSP_SH_SLEEP, /* regular sleep mode */
+ SUSP_SH_SLEEP | SUSP_SH_SF, /* sleep mode + self refresh */
+ SUSP_SH_STANDBY | SUSP_SH_SF, /* software standby mode + self refresh */
+};
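+/*
+ * These correspond, in order, to the C1/C2/C3 states registered by
+ * sh_mobile_setup_cpuidle() below: plain sleep, sleep with the SDRAM left
+ * in self-refresh, and software standby with self-refresh.
+ */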
+
+static int cpuidle_sleep_enter(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv,
+ int index)
+{
+ unsigned long allowed_mode = SUSP_SH_SLEEP;
+ int requested_state = index;
+ int allowed_state;
+ int k;
+
+ /* convert allowed mode to allowed state */
+ for (k = ARRAY_SIZE(cpuidle_mode) - 1; k > 0; k--)
+ if (cpuidle_mode[k] == allowed_mode)
+ break;
+
+ allowed_state = k;
+
+ /* take the following into account for sleep mode selection:
+ * - allowed_state: best mode allowed by hardware (clock deps)
+ * - requested_state: best mode allowed by software (latencies)
+ */
+ k = min_t(int, allowed_state, requested_state);
+
+ sh_mobile_call_standby(cpuidle_mode[k]);
+
+ return k;
+}
+
+static struct cpuidle_device cpuidle_dev;
+static struct cpuidle_driver cpuidle_driver = {
+ .name = "sh_idle",
+ .owner = THIS_MODULE,
+ .en_core_tk_irqen = 1,
+};
+
+void sh_mobile_setup_cpuidle(void)
+{
+ struct cpuidle_device *dev = &cpuidle_dev;
+ struct cpuidle_driver *drv = &cpuidle_driver;
+ struct cpuidle_state *state;
+ int i;
+
+
+ for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
+ drv->states[i].name[0] = '\0';
+ drv->states[i].desc[0] = '\0';
+ }
+
+ i = CPUIDLE_DRIVER_STATE_START;
+
+ state = &drv->states[i++];
+ snprintf(state->name, CPUIDLE_NAME_LEN, "C1");
+ strncpy(state->desc, "SuperH Sleep Mode", CPUIDLE_DESC_LEN);
+ state->exit_latency = 1;
+ state->target_residency = 1 * 2;
+ state->power_usage = 3;
+ state->flags = 0;
+ state->flags |= CPUIDLE_FLAG_TIME_VALID;
+ state->enter = cpuidle_sleep_enter;
+
+ drv->safe_state_index = i-1;
+
+ if (sh_mobile_sleep_supported & SUSP_SH_SF) {
+ state = &drv->states[i++];
+ snprintf(state->name, CPUIDLE_NAME_LEN, "C2");
+ strncpy(state->desc, "SuperH Sleep Mode [SF]",
+ CPUIDLE_DESC_LEN);
+ state->exit_latency = 100;
+ state->target_residency = 1 * 2;
+ state->power_usage = 1;
+ state->flags = 0;
+ state->flags |= CPUIDLE_FLAG_TIME_VALID;
+ state->enter = cpuidle_sleep_enter;
+ }
+
+ if (sh_mobile_sleep_supported & SUSP_SH_STANDBY) {
+ state = &drv->states[i++];
+ snprintf(state->name, CPUIDLE_NAME_LEN, "C3");
+ strncpy(state->desc, "SuperH Mobile Standby Mode [SF]",
+ CPUIDLE_DESC_LEN);
+ state->exit_latency = 2300;
+ state->target_residency = 1 * 2;
+ state->power_usage = 1;
+ state->flags = 0;
+ state->flags |= CPUIDLE_FLAG_TIME_VALID;
+ state->enter = cpuidle_sleep_enter;
+ }
+
+ drv->state_count = i;
+ dev->state_count = i;
+
+ cpuidle_register_driver(&cpuidle_driver);
+
+ cpuidle_register_device(dev);
+}
diff --git a/arch/sh/kernel/cpu/shmobile/pm.c b/arch/sh/kernel/cpu/shmobile/pm.c
new file mode 100644
index 00000000..08d27fac
--- /dev/null
+++ b/arch/sh/kernel/cpu/shmobile/pm.c
@@ -0,0 +1,157 @@
+/*
+ * arch/sh/kernel/cpu/shmobile/pm.c
+ *
+ * Power management support code for SuperH Mobile
+ *
+ * Copyright (C) 2009 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/suspend.h>
+#include <asm/suspend.h>
+#include <asm/uaccess.h>
+#include <asm/cacheflush.h>
+#include <asm/bl_bit.h>
+
+/*
+ * Notifier lists for pre/post sleep notification
+ */
+ATOMIC_NOTIFIER_HEAD(sh_mobile_pre_sleep_notifier_list);
+ATOMIC_NOTIFIER_HEAD(sh_mobile_post_sleep_notifier_list);
+
+/*
+ * Sleep modes available on SuperH Mobile:
+ *
+ * Sleep mode is just plain "sleep" instruction
+ * Sleep Self-Refresh mode is above plus RAM put in Self-Refresh
+ * Standby Self-Refresh mode is above plus stopped clocks
+ */
+#define SUSP_MODE_SLEEP (SUSP_SH_SLEEP)
+#define SUSP_MODE_SLEEP_SF (SUSP_SH_SLEEP | SUSP_SH_SF)
+#define SUSP_MODE_STANDBY_SF (SUSP_SH_STANDBY | SUSP_SH_SF)
+#define SUSP_MODE_RSTANDBY_SF \
+ (SUSP_SH_RSTANDBY | SUSP_SH_MMU | SUSP_SH_REGS | SUSP_SH_SF)
+ /*
+ * U-standby mode is unsupported since it needs bootloader hacks
+ */
+
+#ifdef CONFIG_CPU_SUBTYPE_SH7724
+#define RAM_BASE 0xfd800000 /* RSMEM */
+#else
+#define RAM_BASE 0xe5200000 /* ILRAM */
+#endif
+
+void sh_mobile_call_standby(unsigned long mode)
+{
+ void *onchip_mem = (void *)RAM_BASE;
+ struct sh_sleep_data *sdp = onchip_mem;
+ void (*standby_onchip_mem)(unsigned long, unsigned long);
+
+ /* code located directly after data structure */
+ standby_onchip_mem = (void *)(sdp + 1);
+
+ atomic_notifier_call_chain(&sh_mobile_pre_sleep_notifier_list,
+ mode, NULL);
+
+ /* flush the caches if MMU flag is set */
+ if (mode & SUSP_SH_MMU)
+ flush_cache_all();
+
+ /* Let assembly snippet in on-chip memory handle the rest */
+ standby_onchip_mem(mode, RAM_BASE);
+
+ atomic_notifier_call_chain(&sh_mobile_post_sleep_notifier_list,
+ mode, NULL);
+}
+
+extern char sh_mobile_sleep_enter_start;
+extern char sh_mobile_sleep_enter_end;
+
+extern char sh_mobile_sleep_resume_start;
+extern char sh_mobile_sleep_resume_end;
+
+unsigned long sh_mobile_sleep_supported = SUSP_SH_SLEEP;
+
+void sh_mobile_register_self_refresh(unsigned long flags,
+ void *pre_start, void *pre_end,
+ void *post_start, void *post_end)
+{
+ void *onchip_mem = (void *)RAM_BASE;
+ void *vp;
+ struct sh_sleep_data *sdp;
+ int n;
+
+ /* part 0: data area */
+ sdp = onchip_mem;
+ sdp->addr.stbcr = 0xa4150020; /* STBCR */
+ sdp->addr.bar = 0xa4150040; /* BAR */
+ sdp->addr.pteh = 0xff000000; /* PTEH */
+ sdp->addr.ptel = 0xff000004; /* PTEL */
+ sdp->addr.ttb = 0xff000008; /* TTB */
+ sdp->addr.tea = 0xff00000c; /* TEA */
+ sdp->addr.mmucr = 0xff000010; /* MMUCR */
+ sdp->addr.ptea = 0xff000034; /* PTEA */
+ sdp->addr.pascr = 0xff000070; /* PASCR */
+ sdp->addr.irmcr = 0xff000078; /* IRMCR */
+ sdp->addr.ccr = 0xff00001c; /* CCR */
+ sdp->addr.ramcr = 0xff000074; /* RAMCR */
+ vp = sdp + 1;
+
+ /* part 1: common code to enter sleep mode */
+ n = &sh_mobile_sleep_enter_end - &sh_mobile_sleep_enter_start;
+ memcpy(vp, &sh_mobile_sleep_enter_start, n);
+ vp += roundup(n, 4);
+
+ /* part 2: board specific code to enter self-refresh mode */
+ n = pre_end - pre_start;
+ memcpy(vp, pre_start, n);
+ sdp->sf_pre = (unsigned long)vp;
+ vp += roundup(n, 4);
+
+ /* part 3: board specific code to resume from self-refresh mode */
+ n = post_end - post_start;
+ memcpy(vp, post_start, n);
+ sdp->sf_post = (unsigned long)vp;
+ vp += roundup(n, 4);
+
+ /* part 4: common code to resume from sleep mode */
+ WARN_ON(vp > (onchip_mem + 0x600));
+ vp = onchip_mem + 0x600; /* located at interrupt vector */
+ n = &sh_mobile_sleep_resume_end - &sh_mobile_sleep_resume_start;
+ memcpy(vp, &sh_mobile_sleep_resume_start, n);
+ sdp->resume = (unsigned long)vp;
+
+ sh_mobile_sleep_supported |= flags;
+}
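+/*
+ * A CPU- or board-specific file is expected to supply the SDRAM
+ * self-refresh enter/leave snippets and register them along these lines
+ * (the names here are purely illustrative):
+ *
+ *	extern char my_sdram_enter_start, my_sdram_enter_end;
+ *	extern char my_sdram_leave_start, my_sdram_leave_end;
+ *
+ *	sh_mobile_register_self_refresh(SUSP_SH_SF,
+ *					&my_sdram_enter_start,
+ *					&my_sdram_enter_end,
+ *					&my_sdram_leave_start,
+ *					&my_sdram_leave_end);
+ *
+ * which widens sh_mobile_sleep_supported and so unlocks the deeper cpuidle
+ * states set up in cpuidle.c.
+ */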
+
+static int sh_pm_enter(suspend_state_t state)
+{
+ if (!(sh_mobile_sleep_supported & SUSP_MODE_STANDBY_SF))
+ return -ENXIO;
+
+ local_irq_disable();
+ set_bl_bit();
+ sh_mobile_call_standby(SUSP_MODE_STANDBY_SF);
+ local_irq_disable();
+ clear_bl_bit();
+ return 0;
+}
+
+static const struct platform_suspend_ops sh_pm_ops = {
+ .enter = sh_pm_enter,
+ .valid = suspend_valid_only_mem,
+};
+
+static int __init sh_pm_init(void)
+{
+ suspend_set_ops(&sh_pm_ops);
+ sh_mobile_setup_cpuidle();
+ return 0;
+}
+
+late_initcall(sh_pm_init);
diff --git a/arch/sh/kernel/cpu/shmobile/sleep.S b/arch/sh/kernel/cpu/shmobile/sleep.S
new file mode 100644
index 00000000..e6aac65f
--- /dev/null
+++ b/arch/sh/kernel/cpu/shmobile/sleep.S
@@ -0,0 +1,405 @@
+/*
+ * arch/sh/kernel/cpu/shmobile/sleep.S
+ *
+ * Sleep mode and Standby modes support for SuperH Mobile
+ *
+ * Copyright (C) 2009 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/sys.h>
+#include <linux/errno.h>
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+#include <asm/suspend.h>
+
+/*
+ * Kernel mode register usage, see entry.S:
+ * k0 scratch
+ * k1 scratch
+ */
+#define k0 r0
+#define k1 r1
+
+/* manage self-refresh and enter standby mode. must be self-contained.
+ * this code will be copied to on-chip memory and executed from there.
+ */
+ .balign 4
+ENTRY(sh_mobile_sleep_enter_start)
+
+ /* save mode flags */
+ mov.l r4, @(SH_SLEEP_MODE, r5)
+
+ /* save original vbr */
+ stc vbr, r0
+ mov.l r0, @(SH_SLEEP_VBR, r5)
+
+ /* point vbr to our on-chip memory page */
+ ldc r5, vbr
+
+ /* save return address */
+ sts pr, r0
+ mov.l r0, @(SH_SLEEP_SPC, r5)
+
+ /* save sr */
+ stc sr, r0
+ mov.l r0, @(SH_SLEEP_SR, r5)
+
+ /* save general purpose registers to stack if needed */
+ mov.l @(SH_SLEEP_MODE, r5), r0
+ tst #SUSP_SH_REGS, r0
+ bt skip_regs_save
+
+ sts.l pr, @-r15
+ mov.l r14, @-r15
+ mov.l r13, @-r15
+ mov.l r12, @-r15
+ mov.l r11, @-r15
+ mov.l r10, @-r15
+ mov.l r9, @-r15
+ mov.l r8, @-r15
+
+ /* make sure bank0 is selected, save low registers */
+ mov.l rb_bit, r9
+ not r9, r9
+ bsr set_sr
+ mov #0, r10
+
+ bsr save_low_regs
+ nop
+
+ /* switch to bank 1, save low registers */
+ mov.l rb_bit, r10
+ bsr set_sr
+ mov #-1, r9
+
+ bsr save_low_regs
+ nop
+
+ /* switch back to bank 0 */
+ mov.l rb_bit, r9
+ not r9, r9
+ bsr set_sr
+ mov #0, r10
+
+skip_regs_save:
+
+ /* save sp, also set to internal ram */
+ mov.l r15, @(SH_SLEEP_SP, r5)
+ mov r5, r15
+
+ /* save stbcr */
+ bsr save_register
+ mov #SH_SLEEP_REG_STBCR, r0
+
+ /* save mmu and cache context if needed */
+ mov.l @(SH_SLEEP_MODE, r5), r0
+ tst #SUSP_SH_MMU, r0
+ bt skip_mmu_save_disable
+
+ /* save mmu state */
+ bsr save_register
+ mov #SH_SLEEP_REG_PTEH, r0
+
+ bsr save_register
+ mov #SH_SLEEP_REG_PTEL, r0
+
+ bsr save_register
+ mov #SH_SLEEP_REG_TTB, r0
+
+ bsr save_register
+ mov #SH_SLEEP_REG_TEA, r0
+
+ bsr save_register
+ mov #SH_SLEEP_REG_MMUCR, r0
+
+ bsr save_register
+ mov #SH_SLEEP_REG_PTEA, r0
+
+ bsr save_register
+ mov #SH_SLEEP_REG_PASCR, r0
+
+ bsr save_register
+ mov #SH_SLEEP_REG_IRMCR, r0
+
+ /* invalidate TLBs and disable the MMU */
+ bsr get_register
+ mov #SH_SLEEP_REG_MMUCR, r0
+ mov #4, r1
+ mov.l r1, @r0
+ icbi @r0
+
+ /* save cache registers and disable caches */
+ bsr save_register
+ mov #SH_SLEEP_REG_CCR, r0
+
+ bsr save_register
+ mov #SH_SLEEP_REG_RAMCR, r0
+
+ bsr get_register
+ mov #SH_SLEEP_REG_CCR, r0
+ mov #0, r1
+ mov.l r1, @r0
+ icbi @r0
+
+skip_mmu_save_disable:
+ /* call self-refresh entering code if needed */
+ mov.l @(SH_SLEEP_MODE, r5), r0
+ tst #SUSP_SH_SF, r0
+ bt skip_set_sf
+
+ mov.l @(SH_SLEEP_SF_PRE, r5), r0
+ jsr @r0
+ nop
+
+skip_set_sf:
+ mov.l @(SH_SLEEP_MODE, r5), r0
+ tst #SUSP_SH_STANDBY, r0
+ bt test_rstandby
+
+ /* set mode to "software standby mode" */
+ bra do_sleep
+ mov #0x80, r1
+
+test_rstandby:
+ tst #SUSP_SH_RSTANDBY, r0
+ bt test_ustandby
+
+ /* setup BAR register */
+ bsr get_register
+ mov #SH_SLEEP_REG_BAR, r0
+ mov.l @(SH_SLEEP_RESUME, r5), r1
+ mov.l r1, @r0
+
+ /* set mode to "r-standby mode" */
+ bra do_sleep
+ mov #0x20, r1
+
+test_ustandby:
+ tst #SUSP_SH_USTANDBY, r0
+ bt force_sleep
+
+ /* set mode to "u-standby mode" */
+ bra do_sleep
+ mov #0x10, r1
+
+force_sleep:
+
+ /* set mode to "sleep mode" */
+ mov #0x00, r1
+
+do_sleep:
+ /* setup and enter selected standby mode */
+ bsr get_register
+ mov #SH_SLEEP_REG_STBCR, r0
+ mov.l r1, @r0
+again:
+ sleep
+ bra again
+ nop
+
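+/* save_register, get_register and restore_register all take a register
+ * index in r0 and use the tables set up by pm.c: the entry at
+ * SH_SLEEP_BASE_ADDR + r0 holds the hardware register's address and the
+ * slot at SH_SLEEP_BASE_DATA + r0 its saved contents, so the same index
+ * works for both the save and restore paths. */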
+save_register:
+ add #SH_SLEEP_BASE_ADDR, r0
+ mov.l @(r0, r5), r1
+ add #-SH_SLEEP_BASE_ADDR, r0
+ mov.l @r1, r1
+ add #SH_SLEEP_BASE_DATA, r0
+ mov.l r1, @(r0, r5)
+ add #-SH_SLEEP_BASE_DATA, r0
+ rts
+ nop
+
+get_register:
+ add #SH_SLEEP_BASE_ADDR, r0
+ mov.l @(r0, r5), r0
+ rts
+ nop
+
+set_sr:
+ stc sr, r8
+ and r9, r8
+ or r10, r8
+ ldc r8, sr
+ rts
+ nop
+
+save_low_regs:
+ mov.l r7, @-r15
+ mov.l r6, @-r15
+ mov.l r5, @-r15
+ mov.l r4, @-r15
+ mov.l r3, @-r15
+ mov.l r2, @-r15
+ mov.l r1, @-r15
+ rts
+ mov.l r0, @-r15
+
+ .balign 4
+rb_bit: .long 0x20000000 ! RB=1
+
+ENTRY(sh_mobile_sleep_enter_end)
+
+ .balign 4
+ENTRY(sh_mobile_sleep_resume_start)
+
+ /* figure out start address */
+ bsr 0f
+ nop
+0:
+ sts pr, k1
+ mov.l 1f, k0
+ and k0, k1
+
+ /* store pointer to data area in VBR */
+ ldc k1, vbr
+
+ /* setup sr with saved sr */
+ mov.l @(SH_SLEEP_SR, k1), k0
+ ldc k0, sr
+
+ /* now: user register set! */
+ stc vbr, r5
+
+ /* setup spc with return address to c code */
+ mov.l @(SH_SLEEP_SPC, r5), r0
+ ldc r0, spc
+
+ /* restore vbr */
+ mov.l @(SH_SLEEP_VBR, r5), r0
+ ldc r0, vbr
+
+ /* setup ssr with saved sr */
+ mov.l @(SH_SLEEP_SR, r5), r0
+ ldc r0, ssr
+
+ /* restore sp */
+ mov.l @(SH_SLEEP_SP, r5), r15
+
+ /* restore sleep mode register */
+ bsr restore_register
+ mov #SH_SLEEP_REG_STBCR, r0
+
+ /* call self-refresh resume code if needed */
+ mov.l @(SH_SLEEP_MODE, r5), r0
+ tst #SUSP_SH_SF, r0
+ bt skip_restore_sf
+
+ mov.l @(SH_SLEEP_SF_POST, r5), r0
+ jsr @r0
+ nop
+
+skip_restore_sf:
+ /* restore mmu and cache state if needed */
+ mov.l @(SH_SLEEP_MODE, r5), r0
+ tst #SUSP_SH_MMU, r0
+ bt skip_restore_mmu
+
+ /* restore mmu state */
+ bsr restore_register
+ mov #SH_SLEEP_REG_PTEH, r0
+
+ bsr restore_register
+ mov #SH_SLEEP_REG_PTEL, r0
+
+ bsr restore_register
+ mov #SH_SLEEP_REG_TTB, r0
+
+ bsr restore_register
+ mov #SH_SLEEP_REG_TEA, r0
+
+ bsr restore_register
+ mov #SH_SLEEP_REG_PTEA, r0
+
+ bsr restore_register
+ mov #SH_SLEEP_REG_PASCR, r0
+
+ bsr restore_register
+ mov #SH_SLEEP_REG_IRMCR, r0
+
+ bsr restore_register
+ mov #SH_SLEEP_REG_MMUCR, r0
+ icbi @r0
+
+ /* restore cache settings */
+ bsr restore_register
+ mov #SH_SLEEP_REG_RAMCR, r0
+ icbi @r0
+
+ bsr restore_register
+ mov #SH_SLEEP_REG_CCR, r0
+ icbi @r0
+
+skip_restore_mmu:
+
+ /* restore general purpose registers if needed */
+ mov.l @(SH_SLEEP_MODE, r5), r0
+ tst #SUSP_SH_REGS, r0
+ bt skip_restore_regs
+
+ /* switch to bank 1, restore low registers */
+ mov.l _rb_bit, r10
+ bsr _set_sr
+ mov #-1, r9
+
+ bsr restore_low_regs
+ nop
+
+ /* switch to bank0, restore low registers */
+ mov.l _rb_bit, r9
+ not r9, r9
+ bsr _set_sr
+ mov #0, r10
+
+ bsr restore_low_regs
+ nop
+
+ /* restore the rest of the registers */
+ mov.l @r15+, r8
+ mov.l @r15+, r9
+ mov.l @r15+, r10
+ mov.l @r15+, r11
+ mov.l @r15+, r12
+ mov.l @r15+, r13
+ mov.l @r15+, r14
+ lds.l @r15+, pr
+
+skip_restore_regs:
+ rte
+ nop
+
+restore_register:
+ add #SH_SLEEP_BASE_DATA, r0
+ mov.l @(r0, r5), r1
+ add #-SH_SLEEP_BASE_DATA, r0
+ add #SH_SLEEP_BASE_ADDR, r0
+ mov.l @(r0, r5), r0
+ mov.l r1, @r0
+ rts
+ nop
+
+_set_sr:
+ stc sr, r8
+ and r9, r8
+ or r10, r8
+ ldc r8, sr
+ rts
+ nop
+
+restore_low_regs:
+ mov.l @r15+, r0
+ mov.l @r15+, r1
+ mov.l @r15+, r2
+ mov.l @r15+, r3
+ mov.l @r15+, r4
+ mov.l @r15+, r5
+ mov.l @r15+, r6
+ rts
+ mov.l @r15+, r7
+
+ .balign 4
+_rb_bit: .long 0x20000000 ! RB=1
+1: .long ~0x7ff
+ENTRY(sh_mobile_sleep_resume_end)
diff --git a/arch/sh/kernel/cpufreq.c b/arch/sh/kernel/cpufreq.c
new file mode 100644
index 00000000..e68b45b6
--- /dev/null
+++ b/arch/sh/kernel/cpufreq.c
@@ -0,0 +1,201 @@
+/*
+ * arch/sh/kernel/cpufreq.c
+ *
+ * cpufreq driver for the SuperH processors.
+ *
+ * Copyright (C) 2002 - 2012 Paul Mundt
+ * Copyright (C) 2002 M. R. Brown
+ *
+ * Clock framework bits from arch/avr32/mach-at32ap/cpufreq.c
+ *
+ * Copyright (C) 2004-2007 Atmel Corporation
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#define pr_fmt(fmt) "cpufreq: " fmt
+
+#include <linux/types.h>
+#include <linux/cpufreq.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/cpumask.h>
+#include <linux/cpu.h>
+#include <linux/smp.h>
+#include <linux/sched.h> /* set_cpus_allowed() */
+#include <linux/clk.h>
+#include <linux/percpu.h>
+#include <linux/sh_clk.h>
+
+static DEFINE_PER_CPU(struct clk, sh_cpuclk);
+
+static unsigned int sh_cpufreq_get(unsigned int cpu)
+{
+ return (clk_get_rate(&per_cpu(sh_cpuclk, cpu)) + 500) / 1000;
+}
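+/*
+ * The clock framework reports rates in Hz while cpufreq works in kHz;
+ * (rate + 500) / 1000 rounds to the nearest kHz.  The same conversion is
+ * used in the target/verify/init paths below.
+ */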
+
+/*
+ * Here we notify other drivers of the proposed change and the final change.
+ */
+static int sh_cpufreq_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ unsigned int cpu = policy->cpu;
+ struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu);
+ cpumask_t cpus_allowed;
+ struct cpufreq_freqs freqs;
+ struct device *dev;
+ long freq;
+
+ if (!cpu_online(cpu))
+ return -ENODEV;
+
+ cpus_allowed = current->cpus_allowed;
+ set_cpus_allowed_ptr(current, cpumask_of(cpu));
+
+ BUG_ON(smp_processor_id() != cpu);
+
+ dev = get_cpu_device(cpu);
+
+ /* Convert target_freq from kHz to Hz */
+ freq = clk_round_rate(cpuclk, target_freq * 1000);
+
+ if (freq < (policy->min * 1000) || freq > (policy->max * 1000))
+ return -EINVAL;
+
+ dev_dbg(dev, "requested frequency %u Hz\n", target_freq * 1000);
+
+ freqs.cpu = cpu;
+ freqs.old = sh_cpufreq_get(cpu);
+ freqs.new = (freq + 500) / 1000;
+ freqs.flags = 0;
+
+ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+ set_cpus_allowed_ptr(current, &cpus_allowed);
+ clk_set_rate(cpuclk, freq);
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+
+ dev_dbg(dev, "set frequency %lu Hz\n", freq);
+
+ return 0;
+}
+
+static int sh_cpufreq_verify(struct cpufreq_policy *policy)
+{
+ struct clk *cpuclk = &per_cpu(sh_cpuclk, policy->cpu);
+ struct cpufreq_frequency_table *freq_table;
+
+ freq_table = cpuclk->nr_freqs ? cpuclk->freq_table : NULL;
+ if (freq_table)
+ return cpufreq_frequency_table_verify(policy, freq_table);
+
+ cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
+ policy->cpuinfo.max_freq);
+
+ policy->min = (clk_round_rate(cpuclk, 1) + 500) / 1000;
+ policy->max = (clk_round_rate(cpuclk, ~0UL) + 500) / 1000;
+
+ cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
+ policy->cpuinfo.max_freq);
+
+ return 0;
+}
+
+static int sh_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+ unsigned int cpu = policy->cpu;
+ struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu);
+ struct cpufreq_frequency_table *freq_table;
+ struct device *dev;
+
+ if (!cpu_online(cpu))
+ return -ENODEV;
+
+ dev = get_cpu_device(cpu);
+
+ cpuclk = clk_get(dev, "cpu_clk");
+ if (IS_ERR(cpuclk)) {
+ dev_err(dev, "couldn't get CPU clk\n");
+ return PTR_ERR(cpuclk);
+ }
+
+ policy->cur = policy->min = policy->max = sh_cpufreq_get(cpu);
+
+ freq_table = cpuclk->nr_freqs ? cpuclk->freq_table : NULL;
+ if (freq_table) {
+ int result;
+
+ result = cpufreq_frequency_table_cpuinfo(policy, freq_table);
+ if (!result)
+ cpufreq_frequency_table_get_attr(freq_table, cpu);
+ } else {
+ dev_notice(dev, "no frequency table found, falling back "
+ "to rate rounding.\n");
+
+ policy->cpuinfo.min_freq =
+ (clk_round_rate(cpuclk, 1) + 500) / 1000;
+ policy->cpuinfo.max_freq =
+ (clk_round_rate(cpuclk, ~0UL) + 500) / 1000;
+ }
+
+ policy->min = policy->cpuinfo.min_freq;
+ policy->max = policy->cpuinfo.max_freq;
+
+ policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
+
+ dev_info(dev, "CPU Frequencies - Minimum %u.%03u MHz, "
+ "Maximum %u.%03u MHz.\n",
+ policy->min / 1000, policy->min % 1000,
+ policy->max / 1000, policy->max % 1000);
+
+ return 0;
+}
+
+static int sh_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+{
+ unsigned int cpu = policy->cpu;
+ struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu);
+
+ cpufreq_frequency_table_put_attr(cpu);
+ clk_put(cpuclk);
+
+ return 0;
+}
+
+static struct freq_attr *sh_freq_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ NULL,
+};
+
+static struct cpufreq_driver sh_cpufreq_driver = {
+ .owner = THIS_MODULE,
+ .name = "sh",
+ .get = sh_cpufreq_get,
+ .target = sh_cpufreq_target,
+ .verify = sh_cpufreq_verify,
+ .init = sh_cpufreq_cpu_init,
+ .exit = sh_cpufreq_cpu_exit,
+ .attr = sh_freq_attr,
+};
+
+static int __init sh_cpufreq_module_init(void)
+{
+ pr_notice("SuperH CPU frequency driver.\n");
+ return cpufreq_register_driver(&sh_cpufreq_driver);
+}
+
+static void __exit sh_cpufreq_module_exit(void)
+{
+ cpufreq_unregister_driver(&sh_cpufreq_driver);
+}
+
+module_init(sh_cpufreq_module_init);
+module_exit(sh_cpufreq_module_exit);
+
+MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>");
+MODULE_DESCRIPTION("cpufreq driver for SuperH");
+MODULE_LICENSE("GPL");
diff --git a/arch/sh/kernel/crash_dump.c b/arch/sh/kernel/crash_dump.c
new file mode 100644
index 00000000..569e7b17
--- /dev/null
+++ b/arch/sh/kernel/crash_dump.c
@@ -0,0 +1,45 @@
+/*
+ * crash_dump.c - Memory preserving reboot related code.
+ *
+ * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
+ * Copyright (C) IBM Corporation, 2004. All rights reserved
+ */
+#include <linux/errno.h>
+#include <linux/crash_dump.h>
+#include <linux/io.h>
+#include <asm/uaccess.h>
+
+/**
+ * copy_oldmem_page - copy one page from "oldmem"
+ * @pfn: page frame number to be copied
+ * @buf: target memory address for the copy; this can be in kernel address
+ * space or user address space (see @userbuf)
+ * @csize: number of bytes to copy
+ * @offset: offset in bytes into the page (based on pfn) to begin the copy
+ * @userbuf: if set, @buf is in user address space, use copy_to_user(),
+ * otherwise @buf is in kernel address space, use memcpy().
+ *
+ * Copy a page from "oldmem". For this page, there is no pte mapped
+ * in the current kernel. We stitch up a pte, similar to kmap_atomic.
+ */
+ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
+ size_t csize, unsigned long offset, int userbuf)
+{
+ void *vaddr;
+
+ if (!csize)
+ return 0;
+
+ vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
+
+ if (userbuf) {
+ if (copy_to_user(buf, (vaddr + offset), csize)) {
+ iounmap(vaddr);
+ return -EFAULT;
+ }
+ } else
+ memcpy(buf, (vaddr + offset), csize);
+
+ iounmap(vaddr);
+ return csize;
+}
diff --git a/arch/sh/kernel/debugtraps.S b/arch/sh/kernel/debugtraps.S
new file mode 100644
index 00000000..7a1b46fe
--- /dev/null
+++ b/arch/sh/kernel/debugtraps.S
@@ -0,0 +1,41 @@
+/*
+ * arch/sh/kernel/debugtraps.S
+ *
+ * Debug trap jump tables for SuperH
+ *
+ * Copyright (C) 2006 - 2008 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/sys.h>
+#include <linux/linkage.h>
+
+#if !defined(CONFIG_KGDB)
+#define singlestep_trap_handler debug_trap_handler
+#endif
+
+#if !defined(CONFIG_SH_STANDARD_BIOS)
+#define sh_bios_handler debug_trap_handler
+#endif
+
+ .data
+
+ENTRY(debug_trap_table)
+ .long debug_trap_handler /* 0x30 */
+ .long debug_trap_handler /* 0x31 */
+ .long debug_trap_handler /* 0x32 */
+ .long debug_trap_handler /* 0x33 */
+ .long debug_trap_handler /* 0x34 */
+ .long debug_trap_handler /* 0x35 */
+ .long debug_trap_handler /* 0x36 */
+ .long debug_trap_handler /* 0x37 */
+ .long debug_trap_handler /* 0x38 */
+ .long debug_trap_handler /* 0x39 */
+ .long debug_trap_handler /* 0x3a */
+ .long debug_trap_handler /* 0x3b */
+ .long breakpoint_trap_handler /* 0x3c */
+ .long singlestep_trap_handler /* 0x3d */
+ .long bug_trap_handler /* 0x3e */
+ .long sh_bios_handler /* 0x3f */
diff --git a/arch/sh/kernel/disassemble.c b/arch/sh/kernel/disassemble.c
new file mode 100644
index 00000000..64d5d8dd
--- /dev/null
+++ b/arch/sh/kernel/disassemble.c
@@ -0,0 +1,573 @@
+/*
+ * Disassemble SuperH instructions.
+ *
+ * Copyright (C) 1999 kaz Kojima
+ * Copyright (C) 2008 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+
+/*
+ * Format of an instruction in memory.
+ */
+typedef enum {
+ HEX_0, HEX_1, HEX_2, HEX_3, HEX_4, HEX_5, HEX_6, HEX_7,
+ HEX_8, HEX_9, HEX_A, HEX_B, HEX_C, HEX_D, HEX_E, HEX_F,
+ REG_N, REG_M, REG_NM, REG_B,
+ BRANCH_12, BRANCH_8,
+ DISP_8, DISP_4,
+ IMM_4, IMM_4BY2, IMM_4BY4, PCRELIMM_8BY2, PCRELIMM_8BY4,
+ IMM_8, IMM_8BY2, IMM_8BY4,
+} sh_nibble_type;
+
+typedef enum {
+ A_END, A_BDISP12, A_BDISP8,
+ A_DEC_M, A_DEC_N,
+ A_DISP_GBR, A_DISP_PC, A_DISP_REG_M, A_DISP_REG_N,
+ A_GBR,
+ A_IMM,
+ A_INC_M, A_INC_N,
+ A_IND_M, A_IND_N, A_IND_R0_REG_M, A_IND_R0_REG_N,
+ A_MACH, A_MACL,
+ A_PR, A_R0, A_R0_GBR, A_REG_M, A_REG_N, A_REG_B,
+ A_SR, A_VBR, A_SSR, A_SPC, A_SGR, A_DBR,
+ F_REG_N, F_REG_M, D_REG_N, D_REG_M,
+ X_REG_N, /* Only used for argument parsing */
+ X_REG_M, /* Only used for argument parsing */
+ DX_REG_N, DX_REG_M, V_REG_N, V_REG_M,
+ FD_REG_N,
+ XMTRX_M4,
+ F_FR0,
+ FPUL_N, FPUL_M, FPSCR_N, FPSCR_M,
+} sh_arg_type;
+
+static struct sh_opcode_info {
+ char *name;
+ sh_arg_type arg[7];
+ sh_nibble_type nibbles[4];
+} sh_table[] = {
+ {"add",{A_IMM,A_REG_N},{HEX_7,REG_N,IMM_8}},
+ {"add",{ A_REG_M,A_REG_N},{HEX_3,REG_N,REG_M,HEX_C}},
+ {"addc",{ A_REG_M,A_REG_N},{HEX_3,REG_N,REG_M,HEX_E}},
+ {"addv",{ A_REG_M,A_REG_N},{HEX_3,REG_N,REG_M,HEX_F}},
+ {"and",{A_IMM,A_R0},{HEX_C,HEX_9,IMM_8}},
+ {"and",{ A_REG_M,A_REG_N},{HEX_2,REG_N,REG_M,HEX_9}},
+ {"and.b",{A_IMM,A_R0_GBR},{HEX_C,HEX_D,IMM_8}},
+ {"bra",{A_BDISP12},{HEX_A,BRANCH_12}},
+ {"bsr",{A_BDISP12},{HEX_B,BRANCH_12}},
+ {"bt",{A_BDISP8},{HEX_8,HEX_9,BRANCH_8}},
+ {"bf",{A_BDISP8},{HEX_8,HEX_B,BRANCH_8}},
+ {"bt.s",{A_BDISP8},{HEX_8,HEX_D,BRANCH_8}},
+ {"bt/s",{A_BDISP8},{HEX_8,HEX_D,BRANCH_8}},
+ {"bf.s",{A_BDISP8},{HEX_8,HEX_F,BRANCH_8}},
+ {"bf/s",{A_BDISP8},{HEX_8,HEX_F,BRANCH_8}},
+ {"clrmac",{0},{HEX_0,HEX_0,HEX_2,HEX_8}},
+ {"clrs",{0},{HEX_0,HEX_0,HEX_4,HEX_8}},
+ {"clrt",{0},{HEX_0,HEX_0,HEX_0,HEX_8}},
+ {"cmp/eq",{A_IMM,A_R0},{HEX_8,HEX_8,IMM_8}},
+ {"cmp/eq",{ A_REG_M,A_REG_N},{HEX_3,REG_N,REG_M,HEX_0}},
+ {"cmp/ge",{ A_REG_M,A_REG_N},{HEX_3,REG_N,REG_M,HEX_3}},
+ {"cmp/gt",{ A_REG_M,A_REG_N},{HEX_3,REG_N,REG_M,HEX_7}},
+ {"cmp/hi",{ A_REG_M,A_REG_N},{HEX_3,REG_N,REG_M,HEX_6}},
+ {"cmp/hs",{ A_REG_M,A_REG_N},{HEX_3,REG_N,REG_M,HEX_2}},
+ {"cmp/pl",{A_REG_N},{HEX_4,REG_N,HEX_1,HEX_5}},
+ {"cmp/pz",{A_REG_N},{HEX_4,REG_N,HEX_1,HEX_1}},
+ {"cmp/str",{ A_REG_M,A_REG_N},{HEX_2,REG_N,REG_M,HEX_C}},
+ {"div0s",{ A_REG_M,A_REG_N},{HEX_2,REG_N,REG_M,HEX_7}},
+ {"div0u",{0},{HEX_0,HEX_0,HEX_1,HEX_9}},
+ {"div1",{ A_REG_M,A_REG_N},{HEX_3,REG_N,REG_M,HEX_4}},
+ {"exts.b",{ A_REG_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_E}},
+ {"exts.w",{ A_REG_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_F}},
+ {"extu.b",{ A_REG_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_C}},
+ {"extu.w",{ A_REG_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_D}},
+ {"jmp",{A_IND_N},{HEX_4,REG_N,HEX_2,HEX_B}},
+ {"jsr",{A_IND_N},{HEX_4,REG_N,HEX_0,HEX_B}},
+ {"ldc",{A_REG_N,A_SR},{HEX_4,REG_N,HEX_0,HEX_E}},
+ {"ldc",{A_REG_N,A_GBR},{HEX_4,REG_N,HEX_1,HEX_E}},
+ {"ldc",{A_REG_N,A_VBR},{HEX_4,REG_N,HEX_2,HEX_E}},
+ {"ldc",{A_REG_N,A_SSR},{HEX_4,REG_N,HEX_3,HEX_E}},
+ {"ldc",{A_REG_N,A_SPC},{HEX_4,REG_N,HEX_4,HEX_E}},
+ {"ldc",{A_REG_N,A_DBR},{HEX_4,REG_N,HEX_7,HEX_E}},
+ {"ldc",{A_REG_N,A_REG_B},{HEX_4,REG_N,REG_B,HEX_E}},
+ {"ldc.l",{A_INC_N,A_SR},{HEX_4,REG_N,HEX_0,HEX_7}},
+ {"ldc.l",{A_INC_N,A_GBR},{HEX_4,REG_N,HEX_1,HEX_7}},
+ {"ldc.l",{A_INC_N,A_VBR},{HEX_4,REG_N,HEX_2,HEX_7}},
+ {"ldc.l",{A_INC_N,A_SSR},{HEX_4,REG_N,HEX_3,HEX_7}},
+ {"ldc.l",{A_INC_N,A_SPC},{HEX_4,REG_N,HEX_4,HEX_7}},
+ {"ldc.l",{A_INC_N,A_DBR},{HEX_4,REG_N,HEX_7,HEX_7}},
+ {"ldc.l",{A_INC_N,A_REG_B},{HEX_4,REG_N,REG_B,HEX_7}},
+ {"lds",{A_REG_N,A_MACH},{HEX_4,REG_N,HEX_0,HEX_A}},
+ {"lds",{A_REG_N,A_MACL},{HEX_4,REG_N,HEX_1,HEX_A}},
+ {"lds",{A_REG_N,A_PR},{HEX_4,REG_N,HEX_2,HEX_A}},
+ {"lds",{A_REG_M,FPUL_N},{HEX_4,REG_M,HEX_5,HEX_A}},
+ {"lds",{A_REG_M,FPSCR_N},{HEX_4,REG_M,HEX_6,HEX_A}},
+ {"lds.l",{A_INC_N,A_MACH},{HEX_4,REG_N,HEX_0,HEX_6}},
+ {"lds.l",{A_INC_N,A_MACL},{HEX_4,REG_N,HEX_1,HEX_6}},
+ {"lds.l",{A_INC_N,A_PR},{HEX_4,REG_N,HEX_2,HEX_6}},
+ {"lds.l",{A_INC_M,FPUL_N},{HEX_4,REG_M,HEX_5,HEX_6}},
+ {"lds.l",{A_INC_M,FPSCR_N},{HEX_4,REG_M,HEX_6,HEX_6}},
+ {"ldtlb",{0},{HEX_0,HEX_0,HEX_3,HEX_8}},
+ {"mac.w",{A_INC_M,A_INC_N},{HEX_4,REG_N,REG_M,HEX_F}},
+ {"mov",{A_IMM,A_REG_N},{HEX_E,REG_N,IMM_8}},
+ {"mov",{ A_REG_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_3}},
+ {"mov.b",{ A_REG_M,A_IND_R0_REG_N},{HEX_0,REG_N,REG_M,HEX_4}},
+ {"mov.b",{ A_REG_M,A_DEC_N},{HEX_2,REG_N,REG_M,HEX_4}},
+ {"mov.b",{ A_REG_M,A_IND_N},{HEX_2,REG_N,REG_M,HEX_0}},
+ {"mov.b",{A_DISP_REG_M,A_R0},{HEX_8,HEX_4,REG_M,IMM_4}},
+ {"mov.b",{A_DISP_GBR,A_R0},{HEX_C,HEX_4,IMM_8}},
+ {"mov.b",{A_IND_R0_REG_M,A_REG_N},{HEX_0,REG_N,REG_M,HEX_C}},
+ {"mov.b",{A_INC_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_4}},
+ {"mov.b",{A_IND_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_0}},
+ {"mov.b",{A_R0,A_DISP_REG_M},{HEX_8,HEX_0,REG_M,IMM_4}},
+ {"mov.b",{A_R0,A_DISP_GBR},{HEX_C,HEX_0,IMM_8}},
+ {"mov.l",{ A_REG_M,A_DISP_REG_N},{HEX_1,REG_N,REG_M,IMM_4BY4}},
+ {"mov.l",{ A_REG_M,A_IND_R0_REG_N},{HEX_0,REG_N,REG_M,HEX_6}},
+ {"mov.l",{ A_REG_M,A_DEC_N},{HEX_2,REG_N,REG_M,HEX_6}},
+ {"mov.l",{ A_REG_M,A_IND_N},{HEX_2,REG_N,REG_M,HEX_2}},
+ {"mov.l",{A_DISP_REG_M,A_REG_N},{HEX_5,REG_N,REG_M,IMM_4BY4}},
+ {"mov.l",{A_DISP_GBR,A_R0},{HEX_C,HEX_6,IMM_8BY4}},
+ {"mov.l",{A_DISP_PC,A_REG_N},{HEX_D,REG_N,PCRELIMM_8BY4}},
+ {"mov.l",{A_IND_R0_REG_M,A_REG_N},{HEX_0,REG_N,REG_M,HEX_E}},
+ {"mov.l",{A_INC_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_6}},
+ {"mov.l",{A_IND_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_2}},
+ {"mov.l",{A_R0,A_DISP_GBR},{HEX_C,HEX_2,IMM_8BY4}},
+ {"mov.w",{ A_REG_M,A_IND_R0_REG_N},{HEX_0,REG_N,REG_M,HEX_5}},
+ {"mov.w",{ A_REG_M,A_DEC_N},{HEX_2,REG_N,REG_M,HEX_5}},
+ {"mov.w",{ A_REG_M,A_IND_N},{HEX_2,REG_N,REG_M,HEX_1}},
+ {"mov.w",{A_DISP_REG_M,A_R0},{HEX_8,HEX_5,REG_M,IMM_4BY2}},
+ {"mov.w",{A_DISP_GBR,A_R0},{HEX_C,HEX_5,IMM_8BY2}},
+ {"mov.w",{A_DISP_PC,A_REG_N},{HEX_9,REG_N,PCRELIMM_8BY2}},
+ {"mov.w",{A_IND_R0_REG_M,A_REG_N},{HEX_0,REG_N,REG_M,HEX_D}},
+ {"mov.w",{A_INC_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_5}},
+ {"mov.w",{A_IND_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_1}},
+ {"mov.w",{A_R0,A_DISP_REG_M},{HEX_8,HEX_1,REG_M,IMM_4BY2}},
+ {"mov.w",{A_R0,A_DISP_GBR},{HEX_C,HEX_1,IMM_8BY2}},
+ {"mova",{A_DISP_PC,A_R0},{HEX_C,HEX_7,PCRELIMM_8BY4}},
+ {"movca.l",{A_R0,A_IND_N},{HEX_0,REG_N,HEX_C,HEX_3}},
+ {"movt",{A_REG_N},{HEX_0,REG_N,HEX_2,HEX_9}},
+ {"muls",{ A_REG_M,A_REG_N},{HEX_2,REG_N,REG_M,HEX_F}},
+ {"mul.l",{ A_REG_M,A_REG_N},{HEX_0,REG_N,REG_M,HEX_7}},
+ {"mulu",{ A_REG_M,A_REG_N},{HEX_2,REG_N,REG_M,HEX_E}},
+ {"neg",{ A_REG_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_B}},
+ {"negc",{ A_REG_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_A}},
+ {"nop",{0},{HEX_0,HEX_0,HEX_0,HEX_9}},
+ {"not",{ A_REG_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_7}},
+ {"ocbi",{A_IND_N},{HEX_0,REG_N,HEX_9,HEX_3}},
+ {"ocbp",{A_IND_N},{HEX_0,REG_N,HEX_A,HEX_3}},
+ {"ocbwb",{A_IND_N},{HEX_0,REG_N,HEX_B,HEX_3}},
+ {"or",{A_IMM,A_R0},{HEX_C,HEX_B,IMM_8}},
+ {"or",{ A_REG_M,A_REG_N},{HEX_2,REG_N,REG_M,HEX_B}},
+ {"or.b",{A_IMM,A_R0_GBR},{HEX_C,HEX_F,IMM_8}},
+ {"pref",{A_IND_N},{HEX_0,REG_N,HEX_8,HEX_3}},
+ {"rotcl",{A_REG_N},{HEX_4,REG_N,HEX_2,HEX_4}},
+ {"rotcr",{A_REG_N},{HEX_4,REG_N,HEX_2,HEX_5}},
+ {"rotl",{A_REG_N},{HEX_4,REG_N,HEX_0,HEX_4}},
+ {"rotr",{A_REG_N},{HEX_4,REG_N,HEX_0,HEX_5}},
+ {"rte",{0},{HEX_0,HEX_0,HEX_2,HEX_B}},
+ {"rts",{0},{HEX_0,HEX_0,HEX_0,HEX_B}},
+ {"sets",{0},{HEX_0,HEX_0,HEX_5,HEX_8}},
+ {"sett",{0},{HEX_0,HEX_0,HEX_1,HEX_8}},
+ {"shad",{ A_REG_M,A_REG_N},{HEX_4,REG_N,REG_M,HEX_C}},
+ {"shld",{ A_REG_M,A_REG_N},{HEX_4,REG_N,REG_M,HEX_D}},
+ {"shal",{A_REG_N},{HEX_4,REG_N,HEX_2,HEX_0}},
+ {"shar",{A_REG_N},{HEX_4,REG_N,HEX_2,HEX_1}},
+ {"shll",{A_REG_N},{HEX_4,REG_N,HEX_0,HEX_0}},
+ {"shll16",{A_REG_N},{HEX_4,REG_N,HEX_2,HEX_8}},
+ {"shll2",{A_REG_N},{HEX_4,REG_N,HEX_0,HEX_8}},
+ {"shll8",{A_REG_N},{HEX_4,REG_N,HEX_1,HEX_8}},
+ {"shlr",{A_REG_N},{HEX_4,REG_N,HEX_0,HEX_1}},
+ {"shlr16",{A_REG_N},{HEX_4,REG_N,HEX_2,HEX_9}},
+ {"shlr2",{A_REG_N},{HEX_4,REG_N,HEX_0,HEX_9}},
+ {"shlr8",{A_REG_N},{HEX_4,REG_N,HEX_1,HEX_9}},
+ {"sleep",{0},{HEX_0,HEX_0,HEX_1,HEX_B}},
+ {"stc",{A_SR,A_REG_N},{HEX_0,REG_N,HEX_0,HEX_2}},
+ {"stc",{A_GBR,A_REG_N},{HEX_0,REG_N,HEX_1,HEX_2}},
+ {"stc",{A_VBR,A_REG_N},{HEX_0,REG_N,HEX_2,HEX_2}},
+ {"stc",{A_SSR,A_REG_N},{HEX_0,REG_N,HEX_3,HEX_2}},
+ {"stc",{A_SPC,A_REG_N},{HEX_0,REG_N,HEX_4,HEX_2}},
+ {"stc",{A_SGR,A_REG_N},{HEX_0,REG_N,HEX_6,HEX_2}},
+ {"stc",{A_DBR,A_REG_N},{HEX_0,REG_N,HEX_7,HEX_2}},
+ {"stc",{A_REG_B,A_REG_N},{HEX_0,REG_N,REG_B,HEX_2}},
+ {"stc.l",{A_SR,A_DEC_N},{HEX_4,REG_N,HEX_0,HEX_3}},
+ {"stc.l",{A_GBR,A_DEC_N},{HEX_4,REG_N,HEX_1,HEX_3}},
+ {"stc.l",{A_VBR,A_DEC_N},{HEX_4,REG_N,HEX_2,HEX_3}},
+ {"stc.l",{A_SSR,A_DEC_N},{HEX_4,REG_N,HEX_3,HEX_3}},
+ {"stc.l",{A_SPC,A_DEC_N},{HEX_4,REG_N,HEX_4,HEX_3}},
+ {"stc.l",{A_SGR,A_DEC_N},{HEX_4,REG_N,HEX_6,HEX_3}},
+ {"stc.l",{A_DBR,A_DEC_N},{HEX_4,REG_N,HEX_7,HEX_3}},
+ {"stc.l",{A_REG_B,A_DEC_N},{HEX_4,REG_N,REG_B,HEX_3}},
+ {"sts",{A_MACH,A_REG_N},{HEX_0,REG_N,HEX_0,HEX_A}},
+ {"sts",{A_MACL,A_REG_N},{HEX_0,REG_N,HEX_1,HEX_A}},
+ {"sts",{A_PR,A_REG_N},{HEX_0,REG_N,HEX_2,HEX_A}},
+ {"sts",{FPUL_M,A_REG_N},{HEX_0,REG_N,HEX_5,HEX_A}},
+ {"sts",{FPSCR_M,A_REG_N},{HEX_0,REG_N,HEX_6,HEX_A}},
+ {"sts.l",{A_MACH,A_DEC_N},{HEX_4,REG_N,HEX_0,HEX_2}},
+ {"sts.l",{A_MACL,A_DEC_N},{HEX_4,REG_N,HEX_1,HEX_2}},
+ {"sts.l",{A_PR,A_DEC_N},{HEX_4,REG_N,HEX_2,HEX_2}},
+ {"sts.l",{FPUL_M,A_DEC_N},{HEX_4,REG_N,HEX_5,HEX_2}},
+ {"sts.l",{FPSCR_M,A_DEC_N},{HEX_4,REG_N,HEX_6,HEX_2}},
+ {"sub",{ A_REG_M,A_REG_N},{HEX_3,REG_N,REG_M,HEX_8}},
+ {"subc",{ A_REG_M,A_REG_N},{HEX_3,REG_N,REG_M,HEX_A}},
+ {"subv",{ A_REG_M,A_REG_N},{HEX_3,REG_N,REG_M,HEX_B}},
+ {"swap.b",{ A_REG_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_8}},
+ {"swap.w",{ A_REG_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_9}},
+ {"tas.b",{A_IND_N},{HEX_4,REG_N,HEX_1,HEX_B}},
+ {"trapa",{A_IMM},{HEX_C,HEX_3,IMM_8}},
+ {"tst",{A_IMM,A_R0},{HEX_C,HEX_8,IMM_8}},
+ {"tst",{ A_REG_M,A_REG_N},{HEX_2,REG_N,REG_M,HEX_8}},
+ {"tst.b",{A_IMM,A_R0_GBR},{HEX_C,HEX_C,IMM_8}},
+ {"xor",{A_IMM,A_R0},{HEX_C,HEX_A,IMM_8}},
+ {"xor",{ A_REG_M,A_REG_N},{HEX_2,REG_N,REG_M,HEX_A}},
+ {"xor.b",{A_IMM,A_R0_GBR},{HEX_C,HEX_E,IMM_8}},
+ {"xtrct",{ A_REG_M,A_REG_N},{HEX_2,REG_N,REG_M,HEX_D}},
+ {"mul.l",{ A_REG_M,A_REG_N},{HEX_0,REG_N,REG_M,HEX_7}},
+ {"dt",{A_REG_N},{HEX_4,REG_N,HEX_1,HEX_0}},
+ {"dmuls.l",{ A_REG_M,A_REG_N},{HEX_3,REG_N,REG_M,HEX_D}},
+ {"dmulu.l",{ A_REG_M,A_REG_N},{HEX_3,REG_N,REG_M,HEX_5}},
+ {"mac.l",{A_INC_M,A_INC_N},{HEX_0,REG_N,REG_M,HEX_F}},
+ {"braf",{A_REG_N},{HEX_0,REG_N,HEX_2,HEX_3}},
+ {"bsrf",{A_REG_N},{HEX_0,REG_N,HEX_0,HEX_3}},
+ {"fabs",{FD_REG_N},{HEX_F,REG_N,HEX_5,HEX_D}},
+ {"fadd",{F_REG_M,F_REG_N},{HEX_F,REG_N,REG_M,HEX_0}},
+ {"fadd",{D_REG_M,D_REG_N},{HEX_F,REG_N,REG_M,HEX_0}},
+ {"fcmp/eq",{F_REG_M,F_REG_N},{HEX_F,REG_N,REG_M,HEX_4}},
+ {"fcmp/eq",{D_REG_M,D_REG_N},{HEX_F,REG_N,REG_M,HEX_4}},
+ {"fcmp/gt",{F_REG_M,F_REG_N},{HEX_F,REG_N,REG_M,HEX_5}},
+ {"fcmp/gt",{D_REG_M,D_REG_N},{HEX_F,REG_N,REG_M,HEX_5}},
+ {"fcnvds",{D_REG_N,FPUL_M},{HEX_F,REG_N,HEX_B,HEX_D}},
+ {"fcnvsd",{FPUL_M,D_REG_N},{HEX_F,REG_N,HEX_A,HEX_D}},
+ {"fdiv",{F_REG_M,F_REG_N},{HEX_F,REG_N,REG_M,HEX_3}},
+ {"fdiv",{D_REG_M,D_REG_N},{HEX_F,REG_N,REG_M,HEX_3}},
+ {"fipr",{V_REG_M,V_REG_N},{HEX_F,REG_NM,HEX_E,HEX_D}},
+ {"fldi0",{F_REG_N},{HEX_F,REG_N,HEX_8,HEX_D}},
+ {"fldi1",{F_REG_N},{HEX_F,REG_N,HEX_9,HEX_D}},
+ {"flds",{F_REG_N,FPUL_M},{HEX_F,REG_N,HEX_1,HEX_D}},
+ {"float",{FPUL_M,FD_REG_N},{HEX_F,REG_N,HEX_2,HEX_D}},
+ {"fmac",{F_FR0,F_REG_M,F_REG_N},{HEX_F,REG_N,REG_M,HEX_E}},
+ {"fmov",{F_REG_M,F_REG_N},{HEX_F,REG_N,REG_M,HEX_C}},
+ {"fmov",{DX_REG_M,DX_REG_N},{HEX_F,REG_N,REG_M,HEX_C}},
+ {"fmov",{A_IND_M,F_REG_N},{HEX_F,REG_N,REG_M,HEX_8}},
+ {"fmov",{A_IND_M,DX_REG_N},{HEX_F,REG_N,REG_M,HEX_8}},
+ {"fmov",{F_REG_M,A_IND_N},{HEX_F,REG_N,REG_M,HEX_A}},
+ {"fmov",{DX_REG_M,A_IND_N},{HEX_F,REG_N,REG_M,HEX_A}},
+ {"fmov",{A_INC_M,F_REG_N},{HEX_F,REG_N,REG_M,HEX_9}},
+ {"fmov",{A_INC_M,DX_REG_N},{HEX_F,REG_N,REG_M,HEX_9}},
+ {"fmov",{F_REG_M,A_DEC_N},{HEX_F,REG_N,REG_M,HEX_B}},
+ {"fmov",{DX_REG_M,A_DEC_N},{HEX_F,REG_N,REG_M,HEX_B}},
+ {"fmov",{A_IND_R0_REG_M,F_REG_N},{HEX_F,REG_N,REG_M,HEX_6}},
+ {"fmov",{A_IND_R0_REG_M,DX_REG_N},{HEX_F,REG_N,REG_M,HEX_6}},
+ {"fmov",{F_REG_M,A_IND_R0_REG_N},{HEX_F,REG_N,REG_M,HEX_7}},
+ {"fmov",{DX_REG_M,A_IND_R0_REG_N},{HEX_F,REG_N,REG_M,HEX_7}},
+ {"fmov.d",{A_IND_M,DX_REG_N},{HEX_F,REG_N,REG_M,HEX_8}},
+ {"fmov.d",{DX_REG_M,A_IND_N},{HEX_F,REG_N,REG_M,HEX_A}},
+ {"fmov.d",{A_INC_M,DX_REG_N},{HEX_F,REG_N,REG_M,HEX_9}},
+ {"fmov.d",{DX_REG_M,A_DEC_N},{HEX_F,REG_N,REG_M,HEX_B}},
+ {"fmov.d",{A_IND_R0_REG_M,DX_REG_N},{HEX_F,REG_N,REG_M,HEX_6}},
+ {"fmov.d",{DX_REG_M,A_IND_R0_REG_N},{HEX_F,REG_N,REG_M,HEX_7}},
+ {"fmov.s",{A_IND_M,F_REG_N},{HEX_F,REG_N,REG_M,HEX_8}},
+ {"fmov.s",{F_REG_M,A_IND_N},{HEX_F,REG_N,REG_M,HEX_A}},
+ {"fmov.s",{A_INC_M,F_REG_N},{HEX_F,REG_N,REG_M,HEX_9}},
+ {"fmov.s",{F_REG_M,A_DEC_N},{HEX_F,REG_N,REG_M,HEX_B}},
+ {"fmov.s",{A_IND_R0_REG_M,F_REG_N},{HEX_F,REG_N,REG_M,HEX_6}},
+ {"fmov.s",{F_REG_M,A_IND_R0_REG_N},{HEX_F,REG_N,REG_M,HEX_7}},
+ {"fmul",{F_REG_M,F_REG_N},{HEX_F,REG_N,REG_M,HEX_2}},
+ {"fmul",{D_REG_M,D_REG_N},{HEX_F,REG_N,REG_M,HEX_2}},
+ {"fneg",{FD_REG_N},{HEX_F,REG_N,HEX_4,HEX_D}},
+ {"frchg",{0},{HEX_F,HEX_B,HEX_F,HEX_D}},
+ {"fschg",{0},{HEX_F,HEX_3,HEX_F,HEX_D}},
+ {"fsqrt",{FD_REG_N},{HEX_F,REG_N,HEX_6,HEX_D}},
+ {"fsts",{FPUL_M,F_REG_N},{HEX_F,REG_N,HEX_0,HEX_D}},
+ {"fsub",{F_REG_M,F_REG_N},{HEX_F,REG_N,REG_M,HEX_1}},
+ {"fsub",{D_REG_M,D_REG_N},{HEX_F,REG_N,REG_M,HEX_1}},
+ {"ftrc",{FD_REG_N,FPUL_M},{HEX_F,REG_N,HEX_3,HEX_D}},
+ {"ftrv",{XMTRX_M4,V_REG_N},{HEX_F,REG_NM,HEX_F,HEX_D}},
+ { 0 },
+};
+
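+/*
+ * Decode and print a single 16-bit instruction: split it into four
+ * nibbles, find the first sh_table entry whose nibble pattern matches,
+ * then print the mnemonic and its operands. Instructions that match no
+ * entry are dumped as a raw .word at the end of the function.
+ */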
+static void print_sh_insn(u32 memaddr, u16 insn)
+{
+ int relmask = ~0;
+ int nibs[4] = { (insn >> 12) & 0xf, (insn >> 8) & 0xf, (insn >> 4) & 0xf, insn & 0xf};
+ int lastsp;
+ struct sh_opcode_info *op = sh_table;
+
+ for (; op->name; op++) {
+ int n;
+ int imm = 0;
+ int rn = 0;
+ int rm = 0;
+ int rb = 0;
+ int disp_pc;
+ int disp_pc_addr = 0;
+
+ for (n = 0; n < 4; n++) {
+ int i = op->nibbles[n];
+
+ if (i < 16) {
+ if (nibs[n] == i)
+ continue;
+ goto fail;
+ }
+ switch (i) {
+ case BRANCH_8:
+ imm = (nibs[2] << 4) | (nibs[3]);
+ if (imm & 0x80)
+ imm |= ~0xff;
+ imm = ((char)imm) * 2 + 4;
+ goto ok;
+ case BRANCH_12:
+ imm = ((nibs[1]) << 8) | (nibs[2] << 4) | (nibs[3]);
+ if (imm & 0x800)
+ imm |= ~0xfff;
+ imm = imm * 2 + 4;
+ goto ok;
+ case IMM_4:
+ imm = nibs[3];
+ goto ok;
+ case IMM_4BY2:
+ imm = nibs[3] <<1;
+ goto ok;
+ case IMM_4BY4:
+ imm = nibs[3] <<2;
+ goto ok;
+ case IMM_8:
+ imm = (nibs[2] << 4) | nibs[3];
+ goto ok;
+ case PCRELIMM_8BY2:
+ imm = ((nibs[2] << 4) | nibs[3]) <<1;
+ relmask = ~1;
+ goto ok;
+ case PCRELIMM_8BY4:
+ imm = ((nibs[2] << 4) | nibs[3]) <<2;
+ relmask = ~3;
+ goto ok;
+ case IMM_8BY2:
+ imm = ((nibs[2] << 4) | nibs[3]) <<1;
+ goto ok;
+ case IMM_8BY4:
+ imm = ((nibs[2] << 4) | nibs[3]) <<2;
+ goto ok;
+ case DISP_8:
+ imm = (nibs[2] << 4) | (nibs[3]);
+ goto ok;
+ case DISP_4:
+ imm = nibs[3];
+ goto ok;
+ case REG_N:
+ rn = nibs[n];
+ break;
+ case REG_M:
+ rm = nibs[n];
+ break;
+ case REG_NM:
+ rn = (nibs[n] & 0xc) >> 2;
+ rm = (nibs[n] & 0x3);
+ break;
+ case REG_B:
+ rb = nibs[n] & 0x07;
+ break;
+ default:
+ return;
+ }
+ }
+
+ ok:
+ printk("%-8s ", op->name);
+ lastsp = (op->arg[0] == A_END);
+ disp_pc = 0;
+ for (n = 0; n < 6 && op->arg[n] != A_END; n++) {
+ if (n && op->arg[1] != A_END)
+ printk(", ");
+ switch (op->arg[n]) {
+ case A_IMM:
+ printk("#%d", (char)(imm));
+ break;
+ case A_R0:
+ printk("r0");
+ break;
+ case A_REG_N:
+ printk("r%d", rn);
+ break;
+ case A_INC_N:
+ printk("@r%d+", rn);
+ break;
+ case A_DEC_N:
+ printk("@-r%d", rn);
+ break;
+ case A_IND_N:
+ printk("@r%d", rn);
+ break;
+ case A_DISP_REG_N:
+ printk("@(%d,r%d)", imm, rn);
+ break;
+ case A_REG_M:
+ printk("r%d", rm);
+ break;
+ case A_INC_M:
+ printk("@r%d+", rm);
+ break;
+ case A_DEC_M:
+ printk("@-r%d", rm);
+ break;
+ case A_IND_M:
+ printk("@r%d", rm);
+ break;
+ case A_DISP_REG_M:
+ printk("@(%d,r%d)", imm, rm);
+ break;
+ case A_REG_B:
+ printk("r%d_bank", rb);
+ break;
+ case A_DISP_PC:
+ disp_pc = 1;
+ disp_pc_addr = imm + 4 + (memaddr & relmask);
+ printk("%08x <%pS>", disp_pc_addr,
+ (void *)disp_pc_addr);
+ break;
+ case A_IND_R0_REG_N:
+ printk("@(r0,r%d)", rn);
+ break;
+ case A_IND_R0_REG_M:
+ printk("@(r0,r%d)", rm);
+ break;
+ case A_DISP_GBR:
+ printk("@(%d,gbr)",imm);
+ break;
+ case A_R0_GBR:
+ printk("@(r0,gbr)");
+ break;
+ case A_BDISP12:
+ case A_BDISP8:
+ printk("%08x", imm + memaddr);
+ break;
+ case A_SR:
+ printk("sr");
+ break;
+ case A_GBR:
+ printk("gbr");
+ break;
+ case A_VBR:
+ printk("vbr");
+ break;
+ case A_SSR:
+ printk("ssr");
+ break;
+ case A_SPC:
+ printk("spc");
+ break;
+ case A_MACH:
+ printk("mach");
+ break;
+ case A_MACL:
+ printk("macl");
+ break;
+ case A_PR:
+ printk("pr");
+ break;
+ case A_SGR:
+ printk("sgr");
+ break;
+ case A_DBR:
+ printk("dbr");
+ break;
+ case FD_REG_N:
+ if (0)
+ goto d_reg_n;
+ case F_REG_N:
+ printk("fr%d", rn);
+ break;
+ case F_REG_M:
+ printk("fr%d", rm);
+ break;
+ case DX_REG_N:
+ if (rn & 1) {
+ printk("xd%d", rn & ~1);
+ break;
+ }
+ d_reg_n:
+ case D_REG_N:
+ printk("dr%d", rn);
+ break;
+ case DX_REG_M:
+ if (rm & 1) {
+ printk("xd%d", rm & ~1);
+ break;
+ }
+ case D_REG_M:
+ printk("dr%d", rm);
+ break;
+ case FPSCR_M:
+ case FPSCR_N:
+ printk("fpscr");
+ break;
+ case FPUL_M:
+ case FPUL_N:
+ printk("fpul");
+ break;
+ case F_FR0:
+ printk("fr0");
+ break;
+ case V_REG_N:
+ printk("fv%d", rn*4);
+ break;
+ case V_REG_M:
+ printk("fv%d", rm*4);
+ break;
+ case XMTRX_M4:
+ printk("xmtrx");
+ break;
+ default:
+ return;
+ }
+ }
+
+ if (disp_pc && strcmp(op->name, "mova") != 0) {
+ u32 val;
+
+ if (relmask == ~1)
+ __get_user(val, (u16 *)disp_pc_addr);
+ else
+ __get_user(val, (u32 *)disp_pc_addr);
+
+ printk(" ! %08x <%pS>", val, (void *)val);
+ }
+
+ return;
+ fail:
+ ;
+
+ }
+
+ printk(".word 0x%x%x%x%x", nibs[0], nibs[1], nibs[2], nibs[3]);
+}
+
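+/*
+ * Dump the instructions surrounding regs->pc: a few words before the
+ * faulting instruction and a few after, with the current one marked.
+ */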
+void show_code(struct pt_regs *regs)
+{
+ unsigned short *pc = (unsigned short *)regs->pc;
+ long i;
+
+ if (regs->pc & 0x1)
+ return;
+
+ printk("Code:\n");
+
+ for (i = -3 ; i < 6 ; i++) {
+ unsigned short insn;
+
+ if (__get_user(insn, pc + i)) {
+ printk(" (Bad address in pc)\n");
+ break;
+ }
+
+ printk("%s%08lx: ", (i ? " ": "->"), (unsigned long)(pc + i));
+ print_sh_insn((unsigned long)(pc + i), insn);
+ printk("\n");
+ }
+
+ printk("\n");
+}
diff --git a/arch/sh/kernel/dma-nommu.c b/arch/sh/kernel/dma-nommu.c
new file mode 100644
index 00000000..5b0bfcda
--- /dev/null
+++ b/arch/sh/kernel/dma-nommu.c
@@ -0,0 +1,82 @@
+/*
+ * DMA mapping support for platforms lacking IOMMUs.
+ *
+ * Copyright (C) 2009 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+
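+/*
+ * With no IOMMU the DMA address is simply the physical address of the
+ * buffer; all that is left to do is synchronize the CPU caches on
+ * non-coherent platforms.
+ */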
+static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ dma_addr_t addr = page_to_phys(page) + offset;
+
+ WARN_ON(size == 0);
+ dma_cache_sync(dev, page_address(page) + offset, size, dir);
+
+ return addr;
+}
+
+static int nommu_map_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ struct scatterlist *s;
+ int i;
+
+ WARN_ON(nents == 0 || sg[0].length == 0);
+
+ for_each_sg(sg, s, nents, i) {
+ BUG_ON(!sg_page(s));
+
+ dma_cache_sync(dev, sg_virt(s), s->length, dir);
+
+ s->dma_address = sg_phys(s);
+ s->dma_length = s->length;
+ }
+
+ return nents;
+}
+
+#ifdef CONFIG_DMA_NONCOHERENT
+static void nommu_sync_single(struct device *dev, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir)
+{
+ dma_cache_sync(dev, phys_to_virt(addr), size, dir);
+}
+
+static void nommu_sync_sg(struct device *dev, struct scatterlist *sg,
+ int nelems, enum dma_data_direction dir)
+{
+ struct scatterlist *s;
+ int i;
+
+ for_each_sg(sg, s, nelems, i)
+ dma_cache_sync(dev, sg_virt(s), s->length, dir);
+}
+#endif
+
+struct dma_map_ops nommu_dma_ops = {
+ .alloc = dma_generic_alloc_coherent,
+ .free = dma_generic_free_coherent,
+ .map_page = nommu_map_page,
+ .map_sg = nommu_map_sg,
+#ifdef CONFIG_DMA_NONCOHERENT
+ .sync_single_for_device = nommu_sync_single,
+ .sync_sg_for_device = nommu_sync_sg,
+#endif
+ .is_phys = 1,
+};
+
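+/*
+ * Install the nommu DMA operations unless a platform has already
+ * registered its own dma_ops.
+ */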
+void __init no_iommu_init(void)
+{
+ if (dma_ops)
+ return;
+ dma_ops = &nommu_dma_ops;
+}
diff --git a/arch/sh/kernel/dumpstack.c b/arch/sh/kernel/dumpstack.c
new file mode 100644
index 00000000..694158b9
--- /dev/null
+++ b/arch/sh/kernel/dumpstack.c
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
+ * Copyright (C) 2009 Matt Fleming
+ */
+#include <linux/kallsyms.h>
+#include <linux/ftrace.h>
+#include <linux/debug_locks.h>
+#include <asm/unwinder.h>
+#include <asm/stacktrace.h>
+
+void printk_address(unsigned long address, int reliable)
+{
+ printk(" [<%p>] %s%pS\n", (void *) address,
+ reliable ? "" : "? ", (void *) address);
+}
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+static void
+print_ftrace_graph_addr(unsigned long addr, void *data,
+ const struct stacktrace_ops *ops,
+ struct thread_info *tinfo, int *graph)
+{
+ struct task_struct *task = tinfo->task;
+ unsigned long ret_addr;
+ int index = task->curr_ret_stack;
+
+ if (addr != (unsigned long)return_to_handler)
+ return;
+
+ if (!task->ret_stack || index < *graph)
+ return;
+
+ index -= *graph;
+ ret_addr = task->ret_stack[index].ret;
+
+ ops->address(data, ret_addr, 1);
+
+ (*graph)++;
+}
+#else
+static inline void
+print_ftrace_graph_addr(unsigned long addr, void *data,
+ const struct stacktrace_ops *ops,
+ struct thread_info *tinfo, int *graph)
+{ }
+#endif
+
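+/*
+ * Conservative stack reader: scan every word on the kernel stack and
+ * report anything that looks like a kernel text address.
+ */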
+void
+stack_reader_dump(struct task_struct *task, struct pt_regs *regs,
+ unsigned long *sp, const struct stacktrace_ops *ops,
+ void *data)
+{
+ struct thread_info *context;
+ int graph = 0;
+
+ context = (struct thread_info *)
+ ((unsigned long)sp & (~(THREAD_SIZE - 1)));
+
+ while (!kstack_end(sp)) {
+ unsigned long addr = *sp++;
+
+ if (__kernel_text_address(addr)) {
+ ops->address(data, addr, 1);
+
+ print_ftrace_graph_addr(addr, data, ops,
+ context, &graph);
+ }
+ }
+}
+
+static int print_trace_stack(void *data, char *name)
+{
+ printk("%s <%s> ", (char *)data, name);
+ return 0;
+}
+
+/*
+ * Print one address/symbol entry per line.
+ */
+static void print_trace_address(void *data, unsigned long addr, int reliable)
+{
+ printk("%s", (char *)data);
+ printk_address(addr, reliable);
+}
+
+static const struct stacktrace_ops print_trace_ops = {
+ .stack = print_trace_stack,
+ .address = print_trace_address,
+};
+
+void show_trace(struct task_struct *tsk, unsigned long *sp,
+ struct pt_regs *regs)
+{
+ if (regs && user_mode(regs))
+ return;
+
+ printk("\nCall trace:\n");
+
+ unwind_stack(tsk, regs, sp, &print_trace_ops, "");
+
+ printk("\n");
+
+ if (!tsk)
+ tsk = current;
+
+ debug_show_held_locks(tsk);
+}
diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c
new file mode 100644
index 00000000..49c09c7d
--- /dev/null
+++ b/arch/sh/kernel/dwarf.c
@@ -0,0 +1,1220 @@
+/*
+ * Copyright (C) 2009 Matt Fleming <matt@console-pimps.org>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * This is an implementation of a DWARF unwinder. Its main purpose is
+ * generating stacktrace information. It is based on the DWARF 3
+ * specification from http://www.dwarfstd.org.
+ *
+ * TODO:
+ * - DWARF64 doesn't work.
+ * - Registers with DWARF_VAL_OFFSET rules aren't handled properly.
+ */
+
+/* #define DEBUG */
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/mempool.h>
+#include <linux/mm.h>
+#include <linux/elf.h>
+#include <linux/ftrace.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <asm/dwarf.h>
+#include <asm/unwinder.h>
+#include <asm/sections.h>
+#include <asm/unaligned.h>
+#include <asm/stacktrace.h>
+
+/* Reserve enough memory for two stack frames */
+#define DWARF_FRAME_MIN_REQ 2
+/* ... with 4 registers per frame. */
+#define DWARF_REG_MIN_REQ (DWARF_FRAME_MIN_REQ * 4)
+
+static struct kmem_cache *dwarf_frame_cachep;
+static mempool_t *dwarf_frame_pool;
+
+static struct kmem_cache *dwarf_reg_cachep;
+static mempool_t *dwarf_reg_pool;
+
+static struct rb_root cie_root;
+static DEFINE_SPINLOCK(dwarf_cie_lock);
+
+static struct rb_root fde_root;
+static DEFINE_SPINLOCK(dwarf_fde_lock);
+
+static struct dwarf_cie *cached_cie;
+
+static unsigned int dwarf_unwinder_ready;
+
+/**
+ * dwarf_frame_alloc_reg - allocate memory for a DWARF register
+ * @frame: the DWARF frame whose list of registers we insert on
+ * @reg_num: the register number
+ *
+ * Allocate space for, and initialise, a dwarf reg from
+ * dwarf_reg_pool and insert it onto the (unsorted) linked-list of
+ * dwarf registers for @frame.
+ *
+ * Return the initialised DWARF reg.
+ */
+static struct dwarf_reg *dwarf_frame_alloc_reg(struct dwarf_frame *frame,
+ unsigned int reg_num)
+{
+ struct dwarf_reg *reg;
+
+ reg = mempool_alloc(dwarf_reg_pool, GFP_ATOMIC);
+ if (!reg) {
+ printk(KERN_WARNING "Unable to allocate a DWARF register\n");
+ /*
+ * Let's just bomb hard here, we have no way to
+ * gracefully recover.
+ */
+ UNWINDER_BUG();
+ }
+
+ reg->number = reg_num;
+ reg->addr = 0;
+ reg->flags = 0;
+
+ list_add(&reg->link, &frame->reg_list);
+
+ return reg;
+}
+
+static void dwarf_frame_free_regs(struct dwarf_frame *frame)
+{
+ struct dwarf_reg *reg, *n;
+
+ list_for_each_entry_safe(reg, n, &frame->reg_list, link) {
+ list_del(&reg->link);
+ mempool_free(reg, dwarf_reg_pool);
+ }
+}
+
+/**
+ * dwarf_frame_reg - return a DWARF register
+ * @frame: the DWARF frame to search in for @reg_num
+ * @reg_num: the register number to search for
+ *
+ * Look up and return the dwarf reg @reg_num for this frame. Return
+ * NULL if @reg_num is an invalid register number.
+ */
+static struct dwarf_reg *dwarf_frame_reg(struct dwarf_frame *frame,
+ unsigned int reg_num)
+{
+ struct dwarf_reg *reg;
+
+ list_for_each_entry(reg, &frame->reg_list, link) {
+ if (reg->number == reg_num)
+ return reg;
+ }
+
+ return NULL;
+}
+
+/**
+ * dwarf_read_addr - read dwarf data
+ * @src: source address of data
+ * @dst: destination address to store the data to
+ *
+ * Read 'n' bytes from @src, where 'n' is the size of an address on
+ * the native machine. Take care when reading from @src and writing
+ * to @dst, because they can be arbitrarily aligned. Return 'n', the
+ * number of bytes read.
+ */
+static inline int dwarf_read_addr(unsigned long *src, unsigned long *dst)
+{
+ u32 val = get_unaligned(src);
+ put_unaligned(val, dst);
+ return sizeof(unsigned long *);
+}
+
+/**
+ * dwarf_read_uleb128 - read unsigned LEB128 data
+ * @addr: the address where the ULEB128 data is stored
+ * @ret: address to store the result
+ *
+ * Decode an unsigned LEB128 encoded datum. The algorithm is taken
+ * from Appendix C of the DWARF 3 spec. For information on the
+ * encodings refer to section "7.6 - Variable Length Data". Return
+ * the number of bytes read.
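+ * For example, the byte sequence 0xe5 0x8e 0x26 decodes to 624485.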
+ */
+static inline unsigned long dwarf_read_uleb128(char *addr, unsigned int *ret)
+{
+ unsigned int result;
+ unsigned char byte;
+ int shift, count;
+
+ result = 0;
+ shift = 0;
+ count = 0;
+
+ while (1) {
+ byte = __raw_readb(addr);
+ addr++;
+ count++;
+
+ result |= (byte & 0x7f) << shift;
+ shift += 7;
+
+ if (!(byte & 0x80))
+ break;
+ }
+
+ *ret = result;
+
+ return count;
+}
+
+/**
+ * dwarf_read_leb128 - read signed LEB128 data
+ * @addr: the address of the LEB128 encoded data
+ * @ret: address to store the result
+ *
+ * Decode signed LEB128 data. The algorithm is taken from Appendix
+ * C of the DWARF 3 spec. Return the number of bytes read.
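+ * For example, the byte sequence 0xc0 0xbb 0x78 decodes to -123456.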
+ */
+static inline unsigned long dwarf_read_leb128(char *addr, int *ret)
+{
+ unsigned char byte;
+ int result, shift;
+ int num_bits;
+ int count;
+
+ result = 0;
+ shift = 0;
+ count = 0;
+
+ while (1) {
+ byte = __raw_readb(addr);
+ addr++;
+ result |= (byte & 0x7f) << shift;
+ shift += 7;
+ count++;
+
+ if (!(byte & 0x80))
+ break;
+ }
+
+ /* The number of bits in a signed integer. */
+ num_bits = 8 * sizeof(result);
+
+ if ((shift < num_bits) && (byte & 0x40))
+ result |= (-1 << shift);
+
+ *ret = result;
+
+ return count;
+}
+
+/**
+ * dwarf_read_encoded_value - return the decoded value at @addr
+ * @addr: the address of the encoded value
+ * @val: where to write the decoded value
+ * @encoding: the encoding with which we can decode @addr
+ *
+ * GCC emits encoded addresses in the .eh_frame FDE entries. Decode
+ * the value at @addr using @encoding. The decoded value is written
+ * to @val and the number of bytes read is returned.
+ */
+static int dwarf_read_encoded_value(char *addr, unsigned long *val,
+ char encoding)
+{
+ unsigned long decoded_addr = 0;
+ int count = 0;
+
+ switch (encoding & 0x70) {
+ case DW_EH_PE_absptr:
+ break;
+ case DW_EH_PE_pcrel:
+ decoded_addr = (unsigned long)addr;
+ break;
+ default:
+ pr_debug("encoding=0x%x\n", (encoding & 0x70));
+ UNWINDER_BUG();
+ }
+
+ if ((encoding & 0x07) == 0x00)
+ encoding |= DW_EH_PE_udata4;
+
+ switch (encoding & 0x0f) {
+ case DW_EH_PE_sdata4:
+ case DW_EH_PE_udata4:
+ count += 4;
+ decoded_addr += get_unaligned((u32 *)addr);
+ __raw_writel(decoded_addr, val);
+ break;
+ default:
+ pr_debug("encoding=0x%x\n", encoding);
+ UNWINDER_BUG();
+ }
+
+ return count;
+}
+
+/**
+ * dwarf_entry_len - return the length of an FDE or CIE
+ * @addr: the address of the entry
+ * @len: the length of the entry
+ *
+ * Read the initial_length field of the entry and store the size of
+ * the entry in @len. We return the number of bytes read. Return a
+ * count of 0 on error.
+ */
+static inline int dwarf_entry_len(char *addr, unsigned long *len)
+{
+ u32 initial_len;
+ int count;
+
+ initial_len = get_unaligned((u32 *)addr);
+ count = 4;
+
+ /*
+ * An initial length field value in the range DW_EXT_LO -
+ * DW_EXT_HI indicates an extension, and should not be
+ * interpreted as a length. The only extension that we currently
+ * understand is the use of DWARF64 addresses.
+ */
+ if (initial_len >= DW_EXT_LO && initial_len <= DW_EXT_HI) {
+ /*
+ * The 64-bit length field immediately follows the
+ * compulsory 32-bit length field.
+ */
+ if (initial_len == DW_EXT_DWARF64) {
+ *len = get_unaligned((u64 *)(addr + 4));
+ count = 12;
+ } else {
+ printk(KERN_WARNING "Unknown DWARF extension\n");
+ count = 0;
+ }
+ } else
+ *len = initial_len;
+
+ return count;
+}
+
+/**
+ * dwarf_lookup_cie - locate the cie
+ * @cie_ptr: pointer to help with lookup
+ */
+static struct dwarf_cie *dwarf_lookup_cie(unsigned long cie_ptr)
+{
+ struct rb_node **rb_node = &cie_root.rb_node;
+ struct dwarf_cie *cie = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dwarf_cie_lock, flags);
+
+ /*
+ * We've cached the last CIE we looked up because chances are
+ * that the FDE wants this CIE.
+ */
+ if (cached_cie && cached_cie->cie_pointer == cie_ptr) {
+ cie = cached_cie;
+ goto out;
+ }
+
+ while (*rb_node) {
+ struct dwarf_cie *cie_tmp;
+
+ cie_tmp = rb_entry(*rb_node, struct dwarf_cie, node);
+ BUG_ON(!cie_tmp);
+
+ if (cie_ptr == cie_tmp->cie_pointer) {
+ cie = cie_tmp;
+ cached_cie = cie_tmp;
+ goto out;
+ } else {
+ if (cie_ptr < cie_tmp->cie_pointer)
+ rb_node = &(*rb_node)->rb_left;
+ else
+ rb_node = &(*rb_node)->rb_right;
+ }
+ }
+
+out:
+ spin_unlock_irqrestore(&dwarf_cie_lock, flags);
+ return cie;
+}
+
+/**
+ * dwarf_lookup_fde - locate the FDE that covers pc
+ * @pc: the program counter
+ */
+struct dwarf_fde *dwarf_lookup_fde(unsigned long pc)
+{
+ struct rb_node **rb_node = &fde_root.rb_node;
+ struct dwarf_fde *fde = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dwarf_fde_lock, flags);
+
+ while (*rb_node) {
+ struct dwarf_fde *fde_tmp;
+ unsigned long tmp_start, tmp_end;
+
+ fde_tmp = rb_entry(*rb_node, struct dwarf_fde, node);
+ BUG_ON(!fde_tmp);
+
+ tmp_start = fde_tmp->initial_location;
+ tmp_end = fde_tmp->initial_location + fde_tmp->address_range;
+
+ if (pc < tmp_start) {
+ rb_node = &(*rb_node)->rb_left;
+ } else {
+ if (pc < tmp_end) {
+ fde = fde_tmp;
+ goto out;
+ } else
+ rb_node = &(*rb_node)->rb_right;
+ }
+ }
+
+out:
+ spin_unlock_irqrestore(&dwarf_fde_lock, flags);
+
+ return fde;
+}
+
+/**
+ * dwarf_cfa_execute_insns - execute instructions to calculate a CFA
+ * @insn_start: address of the first instruction
+ * @insn_end: address of the last instruction
+ * @cie: the CIE for this function
+ * @fde: the FDE for this function
+ * @frame: the instructions calculate the CFA for this frame
+ * @pc: the program counter of the address we're interested in
+ *
+ * Execute the Call Frame instruction sequence starting at
+ * @insn_start and ending at @insn_end. The instructions describe
+ * how to calculate the Canonical Frame Address of a stackframe.
+ * Store the results in @frame.
+ */
+static int dwarf_cfa_execute_insns(unsigned char *insn_start,
+ unsigned char *insn_end,
+ struct dwarf_cie *cie,
+ struct dwarf_fde *fde,
+ struct dwarf_frame *frame,
+ unsigned long pc)
+{
+ unsigned char insn;
+ unsigned char *current_insn;
+ unsigned int count, delta, reg, expr_len, offset;
+ struct dwarf_reg *regp;
+
+ current_insn = insn_start;
+
+ while (current_insn < insn_end && frame->pc <= pc) {
+ insn = __raw_readb(current_insn++);
+
+ /*
+ * Firstly, handle the opcodes that embed their operands
+ * in the instructions.
+ */
+ switch (DW_CFA_opcode(insn)) {
+ case DW_CFA_advance_loc:
+ delta = DW_CFA_operand(insn);
+ delta *= cie->code_alignment_factor;
+ frame->pc += delta;
+ continue;
+ /* NOTREACHED */
+ case DW_CFA_offset:
+ reg = DW_CFA_operand(insn);
+ count = dwarf_read_uleb128(current_insn, &offset);
+ current_insn += count;
+ offset *= cie->data_alignment_factor;
+ regp = dwarf_frame_alloc_reg(frame, reg);
+ regp->addr = offset;
+ regp->flags |= DWARF_REG_OFFSET;
+ continue;
+ /* NOTREACHED */
+ case DW_CFA_restore:
+ reg = DW_CFA_operand(insn);
+ continue;
+ /* NOTREACHED */
+ }
+
+ /*
+ * Secondly, handle the opcodes that don't embed their
+ * operands in the instruction.
+ */
+ switch (insn) {
+ case DW_CFA_nop:
+ continue;
+ case DW_CFA_advance_loc1:
+ delta = *current_insn++;
+ frame->pc += delta * cie->code_alignment_factor;
+ break;
+ case DW_CFA_advance_loc2:
+ delta = get_unaligned((u16 *)current_insn);
+ current_insn += 2;
+ frame->pc += delta * cie->code_alignment_factor;
+ break;
+ case DW_CFA_advance_loc4:
+ delta = get_unaligned((u32 *)current_insn);
+ current_insn += 4;
+ frame->pc += delta * cie->code_alignment_factor;
+ break;
+ case DW_CFA_offset_extended:
+ count = dwarf_read_uleb128(current_insn, &reg);
+ current_insn += count;
+ count = dwarf_read_uleb128(current_insn, &offset);
+ current_insn += count;
+ offset *= cie->data_alignment_factor;
+ break;
+ case DW_CFA_restore_extended:
+ count = dwarf_read_uleb128(current_insn, &reg);
+ current_insn += count;
+ break;
+ case DW_CFA_undefined:
+ count = dwarf_read_uleb128(current_insn, &reg);
+ current_insn += count;
+ regp = dwarf_frame_alloc_reg(frame, reg);
+ regp->flags |= DWARF_UNDEFINED;
+ break;
+ case DW_CFA_def_cfa:
+ count = dwarf_read_uleb128(current_insn,
+ &frame->cfa_register);
+ current_insn += count;
+ count = dwarf_read_uleb128(current_insn,
+ &frame->cfa_offset);
+ current_insn += count;
+
+ frame->flags |= DWARF_FRAME_CFA_REG_OFFSET;
+ break;
+ case DW_CFA_def_cfa_register:
+ count = dwarf_read_uleb128(current_insn,
+ &frame->cfa_register);
+ current_insn += count;
+ frame->flags |= DWARF_FRAME_CFA_REG_OFFSET;
+ break;
+ case DW_CFA_def_cfa_offset:
+ count = dwarf_read_uleb128(current_insn, &offset);
+ current_insn += count;
+ frame->cfa_offset = offset;
+ break;
+ case DW_CFA_def_cfa_expression:
+ count = dwarf_read_uleb128(current_insn, &expr_len);
+ current_insn += count;
+
+ frame->cfa_expr = current_insn;
+ frame->cfa_expr_len = expr_len;
+ current_insn += expr_len;
+
+ frame->flags |= DWARF_FRAME_CFA_REG_EXP;
+ break;
+ case DW_CFA_offset_extended_sf:
+ count = dwarf_read_uleb128(current_insn, &reg);
+ current_insn += count;
+ count = dwarf_read_leb128(current_insn, &offset);
+ current_insn += count;
+ offset *= cie->data_alignment_factor;
+ regp = dwarf_frame_alloc_reg(frame, reg);
+ regp->flags |= DWARF_REG_OFFSET;
+ regp->addr = offset;
+ break;
+ case DW_CFA_val_offset:
+ count = dwarf_read_uleb128(current_insn, &reg);
+ current_insn += count;
+ count = dwarf_read_leb128(current_insn, &offset);
+ current_insn += count;
+ offset *= cie->data_alignment_factor;
+ regp = dwarf_frame_alloc_reg(frame, reg);
+ regp->flags |= DWARF_VAL_OFFSET;
+ regp->addr = offset;
+ break;
+ case DW_CFA_GNU_args_size:
+ count = dwarf_read_uleb128(current_insn, &offset);
+ current_insn += count;
+ break;
+ case DW_CFA_GNU_negative_offset_extended:
+ count = dwarf_read_uleb128(current_insn, &reg);
+ current_insn += count;
+ count = dwarf_read_uleb128(current_insn, &offset);
+ current_insn += count;
+ offset *= cie->data_alignment_factor;
+
+ regp = dwarf_frame_alloc_reg(frame, reg);
+ regp->flags |= DWARF_REG_OFFSET;
+ regp->addr = -offset;
+ break;
+ default:
+ pr_debug("unhandled DWARF instruction 0x%x\n", insn);
+ UNWINDER_BUG();
+ break;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * dwarf_free_frame - free the memory allocated for @frame
+ * @frame: the frame to free
+ */
+void dwarf_free_frame(struct dwarf_frame *frame)
+{
+ dwarf_frame_free_regs(frame);
+ mempool_free(frame, dwarf_frame_pool);
+}
+
+extern void ret_from_irq(void);
+
+/**
+ * dwarf_unwind_stack - unwind the stack
+ *
+ * @pc: address of the function to unwind
+ * @prev: struct dwarf_frame of the previous stackframe on the callstack
+ *
+ * Return a struct dwarf_frame representing the most recent frame
+ * on the callstack. Each of the lower (older) stack frames are
+ * linked via the "prev" member.
+ */
+struct dwarf_frame *dwarf_unwind_stack(unsigned long pc,
+ struct dwarf_frame *prev)
+{
+ struct dwarf_frame *frame;
+ struct dwarf_cie *cie;
+ struct dwarf_fde *fde;
+ struct dwarf_reg *reg;
+ unsigned long addr;
+
+ /*
+ * If we've been called before initialization has
+ * completed, bail out immediately.
+ */
+ if (!dwarf_unwinder_ready)
+ return NULL;
+
+ /*
+ * If we're starting at the top of the stack we need to get the
+ * contents of a physical register to get the CFA in order to
+ * begin the virtual unwinding of the stack.
+ *
+ * NOTE: the return address is guaranteed to be set up by the
+ * time this function makes its first function call.
+ */
+ if (!pc || !prev)
+ pc = (unsigned long)current_text_addr();
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ /*
+ * If our stack has been patched by the function graph tracer
+ * then we might see the address of return_to_handler() where we
+ * expected to find the real return address.
+ */
+ if (pc == (unsigned long)&return_to_handler) {
+ int index = current->curr_ret_stack;
+
+ /*
+ * We currently have no way of tracking how many
+ * return_to_handler()'s we've seen. If there is more
+ * than one patched return address on our stack,
+ * complain loudly.
+ */
+ WARN_ON(index > 0);
+
+ pc = current->ret_stack[index].ret;
+ }
+#endif
+
+ frame = mempool_alloc(dwarf_frame_pool, GFP_ATOMIC);
+ if (!frame) {
+ printk(KERN_ERR "Unable to allocate a dwarf frame\n");
+ UNWINDER_BUG();
+ }
+
+ INIT_LIST_HEAD(&frame->reg_list);
+ frame->flags = 0;
+ frame->prev = prev;
+ frame->return_addr = 0;
+
+ fde = dwarf_lookup_fde(pc);
+ if (!fde) {
+ /*
+ * This is our normal exit path. There are two reasons
+ * why we might exit here,
+ *
+ * a) pc has no associated DWARF frame info and so
+ * we don't know how to unwind this frame. This is
+ * usually the case when we're trying to unwind a
+ * frame that was called from some assembly code
+ * that has no DWARF info, e.g. syscalls.
+ *
+ * b) the DWARF debug info for pc is bogus. There's
+ * really no way to distinguish this case from the
+ * case above, which sucks because we could print a
+ * warning here.
+ */
+ goto bail;
+ }
+
+ cie = dwarf_lookup_cie(fde->cie_pointer);
+
+ frame->pc = fde->initial_location;
+
+ /* CIE initial instructions */
+ dwarf_cfa_execute_insns(cie->initial_instructions,
+ cie->instructions_end, cie, fde,
+ frame, pc);
+
+ /* FDE instructions */
+ dwarf_cfa_execute_insns(fde->instructions, fde->end, cie,
+ fde, frame, pc);
+
+ /* Calculate the CFA */
+ switch (frame->flags) {
+ case DWARF_FRAME_CFA_REG_OFFSET:
+ if (prev) {
+ reg = dwarf_frame_reg(prev, frame->cfa_register);
+ UNWINDER_BUG_ON(!reg);
+ UNWINDER_BUG_ON(reg->flags != DWARF_REG_OFFSET);
+
+ addr = prev->cfa + reg->addr;
+ frame->cfa = __raw_readl(addr);
+
+ } else {
+ /*
+ * Again, we're starting from the top of the
+ * stack. We need to physically read
+ * the contents of a register in order to get
+ * the Canonical Frame Address for this
+ * function.
+ */
+ frame->cfa = dwarf_read_arch_reg(frame->cfa_register);
+ }
+
+ frame->cfa += frame->cfa_offset;
+ break;
+ default:
+ UNWINDER_BUG();
+ }
+
+ reg = dwarf_frame_reg(frame, DWARF_ARCH_RA_REG);
+
+ /*
+ * If we haven't seen the return address register or the return
+ * address column is undefined then we must assume that this is
+ * the end of the callstack.
+ */
+ if (!reg || reg->flags == DWARF_UNDEFINED)
+ goto bail;
+
+ UNWINDER_BUG_ON(reg->flags != DWARF_REG_OFFSET);
+
+ addr = frame->cfa + reg->addr;
+ frame->return_addr = __raw_readl(addr);
+
+ /*
+ * Ah, the joys of unwinding through interrupts.
+ *
+ * Interrupts are tricky - the DWARF info needs to be _really_
+ * accurate and unfortunately I'm seeing a lot of bogus DWARF
+ * info. For example, I've seen interrupts occur in epilogues
+ * just after the frame pointer (r14) had been restored. The
+ * problem was that the DWARF info claimed that the CFA could be
+ * reached by using the value of the frame pointer before it was
+ * restored.
+ *
+ * So until the compiler can be trusted to produce reliable
+ * DWARF info when it really matters, let's stop unwinding once
+ * we've calculated the function that was interrupted.
+ */
+ if (prev && prev->pc == (unsigned long)ret_from_irq)
+ frame->return_addr = 0;
+
+ return frame;
+
+bail:
+ dwarf_free_frame(frame);
+ return NULL;
+}
+
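+/*
+ * Parse a single CIE from the .eh_frame data at @p and insert it into
+ * the global CIE rb-tree (and, for modules, onto the module's CIE list).
+ */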
+static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
+ unsigned char *end, struct module *mod)
+{
+ struct rb_node **rb_node = &cie_root.rb_node;
+ struct rb_node *parent = *rb_node;
+ struct dwarf_cie *cie;
+ unsigned long flags;
+ int count;
+
+ cie = kzalloc(sizeof(*cie), GFP_KERNEL);
+ if (!cie)
+ return -ENOMEM;
+
+ cie->length = len;
+
+ /*
+ * Record the offset into the .eh_frame section
+ * for this CIE. It allows this CIE to be
+ * quickly and easily looked up from the
+ * corresponding FDE.
+ */
+ cie->cie_pointer = (unsigned long)entry;
+
+ cie->version = *(char *)p++;
+ UNWINDER_BUG_ON(cie->version != 1);
+
+ cie->augmentation = p;
+ p += strlen(cie->augmentation) + 1;
+
+ count = dwarf_read_uleb128(p, &cie->code_alignment_factor);
+ p += count;
+
+ count = dwarf_read_leb128(p, &cie->data_alignment_factor);
+ p += count;
+
+ /*
+ * Which column in the rule table contains the
+ * return address?
+ */
+ if (cie->version == 1) {
+ cie->return_address_reg = __raw_readb(p);
+ p++;
+ } else {
+ count = dwarf_read_uleb128(p, &cie->return_address_reg);
+ p += count;
+ }
+
+ if (cie->augmentation[0] == 'z') {
+ unsigned int length, count;
+ cie->flags |= DWARF_CIE_Z_AUGMENTATION;
+
+ count = dwarf_read_uleb128(p, &length);
+ p += count;
+
+ UNWINDER_BUG_ON((unsigned char *)p > end);
+
+ cie->initial_instructions = p + length;
+ cie->augmentation++;
+ }
+
+ while (*cie->augmentation) {
+ /*
+ * "L" indicates a byte showing how the
+ * LSDA pointer is encoded. Skip it.
+ */
+ if (*cie->augmentation == 'L') {
+ p++;
+ cie->augmentation++;
+ } else if (*cie->augmentation == 'R') {
+ /*
+ * "R" indicates a byte showing
+ * how FDE addresses are
+ * encoded.
+ */
+ cie->encoding = *(char *)p++;
+ cie->augmentation++;
+ } else if (*cie->augmentation == 'P') {
+ /*
+ * "R" indicates a personality
+ * routine in the CIE
+ * augmentation.
+ */
+ UNWINDER_BUG();
+ } else if (*cie->augmentation == 'S') {
+ UNWINDER_BUG();
+ } else {
+ /*
+ * Unknown augmentation. Assume
+ * 'z' augmentation.
+ */
+ p = cie->initial_instructions;
+ UNWINDER_BUG_ON(!p);
+ break;
+ }
+ }
+
+ cie->initial_instructions = p;
+ cie->instructions_end = end;
+
+ /* Add to list */
+ spin_lock_irqsave(&dwarf_cie_lock, flags);
+
+ while (*rb_node) {
+ struct dwarf_cie *cie_tmp;
+
+ cie_tmp = rb_entry(*rb_node, struct dwarf_cie, node);
+
+ parent = *rb_node;
+
+ if (cie->cie_pointer < cie_tmp->cie_pointer)
+ rb_node = &parent->rb_left;
+ else if (cie->cie_pointer >= cie_tmp->cie_pointer)
+ rb_node = &parent->rb_right;
+ else
+ WARN_ON(1);
+ }
+
+ rb_link_node(&cie->node, parent, rb_node);
+ rb_insert_color(&cie->node, &cie_root);
+
+#ifdef CONFIG_MODULES
+ if (mod != NULL)
+ list_add_tail(&cie->link, &mod->arch.cie_list);
+#endif
+
+ spin_unlock_irqrestore(&dwarf_cie_lock, flags);
+
+ return 0;
+}
+
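+/*
+ * Parse a single FDE, resolve the CIE it refers to, and insert it into
+ * the global FDE rb-tree keyed by the address range it covers.
+ */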
+static int dwarf_parse_fde(void *entry, u32 entry_type,
+ void *start, unsigned long len,
+ unsigned char *end, struct module *mod)
+{
+ struct rb_node **rb_node = &fde_root.rb_node;
+ struct rb_node *parent = *rb_node;
+ struct dwarf_fde *fde;
+ struct dwarf_cie *cie;
+ unsigned long flags;
+ int count;
+ void *p = start;
+
+ fde = kzalloc(sizeof(*fde), GFP_KERNEL);
+ if (!fde)
+ return -ENOMEM;
+
+ fde->length = len;
+
+ /*
+ * In a .eh_frame section the CIE pointer is the
+ * delta between the address of the CIE pointer field
+ * within the FDE and the start of the CIE it refers to.
+ */
+ fde->cie_pointer = (unsigned long)(p - entry_type - 4);
+
+ cie = dwarf_lookup_cie(fde->cie_pointer);
+ fde->cie = cie;
+
+ if (cie->encoding)
+ count = dwarf_read_encoded_value(p, &fde->initial_location,
+ cie->encoding);
+ else
+ count = dwarf_read_addr(p, &fde->initial_location);
+
+ p += count;
+
+ if (cie->encoding)
+ count = dwarf_read_encoded_value(p, &fde->address_range,
+ cie->encoding & 0x0f);
+ else
+ count = dwarf_read_addr(p, &fde->address_range);
+
+ p += count;
+
+ if (fde->cie->flags & DWARF_CIE_Z_AUGMENTATION) {
+ unsigned int length;
+ count = dwarf_read_uleb128(p, &length);
+ p += count + length;
+ }
+
+ /* Call frame instructions. */
+ fde->instructions = p;
+ fde->end = end;
+
+ /* Add to list. */
+ spin_lock_irqsave(&dwarf_fde_lock, flags);
+
+ while (*rb_node) {
+ struct dwarf_fde *fde_tmp;
+ unsigned long tmp_start, tmp_end;
+ unsigned long start, end;
+
+ fde_tmp = rb_entry(*rb_node, struct dwarf_fde, node);
+
+ start = fde->initial_location;
+ end = fde->initial_location + fde->address_range;
+
+ tmp_start = fde_tmp->initial_location;
+ tmp_end = fde_tmp->initial_location + fde_tmp->address_range;
+
+ parent = *rb_node;
+
+ if (start < tmp_start)
+ rb_node = &parent->rb_left;
+ else if (start >= tmp_end)
+ rb_node = &parent->rb_right;
+ else
+ WARN_ON(1);
+ }
+
+ rb_link_node(&fde->node, parent, rb_node);
+ rb_insert_color(&fde->node, &fde_root);
+
+#ifdef CONFIG_MODULES
+ if (mod != NULL)
+ list_add_tail(&fde->link, &mod->arch.fde_list);
+#endif
+
+ spin_unlock_irqrestore(&dwarf_fde_lock, flags);
+
+ return 0;
+}
+
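+/*
+ * Walk the callstack by repeatedly unwinding one frame at a time,
+ * reporting each return address to @ops until no further frame can
+ * be unwound.
+ */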
+static void dwarf_unwinder_dump(struct task_struct *task,
+ struct pt_regs *regs,
+ unsigned long *sp,
+ const struct stacktrace_ops *ops,
+ void *data)
+{
+ struct dwarf_frame *frame, *_frame;
+ unsigned long return_addr;
+
+ _frame = NULL;
+ return_addr = 0;
+
+ while (1) {
+ frame = dwarf_unwind_stack(return_addr, _frame);
+
+ if (_frame)
+ dwarf_free_frame(_frame);
+
+ _frame = frame;
+
+ if (!frame || !frame->return_addr)
+ break;
+
+ return_addr = frame->return_addr;
+ ops->address(data, return_addr, 1);
+ }
+
+ if (frame)
+ dwarf_free_frame(frame);
+}
+
+static struct unwinder dwarf_unwinder = {
+ .name = "dwarf-unwinder",
+ .dump = dwarf_unwinder_dump,
+ .rating = 150,
+};
+
+static void dwarf_unwinder_cleanup(void)
+{
+ struct rb_node **fde_rb_node = &fde_root.rb_node;
+ struct rb_node **cie_rb_node = &cie_root.rb_node;
+
+ /*
+ * Deallocate all the memory allocated for the DWARF unwinder.
+ * Traverse all the FDE/CIE lists and remove and free all the
+ * memory associated with those data structures.
+ */
+ while (*fde_rb_node) {
+ struct dwarf_fde *fde;
+
+ fde = rb_entry(*fde_rb_node, struct dwarf_fde, node);
+ rb_erase(*fde_rb_node, &fde_root);
+ kfree(fde);
+ }
+
+ while (*cie_rb_node) {
+ struct dwarf_cie *cie;
+
+ cie = rb_entry(*cie_rb_node, struct dwarf_cie, node);
+ rb_erase(*cie_rb_node, &cie_root);
+ kfree(cie);
+ }
+
+ kmem_cache_destroy(dwarf_reg_cachep);
+ kmem_cache_destroy(dwarf_frame_cachep);
+}
+
+/**
+ * dwarf_parse_section - parse DWARF section
+ * @eh_frame_start: start address of the .eh_frame section
+ * @eh_frame_end: end address of the .eh_frame section
+ * @mod: the kernel module containing the .eh_frame section
+ *
+ * Parse the information in a .eh_frame section.
+ */
+static int dwarf_parse_section(char *eh_frame_start, char *eh_frame_end,
+ struct module *mod)
+{
+ u32 entry_type;
+ void *p, *entry;
+ int count, err = 0;
+ unsigned long len = 0;
+ unsigned int c_entries, f_entries;
+ unsigned char *end;
+
+ c_entries = 0;
+ f_entries = 0;
+ entry = eh_frame_start;
+
+ while ((char *)entry < eh_frame_end) {
+ p = entry;
+
+ count = dwarf_entry_len(p, &len);
+ if (count == 0) {
+ /*
+ * We read a bogus length field value. There is
+ * nothing we can do here apart from disabling
+ * the DWARF unwinder. We can't even skip this
+ * entry and move to the next one because 'len'
+ * tells us where our next entry is.
+ */
+ err = -EINVAL;
+ goto out;
+ } else
+ p += count;
+
+ /* initial length does not include itself */
+ end = p + len;
+
+ entry_type = get_unaligned((u32 *)p);
+ p += 4;
+
+ if (entry_type == DW_EH_FRAME_CIE) {
+ err = dwarf_parse_cie(entry, p, len, end, mod);
+ if (err < 0)
+ goto out;
+ else
+ c_entries++;
+ } else {
+ err = dwarf_parse_fde(entry, entry_type, p, len,
+ end, mod);
+ if (err < 0)
+ goto out;
+ else
+ f_entries++;
+ }
+
+ entry = (char *)entry + len + 4;
+ }
+
+ printk(KERN_INFO "DWARF unwinder initialised: read %u CIEs, %u FDEs\n",
+ c_entries, f_entries);
+
+ return 0;
+
+out:
+ return err;
+}
+
+#ifdef CONFIG_MODULES
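+/*
+ * Called when a module is loaded to locate and parse the module's
+ * .eh_frame section, if it has one.
+ */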
+int module_dwarf_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
+ struct module *me)
+{
+ unsigned int i;
+ int err;
+ unsigned long start, end;
+ char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+
+ start = end = 0;
+
+ for (i = 1; i < hdr->e_shnum; i++) {
+ /* Alloc bit cleared means "ignore it." */
+ if ((sechdrs[i].sh_flags & SHF_ALLOC)
+ && !strcmp(secstrings+sechdrs[i].sh_name, ".eh_frame")) {
+ start = sechdrs[i].sh_addr;
+ end = start + sechdrs[i].sh_size;
+ break;
+ }
+ }
+
+ /* Did we find the .eh_frame section? */
+ if (i != hdr->e_shnum) {
+ INIT_LIST_HEAD(&me->arch.cie_list);
+ INIT_LIST_HEAD(&me->arch.fde_list);
+ err = dwarf_parse_section((char *)start, (char *)end, me);
+ if (err) {
+ printk(KERN_WARNING "%s: failed to parse DWARF info\n",
+ me->name);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * module_dwarf_cleanup - remove FDE/CIEs associated with @mod
+ * @mod: the module that is being unloaded
+ *
+ * Remove any FDEs and CIEs from the global lists that came from
+ * @mod's .eh_frame section because @mod is being unloaded.
+ */
+void module_dwarf_cleanup(struct module *mod)
+{
+ struct dwarf_fde *fde, *ftmp;
+ struct dwarf_cie *cie, *ctmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dwarf_cie_lock, flags);
+
+ list_for_each_entry_safe(cie, ctmp, &mod->arch.cie_list, link) {
+ list_del(&cie->link);
+ rb_erase(&cie->node, &cie_root);
+ kfree(cie);
+ }
+
+ spin_unlock_irqrestore(&dwarf_cie_lock, flags);
+
+ spin_lock_irqsave(&dwarf_fde_lock, flags);
+
+ list_for_each_entry_safe(fde, ftmp, &mod->arch.fde_list, link) {
+ list_del(&fde->link);
+ rb_erase(&fde->node, &fde_root);
+ kfree(fde);
+ }
+
+ spin_unlock_irqrestore(&dwarf_fde_lock, flags);
+}
+#endif /* CONFIG_MODULES */
+
+/**
+ * dwarf_unwinder_init - initialise the dwarf unwinder
+ *
+ * Build the data structures describing the .eh_frame section to
+ * make it easier to lookup CIE and FDE entries. Because the
+ * .eh_frame section is packed as tightly as possible it is not
+ * easy to lookup the FDE for a given PC, so we build a list of FDE
+ * and CIE entries that make it easier.
+ */
+static int __init dwarf_unwinder_init(void)
+{
+ int err = -ENOMEM;
+
+ dwarf_frame_cachep = kmem_cache_create("dwarf_frames",
+ sizeof(struct dwarf_frame), 0,
+ SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL);
+
+ dwarf_reg_cachep = kmem_cache_create("dwarf_regs",
+ sizeof(struct dwarf_reg), 0,
+ SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL);
+
+ dwarf_frame_pool = mempool_create(DWARF_FRAME_MIN_REQ,
+ mempool_alloc_slab,
+ mempool_free_slab,
+ dwarf_frame_cachep);
+ if (!dwarf_frame_pool)
+ goto out;
+
+ dwarf_reg_pool = mempool_create(DWARF_REG_MIN_REQ,
+ mempool_alloc_slab,
+ mempool_free_slab,
+ dwarf_reg_cachep);
+ if (!dwarf_reg_pool)
+ goto out;
+
+ err = dwarf_parse_section(__start_eh_frame, __stop_eh_frame, NULL);
+ if (err)
+ goto out;
+
+ err = unwinder_register(&dwarf_unwinder);
+ if (err)
+ goto out;
+
+ dwarf_unwinder_ready = 1;
+
+ return 0;
+
+out:
+ printk(KERN_ERR "Failed to initialise DWARF unwinder: %d\n", err);
+ dwarf_unwinder_cleanup();
+ return err;
+}
+early_initcall(dwarf_unwinder_init);
diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S
new file mode 100644
index 00000000..f67601cb
--- /dev/null
+++ b/arch/sh/kernel/entry-common.S
@@ -0,0 +1,375 @@
+/*
+ * Copyright (C) 1999, 2000, 2002 Niibe Yutaka
+ * Copyright (C) 2003 - 2008 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ */
+
+! NOTE:
+! GNU as (as of 2.9.1) changes bf/s into bt/s and bra when the branch
+! target is too far away, but this causes an illegal slot exception.
+
+/*
+ * entry.S contains the system-call and fault low-level handling routines.
+ * This also contains the timer-interrupt handler, as well as all interrupts
+ * and faults that can result in a task-switch.
+ *
+ * NOTE: This code handles signal-recognition, which happens every time
+ * after a timer-interrupt and after each system call.
+ *
+ * NOTE: This code uses a convention that instructions in the delay slot
+ * of a transfer-control instruction are indented by an extra space, thus:
+ *
+ * jmp @k0 ! control-transfer instruction
+ * ldc k1, ssr ! delay slot
+ *
+ * Stack layout in 'ret_from_syscall':
+ * ptrace needs to have all regs on the stack.
+ * if the order here is changed, it needs to be
+ * updated in ptrace.c and ptrace.h
+ *
+ * r0
+ * ...
+ * r15 = stack pointer
+ * spc
+ * pr
+ * ssr
+ * gbr
+ * mach
+ * macl
+ * syscall #
+ *
+ */
+#include <asm/dwarf.h>
+
+#if defined(CONFIG_PREEMPT)
+# define preempt_stop() cli ; TRACE_IRQS_OFF
+#else
+# define preempt_stop()
+# define resume_kernel __restore_all
+#endif
+
+
+ .align 2
+ENTRY(exception_error)
+ !
+ TRACE_IRQS_ON
+ sti
+ mov.l 1f, r0
+ jmp @r0
+ nop
+
+ .align 2
+1: .long do_exception_error
+
+ .align 2
+ret_from_exception:
+ CFI_STARTPROC simple
+ CFI_DEF_CFA r14, 0
+ CFI_REL_OFFSET 17, 64
+ CFI_REL_OFFSET 15, 60
+ CFI_REL_OFFSET 14, 56
+ CFI_REL_OFFSET 13, 52
+ CFI_REL_OFFSET 12, 48
+ CFI_REL_OFFSET 11, 44
+ CFI_REL_OFFSET 10, 40
+ CFI_REL_OFFSET 9, 36
+ CFI_REL_OFFSET 8, 32
+ preempt_stop()
+ENTRY(ret_from_irq)
+ !
+ mov #OFF_SR, r0
+ mov.l @(r0,r15), r0 ! get status register
+ shll r0
+ shll r0 ! kernel space?
+ get_current_thread_info r8, r0
+ bt resume_kernel ! Yes, it's from kernel, go back soon
+
+#ifdef CONFIG_PREEMPT
+ bra resume_userspace
+ nop
+ENTRY(resume_kernel)
+ cli
+ TRACE_IRQS_OFF
+ mov.l @(TI_PRE_COUNT,r8), r0 ! current_thread_info->preempt_count
+ tst r0, r0
+ bf noresched
+need_resched:
+ mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags
+ tst #_TIF_NEED_RESCHED, r0 ! need_resched set?
+ bt noresched
+
+ mov #OFF_SR, r0
+ mov.l @(r0,r15), r0 ! get status register
+ shlr r0
+ and #(0xf0>>1), r0 ! interrupts off (exception path)?
+ cmp/eq #(0xf0>>1), r0
+ bt noresched
+ mov.l 3f, r0
+ jsr @r0 ! call preempt_schedule_irq
+ nop
+ bra need_resched
+ nop
+
+noresched:
+ bra __restore_all
+ nop
+
+ .align 2
+1: .long PREEMPT_ACTIVE
+2: .long schedule
+3: .long preempt_schedule_irq
+#endif
+
+ENTRY(resume_userspace)
+ ! r8: current_thread_info
+ cli
+ TRACE_IRQS_OFF
+ mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags
+ tst #(_TIF_WORK_MASK & 0xff), r0
+ bt/s __restore_all
+ tst #_TIF_NEED_RESCHED, r0
+
+ .align 2
+work_pending:
+ ! r0: current_thread_info->flags
+ ! r8: current_thread_info
+ ! t: result of "tst #_TIF_NEED_RESCHED, r0"
+ bf/s work_resched
+ tst #_TIF_SIGPENDING, r0
+work_notifysig:
+ bt/s __restore_all
+ mov r15, r4
+ mov r12, r5 ! set arg1(save_r0)
+ mov r0, r6
+ sti
+ mov.l 2f, r1
+ mov.l 3f, r0
+ jmp @r1
+ lds r0, pr
+work_resched:
+ mov.l 1f, r1
+ jsr @r1 ! schedule
+ nop
+ cli
+ TRACE_IRQS_OFF
+ !
+ mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags
+ tst #(_TIF_WORK_MASK & 0xff), r0
+ bt __restore_all
+ bra work_pending
+ tst #_TIF_NEED_RESCHED, r0
+
+ .align 2
+1: .long schedule
+2: .long do_notify_resume
+3: .long resume_userspace
+
+ .align 2
+syscall_exit_work:
+ ! r0: current_thread_info->flags
+ ! r8: current_thread_info
+ tst #(_TIF_WORK_SYSCALL_MASK & 0xff), r0
+ bt/s work_pending
+ tst #_TIF_NEED_RESCHED, r0
+ TRACE_IRQS_ON
+ sti
+ mov r15, r4
+ mov.l 8f, r0 ! do_syscall_trace_leave
+ jsr @r0
+ nop
+ bra resume_userspace
+ nop
+
+ .align 2
+syscall_trace_entry:
+ ! Yes it is traced.
+ mov r15, r4
+ mov.l 7f, r11 ! Call do_syscall_trace_enter which notifies
+ jsr @r11 ! superior (will chomp R[0-7])
+ nop
+ mov.l r0, @(OFF_R0,r15) ! Save return value
+ ! Reload R0-R4 from kernel stack, where the
+ ! parent may have modified them using
+ ! ptrace(POKEUSR). (Note that R0-R2 are
+ ! used by the system call handler directly
+ ! from the kernel stack anyway, so don't need
+ ! to be reloaded here.) This allows the parent
+ ! to rewrite system calls and args on the fly.
+ mov.l @(OFF_R4,r15), r4 ! arg0
+ mov.l @(OFF_R5,r15), r5
+ mov.l @(OFF_R6,r15), r6
+ mov.l @(OFF_R7,r15), r7 ! arg3
+ mov.l @(OFF_R3,r15), r3 ! syscall_nr
+ !
+ mov.l 2f, r10 ! Number of syscalls
+ cmp/hs r10, r3
+ bf syscall_call
+ mov #-ENOSYS, r0
+ bra syscall_exit
+ mov.l r0, @(OFF_R0,r15) ! Return value
+
+__restore_all:
+ mov #OFF_SR, r0
+ mov.l @(r0,r15), r0 ! get status register
+
+ shlr2 r0
+ and #0x3c, r0
+ cmp/eq #0x3c, r0
+ bt 1f
+ TRACE_IRQS_ON
+ bra 2f
+ nop
+1:
+ TRACE_IRQS_OFF
+2:
+ mov.l 3f, r0
+ jmp @r0
+ nop
+
+ .align 2
+3: .long restore_all
+
+ .align 2
+syscall_badsys: ! Bad syscall number
+ get_current_thread_info r8, r0
+ mov #-ENOSYS, r0
+ bra resume_userspace
+ mov.l r0, @(OFF_R0,r15) ! Return value
+
+/*
+ * The main debug trap handler.
+ *
+ * r8=TRA (not the trap number!)
+ *
+ * Note: This assumes that the trapa value is left in its original
+ * form (without the shlr2 shift) so the calculation for the jump
+ * call table offset remains a simple in place mask.
+ */
+debug_trap:
+ mov r8, r0
+ and #(0xf << 2), r0
+ mov.l 1f, r8
+ add r0, r8
+ mov.l @r8, r8
+ jsr @r8
+ nop
+ bra __restore_all
+ nop
+ CFI_ENDPROC
+
+ .align 2
+1: .long debug_trap_table
+
+/*
+ * Syscall interface:
+ *
+ * Syscall #: R3
+ * Arguments #0 to #3: R4--R7
+ * Arguments #4 to #6: R0, R1, R2
+ * TRA: (number of arguments + ABI revision) x 4
+ *
+ * This code also handles delegating other traps to the BIOS/gdb stub
+ * according to:
+ *
+ * Trap number
+ * (TRA>>2) Purpose
+ * -------- -------
+ * 0x00-0x0f original SH-3/4 syscall ABI (not in general use).
+ * 0x10-0x1f general SH-3/4 syscall ABI.
+ * 0x20-0x2f syscall ABI for SH-2 parts.
+ * 0x30-0x3f debug traps used by the kernel.
+ * 0x40-0xff Not supported by all parts, so left unhandled.
+ *
+ * Note: When we're first called, the TRA value must be shifted
+ * right 2 bits in order to get the value that was used as the "trapa"
+ * argument.
+ */
+
+ .align 2
+ .globl ret_from_fork
+ret_from_fork:
+ mov.l 1f, r8
+ jsr @r8
+ mov r0, r4
+ bra syscall_exit
+ nop
+ .align 2
+1: .long schedule_tail
+
+/*
+ * The poorly named main trapa decode and dispatch routine, for
+ * system calls and debug traps through their respective jump tables.
+ */
+ENTRY(system_call)
+ setup_frame_reg
+#if !defined(CONFIG_CPU_SH2)
+ mov.l 1f, r9
+ mov.l @r9, r8 ! Read from TRA (Trap Address) Register
+#endif
+
+ mov #OFF_TRA, r10
+ add r15, r10
+ mov.l r8, @r10 ! set TRA value to tra
+
+ /*
+ * Check the trap type
+ */
+ mov #((0x20 << 2) - 1), r9
+ cmp/hi r9, r8
+ bt/s debug_trap ! it's a debug trap..
+ nop
+
+ TRACE_IRQS_ON
+ sti
+
+ !
+ get_current_thread_info r8, r10
+ mov.l @(TI_FLAGS,r8), r8
+ mov #(_TIF_WORK_SYSCALL_MASK & 0xff), r10
+ mov #(_TIF_WORK_SYSCALL_MASK >> 8), r9
+ tst r10, r8
+ shll8 r9
+ bf syscall_trace_entry
+ tst r9, r8
+ bf syscall_trace_entry
+ !
+ mov.l 2f, r8 ! Number of syscalls
+ cmp/hs r8, r3
+ bt syscall_badsys
+ !
+syscall_call:
+ shll2 r3 ! x4
+ mov.l 3f, r8 ! Load the address of sys_call_table
+ add r8, r3
+ mov.l @r3, r8
+ jsr @r8 ! jump to specific syscall handler
+ nop
+ mov.l @(OFF_R0,r15), r12 ! save r0
+ mov.l r0, @(OFF_R0,r15) ! save the return value
+ !
+syscall_exit:
+ cli
+ TRACE_IRQS_OFF
+ !
+ get_current_thread_info r8, r0
+ mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags
+ tst #(_TIF_ALLWORK_MASK & 0xff), r0
+ mov #(_TIF_ALLWORK_MASK >> 8), r1
+ bf syscall_exit_work
+ shlr8 r0
+ tst r0, r1
+ bf syscall_exit_work
+ bra __restore_all
+ nop
+ .align 2
+#if !defined(CONFIG_CPU_SH2)
+1: .long TRA
+#endif
+2: .long NR_syscalls
+3: .long sys_call_table
+7: .long do_syscall_trace_enter
+8: .long do_syscall_trace_leave
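
As an aside (illustrative only, not part of this patch), the syscall calling convention documented above the system_call path (number in R3, arguments 0 to 3 in R4-R7, return value coming back in R0, trap numbers 0x10-0x1f for the general SH-3/4 ABI) can be exercised from userspace with a small inline-assembly wrapper. The syscall number and the "0x10 + argument count" trap value in this sketch are assumptions, not something the patch defines:

/* Hedged userspace sketch of the SH-3/4 syscall ABI described above.
 * Assumes __NR_write == 4 and the historical "0x10 + nargs" trapa value;
 * the return value is read back from r0, as set up by syscall_call above. */
static long sh_sys_write(int fd, const void *buf, unsigned long len)
{
	register long r3 __asm__("r3") = 4;          /* assumed __NR_write */
	register long r4 __asm__("r4") = fd;         /* arg0 */
	register long r5 __asm__("r5") = (long)buf;  /* arg1 */
	register long r6 __asm__("r6") = len;        /* arg2 */
	register long r0 __asm__("r0");              /* return value */

	__asm__ __volatile__("trapa #0x13"           /* 0x10 + 3 arguments */
			     : "=r" (r0)
			     : "r" (r3), "r" (r4), "r" (r5), "r" (r6)
			     : "memory", "t");
	return r0;
}
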
diff --git a/arch/sh/kernel/ftrace.c b/arch/sh/kernel/ftrace.c
new file mode 100644
index 00000000..30e13196
--- /dev/null
+++ b/arch/sh/kernel/ftrace.c
@@ -0,0 +1,401 @@
+/*
+ * Copyright (C) 2008 Matt Fleming <matt@console-pimps.org>
+ * Copyright (C) 2008 Paul Mundt <lethal@linux-sh.org>
+ *
+ * Code for replacing ftrace calls with jumps.
+ *
+ * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
+ *
+ * Thanks goes to Ingo Molnar, for suggesting the idea.
+ * Mathieu Desnoyers, for suggesting postponing the modifications.
+ * Arjan van de Ven, for keeping me straight, and explaining to me
+ * the dangers of modifying code on the run.
+ */
+#include <linux/uaccess.h>
+#include <linux/ftrace.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <asm/ftrace.h>
+#include <asm/cacheflush.h>
+#include <asm/unistd.h>
+#include <trace/syscall.h>
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+static unsigned char ftrace_replaced_code[MCOUNT_INSN_SIZE];
+
+static unsigned char ftrace_nop[4];
+/*
+ * If we're trying to nop out a call to a function, we instead
+ * place a call to the address after the memory table.
+ *
+ * 8c011060 <a>:
+ * 8c011060: 02 d1 mov.l 8c01106c <a+0xc>,r1
+ * 8c011062: 22 4f sts.l pr,@-r15
+ * 8c011064: 02 c7 mova 8c011070 <a+0x10>,r0
+ * 8c011066: 2b 41 jmp @r1
+ * 8c011068: 2a 40 lds r0,pr
+ * 8c01106a: 09 00 nop
+ * 8c01106c: 68 24 .word 0x2468 <--- ip
+ * 8c01106e: 1d 8c .word 0x8c1d
+ * 8c011070: 26 4f lds.l @r15+,pr <--- ip + MCOUNT_INSN_SIZE
+ *
+ * We write 0x8c011070 to 0x8c01106c so that on entry to a() we branch
+ * past the _mcount call and continue executing code like normal.
+ */
+static unsigned char *ftrace_nop_replace(unsigned long ip)
+{
+ __raw_writel(ip + MCOUNT_INSN_SIZE, ftrace_nop);
+ return ftrace_nop;
+}
+
+static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
+{
+ /* Place the address in the memory table. */
+ __raw_writel(addr, ftrace_replaced_code);
+
+ /*
+ * No locking needed, this must be called via kstop_machine
+ * which in essence is like running on a uniprocessor machine.
+ */
+ return ftrace_replaced_code;
+}
+
+/*
+ * Modifying code must take extra care. On an SMP machine, if
+ * the code being modified is also being executed on another CPU,
+ * that CPU will have undefined results and possibly take a GPF.
+ * We use kstop_machine to stop other CPUs from executing code.
+ * But this does not stop NMIs from happening. We still need
+ * to protect against that. We separate out the modification of
+ * the code to take care of this.
+ *
+ * Two buffers are added: An IP buffer and a "code" buffer.
+ *
+ * 1) Put the instruction pointer into the IP buffer
+ * and the new code into the "code" buffer.
+ * 2) Wait for any running NMIs to finish and set a flag that says
+ * we are modifying code; this is done in an atomic operation.
+ * 3) Write the code.
+ * 4) Clear the flag.
+ * 5) Wait for any running NMIs to finish.
+ *
+ * If an NMI is executed, the first thing it does is to call
+ * "ftrace_nmi_enter". This will check if the flag is set to write
+ * and if it is, it will write what is in the IP and "code" buffers.
+ *
+ * The trick is, it does not matter if everyone is writing the same
+ * content to the code location. Also, if a CPU is executing code
+ * it is OK to write to that code location if the contents being written
+ * are the same as what exists.
+ */
+#define MOD_CODE_WRITE_FLAG (1 << 31) /* set when NMI should do the write */
+static atomic_t nmi_running = ATOMIC_INIT(0);
+static int mod_code_status; /* holds return value of text write */
+static void *mod_code_ip; /* holds the IP to write to */
+static void *mod_code_newcode; /* holds the text to write to the IP */
+
+static unsigned nmi_wait_count;
+static atomic_t nmi_update_count = ATOMIC_INIT(0);
+
+int ftrace_arch_read_dyn_info(char *buf, int size)
+{
+ int r;
+
+ r = snprintf(buf, size, "%u %u",
+ nmi_wait_count,
+ atomic_read(&nmi_update_count));
+ return r;
+}
+
+static void clear_mod_flag(void)
+{
+ int old = atomic_read(&nmi_running);
+
+ for (;;) {
+ int new = old & ~MOD_CODE_WRITE_FLAG;
+
+ if (old == new)
+ break;
+
+ old = atomic_cmpxchg(&nmi_running, old, new);
+ }
+}
+
+static void ftrace_mod_code(void)
+{
+ /*
+ * Yes, more than one CPU process can be writing to mod_code_status.
+ * (and the code itself)
+ * But if one were to fail, then they all should, and if one were
+ * to succeed, then they all should.
+ */
+ mod_code_status = probe_kernel_write(mod_code_ip, mod_code_newcode,
+ MCOUNT_INSN_SIZE);
+
+ /* if we fail, then kill any new writers */
+ if (mod_code_status)
+ clear_mod_flag();
+}
+
+void ftrace_nmi_enter(void)
+{
+ if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
+ smp_rmb();
+ ftrace_mod_code();
+ atomic_inc(&nmi_update_count);
+ }
+ /* Must have previous changes seen before executions */
+ smp_mb();
+}
+
+void ftrace_nmi_exit(void)
+{
+ /* Finish all executions before clearing nmi_running */
+ smp_mb();
+ atomic_dec(&nmi_running);
+}
+
+static void wait_for_nmi_and_set_mod_flag(void)
+{
+ if (!atomic_cmpxchg(&nmi_running, 0, MOD_CODE_WRITE_FLAG))
+ return;
+
+ do {
+ cpu_relax();
+ } while (atomic_cmpxchg(&nmi_running, 0, MOD_CODE_WRITE_FLAG));
+
+ nmi_wait_count++;
+}
+
+static void wait_for_nmi(void)
+{
+ if (!atomic_read(&nmi_running))
+ return;
+
+ do {
+ cpu_relax();
+ } while (atomic_read(&nmi_running));
+
+ nmi_wait_count++;
+}
+
+static int
+do_ftrace_mod_code(unsigned long ip, void *new_code)
+{
+ mod_code_ip = (void *)ip;
+ mod_code_newcode = new_code;
+
+ /* The buffers need to be visible before we let NMIs write them */
+ smp_mb();
+
+ wait_for_nmi_and_set_mod_flag();
+
+ /* Make sure all running NMIs have finished before we write the code */
+ smp_mb();
+
+ ftrace_mod_code();
+
+ /* Make sure the write happens before clearing the bit */
+ smp_mb();
+
+ clear_mod_flag();
+ wait_for_nmi();
+
+ return mod_code_status;
+}
+
+static int ftrace_modify_code(unsigned long ip, unsigned char *old_code,
+ unsigned char *new_code)
+{
+ unsigned char replaced[MCOUNT_INSN_SIZE];
+
+ /*
+ * Note: Due to modules and __init, code can
+ * disappear and change, so we need to protect against faulting
+ * as well as code changing. We do this by using the
+ * probe_kernel_* functions.
+ *
+ * No real locking needed, this code is run through
+ * kstop_machine, or before SMP starts.
+ */
+
+ /* read the text we want to modify */
+ if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
+ return -EFAULT;
+
+ /* Make sure it is what we expect it to be */
+ if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
+ return -EINVAL;
+
+ /* replace the text with the new text */
+ if (do_ftrace_mod_code(ip, new_code))
+ return -EPERM;
+
+ flush_icache_range(ip, ip + MCOUNT_INSN_SIZE);
+
+ return 0;
+}
+
+int ftrace_update_ftrace_func(ftrace_func_t func)
+{
+ unsigned long ip = (unsigned long)(&ftrace_call) + MCOUNT_INSN_OFFSET;
+ unsigned char old[MCOUNT_INSN_SIZE], *new;
+
+ memcpy(old, (unsigned char *)ip, MCOUNT_INSN_SIZE);
+ new = ftrace_call_replace(ip, (unsigned long)func);
+
+ return ftrace_modify_code(ip, old, new);
+}
+
+int ftrace_make_nop(struct module *mod,
+ struct dyn_ftrace *rec, unsigned long addr)
+{
+ unsigned char *new, *old;
+ unsigned long ip = rec->ip;
+
+ old = ftrace_call_replace(ip, addr);
+ new = ftrace_nop_replace(ip);
+
+ return ftrace_modify_code(rec->ip, old, new);
+}
+
+int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+ unsigned char *new, *old;
+ unsigned long ip = rec->ip;
+
+ old = ftrace_nop_replace(ip);
+ new = ftrace_call_replace(ip, addr);
+
+ return ftrace_modify_code(rec->ip, old, new);
+}
+
+int __init ftrace_dyn_arch_init(void *data)
+{
+ /* The return code is returned via data */
+ __raw_writel(0, (unsigned long)data);
+
+ return 0;
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+#ifdef CONFIG_DYNAMIC_FTRACE
+extern void ftrace_graph_call(void);
+
+static int ftrace_mod(unsigned long ip, unsigned long old_addr,
+ unsigned long new_addr)
+{
+ unsigned char code[MCOUNT_INSN_SIZE];
+
+ if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
+ return -EFAULT;
+
+ if (old_addr != __raw_readl((unsigned long *)code))
+ return -EINVAL;
+
+ __raw_writel(new_addr, ip);
+ return 0;
+}
+
+int ftrace_enable_ftrace_graph_caller(void)
+{
+ unsigned long ip, old_addr, new_addr;
+
+ ip = (unsigned long)(&ftrace_graph_call) + GRAPH_INSN_OFFSET;
+ old_addr = (unsigned long)(&skip_trace);
+ new_addr = (unsigned long)(&ftrace_graph_caller);
+
+ return ftrace_mod(ip, old_addr, new_addr);
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+ unsigned long ip, old_addr, new_addr;
+
+ ip = (unsigned long)(&ftrace_graph_call) + GRAPH_INSN_OFFSET;
+ old_addr = (unsigned long)(&ftrace_graph_caller);
+ new_addr = (unsigned long)(&skip_trace);
+
+ return ftrace_mod(ip, old_addr, new_addr);
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+/*
+ * Hook the return address and push it in the stack of return addrs
+ * in the current thread info.
+ *
+ * This is the main routine for the function graph tracer. The function
+ * graph tracer essentially works like this:
+ *
+ * parent is the stack address containing self_addr's return address.
+ * We pull the real return address out of parent and store it in
+ * current's ret_stack. Then, we replace the return address on the stack
+ * with the address of return_to_handler. self_addr is the function that
+ * called mcount.
+ *
+ * When self_addr returns, it will jump to return_to_handler which calls
+ * ftrace_return_to_handler. ftrace_return_to_handler will pull the real
+ * return address off of current's ret_stack and jump to it.
+ */
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
+{
+ unsigned long old;
+ int faulted, err;
+ struct ftrace_graph_ent trace;
+ unsigned long return_hooker = (unsigned long)&return_to_handler;
+
+ if (unlikely(atomic_read(&current->tracing_graph_pause)))
+ return;
+
+ /*
+ * Protect against a fault, even if it shouldn't
+ * happen. This tool is too intrusive to
+ * ignore such a protection.
+ */
+ __asm__ __volatile__(
+ "1: \n\t"
+ "mov.l @%2, %0 \n\t"
+ "2: \n\t"
+ "mov.l %3, @%2 \n\t"
+ "mov #0, %1 \n\t"
+ "3: \n\t"
+ ".section .fixup, \"ax\" \n\t"
+ "4: \n\t"
+ "mov.l 5f, %0 \n\t"
+ "jmp @%0 \n\t"
+ " mov #1, %1 \n\t"
+ ".balign 4 \n\t"
+ "5: .long 3b \n\t"
+ ".previous \n\t"
+ ".section __ex_table,\"a\" \n\t"
+ ".long 1b, 4b \n\t"
+ ".long 2b, 4b \n\t"
+ ".previous \n\t"
+ : "=&r" (old), "=r" (faulted)
+ : "r" (parent), "r" (return_hooker)
+ );
+
+ if (unlikely(faulted)) {
+ ftrace_graph_stop();
+ WARN_ON(1);
+ return;
+ }
+
+ err = ftrace_push_return_trace(old, self_addr, &trace.depth, 0);
+ if (err == -EBUSY) {
+ __raw_writel(old, parent);
+ return;
+ }
+
+ trace.func = self_addr;
+
+ /* Only trace if the calling function expects to */
+ if (!ftrace_graph_entry(&trace)) {
+ current->curr_ret_stack--;
+ __raw_writel(old, parent);
+ }
+}
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/arch/sh/kernel/head_32.S b/arch/sh/kernel/head_32.S
new file mode 100644
index 00000000..7db24893
--- /dev/null
+++ b/arch/sh/kernel/head_32.S
@@ -0,0 +1,355 @@
+/* $Id: head.S,v 1.7 2003/09/01 17:58:19 lethal Exp $
+ *
+ * arch/sh/kernel/head.S
+ *
+ * Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima
+ * Copyright (C) 2010 Matt Fleming
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Head.S contains the SH exception handlers and startup code.
+ */
+#include <linux/init.h>
+#include <linux/linkage.h>
+#include <asm/thread_info.h>
+#include <asm/mmu.h>
+#include <cpu/mmu_context.h>
+
+#ifdef CONFIG_CPU_SH4A
+#define SYNCO() synco
+
+#define PREFI(label, reg) \
+ mov.l label, reg; \
+ prefi @reg
+#else
+#define SYNCO()
+#define PREFI(label, reg)
+#endif
+
+ .section .empty_zero_page, "aw"
+ENTRY(empty_zero_page)
+ .long 1 /* MOUNT_ROOT_RDONLY */
+ .long 0 /* RAMDISK_FLAGS */
+ .long 0x0200 /* ORIG_ROOT_DEV */
+ .long 1 /* LOADER_TYPE */
+ .long 0x00000000 /* INITRD_START */
+ .long 0x00000000 /* INITRD_SIZE */
+#ifdef CONFIG_32BIT
+ .long 0x53453f00 + 32 /* "SE?" = 32 bit */
+#else
+ .long 0x53453f00 + 29 /* "SE?" = 29 bit */
+#endif
+1:
+ .skip PAGE_SIZE - empty_zero_page - 1b
+
+ __HEAD
+
+/*
+ * Condition at the entry of _stext:
+ *
+ * BSC has already been initialized.
+ * INTC may or may not be initialized.
+ * VBR may or may not be initialized.
+ * MMU may or may not be initialized.
+ * Cache may or may not be initialized.
+ * Hardware (including on-chip modules) may or may not be initialized.
+ *
+ */
+ENTRY(_stext)
+ ! Initialize Status Register
+ mov.l 1f, r0 ! MD=1, RB=0, BL=0, IMASK=0xF
+ ldc r0, sr
+ ! Initialize global interrupt mask
+#ifdef CONFIG_CPU_HAS_SR_RB
+ mov #0, r0
+ ldc r0, r6_bank
+#endif
+
+ /*
+ * Prefetch if possible to reduce cache miss penalty.
+ *
+ * We do this early on for SH-4A as a micro-optimization,
+ * as later on we will have speculative execution enabled
+ * and this will become less of an issue.
+ */
+ PREFI(5f, r0)
+ PREFI(6f, r0)
+
+ !
+ mov.l 2f, r0
+ mov r0, r15 ! Set initial r15 (stack pointer)
+#ifdef CONFIG_CPU_HAS_SR_RB
+ mov.l 7f, r0
+ ldc r0, r7_bank ! ... and initial thread_info
+#endif
+
+#ifdef CONFIG_PMB
+/*
+ * Reconfigure the initial PMB mappings set up by the hardware.
+ *
+ * When we boot in 32-bit MMU mode there are 2 PMB entries already
+ * setup for us.
+ *
+ * Entry VPN PPN V SZ C UB WT
+ * ---------------------------------------------------------------
+ * 0 0x80000000 0x00000000 1 512MB 1 0 1
+ * 1 0xA0000000 0x00000000 1 512MB 0 0 0
+ *
+ * But we reprogram them here because we want complete control over
+ * our address space and the initial mappings may not map PAGE_OFFSET
+ * to __MEMORY_START (or even map all of our RAM).
+ *
+ * Once we've set up cached and uncached mappings, we clear the rest of the
+ * PMB entries. This clearing also deals with the fact that PMB entries
+ * can persist across reboots. The PMB could have been left in any state
+ * when the reboot occurred, so to be safe we clear all entries and start
+ * with a clean slate.
+ *
+ * The uncached mapping is constructed using the smallest possible
+ * mapping with a single unbufferable page. Only the kernel text needs to
+ * be covered via the uncached mapping so that certain functions can be
+ * run uncached.
+ *
+ * Drivers and the like that have previously abused the 1:1 identity
+ * mapping are unsupported in 32-bit mode and must specify their caching
+ * preference when page tables are constructed.
+ *
+ * This frees up the P2 space for more nefarious purposes.
+ *
+ * Register utilization is as follows:
+ *
+ * r0 = PMB_DATA data field
+ * r1 = PMB_DATA address field
+ * r2 = PMB_ADDR data field
+ * r3 = PMB_ADDR address field
+ * r4 = PMB_E_SHIFT
+ * r5 = remaining amount of RAM to map
+ * r6 = PMB mapping size we're trying to use
+ * r7 = cached_to_uncached
+ * r8 = scratch register
+ * r9 = scratch register
+ * r10 = number of PMB entries we've setup
+ * r11 = scratch register
+ */
+
+ mov.l .LMMUCR, r1 /* Flush the TLB */
+ mov.l @r1, r0
+ or #MMUCR_TI, r0
+ mov.l r0, @r1
+
+ mov.l .LMEMORY_SIZE, r5
+
+ mov #PMB_E_SHIFT, r0
+ mov #0x1, r4
+ shld r0, r4
+
+ mov.l .LFIRST_DATA_ENTRY, r0
+ mov.l .LPMB_DATA, r1
+ mov.l .LFIRST_ADDR_ENTRY, r2
+ mov.l .LPMB_ADDR, r3
+
+ /*
+ * First we need to walk the PMB and figure out if there are any
+ * existing mappings that match the initial mappings VPN/PPN.
+ * If these have already been established by the bootloader, we
+ * don't bother setting up new entries here, and let the late PMB
+ * initialization take care of things instead.
+ *
+ * Note that we may need to coalesce and merge entries in order
+ * to reclaim more available PMB slots, which is much more than
+ * we want to do at this early stage.
+ */
+ mov #0, r10
+ mov #NR_PMB_ENTRIES, r9
+
+ mov r1, r7 /* temporary PMB_DATA iter */
+
+.Lvalidate_existing_mappings:
+
+ mov.l .LPMB_DATA_MASK, r11
+ mov.l @r7, r8
+ and r11, r8
+ cmp/eq r0, r8 /* Check for valid __MEMORY_START mappings */
+ bt .Lpmb_done
+
+ add #1, r10 /* Increment the loop counter */
+ cmp/eq r9, r10
+ bf/s .Lvalidate_existing_mappings
+ add r4, r7 /* Increment to the next PMB_DATA entry */
+
+ /*
+ * If we've fallen through, continue with setting up the initial
+ * mappings.
+ */
+
+ mov r5, r7 /* cached_to_uncached */
+ mov #0, r10
+
+#ifdef CONFIG_UNCACHED_MAPPING
+ /*
+ * Uncached mapping
+ */
+ mov #(PMB_SZ_16M >> 2), r9
+ shll2 r9
+
+ mov #(PMB_UB >> 8), r8
+ shll8 r8
+
+ or r0, r8
+ or r9, r8
+ mov.l r8, @r1
+ mov r2, r8
+ add r7, r8
+ mov.l r8, @r3
+
+ add r4, r1
+ add r4, r3
+ add #1, r10
+#endif
+
+/*
+ * Iterate over all of the available sizes from largest to
+ * smallest for constructing the cached mapping.
+ */
+#define __PMB_ITER_BY_SIZE(size) \
+.L##size: \
+ mov #(size >> 4), r6; \
+ shll16 r6; \
+ shll8 r6; \
+ \
+ cmp/hi r5, r6; \
+ bt 9999f; \
+ \
+ mov #(PMB_SZ_##size##M >> 2), r9; \
+ shll2 r9; \
+ \
+ /* \
+ * Cached mapping \
+ */ \
+ mov #PMB_C, r8; \
+ or r0, r8; \
+ or r9, r8; \
+ mov.l r8, @r1; \
+ mov.l r2, @r3; \
+ \
+ /* Increment to the next PMB_DATA entry */ \
+ add r4, r1; \
+ /* Increment to the next PMB_ADDR entry */ \
+ add r4, r3; \
+ /* Increment number of PMB entries */ \
+ add #1, r10; \
+ \
+ sub r6, r5; \
+ add r6, r0; \
+ add r6, r2; \
+ \
+ bra .L##size; \
+9999:
+
+ __PMB_ITER_BY_SIZE(512)
+ __PMB_ITER_BY_SIZE(128)
+ __PMB_ITER_BY_SIZE(64)
+ __PMB_ITER_BY_SIZE(16)
+
+#ifdef CONFIG_UNCACHED_MAPPING
+ /*
+ * Now that we can access it, update cached_to_uncached and
+ * uncached_size.
+ */
+ mov.l .Lcached_to_uncached, r0
+ mov.l r7, @r0
+
+ mov.l .Luncached_size, r0
+ mov #1, r7
+ shll16 r7
+ shll8 r7
+ mov.l r7, @r0
+#endif
+
+ /*
+ * Clear the remaining PMB entries.
+ *
+ * r3 = entry to begin clearing from
+ * r10 = number of entries we've setup so far
+ */
+ mov #0, r1
+ mov #NR_PMB_ENTRIES, r0
+
+.Lagain:
+ mov.l r1, @r3 /* Clear PMB_ADDR entry */
+ add #1, r10 /* Increment the loop counter */
+ cmp/eq r0, r10
+ bf/s .Lagain
+ add r4, r3 /* Increment to the next PMB_ADDR entry */
+
+ mov.l 6f, r0
+ icbi @r0
+
+.Lpmb_done:
+#endif /* CONFIG_PMB */
+
+#ifndef CONFIG_SH_NO_BSS_INIT
+ /*
+ * Don't clear BSS if running on slow platforms such as an RTL simulation,
+ * remote memory via SHdebug link, etc. For these the memory can be guaranteed
+ * to be all zero on boot anyway.
+ */
+ ! Clear BSS area
+#ifdef CONFIG_SMP
+ mov.l 3f, r0
+ cmp/eq #0, r0 ! skip clear if set to zero
+ bt 10f
+#endif
+
+ mov.l 3f, r1
+ add #4, r1
+ mov.l 4f, r2
+ mov #0, r0
+9: cmp/hs r2, r1
+ bf/s 9b ! while (r1 < r2)
+ mov.l r0,@-r2
+
+10:
+#endif
+
+ ! Additional CPU initialization
+ mov.l 6f, r0
+ jsr @r0
+ nop
+
+ SYNCO() ! Wait for pending instructions..
+
+ ! Start kernel
+ mov.l 5f, r0
+ jmp @r0
+ nop
+
+ .balign 4
+#if defined(CONFIG_CPU_SH2)
+1: .long 0x000000F0 ! IMASK=0xF
+#else
+1: .long 0x500080F0 ! MD=1, RB=0, BL=1, FD=1, IMASK=0xF
+#endif
+ENTRY(stack_start)
+2: .long init_thread_union+THREAD_SIZE
+3: .long __bss_start
+4: .long _end
+5: .long start_kernel
+6: .long cpu_init
+7: .long init_thread_union
+
+#ifdef CONFIG_PMB
+.LPMB_ADDR: .long PMB_ADDR
+.LPMB_DATA: .long PMB_DATA
+.LPMB_DATA_MASK: .long PMB_PFN_MASK | PMB_V
+.LFIRST_ADDR_ENTRY: .long PAGE_OFFSET | PMB_V
+.LFIRST_DATA_ENTRY: .long __MEMORY_START | PMB_V
+.LMMUCR: .long MMUCR
+.LMEMORY_SIZE: .long __MEMORY_SIZE
+#ifdef CONFIG_UNCACHED_MAPPING
+.Lcached_to_uncached: .long cached_to_uncached
+.Luncached_size: .long uncached_size
+#endif
+#endif
diff --git a/arch/sh/kernel/head_64.S b/arch/sh/kernel/head_64.S
new file mode 100644
index 00000000..defd851a
--- /dev/null
+++ b/arch/sh/kernel/head_64.S
@@ -0,0 +1,357 @@
+/*
+ * arch/sh/kernel/head_64.S
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ * Copyright (C) 2003, 2004 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/init.h>
+
+#include <asm/page.h>
+#include <asm/cache.h>
+#include <asm/tlb.h>
+#include <cpu/registers.h>
+#include <cpu/mmu_context.h>
+#include <asm/thread_info.h>
+
+/*
+ * MMU defines: TLB boundaries.
+ */
+
+#define MMUIR_FIRST ITLB_FIXED
+#define MMUIR_END ITLB_LAST_VAR_UNRESTRICTED+TLB_STEP
+#define MMUIR_STEP TLB_STEP
+
+#define MMUDR_FIRST DTLB_FIXED
+#define MMUDR_END DTLB_LAST_VAR_UNRESTRICTED+TLB_STEP
+#define MMUDR_STEP TLB_STEP
+
+/* Safety check : CONFIG_PAGE_OFFSET has to be a multiple of 512Mb */
+#if (CONFIG_PAGE_OFFSET & ((1UL<<29)-1))
+#error "CONFIG_PAGE_OFFSET must be a multiple of 512Mb"
+#endif
+
+/*
+ * MMU defines: Fixed TLBs.
+ */
+/* Deal safely with the case where the base of RAM is not 512Mb aligned */
+
+#define ALIGN_512M_MASK (0xffffffffe0000000)
+#define ALIGNED_EFFECTIVE ((CONFIG_PAGE_OFFSET + CONFIG_MEMORY_START) & ALIGN_512M_MASK)
+#define ALIGNED_PHYSICAL (CONFIG_MEMORY_START & ALIGN_512M_MASK)
+
+#define MMUIR_TEXT_H (0x0000000000000003 | ALIGNED_EFFECTIVE)
+ /* Enabled, Shared, ASID 0, Eff. Add. 0xA0000000 */
+
+#define MMUIR_TEXT_L (0x000000000000009a | ALIGNED_PHYSICAL)
+ /* 512 Mb, Cacheable, Write-back, execute, Not User, Ph. Add. */
+
+#define MMUDR_CACHED_H 0x0000000000000003 | ALIGNED_EFFECTIVE
+ /* Enabled, Shared, ASID 0, Eff. Add. 0xA0000000 */
+#define MMUDR_CACHED_L 0x000000000000015a | ALIGNED_PHYSICAL
+ /* 512 Mb, Cacheable, Write-back, read/write, Not User, Ph. Add. */
+
+#ifdef CONFIG_CACHE_OFF
+#define ICCR0_INIT_VAL ICCR0_OFF /* ICACHE off */
+#else
+#define ICCR0_INIT_VAL ICCR0_ON | ICCR0_ICI /* ICE + ICI */
+#endif
+#define ICCR1_INIT_VAL ICCR1_NOLOCK /* No locking */
+
+#if defined (CONFIG_CACHE_OFF)
+#define OCCR0_INIT_VAL OCCR0_OFF /* D-cache: off */
+#elif defined (CONFIG_CACHE_WRITETHROUGH)
+#define OCCR0_INIT_VAL OCCR0_ON | OCCR0_OCI | OCCR0_WT /* D-cache: on, */
+ /* WT, invalidate */
+#elif defined (CONFIG_CACHE_WRITEBACK)
+#define OCCR0_INIT_VAL OCCR0_ON | OCCR0_OCI | OCCR0_WB /* D-cache: on, */
+ /* WB, invalidate */
+#else
+#error preprocessor flag CONFIG_CACHE_... not recognized!
+#endif
+
+#define OCCR1_INIT_VAL OCCR1_NOLOCK /* No locking */
+
+ .section .empty_zero_page, "aw"
+ .global empty_zero_page
+
+empty_zero_page:
+ .long 1 /* MOUNT_ROOT_RDONLY */
+ .long 0 /* RAMDISK_FLAGS */
+ .long 0x0200 /* ORIG_ROOT_DEV */
+ .long 1 /* LOADER_TYPE */
+ .long 0x00800000 /* INITRD_START */
+ .long 0x00800000 /* INITRD_SIZE */
+ .long 0
+
+ .text
+ .balign 4096,0,4096
+
+ .section .data, "aw"
+ .balign PAGE_SIZE
+
+ .section .data, "aw"
+ .balign PAGE_SIZE
+
+ .global mmu_pdtp_cache
+mmu_pdtp_cache:
+ .space PAGE_SIZE, 0
+
+ .global empty_bad_page
+empty_bad_page:
+ .space PAGE_SIZE, 0
+
+ .global empty_bad_pte_table
+empty_bad_pte_table:
+ .space PAGE_SIZE, 0
+
+ .global fpu_in_use
+fpu_in_use: .quad 0
+
+
+ __HEAD
+ .balign L1_CACHE_BYTES
+/*
+ * Condition at the entry of __stext:
+ * . Reset state:
+ * . SR.FD = 1 (FPU disabled)
+ * . SR.BL = 1 (Exceptions disabled)
+ * . SR.MD = 1 (Privileged Mode)
+ * . SR.MMU = 0 (MMU Disabled)
+ * . SR.CD = 0 (CTC User Visible)
+ * . SR.IMASK = Undefined (Interrupt Mask)
+ *
+ * Operations supposed to be performed by __stext:
+ * . prevent speculative fetch onto device memory while MMU is off
+ * . reflect the SH5 ABI as much as possible (r15, r26, r27, r18)
+ * . first, save CPU state and set it to something harmless
+ * . any CPU detection and/or endianness settings (?)
+ * . initialize EMI/LMI (but not TMU/RTC/INTC/SCIF): TBD
+ * . set initial TLB entries for cached and uncached regions
+ * (no fine granularity paging)
+ * . set initial cache state
+ * . enable MMU and caches
+ * . set CPU to a consistent state
+ * . registers (including stack pointer and current/KCR0)
+ * . NOT expecting to set Exception handling nor VBR/RESVEC/DCR
+ * at this stage. This is all to later Linux initialization steps.
+ * . initialize FPU
+ * . clear BSS
+ * . jump into start_kernel()
+ * . be prepared for a hopeless return from start_kernel().
+ *
+ */
+ .global _stext
+_stext:
+ /*
+ * Prevent speculative fetch on device memory due to
+ * uninitialized target registers.
+ */
+ ptabs/u ZERO, tr0
+ ptabs/u ZERO, tr1
+ ptabs/u ZERO, tr2
+ ptabs/u ZERO, tr3
+ ptabs/u ZERO, tr4
+ ptabs/u ZERO, tr5
+ ptabs/u ZERO, tr6
+ ptabs/u ZERO, tr7
+ synci
+
+ /*
+ * Read/Set CPU state. After this block:
+ * r29 = Initial SR
+ */
+ getcon SR, r29
+ movi SR_HARMLESS, r20
+ putcon r20, SR
+
+ /*
+ * Initialize EMI/LMI. To Be Done.
+ */
+
+ /*
+ * CPU detection and/or endianness settings (?). To Be Done.
+ * Pure PIC code here, please ! Just save state into r30.
+ * After this block:
+ * r30 = CPU type/Platform Endianness
+ */
+
+ /*
+ * Set initial TLB entries for cached and uncached regions.
+ * Note: PTA/BLINK is PIC code, PTABS/BLINK isn't !
+ */
+ /* Clear ITLBs */
+ pta clear_ITLB, tr1
+ movi MMUIR_FIRST, r21
+ movi MMUIR_END, r22
+clear_ITLB:
+ putcfg r21, 0, ZERO /* Clear MMUIR[n].PTEH.V */
+ addi r21, MMUIR_STEP, r21
+ bne r21, r22, tr1
+
+ /* Clear DTLBs */
+ pta clear_DTLB, tr1
+ movi MMUDR_FIRST, r21
+ movi MMUDR_END, r22
+clear_DTLB:
+ putcfg r21, 0, ZERO /* Clear MMUDR[n].PTEH.V */
+ addi r21, MMUDR_STEP, r21
+ bne r21, r22, tr1
+
+ /* Map one big (512Mb) page for ITLB */
+ movi MMUIR_FIRST, r21
+ movi MMUIR_TEXT_L, r22 /* PTEL first */
+ add.l r22, r63, r22 /* Sign extend */
+ putcfg r21, 1, r22 /* Set MMUIR[0].PTEL */
+ movi MMUIR_TEXT_H, r22 /* PTEH last */
+ add.l r22, r63, r22 /* Sign extend */
+ putcfg r21, 0, r22 /* Set MMUIR[0].PTEH */
+
+ /* Map one big CACHED (512Mb) page for DTLB */
+ movi MMUDR_FIRST, r21
+ movi MMUDR_CACHED_L, r22 /* PTEL first */
+ add.l r22, r63, r22 /* Sign extend */
+ putcfg r21, 1, r22 /* Set MMUDR[0].PTEL */
+ movi MMUDR_CACHED_H, r22 /* PTEH last */
+ add.l r22, r63, r22 /* Sign extend */
+ putcfg r21, 0, r22 /* Set MMUDR[0].PTEH */
+
+ /*
+ * Setup a DTLB translation for SCIF phys.
+ */
+ addi r21, MMUDR_STEP, r21
+ movi 0x0a03, r22 /* SCIF phys */
+ shori 0x0148, r22
+ putcfg r21, 1, r22 /* PTEL first */
+ movi 0xfa03, r22 /* 0xfa030000, fixed SCIF virt */
+ shori 0x0003, r22
+ putcfg r21, 0, r22 /* PTEH last */
+
+ /*
+ * Set cache behaviours.
+ */
+ /* ICache */
+ movi ICCR_BASE, r21
+ movi ICCR0_INIT_VAL, r22
+ movi ICCR1_INIT_VAL, r23
+ putcfg r21, ICCR_REG0, r22
+ putcfg r21, ICCR_REG1, r23
+
+ /* OCache */
+ movi OCCR_BASE, r21
+ movi OCCR0_INIT_VAL, r22
+ movi OCCR1_INIT_VAL, r23
+ putcfg r21, OCCR_REG0, r22
+ putcfg r21, OCCR_REG1, r23
+
+
+ /*
+ * Enable Caches and MMU. Do the first non-PIC jump.
+ * Now head.S global variables, constants and externs
+ * can be used.
+ */
+ getcon SR, r21
+ movi SR_ENABLE_MMU, r22
+ or r21, r22, r21
+ putcon r21, SSR
+ movi hyperspace, r22
+ ori r22, 1, r22 /* Make it SHmedia, not required but..*/
+ putcon r22, SPC
+ synco
+ rte /* And now go into the hyperspace ... */
+hyperspace: /* ... that's the next instruction ! */
+
+ /*
+ * Set CPU to a consistent state.
+ * r31 = FPU support flag
+ * tr0/tr7 in use. Others give a chance to loop somewhere safe
+ */
+ movi start_kernel, r32
+ ori r32, 1, r32
+
+ ptabs r32, tr0 /* r32 = _start_kernel address */
+ pta/u hopeless, tr1
+ pta/u hopeless, tr2
+ pta/u hopeless, tr3
+ pta/u hopeless, tr4
+ pta/u hopeless, tr5
+ pta/u hopeless, tr6
+ pta/u hopeless, tr7
+ gettr tr1, r28 /* r28 = hopeless address */
+
+ /* Set initial stack pointer */
+ movi init_thread_union, SP
+ putcon SP, KCR0 /* Set current to init_task */
+ movi THREAD_SIZE, r22 /* Point to the end */
+ add SP, r22, SP
+
+ /*
+ * Initialize FPU.
+ * Keep FPU flag in r31. After this block:
+ * r31 = FPU flag
+ */
+ movi fpu_in_use, r31 /* Temporary */
+
+#ifdef CONFIG_SH_FPU
+ getcon SR, r21
+ movi SR_ENABLE_FPU, r22
+ and r21, r22, r22
+ putcon r22, SR /* Try to enable */
+ getcon SR, r22
+ xor r21, r22, r21
+ shlri r21, 15, r21 /* Supposedly 0/1 */
+ st.q r31, 0 , r21 /* Set fpu_in_use */
+#else
+ movi 0, r21
+ st.q r31, 0 , r21 /* Set fpu_in_use */
+#endif
+ or r21, ZERO, r31 /* Set FPU flag at last */
+
+#ifndef CONFIG_SH_NO_BSS_INIT
+/* Don't clear BSS if running on slow platforms such as an RTL simulation,
+ remote memory via SHdebug link, etc. For these the memory can be guaranteed
+ to be all zero on boot anyway. */
+ /*
+ * Clear bss
+ */
+ pta clear_quad, tr1
+ movi __bss_start, r22
+ movi _end, r23
+clear_quad:
+ st.q r22, 0, ZERO
+ addi r22, 8, r22
+ bne r22, r23, tr1 /* Both quad aligned, see vmlinux.lds.S */
+#endif
+ pta/u hopeless, tr1
+
+ /* Say bye to head.S but be prepared to wrongly get back ... */
+ blink tr0, LINK
+
+ /* If we ever get back here through LINK/tr1-tr7 */
+ pta/u hopeless, tr7
+
+hopeless:
+ /*
+ * Something's badly wrong here. Loop endlessly,
+ * there's nothing more we can do about it.
+ *
+ * Note on hopeless: it can be jumped into invariably
+ * before or after jumping into hyperspace. The only
+ * requirement is to be PIC called (PTA) before and
+ * any way (PTA/PTABS) after. According to Virtual
+ * to Physical mapping a simulator/emulator can easily
+ * tell where we came here from just looking at hopeless
+ * (PC) address.
+ *
+ * For debugging purposes:
+ * (r28) hopeless/loop address
+ * (r29) Original SR
+ * (r30) CPU type/Platform endianness
+ * (r31) FPU Support
+ * (r32) _start_kernel address
+ */
+ blink tr7, ZERO
diff --git a/arch/sh/kernel/hw_breakpoint.c b/arch/sh/kernel/hw_breakpoint.c
new file mode 100644
index 00000000..f9173766
--- /dev/null
+++ b/arch/sh/kernel/hw_breakpoint.c
@@ -0,0 +1,421 @@
+/*
+ * arch/sh/kernel/hw_breakpoint.c
+ *
+ * Unified kernel/user-space hardware breakpoint facility for the on-chip UBC.
+ *
+ * Copyright (C) 2009 - 2010 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/perf_event.h>
+#include <linux/hw_breakpoint.h>
+#include <linux/percpu.h>
+#include <linux/kallsyms.h>
+#include <linux/notifier.h>
+#include <linux/kprobes.h>
+#include <linux/kdebug.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <asm/hw_breakpoint.h>
+#include <asm/mmu_context.h>
+#include <asm/ptrace.h>
+#include <asm/traps.h>
+
+/*
+ * Stores the breakpoints currently in use on each breakpoint address
+ * register for each cpus
+ */
+static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM]);
+
+/*
+ * A dummy placeholder for early accesses until the CPUs get a chance to
+ * register their UBCs later in the boot process.
+ */
+static struct sh_ubc ubc_dummy = { .num_events = 0 };
+
+static struct sh_ubc *sh_ubc __read_mostly = &ubc_dummy;
+
+/*
+ * Install a perf counter breakpoint.
+ *
+ * We seek a free UBC channel and use it for this breakpoint.
+ *
+ * Atomic: we hold the counter->ctx->lock and we only handle variables
+ * and registers local to this cpu.
+ */
+int arch_install_hw_breakpoint(struct perf_event *bp)
+{
+ struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+ int i;
+
+ for (i = 0; i < sh_ubc->num_events; i++) {
+ struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]);
+
+ if (!*slot) {
+ *slot = bp;
+ break;
+ }
+ }
+
+ if (WARN_ONCE(i == sh_ubc->num_events, "Can't find any breakpoint slot"))
+ return -EBUSY;
+
+ clk_enable(sh_ubc->clk);
+ sh_ubc->enable(info, i);
+
+ return 0;
+}
+
+/*
+ * Uninstall the breakpoint contained in the given counter.
+ *
+ * First we search the debug address register it uses and then we disable
+ * it.
+ *
+ * Atomic: we hold the counter->ctx->lock and we only handle variables
+ * and registers local to this cpu.
+ */
+void arch_uninstall_hw_breakpoint(struct perf_event *bp)
+{
+ struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+ int i;
+
+ for (i = 0; i < sh_ubc->num_events; i++) {
+ struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]);
+
+ if (*slot == bp) {
+ *slot = NULL;
+ break;
+ }
+ }
+
+ if (WARN_ONCE(i == sh_ubc->num_events, "Can't find any breakpoint slot"))
+ return;
+
+ sh_ubc->disable(info, i);
+ clk_disable(sh_ubc->clk);
+}
+
+static int get_hbp_len(u16 hbp_len)
+{
+ unsigned int len_in_bytes = 0;
+
+ switch (hbp_len) {
+ case SH_BREAKPOINT_LEN_1:
+ len_in_bytes = 1;
+ break;
+ case SH_BREAKPOINT_LEN_2:
+ len_in_bytes = 2;
+ break;
+ case SH_BREAKPOINT_LEN_4:
+ len_in_bytes = 4;
+ break;
+ case SH_BREAKPOINT_LEN_8:
+ len_in_bytes = 8;
+ break;
+ }
+ return len_in_bytes;
+}
+
+/*
+ * Check for virtual address in kernel space.
+ */
+int arch_check_bp_in_kernelspace(struct perf_event *bp)
+{
+ unsigned int len;
+ unsigned long va;
+ struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+
+ va = info->address;
+ len = get_hbp_len(info->len);
+
+ return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
+}
+
+int arch_bp_generic_fields(int sh_len, int sh_type,
+ int *gen_len, int *gen_type)
+{
+ /* Len */
+ switch (sh_len) {
+ case SH_BREAKPOINT_LEN_1:
+ *gen_len = HW_BREAKPOINT_LEN_1;
+ break;
+ case SH_BREAKPOINT_LEN_2:
+ *gen_len = HW_BREAKPOINT_LEN_2;
+ break;
+ case SH_BREAKPOINT_LEN_4:
+ *gen_len = HW_BREAKPOINT_LEN_4;
+ break;
+ case SH_BREAKPOINT_LEN_8:
+ *gen_len = HW_BREAKPOINT_LEN_8;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Type */
+ switch (sh_type) {
+ case SH_BREAKPOINT_READ:
+ *gen_type = HW_BREAKPOINT_R;
+ break;
+ case SH_BREAKPOINT_WRITE:
+ *gen_type = HW_BREAKPOINT_W;
+ break;
+ case SH_BREAKPOINT_RW:
+ *gen_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int arch_build_bp_info(struct perf_event *bp)
+{
+ struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+
+ info->address = bp->attr.bp_addr;
+
+ /* Len */
+ switch (bp->attr.bp_len) {
+ case HW_BREAKPOINT_LEN_1:
+ info->len = SH_BREAKPOINT_LEN_1;
+ break;
+ case HW_BREAKPOINT_LEN_2:
+ info->len = SH_BREAKPOINT_LEN_2;
+ break;
+ case HW_BREAKPOINT_LEN_4:
+ info->len = SH_BREAKPOINT_LEN_4;
+ break;
+ case HW_BREAKPOINT_LEN_8:
+ info->len = SH_BREAKPOINT_LEN_8;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Type */
+ switch (bp->attr.bp_type) {
+ case HW_BREAKPOINT_R:
+ info->type = SH_BREAKPOINT_READ;
+ break;
+ case HW_BREAKPOINT_W:
+ info->type = SH_BREAKPOINT_WRITE;
+ break;
+ case HW_BREAKPOINT_W | HW_BREAKPOINT_R:
+ info->type = SH_BREAKPOINT_RW;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * Validate the arch-specific HW Breakpoint register settings
+ */
+int arch_validate_hwbkpt_settings(struct perf_event *bp)
+{
+ struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+ unsigned int align;
+ int ret;
+
+ ret = arch_build_bp_info(bp);
+ if (ret)
+ return ret;
+
+ ret = -EINVAL;
+
+ switch (info->len) {
+ case SH_BREAKPOINT_LEN_1:
+ align = 0;
+ break;
+ case SH_BREAKPOINT_LEN_2:
+ align = 1;
+ break;
+ case SH_BREAKPOINT_LEN_4:
+ align = 3;
+ break;
+ case SH_BREAKPOINT_LEN_8:
+ align = 7;
+ break;
+ default:
+ return ret;
+ }
+
+ /*
+ * For kernel-addresses, either the address or symbol name can be
+ * specified.
+ */
+ if (info->name)
+ info->address = (unsigned long)kallsyms_lookup_name(info->name);
+
+ /*
+ * Check that the low-order bits of the address are appropriate
+ * for the alignment implied by len.
+ */
+ if (info->address & align)
+ return -EINVAL;
+
+ return 0;
+}
+
+/*
+ * Release the user breakpoints used by ptrace
+ */
+void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
+{
+ int i;
+ struct thread_struct *t = &tsk->thread;
+
+ for (i = 0; i < sh_ubc->num_events; i++) {
+ unregister_hw_breakpoint(t->ptrace_bps[i]);
+ t->ptrace_bps[i] = NULL;
+ }
+}
+
+static int __kprobes hw_breakpoint_handler(struct die_args *args)
+{
+ int cpu, i, rc = NOTIFY_STOP;
+ struct perf_event *bp;
+ unsigned int cmf, resume_mask;
+
+ /*
+ * Do an early return if none of the channels triggered.
+ */
+ cmf = sh_ubc->triggered_mask();
+ if (unlikely(!cmf))
+ return NOTIFY_DONE;
+
+ /*
+ * By default, resume all of the active channels.
+ */
+ resume_mask = sh_ubc->active_mask();
+
+ /*
+ * Disable breakpoints during exception handling.
+ */
+ sh_ubc->disable_all();
+
+ cpu = get_cpu();
+ for (i = 0; i < sh_ubc->num_events; i++) {
+ unsigned long event_mask = (1 << i);
+
+ if (likely(!(cmf & event_mask)))
+ continue;
+
+ /*
+ * The counter may be concurrently released but that can only
+ * occur from a call_rcu() path. We can then safely fetch
+ * the breakpoint, use its callback, touch its counter
+ * while we are in an rcu_read_lock() path.
+ */
+ rcu_read_lock();
+
+ bp = per_cpu(bp_per_reg[i], cpu);
+ if (bp)
+ rc = NOTIFY_DONE;
+
+ /*
+ * Reset the condition match flag to denote completion of
+ * exception handling.
+ */
+ sh_ubc->clear_triggered_mask(event_mask);
+
+ /*
+ * bp can be NULL due to concurrent perf counter
+ * removing.
+ */
+ if (!bp) {
+ rcu_read_unlock();
+ break;
+ }
+
+ /*
+ * Don't restore the channel if the breakpoint is from
+ * ptrace, as it always operates in one-shot mode.
+ */
+ if (bp->overflow_handler == ptrace_triggered)
+ resume_mask &= ~(1 << i);
+
+ perf_bp_event(bp, args->regs);
+
+ /* Deliver the signal to userspace */
+ if (!arch_check_bp_in_kernelspace(bp)) {
+ siginfo_t info;
+
+ info.si_signo = args->signr;
+ info.si_errno = notifier_to_errno(rc);
+ info.si_code = TRAP_HWBKPT;
+
+ force_sig_info(args->signr, &info, current);
+ }
+
+ rcu_read_unlock();
+ }
+
+ if (cmf == 0)
+ rc = NOTIFY_DONE;
+
+ sh_ubc->enable_all(resume_mask);
+
+ put_cpu();
+
+ return rc;
+}
+
+BUILD_TRAP_HANDLER(breakpoint)
+{
+ unsigned long ex = lookup_exception_vector();
+ TRAP_HANDLER_DECL;
+
+ notify_die(DIE_BREAKPOINT, "breakpoint", regs, 0, ex, SIGTRAP);
+}
+
+/*
+ * Handle debug exception notifications.
+ */
+int __kprobes hw_breakpoint_exceptions_notify(struct notifier_block *unused,
+ unsigned long val, void *data)
+{
+ struct die_args *args = data;
+
+ if (val != DIE_BREAKPOINT)
+ return NOTIFY_DONE;
+
+ /*
+ * If the breakpoint hasn't been triggered by the UBC, it's
+ * probably from a debugger, so don't do anything more here.
+ *
+ * This also permits the UBC interface clock to remain off for
+ * non-UBC breakpoints, as we don't need to check the triggered
+ * or active channel masks.
+ */
+ if (args->trapnr != sh_ubc->trap_nr)
+ return NOTIFY_DONE;
+
+ return hw_breakpoint_handler(data);
+}
+
+void hw_breakpoint_pmu_read(struct perf_event *bp)
+{
+ /* TODO */
+}
+
+int register_sh_ubc(struct sh_ubc *ubc)
+{
+ /* Bail if it's already assigned */
+ if (sh_ubc != &ubc_dummy)
+ return -EBUSY;
+ sh_ubc = ubc;
+
+ pr_info("HW Breakpoints: %s UBC support registered\n", ubc->name);
+
+ WARN_ON(ubc->num_events > HBP_NUM);
+
+ return 0;
+}
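
For orientation (not part of this patch), kernel code normally reaches this UBC support through the generic hw_breakpoint/perf API rather than calling arch_install_hw_breakpoint() directly. The following is a hedged sketch modelled on the in-tree hw_breakpoint sample module; the watched symbol is made up and the handler/registration prototypes are assumed for a 3.x-era kernel, so treat the details as assumptions rather than part of this interface.

/* Hedged sketch: watch writes to a kernel symbol via the generic API.
 * "pid_max" is only an example symbol; the handler signature and the
 * register_wide_hw_breakpoint() prototype are assumed for this era. */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/err.h>

static struct perf_event * __percpu *wide_bp;

static void example_bp_handler(struct perf_event *bp,
			       struct perf_sample_data *data,
			       struct pt_regs *regs)
{
	pr_info("example: watched symbol was written to\n");
}

static int __init example_bp_init(void)
{
	struct perf_event_attr attr;

	hw_breakpoint_init(&attr);
	attr.bp_addr = kallsyms_lookup_name("pid_max");	/* example symbol */
	attr.bp_len = HW_BREAKPOINT_LEN_4;
	attr.bp_type = HW_BREAKPOINT_W;

	wide_bp = register_wide_hw_breakpoint(&attr, example_bp_handler, NULL);
	if (IS_ERR((void __force *)wide_bp))
		return PTR_ERR((void __force *)wide_bp);
	return 0;
}

static void __exit example_bp_exit(void)
{
	unregister_wide_hw_breakpoint(wide_bp);
}

module_init(example_bp_init);
module_exit(example_bp_exit);
MODULE_LICENSE("GPL");
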
diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c
new file mode 100644
index 00000000..ee226e20
--- /dev/null
+++ b/arch/sh/kernel/idle.c
@@ -0,0 +1,162 @@
+/*
+ * The idle loop for all SuperH platforms.
+ *
+ * Copyright (C) 2002 - 2009 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/pm.h>
+#include <linux/tick.h>
+#include <linux/preempt.h>
+#include <linux/thread_info.h>
+#include <linux/irqflags.h>
+#include <linux/smp.h>
+#include <linux/cpuidle.h>
+#include <linux/atomic.h>
+#include <asm/pgalloc.h>
+#include <asm/smp.h>
+#include <asm/bl_bit.h>
+
+void (*pm_idle)(void);
+
+static int hlt_counter;
+
+static int __init nohlt_setup(char *__unused)
+{
+ hlt_counter = 1;
+ return 1;
+}
+__setup("nohlt", nohlt_setup);
+
+static int __init hlt_setup(char *__unused)
+{
+ hlt_counter = 0;
+ return 1;
+}
+__setup("hlt", hlt_setup);
+
+static inline int hlt_works(void)
+{
+ return !hlt_counter;
+}
+
+/*
+ * On SMP it's slightly faster (but much more power-consuming!)
+ * to poll the ->work.need_resched flag instead of waiting for the
+ * cross-CPU IPI to arrive. Use this option with caution.
+ */
+static void poll_idle(void)
+{
+ local_irq_enable();
+ while (!need_resched())
+ cpu_relax();
+}
+
+void default_idle(void)
+{
+ if (hlt_works()) {
+ clear_thread_flag(TIF_POLLING_NRFLAG);
+ smp_mb__after_clear_bit();
+
+ set_bl_bit();
+ if (!need_resched()) {
+ local_irq_enable();
+ cpu_sleep();
+ } else
+ local_irq_enable();
+
+ set_thread_flag(TIF_POLLING_NRFLAG);
+ clear_bl_bit();
+ } else
+ poll_idle();
+}
+
+/*
+ * The idle thread. There's no useful work to be done, so just try to conserve
+ * power and have a low exit latency (i.e. sit in a loop waiting for somebody to
+ * say that they'd like to reschedule)
+ */
+void cpu_idle(void)
+{
+ unsigned int cpu = smp_processor_id();
+
+ set_thread_flag(TIF_POLLING_NRFLAG);
+
+ /* endless idle loop with no priority at all */
+ while (1) {
+ tick_nohz_idle_enter();
+ rcu_idle_enter();
+
+ while (!need_resched()) {
+ check_pgt_cache();
+ rmb();
+
+ if (cpu_is_offline(cpu))
+ play_dead();
+
+ local_irq_disable();
+ /* Don't trace irqs off for idle */
+ stop_critical_timings();
+ if (cpuidle_idle_call())
+ pm_idle();
+ /*
+ * Sanity check to ensure that pm_idle() returns
+ * with IRQs enabled
+ */
+ WARN_ON(irqs_disabled());
+ start_critical_timings();
+ }
+
+ rcu_idle_exit();
+ tick_nohz_idle_exit();
+ schedule_preempt_disabled();
+ }
+}
+
+void __init select_idle_routine(void)
+{
+ /*
+ * If a platform has set its own idle routine, leave it alone.
+ */
+ if (pm_idle)
+ return;
+
+ if (hlt_works())
+ pm_idle = default_idle;
+ else
+ pm_idle = poll_idle;
+}
+
+static void do_nothing(void *unused)
+{
+}
+
+void stop_this_cpu(void *unused)
+{
+ local_irq_disable();
+ set_cpu_online(smp_processor_id(), false);
+
+ for (;;)
+ cpu_sleep();
+}
+
+/*
+ * cpu_idle_wait - Used to ensure that all the CPUs discard the old value of
+ * pm_idle and update to the new pm_idle value. Required while changing pm_idle
+ * handler on SMP systems.
+ *
+ * Caller must have changed pm_idle to the new value before the call. Old
+ * pm_idle value will not be used by any CPU after the return of this function.
+ */
+void cpu_idle_wait(void)
+{
+ smp_mb();
+ /* kick all the CPUs so that they exit out of pm_idle */
+ smp_call_function(do_nothing, NULL, 1);
+}
+EXPORT_SYMBOL_GPL(cpu_idle_wait);
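
A short usage note (not part of this patch): the contract spelled out above is that a platform first stores its own handler in pm_idle and then calls cpu_idle_wait() so that every CPU drops the old pointer. A minimal sketch, assuming the same headers as this file and a made-up handler:

/* Hypothetical platform code switching the idle handler at runtime. */
static void example_board_idle(void)
{
	/* re-enable interrupts before sleeping, as default_idle() does */
	local_irq_enable();
	cpu_sleep();
}

static void example_board_switch_idle(void)
{
	pm_idle = example_board_idle;	/* change the handler first ... */
	cpu_idle_wait();		/* ... then make every CPU pick it up */
}
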
diff --git a/arch/sh/kernel/init_task.c b/arch/sh/kernel/init_task.c
new file mode 100644
index 00000000..11f2ea55
--- /dev/null
+++ b/arch/sh/kernel/init_task.c
@@ -0,0 +1,30 @@
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/init_task.h>
+#include <linux/mqueue.h>
+#include <linux/fs.h>
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+
+static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
+static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
+struct pt_regs fake_swapper_regs;
+/*
+ * Initial thread structure.
+ *
+ * We need to make sure that this is 8192-byte aligned due to the
+ * way process stacks are handled. This is done by having a special
+ * "init_task" linker map entry..
+ */
+union thread_union init_thread_union __init_task_data =
+ { INIT_THREAD_INFO(init_task) };
+
+/*
+ * Initial task structure.
+ *
+ * All other task structs will be allocated on slabs in fork.c
+ */
+struct task_struct init_task = INIT_TASK(init_task);
+
+EXPORT_SYMBOL(init_task);
diff --git a/arch/sh/kernel/io.c b/arch/sh/kernel/io.c
new file mode 100644
index 00000000..5c51b794
--- /dev/null
+++ b/arch/sh/kernel/io.c
@@ -0,0 +1,114 @@
+/*
+ * arch/sh/kernel/io.c - Machine independent I/O functions.
+ *
+ * Copyright (C) 2000 - 2009 Stuart Menefy
+ * Copyright (C) 2005 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <asm/machvec.h>
+#include <asm/io.h>
+
+/*
+ * Copy data from IO memory space to "real" memory space.
+ */
+void memcpy_fromio(void *to, const volatile void __iomem *from, unsigned long count)
+{
+ /*
+ * Would it be worthwhile doing byte and long transfers first
+ * to try and get aligned?
+ */
+#ifdef CONFIG_CPU_SH4
+ if ((count >= 0x20) &&
+ (((u32)to & 0x1f) == 0) && (((u32)from & 0x3) == 0)) {
+ int tmp2, tmp3, tmp4, tmp5, tmp6;
+
+ __asm__ __volatile__(
+ "1: \n\t"
+ "mov.l @%7+, r0 \n\t"
+ "mov.l @%7+, %2 \n\t"
+ "movca.l r0, @%0 \n\t"
+ "mov.l @%7+, %3 \n\t"
+ "mov.l @%7+, %4 \n\t"
+ "mov.l @%7+, %5 \n\t"
+ "mov.l @%7+, %6 \n\t"
+ "mov.l @%7+, r7 \n\t"
+ "mov.l @%7+, r0 \n\t"
+ "mov.l %2, @(0x04,%0) \n\t"
+ "mov #0x20, %2 \n\t"
+ "mov.l %3, @(0x08,%0) \n\t"
+ "sub %2, %1 \n\t"
+ "mov.l %4, @(0x0c,%0) \n\t"
+ "cmp/hi %1, %2 ! T if 32 > count \n\t"
+ "mov.l %5, @(0x10,%0) \n\t"
+ "mov.l %6, @(0x14,%0) \n\t"
+ "mov.l r7, @(0x18,%0) \n\t"
+ "mov.l r0, @(0x1c,%0) \n\t"
+ "bf.s 1b \n\t"
+ " add #0x20, %0 \n\t"
+ : "=&r" (to), "=&r" (count),
+ "=&r" (tmp2), "=&r" (tmp3), "=&r" (tmp4),
+ "=&r" (tmp5), "=&r" (tmp6), "=&r" (from)
+ : "7"(from), "0" (to), "1" (count)
+ : "r0", "r7", "t", "memory");
+ }
+#endif
+
+ if ((((u32)to | (u32)from) & 0x3) == 0) {
+ for (; count > 3; count -= 4) {
+ *(u32 *)to = *(volatile u32 *)from;
+ to += 4;
+ from += 4;
+ }
+ }
+
+ for (; count > 0; count--) {
+ *(u8 *)to = *(volatile u8 *)from;
+ to++;
+ from++;
+ }
+
+ mb();
+}
+EXPORT_SYMBOL(memcpy_fromio);
+
+/*
+ * Copy data from "real" memory space to IO memory space.
+ */
+void memcpy_toio(volatile void __iomem *to, const void *from, unsigned long count)
+{
+ if ((((u32)to | (u32)from) & 0x3) == 0) {
+ for ( ; count > 3; count -= 4) {
+ *(volatile u32 *)to = *(u32 *)from;
+ to += 4;
+ from += 4;
+ }
+ }
+
+ for (; count > 0; count--) {
+ *(volatile u8 *)to = *(u8 *)from;
+ to++;
+ from++;
+ }
+
+ mb();
+}
+EXPORT_SYMBOL(memcpy_toio);
+
+/*
+ * "memset" on IO memory space.
+ * This needs to be optimized.
+ */
+void memset_io(volatile void __iomem *dst, int c, unsigned long count)
+{
+ while (count) {
+ count--;
+ writeb(c, dst);
+ dst++;
+ }
+}
+EXPORT_SYMBOL(memset_io);
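
For context (not part of this patch), memcpy_fromio()/memcpy_toio() are what drivers use to move whole buffers across an ioremap()ed window; note that the SH-4 fast path above only kicks in for 32-byte-aligned destinations and counts of at least 32 bytes. A minimal hedged sketch with a made-up physical address:

#include <linux/io.h>
#include <linux/errno.h>

/* Hypothetical driver helper: copy a block out of a device window. */
static int example_read_window(void *buf, unsigned long len)
{
	void __iomem *win = ioremap(0xa4000000, len);	/* made-up address */

	if (!win)
		return -ENOMEM;

	memcpy_fromio(buf, win, len);
	iounmap(win);
	return 0;
}
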
diff --git a/arch/sh/kernel/io_trapped.c b/arch/sh/kernel/io_trapped.c
new file mode 100644
index 00000000..c0a9761f
--- /dev/null
+++ b/arch/sh/kernel/io_trapped.c
@@ -0,0 +1,296 @@
+/*
+ * Trapped io support
+ *
+ * Copyright (C) 2008 Magnus Damm
+ *
+ * Intercept io operations by trapping.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/bitops.h>
+#include <linux/vmalloc.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <asm/mmu_context.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/io_trapped.h>
+
+#define TRAPPED_PAGES_MAX 16
+
+#ifdef CONFIG_HAS_IOPORT
+LIST_HEAD(trapped_io);
+EXPORT_SYMBOL_GPL(trapped_io);
+#endif
+#ifdef CONFIG_HAS_IOMEM
+LIST_HEAD(trapped_mem);
+EXPORT_SYMBOL_GPL(trapped_mem);
+#endif
+static DEFINE_SPINLOCK(trapped_lock);
+
+static int trapped_io_disable __read_mostly;
+
+static int __init trapped_io_setup(char *__unused)
+{
+ trapped_io_disable = 1;
+ return 1;
+}
+__setup("noiotrap", trapped_io_setup);
+
+int register_trapped_io(struct trapped_io *tiop)
+{
+ struct resource *res;
+ unsigned long len = 0, flags = 0;
+ struct page *pages[TRAPPED_PAGES_MAX];
+ int k, n;
+
+ if (unlikely(trapped_io_disable))
+ return 0;
+
+ /* structure must be page aligned */
+ if ((unsigned long)tiop & (PAGE_SIZE - 1))
+ goto bad;
+
+ for (k = 0; k < tiop->num_resources; k++) {
+ res = tiop->resource + k;
+ len += roundup(resource_size(res), PAGE_SIZE);
+ flags |= res->flags;
+ }
+
+ /* support IORESOURCE_IO _or_ MEM, not both */
+ if (hweight_long(flags) != 1)
+ goto bad;
+
+ n = len >> PAGE_SHIFT;
+
+ if (n >= TRAPPED_PAGES_MAX)
+ goto bad;
+
+ for (k = 0; k < n; k++)
+ pages[k] = virt_to_page(tiop);
+
+ tiop->virt_base = vmap(pages, n, VM_MAP, PAGE_NONE);
+ if (!tiop->virt_base)
+ goto bad;
+
+ len = 0;
+ for (k = 0; k < tiop->num_resources; k++) {
+ res = tiop->resource + k;
+ pr_info("trapped io 0x%08lx overrides %s 0x%08lx\n",
+ (unsigned long)(tiop->virt_base + len),
+ res->flags & IORESOURCE_IO ? "io" : "mmio",
+ (unsigned long)res->start);
+ len += roundup(resource_size(res), PAGE_SIZE);
+ }
+
+ tiop->magic = IO_TRAPPED_MAGIC;
+ INIT_LIST_HEAD(&tiop->list);
+ spin_lock_irq(&trapped_lock);
+#ifdef CONFIG_HAS_IOPORT
+ if (flags & IORESOURCE_IO)
+ list_add(&tiop->list, &trapped_io);
+#endif
+#ifdef CONFIG_HAS_IOMEM
+ if (flags & IORESOURCE_MEM)
+ list_add(&tiop->list, &trapped_mem);
+#endif
+ spin_unlock_irq(&trapped_lock);
+
+ return 0;
+ bad:
+ pr_warning("unable to install trapped io filter\n");
+ return -1;
+}
+EXPORT_SYMBOL_GPL(register_trapped_io);
+
+void __iomem *match_trapped_io_handler(struct list_head *list,
+ unsigned long offset,
+ unsigned long size)
+{
+ unsigned long voffs;
+ struct trapped_io *tiop;
+ struct resource *res;
+ int k, len;
+ unsigned long flags;
+
+ spin_lock_irqsave(&trapped_lock, flags);
+ list_for_each_entry(tiop, list, list) {
+ voffs = 0;
+ for (k = 0; k < tiop->num_resources; k++) {
+ res = tiop->resource + k;
+ if (res->start == offset) {
+ spin_unlock_irqrestore(&trapped_lock, flags);
+ return tiop->virt_base + voffs;
+ }
+
+ len = resource_size(res);
+ voffs += roundup(len, PAGE_SIZE);
+ }
+ }
+ spin_unlock_irqrestore(&trapped_lock, flags);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(match_trapped_io_handler);
+
+static struct trapped_io *lookup_tiop(unsigned long address)
+{
+ pgd_t *pgd_k;
+ pud_t *pud_k;
+ pmd_t *pmd_k;
+ pte_t *pte_k;
+ pte_t entry;
+
+ pgd_k = swapper_pg_dir + pgd_index(address);
+ if (!pgd_present(*pgd_k))
+ return NULL;
+
+ pud_k = pud_offset(pgd_k, address);
+ if (!pud_present(*pud_k))
+ return NULL;
+
+ pmd_k = pmd_offset(pud_k, address);
+ if (!pmd_present(*pmd_k))
+ return NULL;
+
+ pte_k = pte_offset_kernel(pmd_k, address);
+ entry = *pte_k;
+
+ return pfn_to_kaddr(pte_pfn(entry));
+}
+
+static unsigned long lookup_address(struct trapped_io *tiop,
+ unsigned long address)
+{
+ struct resource *res;
+ unsigned long vaddr = (unsigned long)tiop->virt_base;
+ unsigned long len;
+ int k;
+
+ for (k = 0; k < tiop->num_resources; k++) {
+ res = tiop->resource + k;
+ len = roundup(resource_size(res), PAGE_SIZE);
+ if (address < (vaddr + len))
+ return res->start + (address - vaddr);
+ vaddr += len;
+ }
+ return 0;
+}
+
+static unsigned long long copy_word(unsigned long src_addr, int src_len,
+ unsigned long dst_addr, int dst_len)
+{
+ unsigned long long tmp = 0;
+
+ switch (src_len) {
+ case 1:
+ tmp = __raw_readb(src_addr);
+ break;
+ case 2:
+ tmp = __raw_readw(src_addr);
+ break;
+ case 4:
+ tmp = __raw_readl(src_addr);
+ break;
+ case 8:
+ tmp = __raw_readq(src_addr);
+ break;
+ }
+
+ switch (dst_len) {
+ case 1:
+ __raw_writeb(tmp, dst_addr);
+ break;
+ case 2:
+ __raw_writew(tmp, dst_addr);
+ break;
+ case 4:
+ __raw_writel(tmp, dst_addr);
+ break;
+ case 8:
+ __raw_writeq(tmp, dst_addr);
+ break;
+ }
+
+ return tmp;
+}
+
+static unsigned long from_device(void *dst, const void *src, unsigned long cnt)
+{
+ struct trapped_io *tiop;
+ unsigned long src_addr = (unsigned long)src;
+ unsigned long long tmp;
+
+ pr_debug("trapped io read 0x%08lx (%ld)\n", src_addr, cnt);
+ tiop = lookup_tiop(src_addr);
+ WARN_ON(!tiop || (tiop->magic != IO_TRAPPED_MAGIC));
+
+ src_addr = lookup_address(tiop, src_addr);
+ if (!src_addr)
+ return cnt;
+
+ tmp = copy_word(src_addr,
+ max_t(unsigned long, cnt,
+ (tiop->minimum_bus_width / 8)),
+ (unsigned long)dst, cnt);
+
+ pr_debug("trapped io read 0x%08lx -> 0x%08llx\n", src_addr, tmp);
+ return 0;
+}
+
+static unsigned long to_device(void *dst, const void *src, unsigned long cnt)
+{
+ struct trapped_io *tiop;
+ unsigned long dst_addr = (unsigned long)dst;
+ unsigned long long tmp;
+
+ pr_debug("trapped io write 0x%08lx (%ld)\n", dst_addr, cnt);
+ tiop = lookup_tiop(dst_addr);
+ WARN_ON(!tiop || (tiop->magic != IO_TRAPPED_MAGIC));
+
+ dst_addr = lookup_address(tiop, dst_addr);
+ if (!dst_addr)
+ return cnt;
+
+ tmp = copy_word((unsigned long)src, cnt,
+ dst_addr, max_t(unsigned long, cnt,
+ (tiop->minimum_bus_width / 8)));
+
+ pr_debug("trapped io write 0x%08lx -> 0x%08llx\n", dst_addr, tmp);
+ return 0;
+}
+
+static struct mem_access trapped_io_access = {
+ from_device,
+ to_device,
+};
+
+int handle_trapped_io(struct pt_regs *regs, unsigned long address)
+{
+ mm_segment_t oldfs;
+ insn_size_t instruction;
+ int tmp;
+
+ if (trapped_io_disable)
+ return 0;
+ if (!lookup_tiop(address))
+ return 0;
+
+ WARN_ON(user_mode(regs));
+
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+ if (copy_from_user(&instruction, (void *)(regs->pc),
+ sizeof(instruction))) {
+ set_fs(oldfs);
+ return 0;
+ }
+
+ tmp = handle_unaligned_access(instruction, regs,
+ &trapped_io_access, 1, address);
+ set_fs(oldfs);
+ return tmp == 0;
+}
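
For context (not part of this patch), register_trapped_io() is normally fed a page-aligned descriptor from board code describing the windows to intercept. The sketch below is hedged: it only uses fields this file actually dereferences (resource, num_resources, minimum_bus_width), the full structure definition lives in <asm/io_trapped.h>, and every address and width is made up.

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <asm/io_trapped.h>

/* Hypothetical 16-bit peripheral that needs its MMIO accesses trapped. */
static struct resource example_trap_resources[] = {
	[0] = {
		.start	= 0xb8000000,		/* made-up MMIO window */
		.end	= 0xb8000fff,
		.flags	= IORESOURCE_MEM,
	},
};

/* The descriptor must be page aligned, as checked by register_trapped_io(). */
static struct trapped_io example_trapped_io __aligned(PAGE_SIZE) = {
	.resource		= example_trap_resources,
	.num_resources		= ARRAY_SIZE(example_trap_resources),
	.minimum_bus_width	= 16,	/* device only accepts 16-bit cycles */
};

static int __init example_board_trap_setup(void)
{
	return register_trapped_io(&example_trapped_io);
}
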
diff --git a/arch/sh/kernel/iomap.c b/arch/sh/kernel/iomap.c
new file mode 100644
index 00000000..2e8e8b9b
--- /dev/null
+++ b/arch/sh/kernel/iomap.c
@@ -0,0 +1,165 @@
+/*
+ * arch/sh/kernel/iomap.c
+ *
+ * Copyright (C) 2000 Niibe Yutaka
+ * Copyright (C) 2005 - 2007 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/module.h>
+#include <linux/io.h>
+
+unsigned int ioread8(void __iomem *addr)
+{
+ return readb(addr);
+}
+EXPORT_SYMBOL(ioread8);
+
+unsigned int ioread16(void __iomem *addr)
+{
+ return readw(addr);
+}
+EXPORT_SYMBOL(ioread16);
+
+unsigned int ioread16be(void __iomem *addr)
+{
+ return be16_to_cpu(__raw_readw(addr));
+}
+EXPORT_SYMBOL(ioread16be);
+
+unsigned int ioread32(void __iomem *addr)
+{
+ return readl(addr);
+}
+EXPORT_SYMBOL(ioread32);
+
+unsigned int ioread32be(void __iomem *addr)
+{
+ return be32_to_cpu(__raw_readl(addr));
+}
+EXPORT_SYMBOL(ioread32be);
+
+void iowrite8(u8 val, void __iomem *addr)
+{
+ writeb(val, addr);
+}
+EXPORT_SYMBOL(iowrite8);
+
+void iowrite16(u16 val, void __iomem *addr)
+{
+ writew(val, addr);
+}
+EXPORT_SYMBOL(iowrite16);
+
+void iowrite16be(u16 val, void __iomem *addr)
+{
+ __raw_writew(cpu_to_be16(val), addr);
+}
+EXPORT_SYMBOL(iowrite16be);
+
+void iowrite32(u32 val, void __iomem *addr)
+{
+ writel(val, addr);
+}
+EXPORT_SYMBOL(iowrite32);
+
+void iowrite32be(u32 val, void __iomem *addr)
+{
+ __raw_writel(cpu_to_be32(val), addr);
+}
+EXPORT_SYMBOL(iowrite32be);
+
+/*
+ * These are the "repeat MMIO read/write" functions.
+ * Note the "__raw" accesses, since we don't want to
+ * convert to CPU byte order. We write in "IO byte
+ * order" (we also don't have IO barriers).
+ */
+static inline void mmio_insb(void __iomem *addr, u8 *dst, int count)
+{
+ while (--count >= 0) {
+ u8 data = __raw_readb(addr);
+ *dst = data;
+ dst++;
+ }
+}
+
+static inline void mmio_insw(void __iomem *addr, u16 *dst, int count)
+{
+ while (--count >= 0) {
+ u16 data = __raw_readw(addr);
+ *dst = data;
+ dst++;
+ }
+}
+
+static inline void mmio_insl(void __iomem *addr, u32 *dst, int count)
+{
+ while (--count >= 0) {
+ u32 data = __raw_readl(addr);
+ *dst = data;
+ dst++;
+ }
+}
+
+static inline void mmio_outsb(void __iomem *addr, const u8 *src, int count)
+{
+ while (--count >= 0) {
+ __raw_writeb(*src, addr);
+ src++;
+ }
+}
+
+static inline void mmio_outsw(void __iomem *addr, const u16 *src, int count)
+{
+ while (--count >= 0) {
+ __raw_writew(*src, addr);
+ src++;
+ }
+}
+
+static inline void mmio_outsl(void __iomem *addr, const u32 *src, int count)
+{
+ while (--count >= 0) {
+ __raw_writel(*src, addr);
+ src++;
+ }
+}
+
+void ioread8_rep(void __iomem *addr, void *dst, unsigned long count)
+{
+ mmio_insb(addr, dst, count);
+}
+EXPORT_SYMBOL(ioread8_rep);
+
+void ioread16_rep(void __iomem *addr, void *dst, unsigned long count)
+{
+ mmio_insw(addr, dst, count);
+}
+EXPORT_SYMBOL(ioread16_rep);
+
+void ioread32_rep(void __iomem *addr, void *dst, unsigned long count)
+{
+ mmio_insl(addr, dst, count);
+}
+EXPORT_SYMBOL(ioread32_rep);
+
+void iowrite8_rep(void __iomem *addr, const void *src, unsigned long count)
+{
+ mmio_outsb(addr, src, count);
+}
+EXPORT_SYMBOL(iowrite8_rep);
+
+void iowrite16_rep(void __iomem *addr, const void *src, unsigned long count)
+{
+ mmio_outsw(addr, src, count);
+}
+EXPORT_SYMBOL(iowrite16_rep);
+
+void iowrite32_rep(void __iomem *addr, const void *src, unsigned long count)
+{
+ mmio_outsl(addr, src, count);
+}
+EXPORT_SYMBOL(iowrite32_rep);
diff --git a/arch/sh/kernel/ioport.c b/arch/sh/kernel/ioport.c
new file mode 100644
index 00000000..e3ad6103
--- /dev/null
+++ b/arch/sh/kernel/ioport.c
@@ -0,0 +1,43 @@
+/*
+ * arch/sh/kernel/ioport.c
+ *
+ * Copyright (C) 2000 Niibe Yutaka
+ * Copyright (C) 2005 - 2007 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/module.h>
+#include <linux/io.h>
+
+unsigned long sh_io_port_base __read_mostly = -1;
+EXPORT_SYMBOL(sh_io_port_base);
+
+void __iomem *__ioport_map(unsigned long addr, unsigned int size)
+{
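+ /* Boards may override the mapping; otherwise apply the linear port base offset */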
+ if (sh_mv.mv_ioport_map)
+ return sh_mv.mv_ioport_map(addr, size);
+
+ return (void __iomem *)(addr + sh_io_port_base);
+}
+EXPORT_SYMBOL(__ioport_map);
+
+void __iomem *ioport_map(unsigned long port, unsigned int nr)
+{
+ void __iomem *ret;
+
+ ret = __ioport_map_trapped(port, nr);
+ if (ret)
+ return ret;
+
+ return __ioport_map(port, nr);
+}
+EXPORT_SYMBOL(ioport_map);
+
+void ioport_unmap(void __iomem *addr)
+{
+ if (sh_mv.mv_ioport_unmap)
+ sh_mv.mv_ioport_unmap(addr);
+}
+EXPORT_SYMBOL(ioport_unmap);
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
new file mode 100644
index 00000000..a3ee9197
--- /dev/null
+++ b/arch/sh/kernel/irq.c
@@ -0,0 +1,284 @@
+/*
+ * linux/arch/sh/kernel/irq.c
+ *
+ * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
+ *
+ *
+ * SuperH version: Copyright (C) 1999 Niibe Yutaka
+ */
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/kernel_stat.h>
+#include <linux/seq_file.h>
+#include <linux/ftrace.h>
+#include <linux/delay.h>
+#include <linux/ratelimit.h>
+#include <asm/processor.h>
+#include <asm/machvec.h>
+#include <asm/uaccess.h>
+#include <asm/thread_info.h>
+#include <cpu/mmu_context.h>
+
+atomic_t irq_err_count;
+
+/*
+ * 'What should we do if we get a hw irq event on an illegal vector?'
+ * Each architecture has to answer this itself; it doesn't deserve
+ * a generic callback.
+ */
+void ack_bad_irq(unsigned int irq)
+{
+ atomic_inc(&irq_err_count);
+ printk("unexpected IRQ trap at vector %02x\n", irq);
+}
+
+#if defined(CONFIG_PROC_FS)
+/*
+ * /proc/interrupts printing for arch specific interrupts
+ */
+int arch_show_interrupts(struct seq_file *p, int prec)
+{
+ int j;
+
+ seq_printf(p, "%*s: ", prec, "NMI");
+ for_each_online_cpu(j)
+ seq_printf(p, "%10u ", irq_stat[j].__nmi_count);
+ seq_printf(p, " Non-maskable interrupts\n");
+
+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_IRQSTACKS
+/*
+ * per-CPU IRQ handling contexts (thread information and stack)
+ */
+union irq_ctx {
+ struct thread_info tinfo;
+ u32 stack[THREAD_SIZE/sizeof(u32)];
+};
+
+static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
+static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
+
+static char softirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
+static char hardirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
+
+static inline void handle_one_irq(unsigned int irq)
+{
+ union irq_ctx *curctx, *irqctx;
+
+ curctx = (union irq_ctx *)current_thread_info();
+ irqctx = hardirq_ctx[smp_processor_id()];
+
+ /*
+ * this is where we switch to the IRQ stack. However, if we are
+ * already using the IRQ stack (because we interrupted a hardirq
+ * handler) we can't do that and just have to keep using the
+ * current stack (which is the irq stack already after all)
+ */
+ if (curctx != irqctx) {
+ u32 *isp;
+
+ isp = (u32 *)((char *)irqctx + sizeof(*irqctx));
+ irqctx->tinfo.task = curctx->tinfo.task;
+ irqctx->tinfo.previous_sp = current_stack_pointer;
+
+ /*
+ * Copy the softirq bits in preempt_count so that the
+ * softirq checks work in the hardirq context.
+ */
+ irqctx->tinfo.preempt_count =
+ (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
+ (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
+
+ __asm__ __volatile__ (
+ "mov %0, r4 \n"
+ "mov r15, r8 \n"
+ "jsr @%1 \n"
+ /* switch to the irq stack */
+ " mov %2, r15 \n"
+ /* restore the stack (ring zero) */
+ "mov r8, r15 \n"
+ : /* no outputs */
+ : "r" (irq), "r" (generic_handle_irq), "r" (isp)
+ : "memory", "r0", "r1", "r2", "r3", "r4",
+ "r5", "r6", "r7", "r8", "t", "pr"
+ );
+ } else
+ generic_handle_irq(irq);
+}
+
+/*
+ * allocate per-cpu stacks for hardirq and for softirq processing
+ */
+void irq_ctx_init(int cpu)
+{
+ union irq_ctx *irqctx;
+
+ if (hardirq_ctx[cpu])
+ return;
+
+ irqctx = (union irq_ctx *)&hardirq_stack[cpu * THREAD_SIZE];
+ irqctx->tinfo.task = NULL;
+ irqctx->tinfo.exec_domain = NULL;
+ irqctx->tinfo.cpu = cpu;
+ irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
+ irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
+
+ hardirq_ctx[cpu] = irqctx;
+
+ irqctx = (union irq_ctx *)&softirq_stack[cpu * THREAD_SIZE];
+ irqctx->tinfo.task = NULL;
+ irqctx->tinfo.exec_domain = NULL;
+ irqctx->tinfo.cpu = cpu;
+ irqctx->tinfo.preempt_count = 0;
+ irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
+
+ softirq_ctx[cpu] = irqctx;
+
+ printk("CPU %u irqstacks, hard=%p soft=%p\n",
+ cpu, hardirq_ctx[cpu], softirq_ctx[cpu]);
+}
+
+void irq_ctx_exit(int cpu)
+{
+ hardirq_ctx[cpu] = NULL;
+}
+
+asmlinkage void do_softirq(void)
+{
+ unsigned long flags;
+ struct thread_info *curctx;
+ union irq_ctx *irqctx;
+ u32 *isp;
+
+ if (in_interrupt())
+ return;
+
+ local_irq_save(flags);
+
+ if (local_softirq_pending()) {
+ curctx = current_thread_info();
+ irqctx = softirq_ctx[smp_processor_id()];
+ irqctx->tinfo.task = curctx->task;
+ irqctx->tinfo.previous_sp = current_stack_pointer;
+
+ /* build the stack frame on the softirq stack */
+ isp = (u32 *)((char *)irqctx + sizeof(*irqctx));
+
+ __asm__ __volatile__ (
+ "mov r15, r9 \n"
+ "jsr @%0 \n"
+ /* switch to the softirq stack */
+ " mov %1, r15 \n"
+ /* restore the thread stack */
+ "mov r9, r15 \n"
+ : /* no outputs */
+ : "r" (__do_softirq), "r" (isp)
+ : "memory", "r0", "r1", "r2", "r3", "r4",
+ "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr"
+ );
+
+ /*
+ * Shouldn't happen, we returned above if in_interrupt():
+ */
+ WARN_ON_ONCE(softirq_count());
+ }
+
+ local_irq_restore(flags);
+}
+#else
+static inline void handle_one_irq(unsigned int irq)
+{
+ generic_handle_irq(irq);
+}
+#endif
+
+asmlinkage __irq_entry int do_IRQ(unsigned int irq, struct pt_regs *regs)
+{
+ struct pt_regs *old_regs = set_irq_regs(regs);
+
+ irq_enter();
+
+ irq = irq_demux(irq_lookup(irq));
+
+ if (irq != NO_IRQ_IGNORE) {
+ handle_one_irq(irq);
+ irq_finish(irq);
+ }
+
+ irq_exit();
+
+ set_irq_regs(old_regs);
+
+ return IRQ_HANDLED;
+}
+
+void __init init_IRQ(void)
+{
+ plat_irq_setup();
+
+ /* Perform the machine specific initialisation */
+ if (sh_mv.mv_init_irq)
+ sh_mv.mv_init_irq();
+
+ intc_finalize();
+
+ irq_ctx_init(smp_processor_id());
+}
+
+#ifdef CONFIG_SPARSE_IRQ
+int __init arch_probe_nr_irqs(void)
+{
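+ /* Size the IRQ space from the machine vector; the first NR_IRQS_LEGACY descriptors are preallocated */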
+ nr_irqs = sh_mv.mv_nr_irqs;
+ return NR_IRQS_LEGACY;
+}
+#endif
+
+#ifdef CONFIG_HOTPLUG_CPU
+static void route_irq(struct irq_data *data, unsigned int irq, unsigned int cpu)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+ struct irq_chip *chip = irq_data_get_irq_chip(data);
+
+ printk(KERN_INFO "IRQ%u: moving from cpu%u to cpu%u\n",
+ irq, data->node, cpu);
+
+ raw_spin_lock_irq(&desc->lock);
+ chip->irq_set_affinity(data, cpumask_of(cpu), false);
+ raw_spin_unlock_irq(&desc->lock);
+}
+
+/*
+ * The CPU has been marked offline. Migrate IRQs off this CPU. If
+ * the affinity settings do not allow other CPUs, force them onto any
+ * available CPU.
+ */
+void migrate_irqs(void)
+{
+ unsigned int irq, cpu = smp_processor_id();
+
+ for_each_active_irq(irq) {
+ struct irq_data *data = irq_get_irq_data(irq);
+
+ if (data->node == cpu) {
+ unsigned int newcpu = cpumask_any_and(data->affinity,
+ cpu_online_mask);
+ if (newcpu >= nr_cpu_ids) {
+ pr_info_ratelimited("IRQ%u no longer affine to CPU%u\n",
+ irq, cpu);
+
+ cpumask_setall(data->affinity);
+ newcpu = cpumask_any_and(data->affinity,
+ cpu_online_mask);
+ }
+
+ route_irq(data, irq, newcpu);
+ }
+ }
+}
+#endif
diff --git a/arch/sh/kernel/irq_32.c b/arch/sh/kernel/irq_32.c
new file mode 100644
index 00000000..e5a755be
--- /dev/null
+++ b/arch/sh/kernel/irq_32.c
@@ -0,0 +1,57 @@
+/*
+ * SHcompact irqflags support
+ *
+ * Copyright (C) 2006 - 2009 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/irqflags.h>
+#include <linux/module.h>
+
+void notrace arch_local_irq_restore(unsigned long flags)
+{
+ unsigned long __dummy0, __dummy1;
+
+ if (flags == ARCH_IRQ_DISABLED) {
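+ /* Mask all interrupt levels by setting SR.IMASK (bits 4-7) */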
+ __asm__ __volatile__ (
+ "stc sr, %0\n\t"
+ "or #0xf0, %0\n\t"
+ "ldc %0, sr\n\t"
+ : "=&z" (__dummy0)
+ : /* no inputs */
+ : "memory"
+ );
+ } else {
+ __asm__ __volatile__ (
+ "stc sr, %0\n\t"
+ "and %1, %0\n\t"
+#ifdef CONFIG_CPU_HAS_SR_RB
+ "stc r6_bank, %1\n\t"
+ "or %1, %0\n\t"
+#endif
+ "ldc %0, sr\n\t"
+ : "=&r" (__dummy0), "=r" (__dummy1)
+ : "1" (~ARCH_IRQ_DISABLED)
+ : "memory"
+ );
+ }
+}
+EXPORT_SYMBOL(arch_local_irq_restore);
+
+unsigned long notrace arch_local_save_flags(void)
+{
+ unsigned long flags;
+
+ __asm__ __volatile__ (
+ "stc sr, %0\n\t"
+ "and #0xf0, %0\n\t"
+ : "=&z" (flags)
+ : /* no inputs */
+ : "memory"
+ );
+
+ return flags;
+}
+EXPORT_SYMBOL(arch_local_save_flags);
diff --git a/arch/sh/kernel/irq_64.c b/arch/sh/kernel/irq_64.c
new file mode 100644
index 00000000..8fc05b99
--- /dev/null
+++ b/arch/sh/kernel/irq_64.c
@@ -0,0 +1,51 @@
+/*
+ * SHmedia irqflags support
+ *
+ * Copyright (C) 2006 - 2009 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/irqflags.h>
+#include <linux/module.h>
+#include <cpu/registers.h>
+
+void notrace arch_local_irq_restore(unsigned long flags)
+{
+ unsigned long long __dummy;
+
+ if (flags == ARCH_IRQ_DISABLED) {
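+ /* Set the SR interrupt-mask bits to disable interrupts */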
+ __asm__ __volatile__ (
+ "getcon " __SR ", %0\n\t"
+ "or %0, %1, %0\n\t"
+ "putcon %0, " __SR "\n\t"
+ : "=&r" (__dummy)
+ : "r" (ARCH_IRQ_DISABLED)
+ );
+ } else {
+ __asm__ __volatile__ (
+ "getcon " __SR ", %0\n\t"
+ "and %0, %1, %0\n\t"
+ "putcon %0, " __SR "\n\t"
+ : "=&r" (__dummy)
+ : "r" (~ARCH_IRQ_DISABLED)
+ );
+ }
+}
+EXPORT_SYMBOL(arch_local_irq_restore);
+
+unsigned long notrace arch_local_save_flags(void)
+{
+ unsigned long flags;
+
+ __asm__ __volatile__ (
+ "getcon " __SR ", %0\n\t"
+ "and %0, %1, %0"
+ : "=&r" (flags)
+ : "r" (ARCH_IRQ_DISABLED)
+ );
+
+ return flags;
+}
+EXPORT_SYMBOL(arch_local_save_flags);
diff --git a/arch/sh/kernel/kdebugfs.c b/arch/sh/kernel/kdebugfs.c
new file mode 100644
index 00000000..e11c30bb
--- /dev/null
+++ b/arch/sh/kernel/kdebugfs.c
@@ -0,0 +1,16 @@
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/debugfs.h>
+
+struct dentry *arch_debugfs_dir;
+EXPORT_SYMBOL(arch_debugfs_dir);
+
+static int __init arch_kdebugfs_init(void)
+{
+ arch_debugfs_dir = debugfs_create_dir("sh", NULL);
+ if (!arch_debugfs_dir)
+ return -ENOMEM;
+
+ return 0;
+}
+arch_initcall(arch_kdebugfs_init);
diff --git a/arch/sh/kernel/kgdb.c b/arch/sh/kernel/kgdb.c
new file mode 100644
index 00000000..b117781b
--- /dev/null
+++ b/arch/sh/kernel/kgdb.c
@@ -0,0 +1,330 @@
+/*
+ * SuperH KGDB support
+ *
+ * Copyright (C) 2008 - 2009 Paul Mundt
+ *
+ * Single stepping taken from the old stub by Henry Bell and Jeremy Siegel.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/kgdb.h>
+#include <linux/kdebug.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <asm/cacheflush.h>
+#include <asm/traps.h>
+
+/* Macros for single step instruction identification */
+#define OPCODE_BT(op) (((op) & 0xff00) == 0x8900)
+#define OPCODE_BF(op) (((op) & 0xff00) == 0x8b00)
+#define OPCODE_BTF_DISP(op) (((op) & 0x80) ? (((op) | 0xffffff80) << 1) : \
+ (((op) & 0x7f ) << 1))
+#define OPCODE_BFS(op) (((op) & 0xff00) == 0x8f00)
+#define OPCODE_BTS(op) (((op) & 0xff00) == 0x8d00)
+#define OPCODE_BRA(op) (((op) & 0xf000) == 0xa000)
+#define OPCODE_BRA_DISP(op) (((op) & 0x800) ? (((op) | 0xfffff800) << 1) : \
+ (((op) & 0x7ff) << 1))
+#define OPCODE_BRAF(op) (((op) & 0xf0ff) == 0x0023)
+#define OPCODE_BRAF_REG(op) (((op) & 0x0f00) >> 8)
+#define OPCODE_BSR(op) (((op) & 0xf000) == 0xb000)
+#define OPCODE_BSR_DISP(op) (((op) & 0x800) ? (((op) | 0xfffff800) << 1) : \
+ (((op) & 0x7ff) << 1))
+#define OPCODE_BSRF(op) (((op) & 0xf0ff) == 0x0003)
+#define OPCODE_BSRF_REG(op) (((op) >> 8) & 0xf)
+#define OPCODE_JMP(op) (((op) & 0xf0ff) == 0x402b)
+#define OPCODE_JMP_REG(op) (((op) >> 8) & 0xf)
+#define OPCODE_JSR(op) (((op) & 0xf0ff) == 0x400b)
+#define OPCODE_JSR_REG(op) (((op) >> 8) & 0xf)
+#define OPCODE_RTS(op) ((op) == 0xb)
+#define OPCODE_RTE(op) ((op) == 0x2b)
+
+#define SR_T_BIT_MASK 0x1
+#define STEP_OPCODE 0xc33d
+
+/* Calculate the new address for after a step */
+static short *get_step_address(struct pt_regs *linux_regs)
+{
+ insn_size_t op = __raw_readw(linux_regs->pc);
+ long addr;
+
+ /* BT */
+ if (OPCODE_BT(op)) {
+ if (linux_regs->sr & SR_T_BIT_MASK)
+ addr = linux_regs->pc + 4 + OPCODE_BTF_DISP(op);
+ else
+ addr = linux_regs->pc + 2;
+ }
+
+ /* BTS */
+ else if (OPCODE_BTS(op)) {
+ if (linux_regs->sr & SR_T_BIT_MASK)
+ addr = linux_regs->pc + 4 + OPCODE_BTF_DISP(op);
+ else
+ addr = linux_regs->pc + 4; /* Not in delay slot */
+ }
+
+ /* BF */
+ else if (OPCODE_BF(op)) {
+ if (!(linux_regs->sr & SR_T_BIT_MASK))
+ addr = linux_regs->pc + 4 + OPCODE_BTF_DISP(op);
+ else
+ addr = linux_regs->pc + 2;
+ }
+
+ /* BFS */
+ else if (OPCODE_BFS(op)) {
+ if (!(linux_regs->sr & SR_T_BIT_MASK))
+ addr = linux_regs->pc + 4 + OPCODE_BTF_DISP(op);
+ else
+ addr = linux_regs->pc + 4; /* Not in delay slot */
+ }
+
+ /* BRA */
+ else if (OPCODE_BRA(op))
+ addr = linux_regs->pc + 4 + OPCODE_BRA_DISP(op);
+
+ /* BRAF */
+ else if (OPCODE_BRAF(op))
+ addr = linux_regs->pc + 4
+ + linux_regs->regs[OPCODE_BRAF_REG(op)];
+
+ /* BSR */
+ else if (OPCODE_BSR(op))
+ addr = linux_regs->pc + 4 + OPCODE_BSR_DISP(op);
+
+ /* BSRF */
+ else if (OPCODE_BSRF(op))
+ addr = linux_regs->pc + 4
+ + linux_regs->regs[OPCODE_BSRF_REG(op)];
+
+ /* JMP */
+ else if (OPCODE_JMP(op))
+ addr = linux_regs->regs[OPCODE_JMP_REG(op)];
+
+ /* JSR */
+ else if (OPCODE_JSR(op))
+ addr = linux_regs->regs[OPCODE_JSR_REG(op)];
+
+ /* RTS */
+ else if (OPCODE_RTS(op))
+ addr = linux_regs->pr;
+
+ /* RTE */
+ else if (OPCODE_RTE(op))
+ addr = linux_regs->regs[15];
+
+ /* Other */
+ else
+ addr = linux_regs->pc + instruction_size(op);
+
+ flush_icache_range(addr, addr + instruction_size(op));
+ return (short *)addr;
+}
+
+/*
+ * Replace the instruction immediately after the current instruction
+ * (i.e. next in the expected flow of control) with a trap instruction,
+ * so that returning will cause only a single instruction to be executed.
+ * Note that this model is slightly broken for instructions with delay
+ * slots (e.g. B[TF]S, BSR, BRA etc), where both the branch and the
+ * instruction in the delay slot will be executed.
+ */
+
+static unsigned long stepped_address;
+static insn_size_t stepped_opcode;
+
+static void do_single_step(struct pt_regs *linux_regs)
+{
+ /* Determine where the target instruction will send us to */
+ unsigned short *addr = get_step_address(linux_regs);
+
+ stepped_address = (int)addr;
+
+ /* Replace it */
+ stepped_opcode = __raw_readw((long)addr);
+ *addr = STEP_OPCODE;
+
+ /* Flush and return */
+ flush_icache_range((long)addr, (long)addr +
+ instruction_size(stepped_opcode));
+}
+
+/* Undo a single step */
+static void undo_single_step(struct pt_regs *linux_regs)
+{
+ /* If we have stepped, put back the old instruction */
+ /* Use stepped_address in case we stopped elsewhere */
+ if (stepped_opcode != 0) {
+ __raw_writew(stepped_opcode, stepped_address);
+ flush_icache_range(stepped_address, stepped_address + 2);
+ }
+
+ stepped_opcode = 0;
+}
+
+void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
+{
+ int i;
+
+ for (i = 0; i < 16; i++)
+ gdb_regs[GDB_R0 + i] = regs->regs[i];
+
+ gdb_regs[GDB_PC] = regs->pc;
+ gdb_regs[GDB_PR] = regs->pr;
+ gdb_regs[GDB_SR] = regs->sr;
+ gdb_regs[GDB_GBR] = regs->gbr;
+ gdb_regs[GDB_MACH] = regs->mach;
+ gdb_regs[GDB_MACL] = regs->macl;
+
+ __asm__ __volatile__ ("stc vbr, %0" : "=r" (gdb_regs[GDB_VBR]));
+}
+
+void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
+{
+ int i;
+
+ for (i = 0; i < 16; i++)
+ regs->regs[GDB_R0 + i] = gdb_regs[GDB_R0 + i];
+
+ regs->pc = gdb_regs[GDB_PC];
+ regs->pr = gdb_regs[GDB_PR];
+ regs->sr = gdb_regs[GDB_SR];
+ regs->gbr = gdb_regs[GDB_GBR];
+ regs->mach = gdb_regs[GDB_MACH];
+ regs->macl = gdb_regs[GDB_MACL];
+}
+
+void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
+{
+ gdb_regs[GDB_R15] = p->thread.sp;
+ gdb_regs[GDB_PC] = p->thread.pc;
+}
+
+int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
+ char *remcomInBuffer, char *remcomOutBuffer,
+ struct pt_regs *linux_regs)
+{
+ unsigned long addr;
+ char *ptr;
+
+ /* Undo any stepping we may have done */
+ undo_single_step(linux_regs);
+
+ switch (remcomInBuffer[0]) {
+ case 'c':
+ case 's':
+ /* try to read optional parameter, pc unchanged if no parm */
+ ptr = &remcomInBuffer[1];
+ if (kgdb_hex2long(&ptr, &addr))
+ linux_regs->pc = addr;
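+ /* fall through to the common continue/step handling */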
+ case 'D':
+ case 'k':
+ atomic_set(&kgdb_cpu_doing_single_step, -1);
+
+ if (remcomInBuffer[0] == 's') {
+ do_single_step(linux_regs);
+ kgdb_single_step = 1;
+
+ atomic_set(&kgdb_cpu_doing_single_step,
+ raw_smp_processor_id());
+ }
+
+ return 0;
+ }
+
+ /* this means that we do not want to exit from the handler: */
+ return -1;
+}
+
+unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs)
+{
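+ /* Exception 60 (trapa #0x3c) is the breakpoint trap; report the address of the trapa itself */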
+ if (exception == 60)
+ return instruction_pointer(regs) - 2;
+ return instruction_pointer(regs);
+}
+
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
+{
+ regs->pc = ip;
+}
+
+/*
+ * The primary entry points for the kgdb debug trap table entries.
+ */
+BUILD_TRAP_HANDLER(singlestep)
+{
+ unsigned long flags;
+ TRAP_HANDLER_DECL;
+
+ local_irq_save(flags);
+ regs->pc -= instruction_size(__raw_readw(regs->pc - 4));
+ kgdb_handle_exception(0, SIGTRAP, 0, regs);
+ local_irq_restore(flags);
+}
+
+static int __kgdb_notify(struct die_args *args, unsigned long cmd)
+{
+ int ret;
+
+ switch (cmd) {
+ case DIE_BREAKPOINT:
+ /*
+ * This means a user thread is single stepping
+ * a system call which should be ignored
+ */
+ if (test_thread_flag(TIF_SINGLESTEP))
+ return NOTIFY_DONE;
+
+ ret = kgdb_handle_exception(args->trapnr & 0xff, args->signr,
+ args->err, args->regs);
+ if (ret)
+ return NOTIFY_DONE;
+
+ break;
+ }
+
+ return NOTIFY_STOP;
+}
+
+static int
+kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
+{
+ unsigned long flags;
+ int ret;
+
+ local_irq_save(flags);
+ ret = __kgdb_notify(ptr, cmd);
+ local_irq_restore(flags);
+
+ return ret;
+}
+
+static struct notifier_block kgdb_notifier = {
+ .notifier_call = kgdb_notify,
+
+ /*
+ * Lowest-prio notifier priority, we want to be notified last:
+ */
+ .priority = -INT_MAX,
+};
+
+int kgdb_arch_init(void)
+{
+ return register_die_notifier(&kgdb_notifier);
+}
+
+void kgdb_arch_exit(void)
+{
+ unregister_die_notifier(&kgdb_notifier);
+}
+
+struct kgdb_arch arch_kgdb_ops = {
+ /* Breakpoint instruction: trapa #0x3c */
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+ .gdb_bpt_instr = { 0x3c, 0xc3 },
+#else
+ .gdb_bpt_instr = { 0xc3, 0x3c },
+#endif
+};
diff --git a/arch/sh/kernel/kprobes.c b/arch/sh/kernel/kprobes.c
new file mode 100644
index 00000000..1208b09e
--- /dev/null
+++ b/arch/sh/kernel/kprobes.c
@@ -0,0 +1,585 @@
+/*
+ * Kernel probes (kprobes) for SuperH
+ *
+ * Copyright (C) 2007 Chris Smith <chris.smith@st.com>
+ * Copyright (C) 2006 Lineo Solutions, Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/kprobes.h>
+#include <linux/module.h>
+#include <linux/ptrace.h>
+#include <linux/preempt.h>
+#include <linux/kdebug.h>
+#include <linux/slab.h>
+#include <asm/cacheflush.h>
+#include <asm/uaccess.h>
+
+DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
+DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
+
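+/* State used to emulate single-stepping: the original probe and the probe(s) planted on the next instruction(s) */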
+static DEFINE_PER_CPU(struct kprobe, saved_current_opcode);
+static DEFINE_PER_CPU(struct kprobe, saved_next_opcode);
+static DEFINE_PER_CPU(struct kprobe, saved_next_opcode2);
+
+#define OPCODE_JMP(x) (((x) & 0xF0FF) == 0x402b)
+#define OPCODE_JSR(x) (((x) & 0xF0FF) == 0x400b)
+#define OPCODE_BRA(x) (((x) & 0xF000) == 0xa000)
+#define OPCODE_BRAF(x) (((x) & 0xF0FF) == 0x0023)
+#define OPCODE_BSR(x) (((x) & 0xF000) == 0xb000)
+#define OPCODE_BSRF(x) (((x) & 0xF0FF) == 0x0003)
+
+#define OPCODE_BF_S(x) (((x) & 0xFF00) == 0x8f00)
+#define OPCODE_BT_S(x) (((x) & 0xFF00) == 0x8d00)
+
+#define OPCODE_BF(x) (((x) & 0xFF00) == 0x8b00)
+#define OPCODE_BT(x) (((x) & 0xFF00) == 0x8900)
+
+#define OPCODE_RTS(x) (((x) & 0x000F) == 0x000b)
+#define OPCODE_RTE(x) (((x) & 0xFFFF) == 0x002b)
+
+int __kprobes arch_prepare_kprobe(struct kprobe *p)
+{
+ kprobe_opcode_t opcode = *(kprobe_opcode_t *) (p->addr);
+
+ if (OPCODE_RTE(opcode))
+ return -EFAULT; /* Bad breakpoint */
+
+ p->opcode = opcode;
+
+ return 0;
+}
+
+void __kprobes arch_copy_kprobe(struct kprobe *p)
+{
+ memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
+ p->opcode = *p->addr;
+}
+
+void __kprobes arch_arm_kprobe(struct kprobe *p)
+{
+ *p->addr = BREAKPOINT_INSTRUCTION;
+ flush_icache_range((unsigned long)p->addr,
+ (unsigned long)p->addr + sizeof(kprobe_opcode_t));
+}
+
+void __kprobes arch_disarm_kprobe(struct kprobe *p)
+{
+ *p->addr = p->opcode;
+ flush_icache_range((unsigned long)p->addr,
+ (unsigned long)p->addr + sizeof(kprobe_opcode_t));
+}
+
+int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+{
+ if (*p->addr == BREAKPOINT_INSTRUCTION)
+ return 1;
+
+ return 0;
+}
+
+/**
+ * If an illegal slot instruction exception occurs for an address
+ * containing a kprobe, remove the probe.
+ *
+ * Returns 0 if the exception was handled successfully, 1 otherwise.
+ */
+int __kprobes kprobe_handle_illslot(unsigned long pc)
+{
+ struct kprobe *p = get_kprobe((kprobe_opcode_t *) pc + 1);
+
+ if (p != NULL) {
+ printk("Warning: removing kprobe from delay slot: 0x%.8x\n",
+ (unsigned int)pc + 2);
+ unregister_kprobe(p);
+ return 0;
+ }
+
+ return 1;
+}
+
+void __kprobes arch_remove_kprobe(struct kprobe *p)
+{
+ struct kprobe *saved = &__get_cpu_var(saved_next_opcode);
+
+ if (saved->addr) {
+ arch_disarm_kprobe(p);
+ arch_disarm_kprobe(saved);
+
+ saved->addr = NULL;
+ saved->opcode = 0;
+
+ saved = &__get_cpu_var(saved_next_opcode2);
+ if (saved->addr) {
+ arch_disarm_kprobe(saved);
+
+ saved->addr = NULL;
+ saved->opcode = 0;
+ }
+ }
+}
+
+static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+ kcb->prev_kprobe.kp = kprobe_running();
+ kcb->prev_kprobe.status = kcb->kprobe_status;
+}
+
+static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+ __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
+ kcb->kprobe_status = kcb->prev_kprobe.status;
+}
+
+static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb)
+{
+ __get_cpu_var(current_kprobe) = p;
+}
+
+/*
+ * Singlestep is implemented by disabling the current kprobe and setting one
+ * on the next instruction, following branches. Two probes are set if the
+ * branch is conditional.
+ */
+static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
+{
+ __get_cpu_var(saved_current_opcode).addr = (kprobe_opcode_t *)regs->pc;
+
+ if (p != NULL) {
+ struct kprobe *op1, *op2;
+
+ arch_disarm_kprobe(p);
+
+ op1 = &__get_cpu_var(saved_next_opcode);
+ op2 = &__get_cpu_var(saved_next_opcode2);
+
+ if (OPCODE_JSR(p->opcode) || OPCODE_JMP(p->opcode)) {
+ unsigned int reg_nr = ((p->opcode >> 8) & 0x000F);
+ op1->addr = (kprobe_opcode_t *) regs->regs[reg_nr];
+ } else if (OPCODE_BRA(p->opcode) || OPCODE_BSR(p->opcode)) {
+ unsigned long disp = (p->opcode & 0x0FFF);
+ op1->addr =
+ (kprobe_opcode_t *) (regs->pc + 4 + disp * 2);
+
+ } else if (OPCODE_BRAF(p->opcode) || OPCODE_BSRF(p->opcode)) {
+ unsigned int reg_nr = ((p->opcode >> 8) & 0x000F);
+ op1->addr =
+ (kprobe_opcode_t *) (regs->pc + 4 +
+ regs->regs[reg_nr]);
+
+ } else if (OPCODE_RTS(p->opcode)) {
+ op1->addr = (kprobe_opcode_t *) regs->pr;
+
+ } else if (OPCODE_BF(p->opcode) || OPCODE_BT(p->opcode)) {
+ unsigned long disp = (p->opcode & 0x00FF);
+ /* case 1: branch not taken, probe the next instruction */
+ op1->addr = p->addr + 1;
+ /* case 2: branch taken, probe the branch target */
+ op2->addr =
+ (kprobe_opcode_t *) (regs->pc + 4 + disp * 2);
+ op2->opcode = *(op2->addr);
+ arch_arm_kprobe(op2);
+
+ } else if (OPCODE_BF_S(p->opcode) || OPCODE_BT_S(p->opcode)) {
+ unsigned long disp = (p->opcode & 0x00FF);
+ /* case 1: branch not taken, probe past the delay slot */
+ op1->addr = p->addr + 2;
+ /* case 2: branch taken, probe the branch target */
+ op2->addr =
+ (kprobe_opcode_t *) (regs->pc + 4 + disp * 2);
+ op2->opcode = *(op2->addr);
+ arch_arm_kprobe(op2);
+
+ } else {
+ op1->addr = p->addr + 1;
+ }
+
+ op1->opcode = *(op1->addr);
+ arch_arm_kprobe(op1);
+ }
+}
+
+/* Called with kretprobe_lock held */
+void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ struct pt_regs *regs)
+{
+ ri->ret_addr = (kprobe_opcode_t *) regs->pr;
+
+ /* Replace the return addr with trampoline addr */
+ regs->pr = (unsigned long)kretprobe_trampoline;
+}
+
+static int __kprobes kprobe_handler(struct pt_regs *regs)
+{
+ struct kprobe *p;
+ int ret = 0;
+ kprobe_opcode_t *addr = NULL;
+ struct kprobe_ctlblk *kcb;
+
+ /*
+ * We don't want to be preempted for the entire
+ * duration of kprobe processing
+ */
+ preempt_disable();
+ kcb = get_kprobe_ctlblk();
+
+ addr = (kprobe_opcode_t *) (regs->pc);
+
+ /* Check we're not actually recursing */
+ if (kprobe_running()) {
+ p = get_kprobe(addr);
+ if (p) {
+ if (kcb->kprobe_status == KPROBE_HIT_SS &&
+ *p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
+ goto no_kprobe;
+ }
+ /* We have reentered the kprobe_handler(), since
+ * another probe was hit while within the handler.
+ * Here we save the original kprobe state and
+ * just single step on the instruction of the new probe
+ * without calling any user handlers.
+ */
+ save_previous_kprobe(kcb);
+ set_current_kprobe(p, regs, kcb);
+ kprobes_inc_nmissed_count(p);
+ prepare_singlestep(p, regs);
+ kcb->kprobe_status = KPROBE_REENTER;
+ return 1;
+ } else {
+ p = __get_cpu_var(current_kprobe);
+ if (p->break_handler && p->break_handler(p, regs)) {
+ goto ss_probe;
+ }
+ }
+ goto no_kprobe;
+ }
+
+ p = get_kprobe(addr);
+ if (!p) {
+ /* Not one of ours: let kernel handle it */
+ if (*(kprobe_opcode_t *)addr != BREAKPOINT_INSTRUCTION) {
+ /*
+ * The breakpoint instruction was removed right
+ * after we hit it. Another cpu has removed
+ * either a probepoint or a debugger breakpoint
+ * at this address. In either case, no further
+ * handling of this interrupt is appropriate.
+ */
+ ret = 1;
+ }
+
+ goto no_kprobe;
+ }
+
+ set_current_kprobe(p, regs, kcb);
+ kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+
+ if (p->pre_handler && p->pre_handler(p, regs))
+ /* handler has already set things up, so skip ss setup */
+ return 1;
+
+ss_probe:
+ prepare_singlestep(p, regs);
+ kcb->kprobe_status = KPROBE_HIT_SS;
+ return 1;
+
+no_kprobe:
+ preempt_enable_no_resched();
+ return ret;
+}
+
+/*
+ * For function-return probes, init_kprobes() establishes a probepoint
+ * here. When a retprobed function returns, this probe is hit and
+ * trampoline_probe_handler() runs, calling the kretprobe's handler.
+ */
+static void __used kretprobe_trampoline_holder(void)
+{
+ asm volatile (".globl kretprobe_trampoline\n"
+ "kretprobe_trampoline:\n\t"
+ "nop\n");
+}
+
+/*
+ * Called when we hit the probe point at kretprobe_trampoline
+ */
+int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
+{
+ struct kretprobe_instance *ri = NULL;
+ struct hlist_head *head, empty_rp;
+ struct hlist_node *node, *tmp;
+ unsigned long flags, orig_ret_address = 0;
+ unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
+
+ INIT_HLIST_HEAD(&empty_rp);
+ kretprobe_hash_lock(current, &head, &flags);
+
+ /*
+ * It is possible to have multiple instances associated with a given
+ * task either because multiple functions in the call path
+ * have a return probe installed on them, and/or more than one
+ * return probe was registered for a target function.
+ *
+ * We can handle this because:
+ * - instances are always inserted at the head of the list
+ * - when multiple return probes are registered for the same
+ * function, the first instance's ret_addr will point to the
+ * real return address, and all the rest will point to
+ * kretprobe_trampoline
+ */
+ hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+ if (ri->task != current)
+ /* another task is sharing our hash bucket */
+ continue;
+
+ if (ri->rp && ri->rp->handler) {
+ __get_cpu_var(current_kprobe) = &ri->rp->kp;
+ ri->rp->handler(ri, regs);
+ __get_cpu_var(current_kprobe) = NULL;
+ }
+
+ orig_ret_address = (unsigned long)ri->ret_addr;
+ recycle_rp_inst(ri, &empty_rp);
+
+ if (orig_ret_address != trampoline_address)
+ /*
+ * This is the real return address. Any other
+ * instances associated with this task are for
+ * other calls deeper on the call stack
+ */
+ break;
+ }
+
+ kretprobe_assert(ri, orig_ret_address, trampoline_address);
+
+ regs->pc = orig_ret_address;
+ kretprobe_hash_unlock(current, &flags);
+
+ preempt_enable_no_resched();
+
+ hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+ hlist_del(&ri->hlist);
+ kfree(ri);
+ }
+
+ return orig_ret_address;
+}
+
+static int __kprobes post_kprobe_handler(struct pt_regs *regs)
+{
+ struct kprobe *cur = kprobe_running();
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+ kprobe_opcode_t *addr = NULL;
+ struct kprobe *p = NULL;
+
+ if (!cur)
+ return 0;
+
+ if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
+ kcb->kprobe_status = KPROBE_HIT_SSDONE;
+ cur->post_handler(cur, regs, 0);
+ }
+
+ p = &__get_cpu_var(saved_next_opcode);
+ if (p->addr) {
+ arch_disarm_kprobe(p);
+ p->addr = NULL;
+ p->opcode = 0;
+
+ addr = __get_cpu_var(saved_current_opcode).addr;
+ __get_cpu_var(saved_current_opcode).addr = NULL;
+
+ p = get_kprobe(addr);
+ arch_arm_kprobe(p);
+
+ p = &__get_cpu_var(saved_next_opcode2);
+ if (p->addr) {
+ arch_disarm_kprobe(p);
+ p->addr = NULL;
+ p->opcode = 0;
+ }
+ }
+
+ /* Restore back the original saved kprobes variables and continue. */
+ if (kcb->kprobe_status == KPROBE_REENTER) {
+ restore_previous_kprobe(kcb);
+ goto out;
+ }
+
+ reset_current_kprobe();
+
+out:
+ preempt_enable_no_resched();
+
+ return 1;
+}
+
+int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
+{
+ struct kprobe *cur = kprobe_running();
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+ const struct exception_table_entry *entry;
+
+ switch (kcb->kprobe_status) {
+ case KPROBE_HIT_SS:
+ case KPROBE_REENTER:
+ /*
+ * We are here because the instruction being single
+ * stepped caused a page fault. We reset the current
+ * kprobe, point the pc back to the probe address
+ * and allow the page fault handler to continue as a
+ * normal page fault.
+ */
+ regs->pc = (unsigned long)cur->addr;
+ if (kcb->kprobe_status == KPROBE_REENTER)
+ restore_previous_kprobe(kcb);
+ else
+ reset_current_kprobe();
+ preempt_enable_no_resched();
+ break;
+ case KPROBE_HIT_ACTIVE:
+ case KPROBE_HIT_SSDONE:
+ /*
+ * We increment the nmissed count for accounting;
+ * we could also use the npre/npostfault counts to account
+ * for these specific fault cases.
+ */
+ kprobes_inc_nmissed_count(cur);
+
+ /*
+ * We come here because instructions in the pre/post
+ * handler caused the page fault; this could happen
+ * if the handler tries to access user space via
+ * copy_from_user(), get_user(), etc. Let the
+ * user-specified handler try to fix it first.
+ */
+ if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
+ return 1;
+
+ /*
+ * In case the user-specified fault handler returned
+ * zero, try to fix up.
+ */
+ if ((entry = search_exception_tables(regs->pc)) != NULL) {
+ regs->pc = entry->fixup;
+ return 1;
+ }
+
+ /*
+ * fixup_exception() could not handle it,
+ * Let do_page_fault() fix it.
+ */
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+/*
+ * Wrapper routine for handling exceptions.
+ */
+int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
+ unsigned long val, void *data)
+{
+ struct kprobe *p = NULL;
+ struct die_args *args = (struct die_args *)data;
+ int ret = NOTIFY_DONE;
+ kprobe_opcode_t *addr = NULL;
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ addr = (kprobe_opcode_t *) (args->regs->pc);
+ if (val == DIE_TRAP) {
+ if (!kprobe_running()) {
+ if (kprobe_handler(args->regs)) {
+ ret = NOTIFY_STOP;
+ } else {
+ /* Not a kprobe trap */
+ ret = NOTIFY_DONE;
+ }
+ } else {
+ p = get_kprobe(addr);
+ if ((kcb->kprobe_status == KPROBE_HIT_SS) ||
+ (kcb->kprobe_status == KPROBE_REENTER)) {
+ if (post_kprobe_handler(args->regs))
+ ret = NOTIFY_STOP;
+ } else {
+ if (kprobe_handler(args->regs)) {
+ ret = NOTIFY_STOP;
+ } else {
+ p = __get_cpu_var(current_kprobe);
+ if (p->break_handler &&
+ p->break_handler(p, args->regs))
+ ret = NOTIFY_STOP;
+ }
+ }
+ }
+ }
+
+ return ret;
+}
+
+int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
+{
+ struct jprobe *jp = container_of(p, struct jprobe, kp);
+ unsigned long addr;
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ kcb->jprobe_saved_regs = *regs;
+ kcb->jprobe_saved_r15 = regs->regs[15];
+ addr = kcb->jprobe_saved_r15;
+
+ /*
+ * TBD: As Linus pointed out, gcc assumes that the callee
+ * owns the argument space and could overwrite it, e.g.
+ * tailcall optimization. So, to be absolutely safe
+ * we also save and restore enough stack bytes to cover
+ * the argument area.
+ */
+ memcpy(kcb->jprobes_stack, (kprobe_opcode_t *) addr,
+ MIN_STACK_SIZE(addr));
+
+ regs->pc = (unsigned long)(jp->entry);
+
+ return 1;
+}
+
+void __kprobes jprobe_return(void)
+{
+ asm volatile ("trapa #0x3a\n\t" "jprobe_return_end:\n\t" "nop\n\t");
+}
+
+int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
+{
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+ unsigned long stack_addr = kcb->jprobe_saved_r15;
+ u8 *addr = (u8 *)regs->pc;
+
+ if ((addr >= (u8 *)jprobe_return) &&
+ (addr <= (u8 *)jprobe_return_end)) {
+ *regs = kcb->jprobe_saved_regs;
+
+ memcpy((kprobe_opcode_t *)stack_addr, kcb->jprobes_stack,
+ MIN_STACK_SIZE(stack_addr));
+
+ kcb->kprobe_status = KPROBE_HIT_SS;
+ preempt_enable_no_resched();
+ return 1;
+ }
+
+ return 0;
+}
+
+static struct kprobe trampoline_p = {
+ .addr = (kprobe_opcode_t *)&kretprobe_trampoline,
+ .pre_handler = trampoline_probe_handler
+};
+
+int __init arch_init_kprobes(void)
+{
+ return register_kprobe(&trampoline_p);
+}
diff --git a/arch/sh/kernel/localtimer.c b/arch/sh/kernel/localtimer.c
new file mode 100644
index 00000000..8bfc6dfa
--- /dev/null
+++ b/arch/sh/kernel/localtimer.c
@@ -0,0 +1,66 @@
+/*
+ * Dummy local timer
+ *
+ * Copyright (C) 2008 Paul Mundt
+ *
+ * cloned from:
+ *
+ * linux/arch/arm/mach-realview/localtimer.c
+ *
+ * Copyright (C) 2002 ARM Ltd.
+ * All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/smp.h>
+#include <linux/jiffies.h>
+#include <linux/percpu.h>
+#include <linux/clockchips.h>
+#include <linux/hardirq.h>
+#include <linux/irq.h>
+
+static DEFINE_PER_CPU(struct clock_event_device, local_clockevent);
+
+/*
+ * Used on SMP for either the local timer or SMP_MSG_TIMER
+ */
+void local_timer_interrupt(void)
+{
+ struct clock_event_device *clk = &__get_cpu_var(local_clockevent);
+
+ irq_enter();
+ clk->event_handler(clk);
+ irq_exit();
+}
+
+static void dummy_timer_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *clk)
+{
+}
+
+void local_timer_setup(unsigned int cpu)
+{
+ struct clock_event_device *clk = &per_cpu(local_clockevent, cpu);
+
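+ /* Dummy clockevent; actual ticks are delivered via local_timer_interrupt() */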
+ clk->name = "dummy_timer";
+ clk->features = CLOCK_EVT_FEAT_ONESHOT |
+ CLOCK_EVT_FEAT_PERIODIC |
+ CLOCK_EVT_FEAT_DUMMY;
+ clk->rating = 400;
+ clk->mult = 1;
+ clk->set_mode = dummy_timer_set_mode;
+ clk->broadcast = smp_timer_broadcast;
+ clk->cpumask = cpumask_of(cpu);
+
+ clockevents_register_device(clk);
+}
+
+void local_timer_stop(unsigned int cpu)
+{
+}
diff --git a/arch/sh/kernel/machine_kexec.c b/arch/sh/kernel/machine_kexec.c
new file mode 100644
index 00000000..9fea49f6
--- /dev/null
+++ b/arch/sh/kernel/machine_kexec.c
@@ -0,0 +1,207 @@
+/*
+ * machine_kexec.c - handle transition of Linux booting another kernel
+ * Copyright (C) 2002-2003 Eric Biederman <ebiederm@xmission.com>
+ *
+ * GameCube/ppc32 port Copyright (C) 2004 Albert Herranz
+ * LANDISK/sh4 supported by kogiidena
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+#include <linux/mm.h>
+#include <linux/kexec.h>
+#include <linux/delay.h>
+#include <linux/reboot.h>
+#include <linux/numa.h>
+#include <linux/ftrace.h>
+#include <linux/suspend.h>
+#include <linux/memblock.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/mmu_context.h>
+#include <asm/io.h>
+#include <asm/cacheflush.h>
+#include <asm/sh_bios.h>
+#include <asm/reboot.h>
+
+typedef void (*relocate_new_kernel_t)(unsigned long indirection_page,
+ unsigned long reboot_code_buffer,
+ unsigned long start_address);
+
+extern const unsigned char relocate_new_kernel[];
+extern const unsigned int relocate_new_kernel_size;
+extern void *vbr_base;
+
+void native_machine_crash_shutdown(struct pt_regs *regs)
+{
+ /* Nothing to do for UP, but definitely broken for SMP.. */
+}
+
+/*
+ * Do whatever setup is needed on the image and the
+ * reboot code buffer to allow us to avoid allocations
+ * later.
+ */
+int machine_kexec_prepare(struct kimage *image)
+{
+ return 0;
+}
+
+void machine_kexec_cleanup(struct kimage *image)
+{
+}
+
+static void kexec_info(struct kimage *image)
+{
+ int i;
+ printk("kexec information\n");
+ for (i = 0; i < image->nr_segments; i++) {
+ printk(" segment[%d]: 0x%08x - 0x%08x (0x%08x)\n",
+ i,
+ (unsigned int)image->segment[i].mem,
+ (unsigned int)image->segment[i].mem +
+ image->segment[i].memsz,
+ (unsigned int)image->segment[i].memsz);
+ }
+ printk(" start : 0x%08x\n\n", (unsigned int)image->start);
+}
+
+/*
+ * Do not allocate memory (or fail in any way) in machine_kexec().
+ * We are past the point of no return, committed to rebooting now.
+ */
+void machine_kexec(struct kimage *image)
+{
+ unsigned long page_list;
+ unsigned long reboot_code_buffer;
+ relocate_new_kernel_t rnk;
+ unsigned long entry;
+ unsigned long *ptr;
+ int save_ftrace_enabled;
+
+ /*
+ * Nicked from the mips version of machine_kexec():
+ * The generic kexec code builds a page list with physical
+ * addresses. Use phys_to_virt() to convert them to virtual.
+ */
+ for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE);
+ ptr = (entry & IND_INDIRECTION) ?
+ phys_to_virt(entry & PAGE_MASK) : ptr + 1) {
+ if (*ptr & IND_SOURCE || *ptr & IND_INDIRECTION ||
+ *ptr & IND_DESTINATION)
+ *ptr = (unsigned long) phys_to_virt(*ptr);
+ }
+
+#ifdef CONFIG_KEXEC_JUMP
+ if (image->preserve_context)
+ save_processor_state();
+#endif
+
+ save_ftrace_enabled = __ftrace_enabled_save();
+
+ /* Interrupts aren't acceptable while we reboot */
+ local_irq_disable();
+
+ page_list = image->head;
+
+ /* we need both effective and real address here */
+ reboot_code_buffer =
+ (unsigned long)page_address(image->control_code_page);
+
+ /* copy our kernel relocation code to the control code page */
+ memcpy((void *)reboot_code_buffer, relocate_new_kernel,
+ relocate_new_kernel_size);
+
+ kexec_info(image);
+ flush_cache_all();
+
+ sh_bios_vbr_reload();
+
+ /* now call it */
+ rnk = (relocate_new_kernel_t) reboot_code_buffer;
+ (*rnk)(page_list, reboot_code_buffer,
+ (unsigned long)phys_to_virt(image->start));
+
+#ifdef CONFIG_KEXEC_JUMP
+ asm volatile("ldc %0, vbr" : : "r" (&vbr_base) : "memory");
+
+ if (image->preserve_context)
+ restore_processor_state();
+
+ /* Convert page list back to physical addresses, what a mess. */
+ for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE);
+ ptr = (*ptr & IND_INDIRECTION) ?
+ phys_to_virt(*ptr & PAGE_MASK) : ptr + 1) {
+ if (*ptr & IND_SOURCE || *ptr & IND_INDIRECTION ||
+ *ptr & IND_DESTINATION)
+ *ptr = virt_to_phys(*ptr);
+ }
+#endif
+
+ __ftrace_enabled_restore(save_ftrace_enabled);
+}
+
+void arch_crash_save_vmcoreinfo(void)
+{
+#ifdef CONFIG_NUMA
+ VMCOREINFO_SYMBOL(node_data);
+ VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
+#endif
+#ifdef CONFIG_X2TLB
+ VMCOREINFO_CONFIG(X2TLB);
+#endif
+}
+
+void __init reserve_crashkernel(void)
+{
+ unsigned long long crash_size, crash_base;
+ int ret;
+
+ ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
+ &crash_size, &crash_base);
+ if (ret == 0 && crash_size > 0) {
+ crashk_res.start = crash_base;
+ crashk_res.end = crash_base + crash_size - 1;
+ }
+
+ if (crashk_res.end == crashk_res.start)
+ goto disable;
+
+ crash_size = PAGE_ALIGN(resource_size(&crashk_res));
+ if (!crashk_res.start) {
+ unsigned long max = memblock_end_of_DRAM() - memory_limit;
+ crashk_res.start = __memblock_alloc_base(crash_size, PAGE_SIZE, max);
+ if (!crashk_res.start) {
+ pr_err("crashkernel allocation failed\n");
+ goto disable;
+ }
+ } else {
+ ret = memblock_reserve(crashk_res.start, crash_size);
+ if (unlikely(ret < 0)) {
+ pr_err("crashkernel reservation failed - "
+ "memory is in use\n");
+ goto disable;
+ }
+ }
+
+ crashk_res.end = crashk_res.start + crash_size - 1;
+
+ /*
+ * Crash kernel trumps memory limit
+ */
+ if ((memblock_end_of_DRAM() - memory_limit) <= crashk_res.end) {
+ memory_limit = 0;
+ pr_info("Disabled memory limit for crashkernel\n");
+ }
+
+ pr_info("Reserving %ldMB of memory at 0x%08lx "
+ "for crashkernel (System RAM: %ldMB)\n",
+ (unsigned long)(crash_size >> 20),
+ (unsigned long)(crashk_res.start),
+ (unsigned long)(memblock_phys_mem_size() >> 20));
+
+ return;
+
+disable:
+ crashk_res.start = crashk_res.end = 0;
+}
diff --git a/arch/sh/kernel/machvec.c b/arch/sh/kernel/machvec.c
new file mode 100644
index 00000000..3d722e49
--- /dev/null
+++ b/arch/sh/kernel/machvec.c
@@ -0,0 +1,127 @@
+/*
+ * arch/sh/kernel/machvec.c
+ *
+ * The SuperH machine vector setup handlers, yanked from setup.c
+ *
+ * Copyright (C) 1999 Niibe Yutaka
+ * Copyright (C) 2002 - 2007 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/string.h>
+#include <asm/machvec.h>
+#include <asm/sections.h>
+#include <asm/addrspace.h>
+#include <asm/setup.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+
+#define MV_NAME_SIZE 32
+
+#define for_each_mv(mv) \
+ for ((mv) = (struct sh_machine_vector *)&__machvec_start; \
+ (mv) && (unsigned long)(mv) < (unsigned long)&__machvec_end; \
+ (mv)++)
+
+static struct sh_machine_vector * __init get_mv_byname(const char *name)
+{
+ struct sh_machine_vector *mv;
+
+ for_each_mv(mv)
+ if (strcasecmp(name, mv->mv_name) == 0)
+ return mv;
+
+ return NULL;
+}
+
+static unsigned int __initdata machvec_selected;
+
+static int __init early_parse_mv(char *from)
+{
+ char mv_name[MV_NAME_SIZE] = "";
+ char *mv_end;
+ char *mv_comma;
+ int mv_len;
+ struct sh_machine_vector *mvp;
+
+ mv_end = strchr(from, ' ');
+ if (mv_end == NULL)
+ mv_end = from + strlen(from);
+
+ mv_comma = strchr(from, ',');
+ mv_len = mv_end - from;
+ if (mv_len > (MV_NAME_SIZE-1))
+ mv_len = MV_NAME_SIZE-1;
+ memcpy(mv_name, from, mv_len);
+ mv_name[mv_len] = '\0';
+ from = mv_end;
+
+ machvec_selected = 1;
+
+ /* Boot with the generic vector */
+ if (strcmp(mv_name, "generic") == 0)
+ return 0;
+
+ mvp = get_mv_byname(mv_name);
+ if (unlikely(!mvp)) {
+ printk("Available vectors:\n\n\t'%s', ", sh_mv.mv_name);
+ for_each_mv(mvp)
+ printk("'%s', ", mvp->mv_name);
+ printk("\n\n");
+ panic("Failed to select machvec '%s' -- halting.\n",
+ mv_name);
+ } else
+ sh_mv = *mvp;
+
+ return 0;
+}
+early_param("sh_mv", early_parse_mv);
+
+void __init sh_mv_setup(void)
+{
+ /*
+ * Only overload the machvec if one hasn't been selected on
+ * the command line with sh_mv=
+ */
+ if (!machvec_selected) {
+ unsigned long machvec_size;
+
+ machvec_size = ((unsigned long)&__machvec_end -
+ (unsigned long)&__machvec_start);
+
+ /*
+ * Sanity check for machvec section alignment. Ensure
+ * __initmv hasn't been misused.
+ */
+ if (machvec_size % sizeof(struct sh_machine_vector))
+ panic("machvec misaligned, invalid __initmv use?");
+
+ /*
+ * If the machvec hasn't been preselected, use the first
+ * vector (usually the only one) from .machvec.init.
+ */
+ if (machvec_size >= sizeof(struct sh_machine_vector))
+ sh_mv = *(struct sh_machine_vector *)&__machvec_start;
+ }
+
+ printk(KERN_NOTICE "Booting machvec: %s\n", get_system_type());
+
+ /*
+ * Manually walk the vec and fill in anything that the board
+ * hasn't set yet, falling back to the generic implementation.
+ */
+#define mv_set(elem) do { \
+ if (!sh_mv.mv_##elem) \
+ sh_mv.mv_##elem = generic_##elem; \
+} while (0)
+
+ mv_set(irq_demux);
+ mv_set(mode_pins);
+ mv_set(mem_init);
+
+ if (!sh_mv.mv_nr_irqs)
+ sh_mv.mv_nr_irqs = NR_IRQS;
+}
diff --git a/arch/sh/kernel/module.c b/arch/sh/kernel/module.c
new file mode 100644
index 00000000..1b525ded
--- /dev/null
+++ b/arch/sh/kernel/module.c
@@ -0,0 +1,126 @@
+/* Kernel module help for SH.
+
+ SHcompact version by Kaz Kojima and Paul Mundt.
+
+ SHmedia bits:
+
+ Copyright 2004 SuperH (UK) Ltd
+ Author: Richard Curnow
+
+ Based on the sh version, and on code from the sh64-specific parts of
+ modutils, originally written by Richard Curnow and Ben Gaster.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+#include <linux/moduleloader.h>
+#include <linux/elf.h>
+#include <linux/vmalloc.h>
+#include <linux/bug.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <asm/unaligned.h>
+#include <asm/dwarf.h>
+
+int apply_relocate_add(Elf32_Shdr *sechdrs,
+ const char *strtab,
+ unsigned int symindex,
+ unsigned int relsec,
+ struct module *me)
+{
+ unsigned int i;
+ Elf32_Rela *rel = (void *)sechdrs[relsec].sh_addr;
+ Elf32_Sym *sym;
+ Elf32_Addr relocation;
+ uint32_t *location;
+ uint32_t value;
+
+ pr_debug("Applying relocate section %u to %u\n", relsec,
+ sechdrs[relsec].sh_info);
+ for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
+ /* This is where to make the change */
+ location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+ + rel[i].r_offset;
+ /* This is the symbol it is referring to. Note that all
+ undefined symbols have been resolved. */
+ sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
+ + ELF32_R_SYM(rel[i].r_info);
+ relocation = sym->st_value + rel[i].r_addend;
+
+#ifdef CONFIG_SUPERH64
+ /* For text addresses, bit2 of the st_other field indicates
+ * whether the symbol is SHmedia (1) or SHcompact (0). If
+ * SHmedia, the LSB of the symbol needs to be asserted
+ * for the CPU to be in SHmedia mode when it starts executing
+ * the branch target. */
+ relocation |= !!(sym->st_other & 4);
+#endif
+
+ switch (ELF32_R_TYPE(rel[i].r_info)) {
+ case R_SH_NONE:
+ break;
+ case R_SH_DIR32:
+ value = get_unaligned(location);
+ value += relocation;
+ put_unaligned(value, location);
+ break;
+ case R_SH_REL32:
+ relocation = (relocation - (Elf32_Addr) location);
+ value = get_unaligned(location);
+ value += relocation;
+ put_unaligned(value, location);
+ break;
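+ /* The R_SH_IMM_* cases patch a 16-bit immediate field in bits 10..25 (SHmedia) */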
+ case R_SH_IMM_LOW16:
+ *location = (*location & ~0x3fffc00) |
+ ((relocation & 0xffff) << 10);
+ break;
+ case R_SH_IMM_MEDLOW16:
+ *location = (*location & ~0x3fffc00) |
+ (((relocation >> 16) & 0xffff) << 10);
+ break;
+ case R_SH_IMM_LOW16_PCREL:
+ relocation -= (Elf32_Addr) location;
+ *location = (*location & ~0x3fffc00) |
+ ((relocation & 0xffff) << 10);
+ break;
+ case R_SH_IMM_MEDLOW16_PCREL:
+ relocation -= (Elf32_Addr) location;
+ *location = (*location & ~0x3fffc00) |
+ (((relocation >> 16) & 0xffff) << 10);
+ break;
+ default:
+ printk(KERN_ERR "module %s: Unknown relocation: %u\n",
+ me->name, ELF32_R_TYPE(rel[i].r_info));
+ return -ENOEXEC;
+ }
+ }
+ return 0;
+}
+
+int module_finalize(const Elf_Ehdr *hdr,
+ const Elf_Shdr *sechdrs,
+ struct module *me)
+{
+ int ret = 0;
+
+ ret |= module_dwarf_finalize(hdr, sechdrs, me);
+
+ return ret;
+}
+
+void module_arch_cleanup(struct module *mod)
+{
+ module_dwarf_cleanup(mod);
+}
diff --git a/arch/sh/kernel/nmi_debug.c b/arch/sh/kernel/nmi_debug.c
new file mode 100644
index 00000000..ff0abbd1
--- /dev/null
+++ b/arch/sh/kernel/nmi_debug.c
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2007 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/delay.h>
+#include <linux/kdebug.h>
+#include <linux/notifier.h>
+#include <linux/sched.h>
+#include <linux/hardirq.h>
+
+enum nmi_action {
+ NMI_SHOW_STATE = 1 << 0,
+ NMI_SHOW_REGS = 1 << 1,
+ NMI_DIE = 1 << 2,
+ NMI_DEBOUNCE = 1 << 3,
+};
+
+static unsigned long nmi_actions;
+
+static int nmi_debug_notify(struct notifier_block *self,
+ unsigned long val, void *data)
+{
+ struct die_args *args = data;
+
+ if (likely(val != DIE_NMI))
+ return NOTIFY_DONE;
+
+ if (nmi_actions & NMI_SHOW_STATE)
+ show_state();
+ if (nmi_actions & NMI_SHOW_REGS)
+ show_regs(args->regs);
+ if (nmi_actions & NMI_DEBOUNCE)
+ mdelay(10);
+ if (nmi_actions & NMI_DIE)
+ return NOTIFY_BAD;
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block nmi_debug_nb = {
+ .notifier_call = nmi_debug_notify,
+};
+
+static int __init nmi_debug_setup(char *str)
+{
+ char *p, *sep;
+
+ register_die_notifier(&nmi_debug_nb);
+
+ if (*str != '=')
+ return 0;
+
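+ /* Parse the comma-separated action list, e.g. nmi_debug=state,regs,die */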
+ for (p = str + 1; *p; p = sep + 1) {
+ sep = strchr(p, ',');
+ if (sep)
+ *sep = 0;
+ if (strcmp(p, "state") == 0)
+ nmi_actions |= NMI_SHOW_STATE;
+ else if (strcmp(p, "regs") == 0)
+ nmi_actions |= NMI_SHOW_REGS;
+ else if (strcmp(p, "debounce") == 0)
+ nmi_actions |= NMI_DEBOUNCE;
+ else if (strcmp(p, "die") == 0)
+ nmi_actions |= NMI_DIE;
+ else
+ printk(KERN_WARNING "NMI: Unrecognized action `%s'\n",
+ p);
+ if (!sep)
+ break;
+ }
+
+ return 0;
+}
+__setup("nmi_debug", nmi_debug_setup);
diff --git a/arch/sh/kernel/perf_callchain.c b/arch/sh/kernel/perf_callchain.c
new file mode 100644
index 00000000..cc80b614
--- /dev/null
+++ b/arch/sh/kernel/perf_callchain.c
@@ -0,0 +1,41 @@
+/*
+ * Performance event callchain support - SuperH architecture code
+ *
+ * Copyright (C) 2009 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/perf_event.h>
+#include <linux/percpu.h>
+#include <asm/unwinder.h>
+#include <asm/ptrace.h>
+
+static int callchain_stack(void *data, char *name)
+{
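+ /* No interest in stack boundary names for the callchain */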
+ return 0;
+}
+
+static void callchain_address(void *data, unsigned long addr, int reliable)
+{
+ struct perf_callchain_entry *entry = data;
+
+ if (reliable)
+ perf_callchain_store(entry, addr);
+}
+
+static const struct stacktrace_ops callchain_ops = {
+ .stack = callchain_stack,
+ .address = callchain_address,
+};
+
+void
+perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
+{
+ perf_callchain_store(entry, regs->pc);
+
+ unwind_stack(NULL, regs, NULL, &callchain_ops, entry);
+}
diff --git a/arch/sh/kernel/perf_event.c b/arch/sh/kernel/perf_event.c
new file mode 100644
index 00000000..068b8a27
--- /dev/null
+++ b/arch/sh/kernel/perf_event.c
@@ -0,0 +1,400 @@
+/*
+ * Performance event support framework for SuperH hardware counters.
+ *
+ * Copyright (C) 2009 Paul Mundt
+ *
+ * Heavily based on the x86 and PowerPC implementations.
+ *
+ * x86:
+ * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
+ * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
+ * Copyright (C) 2009 Jaswinder Singh Rajput
+ * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
+ * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
+ *
+ * ppc:
+ * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/perf_event.h>
+#include <linux/export.h>
+#include <asm/processor.h>
+
+struct cpu_hw_events {
+ struct perf_event *events[MAX_HWEVENTS];
+ unsigned long used_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
+ unsigned long active_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
+};
+
+DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
+
+static struct sh_pmu *sh_pmu __read_mostly;
+
+/* Number of perf_events counting hardware events */
+static atomic_t num_events;
+/* Used to avoid races in calling reserve/release_pmc_hardware */
+static DEFINE_MUTEX(pmc_reserve_mutex);
+
+/*
+ * Stub these out for now, do something more profound later.
+ */
+int reserve_pmc_hardware(void)
+{
+ return 0;
+}
+
+void release_pmc_hardware(void)
+{
+}
+
+static inline int sh_pmu_initialized(void)
+{
+ return !!sh_pmu;
+}
+
+const char *perf_pmu_name(void)
+{
+ if (!sh_pmu)
+ return NULL;
+
+ return sh_pmu->name;
+}
+EXPORT_SYMBOL_GPL(perf_pmu_name);
+
+int perf_num_counters(void)
+{
+ if (!sh_pmu)
+ return 0;
+
+ return sh_pmu->num_events;
+}
+EXPORT_SYMBOL_GPL(perf_num_counters);
+
+/*
+ * Release the PMU if this is the last perf_event.
+ */
+static void hw_perf_event_destroy(struct perf_event *event)
+{
+ if (!atomic_add_unless(&num_events, -1, 1)) {
+ mutex_lock(&pmc_reserve_mutex);
+ if (atomic_dec_return(&num_events) == 0)
+ release_pmc_hardware();
+ mutex_unlock(&pmc_reserve_mutex);
+ }
+}
+
+static int hw_perf_cache_event(int config, int *evp)
+{
+ unsigned long type, op, result;
+ int ev;
+
+ if (!sh_pmu->cache_events)
+ return -EINVAL;
+
+ /* unpack config */
+ type = config & 0xff;
+ op = (config >> 8) & 0xff;
+ result = (config >> 16) & 0xff;
+
+ if (type >= PERF_COUNT_HW_CACHE_MAX ||
+ op >= PERF_COUNT_HW_CACHE_OP_MAX ||
+ result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
+ return -EINVAL;
+
+ ev = (*sh_pmu->cache_events)[type][op][result];
+ if (ev == 0)
+ return -EOPNOTSUPP;
+ if (ev == -1)
+ return -EINVAL;
+ *evp = ev;
+ return 0;
+}
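
hw_perf_cache_event() expects the generic cache-event encoding: type in bits 0-7, op in bits 8-15 and result in bits 16-23 of attr->config. A small helper (hypothetical, not part of the patch) that builds a config value in exactly the layout unpacked above:

    static inline u64 sh_cache_config(unsigned int type, unsigned int op,
                                      unsigned int result)
    {
            return (type & 0xff) | ((op & 0xff) << 8) | ((result & 0xff) << 16);
    }

    /*
     * e.g. L1D read misses:
     *   attr.type   = PERF_TYPE_HW_CACHE;
     *   attr.config = sh_cache_config(PERF_COUNT_HW_CACHE_L1D,
     *                                 PERF_COUNT_HW_CACHE_OP_READ,
     *                                 PERF_COUNT_HW_CACHE_RESULT_MISS);
     */
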
+
+static int __hw_perf_event_init(struct perf_event *event)
+{
+ struct perf_event_attr *attr = &event->attr;
+ struct hw_perf_event *hwc = &event->hw;
+ int config = -1;
+ int err;
+
+ if (!sh_pmu_initialized())
+ return -ENODEV;
+
+ /*
+ * All of the on-chip counters are "limited", in that they have
+ * no interrupts, and are therefore unable to do sampling without
+ * further work and timer assistance.
+ */
+ if (hwc->sample_period)
+ return -EINVAL;
+
+ /*
+ * See if we need to reserve the counter.
+ *
+ * If no events are currently in use, then we have to take a
+ * mutex to ensure that we don't race with another task doing
+ * reserve_pmc_hardware or release_pmc_hardware.
+ */
+ err = 0;
+ if (!atomic_inc_not_zero(&num_events)) {
+ mutex_lock(&pmc_reserve_mutex);
+ if (atomic_read(&num_events) == 0 &&
+ reserve_pmc_hardware())
+ err = -EBUSY;
+ else
+ atomic_inc(&num_events);
+ mutex_unlock(&pmc_reserve_mutex);
+ }
+
+ if (err)
+ return err;
+
+ event->destroy = hw_perf_event_destroy;
+
+ switch (attr->type) {
+ case PERF_TYPE_RAW:
+ config = attr->config & sh_pmu->raw_event_mask;
+ break;
+ case PERF_TYPE_HW_CACHE:
+ err = hw_perf_cache_event(attr->config, &config);
+ if (err)
+ return err;
+ break;
+ case PERF_TYPE_HARDWARE:
+ if (attr->config >= sh_pmu->max_events)
+ return -EINVAL;
+
+ config = sh_pmu->event_map(attr->config);
+ break;
+ }
+
+ if (config == -1)
+ return -EINVAL;
+
+ hwc->config |= config;
+
+ return 0;
+}
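
__hw_perf_event_init() and hw_perf_event_destroy() above implement a common "first user reserves, last user releases" refcount pattern: the fast path is a lock-free atomic increment/decrement, and the mutex is only taken when the count might cross zero. Stripped of the perf specifics, the pattern looks roughly like this (a sketch only; reserve_hw()/release_hw() stand in for reserve_pmc_hardware()/release_pmc_hardware()):

    static int reserve_hw(void)  { return 0; }     /* hypothetical */
    static void release_hw(void) { }               /* hypothetical */

    static atomic_t users;
    static DEFINE_MUTEX(user_lock);

    static int get_hw(void)
    {
            int err = 0;

            if (!atomic_inc_not_zero(&users)) {     /* count was 0: slow path */
                    mutex_lock(&user_lock);
                    if (atomic_read(&users) == 0 && reserve_hw())
                            err = -EBUSY;
                    else
                            atomic_inc(&users);
                    mutex_unlock(&user_lock);
            }
            return err;
    }

    static void put_hw(void)
    {
            if (!atomic_add_unless(&users, -1, 1)) {  /* would reach 0: slow path */
                    mutex_lock(&user_lock);
                    if (atomic_dec_return(&users) == 0)
                            release_hw();
                    mutex_unlock(&user_lock);
            }
    }
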
+
+static void sh_perf_event_update(struct perf_event *event,
+ struct hw_perf_event *hwc, int idx)
+{
+ u64 prev_raw_count, new_raw_count;
+ s64 delta;
+ int shift = 0;
+
+ /*
+ * Depending on the counter configuration, they may or may not
+ * be chained, in which case the previous counter value can be
+ * updated underneath us if the lower-half overflows.
+ *
+ * Our tactic to handle this is to first atomically read and
+ * exchange a new raw count - then add that new-prev delta
+ * count to the generic counter atomically.
+ *
+ * As there is no interrupt associated with the overflow events,
+ * this is the simplest approach for maintaining consistency.
+ */
+again:
+ prev_raw_count = local64_read(&hwc->prev_count);
+ new_raw_count = sh_pmu->read(idx);
+
+ if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
+ new_raw_count) != prev_raw_count)
+ goto again;
+
+ /*
+ * Now we have the new raw value and have updated the prev
+ * timestamp already. We can now calculate the elapsed delta
+ * (counter-)time and add that to the generic counter.
+ *
+ * Careful, not all hw sign-extends above the physical width
+ * of the count.
+ */
+ delta = (new_raw_count << shift) - (prev_raw_count << shift);
+ delta >>= shift;
+
+ local64_add(delta, &event->count);
+}
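
With shift hard-coded to 0, delta above is simply new_raw_count - prev_raw_count; the shift dance only matters for counters narrower than 64 bits that do not sign-extend. A worked example assuming a hypothetical 48-bit counter (shift would then be 64 - 48 = 16):

    /*
     * prev = 0xffffffffffff (48-bit max), new = 0x5 after a wrap:
     *
     *   (new << 16) - (prev << 16) = 0x50000 - 0xffffffffffff0000
     *                              = 0x60000                (mod 2^64)
     *   delta = (s64)0x60000 >> 16 = 6
     *
     * i.e. the modular subtraction in the shifted domain absorbs the
     * wrap-around and yields the true count of 6 events.
     */
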
+
+static void sh_pmu_stop(struct perf_event *event, int flags)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+
+ if (!(event->hw.state & PERF_HES_STOPPED)) {
+ sh_pmu->disable(hwc, idx);
+ cpuc->events[idx] = NULL;
+ event->hw.state |= PERF_HES_STOPPED;
+ }
+
+ if ((flags & PERF_EF_UPDATE) && !(event->hw.state & PERF_HES_UPTODATE)) {
+ sh_perf_event_update(event, &event->hw, idx);
+ event->hw.state |= PERF_HES_UPTODATE;
+ }
+}
+
+static void sh_pmu_start(struct perf_event *event, int flags)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+
+ if (WARN_ON_ONCE(idx == -1))
+ return;
+
+ if (flags & PERF_EF_RELOAD)
+ WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
+
+ cpuc->events[idx] = event;
+ event->hw.state = 0;
+ sh_pmu->enable(hwc, idx);
+}
+
+static void sh_pmu_del(struct perf_event *event, int flags)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+ sh_pmu_stop(event, PERF_EF_UPDATE);
+ __clear_bit(event->hw.idx, cpuc->used_mask);
+
+ perf_event_update_userpage(event);
+}
+
+static int sh_pmu_add(struct perf_event *event, int flags)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+ int ret = -EAGAIN;
+
+ perf_pmu_disable(event->pmu);
+
+ if (__test_and_set_bit(idx, cpuc->used_mask)) {
+ idx = find_first_zero_bit(cpuc->used_mask, sh_pmu->num_events);
+ if (idx == sh_pmu->num_events)
+ goto out;
+
+ __set_bit(idx, cpuc->used_mask);
+ hwc->idx = idx;
+ }
+
+ sh_pmu->disable(hwc, idx);
+
+ event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
+ if (flags & PERF_EF_START)
+ sh_pmu_start(event, PERF_EF_RELOAD);
+
+ perf_event_update_userpage(event);
+ ret = 0;
+out:
+ perf_pmu_enable(event->pmu);
+ return ret;
+}
+
+static void sh_pmu_read(struct perf_event *event)
+{
+ sh_perf_event_update(event, &event->hw, event->hw.idx);
+}
+
+static int sh_pmu_event_init(struct perf_event *event)
+{
+ int err;
+
+ /* does not support taken branch sampling */
+ if (has_branch_stack(event))
+ return -EOPNOTSUPP;
+
+ switch (event->attr.type) {
+ case PERF_TYPE_RAW:
+ case PERF_TYPE_HW_CACHE:
+ case PERF_TYPE_HARDWARE:
+ err = __hw_perf_event_init(event);
+ break;
+
+ default:
+ return -ENOENT;
+ }
+
+ if (unlikely(err)) {
+ if (event->destroy)
+ event->destroy(event);
+ }
+
+ return err;
+}
+
+static void sh_pmu_enable(struct pmu *pmu)
+{
+ if (!sh_pmu_initialized())
+ return;
+
+ sh_pmu->enable_all();
+}
+
+static void sh_pmu_disable(struct pmu *pmu)
+{
+ if (!sh_pmu_initialized())
+ return;
+
+ sh_pmu->disable_all();
+}
+
+static struct pmu pmu = {
+ .pmu_enable = sh_pmu_enable,
+ .pmu_disable = sh_pmu_disable,
+ .event_init = sh_pmu_event_init,
+ .add = sh_pmu_add,
+ .del = sh_pmu_del,
+ .start = sh_pmu_start,
+ .stop = sh_pmu_stop,
+ .read = sh_pmu_read,
+};
+
+static void sh_pmu_setup(int cpu)
+{
+ struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
+
+ memset(cpuhw, 0, sizeof(struct cpu_hw_events));
+}
+
+static int __cpuinit
+sh_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (long)hcpu;
+
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_UP_PREPARE:
+ sh_pmu_setup(cpu);
+ break;
+
+ default:
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+int __cpuinit register_sh_pmu(struct sh_pmu *_pmu)
+{
+ if (sh_pmu)
+ return -EBUSY;
+ sh_pmu = _pmu;
+
+ pr_info("Performance Events: %s support registered\n", _pmu->name);
+
+ WARN_ON(_pmu->num_events > MAX_HWEVENTS);
+
+ perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
+ perf_cpu_notifier(sh_pmu_notifier);
+ return 0;
+}
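
Everything CPU-specific sits behind struct sh_pmu: a backend (such as the SH-4A one listed in the diffstat) fills in the callbacks dereferenced above and hands the structure to register_sh_pmu(). A hedged sketch; all identifiers below are made up, and the callback signatures are inferred from the call sites in this file rather than taken from the real structure definition:

    static u64 example_read(int idx)                                { return 0; }
    static void example_enable(struct hw_perf_event *hwc, int idx)  { }
    static void example_disable(struct hw_perf_event *hwc, int idx) { }
    static void example_enable_all(void)                            { }
    static void example_disable_all(void)                           { }
    static int example_event_map(int config)                        { return 0; }

    static struct sh_pmu example_pmu = {
            .name           = "example",
            .num_events     = 2,            /* hardware counters available */
            .max_events     = 1,            /* entries understood by event_map() */
            .event_map      = example_event_map,
            .raw_event_mask = 0xff,
            /* .cache_events left NULL: PERF_TYPE_HW_CACHE then gets -EINVAL */
            .read           = example_read,
            .enable         = example_enable,
            .disable        = example_disable,
            .enable_all     = example_enable_all,
            .disable_all    = example_disable_all,
    };

    static int __init example_pmu_init(void)
    {
            return register_sh_pmu(&example_pmu);
    }
    early_initcall(example_pmu_init);       /* initcall level illustrative */
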
diff --git a/arch/sh/kernel/process.c b/arch/sh/kernel/process.c
new file mode 100644
index 00000000..325f98b1
--- /dev/null
+++ b/arch/sh/kernel/process.c
@@ -0,0 +1,103 @@
+#include <linux/mm.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+
+struct kmem_cache *task_xstate_cachep = NULL;
+unsigned int xstate_size;
+
+int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
+{
+ *dst = *src;
+
+ if (src->thread.xstate) {
+ dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
+ GFP_KERNEL);
+ if (!dst->thread.xstate)
+ return -ENOMEM;
+ memcpy(dst->thread.xstate, src->thread.xstate, xstate_size);
+ }
+
+ return 0;
+}
+
+void free_thread_xstate(struct task_struct *tsk)
+{
+ if (tsk->thread.xstate) {
+ kmem_cache_free(task_xstate_cachep, tsk->thread.xstate);
+ tsk->thread.xstate = NULL;
+ }
+}
+
+#if THREAD_SHIFT < PAGE_SHIFT
+static struct kmem_cache *thread_info_cache;
+
+struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node)
+{
+ struct thread_info *ti;
+#ifdef CONFIG_DEBUG_STACK_USAGE
+ gfp_t mask = GFP_KERNEL | __GFP_ZERO;
+#else
+ gfp_t mask = GFP_KERNEL;
+#endif
+
+ ti = kmem_cache_alloc_node(thread_info_cache, mask, node);
+ return ti;
+}
+
+void free_thread_info(struct thread_info *ti)
+{
+ free_thread_xstate(ti->task);
+ kmem_cache_free(thread_info_cache, ti);
+}
+
+void thread_info_cache_init(void)
+{
+ thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
+ THREAD_SIZE, SLAB_PANIC, NULL);
+}
+#else
+struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node)
+{
+#ifdef CONFIG_DEBUG_STACK_USAGE
+ gfp_t mask = GFP_KERNEL | __GFP_ZERO;
+#else
+ gfp_t mask = GFP_KERNEL;
+#endif
+ struct page *page = alloc_pages_node(node, mask, THREAD_SIZE_ORDER);
+
+ return page ? page_address(page) : NULL;
+}
+
+void free_thread_info(struct thread_info *ti)
+{
+ free_thread_xstate(ti->task);
+ free_pages((unsigned long)ti, THREAD_SIZE_ORDER);
+}
+#endif /* THREAD_SHIFT < PAGE_SHIFT */
+
+void arch_task_cache_init(void)
+{
+ if (!xstate_size)
+ return;
+
+ task_xstate_cachep = kmem_cache_create("task_xstate", xstate_size,
+ __alignof__(union thread_xstate),
+ SLAB_PANIC | SLAB_NOTRACK, NULL);
+}
+
+#ifdef CONFIG_SH_FPU_EMU
+# define HAVE_SOFTFP 1
+#else
+# define HAVE_SOFTFP 0
+#endif
+
+void __cpuinit init_thread_xstate(void)
+{
+ if (boot_cpu_data.flags & CPU_HAS_FPU)
+ xstate_size = sizeof(struct sh_fpu_hard_struct);
+ else if (HAVE_SOFTFP)
+ xstate_size = sizeof(struct sh_fpu_soft_struct);
+ else
+ xstate_size = 0;
+}
diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c
new file mode 100644
index 00000000..94273aaf
--- /dev/null
+++ b/arch/sh/kernel/process_32.c
@@ -0,0 +1,338 @@
+/*
+ * arch/sh/kernel/process_32.c
+ *
+ * This file handles the architecture-dependent parts of process handling.
+ *
+ * Copyright (C) 1995 Linus Torvalds
+ *
+ * SuperH version: Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima
+ * Copyright (C) 2006 Lineo Solutions Inc. support SH4A UBC
+ * Copyright (C) 2002 - 2008 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/elfcore.h>
+#include <linux/kallsyms.h>
+#include <linux/fs.h>
+#include <linux/ftrace.h>
+#include <linux/hw_breakpoint.h>
+#include <linux/prefetch.h>
+#include <asm/uaccess.h>
+#include <asm/mmu_context.h>
+#include <asm/fpu.h>
+#include <asm/syscalls.h>
+#include <asm/switch_to.h>
+
+void show_regs(struct pt_regs * regs)
+{
+ printk("\n");
+ printk("Pid : %d, Comm: \t\t%s\n", task_pid_nr(current), current->comm);
+ printk("CPU : %d \t\t%s (%s %.*s)\n\n",
+ smp_processor_id(), print_tainted(), init_utsname()->release,
+ (int)strcspn(init_utsname()->version, " "),
+ init_utsname()->version);
+
+ print_symbol("PC is at %s\n", instruction_pointer(regs));
+ print_symbol("PR is at %s\n", regs->pr);
+
+ printk("PC : %08lx SP : %08lx SR : %08lx ",
+ regs->pc, regs->regs[15], regs->sr);
+#ifdef CONFIG_MMU
+ printk("TEA : %08x\n", __raw_readl(MMU_TEA));
+#else
+ printk("\n");
+#endif
+
+ printk("R0 : %08lx R1 : %08lx R2 : %08lx R3 : %08lx\n",
+ regs->regs[0],regs->regs[1],
+ regs->regs[2],regs->regs[3]);
+ printk("R4 : %08lx R5 : %08lx R6 : %08lx R7 : %08lx\n",
+ regs->regs[4],regs->regs[5],
+ regs->regs[6],regs->regs[7]);
+ printk("R8 : %08lx R9 : %08lx R10 : %08lx R11 : %08lx\n",
+ regs->regs[8],regs->regs[9],
+ regs->regs[10],regs->regs[11]);
+ printk("R12 : %08lx R13 : %08lx R14 : %08lx\n",
+ regs->regs[12],regs->regs[13],
+ regs->regs[14]);
+ printk("MACH: %08lx MACL: %08lx GBR : %08lx PR : %08lx\n",
+ regs->mach, regs->macl, regs->gbr, regs->pr);
+
+ show_trace(NULL, (unsigned long *)regs->regs[15], regs);
+ show_code(regs);
+}
+
+/*
+ * Create a kernel thread
+ */
+__noreturn void kernel_thread_helper(void *arg, int (*fn)(void *))
+{
+ do_exit(fn(arg));
+}
+
+/* Don't use this while BL=1 (i.e. with interrupts blocked, as after cli), or else the CPU resets! */
+int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
+{
+ struct pt_regs regs;
+ int pid;
+
+ memset(&regs, 0, sizeof(regs));
+ regs.regs[4] = (unsigned long)arg;
+ regs.regs[5] = (unsigned long)fn;
+
+ regs.pc = (unsigned long)kernel_thread_helper;
+ regs.sr = SR_MD;
+#if defined(CONFIG_SH_FPU)
+ regs.sr |= SR_FD;
+#endif
+
+ /* Ok, create the new process.. */
+ pid = do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0,
+ &regs, 0, NULL, NULL);
+
+ return pid;
+}
+EXPORT_SYMBOL(kernel_thread);
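
kernel_thread() here simply fakes a register frame (fn in r5, its argument in r4, PC pointing at kernel_thread_helper) and lets do_fork() do the rest, so the child starts in the helper, calls fn(arg) and exits with its return value. An illustrative caller, with both function names hypothetical:

    static int my_worker(void *arg)
    {
            pr_info("worker running as pid %d\n", task_pid_nr(current));
            return 0;
    }

    static int __init spawn_worker(void)
    {
            int pid = kernel_thread(my_worker, NULL, 0);

            return pid < 0 ? pid : 0;
    }
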
+
+void start_thread(struct pt_regs *regs, unsigned long new_pc,
+ unsigned long new_sp)
+{
+ regs->pr = 0;
+ regs->sr = SR_FD;
+ regs->pc = new_pc;
+ regs->regs[15] = new_sp;
+
+ free_thread_xstate(current);
+}
+EXPORT_SYMBOL(start_thread);
+
+/*
+ * Free current thread data structures etc..
+ */
+void exit_thread(void)
+{
+}
+
+void flush_thread(void)
+{
+ struct task_struct *tsk = current;
+
+ flush_ptrace_hw_breakpoint(tsk);
+
+#if defined(CONFIG_SH_FPU)
+ /* Forget lazy FPU state */
+ clear_fpu(tsk, task_pt_regs(tsk));
+ clear_used_math();
+#endif
+}
+
+void release_thread(struct task_struct *dead_task)
+{
+ /* do nothing */
+}
+
+/* Fill in the fpu structure for a core dump.. */
+int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
+{
+ int fpvalid = 0;
+
+#if defined(CONFIG_SH_FPU)
+ struct task_struct *tsk = current;
+
+ fpvalid = !!tsk_used_math(tsk);
+ if (fpvalid)
+ fpvalid = !fpregs_get(tsk, NULL, 0,
+ sizeof(struct user_fpu_struct),
+ fpu, NULL);
+#endif
+
+ return fpvalid;
+}
+EXPORT_SYMBOL(dump_fpu);
+
+/*
+ * This gets called before we allocate a new thread and copy
+ * the current task into it.
+ */
+void prepare_to_copy(struct task_struct *tsk)
+{
+ unlazy_fpu(tsk, task_pt_regs(tsk));
+}
+
+asmlinkage void ret_from_fork(void);
+
+int copy_thread(unsigned long clone_flags, unsigned long usp,
+ unsigned long unused,
+ struct task_struct *p, struct pt_regs *regs)
+{
+ struct thread_info *ti = task_thread_info(p);
+ struct pt_regs *childregs;
+
+#if defined(CONFIG_SH_DSP)
+ struct task_struct *tsk = current;
+
+ if (is_dsp_enabled(tsk)) {
+ /* We can either use __save_dsp() or just copy the struct:
+ * __save_dsp(p);
+ * p->thread.dsp_status.status |= SR_DSP;
+ */
+ p->thread.dsp_status = tsk->thread.dsp_status;
+ }
+#endif
+
+ childregs = task_pt_regs(p);
+ *childregs = *regs;
+
+ if (user_mode(regs)) {
+ childregs->regs[15] = usp;
+ ti->addr_limit = USER_DS;
+ } else {
+ childregs->regs[15] = (unsigned long)childregs;
+ ti->addr_limit = KERNEL_DS;
+ ti->status &= ~TS_USEDFPU;
+ p->fpu_counter = 0;
+ }
+
+ if (clone_flags & CLONE_SETTLS)
+ childregs->gbr = childregs->regs[0];
+
+ childregs->regs[0] = 0; /* Set return value for child */
+
+ p->thread.sp = (unsigned long) childregs;
+ p->thread.pc = (unsigned long) ret_from_fork;
+
+ memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
+
+ return 0;
+}
+
+/*
+ * switch_to(x,y) should switch tasks from x to y.
+ *
+ */
+__notrace_funcgraph struct task_struct *
+__switch_to(struct task_struct *prev, struct task_struct *next)
+{
+ struct thread_struct *next_t = &next->thread;
+
+ unlazy_fpu(prev, task_pt_regs(prev));
+
+ /* we're going to use this soon, after a few expensive things */
+ if (next->fpu_counter > 5)
+ prefetch(next_t->xstate);
+
+#ifdef CONFIG_MMU
+ /*
+ * Restore the kernel mode register
+ * k7 (r7_bank1)
+ */
+ asm volatile("ldc %0, r7_bank"
+ : /* no output */
+ : "r" (task_thread_info(next)));
+#endif
+
+ /*
+ * If the task has used fpu the last 5 timeslices, just do a full
+ * restore of the math state immediately to avoid the trap; the
+ * chances of needing FPU soon are obviously high now
+ */
+ if (next->fpu_counter > 5)
+ __fpu_state_restore();
+
+ return prev;
+}
+
+asmlinkage int sys_fork(unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs __regs)
+{
+#ifdef CONFIG_MMU
+ struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
+ return do_fork(SIGCHLD, regs->regs[15], regs, 0, NULL, NULL);
+#else
+ /* fork almost works, enough to trick you into looking elsewhere :-( */
+ return -EINVAL;
+#endif
+}
+
+asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
+ unsigned long parent_tidptr,
+ unsigned long child_tidptr,
+ struct pt_regs __regs)
+{
+ struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
+ if (!newsp)
+ newsp = regs->regs[15];
+ return do_fork(clone_flags, newsp, regs, 0,
+ (int __user *)parent_tidptr,
+ (int __user *)child_tidptr);
+}
+
+/*
+ * This is trivial, and on the face of it looks like it
+ * could equally well be done in user mode.
+ *
+ * Not so, for quite unobvious reasons - register pressure.
+ * In user mode vfork() cannot have a stack frame, and if
+ * done by calling the "clone()" system call directly, you
+ * do not have enough call-clobbered registers to hold all
+ * the information you need.
+ */
+asmlinkage int sys_vfork(unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs __regs)
+{
+ struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
+ return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->regs[15], regs,
+ 0, NULL, NULL);
+}
+
+/*
+ * sys_execve() executes a new program.
+ */
+asmlinkage int sys_execve(const char __user *ufilename,
+ const char __user *const __user *uargv,
+ const char __user *const __user *uenvp,
+ unsigned long r7, struct pt_regs __regs)
+{
+ struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
+ int error;
+ char *filename;
+
+ filename = getname(ufilename);
+ error = PTR_ERR(filename);
+ if (IS_ERR(filename))
+ goto out;
+
+ error = do_execve(filename, uargv, uenvp, regs);
+ putname(filename);
+out:
+ return error;
+}
+
+unsigned long get_wchan(struct task_struct *p)
+{
+ unsigned long pc;
+
+ if (!p || p == current || p->state == TASK_RUNNING)
+ return 0;
+
+ /*
+ * The same comment as on the Alpha applies here, too ...
+ */
+ pc = thread_saved_pc(p);
+
+#ifdef CONFIG_FRAME_POINTER
+ if (in_sched_functions(pc)) {
+ unsigned long schedule_frame = (unsigned long)p->thread.sp;
+ return ((unsigned long *)schedule_frame)[21];
+ }
+#endif
+
+ return pc;
+}
diff --git a/arch/sh/kernel/process_64.c b/arch/sh/kernel/process_64.c
new file mode 100644
index 00000000..4264583e
--- /dev/null
+++ b/arch/sh/kernel/process_64.c
@@ -0,0 +1,549 @@
+/*
+ * arch/sh/kernel/process_64.c
+ *
+ * This file handles the architecture-dependent parts of process handling.
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ * Copyright (C) 2003 - 2007 Paul Mundt
+ * Copyright (C) 2003, 2004 Richard Curnow
+ *
+ * Started from SH3/4 version:
+ * Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima
+ *
+ * In turn started from i386 version:
+ * Copyright (C) 1995 Linus Torvalds
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/ptrace.h>
+#include <linux/reboot.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <asm/syscalls.h>
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/mmu_context.h>
+#include <asm/fpu.h>
+#include <asm/switch_to.h>
+
+struct task_struct *last_task_used_math = NULL;
+
+void show_regs(struct pt_regs *regs)
+{
+ unsigned long long ah, al, bh, bl, ch, cl;
+
+ printk("\n");
+
+ ah = (regs->pc) >> 32;
+ al = (regs->pc) & 0xffffffff;
+ bh = (regs->regs[18]) >> 32;
+ bl = (regs->regs[18]) & 0xffffffff;
+ ch = (regs->regs[15]) >> 32;
+ cl = (regs->regs[15]) & 0xffffffff;
+ printk("PC : %08Lx%08Lx LINK: %08Lx%08Lx SP : %08Lx%08Lx\n",
+ ah, al, bh, bl, ch, cl);
+
+ ah = (regs->sr) >> 32;
+ al = (regs->sr) & 0xffffffff;
+ asm volatile ("getcon " __TEA ", %0" : "=r" (bh));
+ asm volatile ("getcon " __TEA ", %0" : "=r" (bl));
+ bh = (bh) >> 32;
+ bl = (bl) & 0xffffffff;
+ asm volatile ("getcon " __KCR0 ", %0" : "=r" (ch));
+ asm volatile ("getcon " __KCR0 ", %0" : "=r" (cl));
+ ch = (ch) >> 32;
+ cl = (cl) & 0xffffffff;
+ printk("SR : %08Lx%08Lx TEA : %08Lx%08Lx KCR0: %08Lx%08Lx\n",
+ ah, al, bh, bl, ch, cl);
+
+ ah = (regs->regs[0]) >> 32;
+ al = (regs->regs[0]) & 0xffffffff;
+ bh = (regs->regs[1]) >> 32;
+ bl = (regs->regs[1]) & 0xffffffff;
+ ch = (regs->regs[2]) >> 32;
+ cl = (regs->regs[2]) & 0xffffffff;
+ printk("R0 : %08Lx%08Lx R1 : %08Lx%08Lx R2 : %08Lx%08Lx\n",
+ ah, al, bh, bl, ch, cl);
+
+ ah = (regs->regs[3]) >> 32;
+ al = (regs->regs[3]) & 0xffffffff;
+ bh = (regs->regs[4]) >> 32;
+ bl = (regs->regs[4]) & 0xffffffff;
+ ch = (regs->regs[5]) >> 32;
+ cl = (regs->regs[5]) & 0xffffffff;
+ printk("R3 : %08Lx%08Lx R4 : %08Lx%08Lx R5 : %08Lx%08Lx\n",
+ ah, al, bh, bl, ch, cl);
+
+ ah = (regs->regs[6]) >> 32;
+ al = (regs->regs[6]) & 0xffffffff;
+ bh = (regs->regs[7]) >> 32;
+ bl = (regs->regs[7]) & 0xffffffff;
+ ch = (regs->regs[8]) >> 32;
+ cl = (regs->regs[8]) & 0xffffffff;
+ printk("R6 : %08Lx%08Lx R7 : %08Lx%08Lx R8 : %08Lx%08Lx\n",
+ ah, al, bh, bl, ch, cl);
+
+ ah = (regs->regs[9]) >> 32;
+ al = (regs->regs[9]) & 0xffffffff;
+ bh = (regs->regs[10]) >> 32;
+ bl = (regs->regs[10]) & 0xffffffff;
+ ch = (regs->regs[11]) >> 32;
+ cl = (regs->regs[11]) & 0xffffffff;
+ printk("R9 : %08Lx%08Lx R10 : %08Lx%08Lx R11 : %08Lx%08Lx\n",
+ ah, al, bh, bl, ch, cl);
+
+ ah = (regs->regs[12]) >> 32;
+ al = (regs->regs[12]) & 0xffffffff;
+ bh = (regs->regs[13]) >> 32;
+ bl = (regs->regs[13]) & 0xffffffff;
+ ch = (regs->regs[14]) >> 32;
+ cl = (regs->regs[14]) & 0xffffffff;
+ printk("R12 : %08Lx%08Lx R13 : %08Lx%08Lx R14 : %08Lx%08Lx\n",
+ ah, al, bh, bl, ch, cl);
+
+ ah = (regs->regs[16]) >> 32;
+ al = (regs->regs[16]) & 0xffffffff;
+ bh = (regs->regs[17]) >> 32;
+ bl = (regs->regs[17]) & 0xffffffff;
+ ch = (regs->regs[19]) >> 32;
+ cl = (regs->regs[19]) & 0xffffffff;
+ printk("R16 : %08Lx%08Lx R17 : %08Lx%08Lx R19 : %08Lx%08Lx\n",
+ ah, al, bh, bl, ch, cl);
+
+ ah = (regs->regs[20]) >> 32;
+ al = (regs->regs[20]) & 0xffffffff;
+ bh = (regs->regs[21]) >> 32;
+ bl = (regs->regs[21]) & 0xffffffff;
+ ch = (regs->regs[22]) >> 32;
+ cl = (regs->regs[22]) & 0xffffffff;
+ printk("R20 : %08Lx%08Lx R21 : %08Lx%08Lx R22 : %08Lx%08Lx\n",
+ ah, al, bh, bl, ch, cl);
+
+ ah = (regs->regs[23]) >> 32;
+ al = (regs->regs[23]) & 0xffffffff;
+ bh = (regs->regs[24]) >> 32;
+ bl = (regs->regs[24]) & 0xffffffff;
+ ch = (regs->regs[25]) >> 32;
+ cl = (regs->regs[25]) & 0xffffffff;
+ printk("R23 : %08Lx%08Lx R24 : %08Lx%08Lx R25 : %08Lx%08Lx\n",
+ ah, al, bh, bl, ch, cl);
+
+ ah = (regs->regs[26]) >> 32;
+ al = (regs->regs[26]) & 0xffffffff;
+ bh = (regs->regs[27]) >> 32;
+ bl = (regs->regs[27]) & 0xffffffff;
+ ch = (regs->regs[28]) >> 32;
+ cl = (regs->regs[28]) & 0xffffffff;
+ printk("R26 : %08Lx%08Lx R27 : %08Lx%08Lx R28 : %08Lx%08Lx\n",
+ ah, al, bh, bl, ch, cl);
+
+ ah = (regs->regs[29]) >> 32;
+ al = (regs->regs[29]) & 0xffffffff;
+ bh = (regs->regs[30]) >> 32;
+ bl = (regs->regs[30]) & 0xffffffff;
+ ch = (regs->regs[31]) >> 32;
+ cl = (regs->regs[31]) & 0xffffffff;
+ printk("R29 : %08Lx%08Lx R30 : %08Lx%08Lx R31 : %08Lx%08Lx\n",
+ ah, al, bh, bl, ch, cl);
+
+ ah = (regs->regs[32]) >> 32;
+ al = (regs->regs[32]) & 0xffffffff;
+ bh = (regs->regs[33]) >> 32;
+ bl = (regs->regs[33]) & 0xffffffff;
+ ch = (regs->regs[34]) >> 32;
+ cl = (regs->regs[34]) & 0xffffffff;
+ printk("R32 : %08Lx%08Lx R33 : %08Lx%08Lx R34 : %08Lx%08Lx\n",
+ ah, al, bh, bl, ch, cl);
+
+ ah = (regs->regs[35]) >> 32;
+ al = (regs->regs[35]) & 0xffffffff;
+ bh = (regs->regs[36]) >> 32;
+ bl = (regs->regs[36]) & 0xffffffff;
+ ch = (regs->regs[37]) >> 32;
+ cl = (regs->regs[37]) & 0xffffffff;
+ printk("R35 : %08Lx%08Lx R36 : %08Lx%08Lx R37 : %08Lx%08Lx\n",
+ ah, al, bh, bl, ch, cl);
+
+ ah = (regs->regs[38]) >> 32;
+ al = (regs->regs[38]) & 0xffffffff;
+ bh = (regs->regs[39]) >> 32;
+ bl = (regs->regs[39]) & 0xffffffff;
+ ch = (regs->regs[40]) >> 32;
+ cl = (regs->regs[40]) & 0xffffffff;
+ printk("R38 : %08Lx%08Lx R39 : %08Lx%08Lx R40 : %08Lx%08Lx\n",
+ ah, al, bh, bl, ch, cl);
+
+ ah = (regs->regs[41]) >> 32;
+ al = (regs->regs[41]) & 0xffffffff;
+ bh = (regs->regs[42]) >> 32;
+ bl = (regs->regs[42]) & 0xffffffff;
+ ch = (regs->regs[43]) >> 32;
+ cl = (regs->regs[43]) & 0xffffffff;
+ printk("R41 : %08Lx%08Lx R42 : %08Lx%08Lx R43 : %08Lx%08Lx\n",
+ ah, al, bh, bl, ch, cl);
+
+ ah = (regs->regs[44]) >> 32;
+ al = (regs->regs[44]) & 0xffffffff;
+ bh = (regs->regs[45]) >> 32;
+ bl = (regs->regs[45]) & 0xffffffff;
+ ch = (regs->regs[46]) >> 32;
+ cl = (regs->regs[46]) & 0xffffffff;
+ printk("R44 : %08Lx%08Lx R45 : %08Lx%08Lx R46 : %08Lx%08Lx\n",
+ ah, al, bh, bl, ch, cl);
+
+ ah = (regs->regs[47]) >> 32;
+ al = (regs->regs[47]) & 0xffffffff;
+ bh = (regs->regs[48]) >> 32;
+ bl = (regs->regs[48]) & 0xffffffff;
+ ch = (regs->regs[49]) >> 32;
+ cl = (regs->regs[49]) & 0xffffffff;
+ printk("R47 : %08Lx%08Lx R48 : %08Lx%08Lx R49 : %08Lx%08Lx\n",
+ ah, al, bh, bl, ch, cl);
+
+ ah = (regs->regs[50]) >> 32;
+ al = (regs->regs[50]) & 0xffffffff;
+ bh = (regs->regs[51]) >> 32;
+ bl = (regs->regs[51]) & 0xffffffff;
+ ch = (regs->regs[52]) >> 32;
+ cl = (regs->regs[52]) & 0xffffffff;
+ printk("R50 : %08Lx%08Lx R51 : %08Lx%08Lx R52 : %08Lx%08Lx\n",
+ ah, al, bh, bl, ch, cl);
+
+ ah = (regs->regs[53]) >> 32;
+ al = (regs->regs[53]) & 0xffffffff;
+ bh = (regs->regs[54]) >> 32;
+ bl = (regs->regs[54]) & 0xffffffff;
+ ch = (regs->regs[55]) >> 32;
+ cl = (regs->regs[55]) & 0xffffffff;
+ printk("R53 : %08Lx%08Lx R54 : %08Lx%08Lx R55 : %08Lx%08Lx\n",
+ ah, al, bh, bl, ch, cl);
+
+ ah = (regs->regs[56]) >> 32;
+ al = (regs->regs[56]) & 0xffffffff;
+ bh = (regs->regs[57]) >> 32;
+ bl = (regs->regs[57]) & 0xffffffff;
+ ch = (regs->regs[58]) >> 32;
+ cl = (regs->regs[58]) & 0xffffffff;
+ printk("R56 : %08Lx%08Lx R57 : %08Lx%08Lx R58 : %08Lx%08Lx\n",
+ ah, al, bh, bl, ch, cl);
+
+ ah = (regs->regs[59]) >> 32;
+ al = (regs->regs[59]) & 0xffffffff;
+ bh = (regs->regs[60]) >> 32;
+ bl = (regs->regs[60]) & 0xffffffff;
+ ch = (regs->regs[61]) >> 32;
+ cl = (regs->regs[61]) & 0xffffffff;
+ printk("R59 : %08Lx%08Lx R60 : %08Lx%08Lx R61 : %08Lx%08Lx\n",
+ ah, al, bh, bl, ch, cl);
+
+ ah = (regs->regs[62]) >> 32;
+ al = (regs->regs[62]) & 0xffffffff;
+ bh = (regs->tregs[0]) >> 32;
+ bl = (regs->tregs[0]) & 0xffffffff;
+ ch = (regs->tregs[1]) >> 32;
+ cl = (regs->tregs[1]) & 0xffffffff;
+ printk("R62 : %08Lx%08Lx T0 : %08Lx%08Lx T1 : %08Lx%08Lx\n",
+ ah, al, bh, bl, ch, cl);
+
+ ah = (regs->tregs[2]) >> 32;
+ al = (regs->tregs[2]) & 0xffffffff;
+ bh = (regs->tregs[3]) >> 32;
+ bl = (regs->tregs[3]) & 0xffffffff;
+ ch = (regs->tregs[4]) >> 32;
+ cl = (regs->tregs[4]) & 0xffffffff;
+ printk("T2 : %08Lx%08Lx T3 : %08Lx%08Lx T4 : %08Lx%08Lx\n",
+ ah, al, bh, bl, ch, cl);
+
+ ah = (regs->tregs[5]) >> 32;
+ al = (regs->tregs[5]) & 0xffffffff;
+ bh = (regs->tregs[6]) >> 32;
+ bl = (regs->tregs[6]) & 0xffffffff;
+ ch = (regs->tregs[7]) >> 32;
+ cl = (regs->tregs[7]) & 0xffffffff;
+ printk("T5 : %08Lx%08Lx T6 : %08Lx%08Lx T7 : %08Lx%08Lx\n",
+ ah, al, bh, bl, ch, cl);
+
+ /*
+ * If we're in kernel mode, dump the stack too..
+ */
+ if (!user_mode(regs)) {
+ void show_stack(struct task_struct *tsk, unsigned long *sp);
+ unsigned long sp = regs->regs[15] & 0xffffffff;
+ struct task_struct *tsk = get_current();
+
+ tsk->thread.kregs = regs;
+
+ show_stack(tsk, (unsigned long *)sp);
+ }
+}
+
+/*
+ * Create a kernel thread
+ */
+__noreturn void kernel_thread_helper(void *arg, int (*fn)(void *))
+{
+ do_exit(fn(arg));
+}
+
+/*
+ * This is the mechanism for creating a new kernel thread.
+ *
+ * NOTE! Only a kernel-only process (i.e. the swapper or direct descendants
+ * who haven't done an "execve()") should use this: it will work within
+ * a system call from a "real" process, but the process memory space will
+ * not be freed until both the parent and the child have exited.
+ */
+int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
+{
+ struct pt_regs regs;
+
+ memset(&regs, 0, sizeof(regs));
+ regs.regs[2] = (unsigned long)arg;
+ regs.regs[3] = (unsigned long)fn;
+
+ regs.pc = (unsigned long)kernel_thread_helper;
+ regs.sr = (1 << 30);
+
+ /* Ok, create the new process.. */
+ return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0,
+ &regs, 0, NULL, NULL);
+}
+EXPORT_SYMBOL(kernel_thread);
+
+/*
+ * Free current thread data structures etc..
+ */
+void exit_thread(void)
+{
+ /*
+ * See arch/sparc/kernel/process.c for the precedent for doing
+ * this -- RPC.
+ *
+ * The SH-5 FPU save/restore approach relies on
+ * last_task_used_math pointing to a live task_struct. When
+ * another task tries to use the FPU for the 1st time, the FPUDIS
+ * trap handling (see arch/sh/kernel/cpu/sh5/fpu.c) will save the
+ * existing FPU state to the FP regs field within
+ * last_task_used_math before re-loading the new task's FPU state
+ * (or initialising it if the FPU has been used before). So if
+ * last_task_used_math is stale, and its page has already been
+ * re-allocated for another use, the consequences are rather
+ * grim. Unless we null it here, there is no other path through
+ * which it would get safely nulled.
+ */
+#ifdef CONFIG_SH_FPU
+ if (last_task_used_math == current) {
+ last_task_used_math = NULL;
+ }
+#endif
+}
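
The comment above describes the lazy FPU scheme this relies on: the FPU contents belong to last_task_used_math until some other task traps on FPUDIS, at which point the old owner's registers are spilled and the new owner's are loaded. Roughly, as a sketch only (the real handler lives in arch/sh/kernel/cpu/sh5/fpu.c; helper names other than enable_fpu()/save_fpu() and SR_FD are illustrative):

    static void fpudis_trap_sketch(struct task_struct *tsk, struct pt_regs *regs)
    {
            enable_fpu();
            if (last_task_used_math)
                    save_fpu(last_task_used_math);  /* spill the old owner */
            if (tsk_used_math(tsk))
                    restore_fpu_state(tsk);         /* reload this task's state */
            else
                    init_fpu_state(tsk);            /* first use: clean registers */
            last_task_used_math = tsk;
            regs->sr &= ~SR_FD;                     /* FPU usable without trapping */
    }
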
+
+void flush_thread(void)
+{
+
+ /* Called by fs/exec.c (setup_new_exec) to remove traces of a
+ * previously running executable. */
+#ifdef CONFIG_SH_FPU
+ if (last_task_used_math == current) {
+ last_task_used_math = NULL;
+ }
+ /* Force FPU state to be reinitialised after exec */
+ clear_used_math();
+#endif
+
+ /* If we are a kernel thread about to change into a user thread,
+ * update kregs.
+ */
+ if (current->thread.kregs == &fake_swapper_regs) {
+ current->thread.kregs =
+ ((struct pt_regs *)(THREAD_SIZE + (unsigned long) current) - 1);
+ current->thread.uregs = current->thread.kregs;
+ }
+}
+
+void release_thread(struct task_struct *dead_task)
+{
+ /* do nothing */
+}
+
+/* Fill in the fpu structure for a core dump.. */
+int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
+{
+#ifdef CONFIG_SH_FPU
+ int fpvalid;
+ struct task_struct *tsk = current;
+
+ fpvalid = !!tsk_used_math(tsk);
+ if (fpvalid) {
+ if (current == last_task_used_math) {
+ enable_fpu();
+ save_fpu(tsk);
+ disable_fpu();
+ last_task_used_math = 0;
+ regs->sr |= SR_FD;
+ }
+
+ memcpy(fpu, &tsk->thread.xstate->hardfpu, sizeof(*fpu));
+ }
+
+ return fpvalid;
+#else
+ return 0; /* Task didn't use the fpu at all. */
+#endif
+}
+EXPORT_SYMBOL(dump_fpu);
+
+asmlinkage void ret_from_fork(void);
+
+int copy_thread(unsigned long clone_flags, unsigned long usp,
+ unsigned long unused,
+ struct task_struct *p, struct pt_regs *regs)
+{
+ struct pt_regs *childregs;
+
+#ifdef CONFIG_SH_FPU
+ if (last_task_used_math == current) {
+ enable_fpu();
+ save_fpu(current);
+ disable_fpu();
+ last_task_used_math = NULL;
+ regs->sr |= SR_FD;
+ }
+#endif
+ /* Copy from sh version */
+ childregs = (struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1;
+
+ *childregs = *regs;
+
+ /*
+ * Sign extend the edited stack.
+ * Note that thread.pc and thread.sp will stay
+ * 32-bit wide and context switch must take care
+ * of NEFF sign extension.
+ */
+ if (user_mode(regs)) {
+ childregs->regs[15] = neff_sign_extend(usp);
+ p->thread.uregs = childregs;
+ } else {
+ childregs->regs[15] =
+ neff_sign_extend((unsigned long)task_stack_page(p) +
+ THREAD_SIZE);
+ }
+
+ childregs->regs[9] = 0; /* Set return value for child */
+ childregs->sr |= SR_FD; /* Invalidate FPU flag */
+
+ p->thread.sp = (unsigned long) childregs;
+ p->thread.pc = (unsigned long) ret_from_fork;
+
+ return 0;
+}
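
neff_sign_extend() canonicalises a 32-bit effective address for the 64-bit register file: bit NEFF-1 is replicated through the upper bits so the stored value is a proper sign-extended SH-5 address. Under the assumption that the input fits in NEFF bits, the operation amounts to the standard sign-extension identity (a sketch only; NEFF and the real helper come from the SH-5 headers):

    static inline unsigned long long neff_sign_extend_sketch(unsigned long long va)
    {
            unsigned long long sign = 1ULL << (NEFF - 1);

            return (va ^ sign) - sign;      /* copies bit NEFF-1 into bits NEFF..63 */
    }
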
+
+asmlinkage int sys_fork(unsigned long r2, unsigned long r3,
+ unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs *pregs)
+{
+ return do_fork(SIGCHLD, pregs->regs[15], pregs, 0, 0, 0);
+}
+
+asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
+ unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs *pregs)
+{
+ if (!newsp)
+ newsp = pregs->regs[15];
+ return do_fork(clone_flags, newsp, pregs, 0, 0, 0);
+}
+
+/*
+ * This is trivial, and on the face of it looks like it
+ * could equally well be done in user mode.
+ *
+ * Not so, for quite unobvious reasons - register pressure.
+ * In user mode vfork() cannot have a stack frame, and if
+ * done by calling the "clone()" system call directly, you
+ * do not have enough call-clobbered registers to hold all
+ * the information you need.
+ */
+asmlinkage int sys_vfork(unsigned long r2, unsigned long r3,
+ unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs *pregs)
+{
+ return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, pregs->regs[15], pregs, 0, 0, 0);
+}
+
+/*
+ * sys_execve() executes a new program.
+ */
+asmlinkage int sys_execve(const char *ufilename, char **uargv,
+ char **uenvp, unsigned long r5,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs *pregs)
+{
+ int error;
+ char *filename;
+
+ filename = getname((char __user *)ufilename);
+ error = PTR_ERR(filename);
+ if (IS_ERR(filename))
+ goto out;
+
+ error = do_execve(filename,
+ (const char __user *const __user *)uargv,
+ (const char __user *const __user *)uenvp,
+ pregs);
+ putname(filename);
+out:
+ return error;
+}
+
+#ifdef CONFIG_FRAME_POINTER
+static int in_sh64_switch_to(unsigned long pc)
+{
+ extern char __sh64_switch_to_end;
+ /* For a sleeping task, the PC is somewhere in the middle of the function,
+ * so we don't have to worry about masking the LSB off. */
+ return (pc >= (unsigned long) sh64_switch_to) &&
+ (pc < (unsigned long) &__sh64_switch_to_end);
+}
+#endif
+
+unsigned long get_wchan(struct task_struct *p)
+{
+ unsigned long pc;
+
+ if (!p || p == current || p->state == TASK_RUNNING)
+ return 0;
+
+ /*
+ * The same comment as on the Alpha applies here, too ...
+ */
+ pc = thread_saved_pc(p);
+
+#ifdef CONFIG_FRAME_POINTER
+ if (in_sh64_switch_to(pc)) {
+ unsigned long schedule_fp;
+ unsigned long sh64_switch_to_fp;
+ unsigned long schedule_caller_pc;
+
+ sh64_switch_to_fp = (long) p->thread.sp;
+ /* r14 is saved at offset 4 in the sh64_switch_to frame */
+ schedule_fp = *(unsigned long *) (long)(sh64_switch_to_fp + 4);
+
+ /* and the caller of 'schedule' is (currently!) saved at offset 24
+ in the frame of schedule (from disasm) */
+ schedule_caller_pc = *(unsigned long *) (long)(schedule_fp + 24);
+ return schedule_caller_pc;
+ }
+#endif
+ return pc;
+}
diff --git a/arch/sh/kernel/ptrace.c b/arch/sh/kernel/ptrace.c
new file mode 100644
index 00000000..0a059836
--- /dev/null
+++ b/arch/sh/kernel/ptrace.c
@@ -0,0 +1,33 @@
+#include <linux/ptrace.h>
+
+/**
+ * regs_query_register_offset() - query register offset from its name
+ * @name: the name of a register
+ *
+ * regs_query_register_offset() returns the offset of a register in struct
+ * pt_regs from its name. If the name is invalid, this returns -EINVAL.
+ */
+int regs_query_register_offset(const char *name)
+{
+ const struct pt_regs_offset *roff;
+ for (roff = regoffset_table; roff->name != NULL; roff++)
+ if (!strcmp(roff->name, name))
+ return roff->offset;
+ return -EINVAL;
+}
+
+/**
+ * regs_query_register_name() - query register name from its offset
+ * @offset: the offset of a register in struct pt_regs.
+ *
+ * regs_query_register_name() returns the name of a register from its
+ * offset in struct pt_regs. If the @offset is invalid, this returns NULL.
+ */
+const char *regs_query_register_name(unsigned int offset)
+{
+ const struct pt_regs_offset *roff;
+ for (roff = regoffset_table; roff->name != NULL; roff++)
+ if (roff->offset == offset)
+ return roff->name;
+ return NULL;
+}
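
These two lookups are the generic half of the register-name interface; the per-ABI regoffset_table[] they walk is supplied by ptrace_32.c/ptrace_64.c below. Illustrative use, assuming a struct pt_regs *regs in scope and that the 32-bit table names the GPRs "r0".."r15" (the table is built with REGS_OFFSET_NAME(), whose expansion is not shown here):

    static void show_sp(struct pt_regs *regs)
    {
            int off = regs_query_register_offset("r15");    /* stack pointer on sh */

            if (off >= 0)
                    pr_info("%s = %08lx\n", regs_query_register_name(off),
                            *(unsigned long *)((char *)regs + off));
    }
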
diff --git a/arch/sh/kernel/ptrace_32.c b/arch/sh/kernel/ptrace_32.c
new file mode 100644
index 00000000..96986714
--- /dev/null
+++ b/arch/sh/kernel/ptrace_32.c
@@ -0,0 +1,539 @@
+/*
+ * SuperH process tracing
+ *
+ * Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
+ * Copyright (C) 2002 - 2009 Paul Mundt
+ *
+ * Audit support by Yuichi Nakamura <ynakam@hitachisoft.jp>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <linux/user.h>
+#include <linux/security.h>
+#include <linux/signal.h>
+#include <linux/io.h>
+#include <linux/audit.h>
+#include <linux/seccomp.h>
+#include <linux/tracehook.h>
+#include <linux/elf.h>
+#include <linux/regset.h>
+#include <linux/hw_breakpoint.h>
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+#include <asm/mmu_context.h>
+#include <asm/syscalls.h>
+#include <asm/fpu.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/syscalls.h>
+
+/*
+ * This routine will get a word off of the process kernel stack.
+ */
+static inline int get_stack_long(struct task_struct *task, int offset)
+{
+ unsigned char *stack;
+
+ stack = (unsigned char *)task_pt_regs(task);
+ stack += offset;
+ return (*((int *)stack));
+}
+
+/*
+ * This routine will put a word on the process kernel stack.
+ */
+static inline int put_stack_long(struct task_struct *task, int offset,
+ unsigned long data)
+{
+ unsigned char *stack;
+
+ stack = (unsigned char *)task_pt_regs(task);
+ stack += offset;
+ *(unsigned long *) stack = data;
+ return 0;
+}
+
+void ptrace_triggered(struct perf_event *bp,
+ struct perf_sample_data *data, struct pt_regs *regs)
+{
+ struct perf_event_attr attr;
+
+ /*
+ * Disable the breakpoint request here since ptrace has defined a
+ * one-shot behaviour for breakpoint exceptions.
+ */
+ attr = bp->attr;
+ attr.disabled = true;
+ modify_user_hw_breakpoint(bp, &attr);
+}
+
+static int set_single_step(struct task_struct *tsk, unsigned long addr)
+{
+ struct thread_struct *thread = &tsk->thread;
+ struct perf_event *bp;
+ struct perf_event_attr attr;
+
+ bp = thread->ptrace_bps[0];
+ if (!bp) {
+ ptrace_breakpoint_init(&attr);
+
+ attr.bp_addr = addr;
+ attr.bp_len = HW_BREAKPOINT_LEN_2;
+ attr.bp_type = HW_BREAKPOINT_R;
+
+ bp = register_user_hw_breakpoint(&attr, ptrace_triggered,
+ NULL, tsk);
+ if (IS_ERR(bp))
+ return PTR_ERR(bp);
+
+ thread->ptrace_bps[0] = bp;
+ } else {
+ int err;
+
+ attr = bp->attr;
+ attr.bp_addr = addr;
+ /* reenable breakpoint */
+ attr.disabled = false;
+ err = modify_user_hw_breakpoint(bp, &attr);
+ if (unlikely(err))
+ return err;
+ }
+
+ return 0;
+}
+
+void user_enable_single_step(struct task_struct *child)
+{
+ unsigned long pc = get_stack_long(child, offsetof(struct pt_regs, pc));
+
+ set_tsk_thread_flag(child, TIF_SINGLESTEP);
+
+ if (ptrace_get_breakpoints(child) < 0)
+ return;
+
+ set_single_step(child, pc);
+ ptrace_put_breakpoints(child);
+}
+
+void user_disable_single_step(struct task_struct *child)
+{
+ clear_tsk_thread_flag(child, TIF_SINGLESTEP);
+}
+
+/*
+ * Called by kernel/ptrace.c when detaching..
+ *
+ * Make sure single step bits etc are not set.
+ */
+void ptrace_disable(struct task_struct *child)
+{
+ user_disable_single_step(child);
+}
+
+static int genregs_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ const struct pt_regs *regs = task_pt_regs(target);
+ int ret;
+
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ regs->regs,
+ 0, 16 * sizeof(unsigned long));
+ if (!ret)
+ /* PC, PR, SR, GBR, MACH, MACL, TRA */
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &regs->pc,
+ offsetof(struct pt_regs, pc),
+ sizeof(struct pt_regs));
+ if (!ret)
+ ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
+ sizeof(struct pt_regs), -1);
+
+ return ret;
+}
+
+static int genregs_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ struct pt_regs *regs = task_pt_regs(target);
+ int ret;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ regs->regs,
+ 0, 16 * sizeof(unsigned long));
+ if (!ret && count > 0)
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &regs->pc,
+ offsetof(struct pt_regs, pc),
+ sizeof(struct pt_regs));
+ if (!ret)
+ ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+ sizeof(struct pt_regs), -1);
+
+ return ret;
+}
+
+#ifdef CONFIG_SH_FPU
+int fpregs_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ int ret;
+
+ ret = init_fpu(target);
+ if (ret)
+ return ret;
+
+ if ((boot_cpu_data.flags & CPU_HAS_FPU))
+ return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &target->thread.xstate->hardfpu, 0, -1);
+
+ return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &target->thread.xstate->softfpu, 0, -1);
+}
+
+static int fpregs_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int ret;
+
+ ret = init_fpu(target);
+ if (ret)
+ return ret;
+
+ set_stopped_child_used_math(target);
+
+ if ((boot_cpu_data.flags & CPU_HAS_FPU))
+ return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.xstate->hardfpu, 0, -1);
+
+ return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.xstate->softfpu, 0, -1);
+}
+
+static int fpregs_active(struct task_struct *target,
+ const struct user_regset *regset)
+{
+ return tsk_used_math(target) ? regset->n : 0;
+}
+#endif
+
+#ifdef CONFIG_SH_DSP
+static int dspregs_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ const struct pt_dspregs *regs =
+ (struct pt_dspregs *)&target->thread.dsp_status.dsp_regs;
+ int ret;
+
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, regs,
+ 0, sizeof(struct pt_dspregs));
+ if (!ret)
+ ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
+ sizeof(struct pt_dspregs), -1);
+
+ return ret;
+}
+
+static int dspregs_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ struct pt_dspregs *regs =
+ (struct pt_dspregs *)&target->thread.dsp_status.dsp_regs;
+ int ret;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, regs,
+ 0, sizeof(struct pt_dspregs));
+ if (!ret)
+ ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+ sizeof(struct pt_dspregs), -1);
+
+ return ret;
+}
+
+static int dspregs_active(struct task_struct *target,
+ const struct user_regset *regset)
+{
+ struct pt_regs *regs = task_pt_regs(target);
+
+ return regs->sr & SR_DSP ? regset->n : 0;
+}
+#endif
+
+const struct pt_regs_offset regoffset_table[] = {
+ REGS_OFFSET_NAME(0),
+ REGS_OFFSET_NAME(1),
+ REGS_OFFSET_NAME(2),
+ REGS_OFFSET_NAME(3),
+ REGS_OFFSET_NAME(4),
+ REGS_OFFSET_NAME(5),
+ REGS_OFFSET_NAME(6),
+ REGS_OFFSET_NAME(7),
+ REGS_OFFSET_NAME(8),
+ REGS_OFFSET_NAME(9),
+ REGS_OFFSET_NAME(10),
+ REGS_OFFSET_NAME(11),
+ REGS_OFFSET_NAME(12),
+ REGS_OFFSET_NAME(13),
+ REGS_OFFSET_NAME(14),
+ REGS_OFFSET_NAME(15),
+ REG_OFFSET_NAME(pc),
+ REG_OFFSET_NAME(pr),
+ REG_OFFSET_NAME(sr),
+ REG_OFFSET_NAME(gbr),
+ REG_OFFSET_NAME(mach),
+ REG_OFFSET_NAME(macl),
+ REG_OFFSET_NAME(tra),
+ REG_OFFSET_END,
+};
+
+/*
+ * These are our native regset flavours.
+ */
+enum sh_regset {
+ REGSET_GENERAL,
+#ifdef CONFIG_SH_FPU
+ REGSET_FPU,
+#endif
+#ifdef CONFIG_SH_DSP
+ REGSET_DSP,
+#endif
+};
+
+static const struct user_regset sh_regsets[] = {
+ /*
+ * Format is:
+ * R0 --> R15
+ * PC, PR, SR, GBR, MACH, MACL, TRA
+ */
+ [REGSET_GENERAL] = {
+ .core_note_type = NT_PRSTATUS,
+ .n = ELF_NGREG,
+ .size = sizeof(long),
+ .align = sizeof(long),
+ .get = genregs_get,
+ .set = genregs_set,
+ },
+
+#ifdef CONFIG_SH_FPU
+ [REGSET_FPU] = {
+ .core_note_type = NT_PRFPREG,
+ .n = sizeof(struct user_fpu_struct) / sizeof(long),
+ .size = sizeof(long),
+ .align = sizeof(long),
+ .get = fpregs_get,
+ .set = fpregs_set,
+ .active = fpregs_active,
+ },
+#endif
+
+#ifdef CONFIG_SH_DSP
+ [REGSET_DSP] = {
+ .n = sizeof(struct pt_dspregs) / sizeof(long),
+ .size = sizeof(long),
+ .align = sizeof(long),
+ .get = dspregs_get,
+ .set = dspregs_set,
+ .active = dspregs_active,
+ },
+#endif
+};
+
+static const struct user_regset_view user_sh_native_view = {
+ .name = "sh",
+ .e_machine = EM_SH,
+ .regsets = sh_regsets,
+ .n = ARRAY_SIZE(sh_regsets),
+};
+
+const struct user_regset_view *task_user_regset_view(struct task_struct *task)
+{
+ return &user_sh_native_view;
+}
+
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
+{
+ unsigned long __user *datap = (unsigned long __user *)data;
+ int ret;
+
+ switch (request) {
+ /* read the word at location addr in the USER area. */
+ case PTRACE_PEEKUSR: {
+ unsigned long tmp;
+
+ ret = -EIO;
+ if ((addr & 3) || addr < 0 ||
+ addr > sizeof(struct user) - 3)
+ break;
+
+ if (addr < sizeof(struct pt_regs))
+ tmp = get_stack_long(child, addr);
+ else if (addr >= offsetof(struct user, fpu) &&
+ addr < offsetof(struct user, u_fpvalid)) {
+ if (!tsk_used_math(child)) {
+ if (addr == offsetof(struct user, fpu.fpscr))
+ tmp = FPSCR_INIT;
+ else
+ tmp = 0;
+ } else {
+ unsigned long index;
+ ret = init_fpu(child);
+ if (ret)
+ break;
+ index = addr - offsetof(struct user, fpu);
+ tmp = ((unsigned long *)child->thread.xstate)
+ [index >> 2];
+ }
+ } else if (addr == offsetof(struct user, u_fpvalid))
+ tmp = !!tsk_used_math(child);
+ else if (addr == PT_TEXT_ADDR)
+ tmp = child->mm->start_code;
+ else if (addr == PT_DATA_ADDR)
+ tmp = child->mm->start_data;
+ else if (addr == PT_TEXT_END_ADDR)
+ tmp = child->mm->end_code;
+ else if (addr == PT_TEXT_LEN)
+ tmp = child->mm->end_code - child->mm->start_code;
+ else
+ tmp = 0;
+ ret = put_user(tmp, datap);
+ break;
+ }
+
+ case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
+ ret = -EIO;
+ if ((addr & 3) || addr < 0 ||
+ addr > sizeof(struct user) - 3)
+ break;
+
+ if (addr < sizeof(struct pt_regs))
+ ret = put_stack_long(child, addr, data);
+ else if (addr >= offsetof(struct user, fpu) &&
+ addr < offsetof(struct user, u_fpvalid)) {
+ unsigned long index;
+ ret = init_fpu(child);
+ if (ret)
+ break;
+ index = addr - offsetof(struct user, fpu);
+ set_stopped_child_used_math(child);
+ ((unsigned long *)child->thread.xstate)
+ [index >> 2] = data;
+ ret = 0;
+ } else if (addr == offsetof(struct user, u_fpvalid)) {
+ conditional_stopped_child_used_math(data, child);
+ ret = 0;
+ }
+ break;
+
+ case PTRACE_GETREGS:
+ return copy_regset_to_user(child, &user_sh_native_view,
+ REGSET_GENERAL,
+ 0, sizeof(struct pt_regs),
+ datap);
+ case PTRACE_SETREGS:
+ return copy_regset_from_user(child, &user_sh_native_view,
+ REGSET_GENERAL,
+ 0, sizeof(struct pt_regs),
+ datap);
+#ifdef CONFIG_SH_FPU
+ case PTRACE_GETFPREGS:
+ return copy_regset_to_user(child, &user_sh_native_view,
+ REGSET_FPU,
+ 0, sizeof(struct user_fpu_struct),
+ datap);
+ case PTRACE_SETFPREGS:
+ return copy_regset_from_user(child, &user_sh_native_view,
+ REGSET_FPU,
+ 0, sizeof(struct user_fpu_struct),
+ datap);
+#endif
+#ifdef CONFIG_SH_DSP
+ case PTRACE_GETDSPREGS:
+ return copy_regset_to_user(child, &user_sh_native_view,
+ REGSET_DSP,
+ 0, sizeof(struct pt_dspregs),
+ datap);
+ case PTRACE_SETDSPREGS:
+ return copy_regset_from_user(child, &user_sh_native_view,
+ REGSET_DSP,
+ 0, sizeof(struct pt_dspregs),
+ datap);
+#endif
+ default:
+ ret = ptrace_request(child, request, addr, data);
+ break;
+ }
+
+ return ret;
+}
+
+static inline int audit_arch(void)
+{
+ int arch = EM_SH;
+
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+ arch |= __AUDIT_ARCH_LE;
+#endif
+
+ return arch;
+}
+
+asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
+{
+ long ret = 0;
+
+ secure_computing(regs->regs[0]);
+
+ if (test_thread_flag(TIF_SYSCALL_TRACE) &&
+ tracehook_report_syscall_entry(regs))
+ /*
+ * Tracing decided this syscall should not happen.
+ * We'll return a bogus call number to get an ENOSYS
+ * error, but leave the original number in regs->regs[0].
+ */
+ ret = -1L;
+
+ if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+ trace_sys_enter(regs, regs->regs[0]);
+
+ audit_syscall_entry(audit_arch(), regs->regs[3],
+ regs->regs[4], regs->regs[5],
+ regs->regs[6], regs->regs[7]);
+
+ return ret ?: regs->regs[0];
+}
+
+asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
+{
+ int step;
+
+ audit_syscall_exit(regs);
+
+ if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+ trace_sys_exit(regs, regs->regs[0]);
+
+ step = test_thread_flag(TIF_SINGLESTEP);
+ if (step || test_thread_flag(TIF_SYSCALL_TRACE))
+ tracehook_report_syscall_exit(regs, step);
+}
diff --git a/arch/sh/kernel/ptrace_64.c b/arch/sh/kernel/ptrace_64.c
new file mode 100644
index 00000000..bc81e07d
--- /dev/null
+++ b/arch/sh/kernel/ptrace_64.c
@@ -0,0 +1,593 @@
+/*
+ * arch/sh/kernel/ptrace_64.c
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ * Copyright (C) 2003 - 2008 Paul Mundt
+ *
+ * Started from SH3/4 version:
+ * SuperH version: Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
+ *
+ * Original x86 implementation:
+ * By Ross Biro 1/23/92
+ * edited by Linus Torvalds
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/rwsem.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <linux/user.h>
+#include <linux/signal.h>
+#include <linux/syscalls.h>
+#include <linux/audit.h>
+#include <linux/seccomp.h>
+#include <linux/tracehook.h>
+#include <linux/elf.h>
+#include <linux/regset.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+#include <asm/mmu_context.h>
+#include <asm/syscalls.h>
+#include <asm/fpu.h>
+#include <asm/traps.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/syscalls.h>
+
+/* This mask defines the bits of the SR which the user is not allowed to
+ change, which are everything except S, Q, M, PR, SZ, FR. */
+#define SR_MASK (0xffff8cfd)
+
+/*
+ * This does not yet catch signals sent when the child dies;
+ * that has to be handled in exit.c or in signal.c.
+
+/*
+ * This routine will get a word from the user area in the process kernel stack.
+ */
+static inline int get_stack_long(struct task_struct *task, int offset)
+{
+ unsigned char *stack;
+
+ stack = (unsigned char *)(task->thread.uregs);
+ stack += offset;
+ return (*((int *)stack));
+}
+
+static inline unsigned long
+get_fpu_long(struct task_struct *task, unsigned long addr)
+{
+ unsigned long tmp;
+ struct pt_regs *regs;
+ regs = (struct pt_regs*)((unsigned char *)task + THREAD_SIZE) - 1;
+
+ if (!tsk_used_math(task)) {
+ if (addr == offsetof(struct user_fpu_struct, fpscr)) {
+ tmp = FPSCR_INIT;
+ } else {
+ tmp = 0xffffffffUL; /* matches initial value in fpu.c */
+ }
+ return tmp;
+ }
+
+ if (last_task_used_math == task) {
+ enable_fpu();
+ save_fpu(task);
+ disable_fpu();
+ last_task_used_math = 0;
+ regs->sr |= SR_FD;
+ }
+
+ tmp = ((long *)task->thread.xstate)[addr / sizeof(unsigned long)];
+ return tmp;
+}
+
+/*
+ * This routine will put a word into the user area in the process kernel stack.
+ */
+static inline int put_stack_long(struct task_struct *task, int offset,
+ unsigned long data)
+{
+ unsigned char *stack;
+
+ stack = (unsigned char *)(task->thread.uregs);
+ stack += offset;
+ *(unsigned long *) stack = data;
+ return 0;
+}
+
+static inline int
+put_fpu_long(struct task_struct *task, unsigned long addr, unsigned long data)
+{
+ struct pt_regs *regs;
+
+ regs = (struct pt_regs*)((unsigned char *)task + THREAD_SIZE) - 1;
+
+ if (!tsk_used_math(task)) {
+ init_fpu(task);
+ } else if (last_task_used_math == task) {
+ enable_fpu();
+ save_fpu(task);
+ disable_fpu();
+ last_task_used_math = 0;
+ regs->sr |= SR_FD;
+ }
+
+ ((long *)task->thread.xstate)[addr / sizeof(unsigned long)] = data;
+ return 0;
+}
+
+void user_enable_single_step(struct task_struct *child)
+{
+ struct pt_regs *regs = child->thread.uregs;
+
+ regs->sr |= SR_SSTEP; /* auto-resetting upon exception */
+
+ set_tsk_thread_flag(child, TIF_SINGLESTEP);
+}
+
+void user_disable_single_step(struct task_struct *child)
+{
+ struct pt_regs *regs = child->thread.uregs;
+
+ regs->sr &= ~SR_SSTEP;
+
+ clear_tsk_thread_flag(child, TIF_SINGLESTEP);
+}
+
+static int genregs_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ const struct pt_regs *regs = task_pt_regs(target);
+ int ret;
+
+ /* PC, SR, SYSCALL */
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &regs->pc,
+ 0, 3 * sizeof(unsigned long long));
+
+ /* R1 -> R63 */
+ if (!ret)
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ regs->regs,
+ offsetof(struct pt_regs, regs[0]),
+ 63 * sizeof(unsigned long long));
+ /* TR0 -> TR7 */
+ if (!ret)
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ regs->tregs,
+ offsetof(struct pt_regs, tregs[0]),
+ 8 * sizeof(unsigned long long));
+
+ if (!ret)
+ ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
+ sizeof(struct pt_regs), -1);
+
+ return ret;
+}
+
+static int genregs_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ struct pt_regs *regs = task_pt_regs(target);
+ int ret;
+
+ /* PC, SR, SYSCALL */
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &regs->pc,
+ 0, 3 * sizeof(unsigned long long));
+
+ /* R1 -> R63 */
+ if (!ret && count > 0)
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ regs->regs,
+ offsetof(struct pt_regs, regs[0]),
+ 63 * sizeof(unsigned long long));
+
+ /* TR0 -> TR7 */
+ if (!ret && count > 0)
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ regs->tregs,
+ offsetof(struct pt_regs, tregs[0]),
+ 8 * sizeof(unsigned long long));
+
+ if (!ret)
+ ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+ sizeof(struct pt_regs), -1);
+
+ return ret;
+}
+
+#ifdef CONFIG_SH_FPU
+int fpregs_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ int ret;
+
+ ret = init_fpu(target);
+ if (ret)
+ return ret;
+
+ return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &target->thread.xstate->hardfpu, 0, -1);
+}
+
+static int fpregs_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int ret;
+
+ ret = init_fpu(target);
+ if (ret)
+ return ret;
+
+ set_stopped_child_used_math(target);
+
+ return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.xstate->hardfpu, 0, -1);
+}
+
+static int fpregs_active(struct task_struct *target,
+ const struct user_regset *regset)
+{
+ return tsk_used_math(target) ? regset->n : 0;
+}
+#endif
+
+const struct pt_regs_offset regoffset_table[] = {
+ REG_OFFSET_NAME(pc),
+ REG_OFFSET_NAME(sr),
+ REG_OFFSET_NAME(syscall_nr),
+ REGS_OFFSET_NAME(0),
+ REGS_OFFSET_NAME(1),
+ REGS_OFFSET_NAME(2),
+ REGS_OFFSET_NAME(3),
+ REGS_OFFSET_NAME(4),
+ REGS_OFFSET_NAME(5),
+ REGS_OFFSET_NAME(6),
+ REGS_OFFSET_NAME(7),
+ REGS_OFFSET_NAME(8),
+ REGS_OFFSET_NAME(9),
+ REGS_OFFSET_NAME(10),
+ REGS_OFFSET_NAME(11),
+ REGS_OFFSET_NAME(12),
+ REGS_OFFSET_NAME(13),
+ REGS_OFFSET_NAME(14),
+ REGS_OFFSET_NAME(15),
+ REGS_OFFSET_NAME(16),
+ REGS_OFFSET_NAME(17),
+ REGS_OFFSET_NAME(18),
+ REGS_OFFSET_NAME(19),
+ REGS_OFFSET_NAME(20),
+ REGS_OFFSET_NAME(21),
+ REGS_OFFSET_NAME(22),
+ REGS_OFFSET_NAME(23),
+ REGS_OFFSET_NAME(24),
+ REGS_OFFSET_NAME(25),
+ REGS_OFFSET_NAME(26),
+ REGS_OFFSET_NAME(27),
+ REGS_OFFSET_NAME(28),
+ REGS_OFFSET_NAME(29),
+ REGS_OFFSET_NAME(30),
+ REGS_OFFSET_NAME(31),
+ REGS_OFFSET_NAME(32),
+ REGS_OFFSET_NAME(33),
+ REGS_OFFSET_NAME(34),
+ REGS_OFFSET_NAME(35),
+ REGS_OFFSET_NAME(36),
+ REGS_OFFSET_NAME(37),
+ REGS_OFFSET_NAME(38),
+ REGS_OFFSET_NAME(39),
+ REGS_OFFSET_NAME(40),
+ REGS_OFFSET_NAME(41),
+ REGS_OFFSET_NAME(42),
+ REGS_OFFSET_NAME(43),
+ REGS_OFFSET_NAME(44),
+ REGS_OFFSET_NAME(45),
+ REGS_OFFSET_NAME(46),
+ REGS_OFFSET_NAME(47),
+ REGS_OFFSET_NAME(48),
+ REGS_OFFSET_NAME(49),
+ REGS_OFFSET_NAME(50),
+ REGS_OFFSET_NAME(51),
+ REGS_OFFSET_NAME(52),
+ REGS_OFFSET_NAME(53),
+ REGS_OFFSET_NAME(54),
+ REGS_OFFSET_NAME(55),
+ REGS_OFFSET_NAME(56),
+ REGS_OFFSET_NAME(57),
+ REGS_OFFSET_NAME(58),
+ REGS_OFFSET_NAME(59),
+ REGS_OFFSET_NAME(60),
+ REGS_OFFSET_NAME(61),
+ REGS_OFFSET_NAME(62),
+ REGS_OFFSET_NAME(63),
+ TREGS_OFFSET_NAME(0),
+ TREGS_OFFSET_NAME(1),
+ TREGS_OFFSET_NAME(2),
+ TREGS_OFFSET_NAME(3),
+ TREGS_OFFSET_NAME(4),
+ TREGS_OFFSET_NAME(5),
+ TREGS_OFFSET_NAME(6),
+ TREGS_OFFSET_NAME(7),
+ REG_OFFSET_END,
+};
+
+/*
+ * These are our native regset flavours.
+ */
+enum sh_regset {
+ REGSET_GENERAL,
+#ifdef CONFIG_SH_FPU
+ REGSET_FPU,
+#endif
+};
+
+static const struct user_regset sh_regsets[] = {
+ /*
+ * Format is:
+ * PC, SR, SYSCALL,
+ * R1 --> R63,
+ * TR0 --> TR7,
+ */
+ [REGSET_GENERAL] = {
+ .core_note_type = NT_PRSTATUS,
+ .n = ELF_NGREG,
+ .size = sizeof(long long),
+ .align = sizeof(long long),
+ .get = genregs_get,
+ .set = genregs_set,
+ },
+
+#ifdef CONFIG_SH_FPU
+ [REGSET_FPU] = {
+ .core_note_type = NT_PRFPREG,
+ .n = sizeof(struct user_fpu_struct) /
+ sizeof(long long),
+ .size = sizeof(long long),
+ .align = sizeof(long long),
+ .get = fpregs_get,
+ .set = fpregs_set,
+ .active = fpregs_active,
+ },
+#endif
+};
+
+static const struct user_regset_view user_sh64_native_view = {
+ .name = "sh64",
+ .e_machine = EM_SH,
+ .regsets = sh_regsets,
+ .n = ARRAY_SIZE(sh_regsets),
+};
+
+const struct user_regset_view *task_user_regset_view(struct task_struct *task)
+{
+ return &user_sh64_native_view;
+}
+
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
+{
+ int ret;
+ unsigned long __user *datap = (unsigned long __user *) data;
+
+ switch (request) {
+ /* read the word at location addr in the USER area. */
+ case PTRACE_PEEKUSR: {
+ unsigned long tmp;
+
+ ret = -EIO;
+ if ((addr & 3) || addr < 0)
+ break;
+
+ if (addr < sizeof(struct pt_regs))
+ tmp = get_stack_long(child, addr);
+ else if ((addr >= offsetof(struct user, fpu)) &&
+ (addr < offsetof(struct user, u_fpvalid))) {
+ unsigned long index;
+ ret = init_fpu(child);
+ if (ret)
+ break;
+ index = addr - offsetof(struct user, fpu);
+ tmp = get_fpu_long(child, index);
+ } else if (addr == offsetof(struct user, u_fpvalid)) {
+ tmp = !!tsk_used_math(child);
+ } else {
+ break;
+ }
+ ret = put_user(tmp, datap);
+ break;
+ }
+
+ case PTRACE_POKEUSR:
+ /* write the word at location addr in the USER area. We must
+ disallow any changes to certain SR bits or u_fpvalid, since
+ this could crash the kernel or result in a security
+ loophole. */
+ ret = -EIO;
+ if ((addr & 3) || addr < 0)
+ break;
+
+ if (addr < sizeof(struct pt_regs)) {
+ /* Ignore change of top 32 bits of SR */
+ if (addr == offsetof (struct pt_regs, sr)+4)
+ {
+ ret = 0;
+ break;
+ }
+ /* If lower 32 bits of SR, ignore non-user bits */
+ if (addr == offsetof (struct pt_regs, sr))
+ {
+ long cursr = get_stack_long(child, addr);
+ data &= ~(SR_MASK);
+ data |= (cursr & SR_MASK);
+ }
+ ret = put_stack_long(child, addr, data);
+ }
+ else if ((addr >= offsetof(struct user, fpu)) &&
+ (addr < offsetof(struct user, u_fpvalid))) {
+ unsigned long index;
+ ret = init_fpu(child);
+ if (ret)
+ break;
+ index = addr - offsetof(struct user, fpu);
+ ret = put_fpu_long(child, index, data);
+ }
+ break;
+
+ case PTRACE_GETREGS:
+ return copy_regset_to_user(child, &user_sh64_native_view,
+ REGSET_GENERAL,
+ 0, sizeof(struct pt_regs),
+ datap);
+ case PTRACE_SETREGS:
+ return copy_regset_from_user(child, &user_sh64_native_view,
+ REGSET_GENERAL,
+ 0, sizeof(struct pt_regs),
+ datap);
+#ifdef CONFIG_SH_FPU
+ case PTRACE_GETFPREGS:
+ return copy_regset_to_user(child, &user_sh64_native_view,
+ REGSET_FPU,
+ 0, sizeof(struct user_fpu_struct),
+ datap);
+ case PTRACE_SETFPREGS:
+ return copy_regset_from_user(child, &user_sh64_native_view,
+ REGSET_FPU,
+ 0, sizeof(struct user_fpu_struct),
+ datap);
+#endif
+ default:
+ ret = ptrace_request(child, request, addr, data);
+ break;
+ }
+
+ return ret;
+}
+
+asmlinkage int sh64_ptrace(long request, long pid,
+ unsigned long addr, unsigned long data)
+{
+#define WPC_DBRMODE 0x0d104008
+ static unsigned long first_call;
+
+ if (!test_and_set_bit(0, &first_call)) {
+ /* Set WPC.DBRMODE to 0. This makes all debug events get
+ * delivered through RESVEC, i.e. into the handlers in entry.S.
+ * (If the kernel was downloaded using a remote gdb, WPC.DBRMODE
+ * would normally be left set to 1, which makes debug events get
+ * delivered through DBRVEC, i.e. into the remote gdb's
+ * handlers. This prevents ptrace getting them, and confuses
+ * the remote gdb.) */
+ printk("DBRMODE set to 0 to permit native debugging\n");
+ poke_real_address_q(WPC_DBRMODE, 0);
+ }
+
+ return sys_ptrace(request, pid, addr, data);
+}
+
+static inline int audit_arch(void)
+{
+ int arch = EM_SH;
+
+#ifdef CONFIG_64BIT
+ arch |= __AUDIT_ARCH_64BIT;
+#endif
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+ arch |= __AUDIT_ARCH_LE;
+#endif
+
+ return arch;
+}
+
+asmlinkage long long do_syscall_trace_enter(struct pt_regs *regs)
+{
+ long long ret = 0;
+
+ secure_computing(regs->regs[9]);
+
+ if (test_thread_flag(TIF_SYSCALL_TRACE) &&
+ tracehook_report_syscall_entry(regs))
+ /*
+ * Tracing decided this syscall should not happen.
+ * We'll return a bogus call number to get an ENOSYS
+ * error, but leave the original number in regs->regs[9].
+ */
+ ret = -1LL;
+
+ if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+ trace_sys_enter(regs, regs->regs[9]);
+
+ audit_syscall_entry(audit_arch(), regs->regs[1],
+ regs->regs[2], regs->regs[3],
+ regs->regs[4], regs->regs[5]);
+
+ return ret ?: regs->regs[9];
+}
+
+asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
+{
+ int step;
+
+ audit_syscall_exit(regs);
+
+ if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+ trace_sys_exit(regs, regs->regs[9]);
+
+ step = test_thread_flag(TIF_SINGLESTEP);
+ if (step || test_thread_flag(TIF_SYSCALL_TRACE))
+ tracehook_report_syscall_exit(regs, step);
+}
+
+/* Called with interrupts disabled */
+asmlinkage void do_single_step(unsigned long long vec, struct pt_regs *regs)
+{
+ /* This is called after a single step exception (DEBUGSS).
+ There is no need to change the PC: this is a post-execution
+ exception, and entry.S does not touch the PC for DEBUGSS.
+ We need to clear the Single Step bit in SR to avoid
+ stepping continually. */
+ local_irq_enable();
+ regs->sr &= ~SR_SSTEP;
+ force_sig(SIGTRAP, current);
+}
+
+/* Called with interrupts disabled */
+BUILD_TRAP_HANDLER(breakpoint)
+{
+ TRAP_HANDLER_DECL;
+
+ /* We need to forward step the PC, to counteract the backstep done
+ in signal.c. */
+ local_irq_enable();
+ force_sig(SIGTRAP, current);
+ regs->pc += 4;
+}
+
+/*
+ * Called by kernel/ptrace.c when detaching..
+ *
+ * Make sure single step bits etc are not set.
+ */
+void ptrace_disable(struct task_struct *child)
+{
+ user_disable_single_step(child);
+}
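
The two regset accessors above stream struct pt_regs out to (or in from) a caller-supplied buffer in three back-to-back regions, with user_regset_copyout()/user_regset_copyin() advancing a shared pos/count window so that partial reads and writes land at the right offsets. Below is a minimal user-space sketch of that windowed-copy bookkeeping; the toy_regs layout, the copyout() helper and its argument convention are illustrative stand-ins (not the real sh64 pt_regs or the regset API), and error handling is omitted.

    #include <stdio.h>
    #include <stddef.h>
    #include <string.h>

    struct toy_regs {
    	unsigned long long pc, sr, syscall_nr;
    	unsigned long long regs[4];	/* stand-in for R0..R63 */
    	unsigned long long tregs[2];	/* stand-in for TR0..TR7 */
    };

    /* Copy the part of [start, end) that overlaps the caller's [pos, pos+count) window. */
    static void copyout(unsigned int *pos, unsigned int *count, void **kbuf,
    		    const void *data, unsigned int start, unsigned int end)
    {
    	unsigned int n;

    	if (*count == 0 || *pos >= end)
    		return;

    	/* Skip the part of this region the caller has already consumed. */
    	if (*pos > start) {
    		data = (const char *)data + (*pos - start);
    		start = *pos;
    	}

    	n = end - start;
    	if (n > *count)
    		n = *count;

    	memcpy(*kbuf, data, n);
    	*kbuf = (char *)*kbuf + n;
    	*pos += n;
    	*count -= n;
    }

    int main(void)
    {
    	struct toy_regs regs = {
    		.pc = 0x1000, .sr = 0x2, .syscall_nr = 11,
    		.regs = { 1, 2, 3, 4 }, .tregs = { 7, 8 },
    	};
    	unsigned long long out[9];
    	void *kbuf = out;
    	unsigned int pos = 0, count = sizeof(out);
    	unsigned int i;

    	/* Same three back-to-back regions as genregs_get() above. */
    	copyout(&pos, &count, &kbuf, &regs.pc,
    		0, 3 * sizeof(unsigned long long));
    	copyout(&pos, &count, &kbuf, regs.regs,
    		offsetof(struct toy_regs, regs),
    		offsetof(struct toy_regs, regs) + sizeof(regs.regs));
    	copyout(&pos, &count, &kbuf, regs.tregs,
    		offsetof(struct toy_regs, tregs),
    		offsetof(struct toy_regs, tregs) + sizeof(regs.tregs));

    	for (i = 0; i < 9; i++)
    		printf("out[%u] = %llu\n", i, out[i]);
    	return 0;
    }

Because each call consumes only the slice of the window that overlaps its region, a caller asking for any byte range of the register dump gets exactly the registers that fall in that range, which is what lets PTRACE_GETREGS/SETREGS and core-dump note generation share the same accessors.
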
diff --git a/arch/sh/kernel/reboot.c b/arch/sh/kernel/reboot.c
new file mode 100644
index 00000000..04afe5b2
--- /dev/null
+++ b/arch/sh/kernel/reboot.c
@@ -0,0 +1,102 @@
+#include <linux/pm.h>
+#include <linux/kexec.h>
+#include <linux/kernel.h>
+#include <linux/reboot.h>
+#include <linux/module.h>
+#ifdef CONFIG_SUPERH32
+#include <asm/watchdog.h>
+#endif
+#include <asm/addrspace.h>
+#include <asm/reboot.h>
+#include <asm/tlbflush.h>
+#include <asm/traps.h>
+
+void (*pm_power_off)(void);
+EXPORT_SYMBOL(pm_power_off);
+
+#ifdef CONFIG_SUPERH32
+static void watchdog_trigger_immediate(void)
+{
+ sh_wdt_write_cnt(0xFF);
+ sh_wdt_write_csr(0xC2);
+}
+#endif
+
+static void native_machine_restart(char * __unused)
+{
+ local_irq_disable();
+
+ /* Destroy all of the TLBs in preparation for reset by MMU */
+ __flush_tlb_global();
+
+ /* Address error with SR.BL=1 first. */
+ trigger_address_error();
+
+#ifdef CONFIG_SUPERH32
+ /* If that fails or is unsupported, go for the watchdog next. */
+ watchdog_trigger_immediate();
+#endif
+
+ /*
+ * Give up and sleep.
+ */
+ while (1)
+ cpu_sleep();
+}
+
+static void native_machine_shutdown(void)
+{
+ smp_send_stop();
+}
+
+static void native_machine_power_off(void)
+{
+ if (pm_power_off)
+ pm_power_off();
+}
+
+static void native_machine_halt(void)
+{
+ /* stop other cpus */
+ machine_shutdown();
+
+ /* stop this cpu */
+ stop_this_cpu(NULL);
+}
+
+struct machine_ops machine_ops = {
+ .power_off = native_machine_power_off,
+ .shutdown = native_machine_shutdown,
+ .restart = native_machine_restart,
+ .halt = native_machine_halt,
+#ifdef CONFIG_KEXEC
+ .crash_shutdown = native_machine_crash_shutdown,
+#endif
+};
+
+void machine_power_off(void)
+{
+ machine_ops.power_off();
+}
+
+void machine_shutdown(void)
+{
+ machine_ops.shutdown();
+}
+
+void machine_restart(char *cmd)
+{
+ machine_ops.restart(cmd);
+}
+
+void machine_halt(void)
+{
+ machine_ops.halt();
+}
+
+#ifdef CONFIG_KEXEC
+void machine_crash_shutdown(struct pt_regs *regs)
+{
+ machine_ops.crash_shutdown(regs);
+}
+#endif
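
reboot.c above routes the generic machine_* entry points through a struct of function pointers so that the native implementations can be swapped out (for example for the kexec crash path) without touching the callers. A minimal stand-alone rendering of that dispatch pattern follows; every name ending in _demo is a stand-in, not part of the patch.

    #include <stdio.h>

    struct demo_machine_ops {
    	void (*restart)(char *cmd);
    	void (*halt)(void);
    };

    static void native_restart_demo(char *cmd)
    {
    	printf("restart(%s)\n", cmd ? cmd : "NULL");
    }

    static void native_halt_demo(void)
    {
    	printf("halt\n");
    }

    /* Individual hooks can be replaced here without changing any caller. */
    static struct demo_machine_ops ops = {
    	.restart = native_restart_demo,
    	.halt = native_halt_demo,
    };

    /* The public entry points stay stable and just indirect through ops. */
    static void machine_restart_demo(char *cmd) { ops.restart(cmd); }
    static void machine_halt_demo(void) { ops.halt(); }

    int main(void)
    {
    	machine_restart_demo("cold");
    	machine_halt_demo();
    	return 0;
    }
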
diff --git a/arch/sh/kernel/relocate_kernel.S b/arch/sh/kernel/relocate_kernel.S
new file mode 100644
index 00000000..fcc9934f
--- /dev/null
+++ b/arch/sh/kernel/relocate_kernel.S
@@ -0,0 +1,232 @@
+/*
+ * relocate_kernel.S - put the kernel image in place to boot
+ * 2005.9.17 kogiidena@eggplant.ddo.jp
+ *
+ * LANDISK/sh4 is supported. The rest of the SH architecture may work as well.
+ *
+ * 2009-03-18 Magnus Damm - Added Kexec Jump support
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+#include <linux/linkage.h>
+#include <asm/addrspace.h>
+#include <asm/page.h>
+
+ .globl relocate_new_kernel
+relocate_new_kernel:
+ /* r4 = indirection_page */
+ /* r5 = reboot_code_buffer */
+ /* r6 = start_address */
+
+ mov.l 10f, r0 /* PAGE_SIZE */
+ add r5, r0 /* setup new stack at end of control page */
+
+ /* save r15->r8 to new stack */
+ mov.l r15, @-r0
+ mov r0, r15
+ mov.l r14, @-r15
+ mov.l r13, @-r15
+ mov.l r12, @-r15
+ mov.l r11, @-r15
+ mov.l r10, @-r15
+ mov.l r9, @-r15
+ mov.l r8, @-r15
+
+ /* save other random registers */
+ sts.l macl, @-r15
+ sts.l mach, @-r15
+ stc.l gbr, @-r15
+ stc.l ssr, @-r15
+ stc.l sr, @-r15
+ sts.l pr, @-r15
+ stc.l spc, @-r15
+
+ /* switch to bank1 and save r7->r0 */
+ mov.l 12f, r9
+ stc sr, r8
+ or r9, r8
+ ldc r8, sr
+ mov.l r7, @-r15
+ mov.l r6, @-r15
+ mov.l r5, @-r15
+ mov.l r4, @-r15
+ mov.l r3, @-r15
+ mov.l r2, @-r15
+ mov.l r1, @-r15
+ mov.l r0, @-r15
+
+ /* switch to bank0 and save r7->r0 */
+ mov.l 12f, r9
+ not r9, r9
+ stc sr, r8
+ and r9, r8
+ ldc r8, sr
+ mov.l r7, @-r15
+ mov.l r6, @-r15
+ mov.l r5, @-r15
+ mov.l r4, @-r15
+ mov.l r3, @-r15
+ mov.l r2, @-r15
+ mov.l r1, @-r15
+ mov.l r0, @-r15
+
+ mov.l r4, @-r15 /* save indirection page again */
+
+ bsr swap_pages /* swap pages before jumping to new kernel */
+ nop
+
+ mova 11f, r0
+ mov.l r15, @r0 /* save pointer to stack */
+
+ jsr @r6 /* hand over control to new kernel */
+ nop
+
+ mov.l 11f, r15 /* get pointer to stack */
+ mov.l @r15+, r4 /* restore r4 to get indirection page */
+
+ bsr swap_pages /* swap pages back to previous state */
+ nop
+
+ /* make sure bank0 is active and restore r0->r7 */
+ mov.l 12f, r9
+ not r9, r9
+ stc sr, r8
+ and r9, r8
+ ldc r8, sr
+ mov.l @r15+, r0
+ mov.l @r15+, r1
+ mov.l @r15+, r2
+ mov.l @r15+, r3
+ mov.l @r15+, r4
+ mov.l @r15+, r5
+ mov.l @r15+, r6
+ mov.l @r15+, r7
+
+ /* switch to bank1 and restore r0->r7 */
+ mov.l 12f, r9
+ stc sr, r8
+ or r9, r8
+ ldc r8, sr
+ mov.l @r15+, r0
+ mov.l @r15+, r1
+ mov.l @r15+, r2
+ mov.l @r15+, r3
+ mov.l @r15+, r4
+ mov.l @r15+, r5
+ mov.l @r15+, r6
+ mov.l @r15+, r7
+
+ /* switch back to bank0 */
+ mov.l 12f, r9
+ not r9, r9
+ stc sr, r8
+ and r9, r8
+ ldc r8, sr
+
+ /* restore other random registers */
+ ldc.l @r15+, spc
+ lds.l @r15+, pr
+ ldc.l @r15+, sr
+ ldc.l @r15+, ssr
+ ldc.l @r15+, gbr
+ lds.l @r15+, mach
+ lds.l @r15+, macl
+
+ /* restore r8->r15 */
+ mov.l @r15+, r8
+ mov.l @r15+, r9
+ mov.l @r15+, r10
+ mov.l @r15+, r11
+ mov.l @r15+, r12
+ mov.l @r15+, r13
+ mov.l @r15+, r14
+ mov.l @r15+, r15
+ rts
+ nop
+
+swap_pages:
+ bra 1f
+ mov r4,r0 /* cmd = indirection_page */
+0:
+ mov.l @r4+,r0 /* cmd = *ind++ */
+
+1: /* addr = cmd & 0xfffffff0 */
+ mov r0,r2
+ mov #-16,r1
+ and r1,r2
+
+ /* if(cmd & IND_DESTINATION) dst = addr */
+ tst #1,r0
+ bt 2f
+ bra 0b
+ mov r2,r5
+
+2: /* else if(cmd & IND_INDIRECTION) ind = addr */
+ tst #2,r0
+ bt 3f
+ bra 0b
+ mov r2,r4
+
+3: /* else if(cmd & IND_DONE) return */
+ tst #4,r0
+ bt 4f
+ rts
+ nop
+
+4: /* else if(cmd & IND_SOURCE) memcpy(dst,addr,PAGE_SIZE) */
+ tst #8,r0
+ bt 0b
+
+ mov.l 10f,r3 /* PAGE_SIZE */
+ shlr2 r3
+ shlr2 r3
+5:
+ dt r3
+
+ /* Regular kexec just overwrites the destination page
+ * with the contents of the source page.
+ * For the kexec jump case we need to swap the contents
+ * of the pages.
+ * To keep it simple, swap the contents in both cases.
+ */
+ mov.l @(0, r2), r8
+ mov.l @(0, r5), r1
+ mov.l r8, @(0, r5)
+ mov.l r1, @(0, r2)
+
+ mov.l @(4, r2), r8
+ mov.l @(4, r5), r1
+ mov.l r8, @(4, r5)
+ mov.l r1, @(4, r2)
+
+ mov.l @(8, r2), r8
+ mov.l @(8, r5), r1
+ mov.l r8, @(8, r5)
+ mov.l r1, @(8, r2)
+
+ mov.l @(12, r2), r8
+ mov.l @(12, r5), r1
+ mov.l r8, @(12, r5)
+ mov.l r1, @(12, r2)
+
+ add #16,r5
+ add #16,r2
+ bf 5b
+
+ bra 0b
+ nop
+
+ .align 2
+10:
+ .long PAGE_SIZE
+11:
+ .long 0
+12:
+ .long 0x20000000 ! RB=1
+
+relocate_new_kernel_end:
+
+ .globl relocate_new_kernel_size
+relocate_new_kernel_size:
+ .long relocate_new_kernel_end - relocate_new_kernel
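
The swap_pages routine above walks kexec's indirection list: each entry carries an address in its upper bits and a flag in its low bits, selecting the destination page (tst #1), chaining to the next indirection page (tst #2), terminating the walk (tst #4), or naming a source page whose contents are exchanged with the destination (tst #8). Below is a user-space C rendering of that walk; the flag values mirror the assembly's bit tests, but the 16-byte "page", the entry list in main() and all names are toy stand-ins, and the entry handling is simplified relative to the assembly.

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>
    #include <stdalign.h>

    #define IND_DESTINATION	1	/* entry selects the destination page */
    #define IND_INDIRECTION	2	/* entry points at the next indirection page */
    #define IND_DONE	4	/* end of the list */
    #define IND_SOURCE	8	/* entry is a source page; swap it with dst */

    #define TOY_PAGE_SIZE	16

    static void walk_indirection_list(uintptr_t *ind)
    {
    	char *dst = NULL;

    	for (;;) {
    		uintptr_t cmd = *ind++;
    		void *addr = (void *)(cmd & ~(uintptr_t)0xf);

    		if (cmd & IND_DESTINATION) {
    			dst = addr;
    		} else if (cmd & IND_INDIRECTION) {
    			ind = addr;
    		} else if (cmd & IND_DONE) {
    			return;
    		} else if (cmd & IND_SOURCE) {
    			/*
    			 * Swap rather than copy, so a later kexec jump can
    			 * restore the original contents -- the same reasoning
    			 * as the comment in the assembly above.
    			 */
    			char tmp[TOY_PAGE_SIZE];

    			memcpy(tmp, dst, TOY_PAGE_SIZE);
    			memcpy(dst, addr, TOY_PAGE_SIZE);
    			memcpy(addr, tmp, TOY_PAGE_SIZE);
    			dst += TOY_PAGE_SIZE;
    		}
    	}
    }

    int main(void)
    {
    	static alignas(16) char dst_page[TOY_PAGE_SIZE] = "old destination";
    	static alignas(16) char src_page[TOY_PAGE_SIZE] = "new kernel page";
    	uintptr_t list[] = {
    		(uintptr_t)dst_page | IND_DESTINATION,
    		(uintptr_t)src_page | IND_SOURCE,
    		IND_DONE,
    	};

    	walk_indirection_list(list);
    	printf("destination now holds: %s\n", dst_page);
    	printf("source now holds:      %s\n", src_page);
    	return 0;
    }
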
diff --git a/arch/sh/kernel/return_address.c b/arch/sh/kernel/return_address.c
new file mode 100644
index 00000000..5124aeb2
--- /dev/null
+++ b/arch/sh/kernel/return_address.c
@@ -0,0 +1,59 @@
+/*
+ * arch/sh/kernel/return_address.c
+ *
+ * Copyright (C) 2009 Matt Fleming
+ * Copyright (C) 2009 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <asm/dwarf.h>
+
+#ifdef CONFIG_DWARF_UNWINDER
+
+void *return_address(unsigned int depth)
+{
+ struct dwarf_frame *frame;
+ unsigned long ra;
+ int i;
+
+ for (i = 0, frame = NULL, ra = 0; i <= depth; i++) {
+ struct dwarf_frame *tmp;
+
+ tmp = dwarf_unwind_stack(ra, frame);
+ if (!tmp)
+ return NULL;
+
+ if (frame)
+ dwarf_free_frame(frame);
+
+ frame = tmp;
+
+ if (!frame || !frame->return_addr)
+ break;
+
+ ra = frame->return_addr;
+ }
+
+ /* Failed to unwind the stack to the specified depth. */
+ WARN_ON(i != depth + 1);
+
+ if (frame)
+ dwarf_free_frame(frame);
+
+ return (void *)ra;
+}
+
+#else
+
+void *return_address(unsigned int depth)
+{
+ return NULL;
+}
+
+#endif
+
+EXPORT_SYMBOL_GPL(return_address);
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
new file mode 100644
index 00000000..7b57bf1d
--- /dev/null
+++ b/arch/sh/kernel/setup.c
@@ -0,0 +1,324 @@
+/*
+ * arch/sh/kernel/setup.c
+ *
+ * This file handles the architecture-dependent parts of initialization
+ *
+ * Copyright (C) 1999 Niibe Yutaka
+ * Copyright (C) 2002 - 2010 Paul Mundt
+ */
+#include <linux/screen_info.h>
+#include <linux/ioport.h>
+#include <linux/init.h>
+#include <linux/initrd.h>
+#include <linux/bootmem.h>
+#include <linux/console.h>
+#include <linux/root_dev.h>
+#include <linux/utsname.h>
+#include <linux/nodemask.h>
+#include <linux/cpu.h>
+#include <linux/pfn.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/kexec.h>
+#include <linux/module.h>
+#include <linux/smp.h>
+#include <linux/err.h>
+#include <linux/crash_dump.h>
+#include <linux/mmzone.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/memblock.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/page.h>
+#include <asm/elf.h>
+#include <asm/sections.h>
+#include <asm/irq.h>
+#include <asm/setup.h>
+#include <asm/clock.h>
+#include <asm/smp.h>
+#include <asm/mmu_context.h>
+#include <asm/mmzone.h>
+#include <asm/sparsemem.h>
+
+/*
+ * Initialize loops_per_jiffy as 10000000 (1000MIPS).
+ * This value will be used at the very early stage of serial setup.
+ * A larger value causes no problem.
+ */
+struct sh_cpuinfo cpu_data[NR_CPUS] __read_mostly = {
+ [0] = {
+ .type = CPU_SH_NONE,
+ .family = CPU_FAMILY_UNKNOWN,
+ .loops_per_jiffy = 10000000,
+ .phys_bits = MAX_PHYSMEM_BITS,
+ },
+};
+EXPORT_SYMBOL(cpu_data);
+
+/*
+ * The machine vector. First entry in .machvec.init, or clobbered by
+ * sh_mv= on the command line, prior to .machvec.init teardown.
+ */
+struct sh_machine_vector sh_mv = { .mv_name = "generic", };
+EXPORT_SYMBOL(sh_mv);
+
+#ifdef CONFIG_VT
+struct screen_info screen_info;
+#endif
+
+extern int root_mountflags;
+
+#define RAMDISK_IMAGE_START_MASK 0x07FF
+#define RAMDISK_PROMPT_FLAG 0x8000
+#define RAMDISK_LOAD_FLAG 0x4000
+
+static char __initdata command_line[COMMAND_LINE_SIZE] = { 0, };
+
+static struct resource code_resource = {
+ .name = "Kernel code",
+ .flags = IORESOURCE_BUSY | IORESOURCE_MEM,
+};
+
+static struct resource data_resource = {
+ .name = "Kernel data",
+ .flags = IORESOURCE_BUSY | IORESOURCE_MEM,
+};
+
+static struct resource bss_resource = {
+ .name = "Kernel bss",
+ .flags = IORESOURCE_BUSY | IORESOURCE_MEM,
+};
+
+unsigned long memory_start;
+EXPORT_SYMBOL(memory_start);
+unsigned long memory_end = 0;
+EXPORT_SYMBOL(memory_end);
+unsigned long memory_limit = 0;
+
+static struct resource mem_resources[MAX_NUMNODES];
+
+int l1i_cache_shape, l1d_cache_shape, l2_cache_shape;
+
+static int __init early_parse_mem(char *p)
+{
+ if (!p)
+ return 1;
+
+ memory_limit = PAGE_ALIGN(memparse(p, &p));
+
+ pr_notice("Memory limited to %ldMB\n", memory_limit >> 20);
+
+ return 0;
+}
+early_param("mem", early_parse_mem);
+
+void __init check_for_initrd(void)
+{
+#ifdef CONFIG_BLK_DEV_INITRD
+ unsigned long start, end;
+
+ /*
+ * Check for the rare cases where boot loaders adhere to the boot
+ * ABI.
+ */
+ if (!LOADER_TYPE || !INITRD_START || !INITRD_SIZE)
+ goto disable;
+
+ start = INITRD_START + __MEMORY_START;
+ end = start + INITRD_SIZE;
+
+ if (unlikely(end <= start))
+ goto disable;
+ if (unlikely(start & ~PAGE_MASK)) {
+ pr_err("initrd must be page aligned\n");
+ goto disable;
+ }
+
+ if (unlikely(start < __MEMORY_START)) {
+ pr_err("initrd start (%08lx) < __MEMORY_START(%x)\n",
+ start, __MEMORY_START);
+ goto disable;
+ }
+
+ if (unlikely(end > memblock_end_of_DRAM())) {
+ pr_err("initrd extends beyond end of memory "
+ "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
+ end, (unsigned long)memblock_end_of_DRAM());
+ goto disable;
+ }
+
+ /*
+ * If we got this far in spite of the boot loader's best efforts
+ * to the contrary, assume we actually have a valid initrd and
+ * fix up the root dev.
+ */
+ ROOT_DEV = Root_RAM0;
+
+ /*
+ * Address sanitization
+ */
+ initrd_start = (unsigned long)__va(start);
+ initrd_end = initrd_start + INITRD_SIZE;
+
+ memblock_reserve(__pa(initrd_start), INITRD_SIZE);
+
+ return;
+
+disable:
+ pr_info("initrd disabled\n");
+ initrd_start = initrd_end = 0;
+#endif
+}
+
+void __cpuinit calibrate_delay(void)
+{
+ struct clk *clk = clk_get(NULL, "cpu_clk");
+
+ if (IS_ERR(clk))
+ panic("Need a sane CPU clock definition!");
+
+ loops_per_jiffy = (clk_get_rate(clk) >> 1) / HZ;
+
+ printk(KERN_INFO "Calibrating delay loop (skipped)... "
+ "%lu.%02lu BogoMIPS PRESET (lpj=%lu)\n",
+ loops_per_jiffy/(500000/HZ),
+ (loops_per_jiffy/(5000/HZ)) % 100,
+ loops_per_jiffy);
+}
+
+void __init __add_active_range(unsigned int nid, unsigned long start_pfn,
+ unsigned long end_pfn)
+{
+ struct resource *res = &mem_resources[nid];
+ unsigned long start, end;
+
+ WARN_ON(res->name); /* max one active range per node for now */
+
+ start = start_pfn << PAGE_SHIFT;
+ end = end_pfn << PAGE_SHIFT;
+
+ res->name = "System RAM";
+ res->start = start;
+ res->end = end - 1;
+ res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+
+ if (request_resource(&iomem_resource, res)) {
+ pr_err("unable to request memory_resource 0x%lx 0x%lx\n",
+ start_pfn, end_pfn);
+ return;
+ }
+
+ /*
+ * We don't know which RAM region contains kernel data or
+ * the reserved crashkernel region, so try it repeatedly
+ * and let the resource manager test it.
+ */
+ request_resource(res, &code_resource);
+ request_resource(res, &data_resource);
+ request_resource(res, &bss_resource);
+#ifdef CONFIG_KEXEC
+ request_resource(res, &crashk_res);
+#endif
+
+ /*
+ * Also make sure that there is a PMB mapping that covers this
+ * range before we attempt to activate it, to avoid reset by MMU.
+ * We can hit this path with NUMA or memory hot-add.
+ */
+ pmb_bolt_mapping((unsigned long)__va(start), start, end - start,
+ PAGE_KERNEL);
+
+ memblock_set_node(PFN_PHYS(start_pfn),
+ PFN_PHYS(end_pfn - start_pfn), nid);
+}
+
+void __init __weak plat_early_device_setup(void)
+{
+}
+
+void __init setup_arch(char **cmdline_p)
+{
+ enable_mmu();
+
+ ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
+
+ printk(KERN_NOTICE "Boot params:\n"
+ "... MOUNT_ROOT_RDONLY - %08lx\n"
+ "... RAMDISK_FLAGS - %08lx\n"
+ "... ORIG_ROOT_DEV - %08lx\n"
+ "... LOADER_TYPE - %08lx\n"
+ "... INITRD_START - %08lx\n"
+ "... INITRD_SIZE - %08lx\n",
+ MOUNT_ROOT_RDONLY, RAMDISK_FLAGS,
+ ORIG_ROOT_DEV, LOADER_TYPE,
+ INITRD_START, INITRD_SIZE);
+
+#ifdef CONFIG_BLK_DEV_RAM
+ rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
+ rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
+ rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
+#endif
+
+ if (!MOUNT_ROOT_RDONLY)
+ root_mountflags &= ~MS_RDONLY;
+ init_mm.start_code = (unsigned long) _text;
+ init_mm.end_code = (unsigned long) _etext;
+ init_mm.end_data = (unsigned long) _edata;
+ init_mm.brk = (unsigned long) _end;
+
+ code_resource.start = virt_to_phys(_text);
+ code_resource.end = virt_to_phys(_etext)-1;
+ data_resource.start = virt_to_phys(_etext);
+ data_resource.end = virt_to_phys(_edata)-1;
+ bss_resource.start = virt_to_phys(__bss_start);
+ bss_resource.end = virt_to_phys(_ebss)-1;
+
+#ifdef CONFIG_CMDLINE_OVERWRITE
+ strlcpy(command_line, CONFIG_CMDLINE, sizeof(command_line));
+#else
+ strlcpy(command_line, COMMAND_LINE, sizeof(command_line));
+#ifdef CONFIG_CMDLINE_EXTEND
+ strlcat(command_line, " ", sizeof(command_line));
+ strlcat(command_line, CONFIG_CMDLINE, sizeof(command_line));
+#endif
+#endif
+
+ /* Save unparsed command line copy for /proc/cmdline */
+ memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
+ *cmdline_p = command_line;
+
+ parse_early_param();
+
+ plat_early_device_setup();
+
+ sh_mv_setup();
+
+ /* Let earlyprintk output early console messages */
+ early_platform_driver_probe("earlyprintk", 1, 1);
+
+ paging_init();
+
+#ifdef CONFIG_DUMMY_CONSOLE
+ conswitchp = &dummy_con;
+#endif
+
+ /* Perform the machine specific initialisation */
+ if (likely(sh_mv.mv_setup))
+ sh_mv.mv_setup(cmdline_p);
+
+ plat_smp_setup();
+}
+
+/* processor boot mode configuration */
+int generic_mode_pins(void)
+{
+ pr_warning("generic_mode_pins(): missing mode pin configuration\n");
+ return 0;
+}
+
+int test_mode_pin(int pin)
+{
+ return sh_mv.mv_mode_pins() & pin;
+}
diff --git a/arch/sh/kernel/sh_bios.c b/arch/sh/kernel/sh_bios.c
new file mode 100644
index 00000000..47475cca
--- /dev/null
+++ b/arch/sh/kernel/sh_bios.c
@@ -0,0 +1,172 @@
+/*
+ * C interface for trapping into the standard LinuxSH BIOS.
+ *
+ * Copyright (C) 2000 Greg Banks, Mitch Davis
+ * Copyright (C) 1999, 2000 Niibe Yutaka
+ * Copyright (C) 2002 M. R. Brown
+ * Copyright (C) 2004 - 2010 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/module.h>
+#include <linux/console.h>
+#include <linux/tty.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <asm/sh_bios.h>
+
+#define BIOS_CALL_CONSOLE_WRITE 0
+#define BIOS_CALL_ETH_NODE_ADDR 10
+#define BIOS_CALL_SHUTDOWN 11
+#define BIOS_CALL_GDB_DETACH 0xff
+
+void *gdb_vbr_vector = NULL;
+
+static inline long sh_bios_call(long func, long arg0, long arg1, long arg2,
+ long arg3)
+{
+ register long r0 __asm__("r0") = func;
+ register long r4 __asm__("r4") = arg0;
+ register long r5 __asm__("r5") = arg1;
+ register long r6 __asm__("r6") = arg2;
+ register long r7 __asm__("r7") = arg3;
+
+ if (!gdb_vbr_vector)
+ return -ENOSYS;
+
+ __asm__ __volatile__("trapa #0x3f":"=z"(r0)
+ :"0"(r0), "r"(r4), "r"(r5), "r"(r6), "r"(r7)
+ :"memory");
+ return r0;
+}
+
+void sh_bios_console_write(const char *buf, unsigned int len)
+{
+ sh_bios_call(BIOS_CALL_CONSOLE_WRITE, (long)buf, (long)len, 0, 0);
+}
+
+void sh_bios_gdb_detach(void)
+{
+ sh_bios_call(BIOS_CALL_GDB_DETACH, 0, 0, 0, 0);
+}
+EXPORT_SYMBOL_GPL(sh_bios_gdb_detach);
+
+void sh_bios_get_node_addr(unsigned char *node_addr)
+{
+ sh_bios_call(BIOS_CALL_ETH_NODE_ADDR, 0, (long)node_addr, 0, 0);
+}
+EXPORT_SYMBOL_GPL(sh_bios_get_node_addr);
+
+void sh_bios_shutdown(unsigned int how)
+{
+ sh_bios_call(BIOS_CALL_SHUTDOWN, how, 0, 0, 0);
+}
+
+/*
+ * Read the old value of the VBR register to initialise the vector
+ * through which debug and BIOS traps are delegated by the Linux trap
+ * handler.
+ */
+void sh_bios_vbr_init(void)
+{
+ unsigned long vbr;
+
+ if (unlikely(gdb_vbr_vector))
+ return;
+
+ __asm__ __volatile__ ("stc vbr, %0" : "=r" (vbr));
+
+ if (vbr) {
+ gdb_vbr_vector = (void *)(vbr + 0x100);
+ printk(KERN_NOTICE "Setting GDB trap vector to %p\n",
+ gdb_vbr_vector);
+ } else
+ printk(KERN_NOTICE "SH-BIOS not detected\n");
+}
+
+/**
+ * sh_bios_vbr_reload - Re-load the system VBR from the BIOS vector.
+ *
+ * This can be used by save/restore code to reinitialize the system VBR
+ * from the fixed BIOS VBR. A no-op if no BIOS VBR is known.
+ */
+void sh_bios_vbr_reload(void)
+{
+ if (gdb_vbr_vector)
+ __asm__ __volatile__ (
+ "ldc %0, vbr"
+ :
+ : "r" (((unsigned long) gdb_vbr_vector) - 0x100)
+ : "memory"
+ );
+}
+
+/*
+ * Print a string through the BIOS
+ */
+static void sh_console_write(struct console *co, const char *s,
+ unsigned count)
+{
+ sh_bios_console_write(s, count);
+}
+
+/*
+ * Setup initial baud/bits/parity. We do two things here:
+ * - construct a cflag setting for the first rs_open()
+ * - initialize the serial port
+ * Return non-zero if we didn't find a serial port.
+ */
+static int __init sh_console_setup(struct console *co, char *options)
+{
+ int cflag = CREAD | HUPCL | CLOCAL;
+
+ /*
+ * Now construct a cflag setting.
+ * TODO: this is a totally bogus cflag, as we have
+ * no idea what serial settings the BIOS is using, or
+ * even if it's using the serial port at all.
+ */
+ cflag |= B115200 | CS8 | /*no parity*/0;
+
+ co->cflag = cflag;
+
+ return 0;
+}
+
+static struct console bios_console = {
+ .name = "bios",
+ .write = sh_console_write,
+ .setup = sh_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+};
+
+static struct console *early_console;
+
+static int __init setup_early_printk(char *buf)
+{
+ int keep_early = 0;
+
+ if (!buf)
+ return 0;
+
+ if (strstr(buf, "keep"))
+ keep_early = 1;
+
+ if (!strncmp(buf, "bios", 4))
+ early_console = &bios_console;
+
+ if (likely(early_console)) {
+ if (keep_early)
+ early_console->flags &= ~CON_BOOT;
+ else
+ early_console->flags |= CON_BOOT;
+ register_console(early_console);
+ }
+
+ return 0;
+}
+early_param("earlyprintk", setup_early_printk);
diff --git a/arch/sh/kernel/sh_ksyms_32.c b/arch/sh/kernel/sh_ksyms_32.c
new file mode 100644
index 00000000..3896f26e
--- /dev/null
+++ b/arch/sh/kernel/sh_ksyms_32.c
@@ -0,0 +1,93 @@
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <asm/checksum.h>
+#include <asm/sections.h>
+
+EXPORT_SYMBOL(memchr);
+EXPORT_SYMBOL(memcpy);
+EXPORT_SYMBOL(memset);
+EXPORT_SYMBOL(memmove);
+EXPORT_SYMBOL(__copy_user);
+EXPORT_SYMBOL(__udelay);
+EXPORT_SYMBOL(__ndelay);
+EXPORT_SYMBOL(__const_udelay);
+EXPORT_SYMBOL(strlen);
+EXPORT_SYMBOL(csum_partial);
+EXPORT_SYMBOL(csum_partial_copy_generic);
+EXPORT_SYMBOL(copy_page);
+EXPORT_SYMBOL(__clear_user);
+EXPORT_SYMBOL(_ebss);
+EXPORT_SYMBOL(empty_zero_page);
+
+#define DECLARE_EXPORT(name) \
+ extern void name(void);EXPORT_SYMBOL(name)
+
+DECLARE_EXPORT(__udivsi3);
+DECLARE_EXPORT(__sdivsi3);
+DECLARE_EXPORT(__lshrsi3);
+DECLARE_EXPORT(__ashrsi3);
+DECLARE_EXPORT(__ashlsi3);
+DECLARE_EXPORT(__ashiftrt_r4_6);
+DECLARE_EXPORT(__ashiftrt_r4_7);
+DECLARE_EXPORT(__ashiftrt_r4_8);
+DECLARE_EXPORT(__ashiftrt_r4_9);
+DECLARE_EXPORT(__ashiftrt_r4_10);
+DECLARE_EXPORT(__ashiftrt_r4_11);
+DECLARE_EXPORT(__ashiftrt_r4_12);
+DECLARE_EXPORT(__ashiftrt_r4_13);
+DECLARE_EXPORT(__ashiftrt_r4_14);
+DECLARE_EXPORT(__ashiftrt_r4_15);
+DECLARE_EXPORT(__ashiftrt_r4_20);
+DECLARE_EXPORT(__ashiftrt_r4_21);
+DECLARE_EXPORT(__ashiftrt_r4_22);
+DECLARE_EXPORT(__ashiftrt_r4_23);
+DECLARE_EXPORT(__ashiftrt_r4_24);
+DECLARE_EXPORT(__ashiftrt_r4_27);
+DECLARE_EXPORT(__ashiftrt_r4_30);
+DECLARE_EXPORT(__movstr);
+DECLARE_EXPORT(__movstrSI8);
+DECLARE_EXPORT(__movstrSI12);
+DECLARE_EXPORT(__movstrSI16);
+DECLARE_EXPORT(__movstrSI20);
+DECLARE_EXPORT(__movstrSI24);
+DECLARE_EXPORT(__movstrSI28);
+DECLARE_EXPORT(__movstrSI32);
+DECLARE_EXPORT(__movstrSI36);
+DECLARE_EXPORT(__movstrSI40);
+DECLARE_EXPORT(__movstrSI44);
+DECLARE_EXPORT(__movstrSI48);
+DECLARE_EXPORT(__movstrSI52);
+DECLARE_EXPORT(__movstrSI56);
+DECLARE_EXPORT(__movstrSI60);
+DECLARE_EXPORT(__movstr_i4_even);
+DECLARE_EXPORT(__movstr_i4_odd);
+DECLARE_EXPORT(__movstrSI12_i4);
+DECLARE_EXPORT(__movmem);
+DECLARE_EXPORT(__movmemSI8);
+DECLARE_EXPORT(__movmemSI12);
+DECLARE_EXPORT(__movmemSI16);
+DECLARE_EXPORT(__movmemSI20);
+DECLARE_EXPORT(__movmemSI24);
+DECLARE_EXPORT(__movmemSI28);
+DECLARE_EXPORT(__movmemSI32);
+DECLARE_EXPORT(__movmemSI36);
+DECLARE_EXPORT(__movmemSI40);
+DECLARE_EXPORT(__movmemSI44);
+DECLARE_EXPORT(__movmemSI48);
+DECLARE_EXPORT(__movmemSI52);
+DECLARE_EXPORT(__movmemSI56);
+DECLARE_EXPORT(__movmemSI60);
+DECLARE_EXPORT(__movmem_i4_even);
+DECLARE_EXPORT(__movmem_i4_odd);
+DECLARE_EXPORT(__movmemSI12_i4);
+DECLARE_EXPORT(__udiv_qrnnd_16);
+DECLARE_EXPORT(__sdivsi3_i4);
+DECLARE_EXPORT(__udivsi3_i4);
+DECLARE_EXPORT(__sdivsi3_i4i);
+DECLARE_EXPORT(__udivsi3_i4i);
+#ifdef CONFIG_MCOUNT
+DECLARE_EXPORT(mcount);
+#endif
diff --git a/arch/sh/kernel/sh_ksyms_64.c b/arch/sh/kernel/sh_ksyms_64.c
new file mode 100644
index 00000000..45afa5c5
--- /dev/null
+++ b/arch/sh/kernel/sh_ksyms_64.c
@@ -0,0 +1,56 @@
+/*
+ * arch/sh/kernel/sh_ksyms_64.c
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/rwsem.h>
+#include <linux/module.h>
+#include <linux/smp.h>
+#include <linux/user.h>
+#include <linux/elfcore.h>
+#include <linux/sched.h>
+#include <linux/in6.h>
+#include <linux/interrupt.h>
+#include <linux/screen_info.h>
+#include <asm/cacheflush.h>
+#include <asm/processor.h>
+#include <asm/uaccess.h>
+#include <asm/checksum.h>
+#include <asm/io.h>
+#include <asm/delay.h>
+#include <asm/irq.h>
+
+EXPORT_SYMBOL(__put_user_asm_b);
+EXPORT_SYMBOL(__put_user_asm_w);
+EXPORT_SYMBOL(__put_user_asm_l);
+EXPORT_SYMBOL(__put_user_asm_q);
+EXPORT_SYMBOL(__get_user_asm_b);
+EXPORT_SYMBOL(__get_user_asm_w);
+EXPORT_SYMBOL(__get_user_asm_l);
+EXPORT_SYMBOL(__get_user_asm_q);
+EXPORT_SYMBOL(__strnlen_user);
+EXPORT_SYMBOL(__strncpy_from_user);
+EXPORT_SYMBOL(__clear_user);
+EXPORT_SYMBOL(copy_page);
+EXPORT_SYMBOL(__copy_user);
+EXPORT_SYMBOL(empty_zero_page);
+EXPORT_SYMBOL(memcpy);
+EXPORT_SYMBOL(memset);
+EXPORT_SYMBOL(__udelay);
+EXPORT_SYMBOL(__ndelay);
+EXPORT_SYMBOL(__const_udelay);
+EXPORT_SYMBOL(strlen);
+EXPORT_SYMBOL(strcpy);
+
+/* Ugh. These come in from libgcc.a at link time. */
+#define DECLARE_EXPORT(name) extern void name(void);EXPORT_SYMBOL(name)
+
+DECLARE_EXPORT(__sdivsi3);
+DECLARE_EXPORT(__sdivsi3_1);
+DECLARE_EXPORT(__sdivsi3_2);
+DECLARE_EXPORT(__udivsi3);
+DECLARE_EXPORT(__div_table);
diff --git a/arch/sh/kernel/signal_32.c b/arch/sh/kernel/signal_32.c
new file mode 100644
index 00000000..5901fba3
--- /dev/null
+++ b/arch/sh/kernel/signal_32.c
@@ -0,0 +1,639 @@
+/*
+ * linux/arch/sh/kernel/signal.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ *
+ * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
+ *
+ * SuperH version: Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima
+ *
+ */
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/errno.h>
+#include <linux/wait.h>
+#include <linux/ptrace.h>
+#include <linux/unistd.h>
+#include <linux/stddef.h>
+#include <linux/tty.h>
+#include <linux/elf.h>
+#include <linux/personality.h>
+#include <linux/binfmts.h>
+#include <linux/freezer.h>
+#include <linux/io.h>
+#include <linux/tracehook.h>
+#include <asm/ucontext.h>
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/cacheflush.h>
+#include <asm/syscalls.h>
+#include <asm/fpu.h>
+
+#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+
+struct fdpic_func_descriptor {
+ unsigned long text;
+ unsigned long GOT;
+};
+
+/*
+ * The following define adds a 64 byte gap between the signal
+ * stack frame and previous contents of the stack. This allows
+ * frame unwinding in a function epilogue but only if a frame
+ * pointer is used in the function. This is necessary because
+ * current gcc compilers (<4.3) do not generate unwind info on
+ * SH for function epilogues.
+ */
+#define UNWINDGUARD 64
+
+/*
+ * Atomically swap in the new signal mask, and wait for a signal.
+ */
+asmlinkage int
+sys_sigsuspend(old_sigset_t mask,
+ unsigned long r5, unsigned long r6, unsigned long r7,
+ struct pt_regs __regs)
+{
+ sigset_t blocked;
+
+ current->saved_sigmask = current->blocked;
+
+ mask &= _BLOCKABLE;
+ siginitset(&blocked, mask);
+ set_current_blocked(&blocked);
+
+ current->state = TASK_INTERRUPTIBLE;
+ schedule();
+ set_restore_sigmask();
+
+ return -ERESTARTNOHAND;
+}
+
+asmlinkage int
+sys_sigaction(int sig, const struct old_sigaction __user *act,
+ struct old_sigaction __user *oact)
+{
+ struct k_sigaction new_ka, old_ka;
+ int ret;
+
+ if (act) {
+ old_sigset_t mask;
+ if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
+ __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
+ __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
+ return -EFAULT;
+ __get_user(new_ka.sa.sa_flags, &act->sa_flags);
+ __get_user(mask, &act->sa_mask);
+ siginitset(&new_ka.sa.sa_mask, mask);
+ }
+
+ ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
+
+ if (!ret && oact) {
+ if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
+ __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
+ __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
+ return -EFAULT;
+ __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
+ __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
+ }
+
+ return ret;
+}
+
+asmlinkage int
+sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs __regs)
+{
+ struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
+
+ return do_sigaltstack(uss, uoss, regs->regs[15]);
+}
+
+
+/*
+ * Do a signal return; undo the signal stack.
+ */
+
+#define MOVW(n) (0x9300|((n)-2)) /* Move mem word at PC+n to R3 */
+#if defined(CONFIG_CPU_SH2)
+#define TRAP_NOARG 0xc320 /* Syscall w/no args (NR in R3) */
+#else
+#define TRAP_NOARG 0xc310 /* Syscall w/no args (NR in R3) */
+#endif
+#define OR_R0_R0 0x200b /* or r0,r0 (insert to avoid hardware bug) */
+
+struct sigframe
+{
+ struct sigcontext sc;
+ unsigned long extramask[_NSIG_WORDS-1];
+ u16 retcode[8];
+};
+
+struct rt_sigframe
+{
+ struct siginfo info;
+ struct ucontext uc;
+ u16 retcode[8];
+};
+
+#ifdef CONFIG_SH_FPU
+static inline int restore_sigcontext_fpu(struct sigcontext __user *sc)
+{
+ struct task_struct *tsk = current;
+
+ if (!(boot_cpu_data.flags & CPU_HAS_FPU))
+ return 0;
+
+ set_used_math();
+ return __copy_from_user(&tsk->thread.xstate->hardfpu, &sc->sc_fpregs[0],
+ sizeof(long)*(16*2+2));
+}
+
+static inline int save_sigcontext_fpu(struct sigcontext __user *sc,
+ struct pt_regs *regs)
+{
+ struct task_struct *tsk = current;
+
+ if (!(boot_cpu_data.flags & CPU_HAS_FPU))
+ return 0;
+
+ if (!used_math()) {
+ __put_user(0, &sc->sc_ownedfp);
+ return 0;
+ }
+
+ __put_user(1, &sc->sc_ownedfp);
+
+ /* This will cause a "finit" to be triggered by the next
+ attempted FPU operation by the 'current' process.
+ */
+ clear_used_math();
+
+ unlazy_fpu(tsk, regs);
+ return __copy_to_user(&sc->sc_fpregs[0], &tsk->thread.xstate->hardfpu,
+ sizeof(long)*(16*2+2));
+}
+#endif /* CONFIG_SH_FPU */
+
+static int
+restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *r0_p)
+{
+ unsigned int err = 0;
+
+#define COPY(x) err |= __get_user(regs->x, &sc->sc_##x)
+ COPY(regs[1]);
+ COPY(regs[2]); COPY(regs[3]);
+ COPY(regs[4]); COPY(regs[5]);
+ COPY(regs[6]); COPY(regs[7]);
+ COPY(regs[8]); COPY(regs[9]);
+ COPY(regs[10]); COPY(regs[11]);
+ COPY(regs[12]); COPY(regs[13]);
+ COPY(regs[14]); COPY(regs[15]);
+ COPY(gbr); COPY(mach);
+ COPY(macl); COPY(pr);
+ COPY(sr); COPY(pc);
+#undef COPY
+
+#ifdef CONFIG_SH_FPU
+ if (boot_cpu_data.flags & CPU_HAS_FPU) {
+ int owned_fp;
+ struct task_struct *tsk = current;
+
+ regs->sr |= SR_FD; /* Release FPU */
+ clear_fpu(tsk, regs);
+ clear_used_math();
+ __get_user (owned_fp, &sc->sc_ownedfp);
+ if (owned_fp)
+ err |= restore_sigcontext_fpu(sc);
+ }
+#endif
+
+ regs->tra = -1; /* disable syscall checks */
+ err |= __get_user(*r0_p, &sc->sc_regs[0]);
+ return err;
+}
+
+asmlinkage int sys_sigreturn(unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs __regs)
+{
+ struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
+ struct sigframe __user *frame = (struct sigframe __user *)regs->regs[15];
+ sigset_t set;
+ int r0;
+
+ /* Always make any pending restarted system calls return -EINTR */
+ current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
+ if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+ goto badframe;
+
+ if (__get_user(set.sig[0], &frame->sc.oldmask)
+ || (_NSIG_WORDS > 1
+ && __copy_from_user(&set.sig[1], &frame->extramask,
+ sizeof(frame->extramask))))
+ goto badframe;
+
+ sigdelsetmask(&set, ~_BLOCKABLE);
+ set_current_blocked(&set);
+
+ if (restore_sigcontext(regs, &frame->sc, &r0))
+ goto badframe;
+ return r0;
+
+badframe:
+ force_sig(SIGSEGV, current);
+ return 0;
+}
+
+asmlinkage int sys_rt_sigreturn(unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs __regs)
+{
+ struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
+ struct rt_sigframe __user *frame = (struct rt_sigframe __user *)regs->regs[15];
+ sigset_t set;
+ int r0;
+
+ /* Always make any pending restarted system calls return -EINTR */
+ current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
+ if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+ goto badframe;
+
+ if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
+ goto badframe;
+
+ sigdelsetmask(&set, ~_BLOCKABLE);
+ set_current_blocked(&set);
+
+ if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &r0))
+ goto badframe;
+
+ if (do_sigaltstack(&frame->uc.uc_stack, NULL,
+ regs->regs[15]) == -EFAULT)
+ goto badframe;
+
+ return r0;
+
+badframe:
+ force_sig(SIGSEGV, current);
+ return 0;
+}
+
+/*
+ * Set up a signal frame.
+ */
+
+static int
+setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
+ unsigned long mask)
+{
+ int err = 0;
+
+#define COPY(x) err |= __put_user(regs->x, &sc->sc_##x)
+ COPY(regs[0]); COPY(regs[1]);
+ COPY(regs[2]); COPY(regs[3]);
+ COPY(regs[4]); COPY(regs[5]);
+ COPY(regs[6]); COPY(regs[7]);
+ COPY(regs[8]); COPY(regs[9]);
+ COPY(regs[10]); COPY(regs[11]);
+ COPY(regs[12]); COPY(regs[13]);
+ COPY(regs[14]); COPY(regs[15]);
+ COPY(gbr); COPY(mach);
+ COPY(macl); COPY(pr);
+ COPY(sr); COPY(pc);
+#undef COPY
+
+#ifdef CONFIG_SH_FPU
+ err |= save_sigcontext_fpu(sc, regs);
+#endif
+
+ /* non-iBCS2 extensions.. */
+ err |= __put_user(mask, &sc->oldmask);
+
+ return err;
+}
+
+/*
+ * Determine which stack to use..
+ */
+static inline void __user *
+get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
+{
+ if (ka->sa.sa_flags & SA_ONSTACK) {
+ if (sas_ss_flags(sp) == 0)
+ sp = current->sas_ss_sp + current->sas_ss_size;
+ }
+
+ return (void __user *)((sp - (frame_size+UNWINDGUARD)) & -8ul);
+}
+
+/* These symbols are defined with the addresses in the vsyscall page.
+ See vsyscall-trapa.S. */
+extern void __kernel_sigreturn(void);
+extern void __kernel_rt_sigreturn(void);
+
+static int setup_frame(int sig, struct k_sigaction *ka,
+ sigset_t *set, struct pt_regs *regs)
+{
+ struct sigframe __user *frame;
+ int err = 0;
+ int signal;
+
+ frame = get_sigframe(ka, regs->regs[15], sizeof(*frame));
+
+ if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
+ goto give_sigsegv;
+
+ signal = current_thread_info()->exec_domain
+ && current_thread_info()->exec_domain->signal_invmap
+ && sig < 32
+ ? current_thread_info()->exec_domain->signal_invmap[sig]
+ : sig;
+
+ err |= setup_sigcontext(&frame->sc, regs, set->sig[0]);
+
+ if (_NSIG_WORDS > 1)
+ err |= __copy_to_user(frame->extramask, &set->sig[1],
+ sizeof(frame->extramask));
+
+ /* Set up to return from userspace. If provided, use a stub
+ already in userspace. */
+ if (ka->sa.sa_flags & SA_RESTORER) {
+ regs->pr = (unsigned long) ka->sa.sa_restorer;
+#ifdef CONFIG_VSYSCALL
+ } else if (likely(current->mm->context.vdso)) {
+ regs->pr = VDSO_SYM(&__kernel_sigreturn);
+#endif
+ } else {
+ /* Generate return code (system call to sigreturn) */
+ err |= __put_user(MOVW(7), &frame->retcode[0]);
+ err |= __put_user(TRAP_NOARG, &frame->retcode[1]);
+ err |= __put_user(OR_R0_R0, &frame->retcode[2]);
+ err |= __put_user(OR_R0_R0, &frame->retcode[3]);
+ err |= __put_user(OR_R0_R0, &frame->retcode[4]);
+ err |= __put_user(OR_R0_R0, &frame->retcode[5]);
+ err |= __put_user(OR_R0_R0, &frame->retcode[6]);
+ err |= __put_user((__NR_sigreturn), &frame->retcode[7]);
+ regs->pr = (unsigned long) frame->retcode;
+ flush_icache_range(regs->pr, regs->pr + sizeof(frame->retcode));
+ }
+
+ if (err)
+ goto give_sigsegv;
+
+ /* Set up registers for signal handler */
+ regs->regs[15] = (unsigned long) frame;
+ regs->regs[4] = signal; /* Arg for signal handler */
+ regs->regs[5] = 0;
+ regs->regs[6] = (unsigned long) &frame->sc;
+
+ if (current->personality & FDPIC_FUNCPTRS) {
+ struct fdpic_func_descriptor __user *funcptr =
+ (struct fdpic_func_descriptor __user *)ka->sa.sa_handler;
+
+ __get_user(regs->pc, &funcptr->text);
+ __get_user(regs->regs[12], &funcptr->GOT);
+ } else
+ regs->pc = (unsigned long)ka->sa.sa_handler;
+
+ set_fs(USER_DS);
+
+ pr_debug("SIG deliver (%s:%d): sp=%p pc=%08lx pr=%08lx\n",
+ current->comm, task_pid_nr(current), frame, regs->pc, regs->pr);
+
+ return 0;
+
+give_sigsegv:
+ force_sigsegv(sig, current);
+ return -EFAULT;
+}
+
+static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+ sigset_t *set, struct pt_regs *regs)
+{
+ struct rt_sigframe __user *frame;
+ int err = 0;
+ int signal;
+
+ frame = get_sigframe(ka, regs->regs[15], sizeof(*frame));
+
+ if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
+ goto give_sigsegv;
+
+ signal = current_thread_info()->exec_domain
+ && current_thread_info()->exec_domain->signal_invmap
+ && sig < 32
+ ? current_thread_info()->exec_domain->signal_invmap[sig]
+ : sig;
+
+ err |= copy_siginfo_to_user(&frame->info, info);
+
+ /* Create the ucontext. */
+ err |= __put_user(0, &frame->uc.uc_flags);
+ err |= __put_user(NULL, &frame->uc.uc_link);
+ err |= __put_user((void *)current->sas_ss_sp,
+ &frame->uc.uc_stack.ss_sp);
+ err |= __put_user(sas_ss_flags(regs->regs[15]),
+ &frame->uc.uc_stack.ss_flags);
+ err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
+ err |= setup_sigcontext(&frame->uc.uc_mcontext,
+ regs, set->sig[0]);
+ err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+
+ /* Set up to return from userspace. If provided, use a stub
+ already in userspace. */
+ if (ka->sa.sa_flags & SA_RESTORER) {
+ regs->pr = (unsigned long) ka->sa.sa_restorer;
+#ifdef CONFIG_VSYSCALL
+ } else if (likely(current->mm->context.vdso)) {
+ regs->pr = VDSO_SYM(&__kernel_rt_sigreturn);
+#endif
+ } else {
+ /* Generate return code (system call to rt_sigreturn) */
+ err |= __put_user(MOVW(7), &frame->retcode[0]);
+ err |= __put_user(TRAP_NOARG, &frame->retcode[1]);
+ err |= __put_user(OR_R0_R0, &frame->retcode[2]);
+ err |= __put_user(OR_R0_R0, &frame->retcode[3]);
+ err |= __put_user(OR_R0_R0, &frame->retcode[4]);
+ err |= __put_user(OR_R0_R0, &frame->retcode[5]);
+ err |= __put_user(OR_R0_R0, &frame->retcode[6]);
+ err |= __put_user((__NR_rt_sigreturn), &frame->retcode[7]);
+ regs->pr = (unsigned long) frame->retcode;
+ flush_icache_range(regs->pr, regs->pr + sizeof(frame->retcode));
+ }
+
+ if (err)
+ goto give_sigsegv;
+
+ /* Set up registers for signal handler */
+ regs->regs[15] = (unsigned long) frame;
+ regs->regs[4] = signal; /* Arg for signal handler */
+ regs->regs[5] = (unsigned long) &frame->info;
+ regs->regs[6] = (unsigned long) &frame->uc;
+
+ if (current->personality & FDPIC_FUNCPTRS) {
+ struct fdpic_func_descriptor __user *funcptr =
+ (struct fdpic_func_descriptor __user *)ka->sa.sa_handler;
+
+ __get_user(regs->pc, &funcptr->text);
+ __get_user(regs->regs[12], &funcptr->GOT);
+ } else
+ regs->pc = (unsigned long)ka->sa.sa_handler;
+
+ set_fs(USER_DS);
+
+ pr_debug("SIG deliver (%s:%d): sp=%p pc=%08lx pr=%08lx\n",
+ current->comm, task_pid_nr(current), frame, regs->pc, regs->pr);
+
+ return 0;
+
+give_sigsegv:
+ force_sigsegv(sig, current);
+ return -EFAULT;
+}
+
+static inline void
+handle_syscall_restart(unsigned long save_r0, struct pt_regs *regs,
+ struct sigaction *sa)
+{
+ /* If we're not from a syscall, bail out */
+ if (regs->tra < 0)
+ return;
+
+ /* check for system call restart.. */
+ switch (regs->regs[0]) {
+ case -ERESTART_RESTARTBLOCK:
+ case -ERESTARTNOHAND:
+ no_system_call_restart:
+ regs->regs[0] = -EINTR;
+ break;
+
+ case -ERESTARTSYS:
+ if (!(sa->sa_flags & SA_RESTART))
+ goto no_system_call_restart;
+ /* fallthrough */
+ case -ERESTARTNOINTR:
+ regs->regs[0] = save_r0;
+ regs->pc -= instruction_size(__raw_readw(regs->pc - 4));
+ break;
+ }
+}
+
+/*
+ * OK, we're invoking a handler
+ */
+static int
+handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
+ sigset_t *oldset, struct pt_regs *regs, unsigned int save_r0)
+{
+ int ret;
+
+ /* Set up the stack frame */
+ if (ka->sa.sa_flags & SA_SIGINFO)
+ ret = setup_rt_frame(sig, ka, info, oldset, regs);
+ else
+ ret = setup_frame(sig, ka, oldset, regs);
+
+ if (ret == 0)
+ block_sigmask(ka, sig);
+
+ return ret;
+}
+
+/*
+ * Note that 'init' is a special process: it doesn't get signals it doesn't
+ * want to handle. Thus you cannot kill init even with a SIGKILL even by
+ * mistake.
+ *
+ * Note that we go through the signals twice: once to check the signals that
+ * the kernel can handle, and then we build all the user-level signal handling
+ * stack-frames in one go after that.
+ */
+static void do_signal(struct pt_regs *regs, unsigned int save_r0)
+{
+ siginfo_t info;
+ int signr;
+ struct k_sigaction ka;
+ sigset_t *oldset;
+
+ /*
+ * We want the common case to go fast, which
+ * is why we may in certain cases get here from
+ * kernel mode. Just return without doing anything
+ * if so.
+ */
+ if (!user_mode(regs))
+ return;
+
+ if (current_thread_info()->status & TS_RESTORE_SIGMASK)
+ oldset = &current->saved_sigmask;
+ else
+ oldset = &current->blocked;
+
+ signr = get_signal_to_deliver(&info, &ka, regs, NULL);
+ if (signr > 0) {
+ handle_syscall_restart(save_r0, regs, &ka.sa);
+
+ /* Whee! Actually deliver the signal. */
+ if (handle_signal(signr, &ka, &info, oldset,
+ regs, save_r0) == 0) {
+ /*
+ * A signal was successfully delivered; the saved
+ * sigmask will have been stored in the signal frame,
+ * and will be restored by sigreturn, so we can simply
+ * clear the TS_RESTORE_SIGMASK flag
+ */
+ current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
+
+ tracehook_signal_handler(signr, &info, &ka, regs,
+ test_thread_flag(TIF_SINGLESTEP));
+ }
+
+ return;
+ }
+
+ /* Did we come from a system call? */
+ if (regs->tra >= 0) {
+ /* Restart the system call - no handlers present */
+ if (regs->regs[0] == -ERESTARTNOHAND ||
+ regs->regs[0] == -ERESTARTSYS ||
+ regs->regs[0] == -ERESTARTNOINTR) {
+ regs->regs[0] = save_r0;
+ regs->pc -= instruction_size(__raw_readw(regs->pc - 4));
+ } else if (regs->regs[0] == -ERESTART_RESTARTBLOCK) {
+ regs->pc -= instruction_size(__raw_readw(regs->pc - 4));
+ regs->regs[3] = __NR_restart_syscall;
+ }
+ }
+
+ /*
+ * If there's no signal to deliver, we just put the saved sigmask
+ * back.
+ */
+ if (current_thread_info()->status & TS_RESTORE_SIGMASK) {
+ current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
+ sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
+ }
+}
+
+asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned int save_r0,
+ unsigned long thread_info_flags)
+{
+ /* deal with pending signal delivery */
+ if (thread_info_flags & _TIF_SIGPENDING)
+ do_signal(regs, save_r0);
+
+ if (thread_info_flags & _TIF_NOTIFY_RESUME) {
+ clear_thread_flag(TIF_NOTIFY_RESUME);
+ tracehook_notify_resume(regs);
+ if (current->replacement_session_keyring)
+ key_replace_session_keyring();
+ }
+}
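
When no SA_RESTORER stub or vDSO entry is available, setup_frame() and setup_rt_frame() above synthesize a small return trampoline on the user stack: a PC-relative mov.w that loads the syscall number stored in retcode[7] into r3, a trapa, and or r0,r0 padding inserted to avoid a hardware bug, with the icache flushed afterwards. The sketch below just builds and dumps that 8-entry array in user space; the opcode macros are copied from the patch (using the non-SH2 TRAP_NOARG encoding), and the __NR_sigreturn value of 119 is an assumption rather than something stated in this patch.

    #include <stdio.h>
    #include <stdint.h>

    #define MOVW(n)		(0x9300 | ((n) - 2))	/* mov.w @(disp,PC),r3 */
    #define TRAP_NOARG	0xc310			/* trapa; syscall number in r3 */
    #define OR_R0_R0	0x200b			/* or r0,r0 (hardware-bug padding) */
    #define NR_SIGRETURN	119			/* assumed __NR_sigreturn value */

    int main(void)
    {
    	uint16_t retcode[8];
    	int i;

    	retcode[0] = MOVW(7);		/* load retcode[7] (the syscall #) into r3 */
    	retcode[1] = TRAP_NOARG;	/* enter the kernel */
    	for (i = 2; i <= 6; i++)
    		retcode[i] = OR_R0_R0;	/* padding nops */
    	retcode[7] = NR_SIGRETURN;	/* data word read by the mov.w above */

    	for (i = 0; i < 8; i++)
    		printf("retcode[%d] = 0x%04x\n", i, retcode[i]);
    	return 0;
    }

The MOVW(7) displacement works out so that the mov.w fetches exactly the last slot of the array, which is why the syscall number is stored as data in retcode[7] rather than encoded in an instruction.
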
diff --git a/arch/sh/kernel/signal_64.c b/arch/sh/kernel/signal_64.c
new file mode 100644
index 00000000..3c9a6f7d
--- /dev/null
+++ b/arch/sh/kernel/signal_64.c
@@ -0,0 +1,743 @@
+/*
+ * arch/sh/kernel/signal_64.c
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ * Copyright (C) 2003 - 2008 Paul Mundt
+ * Copyright (C) 2004 Richard Curnow
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/rwsem.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/errno.h>
+#include <linux/wait.h>
+#include <linux/personality.h>
+#include <linux/freezer.h>
+#include <linux/ptrace.h>
+#include <linux/unistd.h>
+#include <linux/stddef.h>
+#include <linux/tracehook.h>
+#include <asm/ucontext.h>
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/cacheflush.h>
+#include <asm/fpu.h>
+
+#define REG_RET 9
+#define REG_ARG1 2
+#define REG_ARG2 3
+#define REG_ARG3 4
+#define REG_SP 15
+#define REG_PR 18
+#define REF_REG_RET regs->regs[REG_RET]
+#define REF_REG_SP regs->regs[REG_SP]
+#define DEREF_REG_PR regs->regs[REG_PR]
+
+#define DEBUG_SIG 0
+
+#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+
+static int
+handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
+ sigset_t *oldset, struct pt_regs * regs);
+
+static inline void
+handle_syscall_restart(struct pt_regs *regs, struct sigaction *sa)
+{
+ /* If we're not from a syscall, bail out */
+ if (regs->syscall_nr < 0)
+ return;
+
+ /* check for system call restart.. */
+ switch (regs->regs[REG_RET]) {
+ case -ERESTART_RESTARTBLOCK:
+ case -ERESTARTNOHAND:
+ no_system_call_restart:
+ regs->regs[REG_RET] = -EINTR;
+ break;
+
+ case -ERESTARTSYS:
+ if (!(sa->sa_flags & SA_RESTART))
+ goto no_system_call_restart;
+ /* fallthrough */
+ case -ERESTARTNOINTR:
+ /* Decode syscall # */
+ regs->regs[REG_RET] = regs->syscall_nr;
+ regs->pc -= 4;
+ break;
+ }
+}
+
+/*
+ * Note that 'init' is a special process: it doesn't get signals it doesn't
+ * want to handle. Thus you cannot kill init even with a SIGKILL even by
+ * mistake.
+ *
+ * Note that we go through the signals twice: once to check the signals that
+ * the kernel can handle, and then we build all the user-level signal handling
+ * stack-frames in one go after that.
+ */
+static int do_signal(struct pt_regs *regs, sigset_t *oldset)
+{
+ siginfo_t info;
+ int signr;
+ struct k_sigaction ka;
+
+ /*
+ * We want the common case to go fast, which
+ * is why we may in certain cases get here from
+ * kernel mode. Just return without doing anything
+ * if so.
+ */
+ if (!user_mode(regs))
+ return 1;
+
+ if (current_thread_info()->status & TS_RESTORE_SIGMASK)
+ oldset = &current->saved_sigmask;
+ else if (!oldset)
+ oldset = &current->blocked;
+
+ signr = get_signal_to_deliver(&info, &ka, regs, 0);
+ if (signr > 0) {
+ handle_syscall_restart(regs, &ka.sa);
+
+ /* Whee! Actually deliver the signal. */
+ if (handle_signal(signr, &info, &ka, oldset, regs) == 0) {
+ /*
+ * If a signal was successfully delivered, the
+ * saved sigmask is in its frame, and we can
+ * clear the TS_RESTORE_SIGMASK flag.
+ */
+ current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
+
+ tracehook_signal_handler(signr, &info, &ka, regs,
+ test_thread_flag(TIF_SINGLESTEP));
+ return 1;
+ }
+ }
+
+ /* Did we come from a system call? */
+ if (regs->syscall_nr >= 0) {
+ /* Restart the system call - no handlers present */
+ switch (regs->regs[REG_RET]) {
+ case -ERESTARTNOHAND:
+ case -ERESTARTSYS:
+ case -ERESTARTNOINTR:
+ /* Decode Syscall # */
+ regs->regs[REG_RET] = regs->syscall_nr;
+ regs->pc -= 4;
+ break;
+
+ case -ERESTART_RESTARTBLOCK:
+ regs->regs[REG_RET] = __NR_restart_syscall;
+ regs->pc -= 4;
+ break;
+ }
+ }
+
+ /* No signal to deliver -- put the saved sigmask back */
+ if (current_thread_info()->status & TS_RESTORE_SIGMASK) {
+ current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
+ sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
+ }
+
+ return 0;
+}
+
+/*
+ * Atomically swap in the new signal mask, and wait for a signal.
+ */
+asmlinkage int
+sys_sigsuspend(old_sigset_t mask,
+ unsigned long r3, unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs * regs)
+{
+ sigset_t saveset, blocked;
+
+ saveset = current->blocked;
+
+ mask &= _BLOCKABLE;
+ siginitset(&blocked, mask);
+ set_current_blocked(&blocked);
+
+ REF_REG_RET = -EINTR;
+ while (1) {
+ current->state = TASK_INTERRUPTIBLE;
+ schedule();
+ set_restore_sigmask();
+ regs->pc += 4; /* because sys_sigreturn decrements the pc */
+ if (do_signal(regs, &saveset)) {
+ /* pc now points at signal handler. Need to decrement
+ it because entry.S will increment it. */
+ regs->pc -= 4;
+ return -EINTR;
+ }
+ }
+}
+
+asmlinkage int
+sys_rt_sigsuspend(sigset_t *unewset, size_t sigsetsize,
+ unsigned long r4, unsigned long r5, unsigned long r6,
+ unsigned long r7,
+ struct pt_regs * regs)
+{
+ sigset_t saveset, newset;
+
+ /* XXX: Don't preclude handling different sized sigset_t's. */
+ if (sigsetsize != sizeof(sigset_t))
+ return -EINVAL;
+
+ if (copy_from_user(&newset, unewset, sizeof(newset)))
+ return -EFAULT;
+ sigdelsetmask(&newset, ~_BLOCKABLE);
+ saveset = current->blocked;
+ set_current_blocked(&newset);
+
+ REF_REG_RET = -EINTR;
+ while (1) {
+ current->state = TASK_INTERRUPTIBLE;
+ schedule();
+ regs->pc += 4; /* because sys_sigreturn decrements the pc */
+ if (do_signal(regs, &saveset)) {
+ /* pc now points at signal handler. Need to decrement
+ it because entry.S will increment it. */
+ regs->pc -= 4;
+ return -EINTR;
+ }
+ }
+}
+
+asmlinkage int
+sys_sigaction(int sig, const struct old_sigaction __user *act,
+ struct old_sigaction __user *oact)
+{
+ struct k_sigaction new_ka, old_ka;
+ int ret;
+
+ if (act) {
+ old_sigset_t mask;
+ if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
+ __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
+ __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
+ return -EFAULT;
+ __get_user(new_ka.sa.sa_flags, &act->sa_flags);
+ __get_user(mask, &act->sa_mask);
+ siginitset(&new_ka.sa.sa_mask, mask);
+ }
+
+ ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
+
+ if (!ret && oact) {
+ if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
+ __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
+ __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
+ return -EFAULT;
+ __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
+ __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
+ }
+
+ return ret;
+}
+
+asmlinkage int
+sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
+ unsigned long r4, unsigned long r5, unsigned long r6,
+ unsigned long r7,
+ struct pt_regs * regs)
+{
+ return do_sigaltstack(uss, uoss, REF_REG_SP);
+}
+
+/*
+ * Do a signal return; undo the signal stack.
+ */
+struct sigframe {
+ struct sigcontext sc;
+ unsigned long extramask[_NSIG_WORDS-1];
+ long long retcode[2];
+};
+
+struct rt_sigframe {
+ struct siginfo __user *pinfo;
+ void *puc;
+ struct siginfo info;
+ struct ucontext uc;
+ long long retcode[2];
+};
+
+#ifdef CONFIG_SH_FPU
+static inline int
+restore_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
+{
+ int err = 0;
+ int fpvalid;
+
+ err |= __get_user (fpvalid, &sc->sc_fpvalid);
+ conditional_used_math(fpvalid);
+ if (! fpvalid)
+ return err;
+
+ if (current == last_task_used_math) {
+ last_task_used_math = NULL;
+ regs->sr |= SR_FD;
+ }
+
+ err |= __copy_from_user(&current->thread.xstate->hardfpu, &sc->sc_fpregs[0],
+ (sizeof(long long) * 32) + (sizeof(int) * 1));
+
+ return err;
+}
+
+static inline int
+setup_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
+{
+ int err = 0;
+ int fpvalid;
+
+ fpvalid = !!used_math();
+ err |= __put_user(fpvalid, &sc->sc_fpvalid);
+ if (! fpvalid)
+ return err;
+
+ if (current == last_task_used_math) {
+ enable_fpu();
+ save_fpu(current);
+ disable_fpu();
+ last_task_used_math = NULL;
+ regs->sr |= SR_FD;
+ }
+
+ err |= __copy_to_user(&sc->sc_fpregs[0], &current->thread.xstate->hardfpu,
+ (sizeof(long long) * 32) + (sizeof(int) * 1));
+ clear_used_math();
+
+ return err;
+}
+#else
+static inline int
+restore_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
+{
+ return 0;
+}
+static inline int
+setup_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
+{
+ return 0;
+}
+#endif
+
+static int
+restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, long long *r2_p)
+{
+ unsigned int err = 0;
+ unsigned long long current_sr, new_sr;
+#define SR_MASK 0xffff8cfd
+
+#define COPY(x) err |= __get_user(regs->x, &sc->sc_##x)
+
+ COPY(regs[0]); COPY(regs[1]); COPY(regs[2]); COPY(regs[3]);
+ COPY(regs[4]); COPY(regs[5]); COPY(regs[6]); COPY(regs[7]);
+ COPY(regs[8]); COPY(regs[9]); COPY(regs[10]); COPY(regs[11]);
+ COPY(regs[12]); COPY(regs[13]); COPY(regs[14]); COPY(regs[15]);
+ COPY(regs[16]); COPY(regs[17]); COPY(regs[18]); COPY(regs[19]);
+ COPY(regs[20]); COPY(regs[21]); COPY(regs[22]); COPY(regs[23]);
+ COPY(regs[24]); COPY(regs[25]); COPY(regs[26]); COPY(regs[27]);
+ COPY(regs[28]); COPY(regs[29]); COPY(regs[30]); COPY(regs[31]);
+ COPY(regs[32]); COPY(regs[33]); COPY(regs[34]); COPY(regs[35]);
+ COPY(regs[36]); COPY(regs[37]); COPY(regs[38]); COPY(regs[39]);
+ COPY(regs[40]); COPY(regs[41]); COPY(regs[42]); COPY(regs[43]);
+ COPY(regs[44]); COPY(regs[45]); COPY(regs[46]); COPY(regs[47]);
+ COPY(regs[48]); COPY(regs[49]); COPY(regs[50]); COPY(regs[51]);
+ COPY(regs[52]); COPY(regs[53]); COPY(regs[54]); COPY(regs[55]);
+ COPY(regs[56]); COPY(regs[57]); COPY(regs[58]); COPY(regs[59]);
+ COPY(regs[60]); COPY(regs[61]); COPY(regs[62]);
+ COPY(tregs[0]); COPY(tregs[1]); COPY(tregs[2]); COPY(tregs[3]);
+ COPY(tregs[4]); COPY(tregs[5]); COPY(tregs[6]); COPY(tregs[7]);
+
+ /* Prevent the signal handler manipulating SR in a way that can
+ crash the kernel. i.e. only allow S, Q, M, PR, SZ, FR to be
+ modified */
+ current_sr = regs->sr;
+ err |= __get_user(new_sr, &sc->sc_sr);
+ regs->sr &= SR_MASK;
+ regs->sr |= (new_sr & ~SR_MASK);
+
+ COPY(pc);
+
+#undef COPY
+
+ /* Must do this last in case it sets regs->sr.fd (i.e. after rest of sr
+ * has been restored above.) */
+ err |= restore_sigcontext_fpu(regs, sc);
+
+ regs->syscall_nr = -1; /* disable syscall checks */
+ err |= __get_user(*r2_p, &sc->sc_regs[REG_RET]);
+ return err;
+}
+
+asmlinkage int sys_sigreturn(unsigned long r2, unsigned long r3,
+ unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs * regs)
+{
+ struct sigframe __user *frame = (struct sigframe __user *) (long) REF_REG_SP;
+ sigset_t set;
+ long long ret;
+
+ /* Always make any pending restarted system calls return -EINTR */
+ current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
+ if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+ goto badframe;
+
+ if (__get_user(set.sig[0], &frame->sc.oldmask)
+ || (_NSIG_WORDS > 1
+ && __copy_from_user(&set.sig[1], &frame->extramask,
+ sizeof(frame->extramask))))
+ goto badframe;
+
+ sigdelsetmask(&set, ~_BLOCKABLE);
+ set_current_blocked(&set);
+
+ if (restore_sigcontext(regs, &frame->sc, &ret))
+ goto badframe;
+ regs->pc -= 4;
+
+ return (int) ret;
+
+badframe:
+ force_sig(SIGSEGV, current);
+ return 0;
+}
+
+asmlinkage int sys_rt_sigreturn(unsigned long r2, unsigned long r3,
+ unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs * regs)
+{
+ struct rt_sigframe __user *frame = (struct rt_sigframe __user *) (long) REF_REG_SP;
+ sigset_t set;
+ stack_t __user st;
+ long long ret;
+
+ /* Always make any pending restarted system calls return -EINTR */
+ current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
+ if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+ goto badframe;
+
+ if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
+ goto badframe;
+
+ sigdelsetmask(&set, ~_BLOCKABLE);
+ set_current_blocked(&set);
+
+ if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ret))
+ goto badframe;
+ regs->pc -= 4;
+
+ if (__copy_from_user(&st, &frame->uc.uc_stack, sizeof(st)))
+ goto badframe;
+ /* It is more difficult to avoid calling this function than to
+ call it and ignore errors. */
+ do_sigaltstack(&st, NULL, REF_REG_SP);
+
+ return (int) ret;
+
+badframe:
+ force_sig(SIGSEGV, current);
+ return 0;
+}
+
+/*
+ * Set up a signal frame.
+ */
+static int
+setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
+ unsigned long mask)
+{
+ int err = 0;
+
+	/* Do this first, otherwise if this sets sr->fd, that value isn't preserved. */
+ err |= setup_sigcontext_fpu(regs, sc);
+
+#define COPY(x) err |= __put_user(regs->x, &sc->sc_##x)
+
+ COPY(regs[0]); COPY(regs[1]); COPY(regs[2]); COPY(regs[3]);
+ COPY(regs[4]); COPY(regs[5]); COPY(regs[6]); COPY(regs[7]);
+ COPY(regs[8]); COPY(regs[9]); COPY(regs[10]); COPY(regs[11]);
+ COPY(regs[12]); COPY(regs[13]); COPY(regs[14]); COPY(regs[15]);
+ COPY(regs[16]); COPY(regs[17]); COPY(regs[18]); COPY(regs[19]);
+ COPY(regs[20]); COPY(regs[21]); COPY(regs[22]); COPY(regs[23]);
+ COPY(regs[24]); COPY(regs[25]); COPY(regs[26]); COPY(regs[27]);
+ COPY(regs[28]); COPY(regs[29]); COPY(regs[30]); COPY(regs[31]);
+ COPY(regs[32]); COPY(regs[33]); COPY(regs[34]); COPY(regs[35]);
+ COPY(regs[36]); COPY(regs[37]); COPY(regs[38]); COPY(regs[39]);
+ COPY(regs[40]); COPY(regs[41]); COPY(regs[42]); COPY(regs[43]);
+ COPY(regs[44]); COPY(regs[45]); COPY(regs[46]); COPY(regs[47]);
+ COPY(regs[48]); COPY(regs[49]); COPY(regs[50]); COPY(regs[51]);
+ COPY(regs[52]); COPY(regs[53]); COPY(regs[54]); COPY(regs[55]);
+ COPY(regs[56]); COPY(regs[57]); COPY(regs[58]); COPY(regs[59]);
+ COPY(regs[60]); COPY(regs[61]); COPY(regs[62]);
+ COPY(tregs[0]); COPY(tregs[1]); COPY(tregs[2]); COPY(tregs[3]);
+ COPY(tregs[4]); COPY(tregs[5]); COPY(tregs[6]); COPY(tregs[7]);
+ COPY(sr); COPY(pc);
+
+#undef COPY
+
+ err |= __put_user(mask, &sc->oldmask);
+
+ return err;
+}
+
+/*
+ * Determine which stack to use..
+ */
+static inline void __user *
+get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
+{
+ if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! sas_ss_flags(sp))
+ sp = current->sas_ss_sp + current->sas_ss_size;
+
+ return (void __user *)((sp - frame_size) & -8ul);
+}
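get_sigframe() switches to the alternate signal stack only when the handler was installed with SA_ONSTACK and the task is not already running on it (sas_ss_flags() returns 0), then rounds the frame down to an 8-byte boundary. The corresponding userspace setup is the usual sigaltstack() registration; a minimal sketch (illustration only, not part of this patch; install_on_altstack is a hypothetical helper, error handling omitted):

	#include <signal.h>
	#include <stdlib.h>

	static void install_on_altstack(int sig, void (*handler)(int))
	{
		stack_t ss = {
			.ss_sp    = malloc(SIGSTKSZ),	/* dedicated handler stack */
			.ss_size  = SIGSTKSZ,
			.ss_flags = 0,
		};
		struct sigaction sa = {
			.sa_handler = handler,
			.sa_flags   = SA_ONSTACK,	/* makes get_sigframe() pick ss_sp */
		};

		sigaltstack(&ss, NULL);			/* register the alternate stack */
		sigemptyset(&sa.sa_mask);
		sigaction(sig, &sa, NULL);
	}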
+
+void sa_default_restorer(void); /* See comments below */
+void sa_default_rt_restorer(void); /* See comments below */
+
+static int setup_frame(int sig, struct k_sigaction *ka,
+ sigset_t *set, struct pt_regs *regs)
+{
+ struct sigframe __user *frame;
+ int err = 0;
+ int signal;
+
+ frame = get_sigframe(ka, regs->regs[REG_SP], sizeof(*frame));
+
+ if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
+ goto give_sigsegv;
+
+ signal = current_thread_info()->exec_domain
+ && current_thread_info()->exec_domain->signal_invmap
+ && sig < 32
+ ? current_thread_info()->exec_domain->signal_invmap[sig]
+ : sig;
+
+ err |= setup_sigcontext(&frame->sc, regs, set->sig[0]);
+
+	/* Give up early, as i386 does, in case of error */
+ if (err)
+ goto give_sigsegv;
+
+	if (_NSIG_WORDS > 1) {
+		err |= __copy_to_user(frame->extramask, &set->sig[1],
+				      sizeof(frame->extramask));
+	}
+
+	/* Give up early, as i386 does, in case of error */
+ if (err)
+ goto give_sigsegv;
+
+ /* Set up to return from userspace. If provided, use a stub
+ already in userspace. */
+ if (ka->sa.sa_flags & SA_RESTORER) {
+ /*
+ * On SH5 all edited pointers are subject to NEFF
+ */
+ DEREF_REG_PR = neff_sign_extend((unsigned long)
+ ka->sa.sa_restorer | 0x1);
+ } else {
+ /*
+ * Different approach on SH5.
+ * . Endianness independent asm code gets placed in entry.S .
+ * This is limited to four ASM instructions corresponding
+ * to two long longs in size.
+ * . err checking is done on the else branch only
+ * . flush_icache_range() is called upon __put_user() only
+ * . all edited pointers are subject to NEFF
+ * . being code, linker turns ShMedia bit on, always
+ * dereference index -1.
+ */
+ DEREF_REG_PR = neff_sign_extend((unsigned long)
+ frame->retcode | 0x01);
+
+ if (__copy_to_user(frame->retcode,
+ (void *)((unsigned long)sa_default_restorer & (~1)), 16) != 0)
+ goto give_sigsegv;
+
+ /* Cohere the trampoline with the I-cache. */
+ flush_cache_sigtramp(DEREF_REG_PR-1);
+ }
+
+ /*
+ * Set up registers for signal handler.
+ * All edited pointers are subject to NEFF.
+ */
+ regs->regs[REG_SP] = neff_sign_extend((unsigned long)frame);
+ regs->regs[REG_ARG1] = signal; /* Arg for signal handler */
+
+ /* FIXME:
+ The glibc profiling support for SH-5 needs to be passed a sigcontext
+ so it can retrieve the PC. At some point during 2003 the glibc
+ support was changed to receive the sigcontext through the 2nd
+ argument, but there are still versions of libc.so in use that use
+ the 3rd argument. Until libc.so is stabilised, pass the sigcontext
+ through both 2nd and 3rd arguments.
+ */
+
+ regs->regs[REG_ARG2] = (unsigned long long)(unsigned long)(signed long)&frame->sc;
+ regs->regs[REG_ARG3] = (unsigned long long)(unsigned long)(signed long)&frame->sc;
+
+ regs->pc = neff_sign_extend((unsigned long)ka->sa.sa_handler);
+
+ set_fs(USER_DS);
+
+ /* Broken %016Lx */
+ pr_debug("SIG deliver (#%d,%s:%d): sp=%p pc=%08Lx%08Lx link=%08Lx%08Lx\n",
+ signal, current->comm, current->pid, frame,
+ regs->pc >> 32, regs->pc & 0xffffffff,
+ DEREF_REG_PR >> 32, DEREF_REG_PR & 0xffffffff);
+
+ return 0;
+
+give_sigsegv:
+ force_sigsegv(sig, current);
+ return -EFAULT;
+}
+
+static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+ sigset_t *set, struct pt_regs *regs)
+{
+ struct rt_sigframe __user *frame;
+ int err = 0;
+ int signal;
+
+ frame = get_sigframe(ka, regs->regs[REG_SP], sizeof(*frame));
+
+ if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
+ goto give_sigsegv;
+
+ signal = current_thread_info()->exec_domain
+ && current_thread_info()->exec_domain->signal_invmap
+ && sig < 32
+ ? current_thread_info()->exec_domain->signal_invmap[sig]
+ : sig;
+
+ err |= __put_user(&frame->info, &frame->pinfo);
+ err |= __put_user(&frame->uc, &frame->puc);
+ err |= copy_siginfo_to_user(&frame->info, info);
+
+	/* Give up early, as i386 does, in case of error */
+ if (err)
+ goto give_sigsegv;
+
+ /* Create the ucontext. */
+ err |= __put_user(0, &frame->uc.uc_flags);
+ err |= __put_user(0, &frame->uc.uc_link);
+ err |= __put_user((void *)current->sas_ss_sp,
+ &frame->uc.uc_stack.ss_sp);
+ err |= __put_user(sas_ss_flags(regs->regs[REG_SP]),
+ &frame->uc.uc_stack.ss_flags);
+ err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
+ err |= setup_sigcontext(&frame->uc.uc_mcontext,
+ regs, set->sig[0]);
+ err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+
+	/* Give up early, as i386 does, in case of error */
+ if (err)
+ goto give_sigsegv;
+
+ /* Set up to return from userspace. If provided, use a stub
+ already in userspace. */
+ if (ka->sa.sa_flags & SA_RESTORER) {
+ /*
+ * On SH5 all edited pointers are subject to NEFF
+ */
+ DEREF_REG_PR = neff_sign_extend((unsigned long)
+ ka->sa.sa_restorer | 0x1);
+ } else {
+ /*
+ * Different approach on SH5.
+ * . Endianness independent asm code gets placed in entry.S .
+ * This is limited to four ASM instructions corresponding
+ * to two long longs in size.
+ * . err checking is done on the else branch only
+ * . flush_icache_range() is called upon __put_user() only
+ * . all edited pointers are subject to NEFF
+ * . being code, linker turns ShMedia bit on, always
+ * dereference index -1.
+ */
+ DEREF_REG_PR = neff_sign_extend((unsigned long)
+ frame->retcode | 0x01);
+
+ if (__copy_to_user(frame->retcode,
+ (void *)((unsigned long)sa_default_rt_restorer & (~1)), 16) != 0)
+ goto give_sigsegv;
+
+ /* Cohere the trampoline with the I-cache. */
+ flush_icache_range(DEREF_REG_PR-1, DEREF_REG_PR-1+15);
+ }
+
+ /*
+ * Set up registers for signal handler.
+ * All edited pointers are subject to NEFF.
+ */
+ regs->regs[REG_SP] = neff_sign_extend((unsigned long)frame);
+ regs->regs[REG_ARG1] = signal; /* Arg for signal handler */
+ regs->regs[REG_ARG2] = (unsigned long long)(unsigned long)(signed long)&frame->info;
+ regs->regs[REG_ARG3] = (unsigned long long)(unsigned long)(signed long)&frame->uc.uc_mcontext;
+ regs->pc = neff_sign_extend((unsigned long)ka->sa.sa_handler);
+
+ set_fs(USER_DS);
+
+ pr_debug("SIG deliver (#%d,%s:%d): sp=%p pc=%08Lx%08Lx link=%08Lx%08Lx\n",
+ signal, current->comm, current->pid, frame,
+ regs->pc >> 32, regs->pc & 0xffffffff,
+ DEREF_REG_PR >> 32, DEREF_REG_PR & 0xffffffff);
+
+ return 0;
+
+give_sigsegv:
+ force_sigsegv(sig, current);
+ return -EFAULT;
+}
+
+/*
+ * OK, we're invoking a handler
+ */
+static int
+handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
+ sigset_t *oldset, struct pt_regs * regs)
+{
+ int ret;
+
+ /* Set up the stack frame */
+ if (ka->sa.sa_flags & SA_SIGINFO)
+ ret = setup_rt_frame(sig, ka, info, oldset, regs);
+ else
+ ret = setup_frame(sig, ka, oldset, regs);
+
+ if (ret == 0)
+ block_sigmask(ka, sig);
+
+ return ret;
+}
+
+asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
+{
+ if (thread_info_flags & _TIF_SIGPENDING)
+ do_signal(regs, 0);
+
+ if (thread_info_flags & _TIF_NOTIFY_RESUME) {
+ clear_thread_flag(TIF_NOTIFY_RESUME);
+ tracehook_notify_resume(regs);
+ if (current->replacement_session_keyring)
+ key_replace_session_keyring();
+ }
+}
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
new file mode 100644
index 00000000..eaebdf6a
--- /dev/null
+++ b/arch/sh/kernel/smp.c
@@ -0,0 +1,486 @@
+/*
+ * arch/sh/kernel/smp.c
+ *
+ * SMP support for the SuperH processors.
+ *
+ * Copyright (C) 2002 - 2010 Paul Mundt
+ * Copyright (C) 2006 - 2007 Akio Idehara
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/err.h>
+#include <linux/cache.h>
+#include <linux/cpumask.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/cpu.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/atomic.h>
+#include <asm/processor.h>
+#include <asm/mmu_context.h>
+#include <asm/smp.h>
+#include <asm/cacheflush.h>
+#include <asm/sections.h>
+#include <asm/setup.h>
+
+int __cpu_number_map[NR_CPUS]; /* Map physical to logical */
+int __cpu_logical_map[NR_CPUS]; /* Map logical to physical */
+
+struct plat_smp_ops *mp_ops = NULL;
+
+/* State of each CPU */
+DEFINE_PER_CPU(int, cpu_state) = { 0 };
+
+void __cpuinit register_smp_ops(struct plat_smp_ops *ops)
+{
+ if (mp_ops)
+ printk(KERN_WARNING "Overriding previously set SMP ops\n");
+
+ mp_ops = ops;
+}
+
+static inline void __cpuinit smp_store_cpu_info(unsigned int cpu)
+{
+ struct sh_cpuinfo *c = cpu_data + cpu;
+
+ memcpy(c, &boot_cpu_data, sizeof(struct sh_cpuinfo));
+
+ c->loops_per_jiffy = loops_per_jiffy;
+}
+
+void __init smp_prepare_cpus(unsigned int max_cpus)
+{
+ unsigned int cpu = smp_processor_id();
+
+ init_new_context(current, &init_mm);
+ current_thread_info()->cpu = cpu;
+ mp_ops->prepare_cpus(max_cpus);
+
+#ifndef CONFIG_HOTPLUG_CPU
+ init_cpu_present(cpu_possible_mask);
+#endif
+}
+
+void __init smp_prepare_boot_cpu(void)
+{
+ unsigned int cpu = smp_processor_id();
+
+ __cpu_number_map[0] = cpu;
+ __cpu_logical_map[0] = cpu;
+
+ set_cpu_online(cpu, true);
+ set_cpu_possible(cpu, true);
+
+ per_cpu(cpu_state, cpu) = CPU_ONLINE;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+void native_cpu_die(unsigned int cpu)
+{
+ unsigned int i;
+
+ for (i = 0; i < 10; i++) {
+ smp_rmb();
+ if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
+ if (system_state == SYSTEM_RUNNING)
+ pr_info("CPU %u is now offline\n", cpu);
+
+ return;
+ }
+
+ msleep(100);
+ }
+
+ pr_err("CPU %u didn't die...\n", cpu);
+}
+
+int native_cpu_disable(unsigned int cpu)
+{
+ return cpu == 0 ? -EPERM : 0;
+}
+
+void play_dead_common(void)
+{
+ idle_task_exit();
+ irq_ctx_exit(raw_smp_processor_id());
+ mb();
+
+ __get_cpu_var(cpu_state) = CPU_DEAD;
+ local_irq_disable();
+}
+
+void native_play_dead(void)
+{
+ play_dead_common();
+}
+
+int __cpu_disable(void)
+{
+ unsigned int cpu = smp_processor_id();
+ struct task_struct *p;
+ int ret;
+
+ ret = mp_ops->cpu_disable(cpu);
+ if (ret)
+ return ret;
+
+ /*
+ * Take this CPU offline. Once we clear this, we can't return,
+ * and we must not schedule until we're ready to give up the cpu.
+ */
+ set_cpu_online(cpu, false);
+
+ /*
+ * OK - migrate IRQs away from this CPU
+ */
+ migrate_irqs();
+
+ /*
+ * Stop the local timer for this CPU.
+ */
+ local_timer_stop(cpu);
+
+ /*
+ * Flush user cache and TLB mappings, and then remove this CPU
+ * from the vm mask set of all processes.
+ */
+ flush_cache_all();
+ local_flush_tlb_all();
+
+ read_lock(&tasklist_lock);
+ for_each_process(p)
+ if (p->mm)
+ cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
+ read_unlock(&tasklist_lock);
+
+ return 0;
+}
+#else /* ... !CONFIG_HOTPLUG_CPU */
+int native_cpu_disable(unsigned int cpu)
+{
+ return -ENOSYS;
+}
+
+void native_cpu_die(unsigned int cpu)
+{
+ /* We said "no" in __cpu_disable */
+ BUG();
+}
+
+void native_play_dead(void)
+{
+ BUG();
+}
+#endif
+
+asmlinkage void __cpuinit start_secondary(void)
+{
+ unsigned int cpu = smp_processor_id();
+ struct mm_struct *mm = &init_mm;
+
+ enable_mmu();
+ atomic_inc(&mm->mm_count);
+ atomic_inc(&mm->mm_users);
+ current->active_mm = mm;
+ enter_lazy_tlb(mm, current);
+ local_flush_tlb_all();
+
+ per_cpu_trap_init();
+
+ preempt_disable();
+
+ notify_cpu_starting(cpu);
+
+ local_irq_enable();
+
+ /* Enable local timers */
+ local_timer_setup(cpu);
+ calibrate_delay();
+
+ smp_store_cpu_info(cpu);
+
+ set_cpu_online(cpu, true);
+ per_cpu(cpu_state, cpu) = CPU_ONLINE;
+
+ cpu_idle();
+}
+
+extern struct {
+ unsigned long sp;
+ unsigned long bss_start;
+ unsigned long bss_end;
+ void *start_kernel_fn;
+ void *cpu_init_fn;
+ void *thread_info;
+} stack_start;
+
+int __cpuinit __cpu_up(unsigned int cpu)
+{
+ struct task_struct *tsk;
+ unsigned long timeout;
+
+ tsk = cpu_data[cpu].idle;
+ if (!tsk) {
+ tsk = fork_idle(cpu);
+ if (IS_ERR(tsk)) {
+ pr_err("Failed forking idle task for cpu %d\n", cpu);
+ return PTR_ERR(tsk);
+ }
+
+ cpu_data[cpu].idle = tsk;
+ }
+
+ per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
+
+ /* Fill in data in head.S for secondary cpus */
+ stack_start.sp = tsk->thread.sp;
+ stack_start.thread_info = tsk->stack;
+ stack_start.bss_start = 0; /* don't clear bss for secondary cpus */
+ stack_start.start_kernel_fn = start_secondary;
+
+ flush_icache_range((unsigned long)&stack_start,
+ (unsigned long)&stack_start + sizeof(stack_start));
+ wmb();
+
+ mp_ops->start_cpu(cpu, (unsigned long)_stext);
+
+ timeout = jiffies + HZ;
+ while (time_before(jiffies, timeout)) {
+ if (cpu_online(cpu))
+ break;
+
+ udelay(10);
+ barrier();
+ }
+
+ if (cpu_online(cpu))
+ return 0;
+
+ return -ENOENT;
+}
+
+void __init smp_cpus_done(unsigned int max_cpus)
+{
+ unsigned long bogosum = 0;
+ int cpu;
+
+ for_each_online_cpu(cpu)
+ bogosum += cpu_data[cpu].loops_per_jiffy;
+
+ printk(KERN_INFO "SMP: Total of %d processors activated "
+ "(%lu.%02lu BogoMIPS).\n", num_online_cpus(),
+ bogosum / (500000/HZ),
+ (bogosum / (5000/HZ)) % 100);
+}
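As a worked example of the fixed-point arithmetic in the printk above (the numbers are illustrative only): with HZ == 100 and a summed loops_per_jiffy of 1243136,

	1243136 / (500000 / 100)        = 248	(whole BogoMIPS)
	(1243136 / (5000 / 100)) % 100  =  62	(two decimal digits)

so the boot log would read "(248.62 BogoMIPS)". By this formula, one BogoMIPS corresponds to 500000 delay-loop iterations per second.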
+
+void smp_send_reschedule(int cpu)
+{
+ mp_ops->send_ipi(cpu, SMP_MSG_RESCHEDULE);
+}
+
+void smp_send_stop(void)
+{
+ smp_call_function(stop_this_cpu, 0, 0);
+}
+
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
+{
+ int cpu;
+
+ for_each_cpu(cpu, mask)
+ mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION);
+}
+
+void arch_send_call_function_single_ipi(int cpu)
+{
+ mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE);
+}
+
+void smp_timer_broadcast(const struct cpumask *mask)
+{
+ int cpu;
+
+ for_each_cpu(cpu, mask)
+ mp_ops->send_ipi(cpu, SMP_MSG_TIMER);
+}
+
+static void ipi_timer(void)
+{
+ irq_enter();
+ local_timer_interrupt();
+ irq_exit();
+}
+
+void smp_message_recv(unsigned int msg)
+{
+ switch (msg) {
+ case SMP_MSG_FUNCTION:
+ generic_smp_call_function_interrupt();
+ break;
+ case SMP_MSG_RESCHEDULE:
+ scheduler_ipi();
+ break;
+ case SMP_MSG_FUNCTION_SINGLE:
+ generic_smp_call_function_single_interrupt();
+ break;
+ case SMP_MSG_TIMER:
+ ipi_timer();
+ break;
+ default:
+ printk(KERN_WARNING "SMP %d: %s(): unknown IPI %d\n",
+ smp_processor_id(), __func__, msg);
+ break;
+ }
+}
+
+/* Not really SMP stuff ... */
+int setup_profiling_timer(unsigned int multiplier)
+{
+ return 0;
+}
+
+static void flush_tlb_all_ipi(void *info)
+{
+ local_flush_tlb_all();
+}
+
+void flush_tlb_all(void)
+{
+ on_each_cpu(flush_tlb_all_ipi, 0, 1);
+}
+
+static void flush_tlb_mm_ipi(void *mm)
+{
+ local_flush_tlb_mm((struct mm_struct *)mm);
+}
+
+/*
+ * The following tlb flush calls are invoked when old translations are
+ * being torn down, or pte attributes are changing. For single threaded
+ * address spaces, a new context is obtained on the current cpu, and tlb
+ * context on other cpus are invalidated to force a new context allocation
+ * at switch_mm time, should the mm ever be used on other cpus. For
+ * multithreaded address spaces, intercpu interrupts have to be sent.
+ * Another case where intercpu interrupts are required is when the target
+ * mm might be active on another cpu (e.g. debuggers doing the flushes on
+ * behalf of debuggees, kswapd stealing pages from another process, etc.).
+ * Kanoj 07/00.
+ */
+void flush_tlb_mm(struct mm_struct *mm)
+{
+ preempt_disable();
+
+ if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
+ smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1);
+ } else {
+ int i;
+ for (i = 0; i < num_online_cpus(); i++)
+ if (smp_processor_id() != i)
+ cpu_context(i, mm) = 0;
+ }
+ local_flush_tlb_mm(mm);
+
+ preempt_enable();
+}
+
+struct flush_tlb_data {
+ struct vm_area_struct *vma;
+ unsigned long addr1;
+ unsigned long addr2;
+};
+
+static void flush_tlb_range_ipi(void *info)
+{
+ struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
+
+ local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
+}
+
+void flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ struct mm_struct *mm = vma->vm_mm;
+
+ preempt_disable();
+ if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
+ struct flush_tlb_data fd;
+
+ fd.vma = vma;
+ fd.addr1 = start;
+ fd.addr2 = end;
+ smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1);
+ } else {
+ int i;
+ for (i = 0; i < num_online_cpus(); i++)
+ if (smp_processor_id() != i)
+ cpu_context(i, mm) = 0;
+ }
+ local_flush_tlb_range(vma, start, end);
+ preempt_enable();
+}
+
+static void flush_tlb_kernel_range_ipi(void *info)
+{
+ struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
+
+ local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
+}
+
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+ struct flush_tlb_data fd;
+
+ fd.addr1 = start;
+ fd.addr2 = end;
+ on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1);
+}
+
+static void flush_tlb_page_ipi(void *info)
+{
+ struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
+
+ local_flush_tlb_page(fd->vma, fd->addr1);
+}
+
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+{
+ preempt_disable();
+ if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
+ (current->mm != vma->vm_mm)) {
+ struct flush_tlb_data fd;
+
+ fd.vma = vma;
+ fd.addr1 = page;
+ smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1);
+ } else {
+ int i;
+ for (i = 0; i < num_online_cpus(); i++)
+ if (smp_processor_id() != i)
+ cpu_context(i, vma->vm_mm) = 0;
+ }
+ local_flush_tlb_page(vma, page);
+ preempt_enable();
+}
+
+static void flush_tlb_one_ipi(void *info)
+{
+ struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
+ local_flush_tlb_one(fd->addr1, fd->addr2);
+}
+
+void flush_tlb_one(unsigned long asid, unsigned long vaddr)
+{
+ struct flush_tlb_data fd;
+
+ fd.addr1 = asid;
+ fd.addr2 = vaddr;
+
+ smp_call_function(flush_tlb_one_ipi, (void *)&fd, 1);
+ local_flush_tlb_one(asid, vaddr);
+}
diff --git a/arch/sh/kernel/stacktrace.c b/arch/sh/kernel/stacktrace.c
new file mode 100644
index 00000000..bf989e06
--- /dev/null
+++ b/arch/sh/kernel/stacktrace.c
@@ -0,0 +1,92 @@
+/*
+ * arch/sh/kernel/stacktrace.c
+ *
+ * Stack trace management functions
+ *
+ * Copyright (C) 2006 - 2008 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/sched.h>
+#include <linux/stacktrace.h>
+#include <linux/thread_info.h>
+#include <linux/module.h>
+#include <asm/unwinder.h>
+#include <asm/ptrace.h>
+#include <asm/stacktrace.h>
+
+static int save_stack_stack(void *data, char *name)
+{
+ return 0;
+}
+
+/*
+ * Save stack-backtrace addresses into a stack_trace buffer.
+ */
+static void save_stack_address(void *data, unsigned long addr, int reliable)
+{
+ struct stack_trace *trace = data;
+
+ if (!reliable)
+ return;
+
+ if (trace->skip > 0) {
+ trace->skip--;
+ return;
+ }
+
+ if (trace->nr_entries < trace->max_entries)
+ trace->entries[trace->nr_entries++] = addr;
+}
+
+static const struct stacktrace_ops save_stack_ops = {
+ .stack = save_stack_stack,
+ .address = save_stack_address,
+};
+
+void save_stack_trace(struct stack_trace *trace)
+{
+ unsigned long *sp = (unsigned long *)current_stack_pointer;
+
+ unwind_stack(current, NULL, sp, &save_stack_ops, trace);
+ if (trace->nr_entries < trace->max_entries)
+ trace->entries[trace->nr_entries++] = ULONG_MAX;
+}
+EXPORT_SYMBOL_GPL(save_stack_trace);
+
+static void
+save_stack_address_nosched(void *data, unsigned long addr, int reliable)
+{
+ struct stack_trace *trace = (struct stack_trace *)data;
+
+ if (!reliable)
+ return;
+
+ if (in_sched_functions(addr))
+ return;
+
+ if (trace->skip > 0) {
+ trace->skip--;
+ return;
+ }
+
+ if (trace->nr_entries < trace->max_entries)
+ trace->entries[trace->nr_entries++] = addr;
+}
+
+static const struct stacktrace_ops save_stack_ops_nosched = {
+ .stack = save_stack_stack,
+ .address = save_stack_address_nosched,
+};
+
+void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+{
+ unsigned long *sp = (unsigned long *)tsk->thread.sp;
+
+ unwind_stack(current, NULL, sp, &save_stack_ops_nosched, trace);
+ if (trace->nr_entries < trace->max_entries)
+ trace->entries[trace->nr_entries++] = ULONG_MAX;
+}
+EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
diff --git a/arch/sh/kernel/swsusp.c b/arch/sh/kernel/swsusp.c
new file mode 100644
index 00000000..12b64a0f
--- /dev/null
+++ b/arch/sh/kernel/swsusp.c
@@ -0,0 +1,38 @@
+/*
+ * swsusp.c - SuperH hibernation support
+ *
+ * Copyright (C) 2009 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/suspend.h>
+#include <asm/suspend.h>
+#include <asm/sections.h>
+#include <asm/tlbflush.h>
+#include <asm/page.h>
+#include <asm/fpu.h>
+
+struct swsusp_arch_regs swsusp_arch_regs_cpu0;
+
+int pfn_is_nosave(unsigned long pfn)
+{
+ unsigned long begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT;
+ unsigned long end_pfn = PAGE_ALIGN(__pa(&__nosave_end)) >> PAGE_SHIFT;
+
+ return (pfn >= begin_pfn) && (pfn < end_pfn);
+}
+
+void save_processor_state(void)
+{
+ init_fpu(current);
+}
+
+void restore_processor_state(void)
+{
+ local_flush_tlb_all();
+}
diff --git a/arch/sh/kernel/sys_sh.c b/arch/sh/kernel/sys_sh.c
new file mode 100644
index 00000000..8c6a350d
--- /dev/null
+++ b/arch/sh/kernel/sys_sh.c
@@ -0,0 +1,95 @@
+/*
+ * linux/arch/sh/kernel/sys_sh.c
+ *
+ * This file contains various random system calls that
+ * have a non-standard calling sequence on the Linux/SuperH
+ * platform.
+ *
+ * Taken from i386 version.
+ */
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/sem.h>
+#include <linux/msg.h>
+#include <linux/shm.h>
+#include <linux/stat.h>
+#include <linux/syscalls.h>
+#include <linux/mman.h>
+#include <linux/file.h>
+#include <linux/utsname.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/ipc.h>
+#include <asm/syscalls.h>
+#include <asm/uaccess.h>
+#include <asm/unistd.h>
+#include <asm/cacheflush.h>
+#include <asm/cachectl.h>
+
+asmlinkage int old_mmap(unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags,
+ int fd, unsigned long off)
+{
+ if (off & ~PAGE_MASK)
+ return -EINVAL;
+ return sys_mmap_pgoff(addr, len, prot, flags, fd, off>>PAGE_SHIFT);
+}
+
+asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags,
+ unsigned long fd, unsigned long pgoff)
+{
+ /*
+ * The shift for mmap2 is constant, regardless of PAGE_SIZE
+ * setting.
+ */
+ if (pgoff & ((1 << (PAGE_SHIFT - 12)) - 1))
+ return -EINVAL;
+
+ pgoff >>= PAGE_SHIFT - 12;
+
+ return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
+}
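The mmap2 ABI always expresses the file offset in 4096-byte units, so on a kernel built with larger pages the low PAGE_SHIFT - 12 bits of pgoff must be zero and the remaining bits are rescaled into pages. A small sketch of the same arithmetic with concrete numbers (illustration only, not part of this patch; assumes 16 KiB pages, i.e. PAGE_SHIFT == 14, and mmap2_units_to_pgoff is a hypothetical helper):

	/* Convert an mmap2 offset given in 4096-byte units into 16 KiB pages,
	 * returning -1 for a misaligned offset (mirrors the -EINVAL check above).
	 */
	static long mmap2_units_to_pgoff(unsigned long units)
	{
		if (units & ((1UL << (14 - 12)) - 1))	/* must be a multiple of 4 units */
			return -1;
		return units >> (14 - 12);		/* e.g. 8 units (32 KiB) -> page 2 */
	}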
+
+/* sys_cacheflush -- flush (part of) the processor cache. */
+asmlinkage int sys_cacheflush(unsigned long addr, unsigned long len, int op)
+{
+ struct vm_area_struct *vma;
+
+ if ((op <= 0) || (op > (CACHEFLUSH_D_PURGE|CACHEFLUSH_I)))
+ return -EINVAL;
+
+ /*
+ * Verify that the specified address region actually belongs
+ * to this process.
+ */
+ if (addr + len < addr)
+ return -EFAULT;
+
+ down_read(&current->mm->mmap_sem);
+ vma = find_vma (current->mm, addr);
+ if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end) {
+ up_read(&current->mm->mmap_sem);
+ return -EFAULT;
+ }
+
+ switch (op & CACHEFLUSH_D_PURGE) {
+ case CACHEFLUSH_D_INVAL:
+ __flush_invalidate_region((void *)addr, len);
+ break;
+ case CACHEFLUSH_D_WB:
+ __flush_wback_region((void *)addr, len);
+ break;
+ case CACHEFLUSH_D_PURGE:
+ __flush_purge_region((void *)addr, len);
+ break;
+ }
+
+ if (op & CACHEFLUSH_I)
+ flush_icache_range(addr, addr+len);
+
+ up_read(&current->mm->mmap_sem);
+ return 0;
+}
diff --git a/arch/sh/kernel/sys_sh32.c b/arch/sh/kernel/sys_sh32.c
new file mode 100644
index 00000000..f56b6fe5
--- /dev/null
+++ b/arch/sh/kernel/sys_sh32.c
@@ -0,0 +1,86 @@
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/sem.h>
+#include <linux/msg.h>
+#include <linux/shm.h>
+#include <linux/stat.h>
+#include <linux/syscalls.h>
+#include <linux/mman.h>
+#include <linux/file.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/ipc.h>
+#include <asm/cacheflush.h>
+#include <asm/uaccess.h>
+#include <asm/unistd.h>
+#include <asm/syscalls.h>
+
+/*
+ * sys_pipe() is the normal C calling standard for creating
+ * a pipe. It's not the way Unix traditionally does this, though.
+ */
+asmlinkage int sys_sh_pipe(unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs __regs)
+{
+ struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
+ int fd[2];
+ int error;
+
+ error = do_pipe_flags(fd, 0);
+ if (!error) {
+ regs->regs[1] = fd[1];
+ return fd[0];
+ }
+ return error;
+}
+
+asmlinkage ssize_t sys_pread_wrapper(unsigned int fd, char __user *buf,
+ size_t count, long dummy, loff_t pos)
+{
+ return sys_pread64(fd, buf, count, pos);
+}
+
+asmlinkage ssize_t sys_pwrite_wrapper(unsigned int fd, const char __user *buf,
+ size_t count, long dummy, loff_t pos)
+{
+ return sys_pwrite64(fd, buf, count, pos);
+}
+
+asmlinkage int sys_fadvise64_64_wrapper(int fd, u32 offset0, u32 offset1,
+ u32 len0, u32 len1, int advice)
+{
+#ifdef __LITTLE_ENDIAN__
+ return sys_fadvise64_64(fd, (u64)offset1 << 32 | offset0,
+ (u64)len1 << 32 | len0, advice);
+#else
+ return sys_fadvise64_64(fd, (u64)offset0 << 32 | offset1,
+ (u64)len0 << 32 | len1, advice);
+#endif
+}
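Each 64-bit argument reaches the wrapper as two 32-bit register halves, and which half carries the low word depends on the byte order the ABI uses for 64-bit values. A minimal sketch of the reassembly for the little-endian case (illustration only, not part of this patch; join_u64 is a hypothetical helper):

	/* Rebuild a 64-bit value from the halves a little-endian caller passes:
	 * the first register holds the low word, the second the high word.
	 */
	static unsigned long long join_u64(unsigned int lo, unsigned int hi)
	{
		return ((unsigned long long)hi << 32) | lo;
	}

	/* join_u64(0x0, 0x1) == 0x100000000ULL, i.e. an offset of 4 GiB */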
+
+#if defined(CONFIG_CPU_SH2) || defined(CONFIG_CPU_SH2A)
+#define SYSCALL_ARG3 "trapa #0x23"
+#else
+#define SYSCALL_ARG3 "trapa #0x13"
+#endif
+
+/*
+ * Do a system call from kernel instead of calling sys_execve so we
+ * end up with proper pt_regs.
+ */
+int kernel_execve(const char *filename,
+ const char *const argv[],
+ const char *const envp[])
+{
+ register long __sc0 __asm__ ("r3") = __NR_execve;
+ register long __sc4 __asm__ ("r4") = (long) filename;
+ register long __sc5 __asm__ ("r5") = (long) argv;
+ register long __sc6 __asm__ ("r6") = (long) envp;
+ __asm__ __volatile__ (SYSCALL_ARG3 : "=z" (__sc0)
+ : "0" (__sc0), "r" (__sc4), "r" (__sc5), "r" (__sc6)
+ : "memory");
+ return __sc0;
+}
diff --git a/arch/sh/kernel/sys_sh64.c b/arch/sh/kernel/sys_sh64.c
new file mode 100644
index 00000000..c5a38c4b
--- /dev/null
+++ b/arch/sh/kernel/sys_sh64.c
@@ -0,0 +1,50 @@
+/*
+ * arch/sh/kernel/sys_sh64.c
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ *
+ * This file contains various random system calls that
+ * have a non-standard calling sequence on the Linux/SH5
+ * platform.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/errno.h>
+#include <linux/rwsem.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/smp.h>
+#include <linux/sem.h>
+#include <linux/msg.h>
+#include <linux/shm.h>
+#include <linux/stat.h>
+#include <linux/mman.h>
+#include <linux/file.h>
+#include <linux/syscalls.h>
+#include <linux/ipc.h>
+#include <asm/uaccess.h>
+#include <asm/ptrace.h>
+#include <asm/unistd.h>
+
+/*
+ * Do a system call from kernel instead of calling sys_execve so we
+ * end up with proper pt_regs.
+ */
+int kernel_execve(const char *filename,
+ const char *const argv[],
+ const char *const envp[])
+{
+ register unsigned long __sc0 __asm__ ("r9") = ((0x13 << 16) | __NR_execve);
+ register unsigned long __sc2 __asm__ ("r2") = (unsigned long) filename;
+ register unsigned long __sc3 __asm__ ("r3") = (unsigned long) argv;
+ register unsigned long __sc4 __asm__ ("r4") = (unsigned long) envp;
+ __asm__ __volatile__ ("trapa %1 !\t\t\t execve(%2,%3,%4)"
+ : "=r" (__sc0)
+ : "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4) );
+ __asm__ __volatile__ ("!dummy %0 %1 %2 %3"
+ : : "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4) : "memory");
+ return __sc0;
+}
diff --git a/arch/sh/kernel/syscalls_32.S b/arch/sh/kernel/syscalls_32.S
new file mode 100644
index 00000000..4b68f0f7
--- /dev/null
+++ b/arch/sh/kernel/syscalls_32.S
@@ -0,0 +1,386 @@
+/*
+ * arch/sh/kernel/syscalls.S
+ *
+ * System call table for SuperH
+ *
+ * Copyright (C) 1999, 2000, 2002 Niibe Yutaka
+ * Copyright (C) 2003 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ */
+#include <linux/sys.h>
+#include <linux/linkage.h>
+
+ .data
+ENTRY(sys_call_table)
+	.long sys_restart_syscall	/* 0 - old "setup()" system call */
+ .long sys_exit
+ .long sys_fork
+ .long sys_read
+ .long sys_write
+ .long sys_open /* 5 */
+ .long sys_close
+ .long sys_waitpid
+ .long sys_creat
+ .long sys_link
+ .long sys_unlink /* 10 */
+ .long sys_execve
+ .long sys_chdir
+ .long sys_time
+ .long sys_mknod
+ .long sys_chmod /* 15 */
+ .long sys_lchown16
+ .long sys_ni_syscall /* old break syscall holder */
+ .long sys_stat
+ .long sys_lseek
+ .long sys_getpid /* 20 */
+ .long sys_mount
+ .long sys_oldumount
+ .long sys_setuid16
+ .long sys_getuid16
+ .long sys_stime /* 25 */
+ .long sys_ptrace
+ .long sys_alarm
+ .long sys_fstat
+ .long sys_pause
+ .long sys_utime /* 30 */
+ .long sys_ni_syscall /* old stty syscall holder */
+ .long sys_ni_syscall /* old gtty syscall holder */
+ .long sys_access
+ .long sys_nice
+ .long sys_ni_syscall /* 35 */ /* old ftime syscall holder */
+ .long sys_sync
+ .long sys_kill
+ .long sys_rename
+ .long sys_mkdir
+ .long sys_rmdir /* 40 */
+ .long sys_dup
+ .long sys_sh_pipe
+ .long sys_times
+ .long sys_ni_syscall /* old prof syscall holder */
+ .long sys_brk /* 45 */
+ .long sys_setgid16
+ .long sys_getgid16
+ .long sys_signal
+ .long sys_geteuid16
+ .long sys_getegid16 /* 50 */
+ .long sys_acct
+ .long sys_umount /* recycled never used phys() */
+ .long sys_ni_syscall /* old lock syscall holder */
+ .long sys_ioctl
+ .long sys_fcntl /* 55 */
+ .long sys_ni_syscall /* old mpx syscall holder */
+ .long sys_setpgid
+ .long sys_ni_syscall /* old ulimit syscall holder */
+ .long sys_ni_syscall /* sys_olduname */
+ .long sys_umask /* 60 */
+ .long sys_chroot
+ .long sys_ustat
+ .long sys_dup2
+ .long sys_getppid
+ .long sys_getpgrp /* 65 */
+ .long sys_setsid
+ .long sys_sigaction
+ .long sys_sgetmask
+ .long sys_ssetmask
+ .long sys_setreuid16 /* 70 */
+ .long sys_setregid16
+ .long sys_sigsuspend
+ .long sys_sigpending
+ .long sys_sethostname
+ .long sys_setrlimit /* 75 */
+ .long sys_old_getrlimit
+ .long sys_getrusage
+ .long sys_gettimeofday
+ .long sys_settimeofday
+ .long sys_getgroups16 /* 80 */
+ .long sys_setgroups16
+ .long sys_ni_syscall /* sys_oldselect */
+ .long sys_symlink
+ .long sys_lstat
+ .long sys_readlink /* 85 */
+ .long sys_uselib
+ .long sys_swapon
+ .long sys_reboot
+ .long sys_old_readdir
+ .long old_mmap /* 90 */
+ .long sys_munmap
+ .long sys_truncate
+ .long sys_ftruncate
+ .long sys_fchmod
+ .long sys_fchown16 /* 95 */
+ .long sys_getpriority
+ .long sys_setpriority
+ .long sys_ni_syscall /* old profil syscall holder */
+ .long sys_statfs
+ .long sys_fstatfs /* 100 */
+ .long sys_ni_syscall /* ioperm */
+ .long sys_socketcall
+ .long sys_syslog
+ .long sys_setitimer
+ .long sys_getitimer /* 105 */
+ .long sys_newstat
+ .long sys_newlstat
+ .long sys_newfstat
+ .long sys_uname
+ .long sys_ni_syscall /* 110 */ /* iopl */
+ .long sys_vhangup
+ .long sys_ni_syscall /* idle */
+ .long sys_ni_syscall /* vm86old */
+ .long sys_wait4
+ .long sys_swapoff /* 115 */
+ .long sys_sysinfo
+ .long sys_ipc
+ .long sys_fsync
+ .long sys_sigreturn
+ .long sys_clone /* 120 */
+ .long sys_setdomainname
+ .long sys_newuname
+ .long sys_cacheflush /* x86: sys_modify_ldt */
+ .long sys_adjtimex
+ .long sys_mprotect /* 125 */
+ .long sys_sigprocmask
+ .long sys_ni_syscall /* old "create_module" */
+ .long sys_init_module
+ .long sys_delete_module
+ .long sys_ni_syscall /* 130: old "get_kernel_syms" */
+ .long sys_quotactl
+ .long sys_getpgid
+ .long sys_fchdir
+ .long sys_bdflush
+ .long sys_sysfs /* 135 */
+ .long sys_personality
+ .long sys_ni_syscall /* for afs_syscall */
+ .long sys_setfsuid16
+ .long sys_setfsgid16
+ .long sys_llseek /* 140 */
+ .long sys_getdents
+ .long sys_select
+ .long sys_flock
+ .long sys_msync
+ .long sys_readv /* 145 */
+ .long sys_writev
+ .long sys_getsid
+ .long sys_fdatasync
+ .long sys_sysctl
+ .long sys_mlock /* 150 */
+ .long sys_munlock
+ .long sys_mlockall
+ .long sys_munlockall
+ .long sys_sched_setparam
+ .long sys_sched_getparam /* 155 */
+ .long sys_sched_setscheduler
+ .long sys_sched_getscheduler
+ .long sys_sched_yield
+ .long sys_sched_get_priority_max
+ .long sys_sched_get_priority_min /* 160 */
+ .long sys_sched_rr_get_interval
+ .long sys_nanosleep
+ .long sys_mremap
+ .long sys_setresuid16
+ .long sys_getresuid16 /* 165 */
+ .long sys_ni_syscall /* vm86 */
+ .long sys_ni_syscall /* old "query_module" */
+ .long sys_poll
+ .long sys_ni_syscall /* was nfsservctl */
+ .long sys_setresgid16 /* 170 */
+ .long sys_getresgid16
+ .long sys_prctl
+ .long sys_rt_sigreturn
+ .long sys_rt_sigaction
+ .long sys_rt_sigprocmask /* 175 */
+ .long sys_rt_sigpending
+ .long sys_rt_sigtimedwait
+ .long sys_rt_sigqueueinfo
+ .long sys_rt_sigsuspend
+ .long sys_pread_wrapper /* 180 */
+ .long sys_pwrite_wrapper
+ .long sys_chown16
+ .long sys_getcwd
+ .long sys_capget
+ .long sys_capset /* 185 */
+ .long sys_sigaltstack
+ .long sys_sendfile
+ .long sys_ni_syscall /* getpmsg */
+ .long sys_ni_syscall /* putpmsg */
+ .long sys_vfork /* 190 */
+ .long sys_getrlimit
+ .long sys_mmap2
+ .long sys_truncate64
+ .long sys_ftruncate64
+ .long sys_stat64 /* 195 */
+ .long sys_lstat64
+ .long sys_fstat64
+ .long sys_lchown
+ .long sys_getuid
+ .long sys_getgid /* 200 */
+ .long sys_geteuid
+ .long sys_getegid
+ .long sys_setreuid
+ .long sys_setregid
+ .long sys_getgroups /* 205 */
+ .long sys_setgroups
+ .long sys_fchown
+ .long sys_setresuid
+ .long sys_getresuid
+ .long sys_setresgid /* 210 */
+ .long sys_getresgid
+ .long sys_chown
+ .long sys_setuid
+ .long sys_setgid
+ .long sys_setfsuid /* 215 */
+ .long sys_setfsgid
+ .long sys_pivot_root
+ .long sys_mincore
+ .long sys_madvise
+ .long sys_getdents64 /* 220 */
+ .long sys_fcntl64
+ .long sys_ni_syscall /* reserved for TUX */
+ .long sys_ni_syscall /* Reserved for Security */
+ .long sys_gettid
+ .long sys_readahead /* 225 */
+ .long sys_setxattr
+ .long sys_lsetxattr
+ .long sys_fsetxattr
+ .long sys_getxattr
+ .long sys_lgetxattr /* 230 */
+ .long sys_fgetxattr
+ .long sys_listxattr
+ .long sys_llistxattr
+ .long sys_flistxattr
+ .long sys_removexattr /* 235 */
+ .long sys_lremovexattr
+ .long sys_fremovexattr
+ .long sys_tkill
+ .long sys_sendfile64
+ .long sys_futex /* 240 */
+ .long sys_sched_setaffinity
+ .long sys_sched_getaffinity
+ .long sys_ni_syscall /* reserved for set_thread_area */
+ .long sys_ni_syscall /* reserved for get_thread_area */
+ .long sys_io_setup /* 245 */
+ .long sys_io_destroy
+ .long sys_io_getevents
+ .long sys_io_submit
+ .long sys_io_cancel
+ .long sys_fadvise64 /* 250 */
+ .long sys_ni_syscall
+ .long sys_exit_group
+ .long sys_lookup_dcookie
+ .long sys_epoll_create
+ .long sys_epoll_ctl /* 255 */
+ .long sys_epoll_wait
+ .long sys_remap_file_pages
+ .long sys_set_tid_address
+ .long sys_timer_create
+ .long sys_timer_settime /* 260 */
+ .long sys_timer_gettime
+ .long sys_timer_getoverrun
+ .long sys_timer_delete
+ .long sys_clock_settime
+ .long sys_clock_gettime /* 265 */
+ .long sys_clock_getres
+ .long sys_clock_nanosleep
+ .long sys_statfs64
+ .long sys_fstatfs64
+ .long sys_tgkill /* 270 */
+ .long sys_utimes
+ .long sys_fadvise64_64_wrapper
+ .long sys_ni_syscall /* Reserved for vserver */
+ .long sys_mbind
+ .long sys_get_mempolicy /* 275 */
+ .long sys_set_mempolicy
+ .long sys_mq_open
+ .long sys_mq_unlink
+ .long sys_mq_timedsend
+ .long sys_mq_timedreceive /* 280 */
+ .long sys_mq_notify
+ .long sys_mq_getsetattr
+ .long sys_kexec_load
+ .long sys_waitid
+ .long sys_add_key /* 285 */
+ .long sys_request_key
+ .long sys_keyctl
+ .long sys_ioprio_set
+ .long sys_ioprio_get
+ .long sys_inotify_init /* 290 */
+ .long sys_inotify_add_watch
+ .long sys_inotify_rm_watch
+ .long sys_ni_syscall
+ .long sys_migrate_pages
+ .long sys_openat /* 295 */
+ .long sys_mkdirat
+ .long sys_mknodat
+ .long sys_fchownat
+ .long sys_futimesat
+ .long sys_fstatat64 /* 300 */
+ .long sys_unlinkat
+ .long sys_renameat
+ .long sys_linkat
+ .long sys_symlinkat
+ .long sys_readlinkat /* 305 */
+ .long sys_fchmodat
+ .long sys_faccessat
+ .long sys_pselect6
+ .long sys_ppoll
+ .long sys_unshare /* 310 */
+ .long sys_set_robust_list
+ .long sys_get_robust_list
+ .long sys_splice
+ .long sys_sync_file_range
+ .long sys_tee /* 315 */
+ .long sys_vmsplice
+ .long sys_move_pages
+ .long sys_getcpu
+ .long sys_epoll_pwait
+ .long sys_utimensat /* 320 */
+ .long sys_signalfd
+ .long sys_timerfd_create
+ .long sys_eventfd
+ .long sys_fallocate
+ .long sys_timerfd_settime /* 325 */
+ .long sys_timerfd_gettime
+ .long sys_signalfd4
+ .long sys_eventfd2
+ .long sys_epoll_create1
+ .long sys_dup3 /* 330 */
+ .long sys_pipe2
+ .long sys_inotify_init1
+ .long sys_preadv
+ .long sys_pwritev
+ .long sys_rt_tgsigqueueinfo /* 335 */
+ .long sys_perf_event_open
+ .long sys_fanotify_init
+ .long sys_fanotify_mark
+ .long sys_prlimit64
+ /* Broken-out socket family */
+ .long sys_socket /* 340 */
+ .long sys_bind
+ .long sys_connect
+ .long sys_listen
+ .long sys_accept
+ .long sys_getsockname /* 345 */
+ .long sys_getpeername
+ .long sys_socketpair
+ .long sys_send
+ .long sys_sendto
+ .long sys_recv /* 350 */
+ .long sys_recvfrom
+ .long sys_shutdown
+ .long sys_setsockopt
+ .long sys_getsockopt
+ .long sys_sendmsg /* 355 */
+ .long sys_recvmsg
+ .long sys_recvmmsg
+ .long sys_accept4
+ .long sys_name_to_handle_at
+ .long sys_open_by_handle_at /* 360 */
+ .long sys_clock_adjtime
+ .long sys_syncfs
+ .long sys_sendmmsg
+ .long sys_setns
+ .long sys_process_vm_readv /* 365 */
+ .long sys_process_vm_writev
diff --git a/arch/sh/kernel/syscalls_64.S b/arch/sh/kernel/syscalls_64.S
new file mode 100644
index 00000000..0956345b
--- /dev/null
+++ b/arch/sh/kernel/syscalls_64.S
@@ -0,0 +1,406 @@
+/*
+ * arch/sh/kernel/syscalls_64.S
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ * Copyright (C) 2004 - 2007 Paul Mundt
+ * Copyright (C) 2003, 2004 Richard Curnow
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/sys.h>
+
+ .section .data, "aw"
+ .balign 32
+
+/*
+ * System calls jump table
+ */
+ .globl sys_call_table
+sys_call_table:
+ .long sys_restart_syscall /* 0 - old "setup()" system call */
+ .long sys_exit
+ .long sys_fork
+ .long sys_read
+ .long sys_write
+ .long sys_open /* 5 */
+ .long sys_close
+ .long sys_waitpid
+ .long sys_creat
+ .long sys_link
+ .long sys_unlink /* 10 */
+ .long sys_execve
+ .long sys_chdir
+ .long sys_time
+ .long sys_mknod
+ .long sys_chmod /* 15 */
+ .long sys_lchown16
+ .long sys_ni_syscall /* old break syscall holder */
+ .long sys_stat
+ .long sys_lseek
+ .long sys_getpid /* 20 */
+ .long sys_mount
+ .long sys_oldumount
+ .long sys_setuid16
+ .long sys_getuid16
+ .long sys_stime /* 25 */
+ .long sh64_ptrace
+ .long sys_alarm
+ .long sys_fstat
+ .long sys_pause
+ .long sys_utime /* 30 */
+ .long sys_ni_syscall /* old stty syscall holder */
+ .long sys_ni_syscall /* old gtty syscall holder */
+ .long sys_access
+ .long sys_nice
+ .long sys_ni_syscall /* 35 */ /* old ftime syscall holder */
+ .long sys_sync
+ .long sys_kill
+ .long sys_rename
+ .long sys_mkdir
+ .long sys_rmdir /* 40 */
+ .long sys_dup
+ .long sys_pipe
+ .long sys_times
+ .long sys_ni_syscall /* old prof syscall holder */
+ .long sys_brk /* 45 */
+ .long sys_setgid16
+ .long sys_getgid16
+ .long sys_signal
+ .long sys_geteuid16
+ .long sys_getegid16 /* 50 */
+ .long sys_acct
+	.long sys_umount		/* recycled never used phys() */
+ .long sys_ni_syscall /* old lock syscall holder */
+ .long sys_ioctl
+ .long sys_fcntl /* 55 */
+ .long sys_ni_syscall /* old mpx syscall holder */
+ .long sys_setpgid
+ .long sys_ni_syscall /* old ulimit syscall holder */
+ .long sys_ni_syscall /* sys_olduname */
+ .long sys_umask /* 60 */
+ .long sys_chroot
+ .long sys_ustat
+ .long sys_dup2
+ .long sys_getppid
+ .long sys_getpgrp /* 65 */
+ .long sys_setsid
+ .long sys_sigaction
+ .long sys_sgetmask
+ .long sys_ssetmask
+ .long sys_setreuid16 /* 70 */
+ .long sys_setregid16
+ .long sys_sigsuspend
+ .long sys_sigpending
+ .long sys_sethostname
+ .long sys_setrlimit /* 75 */
+ .long sys_old_getrlimit
+ .long sys_getrusage
+ .long sys_gettimeofday
+ .long sys_settimeofday
+ .long sys_getgroups16 /* 80 */
+ .long sys_setgroups16
+ .long sys_ni_syscall /* sys_oldselect */
+ .long sys_symlink
+ .long sys_lstat
+ .long sys_readlink /* 85 */
+ .long sys_uselib
+ .long sys_swapon
+ .long sys_reboot
+ .long sys_old_readdir
+ .long old_mmap /* 90 */
+ .long sys_munmap
+ .long sys_truncate
+ .long sys_ftruncate
+ .long sys_fchmod
+ .long sys_fchown16 /* 95 */
+ .long sys_getpriority
+ .long sys_setpriority
+ .long sys_ni_syscall /* old profil syscall holder */
+ .long sys_statfs
+ .long sys_fstatfs /* 100 */
+ .long sys_ni_syscall /* ioperm */
+ .long sys_socketcall /* Obsolete implementation of socket syscall */
+ .long sys_syslog
+ .long sys_setitimer
+ .long sys_getitimer /* 105 */
+ .long sys_newstat
+ .long sys_newlstat
+ .long sys_newfstat
+ .long sys_uname
+ .long sys_ni_syscall /* 110 */ /* iopl */
+ .long sys_vhangup
+ .long sys_ni_syscall /* idle */
+ .long sys_ni_syscall /* vm86old */
+ .long sys_wait4
+ .long sys_swapoff /* 115 */
+ .long sys_sysinfo
+ .long sys_ipc /* Obsolete ipc syscall implementation */
+ .long sys_fsync
+ .long sys_sigreturn
+ .long sys_clone /* 120 */
+ .long sys_setdomainname
+ .long sys_newuname
+ .long sys_cacheflush /* x86: sys_modify_ldt */
+ .long sys_adjtimex
+ .long sys_mprotect /* 125 */
+ .long sys_sigprocmask
+ .long sys_ni_syscall /* old "create_module" */
+ .long sys_init_module
+ .long sys_delete_module
+ .long sys_ni_syscall /* 130: old "get_kernel_syms" */
+ .long sys_quotactl
+ .long sys_getpgid
+ .long sys_fchdir
+ .long sys_bdflush
+ .long sys_sysfs /* 135 */
+ .long sys_personality
+ .long sys_ni_syscall /* for afs_syscall */
+ .long sys_setfsuid16
+ .long sys_setfsgid16
+ .long sys_llseek /* 140 */
+ .long sys_getdents
+ .long sys_select
+ .long sys_flock
+ .long sys_msync
+ .long sys_readv /* 145 */
+ .long sys_writev
+ .long sys_getsid
+ .long sys_fdatasync
+ .long sys_sysctl
+ .long sys_mlock /* 150 */
+ .long sys_munlock
+ .long sys_mlockall
+ .long sys_munlockall
+ .long sys_sched_setparam
+ .long sys_sched_getparam /* 155 */
+ .long sys_sched_setscheduler
+ .long sys_sched_getscheduler
+ .long sys_sched_yield
+ .long sys_sched_get_priority_max
+ .long sys_sched_get_priority_min /* 160 */
+ .long sys_sched_rr_get_interval
+ .long sys_nanosleep
+ .long sys_mremap
+ .long sys_setresuid16
+ .long sys_getresuid16 /* 165 */
+ .long sys_ni_syscall /* vm86 */
+ .long sys_ni_syscall /* old "query_module" */
+ .long sys_poll
+ .long sys_ni_syscall /* was nfsservctl */
+ .long sys_setresgid16 /* 170 */
+ .long sys_getresgid16
+ .long sys_prctl
+ .long sys_rt_sigreturn
+ .long sys_rt_sigaction
+ .long sys_rt_sigprocmask /* 175 */
+ .long sys_rt_sigpending
+ .long sys_rt_sigtimedwait
+ .long sys_rt_sigqueueinfo
+ .long sys_rt_sigsuspend
+ .long sys_pread64 /* 180 */
+ .long sys_pwrite64
+ .long sys_chown16
+ .long sys_getcwd
+ .long sys_capget
+ .long sys_capset /* 185 */
+ .long sys_sigaltstack
+ .long sys_sendfile
+ .long sys_ni_syscall /* getpmsg */
+ .long sys_ni_syscall /* putpmsg */
+ .long sys_vfork /* 190 */
+ .long sys_getrlimit
+ .long sys_mmap2
+ .long sys_truncate64
+ .long sys_ftruncate64
+ .long sys_stat64 /* 195 */
+ .long sys_lstat64
+ .long sys_fstat64
+ .long sys_lchown
+ .long sys_getuid
+ .long sys_getgid /* 200 */
+ .long sys_geteuid
+ .long sys_getegid
+ .long sys_setreuid
+ .long sys_setregid
+ .long sys_getgroups /* 205 */
+ .long sys_setgroups
+ .long sys_fchown
+ .long sys_setresuid
+ .long sys_getresuid
+ .long sys_setresgid /* 210 */
+ .long sys_getresgid
+ .long sys_chown
+ .long sys_setuid
+ .long sys_setgid
+ .long sys_setfsuid /* 215 */
+ .long sys_setfsgid
+ .long sys_pivot_root
+ .long sys_mincore
+ .long sys_madvise
+ /* Broken-out socket family (maintain backwards compatibility in syscall
+ numbering with 2.4) */
+ .long sys_socket /* 220 */
+ .long sys_bind
+ .long sys_connect
+ .long sys_listen
+ .long sys_accept
+ .long sys_getsockname /* 225 */
+ .long sys_getpeername
+ .long sys_socketpair
+ .long sys_send
+ .long sys_sendto
+	.long sys_recv			/* 230 */
+ .long sys_recvfrom
+ .long sys_shutdown
+ .long sys_setsockopt
+ .long sys_getsockopt
+ .long sys_sendmsg /* 235 */
+ .long sys_recvmsg
+ /* Broken-out IPC family (maintain backwards compatibility in syscall
+ numbering with 2.4) */
+ .long sys_semop
+ .long sys_semget
+ .long sys_semctl
+ .long sys_msgsnd /* 240 */
+ .long sys_msgrcv
+ .long sys_msgget
+ .long sys_msgctl
+ .long sys_shmat
+ .long sys_shmdt /* 245 */
+ .long sys_shmget
+ .long sys_shmctl
+ /* Rest of syscalls listed in 2.4 i386 unistd.h */
+ .long sys_getdents64
+ .long sys_fcntl64
+ .long sys_ni_syscall /* 250 reserved for TUX */
+ .long sys_ni_syscall /* Reserved for Security */
+ .long sys_gettid
+ .long sys_readahead
+ .long sys_setxattr
+ .long sys_lsetxattr /* 255 */
+ .long sys_fsetxattr
+ .long sys_getxattr
+ .long sys_lgetxattr
+ .long sys_fgetxattr
+ .long sys_listxattr /* 260 */
+ .long sys_llistxattr
+ .long sys_flistxattr
+ .long sys_removexattr
+ .long sys_lremovexattr
+ .long sys_fremovexattr /* 265 */
+ .long sys_tkill
+ .long sys_sendfile64
+ .long sys_futex
+ .long sys_sched_setaffinity
+ .long sys_sched_getaffinity /* 270 */
+ .long sys_ni_syscall /* reserved for set_thread_area */
+ .long sys_ni_syscall /* reserved for get_thread_area */
+ .long sys_io_setup
+ .long sys_io_destroy
+ .long sys_io_getevents /* 275 */
+ .long sys_io_submit
+ .long sys_io_cancel
+ .long sys_fadvise64
+ .long sys_ni_syscall
+ .long sys_exit_group /* 280 */
+ /* Rest of new 2.6 syscalls */
+ .long sys_lookup_dcookie
+ .long sys_epoll_create
+ .long sys_epoll_ctl
+ .long sys_epoll_wait
+ .long sys_remap_file_pages /* 285 */
+ .long sys_set_tid_address
+ .long sys_timer_create
+ .long sys_timer_settime
+ .long sys_timer_gettime
+ .long sys_timer_getoverrun /* 290 */
+ .long sys_timer_delete
+ .long sys_clock_settime
+ .long sys_clock_gettime
+ .long sys_clock_getres
+ .long sys_clock_nanosleep /* 295 */
+ .long sys_statfs64
+ .long sys_fstatfs64
+ .long sys_tgkill
+ .long sys_utimes
+ .long sys_fadvise64_64 /* 300 */
+ .long sys_ni_syscall /* Reserved for vserver */
+ .long sys_ni_syscall /* Reserved for mbind */
+ .long sys_ni_syscall /* get_mempolicy */
+ .long sys_ni_syscall /* set_mempolicy */
+ .long sys_mq_open /* 305 */
+ .long sys_mq_unlink
+ .long sys_mq_timedsend
+ .long sys_mq_timedreceive
+ .long sys_mq_notify
+ .long sys_mq_getsetattr /* 310 */
+ .long sys_ni_syscall /* Reserved for kexec */
+ .long sys_waitid
+ .long sys_add_key
+ .long sys_request_key
+ .long sys_keyctl /* 315 */
+ .long sys_ioprio_set
+ .long sys_ioprio_get
+ .long sys_inotify_init
+ .long sys_inotify_add_watch
+ .long sys_inotify_rm_watch /* 320 */
+ .long sys_ni_syscall
+ .long sys_migrate_pages
+ .long sys_openat
+ .long sys_mkdirat
+ .long sys_mknodat /* 325 */
+ .long sys_fchownat
+ .long sys_futimesat
+ .long sys_fstatat64
+ .long sys_unlinkat
+ .long sys_renameat /* 330 */
+ .long sys_linkat
+ .long sys_symlinkat
+ .long sys_readlinkat
+ .long sys_fchmodat
+ .long sys_faccessat /* 335 */
+ .long sys_pselect6
+ .long sys_ppoll
+ .long sys_unshare
+ .long sys_set_robust_list
+ .long sys_get_robust_list /* 340 */
+ .long sys_splice
+ .long sys_sync_file_range
+ .long sys_tee
+ .long sys_vmsplice
+ .long sys_move_pages /* 345 */
+ .long sys_getcpu
+ .long sys_epoll_pwait
+ .long sys_utimensat
+ .long sys_signalfd
+ .long sys_timerfd_create /* 350 */
+ .long sys_eventfd
+ .long sys_fallocate
+ .long sys_timerfd_settime
+ .long sys_timerfd_gettime
+ .long sys_signalfd4 /* 355 */
+ .long sys_eventfd2
+ .long sys_epoll_create1
+ .long sys_dup3
+ .long sys_pipe2
+ .long sys_inotify_init1 /* 360 */
+ .long sys_preadv
+ .long sys_pwritev
+ .long sys_rt_tgsigqueueinfo
+ .long sys_perf_event_open
+ .long sys_recvmmsg /* 365 */
+ .long sys_accept4
+ .long sys_fanotify_init
+ .long sys_fanotify_mark
+ .long sys_prlimit64
+ .long sys_name_to_handle_at /* 370 */
+ .long sys_open_by_handle_at
+ .long sys_clock_adjtime
+ .long sys_syncfs
+ .long sys_sendmmsg
+ .long sys_setns /* 375 */
+ .long sys_process_vm_readv
+ .long sys_process_vm_writev
diff --git a/arch/sh/kernel/time.c b/arch/sh/kernel/time.c
new file mode 100644
index 00000000..552c8fcf
--- /dev/null
+++ b/arch/sh/kernel/time.c
@@ -0,0 +1,115 @@
+/*
+ * arch/sh/kernel/time.c
+ *
+ * Copyright (C) 1999 Tetsuya Okada & Niibe Yutaka
+ * Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
+ * Copyright (C) 2002 - 2009 Paul Mundt
+ * Copyright (C) 2002 M. R. Brown <mrbrown@linux-sh.org>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/profile.h>
+#include <linux/timex.h>
+#include <linux/sched.h>
+#include <linux/clockchips.h>
+#include <linux/platform_device.h>
+#include <linux/smp.h>
+#include <linux/rtc.h>
+#include <asm/clock.h>
+#include <asm/rtc.h>
+
+/* Dummy RTC ops */
+static void null_rtc_get_time(struct timespec *tv)
+{
+ tv->tv_sec = mktime(2000, 1, 1, 0, 0, 0);
+ tv->tv_nsec = 0;
+}
+
+static int null_rtc_set_time(const time_t secs)
+{
+ return 0;
+}
+
+void (*rtc_sh_get_time)(struct timespec *) = null_rtc_get_time;
+int (*rtc_sh_set_time)(const time_t) = null_rtc_set_time;
+
+void read_persistent_clock(struct timespec *ts)
+{
+ rtc_sh_get_time(ts);
+}
+
+#ifdef CONFIG_GENERIC_CMOS_UPDATE
+int update_persistent_clock(struct timespec now)
+{
+ return rtc_sh_set_time(now.tv_sec);
+}
+#endif
+
+unsigned int get_rtc_time(struct rtc_time *tm)
+{
+ if (rtc_sh_get_time != null_rtc_get_time) {
+ struct timespec tv;
+
+ rtc_sh_get_time(&tv);
+ rtc_time_to_tm(tv.tv_sec, tm);
+ }
+
+ return RTC_24H;
+}
+EXPORT_SYMBOL(get_rtc_time);
+
+int set_rtc_time(struct rtc_time *tm)
+{
+ unsigned long secs;
+
+ rtc_tm_to_time(tm, &secs);
+ return rtc_sh_set_time(secs);
+}
+EXPORT_SYMBOL(set_rtc_time);
+
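+/*
+ * Boards with a real RTC point rtc_sh_get_time/rtc_sh_set_time at their
+ * own handlers from their setup code; if the null ops are still in place
+ * by the time this initcall runs, there is no RTC worth registering.
+ */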
+static int __init rtc_generic_init(void)
+{
+ struct platform_device *pdev;
+
+ if (rtc_sh_get_time == null_rtc_get_time)
+ return -ENODEV;
+
+ pdev = platform_device_register_simple("rtc-generic", -1, NULL, 0);
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
+
+ return 0;
+}
+module_init(rtc_generic_init);
+
+void (*board_time_init)(void);
+
+static void __init sh_late_time_init(void)
+{
+ /*
+ * Make sure all compiled-in early timers register themselves.
+ *
+ * Run probe() for two "earlytimer" devices, these will be the
+ * clockevents and clocksource devices respectively. In the event
+ * that only a clockevents device is available, we -ENODEV on the
+ * clocksource and the jiffies clocksource is used transparently
+ * instead. No error handling is necessary here.
+ */
+ early_platform_driver_register_all("earlytimer");
+ early_platform_driver_probe("earlytimer", 2, 0);
+}
+
+void __init time_init(void)
+{
+ if (board_time_init)
+ board_time_init();
+
+ clk_init();
+
+ late_time_init = sh_late_time_init;
+}
diff --git a/arch/sh/kernel/topology.c b/arch/sh/kernel/topology.c
new file mode 100644
index 00000000..772caffb
--- /dev/null
+++ b/arch/sh/kernel/topology.c
@@ -0,0 +1,82 @@
+/*
+ * arch/sh/kernel/topology.c
+ *
+ * Copyright (C) 2007 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/init.h>
+#include <linux/percpu.h>
+#include <linux/topology.h>
+#include <linux/node.h>
+#include <linux/nodemask.h>
+#include <linux/export.h>
+
+static DEFINE_PER_CPU(struct cpu, cpu_devices);
+
+cpumask_t cpu_core_map[NR_CPUS];
+EXPORT_SYMBOL(cpu_core_map);
+
+static cpumask_t cpu_coregroup_map(unsigned int cpu)
+{
+ /*
+ * Presently all SH-X3 SMP cores are multi-cores, so just keep it
+ * simple until we have a method for determining topology.
+ */
+ return *cpu_possible_mask;
+}
+
+const struct cpumask *cpu_coregroup_mask(unsigned int cpu)
+{
+ return &cpu_core_map[cpu];
+}
+
+int arch_update_cpu_topology(void)
+{
+ unsigned int cpu;
+
+ for_each_possible_cpu(cpu)
+ cpu_core_map[cpu] = cpu_coregroup_map(cpu);
+
+ return 0;
+}
+
+static int __init topology_init(void)
+{
+ int i, ret;
+
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+ for_each_online_node(i)
+ register_one_node(i);
+#endif
+
+ for_each_present_cpu(i) {
+ struct cpu *c = &per_cpu(cpu_devices, i);
+
+ c->hotpluggable = 1;
+
+ ret = register_cpu(c, i);
+ if (unlikely(ret))
+ printk(KERN_WARNING "%s: register_cpu %d failed (%d)\n",
+ __func__, i, ret);
+ }
+
+#if defined(CONFIG_NUMA) && !defined(CONFIG_SMP)
+ /*
+ * In the UP case, make sure the CPU association is still
+ * registered under each node. Without this, sysfs fails
+ * to make the connection between nodes other than node0
+ * and cpu0.
+ */
+ for_each_online_node(i)
+ if (i != numa_node_id())
+ register_cpu_under_node(raw_smp_processor_id(), i);
+#endif
+
+ return 0;
+}
+subsys_initcall(topology_init);
diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c
new file mode 100644
index 00000000..a87e58a9
--- /dev/null
+++ b/arch/sh/kernel/traps.c
@@ -0,0 +1,116 @@
+#include <linux/bug.h>
+#include <linux/io.h>
+#include <linux/types.h>
+#include <linux/kdebug.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+#include <linux/hardirq.h>
+#include <asm/unwinder.h>
+#include <asm/traps.h>
+
+#ifdef CONFIG_GENERIC_BUG
+static void handle_BUG(struct pt_regs *regs)
+{
+ const struct bug_entry *bug;
+ unsigned long bugaddr = regs->pc;
+ enum bug_trap_type tt;
+
+ if (!is_valid_bugaddr(bugaddr))
+ goto invalid;
+
+ bug = find_bug(bugaddr);
+
+ /* Switch unwinders when unwind_stack() is called */
+ if (bug->flags & BUGFLAG_UNWINDER)
+ unwinder_faulted = 1;
+
+ tt = report_bug(bugaddr, regs);
+ if (tt == BUG_TRAP_TYPE_WARN) {
+ regs->pc += instruction_size(bugaddr);
+ return;
+ }
+
+invalid:
+ die("Kernel BUG", regs, TRAPA_BUG_OPCODE & 0xff);
+}
+
+int is_valid_bugaddr(unsigned long addr)
+{
+ insn_size_t opcode;
+
+ if (addr < PAGE_OFFSET)
+ return 0;
+ if (probe_kernel_address((insn_size_t *)addr, opcode))
+ return 0;
+ if (opcode == TRAPA_BUG_OPCODE)
+ return 1;
+
+ return 0;
+}
+#endif
+
+/*
+ * Generic trap handler.
+ */
+BUILD_TRAP_HANDLER(debug)
+{
+ TRAP_HANDLER_DECL;
+
+ /* Rewind */
+ regs->pc -= instruction_size(__raw_readw(regs->pc - 4));
+
+ if (notify_die(DIE_TRAP, "debug trap", regs, 0, vec & 0xff,
+ SIGTRAP) == NOTIFY_STOP)
+ return;
+
+ force_sig(SIGTRAP, current);
+}
+
+/*
+ * Special handler for BUG() traps.
+ */
+BUILD_TRAP_HANDLER(bug)
+{
+ TRAP_HANDLER_DECL;
+
+ /* Rewind */
+ regs->pc -= instruction_size(__raw_readw(regs->pc - 4));
+
+ if (notify_die(DIE_TRAP, "bug trap", regs, 0, TRAPA_BUG_OPCODE & 0xff,
+ SIGTRAP) == NOTIFY_STOP)
+ return;
+
+#ifdef CONFIG_GENERIC_BUG
+ if (__kernel_text_address(instruction_pointer(regs))) {
+ insn_size_t insn = *(insn_size_t *)instruction_pointer(regs);
+ if (insn == TRAPA_BUG_OPCODE)
+ handle_BUG(regs);
+ return;
+ }
+#endif
+
+ force_sig(SIGTRAP, current);
+}
+
+BUILD_TRAP_HANDLER(nmi)
+{
+ unsigned int cpu = smp_processor_id();
+ TRAP_HANDLER_DECL;
+
+ nmi_enter();
+ nmi_count(cpu)++;
+
+ switch (notify_die(DIE_NMI, "NMI", regs, 0, vec & 0xff, SIGINT)) {
+ case NOTIFY_OK:
+ case NOTIFY_STOP:
+ break;
+ case NOTIFY_BAD:
+ die("Fatal Non-Maskable Interrupt", regs, SIGINT);
+ default:
+ printk(KERN_ALERT "Got NMI, but nobody cared. Ignoring...\n");
+ break;
+ }
+
+ nmi_exit();
+}
diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c
new file mode 100644
index 00000000..a37175de
--- /dev/null
+++ b/arch/sh/kernel/traps_32.c
@@ -0,0 +1,925 @@
+/*
+ * 'traps.c' handles hardware traps and faults after we have saved some
+ * state in 'entry.S'.
+ *
+ * SuperH version: Copyright (C) 1999 Niibe Yutaka
+ * Copyright (C) 2000 Philipp Rumpf
+ * Copyright (C) 2000 David Howells
+ * Copyright (C) 2002 - 2010 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/ptrace.h>
+#include <linux/hardirq.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/module.h>
+#include <linux/kallsyms.h>
+#include <linux/io.h>
+#include <linux/bug.h>
+#include <linux/debug_locks.h>
+#include <linux/kdebug.h>
+#include <linux/kexec.h>
+#include <linux/limits.h>
+#include <linux/sysfs.h>
+#include <linux/uaccess.h>
+#include <linux/perf_event.h>
+#include <asm/alignment.h>
+#include <asm/fpu.h>
+#include <asm/kprobes.h>
+#include <asm/traps.h>
+#include <asm/bl_bit.h>
+
+#ifdef CONFIG_CPU_SH2
+# define TRAP_RESERVED_INST 4
+# define TRAP_ILLEGAL_SLOT_INST 6
+# define TRAP_ADDRESS_ERROR 9
+# ifdef CONFIG_CPU_SH2A
+# define TRAP_UBC 12
+# define TRAP_FPU_ERROR 13
+# define TRAP_DIVZERO_ERROR 17
+# define TRAP_DIVOVF_ERROR 18
+# endif
+#else
+#define TRAP_RESERVED_INST 12
+#define TRAP_ILLEGAL_SLOT_INST 13
+#endif
+
+static void dump_mem(const char *str, unsigned long bottom, unsigned long top)
+{
+ unsigned long p;
+ int i;
+
+ printk("%s(0x%08lx to 0x%08lx)\n", str, bottom, top);
+
+ for (p = bottom & ~31; p < top; ) {
+ printk("%04lx: ", p & 0xffff);
+
+ for (i = 0; i < 8; i++, p += 4) {
+ unsigned int val;
+
+ if (p < bottom || p >= top)
+ printk(" ");
+ else {
+ if (__get_user(val, (unsigned int __user *)p)) {
+ printk("\n");
+ return;
+ }
+ printk("%08x ", val);
+ }
+ }
+ printk("\n");
+ }
+}
+
+static DEFINE_SPINLOCK(die_lock);
+
+void die(const char * str, struct pt_regs * regs, long err)
+{
+ static int die_counter;
+
+ oops_enter();
+
+ spin_lock_irq(&die_lock);
+ console_verbose();
+ bust_spinlocks(1);
+
+ printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
+ print_modules();
+ show_regs(regs);
+
+ printk("Process: %s (pid: %d, stack limit = %p)\n", current->comm,
+ task_pid_nr(current), task_stack_page(current) + 1);
+
+ if (!user_mode(regs) || in_interrupt())
+ dump_mem("Stack: ", regs->regs[15], THREAD_SIZE +
+ (unsigned long)task_stack_page(current));
+
+ notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV);
+
+ bust_spinlocks(0);
+ add_taint(TAINT_DIE);
+ spin_unlock_irq(&die_lock);
+ oops_exit();
+
+ if (kexec_should_crash(current))
+ crash_kexec(regs);
+
+ if (in_interrupt())
+ panic("Fatal exception in interrupt");
+
+ if (panic_on_oops)
+ panic("Fatal exception");
+
+ do_exit(SIGSEGV);
+}
+
+static inline void die_if_kernel(const char *str, struct pt_regs *regs,
+ long err)
+{
+ if (!user_mode(regs))
+ die(str, regs, err);
+}
+
+/*
+ * try and fix up kernelspace address errors
+ * - userspace errors just cause EFAULT to be returned, resulting in SEGV
+ * - kernel/userspace interfaces cause a jump to an appropriate handler
+ * - other kernel errors are bad
+ */
+static void die_if_no_fixup(const char * str, struct pt_regs * regs, long err)
+{
+ if (!user_mode(regs)) {
+ const struct exception_table_entry *fixup;
+ fixup = search_exception_tables(regs->pc);
+ if (fixup) {
+ regs->pc = fixup->fixup;
+ return;
+ }
+
+ die(str, regs, err);
+ }
+}
+
+static inline void sign_extend(unsigned int count, unsigned char *dst)
+{
+#ifdef __LITTLE_ENDIAN__
+ if ((count == 1) && dst[0] & 0x80) {
+ dst[1] = 0xff;
+ dst[2] = 0xff;
+ dst[3] = 0xff;
+ }
+ if ((count == 2) && dst[1] & 0x80) {
+ dst[2] = 0xff;
+ dst[3] = 0xff;
+ }
+#else
+ if ((count == 1) && dst[3] & 0x80) {
+ dst[2] = 0xff;
+ dst[1] = 0xff;
+ dst[0] = 0xff;
+ }
+ if ((count == 2) && dst[2] & 0x80) {
+ dst[1] = 0xff;
+ dst[0] = 0xff;
+ }
+#endif
+}
+
+static struct mem_access user_mem_access = {
+ copy_from_user,
+ copy_to_user,
+};
+
+/*
+ * handle an instruction that does an unaligned memory access by emulating the
+ * desired behaviour
+ * - note that PC _may not_ point to the faulting instruction
+ * (if that instruction is in a branch delay slot)
+ * - return 0 if emulation okay, -EFAULT on existential error
+ */
+static int handle_unaligned_ins(insn_size_t instruction, struct pt_regs *regs,
+ struct mem_access *ma)
+{
+ int ret, index, count;
+ unsigned long *rm, *rn;
+ unsigned char *src, *dst;
+ unsigned char __user *srcu, *dstu;
+
+ index = (instruction>>8)&15; /* 0x0F00 */
+ rn = &regs->regs[index];
+
+ index = (instruction>>4)&15; /* 0x00F0 */
+ rm = &regs->regs[index];
+
+ count = 1<<(instruction&3);
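+ /*
+ * For example, 0x6236 (mov.l @r3+,r2) yields rn = &regs->regs[2],
+ * rm = &regs->regs[3] and count = 1 << (0x6236 & 3) = 4 bytes.
+ */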
+
+ switch (count) {
+ case 1: inc_unaligned_byte_access(); break;
+ case 2: inc_unaligned_word_access(); break;
+ case 4: inc_unaligned_dword_access(); break;
+ case 8: inc_unaligned_multi_access(); break;
+ }
+
+ ret = -EFAULT;
+ switch (instruction>>12) {
+ case 0: /* mov.[bwl] to/from memory via r0+rn */
+ if (instruction & 8) {
+ /* from memory */
+ srcu = (unsigned char __user *)*rm;
+ srcu += regs->regs[0];
+ dst = (unsigned char *)rn;
+ *(unsigned long *)dst = 0;
+
+#if !defined(__LITTLE_ENDIAN__)
+ dst += 4-count;
+#endif
+ if (ma->from(dst, srcu, count))
+ goto fetch_fault;
+
+ sign_extend(count, dst);
+ } else {
+ /* to memory */
+ src = (unsigned char *)rm;
+#if !defined(__LITTLE_ENDIAN__)
+ src += 4-count;
+#endif
+ dstu = (unsigned char __user *)*rn;
+ dstu += regs->regs[0];
+
+ if (ma->to(dstu, src, count))
+ goto fetch_fault;
+ }
+ ret = 0;
+ break;
+
+ case 1: /* mov.l Rm,@(disp,Rn) */
+ src = (unsigned char*) rm;
+ dstu = (unsigned char __user *)*rn;
+ dstu += (instruction&0x000F)<<2;
+
+ if (ma->to(dstu, src, 4))
+ goto fetch_fault;
+ ret = 0;
+ break;
+
+ case 2: /* mov.[bwl] to memory, possibly with pre-decrement */
+ if (instruction & 4)
+ *rn -= count;
+ src = (unsigned char*) rm;
+ dstu = (unsigned char __user *)*rn;
+#if !defined(__LITTLE_ENDIAN__)
+ src += 4-count;
+#endif
+ if (ma->to(dstu, src, count))
+ goto fetch_fault;
+ ret = 0;
+ break;
+
+ case 5: /* mov.l @(disp,Rm),Rn */
+ srcu = (unsigned char __user *)*rm;
+ srcu += (instruction & 0x000F) << 2;
+ dst = (unsigned char *)rn;
+ *(unsigned long *)dst = 0;
+
+ if (ma->from(dst, srcu, 4))
+ goto fetch_fault;
+ ret = 0;
+ break;
+
+ case 6: /* mov.[bwl] from memory, possibly with post-increment */
+ srcu = (unsigned char __user *)*rm;
+ if (instruction & 4)
+ *rm += count;
+ dst = (unsigned char*) rn;
+ *(unsigned long*)dst = 0;
+
+#if !defined(__LITTLE_ENDIAN__)
+ dst += 4-count;
+#endif
+ if (ma->from(dst, srcu, count))
+ goto fetch_fault;
+ sign_extend(count, dst);
+ ret = 0;
+ break;
+
+ case 8:
+ switch ((instruction&0xFF00)>>8) {
+ case 0x81: /* mov.w R0,@(disp,Rn) */
+ src = (unsigned char *) &regs->regs[0];
+#if !defined(__LITTLE_ENDIAN__)
+ src += 2;
+#endif
+ dstu = (unsigned char __user *)*rm; /* called Rn in the spec */
+ dstu += (instruction & 0x000F) << 1;
+
+ if (ma->to(dstu, src, 2))
+ goto fetch_fault;
+ ret = 0;
+ break;
+
+ case 0x85: /* mov.w @(disp,Rm),R0 */
+ srcu = (unsigned char __user *)*rm;
+ srcu += (instruction & 0x000F) << 1;
+ dst = (unsigned char *) &regs->regs[0];
+ *(unsigned long *)dst = 0;
+
+#if !defined(__LITTLE_ENDIAN__)
+ dst += 2;
+#endif
+ if (ma->from(dst, srcu, 2))
+ goto fetch_fault;
+ sign_extend(2, dst);
+ ret = 0;
+ break;
+ }
+ break;
+
+ case 9: /* mov.w @(disp,PC),Rn */
+ srcu = (unsigned char __user *)regs->pc;
+ srcu += 4;
+ srcu += (instruction & 0x00FF) << 1;
+ dst = (unsigned char *)rn;
+ *(unsigned long *)dst = 0;
+
+#if !defined(__LITTLE_ENDIAN__)
+ dst += 2;
+#endif
+
+ if (ma->from(dst, srcu, 2))
+ goto fetch_fault;
+ sign_extend(2, dst);
+ ret = 0;
+ break;
+
+ case 0xd: /* mov.l @(disp,PC),Rn */
+ srcu = (unsigned char __user *)(regs->pc & ~0x3);
+ srcu += 4;
+ srcu += (instruction & 0x00FF) << 2;
+ dst = (unsigned char *)rn;
+ *(unsigned long *)dst = 0;
+
+ if (ma->from(dst, srcu, 4))
+ goto fetch_fault;
+ ret = 0;
+ break;
+ }
+ return ret;
+
+ fetch_fault:
+ /* Argh. Address not only misaligned but also non-existent.
+ * Raise an EFAULT and see if it's trapped
+ */
+ die_if_no_fixup("Fault in unaligned fixup", regs, 0);
+ return -EFAULT;
+}
+
+/*
+ * emulate the instruction in the delay slot
+ * - fetches the instruction from PC+2
+ */
+static inline int handle_delayslot(struct pt_regs *regs,
+ insn_size_t old_instruction,
+ struct mem_access *ma)
+{
+ insn_size_t instruction;
+ void __user *addr = (void __user *)(regs->pc +
+ instruction_size(old_instruction));
+
+ if (copy_from_user(&instruction, addr, sizeof(instruction))) {
+ /* the instruction-fetch faulted */
+ if (user_mode(regs))
+ return -EFAULT;
+
+ /* kernel */
+ die("delay-slot-insn faulting in handle_unaligned_delayslot",
+ regs, 0);
+ }
+
+ return handle_unaligned_ins(instruction, regs, ma);
+}
+
+/*
+ * handle an instruction that does an unaligned memory access
+ * - have to be careful of branch delay-slot instructions that fault
+ * SH3:
+ * - if the branch would be taken PC points to the branch
+ * - if the branch would not be taken, PC points to delay-slot
+ * SH4:
+ * - PC always points to delayed branch
+ * - return 0 if handled, -EFAULT if failed (may not return if in kernel)
+ */
+
+/* Macros to determine offset from current PC for branch instructions */
+/* Explicit type coercion is used to force sign extension where needed */
+#define SH_PC_8BIT_OFFSET(instr) ((((signed char)(instr))*2) + 4)
+#define SH_PC_12BIT_OFFSET(instr) ((((signed short)(instr<<4))>>3) + 4)
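+/*
+ * For example, a "bra" opcode of 0xA005 carries the displacement 5, so
+ * SH_PC_12BIT_OFFSET(0xA005) = 5*2 + 4 = 14 and the branch target lies
+ * 14 bytes past the branch instruction itself.
+ */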
+
+int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs,
+ struct mem_access *ma, int expected,
+ unsigned long address)
+{
+ u_int rm;
+ int ret, index;
+
+ /*
+ * XXX: We can't handle mixed 16/32-bit instructions yet
+ */
+ if (instruction_size(instruction) != 2)
+ return -EINVAL;
+
+ index = (instruction>>8)&15; /* 0x0F00 */
+ rm = regs->regs[index];
+
+ /*
+ * Log the unexpected fixups, and then pass them on to perf.
+ *
+ * We intentionally don't report the expected cases to perf as
+ * otherwise the trapped I/O case will skew the results too much
+ * to be useful.
+ */
+ if (!expected) {
+ unaligned_fixups_notify(current, instruction, regs);
+ perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1,
+ regs, address);
+ }
+
+ ret = -EFAULT;
+ switch (instruction&0xF000) {
+ case 0x0000:
+ if (instruction==0x000B) {
+ /* rts */
+ ret = handle_delayslot(regs, instruction, ma);
+ if (ret==0)
+ regs->pc = regs->pr;
+ }
+ else if ((instruction&0x00FF)==0x0023) {
+ /* braf @Rm */
+ ret = handle_delayslot(regs, instruction, ma);
+ if (ret==0)
+ regs->pc += rm + 4;
+ }
+ else if ((instruction&0x00FF)==0x0003) {
+ /* bsrf @Rm */
+ ret = handle_delayslot(regs, instruction, ma);
+ if (ret==0) {
+ regs->pr = regs->pc + 4;
+ regs->pc += rm + 4;
+ }
+ }
+ else {
+ /* mov.[bwl] to/from memory via r0+rn */
+ goto simple;
+ }
+ break;
+
+ case 0x1000: /* mov.l Rm,@(disp,Rn) */
+ goto simple;
+
+ case 0x2000: /* mov.[bwl] to memory, possibly with pre-decrement */
+ goto simple;
+
+ case 0x4000:
+ if ((instruction&0x00FF)==0x002B) {
+ /* jmp @Rm */
+ ret = handle_delayslot(regs, instruction, ma);
+ if (ret==0)
+ regs->pc = rm;
+ }
+ else if ((instruction&0x00FF)==0x000B) {
+ /* jsr @Rm */
+ ret = handle_delayslot(regs, instruction, ma);
+ if (ret==0) {
+ regs->pr = regs->pc + 4;
+ regs->pc = rm;
+ }
+ }
+ else {
+ /* mov.[bwl] to/from memory via r0+rn */
+ goto simple;
+ }
+ break;
+
+ case 0x5000: /* mov.l @(disp,Rm),Rn */
+ goto simple;
+
+ case 0x6000: /* mov.[bwl] from memory, possibly with post-increment */
+ goto simple;
+
+ case 0x8000: /* bf lab, bf/s lab, bt lab, bt/s lab */
+ switch (instruction&0x0F00) {
+ case 0x0100: /* mov.w R0,@(disp,Rm) */
+ goto simple;
+ case 0x0500: /* mov.w @(disp,Rm),R0 */
+ goto simple;
+ case 0x0B00: /* bf lab - no delayslot */
+ ret = 0;
+ break;
+ case 0x0F00: /* bf/s lab */
+ ret = handle_delayslot(regs, instruction, ma);
+ if (ret==0) {
+#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
+ if ((regs->sr & 0x00000001) != 0)
+ regs->pc += 4; /* next after slot */
+ else
+#endif
+ regs->pc += SH_PC_8BIT_OFFSET(instruction);
+ }
+ break;
+ case 0x0900: /* bt lab - no delayslot */
+ ret = 0;
+ break;
+ case 0x0D00: /* bt/s lab */
+ ret = handle_delayslot(regs, instruction, ma);
+ if (ret==0) {
+#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
+ if ((regs->sr & 0x00000001) == 0)
+ regs->pc += 4; /* next after slot */
+ else
+#endif
+ regs->pc += SH_PC_8BIT_OFFSET(instruction);
+ }
+ break;
+ }
+ break;
+
+ case 0x9000: /* mov.w @(disp,PC),Rn */
+ goto simple;
+
+ case 0xA000: /* bra label */
+ ret = handle_delayslot(regs, instruction, ma);
+ if (ret==0)
+ regs->pc += SH_PC_12BIT_OFFSET(instruction);
+ break;
+
+ case 0xB000: /* bsr label */
+ ret = handle_delayslot(regs, instruction, ma);
+ if (ret==0) {
+ regs->pr = regs->pc + 4;
+ regs->pc += SH_PC_12BIT_OFFSET(instruction);
+ }
+ break;
+
+ case 0xD000: /* mov.l @(disp,PC),Rn */
+ goto simple;
+ }
+ return ret;
+
+ /* handle non-delay-slot instruction */
+ simple:
+ ret = handle_unaligned_ins(instruction, regs, ma);
+ if (ret==0)
+ regs->pc += instruction_size(instruction);
+ return ret;
+}
+
+/*
+ * Handle various address error exceptions:
+ * - instruction address error:
+ * misaligned PC
+ * PC >= 0x80000000 in user mode
+ * - data address error (read and write)
+ * misaligned data access
+ * access to >= 0x80000000 in user mode
+ * Unfortunately we can't distinguish between instruction address error
+ * and data address errors caused by read accesses.
+ */
+asmlinkage void do_address_error(struct pt_regs *regs,
+ unsigned long writeaccess,
+ unsigned long address)
+{
+ unsigned long error_code = 0;
+ mm_segment_t oldfs;
+ siginfo_t info;
+ insn_size_t instruction;
+ int tmp;
+
+ /* Intentional ifdef */
+#ifdef CONFIG_CPU_HAS_SR_RB
+ error_code = lookup_exception_vector();
+#endif
+
+ oldfs = get_fs();
+
+ if (user_mode(regs)) {
+ int si_code = BUS_ADRERR;
+ unsigned int user_action;
+
+ local_irq_enable();
+ inc_unaligned_user_access();
+
+ set_fs(USER_DS);
+ if (copy_from_user(&instruction, (insn_size_t *)(regs->pc & ~1),
+ sizeof(instruction))) {
+ set_fs(oldfs);
+ goto uspace_segv;
+ }
+ set_fs(oldfs);
+
+ /* shout about userspace fixups */
+ unaligned_fixups_notify(current, instruction, regs);
+
+ user_action = unaligned_user_action();
+ if (user_action & UM_FIXUP)
+ goto fixup;
+ if (user_action & UM_SIGNAL)
+ goto uspace_segv;
+ else {
+ /* ignore */
+ regs->pc += instruction_size(instruction);
+ return;
+ }
+
+fixup:
+ /* bad PC is not something we can fix */
+ if (regs->pc & 1) {
+ si_code = BUS_ADRALN;
+ goto uspace_segv;
+ }
+
+ set_fs(USER_DS);
+ tmp = handle_unaligned_access(instruction, regs,
+ &user_mem_access, 0,
+ address);
+ set_fs(oldfs);
+
+ if (tmp == 0)
+ return; /* sorted */
+uspace_segv:
+ printk(KERN_NOTICE "Sending SIGBUS to \"%s\" due to unaligned "
+ "access (PC %lx PR %lx)\n", current->comm, regs->pc,
+ regs->pr);
+
+ info.si_signo = SIGBUS;
+ info.si_errno = 0;
+ info.si_code = si_code;
+ info.si_addr = (void __user *)address;
+ force_sig_info(SIGBUS, &info, current);
+ } else {
+ inc_unaligned_kernel_access();
+
+ if (regs->pc & 1)
+ die("unaligned program counter", regs, error_code);
+
+ set_fs(KERNEL_DS);
+ if (copy_from_user(&instruction, (void __user *)(regs->pc),
+ sizeof(instruction))) {
+ /* Argh. Fault on the instruction itself.
+ This should never happen on non-SMP
+ */
+ set_fs(oldfs);
+ die("insn faulting in do_address_error", regs, 0);
+ }
+
+ unaligned_fixups_notify(current, instruction, regs);
+
+ handle_unaligned_access(instruction, regs, &user_mem_access,
+ 0, address);
+ set_fs(oldfs);
+ }
+}
+
+#ifdef CONFIG_SH_DSP
+/*
+ * SH-DSP support gerg@snapgear.com.
+ */
+int is_dsp_inst(struct pt_regs *regs)
+{
+ unsigned short inst = 0;
+
+ /*
+ * Safeguard if DSP mode is already enabled or we're lacking
+ * the DSP altogether.
+ */
+ if (!(current_cpu_data.flags & CPU_HAS_DSP) || (regs->sr & SR_DSP))
+ return 0;
+
+ get_user(inst, ((unsigned short *) regs->pc));
+
+ inst &= 0xf000;
+
+ /* Check for any type of DSP or support instruction */
+ if ((inst == 0xf000) || (inst == 0x4000))
+ return 1;
+
+ return 0;
+}
+#else
+#define is_dsp_inst(regs) (0)
+#endif /* CONFIG_SH_DSP */
+
+#ifdef CONFIG_CPU_SH2A
+asmlinkage void do_divide_error(unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs __regs)
+{
+ siginfo_t info;
+
+ switch (r4) {
+ case TRAP_DIVZERO_ERROR:
+ info.si_code = FPE_INTDIV;
+ break;
+ case TRAP_DIVOVF_ERROR:
+ info.si_code = FPE_INTOVF;
+ break;
+ }
+
+ force_sig_info(SIGFPE, &info, current);
+}
+#endif
+
+asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs __regs)
+{
+ struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
+ unsigned long error_code;
+ struct task_struct *tsk = current;
+
+#ifdef CONFIG_SH_FPU_EMU
+ unsigned short inst = 0;
+ int err;
+
+ get_user(inst, (unsigned short*)regs->pc);
+
+ err = do_fpu_inst(inst, regs);
+ if (!err) {
+ regs->pc += instruction_size(inst);
+ return;
+ }
+ /* not a FPU inst. */
+#endif
+
+#ifdef CONFIG_SH_DSP
+ /* Check if it's a DSP instruction */
+ if (is_dsp_inst(regs)) {
+ /* Enable DSP mode, and restart instruction. */
+ regs->sr |= SR_DSP;
+ /* Save DSP mode */
+ tsk->thread.dsp_status.status |= SR_DSP;
+ return;
+ }
+#endif
+
+ error_code = lookup_exception_vector();
+
+ local_irq_enable();
+ force_sig(SIGILL, tsk);
+ die_if_no_fixup("reserved instruction", regs, error_code);
+}
+
+#ifdef CONFIG_SH_FPU_EMU
+static int emulate_branch(unsigned short inst, struct pt_regs *regs)
+{
+ /*
+ * bfs: 8fxx: PC+=d*2+4;
+ * bts: 8dxx: PC+=d*2+4;
+ * bra: axxx: PC+=D*2+4;
+ * bsr: bxxx: PC+=D*2+4 after PR=PC+4;
+ * braf:0x23: PC+=Rn*2+4;
+ * bsrf:0x03: PC+=Rn*2+4 after PR=PC+4;
+ * jmp: 4x2b: PC=Rn;
+ * jsr: 4x0b: PC=Rn after PR=PC+4;
+ * rts: 000b: PC=PR;
+ */
+ if (((inst & 0xf000) == 0xb000) || /* bsr */
+ ((inst & 0xf0ff) == 0x0003) || /* bsrf */
+ ((inst & 0xf0ff) == 0x400b)) /* jsr */
+ regs->pr = regs->pc + 4;
+
+ if ((inst & 0xfd00) == 0x8d00) { /* bfs, bts */
+ regs->pc += SH_PC_8BIT_OFFSET(inst);
+ return 0;
+ }
+
+ if ((inst & 0xe000) == 0xa000) { /* bra, bsr */
+ regs->pc += SH_PC_12BIT_OFFSET(inst);
+ return 0;
+ }
+
+ if ((inst & 0xf0df) == 0x0003) { /* braf, bsrf */
+ regs->pc += regs->regs[(inst & 0x0f00) >> 8] + 4;
+ return 0;
+ }
+
+ if ((inst & 0xf0df) == 0x400b) { /* jmp, jsr */
+ regs->pc = regs->regs[(inst & 0x0f00) >> 8];
+ return 0;
+ }
+
+ if ((inst & 0xffff) == 0x000b) { /* rts */
+ regs->pc = regs->pr;
+ return 0;
+ }
+
+ return 1;
+}
+#endif
+
+asmlinkage void do_illegal_slot_inst(unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs __regs)
+{
+ struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
+ unsigned long inst;
+ struct task_struct *tsk = current;
+
+ if (kprobe_handle_illslot(regs->pc) == 0)
+ return;
+
+#ifdef CONFIG_SH_FPU_EMU
+ get_user(inst, (unsigned short *)regs->pc + 1);
+ if (!do_fpu_inst(inst, regs)) {
+ get_user(inst, (unsigned short *)regs->pc);
+ if (!emulate_branch(inst, regs))
+ return;
+ /* fault in branch.*/
+ }
+ /* not a FPU inst. */
+#endif
+
+ inst = lookup_exception_vector();
+
+ local_irq_enable();
+ force_sig(SIGILL, tsk);
+ die_if_no_fixup("illegal slot instruction", regs, inst);
+}
+
+asmlinkage void do_exception_error(unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs __regs)
+{
+ struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
+ long ex;
+
+ ex = lookup_exception_vector();
+ die_if_kernel("exception", regs, ex);
+}
+
+void __cpuinit per_cpu_trap_init(void)
+{
+ extern void *vbr_base;
+
+ /* NOTE: The VBR value should be at P1
+ (or P2, virtual "fixed" address space).
+ It should definitely not be a physical address. */
+
+ asm volatile("ldc %0, vbr"
+ : /* no output */
+ : "r" (&vbr_base)
+ : "memory");
+
+ /* disable exception blocking now when the vbr has been setup */
+ clear_bl_bit();
+}
+
+void *set_exception_table_vec(unsigned int vec, void *handler)
+{
+ extern void *exception_handling_table[];
+ void *old_handler;
+
+ old_handler = exception_handling_table[vec];
+ exception_handling_table[vec] = handler;
+ return old_handler;
+}
+
+void __init trap_init(void)
+{
+ set_exception_table_vec(TRAP_RESERVED_INST, do_reserved_inst);
+ set_exception_table_vec(TRAP_ILLEGAL_SLOT_INST, do_illegal_slot_inst);
+
+#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SH_FPU) || \
+ defined(CONFIG_SH_FPU_EMU)
+ /*
+ * For SH-4 lacking an FPU, treat floating point instructions as
+ * reserved. They'll be handled in the math-emu case, or faulted on
+ * otherwise.
+ */
+ set_exception_table_evt(0x800, do_reserved_inst);
+ set_exception_table_evt(0x820, do_illegal_slot_inst);
+#elif defined(CONFIG_SH_FPU)
+ set_exception_table_evt(0x800, fpu_state_restore_trap_handler);
+ set_exception_table_evt(0x820, fpu_state_restore_trap_handler);
+#endif
+
+#ifdef CONFIG_CPU_SH2
+ set_exception_table_vec(TRAP_ADDRESS_ERROR, address_error_trap_handler);
+#endif
+#ifdef CONFIG_CPU_SH2A
+ set_exception_table_vec(TRAP_DIVZERO_ERROR, do_divide_error);
+ set_exception_table_vec(TRAP_DIVOVF_ERROR, do_divide_error);
+#ifdef CONFIG_SH_FPU
+ set_exception_table_vec(TRAP_FPU_ERROR, fpu_error_trap_handler);
+#endif
+#endif
+
+#ifdef TRAP_UBC
+ set_exception_table_vec(TRAP_UBC, breakpoint_trap_handler);
+#endif
+}
+
+void show_stack(struct task_struct *tsk, unsigned long *sp)
+{
+ unsigned long stack;
+
+ if (!tsk)
+ tsk = current;
+ if (tsk == current)
+ sp = (unsigned long *)current_stack_pointer;
+ else
+ sp = (unsigned long *)tsk->thread.sp;
+
+ stack = (unsigned long)sp;
+ dump_mem("Stack: ", stack, THREAD_SIZE +
+ (unsigned long)task_stack_page(tsk));
+ show_trace(tsk, sp, NULL);
+}
+
+void dump_stack(void)
+{
+ show_stack(NULL, NULL);
+}
+EXPORT_SYMBOL(dump_stack);
diff --git a/arch/sh/kernel/traps_64.c b/arch/sh/kernel/traps_64.c
new file mode 100644
index 00000000..6c048609
--- /dev/null
+++ b/arch/sh/kernel/traps_64.c
@@ -0,0 +1,959 @@
+/*
+ * arch/sh/kernel/traps_64.c
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ * Copyright (C) 2003, 2004 Paul Mundt
+ * Copyright (C) 2003, 2004 Richard Curnow
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <linux/timer.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/kallsyms.h>
+#include <linux/interrupt.h>
+#include <linux/sysctl.h>
+#include <linux/module.h>
+#include <linux/perf_event.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <linux/atomic.h>
+#include <asm/processor.h>
+#include <asm/pgtable.h>
+#include <asm/fpu.h>
+
+#undef DEBUG_EXCEPTION
+#ifdef DEBUG_EXCEPTION
+/* implemented in ../lib/dbg.c */
+extern void show_excp_regs(char *fname, int trapnr, int signr,
+ struct pt_regs *regs);
+#else
+#define show_excp_regs(a, b, c, d)
+#endif
+
+static void do_unhandled_exception(int trapnr, int signr, char *str, char *fn_name,
+ unsigned long error_code, struct pt_regs *regs, struct task_struct *tsk);
+
+#define DO_ERROR(trapnr, signr, str, name, tsk) \
+asmlinkage void do_##name(unsigned long error_code, struct pt_regs *regs) \
+{ \
+ do_unhandled_exception(trapnr, signr, str, __stringify(name), error_code, regs, current); \
+}
+
+static DEFINE_SPINLOCK(die_lock);
+
+void die(const char * str, struct pt_regs * regs, long err)
+{
+ console_verbose();
+ spin_lock_irq(&die_lock);
+ printk("%s: %lx\n", str, (err & 0xffffff));
+ show_regs(regs);
+ spin_unlock_irq(&die_lock);
+ do_exit(SIGSEGV);
+}
+
+static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
+{
+ if (!user_mode(regs))
+ die(str, regs, err);
+}
+
+static void die_if_no_fixup(const char * str, struct pt_regs * regs, long err)
+{
+ if (!user_mode(regs)) {
+ const struct exception_table_entry *fixup;
+ fixup = search_exception_tables(regs->pc);
+ if (fixup) {
+ regs->pc = fixup->fixup;
+ return;
+ }
+ die(str, regs, err);
+ }
+}
+
+DO_ERROR(13, SIGILL, "illegal slot instruction", illegal_slot_inst, current)
+DO_ERROR(87, SIGSEGV, "address error (exec)", address_error_exec, current)
+
+
+/* Implement misaligned load/store handling for kernel (and optionally for user
+ mode too). Limitation : only SHmedia mode code is handled - there is no
+ handling at all for misaligned accesses occurring in SHcompact code yet. */
+
+static int misaligned_fixup(struct pt_regs *regs);
+
+asmlinkage void do_address_error_load(unsigned long error_code, struct pt_regs *regs)
+{
+ if (misaligned_fixup(regs) < 0) {
+ do_unhandled_exception(7, SIGSEGV, "address error(load)",
+ "do_address_error_load",
+ error_code, regs, current);
+ }
+ return;
+}
+
+asmlinkage void do_address_error_store(unsigned long error_code, struct pt_regs *regs)
+{
+ if (misaligned_fixup(regs) < 0) {
+ do_unhandled_exception(8, SIGSEGV, "address error(store)",
+ "do_address_error_store",
+ error_code, regs, current);
+ }
+ return;
+}
+
+#if defined(CONFIG_SH64_ID2815_WORKAROUND)
+
+#define OPCODE_INVALID 0
+#define OPCODE_USER_VALID 1
+#define OPCODE_PRIV_VALID 2
+
+/* getcon/putcon - requires checking which control register is referenced. */
+#define OPCODE_CTRL_REG 3
+
+/* Table of valid opcodes for SHmedia mode.
+ Form a 10-bit value by concatenating the major/minor opcodes i.e.
+ opcode[31:26,19:16]. The 6 MSBs of this value index into the following
+ array. The 4 LSBs select the bit-pair in the entry (bits 1:0 correspond to
+ LSBs==4'b0000 etc). */
+static unsigned long shmedia_opcode_table[64] = {
+ 0x55554044,0x54445055,0x15141514,0x14541414,0x00000000,0x10001000,0x01110055,0x04050015,
+ 0x00000444,0xc0000000,0x44545515,0x40405555,0x55550015,0x10005555,0x55555505,0x04050000,
+ 0x00000555,0x00000404,0x00040445,0x15151414,0x00000000,0x00000000,0x00000000,0x00000000,
+ 0x00000055,0x40404444,0x00000404,0xc0009495,0x00000000,0x00000000,0x00000000,0x00000000,
+ 0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
+ 0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
+ 0x80005050,0x04005055,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
+ 0x81055554,0x00000404,0x55555555,0x55555555,0x00000000,0x00000000,0x00000000,0x00000000
+};
+
+void do_reserved_inst(unsigned long error_code, struct pt_regs *regs)
+{
+ /* Workaround SH5-101 cut2 silicon defect #2815 :
+ in some situations, inter-mode branches from SHcompact -> SHmedia
+ which should take ITLBMISS or EXECPROT exceptions at the target
+ falsely take RESINST at the target instead. */
+
+ unsigned long opcode = 0x6ff4fff0; /* guaranteed reserved opcode */
+ unsigned long pc, aligned_pc;
+ int get_user_error;
+ int trapnr = 12;
+ int signr = SIGILL;
+ char *exception_name = "reserved_instruction";
+
+ pc = regs->pc;
+ if ((pc & 3) == 1) {
+ /* SHmedia : check for defect. This requires executable vmas
+ to be readable too. */
+ aligned_pc = pc & ~3;
+ if (!access_ok(VERIFY_READ, aligned_pc, sizeof(unsigned long))) {
+ get_user_error = -EFAULT;
+ } else {
+ get_user_error = __get_user(opcode, (unsigned long *)aligned_pc);
+ }
+ if (get_user_error >= 0) {
+ unsigned long index, shift;
+ unsigned long major, minor, combined;
+ unsigned long reserved_field;
+ reserved_field = opcode & 0xf; /* These bits are currently reserved as zero in all valid opcodes */
+ major = (opcode >> 26) & 0x3f;
+ minor = (opcode >> 16) & 0xf;
+ combined = (major << 4) | minor;
+ index = major;
+ shift = minor << 1;
+ if (reserved_field == 0) {
+ int opcode_state = (shmedia_opcode_table[index] >> shift) & 0x3;
+ switch (opcode_state) {
+ case OPCODE_INVALID:
+ /* Trap. */
+ break;
+ case OPCODE_USER_VALID:
+ /* Restart the instruction : the branch to the instruction will now be from an RTE
+ not from SHcompact so the silicon defect won't be triggered. */
+ return;
+ case OPCODE_PRIV_VALID:
+ if (!user_mode(regs)) {
+ /* Should only ever get here if a module has
+ SHcompact code inside it. If so, the same fix up is needed. */
+ return; /* same reason */
+ }
+ /* Otherwise, user mode trying to execute a privileged instruction -
+ fall through to trap. */
+ break;
+ case OPCODE_CTRL_REG:
+ /* If in privileged mode, return as above. */
+ if (!user_mode(regs)) return;
+ /* In user mode ... */
+ if (combined == 0x9f) { /* GETCON */
+ unsigned long regno = (opcode >> 20) & 0x3f;
+ if (regno >= 62) {
+ return;
+ }
+ /* Otherwise, reserved or privileged control register, => trap */
+ } else if (combined == 0x1bf) { /* PUTCON */
+ unsigned long regno = (opcode >> 4) & 0x3f;
+ if (regno >= 62) {
+ return;
+ }
+ /* Otherwise, reserved or privileged control register, => trap */
+ } else {
+ /* Trap */
+ }
+ break;
+ default:
+ /* Fall through to trap. */
+ break;
+ }
+ }
+ /* fall through to normal resinst processing */
+ } else {
+ /* Error trying to read opcode. This typically means a
+ real fault, not a RESINST any more. So change the
+ codes. */
+ trapnr = 87;
+ exception_name = "address error (exec)";
+ signr = SIGSEGV;
+ }
+ }
+
+ do_unhandled_exception(trapnr, signr, exception_name, "do_reserved_inst", error_code, regs, current);
+}
+
+#else /* CONFIG_SH64_ID2815_WORKAROUND */
+
+/* If the workaround isn't needed, this is just a straightforward reserved
+ instruction */
+DO_ERROR(12, SIGILL, "reserved instruction", reserved_inst, current)
+
+#endif /* CONFIG_SH64_ID2815_WORKAROUND */
+
+/* Called with interrupts disabled */
+asmlinkage void do_exception_error(unsigned long ex, struct pt_regs *regs)
+{
+ show_excp_regs(__func__, -1, -1, regs);
+ die_if_kernel("exception", regs, ex);
+}
+
+int do_unknown_trapa(unsigned long scId, struct pt_regs *regs)
+{
+ /* Syscall debug */
+ printk("System call ID error: [0x1#args:8 #syscall:16 0x%lx]\n", scId);
+
+ die_if_kernel("unknown trapa", regs, scId);
+
+ return -ENOSYS;
+}
+
+void show_stack(struct task_struct *tsk, unsigned long *sp)
+{
+#ifdef CONFIG_KALLSYMS
+ extern void sh64_unwind(struct pt_regs *regs);
+ struct pt_regs *regs;
+
+ regs = tsk ? tsk->thread.kregs : NULL;
+
+ sh64_unwind(regs);
+#else
+ printk(KERN_ERR "Can't backtrace on sh64 without CONFIG_KALLSYMS\n");
+#endif
+}
+
+void show_task(unsigned long *sp)
+{
+ show_stack(NULL, sp);
+}
+
+void dump_stack(void)
+{
+ show_task(NULL);
+}
+/* Needed by any user of WARN_ON in view of the defn in include/asm-sh/bug.h */
+EXPORT_SYMBOL(dump_stack);
+
+static void do_unhandled_exception(int trapnr, int signr, char *str, char *fn_name,
+ unsigned long error_code, struct pt_regs *regs, struct task_struct *tsk)
+{
+ show_excp_regs(fn_name, trapnr, signr, regs);
+ tsk->thread.error_code = error_code;
+ tsk->thread.trap_no = trapnr;
+
+ if (user_mode(regs))
+ force_sig(signr, tsk);
+
+ die_if_no_fixup(str, regs, error_code);
+}
+
+static int read_opcode(unsigned long long pc, unsigned long *result_opcode, int from_user_mode)
+{
+ int get_user_error;
+ unsigned long aligned_pc;
+ unsigned long opcode;
+
+ if ((pc & 3) == 1) {
+ /* SHmedia */
+ aligned_pc = pc & ~3;
+ if (from_user_mode) {
+ if (!access_ok(VERIFY_READ, aligned_pc, sizeof(unsigned long))) {
+ get_user_error = -EFAULT;
+ } else {
+ get_user_error = __get_user(opcode, (unsigned long *)aligned_pc);
+ *result_opcode = opcode;
+ }
+ return get_user_error;
+ } else {
+ /* If the fault was in the kernel, we can either read
+ * this directly, or if not, we fault.
+ */
+ *result_opcode = *(unsigned long *) aligned_pc;
+ return 0;
+ }
+ } else if ((pc & 1) == 0) {
+ /* SHcompact */
+ /* TODO : provide handling for this. We don't really support
+ user-mode SHcompact yet, and for a kernel fault, this would
+ have to come from a module built for SHcompact. */
+ return -EFAULT;
+ } else {
+ /* misaligned */
+ return -EFAULT;
+ }
+}
+
+static int address_is_sign_extended(__u64 a)
+{
+ __u64 b;
+#if (NEFF == 32)
+ b = (__u64)(__s64)(__s32)(a & 0xffffffffUL);
+ return (b == a) ? 1 : 0;
+#else
+#error "Sign extend check only works for NEFF==32"
+#endif
+}
+
+static int generate_and_check_address(struct pt_regs *regs,
+ __u32 opcode,
+ int displacement_not_indexed,
+ int width_shift,
+ __u64 *address)
+{
+ /* return -1 for fault, 0 for OK */
+
+ __u64 base_address, addr;
+ int basereg;
+
+ basereg = (opcode >> 20) & 0x3f;
+ base_address = regs->regs[basereg];
+ if (displacement_not_indexed) {
+ __s64 displacement;
+ displacement = (opcode >> 10) & 0x3ff;
+ displacement = ((displacement << 54) >> 54); /* sign extend */
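+ /* e.g. a raw field of 0x3ff sign extends to -1, giving an offset of -(1 << width_shift) */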
+ addr = (__u64)((__s64)base_address + (displacement << width_shift));
+ } else {
+ __u64 offset;
+ int offsetreg;
+ offsetreg = (opcode >> 10) & 0x3f;
+ offset = regs->regs[offsetreg];
+ addr = base_address + offset;
+ }
+
+ /* Check sign extended */
+ if (!address_is_sign_extended(addr)) {
+ return -1;
+ }
+
+ /* Check accessible. For misaligned access in the kernel, assume the
+ address is always accessible (and if not, just fault when the
+ load/store gets done.) */
+ if (user_mode(regs)) {
+ if (addr >= TASK_SIZE) {
+ return -1;
+ }
+ /* Do access_ok check later - it depends on whether it's a load or a store. */
+ }
+
+ *address = addr;
+ return 0;
+}
+
+static int user_mode_unaligned_fixup_count = 10;
+static int user_mode_unaligned_fixup_enable = 1;
+static int kernel_mode_unaligned_fixup_count = 32;
+
+static void misaligned_kernel_word_load(__u64 address, int do_sign_extend, __u64 *result)
+{
+ unsigned short x;
+ unsigned char *p, *q;
+ p = (unsigned char *) (int) address;
+ q = (unsigned char *) &x;
+ q[0] = p[0];
+ q[1] = p[1];
+
+ if (do_sign_extend) {
+ *result = (__u64)(__s64) *(short *) &x;
+ } else {
+ *result = (__u64) x;
+ }
+}
+
+static void misaligned_kernel_word_store(__u64 address, __u64 value)
+{
+ unsigned short x;
+ unsigned char *p, *q;
+ p = (unsigned char *) (int) address;
+ q = (unsigned char *) &x;
+
+ x = (__u16) value;
+ p[0] = q[0];
+ p[1] = q[1];
+}
+
+static int misaligned_load(struct pt_regs *regs,
+ __u32 opcode,
+ int displacement_not_indexed,
+ int width_shift,
+ int do_sign_extend)
+{
+ /* Return -1 for a fault, 0 for OK */
+ int error;
+ int destreg;
+ __u64 address;
+
+ error = generate_and_check_address(regs, opcode,
+ displacement_not_indexed, width_shift, &address);
+ if (error < 0) {
+ return error;
+ }
+
+ perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, address);
+
+ destreg = (opcode >> 4) & 0x3f;
+ if (user_mode(regs)) {
+ __u64 buffer;
+
+ if (!access_ok(VERIFY_READ, (unsigned long) address, 1UL<<width_shift)) {
+ return -1;
+ }
+
+ if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) {
+ return -1; /* fault */
+ }
+ switch (width_shift) {
+ case 1:
+ if (do_sign_extend) {
+ regs->regs[destreg] = (__u64)(__s64) *(__s16 *) &buffer;
+ } else {
+ regs->regs[destreg] = (__u64) *(__u16 *) &buffer;
+ }
+ break;
+ case 2:
+ regs->regs[destreg] = (__u64)(__s64) *(__s32 *) &buffer;
+ break;
+ case 3:
+ regs->regs[destreg] = buffer;
+ break;
+ default:
+ printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n",
+ width_shift, (unsigned long) regs->pc);
+ break;
+ }
+ } else {
+ /* kernel mode - we can take short cuts since if we fault, it's a genuine bug */
+ __u64 lo, hi;
+
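+ /*
+ * ldlo/ldhi fetch the bytes of the unaligned datum that lie below and
+ * above the alignment boundary; OR-ing the two partial results
+ * reconstructs the full value without taking an alignment trap.
+ */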
+ switch (width_shift) {
+ case 1:
+ misaligned_kernel_word_load(address, do_sign_extend, &regs->regs[destreg]);
+ break;
+ case 2:
+ asm ("ldlo.l %1, 0, %0" : "=r" (lo) : "r" (address));
+ asm ("ldhi.l %1, 3, %0" : "=r" (hi) : "r" (address));
+ regs->regs[destreg] = lo | hi;
+ break;
+ case 3:
+ asm ("ldlo.q %1, 0, %0" : "=r" (lo) : "r" (address));
+ asm ("ldhi.q %1, 7, %0" : "=r" (hi) : "r" (address));
+ regs->regs[destreg] = lo | hi;
+ break;
+
+ default:
+ printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n",
+ width_shift, (unsigned long) regs->pc);
+ break;
+ }
+ }
+
+ return 0;
+
+}
+
+static int misaligned_store(struct pt_regs *regs,
+ __u32 opcode,
+ int displacement_not_indexed,
+ int width_shift)
+{
+ /* Return -1 for a fault, 0 for OK */
+ int error;
+ int srcreg;
+ __u64 address;
+
+ error = generate_and_check_address(regs, opcode,
+ displacement_not_indexed, width_shift, &address);
+ if (error < 0) {
+ return error;
+ }
+
+ perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, address);
+
+ srcreg = (opcode >> 4) & 0x3f;
+ if (user_mode(regs)) {
+ __u64 buffer;
+
+ if (!access_ok(VERIFY_WRITE, (unsigned long) address, 1UL<<width_shift)) {
+ return -1;
+ }
+
+ switch (width_shift) {
+ case 1:
+ *(__u16 *) &buffer = (__u16) regs->regs[srcreg];
+ break;
+ case 2:
+ *(__u32 *) &buffer = (__u32) regs->regs[srcreg];
+ break;
+ case 3:
+ buffer = regs->regs[srcreg];
+ break;
+ default:
+ printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n",
+ width_shift, (unsigned long) regs->pc);
+ break;
+ }
+
+ if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) {
+ return -1; /* fault */
+ }
+ } else {
+ /* kernel mode - we can take short cuts since if we fault, it's a genuine bug */
+ __u64 val = regs->regs[srcreg];
+
+ switch (width_shift) {
+ case 1:
+ misaligned_kernel_word_store(address, val);
+ break;
+ case 2:
+ asm ("stlo.l %1, 0, %0" : : "r" (val), "r" (address));
+ asm ("sthi.l %1, 3, %0" : : "r" (val), "r" (address));
+ break;
+ case 3:
+ asm ("stlo.q %1, 0, %0" : : "r" (val), "r" (address));
+ asm ("sthi.q %1, 7, %0" : : "r" (val), "r" (address));
+ break;
+
+ default:
+ printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n",
+ width_shift, (unsigned long) regs->pc);
+ break;
+ }
+ }
+
+ return 0;
+
+}
+
+/* Never need to fix up misaligned FPU accesses within the kernel since that's a real
+ error. */
+static int misaligned_fpu_load(struct pt_regs *regs,
+ __u32 opcode,
+ int displacement_not_indexed,
+ int width_shift,
+ int do_paired_load)
+{
+ /* Return -1 for a fault, 0 for OK */
+ int error;
+ int destreg;
+ __u64 address;
+
+ error = generate_and_check_address(regs, opcode,
+ displacement_not_indexed, width_shift, &address);
+ if (error < 0) {
+ return error;
+ }
+
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, address);
+
+ destreg = (opcode >> 4) & 0x3f;
+ if (user_mode(regs)) {
+ __u64 buffer;
+ __u32 buflo, bufhi;
+
+ if (!access_ok(VERIFY_READ, (unsigned long) address, 1UL<<width_shift)) {
+ return -1;
+ }
+
+ if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) {
+ return -1; /* fault */
+ }
+ /* 'current' may be the current owner of the FPU state, so
+ context switch the registers into memory so they can be
+ indexed by register number. */
+ if (last_task_used_math == current) {
+ enable_fpu();
+ save_fpu(current);
+ disable_fpu();
+ last_task_used_math = NULL;
+ regs->sr |= SR_FD;
+ }
+
+ buflo = *(__u32*) &buffer;
+ bufhi = *(1 + (__u32*) &buffer);
+
+ switch (width_shift) {
+ case 2:
+ current->thread.xstate->hardfpu.fp_regs[destreg] = buflo;
+ break;
+ case 3:
+ if (do_paired_load) {
+ current->thread.xstate->hardfpu.fp_regs[destreg] = buflo;
+ current->thread.xstate->hardfpu.fp_regs[destreg+1] = bufhi;
+ } else {
+#if defined(CONFIG_CPU_LITTLE_ENDIAN)
+ current->thread.xstate->hardfpu.fp_regs[destreg] = bufhi;
+ current->thread.xstate->hardfpu.fp_regs[destreg+1] = buflo;
+#else
+ current->thread.xstate->hardfpu.fp_regs[destreg] = buflo;
+ current->thread.xstate->hardfpu.fp_regs[destreg+1] = bufhi;
+#endif
+ }
+ break;
+ default:
+ printk("Unexpected width_shift %d in misaligned_fpu_load, PC=%08lx\n",
+ width_shift, (unsigned long) regs->pc);
+ break;
+ }
+ return 0;
+ } else {
+ die ("Misaligned FPU load inside kernel", regs, 0);
+ return -1;
+ }
+
+
+}
+
+static int misaligned_fpu_store(struct pt_regs *regs,
+ __u32 opcode,
+ int displacement_not_indexed,
+ int width_shift,
+ int do_paired_load)
+{
+ /* Return -1 for a fault, 0 for OK */
+ int error;
+ int srcreg;
+ __u64 address;
+
+ error = generate_and_check_address(regs, opcode,
+ displacement_not_indexed, width_shift, &address);
+ if (error < 0) {
+ return error;
+ }
+
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, address);
+
+ srcreg = (opcode >> 4) & 0x3f;
+ if (user_mode(regs)) {
+ __u64 buffer;
+ /* Initialise these to NaNs. */
+ __u32 buflo=0xffffffffUL, bufhi=0xffffffffUL;
+
+ if (!access_ok(VERIFY_WRITE, (unsigned long) address, 1UL<<width_shift)) {
+ return -1;
+ }
+
+ /* 'current' may be the current owner of the FPU state, so
+ context switch the registers into memory so they can be
+ indexed by register number. */
+ if (last_task_used_math == current) {
+ enable_fpu();
+ save_fpu(current);
+ disable_fpu();
+ last_task_used_math = NULL;
+ regs->sr |= SR_FD;
+ }
+
+ switch (width_shift) {
+ case 2:
+ buflo = current->thread.xstate->hardfpu.fp_regs[srcreg];
+ break;
+ case 3:
+ if (do_paired_load) {
+ buflo = current->thread.xstate->hardfpu.fp_regs[srcreg];
+ bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg+1];
+ } else {
+#if defined(CONFIG_CPU_LITTLE_ENDIAN)
+ bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg];
+ buflo = current->thread.xstate->hardfpu.fp_regs[srcreg+1];
+#else
+ buflo = current->thread.xstate->hardfpu.fp_regs[srcreg];
+ bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg+1];
+#endif
+ }
+ break;
+ default:
+ printk("Unexpected width_shift %d in misaligned_fpu_store, PC=%08lx\n",
+ width_shift, (unsigned long) regs->pc);
+ break;
+ }
+
+ *(__u32*) &buffer = buflo;
+ *(1 + (__u32*) &buffer) = bufhi;
+ if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) {
+ return -1; /* fault */
+ }
+ return 0;
+ } else {
+ die ("Misaligned FPU load inside kernel", regs, 0);
+ return -1;
+ }
+}
+
+static int misaligned_fixup(struct pt_regs *regs)
+{
+ unsigned long opcode;
+ int error;
+ int major, minor;
+
+ if (!user_mode_unaligned_fixup_enable)
+ return -1;
+
+ error = read_opcode(regs->pc, &opcode, user_mode(regs));
+ if (error < 0) {
+ return error;
+ }
+ major = (opcode >> 26) & 0x3f;
+ minor = (opcode >> 16) & 0xf;
+
+ if (user_mode(regs) && (user_mode_unaligned_fixup_count > 0)) {
+ --user_mode_unaligned_fixup_count;
+ /* Only do 'count' worth of these reports, to remove a potential DoS against syslog */
+ printk("Fixing up unaligned userspace access in \"%s\" pid=%d pc=0x%08x ins=0x%08lx\n",
+ current->comm, task_pid_nr(current), (__u32)regs->pc, opcode);
+ } else if (!user_mode(regs) && (kernel_mode_unaligned_fixup_count > 0)) {
+ --kernel_mode_unaligned_fixup_count;
+ if (in_interrupt()) {
+ printk("Fixing up unaligned kernelspace access in interrupt pc=0x%08x ins=0x%08lx\n",
+ (__u32)regs->pc, opcode);
+ } else {
+ printk("Fixing up unaligned kernelspace access in \"%s\" pid=%d pc=0x%08x ins=0x%08lx\n",
+ current->comm, task_pid_nr(current), (__u32)regs->pc, opcode);
+ }
+ }
+
+
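+ /*
+ * The case labels below are written as <byte> >> 2 so that they line
+ * up with the 6-bit major opcode field extracted above.
+ */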
+ switch (major) {
+ case (0x84>>2): /* LD.W */
+ error = misaligned_load(regs, opcode, 1, 1, 1);
+ break;
+ case (0xb0>>2): /* LD.UW */
+ error = misaligned_load(regs, opcode, 1, 1, 0);
+ break;
+ case (0x88>>2): /* LD.L */
+ error = misaligned_load(regs, opcode, 1, 2, 1);
+ break;
+ case (0x8c>>2): /* LD.Q */
+ error = misaligned_load(regs, opcode, 1, 3, 0);
+ break;
+
+ case (0xa4>>2): /* ST.W */
+ error = misaligned_store(regs, opcode, 1, 1);
+ break;
+ case (0xa8>>2): /* ST.L */
+ error = misaligned_store(regs, opcode, 1, 2);
+ break;
+ case (0xac>>2): /* ST.Q */
+ error = misaligned_store(regs, opcode, 1, 3);
+ break;
+
+ case (0x40>>2): /* indexed loads */
+ switch (minor) {
+ case 0x1: /* LDX.W */
+ error = misaligned_load(regs, opcode, 0, 1, 1);
+ break;
+ case 0x5: /* LDX.UW */
+ error = misaligned_load(regs, opcode, 0, 1, 0);
+ break;
+ case 0x2: /* LDX.L */
+ error = misaligned_load(regs, opcode, 0, 2, 1);
+ break;
+ case 0x3: /* LDX.Q */
+ error = misaligned_load(regs, opcode, 0, 3, 0);
+ break;
+ default:
+ error = -1;
+ break;
+ }
+ break;
+
+ case (0x60>>2): /* indexed stores */
+ switch (minor) {
+ case 0x1: /* STX.W */
+ error = misaligned_store(regs, opcode, 0, 1);
+ break;
+ case 0x2: /* STX.L */
+ error = misaligned_store(regs, opcode, 0, 2);
+ break;
+ case 0x3: /* STX.Q */
+ error = misaligned_store(regs, opcode, 0, 3);
+ break;
+ default:
+ error = -1;
+ break;
+ }
+ break;
+
+ case (0x94>>2): /* FLD.S */
+ error = misaligned_fpu_load(regs, opcode, 1, 2, 0);
+ break;
+ case (0x98>>2): /* FLD.P */
+ error = misaligned_fpu_load(regs, opcode, 1, 3, 1);
+ break;
+ case (0x9c>>2): /* FLD.D */
+ error = misaligned_fpu_load(regs, opcode, 1, 3, 0);
+ break;
+ case (0x1c>>2): /* floating indexed loads */
+ switch (minor) {
+ case 0x8: /* FLDX.S */
+ error = misaligned_fpu_load(regs, opcode, 0, 2, 0);
+ break;
+ case 0xd: /* FLDX.P */
+ error = misaligned_fpu_load(regs, opcode, 0, 3, 1);
+ break;
+ case 0x9: /* FLDX.D */
+ error = misaligned_fpu_load(regs, opcode, 0, 3, 0);
+ break;
+ default:
+ error = -1;
+ break;
+ }
+ break;
+ case (0xb4>>2): /* FST.S */
+ error = misaligned_fpu_store(regs, opcode, 1, 2, 0);
+ break;
+ case (0xb8>>2): /* FST.P */
+ error = misaligned_fpu_store(regs, opcode, 1, 3, 1);
+ break;
+ case (0xbc>>2): /* FST.D */
+ error = misaligned_fpu_store(regs, opcode, 1, 3, 0);
+ break;
+ case (0x3c>>2): /* floating indexed stores */
+ switch (minor) {
+ case 0x8: /* FSTX.S */
+ error = misaligned_fpu_store(regs, opcode, 0, 2, 0);
+ break;
+ case 0xd: /* FSTX.P */
+ error = misaligned_fpu_store(regs, opcode, 0, 3, 1);
+ break;
+ case 0x9: /* FSTX.D */
+ error = misaligned_fpu_store(regs, opcode, 0, 3, 0);
+ break;
+ default:
+ error = -1;
+ break;
+ }
+ break;
+
+ default:
+ /* Fault */
+ error = -1;
+ break;
+ }
+
+ if (error < 0) {
+ return error;
+ } else {
+ regs->pc += 4; /* Skip the instruction that's just been emulated */
+ return 0;
+ }
+
+}
+
+static ctl_table unaligned_table[] = {
+ {
+ .procname = "kernel_reports",
+ .data = &kernel_mode_unaligned_fixup_count,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec
+ },
+ {
+ .procname = "user_reports",
+ .data = &user_mode_unaligned_fixup_count,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec
+ },
+ {
+ .procname = "user_enable",
+ .data = &user_mode_unaligned_fixup_enable,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec
+ },
+ {}
+};
+
+static ctl_table unaligned_root[] = {
+ {
+ .procname = "unaligned_fixup",
+ .mode = 0555,
+ .child = unaligned_table
+ },
+ {}
+};
+
+static ctl_table sh64_root[] = {
+ {
+ .procname = "sh64",
+ .mode = 0555,
+ .child = unaligned_root
+ },
+ {}
+};
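+
+/*
+ * The nested tables above surface as /proc/sys/sh64/unaligned_fixup/
+ * {kernel_reports,user_reports,user_enable}, controlling the report
+ * counters and the fixup enable checked by misaligned_fixup() above.
+ */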
+static struct ctl_table_header *sysctl_header;
+static int __init init_sysctl(void)
+{
+ sysctl_header = register_sysctl_table(sh64_root);
+ return 0;
+}
+
+__initcall(init_sysctl);
+
+
+asmlinkage void do_debug_interrupt(unsigned long code, struct pt_regs *regs)
+{
+ u64 peek_real_address_q(u64 addr);
+ u64 poke_real_address_q(u64 addr, u64 val);
+ unsigned long long DM_EXP_CAUSE_PHY = 0x0c100010;
+ unsigned long long exp_cause;
+ /* It's not worth ioremapping the debug module registers for the amount
+ of access we make to them - just go direct to their physical
+ addresses. */
+ exp_cause = peek_real_address_q(DM_EXP_CAUSE_PHY);
+ if (exp_cause & ~4) {
+ printk("DM.EXP_CAUSE had unexpected bits set (=%08lx)\n",
+ (unsigned long)(exp_cause & 0xffffffff));
+ }
+ show_state();
+ /* Clear all DEBUGINT causes */
+ poke_real_address_q(DM_EXP_CAUSE_PHY, 0x0);
+}
+
+void __cpuinit per_cpu_trap_init(void)
+{
+ /* Nothing to do for now, VBR initialization later. */
+}
diff --git a/arch/sh/kernel/unwinder.c b/arch/sh/kernel/unwinder.c
new file mode 100644
index 00000000..521b5432
--- /dev/null
+++ b/arch/sh/kernel/unwinder.c
@@ -0,0 +1,164 @@
+/*
+ * Copyright (C) 2009 Matt Fleming
+ *
+ * Based, in part, on kernel/time/clocksource.c.
+ *
+ * This file provides arbitration code for stack unwinders.
+ *
+ * Multiple stack unwinders can be available on a system, usually with
+ * the most accurate unwinder being the currently active one.
+ */
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/module.h>
+#include <asm/unwinder.h>
+#include <linux/atomic.h>
+
+/*
+ * This is the most basic stack unwinder an architecture can
+ * provide. For architectures without reliable frame pointers, e.g.
+ * RISC CPUs, it can be implemented by looking through the stack for
+ * addresses that lie within the kernel text section.
+ *
+ * Other CPUs, e.g. x86, can use their frame pointer register to
+ * construct more accurate stack traces.
+ */
+static struct list_head unwinder_list;
+static struct unwinder stack_reader = {
+ .name = "stack-reader",
+ .dump = stack_reader_dump,
+ .rating = 50,
+ .list = {
+ .next = &unwinder_list,
+ .prev = &unwinder_list,
+ },
+};
+
+/*
+ * "curr_unwinder" points to the stack unwinder currently in use. This
+ * is the unwinder with the highest rating.
+ *
+ * "unwinder_list" is a linked-list of all available unwinders, sorted
+ * by rating.
+ *
+ * All modifications of "curr_unwinder" and "unwinder_list" must be
+ * performed whilst holding "unwinder_lock".
+ */
+static struct unwinder *curr_unwinder = &stack_reader;
+
+static struct list_head unwinder_list = {
+ .next = &stack_reader.list,
+ .prev = &stack_reader.list,
+};
+
+static DEFINE_SPINLOCK(unwinder_lock);
+
+/**
+ * select_unwinder - Select the best registered stack unwinder.
+ *
+ * Private function. Must hold unwinder_lock when called.
+ *
+ * Select the stack unwinder with the best rating. This is useful for
+ * setting up curr_unwinder.
+ */
+static struct unwinder *select_unwinder(void)
+{
+ struct unwinder *best;
+
+ if (list_empty(&unwinder_list))
+ return NULL;
+
+ best = list_entry(unwinder_list.next, struct unwinder, list);
+ if (best == curr_unwinder)
+ return NULL;
+
+ return best;
+}
+
+/*
+ * Enqueue the stack unwinder sorted by rating.
+ */
+static int unwinder_enqueue(struct unwinder *ops)
+{
+ struct list_head *tmp, *entry = &unwinder_list;
+
+ list_for_each(tmp, &unwinder_list) {
+ struct unwinder *o;
+
+ o = list_entry(tmp, struct unwinder, list);
+ if (o == ops)
+ return -EBUSY;
+ /* Keep track of where to insert the new entry */
+ if (o->rating >= ops->rating)
+ entry = tmp;
+ }
+ list_add(&ops->list, entry);
+
+ return 0;
+}
+
+/**
+ * unwinder_register - Used to install a new stack unwinder
+ * @u: unwinder to be registered
+ *
+ * Install the new stack unwinder on the unwinder list, which is sorted
+ * by rating.
+ *
+ * Returns -EBUSY if registration fails, zero otherwise.
+ */
+int unwinder_register(struct unwinder *u)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&unwinder_lock, flags);
+ ret = unwinder_enqueue(u);
+ if (!ret)
+ curr_unwinder = select_unwinder();
+ spin_unlock_irqrestore(&unwinder_lock, flags);
+
+ return ret;
+}
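As a usage sketch, a higher-rated unwinder registers itself as shown below and, because unwinder_enqueue() sorts by rating, becomes curr_unwinder; the names are modelled on the in-tree DWARF unwinder but are assumptions here, and the snippet relies on the includes already present in this file:

static void example_dwarf_dump(struct task_struct *task, struct pt_regs *regs,
			       unsigned long *sp,
			       const struct stacktrace_ops *ops, void *data)
{
	/* ... walk DWARF CFI records to build a precise trace ... */
}

static struct unwinder example_dwarf_unwinder = {
	.name	= "dwarf-unwinder",
	.dump	= example_dwarf_dump,
	.rating	= 150,	/* preferred over the 50-rated stack-reader above */
};

static int __init example_unwinder_init(void)
{
	return unwinder_register(&example_dwarf_unwinder);
}
early_initcall(example_unwinder_init);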
+
+int unwinder_faulted;
+
+/*
+ * Unwind the call stack and pass information to the stacktrace_ops
+ * functions. Also handle the case where we need to switch to a new
+ * stack dumper because the current one faulted unexpectedly.
+ */
+void unwind_stack(struct task_struct *task, struct pt_regs *regs,
+ unsigned long *sp, const struct stacktrace_ops *ops,
+ void *data)
+{
+ unsigned long flags;
+
+ /*
+ * The problem with unwinders with high ratings is that they are
+ * inherently more complicated than the simple ones with lower
+ * ratings. We are therefore more likely to fault in the
+ * complicated ones, e.g. hitting BUG()s. If we fault in the
+ * code for the current stack unwinder we try to downgrade to
+ * one with a lower rating.
+ *
+ * Hopefully this will give us a semi-reliable stacktrace so we
+ * can diagnose why curr_unwinder->dump() faulted.
+ */
+ if (unwinder_faulted) {
+ spin_lock_irqsave(&unwinder_lock, flags);
+
+ /* Make sure no one beat us to changing the unwinder */
+ if (unwinder_faulted && !list_is_singular(&unwinder_list)) {
+ list_del(&curr_unwinder->list);
+ curr_unwinder = select_unwinder();
+
+ unwinder_faulted = 0;
+ }
+
+ spin_unlock_irqrestore(&unwinder_lock, flags);
+ }
+
+ curr_unwinder->dump(task, regs, sp, ops, data);
+}
+EXPORT_SYMBOL_GPL(unwind_stack);
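The downgrade path above is driven by unwinder_faulted, which is set outside this file; a hedged sketch of the kind of caller assumed, relying on the extern declaration in asm/unwinder.h:

#include <linux/kernel.h>	/* dump_stack() */
#include <asm/unwinder.h>	/* unwinder_faulted */

/* Illustrative only: flag the active unwinder as broken and retry. */
static void example_unwinder_recover(void)
{
	unwinder_faulted = 1;	/* consumed and cleared by unwind_stack() */
	dump_stack();		/* retried trace uses a lower-rated unwinder */
}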
diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S
new file mode 100644
index 00000000..c98905f7
--- /dev/null
+++ b/arch/sh/kernel/vmlinux.lds.S
@@ -0,0 +1,88 @@
+/*
+ * ld script to make SuperH Linux kernel
+ * Written by Niibe Yutaka and Paul Mundt
+ */
+#ifdef CONFIG_SUPERH64
+#define LOAD_OFFSET PAGE_OFFSET
+OUTPUT_ARCH(sh:sh5)
+#else
+#define LOAD_OFFSET 0
+OUTPUT_ARCH(sh)
+#endif
+
+#include <asm/thread_info.h>
+#include <asm/cache.h>
+#include <asm/vmlinux.lds.h>
+
+#ifdef CONFIG_PMB
+ #define MEMORY_OFFSET 0
+#else
+ #define MEMORY_OFFSET __MEMORY_START
+#endif
+
+ENTRY(_start)
+SECTIONS
+{
+ . = PAGE_OFFSET + MEMORY_OFFSET + PHYSICAL_OFFSET + CONFIG_ZERO_PAGE_OFFSET;
+
+ _text = .; /* Text and read-only data */
+
+ .empty_zero_page : AT(ADDR(.empty_zero_page) - LOAD_OFFSET) {
+ *(.empty_zero_page)
+ } = 0
+
+ .text : AT(ADDR(.text) - LOAD_OFFSET) {
+ HEAD_TEXT
+ TEXT_TEXT
+ EXTRA_TEXT
+ SCHED_TEXT
+ LOCK_TEXT
+ KPROBES_TEXT
+ IRQENTRY_TEXT
+ *(.fixup)
+ *(.gnu.warning)
+ _etext = .; /* End of text section */
+ } = 0x0009
+
+ EXCEPTION_TABLE(16)
+ NOTES
+
+ _sdata = .;
+ RO_DATA(PAGE_SIZE)
+ RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
+ _edata = .;
+
+ DWARF_EH_FRAME
+
+ . = ALIGN(PAGE_SIZE); /* Init code and data */
+ __init_begin = .;
+ INIT_TEXT_SECTION(PAGE_SIZE)
+ INIT_DATA_SECTION(16)
+
+ . = ALIGN(4);
+ .machvec.init : AT(ADDR(.machvec.init) - LOAD_OFFSET) {
+ __machvec_start = .;
+ *(.machvec.init)
+ __machvec_end = .;
+ }
+
+ PERCPU_SECTION(L1_CACHE_BYTES)
+
+ /*
+ * .exit.text is discarded at runtime, not link time, to deal with
+ * references from __bug_table
+ */
+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) { EXIT_TEXT }
+ .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) { EXIT_DATA }
+
+ . = ALIGN(PAGE_SIZE);
+ __init_end = .;
+ BSS_SECTION(0, PAGE_SIZE, 4)
+ _ebss = .; /* uClinux MTD sucks */
+ _end = . ;
+
+ STABS_DEBUG
+ DWARF_DEBUG
+
+ DISCARDS
+}
diff --git a/arch/sh/kernel/vsyscall/Makefile b/arch/sh/kernel/vsyscall/Makefile
new file mode 100644
index 00000000..8f0ea5fc
--- /dev/null
+++ b/arch/sh/kernel/vsyscall/Makefile
@@ -0,0 +1,36 @@
+obj-y += vsyscall.o vsyscall-syscall.o
+
+$(obj)/vsyscall-syscall.o: \
+ $(foreach F,trapa,$(obj)/vsyscall-$F.so)
+
+# Teach kbuild about targets
+targets += $(foreach F,trapa,vsyscall-$F.o vsyscall-$F.so)
+targets += vsyscall-note.o vsyscall.lds
+
+# The DSO images are built using a special linker script
+quiet_cmd_syscall = SYSCALL $@
+ cmd_syscall = $(CC) -nostdlib $(SYSCFLAGS_$(@F)) \
+ -Wl,-T,$(filter-out FORCE,$^) -o $@
+
+export CPPFLAGS_vsyscall.lds += -P -C -Ush
+
+vsyscall-flags = -shared -s -Wl,-soname=linux-gate.so.1 \
+ $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
+
+SYSCFLAGS_vsyscall-trapa.so = $(vsyscall-flags)
+
+$(obj)/vsyscall-trapa.so: \
+$(obj)/vsyscall-%.so: $(src)/vsyscall.lds $(obj)/vsyscall-%.o FORCE
+ $(call if_changed,syscall)
+
+# We also create a special relocatable object that should mirror the symbol
+# table and layout of the linked DSO. With ld -R we can then refer to
+# these symbols in the kernel code rather than hand-coded addresses.
+extra-y += vsyscall-syms.o
+$(obj)/built-in.o: $(obj)/vsyscall-syms.o
+$(obj)/built-in.o: ld_flags += -R $(obj)/vsyscall-syms.o
+
+SYSCFLAGS_vsyscall-syms.o = -r
+$(obj)/vsyscall-syms.o: $(src)/vsyscall.lds \
+ $(obj)/vsyscall-trapa.o $(obj)/vsyscall-note.o FORCE
+ $(call if_changed,syscall)
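The ld -R trick above exists so that C code in the kernel can take the address of a symbol defined inside the DSO and rebase it onto the per-process vDSO mapping, as vsyscall.c later in this patch does with context.vdso; a hedged sketch of that kind of use, with the helper name being an illustration only:

#include <linux/mm_types.h>

/* Resolvable at kernel link time thanks to vsyscall-syms.o. */
extern char __kernel_sigreturn;

/* Illustrative: address of the sigreturn trampoline in a given process. */
static unsigned long example_sigreturn_addr(struct mm_struct *mm)
{
	return (unsigned long)mm->context.vdso +
	       (unsigned long)&__kernel_sigreturn;
}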
diff --git a/arch/sh/kernel/vsyscall/vsyscall-note.S b/arch/sh/kernel/vsyscall/vsyscall-note.S
new file mode 100644
index 00000000..d4b5be4f
--- /dev/null
+++ b/arch/sh/kernel/vsyscall/vsyscall-note.S
@@ -0,0 +1,25 @@
+/*
+ * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text.
+ * Here we can supply some information useful to userland.
+ */
+
+#include <linux/uts.h>
+#include <linux/version.h>
+
+#define ASM_ELF_NOTE_BEGIN(name, flags, vendor, type) \
+ .section name, flags; \
+ .balign 4; \
+ .long 1f - 0f; /* name length */ \
+ .long 3f - 2f; /* data length */ \
+ .long type; /* note type */ \
+0: .asciz vendor; /* vendor name */ \
+1: .balign 4; \
+2:
+
+#define ASM_ELF_NOTE_END \
+3: .balign 4; /* pad out section */ \
+ .previous
+
+ ASM_ELF_NOTE_BEGIN(".note.kernel-version", "a", UTS_SYSNAME, 0)
+ .long LINUX_VERSION_CODE
+ ASM_ELF_NOTE_END
diff --git a/arch/sh/kernel/vsyscall/vsyscall-sigreturn.S b/arch/sh/kernel/vsyscall/vsyscall-sigreturn.S
new file mode 100644
index 00000000..23af1758
--- /dev/null
+++ b/arch/sh/kernel/vsyscall/vsyscall-sigreturn.S
@@ -0,0 +1,74 @@
+#include <asm/unistd.h>
+
+ .text
+ .balign 32
+ .globl __kernel_sigreturn
+ .type __kernel_sigreturn,@function
+__kernel_sigreturn:
+.LSTART_sigreturn:
+ mov.w 1f, r3
+ trapa #0x10
+ or r0, r0
+ or r0, r0
+ or r0, r0
+ or r0, r0
+ or r0, r0
+
+1: .short __NR_sigreturn
+.LEND_sigreturn:
+ .size __kernel_sigreturn,.-.LSTART_sigreturn
+
+ .balign 32
+ .globl __kernel_rt_sigreturn
+ .type __kernel_rt_sigreturn,@function
+__kernel_rt_sigreturn:
+.LSTART_rt_sigreturn:
+ mov.w 1f, r3
+ trapa #0x10
+ or r0, r0
+ or r0, r0
+ or r0, r0
+ or r0, r0
+ or r0, r0
+
+1: .short __NR_rt_sigreturn
+.LEND_rt_sigreturn:
+ .size __kernel_rt_sigreturn,.-.LSTART_rt_sigreturn
+ .previous
+
+ .section .eh_frame,"a",@progbits
+.LCIE1:
+ .ualong .LCIE1_end - .LCIE1_start
+.LCIE1_start:
+ .ualong 0 /* CIE ID */
+ .byte 0x1 /* Version number */
+ .string "zRS" /* NUL-terminated augmentation string */
+ .uleb128 0x1 /* Code alignment factor */
+ .sleb128 -4 /* Data alignment factor */
+ .byte 0x11 /* Return address register column */
+ .uleb128 0x1 /* Augmentation length and data */
+ .byte 0x1b /* DW_EH_PE_pcrel | DW_EH_PE_sdata4. */
+ .byte 0xc, 0xf, 0x0 /* DW_CFA_def_cfa: r15 ofs 0 */
+
+ .align 2
+.LCIE1_end:
+
+ .ualong .LFDE0_end-.LFDE0_start /* Length FDE0 */
+.LFDE0_start:
+ .ualong .LFDE0_start-.LCIE1 /* CIE pointer */
+ .ualong .LSTART_sigreturn-. /* PC-relative start address */
+ .ualong .LEND_sigreturn-.LSTART_sigreturn
+ .uleb128 0 /* Augmentation */
+ .align 2
+.LFDE0_end:
+
+ .ualong .LFDE1_end-.LFDE1_start /* Length FDE1 */
+.LFDE1_start:
+ .ualong .LFDE1_start-.LCIE1 /* CIE pointer */
+ .ualong .LSTART_rt_sigreturn-. /* PC-relative start address */
+ .ualong .LEND_rt_sigreturn-.LSTART_rt_sigreturn
+ .uleb128 0 /* Augmentation */
+ .align 2
+.LFDE1_end:
+
+ .previous
diff --git a/arch/sh/kernel/vsyscall/vsyscall-syscall.S b/arch/sh/kernel/vsyscall/vsyscall-syscall.S
new file mode 100644
index 00000000..c2ac7f02
--- /dev/null
+++ b/arch/sh/kernel/vsyscall/vsyscall-syscall.S
@@ -0,0 +1,10 @@
+#include <linux/init.h>
+
+__INITDATA
+
+ .globl vsyscall_trapa_start, vsyscall_trapa_end
+vsyscall_trapa_start:
+ .incbin "arch/sh/kernel/vsyscall/vsyscall-trapa.so"
+vsyscall_trapa_end:
+
+__FINIT
diff --git a/arch/sh/kernel/vsyscall/vsyscall-trapa.S b/arch/sh/kernel/vsyscall/vsyscall-trapa.S
new file mode 100644
index 00000000..0eb74d00
--- /dev/null
+++ b/arch/sh/kernel/vsyscall/vsyscall-trapa.S
@@ -0,0 +1,39 @@
+ .text
+ .globl __kernel_vsyscall
+ .type __kernel_vsyscall,@function
+__kernel_vsyscall:
+.LSTART_vsyscall:
+ trapa #0x10
+ nop
+.LEND_vsyscall:
+ .size __kernel_vsyscall,.-.LSTART_vsyscall
+ .previous
+
+ .section .eh_frame,"a",@progbits
+.LCIE:
+ .ualong .LCIE_end - .LCIE_start
+.LCIE_start:
+ .ualong 0 /* CIE ID */
+ .byte 0x1 /* Version number */
+ .string "zR" /* NUL-terminated augmentation string */
+ .uleb128 0x1 /* Code alignment factor */
+ .sleb128 -4 /* Data alignment factor */
+ .byte 0x11 /* Return address register column */
+ .uleb128 0x1 /* Augmentation length and data */
+ .byte 0x1b /* DW_EH_PE_pcrel | DW_EH_PE_sdata4. */
+ .byte 0xc,0xf,0x0 /* DW_CFA_def_cfa: r15 ofs 0 */
+ .align 2
+.LCIE_end:
+
+ .ualong .LFDE_end-.LFDE_start /* Length FDE */
+.LFDE_start:
+ .ualong .LFDE_start-.LCIE /* CIE pointer */
+ .ualong .LSTART_vsyscall-. /* PC-relative start address */
+ .ualong .LEND_vsyscall-.LSTART_vsyscall
+ .uleb128 0 /* Augmentation */
+ .align 2
+.LFDE_end:
+ .previous
+
+/* Get the common code for the sigreturn entry points */
+#include "vsyscall-sigreturn.S"
diff --git a/arch/sh/kernel/vsyscall/vsyscall.c b/arch/sh/kernel/vsyscall/vsyscall.c
new file mode 100644
index 00000000..5ca57972
--- /dev/null
+++ b/arch/sh/kernel/vsyscall/vsyscall.c
@@ -0,0 +1,109 @@
+/*
+ * arch/sh/kernel/vsyscall/vsyscall.c
+ *
+ * Copyright (C) 2006 Paul Mundt
+ *
+ * vDSO randomization
+ * Copyright(C) 2005-2006, Red Hat, Inc., Ingo Molnar
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/mm.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/gfp.h>
+#include <linux/module.h>
+#include <linux/elf.h>
+#include <linux/sched.h>
+#include <linux/err.h>
+
+/*
+ * Should the kernel map a VDSO page into processes and pass its
+ * address down to glibc upon exec()?
+ */
+unsigned int __read_mostly vdso_enabled = 1;
+EXPORT_SYMBOL_GPL(vdso_enabled);
+
+static int __init vdso_setup(char *s)
+{
+ vdso_enabled = simple_strtoul(s, NULL, 0);
+ return 1;
+}
+__setup("vdso=", vdso_setup);
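The address handed down at exec() ends up in the ELF auxiliary vector; assuming the architecture advertises it via AT_SYSINFO_EHDR, as is conventional for vDSOs, userspace can locate the mapping installed by this file like so (plain userspace C, not kernel code):

#include <stdio.h>
#include <sys/auxv.h>

int main(void)
{
	/* Non-zero when the kernel mapped a vDSO and advertised it. */
	unsigned long vdso = getauxval(AT_SYSINFO_EHDR);

	if (vdso)
		printf("[vdso] mapped at %#lx\n", vdso);
	return 0;
}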
+
+/*
+ * These symbols are defined by vsyscall.o to mark the bounds
+ * of the ELF DSO images included therein.
+ */
+extern const char vsyscall_trapa_start, vsyscall_trapa_end;
+static struct page *syscall_pages[1];
+
+int __init vsyscall_init(void)
+{
+ void *syscall_page = (void *)get_zeroed_page(GFP_ATOMIC);
+ syscall_pages[0] = virt_to_page(syscall_page);
+
+ /*
+ * XXX: Map this page to a fixmap entry if we get around
+ * to adding the page to ELF core dumps
+ */
+
+ memcpy(syscall_page,
+ &vsyscall_trapa_start,
+ &vsyscall_trapa_end - &vsyscall_trapa_start);
+
+ return 0;
+}
+
+/* Setup a VMA at program startup for the vsyscall page */
+int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+{
+ struct mm_struct *mm = current->mm;
+ unsigned long addr;
+ int ret;
+
+ down_write(&mm->mmap_sem);
+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
+ if (IS_ERR_VALUE(addr)) {
+ ret = addr;
+ goto up_fail;
+ }
+
+ ret = install_special_mapping(mm, addr, PAGE_SIZE,
+ VM_READ | VM_EXEC |
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
+ syscall_pages);
+ if (unlikely(ret))
+ goto up_fail;
+
+ current->mm->context.vdso = (void *)addr;
+
+up_fail:
+ up_write(&mm->mmap_sem);
+ return ret;
+}
+
+const char *arch_vma_name(struct vm_area_struct *vma)
+{
+ if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
+ return "[vdso]";
+
+ return NULL;
+}
+
+struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
+{
+ return NULL;
+}
+
+int in_gate_area(struct mm_struct *mm, unsigned long address)
+{
+ return 0;
+}
+
+int in_gate_area_no_mm(unsigned long address)
+{
+ return 0;
+}
diff --git a/arch/sh/kernel/vsyscall/vsyscall.lds.S b/arch/sh/kernel/vsyscall/vsyscall.lds.S
new file mode 100644
index 00000000..6d59ee7c
--- /dev/null
+++ b/arch/sh/kernel/vsyscall/vsyscall.lds.S
@@ -0,0 +1,84 @@
+/*
+ * Linker script for vsyscall DSO. The vsyscall page is an ELF shared
+ * object prelinked to its virtual address, and with only one read-only
+ * segment (that fits in one page). This script controls its layout.
+ */
+#include <asm/asm-offsets.h>
+
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+OUTPUT_FORMAT("elf32-sh-linux", "elf32-sh-linux", "elf32-sh-linux")
+#else
+OUTPUT_FORMAT("elf32-shbig-linux", "elf32-shbig-linux", "elf32-shbig-linux")
+#endif
+OUTPUT_ARCH(sh)
+
+/* The ELF entry point can be used to set the AT_SYSINFO value. */
+ENTRY(__kernel_vsyscall);
+
+SECTIONS
+{
+ . = SIZEOF_HEADERS;
+
+ .hash : { *(.hash) } :text
+ .gnu.hash : { *(.gnu.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+
+ /*
+ * This linker script is used both with -r and with -shared.
+ * For the layouts to match, we need to skip more than enough
+ * space for the dynamic symbol table et al. If this amount
+ * is insufficient, ld -shared will barf. Just increase it here.
+ */
+ . = 0x400;
+
+ .text : { *(.text) } :text =0x90909090
+ .note : { *(.note.*) } :text :note
+ .eh_frame_hdr : { *(.eh_frame_hdr ) } :text :eh_frame_hdr
+ .eh_frame : {
+ KEEP (*(.eh_frame))
+ LONG (0)
+ } :text
+ .dynamic : { *(.dynamic) } :text :dynamic
+ .useless : {
+ *(.got.plt) *(.got)
+ *(.data .data.* .gnu.linkonce.d.*)
+ *(.dynbss)
+ *(.bss .bss.* .gnu.linkonce.b.*)
+ } :text
+}
+
+/*
+ * Very old versions of ld do not recognize this name token; use the constant.
+ */
+#define PT_GNU_EH_FRAME 0x6474e550
+
+/*
+ * We must supply the ELF program headers explicitly to get just one
+ * PT_LOAD segment, and set the flags explicitly to make segments read-only.
+ */
+PHDRS
+{
+ text PT_LOAD FILEHDR PHDRS FLAGS(5); /* PF_R|PF_X */
+ dynamic PT_DYNAMIC FLAGS(4); /* PF_R */
+ note PT_NOTE FLAGS(4); /* PF_R */
+ eh_frame_hdr PT_GNU_EH_FRAME;
+}
+
+/*
+ * This controls what symbols we export from the DSO.
+ */
+VERSION
+{
+ LINUX_2.6 {
+ global:
+ __kernel_vsyscall;
+ __kernel_sigreturn;
+ __kernel_rt_sigreturn;
+
+ local: *;
+ };
+}