Diffstat (limited to 'arch/mips/alchemy/common')
-rw-r--r--   arch/mips/alchemy/common/Makefile      14
-rw-r--r--   arch/mips/alchemy/common/clocks.c     105
-rw-r--r--   arch/mips/alchemy/common/dbdma.c     1089
-rw-r--r--   arch/mips/alchemy/common/dma.c        264
-rw-r--r--   arch/mips/alchemy/common/gpiolib.c    175
-rw-r--r--   arch/mips/alchemy/common/irq.c        997
-rw-r--r--   arch/mips/alchemy/common/platform.c   391
-rw-r--r--   arch/mips/alchemy/common/power.c      135
-rw-r--r--   arch/mips/alchemy/common/prom.c       129
-rw-r--r--   arch/mips/alchemy/common/setup.c       91
-rw-r--r--   arch/mips/alchemy/common/sleeper.S    270
-rw-r--r--   arch/mips/alchemy/common/time.c       193
-rw-r--r--   arch/mips/alchemy/common/vss.c         84
13 files changed, 3937 insertions, 0 deletions
diff --git a/arch/mips/alchemy/common/Makefile b/arch/mips/alchemy/common/Makefile
new file mode 100644
index 00000000..407ebc00
--- /dev/null
+++ b/arch/mips/alchemy/common/Makefile
@@ -0,0 +1,14 @@
+#
+# Copyright 2000, 2008 MontaVista Software Inc.
+# Author: MontaVista Software, Inc. <source@mvista.com>
+#
+# Makefile for the Alchemy Au1xx0 CPUs, generic files.
+#
+
+obj-y += prom.o time.o clocks.o platform.o power.o setup.o \
+ sleeper.o dma.o dbdma.o vss.o irq.o
+
+# optional gpiolib support
+ifeq ($(CONFIG_ALCHEMY_GPIO_INDIRECT),)
+ obj-$(CONFIG_GPIOLIB) += gpiolib.o
+endif
diff --git a/arch/mips/alchemy/common/clocks.c b/arch/mips/alchemy/common/clocks.c
new file mode 100644
index 00000000..f38298a8
--- /dev/null
+++ b/arch/mips/alchemy/common/clocks.c
@@ -0,0 +1,105 @@
+/*
+ * BRIEF MODULE DESCRIPTION
+ * Simple Au1xx0 clocks routines.
+ *
+ * Copyright 2001, 2008 MontaVista Software Inc.
+ * Author: MontaVista Software, Inc. <source@mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <asm/time.h>
+#include <asm/mach-au1x00/au1000.h>
+
+/*
+ * I haven't found anyone that doesn't use a 12 MHz source clock,
+ * but just in case.....
+ */
+#define AU1000_SRC_CLK 12000000
+
+static unsigned int au1x00_clock; /* Hz */
+static unsigned long uart_baud_base;
+
+/*
+ * Set the au1000_clock
+ */
+void set_au1x00_speed(unsigned int new_freq)
+{
+ au1x00_clock = new_freq;
+}
+
+unsigned int get_au1x00_speed(void)
+{
+ return au1x00_clock;
+}
+EXPORT_SYMBOL(get_au1x00_speed);
+
+/*
+ * The UART baud base is not known at compile time ... if
+ * we want to be able to use the same code on different
+ * speed CPUs.
+ */
+unsigned long get_au1x00_uart_baud_base(void)
+{
+ return uart_baud_base;
+}
+
+void set_au1x00_uart_baud_base(unsigned long new_baud_base)
+{
+ uart_baud_base = new_baud_base;
+}
+
+/*
+ * We read the real processor speed from the PLL. This is important
+ * because it is more accurate than computing it from the 32 KHz
+ * counter, if it exists. If we don't have an accurate processor
+ * speed, all of the peripherals that derive their clocks based on
+ * this advertised speed will introduce error and sometimes not work
+ * properly. This function is further convoluted to still allow configurations
+ * to do that in case they have really, really old silicon with a
+ * write-only PLL register. -- Dan
+ */
+unsigned long au1xxx_calc_clock(void)
+{
+ unsigned long cpu_speed;
+
+ /*
+ * On early Au1000, sys_cpupll was write-only. Since these
+ * silicon versions of Au1000 are not sold by AMD, we don't bend
+ * over backwards trying to determine the frequency.
+ */
+ if (au1xxx_cpu_has_pll_wo())
+ cpu_speed = 396000000;
+ else
+ cpu_speed = (au_readl(SYS_CPUPLL) & 0x0000003f) * AU1000_SRC_CLK;
+
+	/* On Alchemy, the CPU:counter ratio is 1:1 */
+ mips_hpt_frequency = cpu_speed;
+ /* Equation: Baudrate = CPU / (SD * 2 * CLKDIV * 16) */
+ set_au1x00_uart_baud_base(cpu_speed / (2 * ((int)(au_readl(SYS_POWERCTRL)
+ & 0x03) + 2) * 16));
+
+ set_au1x00_speed(cpu_speed);
+
+ return cpu_speed;
+}
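
As a worked illustration of the baud-base formula in the comment above, the following standalone sketch (not part of this patch) mirrors the computation in au1xxx_calc_clock() for an assumed 396 MHz core clock and SYS_POWERCTRL[1:0] == 0:

#include <stdio.h>

int main(void)
{
	unsigned long cpu_speed = 396000000;	/* assumed core clock (Hz) */
	unsigned int sd = 0;			/* assumed SYS_POWERCTRL & 0x03 */

	/* mirrors: cpu_speed / (2 * ((SYS_POWERCTRL & 0x03) + 2) * 16) */
	unsigned long baud_base = cpu_speed / (2 * (sd + 2) * 16);

	printf("uart_baud_base = %lu\n", baud_base);	/* prints 6187500 */
	return 0;
}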
diff --git a/arch/mips/alchemy/common/dbdma.c b/arch/mips/alchemy/common/dbdma.c
new file mode 100644
index 00000000..cf02d7dc
--- /dev/null
+++ b/arch/mips/alchemy/common/dbdma.c
@@ -0,0 +1,1089 @@
+/*
+ *
+ * BRIEF MODULE DESCRIPTION
+ * The Descriptor Based DMA channel manager that first appeared
+ * on the Au1550. I started with dma.c, but I think all that is
+ * left is this initial comment :-)
+ *
+ * Copyright 2004 Embedded Edge, LLC
+ * dan@embeddededge.com
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/syscore_ops.h>
+#include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-au1x00/au1xxx_dbdma.h>
+
+/*
+ * The Descriptor Based DMA supports up to 16 channels.
+ *
+ * There are 32 devices defined. We keep an internal structure
+ * of devices using these channels, along with additional
+ * information.
+ *
+ * We allocate the descriptors and allow access to them through various
+ * functions. The drivers allocate the data buffers and assign them
+ * to the descriptors.
+ */
+static DEFINE_SPINLOCK(au1xxx_dbdma_spin_lock);
+
+/* I couldn't find a macro that did this... */
+#define ALIGN_ADDR(x, a) ((((u32)(x)) + (a-1)) & ~(a-1))
+
+static dbdma_global_t *dbdma_gptr =
+ (dbdma_global_t *)KSEG1ADDR(AU1550_DBDMA_CONF_PHYS_ADDR);
+static int dbdma_initialized;
+
+static dbdev_tab_t *dbdev_tab;
+
+static dbdev_tab_t au1550_dbdev_tab[] __initdata = {
+ /* UARTS */
+ { AU1550_DSCR_CMD0_UART0_TX, DEV_FLAGS_OUT, 0, 8, 0x11100004, 0, 0 },
+ { AU1550_DSCR_CMD0_UART0_RX, DEV_FLAGS_IN, 0, 8, 0x11100000, 0, 0 },
+ { AU1550_DSCR_CMD0_UART3_TX, DEV_FLAGS_OUT, 0, 8, 0x11400004, 0, 0 },
+ { AU1550_DSCR_CMD0_UART3_RX, DEV_FLAGS_IN, 0, 8, 0x11400000, 0, 0 },
+
+ /* EXT DMA */
+ { AU1550_DSCR_CMD0_DMA_REQ0, 0, 0, 0, 0x00000000, 0, 0 },
+ { AU1550_DSCR_CMD0_DMA_REQ1, 0, 0, 0, 0x00000000, 0, 0 },
+ { AU1550_DSCR_CMD0_DMA_REQ2, 0, 0, 0, 0x00000000, 0, 0 },
+ { AU1550_DSCR_CMD0_DMA_REQ3, 0, 0, 0, 0x00000000, 0, 0 },
+
+ /* USB DEV */
+ { AU1550_DSCR_CMD0_USBDEV_RX0, DEV_FLAGS_IN, 4, 8, 0x10200000, 0, 0 },
+ { AU1550_DSCR_CMD0_USBDEV_TX0, DEV_FLAGS_OUT, 4, 8, 0x10200004, 0, 0 },
+ { AU1550_DSCR_CMD0_USBDEV_TX1, DEV_FLAGS_OUT, 4, 8, 0x10200008, 0, 0 },
+ { AU1550_DSCR_CMD0_USBDEV_TX2, DEV_FLAGS_OUT, 4, 8, 0x1020000c, 0, 0 },
+ { AU1550_DSCR_CMD0_USBDEV_RX3, DEV_FLAGS_IN, 4, 8, 0x10200010, 0, 0 },
+ { AU1550_DSCR_CMD0_USBDEV_RX4, DEV_FLAGS_IN, 4, 8, 0x10200014, 0, 0 },
+
+ /* PSCs */
+ { AU1550_DSCR_CMD0_PSC0_TX, DEV_FLAGS_OUT, 0, 0, 0x11a0001c, 0, 0 },
+ { AU1550_DSCR_CMD0_PSC0_RX, DEV_FLAGS_IN, 0, 0, 0x11a0001c, 0, 0 },
+ { AU1550_DSCR_CMD0_PSC1_TX, DEV_FLAGS_OUT, 0, 0, 0x11b0001c, 0, 0 },
+ { AU1550_DSCR_CMD0_PSC1_RX, DEV_FLAGS_IN, 0, 0, 0x11b0001c, 0, 0 },
+ { AU1550_DSCR_CMD0_PSC2_TX, DEV_FLAGS_OUT, 0, 0, 0x10a0001c, 0, 0 },
+ { AU1550_DSCR_CMD0_PSC2_RX, DEV_FLAGS_IN, 0, 0, 0x10a0001c, 0, 0 },
+ { AU1550_DSCR_CMD0_PSC3_TX, DEV_FLAGS_OUT, 0, 0, 0x10b0001c, 0, 0 },
+ { AU1550_DSCR_CMD0_PSC3_RX, DEV_FLAGS_IN, 0, 0, 0x10b0001c, 0, 0 },
+
+ { AU1550_DSCR_CMD0_PCI_WRITE, 0, 0, 0, 0x00000000, 0, 0 }, /* PCI */
+ { AU1550_DSCR_CMD0_NAND_FLASH, 0, 0, 0, 0x00000000, 0, 0 }, /* NAND */
+
+ /* MAC 0 */
+ { AU1550_DSCR_CMD0_MAC0_RX, DEV_FLAGS_IN, 0, 0, 0x00000000, 0, 0 },
+ { AU1550_DSCR_CMD0_MAC0_TX, DEV_FLAGS_OUT, 0, 0, 0x00000000, 0, 0 },
+
+ /* MAC 1 */
+ { AU1550_DSCR_CMD0_MAC1_RX, DEV_FLAGS_IN, 0, 0, 0x00000000, 0, 0 },
+ { AU1550_DSCR_CMD0_MAC1_TX, DEV_FLAGS_OUT, 0, 0, 0x00000000, 0, 0 },
+
+ { DSCR_CMD0_THROTTLE, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
+ { DSCR_CMD0_ALWAYS, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
+};
+
+static dbdev_tab_t au1200_dbdev_tab[] __initdata = {
+ { AU1200_DSCR_CMD0_UART0_TX, DEV_FLAGS_OUT, 0, 8, 0x11100004, 0, 0 },
+ { AU1200_DSCR_CMD0_UART0_RX, DEV_FLAGS_IN, 0, 8, 0x11100000, 0, 0 },
+ { AU1200_DSCR_CMD0_UART1_TX, DEV_FLAGS_OUT, 0, 8, 0x11200004, 0, 0 },
+ { AU1200_DSCR_CMD0_UART1_RX, DEV_FLAGS_IN, 0, 8, 0x11200000, 0, 0 },
+
+ { AU1200_DSCR_CMD0_DMA_REQ0, 0, 0, 0, 0x00000000, 0, 0 },
+ { AU1200_DSCR_CMD0_DMA_REQ1, 0, 0, 0, 0x00000000, 0, 0 },
+
+ { AU1200_DSCR_CMD0_MAE_BE, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
+ { AU1200_DSCR_CMD0_MAE_FE, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
+ { AU1200_DSCR_CMD0_MAE_BOTH, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
+ { AU1200_DSCR_CMD0_LCD, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
+
+ { AU1200_DSCR_CMD0_SDMS_TX0, DEV_FLAGS_OUT, 4, 8, 0x10600000, 0, 0 },
+ { AU1200_DSCR_CMD0_SDMS_RX0, DEV_FLAGS_IN, 4, 8, 0x10600004, 0, 0 },
+ { AU1200_DSCR_CMD0_SDMS_TX1, DEV_FLAGS_OUT, 4, 8, 0x10680000, 0, 0 },
+ { AU1200_DSCR_CMD0_SDMS_RX1, DEV_FLAGS_IN, 4, 8, 0x10680004, 0, 0 },
+
+ { AU1200_DSCR_CMD0_AES_RX, DEV_FLAGS_IN , 4, 32, 0x10300008, 0, 0 },
+ { AU1200_DSCR_CMD0_AES_TX, DEV_FLAGS_OUT, 4, 32, 0x10300004, 0, 0 },
+
+ { AU1200_DSCR_CMD0_PSC0_TX, DEV_FLAGS_OUT, 0, 16, 0x11a0001c, 0, 0 },
+ { AU1200_DSCR_CMD0_PSC0_RX, DEV_FLAGS_IN, 0, 16, 0x11a0001c, 0, 0 },
+ { AU1200_DSCR_CMD0_PSC0_SYNC, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
+ { AU1200_DSCR_CMD0_PSC1_TX, DEV_FLAGS_OUT, 0, 16, 0x11b0001c, 0, 0 },
+ { AU1200_DSCR_CMD0_PSC1_RX, DEV_FLAGS_IN, 0, 16, 0x11b0001c, 0, 0 },
+ { AU1200_DSCR_CMD0_PSC1_SYNC, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
+
+ { AU1200_DSCR_CMD0_CIM_RXA, DEV_FLAGS_IN, 0, 32, 0x14004020, 0, 0 },
+ { AU1200_DSCR_CMD0_CIM_RXB, DEV_FLAGS_IN, 0, 32, 0x14004040, 0, 0 },
+ { AU1200_DSCR_CMD0_CIM_RXC, DEV_FLAGS_IN, 0, 32, 0x14004060, 0, 0 },
+ { AU1200_DSCR_CMD0_CIM_SYNC, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
+
+ { AU1200_DSCR_CMD0_NAND_FLASH, DEV_FLAGS_IN, 0, 0, 0x00000000, 0, 0 },
+
+ { DSCR_CMD0_THROTTLE, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
+ { DSCR_CMD0_ALWAYS, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
+};
+
+static dbdev_tab_t au1300_dbdev_tab[] __initdata = {
+ { AU1300_DSCR_CMD0_UART0_TX, DEV_FLAGS_OUT, 0, 8, 0x10100004, 0, 0 },
+ { AU1300_DSCR_CMD0_UART0_RX, DEV_FLAGS_IN, 0, 8, 0x10100000, 0, 0 },
+ { AU1300_DSCR_CMD0_UART1_TX, DEV_FLAGS_OUT, 0, 8, 0x10101004, 0, 0 },
+ { AU1300_DSCR_CMD0_UART1_RX, DEV_FLAGS_IN, 0, 8, 0x10101000, 0, 0 },
+ { AU1300_DSCR_CMD0_UART2_TX, DEV_FLAGS_OUT, 0, 8, 0x10102004, 0, 0 },
+ { AU1300_DSCR_CMD0_UART2_RX, DEV_FLAGS_IN, 0, 8, 0x10102000, 0, 0 },
+ { AU1300_DSCR_CMD0_UART3_TX, DEV_FLAGS_OUT, 0, 8, 0x10103004, 0, 0 },
+ { AU1300_DSCR_CMD0_UART3_RX, DEV_FLAGS_IN, 0, 8, 0x10103000, 0, 0 },
+
+ { AU1300_DSCR_CMD0_SDMS_TX0, DEV_FLAGS_OUT, 4, 8, 0x10600000, 0, 0 },
+ { AU1300_DSCR_CMD0_SDMS_RX0, DEV_FLAGS_IN, 4, 8, 0x10600004, 0, 0 },
+ { AU1300_DSCR_CMD0_SDMS_TX1, DEV_FLAGS_OUT, 8, 8, 0x10601000, 0, 0 },
+ { AU1300_DSCR_CMD0_SDMS_RX1, DEV_FLAGS_IN, 8, 8, 0x10601004, 0, 0 },
+
+ { AU1300_DSCR_CMD0_AES_RX, DEV_FLAGS_IN , 4, 32, 0x10300008, 0, 0 },
+ { AU1300_DSCR_CMD0_AES_TX, DEV_FLAGS_OUT, 4, 32, 0x10300004, 0, 0 },
+
+ { AU1300_DSCR_CMD0_PSC0_TX, DEV_FLAGS_OUT, 0, 16, 0x10a0001c, 0, 0 },
+ { AU1300_DSCR_CMD0_PSC0_RX, DEV_FLAGS_IN, 0, 16, 0x10a0001c, 0, 0 },
+ { AU1300_DSCR_CMD0_PSC1_TX, DEV_FLAGS_OUT, 0, 16, 0x10a0101c, 0, 0 },
+ { AU1300_DSCR_CMD0_PSC1_RX, DEV_FLAGS_IN, 0, 16, 0x10a0101c, 0, 0 },
+ { AU1300_DSCR_CMD0_PSC2_TX, DEV_FLAGS_OUT, 0, 16, 0x10a0201c, 0, 0 },
+ { AU1300_DSCR_CMD0_PSC2_RX, DEV_FLAGS_IN, 0, 16, 0x10a0201c, 0, 0 },
+ { AU1300_DSCR_CMD0_PSC3_TX, DEV_FLAGS_OUT, 0, 16, 0x10a0301c, 0, 0 },
+ { AU1300_DSCR_CMD0_PSC3_RX, DEV_FLAGS_IN, 0, 16, 0x10a0301c, 0, 0 },
+
+ { AU1300_DSCR_CMD0_LCD, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
+ { AU1300_DSCR_CMD0_NAND_FLASH, DEV_FLAGS_IN, 0, 0, 0x00000000, 0, 0 },
+
+ { AU1300_DSCR_CMD0_SDMS_TX2, DEV_FLAGS_OUT, 4, 8, 0x10602000, 0, 0 },
+ { AU1300_DSCR_CMD0_SDMS_RX2, DEV_FLAGS_IN, 4, 8, 0x10602004, 0, 0 },
+
+ { AU1300_DSCR_CMD0_CIM_SYNC, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
+
+ { AU1300_DSCR_CMD0_UDMA, DEV_FLAGS_ANYUSE, 0, 32, 0x14001810, 0, 0 },
+
+ { AU1300_DSCR_CMD0_DMA_REQ0, 0, 0, 0, 0x00000000, 0, 0 },
+ { AU1300_DSCR_CMD0_DMA_REQ1, 0, 0, 0, 0x00000000, 0, 0 },
+
+ { DSCR_CMD0_THROTTLE, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
+ { DSCR_CMD0_ALWAYS, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
+};
+
+/* 32 predefined plus 32 custom */
+#define DBDEV_TAB_SIZE 64
+
+static chan_tab_t *chan_tab_ptr[NUM_DBDMA_CHANS];
+
+static dbdev_tab_t *find_dbdev_id(u32 id)
+{
+ int i;
+ dbdev_tab_t *p;
+ for (i = 0; i < DBDEV_TAB_SIZE; ++i) {
+ p = &dbdev_tab[i];
+ if (p->dev_id == id)
+ return p;
+ }
+ return NULL;
+}
+
+void *au1xxx_ddma_get_nextptr_virt(au1x_ddma_desc_t *dp)
+{
+ return phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
+}
+EXPORT_SYMBOL(au1xxx_ddma_get_nextptr_virt);
+
+u32 au1xxx_ddma_add_device(dbdev_tab_t *dev)
+{
+ u32 ret = 0;
+ dbdev_tab_t *p;
+ static u16 new_id = 0x1000;
+
+ p = find_dbdev_id(~0);
+ if (NULL != p) {
+ memcpy(p, dev, sizeof(dbdev_tab_t));
+ p->dev_id = DSCR_DEV2CUSTOM_ID(new_id, dev->dev_id);
+ ret = p->dev_id;
+ new_id++;
+#if 0
+ printk(KERN_DEBUG "add_device: id:%x flags:%x padd:%x\n",
+ p->dev_id, p->dev_flags, p->dev_physaddr);
+#endif
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(au1xxx_ddma_add_device);
+
+void au1xxx_ddma_del_device(u32 devid)
+{
+ dbdev_tab_t *p = find_dbdev_id(devid);
+
+ if (p != NULL) {
+ memset(p, 0, sizeof(dbdev_tab_t));
+ p->dev_id = ~0;
+ }
+}
+EXPORT_SYMBOL(au1xxx_ddma_del_device);
+
+/* Allocate a channel and return a non-zero descriptor if successful. */
+u32 au1xxx_dbdma_chan_alloc(u32 srcid, u32 destid,
+ void (*callback)(int, void *), void *callparam)
+{
+ unsigned long flags;
+ u32 used, chan;
+ u32 dcp;
+ int i;
+ dbdev_tab_t *stp, *dtp;
+ chan_tab_t *ctp;
+ au1x_dma_chan_t *cp;
+
+ /*
+	 * We do the initialization on the first channel allocation.
+ * We have to wait because of the interrupt handler initialization
+ * which can't be done successfully during board set up.
+ */
+ if (!dbdma_initialized)
+ return 0;
+
+ stp = find_dbdev_id(srcid);
+ if (stp == NULL)
+ return 0;
+ dtp = find_dbdev_id(destid);
+ if (dtp == NULL)
+ return 0;
+
+ used = 0;
+
+ /* Check to see if we can get both channels. */
+ spin_lock_irqsave(&au1xxx_dbdma_spin_lock, flags);
+ if (!(stp->dev_flags & DEV_FLAGS_INUSE) ||
+ (stp->dev_flags & DEV_FLAGS_ANYUSE)) {
+ /* Got source */
+ stp->dev_flags |= DEV_FLAGS_INUSE;
+ if (!(dtp->dev_flags & DEV_FLAGS_INUSE) ||
+ (dtp->dev_flags & DEV_FLAGS_ANYUSE)) {
+ /* Got destination */
+ dtp->dev_flags |= DEV_FLAGS_INUSE;
+ } else {
+ /* Can't get dest. Release src. */
+ stp->dev_flags &= ~DEV_FLAGS_INUSE;
+ used++;
+ }
+ } else
+ used++;
+ spin_unlock_irqrestore(&au1xxx_dbdma_spin_lock, flags);
+
+ if (used)
+ return 0;
+
+ /* Let's see if we can allocate a channel for it. */
+ ctp = NULL;
+ chan = 0;
+ spin_lock_irqsave(&au1xxx_dbdma_spin_lock, flags);
+ for (i = 0; i < NUM_DBDMA_CHANS; i++)
+ if (chan_tab_ptr[i] == NULL) {
+ /*
+			 * If kmalloc fails, it is caught below, the same
+			 * as a channel not being available.
+ */
+ ctp = kmalloc(sizeof(chan_tab_t), GFP_ATOMIC);
+ chan_tab_ptr[i] = ctp;
+ break;
+ }
+ spin_unlock_irqrestore(&au1xxx_dbdma_spin_lock, flags);
+
+ if (ctp != NULL) {
+ memset(ctp, 0, sizeof(chan_tab_t));
+ ctp->chan_index = chan = i;
+ dcp = KSEG1ADDR(AU1550_DBDMA_PHYS_ADDR);
+ dcp += (0x0100 * chan);
+ ctp->chan_ptr = (au1x_dma_chan_t *)dcp;
+ cp = (au1x_dma_chan_t *)dcp;
+ ctp->chan_src = stp;
+ ctp->chan_dest = dtp;
+ ctp->chan_callback = callback;
+ ctp->chan_callparam = callparam;
+
+ /* Initialize channel configuration. */
+ i = 0;
+ if (stp->dev_intlevel)
+ i |= DDMA_CFG_SED;
+ if (stp->dev_intpolarity)
+ i |= DDMA_CFG_SP;
+ if (dtp->dev_intlevel)
+ i |= DDMA_CFG_DED;
+ if (dtp->dev_intpolarity)
+ i |= DDMA_CFG_DP;
+ if ((stp->dev_flags & DEV_FLAGS_SYNC) ||
+ (dtp->dev_flags & DEV_FLAGS_SYNC))
+ i |= DDMA_CFG_SYNC;
+ cp->ddma_cfg = i;
+ au_sync();
+
+ /*
+ * Return a non-zero value that can be used to find the channel
+ * information in subsequent operations.
+ */
+ return (u32)(&chan_tab_ptr[chan]);
+ }
+
+ /* Release devices */
+ stp->dev_flags &= ~DEV_FLAGS_INUSE;
+ dtp->dev_flags &= ~DEV_FLAGS_INUSE;
+
+ return 0;
+}
+EXPORT_SYMBOL(au1xxx_dbdma_chan_alloc);
+
+/*
+ * Set the device width if source or destination is a FIFO.
+ * Should be 8, 16, or 32 bits.
+ */
+u32 au1xxx_dbdma_set_devwidth(u32 chanid, int bits)
+{
+ u32 rv;
+ chan_tab_t *ctp;
+ dbdev_tab_t *stp, *dtp;
+
+ ctp = *((chan_tab_t **)chanid);
+ stp = ctp->chan_src;
+ dtp = ctp->chan_dest;
+ rv = 0;
+
+ if (stp->dev_flags & DEV_FLAGS_IN) { /* Source in fifo */
+ rv = stp->dev_devwidth;
+ stp->dev_devwidth = bits;
+ }
+ if (dtp->dev_flags & DEV_FLAGS_OUT) { /* Destination out fifo */
+ rv = dtp->dev_devwidth;
+ dtp->dev_devwidth = bits;
+ }
+
+ return rv;
+}
+EXPORT_SYMBOL(au1xxx_dbdma_set_devwidth);
+
+/* Allocate a descriptor ring, initializing as much as possible. */
+u32 au1xxx_dbdma_ring_alloc(u32 chanid, int entries)
+{
+ int i;
+ u32 desc_base, srcid, destid;
+ u32 cmd0, cmd1, src1, dest1;
+ u32 src0, dest0;
+ chan_tab_t *ctp;
+ dbdev_tab_t *stp, *dtp;
+ au1x_ddma_desc_t *dp;
+
+ /*
+ * I guess we could check this to be within the
+ * range of the table......
+ */
+ ctp = *((chan_tab_t **)chanid);
+ stp = ctp->chan_src;
+ dtp = ctp->chan_dest;
+
+ /*
+ * The descriptors must be 32-byte aligned. There is a
+ * possibility the allocation will give us such an address,
+ * and if we try that first we are likely to not waste larger
+ * slabs of memory.
+ */
+ desc_base = (u32)kmalloc(entries * sizeof(au1x_ddma_desc_t),
+ GFP_KERNEL|GFP_DMA);
+ if (desc_base == 0)
+ return 0;
+
+ if (desc_base & 0x1f) {
+ /*
+ * Lost....do it again, allocate extra, and round
+ * the address base.
+ */
+ kfree((const void *)desc_base);
+ i = entries * sizeof(au1x_ddma_desc_t);
+ i += (sizeof(au1x_ddma_desc_t) - 1);
+ desc_base = (u32)kmalloc(i, GFP_KERNEL|GFP_DMA);
+ if (desc_base == 0)
+ return 0;
+
+ ctp->cdb_membase = desc_base;
+ desc_base = ALIGN_ADDR(desc_base, sizeof(au1x_ddma_desc_t));
+ } else
+ ctp->cdb_membase = desc_base;
+
+ dp = (au1x_ddma_desc_t *)desc_base;
+
+ /* Keep track of the base descriptor. */
+ ctp->chan_desc_base = dp;
+
+ /* Initialize the rings with as much information as we know. */
+ srcid = stp->dev_id;
+ destid = dtp->dev_id;
+
+ cmd0 = cmd1 = src1 = dest1 = 0;
+ src0 = dest0 = 0;
+
+ cmd0 |= DSCR_CMD0_SID(srcid);
+ cmd0 |= DSCR_CMD0_DID(destid);
+ cmd0 |= DSCR_CMD0_IE | DSCR_CMD0_CV;
+ cmd0 |= DSCR_CMD0_ST(DSCR_CMD0_ST_NOCHANGE);
+
+ /* Is it mem to mem transfer? */
+ if (((DSCR_CUSTOM2DEV_ID(srcid) == DSCR_CMD0_THROTTLE) ||
+ (DSCR_CUSTOM2DEV_ID(srcid) == DSCR_CMD0_ALWAYS)) &&
+ ((DSCR_CUSTOM2DEV_ID(destid) == DSCR_CMD0_THROTTLE) ||
+ (DSCR_CUSTOM2DEV_ID(destid) == DSCR_CMD0_ALWAYS)))
+ cmd0 |= DSCR_CMD0_MEM;
+
+ switch (stp->dev_devwidth) {
+ case 8:
+ cmd0 |= DSCR_CMD0_SW(DSCR_CMD0_BYTE);
+ break;
+ case 16:
+ cmd0 |= DSCR_CMD0_SW(DSCR_CMD0_HALFWORD);
+ break;
+ case 32:
+ default:
+ cmd0 |= DSCR_CMD0_SW(DSCR_CMD0_WORD);
+ break;
+ }
+
+ switch (dtp->dev_devwidth) {
+ case 8:
+ cmd0 |= DSCR_CMD0_DW(DSCR_CMD0_BYTE);
+ break;
+ case 16:
+ cmd0 |= DSCR_CMD0_DW(DSCR_CMD0_HALFWORD);
+ break;
+ case 32:
+ default:
+ cmd0 |= DSCR_CMD0_DW(DSCR_CMD0_WORD);
+ break;
+ }
+
+ /*
+ * If the device is marked as an in/out FIFO, ensure it is
+ * set non-coherent.
+ */
+ if (stp->dev_flags & DEV_FLAGS_IN)
+ cmd0 |= DSCR_CMD0_SN; /* Source in FIFO */
+ if (dtp->dev_flags & DEV_FLAGS_OUT)
+ cmd0 |= DSCR_CMD0_DN; /* Destination out FIFO */
+
+ /*
+ * Set up source1. For now, assume no stride and increment.
+ * A channel attribute update can change this later.
+ */
+ switch (stp->dev_tsize) {
+ case 1:
+ src1 |= DSCR_SRC1_STS(DSCR_xTS_SIZE1);
+ break;
+ case 2:
+ src1 |= DSCR_SRC1_STS(DSCR_xTS_SIZE2);
+ break;
+ case 4:
+ src1 |= DSCR_SRC1_STS(DSCR_xTS_SIZE4);
+ break;
+ case 8:
+ default:
+ src1 |= DSCR_SRC1_STS(DSCR_xTS_SIZE8);
+ break;
+ }
+
+ /* If source input is FIFO, set static address. */
+ if (stp->dev_flags & DEV_FLAGS_IN) {
+ if (stp->dev_flags & DEV_FLAGS_BURSTABLE)
+ src1 |= DSCR_SRC1_SAM(DSCR_xAM_BURST);
+ else
+ src1 |= DSCR_SRC1_SAM(DSCR_xAM_STATIC);
+ }
+
+ if (stp->dev_physaddr)
+ src0 = stp->dev_physaddr;
+
+ /*
+ * Set up dest1. For now, assume no stride and increment.
+ * A channel attribute update can change this later.
+ */
+ switch (dtp->dev_tsize) {
+ case 1:
+ dest1 |= DSCR_DEST1_DTS(DSCR_xTS_SIZE1);
+ break;
+ case 2:
+ dest1 |= DSCR_DEST1_DTS(DSCR_xTS_SIZE2);
+ break;
+ case 4:
+ dest1 |= DSCR_DEST1_DTS(DSCR_xTS_SIZE4);
+ break;
+ case 8:
+ default:
+ dest1 |= DSCR_DEST1_DTS(DSCR_xTS_SIZE8);
+ break;
+ }
+
+ /* If destination output is FIFO, set static address. */
+ if (dtp->dev_flags & DEV_FLAGS_OUT) {
+ if (dtp->dev_flags & DEV_FLAGS_BURSTABLE)
+ dest1 |= DSCR_DEST1_DAM(DSCR_xAM_BURST);
+ else
+ dest1 |= DSCR_DEST1_DAM(DSCR_xAM_STATIC);
+ }
+
+ if (dtp->dev_physaddr)
+ dest0 = dtp->dev_physaddr;
+
+#if 0
+ printk(KERN_DEBUG "did:%x sid:%x cmd0:%x cmd1:%x source0:%x "
+ "source1:%x dest0:%x dest1:%x\n",
+ dtp->dev_id, stp->dev_id, cmd0, cmd1, src0,
+ src1, dest0, dest1);
+#endif
+ for (i = 0; i < entries; i++) {
+ dp->dscr_cmd0 = cmd0;
+ dp->dscr_cmd1 = cmd1;
+ dp->dscr_source0 = src0;
+ dp->dscr_source1 = src1;
+ dp->dscr_dest0 = dest0;
+ dp->dscr_dest1 = dest1;
+ dp->dscr_stat = 0;
+ dp->sw_context = 0;
+ dp->sw_status = 0;
+ dp->dscr_nxtptr = DSCR_NXTPTR(virt_to_phys(dp + 1));
+ dp++;
+ }
+
+	/* Make the last descriptor point to the first. */
+ dp--;
+ dp->dscr_nxtptr = DSCR_NXTPTR(virt_to_phys(ctp->chan_desc_base));
+ ctp->get_ptr = ctp->put_ptr = ctp->cur_ptr = ctp->chan_desc_base;
+
+ return (u32)ctp->chan_desc_base;
+}
+EXPORT_SYMBOL(au1xxx_dbdma_ring_alloc);
+
+/*
+ * Put a source buffer into the DMA ring.
+ * This updates the source pointer and byte count. Normally used
+ * for memory to fifo transfers.
+ */
+u32 au1xxx_dbdma_put_source(u32 chanid, dma_addr_t buf, int nbytes, u32 flags)
+{
+ chan_tab_t *ctp;
+ au1x_ddma_desc_t *dp;
+
+ /*
+ * I guess we could check this to be within the
+ * range of the table......
+ */
+ ctp = *(chan_tab_t **)chanid;
+
+ /*
+	 * We may have multiple callers for a particular channel, but an
+	 * interrupt doesn't affect this pointer or the descriptor, so no
+	 * locking should be needed.
+ */
+ dp = ctp->put_ptr;
+
+ /*
+ * If the descriptor is valid, we are way ahead of the DMA
+ * engine, so just return an error condition.
+ */
+ if (dp->dscr_cmd0 & DSCR_CMD0_V)
+ return 0;
+
+ /* Load up buffer address and byte count. */
+ dp->dscr_source0 = buf & ~0UL;
+ dp->dscr_cmd1 = nbytes;
+ /* Check flags */
+ if (flags & DDMA_FLAGS_IE)
+ dp->dscr_cmd0 |= DSCR_CMD0_IE;
+ if (flags & DDMA_FLAGS_NOIE)
+ dp->dscr_cmd0 &= ~DSCR_CMD0_IE;
+
+ /*
+	 * There is an erratum on the Au1200/Au1550 parts that could result
+	 * in "stale" data being DMA'ed. It has to do with the snoop logic on
+	 * the cache eviction buffer. DMA_NONCOHERENT is on by default for
+	 * these parts. If it is fixed in the future, these dma_cache_inv
+	 * calls will be nothing more than empty macros. See io.h.
+ */
+ dma_cache_wback_inv((unsigned long)buf, nbytes);
+ dp->dscr_cmd0 |= DSCR_CMD0_V; /* Let it rip */
+ au_sync();
+ dma_cache_wback_inv((unsigned long)dp, sizeof(*dp));
+ ctp->chan_ptr->ddma_dbell = 0;
+
+ /* Get next descriptor pointer. */
+ ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
+
+ /* Return something non-zero. */
+ return nbytes;
+}
+EXPORT_SYMBOL(au1xxx_dbdma_put_source);
+
+/* Put a destination buffer into the DMA ring.
+ * This updates the destination pointer and byte count. Normally used
+ * to place an empty buffer into the ring for fifo to memory transfers.
+ */
+u32 au1xxx_dbdma_put_dest(u32 chanid, dma_addr_t buf, int nbytes, u32 flags)
+{
+ chan_tab_t *ctp;
+ au1x_ddma_desc_t *dp;
+
+ /* I guess we could check this to be within the
+ * range of the table......
+ */
+ ctp = *((chan_tab_t **)chanid);
+
+	/* We may have multiple callers for a particular channel, but an
+	 * interrupt doesn't affect this pointer or the descriptor, so no
+	 * locking should be needed.
+ */
+ dp = ctp->put_ptr;
+
+ /* If the descriptor is valid, we are way ahead of the DMA
+ * engine, so just return an error condition.
+ */
+ if (dp->dscr_cmd0 & DSCR_CMD0_V)
+ return 0;
+
+ /* Load up buffer address and byte count */
+
+ /* Check flags */
+ if (flags & DDMA_FLAGS_IE)
+ dp->dscr_cmd0 |= DSCR_CMD0_IE;
+ if (flags & DDMA_FLAGS_NOIE)
+ dp->dscr_cmd0 &= ~DSCR_CMD0_IE;
+
+ dp->dscr_dest0 = buf & ~0UL;
+ dp->dscr_cmd1 = nbytes;
+#if 0
+ printk(KERN_DEBUG "cmd0:%x cmd1:%x source0:%x source1:%x dest0:%x dest1:%x\n",
+ dp->dscr_cmd0, dp->dscr_cmd1, dp->dscr_source0,
+ dp->dscr_source1, dp->dscr_dest0, dp->dscr_dest1);
+#endif
+ /*
+	 * There is an erratum on the Au1200/Au1550 parts that could result in
+	 * "stale" data being DMA'ed. It has to do with the snoop logic on the
+	 * cache eviction buffer. DMA_NONCOHERENT is on by default for these
+	 * parts. If it is fixed in the future, these dma_cache_inv calls will
+	 * just be empty macros. See io.h.
+ */
+ dma_cache_inv((unsigned long)buf, nbytes);
+ dp->dscr_cmd0 |= DSCR_CMD0_V; /* Let it rip */
+ au_sync();
+ dma_cache_wback_inv((unsigned long)dp, sizeof(*dp));
+ ctp->chan_ptr->ddma_dbell = 0;
+
+ /* Get next descriptor pointer. */
+ ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
+
+ /* Return something non-zero. */
+ return nbytes;
+}
+EXPORT_SYMBOL(au1xxx_dbdma_put_dest);
+
+/*
+ * Get a destination buffer into the DMA ring.
+ * Normally used to get a full buffer from the ring during fifo
+ * to memory transfers. This does not set the valid bit, you will
+ * have to put another destination buffer to keep the DMA going.
+ */
+u32 au1xxx_dbdma_get_dest(u32 chanid, void **buf, int *nbytes)
+{
+ chan_tab_t *ctp;
+ au1x_ddma_desc_t *dp;
+ u32 rv;
+
+ /*
+ * I guess we could check this to be within the
+ * range of the table......
+ */
+ ctp = *((chan_tab_t **)chanid);
+
+ /*
+	 * We may have multiple callers for a particular channel, but an
+	 * interrupt doesn't affect this pointer or the descriptor, so no
+	 * locking should be needed.
+ */
+ dp = ctp->get_ptr;
+
+ /*
+ * If the descriptor is valid, we are way ahead of the DMA
+ * engine, so just return an error condition.
+ */
+ if (dp->dscr_cmd0 & DSCR_CMD0_V)
+ return 0;
+
+ /* Return buffer address and byte count. */
+ *buf = (void *)(phys_to_virt(dp->dscr_dest0));
+ *nbytes = dp->dscr_cmd1;
+ rv = dp->dscr_stat;
+
+ /* Get next descriptor pointer. */
+ ctp->get_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
+
+ /* Return something non-zero. */
+ return rv;
+}
+EXPORT_SYMBOL_GPL(au1xxx_dbdma_get_dest);
+
+void au1xxx_dbdma_stop(u32 chanid)
+{
+ chan_tab_t *ctp;
+ au1x_dma_chan_t *cp;
+ int halt_timeout = 0;
+
+ ctp = *((chan_tab_t **)chanid);
+
+ cp = ctp->chan_ptr;
+ cp->ddma_cfg &= ~DDMA_CFG_EN; /* Disable channel */
+ au_sync();
+ while (!(cp->ddma_stat & DDMA_STAT_H)) {
+ udelay(1);
+ halt_timeout++;
+ if (halt_timeout > 100) {
+ printk(KERN_WARNING "warning: DMA channel won't halt\n");
+ break;
+ }
+ }
+ /* clear current desc valid and doorbell */
+ cp->ddma_stat |= (DDMA_STAT_DB | DDMA_STAT_V);
+ au_sync();
+}
+EXPORT_SYMBOL(au1xxx_dbdma_stop);
+
+/*
+ * Start using the current descriptor pointer. If the DBDMA encounters
+ * a non-valid descriptor, it will stop. In this case, we can just
+ * continue by adding a buffer to the list and starting again.
+ */
+void au1xxx_dbdma_start(u32 chanid)
+{
+ chan_tab_t *ctp;
+ au1x_dma_chan_t *cp;
+
+ ctp = *((chan_tab_t **)chanid);
+ cp = ctp->chan_ptr;
+ cp->ddma_desptr = virt_to_phys(ctp->cur_ptr);
+ cp->ddma_cfg |= DDMA_CFG_EN; /* Enable channel */
+ au_sync();
+ cp->ddma_dbell = 0;
+ au_sync();
+}
+EXPORT_SYMBOL(au1xxx_dbdma_start);
+
+void au1xxx_dbdma_reset(u32 chanid)
+{
+ chan_tab_t *ctp;
+ au1x_ddma_desc_t *dp;
+
+ au1xxx_dbdma_stop(chanid);
+
+ ctp = *((chan_tab_t **)chanid);
+ ctp->get_ptr = ctp->put_ptr = ctp->cur_ptr = ctp->chan_desc_base;
+
+ /* Run through the descriptors and reset the valid indicator. */
+ dp = ctp->chan_desc_base;
+
+ do {
+ dp->dscr_cmd0 &= ~DSCR_CMD0_V;
+ /*
+		 * Reset our software status -- this is used to determine
+		 * if a descriptor is in use by upper-level software, since
+		 * posting can reset the 'V' bit.
+ */
+ dp->sw_status = 0;
+ dp = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
+ } while (dp != ctp->chan_desc_base);
+}
+EXPORT_SYMBOL(au1xxx_dbdma_reset);
+
+u32 au1xxx_get_dma_residue(u32 chanid)
+{
+ chan_tab_t *ctp;
+ au1x_dma_chan_t *cp;
+ u32 rv;
+
+ ctp = *((chan_tab_t **)chanid);
+ cp = ctp->chan_ptr;
+
+ /* This is only valid if the channel is stopped. */
+ rv = cp->ddma_bytecnt;
+ au_sync();
+
+ return rv;
+}
+EXPORT_SYMBOL_GPL(au1xxx_get_dma_residue);
+
+void au1xxx_dbdma_chan_free(u32 chanid)
+{
+ chan_tab_t *ctp;
+ dbdev_tab_t *stp, *dtp;
+
+ ctp = *((chan_tab_t **)chanid);
+ stp = ctp->chan_src;
+ dtp = ctp->chan_dest;
+
+ au1xxx_dbdma_stop(chanid);
+
+ kfree((void *)ctp->cdb_membase);
+
+ stp->dev_flags &= ~DEV_FLAGS_INUSE;
+ dtp->dev_flags &= ~DEV_FLAGS_INUSE;
+ chan_tab_ptr[ctp->chan_index] = NULL;
+
+ kfree(ctp);
+}
+EXPORT_SYMBOL(au1xxx_dbdma_chan_free);
+
+static irqreturn_t dbdma_interrupt(int irq, void *dev_id)
+{
+ u32 intstat;
+ u32 chan_index;
+ chan_tab_t *ctp;
+ au1x_ddma_desc_t *dp;
+ au1x_dma_chan_t *cp;
+
+ intstat = dbdma_gptr->ddma_intstat;
+ au_sync();
+ chan_index = __ffs(intstat);
+
+ ctp = chan_tab_ptr[chan_index];
+ cp = ctp->chan_ptr;
+ dp = ctp->cur_ptr;
+
+ /* Reset interrupt. */
+ cp->ddma_irq = 0;
+ au_sync();
+
+ if (ctp->chan_callback)
+ ctp->chan_callback(irq, ctp->chan_callparam);
+
+ ctp->cur_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
+ return IRQ_RETVAL(1);
+}
+
+void au1xxx_dbdma_dump(u32 chanid)
+{
+ chan_tab_t *ctp;
+ au1x_ddma_desc_t *dp;
+ dbdev_tab_t *stp, *dtp;
+ au1x_dma_chan_t *cp;
+ u32 i = 0;
+
+ ctp = *((chan_tab_t **)chanid);
+ stp = ctp->chan_src;
+ dtp = ctp->chan_dest;
+ cp = ctp->chan_ptr;
+
+ printk(KERN_DEBUG "Chan %x, stp %x (dev %d) dtp %x (dev %d)\n",
+ (u32)ctp, (u32)stp, stp - dbdev_tab, (u32)dtp,
+ dtp - dbdev_tab);
+ printk(KERN_DEBUG "desc base %x, get %x, put %x, cur %x\n",
+ (u32)(ctp->chan_desc_base), (u32)(ctp->get_ptr),
+ (u32)(ctp->put_ptr), (u32)(ctp->cur_ptr));
+
+ printk(KERN_DEBUG "dbdma chan %x\n", (u32)cp);
+ printk(KERN_DEBUG "cfg %08x, desptr %08x, statptr %08x\n",
+ cp->ddma_cfg, cp->ddma_desptr, cp->ddma_statptr);
+ printk(KERN_DEBUG "dbell %08x, irq %08x, stat %08x, bytecnt %08x\n",
+ cp->ddma_dbell, cp->ddma_irq, cp->ddma_stat,
+ cp->ddma_bytecnt);
+
+ /* Run through the descriptors */
+ dp = ctp->chan_desc_base;
+
+ do {
+ printk(KERN_DEBUG "Dp[%d]= %08x, cmd0 %08x, cmd1 %08x\n",
+ i++, (u32)dp, dp->dscr_cmd0, dp->dscr_cmd1);
+ printk(KERN_DEBUG "src0 %08x, src1 %08x, dest0 %08x, dest1 %08x\n",
+ dp->dscr_source0, dp->dscr_source1,
+ dp->dscr_dest0, dp->dscr_dest1);
+ printk(KERN_DEBUG "stat %08x, nxtptr %08x\n",
+ dp->dscr_stat, dp->dscr_nxtptr);
+ dp = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
+ } while (dp != ctp->chan_desc_base);
+}
+
+/* Put a descriptor into the DMA ring.
+ * This updates the source/destination pointers and byte count.
+ */
+u32 au1xxx_dbdma_put_dscr(u32 chanid, au1x_ddma_desc_t *dscr)
+{
+ chan_tab_t *ctp;
+ au1x_ddma_desc_t *dp;
+ u32 nbytes = 0;
+
+ /*
+ * I guess we could check this to be within the
+ * range of the table......
+ */
+ ctp = *((chan_tab_t **)chanid);
+
+ /*
+	 * We may have multiple callers for a particular channel, but an
+	 * interrupt doesn't affect this pointer or the descriptor, so no
+	 * locking should be needed.
+ */
+ dp = ctp->put_ptr;
+
+ /*
+ * If the descriptor is valid, we are way ahead of the DMA
+ * engine, so just return an error condition.
+ */
+ if (dp->dscr_cmd0 & DSCR_CMD0_V)
+ return 0;
+
+ /* Load up buffer addresses and byte count. */
+ dp->dscr_dest0 = dscr->dscr_dest0;
+ dp->dscr_source0 = dscr->dscr_source0;
+ dp->dscr_dest1 = dscr->dscr_dest1;
+ dp->dscr_source1 = dscr->dscr_source1;
+ dp->dscr_cmd1 = dscr->dscr_cmd1;
+ nbytes = dscr->dscr_cmd1;
+	/* Allow the caller to specify whether an interrupt is generated */
+ dp->dscr_cmd0 &= ~DSCR_CMD0_IE;
+ dp->dscr_cmd0 |= dscr->dscr_cmd0 | DSCR_CMD0_V;
+ ctp->chan_ptr->ddma_dbell = 0;
+
+ /* Get next descriptor pointer. */
+ ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
+
+ /* Return something non-zero. */
+ return nbytes;
+}
+
+
+static unsigned long alchemy_dbdma_pm_data[NUM_DBDMA_CHANS + 1][6];
+
+static int alchemy_dbdma_suspend(void)
+{
+ int i;
+ void __iomem *addr;
+
+ addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_CONF_PHYS_ADDR);
+ alchemy_dbdma_pm_data[0][0] = __raw_readl(addr + 0x00);
+ alchemy_dbdma_pm_data[0][1] = __raw_readl(addr + 0x04);
+ alchemy_dbdma_pm_data[0][2] = __raw_readl(addr + 0x08);
+ alchemy_dbdma_pm_data[0][3] = __raw_readl(addr + 0x0c);
+
+ /* save channel configurations */
+ addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_PHYS_ADDR);
+ for (i = 1; i <= NUM_DBDMA_CHANS; i++) {
+ alchemy_dbdma_pm_data[i][0] = __raw_readl(addr + 0x00);
+ alchemy_dbdma_pm_data[i][1] = __raw_readl(addr + 0x04);
+ alchemy_dbdma_pm_data[i][2] = __raw_readl(addr + 0x08);
+ alchemy_dbdma_pm_data[i][3] = __raw_readl(addr + 0x0c);
+ alchemy_dbdma_pm_data[i][4] = __raw_readl(addr + 0x10);
+ alchemy_dbdma_pm_data[i][5] = __raw_readl(addr + 0x14);
+
+ /* halt channel */
+ __raw_writel(alchemy_dbdma_pm_data[i][0] & ~1, addr + 0x00);
+ wmb();
+ while (!(__raw_readl(addr + 0x14) & 1))
+ wmb();
+
+ addr += 0x100; /* next channel base */
+ }
+ /* disable channel interrupts */
+ addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_CONF_PHYS_ADDR);
+ __raw_writel(0, addr + 0x0c);
+ wmb();
+
+ return 0;
+}
+
+static void alchemy_dbdma_resume(void)
+{
+ int i;
+ void __iomem *addr;
+
+ addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_CONF_PHYS_ADDR);
+ __raw_writel(alchemy_dbdma_pm_data[0][0], addr + 0x00);
+ __raw_writel(alchemy_dbdma_pm_data[0][1], addr + 0x04);
+ __raw_writel(alchemy_dbdma_pm_data[0][2], addr + 0x08);
+ __raw_writel(alchemy_dbdma_pm_data[0][3], addr + 0x0c);
+
+ /* restore channel configurations */
+ addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_PHYS_ADDR);
+ for (i = 1; i <= NUM_DBDMA_CHANS; i++) {
+ __raw_writel(alchemy_dbdma_pm_data[i][0], addr + 0x00);
+ __raw_writel(alchemy_dbdma_pm_data[i][1], addr + 0x04);
+ __raw_writel(alchemy_dbdma_pm_data[i][2], addr + 0x08);
+ __raw_writel(alchemy_dbdma_pm_data[i][3], addr + 0x0c);
+ __raw_writel(alchemy_dbdma_pm_data[i][4], addr + 0x10);
+ __raw_writel(alchemy_dbdma_pm_data[i][5], addr + 0x14);
+ wmb();
+ addr += 0x100; /* next channel base */
+ }
+}
+
+static struct syscore_ops alchemy_dbdma_syscore_ops = {
+ .suspend = alchemy_dbdma_suspend,
+ .resume = alchemy_dbdma_resume,
+};
+
+static int __init dbdma_setup(unsigned int irq, dbdev_tab_t *idtable)
+{
+ int ret;
+
+ dbdev_tab = kzalloc(sizeof(dbdev_tab_t) * DBDEV_TAB_SIZE, GFP_KERNEL);
+ if (!dbdev_tab)
+ return -ENOMEM;
+
+ memcpy(dbdev_tab, idtable, 32 * sizeof(dbdev_tab_t));
+ for (ret = 32; ret < DBDEV_TAB_SIZE; ret++)
+ dbdev_tab[ret].dev_id = ~0;
+
+ dbdma_gptr->ddma_config = 0;
+ dbdma_gptr->ddma_throttle = 0;
+ dbdma_gptr->ddma_inten = 0xffff;
+ au_sync();
+
+ ret = request_irq(irq, dbdma_interrupt, 0, "dbdma", (void *)dbdma_gptr);
+ if (ret)
+ printk(KERN_ERR "Cannot grab DBDMA interrupt!\n");
+ else {
+ dbdma_initialized = 1;
+ register_syscore_ops(&alchemy_dbdma_syscore_ops);
+ }
+
+ return ret;
+}
+
+static int __init alchemy_dbdma_init(void)
+{
+ switch (alchemy_get_cputype()) {
+ case ALCHEMY_CPU_AU1550:
+ return dbdma_setup(AU1550_DDMA_INT, au1550_dbdev_tab);
+ case ALCHEMY_CPU_AU1200:
+ return dbdma_setup(AU1200_DDMA_INT, au1200_dbdev_tab);
+ case ALCHEMY_CPU_AU1300:
+ return dbdma_setup(AU1300_DDMA_INT, au1300_dbdev_tab);
+ }
+ return 0;
+}
+subsys_initcall(alchemy_dbdma_init);
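
The DBDMA API above is driven entirely through the chanid handle returned by au1xxx_dbdma_chan_alloc(). A minimal usage sketch (not from this patch; the device IDs, FIFO width and descriptor count are illustrative assumptions) for a memory-to-PSC0-TX transfer on an Au1550 might look like this:

static void my_tx_done(int irq, void *param)
{
	/* invoked from dbdma_interrupt() when a descriptor completes */
}

static int my_tx_setup(dma_addr_t buf, int nbytes)
{
	u32 chan;

	/* memory source (ALWAYS) feeding the PSC0 transmit FIFO */
	chan = au1xxx_dbdma_chan_alloc(DSCR_CMD0_ALWAYS,
				       AU1550_DSCR_CMD0_PSC0_TX,
				       my_tx_done, NULL);
	if (!chan)
		return -ENODEV;

	au1xxx_dbdma_set_devwidth(chan, 16);	/* assumed 16-bit FIFO */

	if (!au1xxx_dbdma_ring_alloc(chan, 8)) {	/* 8 descriptors */
		au1xxx_dbdma_chan_free(chan);
		return -ENOMEM;
	}

	au1xxx_dbdma_put_source(chan, buf, nbytes, DDMA_FLAGS_IE);
	au1xxx_dbdma_start(chan);
	return 0;
}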
diff --git a/arch/mips/alchemy/common/dma.c b/arch/mips/alchemy/common/dma.c
new file mode 100644
index 00000000..9b624e2c
--- /dev/null
+++ b/arch/mips/alchemy/common/dma.c
@@ -0,0 +1,264 @@
+/*
+ *
+ * BRIEF MODULE DESCRIPTION
+ * A DMA channel allocator for Au1x00. API is modeled loosely off of
+ * linux/kernel/dma.c.
+ *
+ * Copyright 2000, 2008 MontaVista Software Inc.
+ * Author: MontaVista Software, Inc. <source@mvista.com>
+ * Copyright (C) 2005 Ralf Baechle (ralf@linux-mips.org)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+
+#include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-au1x00/au1000_dma.h>
+
+/*
+ * A note on resource allocation:
+ *
+ * All drivers needing DMA channels, should allocate and release them
+ * through the public routines `request_dma()' and `free_dma()'.
+ *
+ * In order to avoid problems, all processes should allocate resources in
+ * the same sequence and release them in the reverse order.
+ *
+ * So, when allocating DMAs and IRQs, first allocate the DMA, then the IRQ.
+ * When releasing them, first release the IRQ, then release the DMA. The
+ * main reason for this order is that, if you are requesting the DMA buffer
+ * done interrupt, you won't know the irq number until the DMA channel is
+ * returned from request_dma.
+ */
+
+/* DMA Channel register block spacing */
+#define DMA_CHANNEL_LEN 0x00000100
+
+DEFINE_SPINLOCK(au1000_dma_spin_lock);
+
+struct dma_chan au1000_dma_table[NUM_AU1000_DMA_CHANNELS] = {
+ {.dev_id = -1,},
+ {.dev_id = -1,},
+ {.dev_id = -1,},
+ {.dev_id = -1,},
+ {.dev_id = -1,},
+ {.dev_id = -1,},
+ {.dev_id = -1,},
+ {.dev_id = -1,}
+};
+EXPORT_SYMBOL(au1000_dma_table);
+
+/* Device FIFO addresses and default DMA modes */
+static const struct dma_dev {
+ unsigned int fifo_addr;
+ unsigned int dma_mode;
+} dma_dev_table[DMA_NUM_DEV] = {
+ { AU1000_UART0_PHYS_ADDR + 0x04, DMA_DW8 }, /* UART0_TX */
+ { AU1000_UART0_PHYS_ADDR + 0x00, DMA_DW8 | DMA_DR }, /* UART0_RX */
+ { 0, 0 }, /* DMA_REQ0 */
+ { 0, 0 }, /* DMA_REQ1 */
+ { AU1000_AC97_PHYS_ADDR + 0x08, DMA_DW16 }, /* AC97 TX c */
+ { AU1000_AC97_PHYS_ADDR + 0x08, DMA_DW16 | DMA_DR }, /* AC97 RX c */
+ { AU1000_UART3_PHYS_ADDR + 0x04, DMA_DW8 | DMA_NC }, /* UART3_TX */
+ { AU1000_UART3_PHYS_ADDR + 0x00, DMA_DW8 | DMA_NC | DMA_DR }, /* UART3_RX */
+ { AU1000_USB_UDC_PHYS_ADDR + 0x00, DMA_DW8 | DMA_NC | DMA_DR }, /* EP0RD */
+ { AU1000_USB_UDC_PHYS_ADDR + 0x04, DMA_DW8 | DMA_NC }, /* EP0WR */
+ { AU1000_USB_UDC_PHYS_ADDR + 0x08, DMA_DW8 | DMA_NC }, /* EP2WR */
+ { AU1000_USB_UDC_PHYS_ADDR + 0x0c, DMA_DW8 | DMA_NC }, /* EP3WR */
+ { AU1000_USB_UDC_PHYS_ADDR + 0x10, DMA_DW8 | DMA_NC | DMA_DR }, /* EP4RD */
+ { AU1000_USB_UDC_PHYS_ADDR + 0x14, DMA_DW8 | DMA_NC | DMA_DR }, /* EP5RD */
+ /* on Au1500, these 2 are DMA_REQ2/3 (GPIO208/209) instead! */
+ { AU1000_I2S_PHYS_ADDR + 0x00, DMA_DW32 | DMA_NC}, /* I2S TX */
+ { AU1000_I2S_PHYS_ADDR + 0x00, DMA_DW32 | DMA_NC | DMA_DR}, /* I2S RX */
+};
+
+int au1000_dma_read_proc(char *buf, char **start, off_t fpos,
+ int length, int *eof, void *data)
+{
+ int i, len = 0;
+ struct dma_chan *chan;
+
+ for (i = 0; i < NUM_AU1000_DMA_CHANNELS; i++) {
+ chan = get_dma_chan(i);
+ if (chan != NULL)
+ len += sprintf(buf + len, "%2d: %s\n",
+ i, chan->dev_str);
+ }
+
+ if (fpos >= len) {
+ *start = buf;
+ *eof = 1;
+ return 0;
+ }
+ *start = buf + fpos;
+ len -= fpos;
+ if (len > length)
+ return length;
+ *eof = 1;
+ return len;
+}
+
+/* Device FIFO addresses and default DMA modes - 2nd bank */
+static const struct dma_dev dma_dev_table_bank2[DMA_NUM_DEV_BANK2] = {
+ { AU1100_SD0_PHYS_ADDR + 0x00, DMA_DS | DMA_DW8 }, /* coherent */
+ { AU1100_SD0_PHYS_ADDR + 0x04, DMA_DS | DMA_DW8 | DMA_DR }, /* coherent */
+ { AU1100_SD1_PHYS_ADDR + 0x00, DMA_DS | DMA_DW8 }, /* coherent */
+ { AU1100_SD1_PHYS_ADDR + 0x04, DMA_DS | DMA_DW8 | DMA_DR } /* coherent */
+};
+
+void dump_au1000_dma_channel(unsigned int dmanr)
+{
+ struct dma_chan *chan;
+
+ if (dmanr >= NUM_AU1000_DMA_CHANNELS)
+ return;
+ chan = &au1000_dma_table[dmanr];
+
+ printk(KERN_INFO "Au1000 DMA%d Register Dump:\n", dmanr);
+ printk(KERN_INFO " mode = 0x%08x\n",
+ au_readl(chan->io + DMA_MODE_SET));
+ printk(KERN_INFO " addr = 0x%08x\n",
+ au_readl(chan->io + DMA_PERIPHERAL_ADDR));
+ printk(KERN_INFO " start0 = 0x%08x\n",
+ au_readl(chan->io + DMA_BUFFER0_START));
+ printk(KERN_INFO " start1 = 0x%08x\n",
+ au_readl(chan->io + DMA_BUFFER1_START));
+ printk(KERN_INFO " count0 = 0x%08x\n",
+ au_readl(chan->io + DMA_BUFFER0_COUNT));
+ printk(KERN_INFO " count1 = 0x%08x\n",
+ au_readl(chan->io + DMA_BUFFER1_COUNT));
+}
+
+/*
+ * Finds a free channel, and binds the requested device to it.
+ * Returns the allocated channel number, or negative on error.
+ * Requests the DMA done IRQ if irqhandler != NULL.
+ */
+int request_au1000_dma(int dev_id, const char *dev_str,
+ irq_handler_t irqhandler,
+ unsigned long irqflags,
+ void *irq_dev_id)
+{
+ struct dma_chan *chan;
+ const struct dma_dev *dev;
+ int i, ret;
+
+ if (alchemy_get_cputype() == ALCHEMY_CPU_AU1100) {
+ if (dev_id < 0 || dev_id >= (DMA_NUM_DEV + DMA_NUM_DEV_BANK2))
+ return -EINVAL;
+ } else {
+ if (dev_id < 0 || dev_id >= DMA_NUM_DEV)
+ return -EINVAL;
+ }
+
+ for (i = 0; i < NUM_AU1000_DMA_CHANNELS; i++)
+ if (au1000_dma_table[i].dev_id < 0)
+ break;
+
+ if (i == NUM_AU1000_DMA_CHANNELS)
+ return -ENODEV;
+
+ chan = &au1000_dma_table[i];
+
+ if (dev_id >= DMA_NUM_DEV) {
+ dev_id -= DMA_NUM_DEV;
+ dev = &dma_dev_table_bank2[dev_id];
+ } else
+ dev = &dma_dev_table[dev_id];
+
+ if (irqhandler) {
+ chan->irq_dev = irq_dev_id;
+ ret = request_irq(chan->irq, irqhandler, irqflags, dev_str,
+ chan->irq_dev);
+ if (ret) {
+ chan->irq_dev = NULL;
+ return ret;
+ }
+ } else {
+ chan->irq_dev = NULL;
+ }
+
+ /* fill it in */
+ chan->io = KSEG1ADDR(AU1000_DMA_PHYS_ADDR) + i * DMA_CHANNEL_LEN;
+ chan->dev_id = dev_id;
+ chan->dev_str = dev_str;
+ chan->fifo_addr = dev->fifo_addr;
+ chan->mode = dev->dma_mode;
+
+ /* initialize the channel before returning */
+ init_dma(i);
+
+ return i;
+}
+EXPORT_SYMBOL(request_au1000_dma);
+
+void free_au1000_dma(unsigned int dmanr)
+{
+ struct dma_chan *chan = get_dma_chan(dmanr);
+
+ if (!chan) {
+ printk(KERN_ERR "Error trying to free DMA%d\n", dmanr);
+ return;
+ }
+
+ disable_dma(dmanr);
+ if (chan->irq_dev)
+ free_irq(chan->irq, chan->irq_dev);
+
+ chan->irq_dev = NULL;
+ chan->dev_id = -1;
+}
+EXPORT_SYMBOL(free_au1000_dma);
+
+static int __init au1000_dma_init(void)
+{
+ int base, i;
+
+ switch (alchemy_get_cputype()) {
+ case ALCHEMY_CPU_AU1000:
+ base = AU1000_DMA_INT_BASE;
+ break;
+ case ALCHEMY_CPU_AU1500:
+ base = AU1500_DMA_INT_BASE;
+ break;
+ case ALCHEMY_CPU_AU1100:
+ base = AU1100_DMA_INT_BASE;
+ break;
+ default:
+ goto out;
+ }
+
+ for (i = 0; i < NUM_AU1000_DMA_CHANNELS; i++)
+ au1000_dma_table[i].irq = base + i;
+
+ printk(KERN_INFO "Alchemy DMA initialized\n");
+
+out:
+ return 0;
+}
+arch_initcall(au1000_dma_init);
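
For the bank-style controller above, a driver follows the allocation order from the comment block: request the channel (which also registers the completion IRQ when a handler is passed) and let free_au1000_dma() disable the channel and release that IRQ again. A hedged sketch, with MY_DMA_DEV_ID and my_dma_isr as hypothetical placeholders:

static irqreturn_t my_dma_isr(int irq, void *dev_id)
{
	/* DMA buffer-done handling would go here */
	return IRQ_HANDLED;
}

static int my_dma_probe(void)
{
	int ch;

	/* request_au1000_dma() picks a free channel, binds the device's
	 * FIFO address/mode and calls init_dma() before returning */
	ch = request_au1000_dma(MY_DMA_DEV_ID, "mydev", my_dma_isr, 0, NULL);
	if (ch < 0)
		return ch;		/* -EINVAL or -ENODEV */

	/* ... program buffers and enable the channel ... */

	free_au1000_dma(ch);		/* disables DMA, frees the IRQ */
	return 0;
}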
diff --git a/arch/mips/alchemy/common/gpiolib.c b/arch/mips/alchemy/common/gpiolib.c
new file mode 100644
index 00000000..f1b50f0c
--- /dev/null
+++ b/arch/mips/alchemy/common/gpiolib.c
@@ -0,0 +1,175 @@
+/*
+ * Copyright (C) 2007-2009, OpenWrt.org, Florian Fainelli <florian@openwrt.org>
+ * GPIOLIB support for Alchemy chips.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Notes :
+ * This file must ONLY be built when CONFIG_GPIOLIB=y and
+ * CONFIG_ALCHEMY_GPIO_INDIRECT=n, otherwise compilation will fail!
+ * The Au1000 SoC has only one GPIO block: GPIO1.
+ * Au1100, Au15x0 and Au12x0 have a second one: GPIO2.
+ * Au1300 is totally different: 1 block with up to 128 GPIOs
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/gpio.h>
+#include <asm/mach-au1x00/gpio-au1000.h>
+#include <asm/mach-au1x00/gpio-au1300.h>
+
+static int gpio2_get(struct gpio_chip *chip, unsigned offset)
+{
+ return alchemy_gpio2_get_value(offset + ALCHEMY_GPIO2_BASE);
+}
+
+static void gpio2_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+ alchemy_gpio2_set_value(offset + ALCHEMY_GPIO2_BASE, value);
+}
+
+static int gpio2_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+ return alchemy_gpio2_direction_input(offset + ALCHEMY_GPIO2_BASE);
+}
+
+static int gpio2_direction_output(struct gpio_chip *chip, unsigned offset,
+ int value)
+{
+ return alchemy_gpio2_direction_output(offset + ALCHEMY_GPIO2_BASE,
+ value);
+}
+
+static int gpio2_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ return alchemy_gpio2_to_irq(offset + ALCHEMY_GPIO2_BASE);
+}
+
+
+static int gpio1_get(struct gpio_chip *chip, unsigned offset)
+{
+ return alchemy_gpio1_get_value(offset + ALCHEMY_GPIO1_BASE);
+}
+
+static void gpio1_set(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ alchemy_gpio1_set_value(offset + ALCHEMY_GPIO1_BASE, value);
+}
+
+static int gpio1_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+ return alchemy_gpio1_direction_input(offset + ALCHEMY_GPIO1_BASE);
+}
+
+static int gpio1_direction_output(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ return alchemy_gpio1_direction_output(offset + ALCHEMY_GPIO1_BASE,
+ value);
+}
+
+static int gpio1_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ return alchemy_gpio1_to_irq(offset + ALCHEMY_GPIO1_BASE);
+}
+
+struct gpio_chip alchemy_gpio_chip[] = {
+ [0] = {
+ .label = "alchemy-gpio1",
+ .direction_input = gpio1_direction_input,
+ .direction_output = gpio1_direction_output,
+ .get = gpio1_get,
+ .set = gpio1_set,
+ .to_irq = gpio1_to_irq,
+ .base = ALCHEMY_GPIO1_BASE,
+ .ngpio = ALCHEMY_GPIO1_NUM,
+ },
+ [1] = {
+ .label = "alchemy-gpio2",
+ .direction_input = gpio2_direction_input,
+ .direction_output = gpio2_direction_output,
+ .get = gpio2_get,
+ .set = gpio2_set,
+ .to_irq = gpio2_to_irq,
+ .base = ALCHEMY_GPIO2_BASE,
+ .ngpio = ALCHEMY_GPIO2_NUM,
+ },
+};
+
+static int alchemy_gpic_get(struct gpio_chip *chip, unsigned int off)
+{
+ return au1300_gpio_get_value(off + AU1300_GPIO_BASE);
+}
+
+static void alchemy_gpic_set(struct gpio_chip *chip, unsigned int off, int v)
+{
+ au1300_gpio_set_value(off + AU1300_GPIO_BASE, v);
+}
+
+static int alchemy_gpic_dir_input(struct gpio_chip *chip, unsigned int off)
+{
+ return au1300_gpio_direction_input(off + AU1300_GPIO_BASE);
+}
+
+static int alchemy_gpic_dir_output(struct gpio_chip *chip, unsigned int off,
+ int v)
+{
+ return au1300_gpio_direction_output(off + AU1300_GPIO_BASE, v);
+}
+
+static int alchemy_gpic_gpio_to_irq(struct gpio_chip *chip, unsigned int off)
+{
+ return au1300_gpio_to_irq(off + AU1300_GPIO_BASE);
+}
+
+static struct gpio_chip au1300_gpiochip = {
+ .label = "alchemy-gpic",
+ .direction_input = alchemy_gpic_dir_input,
+ .direction_output = alchemy_gpic_dir_output,
+ .get = alchemy_gpic_get,
+ .set = alchemy_gpic_set,
+ .to_irq = alchemy_gpic_gpio_to_irq,
+ .base = AU1300_GPIO_BASE,
+ .ngpio = AU1300_GPIO_NUM,
+};
+
+static int __init alchemy_gpiochip_init(void)
+{
+ int ret = 0;
+
+ switch (alchemy_get_cputype()) {
+ case ALCHEMY_CPU_AU1000:
+ ret = gpiochip_add(&alchemy_gpio_chip[0]);
+ break;
+ case ALCHEMY_CPU_AU1500...ALCHEMY_CPU_AU1200:
+ ret = gpiochip_add(&alchemy_gpio_chip[0]);
+ ret |= gpiochip_add(&alchemy_gpio_chip[1]);
+ break;
+ case ALCHEMY_CPU_AU1300:
+ ret = gpiochip_add(&au1300_gpiochip);
+ break;
+ }
+ return ret;
+}
+arch_initcall(alchemy_gpiochip_init);
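
Once these chips are registered, consumers go through the generic gpiolib layer rather than calling the alchemy_gpio*/au1300_gpio* helpers directly. A hedged sketch (GPIO 200 is an arbitrary pin assumed to fall in the GPIO2 bank; the label is made up):

#include <linux/gpio.h>

static int my_led_init(void)
{
	int ret;

	ret = gpio_request(200, "my-led");	/* pin in the GPIO2 bank */
	if (ret)
		return ret;

	gpio_direction_output(200, 1);		/* drive it high */
	gpio_set_value(200, 0);			/* ... later, drive it low */
	gpio_free(200);
	return 0;
}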
diff --git a/arch/mips/alchemy/common/irq.c b/arch/mips/alchemy/common/irq.c
new file mode 100644
index 00000000..94fbcd19
--- /dev/null
+++ b/arch/mips/alchemy/common/irq.c
@@ -0,0 +1,997 @@
+/*
+ * Copyright 2001, 2007-2008 MontaVista Software Inc.
+ * Author: MontaVista Software, Inc. <source@mvista.com>
+ *
+ * Copyright (C) 2007 Ralf Baechle (ralf@linux-mips.org)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/syscore_ops.h>
+
+#include <asm/irq_cpu.h>
+#include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-au1x00/gpio-au1300.h>
+
+/* Interrupt Controller register offsets */
+#define IC_CFG0RD 0x40
+#define IC_CFG0SET 0x40
+#define IC_CFG0CLR 0x44
+#define IC_CFG1RD 0x48
+#define IC_CFG1SET 0x48
+#define IC_CFG1CLR 0x4C
+#define IC_CFG2RD 0x50
+#define IC_CFG2SET 0x50
+#define IC_CFG2CLR 0x54
+#define IC_REQ0INT 0x54
+#define IC_SRCRD 0x58
+#define IC_SRCSET 0x58
+#define IC_SRCCLR 0x5C
+#define IC_REQ1INT 0x5C
+#define IC_ASSIGNRD 0x60
+#define IC_ASSIGNSET 0x60
+#define IC_ASSIGNCLR 0x64
+#define IC_WAKERD 0x68
+#define IC_WAKESET 0x68
+#define IC_WAKECLR 0x6C
+#define IC_MASKRD 0x70
+#define IC_MASKSET 0x70
+#define IC_MASKCLR 0x74
+#define IC_RISINGRD 0x78
+#define IC_RISINGCLR 0x78
+#define IC_FALLINGRD 0x7C
+#define IC_FALLINGCLR 0x7C
+#define IC_TESTBIT 0x80
+
+/* per-processor fixed function irqs */
+struct alchemy_irqmap {
+ int irq; /* linux IRQ number */
+ int type; /* IRQ_TYPE_ */
+ int prio; /* irq priority, 0 highest, 3 lowest */
+ int internal; /* GPIC: internal source (no ext. pin)? */
+};
+
+static int au1x_ic_settype(struct irq_data *d, unsigned int type);
+static int au1300_gpic_settype(struct irq_data *d, unsigned int type);
+
+
+/* NOTE on interrupt priorities: The original writers of this code said:
+ *
+ * Because of the tight timing of SETUP token to reply transactions,
+ * the USB devices-side packet complete interrupt (USB_DEV_REQ_INT)
+ * needs the highest priority.
+ */
+struct alchemy_irqmap au1000_irqmap[] __initdata = {
+ { AU1000_UART0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_UART1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_UART2_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_UART3_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_SSI0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_SSI1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_DMA_INT_BASE, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_DMA_INT_BASE+1, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_DMA_INT_BASE+2, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_DMA_INT_BASE+3, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_DMA_INT_BASE+4, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_DMA_INT_BASE+5, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_DMA_INT_BASE+6, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_DMA_INT_BASE+7, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_TOY_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1000_TOY_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1000_TOY_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1000_TOY_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1000_RTC_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1000_RTC_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1000_RTC_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1000_RTC_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 0, 0 },
+ { AU1000_IRDA_TX_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_IRDA_RX_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_USB_DEV_REQ_INT, IRQ_TYPE_LEVEL_HIGH, 0, 0 },
+ { AU1000_USB_DEV_SUS_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1000_USB_HOST_INT, IRQ_TYPE_LEVEL_LOW, 1, 0 },
+ { AU1000_ACSYNC_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1000_MAC0_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_MAC1_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_AC97C_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { -1, },
+};
+
+struct alchemy_irqmap au1500_irqmap[] __initdata = {
+ { AU1500_UART0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1500_PCI_INTA, IRQ_TYPE_LEVEL_LOW, 1, 0 },
+ { AU1500_PCI_INTB, IRQ_TYPE_LEVEL_LOW, 1, 0 },
+ { AU1500_UART3_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1500_PCI_INTC, IRQ_TYPE_LEVEL_LOW, 1, 0 },
+ { AU1500_PCI_INTD, IRQ_TYPE_LEVEL_LOW, 1, 0 },
+ { AU1500_DMA_INT_BASE, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1500_DMA_INT_BASE+1, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1500_DMA_INT_BASE+2, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1500_DMA_INT_BASE+3, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1500_DMA_INT_BASE+4, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1500_DMA_INT_BASE+5, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1500_DMA_INT_BASE+6, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1500_DMA_INT_BASE+7, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1500_TOY_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1500_TOY_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1500_TOY_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1500_TOY_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1500_RTC_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1500_RTC_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1500_RTC_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1500_RTC_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 0, 0 },
+ { AU1500_USB_DEV_REQ_INT, IRQ_TYPE_LEVEL_HIGH, 0, 0 },
+ { AU1500_USB_DEV_SUS_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1500_USB_HOST_INT, IRQ_TYPE_LEVEL_LOW, 1, 0 },
+ { AU1500_ACSYNC_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1500_MAC0_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1500_MAC1_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1500_AC97C_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { -1, },
+};
+
+struct alchemy_irqmap au1100_irqmap[] __initdata = {
+ { AU1100_UART0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_UART1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_SD_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_UART3_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_SSI0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_SSI1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_DMA_INT_BASE, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_DMA_INT_BASE+1, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_DMA_INT_BASE+2, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_DMA_INT_BASE+3, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_DMA_INT_BASE+4, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_DMA_INT_BASE+5, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_DMA_INT_BASE+6, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_DMA_INT_BASE+7, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_TOY_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1100_TOY_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1100_TOY_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1100_TOY_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1100_RTC_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1100_RTC_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1100_RTC_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1100_RTC_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 0, 0 },
+ { AU1100_IRDA_TX_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_IRDA_RX_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_USB_DEV_REQ_INT, IRQ_TYPE_LEVEL_HIGH, 0, 0 },
+ { AU1100_USB_DEV_SUS_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1100_USB_HOST_INT, IRQ_TYPE_LEVEL_LOW, 1, 0 },
+ { AU1100_ACSYNC_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1100_MAC0_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_LCD_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_AC97C_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { -1, },
+};
+
+struct alchemy_irqmap au1550_irqmap[] __initdata = {
+ { AU1550_UART0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1550_PCI_INTA, IRQ_TYPE_LEVEL_LOW, 1, 0 },
+ { AU1550_PCI_INTB, IRQ_TYPE_LEVEL_LOW, 1, 0 },
+ { AU1550_DDMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1550_CRYPTO_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1550_PCI_INTC, IRQ_TYPE_LEVEL_LOW, 1, 0 },
+ { AU1550_PCI_INTD, IRQ_TYPE_LEVEL_LOW, 1, 0 },
+ { AU1550_PCI_RST_INT, IRQ_TYPE_LEVEL_LOW, 1, 0 },
+ { AU1550_UART1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1550_UART3_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1550_PSC0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1550_PSC1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1550_PSC2_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1550_PSC3_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1550_TOY_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1550_TOY_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1550_TOY_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1550_TOY_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1550_RTC_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1550_RTC_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1550_RTC_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1550_RTC_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 0, 0 },
+ { AU1550_NAND_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1550_USB_DEV_REQ_INT, IRQ_TYPE_LEVEL_HIGH, 0, 0 },
+ { AU1550_USB_DEV_SUS_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1550_USB_HOST_INT, IRQ_TYPE_LEVEL_LOW, 1, 0 },
+ { AU1550_MAC0_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1550_MAC1_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { -1, },
+};
+
+struct alchemy_irqmap au1200_irqmap[] __initdata = {
+ { AU1200_UART0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1200_SWT_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1200_SD_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1200_DDMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1200_MAE_BE_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1200_UART1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1200_MAE_FE_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1200_PSC0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1200_PSC1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1200_AES_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1200_CAMERA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1200_TOY_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1200_TOY_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1200_TOY_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1200_TOY_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1200_RTC_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1200_RTC_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1200_RTC_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1200_RTC_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 0, 0 },
+ { AU1200_NAND_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1200_USB_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1200_LCD_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1200_MAE_BOTH_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { -1, },
+};
+
+static struct alchemy_irqmap au1300_irqmap[] __initdata = {
+ /* multifunction: gpio pin or device */
+ { AU1300_UART1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0, },
+ { AU1300_UART2_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0, },
+ { AU1300_UART3_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0, },
+ { AU1300_SD1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0, },
+ { AU1300_SD2_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0, },
+ { AU1300_PSC0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0, },
+ { AU1300_PSC1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0, },
+ { AU1300_PSC2_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0, },
+ { AU1300_PSC3_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0, },
+ { AU1300_NAND_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0, },
+ /* au1300 internal */
+ { AU1300_DDMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 1, },
+ { AU1300_MMU_INT, IRQ_TYPE_LEVEL_HIGH, 1, 1, },
+ { AU1300_MPU_INT, IRQ_TYPE_LEVEL_HIGH, 1, 1, },
+ { AU1300_GPU_INT, IRQ_TYPE_LEVEL_HIGH, 1, 1, },
+ { AU1300_UDMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 1, },
+ { AU1300_TOY_INT, IRQ_TYPE_EDGE_RISING, 1, 1, },
+ { AU1300_TOY_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 1, 1, },
+ { AU1300_TOY_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 1, 1, },
+ { AU1300_TOY_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 1, 1, },
+ { AU1300_RTC_INT, IRQ_TYPE_EDGE_RISING, 1, 1, },
+ { AU1300_RTC_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 1, 1, },
+ { AU1300_RTC_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 1, 1, },
+ { AU1300_RTC_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 0, 1, },
+ { AU1300_UART0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 1, },
+ { AU1300_SD0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 1, },
+ { AU1300_USB_INT, IRQ_TYPE_LEVEL_HIGH, 1, 1, },
+ { AU1300_LCD_INT, IRQ_TYPE_LEVEL_HIGH, 1, 1, },
+ { AU1300_BSA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 1, },
+ { AU1300_MPE_INT, IRQ_TYPE_EDGE_RISING, 1, 1, },
+ { AU1300_ITE_INT, IRQ_TYPE_LEVEL_HIGH, 1, 1, },
+ { AU1300_AES_INT, IRQ_TYPE_LEVEL_HIGH, 1, 1, },
+ { AU1300_CIM_INT, IRQ_TYPE_LEVEL_HIGH, 1, 1, },
+ { -1, }, /* terminator */
+};
+
+/******************************************************************************/
+
+static void au1x_ic0_unmask(struct irq_data *d)
+{
+ unsigned int bit = d->irq - AU1000_INTC0_INT_BASE;
+ void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR);
+
+ __raw_writel(1 << bit, base + IC_MASKSET);
+ __raw_writel(1 << bit, base + IC_WAKESET);
+ wmb();
+}
+
+static void au1x_ic1_unmask(struct irq_data *d)
+{
+ unsigned int bit = d->irq - AU1000_INTC1_INT_BASE;
+ void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR);
+
+ __raw_writel(1 << bit, base + IC_MASKSET);
+ __raw_writel(1 << bit, base + IC_WAKESET);
+ wmb();
+}
+
+static void au1x_ic0_mask(struct irq_data *d)
+{
+ unsigned int bit = d->irq - AU1000_INTC0_INT_BASE;
+ void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR);
+
+ __raw_writel(1 << bit, base + IC_MASKCLR);
+ __raw_writel(1 << bit, base + IC_WAKECLR);
+ wmb();
+}
+
+static void au1x_ic1_mask(struct irq_data *d)
+{
+ unsigned int bit = d->irq - AU1000_INTC1_INT_BASE;
+ void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR);
+
+ __raw_writel(1 << bit, base + IC_MASKCLR);
+ __raw_writel(1 << bit, base + IC_WAKECLR);
+ wmb();
+}
+
+static void au1x_ic0_ack(struct irq_data *d)
+{
+ unsigned int bit = d->irq - AU1000_INTC0_INT_BASE;
+ void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR);
+
+	/*
+	 * This assumes that we don't get interrupts from
+	 * both edges at once, or that if we do, we don't care.
+	 */
+ __raw_writel(1 << bit, base + IC_FALLINGCLR);
+ __raw_writel(1 << bit, base + IC_RISINGCLR);
+ wmb();
+}
+
+static void au1x_ic1_ack(struct irq_data *d)
+{
+ unsigned int bit = d->irq - AU1000_INTC1_INT_BASE;
+ void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR);
+
+	/*
+	 * This assumes that we don't get interrupts from
+	 * both edges at once, or that if we do, we don't care.
+	 */
+ __raw_writel(1 << bit, base + IC_FALLINGCLR);
+ __raw_writel(1 << bit, base + IC_RISINGCLR);
+ wmb();
+}
+
+static void au1x_ic0_maskack(struct irq_data *d)
+{
+ unsigned int bit = d->irq - AU1000_INTC0_INT_BASE;
+ void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR);
+
+ __raw_writel(1 << bit, base + IC_WAKECLR);
+ __raw_writel(1 << bit, base + IC_MASKCLR);
+ __raw_writel(1 << bit, base + IC_RISINGCLR);
+ __raw_writel(1 << bit, base + IC_FALLINGCLR);
+ wmb();
+}
+
+static void au1x_ic1_maskack(struct irq_data *d)
+{
+ unsigned int bit = d->irq - AU1000_INTC1_INT_BASE;
+ void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR);
+
+ __raw_writel(1 << bit, base + IC_WAKECLR);
+ __raw_writel(1 << bit, base + IC_MASKCLR);
+ __raw_writel(1 << bit, base + IC_RISINGCLR);
+ __raw_writel(1 << bit, base + IC_FALLINGCLR);
+ wmb();
+}
+
+static int au1x_ic1_setwake(struct irq_data *d, unsigned int on)
+{
+ int bit = d->irq - AU1000_INTC1_INT_BASE;
+ unsigned long wakemsk, flags;
+
+	/* Only GPIOs 0-7 can act as wakeup sources. Fortunately they
+	 * are wired up identically on all supported variants.
+	 */
+ if ((bit < 0) || (bit > 7))
+ return -EINVAL;
+
+ local_irq_save(flags);
+ wakemsk = __raw_readl((void __iomem *)SYS_WAKEMSK);
+ if (on)
+ wakemsk |= 1 << bit;
+ else
+ wakemsk &= ~(1 << bit);
+ __raw_writel(wakemsk, (void __iomem *)SYS_WAKEMSK);
+ wmb();
+ local_irq_restore(flags);
+
+ return 0;
+}
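+
+/*
+ * Illustrative sketch (not part of the original code): a driver holding
+ * one of the GPIO0-7 lines (the first eight sources on IC1) can mark it
+ * as a wakeup source through the generic IRQ layer, which ends up in
+ * au1x_ic1_setwake() above:
+ *
+ *	enable_irq_wake(AU1000_INTC1_INT_BASE + 3);	(hypothetical GPIO3 line)
+ *	... suspend/resume cycle ...
+ *	disable_irq_wake(AU1000_INTC1_INT_BASE + 3);
+ */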
+
+/*
+ * irq_chips for both ICs; this way the mask handlers can be
+ * as short as possible.
+ */
+static struct irq_chip au1x_ic0_chip = {
+ .name = "Alchemy-IC0",
+ .irq_ack = au1x_ic0_ack,
+ .irq_mask = au1x_ic0_mask,
+ .irq_mask_ack = au1x_ic0_maskack,
+ .irq_unmask = au1x_ic0_unmask,
+ .irq_set_type = au1x_ic_settype,
+};
+
+static struct irq_chip au1x_ic1_chip = {
+ .name = "Alchemy-IC1",
+ .irq_ack = au1x_ic1_ack,
+ .irq_mask = au1x_ic1_mask,
+ .irq_mask_ack = au1x_ic1_maskack,
+ .irq_unmask = au1x_ic1_unmask,
+ .irq_set_type = au1x_ic_settype,
+ .irq_set_wake = au1x_ic1_setwake,
+};
+
+static int au1x_ic_settype(struct irq_data *d, unsigned int flow_type)
+{
+ struct irq_chip *chip;
+ unsigned int bit, irq = d->irq;
+ irq_flow_handler_t handler = NULL;
+ unsigned char *name = NULL;
+ void __iomem *base;
+ int ret;
+
+ if (irq >= AU1000_INTC1_INT_BASE) {
+ bit = irq - AU1000_INTC1_INT_BASE;
+ chip = &au1x_ic1_chip;
+ base = (void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR);
+ } else {
+ bit = irq - AU1000_INTC0_INT_BASE;
+ chip = &au1x_ic0_chip;
+ base = (void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR);
+ }
+
+ if (bit > 31)
+ return -EINVAL;
+
+ ret = 0;
+
+ switch (flow_type) { /* cfgregs 2:1:0 */
+ case IRQ_TYPE_EDGE_RISING: /* 0:0:1 */
+ __raw_writel(1 << bit, base + IC_CFG2CLR);
+ __raw_writel(1 << bit, base + IC_CFG1CLR);
+ __raw_writel(1 << bit, base + IC_CFG0SET);
+ handler = handle_edge_irq;
+ name = "riseedge";
+ break;
+ case IRQ_TYPE_EDGE_FALLING: /* 0:1:0 */
+ __raw_writel(1 << bit, base + IC_CFG2CLR);
+ __raw_writel(1 << bit, base + IC_CFG1SET);
+ __raw_writel(1 << bit, base + IC_CFG0CLR);
+ handler = handle_edge_irq;
+ name = "falledge";
+ break;
+ case IRQ_TYPE_EDGE_BOTH: /* 0:1:1 */
+ __raw_writel(1 << bit, base + IC_CFG2CLR);
+ __raw_writel(1 << bit, base + IC_CFG1SET);
+ __raw_writel(1 << bit, base + IC_CFG0SET);
+ handler = handle_edge_irq;
+ name = "bothedge";
+ break;
+ case IRQ_TYPE_LEVEL_HIGH: /* 1:0:1 */
+ __raw_writel(1 << bit, base + IC_CFG2SET);
+ __raw_writel(1 << bit, base + IC_CFG1CLR);
+ __raw_writel(1 << bit, base + IC_CFG0SET);
+ handler = handle_level_irq;
+ name = "hilevel";
+ break;
+ case IRQ_TYPE_LEVEL_LOW: /* 1:1:0 */
+ __raw_writel(1 << bit, base + IC_CFG2SET);
+ __raw_writel(1 << bit, base + IC_CFG1SET);
+ __raw_writel(1 << bit, base + IC_CFG0CLR);
+ handler = handle_level_irq;
+ name = "lowlevel";
+ break;
+ case IRQ_TYPE_NONE: /* 0:0:0 */
+ __raw_writel(1 << bit, base + IC_CFG2CLR);
+ __raw_writel(1 << bit, base + IC_CFG1CLR);
+ __raw_writel(1 << bit, base + IC_CFG0CLR);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ __irq_set_chip_handler_name_locked(d->irq, chip, handler, name);
+
+ wmb();
+
+ return ret;
+}
+
+/******************************************************************************/
+
+/*
+ * au1300_gpic_chgcfg - change PIN configuration.
+ * @gpio: pin to change (0-based GPIO number from datasheet).
+ * @clr: clear all bits set in 'clr'.
+ * @set: set these bits.
+ *
+ * modifies a pin's configuration register: bits set in @clr will
+ * be cleared in the register, bits in @set will be set.
+ */
+static inline void au1300_gpic_chgcfg(unsigned int gpio,
+ unsigned long clr,
+ unsigned long set)
+{
+ void __iomem *r = AU1300_GPIC_ADDR;
+ unsigned long l;
+
+ r += gpio * 4; /* offset into pin config array */
+ l = __raw_readl(r + AU1300_GPIC_PINCFG);
+ l &= ~clr;
+ l |= set;
+ __raw_writel(l, r + AU1300_GPIC_PINCFG);
+ wmb();
+}
+
+/*
+ * au1300_pinfunc_to_gpio - assign a pin as GPIO input (GPIO ctrl).
+ * @pin: pin (0-based GPIO number from datasheet).
+ *
+ * Assigns a GPIO pin to the GPIO controller, so its level can either
+ * be read or set through the generic GPIO functions.
+ * If you need a GPOUT, use au1300_gpio_set_value(pin, 0/1).
+ * REVISIT: is this function really necessary?
+ */
+void au1300_pinfunc_to_gpio(enum au1300_multifunc_pins gpio)
+{
+ au1300_gpio_direction_input(gpio + AU1300_GPIO_BASE);
+}
+EXPORT_SYMBOL_GPL(au1300_pinfunc_to_gpio);
+
+/*
+ * au1300_pinfunc_to_dev - assign a pin to the device function.
+ * @pin: pin (0-based GPIO number from datasheet).
+ *
+ * Assigns a GPIO pin to its associated device function; the pin will be
+ * driven by the device and not through GPIO functions.
+ */
+void au1300_pinfunc_to_dev(enum au1300_multifunc_pins gpio)
+{
+ void __iomem *r = AU1300_GPIC_ADDR;
+ unsigned long bit;
+
+ r += GPIC_GPIO_BANKOFF(gpio);
+ bit = GPIC_GPIO_TO_BIT(gpio);
+ __raw_writel(bit, r + AU1300_GPIC_DEVSEL);
+ wmb();
+}
+EXPORT_SYMBOL_GPL(au1300_pinfunc_to_dev);
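+
+/*
+ * Illustrative sketch (not part of the original code): board setup that
+ * wants to sample a multifunction pin as a GPIO input and later hand it
+ * back to its device function could do, for a hypothetical pin 'foo_pin':
+ *
+ *	au1300_pinfunc_to_gpio(foo_pin);
+ *	level = gpio_get_value(foo_pin + AU1300_GPIO_BASE);
+ *	...
+ *	au1300_pinfunc_to_dev(foo_pin);
+ */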
+
+/*
+ * au1300_set_irq_priority - set internal priority of IRQ.
+ * @irq: irq to set priority (linux irq number).
+ * @p: priority (0 = highest, 3 = lowest).
+ */
+void au1300_set_irq_priority(unsigned int irq, int p)
+{
+ irq -= ALCHEMY_GPIC_INT_BASE;
+ au1300_gpic_chgcfg(irq, GPIC_CFG_IL_MASK, GPIC_CFG_IL_SET(p));
+}
+EXPORT_SYMBOL_GPL(au1300_set_irq_priority);
+
+/*
+ * au1300_set_dbdma_gpio - assign a gpio to one of the DBDMA triggers.
+ * @dchan: dbdma trigger select (0, 1).
+ * @gpio: pin to assign as trigger.
+ *
+ * DBDMA controller has 2 external trigger sources; this function
+ * assigns a GPIO to the selected trigger.
+ */
+void au1300_set_dbdma_gpio(int dchan, unsigned int gpio)
+{
+ unsigned long r;
+
+ if ((dchan >= 0) && (dchan <= 1)) {
+ r = __raw_readl(AU1300_GPIC_ADDR + AU1300_GPIC_DMASEL);
+ r &= ~(0xff << (8 * dchan));
+ r |= (gpio & 0x7f) << (8 * dchan);
+ __raw_writel(r, AU1300_GPIC_ADDR + AU1300_GPIC_DMASEL);
+ wmb();
+ }
+}
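+
+/*
+ * Illustrative sketch (not part of the original code): raise a device
+ * interrupt to the highest internal priority and route GPIO 12 (an
+ * assumed board wiring) to DBDMA external trigger 0:
+ *
+ *	au1300_set_irq_priority(ALCHEMY_GPIC_INT_BASE + 12, 0);
+ *	au1300_set_dbdma_gpio(0, 12);
+ */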
+
+static inline void gpic_pin_set_idlewake(unsigned int gpio, int allow)
+{
+ au1300_gpic_chgcfg(gpio, GPIC_CFG_IDLEWAKE,
+ allow ? GPIC_CFG_IDLEWAKE : 0);
+}
+
+static void au1300_gpic_mask(struct irq_data *d)
+{
+ void __iomem *r = AU1300_GPIC_ADDR;
+ unsigned long bit, irq = d->irq;
+
+ irq -= ALCHEMY_GPIC_INT_BASE;
+ r += GPIC_GPIO_BANKOFF(irq);
+ bit = GPIC_GPIO_TO_BIT(irq);
+ __raw_writel(bit, r + AU1300_GPIC_IDIS);
+ wmb();
+
+ gpic_pin_set_idlewake(irq, 0);
+}
+
+static void au1300_gpic_unmask(struct irq_data *d)
+{
+ void __iomem *r = AU1300_GPIC_ADDR;
+ unsigned long bit, irq = d->irq;
+
+ irq -= ALCHEMY_GPIC_INT_BASE;
+
+ gpic_pin_set_idlewake(irq, 1);
+
+ r += GPIC_GPIO_BANKOFF(irq);
+ bit = GPIC_GPIO_TO_BIT(irq);
+ __raw_writel(bit, r + AU1300_GPIC_IEN);
+ wmb();
+}
+
+static void au1300_gpic_maskack(struct irq_data *d)
+{
+ void __iomem *r = AU1300_GPIC_ADDR;
+ unsigned long bit, irq = d->irq;
+
+ irq -= ALCHEMY_GPIC_INT_BASE;
+ r += GPIC_GPIO_BANKOFF(irq);
+ bit = GPIC_GPIO_TO_BIT(irq);
+ __raw_writel(bit, r + AU1300_GPIC_IPEND); /* ack */
+ __raw_writel(bit, r + AU1300_GPIC_IDIS); /* mask */
+ wmb();
+
+ gpic_pin_set_idlewake(irq, 0);
+}
+
+static void au1300_gpic_ack(struct irq_data *d)
+{
+ void __iomem *r = AU1300_GPIC_ADDR;
+ unsigned long bit, irq = d->irq;
+
+ irq -= ALCHEMY_GPIC_INT_BASE;
+ r += GPIC_GPIO_BANKOFF(irq);
+ bit = GPIC_GPIO_TO_BIT(irq);
+ __raw_writel(bit, r + AU1300_GPIC_IPEND); /* ack */
+ wmb();
+}
+
+static struct irq_chip au1300_gpic = {
+ .name = "GPIOINT",
+ .irq_ack = au1300_gpic_ack,
+ .irq_mask = au1300_gpic_mask,
+ .irq_mask_ack = au1300_gpic_maskack,
+ .irq_unmask = au1300_gpic_unmask,
+ .irq_set_type = au1300_gpic_settype,
+};
+
+static int au1300_gpic_settype(struct irq_data *d, unsigned int type)
+{
+ unsigned long s;
+ unsigned char *name = NULL;
+ irq_flow_handler_t hdl = NULL;
+
+ switch (type) {
+ case IRQ_TYPE_LEVEL_HIGH:
+ s = GPIC_CFG_IC_LEVEL_HIGH;
+ name = "high";
+ hdl = handle_level_irq;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ s = GPIC_CFG_IC_LEVEL_LOW;
+ name = "low";
+ hdl = handle_level_irq;
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ s = GPIC_CFG_IC_EDGE_RISE;
+ name = "posedge";
+ hdl = handle_edge_irq;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ s = GPIC_CFG_IC_EDGE_FALL;
+ name = "negedge";
+ hdl = handle_edge_irq;
+ break;
+ case IRQ_TYPE_EDGE_BOTH:
+ s = GPIC_CFG_IC_EDGE_BOTH;
+ name = "bothedge";
+ hdl = handle_edge_irq;
+ break;
+ case IRQ_TYPE_NONE:
+ s = GPIC_CFG_IC_OFF;
+ name = "disabled";
+ hdl = handle_level_irq;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ __irq_set_chip_handler_name_locked(d->irq, &au1300_gpic, hdl, name);
+
+ au1300_gpic_chgcfg(d->irq - ALCHEMY_GPIC_INT_BASE, GPIC_CFG_IC_MASK, s);
+
+ return 0;
+}
+
+/******************************************************************************/
+
+static inline void ic_init(void __iomem *base)
+{
+ /* initialize interrupt controller to a safe state */
+ __raw_writel(0xffffffff, base + IC_CFG0CLR);
+ __raw_writel(0xffffffff, base + IC_CFG1CLR);
+ __raw_writel(0xffffffff, base + IC_CFG2CLR);
+ __raw_writel(0xffffffff, base + IC_MASKCLR);
+ __raw_writel(0xffffffff, base + IC_ASSIGNCLR);
+ __raw_writel(0xffffffff, base + IC_WAKECLR);
+ __raw_writel(0xffffffff, base + IC_SRCSET);
+ __raw_writel(0xffffffff, base + IC_FALLINGCLR);
+ __raw_writel(0xffffffff, base + IC_RISINGCLR);
+ __raw_writel(0x00000000, base + IC_TESTBIT);
+ wmb();
+}
+
+static unsigned long alchemy_gpic_pmdata[ALCHEMY_GPIC_INT_NUM + 6];
+
+static inline void alchemy_ic_suspend_one(void __iomem *base, unsigned long *d)
+{
+ d[0] = __raw_readl(base + IC_CFG0RD);
+ d[1] = __raw_readl(base + IC_CFG1RD);
+ d[2] = __raw_readl(base + IC_CFG2RD);
+ d[3] = __raw_readl(base + IC_SRCRD);
+ d[4] = __raw_readl(base + IC_ASSIGNRD);
+ d[5] = __raw_readl(base + IC_WAKERD);
+ d[6] = __raw_readl(base + IC_MASKRD);
+ ic_init(base); /* shut it up too while at it */
+}
+
+static inline void alchemy_ic_resume_one(void __iomem *base, unsigned long *d)
+{
+ ic_init(base);
+
+ __raw_writel(d[0], base + IC_CFG0SET);
+ __raw_writel(d[1], base + IC_CFG1SET);
+ __raw_writel(d[2], base + IC_CFG2SET);
+ __raw_writel(d[3], base + IC_SRCSET);
+ __raw_writel(d[4], base + IC_ASSIGNSET);
+ __raw_writel(d[5], base + IC_WAKESET);
+ wmb();
+
+ __raw_writel(d[6], base + IC_MASKSET);
+ wmb();
+}
+
+static int alchemy_ic_suspend(void)
+{
+ alchemy_ic_suspend_one((void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR),
+ alchemy_gpic_pmdata);
+ alchemy_ic_suspend_one((void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR),
+ &alchemy_gpic_pmdata[7]);
+ return 0;
+}
+
+static void alchemy_ic_resume(void)
+{
+ alchemy_ic_resume_one((void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR),
+ &alchemy_gpic_pmdata[7]);
+ alchemy_ic_resume_one((void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR),
+ alchemy_gpic_pmdata);
+}
+
+static int alchemy_gpic_suspend(void)
+{
+ void __iomem *base = (void __iomem *)KSEG1ADDR(AU1300_GPIC_PHYS_ADDR);
+ int i;
+
+ /* save 4 interrupt mask status registers */
+ alchemy_gpic_pmdata[0] = __raw_readl(base + AU1300_GPIC_IEN + 0x0);
+ alchemy_gpic_pmdata[1] = __raw_readl(base + AU1300_GPIC_IEN + 0x4);
+ alchemy_gpic_pmdata[2] = __raw_readl(base + AU1300_GPIC_IEN + 0x8);
+ alchemy_gpic_pmdata[3] = __raw_readl(base + AU1300_GPIC_IEN + 0xc);
+
+ /* save misc register(s) */
+ alchemy_gpic_pmdata[4] = __raw_readl(base + AU1300_GPIC_DMASEL);
+
+	/* silence everything */
+ __raw_writel(~0UL, base + AU1300_GPIC_IDIS + 0x0);
+ __raw_writel(~0UL, base + AU1300_GPIC_IDIS + 0x4);
+ __raw_writel(~0UL, base + AU1300_GPIC_IDIS + 0x8);
+ __raw_writel(~0UL, base + AU1300_GPIC_IDIS + 0xc);
+ wmb();
+
+ /* save pin/int-type configuration */
+ base += AU1300_GPIC_PINCFG;
+ for (i = 0; i < ALCHEMY_GPIC_INT_NUM; i++)
+ alchemy_gpic_pmdata[i + 5] = __raw_readl(base + (i << 2));
+
+ wmb();
+
+ return 0;
+}
+
+static void alchemy_gpic_resume(void)
+{
+ void __iomem *base = (void __iomem *)KSEG1ADDR(AU1300_GPIC_PHYS_ADDR);
+ int i;
+
+ /* disable all first */
+ __raw_writel(~0UL, base + AU1300_GPIC_IDIS + 0x0);
+ __raw_writel(~0UL, base + AU1300_GPIC_IDIS + 0x4);
+ __raw_writel(~0UL, base + AU1300_GPIC_IDIS + 0x8);
+ __raw_writel(~0UL, base + AU1300_GPIC_IDIS + 0xc);
+ wmb();
+
+ /* restore pin/int-type configurations */
+ base += AU1300_GPIC_PINCFG;
+ for (i = 0; i < ALCHEMY_GPIC_INT_NUM; i++)
+ __raw_writel(alchemy_gpic_pmdata[i + 5], base + (i << 2));
+ wmb();
+
+ /* restore misc register(s) */
+ base = (void __iomem *)KSEG1ADDR(AU1300_GPIC_PHYS_ADDR);
+ __raw_writel(alchemy_gpic_pmdata[4], base + AU1300_GPIC_DMASEL);
+ wmb();
+
+ /* finally restore masks */
+ __raw_writel(alchemy_gpic_pmdata[0], base + AU1300_GPIC_IEN + 0x0);
+ __raw_writel(alchemy_gpic_pmdata[1], base + AU1300_GPIC_IEN + 0x4);
+ __raw_writel(alchemy_gpic_pmdata[2], base + AU1300_GPIC_IEN + 0x8);
+ __raw_writel(alchemy_gpic_pmdata[3], base + AU1300_GPIC_IEN + 0xc);
+ wmb();
+}
+
+static struct syscore_ops alchemy_ic_pmops = {
+ .suspend = alchemy_ic_suspend,
+ .resume = alchemy_ic_resume,
+};
+
+static struct syscore_ops alchemy_gpic_pmops = {
+ .suspend = alchemy_gpic_suspend,
+ .resume = alchemy_gpic_resume,
+};
+
+/******************************************************************************/
+
+/* create chained handlers for the 4 IC requests to the MIPS IRQ ctrl */
+#define DISP(name, base, addr) \
+static void au1000_##name##_dispatch(unsigned int irq, struct irq_desc *d) \
+{ \
+ unsigned long r = __raw_readl((void __iomem *)KSEG1ADDR(addr)); \
+ if (likely(r)) \
+ generic_handle_irq(base + __ffs(r)); \
+ else \
+ spurious_interrupt(); \
+}
+
+DISP(ic0r0, AU1000_INTC0_INT_BASE, AU1000_IC0_PHYS_ADDR + IC_REQ0INT)
+DISP(ic0r1, AU1000_INTC0_INT_BASE, AU1000_IC0_PHYS_ADDR + IC_REQ1INT)
+DISP(ic1r0, AU1000_INTC1_INT_BASE, AU1000_IC1_PHYS_ADDR + IC_REQ0INT)
+DISP(ic1r1, AU1000_INTC1_INT_BASE, AU1000_IC1_PHYS_ADDR + IC_REQ1INT)
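+
+/*
+ * Illustrative expansion (for reference only): the first DISP() line
+ * above produces roughly the following function:
+ *
+ *	static void au1000_ic0r0_dispatch(unsigned int irq, struct irq_desc *d)
+ *	{
+ *		unsigned long r = __raw_readl((void __iomem *)
+ *			KSEG1ADDR(AU1000_IC0_PHYS_ADDR + IC_REQ0INT));
+ *		if (likely(r))
+ *			generic_handle_irq(AU1000_INTC0_INT_BASE + __ffs(r));
+ *		else
+ *			spurious_interrupt();
+ *	}
+ */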
+
+static void alchemy_gpic_dispatch(unsigned int irq, struct irq_desc *d)
+{
+ int i = __raw_readl(AU1300_GPIC_ADDR + AU1300_GPIC_PRIENC);
+ generic_handle_irq(ALCHEMY_GPIC_INT_BASE + i);
+}
+
+/******************************************************************************/
+
+static void __init au1000_init_irq(struct alchemy_irqmap *map)
+{
+ unsigned int bit, irq_nr;
+ void __iomem *base;
+
+ ic_init((void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR));
+ ic_init((void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR));
+ register_syscore_ops(&alchemy_ic_pmops);
+ mips_cpu_irq_init();
+
+ /* register all 64 possible IC0+IC1 irq sources as type "none".
+ * Use set_irq_type() to set edge/level behaviour at runtime.
+ */
+ for (irq_nr = AU1000_INTC0_INT_BASE;
+ (irq_nr < AU1000_INTC0_INT_BASE + 32); irq_nr++)
+ au1x_ic_settype(irq_get_irq_data(irq_nr), IRQ_TYPE_NONE);
+
+ for (irq_nr = AU1000_INTC1_INT_BASE;
+ (irq_nr < AU1000_INTC1_INT_BASE + 32); irq_nr++)
+ au1x_ic_settype(irq_get_irq_data(irq_nr), IRQ_TYPE_NONE);
+
+	/*
+	 * Set up the fixed per-processor interrupt sources from the map
+	 * (covers both IC0 and IC1 entries).
+	 */
+ while (map->irq != -1) {
+ irq_nr = map->irq;
+
+ if (irq_nr >= AU1000_INTC1_INT_BASE) {
+ bit = irq_nr - AU1000_INTC1_INT_BASE;
+ base = (void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR);
+ } else {
+ bit = irq_nr - AU1000_INTC0_INT_BASE;
+ base = (void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR);
+ }
+ if (map->prio == 0)
+ __raw_writel(1 << bit, base + IC_ASSIGNSET);
+
+ au1x_ic_settype(irq_get_irq_data(irq_nr), map->type);
+ ++map;
+ }
+
+ irq_set_chained_handler(MIPS_CPU_IRQ_BASE + 2, au1000_ic0r0_dispatch);
+ irq_set_chained_handler(MIPS_CPU_IRQ_BASE + 3, au1000_ic0r1_dispatch);
+ irq_set_chained_handler(MIPS_CPU_IRQ_BASE + 4, au1000_ic1r0_dispatch);
+ irq_set_chained_handler(MIPS_CPU_IRQ_BASE + 5, au1000_ic1r1_dispatch);
+}
+
+static void __init alchemy_gpic_init_irq(const struct alchemy_irqmap *dints)
+{
+ int i;
+ void __iomem *bank_base;
+
+ register_syscore_ops(&alchemy_gpic_pmops);
+ mips_cpu_irq_init();
+
+ /* disable & ack all possible interrupt sources */
+ for (i = 0; i < 4; i++) {
+ bank_base = AU1300_GPIC_ADDR + (i * 4);
+ __raw_writel(~0UL, bank_base + AU1300_GPIC_IDIS);
+ wmb();
+ __raw_writel(~0UL, bank_base + AU1300_GPIC_IPEND);
+ wmb();
+ }
+
+ /* register an irq_chip for them, with 2nd highest priority */
+ for (i = ALCHEMY_GPIC_INT_BASE; i <= ALCHEMY_GPIC_INT_LAST; i++) {
+ au1300_set_irq_priority(i, 1);
+ au1300_gpic_settype(irq_get_irq_data(i), IRQ_TYPE_NONE);
+ }
+
+ /* setup known on-chip sources */
+ while ((i = dints->irq) != -1) {
+ au1300_gpic_settype(irq_get_irq_data(i), dints->type);
+ au1300_set_irq_priority(i, dints->prio);
+
+ if (dints->internal)
+ au1300_pinfunc_to_dev(i - ALCHEMY_GPIC_INT_BASE);
+
+ dints++;
+ }
+
+ irq_set_chained_handler(MIPS_CPU_IRQ_BASE + 2, alchemy_gpic_dispatch);
+ irq_set_chained_handler(MIPS_CPU_IRQ_BASE + 3, alchemy_gpic_dispatch);
+ irq_set_chained_handler(MIPS_CPU_IRQ_BASE + 4, alchemy_gpic_dispatch);
+ irq_set_chained_handler(MIPS_CPU_IRQ_BASE + 5, alchemy_gpic_dispatch);
+}
+
+/******************************************************************************/
+
+void __init arch_init_irq(void)
+{
+ switch (alchemy_get_cputype()) {
+ case ALCHEMY_CPU_AU1000:
+ au1000_init_irq(au1000_irqmap);
+ break;
+ case ALCHEMY_CPU_AU1500:
+ au1000_init_irq(au1500_irqmap);
+ break;
+ case ALCHEMY_CPU_AU1100:
+ au1000_init_irq(au1100_irqmap);
+ break;
+ case ALCHEMY_CPU_AU1550:
+ au1000_init_irq(au1550_irqmap);
+ break;
+ case ALCHEMY_CPU_AU1200:
+ au1000_init_irq(au1200_irqmap);
+ break;
+ case ALCHEMY_CPU_AU1300:
+ alchemy_gpic_init_irq(au1300_irqmap);
+ break;
+ default:
+ pr_err("unknown Alchemy IRQ core\n");
+ break;
+ }
+}
+
+asmlinkage void plat_irq_dispatch(void)
+{
+ unsigned long r = (read_c0_status() & read_c0_cause()) >> 8;
+ do_IRQ(MIPS_CPU_IRQ_BASE + __ffs(r & 0xff));
+}
diff --git a/arch/mips/alchemy/common/platform.c b/arch/mips/alchemy/common/platform.c
new file mode 100644
index 00000000..95cb9113
--- /dev/null
+++ b/arch/mips/alchemy/common/platform.c
@@ -0,0 +1,391 @@
+/*
+ * Platform device support for Au1x00 SoCs.
+ *
+ * Copyright 2004, Matt Porter <mporter@kernel.crashing.org>
+ *
+ * (C) Copyright Embedded Alley Solutions, Inc 2005
+ * Author: Pantelis Antoniou <pantelis@embeddedalley.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/serial_8250.h>
+#include <linux/slab.h>
+
+#include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-au1x00/au1xxx_dbdma.h>
+#include <asm/mach-au1x00/au1100_mmc.h>
+#include <asm/mach-au1x00/au1xxx_eth.h>
+
+#include <prom.h>
+
+static void alchemy_8250_pm(struct uart_port *port, unsigned int state,
+ unsigned int old_state)
+{
+#ifdef CONFIG_SERIAL_8250
+ switch (state) {
+ case 0:
+ alchemy_uart_enable(CPHYSADDR(port->membase));
+ serial8250_do_pm(port, state, old_state);
+ break;
+ case 3: /* power off */
+ serial8250_do_pm(port, state, old_state);
+ alchemy_uart_disable(CPHYSADDR(port->membase));
+ break;
+ default:
+ serial8250_do_pm(port, state, old_state);
+ break;
+ }
+#endif
+}
+
+#define PORT(_base, _irq) \
+ { \
+ .mapbase = _base, \
+ .irq = _irq, \
+ .regshift = 2, \
+ .iotype = UPIO_AU, \
+ .flags = UPF_SKIP_TEST | UPF_IOREMAP | \
+ UPF_FIXED_TYPE, \
+ .type = PORT_16550A, \
+ .pm = alchemy_8250_pm, \
+ }
+
+static struct plat_serial8250_port au1x00_uart_data[][4] __initdata = {
+ [ALCHEMY_CPU_AU1000] = {
+ PORT(AU1000_UART0_PHYS_ADDR, AU1000_UART0_INT),
+ PORT(AU1000_UART1_PHYS_ADDR, AU1000_UART1_INT),
+ PORT(AU1000_UART2_PHYS_ADDR, AU1000_UART2_INT),
+ PORT(AU1000_UART3_PHYS_ADDR, AU1000_UART3_INT),
+ },
+ [ALCHEMY_CPU_AU1500] = {
+ PORT(AU1000_UART0_PHYS_ADDR, AU1500_UART0_INT),
+ PORT(AU1000_UART3_PHYS_ADDR, AU1500_UART3_INT),
+ },
+ [ALCHEMY_CPU_AU1100] = {
+ PORT(AU1000_UART0_PHYS_ADDR, AU1100_UART0_INT),
+ PORT(AU1000_UART1_PHYS_ADDR, AU1100_UART1_INT),
+ PORT(AU1000_UART3_PHYS_ADDR, AU1100_UART3_INT),
+ },
+ [ALCHEMY_CPU_AU1550] = {
+ PORT(AU1000_UART0_PHYS_ADDR, AU1550_UART0_INT),
+ PORT(AU1000_UART1_PHYS_ADDR, AU1550_UART1_INT),
+ PORT(AU1000_UART3_PHYS_ADDR, AU1550_UART3_INT),
+ },
+ [ALCHEMY_CPU_AU1200] = {
+ PORT(AU1000_UART0_PHYS_ADDR, AU1200_UART0_INT),
+ PORT(AU1000_UART1_PHYS_ADDR, AU1200_UART1_INT),
+ },
+ [ALCHEMY_CPU_AU1300] = {
+ PORT(AU1300_UART0_PHYS_ADDR, AU1300_UART0_INT),
+ PORT(AU1300_UART1_PHYS_ADDR, AU1300_UART1_INT),
+ PORT(AU1300_UART2_PHYS_ADDR, AU1300_UART2_INT),
+ PORT(AU1300_UART3_PHYS_ADDR, AU1300_UART3_INT),
+ },
+};
+
+static struct platform_device au1xx0_uart_device = {
+ .name = "serial8250",
+ .id = PLAT8250_DEV_AU1X00,
+};
+
+static void __init alchemy_setup_uarts(int ctype)
+{
+ unsigned int uartclk = get_au1x00_uart_baud_base() * 16;
+ int s = sizeof(struct plat_serial8250_port);
+ int c = alchemy_get_uarts(ctype);
+ struct plat_serial8250_port *ports;
+
+ ports = kzalloc(s * (c + 1), GFP_KERNEL);
+ if (!ports) {
+ printk(KERN_INFO "Alchemy: no memory for UART data\n");
+ return;
+ }
+ memcpy(ports, au1x00_uart_data[ctype], s * c);
+ au1xx0_uart_device.dev.platform_data = ports;
+
+	/* Fill in uartclk for each port. */
+ for (s = 0; s < c; s++)
+ ports[s].uartclk = uartclk;
+ if (platform_device_register(&au1xx0_uart_device))
+ printk(KERN_INFO "Alchemy: failed to register UARTs\n");
+}
+
+
+/* The dmamask must be set for OHCI/EHCI to work */
+static u64 alchemy_ohci_dmamask = DMA_BIT_MASK(32);
+static u64 __maybe_unused alchemy_ehci_dmamask = DMA_BIT_MASK(32);
+
+static unsigned long alchemy_ohci_data[][2] __initdata = {
+ [ALCHEMY_CPU_AU1000] = { AU1000_USB_OHCI_PHYS_ADDR, AU1000_USB_HOST_INT },
+ [ALCHEMY_CPU_AU1500] = { AU1000_USB_OHCI_PHYS_ADDR, AU1500_USB_HOST_INT },
+ [ALCHEMY_CPU_AU1100] = { AU1000_USB_OHCI_PHYS_ADDR, AU1100_USB_HOST_INT },
+ [ALCHEMY_CPU_AU1550] = { AU1550_USB_OHCI_PHYS_ADDR, AU1550_USB_HOST_INT },
+ [ALCHEMY_CPU_AU1200] = { AU1200_USB_OHCI_PHYS_ADDR, AU1200_USB_INT },
+ [ALCHEMY_CPU_AU1300] = { AU1300_USB_OHCI0_PHYS_ADDR, AU1300_USB_INT },
+};
+
+static unsigned long alchemy_ehci_data[][2] __initdata = {
+ [ALCHEMY_CPU_AU1200] = { AU1200_USB_EHCI_PHYS_ADDR, AU1200_USB_INT },
+ [ALCHEMY_CPU_AU1300] = { AU1300_USB_EHCI_PHYS_ADDR, AU1300_USB_INT },
+};
+
+static int __init _new_usbres(struct resource **r, struct platform_device **d)
+{
+ *r = kzalloc(sizeof(struct resource) * 2, GFP_KERNEL);
+ if (!*r)
+ return -ENOMEM;
+ *d = kzalloc(sizeof(struct platform_device), GFP_KERNEL);
+ if (!*d) {
+ kfree(*r);
+ return -ENOMEM;
+ }
+
+ (*d)->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ (*d)->num_resources = 2;
+ (*d)->resource = *r;
+
+ return 0;
+}
+
+static void __init alchemy_setup_usb(int ctype)
+{
+ struct resource *res;
+ struct platform_device *pdev;
+
+ /* setup OHCI0. Every variant has one */
+ if (_new_usbres(&res, &pdev))
+ return;
+
+ res[0].start = alchemy_ohci_data[ctype][0];
+ res[0].end = res[0].start + 0x100 - 1;
+ res[0].flags = IORESOURCE_MEM;
+ res[1].start = alchemy_ohci_data[ctype][1];
+ res[1].end = res[1].start;
+ res[1].flags = IORESOURCE_IRQ;
+ pdev->name = "au1xxx-ohci";
+ pdev->id = 0;
+ pdev->dev.dma_mask = &alchemy_ohci_dmamask;
+
+ if (platform_device_register(pdev))
+ printk(KERN_INFO "Alchemy USB: cannot add OHCI0\n");
+
+
+ /* setup EHCI0: Au1200/Au1300 */
+ if ((ctype == ALCHEMY_CPU_AU1200) || (ctype == ALCHEMY_CPU_AU1300)) {
+ if (_new_usbres(&res, &pdev))
+ return;
+
+ res[0].start = alchemy_ehci_data[ctype][0];
+ res[0].end = res[0].start + 0x100 - 1;
+ res[0].flags = IORESOURCE_MEM;
+ res[1].start = alchemy_ehci_data[ctype][1];
+ res[1].end = res[1].start;
+ res[1].flags = IORESOURCE_IRQ;
+ pdev->name = "au1xxx-ehci";
+ pdev->id = 0;
+ pdev->dev.dma_mask = &alchemy_ehci_dmamask;
+
+ if (platform_device_register(pdev))
+ printk(KERN_INFO "Alchemy USB: cannot add EHCI0\n");
+ }
+
+ /* Au1300: OHCI1 */
+ if (ctype == ALCHEMY_CPU_AU1300) {
+ if (_new_usbres(&res, &pdev))
+ return;
+
+ res[0].start = AU1300_USB_OHCI1_PHYS_ADDR;
+ res[0].end = res[0].start + 0x100 - 1;
+ res[0].flags = IORESOURCE_MEM;
+ res[1].start = AU1300_USB_INT;
+ res[1].end = res[1].start;
+ res[1].flags = IORESOURCE_IRQ;
+ pdev->name = "au1xxx-ohci";
+ pdev->id = 1;
+ pdev->dev.dma_mask = &alchemy_ohci_dmamask;
+
+ if (platform_device_register(pdev))
+ printk(KERN_INFO "Alchemy USB: cannot add OHCI1\n");
+ }
+}
+
+/* Macro to help defining the Ethernet MAC resources */
+#define MAC_RES_COUNT 4 /* MAC regs, MAC en, MAC INT, MACDMA regs */
+#define MAC_RES(_base, _enable, _irq, _macdma) \
+ { \
+ .start = _base, \
+ .end = _base + 0xffff, \
+ .flags = IORESOURCE_MEM, \
+ }, \
+ { \
+ .start = _enable, \
+ .end = _enable + 0x3, \
+ .flags = IORESOURCE_MEM, \
+ }, \
+ { \
+ .start = _irq, \
+ .end = _irq, \
+ .flags = IORESOURCE_IRQ \
+ }, \
+ { \
+ .start = _macdma, \
+ .end = _macdma + 0x1ff, \
+ .flags = IORESOURCE_MEM, \
+ }
+
+static struct resource au1xxx_eth0_resources[][MAC_RES_COUNT] __initdata = {
+ [ALCHEMY_CPU_AU1000] = {
+ MAC_RES(AU1000_MAC0_PHYS_ADDR,
+ AU1000_MACEN_PHYS_ADDR,
+ AU1000_MAC0_DMA_INT,
+ AU1000_MACDMA0_PHYS_ADDR)
+ },
+ [ALCHEMY_CPU_AU1500] = {
+ MAC_RES(AU1500_MAC0_PHYS_ADDR,
+ AU1500_MACEN_PHYS_ADDR,
+ AU1500_MAC0_DMA_INT,
+ AU1000_MACDMA0_PHYS_ADDR)
+ },
+ [ALCHEMY_CPU_AU1100] = {
+ MAC_RES(AU1000_MAC0_PHYS_ADDR,
+ AU1000_MACEN_PHYS_ADDR,
+ AU1100_MAC0_DMA_INT,
+ AU1000_MACDMA0_PHYS_ADDR)
+ },
+ [ALCHEMY_CPU_AU1550] = {
+ MAC_RES(AU1000_MAC0_PHYS_ADDR,
+ AU1000_MACEN_PHYS_ADDR,
+ AU1550_MAC0_DMA_INT,
+ AU1000_MACDMA0_PHYS_ADDR)
+ },
+};
+
+static struct au1000_eth_platform_data au1xxx_eth0_platform_data = {
+ .phy1_search_mac0 = 1,
+};
+
+static struct platform_device au1xxx_eth0_device = {
+ .name = "au1000-eth",
+ .id = 0,
+ .num_resources = MAC_RES_COUNT,
+ .dev.platform_data = &au1xxx_eth0_platform_data,
+};
+
+static struct resource au1xxx_eth1_resources[][MAC_RES_COUNT] __initdata = {
+ [ALCHEMY_CPU_AU1000] = {
+ MAC_RES(AU1000_MAC1_PHYS_ADDR,
+ AU1000_MACEN_PHYS_ADDR + 4,
+ AU1000_MAC1_DMA_INT,
+ AU1000_MACDMA1_PHYS_ADDR)
+ },
+ [ALCHEMY_CPU_AU1500] = {
+ MAC_RES(AU1500_MAC1_PHYS_ADDR,
+ AU1500_MACEN_PHYS_ADDR + 4,
+ AU1500_MAC1_DMA_INT,
+ AU1000_MACDMA1_PHYS_ADDR)
+ },
+ [ALCHEMY_CPU_AU1550] = {
+ MAC_RES(AU1000_MAC1_PHYS_ADDR,
+ AU1000_MACEN_PHYS_ADDR + 4,
+ AU1550_MAC1_DMA_INT,
+ AU1000_MACDMA1_PHYS_ADDR)
+ },
+};
+
+static struct au1000_eth_platform_data au1xxx_eth1_platform_data = {
+ .phy1_search_mac0 = 1,
+};
+
+static struct platform_device au1xxx_eth1_device = {
+ .name = "au1000-eth",
+ .id = 1,
+ .num_resources = MAC_RES_COUNT,
+ .dev.platform_data = &au1xxx_eth1_platform_data,
+};
+
+void __init au1xxx_override_eth_cfg(unsigned int port,
+ struct au1000_eth_platform_data *eth_data)
+{
+ if (!eth_data || port > 1)
+ return;
+
+ if (port == 0)
+ memcpy(&au1xxx_eth0_platform_data, eth_data,
+ sizeof(struct au1000_eth_platform_data));
+ else
+ memcpy(&au1xxx_eth1_platform_data, eth_data,
+ sizeof(struct au1000_eth_platform_data));
+}
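+
+/*
+ * Illustrative sketch (not part of the original code): a board with fixed
+ * PHY wiring could override the MAC0 defaults from its board setup code,
+ * before au1xxx_platform_init() below registers the devices; only fields
+ * visible in this file are shown:
+ *
+ *	static struct au1000_eth_platform_data board_eth0_data = {
+ *		.phy1_search_mac0 = 0,
+ *	};
+ *
+ *	au1xxx_override_eth_cfg(0, &board_eth0_data);
+ */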
+
+static void __init alchemy_setup_macs(int ctype)
+{
+ int ret, i;
+ unsigned char ethaddr[6];
+ struct resource *macres;
+
+ /* Handle 1st MAC */
+ if (alchemy_get_macs(ctype) < 1)
+ return;
+
+ macres = kmalloc(sizeof(struct resource) * MAC_RES_COUNT, GFP_KERNEL);
+ if (!macres) {
+ printk(KERN_INFO "Alchemy: no memory for MAC0 resources\n");
+ return;
+ }
+ memcpy(macres, au1xxx_eth0_resources[ctype],
+ sizeof(struct resource) * MAC_RES_COUNT);
+ au1xxx_eth0_device.resource = macres;
+
+ i = prom_get_ethernet_addr(ethaddr);
+ if (!i && !is_valid_ether_addr(au1xxx_eth0_platform_data.mac))
+ memcpy(au1xxx_eth0_platform_data.mac, ethaddr, 6);
+
+ ret = platform_device_register(&au1xxx_eth0_device);
+ if (ret)
+ printk(KERN_INFO "Alchemy: failed to register MAC0\n");
+
+
+ /* Handle 2nd MAC */
+ if (alchemy_get_macs(ctype) < 2)
+ return;
+
+ macres = kmalloc(sizeof(struct resource) * MAC_RES_COUNT, GFP_KERNEL);
+ if (!macres) {
+ printk(KERN_INFO "Alchemy: no memory for MAC1 resources\n");
+ return;
+ }
+ memcpy(macres, au1xxx_eth1_resources[ctype],
+ sizeof(struct resource) * MAC_RES_COUNT);
+ au1xxx_eth1_device.resource = macres;
+
+ ethaddr[5] += 1; /* next addr for 2nd MAC */
+ if (!i && !is_valid_ether_addr(au1xxx_eth1_platform_data.mac))
+ memcpy(au1xxx_eth1_platform_data.mac, ethaddr, 6);
+
+ /* Register second MAC if enabled in pinfunc */
+ if (!(au_readl(SYS_PINFUNC) & (u32)SYS_PF_NI2)) {
+ ret = platform_device_register(&au1xxx_eth1_device);
+ if (ret)
+ printk(KERN_INFO "Alchemy: failed to register MAC1\n");
+ }
+}
+
+static int __init au1xxx_platform_init(void)
+{
+ int ctype = alchemy_get_cputype();
+
+ alchemy_setup_uarts(ctype);
+ alchemy_setup_macs(ctype);
+ alchemy_setup_usb(ctype);
+
+ return 0;
+}
+
+arch_initcall(au1xxx_platform_init);
diff --git a/arch/mips/alchemy/common/power.c b/arch/mips/alchemy/common/power.c
new file mode 100644
index 00000000..0c7fce2a
--- /dev/null
+++ b/arch/mips/alchemy/common/power.c
@@ -0,0 +1,135 @@
+/*
+ * BRIEF MODULE DESCRIPTION
+ * Au1xx0 Power Management routines.
+ *
+ * Copyright 2001, 2008 MontaVista Software Inc.
+ * Author: MontaVista Software, Inc. <source@mvista.com>
+ *
+ * Some of the routines are right out of init/main.c, whose
+ * copyrights apply here.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/init.h>
+#include <linux/pm.h>
+#include <linux/sysctl.h>
+#include <linux/jiffies.h>
+
+#include <asm/uaccess.h>
+#include <asm/mach-au1x00/au1000.h>
+
+/*
+ * We need to save/restore a bunch of core registers that are
+ * either volatile or reset to some state across a processor sleep.
+ * If reading a register doesn't return a value that can be used for a
+ * later restore, we have to keep a shadow copy of what was written and
+ * provide a function for reloading that register.
+ *
+ * We only have to save/restore registers that aren't otherwise
+ * done as part of a driver pm_* function.
+ */
+static unsigned int sleep_sys_clocks[5];
+static unsigned int sleep_sys_pinfunc;
+static unsigned int sleep_static_memctlr[4][3];
+
+
+static void save_core_regs(void)
+{
+ /* Clocks and PLLs. */
+ sleep_sys_clocks[0] = au_readl(SYS_FREQCTRL0);
+ sleep_sys_clocks[1] = au_readl(SYS_FREQCTRL1);
+ sleep_sys_clocks[2] = au_readl(SYS_CLKSRC);
+ sleep_sys_clocks[3] = au_readl(SYS_CPUPLL);
+ sleep_sys_clocks[4] = au_readl(SYS_AUXPLL);
+
+ /* pin mux config */
+ sleep_sys_pinfunc = au_readl(SYS_PINFUNC);
+
+ /* Save the static memory controller configuration. */
+ sleep_static_memctlr[0][0] = au_readl(MEM_STCFG0);
+ sleep_static_memctlr[0][1] = au_readl(MEM_STTIME0);
+ sleep_static_memctlr[0][2] = au_readl(MEM_STADDR0);
+ sleep_static_memctlr[1][0] = au_readl(MEM_STCFG1);
+ sleep_static_memctlr[1][1] = au_readl(MEM_STTIME1);
+ sleep_static_memctlr[1][2] = au_readl(MEM_STADDR1);
+ sleep_static_memctlr[2][0] = au_readl(MEM_STCFG2);
+ sleep_static_memctlr[2][1] = au_readl(MEM_STTIME2);
+ sleep_static_memctlr[2][2] = au_readl(MEM_STADDR2);
+ sleep_static_memctlr[3][0] = au_readl(MEM_STCFG3);
+ sleep_static_memctlr[3][1] = au_readl(MEM_STTIME3);
+ sleep_static_memctlr[3][2] = au_readl(MEM_STADDR3);
+}
+
+static void restore_core_regs(void)
+{
+	/* Restore the clock configuration. Writing CPUPLL last will
+	 * stall a bit and stabilize the other clocks (unless this is
+	 * one of those Au1000s with a write-only PLL, where we don't
+	 * have a valid value)
+ */
+ au_writel(sleep_sys_clocks[0], SYS_FREQCTRL0);
+ au_writel(sleep_sys_clocks[1], SYS_FREQCTRL1);
+ au_writel(sleep_sys_clocks[2], SYS_CLKSRC);
+ au_writel(sleep_sys_clocks[4], SYS_AUXPLL);
+ if (!au1xxx_cpu_has_pll_wo())
+ au_writel(sleep_sys_clocks[3], SYS_CPUPLL);
+ au_sync();
+
+ au_writel(sleep_sys_pinfunc, SYS_PINFUNC);
+ au_sync();
+
+ /* Restore the static memory controller configuration. */
+ au_writel(sleep_static_memctlr[0][0], MEM_STCFG0);
+ au_writel(sleep_static_memctlr[0][1], MEM_STTIME0);
+ au_writel(sleep_static_memctlr[0][2], MEM_STADDR0);
+ au_writel(sleep_static_memctlr[1][0], MEM_STCFG1);
+ au_writel(sleep_static_memctlr[1][1], MEM_STTIME1);
+ au_writel(sleep_static_memctlr[1][2], MEM_STADDR1);
+ au_writel(sleep_static_memctlr[2][0], MEM_STCFG2);
+ au_writel(sleep_static_memctlr[2][1], MEM_STTIME2);
+ au_writel(sleep_static_memctlr[2][2], MEM_STADDR2);
+ au_writel(sleep_static_memctlr[3][0], MEM_STCFG3);
+ au_writel(sleep_static_memctlr[3][1], MEM_STTIME3);
+ au_writel(sleep_static_memctlr[3][2], MEM_STADDR3);
+}
+
+void au_sleep(void)
+{
+ save_core_regs();
+
+ switch (alchemy_get_cputype()) {
+ case ALCHEMY_CPU_AU1000:
+ case ALCHEMY_CPU_AU1500:
+ case ALCHEMY_CPU_AU1100:
+ alchemy_sleep_au1000();
+ break;
+ case ALCHEMY_CPU_AU1550:
+ case ALCHEMY_CPU_AU1200:
+ alchemy_sleep_au1550();
+ break;
+ case ALCHEMY_CPU_AU1300:
+ alchemy_sleep_au1300();
+ break;
+ }
+
+ restore_core_regs();
+}
diff --git a/arch/mips/alchemy/common/prom.c b/arch/mips/alchemy/common/prom.c
new file mode 100644
index 00000000..53402105
--- /dev/null
+++ b/arch/mips/alchemy/common/prom.c
@@ -0,0 +1,129 @@
+/*
+ *
+ * BRIEF MODULE DESCRIPTION
+ * PROM library initialisation code, supports YAMON and U-Boot.
+ *
+ * Copyright 2000-2001, 2006, 2008 MontaVista Software Inc.
+ * Author: MontaVista Software, Inc. <source@mvista.com>
+ *
+ * This file was derived from Carsten Langgaard's
+ * arch/mips/mips-boards/xx files.
+ *
+ * Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 1999,2000 MIPS Technologies, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/string.h>
+
+#include <asm/bootinfo.h>
+
+int prom_argc;
+char **prom_argv;
+char **prom_envp;
+
+void __init prom_init_cmdline(void)
+{
+ int i;
+
+ for (i = 1; i < prom_argc; i++) {
+ strlcat(arcs_cmdline, prom_argv[i], COMMAND_LINE_SIZE);
+ if (i < (prom_argc - 1))
+ strlcat(arcs_cmdline, " ", COMMAND_LINE_SIZE);
+ }
+}
+
+char *prom_getenv(char *envname)
+{
+ /*
+ * Return a pointer to the given environment variable.
+ * YAMON uses "name", "value" pairs, while U-Boot uses "name=value".
+ */
+
+ char **env = prom_envp;
+ int i = strlen(envname);
+ int yamon = (*env && strchr(*env, '=') == NULL);
+
+ while (*env) {
+ if (yamon) {
+ if (strcmp(envname, *env++) == 0)
+ return *env;
+ } else if (strncmp(envname, *env, i) == 0 && (*env)[i] == '=')
+ return *env + i + 1;
+ env++;
+ }
+
+ return NULL;
+}
+
+static inline unsigned char str2hexnum(unsigned char c)
+{
+ if (c >= '0' && c <= '9')
+ return c - '0';
+ if (c >= 'a' && c <= 'f')
+ return c - 'a' + 10;
+ if (c >= 'A' && c <= 'F')
+ return c - 'A' + 10;
+
+	return 0; /* not a hex digit */
+}
+
+static inline void str2eaddr(unsigned char *ea, unsigned char *str)
+{
+ int i;
+
+ for (i = 0; i < 6; i++) {
+ unsigned char num;
+
+ if ((*str == '.') || (*str == ':'))
+ str++;
+ num = str2hexnum(*str++) << 4;
+ num |= str2hexnum(*str++);
+ ea[i] = num;
+ }
+}
+
+int __init prom_get_ethernet_addr(char *ethernet_addr)
+{
+ char *ethaddr_str;
+
+ /* Check the environment variables first */
+ ethaddr_str = prom_getenv("ethaddr");
+ if (!ethaddr_str) {
+ /* Check command line */
+ ethaddr_str = strstr(arcs_cmdline, "ethaddr=");
+ if (!ethaddr_str)
+ return -1;
+
+ ethaddr_str += strlen("ethaddr=");
+ }
+
+ str2eaddr(ethernet_addr, ethaddr_str);
+
+ return 0;
+}
+
+void __init prom_free_prom_memory(void)
+{
+}
diff --git a/arch/mips/alchemy/common/setup.c b/arch/mips/alchemy/common/setup.c
new file mode 100644
index 00000000..37ffd997
--- /dev/null
+++ b/arch/mips/alchemy/common/setup.c
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2000, 2007-2008 MontaVista Software Inc.
+ * Author: MontaVista Software, Inc. <source@mvista.com>
+ *
+ * Updates to 2.6, Pete Popov, Embedded Alley Solutions, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+
+#include <asm/mipsregs.h>
+#include <asm/time.h>
+
+#include <au1000.h>
+
+extern void __init board_setup(void);
+extern void set_cpuspec(void);
+
+void __init plat_mem_setup(void)
+{
+ unsigned long est_freq;
+
+ /* determine core clock */
+ est_freq = au1xxx_calc_clock();
+	est_freq += 5000;	/* round to the nearest 10 kHz */
+ est_freq -= est_freq % 10000;
+ printk(KERN_INFO "(PRId %08x) @ %lu.%02lu MHz\n", read_c0_prid(),
+ est_freq / 1000000, ((est_freq % 1000000) * 100) / 1000000);
+
+	/* seed loops_per_jiffy from the core clock; faster than calibrating it at boot */
+ preset_lpj = (est_freq >> 1) / HZ;
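+	/*
+	 * Worked example (illustrative): a 396 MHz core with HZ=100 ends up
+	 * with preset_lpj = 396000000 / 2 / 100 = 1980000.
+	 */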
+
+ if (au1xxx_cpu_needs_config_od())
+ /* Various early Au1xx0 errata corrected by this */
+ set_c0_config(1 << 19); /* Set Config[OD] */
+ else
+ /* Clear to obtain best system bus performance */
+ clear_c0_config(1 << 19); /* Clear Config[OD] */
+
+ board_setup(); /* board specific setup */
+
+ /* IO/MEM resources. */
+ set_io_port_base(0);
+ ioport_resource.start = IOPORT_RESOURCE_START;
+ ioport_resource.end = IOPORT_RESOURCE_END;
+ iomem_resource.start = IOMEM_RESOURCE_START;
+ iomem_resource.end = IOMEM_RESOURCE_END;
+}
+
+#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_PCI)
+/* This routine should be valid for all Au1x based boards */
+phys_t __fixup_bigphys_addr(phys_t phys_addr, phys_t size)
+{
+ unsigned long start = ALCHEMY_PCI_MEMWIN_START;
+ unsigned long end = ALCHEMY_PCI_MEMWIN_END;
+
+ /* Don't fixup 36-bit addresses */
+ if ((phys_addr >> 32) != 0)
+ return phys_addr;
+
+ /* Check for PCI memory window */
+ if (phys_addr >= start && (phys_addr + size - 1) <= end)
+ return (phys_t)(AU1500_PCI_MEM_PHYS_ADDR + phys_addr);
+
+ /* default nop */
+ return phys_addr;
+}
+EXPORT_SYMBOL(__fixup_bigphys_addr);
+#endif
diff --git a/arch/mips/alchemy/common/sleeper.S b/arch/mips/alchemy/common/sleeper.S
new file mode 100644
index 00000000..c7bcc7e5
--- /dev/null
+++ b/arch/mips/alchemy/common/sleeper.S
@@ -0,0 +1,270 @@
+/*
+ * Copyright 2002 Embedded Edge, LLC
+ * Author: dan@embeddededge.com
+ *
+ * Sleep helper for Au1xxx sleep mode.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <asm/asm.h>
+#include <asm/mipsregs.h>
+#include <asm/regdef.h>
+#include <asm/stackframe.h>
+
+ .extern __flush_cache_all
+
+ .text
+ .set noreorder
+ .set noat
+ .align 5
+
+
+/* preparatory stuff */
+.macro SETUP_SLEEP
+ subu sp, PT_SIZE
+ sw $1, PT_R1(sp)
+ sw $2, PT_R2(sp)
+ sw $3, PT_R3(sp)
+ sw $4, PT_R4(sp)
+ sw $5, PT_R5(sp)
+ sw $6, PT_R6(sp)
+ sw $7, PT_R7(sp)
+ sw $16, PT_R16(sp)
+ sw $17, PT_R17(sp)
+ sw $18, PT_R18(sp)
+ sw $19, PT_R19(sp)
+ sw $20, PT_R20(sp)
+ sw $21, PT_R21(sp)
+ sw $22, PT_R22(sp)
+ sw $23, PT_R23(sp)
+ sw $26, PT_R26(sp)
+ sw $27, PT_R27(sp)
+ sw $28, PT_R28(sp)
+ sw $30, PT_R30(sp)
+ sw $31, PT_R31(sp)
+ mfc0 k0, CP0_STATUS
+ sw k0, 0x20(sp)
+ mfc0 k0, CP0_CONTEXT
+ sw k0, 0x1c(sp)
+ mfc0 k0, CP0_PAGEMASK
+ sw k0, 0x18(sp)
+ mfc0 k0, CP0_CONFIG
+ sw k0, 0x14(sp)
+
+ /* flush caches to make sure context is in memory */
+ la t1, __flush_cache_all
+ lw t0, 0(t1)
+ jalr t0
+ nop
+
+ /* Now set up the scratch registers so the boot rom will
+ * return to this point upon wakeup.
+ * sys_scratch0 : SP
+ * sys_scratch1 : RA
+ */
+ lui t3, 0xb190 /* sys_xxx */
+ sw sp, 0x0018(t3)
+ la k0, alchemy_sleep_wakeup /* resume path */
+ sw k0, 0x001c(t3)
+.endm
+
+.macro DO_SLEEP
+ /* put power supply and processor to sleep */
+ sw zero, 0x0078(t3) /* sys_slppwr */
+ sync
+ sw zero, 0x007c(t3) /* sys_sleep */
+ sync
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+.endm
+
+/* sleep code for Au1000/Au1100/Au1500 memory controller type */
+LEAF(alchemy_sleep_au1000)
+
+ SETUP_SLEEP
+
+ /* cache following instructions, as memory gets put to sleep */
+ la t0, 1f
+ .set mips3
+ cache 0x14, 0(t0)
+ cache 0x14, 32(t0)
+ cache 0x14, 64(t0)
+ cache 0x14, 96(t0)
+ .set mips0
+
+1: lui a0, 0xb400 /* mem_xxx */
+ sw zero, 0x001c(a0) /* Precharge */
+ sync
+ sw zero, 0x0020(a0) /* Auto Refresh */
+ sync
+ sw zero, 0x0030(a0) /* Sleep */
+ sync
+
+ DO_SLEEP
+
+END(alchemy_sleep_au1000)
+
+/* sleep code for Au1550/Au1200 memory controller type */
+LEAF(alchemy_sleep_au1550)
+
+ SETUP_SLEEP
+
+ /* cache following instructions, as memory gets put to sleep */
+ la t0, 1f
+ .set mips3
+ cache 0x14, 0(t0)
+ cache 0x14, 32(t0)
+ cache 0x14, 64(t0)
+ cache 0x14, 96(t0)
+ .set mips0
+
+1: lui a0, 0xb400 /* mem_xxx */
+ sw zero, 0x08c0(a0) /* Precharge */
+ sync
+ sw zero, 0x08d0(a0) /* Self Refresh */
+ sync
+
+ /* wait for sdram to enter self-refresh mode */
+ lui t0, 0x0100
+2: lw t1, 0x0850(a0) /* mem_sdstat */
+ and t2, t1, t0
+ beq t2, zero, 2b
+ nop
+
+ /* disable SDRAM clocks */
+ lui t0, 0xcfff
+ ori t0, t0, 0xffff
+ lw t1, 0x0840(a0) /* mem_sdconfiga */
+ and t1, t0, t1 /* clear CE[1:0] */
+ sw t1, 0x0840(a0) /* mem_sdconfiga */
+ sync
+
+ DO_SLEEP
+
+END(alchemy_sleep_au1550)
+
+/* sleep code for Au1300 memory controller type */
+LEAF(alchemy_sleep_au1300)
+
+ SETUP_SLEEP
+
+ /* cache following instructions, as memory gets put to sleep */
+ la t0, 2f
+ la t1, 4f
+ subu t2, t1, t0
+
+ .set mips3
+
+1: cache 0x14, 0(t0)
+ subu t2, t2, 32
+ bgez t2, 1b
+ addu t0, t0, 32
+
+ .set mips0
+
+2: lui a0, 0xb400 /* mem_xxx */
+
+ /* disable all ports in mem_sdportcfga */
+ sw zero, 0x868(a0) /* mem_sdportcfga */
+ sync
+
+ /* disable ODT */
+ li t0, 0x03010000
+ sw t0, 0x08d8(a0) /* mem_sdcmd0 */
+ sw t0, 0x08dc(a0) /* mem_sdcmd1 */
+ sync
+
+ /* precharge */
+ li t0, 0x23000400
+ sw t0, 0x08dc(a0) /* mem_sdcmd1 */
+ sw t0, 0x08d8(a0) /* mem_sdcmd0 */
+ sync
+
+ /* auto refresh */
+ sw zero, 0x08c8(a0) /* mem_sdautoref */
+ sync
+
+ /* block access to the DDR */
+ lw t0, 0x0848(a0) /* mem_sdconfigb */
+ li t1, (1 << 7 | 0x3F)
+ or t0, t0, t1
+ sw t0, 0x0848(a0) /* mem_sdconfigb */
+ sync
+
+ /* issue the Self Refresh command */
+ li t0, 0x10000000
+ sw t0, 0x08dc(a0) /* mem_sdcmd1 */
+ sw t0, 0x08d8(a0) /* mem_sdcmd0 */
+ sync
+
+ /* wait for sdram to enter self-refresh mode */
+ lui t0, 0x0300
+3: lw t1, 0x0850(a0) /* mem_sdstat */
+ and t2, t1, t0
+ bne t2, t0, 3b
+ nop
+
+ /* disable SDRAM clocks */
+ li t0, ~(3<<28)
+ lw t1, 0x0840(a0) /* mem_sdconfiga */
+ and t1, t1, t0 /* clear CE[1:0] */
+ sw t1, 0x0840(a0) /* mem_sdconfiga */
+ sync
+
+ DO_SLEEP
+4:
+
+END(alchemy_sleep_au1300)
+
+
+ /* This is where we return upon wakeup.
+ * Reload all of the registers and return.
+ */
+LEAF(alchemy_sleep_wakeup)
+ lw k0, 0x20(sp)
+ mtc0 k0, CP0_STATUS
+ lw k0, 0x1c(sp)
+ mtc0 k0, CP0_CONTEXT
+ lw k0, 0x18(sp)
+ mtc0 k0, CP0_PAGEMASK
+ lw k0, 0x14(sp)
+ mtc0 k0, CP0_CONFIG
+
+ /* We need to catch the early Alchemy SoCs with
+ * the write-only Config[OD] bit and set it back to one...
+ */
+ jal au1x00_fixup_config_od
+ nop
+ lw $1, PT_R1(sp)
+ lw $2, PT_R2(sp)
+ lw $3, PT_R3(sp)
+ lw $4, PT_R4(sp)
+ lw $5, PT_R5(sp)
+ lw $6, PT_R6(sp)
+ lw $7, PT_R7(sp)
+ lw $16, PT_R16(sp)
+ lw $17, PT_R17(sp)
+ lw $18, PT_R18(sp)
+ lw $19, PT_R19(sp)
+ lw $20, PT_R20(sp)
+ lw $21, PT_R21(sp)
+ lw $22, PT_R22(sp)
+ lw $23, PT_R23(sp)
+ lw $26, PT_R26(sp)
+ lw $27, PT_R27(sp)
+ lw $28, PT_R28(sp)
+ lw $30, PT_R30(sp)
+ lw $31, PT_R31(sp)
+ jr ra
+ addiu sp, PT_SIZE
+END(alchemy_sleep_wakeup)
diff --git a/arch/mips/alchemy/common/time.c b/arch/mips/alchemy/common/time.c
new file mode 100644
index 00000000..a7193ae1
--- /dev/null
+++ b/arch/mips/alchemy/common/time.c
@@ -0,0 +1,193 @@
+/*
+ * Copyright (C) 2008-2009 Manuel Lauss <manuel.lauss@gmail.com>
+ *
+ * Previous incarnations were:
+ * Copyright (C) 2001, 2006, 2008 MontaVista Software, <source@mvista.com>
+ * Copied and modified Carsten Langgaard's time.c
+ *
+ * Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 1999,2000 MIPS Technologies, Inc. All rights reserved.
+ *
+ * ########################################################################
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * ########################################################################
+ *
+ * Clocksource/clockevent using the 32.768 kHz-clocked Counter1 ('RTC' in
+ * the databooks). Firmware/board init code must enable the counters in the
+ * counter control register; otherwise the CP0 counter clocksource/clockevent
+ * is installed instead (and use of the 'wait' instruction is prohibited).
+ */
+
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+
+#include <asm/processor.h>
+#include <asm/time.h>
+#include <asm/mach-au1x00/au1000.h>
+
+/* 32kHz clock enabled and detected */
+#define CNTR_OK (SYS_CNTRL_E0 | SYS_CNTRL_32S)
+
+static cycle_t au1x_counter1_read(struct clocksource *cs)
+{
+ return au_readl(SYS_RTCREAD);
+}
+
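+/*
+ * Counter1 ticks at 32.768 kHz (one tick is ~30.5us); the 32-bit
+ * counter wraps after 2^32 / 32768 s = 131072 s, roughly 36.4 hours.
+ */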
+static struct clocksource au1x_counter1_clocksource = {
+ .name = "alchemy-counter1",
+ .read = au1x_counter1_read,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+ .rating = 100,
+};
+
+static int au1x_rtcmatch2_set_next_event(unsigned long delta,
+ struct clock_event_device *cd)
+{
+ delta += au_readl(SYS_RTCREAD);
+ /* wait for a previous match register write to finish syncing
+ * into the 32kHz domain before writing the new match value
+ */
+ while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_M21)
+ ;
+ au_writel(delta, SYS_RTCMATCH2);
+ au_sync();
+
+ return 0;
+}
+
+static void au1x_rtcmatch2_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *cd)
+{
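+ /* nothing to do: only one-shot mode is advertised */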
+}
+
+static irqreturn_t au1x_rtcmatch2_irq(int irq, void *dev_id)
+{
+ struct clock_event_device *cd = dev_id;
+ cd->event_handler(cd);
+ return IRQ_HANDLED;
+}
+
+static struct clock_event_device au1x_rtcmatch2_clockdev = {
+ .name = "rtcmatch2",
+ .features = CLOCK_EVT_FEAT_ONESHOT,
+ .rating = 100,
+ .set_next_event = au1x_rtcmatch2_set_next_event,
+ .set_mode = au1x_rtcmatch2_set_mode,
+ .cpumask = cpu_all_mask,
+};
+
+static struct irqaction au1x_rtcmatch2_irqaction = {
+ .handler = au1x_rtcmatch2_irq,
+ .flags = IRQF_TIMER,
+ .name = "timer",
+ .dev_id = &au1x_rtcmatch2_clockdev,
+};
+
+static int __init alchemy_time_init(unsigned int m2int)
+{
+ struct clock_event_device *cd = &au1x_rtcmatch2_clockdev;
+ unsigned long t;
+
+ au1x_rtcmatch2_clockdev.irq = m2int;
+
+ /* Check whether firmware (YAMON, ...) has enabled the 32kHz
+ * oscillator and the clock has been detected.  If so, install the
+ * counter1 clocksource and the rtcmatch2 clock event device;
+ * otherwise don't bother.  Note that both bits being set is by no
+ * means a definite guarantee that the counters actually work (the
+ * 32S bit seems to stay stuck at 1 once a single clock edge has
+ * been detected, hence the timeouts below).
+ */
+ if (CNTR_OK != (au_readl(SYS_COUNTER_CNTRL) & CNTR_OK))
+ goto cntr_err;
+
+ /*
+ * set up counter 1 (RTC) to tick at full speed
+ */
+ t = 0xffffff;
+ while ((au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_T1S) && --t)
+ asm volatile ("nop");
+ if (!t)
+ goto cntr_err;
+
+ au_writel(0, SYS_RTCTRIM); /* 32.768 kHz */
+ au_sync();
+
+ t = 0xffffff;
+ while ((au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_C1S) && --t)
+ asm volatile ("nop");
+ if (!t)
+ goto cntr_err;
+ au_writel(0, SYS_RTCWRITE);
+ au_sync();
+
+ t = 0xffffff;
+ while ((au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_C1S) && --t)
+ asm volatile ("nop");
+ if (!t)
+ goto cntr_err;
+
+ /* register counter1 clocksource and event device */
+ clocksource_register_hz(&au1x_counter1_clocksource, 32768);
+
+ cd->shift = 32;
+ cd->mult = div_sc(32768, NSEC_PER_SEC, cd->shift);
+ cd->max_delta_ns = clockevent_delta2ns(0xffffffff, cd);
+ cd->min_delta_ns = clockevent_delta2ns(9, cd); /* 9/32768s ~= 275us */
+ clockevents_register_device(cd);
+ setup_irq(m2int, &au1x_rtcmatch2_irqaction);
+
+ printk(KERN_INFO "Alchemy clocksource installed\n");
+
+ return 0;
+
+cntr_err:
+ return -1;
+}
+
+static void __init alchemy_setup_c0timer(void)
+{
+ /*
+ * The MIPS kernel assigns 'au1k_wait' to 'cpu_wait' before this
+ * function is called.  Because the Alchemy counters are unusable
+ * here, the CP0 timekeeping code is installed instead and use of
+ * the 'wait' instruction must be prohibited; the easiest way to do
+ * that is to set cpu_wait to NULL.
+ */
+ cpu_wait = NULL;
+ r4k_clockevent_init();
+ init_r4k_clocksource();
+}
+
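+/* RTC Match2 irq number per CPU type, indexed by alchemy_get_cputype() */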
+static int alchemy_m2inttab[] __initdata = {
+ AU1000_RTC_MATCH2_INT,
+ AU1500_RTC_MATCH2_INT,
+ AU1100_RTC_MATCH2_INT,
+ AU1550_RTC_MATCH2_INT,
+ AU1200_RTC_MATCH2_INT,
+ AU1300_RTC_MATCH2_INT,
+};
+
+void __init plat_time_init(void)
+{
+ int t;
+
+ t = alchemy_get_cputype();
+ if (t == ALCHEMY_CPU_UNKNOWN)
+ alchemy_setup_c0timer();
+ else if (alchemy_time_init(alchemy_m2inttab[t]))
+ alchemy_setup_c0timer();
+}
diff --git a/arch/mips/alchemy/common/vss.c b/arch/mips/alchemy/common/vss.c
new file mode 100644
index 00000000..d23b1444
--- /dev/null
+++ b/arch/mips/alchemy/common/vss.c
@@ -0,0 +1,84 @@
+/*
+ * Au1300 media block power gating (VSS)
+ *
+ * This is a stop-gap solution until I have the clock framework integration
+ * ready; this really must be handled transparently whenever the clocks
+ * for the various media blocks are enabled/disabled.
+ */
+
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <asm/mach-au1x00/au1000.h>
+
+#define VSS_GATE 0x00 /* gate wait timers */
+#define VSS_CLKRST 0x04 /* clock/block control */
+#define VSS_FTR 0x08 /* footers */
+
+#define VSS_ADDR(blk) (KSEG1ADDR(AU1300_VSS_PHYS_ADDR) + ((blk) * 0x0c))
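+/* each VSS block has a 0x0c-byte register window: GATE, CLKRST, FTR */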
+
+static DEFINE_SPINLOCK(au1300_vss_lock);
+
+/* enable a block as outlined in the databook */
+static inline void __enable_block(int block)
+{
+ void __iomem *base = (void __iomem *)VSS_ADDR(block);
+
+ __raw_writel(3, base + VSS_CLKRST); /* enable clock, assert reset */
+ wmb();
+
+ __raw_writel(0x01fffffe, base + VSS_GATE); /* maximum setup time */
+ wmb();
+
+ /* enable footers in sequence */
+ __raw_writel(0x01, base + VSS_FTR);
+ wmb();
+ __raw_writel(0x03, base + VSS_FTR);
+ wmb();
+ __raw_writel(0x07, base + VSS_FTR);
+ wmb();
+ __raw_writel(0x0f, base + VSS_FTR);
+ wmb();
+
+ __raw_writel(0x01ffffff, base + VSS_GATE); /* start FSM too */
+ wmb();
+
+ __raw_writel(2, base + VSS_CLKRST); /* deassert reset */
+ wmb();
+
+ __raw_writel(0x1f, base + VSS_FTR); /* enable isolation cells */
+ wmb();
+}
+
+/* disable a block as outlined in the databook */
+static inline void __disable_block(int block)
+{
+ void __iomem *base = (void __iomem *)VSS_ADDR(block);
+
+ __raw_writel(0x0f, base + VSS_FTR); /* disable isolation cells */
+ wmb();
+ __raw_writel(0, base + VSS_GATE); /* disable FSM */
+ wmb();
+ __raw_writel(3, base + VSS_CLKRST); /* assert reset */
+ wmb();
+ __raw_writel(1, base + VSS_CLKRST); /* disable clock */
+ wmb();
+ __raw_writel(0, base + VSS_FTR); /* disable all footers */
+ wmb();
+}
+
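+/*
+ * au1300_vss_block_control - gate or ungate one VSS power domain.
+ * @block: VSS block index (selects the 0x0c-byte register window).
+ * @enable: nonzero powers the block up, zero gates it off.
+ *
+ * A (hypothetical) caller ungates a block before touching it and gates
+ * it again once idle, e.g.:
+ *	au1300_vss_block_control(my_block, 1);
+ *	... use the media block ...
+ *	au1300_vss_block_control(my_block, 0);
+ */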
+void au1300_vss_block_control(int block, int enable)
+{
+ unsigned long flags;
+
+ if (alchemy_get_cputype() != ALCHEMY_CPU_AU1300)
+ return;
+
+ /* only one block at a time */
+ spin_lock_irqsave(&au1300_vss_lock, flags);
+ if (enable)
+ __enable_block(block);
+ else
+ __disable_block(block);
+ spin_unlock_irqrestore(&au1300_vss_lock, flags);
+}
+EXPORT_SYMBOL_GPL(au1300_vss_block_control);