Diffstat (limited to 'ANDROID_3.4.5/arch/arm/common')
-rw-r--r--  ANDROID_3.4.5/arch/arm/common/Kconfig                |   92
-rw-r--r--  ANDROID_3.4.5/arch/arm/common/Makefile               |   19
-rw-r--r--  ANDROID_3.4.5/arch/arm/common/dmabounce.c            |  532
-rw-r--r--  ANDROID_3.4.5/arch/arm/common/fiq_debugger.c         | 1388
-rw-r--r--  ANDROID_3.4.5/arch/arm/common/fiq_debugger_ringbuf.h |   94
-rw-r--r--  ANDROID_3.4.5/arch/arm/common/fiq_glue.S             |  111
-rw-r--r--  ANDROID_3.4.5/arch/arm/common/fiq_glue_setup.c       |  100
-rw-r--r--  ANDROID_3.4.5/arch/arm/common/gic.c                  |  784
-rw-r--r--  ANDROID_3.4.5/arch/arm/common/icst.c                 |  100
-rw-r--r--  ANDROID_3.4.5/arch/arm/common/it8152.c               |  354
-rw-r--r--  ANDROID_3.4.5/arch/arm/common/locomo.c               |  914
-rw-r--r--  ANDROID_3.4.5/arch/arm/common/sa1111.c               | 1459
-rw-r--r--  ANDROID_3.4.5/arch/arm/common/scoop.c                |  284
-rw-r--r--  ANDROID_3.4.5/arch/arm/common/sharpsl_param.c        |   62
-rw-r--r--  ANDROID_3.4.5/arch/arm/common/timer-sp.c             |  191
-rw-r--r--  ANDROID_3.4.5/arch/arm/common/uengine.c              |  507
-rw-r--r--  ANDROID_3.4.5/arch/arm/common/via82c505.c            |   92
-rw-r--r--  ANDROID_3.4.5/arch/arm/common/vic.c                  |  460
18 files changed, 7543 insertions, 0 deletions
diff --git a/ANDROID_3.4.5/arch/arm/common/Kconfig b/ANDROID_3.4.5/arch/arm/common/Kconfig new file mode 100644 index 00000000..271dd136 --- /dev/null +++ b/ANDROID_3.4.5/arch/arm/common/Kconfig @@ -0,0 +1,92 @@ +config ARM_GIC + select IRQ_DOMAIN + select MULTI_IRQ_HANDLER + bool + +config GIC_NON_BANKED + bool + +config ARM_VIC + select IRQ_DOMAIN + select MULTI_IRQ_HANDLER + bool + +config ARM_VIC_NR + int + default 4 if ARCH_S5PV210 + default 3 if ARCH_S5PC100 + default 2 + depends on ARM_VIC + help + The maximum number of VICs available in the system, for + power management. + +config ICST + bool + +config SA1111 + bool + select DMABOUNCE if !ARCH_PXA + +config DMABOUNCE + bool + select ZONE_DMA + +config SHARP_LOCOMO + bool + +config SHARP_PARAM + bool + +config SHARP_SCOOP + bool + +config FIQ_GLUE + bool + select FIQ + +config FIQ_DEBUGGER + bool "FIQ Mode Serial Debugger" + select FIQ + select FIQ_GLUE + default n + help + The FIQ serial debugger can accept commands even when the + kernel is unresponsive due to being stuck with interrupts + disabled. + + +config FIQ_DEBUGGER_NO_SLEEP + bool "Keep serial debugger active" + depends on FIQ_DEBUGGER + default n + help + Enables the serial debugger at boot. Passing + fiq_debugger.no_sleep on the kernel commandline will + override this config option. + +config FIQ_DEBUGGER_WAKEUP_IRQ_ALWAYS_ON + bool "Don't disable wakeup IRQ when debugger is active" + depends on FIQ_DEBUGGER + default n + help + Don't disable the wakeup irq when enabling the uart clock. This will + cause extra interrupts, but it makes the serial debugger usable with + on some MSM radio builds that ignore the uart clock request in power + collapse. + +config FIQ_DEBUGGER_CONSOLE + bool "Console on FIQ Serial Debugger port" + depends on FIQ_DEBUGGER + default n + help + Enables a console so that printk messages are displayed on + the debugger serial port as the occur. + +config FIQ_DEBUGGER_CONSOLE_DEFAULT_ENABLE + bool "Put the FIQ debugger into console mode by default" + depends on FIQ_DEBUGGER_CONSOLE + default n + help + If enabled, this puts the fiq debugger into console mode by default. + Otherwise, the fiq debugger will start out in debug mode. diff --git a/ANDROID_3.4.5/arch/arm/common/Makefile b/ANDROID_3.4.5/arch/arm/common/Makefile new file mode 100644 index 00000000..11670f9d --- /dev/null +++ b/ANDROID_3.4.5/arch/arm/common/Makefile @@ -0,0 +1,19 @@ +# +# Makefile for the linux kernel. +# + +obj-$(CONFIG_ARM_GIC) += gic.o +obj-$(CONFIG_ARM_VIC) += vic.o +obj-$(CONFIG_ICST) += icst.o +obj-$(CONFIG_SA1111) += sa1111.o +obj-$(CONFIG_PCI_HOST_VIA82C505) += via82c505.o +obj-$(CONFIG_DMABOUNCE) += dmabounce.o +obj-$(CONFIG_SHARP_LOCOMO) += locomo.o +obj-$(CONFIG_SHARP_PARAM) += sharpsl_param.o +obj-$(CONFIG_SHARP_SCOOP) += scoop.o +obj-$(CONFIG_ARCH_IXP2000) += uengine.o +obj-$(CONFIG_ARCH_IXP23XX) += uengine.o +obj-$(CONFIG_PCI_HOST_ITE8152) += it8152.o +obj-$(CONFIG_ARM_TIMER_SP804) += timer-sp.o +obj-$(CONFIG_FIQ_GLUE) += fiq_glue.o fiq_glue_setup.o +obj-$(CONFIG_FIQ_DEBUGGER) += fiq_debugger.o diff --git a/ANDROID_3.4.5/arch/arm/common/dmabounce.c b/ANDROID_3.4.5/arch/arm/common/dmabounce.c new file mode 100644 index 00000000..595ecd29 --- /dev/null +++ b/ANDROID_3.4.5/arch/arm/common/dmabounce.c @@ -0,0 +1,532 @@ +/* + * arch/arm/common/dmabounce.c + * + * Special dma_{map/unmap/dma_sync}_* routines for systems that have + * limited DMA windows. These functions utilize bounce buffers to + * copy data to/from buffers located outside the DMA region. 
This + * only works for systems in which DMA memory is at the bottom of + * RAM, the remainder of memory is at the top and the DMA memory + * can be marked as ZONE_DMA. Anything beyond that such as discontiguous + * DMA windows will require custom implementations that reserve memory + * areas at early bootup. + * + * Original version by Brad Parker (brad@heeltoe.com) + * Re-written by Christopher Hoover <ch@murgatroid.com> + * Made generic by Deepak Saxena <dsaxena@plexity.net> + * + * Copyright (C) 2002 Hewlett Packard Company. + * Copyright (C) 2004 MontaVista Software, Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/page-flags.h> +#include <linux/device.h> +#include <linux/dma-mapping.h> +#include <linux/dmapool.h> +#include <linux/list.h> +#include <linux/scatterlist.h> + +#include <asm/cacheflush.h> + +#undef STATS + +#ifdef STATS +#define DO_STATS(X) do { X ; } while (0) +#else +#define DO_STATS(X) do { } while (0) +#endif + +/* ************************************************** */ + +struct safe_buffer { + struct list_head node; + + /* original request */ + void *ptr; + size_t size; + int direction; + + /* safe buffer info */ + struct dmabounce_pool *pool; + void *safe; + dma_addr_t safe_dma_addr; +}; + +struct dmabounce_pool { + unsigned long size; + struct dma_pool *pool; +#ifdef STATS + unsigned long allocs; +#endif +}; + +struct dmabounce_device_info { + struct device *dev; + struct list_head safe_buffers; +#ifdef STATS + unsigned long total_allocs; + unsigned long map_op_count; + unsigned long bounce_count; + int attr_res; +#endif + struct dmabounce_pool small; + struct dmabounce_pool large; + + rwlock_t lock; + + int (*needs_bounce)(struct device *, dma_addr_t, size_t); +}; + +#ifdef STATS +static ssize_t dmabounce_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct dmabounce_device_info *device_info = dev->archdata.dmabounce; + return sprintf(buf, "%lu %lu %lu %lu %lu %lu\n", + device_info->small.allocs, + device_info->large.allocs, + device_info->total_allocs - device_info->small.allocs - + device_info->large.allocs, + device_info->total_allocs, + device_info->map_op_count, + device_info->bounce_count); +} + +static DEVICE_ATTR(dmabounce_stats, 0400, dmabounce_show, NULL); +#endif + + +/* allocate a 'safe' buffer and keep track of it */ +static inline struct safe_buffer * +alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr, + size_t size, enum dma_data_direction dir) +{ + struct safe_buffer *buf; + struct dmabounce_pool *pool; + struct device *dev = device_info->dev; + unsigned long flags; + + dev_dbg(dev, "%s(ptr=%p, size=%d, dir=%d)\n", + __func__, ptr, size, dir); + + if (size <= device_info->small.size) { + pool = &device_info->small; + } else if (size <= device_info->large.size) { + pool = &device_info->large; + } else { + pool = NULL; + } + + buf = kmalloc(sizeof(struct safe_buffer), GFP_ATOMIC); + if (buf == NULL) { + dev_warn(dev, "%s: kmalloc failed\n", __func__); + return NULL; + } + + buf->ptr = ptr; + buf->size = size; + buf->direction = dir; + buf->pool = pool; + + if (pool) { + buf->safe = dma_pool_alloc(pool->pool, GFP_ATOMIC, + &buf->safe_dma_addr); + } else { + buf->safe = dma_alloc_coherent(dev, size, &buf->safe_dma_addr, + GFP_ATOMIC); + } + + if (buf->safe == 
NULL) { + dev_warn(dev, + "%s: could not alloc dma memory (size=%d)\n", + __func__, size); + kfree(buf); + return NULL; + } + +#ifdef STATS + if (pool) + pool->allocs++; + device_info->total_allocs++; +#endif + + write_lock_irqsave(&device_info->lock, flags); + list_add(&buf->node, &device_info->safe_buffers); + write_unlock_irqrestore(&device_info->lock, flags); + + return buf; +} + +/* determine if a buffer is from our "safe" pool */ +static inline struct safe_buffer * +find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_addr) +{ + struct safe_buffer *b, *rb = NULL; + unsigned long flags; + + read_lock_irqsave(&device_info->lock, flags); + + list_for_each_entry(b, &device_info->safe_buffers, node) + if (b->safe_dma_addr == safe_dma_addr) { + rb = b; + break; + } + + read_unlock_irqrestore(&device_info->lock, flags); + return rb; +} + +static inline void +free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *buf) +{ + unsigned long flags; + + dev_dbg(device_info->dev, "%s(buf=%p)\n", __func__, buf); + + write_lock_irqsave(&device_info->lock, flags); + + list_del(&buf->node); + + write_unlock_irqrestore(&device_info->lock, flags); + + if (buf->pool) + dma_pool_free(buf->pool->pool, buf->safe, buf->safe_dma_addr); + else + dma_free_coherent(device_info->dev, buf->size, buf->safe, + buf->safe_dma_addr); + + kfree(buf); +} + +/* ************************************************** */ + +static struct safe_buffer *find_safe_buffer_dev(struct device *dev, + dma_addr_t dma_addr, const char *where) +{ + if (!dev || !dev->archdata.dmabounce) + return NULL; + if (dma_mapping_error(dev, dma_addr)) { + dev_err(dev, "Trying to %s invalid mapping\n", where); + return NULL; + } + return find_safe_buffer(dev->archdata.dmabounce, dma_addr); +} + +static int needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size) +{ + if (!dev || !dev->archdata.dmabounce) + return 0; + + if (dev->dma_mask) { + unsigned long limit, mask = *dev->dma_mask; + + limit = (mask + 1) & ~mask; + if (limit && size > limit) { + dev_err(dev, "DMA mapping too big (requested %#x " + "mask %#Lx)\n", size, *dev->dma_mask); + return -E2BIG; + } + + /* Figure out if we need to bounce from the DMA mask. 
*/ + if ((dma_addr | (dma_addr + size - 1)) & ~mask) + return 1; + } + + return !!dev->archdata.dmabounce->needs_bounce(dev, dma_addr, size); +} + +static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size, + enum dma_data_direction dir) +{ + struct dmabounce_device_info *device_info = dev->archdata.dmabounce; + struct safe_buffer *buf; + + if (device_info) + DO_STATS ( device_info->map_op_count++ ); + + buf = alloc_safe_buffer(device_info, ptr, size, dir); + if (buf == NULL) { + dev_err(dev, "%s: unable to map unsafe buffer %p!\n", + __func__, ptr); + return ~0; + } + + dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n", + __func__, buf->ptr, virt_to_dma(dev, buf->ptr), + buf->safe, buf->safe_dma_addr); + + if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) { + dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n", + __func__, ptr, buf->safe, size); + memcpy(buf->safe, ptr, size); + } + + return buf->safe_dma_addr; +} + +static inline void unmap_single(struct device *dev, struct safe_buffer *buf, + size_t size, enum dma_data_direction dir) +{ + BUG_ON(buf->size != size); + BUG_ON(buf->direction != dir); + + dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n", + __func__, buf->ptr, virt_to_dma(dev, buf->ptr), + buf->safe, buf->safe_dma_addr); + + DO_STATS(dev->archdata.dmabounce->bounce_count++); + + if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) { + void *ptr = buf->ptr; + + dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n", + __func__, buf->safe, ptr, size); + memcpy(ptr, buf->safe, size); + + /* + * Since we may have written to a page cache page, + * we need to ensure that the data will be coherent + * with user mappings. + */ + __cpuc_flush_dcache_area(ptr, size); + } + free_safe_buffer(dev->archdata.dmabounce, buf); +} + +/* ************************************************** */ + +/* + * see if a buffer address is in an 'unsafe' range. if it is + * allocate a 'safe' buffer and copy the unsafe buffer into it. + * substitute the safe buffer for the unsafe one. + * (basically move the buffer from an unsafe area to a safe one) + */ +dma_addr_t __dma_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, enum dma_data_direction dir) +{ + dma_addr_t dma_addr; + int ret; + + dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n", + __func__, page, offset, size, dir); + + dma_addr = pfn_to_dma(dev, page_to_pfn(page)) + offset; + + ret = needs_bounce(dev, dma_addr, size); + if (ret < 0) + return ~0; + + if (ret == 0) { + __dma_page_cpu_to_dev(page, offset, size, dir); + return dma_addr; + } + + if (PageHighMem(page)) { + dev_err(dev, "DMA buffer bouncing of HIGHMEM pages is not supported\n"); + return ~0; + } + + return map_single(dev, page_address(page) + offset, size, dir); +} +EXPORT_SYMBOL(__dma_map_page); + +/* + * see if a mapped address was really a "safe" buffer and if so, copy + * the data from the safe buffer back to the unsafe buffer and free up + * the safe buffer. 
(basically return things back to the way they + * should be) + */ +void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size, + enum dma_data_direction dir) +{ + struct safe_buffer *buf; + + dev_dbg(dev, "%s(dma=%#x,size=%d,dir=%x)\n", + __func__, dma_addr, size, dir); + + buf = find_safe_buffer_dev(dev, dma_addr, __func__); + if (!buf) { + __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, dma_addr)), + dma_addr & ~PAGE_MASK, size, dir); + return; + } + + unmap_single(dev, buf, size, dir); +} +EXPORT_SYMBOL(__dma_unmap_page); + +int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr, + unsigned long off, size_t sz, enum dma_data_direction dir) +{ + struct safe_buffer *buf; + + dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n", + __func__, addr, off, sz, dir); + + buf = find_safe_buffer_dev(dev, addr, __func__); + if (!buf) + return 1; + + BUG_ON(buf->direction != dir); + + dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n", + __func__, buf->ptr, virt_to_dma(dev, buf->ptr), + buf->safe, buf->safe_dma_addr); + + DO_STATS(dev->archdata.dmabounce->bounce_count++); + + if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) { + dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n", + __func__, buf->safe + off, buf->ptr + off, sz); + memcpy(buf->ptr + off, buf->safe + off, sz); + } + return 0; +} +EXPORT_SYMBOL(dmabounce_sync_for_cpu); + +int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr, + unsigned long off, size_t sz, enum dma_data_direction dir) +{ + struct safe_buffer *buf; + + dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n", + __func__, addr, off, sz, dir); + + buf = find_safe_buffer_dev(dev, addr, __func__); + if (!buf) + return 1; + + BUG_ON(buf->direction != dir); + + dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n", + __func__, buf->ptr, virt_to_dma(dev, buf->ptr), + buf->safe, buf->safe_dma_addr); + + DO_STATS(dev->archdata.dmabounce->bounce_count++); + + if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) { + dev_dbg(dev, "%s: copy out unsafe %p to safe %p, size %d\n", + __func__,buf->ptr + off, buf->safe + off, sz); + memcpy(buf->safe + off, buf->ptr + off, sz); + } + return 0; +} +EXPORT_SYMBOL(dmabounce_sync_for_device); + +static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev, + const char *name, unsigned long size) +{ + pool->size = size; + DO_STATS(pool->allocs = 0); + pool->pool = dma_pool_create(name, dev, size, + 0 /* byte alignment */, + 0 /* no page-crossing issues */); + + return pool->pool ? 
0 : -ENOMEM; +} + +int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size, + unsigned long large_buffer_size, + int (*needs_bounce_fn)(struct device *, dma_addr_t, size_t)) +{ + struct dmabounce_device_info *device_info; + int ret; + + device_info = kmalloc(sizeof(struct dmabounce_device_info), GFP_ATOMIC); + if (!device_info) { + dev_err(dev, + "Could not allocated dmabounce_device_info\n"); + return -ENOMEM; + } + + ret = dmabounce_init_pool(&device_info->small, dev, + "small_dmabounce_pool", small_buffer_size); + if (ret) { + dev_err(dev, + "dmabounce: could not allocate DMA pool for %ld byte objects\n", + small_buffer_size); + goto err_free; + } + + if (large_buffer_size) { + ret = dmabounce_init_pool(&device_info->large, dev, + "large_dmabounce_pool", + large_buffer_size); + if (ret) { + dev_err(dev, + "dmabounce: could not allocate DMA pool for %ld byte objects\n", + large_buffer_size); + goto err_destroy; + } + } + + device_info->dev = dev; + INIT_LIST_HEAD(&device_info->safe_buffers); + rwlock_init(&device_info->lock); + device_info->needs_bounce = needs_bounce_fn; + +#ifdef STATS + device_info->total_allocs = 0; + device_info->map_op_count = 0; + device_info->bounce_count = 0; + device_info->attr_res = device_create_file(dev, &dev_attr_dmabounce_stats); +#endif + + dev->archdata.dmabounce = device_info; + + dev_info(dev, "dmabounce: registered device\n"); + + return 0; + + err_destroy: + dma_pool_destroy(device_info->small.pool); + err_free: + kfree(device_info); + return ret; +} +EXPORT_SYMBOL(dmabounce_register_dev); + +void dmabounce_unregister_dev(struct device *dev) +{ + struct dmabounce_device_info *device_info = dev->archdata.dmabounce; + + dev->archdata.dmabounce = NULL; + + if (!device_info) { + dev_warn(dev, + "Never registered with dmabounce but attempting" + "to unregister!\n"); + return; + } + + if (!list_empty(&device_info->safe_buffers)) { + dev_err(dev, + "Removing from dmabounce with pending buffers!\n"); + BUG(); + } + + if (device_info->small.pool) + dma_pool_destroy(device_info->small.pool); + if (device_info->large.pool) + dma_pool_destroy(device_info->large.pool); + +#ifdef STATS + if (device_info->attr_res == 0) + device_remove_file(dev, &dev_attr_dmabounce_stats); +#endif + + kfree(device_info); + + dev_info(dev, "dmabounce: device unregistered\n"); +} +EXPORT_SYMBOL(dmabounce_unregister_dev); + +MODULE_AUTHOR("Christopher Hoover <ch@hpl.hp.com>, Deepak Saxena <dsaxena@plexity.net>"); +MODULE_DESCRIPTION("Special dma_{map/unmap/dma_sync}_* routines for systems with limited DMA windows"); +MODULE_LICENSE("GPL"); diff --git a/ANDROID_3.4.5/arch/arm/common/fiq_debugger.c b/ANDROID_3.4.5/arch/arm/common/fiq_debugger.c new file mode 100644 index 00000000..d0686388 --- /dev/null +++ b/ANDROID_3.4.5/arch/arm/common/fiq_debugger.c @@ -0,0 +1,1388 @@ +/* + * arch/arm/common/fiq_debugger.c + * + * Serial Debugger Interface accessed through an FIQ interrupt. + * + * Copyright (C) 2008 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <stdarg.h> +#include <linux/module.h> +#include <linux/io.h> +#include <linux/console.h> +#include <linux/interrupt.h> +#include <linux/clk.h> +#include <linux/platform_device.h> +#include <linux/kernel_stat.h> +#include <linux/irq.h> +#include <linux/delay.h> +#include <linux/reboot.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/smp.h> +#include <linux/timer.h> +#include <linux/tty.h> +#include <linux/tty_flip.h> +#include <linux/wakelock.h> + +#include <asm/fiq_debugger.h> +#include <asm/fiq_glue.h> +#include <asm/stacktrace.h> + +#include <linux/uaccess.h> + +#include "fiq_debugger_ringbuf.h" + +#define DEBUG_MAX 64 +#define MAX_UNHANDLED_FIQ_COUNT 1000000 + +#define MAX_FIQ_DEBUGGER_PORTS 4 + +#define THREAD_INFO(sp) ((struct thread_info *) \ + ((unsigned long)(sp) & ~(THREAD_SIZE - 1))) + +struct fiq_debugger_state { + struct fiq_glue_handler handler; + + int fiq; + int uart_irq; + int signal_irq; + int wakeup_irq; + bool wakeup_irq_no_set_wake; + struct clk *clk; + struct fiq_debugger_pdata *pdata; + struct platform_device *pdev; + + char debug_cmd[DEBUG_MAX]; + int debug_busy; + int debug_abort; + + char debug_buf[DEBUG_MAX]; + int debug_count; + + bool no_sleep; + bool debug_enable; + bool ignore_next_wakeup_irq; + struct timer_list sleep_timer; + spinlock_t sleep_timer_lock; + bool uart_enabled; + struct wake_lock debugger_wake_lock; + bool console_enable; + int current_cpu; + atomic_t unhandled_fiq_count; + bool in_fiq; + + struct work_struct work; + spinlock_t work_lock; + char work_cmd[DEBUG_MAX]; + +#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE + spinlock_t console_lock; + struct console console; + struct tty_struct *tty; + int tty_open_count; + struct fiq_debugger_ringbuf *tty_rbuf; + bool syslog_dumping; +#endif + + unsigned int last_irqs[NR_IRQS]; + unsigned int last_local_timer_irqs[NR_CPUS]; +}; + +#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE +struct tty_driver *fiq_tty_driver; +#endif + +#ifdef CONFIG_FIQ_DEBUGGER_NO_SLEEP +static bool initial_no_sleep = true; +#else +static bool initial_no_sleep; +#endif + +#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE_DEFAULT_ENABLE +static bool initial_debug_enable = true; +static bool initial_console_enable = true; +#else +static bool initial_debug_enable; +static bool initial_console_enable; +#endif + +static bool fiq_kgdb_enable; + +module_param_named(no_sleep, initial_no_sleep, bool, 0644); +module_param_named(debug_enable, initial_debug_enable, bool, 0644); +module_param_named(console_enable, initial_console_enable, bool, 0644); +module_param_named(kgdb_enable, fiq_kgdb_enable, bool, 0644); + +#ifdef CONFIG_FIQ_DEBUGGER_WAKEUP_IRQ_ALWAYS_ON +static inline void enable_wakeup_irq(struct fiq_debugger_state *state) {} +static inline void disable_wakeup_irq(struct fiq_debugger_state *state) {} +#else +static inline void enable_wakeup_irq(struct fiq_debugger_state *state) +{ + if (state->wakeup_irq < 0) + return; + enable_irq(state->wakeup_irq); + if (!state->wakeup_irq_no_set_wake) + enable_irq_wake(state->wakeup_irq); +} +static inline void disable_wakeup_irq(struct fiq_debugger_state *state) +{ + if (state->wakeup_irq < 0) + return; + disable_irq_nosync(state->wakeup_irq); + if (!state->wakeup_irq_no_set_wake) + disable_irq_wake(state->wakeup_irq); +} +#endif + +static bool inline debug_have_fiq(struct fiq_debugger_state *state) +{ + return (state->fiq >= 0); +} + +static void debug_force_irq(struct fiq_debugger_state *state) +{ + unsigned int irq = state->signal_irq; + + if (WARN_ON(!debug_have_fiq(state))) + return; + if 
(state->pdata->force_irq) { + state->pdata->force_irq(state->pdev, irq); + } else { + struct irq_chip *chip = irq_get_chip(irq); + if (chip && chip->irq_retrigger) + chip->irq_retrigger(irq_get_irq_data(irq)); + } +} + +static void debug_uart_enable(struct fiq_debugger_state *state) +{ + if (state->clk) + clk_enable(state->clk); + if (state->pdata->uart_enable) + state->pdata->uart_enable(state->pdev); +} + +static void debug_uart_disable(struct fiq_debugger_state *state) +{ + if (state->pdata->uart_disable) + state->pdata->uart_disable(state->pdev); + if (state->clk) + clk_disable(state->clk); +} + +static void debug_uart_flush(struct fiq_debugger_state *state) +{ + if (state->pdata->uart_flush) + state->pdata->uart_flush(state->pdev); +} + +static void debug_putc(struct fiq_debugger_state *state, char c) +{ + state->pdata->uart_putc(state->pdev, c); +} + +static void debug_puts(struct fiq_debugger_state *state, char *s) +{ + unsigned c; + while ((c = *s++)) { + if (c == '\n') + debug_putc(state, '\r'); + debug_putc(state, c); + } +} + +static void debug_prompt(struct fiq_debugger_state *state) +{ + debug_puts(state, "debug> "); +} + +int log_buf_copy(char *dest, int idx, int len); +static void dump_kernel_log(struct fiq_debugger_state *state) +{ + char buf[1024]; + int idx = 0; + int ret; + int saved_oip; + + /* setting oops_in_progress prevents log_buf_copy() + * from trying to take a spinlock which will make it + * very unhappy in some cases... + */ + saved_oip = oops_in_progress; + oops_in_progress = 1; + for (;;) { + ret = log_buf_copy(buf, idx, 1023); + if (ret <= 0) + break; + buf[ret] = 0; + debug_puts(state, buf); + idx += ret; + } + oops_in_progress = saved_oip; +} + +static char *mode_name(unsigned cpsr) +{ + switch (cpsr & MODE_MASK) { + case USR_MODE: return "USR"; + case FIQ_MODE: return "FIQ"; + case IRQ_MODE: return "IRQ"; + case SVC_MODE: return "SVC"; + case ABT_MODE: return "ABT"; + case UND_MODE: return "UND"; + case SYSTEM_MODE: return "SYS"; + default: return "???"; + } +} + +static int debug_printf(void *cookie, const char *fmt, ...) +{ + struct fiq_debugger_state *state = cookie; + char buf[256]; + va_list ap; + + va_start(ap, fmt); + vsnprintf(buf, sizeof(buf), fmt, ap); + va_end(ap); + + debug_puts(state, buf); + return state->debug_abort; +} + +/* Safe outside fiq context */ +static int debug_printf_nfiq(void *cookie, const char *fmt, ...) 
+{ + struct fiq_debugger_state *state = cookie; + char buf[256]; + va_list ap; + unsigned long irq_flags; + + va_start(ap, fmt); + vsnprintf(buf, 128, fmt, ap); + va_end(ap); + + local_irq_save(irq_flags); + debug_puts(state, buf); + debug_uart_flush(state); + local_irq_restore(irq_flags); + return state->debug_abort; +} + +static void dump_regs(struct fiq_debugger_state *state, unsigned *regs) +{ + debug_printf(state, " r0 %08x r1 %08x r2 %08x r3 %08x\n", + regs[0], regs[1], regs[2], regs[3]); + debug_printf(state, " r4 %08x r5 %08x r6 %08x r7 %08x\n", + regs[4], regs[5], regs[6], regs[7]); + debug_printf(state, " r8 %08x r9 %08x r10 %08x r11 %08x mode %s\n", + regs[8], regs[9], regs[10], regs[11], + mode_name(regs[16])); + if ((regs[16] & MODE_MASK) == USR_MODE) + debug_printf(state, " ip %08x sp %08x lr %08x pc %08x " + "cpsr %08x\n", regs[12], regs[13], regs[14], + regs[15], regs[16]); + else + debug_printf(state, " ip %08x sp %08x lr %08x pc %08x " + "cpsr %08x spsr %08x\n", regs[12], regs[13], + regs[14], regs[15], regs[16], regs[17]); +} + +struct mode_regs { + unsigned long sp_svc; + unsigned long lr_svc; + unsigned long spsr_svc; + + unsigned long sp_abt; + unsigned long lr_abt; + unsigned long spsr_abt; + + unsigned long sp_und; + unsigned long lr_und; + unsigned long spsr_und; + + unsigned long sp_irq; + unsigned long lr_irq; + unsigned long spsr_irq; + + unsigned long r8_fiq; + unsigned long r9_fiq; + unsigned long r10_fiq; + unsigned long r11_fiq; + unsigned long r12_fiq; + unsigned long sp_fiq; + unsigned long lr_fiq; + unsigned long spsr_fiq; +}; + +void __naked get_mode_regs(struct mode_regs *regs) +{ + asm volatile ( + "mrs r1, cpsr\n" + "msr cpsr_c, #0xd3 @(SVC_MODE | PSR_I_BIT | PSR_F_BIT)\n" + "stmia r0!, {r13 - r14}\n" + "mrs r2, spsr\n" + "msr cpsr_c, #0xd7 @(ABT_MODE | PSR_I_BIT | PSR_F_BIT)\n" + "stmia r0!, {r2, r13 - r14}\n" + "mrs r2, spsr\n" + "msr cpsr_c, #0xdb @(UND_MODE | PSR_I_BIT | PSR_F_BIT)\n" + "stmia r0!, {r2, r13 - r14}\n" + "mrs r2, spsr\n" + "msr cpsr_c, #0xd2 @(IRQ_MODE | PSR_I_BIT | PSR_F_BIT)\n" + "stmia r0!, {r2, r13 - r14}\n" + "mrs r2, spsr\n" + "msr cpsr_c, #0xd1 @(FIQ_MODE | PSR_I_BIT | PSR_F_BIT)\n" + "stmia r0!, {r2, r8 - r14}\n" + "mrs r2, spsr\n" + "stmia r0!, {r2}\n" + "msr cpsr_c, r1\n" + "bx lr\n"); +} + + +static void dump_allregs(struct fiq_debugger_state *state, unsigned *regs) +{ + struct mode_regs mode_regs; + dump_regs(state, regs); + get_mode_regs(&mode_regs); + debug_printf(state, " svc: sp %08x lr %08x spsr %08x\n", + mode_regs.sp_svc, mode_regs.lr_svc, mode_regs.spsr_svc); + debug_printf(state, " abt: sp %08x lr %08x spsr %08x\n", + mode_regs.sp_abt, mode_regs.lr_abt, mode_regs.spsr_abt); + debug_printf(state, " und: sp %08x lr %08x spsr %08x\n", + mode_regs.sp_und, mode_regs.lr_und, mode_regs.spsr_und); + debug_printf(state, " irq: sp %08x lr %08x spsr %08x\n", + mode_regs.sp_irq, mode_regs.lr_irq, mode_regs.spsr_irq); + debug_printf(state, " fiq: r8 %08x r9 %08x r10 %08x r11 %08x " + "r12 %08x\n", + mode_regs.r8_fiq, mode_regs.r9_fiq, mode_regs.r10_fiq, + mode_regs.r11_fiq, mode_regs.r12_fiq); + debug_printf(state, " fiq: sp %08x lr %08x spsr %08x\n", + mode_regs.sp_fiq, mode_regs.lr_fiq, mode_regs.spsr_fiq); +} + +static void dump_irqs(struct fiq_debugger_state *state) +{ + int n; + + debug_printf(state, "irqnr total since-last status name\n"); + for (n = 0; n < NR_IRQS; n++) { + struct irqaction *act = irq_desc[n].action; + if (!act && !kstat_irqs(n)) + continue; + debug_printf(state, "%5d: %10u %11u %8x %s\n", n, + 
kstat_irqs(n), + kstat_irqs(n) - state->last_irqs[n], + irq_desc[n].status_use_accessors, + (act && act->name) ? act->name : "???"); + state->last_irqs[n] = kstat_irqs(n); + } +} + +struct stacktrace_state { + struct fiq_debugger_state *state; + unsigned int depth; +}; + +static int report_trace(struct stackframe *frame, void *d) +{ + struct stacktrace_state *sts = d; + + if (sts->depth) { + debug_printf(sts->state, + " pc: %p (%pF), lr %p (%pF), sp %p, fp %p\n", + frame->pc, frame->pc, frame->lr, frame->lr, + frame->sp, frame->fp); + sts->depth--; + return 0; + } + debug_printf(sts->state, " ...\n"); + + return sts->depth == 0; +} + +struct frame_tail { + struct frame_tail *fp; + unsigned long sp; + unsigned long lr; +} __attribute__((packed)); + +static struct frame_tail *user_backtrace(struct fiq_debugger_state *state, + struct frame_tail *tail) +{ + struct frame_tail buftail[2]; + + /* Also check accessibility of one struct frame_tail beyond */ + if (!access_ok(VERIFY_READ, tail, sizeof(buftail))) { + debug_printf(state, " invalid frame pointer %p\n", tail); + return NULL; + } + if (__copy_from_user_inatomic(buftail, tail, sizeof(buftail))) { + debug_printf(state, + " failed to copy frame pointer %p\n", tail); + return NULL; + } + + debug_printf(state, " %p\n", buftail[0].lr); + + /* frame pointers should strictly progress back up the stack + * (towards higher addresses) */ + if (tail >= buftail[0].fp) + return NULL; + + return buftail[0].fp-1; +} + +void dump_stacktrace(struct fiq_debugger_state *state, + struct pt_regs * const regs, unsigned int depth, void *ssp) +{ + struct frame_tail *tail; + struct thread_info *real_thread_info = THREAD_INFO(ssp); + struct stacktrace_state sts; + + sts.depth = depth; + sts.state = state; + *current_thread_info() = *real_thread_info; + + if (!current) + debug_printf(state, "current NULL\n"); + else + debug_printf(state, "pid: %d comm: %s\n", + current->pid, current->comm); + dump_regs(state, (unsigned *)regs); + + if (!user_mode(regs)) { + struct stackframe frame; + frame.fp = regs->ARM_fp; + frame.sp = regs->ARM_sp; + frame.lr = regs->ARM_lr; + frame.pc = regs->ARM_pc; + debug_printf(state, + " pc: %p (%pF), lr %p (%pF), sp %p, fp %p\n", + regs->ARM_pc, regs->ARM_pc, regs->ARM_lr, regs->ARM_lr, + regs->ARM_sp, regs->ARM_fp); + walk_stackframe(&frame, report_trace, &sts); + return; + } + + tail = ((struct frame_tail *) regs->ARM_fp) - 1; + while (depth-- && tail && !((unsigned long) tail & 3)) + tail = user_backtrace(state, tail); +} + +static void do_ps(struct fiq_debugger_state *state) +{ + struct task_struct *g; + struct task_struct *p; + unsigned task_state; + static const char stat_nam[] = "RSDTtZX"; + + debug_printf(state, "pid ppid prio task pc\n"); + read_lock(&tasklist_lock); + do_each_thread(g, p) { + task_state = p->state ? __ffs(p->state) + 1 : 0; + debug_printf(state, + "%5d %5d %4d ", p->pid, p->parent->pid, p->prio); + debug_printf(state, "%-13.13s %c", p->comm, + task_state >= sizeof(stat_nam) ? '?' 
: stat_nam[task_state]); + if (task_state == TASK_RUNNING) + debug_printf(state, " running\n"); + else + debug_printf(state, " %08lx\n", thread_saved_pc(p)); + } while_each_thread(g, p); + read_unlock(&tasklist_lock); +} + +#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE +static void begin_syslog_dump(struct fiq_debugger_state *state) +{ + state->syslog_dumping = true; +} + +static void end_syslog_dump(struct fiq_debugger_state *state) +{ + state->syslog_dumping = false; +} +#else +extern int do_syslog(int type, char __user *bug, int count); +static void begin_syslog_dump(struct fiq_debugger_state *state) +{ + do_syslog(5 /* clear */, NULL, 0); +} + +static void end_syslog_dump(struct fiq_debugger_state *state) +{ + char buf[128]; + int ret; + int idx = 0; + + while (1) { + ret = log_buf_copy(buf, idx, sizeof(buf) - 1); + if (ret <= 0) + break; + buf[ret] = 0; + debug_printf(state, "%s", buf); + idx += ret; + } +} +#endif + +static void do_sysrq(struct fiq_debugger_state *state, char rq) +{ + if ((rq == 'g' || rq == 'G') && !fiq_kgdb_enable) { + debug_printf(state, "sysrq-g blocked\n"); + return; + } + begin_syslog_dump(state); + handle_sysrq(rq); + end_syslog_dump(state); +} + +#ifdef CONFIG_KGDB +static void do_kgdb(struct fiq_debugger_state *state) +{ + if (!fiq_kgdb_enable) { + debug_printf(state, "kgdb through fiq debugger not enabled\n"); + return; + } + + debug_printf(state, "enabling console and triggering kgdb\n"); + state->console_enable = true; + handle_sysrq('g'); +} +#endif + +static void debug_schedule_work(struct fiq_debugger_state *state, char *cmd) +{ + unsigned long flags; + + spin_lock_irqsave(&state->work_lock, flags); + if (state->work_cmd[0] != '\0') { + debug_printf(state, "work command processor busy\n"); + spin_unlock_irqrestore(&state->work_lock, flags); + return; + } + + strlcpy(state->work_cmd, cmd, sizeof(state->work_cmd)); + spin_unlock_irqrestore(&state->work_lock, flags); + + schedule_work(&state->work); +} + +static void debug_work(struct work_struct *work) +{ + struct fiq_debugger_state *state; + char work_cmd[DEBUG_MAX]; + char *cmd; + unsigned long flags; + + state = container_of(work, struct fiq_debugger_state, work); + + spin_lock_irqsave(&state->work_lock, flags); + + strlcpy(work_cmd, state->work_cmd, sizeof(work_cmd)); + state->work_cmd[0] = '\0'; + + spin_unlock_irqrestore(&state->work_lock, flags); + + cmd = work_cmd; + if (!strncmp(cmd, "reboot", 6)) { + cmd += 6; + while (*cmd == ' ') + cmd++; + if (cmd != '\0') + kernel_restart(cmd); + else + kernel_restart(NULL); + } else { + debug_printf(state, "unknown work command '%s'\n", work_cmd); + } +} + +/* This function CANNOT be called in FIQ context */ +static void debug_irq_exec(struct fiq_debugger_state *state, char *cmd) +{ + if (!strcmp(cmd, "ps")) + do_ps(state); + if (!strcmp(cmd, "sysrq")) + do_sysrq(state, 'h'); + if (!strncmp(cmd, "sysrq ", 6)) + do_sysrq(state, cmd[6]); +#ifdef CONFIG_KGDB + if (!strcmp(cmd, "kgdb")) + do_kgdb(state); +#endif + if (!strncmp(cmd, "reboot", 6)) + debug_schedule_work(state, cmd); +} + +static void debug_help(struct fiq_debugger_state *state) +{ + debug_printf(state, "FIQ Debugger commands:\n" + " pc PC status\n" + " regs Register dump\n" + " allregs Extended Register dump\n" + " bt Stack trace\n" + " reboot [<c>] Reboot with command <c>\n" + " reset [<c>] Hard reset with command <c>\n" + " irqs Interupt status\n" + " kmsg Kernel log\n" + " version Kernel version\n"); + debug_printf(state, " sleep Allow sleep while in FIQ\n" + " nosleep Disable sleep while in FIQ\n" + " 
console Switch terminal to console\n" + " cpu Current CPU\n" + " cpu <number> Switch to CPU<number>\n"); + debug_printf(state, " ps Process list\n" + " sysrq sysrq options\n" + " sysrq <param> Execute sysrq with <param>\n"); +#ifdef CONFIG_KGDB + debug_printf(state, " kgdb Enter kernel debugger\n"); +#endif +} + +static void take_affinity(void *info) +{ + struct fiq_debugger_state *state = info; + struct cpumask cpumask; + + cpumask_clear(&cpumask); + cpumask_set_cpu(get_cpu(), &cpumask); + + irq_set_affinity(state->uart_irq, &cpumask); +} + +static void switch_cpu(struct fiq_debugger_state *state, int cpu) +{ + if (!debug_have_fiq(state)) + smp_call_function_single(cpu, take_affinity, state, false); + state->current_cpu = cpu; +} + +static bool debug_fiq_exec(struct fiq_debugger_state *state, + const char *cmd, unsigned *regs, void *svc_sp) +{ + bool signal_helper = false; + + if (!strcmp(cmd, "help") || !strcmp(cmd, "?")) { + debug_help(state); + } else if (!strcmp(cmd, "pc")) { + debug_printf(state, " pc %08x cpsr %08x mode %s\n", + regs[15], regs[16], mode_name(regs[16])); + } else if (!strcmp(cmd, "regs")) { + dump_regs(state, regs); + } else if (!strcmp(cmd, "allregs")) { + dump_allregs(state, regs); + } else if (!strcmp(cmd, "bt")) { + dump_stacktrace(state, (struct pt_regs *)regs, 100, svc_sp); + } else if (!strncmp(cmd, "reset", 5)) { + cmd += 5; + while (*cmd == ' ') + cmd++; + if (*cmd) { + char tmp_cmd[32]; + strlcpy(tmp_cmd, cmd, sizeof(tmp_cmd)); + machine_restart(tmp_cmd); + } else { + machine_restart(NULL); + } + } else if (!strcmp(cmd, "irqs")) { + dump_irqs(state); + } else if (!strcmp(cmd, "kmsg")) { + dump_kernel_log(state); + } else if (!strcmp(cmd, "version")) { + debug_printf(state, "%s\n", linux_banner); + } else if (!strcmp(cmd, "sleep")) { + state->no_sleep = false; + debug_printf(state, "enabling sleep\n"); + } else if (!strcmp(cmd, "nosleep")) { + state->no_sleep = true; + debug_printf(state, "disabling sleep\n"); + } else if (!strcmp(cmd, "console")) { + debug_printf(state, "console mode\n"); + debug_uart_flush(state); + state->console_enable = true; + } else if (!strcmp(cmd, "cpu")) { + debug_printf(state, "cpu %d\n", state->current_cpu); + } else if (!strncmp(cmd, "cpu ", 4)) { + unsigned long cpu = 0; + if (strict_strtoul(cmd + 4, 10, &cpu) == 0) + switch_cpu(state, cpu); + else + debug_printf(state, "invalid cpu\n"); + debug_printf(state, "cpu %d\n", state->current_cpu); + } else { + if (state->debug_busy) { + debug_printf(state, + "command processor busy. 
trying to abort.\n"); + state->debug_abort = -1; + } else { + strcpy(state->debug_cmd, cmd); + state->debug_busy = 1; + } + + return true; + } + if (!state->console_enable) + debug_prompt(state); + + return signal_helper; +} + +static void sleep_timer_expired(unsigned long data) +{ + struct fiq_debugger_state *state = (struct fiq_debugger_state *)data; + unsigned long flags; + + spin_lock_irqsave(&state->sleep_timer_lock, flags); + if (state->uart_enabled && !state->no_sleep) { + if (state->debug_enable && !state->console_enable) { + state->debug_enable = false; + debug_printf_nfiq(state, "suspending fiq debugger\n"); + } + state->ignore_next_wakeup_irq = true; + debug_uart_disable(state); + state->uart_enabled = false; + enable_wakeup_irq(state); + } + wake_unlock(&state->debugger_wake_lock); + spin_unlock_irqrestore(&state->sleep_timer_lock, flags); +} + +static void handle_wakeup(struct fiq_debugger_state *state) +{ + unsigned long flags; + + spin_lock_irqsave(&state->sleep_timer_lock, flags); + if (state->wakeup_irq >= 0 && state->ignore_next_wakeup_irq) { + state->ignore_next_wakeup_irq = false; + } else if (!state->uart_enabled) { + wake_lock(&state->debugger_wake_lock); + debug_uart_enable(state); + state->uart_enabled = true; + disable_wakeup_irq(state); + mod_timer(&state->sleep_timer, jiffies + HZ / 2); + } + spin_unlock_irqrestore(&state->sleep_timer_lock, flags); +} + +static irqreturn_t wakeup_irq_handler(int irq, void *dev) +{ + struct fiq_debugger_state *state = dev; + + if (!state->no_sleep) + debug_puts(state, "WAKEUP\n"); + handle_wakeup(state); + + return IRQ_HANDLED; +} + + +static void debug_handle_irq_context(struct fiq_debugger_state *state) +{ + if (!state->no_sleep) { + unsigned long flags; + + spin_lock_irqsave(&state->sleep_timer_lock, flags); + wake_lock(&state->debugger_wake_lock); + mod_timer(&state->sleep_timer, jiffies + HZ * 5); + spin_unlock_irqrestore(&state->sleep_timer_lock, flags); + } +#if defined(CONFIG_FIQ_DEBUGGER_CONSOLE) + if (state->tty) { + int i; + int count = fiq_debugger_ringbuf_level(state->tty_rbuf); + for (i = 0; i < count; i++) { + int c = fiq_debugger_ringbuf_peek(state->tty_rbuf, 0); + tty_insert_flip_char(state->tty, c, TTY_NORMAL); + if (!fiq_debugger_ringbuf_consume(state->tty_rbuf, 1)) + pr_warn("fiq tty failed to consume byte\n"); + } + tty_flip_buffer_push(state->tty); + } +#endif + if (state->debug_busy) { + debug_irq_exec(state, state->debug_cmd); + if (!state->console_enable) + debug_prompt(state); + state->debug_busy = 0; + } +} + +static int debug_getc(struct fiq_debugger_state *state) +{ + return state->pdata->uart_getc(state->pdev); +} + +static bool debug_handle_uart_interrupt(struct fiq_debugger_state *state, + int this_cpu, void *regs, void *svc_sp) +{ + int c; + static int last_c; + int count = 0; + bool signal_helper = false; + + if (this_cpu != state->current_cpu) { + if (state->in_fiq) + return false; + + if (atomic_inc_return(&state->unhandled_fiq_count) != + MAX_UNHANDLED_FIQ_COUNT) + return false; + + debug_printf(state, "fiq_debugger: cpu %d not responding, " + "reverting to cpu %d\n", state->current_cpu, + this_cpu); + + atomic_set(&state->unhandled_fiq_count, 0); + switch_cpu(state, this_cpu); + return false; + } + + state->in_fiq = true; + + while ((c = debug_getc(state)) != FIQ_DEBUGGER_NO_CHAR) { + count++; + if (!state->debug_enable) { + if ((c == 13) || (c == 10)) { + state->debug_enable = true; + state->debug_count = 0; + debug_prompt(state); + } + } else if (c == FIQ_DEBUGGER_BREAK) { + 
state->console_enable = false; + debug_puts(state, "fiq debugger mode\n"); + state->debug_count = 0; + debug_prompt(state); +#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE + } else if (state->console_enable && state->tty_rbuf) { + fiq_debugger_ringbuf_push(state->tty_rbuf, c); + signal_helper = true; +#endif + } else if ((c >= ' ') && (c < 127)) { + if (state->debug_count < (DEBUG_MAX - 1)) { + state->debug_buf[state->debug_count++] = c; + debug_putc(state, c); + } + } else if ((c == 8) || (c == 127)) { + if (state->debug_count > 0) { + state->debug_count--; + debug_putc(state, 8); + debug_putc(state, ' '); + debug_putc(state, 8); + } + } else if ((c == 13) || (c == 10)) { + if (c == '\r' || (c == '\n' && last_c != '\r')) { + debug_putc(state, '\r'); + debug_putc(state, '\n'); + } + if (state->debug_count) { + state->debug_buf[state->debug_count] = 0; + state->debug_count = 0; + signal_helper |= + debug_fiq_exec(state, state->debug_buf, + regs, svc_sp); + } else { + debug_prompt(state); + } + } + last_c = c; + } + if (!state->console_enable) + debug_uart_flush(state); + if (state->pdata->fiq_ack) + state->pdata->fiq_ack(state->pdev, state->fiq); + + /* poke sleep timer if necessary */ + if (state->debug_enable && !state->no_sleep) + signal_helper = true; + + atomic_set(&state->unhandled_fiq_count, 0); + state->in_fiq = false; + + return signal_helper; +} + +static void debug_fiq(struct fiq_glue_handler *h, void *regs, void *svc_sp) +{ + struct fiq_debugger_state *state = + container_of(h, struct fiq_debugger_state, handler); + unsigned int this_cpu = THREAD_INFO(svc_sp)->cpu; + bool need_irq; + + need_irq = debug_handle_uart_interrupt(state, this_cpu, regs, svc_sp); + if (need_irq) + debug_force_irq(state); +} + +/* + * When not using FIQs, we only use this single interrupt as an entry point. + * This just effectively takes over the UART interrupt and does all the work + * in this context. + */ +static irqreturn_t debug_uart_irq(int irq, void *dev) +{ + struct fiq_debugger_state *state = dev; + bool not_done; + + handle_wakeup(state); + + /* handle the debugger irq in regular context */ + not_done = debug_handle_uart_interrupt(state, smp_processor_id(), + get_irq_regs(), + current_thread_info()); + if (not_done) + debug_handle_irq_context(state); + + return IRQ_HANDLED; +} + +/* + * If FIQs are used, not everything can happen in fiq context. + * FIQ handler does what it can and then signals this interrupt to finish the + * job in irq context. 
+ */ +static irqreturn_t debug_signal_irq(int irq, void *dev) +{ + struct fiq_debugger_state *state = dev; + + if (state->pdata->force_irq_ack) + state->pdata->force_irq_ack(state->pdev, state->signal_irq); + + debug_handle_irq_context(state); + + return IRQ_HANDLED; +} + +static void debug_resume(struct fiq_glue_handler *h) +{ + struct fiq_debugger_state *state = + container_of(h, struct fiq_debugger_state, handler); + if (state->pdata->uart_resume) + state->pdata->uart_resume(state->pdev); +} + +#if defined(CONFIG_FIQ_DEBUGGER_CONSOLE) +struct tty_driver *debug_console_device(struct console *co, int *index) +{ + *index = co->index; + return fiq_tty_driver; +} + +static void debug_console_write(struct console *co, + const char *s, unsigned int count) +{ + struct fiq_debugger_state *state; + unsigned long flags; + + state = container_of(co, struct fiq_debugger_state, console); + + if (!state->console_enable && !state->syslog_dumping) + return; + + debug_uart_enable(state); + spin_lock_irqsave(&state->console_lock, flags); + while (count--) { + if (*s == '\n') + debug_putc(state, '\r'); + debug_putc(state, *s++); + } + debug_uart_flush(state); + spin_unlock_irqrestore(&state->console_lock, flags); + debug_uart_disable(state); +} + +static struct console fiq_debugger_console = { + .name = "ttyFIQ", + .device = debug_console_device, + .write = debug_console_write, + .flags = CON_PRINTBUFFER | CON_ANYTIME | CON_ENABLED, +}; + +int fiq_tty_open(struct tty_struct *tty, struct file *filp) +{ + int line = tty->index; + struct fiq_debugger_state **states = tty->driver->driver_state; + struct fiq_debugger_state *state = states[line]; + if (state->tty_open_count++) + return 0; + + tty->driver_data = state; + state->tty = tty; + return 0; +} + +void fiq_tty_close(struct tty_struct *tty, struct file *filp) +{ + struct fiq_debugger_state *state = tty->driver_data; + if (--state->tty_open_count) + return; + state->tty = NULL; +} + +int fiq_tty_write(struct tty_struct *tty, const unsigned char *buf, int count) +{ + int i; + struct fiq_debugger_state *state = tty->driver_data; + + if (!state->console_enable) + return count; + + debug_uart_enable(state); + spin_lock_irq(&state->console_lock); + for (i = 0; i < count; i++) + debug_putc(state, *buf++); + spin_unlock_irq(&state->console_lock); + debug_uart_disable(state); + + return count; +} + +int fiq_tty_write_room(struct tty_struct *tty) +{ + return 16; +} + +#ifdef CONFIG_CONSOLE_POLL +static int fiq_tty_poll_init(struct tty_driver *driver, int line, char *options) +{ + return 0; +} + +static int fiq_tty_poll_get_char(struct tty_driver *driver, int line) +{ + struct fiq_debugger_state *state = driver->ttys[line]->driver_data; + int c = NO_POLL_CHAR; + + debug_uart_enable(state); + if (debug_have_fiq(state)) { + int count = fiq_debugger_ringbuf_level(state->tty_rbuf); + if (count > 0) { + c = fiq_debugger_ringbuf_peek(state->tty_rbuf, 0); + fiq_debugger_ringbuf_consume(state->tty_rbuf, 1); + } + } else { + c = debug_getc(state); + if (c == FIQ_DEBUGGER_NO_CHAR) + c = NO_POLL_CHAR; + } + debug_uart_disable(state); + + return c; +} + +static void fiq_tty_poll_put_char(struct tty_driver *driver, int line, char ch) +{ + struct fiq_debugger_state *state = driver->ttys[line]->driver_data; + debug_uart_enable(state); + debug_putc(state, ch); + debug_uart_disable(state); +} +#endif + +static const struct tty_operations fiq_tty_driver_ops = { + .write = fiq_tty_write, + .write_room = fiq_tty_write_room, + .open = fiq_tty_open, + .close = fiq_tty_close, +#ifdef 
CONFIG_CONSOLE_POLL + .poll_init = fiq_tty_poll_init, + .poll_get_char = fiq_tty_poll_get_char, + .poll_put_char = fiq_tty_poll_put_char, +#endif +}; + +static int fiq_debugger_tty_init(void) +{ + int ret; + struct fiq_debugger_state **states = NULL; + + states = kzalloc(sizeof(*states) * MAX_FIQ_DEBUGGER_PORTS, GFP_KERNEL); + if (!states) { + pr_err("Failed to allocate fiq debugger state structres\n"); + return -ENOMEM; + } + + fiq_tty_driver = alloc_tty_driver(MAX_FIQ_DEBUGGER_PORTS); + if (!fiq_tty_driver) { + pr_err("Failed to allocate fiq debugger tty\n"); + ret = -ENOMEM; + goto err_free_state; + } + + fiq_tty_driver->owner = THIS_MODULE; + fiq_tty_driver->driver_name = "fiq-debugger"; + fiq_tty_driver->name = "ttyFIQ"; + fiq_tty_driver->type = TTY_DRIVER_TYPE_SERIAL; + fiq_tty_driver->subtype = SERIAL_TYPE_NORMAL; + fiq_tty_driver->init_termios = tty_std_termios; + fiq_tty_driver->flags = TTY_DRIVER_REAL_RAW | + TTY_DRIVER_DYNAMIC_DEV; + fiq_tty_driver->driver_state = states; + + fiq_tty_driver->init_termios.c_cflag = + B115200 | CS8 | CREAD | HUPCL | CLOCAL; + fiq_tty_driver->init_termios.c_ispeed = 115200; + fiq_tty_driver->init_termios.c_ospeed = 115200; + + tty_set_operations(fiq_tty_driver, &fiq_tty_driver_ops); + + ret = tty_register_driver(fiq_tty_driver); + if (ret) { + pr_err("Failed to register fiq tty: %d\n", ret); + goto err_free_tty; + } + + pr_info("Registered FIQ tty driver\n"); + return 0; + +err_free_tty: + put_tty_driver(fiq_tty_driver); + fiq_tty_driver = NULL; +err_free_state: + kfree(states); + return ret; +} + +static int fiq_debugger_tty_init_one(struct fiq_debugger_state *state) +{ + int ret; + struct device *tty_dev; + struct fiq_debugger_state **states = fiq_tty_driver->driver_state; + + states[state->pdev->id] = state; + + state->tty_rbuf = fiq_debugger_ringbuf_alloc(1024); + if (!state->tty_rbuf) { + pr_err("Failed to allocate fiq debugger ringbuf\n"); + ret = -ENOMEM; + goto err; + } + + tty_dev = tty_register_device(fiq_tty_driver, state->pdev->id, + &state->pdev->dev); + if (IS_ERR(tty_dev)) { + pr_err("Failed to register fiq debugger tty device\n"); + ret = PTR_ERR(tty_dev); + goto err; + } + + device_set_wakeup_capable(tty_dev, 1); + + pr_info("Registered fiq debugger ttyFIQ%d\n", state->pdev->id); + + return 0; + +err: + fiq_debugger_ringbuf_free(state->tty_rbuf); + state->tty_rbuf = NULL; + return ret; +} +#endif + +static int fiq_debugger_dev_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct fiq_debugger_state *state = platform_get_drvdata(pdev); + + if (state->pdata->uart_dev_suspend) + return state->pdata->uart_dev_suspend(pdev); + return 0; +} + +static int fiq_debugger_dev_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct fiq_debugger_state *state = platform_get_drvdata(pdev); + + if (state->pdata->uart_dev_resume) + return state->pdata->uart_dev_resume(pdev); + return 0; +} + +static int fiq_debugger_probe(struct platform_device *pdev) +{ + int ret; + struct fiq_debugger_pdata *pdata = dev_get_platdata(&pdev->dev); + struct fiq_debugger_state *state; + int fiq; + int uart_irq; + + if (pdev->id >= MAX_FIQ_DEBUGGER_PORTS) + return -EINVAL; + + if (!pdata->uart_getc || !pdata->uart_putc) + return -EINVAL; + if ((pdata->uart_enable && !pdata->uart_disable) || + (!pdata->uart_enable && pdata->uart_disable)) + return -EINVAL; + + fiq = platform_get_irq_byname(pdev, "fiq"); + uart_irq = platform_get_irq_byname(pdev, "uart_irq"); + + /* uart_irq mode and 
fiq mode are mutually exclusive, but one of them + * is required */ + if ((uart_irq < 0 && fiq < 0) || (uart_irq >= 0 && fiq >= 0)) + return -EINVAL; + if (fiq >= 0 && !pdata->fiq_enable) + return -EINVAL; + + state = kzalloc(sizeof(*state), GFP_KERNEL); + setup_timer(&state->sleep_timer, sleep_timer_expired, + (unsigned long)state); + state->pdata = pdata; + state->pdev = pdev; + state->no_sleep = initial_no_sleep; + state->debug_enable = initial_debug_enable; + state->console_enable = initial_console_enable; + + state->fiq = fiq; + state->uart_irq = uart_irq; + state->signal_irq = platform_get_irq_byname(pdev, "signal"); + state->wakeup_irq = platform_get_irq_byname(pdev, "wakeup"); + + INIT_WORK(&state->work, debug_work); + spin_lock_init(&state->work_lock); + + platform_set_drvdata(pdev, state); + + spin_lock_init(&state->sleep_timer_lock); + + if (state->wakeup_irq < 0 && debug_have_fiq(state)) + state->no_sleep = true; + state->ignore_next_wakeup_irq = !state->no_sleep; + + wake_lock_init(&state->debugger_wake_lock, + WAKE_LOCK_SUSPEND, "serial-debug"); + + state->clk = clk_get(&pdev->dev, NULL); + if (IS_ERR(state->clk)) + state->clk = NULL; + + /* do not call pdata->uart_enable here since uart_init may still + * need to do some initialization before uart_enable can work. + * So, only try to manage the clock during init. + */ + if (state->clk) + clk_enable(state->clk); + + if (pdata->uart_init) { + ret = pdata->uart_init(pdev); + if (ret) + goto err_uart_init; + } + + debug_printf_nfiq(state, "<hit enter %sto activate fiq debugger>\n", + state->no_sleep ? "" : "twice "); + + if (debug_have_fiq(state)) { + state->handler.fiq = debug_fiq; + state->handler.resume = debug_resume; + ret = fiq_glue_register_handler(&state->handler); + if (ret) { + pr_err("%s: could not install fiq handler\n", __func__); + goto err_register_fiq; + } + + pdata->fiq_enable(pdev, state->fiq, 1); + } else { + ret = request_irq(state->uart_irq, debug_uart_irq, + IRQF_NO_SUSPEND, "debug", state); + if (ret) { + pr_err("%s: could not install irq handler\n", __func__); + goto err_register_irq; + } + + /* for irq-only mode, we want this irq to wake us up, if it + * can. 
+ */ + enable_irq_wake(state->uart_irq); + } + + if (state->clk) + clk_disable(state->clk); + + if (state->signal_irq >= 0) { + ret = request_irq(state->signal_irq, debug_signal_irq, + IRQF_TRIGGER_RISING, "debug-signal", state); + if (ret) + pr_err("serial_debugger: could not install signal_irq"); + } + + if (state->wakeup_irq >= 0) { + ret = request_irq(state->wakeup_irq, wakeup_irq_handler, + IRQF_TRIGGER_FALLING | IRQF_DISABLED, + "debug-wakeup", state); + if (ret) { + pr_err("serial_debugger: " + "could not install wakeup irq\n"); + state->wakeup_irq = -1; + } else { + ret = enable_irq_wake(state->wakeup_irq); + if (ret) { + pr_err("serial_debugger: " + "could not enable wakeup\n"); + state->wakeup_irq_no_set_wake = true; + } + } + } + if (state->no_sleep) + handle_wakeup(state); + +#if defined(CONFIG_FIQ_DEBUGGER_CONSOLE) + spin_lock_init(&state->console_lock); + state->console = fiq_debugger_console; + state->console.index = pdev->id; + if (!console_set_on_cmdline) + add_preferred_console(state->console.name, + state->console.index, NULL); + register_console(&state->console); + fiq_debugger_tty_init_one(state); +#endif + return 0; + +err_register_irq: +err_register_fiq: + if (pdata->uart_free) + pdata->uart_free(pdev); +err_uart_init: + if (state->clk) + clk_disable(state->clk); + if (state->clk) + clk_put(state->clk); + wake_lock_destroy(&state->debugger_wake_lock); + platform_set_drvdata(pdev, NULL); + kfree(state); + return ret; +} + +static const struct dev_pm_ops fiq_debugger_dev_pm_ops = { + .suspend = fiq_debugger_dev_suspend, + .resume = fiq_debugger_dev_resume, +}; + +static struct platform_driver fiq_debugger_driver = { + .probe = fiq_debugger_probe, + .driver = { + .name = "fiq_debugger", + .pm = &fiq_debugger_dev_pm_ops, + }, +}; + +static int __init fiq_debugger_init(void) +{ + fiq_debugger_tty_init(); + return platform_driver_register(&fiq_debugger_driver); +} + +postcore_initcall(fiq_debugger_init); diff --git a/ANDROID_3.4.5/arch/arm/common/fiq_debugger_ringbuf.h b/ANDROID_3.4.5/arch/arm/common/fiq_debugger_ringbuf.h new file mode 100644 index 00000000..2649b558 --- /dev/null +++ b/ANDROID_3.4.5/arch/arm/common/fiq_debugger_ringbuf.h @@ -0,0 +1,94 @@ +/* + * arch/arm/common/fiq_debugger_ringbuf.c + * + * simple lockless ringbuffer + * + * Copyright (C) 2010 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/kernel.h> +#include <linux/slab.h> + +struct fiq_debugger_ringbuf { + int len; + int head; + int tail; + u8 buf[]; +}; + + +static inline struct fiq_debugger_ringbuf *fiq_debugger_ringbuf_alloc(int len) +{ + struct fiq_debugger_ringbuf *rbuf; + + rbuf = kzalloc(sizeof(*rbuf) + len, GFP_KERNEL); + if (rbuf == NULL) + return NULL; + + rbuf->len = len; + rbuf->head = 0; + rbuf->tail = 0; + smp_mb(); + + return rbuf; +} + +static inline void fiq_debugger_ringbuf_free(struct fiq_debugger_ringbuf *rbuf) +{ + kfree(rbuf); +} + +static inline int fiq_debugger_ringbuf_level(struct fiq_debugger_ringbuf *rbuf) +{ + int level = rbuf->head - rbuf->tail; + + if (level < 0) + level = rbuf->len + level; + + return level; +} + +static inline int fiq_debugger_ringbuf_room(struct fiq_debugger_ringbuf *rbuf) +{ + return rbuf->len - fiq_debugger_ringbuf_level(rbuf) - 1; +} + +static inline u8 +fiq_debugger_ringbuf_peek(struct fiq_debugger_ringbuf *rbuf, int i) +{ + return rbuf->buf[(rbuf->tail + i) % rbuf->len]; +} + +static inline int +fiq_debugger_ringbuf_consume(struct fiq_debugger_ringbuf *rbuf, int count) +{ + count = min(count, fiq_debugger_ringbuf_level(rbuf)); + + rbuf->tail = (rbuf->tail + count) % rbuf->len; + smp_mb(); + + return count; +} + +static inline int +fiq_debugger_ringbuf_push(struct fiq_debugger_ringbuf *rbuf, u8 datum) +{ + if (fiq_debugger_ringbuf_room(rbuf) == 0) + return 0; + + rbuf->buf[rbuf->head] = datum; + smp_mb(); + rbuf->head = (rbuf->head + 1) % rbuf->len; + smp_mb(); + + return 1; +} diff --git a/ANDROID_3.4.5/arch/arm/common/fiq_glue.S b/ANDROID_3.4.5/arch/arm/common/fiq_glue.S new file mode 100644 index 00000000..9e3455a0 --- /dev/null +++ b/ANDROID_3.4.5/arch/arm/common/fiq_glue.S @@ -0,0 +1,111 @@ +/* + * Copyright (C) 2008 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include <linux/linkage.h> +#include <asm/assembler.h> + + .text + + .global fiq_glue_end + + /* fiq stack: r0-r15,cpsr,spsr of interrupted mode */ + +ENTRY(fiq_glue) + /* store pc, cpsr from previous mode */ + mrs r12, spsr + sub r11, lr, #4 + subs r10, #1 + bne nested_fiq + + stmfd sp!, {r11-r12, lr} + + /* store r8-r14 from previous mode */ + sub sp, sp, #(7 * 4) + stmia sp, {r8-r14}^ + nop + + /* store r0-r7 from previous mode */ + stmfd sp!, {r0-r7} + + /* setup func(data,regs) arguments */ + mov r0, r9 + mov r1, sp + mov r3, r8 + + mov r7, sp + + /* Get sp and lr from non-user modes */ + and r4, r12, #MODE_MASK + cmp r4, #USR_MODE + beq fiq_from_usr_mode + + mov r7, sp + orr r4, r4, #(PSR_I_BIT | PSR_F_BIT) + msr cpsr_c, r4 + str sp, [r7, #(4 * 13)] + str lr, [r7, #(4 * 14)] + mrs r5, spsr + str r5, [r7, #(4 * 17)] + + cmp r4, #(SVC_MODE | PSR_I_BIT | PSR_F_BIT) + /* use fiq stack if we reenter this mode */ + subne sp, r7, #(4 * 3) + +fiq_from_usr_mode: + msr cpsr_c, #(SVC_MODE | PSR_I_BIT | PSR_F_BIT) + mov r2, sp + sub sp, r7, #12 + stmfd sp!, {r2, ip, lr} + /* call func(data,regs) */ + blx r3 + ldmfd sp, {r2, ip, lr} + mov sp, r2 + + /* restore/discard saved state */ + cmp r4, #USR_MODE + beq fiq_from_usr_mode_exit + + msr cpsr_c, r4 + ldr sp, [r7, #(4 * 13)] + ldr lr, [r7, #(4 * 14)] + msr spsr_cxsf, r5 + +fiq_from_usr_mode_exit: + msr cpsr_c, #(FIQ_MODE | PSR_I_BIT | PSR_F_BIT) + + ldmfd sp!, {r0-r7} + add sp, sp, #(7 * 4) + ldmfd sp!, {r11-r12, lr} +exit_fiq: + msr spsr_cxsf, r12 + add r10, #1 + movs pc, r11 + +nested_fiq: + orr r12, r12, #(PSR_F_BIT) + b exit_fiq + +fiq_glue_end: + +ENTRY(fiq_glue_setup) /* func, data, sp */ + mrs r3, cpsr + msr cpsr_c, #(FIQ_MODE | PSR_I_BIT | PSR_F_BIT) + movs r8, r0 + mov r9, r1 + mov sp, r2 + moveq r10, #0 + movne r10, #1 + msr cpsr_c, r3 + bx lr + diff --git a/ANDROID_3.4.5/arch/arm/common/fiq_glue_setup.c b/ANDROID_3.4.5/arch/arm/common/fiq_glue_setup.c new file mode 100644 index 00000000..4044c7db --- /dev/null +++ b/ANDROID_3.4.5/arch/arm/common/fiq_glue_setup.c @@ -0,0 +1,100 @@ +/* + * Copyright (C) 2010 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
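+ *
+ * Registration sketch (illustrative; my_fiq and my_resume are assumed
+ * names):
+ *
+ *	static struct fiq_glue_handler my_handler = {
+ *		.fiq	= my_fiq,
+ *		.resume	= my_resume,
+ *	};
+ *
+ *	ret = fiq_glue_register_handler(&my_handler);
+ *
+ * Only one handler can be registered at a time; registration allocates
+ * a per-cpu FIQ stack and installs the glue veneer on every CPU.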
+ */
+
+#include <linux/kernel.h>
+#include <linux/percpu.h>
+#include <linux/slab.h>
+#include <asm/fiq.h>
+#include <asm/fiq_glue.h>
+
+extern unsigned char fiq_glue, fiq_glue_end;
+extern void fiq_glue_setup(void *func, void *data, void *sp);
+
+static struct fiq_handler fiq_debugger_fiq_handler = {
+	.name = "fiq_glue",
+};
+DEFINE_PER_CPU(void *, fiq_stack);
+static struct fiq_glue_handler *current_handler;
+static DEFINE_MUTEX(fiq_glue_lock);
+
+static void fiq_glue_setup_helper(void *info)
+{
+	struct fiq_glue_handler *handler = info;
+	fiq_glue_setup(handler->fiq, handler,
+		__get_cpu_var(fiq_stack) + THREAD_START_SP);
+}
+
+int fiq_glue_register_handler(struct fiq_glue_handler *handler)
+{
+	int ret;
+	int cpu;
+
+	if (!handler || !handler->fiq)
+		return -EINVAL;
+
+	mutex_lock(&fiq_glue_lock);
+	/* only one handler is supported at a time */
+	if (current_handler) {
+		ret = -EBUSY;
+		goto err_busy;
+	}
+
+	for_each_possible_cpu(cpu) {
+		void *stack;
+		stack = (void *)__get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
+		if (WARN_ON(!stack)) {
+			ret = -ENOMEM;
+			goto err_alloc_fiq_stack;
+		}
+		per_cpu(fiq_stack, cpu) = stack;
+	}
+
+	ret = claim_fiq(&fiq_debugger_fiq_handler);
+	if (WARN_ON(ret))
+		goto err_claim_fiq;
+
+	current_handler = handler;
+	on_each_cpu(fiq_glue_setup_helper, handler, true);
+	set_fiq_handler(&fiq_glue, &fiq_glue_end - &fiq_glue);
+
+	mutex_unlock(&fiq_glue_lock);
+	return 0;
+
+err_claim_fiq:
+err_alloc_fiq_stack:
+	for_each_possible_cpu(cpu) {
+		free_pages((unsigned long)per_cpu(fiq_stack, cpu),
+			THREAD_SIZE_ORDER);
+		per_cpu(fiq_stack, cpu) = NULL;
+	}
+err_busy:
+	mutex_unlock(&fiq_glue_lock);
+	return ret;
+}
+
+/**
+ * fiq_glue_resume - Restore fiqs after suspend or low power idle states
+ *
+ * This must be called before calling local_fiq_enable() after returning
+ * from a power state where the fiq mode registers were lost. If a driver
+ * provided a resume hook when it registered the handler it will be called.
+ */
+void fiq_glue_resume(void)
+{
+	if (!current_handler)
+		return;
+	fiq_glue_setup(current_handler->fiq, current_handler,
+		__get_cpu_var(fiq_stack) + THREAD_START_SP);
+	if (current_handler->resume)
+		current_handler->resume(current_handler);
+}
+
diff --git a/ANDROID_3.4.5/arch/arm/common/gic.c b/ANDROID_3.4.5/arch/arm/common/gic.c
new file mode 100644
index 00000000..aa526998
--- /dev/null
+++ b/ANDROID_3.4.5/arch/arm/common/gic.c
@@ -0,0 +1,784 @@
+/*
+ * linux/arch/arm/common/gic.c
+ *
+ * Copyright (C) 2002 ARM Limited, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Interrupt architecture for the GIC:
+ *
+ * o There is one Interrupt Distributor, which receives interrupts
+ *   from system devices and sends them to the Interrupt Controllers.
+ *
+ * o There is one CPU Interface per CPU, which forwards interrupts sent
+ *   by the Distributor, and interrupts generated locally, to the
+ *   associated CPU. The base address of the CPU interface is usually
+ *   aliased so that the same address points to different chips depending
+ *   on the CPU it is accessed from.
+ *
+ * Note that IRQs 0-31 are special - they are local to each CPU.
+ * As such, the enable set/clear, pending set/clear and active bit
+ * registers are banked per-cpu for these sources.
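+ *
+ * Worked example of the register layout used throughout this file:
+ * enable, pending and active state keep one bit per interrupt, so IRQ 45
+ * is bit (45 % 32) = 13 of word (45 / 32) = 1, i.e. byte offset 4 into
+ * the respective register bank; targets and priorities use one byte per
+ * interrupt, and trigger configuration two bits per interrupt.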
+ */ +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/err.h> +#include <linux/module.h> +#include <linux/list.h> +#include <linux/smp.h> +#include <linux/cpu_pm.h> +#include <linux/cpumask.h> +#include <linux/io.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/irqdomain.h> +#include <linux/interrupt.h> +#include <linux/percpu.h> +#include <linux/slab.h> + +#include <asm/irq.h> +#include <asm/exception.h> +#include <asm/smp_plat.h> +#include <asm/mach/irq.h> +#include <asm/hardware/gic.h> + +union gic_base { + void __iomem *common_base; + void __percpu __iomem **percpu_base; +}; + +struct gic_chip_data { + union gic_base dist_base; + union gic_base cpu_base; +#ifdef CONFIG_CPU_PM + u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)]; + u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)]; + u32 saved_spi_target[DIV_ROUND_UP(1020, 4)]; + u32 __percpu *saved_ppi_enable; + u32 __percpu *saved_ppi_conf; +#endif + struct irq_domain *domain; + unsigned int gic_irqs; +#ifdef CONFIG_GIC_NON_BANKED + void __iomem *(*get_base)(union gic_base *); +#endif +}; + +static DEFINE_RAW_SPINLOCK(irq_controller_lock); + +/* + * Supported arch specific GIC irq extension. + * Default make them NULL. + */ +struct irq_chip gic_arch_extn = { + .irq_eoi = NULL, + .irq_mask = NULL, + .irq_unmask = NULL, + .irq_retrigger = NULL, + .irq_set_type = NULL, + .irq_set_wake = NULL, +}; + +#ifndef MAX_GIC_NR +#define MAX_GIC_NR 1 +#endif + +static struct gic_chip_data gic_data[MAX_GIC_NR] __read_mostly; + +#ifdef CONFIG_GIC_NON_BANKED +static void __iomem *gic_get_percpu_base(union gic_base *base) +{ + return *__this_cpu_ptr(base->percpu_base); +} + +static void __iomem *gic_get_common_base(union gic_base *base) +{ + return base->common_base; +} + +static inline void __iomem *gic_data_dist_base(struct gic_chip_data *data) +{ + return data->get_base(&data->dist_base); +} + +static inline void __iomem *gic_data_cpu_base(struct gic_chip_data *data) +{ + return data->get_base(&data->cpu_base); +} + +static inline void gic_set_base_accessor(struct gic_chip_data *data, + void __iomem *(*f)(union gic_base *)) +{ + data->get_base = f; +} +#else +#define gic_data_dist_base(d) ((d)->dist_base.common_base) +#define gic_data_cpu_base(d) ((d)->cpu_base.common_base) +#define gic_set_base_accessor(d,f) +#endif + +static inline void __iomem *gic_dist_base(struct irq_data *d) +{ + struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d); + return gic_data_dist_base(gic_data); +} + +static inline void __iomem *gic_cpu_base(struct irq_data *d) +{ + struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d); + return gic_data_cpu_base(gic_data); +} + +static inline unsigned int gic_irq(struct irq_data *d) +{ + return d->hwirq; +} + +/* + * Routines to acknowledge, disable and enable interrupts + */ +static void gic_mask_irq(struct irq_data *d) +{ + u32 mask = 1 << (gic_irq(d) % 32); + + raw_spin_lock(&irq_controller_lock); + writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4); + if (gic_arch_extn.irq_mask) + gic_arch_extn.irq_mask(d); + raw_spin_unlock(&irq_controller_lock); +} + +static void gic_unmask_irq(struct irq_data *d) +{ + u32 mask = 1 << (gic_irq(d) % 32); + + raw_spin_lock(&irq_controller_lock); + if (gic_arch_extn.irq_unmask) + gic_arch_extn.irq_unmask(d); + writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4); + raw_spin_unlock(&irq_controller_lock); +} + +static void gic_eoi_irq(struct irq_data *d) +{ 
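+	/*
+	 * Run the arch extension's EOI hook, if any, under the controller
+	 * lock, then signal completion to our own CPU interface.
+	 */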
+ if (gic_arch_extn.irq_eoi) { + raw_spin_lock(&irq_controller_lock); + gic_arch_extn.irq_eoi(d); + raw_spin_unlock(&irq_controller_lock); + } + + writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI); +} + +static int gic_set_type(struct irq_data *d, unsigned int type) +{ + void __iomem *base = gic_dist_base(d); + unsigned int gicirq = gic_irq(d); + u32 enablemask = 1 << (gicirq % 32); + u32 enableoff = (gicirq / 32) * 4; + u32 confmask = 0x2 << ((gicirq % 16) * 2); + u32 confoff = (gicirq / 16) * 4; + bool enabled = false; + u32 val; + + /* Interrupt configuration for SGIs can't be changed */ + if (gicirq < 16) + return -EINVAL; + + if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING) + return -EINVAL; + + raw_spin_lock(&irq_controller_lock); + + if (gic_arch_extn.irq_set_type) + gic_arch_extn.irq_set_type(d, type); + + val = readl_relaxed(base + GIC_DIST_CONFIG + confoff); + if (type == IRQ_TYPE_LEVEL_HIGH) + val &= ~confmask; + else if (type == IRQ_TYPE_EDGE_RISING) + val |= confmask; + + /* + * As recommended by the spec, disable the interrupt before changing + * the configuration + */ + if (readl_relaxed(base + GIC_DIST_ENABLE_SET + enableoff) & enablemask) { + writel_relaxed(enablemask, base + GIC_DIST_ENABLE_CLEAR + enableoff); + enabled = true; + } + + writel_relaxed(val, base + GIC_DIST_CONFIG + confoff); + + if (enabled) + writel_relaxed(enablemask, base + GIC_DIST_ENABLE_SET + enableoff); + + raw_spin_unlock(&irq_controller_lock); + + return 0; +} + +static int gic_retrigger(struct irq_data *d) +{ + if (gic_arch_extn.irq_retrigger) + return gic_arch_extn.irq_retrigger(d); + + return -ENXIO; +} + +#ifdef CONFIG_SMP +static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, + bool force) +{ + void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3); + unsigned int shift = (gic_irq(d) % 4) * 8; + unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask); + u32 val, mask, bit; + + if (cpu >= 8 || cpu >= nr_cpu_ids) + return -EINVAL; + + mask = 0xff << shift; + bit = 1 << (cpu_logical_map(cpu) + shift); + + raw_spin_lock(&irq_controller_lock); + val = readl_relaxed(reg) & ~mask; + writel_relaxed(val | bit, reg); + raw_spin_unlock(&irq_controller_lock); + + return IRQ_SET_MASK_OK; +} +#endif + +#ifdef CONFIG_PM +static int gic_set_wake(struct irq_data *d, unsigned int on) +{ + int ret = -ENXIO; + + if (gic_arch_extn.irq_set_wake) + ret = gic_arch_extn.irq_set_wake(d, on); + + return ret; +} + +#else +#define gic_set_wake NULL +#endif + +asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs) +{ + u32 irqstat, irqnr; + struct gic_chip_data *gic = &gic_data[0]; + void __iomem *cpu_base = gic_data_cpu_base(gic); + + do { + irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK); + irqnr = irqstat & ~0x1c00; + + if (likely(irqnr > 15 && irqnr < 1021)) { + irqnr = irq_find_mapping(gic->domain, irqnr); + handle_IRQ(irqnr, regs); + continue; + } + if (irqnr < 16) { + writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI); +#ifdef CONFIG_SMP + handle_IPI(irqnr, regs); +#endif + continue; + } + break; + } while (1); +} + +static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc) +{ + struct gic_chip_data *chip_data = irq_get_handler_data(irq); + struct irq_chip *chip = irq_get_chip(irq); + unsigned int cascade_irq, gic_irq; + unsigned long status; + + chained_irq_enter(chip, desc); + + raw_spin_lock(&irq_controller_lock); + status = readl_relaxed(gic_data_cpu_base(chip_data) + GIC_CPU_INTACK); + 
raw_spin_unlock(&irq_controller_lock); + + gic_irq = (status & 0x3ff); + if (gic_irq == 1023) + goto out; + + cascade_irq = irq_find_mapping(chip_data->domain, gic_irq); + if (unlikely(gic_irq < 32 || gic_irq > 1020)) + do_bad_IRQ(cascade_irq, desc); + else + generic_handle_irq(cascade_irq); + + out: + chained_irq_exit(chip, desc); +} + +static struct irq_chip gic_chip = { + .name = "GIC", + .irq_mask = gic_mask_irq, + .irq_unmask = gic_unmask_irq, + .irq_eoi = gic_eoi_irq, + .irq_set_type = gic_set_type, + .irq_retrigger = gic_retrigger, +#ifdef CONFIG_SMP + .irq_set_affinity = gic_set_affinity, +#endif + .irq_set_wake = gic_set_wake, +}; + +void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq) +{ + if (gic_nr >= MAX_GIC_NR) + BUG(); + if (irq_set_handler_data(irq, &gic_data[gic_nr]) != 0) + BUG(); + irq_set_chained_handler(irq, gic_handle_cascade_irq); +} + +static void __init gic_dist_init(struct gic_chip_data *gic) +{ + unsigned int i; + u32 cpumask; + unsigned int gic_irqs = gic->gic_irqs; + void __iomem *base = gic_data_dist_base(gic); + u32 cpu = cpu_logical_map(smp_processor_id()); + + cpumask = 1 << cpu; + cpumask |= cpumask << 8; + cpumask |= cpumask << 16; + + writel_relaxed(0, base + GIC_DIST_CTRL); + + /* + * Set all global interrupts to be level triggered, active low. + */ + for (i = 32; i < gic_irqs; i += 16) + writel_relaxed(0, base + GIC_DIST_CONFIG + i * 4 / 16); + + /* + * Set all global interrupts to this CPU only. + */ + for (i = 32; i < gic_irqs; i += 4) + writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4); + + /* + * Set priority on all global interrupts. + */ + for (i = 32; i < gic_irqs; i += 4) + writel_relaxed(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4); + + /* + * Disable all interrupts. Leave the PPI and SGIs alone + * as these enables are banked registers. + */ + for (i = 32; i < gic_irqs; i += 32) + writel_relaxed(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32); + + writel_relaxed(1, base + GIC_DIST_CTRL); +} + +static void __cpuinit gic_cpu_init(struct gic_chip_data *gic) +{ + void __iomem *dist_base = gic_data_dist_base(gic); + void __iomem *base = gic_data_cpu_base(gic); + int i; + + /* + * Deal with the banked PPI and SGI interrupts - disable all + * PPI interrupts, ensure all SGI interrupts are enabled. + */ + writel_relaxed(0xffff0000, dist_base + GIC_DIST_ENABLE_CLEAR); + writel_relaxed(0x0000ffff, dist_base + GIC_DIST_ENABLE_SET); + + /* + * Set priority on PPI and SGI interrupts + */ + for (i = 0; i < 32; i += 4) + writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4 / 4); + + writel_relaxed(0xf0, base + GIC_CPU_PRIMASK); + writel_relaxed(1, base + GIC_CPU_CTRL); +} + +#ifdef CONFIG_CPU_PM +/* + * Saves the GIC distributor registers during suspend or idle. Must be called + * with interrupts disabled but before powering down the GIC. After calling + * this function, no interrupts will be delivered by the GIC, and another + * platform-specific wakeup source must be enabled. 
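+ *
+ * In this file it is invoked from gic_notifier() on CPU_CLUSTER_PM_ENTER,
+ * i.e. when the whole cluster, and with it the distributor, is about to
+ * lose state; the banked per-cpu registers are saved separately by
+ * gic_cpu_save() on plain CPU_PM_ENTER.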
+ */
+static void gic_dist_save(unsigned int gic_nr)
+{
+	unsigned int gic_irqs;
+	void __iomem *dist_base;
+	int i;
+
+	if (gic_nr >= MAX_GIC_NR)
+		BUG();
+
+	gic_irqs = gic_data[gic_nr].gic_irqs;
+	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
+
+	if (!dist_base)
+		return;
+
+	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
+		gic_data[gic_nr].saved_spi_conf[i] =
+			readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
+
+	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
+		gic_data[gic_nr].saved_spi_target[i] =
+			readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);
+
+	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
+		gic_data[gic_nr].saved_spi_enable[i] =
+			readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
+}
+
+/*
+ * Restores the GIC distributor registers during resume or when coming out of
+ * idle.  Must be called before enabling interrupts.  If a level interrupt
+ * that occurred while the GIC was suspended is still present, it will be
+ * handled normally, but any edge interrupts that occurred will not be seen by
+ * the GIC and need to be handled by the platform-specific wakeup source.
+ */
+static void gic_dist_restore(unsigned int gic_nr)
+{
+	unsigned int gic_irqs;
+	unsigned int i;
+	void __iomem *dist_base;
+
+	if (gic_nr >= MAX_GIC_NR)
+		BUG();
+
+	gic_irqs = gic_data[gic_nr].gic_irqs;
+	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
+
+	if (!dist_base)
+		return;
+
+	writel_relaxed(0, dist_base + GIC_DIST_CTRL);
+
+	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
+		writel_relaxed(gic_data[gic_nr].saved_spi_conf[i],
+			dist_base + GIC_DIST_CONFIG + i * 4);
+
+	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
+		writel_relaxed(0xa0a0a0a0,
+			dist_base + GIC_DIST_PRI + i * 4);
+
+	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
+		writel_relaxed(gic_data[gic_nr].saved_spi_target[i],
+			dist_base + GIC_DIST_TARGET + i * 4);
+
+	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
+		writel_relaxed(gic_data[gic_nr].saved_spi_enable[i],
+			dist_base + GIC_DIST_ENABLE_SET + i * 4);
+
+	writel_relaxed(1, dist_base + GIC_DIST_CTRL);
+}
+
+static void gic_cpu_save(unsigned int gic_nr)
+{
+	int i;
+	u32 *ptr;
+	void __iomem *dist_base;
+	void __iomem *cpu_base;
+
+	if (gic_nr >= MAX_GIC_NR)
+		BUG();
+
+	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
+	cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);
+
+	if (!dist_base || !cpu_base)
+		return;
+
+	ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
+	for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
+		ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
+
+	ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
+	for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
+		ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
+}
+
+static void gic_cpu_restore(unsigned int gic_nr)
+{
+	int i;
+	u32 *ptr;
+	void __iomem *dist_base;
+	void __iomem *cpu_base;
+
+	if (gic_nr >= MAX_GIC_NR)
+		BUG();
+
+	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
+	cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);
+
+	if (!dist_base || !cpu_base)
+		return;
+
+	ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
+	for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
+		writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4);
+
+	ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
+	for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
+		writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4);
+
+	for (i = 0; i < DIV_ROUND_UP(32, 4); i++)
+		writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4);
+
+	writel_relaxed(0xf0, cpu_base + GIC_CPU_PRIMASK);
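+	/* re-enable signalling of interrupts by this CPU interface */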
+ writel_relaxed(1, cpu_base + GIC_CPU_CTRL); +} + +static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v) +{ + int i; + + for (i = 0; i < MAX_GIC_NR; i++) { +#ifdef CONFIG_GIC_NON_BANKED + /* Skip over unused GICs */ + if (!gic_data[i].get_base) + continue; +#endif + switch (cmd) { + case CPU_PM_ENTER: + gic_cpu_save(i); + break; + case CPU_PM_ENTER_FAILED: + case CPU_PM_EXIT: + gic_cpu_restore(i); + break; + case CPU_CLUSTER_PM_ENTER: + gic_dist_save(i); + break; + case CPU_CLUSTER_PM_ENTER_FAILED: + case CPU_CLUSTER_PM_EXIT: + gic_dist_restore(i); + break; + } + } + + return NOTIFY_OK; +} + +static struct notifier_block gic_notifier_block = { + .notifier_call = gic_notifier, +}; + +static void __init gic_pm_init(struct gic_chip_data *gic) +{ + gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4, + sizeof(u32)); + BUG_ON(!gic->saved_ppi_enable); + + gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4, + sizeof(u32)); + BUG_ON(!gic->saved_ppi_conf); + + if (gic == &gic_data[0]) + cpu_pm_register_notifier(&gic_notifier_block); +} +#else +static void __init gic_pm_init(struct gic_chip_data *gic) +{ +} +#endif + +static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, + irq_hw_number_t hw) +{ + if (hw < 32) { + irq_set_percpu_devid(irq); + irq_set_chip_and_handler(irq, &gic_chip, + handle_percpu_devid_irq); + set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN); + } else { + irq_set_chip_and_handler(irq, &gic_chip, + handle_fasteoi_irq); + set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); + } + irq_set_chip_data(irq, d->host_data); + return 0; +} + +static int gic_irq_domain_xlate(struct irq_domain *d, + struct device_node *controller, + const u32 *intspec, unsigned int intsize, + unsigned long *out_hwirq, unsigned int *out_type) +{ + if (d->of_node != controller) + return -EINVAL; + if (intsize < 3) + return -EINVAL; + + /* Get the interrupt number and add 16 to skip over SGIs */ + *out_hwirq = intspec[1] + 16; + + /* For SPIs, we need to add 16 more to get the GIC irq ID number */ + if (!intspec[0]) + *out_hwirq += 16; + + *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK; + return 0; +} + +const struct irq_domain_ops gic_irq_domain_ops = { + .map = gic_irq_domain_map, + .xlate = gic_irq_domain_xlate, +}; + +void __init gic_init_bases(unsigned int gic_nr, int irq_start, + void __iomem *dist_base, void __iomem *cpu_base, + u32 percpu_offset, struct device_node *node) +{ + irq_hw_number_t hwirq_base; + struct gic_chip_data *gic; + int gic_irqs, irq_base; + + BUG_ON(gic_nr >= MAX_GIC_NR); + + gic = &gic_data[gic_nr]; +#ifdef CONFIG_GIC_NON_BANKED + if (percpu_offset) { /* Frankein-GIC without banked registers... */ + unsigned int cpu; + + gic->dist_base.percpu_base = alloc_percpu(void __iomem *); + gic->cpu_base.percpu_base = alloc_percpu(void __iomem *); + if (WARN_ON(!gic->dist_base.percpu_base || + !gic->cpu_base.percpu_base)) { + free_percpu(gic->dist_base.percpu_base); + free_percpu(gic->cpu_base.percpu_base); + return; + } + + for_each_possible_cpu(cpu) { + unsigned long offset = percpu_offset * cpu_logical_map(cpu); + *per_cpu_ptr(gic->dist_base.percpu_base, cpu) = dist_base + offset; + *per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = cpu_base + offset; + } + + gic_set_base_accessor(gic, gic_get_percpu_base); + } else +#endif + { /* Normal, sane GIC... 
*/ + WARN(percpu_offset, + "GIC_NON_BANKED not enabled, ignoring %08x offset!", + percpu_offset); + gic->dist_base.common_base = dist_base; + gic->cpu_base.common_base = cpu_base; + gic_set_base_accessor(gic, gic_get_common_base); + } + + /* + * For primary GICs, skip over SGIs. + * For secondary GICs, skip over PPIs, too. + */ + if (gic_nr == 0 && (irq_start & 31) > 0) { + hwirq_base = 16; + if (irq_start != -1) + irq_start = (irq_start & ~31) + 16; + } else { + hwirq_base = 32; + } + + /* + * Find out how many interrupts are supported. + * The GIC only supports up to 1020 interrupt sources. + */ + gic_irqs = readl_relaxed(gic_data_dist_base(gic) + GIC_DIST_CTR) & 0x1f; + gic_irqs = (gic_irqs + 1) * 32; + if (gic_irqs > 1020) + gic_irqs = 1020; + gic->gic_irqs = gic_irqs; + + gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */ + irq_base = irq_alloc_descs(irq_start, 16, gic_irqs, numa_node_id()); + if (IS_ERR_VALUE(irq_base)) { + WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n", + irq_start); + irq_base = irq_start; + } + gic->domain = irq_domain_add_legacy(node, gic_irqs, irq_base, + hwirq_base, &gic_irq_domain_ops, gic); + if (WARN_ON(!gic->domain)) + return; + + gic_chip.flags |= gic_arch_extn.flags; + gic_dist_init(gic); + gic_cpu_init(gic); + gic_pm_init(gic); +} + +void __cpuinit gic_secondary_init(unsigned int gic_nr) +{ + BUG_ON(gic_nr >= MAX_GIC_NR); + + gic_cpu_init(&gic_data[gic_nr]); +} + +#ifdef CONFIG_SMP +void gic_raise_softirq(const struct cpumask *mask, unsigned int irq) +{ + int cpu; + unsigned long map = 0; + + /* Convert our logical CPU mask into a physical one. */ + for_each_cpu(cpu, mask) + map |= 1 << cpu_logical_map(cpu); + + /* + * Ensure that stores to Normal memory are visible to the + * other CPUs before issuing the IPI. + */ + dsb(); + + /* this always happens on GIC0 */ + writel_relaxed(map << 16 | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT); +} +#endif + +#ifdef CONFIG_OF +static int gic_cnt __initdata = 0; + +int __init gic_of_init(struct device_node *node, struct device_node *parent) +{ + void __iomem *cpu_base; + void __iomem *dist_base; + u32 percpu_offset; + int irq; + + if (WARN_ON(!node)) + return -ENODEV; + + dist_base = of_iomap(node, 0); + WARN(!dist_base, "unable to map gic dist registers\n"); + + cpu_base = of_iomap(node, 1); + WARN(!cpu_base, "unable to map gic cpu registers\n"); + + if (of_property_read_u32(node, "cpu-offset", &percpu_offset)) + percpu_offset = 0; + + gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset, node); + + if (parent) { + irq = irq_of_parse_and_map(node, 0); + gic_cascade_irq(gic_cnt, irq); + } + gic_cnt++; + return 0; +} +#endif diff --git a/ANDROID_3.4.5/arch/arm/common/icst.c b/ANDROID_3.4.5/arch/arm/common/icst.c new file mode 100644 index 00000000..2dc6da70 --- /dev/null +++ b/ANDROID_3.4.5/arch/arm/common/icst.c @@ -0,0 +1,100 @@ +/* + * linux/arch/arm/common/icst307.c + * + * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Support functions for calculating clocks/divisors for the ICST307 + * clock generators. See http://www.idt.com/ for more information + * on these devices. + * + * This is an almost identical implementation to the ICST525 clock generator. 
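+ * Both parts compute f = ref * 2 * (v + 8) / ((r + 2) * s2div[s]); see
+ * icst_hz() below.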
+ * The s2div and idx2s arrays are different.
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include <asm/hardware/icst.h>
+
+/*
+ * Divisors for each OD setting.
+ */
+const unsigned char icst307_s2div[8] = { 10, 2, 8, 4, 5, 7, 3, 6 };
+const unsigned char icst525_s2div[8] = { 10, 2, 8, 4, 5, 7, 9, 6 };
+EXPORT_SYMBOL(icst307_s2div);
+EXPORT_SYMBOL(icst525_s2div);
+
+unsigned long icst_hz(const struct icst_params *p, struct icst_vco vco)
+{
+	return p->ref * 2 * (vco.v + 8) / ((vco.r + 2) * p->s2div[vco.s]);
+}
+
+EXPORT_SYMBOL(icst_hz);
+
+/*
+ * Ascending divisor S values.
+ */
+const unsigned char icst307_idx2s[8] = { 1, 6, 3, 4, 7, 5, 2, 0 };
+const unsigned char icst525_idx2s[8] = { 1, 3, 4, 7, 5, 2, 6, 0 };
+EXPORT_SYMBOL(icst307_idx2s);
+EXPORT_SYMBOL(icst525_idx2s);
+
+struct icst_vco
+icst_hz_to_vco(const struct icst_params *p, unsigned long freq)
+{
+	struct icst_vco vco = { .s = 1, .v = p->vd_max, .r = p->rd_max };
+	unsigned long f;
+	unsigned int i = 0, rd, best = (unsigned int)-1;
+
+	/*
+	 * First, find the PLL output divisor such
+	 * that the PLL output is within spec.
+	 */
+	do {
+		f = freq * p->s2div[p->idx2s[i]];
+
+		if (f > p->vco_min && f <= p->vco_max)
+			break;
+
+		i++;
+	} while (i < 8);
+
+	if (i >= 8)
+		return vco;
+
+	vco.s = p->idx2s[i];
+
+	/*
+	 * Now find the closest divisor combination
+	 * which gives a PLL output of 'f'.
+	 */
+	for (rd = p->rd_min; rd <= p->rd_max; rd++) {
+		unsigned long fref_div, f_pll;
+		unsigned int vd;
+		int f_diff;
+
+		fref_div = (2 * p->ref) / rd;
+
+		vd = (f + fref_div / 2) / fref_div;
+		if (vd < p->vd_min || vd > p->vd_max)
+			continue;
+
+		f_pll = fref_div * vd;
+		f_diff = f_pll - f;
+		if (f_diff < 0)
+			f_diff = -f_diff;
+
+		if ((unsigned)f_diff < best) {
+			vco.v = vd - 8;
+			vco.r = rd - 2;
+			if (f_diff == 0)
+				break;
+			best = f_diff;
+		}
+	}
+
+	return vco;
+}
+
+EXPORT_SYMBOL(icst_hz_to_vco);
diff --git a/ANDROID_3.4.5/arch/arm/common/it8152.c b/ANDROID_3.4.5/arch/arm/common/it8152.c
new file mode 100644
index 00000000..dcb13494
--- /dev/null
+++ b/ANDROID_3.4.5/arch/arm/common/it8152.c
@@ -0,0 +1,354 @@
+/*
+ * linux/arch/arm/common/it8152.c
+ *
+ * Copyright Compulab Ltd, 2002-2007
+ * Mike Rapoport <mike@compulab.co.il>
+ *
+ * The DMA bouncing part is taken from arch/arm/mach-ixp4xx/common-pci.c
+ * (see this file for respective copyrights)
+ *
+ * Thanks to Guennadi Liakhovetski <gl@dsa-ac.de> for IRQ enumeration
+ * and demux code.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/ptrace.h>
+#include <linux/interrupt.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/export.h>
+
+#include <asm/mach/pci.h>
+#include <asm/hardware/it8152.h>
+
+#define MAX_SLOTS 21
+
+static void it8152_mask_irq(struct irq_data *d)
+{
+	unsigned int irq = d->irq;
+
+	if (irq >= IT8152_LD_IRQ(0)) {
+		__raw_writel((__raw_readl(IT8152_INTC_LDCNIMR) |
+			      (1 << (irq - IT8152_LD_IRQ(0)))),
+			     IT8152_INTC_LDCNIMR);
+	} else if (irq >= IT8152_LP_IRQ(0)) {
+		__raw_writel((__raw_readl(IT8152_INTC_LPCNIMR) |
+			      (1 << (irq - IT8152_LP_IRQ(0)))),
+			     IT8152_INTC_LPCNIMR);
+	} else if (irq >= IT8152_PD_IRQ(0)) {
+		__raw_writel((__raw_readl(IT8152_INTC_PDCNIMR) |
+			      (1 << (irq - IT8152_PD_IRQ(0)))),
+			     IT8152_INTC_PDCNIMR);
+	}
+}
+
+static void it8152_unmask_irq(struct irq_data *d)
+{
+	unsigned int irq = d->irq;
+
+	if (irq >= IT8152_LD_IRQ(0)) {
+		__raw_writel((__raw_readl(IT8152_INTC_LDCNIMR) &
+			      ~(1 << (irq - IT8152_LD_IRQ(0)))),
+			     IT8152_INTC_LDCNIMR);
+	} else if (irq >= IT8152_LP_IRQ(0)) {
+		__raw_writel((__raw_readl(IT8152_INTC_LPCNIMR) &
+			      ~(1 << (irq - IT8152_LP_IRQ(0)))),
+			     IT8152_INTC_LPCNIMR);
+	} else if (irq >= IT8152_PD_IRQ(0)) {
+		__raw_writel((__raw_readl(IT8152_INTC_PDCNIMR) &
+			      ~(1 << (irq - IT8152_PD_IRQ(0)))),
+			     IT8152_INTC_PDCNIMR);
+	}
+}
+
+static struct irq_chip it8152_irq_chip = {
+	.name		= "it8152",
+	.irq_ack	= it8152_mask_irq,
+	.irq_mask	= it8152_mask_irq,
+	.irq_unmask	= it8152_unmask_irq,
+};
+
+void it8152_init_irq(void)
+{
+	int irq;
+
+	__raw_writel((0xffff), IT8152_INTC_PDCNIMR);
+	__raw_writel((0), IT8152_INTC_PDCNIRR);
+	__raw_writel((0xffff), IT8152_INTC_LPCNIMR);
+	__raw_writel((0), IT8152_INTC_LPCNIRR);
+	__raw_writel((0xffff), IT8152_INTC_LDCNIMR);
+	__raw_writel((0), IT8152_INTC_LDCNIRR);
+
+	for (irq = IT8152_IRQ(0); irq <= IT8152_LAST_IRQ; irq++) {
+		irq_set_chip_and_handler(irq, &it8152_irq_chip,
+					 handle_level_irq);
+		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+	}
+}
+
+void it8152_irq_demux(unsigned int irq, struct irq_desc *desc)
+{
+	int bits_pd, bits_lp, bits_ld;
+	int i;
+
+	while (1) {
+		/* Read all */
+		bits_pd = __raw_readl(IT8152_INTC_PDCNIRR);
+		bits_lp = __raw_readl(IT8152_INTC_LPCNIRR);
+		bits_ld = __raw_readl(IT8152_INTC_LDCNIRR);
+
+		/* Ack */
+		__raw_writel((~bits_pd), IT8152_INTC_PDCNIRR);
+		__raw_writel((~bits_lp), IT8152_INTC_LPCNIRR);
+		__raw_writel((~bits_ld), IT8152_INTC_LDCNIRR);
+
+		if (!(bits_ld | bits_lp | bits_pd)) {
+			/* Re-read to guarantee that there was a moment in
+			   time when all three were 0.
*/ + bits_pd = __raw_readl(IT8152_INTC_PDCNIRR); + bits_lp = __raw_readl(IT8152_INTC_LPCNIRR); + bits_ld = __raw_readl(IT8152_INTC_LDCNIRR); + if (!(bits_ld | bits_lp | bits_pd)) + return; + } + + bits_pd &= ((1 << IT8152_PD_IRQ_COUNT) - 1); + while (bits_pd) { + i = __ffs(bits_pd); + generic_handle_irq(IT8152_PD_IRQ(i)); + bits_pd &= ~(1 << i); + } + + bits_lp &= ((1 << IT8152_LP_IRQ_COUNT) - 1); + while (bits_lp) { + i = __ffs(bits_lp); + generic_handle_irq(IT8152_LP_IRQ(i)); + bits_lp &= ~(1 << i); + } + + bits_ld &= ((1 << IT8152_LD_IRQ_COUNT) - 1); + while (bits_ld) { + i = __ffs(bits_ld); + generic_handle_irq(IT8152_LD_IRQ(i)); + bits_ld &= ~(1 << i); + } + } +} + +/* mapping for on-chip devices */ +int __init it8152_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +{ + if ((dev->vendor == PCI_VENDOR_ID_ITE) && + (dev->device == PCI_DEVICE_ID_ITE_8152)) { + if ((dev->class >> 8) == PCI_CLASS_MULTIMEDIA_AUDIO) + return IT8152_AUDIO_INT; + if ((dev->class >> 8) == PCI_CLASS_SERIAL_USB) + return IT8152_USB_INT; + if ((dev->class >> 8) == PCI_CLASS_SYSTEM_DMA) + return IT8152_CDMA_INT; + } + + return 0; +} + +static unsigned long it8152_pci_dev_base_address(struct pci_bus *bus, + unsigned int devfn) +{ + unsigned long addr = 0; + + if (bus->number == 0) { + if (devfn < PCI_DEVFN(MAX_SLOTS, 0)) + addr = (devfn << 8); + } else + addr = (bus->number << 16) | (devfn << 8); + + return addr; +} + +static int it8152_pci_read_config(struct pci_bus *bus, + unsigned int devfn, int where, + int size, u32 *value) +{ + unsigned long addr = it8152_pci_dev_base_address(bus, devfn); + u32 v; + int shift; + + shift = (where & 3); + + __raw_writel((addr + where), IT8152_PCI_CFG_ADDR); + v = (__raw_readl(IT8152_PCI_CFG_DATA) >> (8 * (shift))); + + *value = v; + + return PCIBIOS_SUCCESSFUL; +} + +static int it8152_pci_write_config(struct pci_bus *bus, + unsigned int devfn, int where, + int size, u32 value) +{ + unsigned long addr = it8152_pci_dev_base_address(bus, devfn); + u32 v, vtemp, mask = 0; + int shift; + + if (size == 1) + mask = 0xff; + if (size == 2) + mask = 0xffff; + + shift = (where & 3); + + __raw_writel((addr + where), IT8152_PCI_CFG_ADDR); + vtemp = __raw_readl(IT8152_PCI_CFG_DATA); + + if (mask) + vtemp &= ~(mask << (8 * shift)); + else + vtemp = 0; + + v = (value << (8 * shift)); + __raw_writel((addr + where), IT8152_PCI_CFG_ADDR); + __raw_writel((v | vtemp), IT8152_PCI_CFG_DATA); + + return PCIBIOS_SUCCESSFUL; +} + +static struct pci_ops it8152_ops = { + .read = it8152_pci_read_config, + .write = it8152_pci_write_config, +}; + +static struct resource it8152_io = { + .name = "IT8152 PCI I/O region", + .flags = IORESOURCE_IO, +}; + +static struct resource it8152_mem = { + .name = "IT8152 PCI memory region", + .start = 0x10000000, + .end = 0x13e00000, + .flags = IORESOURCE_MEM, +}; + +/* + * The following functions are needed for DMA bouncing. + * ITE8152 chip can address up to 64MByte, so all the devices + * connected to ITE8152 (PCI and USB) should have limited DMA window + */ +static int it8152_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size) +{ + dev_dbg(dev, "%s: dma_addr %08x, size %08x\n", + __func__, dma_addr, size); + return (dma_addr + size - PHYS_OFFSET) >= SZ_64M; +} + +/* + * Setup DMA mask to 64MB on devices connected to ITE8152. Ignore all + * other devices. 
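+ *
+ * Worked example: a buffer ending at bus address PHYS_OFFSET + 70M makes
+ * it8152_needs_bounce() above return true (70M >= SZ_64M), so dmabounce
+ * transparently substitutes a bounce buffer that lies inside the 64MB
+ * window for the duration of the transfer.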
+ */ +static int it8152_pci_platform_notify(struct device *dev) +{ + if (dev->bus == &pci_bus_type) { + if (dev->dma_mask) + *dev->dma_mask = (SZ_64M - 1) | PHYS_OFFSET; + dev->coherent_dma_mask = (SZ_64M - 1) | PHYS_OFFSET; + dmabounce_register_dev(dev, 2048, 4096, it8152_needs_bounce); + } + return 0; +} + +static int it8152_pci_platform_notify_remove(struct device *dev) +{ + if (dev->bus == &pci_bus_type) + dmabounce_unregister_dev(dev); + + return 0; +} + +int dma_set_coherent_mask(struct device *dev, u64 mask) +{ + if (mask >= PHYS_OFFSET + SZ_64M - 1) + return 0; + + return -EIO; +} + +int __init it8152_pci_setup(int nr, struct pci_sys_data *sys) +{ + it8152_io.start = IT8152_IO_BASE + 0x12000; + it8152_io.end = IT8152_IO_BASE + 0x12000 + 0x100000; + + sys->mem_offset = 0x10000000; + sys->io_offset = IT8152_IO_BASE; + + if (request_resource(&ioport_resource, &it8152_io)) { + printk(KERN_ERR "PCI: unable to allocate IO region\n"); + goto err0; + } + if (request_resource(&iomem_resource, &it8152_mem)) { + printk(KERN_ERR "PCI: unable to allocate memory region\n"); + goto err1; + } + + pci_add_resource_offset(&sys->resources, &it8152_io, sys->io_offset); + pci_add_resource_offset(&sys->resources, &it8152_mem, sys->mem_offset); + + if (platform_notify || platform_notify_remove) { + printk(KERN_ERR "PCI: Can't use platform_notify\n"); + goto err2; + } + + platform_notify = it8152_pci_platform_notify; + platform_notify_remove = it8152_pci_platform_notify_remove; + + return 1; + +err2: + release_resource(&it8152_io); +err1: + release_resource(&it8152_mem); +err0: + return -EBUSY; +} + +/* ITE bridge requires setting latency timer to avoid early bus access + termination by PCI bus master devices +*/ +void pcibios_set_master(struct pci_dev *dev) +{ + u8 lat; + + /* no need to update on-chip OHCI controller */ + if ((dev->vendor == PCI_VENDOR_ID_ITE) && + (dev->device == PCI_DEVICE_ID_ITE_8152) && + ((dev->class >> 8) == PCI_CLASS_SERIAL_USB)) + return; + + pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat); + if (lat < 16) + lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency; + else if (lat > pcibios_max_latency) + lat = pcibios_max_latency; + else + return; + printk(KERN_DEBUG "PCI: Setting latency timer of device %s to %d\n", + pci_name(dev), lat); + pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat); +} + + +struct pci_bus * __init it8152_pci_scan_bus(int nr, struct pci_sys_data *sys) +{ + return pci_scan_root_bus(NULL, nr, &it8152_ops, sys, &sys->resources); +} + +EXPORT_SYMBOL(dma_set_coherent_mask); diff --git a/ANDROID_3.4.5/arch/arm/common/locomo.c b/ANDROID_3.4.5/arch/arm/common/locomo.c new file mode 100644 index 00000000..b55c3625 --- /dev/null +++ b/ANDROID_3.4.5/arch/arm/common/locomo.c @@ -0,0 +1,914 @@ +/* + * linux/arch/arm/common/locomo.c + * + * Sharp LoCoMo support + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This file contains all generic LoCoMo support. + * + * All initialization functions provided here are intended to be called + * from machine specific code with proper arguments when required. 
+ * + * Based on sa1111.c + */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/delay.h> +#include <linux/errno.h> +#include <linux/ioport.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/io.h> + +#include <mach/hardware.h> +#include <asm/irq.h> +#include <asm/mach/irq.h> + +#include <asm/hardware/locomo.h> + +/* LoCoMo Interrupts */ +#define IRQ_LOCOMO_KEY (0) +#define IRQ_LOCOMO_GPIO (1) +#define IRQ_LOCOMO_LT (2) +#define IRQ_LOCOMO_SPI (3) + +/* M62332 output channel selection */ +#define M62332_EVR_CH 1 /* M62332 volume channel number */ + /* 0 : CH.1 , 1 : CH. 2 */ +/* DAC send data */ +#define M62332_SLAVE_ADDR 0x4e /* Slave address */ +#define M62332_W_BIT 0x00 /* W bit (0 only) */ +#define M62332_SUB_ADDR 0x00 /* Sub address */ +#define M62332_A_BIT 0x00 /* A bit (0 only) */ + +/* DAC setup and hold times (expressed in us) */ +#define DAC_BUS_FREE_TIME 5 /* 4.7 us */ +#define DAC_START_SETUP_TIME 5 /* 4.7 us */ +#define DAC_STOP_SETUP_TIME 4 /* 4.0 us */ +#define DAC_START_HOLD_TIME 5 /* 4.7 us */ +#define DAC_SCL_LOW_HOLD_TIME 5 /* 4.7 us */ +#define DAC_SCL_HIGH_HOLD_TIME 4 /* 4.0 us */ +#define DAC_DATA_SETUP_TIME 1 /* 250 ns */ +#define DAC_DATA_HOLD_TIME 1 /* 300 ns */ +#define DAC_LOW_SETUP_TIME 1 /* 300 ns */ +#define DAC_HIGH_SETUP_TIME 1 /* 1000 ns */ + +/* the following is the overall data for the locomo chip */ +struct locomo { + struct device *dev; + unsigned long phys; + unsigned int irq; + int irq_base; + spinlock_t lock; + void __iomem *base; +#ifdef CONFIG_PM + void *saved_state; +#endif +}; + +struct locomo_dev_info { + unsigned long offset; + unsigned long length; + unsigned int devid; + unsigned int irq[1]; + const char * name; +}; + +/* All the locomo devices. If offset is non-zero, the mapbase for the + * locomo_dev will be set to the chip base plus offset. If offset is + * zero, then the mapbase for the locomo_dev will be set to zero. 
An + * offset of zero means the device only uses GPIOs or other helper + * functions inside this file */ +static struct locomo_dev_info locomo_devices[] = { + { + .devid = LOCOMO_DEVID_KEYBOARD, + .irq = { IRQ_LOCOMO_KEY }, + .name = "locomo-keyboard", + .offset = LOCOMO_KEYBOARD, + .length = 16, + }, + { + .devid = LOCOMO_DEVID_FRONTLIGHT, + .irq = {}, + .name = "locomo-frontlight", + .offset = LOCOMO_FRONTLIGHT, + .length = 8, + + }, + { + .devid = LOCOMO_DEVID_BACKLIGHT, + .irq = {}, + .name = "locomo-backlight", + .offset = LOCOMO_BACKLIGHT, + .length = 8, + }, + { + .devid = LOCOMO_DEVID_AUDIO, + .irq = {}, + .name = "locomo-audio", + .offset = LOCOMO_AUDIO, + .length = 4, + }, + { + .devid = LOCOMO_DEVID_LED, + .irq = {}, + .name = "locomo-led", + .offset = LOCOMO_LED, + .length = 8, + }, + { + .devid = LOCOMO_DEVID_UART, + .irq = {}, + .name = "locomo-uart", + .offset = 0, + .length = 0, + }, + { + .devid = LOCOMO_DEVID_SPI, + .irq = {}, + .name = "locomo-spi", + .offset = LOCOMO_SPI, + .length = 0x30, + }, +}; + +static void locomo_handler(unsigned int irq, struct irq_desc *desc) +{ + struct locomo *lchip = irq_get_chip_data(irq); + int req, i; + + /* Acknowledge the parent IRQ */ + desc->irq_data.chip->irq_ack(&desc->irq_data); + + /* check why this interrupt was generated */ + req = locomo_readl(lchip->base + LOCOMO_ICR) & 0x0f00; + + if (req) { + /* generate the next interrupt(s) */ + irq = lchip->irq_base; + for (i = 0; i <= 3; i++, irq++) { + if (req & (0x0100 << i)) { + generic_handle_irq(irq); + } + + } + } +} + +static void locomo_ack_irq(struct irq_data *d) +{ +} + +static void locomo_mask_irq(struct irq_data *d) +{ + struct locomo *lchip = irq_data_get_irq_chip_data(d); + unsigned int r; + r = locomo_readl(lchip->base + LOCOMO_ICR); + r &= ~(0x0010 << (d->irq - lchip->irq_base)); + locomo_writel(r, lchip->base + LOCOMO_ICR); +} + +static void locomo_unmask_irq(struct irq_data *d) +{ + struct locomo *lchip = irq_data_get_irq_chip_data(d); + unsigned int r; + r = locomo_readl(lchip->base + LOCOMO_ICR); + r |= (0x0010 << (d->irq - lchip->irq_base)); + locomo_writel(r, lchip->base + LOCOMO_ICR); +} + +static struct irq_chip locomo_chip = { + .name = "LOCOMO", + .irq_ack = locomo_ack_irq, + .irq_mask = locomo_mask_irq, + .irq_unmask = locomo_unmask_irq, +}; + +static void locomo_setup_irq(struct locomo *lchip) +{ + int irq = lchip->irq_base; + + /* + * Install handler for IRQ_LOCOMO_HW. + */ + irq_set_irq_type(lchip->irq, IRQ_TYPE_EDGE_FALLING); + irq_set_chip_data(lchip->irq, lchip); + irq_set_chained_handler(lchip->irq, locomo_handler); + + /* Install handlers for IRQ_LOCOMO_* */ + for ( ; irq <= lchip->irq_base + 3; irq++) { + irq_set_chip_and_handler(irq, &locomo_chip, handle_level_irq); + irq_set_chip_data(irq, lchip); + set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); + } +} + + +static void locomo_dev_release(struct device *_dev) +{ + struct locomo_dev *dev = LOCOMO_DEV(_dev); + + kfree(dev); +} + +static int +locomo_init_one_child(struct locomo *lchip, struct locomo_dev_info *info) +{ + struct locomo_dev *dev; + int ret; + + dev = kzalloc(sizeof(struct locomo_dev), GFP_KERNEL); + if (!dev) { + ret = -ENOMEM; + goto out; + } + + /* + * If the parent device has a DMA mask associated with it, + * propagate it down to the children. 
+ */ + if (lchip->dev->dma_mask) { + dev->dma_mask = *lchip->dev->dma_mask; + dev->dev.dma_mask = &dev->dma_mask; + } + + dev_set_name(&dev->dev, "%s", info->name); + dev->devid = info->devid; + dev->dev.parent = lchip->dev; + dev->dev.bus = &locomo_bus_type; + dev->dev.release = locomo_dev_release; + dev->dev.coherent_dma_mask = lchip->dev->coherent_dma_mask; + + if (info->offset) + dev->mapbase = lchip->base + info->offset; + else + dev->mapbase = 0; + dev->length = info->length; + + dev->irq[0] = (lchip->irq_base == NO_IRQ) ? + NO_IRQ : lchip->irq_base + info->irq[0]; + + ret = device_register(&dev->dev); + if (ret) { + out: + kfree(dev); + } + return ret; +} + +#ifdef CONFIG_PM + +struct locomo_save_data { + u16 LCM_GPO; + u16 LCM_SPICT; + u16 LCM_GPE; + u16 LCM_ASD; + u16 LCM_SPIMD; +}; + +static int locomo_suspend(struct platform_device *dev, pm_message_t state) +{ + struct locomo *lchip = platform_get_drvdata(dev); + struct locomo_save_data *save; + unsigned long flags; + + save = kmalloc(sizeof(struct locomo_save_data), GFP_KERNEL); + if (!save) + return -ENOMEM; + + lchip->saved_state = save; + + spin_lock_irqsave(&lchip->lock, flags); + + save->LCM_GPO = locomo_readl(lchip->base + LOCOMO_GPO); /* GPIO */ + locomo_writel(0x00, lchip->base + LOCOMO_GPO); + save->LCM_SPICT = locomo_readl(lchip->base + LOCOMO_SPI + LOCOMO_SPICT); /* SPI */ + locomo_writel(0x40, lchip->base + LOCOMO_SPI + LOCOMO_SPICT); + save->LCM_GPE = locomo_readl(lchip->base + LOCOMO_GPE); /* GPIO */ + locomo_writel(0x00, lchip->base + LOCOMO_GPE); + save->LCM_ASD = locomo_readl(lchip->base + LOCOMO_ASD); /* ADSTART */ + locomo_writel(0x00, lchip->base + LOCOMO_ASD); + save->LCM_SPIMD = locomo_readl(lchip->base + LOCOMO_SPI + LOCOMO_SPIMD); /* SPI */ + locomo_writel(0x3C14, lchip->base + LOCOMO_SPI + LOCOMO_SPIMD); + + locomo_writel(0x00, lchip->base + LOCOMO_PAIF); + locomo_writel(0x00, lchip->base + LOCOMO_DAC); + locomo_writel(0x00, lchip->base + LOCOMO_BACKLIGHT + LOCOMO_TC); + + if ((locomo_readl(lchip->base + LOCOMO_LED + LOCOMO_LPT0) & 0x88) && (locomo_readl(lchip->base + LOCOMO_LED + LOCOMO_LPT1) & 0x88)) + locomo_writel(0x00, lchip->base + LOCOMO_C32K); /* CLK32 off */ + else + /* 18MHz already enabled, so no wait */ + locomo_writel(0xc1, lchip->base + LOCOMO_C32K); /* CLK32 on */ + + locomo_writel(0x00, lchip->base + LOCOMO_TADC); /* 18MHz clock off*/ + locomo_writel(0x00, lchip->base + LOCOMO_AUDIO + LOCOMO_ACC); /* 22MHz/24MHz clock off */ + locomo_writel(0x00, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALS); /* FL */ + + spin_unlock_irqrestore(&lchip->lock, flags); + + return 0; +} + +static int locomo_resume(struct platform_device *dev) +{ + struct locomo *lchip = platform_get_drvdata(dev); + struct locomo_save_data *save; + unsigned long r; + unsigned long flags; + + save = lchip->saved_state; + if (!save) + return 0; + + spin_lock_irqsave(&lchip->lock, flags); + + locomo_writel(save->LCM_GPO, lchip->base + LOCOMO_GPO); + locomo_writel(save->LCM_SPICT, lchip->base + LOCOMO_SPI + LOCOMO_SPICT); + locomo_writel(save->LCM_GPE, lchip->base + LOCOMO_GPE); + locomo_writel(save->LCM_ASD, lchip->base + LOCOMO_ASD); + locomo_writel(save->LCM_SPIMD, lchip->base + LOCOMO_SPI + LOCOMO_SPIMD); + + locomo_writel(0x00, lchip->base + LOCOMO_C32K); + locomo_writel(0x90, lchip->base + LOCOMO_TADC); + + locomo_writel(0, lchip->base + LOCOMO_KEYBOARD + LOCOMO_KSC); + r = locomo_readl(lchip->base + LOCOMO_KEYBOARD + LOCOMO_KIC); + r &= 0xFEFF; + locomo_writel(r, lchip->base + LOCOMO_KEYBOARD + LOCOMO_KIC); + 
locomo_writel(0x1, lchip->base + LOCOMO_KEYBOARD + LOCOMO_KCMD); + + spin_unlock_irqrestore(&lchip->lock, flags); + + lchip->saved_state = NULL; + kfree(save); + + return 0; +} +#endif + + +/** + * locomo_probe - probe for a single LoCoMo chip. + * @phys_addr: physical address of device. + * + * Probe for a LoCoMo chip. This must be called + * before any other locomo-specific code. + * + * Returns: + * %-ENODEV device not found. + * %-EBUSY physical address already marked in-use. + * %0 successful. + */ +static int +__locomo_probe(struct device *me, struct resource *mem, int irq) +{ + struct locomo_platform_data *pdata = me->platform_data; + struct locomo *lchip; + unsigned long r; + int i, ret = -ENODEV; + + lchip = kzalloc(sizeof(struct locomo), GFP_KERNEL); + if (!lchip) + return -ENOMEM; + + spin_lock_init(&lchip->lock); + + lchip->dev = me; + dev_set_drvdata(lchip->dev, lchip); + + lchip->phys = mem->start; + lchip->irq = irq; + lchip->irq_base = (pdata) ? pdata->irq_base : NO_IRQ; + + /* + * Map the whole region. This also maps the + * registers for our children. + */ + lchip->base = ioremap(mem->start, PAGE_SIZE); + if (!lchip->base) { + ret = -ENOMEM; + goto out; + } + + /* locomo initialize */ + locomo_writel(0, lchip->base + LOCOMO_ICR); + /* KEYBOARD */ + locomo_writel(0, lchip->base + LOCOMO_KEYBOARD + LOCOMO_KIC); + + /* GPIO */ + locomo_writel(0, lchip->base + LOCOMO_GPO); + locomo_writel((LOCOMO_GPIO(1) | LOCOMO_GPIO(2) | LOCOMO_GPIO(13) | LOCOMO_GPIO(14)) + , lchip->base + LOCOMO_GPE); + locomo_writel((LOCOMO_GPIO(1) | LOCOMO_GPIO(2) | LOCOMO_GPIO(13) | LOCOMO_GPIO(14)) + , lchip->base + LOCOMO_GPD); + locomo_writel(0, lchip->base + LOCOMO_GIE); + + /* Frontlight */ + locomo_writel(0, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALS); + locomo_writel(0, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALD); + + /* Longtime timer */ + locomo_writel(0, lchip->base + LOCOMO_LTINT); + /* SPI */ + locomo_writel(0, lchip->base + LOCOMO_SPI + LOCOMO_SPIIE); + + locomo_writel(6 + 8 + 320 + 30 - 10, lchip->base + LOCOMO_ASD); + r = locomo_readl(lchip->base + LOCOMO_ASD); + r |= 0x8000; + locomo_writel(r, lchip->base + LOCOMO_ASD); + + locomo_writel(6 + 8 + 320 + 30 - 10 - 128 + 4, lchip->base + LOCOMO_HSD); + r = locomo_readl(lchip->base + LOCOMO_HSD); + r |= 0x8000; + locomo_writel(r, lchip->base + LOCOMO_HSD); + + locomo_writel(128 / 8, lchip->base + LOCOMO_HSC); + + /* XON */ + locomo_writel(0x80, lchip->base + LOCOMO_TADC); + udelay(1000); + /* CLK9MEN */ + r = locomo_readl(lchip->base + LOCOMO_TADC); + r |= 0x10; + locomo_writel(r, lchip->base + LOCOMO_TADC); + udelay(100); + + /* init DAC */ + r = locomo_readl(lchip->base + LOCOMO_DAC); + r |= LOCOMO_DAC_SCLOEB | LOCOMO_DAC_SDAOEB; + locomo_writel(r, lchip->base + LOCOMO_DAC); + + r = locomo_readl(lchip->base + LOCOMO_VER); + printk(KERN_INFO "LoCoMo Chip: %lu%lu\n", (r >> 8), (r & 0xff)); + + /* + * The interrupt controller must be initialised before any + * other device to ensure that the interrupts are available. 
+ */ + if (lchip->irq != NO_IRQ && lchip->irq_base != NO_IRQ) + locomo_setup_irq(lchip); + + for (i = 0; i < ARRAY_SIZE(locomo_devices); i++) + locomo_init_one_child(lchip, &locomo_devices[i]); + return 0; + + out: + kfree(lchip); + return ret; +} + +static int locomo_remove_child(struct device *dev, void *data) +{ + device_unregister(dev); + return 0; +} + +static void __locomo_remove(struct locomo *lchip) +{ + device_for_each_child(lchip->dev, NULL, locomo_remove_child); + + if (lchip->irq != NO_IRQ) { + irq_set_chained_handler(lchip->irq, NULL); + irq_set_handler_data(lchip->irq, NULL); + } + + iounmap(lchip->base); + kfree(lchip); +} + +static int locomo_probe(struct platform_device *dev) +{ + struct resource *mem; + int irq; + + mem = platform_get_resource(dev, IORESOURCE_MEM, 0); + if (!mem) + return -EINVAL; + irq = platform_get_irq(dev, 0); + if (irq < 0) + return -ENXIO; + + return __locomo_probe(&dev->dev, mem, irq); +} + +static int locomo_remove(struct platform_device *dev) +{ + struct locomo *lchip = platform_get_drvdata(dev); + + if (lchip) { + __locomo_remove(lchip); + platform_set_drvdata(dev, NULL); + } + + return 0; +} + +/* + * Not sure if this should be on the system bus or not yet. + * We really want some way to register a system device at + * the per-machine level, and then have this driver pick + * up the registered devices. + */ +static struct platform_driver locomo_device_driver = { + .probe = locomo_probe, + .remove = locomo_remove, +#ifdef CONFIG_PM + .suspend = locomo_suspend, + .resume = locomo_resume, +#endif + .driver = { + .name = "locomo", + }, +}; + +/* + * Get the parent device driver (us) structure + * from a child function device + */ +static inline struct locomo *locomo_chip_driver(struct locomo_dev *ldev) +{ + return (struct locomo *)dev_get_drvdata(ldev->dev.parent); +} + +void locomo_gpio_set_dir(struct device *dev, unsigned int bits, unsigned int dir) +{ + struct locomo *lchip = dev_get_drvdata(dev); + unsigned long flags; + unsigned int r; + + if (!lchip) + return; + + spin_lock_irqsave(&lchip->lock, flags); + + r = locomo_readl(lchip->base + LOCOMO_GPD); + if (dir) + r |= bits; + else + r &= ~bits; + locomo_writel(r, lchip->base + LOCOMO_GPD); + + r = locomo_readl(lchip->base + LOCOMO_GPE); + if (dir) + r |= bits; + else + r &= ~bits; + locomo_writel(r, lchip->base + LOCOMO_GPE); + + spin_unlock_irqrestore(&lchip->lock, flags); +} +EXPORT_SYMBOL(locomo_gpio_set_dir); + +int locomo_gpio_read_level(struct device *dev, unsigned int bits) +{ + struct locomo *lchip = dev_get_drvdata(dev); + unsigned long flags; + unsigned int ret; + + if (!lchip) + return -ENODEV; + + spin_lock_irqsave(&lchip->lock, flags); + ret = locomo_readl(lchip->base + LOCOMO_GPL); + spin_unlock_irqrestore(&lchip->lock, flags); + + ret &= bits; + return ret; +} +EXPORT_SYMBOL(locomo_gpio_read_level); + +int locomo_gpio_read_output(struct device *dev, unsigned int bits) +{ + struct locomo *lchip = dev_get_drvdata(dev); + unsigned long flags; + unsigned int ret; + + if (!lchip) + return -ENODEV; + + spin_lock_irqsave(&lchip->lock, flags); + ret = locomo_readl(lchip->base + LOCOMO_GPO); + spin_unlock_irqrestore(&lchip->lock, flags); + + ret &= bits; + return ret; +} +EXPORT_SYMBOL(locomo_gpio_read_output); + +void locomo_gpio_write(struct device *dev, unsigned int bits, unsigned int set) +{ + struct locomo *lchip = dev_get_drvdata(dev); + unsigned long flags; + unsigned int r; + + if (!lchip) + return; + + spin_lock_irqsave(&lchip->lock, flags); + + r = locomo_readl(lchip->base + 
LOCOMO_GPO); + if (set) + r |= bits; + else + r &= ~bits; + locomo_writel(r, lchip->base + LOCOMO_GPO); + + spin_unlock_irqrestore(&lchip->lock, flags); +} +EXPORT_SYMBOL(locomo_gpio_write); + +static void locomo_m62332_sendbit(void *mapbase, int bit) +{ + unsigned int r; + + r = locomo_readl(mapbase + LOCOMO_DAC); + r &= ~(LOCOMO_DAC_SCLOEB); + locomo_writel(r, mapbase + LOCOMO_DAC); + udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ + udelay(DAC_DATA_HOLD_TIME); /* 300 nsec */ + r = locomo_readl(mapbase + LOCOMO_DAC); + r &= ~(LOCOMO_DAC_SCLOEB); + locomo_writel(r, mapbase + LOCOMO_DAC); + udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ + udelay(DAC_SCL_LOW_HOLD_TIME); /* 4.7 usec */ + + if (bit & 1) { + r = locomo_readl(mapbase + LOCOMO_DAC); + r |= LOCOMO_DAC_SDAOEB; + locomo_writel(r, mapbase + LOCOMO_DAC); + udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */ + } else { + r = locomo_readl(mapbase + LOCOMO_DAC); + r &= ~(LOCOMO_DAC_SDAOEB); + locomo_writel(r, mapbase + LOCOMO_DAC); + udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ + } + + udelay(DAC_DATA_SETUP_TIME); /* 250 nsec */ + r = locomo_readl(mapbase + LOCOMO_DAC); + r |= LOCOMO_DAC_SCLOEB; + locomo_writel(r, mapbase + LOCOMO_DAC); + udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */ + udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4.0 usec */ +} + +void locomo_m62332_senddata(struct locomo_dev *ldev, unsigned int dac_data, int channel) +{ + struct locomo *lchip = locomo_chip_driver(ldev); + int i; + unsigned char data; + unsigned int r; + void *mapbase = lchip->base; + unsigned long flags; + + spin_lock_irqsave(&lchip->lock, flags); + + /* Start */ + udelay(DAC_BUS_FREE_TIME); /* 5.0 usec */ + r = locomo_readl(mapbase + LOCOMO_DAC); + r |= LOCOMO_DAC_SCLOEB | LOCOMO_DAC_SDAOEB; + locomo_writel(r, mapbase + LOCOMO_DAC); + udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */ + udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4.0 usec */ + r = locomo_readl(mapbase + LOCOMO_DAC); + r &= ~(LOCOMO_DAC_SDAOEB); + locomo_writel(r, mapbase + LOCOMO_DAC); + udelay(DAC_START_HOLD_TIME); /* 5.0 usec */ + udelay(DAC_DATA_HOLD_TIME); /* 300 nsec */ + + /* Send slave address and W bit (LSB is W bit) */ + data = (M62332_SLAVE_ADDR << 1) | M62332_W_BIT; + for (i = 1; i <= 8; i++) { + locomo_m62332_sendbit(mapbase, data >> (8 - i)); + } + + /* Check A bit */ + r = locomo_readl(mapbase + LOCOMO_DAC); + r &= ~(LOCOMO_DAC_SCLOEB); + locomo_writel(r, mapbase + LOCOMO_DAC); + udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ + udelay(DAC_SCL_LOW_HOLD_TIME); /* 4.7 usec */ + r = locomo_readl(mapbase + LOCOMO_DAC); + r &= ~(LOCOMO_DAC_SDAOEB); + locomo_writel(r, mapbase + LOCOMO_DAC); + udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ + r = locomo_readl(mapbase + LOCOMO_DAC); + r |= LOCOMO_DAC_SCLOEB; + locomo_writel(r, mapbase + LOCOMO_DAC); + udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */ + udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4.7 usec */ + if (locomo_readl(mapbase + LOCOMO_DAC) & LOCOMO_DAC_SDAOEB) { /* High is error */ + printk(KERN_WARNING "locomo: m62332_senddata Error 1\n"); + goto out; + } + + /* Send Sub address (LSB is channel select) */ + /* channel = 0 : ch1 select */ + /* = 1 : ch2 select */ + data = M62332_SUB_ADDR + channel; + for (i = 1; i <= 8; i++) { + locomo_m62332_sendbit(mapbase, data >> (8 - i)); + } + + /* Check A bit */ + r = locomo_readl(mapbase + LOCOMO_DAC); + r &= ~(LOCOMO_DAC_SCLOEB); + locomo_writel(r, mapbase + LOCOMO_DAC); + udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ + udelay(DAC_SCL_LOW_HOLD_TIME); /* 4.7 usec */ + r = locomo_readl(mapbase + LOCOMO_DAC); + r &= ~(LOCOMO_DAC_SDAOEB); + 
locomo_writel(r, mapbase + LOCOMO_DAC); + udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ + r = locomo_readl(mapbase + LOCOMO_DAC); + r |= LOCOMO_DAC_SCLOEB; + locomo_writel(r, mapbase + LOCOMO_DAC); + udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */ + udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4.7 usec */ + if (locomo_readl(mapbase + LOCOMO_DAC) & LOCOMO_DAC_SDAOEB) { /* High is error */ + printk(KERN_WARNING "locomo: m62332_senddata Error 2\n"); + goto out; + } + + /* Send DAC data */ + for (i = 1; i <= 8; i++) { + locomo_m62332_sendbit(mapbase, dac_data >> (8 - i)); + } + + /* Check A bit */ + r = locomo_readl(mapbase + LOCOMO_DAC); + r &= ~(LOCOMO_DAC_SCLOEB); + locomo_writel(r, mapbase + LOCOMO_DAC); + udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ + udelay(DAC_SCL_LOW_HOLD_TIME); /* 4.7 usec */ + r = locomo_readl(mapbase + LOCOMO_DAC); + r &= ~(LOCOMO_DAC_SDAOEB); + locomo_writel(r, mapbase + LOCOMO_DAC); + udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ + r = locomo_readl(mapbase + LOCOMO_DAC); + r |= LOCOMO_DAC_SCLOEB; + locomo_writel(r, mapbase + LOCOMO_DAC); + udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */ + udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4.7 usec */ + if (locomo_readl(mapbase + LOCOMO_DAC) & LOCOMO_DAC_SDAOEB) { /* High is error */ + printk(KERN_WARNING "locomo: m62332_senddata Error 3\n"); + } + +out: + /* stop */ + r = locomo_readl(mapbase + LOCOMO_DAC); + r &= ~(LOCOMO_DAC_SCLOEB); + locomo_writel(r, mapbase + LOCOMO_DAC); + udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ + udelay(DAC_SCL_LOW_HOLD_TIME); /* 4.7 usec */ + r = locomo_readl(mapbase + LOCOMO_DAC); + r |= LOCOMO_DAC_SCLOEB; + locomo_writel(r, mapbase + LOCOMO_DAC); + udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */ + udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4 usec */ + r = locomo_readl(mapbase + LOCOMO_DAC); + r |= LOCOMO_DAC_SDAOEB; + locomo_writel(r, mapbase + LOCOMO_DAC); + udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */ + udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4 usec */ + + r = locomo_readl(mapbase + LOCOMO_DAC); + r |= LOCOMO_DAC_SCLOEB | LOCOMO_DAC_SDAOEB; + locomo_writel(r, mapbase + LOCOMO_DAC); + udelay(DAC_LOW_SETUP_TIME); /* 1000 nsec */ + udelay(DAC_SCL_LOW_HOLD_TIME); /* 4.7 usec */ + + spin_unlock_irqrestore(&lchip->lock, flags); +} +EXPORT_SYMBOL(locomo_m62332_senddata); + +/* + * Frontlight control + */ + +void locomo_frontlight_set(struct locomo_dev *dev, int duty, int vr, int bpwf) +{ + unsigned long flags; + struct locomo *lchip = locomo_chip_driver(dev); + + if (vr) + locomo_gpio_write(dev->dev.parent, LOCOMO_GPIO_FL_VR, 1); + else + locomo_gpio_write(dev->dev.parent, LOCOMO_GPIO_FL_VR, 0); + + spin_lock_irqsave(&lchip->lock, flags); + locomo_writel(bpwf, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALS); + udelay(100); + locomo_writel(duty, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALD); + locomo_writel(bpwf | LOCOMO_ALC_EN, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALS); + spin_unlock_irqrestore(&lchip->lock, flags); +} +EXPORT_SYMBOL(locomo_frontlight_set); + +/* + * LoCoMo "Register Access Bus." + * + * We model this as a regular bus type, and hang devices directly + * off this. 
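+ *
+ * As a usage sketch (driver name and callbacks invented for
+ * illustration; the devid constants come from <asm/hardware/locomo.h>),
+ * a child driver binds by devid equality and registers like so:
+ *
+ *	static struct locomo_driver example_driver = {
+ *		.drv	= { .name = "locomo-example" },
+ *		.devid	= LOCOMO_DEVID_KEYBOARD,
+ *		.probe	= example_probe,
+ *	};
+ *
+ *	locomo_driver_register(&example_driver);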
+ */ +static int locomo_match(struct device *_dev, struct device_driver *_drv) +{ + struct locomo_dev *dev = LOCOMO_DEV(_dev); + struct locomo_driver *drv = LOCOMO_DRV(_drv); + + return dev->devid == drv->devid; +} + +static int locomo_bus_suspend(struct device *dev, pm_message_t state) +{ + struct locomo_dev *ldev = LOCOMO_DEV(dev); + struct locomo_driver *drv = LOCOMO_DRV(dev->driver); + int ret = 0; + + if (drv && drv->suspend) + ret = drv->suspend(ldev, state); + return ret; +} + +static int locomo_bus_resume(struct device *dev) +{ + struct locomo_dev *ldev = LOCOMO_DEV(dev); + struct locomo_driver *drv = LOCOMO_DRV(dev->driver); + int ret = 0; + + if (drv && drv->resume) + ret = drv->resume(ldev); + return ret; +} + +static int locomo_bus_probe(struct device *dev) +{ + struct locomo_dev *ldev = LOCOMO_DEV(dev); + struct locomo_driver *drv = LOCOMO_DRV(dev->driver); + int ret = -ENODEV; + + if (drv->probe) + ret = drv->probe(ldev); + return ret; +} + +static int locomo_bus_remove(struct device *dev) +{ + struct locomo_dev *ldev = LOCOMO_DEV(dev); + struct locomo_driver *drv = LOCOMO_DRV(dev->driver); + int ret = 0; + + if (drv->remove) + ret = drv->remove(ldev); + return ret; +} + +struct bus_type locomo_bus_type = { + .name = "locomo-bus", + .match = locomo_match, + .probe = locomo_bus_probe, + .remove = locomo_bus_remove, + .suspend = locomo_bus_suspend, + .resume = locomo_bus_resume, +}; + +int locomo_driver_register(struct locomo_driver *driver) +{ + driver->drv.bus = &locomo_bus_type; + return driver_register(&driver->drv); +} +EXPORT_SYMBOL(locomo_driver_register); + +void locomo_driver_unregister(struct locomo_driver *driver) +{ + driver_unregister(&driver->drv); +} +EXPORT_SYMBOL(locomo_driver_unregister); + +static int __init locomo_init(void) +{ + int ret = bus_register(&locomo_bus_type); + if (ret == 0) + platform_driver_register(&locomo_device_driver); + return ret; +} + +static void __exit locomo_exit(void) +{ + platform_driver_unregister(&locomo_device_driver); + bus_unregister(&locomo_bus_type); +} + +module_init(locomo_init); +module_exit(locomo_exit); + +MODULE_DESCRIPTION("Sharp LoCoMo core driver"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("John Lenz <lenz@cs.wisc.edu>"); diff --git a/ANDROID_3.4.5/arch/arm/common/sa1111.c b/ANDROID_3.4.5/arch/arm/common/sa1111.c new file mode 100644 index 00000000..9173d112 --- /dev/null +++ b/ANDROID_3.4.5/arch/arm/common/sa1111.c @@ -0,0 +1,1459 @@ +/* + * linux/arch/arm/common/sa1111.c + * + * SA1111 support + * + * Original code by John Dorsey + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This file contains all generic SA1111 support. + * + * All initialization functions provided here are intended to be called + * from machine specific code with proper arguments when required. 
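+ *
+ * For orientation, such machine code usually hands us our register
+ * window and interrupt through a platform device named "sa1111".
+ * A rough sketch (addresses and names are placeholders, not values
+ * from any real board):
+ *
+ *	static struct resource board_sa1111_resources[] = {
+ *		DEFINE_RES_MEM(0x40000000, SZ_8K),
+ *		DEFINE_RES_IRQ(BOARD_IRQ_SA1111),
+ *	};
+ *
+ *	static struct platform_device board_sa1111_device = {
+ *		.name		= "sa1111",
+ *		.id		= 0,
+ *		.num_resources	= ARRAY_SIZE(board_sa1111_resources),
+ *		.resource	= board_sa1111_resources,
+ *	};
+ *
+ * plus a struct sa1111_platform_data (with at least irq_base set) in
+ * dev.platform_data, since the probe below refuses to run without it.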
+ */ +#include <linux/module.h> +#include <linux/init.h> +#include <linux/irq.h> +#include <linux/kernel.h> +#include <linux/delay.h> +#include <linux/errno.h> +#include <linux/ioport.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/dma-mapping.h> +#include <linux/clk.h> +#include <linux/io.h> + +#include <mach/hardware.h> +#include <asm/mach/irq.h> +#include <asm/mach-types.h> +#include <asm/sizes.h> + +#include <asm/hardware/sa1111.h> + +/* SA1111 IRQs */ +#define IRQ_GPAIN0 (0) +#define IRQ_GPAIN1 (1) +#define IRQ_GPAIN2 (2) +#define IRQ_GPAIN3 (3) +#define IRQ_GPBIN0 (4) +#define IRQ_GPBIN1 (5) +#define IRQ_GPBIN2 (6) +#define IRQ_GPBIN3 (7) +#define IRQ_GPBIN4 (8) +#define IRQ_GPBIN5 (9) +#define IRQ_GPCIN0 (10) +#define IRQ_GPCIN1 (11) +#define IRQ_GPCIN2 (12) +#define IRQ_GPCIN3 (13) +#define IRQ_GPCIN4 (14) +#define IRQ_GPCIN5 (15) +#define IRQ_GPCIN6 (16) +#define IRQ_GPCIN7 (17) +#define IRQ_MSTXINT (18) +#define IRQ_MSRXINT (19) +#define IRQ_MSSTOPERRINT (20) +#define IRQ_TPTXINT (21) +#define IRQ_TPRXINT (22) +#define IRQ_TPSTOPERRINT (23) +#define SSPXMTINT (24) +#define SSPRCVINT (25) +#define SSPROR (26) +#define AUDXMTDMADONEA (32) +#define AUDRCVDMADONEA (33) +#define AUDXMTDMADONEB (34) +#define AUDRCVDMADONEB (35) +#define AUDTFSR (36) +#define AUDRFSR (37) +#define AUDTUR (38) +#define AUDROR (39) +#define AUDDTS (40) +#define AUDRDD (41) +#define AUDSTO (42) +#define IRQ_USBPWR (43) +#define IRQ_HCIM (44) +#define IRQ_HCIBUFFACC (45) +#define IRQ_HCIRMTWKP (46) +#define IRQ_NHCIMFCIR (47) +#define IRQ_USB_PORT_RESUME (48) +#define IRQ_S0_READY_NINT (49) +#define IRQ_S1_READY_NINT (50) +#define IRQ_S0_CD_VALID (51) +#define IRQ_S1_CD_VALID (52) +#define IRQ_S0_BVD1_STSCHG (53) +#define IRQ_S1_BVD1_STSCHG (54) +#define SA1111_IRQ_NR (55) + +extern void sa1110_mb_enable(void); +extern void sa1110_mb_disable(void); + +/* + * We keep the following data for the overall SA1111. Note that the + * struct device and struct resource are "fake"; they should be supplied + * by the bus above us. However, in the interests of getting all SA1111 + * drivers converted over to the device model, we provide this as an + * anchor point for all the other drivers. + */ +struct sa1111 { + struct device *dev; + struct clk *clk; + unsigned long phys; + int irq; + int irq_base; /* base for cascaded on-chip IRQs */ + spinlock_t lock; + void __iomem *base; + struct sa1111_platform_data *pdata; +#ifdef CONFIG_PM + void *saved_state; +#endif +}; + +/* + * We _really_ need to eliminate this. Its only users + * are the PWM and DMA checking code. 
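+ *
+ * Note that this also quietly limits us to a single SA1111 instance
+ * per system: a second successful probe would simply overwrite the
+ * pointer.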
+ */ +static struct sa1111 *g_sa1111; + +struct sa1111_dev_info { + unsigned long offset; + unsigned long skpcr_mask; + bool dma; + unsigned int devid; + unsigned int irq[6]; +}; + +static struct sa1111_dev_info sa1111_devices[] = { + { + .offset = SA1111_USB, + .skpcr_mask = SKPCR_UCLKEN, + .dma = true, + .devid = SA1111_DEVID_USB, + .irq = { + IRQ_USBPWR, + IRQ_HCIM, + IRQ_HCIBUFFACC, + IRQ_HCIRMTWKP, + IRQ_NHCIMFCIR, + IRQ_USB_PORT_RESUME + }, + }, + { + .offset = 0x0600, + .skpcr_mask = SKPCR_I2SCLKEN | SKPCR_L3CLKEN, + .dma = true, + .devid = SA1111_DEVID_SAC, + .irq = { + AUDXMTDMADONEA, + AUDXMTDMADONEB, + AUDRCVDMADONEA, + AUDRCVDMADONEB + }, + }, + { + .offset = 0x0800, + .skpcr_mask = SKPCR_SCLKEN, + .devid = SA1111_DEVID_SSP, + }, + { + .offset = SA1111_KBD, + .skpcr_mask = SKPCR_PTCLKEN, + .devid = SA1111_DEVID_PS2_KBD, + .irq = { + IRQ_TPRXINT, + IRQ_TPTXINT + }, + }, + { + .offset = SA1111_MSE, + .skpcr_mask = SKPCR_PMCLKEN, + .devid = SA1111_DEVID_PS2_MSE, + .irq = { + IRQ_MSRXINT, + IRQ_MSTXINT + }, + }, + { + .offset = 0x1800, + .skpcr_mask = 0, + .devid = SA1111_DEVID_PCMCIA, + .irq = { + IRQ_S0_READY_NINT, + IRQ_S0_CD_VALID, + IRQ_S0_BVD1_STSCHG, + IRQ_S1_READY_NINT, + IRQ_S1_CD_VALID, + IRQ_S1_BVD1_STSCHG, + }, + }, +}; + +/* + * SA1111 interrupt support. Since clearing an IRQ while there are + * active IRQs causes the interrupt output to pulse, the upper levels + * will call us again if there are more interrupts to process. + */ +static void +sa1111_irq_handler(unsigned int irq, struct irq_desc *desc) +{ + unsigned int stat0, stat1, i; + struct sa1111 *sachip = irq_get_handler_data(irq); + void __iomem *mapbase = sachip->base + SA1111_INTC; + + stat0 = sa1111_readl(mapbase + SA1111_INTSTATCLR0); + stat1 = sa1111_readl(mapbase + SA1111_INTSTATCLR1); + + sa1111_writel(stat0, mapbase + SA1111_INTSTATCLR0); + + desc->irq_data.chip->irq_ack(&desc->irq_data); + + sa1111_writel(stat1, mapbase + SA1111_INTSTATCLR1); + + if (stat0 == 0 && stat1 == 0) { + do_bad_IRQ(irq, desc); + return; + } + + for (i = 0; stat0; i++, stat0 >>= 1) + if (stat0 & 1) + generic_handle_irq(i + sachip->irq_base); + + for (i = 32; stat1; i++, stat1 >>= 1) + if (stat1 & 1) + generic_handle_irq(i + sachip->irq_base); + + /* For level-based interrupts */ + desc->irq_data.chip->irq_unmask(&desc->irq_data); +} + +#define SA1111_IRQMASK_LO(x) (1 << (x - sachip->irq_base)) +#define SA1111_IRQMASK_HI(x) (1 << (x - sachip->irq_base - 32)) + +static void sa1111_ack_irq(struct irq_data *d) +{ +} + +static void sa1111_mask_lowirq(struct irq_data *d) +{ + struct sa1111 *sachip = irq_data_get_irq_chip_data(d); + void __iomem *mapbase = sachip->base + SA1111_INTC; + unsigned long ie0; + + ie0 = sa1111_readl(mapbase + SA1111_INTEN0); + ie0 &= ~SA1111_IRQMASK_LO(d->irq); + writel(ie0, mapbase + SA1111_INTEN0); +} + +static void sa1111_unmask_lowirq(struct irq_data *d) +{ + struct sa1111 *sachip = irq_data_get_irq_chip_data(d); + void __iomem *mapbase = sachip->base + SA1111_INTC; + unsigned long ie0; + + ie0 = sa1111_readl(mapbase + SA1111_INTEN0); + ie0 |= SA1111_IRQMASK_LO(d->irq); + sa1111_writel(ie0, mapbase + SA1111_INTEN0); +} + +/* + * Attempt to re-trigger the interrupt. The SA1111 contains a register + * (INTSET) which claims to do this. However, in practice no amount of + * manipulation of INTEN and INTSET guarantees that the interrupt will + * be triggered. In fact, its very difficult, if not impossible to get + * INTSET to re-trigger the interrupt. 
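+ *
+ * What the retrigger handler below does instead is briefly invert the
+ * pin's polarity in INTPOL0 so the pending level is seen as a fresh
+ * edge, retrying up to eight times before giving up.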
+ */ +static int sa1111_retrigger_lowirq(struct irq_data *d) +{ + struct sa1111 *sachip = irq_data_get_irq_chip_data(d); + void __iomem *mapbase = sachip->base + SA1111_INTC; + unsigned int mask = SA1111_IRQMASK_LO(d->irq); + unsigned long ip0; + int i; + + ip0 = sa1111_readl(mapbase + SA1111_INTPOL0); + for (i = 0; i < 8; i++) { + sa1111_writel(ip0 ^ mask, mapbase + SA1111_INTPOL0); + sa1111_writel(ip0, mapbase + SA1111_INTPOL0); + if (sa1111_readl(mapbase + SA1111_INTSTATCLR0) & mask) + break; + } + + if (i == 8) + printk(KERN_ERR "Danger Will Robinson: failed to " + "re-trigger IRQ%d\n", d->irq); + return i == 8 ? -1 : 0; +} + +static int sa1111_type_lowirq(struct irq_data *d, unsigned int flags) +{ + struct sa1111 *sachip = irq_data_get_irq_chip_data(d); + void __iomem *mapbase = sachip->base + SA1111_INTC; + unsigned int mask = SA1111_IRQMASK_LO(d->irq); + unsigned long ip0; + + if (flags == IRQ_TYPE_PROBE) + return 0; + + if ((!(flags & IRQ_TYPE_EDGE_RISING) ^ !(flags & IRQ_TYPE_EDGE_FALLING)) == 0) + return -EINVAL; + + ip0 = sa1111_readl(mapbase + SA1111_INTPOL0); + if (flags & IRQ_TYPE_EDGE_RISING) + ip0 &= ~mask; + else + ip0 |= mask; + sa1111_writel(ip0, mapbase + SA1111_INTPOL0); + sa1111_writel(ip0, mapbase + SA1111_WAKEPOL0); + + return 0; +} + +static int sa1111_wake_lowirq(struct irq_data *d, unsigned int on) +{ + struct sa1111 *sachip = irq_data_get_irq_chip_data(d); + void __iomem *mapbase = sachip->base + SA1111_INTC; + unsigned int mask = SA1111_IRQMASK_LO(d->irq); + unsigned long we0; + + we0 = sa1111_readl(mapbase + SA1111_WAKEEN0); + if (on) + we0 |= mask; + else + we0 &= ~mask; + sa1111_writel(we0, mapbase + SA1111_WAKEEN0); + + return 0; +} + +static struct irq_chip sa1111_low_chip = { + .name = "SA1111-l", + .irq_ack = sa1111_ack_irq, + .irq_mask = sa1111_mask_lowirq, + .irq_unmask = sa1111_unmask_lowirq, + .irq_retrigger = sa1111_retrigger_lowirq, + .irq_set_type = sa1111_type_lowirq, + .irq_set_wake = sa1111_wake_lowirq, +}; + +static void sa1111_mask_highirq(struct irq_data *d) +{ + struct sa1111 *sachip = irq_data_get_irq_chip_data(d); + void __iomem *mapbase = sachip->base + SA1111_INTC; + unsigned long ie1; + + ie1 = sa1111_readl(mapbase + SA1111_INTEN1); + ie1 &= ~SA1111_IRQMASK_HI(d->irq); + sa1111_writel(ie1, mapbase + SA1111_INTEN1); +} + +static void sa1111_unmask_highirq(struct irq_data *d) +{ + struct sa1111 *sachip = irq_data_get_irq_chip_data(d); + void __iomem *mapbase = sachip->base + SA1111_INTC; + unsigned long ie1; + + ie1 = sa1111_readl(mapbase + SA1111_INTEN1); + ie1 |= SA1111_IRQMASK_HI(d->irq); + sa1111_writel(ie1, mapbase + SA1111_INTEN1); +} + +/* + * Attempt to re-trigger the interrupt. The SA1111 contains a register + * (INTSET) which claims to do this. However, in practice no amount of + * manipulation of INTEN and INTSET guarantees that the interrupt will + * be triggered. In fact, its very difficult, if not impossible to get + * INTSET to re-trigger the interrupt. 
+ */ +static int sa1111_retrigger_highirq(struct irq_data *d) +{ + struct sa1111 *sachip = irq_data_get_irq_chip_data(d); + void __iomem *mapbase = sachip->base + SA1111_INTC; + unsigned int mask = SA1111_IRQMASK_HI(d->irq); + unsigned long ip1; + int i; + + ip1 = sa1111_readl(mapbase + SA1111_INTPOL1); + for (i = 0; i < 8; i++) { + sa1111_writel(ip1 ^ mask, mapbase + SA1111_INTPOL1); + sa1111_writel(ip1, mapbase + SA1111_INTPOL1); + if (sa1111_readl(mapbase + SA1111_INTSTATCLR1) & mask) + break; + } + + if (i == 8) + printk(KERN_ERR "Danger Will Robinson: failed to " + "re-trigger IRQ%d\n", d->irq); + return i == 8 ? -1 : 0; +} + +static int sa1111_type_highirq(struct irq_data *d, unsigned int flags) +{ + struct sa1111 *sachip = irq_data_get_irq_chip_data(d); + void __iomem *mapbase = sachip->base + SA1111_INTC; + unsigned int mask = SA1111_IRQMASK_HI(d->irq); + unsigned long ip1; + + if (flags == IRQ_TYPE_PROBE) + return 0; + + if ((!(flags & IRQ_TYPE_EDGE_RISING) ^ !(flags & IRQ_TYPE_EDGE_FALLING)) == 0) + return -EINVAL; + + ip1 = sa1111_readl(mapbase + SA1111_INTPOL1); + if (flags & IRQ_TYPE_EDGE_RISING) + ip1 &= ~mask; + else + ip1 |= mask; + sa1111_writel(ip1, mapbase + SA1111_INTPOL1); + sa1111_writel(ip1, mapbase + SA1111_WAKEPOL1); + + return 0; +} + +static int sa1111_wake_highirq(struct irq_data *d, unsigned int on) +{ + struct sa1111 *sachip = irq_data_get_irq_chip_data(d); + void __iomem *mapbase = sachip->base + SA1111_INTC; + unsigned int mask = SA1111_IRQMASK_HI(d->irq); + unsigned long we1; + + we1 = sa1111_readl(mapbase + SA1111_WAKEEN1); + if (on) + we1 |= mask; + else + we1 &= ~mask; + sa1111_writel(we1, mapbase + SA1111_WAKEEN1); + + return 0; +} + +static struct irq_chip sa1111_high_chip = { + .name = "SA1111-h", + .irq_ack = sa1111_ack_irq, + .irq_mask = sa1111_mask_highirq, + .irq_unmask = sa1111_unmask_highirq, + .irq_retrigger = sa1111_retrigger_highirq, + .irq_set_type = sa1111_type_highirq, + .irq_set_wake = sa1111_wake_highirq, +}; + +static int sa1111_setup_irq(struct sa1111 *sachip, unsigned irq_base) +{ + void __iomem *irqbase = sachip->base + SA1111_INTC; + unsigned i, irq; + int ret; + + /* + * We're guaranteed that this region hasn't been taken. + */ + request_mem_region(sachip->phys + SA1111_INTC, 512, "irq"); + + ret = irq_alloc_descs(-1, irq_base, SA1111_IRQ_NR, -1); + if (ret <= 0) { + dev_err(sachip->dev, "unable to allocate %u irqs: %d\n", + SA1111_IRQ_NR, ret); + if (ret == 0) + ret = -EINVAL; + return ret; + } + + sachip->irq_base = ret; + + /* disable all IRQs */ + sa1111_writel(0, irqbase + SA1111_INTEN0); + sa1111_writel(0, irqbase + SA1111_INTEN1); + sa1111_writel(0, irqbase + SA1111_WAKEEN0); + sa1111_writel(0, irqbase + SA1111_WAKEEN1); + + /* + * detect on rising edge. Note: Feb 2001 Errata for SA1111 + * specifies that S0ReadyInt and S1ReadyInt should be '1'. 
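+	 *
+	 * In other words, every source starts out rising-edge (polarity
+	 * bit clear) except the two card-ready lines, whose INTPOL1 bits
+	 * are set below per that errata.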
+ */
+	sa1111_writel(0, irqbase + SA1111_INTPOL0);
+	sa1111_writel(SA1111_IRQMASK_HI(IRQ_S0_READY_NINT) |
+		      SA1111_IRQMASK_HI(IRQ_S1_READY_NINT),
+		      irqbase + SA1111_INTPOL1);
+
+	/* clear all IRQs */
+	sa1111_writel(~0, irqbase + SA1111_INTSTATCLR0);
+	sa1111_writel(~0, irqbase + SA1111_INTSTATCLR1);
+
+	for (i = IRQ_GPAIN0; i <= SSPROR; i++) {
+		irq = sachip->irq_base + i;
+		irq_set_chip_and_handler(irq, &sa1111_low_chip,
+					 handle_edge_irq);
+		irq_set_chip_data(irq, sachip);
+		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+	}
+
+	for (i = AUDXMTDMADONEA; i <= IRQ_S1_BVD1_STSCHG; i++) {
+		irq = sachip->irq_base + i;
+		irq_set_chip_and_handler(irq, &sa1111_high_chip,
+					 handle_edge_irq);
+		irq_set_chip_data(irq, sachip);
+		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+	}
+
+	/*
+	 * Register SA1111 interrupt
+	 */
+	irq_set_irq_type(sachip->irq, IRQ_TYPE_EDGE_RISING);
+	irq_set_handler_data(sachip->irq, sachip);
+	irq_set_chained_handler(sachip->irq, sa1111_irq_handler);
+
+	dev_info(sachip->dev, "Providing IRQ%u-%u\n",
+		sachip->irq_base, sachip->irq_base + SA1111_IRQ_NR - 1);
+
+	return 0;
+}
+
+/*
+ * Bring the SA1111 out of reset. This requires a set procedure:
+ *  1. nRESET asserted (by hardware)
+ *  2. CLK turned on from SA1110
+ *  3. nRESET deasserted
+ *  4. VCO turned on, PLL_BYPASS turned off
+ *  5. Wait lock time, then assert RCLKEn
+ *  6. PCR set to allow clocking of individual functions
+ *
+ * Until we've done this, the only registers we can access are:
+ *   SBI_SKCR
+ *   SBI_SMCR
+ *   SBI_SKID
+ */
+static void sa1111_wake(struct sa1111 *sachip)
+{
+	unsigned long flags, r;
+
+	spin_lock_irqsave(&sachip->lock, flags);
+
+	clk_enable(sachip->clk);
+
+	/*
+	 * Turn VCO on, and disable PLL Bypass.
+	 */
+	r = sa1111_readl(sachip->base + SA1111_SKCR);
+	r &= ~SKCR_VCO_OFF;
+	sa1111_writel(r, sachip->base + SA1111_SKCR);
+	r |= SKCR_PLL_BYPASS | SKCR_OE_EN;
+	sa1111_writel(r, sachip->base + SA1111_SKCR);
+
+	/*
+	 * Wait lock time. SA1111 manual _doesn't_
+	 * specify a figure for this! We choose 100us.
+	 */
+	udelay(100);
+
+	/*
+	 * Enable RCLK. We also ensure that RDYEN is set.
+	 */
+	r |= SKCR_RCLKEN | SKCR_RDYEN;
+	sa1111_writel(r, sachip->base + SA1111_SKCR);
+
+	/*
+	 * Wait 14 RCLK cycles for the chip to finish coming out
+	 * of reset. (RCLK=24MHz). This is 590ns.
+	 */
+	udelay(1);
+
+	/*
+	 * Ensure all clocks are initially off.
+	 */
+	sa1111_writel(0, sachip->base + SA1111_SKPCR);
+
+	spin_unlock_irqrestore(&sachip->lock, flags);
+}
+
+#ifdef CONFIG_ARCH_SA1100
+
+static u32 sa1111_dma_mask[] = {
+	~0,
+	~(1 << 20),
+	~(1 << 23),
+	~(1 << 24),
+	~(1 << 25),
+	~(1 << 20),
+	~(1 << 20),
+	0,
+};
+
+/*
+ * Configure the SA1111 shared memory controller.
+ */
+void
+sa1111_configure_smc(struct sa1111 *sachip, int sdram, unsigned int drac,
+		     unsigned int cas_latency)
+{
+	unsigned int smcr = SMCR_DTIM | SMCR_MBGE | FInsrt(drac, SMCR_DRAC);
+
+	if (cas_latency == 3)
+		smcr |= SMCR_CLAT;
+
+	sa1111_writel(smcr, sachip->base + SA1111_SMCR);
+
+	/*
+	 * Now clear the bits in the DMA mask to work around the SA1111
+	 * DMA erratum (Intel StrongARM SA-1111 Microprocessor Companion
+	 * Chip Specification Update, June 2000, Erratum #7).
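+	 *
+	 * As a worked illustration (the value is not from any particular
+	 * board): a DRAC encoding with drac >> 2 == 2 picks table entry
+	 * ~(1 << 23), so after the masking below no DMA address may have
+	 * bit 23 set.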
+ */ + if (sachip->dev->dma_mask) + *sachip->dev->dma_mask &= sa1111_dma_mask[drac >> 2]; + + sachip->dev->coherent_dma_mask &= sa1111_dma_mask[drac >> 2]; +} +#endif + +static void sa1111_dev_release(struct device *_dev) +{ + struct sa1111_dev *dev = SA1111_DEV(_dev); + + kfree(dev); +} + +static int +sa1111_init_one_child(struct sa1111 *sachip, struct resource *parent, + struct sa1111_dev_info *info) +{ + struct sa1111_dev *dev; + unsigned i; + int ret; + + dev = kzalloc(sizeof(struct sa1111_dev), GFP_KERNEL); + if (!dev) { + ret = -ENOMEM; + goto err_alloc; + } + + device_initialize(&dev->dev); + dev_set_name(&dev->dev, "%4.4lx", info->offset); + dev->devid = info->devid; + dev->dev.parent = sachip->dev; + dev->dev.bus = &sa1111_bus_type; + dev->dev.release = sa1111_dev_release; + dev->res.start = sachip->phys + info->offset; + dev->res.end = dev->res.start + 511; + dev->res.name = dev_name(&dev->dev); + dev->res.flags = IORESOURCE_MEM; + dev->mapbase = sachip->base + info->offset; + dev->skpcr_mask = info->skpcr_mask; + + for (i = 0; i < ARRAY_SIZE(info->irq); i++) + dev->irq[i] = sachip->irq_base + info->irq[i]; + + /* + * If the parent device has a DMA mask associated with it, and + * this child supports DMA, propagate it down to the children. + */ + if (info->dma && sachip->dev->dma_mask) { + dev->dma_mask = *sachip->dev->dma_mask; + dev->dev.dma_mask = &dev->dma_mask; + dev->dev.coherent_dma_mask = sachip->dev->coherent_dma_mask; + } + + ret = request_resource(parent, &dev->res); + if (ret) { + dev_err(sachip->dev, "failed to allocate resource for %s\n", + dev->res.name); + goto err_resource; + } + + ret = device_add(&dev->dev); + if (ret) + goto err_add; + return 0; + + err_add: + release_resource(&dev->res); + err_resource: + put_device(&dev->dev); + err_alloc: + return ret; +} + +/** + * sa1111_probe - probe for a single SA1111 chip. + * @phys_addr: physical address of device. + * + * Probe for a SA1111 chip. This must be called + * before any other SA1111-specific code. + * + * Returns: + * %-ENODEV device not found. + * %-EBUSY physical address already marked in-use. + * %-EINVAL no platform data passed + * %0 successful. + */ +static int __devinit +__sa1111_probe(struct device *me, struct resource *mem, int irq) +{ + struct sa1111_platform_data *pd = me->platform_data; + struct sa1111 *sachip; + unsigned long id; + unsigned int has_devs; + int i, ret = -ENODEV; + + if (!pd) + return -EINVAL; + + sachip = kzalloc(sizeof(struct sa1111), GFP_KERNEL); + if (!sachip) + return -ENOMEM; + + sachip->clk = clk_get(me, "SA1111_CLK"); + if (IS_ERR(sachip->clk)) { + ret = PTR_ERR(sachip->clk); + goto err_free; + } + + ret = clk_prepare(sachip->clk); + if (ret) + goto err_clkput; + + spin_lock_init(&sachip->lock); + + sachip->dev = me; + dev_set_drvdata(sachip->dev, sachip); + + sachip->pdata = pd; + sachip->phys = mem->start; + sachip->irq = irq; + + /* + * Map the whole region. This also maps the + * registers for our children. + */ + sachip->base = ioremap(mem->start, PAGE_SIZE * 2); + if (!sachip->base) { + ret = -ENOMEM; + goto err_clk_unprep; + } + + /* + * Probe for the chip. Only touch the SBI registers. 
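+	 * (Per the reset notes above sa1111_wake(), only SBI_SKCR,
+	 * SBI_SMCR and SBI_SKID respond at this point, so the SKID read
+	 * below is the only safe way to detect the chip.)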
+ */ + id = sa1111_readl(sachip->base + SA1111_SKID); + if ((id & SKID_ID_MASK) != SKID_SA1111_ID) { + printk(KERN_DEBUG "SA1111 not detected: ID = %08lx\n", id); + ret = -ENODEV; + goto err_unmap; + } + + printk(KERN_INFO "SA1111 Microprocessor Companion Chip: " + "silicon revision %lx, metal revision %lx\n", + (id & SKID_SIREV_MASK)>>4, (id & SKID_MTREV_MASK)); + + /* + * We found it. Wake the chip up, and initialise. + */ + sa1111_wake(sachip); + + /* + * The interrupt controller must be initialised before any + * other device to ensure that the interrupts are available. + */ + if (sachip->irq != NO_IRQ) { + ret = sa1111_setup_irq(sachip, pd->irq_base); + if (ret) + goto err_unmap; + } + +#ifdef CONFIG_ARCH_SA1100 + { + unsigned int val; + + /* + * The SDRAM configuration of the SA1110 and the SA1111 must + * match. This is very important to ensure that SA1111 accesses + * don't corrupt the SDRAM. Note that this ungates the SA1111's + * MBGNT signal, so we must have called sa1110_mb_disable() + * beforehand. + */ + sa1111_configure_smc(sachip, 1, + FExtr(MDCNFG, MDCNFG_SA1110_DRAC0), + FExtr(MDCNFG, MDCNFG_SA1110_TDL0)); + + /* + * We only need to turn on DCLK whenever we want to use the + * DMA. It can otherwise be held firmly in the off position. + * (currently, we always enable it.) + */ + val = sa1111_readl(sachip->base + SA1111_SKPCR); + sa1111_writel(val | SKPCR_DCLKEN, sachip->base + SA1111_SKPCR); + + /* + * Enable the SA1110 memory bus request and grant signals. + */ + sa1110_mb_enable(); + } +#endif + + g_sa1111 = sachip; + + has_devs = ~0; + if (pd) + has_devs &= ~pd->disable_devs; + + for (i = 0; i < ARRAY_SIZE(sa1111_devices); i++) + if (sa1111_devices[i].devid & has_devs) + sa1111_init_one_child(sachip, mem, &sa1111_devices[i]); + + return 0; + + err_unmap: + iounmap(sachip->base); + err_clk_unprep: + clk_unprepare(sachip->clk); + err_clkput: + clk_put(sachip->clk); + err_free: + kfree(sachip); + return ret; +} + +static int sa1111_remove_one(struct device *dev, void *data) +{ + struct sa1111_dev *sadev = SA1111_DEV(dev); + device_del(&sadev->dev); + release_resource(&sadev->res); + put_device(&sadev->dev); + return 0; +} + +static void __sa1111_remove(struct sa1111 *sachip) +{ + void __iomem *irqbase = sachip->base + SA1111_INTC; + + device_for_each_child(sachip->dev, NULL, sa1111_remove_one); + + /* disable all IRQs */ + sa1111_writel(0, irqbase + SA1111_INTEN0); + sa1111_writel(0, irqbase + SA1111_INTEN1); + sa1111_writel(0, irqbase + SA1111_WAKEEN0); + sa1111_writel(0, irqbase + SA1111_WAKEEN1); + + clk_disable(sachip->clk); + clk_unprepare(sachip->clk); + + if (sachip->irq != NO_IRQ) { + irq_set_chained_handler(sachip->irq, NULL); + irq_set_handler_data(sachip->irq, NULL); + irq_free_descs(sachip->irq_base, SA1111_IRQ_NR); + + release_mem_region(sachip->phys + SA1111_INTC, 512); + } + + iounmap(sachip->base); + clk_put(sachip->clk); + kfree(sachip); +} + +struct sa1111_save_data { + unsigned int skcr; + unsigned int skpcr; + unsigned int skcdr; + unsigned char skaud; + unsigned char skpwm0; + unsigned char skpwm1; + + /* + * Interrupt controller + */ + unsigned int intpol0; + unsigned int intpol1; + unsigned int inten0; + unsigned int inten1; + unsigned int wakepol0; + unsigned int wakepol1; + unsigned int wakeen0; + unsigned int wakeen1; +}; + +#ifdef CONFIG_PM + +static int sa1111_suspend(struct platform_device *dev, pm_message_t state) +{ + struct sa1111 *sachip = platform_get_drvdata(dev); + struct sa1111_save_data *save; + unsigned long flags; + unsigned int val; 
+ void __iomem *base; + + save = kmalloc(sizeof(struct sa1111_save_data), GFP_KERNEL); + if (!save) + return -ENOMEM; + sachip->saved_state = save; + + spin_lock_irqsave(&sachip->lock, flags); + + /* + * Save state. + */ + base = sachip->base; + save->skcr = sa1111_readl(base + SA1111_SKCR); + save->skpcr = sa1111_readl(base + SA1111_SKPCR); + save->skcdr = sa1111_readl(base + SA1111_SKCDR); + save->skaud = sa1111_readl(base + SA1111_SKAUD); + save->skpwm0 = sa1111_readl(base + SA1111_SKPWM0); + save->skpwm1 = sa1111_readl(base + SA1111_SKPWM1); + + sa1111_writel(0, sachip->base + SA1111_SKPWM0); + sa1111_writel(0, sachip->base + SA1111_SKPWM1); + + base = sachip->base + SA1111_INTC; + save->intpol0 = sa1111_readl(base + SA1111_INTPOL0); + save->intpol1 = sa1111_readl(base + SA1111_INTPOL1); + save->inten0 = sa1111_readl(base + SA1111_INTEN0); + save->inten1 = sa1111_readl(base + SA1111_INTEN1); + save->wakepol0 = sa1111_readl(base + SA1111_WAKEPOL0); + save->wakepol1 = sa1111_readl(base + SA1111_WAKEPOL1); + save->wakeen0 = sa1111_readl(base + SA1111_WAKEEN0); + save->wakeen1 = sa1111_readl(base + SA1111_WAKEEN1); + + /* + * Disable. + */ + val = sa1111_readl(sachip->base + SA1111_SKCR); + sa1111_writel(val | SKCR_SLEEP, sachip->base + SA1111_SKCR); + + clk_disable(sachip->clk); + + spin_unlock_irqrestore(&sachip->lock, flags); + +#ifdef CONFIG_ARCH_SA1100 + sa1110_mb_disable(); +#endif + + return 0; +} + +/* + * sa1111_resume - Restore the SA1111 device state. + * @dev: device to restore + * + * Restore the general state of the SA1111; clock control and + * interrupt controller. Other parts of the SA1111 must be + * restored by their respective drivers, and must be called + * via LDM after this function. + */ +static int sa1111_resume(struct platform_device *dev) +{ + struct sa1111 *sachip = platform_get_drvdata(dev); + struct sa1111_save_data *save; + unsigned long flags, id; + void __iomem *base; + + save = sachip->saved_state; + if (!save) + return 0; + + /* + * Ensure that the SA1111 is still here. + * FIXME: shouldn't do this here. + */ + id = sa1111_readl(sachip->base + SA1111_SKID); + if ((id & SKID_ID_MASK) != SKID_SA1111_ID) { + __sa1111_remove(sachip); + platform_set_drvdata(dev, NULL); + kfree(save); + return 0; + } + + /* + * First of all, wake up the chip. + */ + sa1111_wake(sachip); + +#ifdef CONFIG_ARCH_SA1100 + /* Enable the memory bus request/grant signals */ + sa1110_mb_enable(); +#endif + + /* + * Only lock for write ops. Also, sa1111_wake must be called with + * released spinlock! 
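+	 * (sa1111_wake() takes sachip->lock itself, so calling it with
+	 * the lock already held would deadlock.)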
+ */ + spin_lock_irqsave(&sachip->lock, flags); + + sa1111_writel(0, sachip->base + SA1111_INTC + SA1111_INTEN0); + sa1111_writel(0, sachip->base + SA1111_INTC + SA1111_INTEN1); + + base = sachip->base; + sa1111_writel(save->skcr, base + SA1111_SKCR); + sa1111_writel(save->skpcr, base + SA1111_SKPCR); + sa1111_writel(save->skcdr, base + SA1111_SKCDR); + sa1111_writel(save->skaud, base + SA1111_SKAUD); + sa1111_writel(save->skpwm0, base + SA1111_SKPWM0); + sa1111_writel(save->skpwm1, base + SA1111_SKPWM1); + + base = sachip->base + SA1111_INTC; + sa1111_writel(save->intpol0, base + SA1111_INTPOL0); + sa1111_writel(save->intpol1, base + SA1111_INTPOL1); + sa1111_writel(save->inten0, base + SA1111_INTEN0); + sa1111_writel(save->inten1, base + SA1111_INTEN1); + sa1111_writel(save->wakepol0, base + SA1111_WAKEPOL0); + sa1111_writel(save->wakepol1, base + SA1111_WAKEPOL1); + sa1111_writel(save->wakeen0, base + SA1111_WAKEEN0); + sa1111_writel(save->wakeen1, base + SA1111_WAKEEN1); + + spin_unlock_irqrestore(&sachip->lock, flags); + + sachip->saved_state = NULL; + kfree(save); + + return 0; +} + +#else +#define sa1111_suspend NULL +#define sa1111_resume NULL +#endif + +static int __devinit sa1111_probe(struct platform_device *pdev) +{ + struct resource *mem; + int irq; + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!mem) + return -EINVAL; + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return -ENXIO; + + return __sa1111_probe(&pdev->dev, mem, irq); +} + +static int sa1111_remove(struct platform_device *pdev) +{ + struct sa1111 *sachip = platform_get_drvdata(pdev); + + if (sachip) { +#ifdef CONFIG_PM + kfree(sachip->saved_state); + sachip->saved_state = NULL; +#endif + __sa1111_remove(sachip); + platform_set_drvdata(pdev, NULL); + } + + return 0; +} + +/* + * Not sure if this should be on the system bus or not yet. + * We really want some way to register a system device at + * the per-machine level, and then have this driver pick + * up the registered devices. + * + * We also need to handle the SDRAM configuration for + * PXA250/SA1110 machine classes. + */ +static struct platform_driver sa1111_device_driver = { + .probe = sa1111_probe, + .remove = sa1111_remove, + .suspend = sa1111_suspend, + .resume = sa1111_resume, + .driver = { + .name = "sa1111", + .owner = THIS_MODULE, + }, +}; + +/* + * Get the parent device driver (us) structure + * from a child function device + */ +static inline struct sa1111 *sa1111_chip_driver(struct sa1111_dev *sadev) +{ + return (struct sa1111 *)dev_get_drvdata(sadev->dev.parent); +} + +/* + * The bits in the opdiv field are non-linear. + */ +static unsigned char opdiv_table[] = { 1, 4, 2, 8 }; + +static unsigned int __sa1111_pll_clock(struct sa1111 *sachip) +{ + unsigned int skcdr, fbdiv, ipdiv, opdiv; + + skcdr = sa1111_readl(sachip->base + SA1111_SKCDR); + + fbdiv = (skcdr & 0x007f) + 2; + ipdiv = ((skcdr & 0x0f80) >> 7) + 2; + opdiv = opdiv_table[(skcdr & 0x3000) >> 12]; + + return 3686400 * fbdiv / (ipdiv * opdiv); +} + +/** + * sa1111_pll_clock - return the current PLL clock frequency. + * @sadev: SA1111 function block + * + * BUG: we should look at SKCR. We also blindly believe that + * the chip is being fed with the 3.6864MHz clock. + * + * Returns the PLL clock in Hz. 
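+ *
+ * Worked example (SKCDR value invented for illustration): 0x0030
+ * gives fbdiv = 0x30 + 2 = 50, ipdiv = 0 + 2 = 2 and opdiv = 1,
+ * hence 3686400 * 50 / (2 * 1) = 92160000 Hz.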
+ */ +unsigned int sa1111_pll_clock(struct sa1111_dev *sadev) +{ + struct sa1111 *sachip = sa1111_chip_driver(sadev); + + return __sa1111_pll_clock(sachip); +} +EXPORT_SYMBOL(sa1111_pll_clock); + +/** + * sa1111_select_audio_mode - select I2S or AC link mode + * @sadev: SA1111 function block + * @mode: One of %SA1111_AUDIO_ACLINK or %SA1111_AUDIO_I2S + * + * Frob the SKCR to select AC Link mode or I2S mode for + * the audio block. + */ +void sa1111_select_audio_mode(struct sa1111_dev *sadev, int mode) +{ + struct sa1111 *sachip = sa1111_chip_driver(sadev); + unsigned long flags; + unsigned int val; + + spin_lock_irqsave(&sachip->lock, flags); + + val = sa1111_readl(sachip->base + SA1111_SKCR); + if (mode == SA1111_AUDIO_I2S) { + val &= ~SKCR_SELAC; + } else { + val |= SKCR_SELAC; + } + sa1111_writel(val, sachip->base + SA1111_SKCR); + + spin_unlock_irqrestore(&sachip->lock, flags); +} +EXPORT_SYMBOL(sa1111_select_audio_mode); + +/** + * sa1111_set_audio_rate - set the audio sample rate + * @sadev: SA1111 SAC function block + * @rate: sample rate to select + */ +int sa1111_set_audio_rate(struct sa1111_dev *sadev, int rate) +{ + struct sa1111 *sachip = sa1111_chip_driver(sadev); + unsigned int div; + + if (sadev->devid != SA1111_DEVID_SAC) + return -EINVAL; + + div = (__sa1111_pll_clock(sachip) / 256 + rate / 2) / rate; + if (div == 0) + div = 1; + if (div > 128) + div = 128; + + sa1111_writel(div - 1, sachip->base + SA1111_SKAUD); + + return 0; +} +EXPORT_SYMBOL(sa1111_set_audio_rate); + +/** + * sa1111_get_audio_rate - get the audio sample rate + * @sadev: SA1111 SAC function block device + */ +int sa1111_get_audio_rate(struct sa1111_dev *sadev) +{ + struct sa1111 *sachip = sa1111_chip_driver(sadev); + unsigned long div; + + if (sadev->devid != SA1111_DEVID_SAC) + return -EINVAL; + + div = sa1111_readl(sachip->base + SA1111_SKAUD) + 1; + + return __sa1111_pll_clock(sachip) / (256 * div); +} +EXPORT_SYMBOL(sa1111_get_audio_rate); + +void sa1111_set_io_dir(struct sa1111_dev *sadev, + unsigned int bits, unsigned int dir, + unsigned int sleep_dir) +{ + struct sa1111 *sachip = sa1111_chip_driver(sadev); + unsigned long flags; + unsigned int val; + void __iomem *gpio = sachip->base + SA1111_GPIO; + +#define MODIFY_BITS(port, mask, dir) \ + if (mask) { \ + val = sa1111_readl(port); \ + val &= ~(mask); \ + val |= (dir) & (mask); \ + sa1111_writel(val, port); \ + } + + spin_lock_irqsave(&sachip->lock, flags); + MODIFY_BITS(gpio + SA1111_GPIO_PADDR, bits & 15, dir); + MODIFY_BITS(gpio + SA1111_GPIO_PBDDR, (bits >> 8) & 255, dir >> 8); + MODIFY_BITS(gpio + SA1111_GPIO_PCDDR, (bits >> 16) & 255, dir >> 16); + + MODIFY_BITS(gpio + SA1111_GPIO_PASDR, bits & 15, sleep_dir); + MODIFY_BITS(gpio + SA1111_GPIO_PBSDR, (bits >> 8) & 255, sleep_dir >> 8); + MODIFY_BITS(gpio + SA1111_GPIO_PCSDR, (bits >> 16) & 255, sleep_dir >> 16); + spin_unlock_irqrestore(&sachip->lock, flags); +} +EXPORT_SYMBOL(sa1111_set_io_dir); + +void sa1111_set_io(struct sa1111_dev *sadev, unsigned int bits, unsigned int v) +{ + struct sa1111 *sachip = sa1111_chip_driver(sadev); + unsigned long flags; + unsigned int val; + void __iomem *gpio = sachip->base + SA1111_GPIO; + + spin_lock_irqsave(&sachip->lock, flags); + MODIFY_BITS(gpio + SA1111_GPIO_PADWR, bits & 15, v); + MODIFY_BITS(gpio + SA1111_GPIO_PBDWR, (bits >> 8) & 255, v >> 8); + MODIFY_BITS(gpio + SA1111_GPIO_PCDWR, (bits >> 16) & 255, v >> 16); + spin_unlock_irqrestore(&sachip->lock, flags); +} +EXPORT_SYMBOL(sa1111_set_io); + +void sa1111_set_sleep_io(struct sa1111_dev *sadev, 
unsigned int bits, unsigned int v) +{ + struct sa1111 *sachip = sa1111_chip_driver(sadev); + unsigned long flags; + unsigned int val; + void __iomem *gpio = sachip->base + SA1111_GPIO; + + spin_lock_irqsave(&sachip->lock, flags); + MODIFY_BITS(gpio + SA1111_GPIO_PASSR, bits & 15, v); + MODIFY_BITS(gpio + SA1111_GPIO_PBSSR, (bits >> 8) & 255, v >> 8); + MODIFY_BITS(gpio + SA1111_GPIO_PCSSR, (bits >> 16) & 255, v >> 16); + spin_unlock_irqrestore(&sachip->lock, flags); +} +EXPORT_SYMBOL(sa1111_set_sleep_io); + +/* + * Individual device operations. + */ + +/** + * sa1111_enable_device - enable an on-chip SA1111 function block + * @sadev: SA1111 function block device to enable + */ +int sa1111_enable_device(struct sa1111_dev *sadev) +{ + struct sa1111 *sachip = sa1111_chip_driver(sadev); + unsigned long flags; + unsigned int val; + int ret = 0; + + if (sachip->pdata && sachip->pdata->enable) + ret = sachip->pdata->enable(sachip->pdata->data, sadev->devid); + + if (ret == 0) { + spin_lock_irqsave(&sachip->lock, flags); + val = sa1111_readl(sachip->base + SA1111_SKPCR); + sa1111_writel(val | sadev->skpcr_mask, sachip->base + SA1111_SKPCR); + spin_unlock_irqrestore(&sachip->lock, flags); + } + return ret; +} +EXPORT_SYMBOL(sa1111_enable_device); + +/** + * sa1111_disable_device - disable an on-chip SA1111 function block + * @sadev: SA1111 function block device to disable + */ +void sa1111_disable_device(struct sa1111_dev *sadev) +{ + struct sa1111 *sachip = sa1111_chip_driver(sadev); + unsigned long flags; + unsigned int val; + + spin_lock_irqsave(&sachip->lock, flags); + val = sa1111_readl(sachip->base + SA1111_SKPCR); + sa1111_writel(val & ~sadev->skpcr_mask, sachip->base + SA1111_SKPCR); + spin_unlock_irqrestore(&sachip->lock, flags); + + if (sachip->pdata && sachip->pdata->disable) + sachip->pdata->disable(sachip->pdata->data, sadev->devid); +} +EXPORT_SYMBOL(sa1111_disable_device); + +/* + * SA1111 "Register Access Bus." + * + * We model this as a regular bus type, and hang devices directly + * off this. 
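+ *
+ * Unlike the LoCoMo bus in locomo.c, sa1111_match() below tests
+ * dev->devid & drv->devid, so one driver may claim several function
+ * blocks at once.  A hypothetical registration (names invented for
+ * illustration):
+ *
+ *	static struct sa1111_driver example_driver = {
+ *		.drv	= { .name = "sa1111-example" },
+ *		.devid	= SA1111_DEVID_PS2_KBD | SA1111_DEVID_PS2_MSE,
+ *		.probe	= example_probe,
+ *	};
+ *
+ *	sa1111_driver_register(&example_driver);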
+ */
+static int sa1111_match(struct device *_dev, struct device_driver *_drv)
+{
+	struct sa1111_dev *dev = SA1111_DEV(_dev);
+	struct sa1111_driver *drv = SA1111_DRV(_drv);
+
+	return dev->devid & drv->devid;
+}
+
+static int sa1111_bus_suspend(struct device *dev, pm_message_t state)
+{
+	struct sa1111_dev *sadev = SA1111_DEV(dev);
+	struct sa1111_driver *drv = SA1111_DRV(dev->driver);
+	int ret = 0;
+
+	if (drv && drv->suspend)
+		ret = drv->suspend(sadev, state);
+	return ret;
+}
+
+static int sa1111_bus_resume(struct device *dev)
+{
+	struct sa1111_dev *sadev = SA1111_DEV(dev);
+	struct sa1111_driver *drv = SA1111_DRV(dev->driver);
+	int ret = 0;
+
+	if (drv && drv->resume)
+		ret = drv->resume(sadev);
+	return ret;
+}
+
+static void sa1111_bus_shutdown(struct device *dev)
+{
+	struct sa1111_driver *drv = SA1111_DRV(dev->driver);
+
+	if (drv && drv->shutdown)
+		drv->shutdown(SA1111_DEV(dev));
+}
+
+static int sa1111_bus_probe(struct device *dev)
+{
+	struct sa1111_dev *sadev = SA1111_DEV(dev);
+	struct sa1111_driver *drv = SA1111_DRV(dev->driver);
+	int ret = -ENODEV;
+
+	if (drv->probe)
+		ret = drv->probe(sadev);
+	return ret;
+}
+
+static int sa1111_bus_remove(struct device *dev)
+{
+	struct sa1111_dev *sadev = SA1111_DEV(dev);
+	struct sa1111_driver *drv = SA1111_DRV(dev->driver);
+	int ret = 0;
+
+	if (drv->remove)
+		ret = drv->remove(sadev);
+	return ret;
+}
+
+struct bus_type sa1111_bus_type = {
+	.name		= "sa1111-rab",
+	.match		= sa1111_match,
+	.probe		= sa1111_bus_probe,
+	.remove		= sa1111_bus_remove,
+	.suspend	= sa1111_bus_suspend,
+	.resume		= sa1111_bus_resume,
+	.shutdown	= sa1111_bus_shutdown,
+};
+EXPORT_SYMBOL(sa1111_bus_type);
+
+int sa1111_driver_register(struct sa1111_driver *driver)
+{
+	driver->drv.bus = &sa1111_bus_type;
+	return driver_register(&driver->drv);
+}
+EXPORT_SYMBOL(sa1111_driver_register);
+
+void sa1111_driver_unregister(struct sa1111_driver *driver)
+{
+	driver_unregister(&driver->drv);
+}
+EXPORT_SYMBOL(sa1111_driver_unregister);
+
+#ifdef CONFIG_DMABOUNCE
+/*
+ * According to the "Intel StrongARM SA-1111 Microprocessor Companion
+ * Chip Specification Update" (June 2000), erratum #7, there is a
+ * significant bug in the SA1111 SDRAM shared memory controller. For
+ * an access to a region of memory above 1MB relative to the bank base,
+ * it is important that address bit 10 _NOT_ be asserted. Depending
+ * on the configuration of the RAM, bit 10 may correspond to one
+ * of several different (processor-relative) address bits.
+ *
+ * This routine only identifies whether or not a given DMA address
+ * is susceptible to the bug.
+ *
+ * This should only get called for sa1111_device types due to the
+ * way we configure our device dma_masks.
+ */
+static int sa1111_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
+{
+	/*
+	 * Section 4.6 of the "Intel StrongARM SA-1111 Development Module
+	 * User's Guide" mentions that jumpers R51 and R52 control the
+	 * target of SA-1111 DMA (either SDRAM bank 0 on Assabet, or
+	 * SDRAM bank 1 on Neponset). The default configuration selects
+	 * Assabet, so any address in bank 1 is necessarily invalid.
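+	 *
+	 * (SDRAM bank 1 on the SA-1100 begins at physical 0xc8000000,
+	 * which is the constant the range check below compares against.)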
+ */
+	return (machine_is_assabet() || machine_is_pfs168()) &&
+		(addr >= 0xc8000000 || (addr + size) >= 0xc8000000);
+}
+
+static int sa1111_notifier_call(struct notifier_block *n, unsigned long action,
+	void *data)
+{
+	struct sa1111_dev *dev = SA1111_DEV(data);
+
+	switch (action) {
+	case BUS_NOTIFY_ADD_DEVICE:
+		if (dev->dev.dma_mask && dev->dma_mask < 0xffffffffUL) {
+			int ret = dmabounce_register_dev(&dev->dev, 1024, 4096,
+					sa1111_needs_bounce);
+			if (ret)
+				dev_err(&dev->dev, "failed to register with dmabounce: %d\n", ret);
+		}
+		break;
+
+	case BUS_NOTIFY_DEL_DEVICE:
+		if (dev->dev.dma_mask && dev->dma_mask < 0xffffffffUL)
+			dmabounce_unregister_dev(&dev->dev);
+		break;
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block sa1111_bus_notifier = {
+	.notifier_call = sa1111_notifier_call,
+};
+#endif
+
+static int __init sa1111_init(void)
+{
+	int ret = bus_register(&sa1111_bus_type);
+#ifdef CONFIG_DMABOUNCE
+	if (ret == 0)
+		bus_register_notifier(&sa1111_bus_type, &sa1111_bus_notifier);
+#endif
+	if (ret == 0)
+		platform_driver_register(&sa1111_device_driver);
+	return ret;
+}
+
+static void __exit sa1111_exit(void)
+{
+	platform_driver_unregister(&sa1111_device_driver);
+#ifdef CONFIG_DMABOUNCE
+	bus_unregister_notifier(&sa1111_bus_type, &sa1111_bus_notifier);
+#endif
+	bus_unregister(&sa1111_bus_type);
+}
+
+subsys_initcall(sa1111_init);
+module_exit(sa1111_exit);
+
+MODULE_DESCRIPTION("Intel Corporation SA1111 core driver");
+MODULE_LICENSE("GPL");
diff --git a/ANDROID_3.4.5/arch/arm/common/scoop.c b/ANDROID_3.4.5/arch/arm/common/scoop.c
new file mode 100644
index 00000000..0c616d5f
--- /dev/null
+++ b/ANDROID_3.4.5/arch/arm/common/scoop.c
@@ -0,0 +1,284 @@
+/*
+ * Support code for the SCOOP interface found on various Sharp PDAs
+ *
+ * Copyright (c) 2004 Richard Purdie
+ *
+ * Based on code written by Sharp/Lineo for 2.4 kernels
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/gpio.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/export.h>
+#include <linux/io.h>
+#include <asm/hardware/scoop.h>
+
+/* PCMCIA to Scoop linkage
+
+   There is no easy way to link multiple scoop devices into one
+   single entity for the pxa2xx_pcmcia device, so this structure
+   is used, which is set up by the platform code.
+
+   This file is never modular, so this symbol is always
+   accessible to the board support files.
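+
+   As a sketch of the expected board-side setup (field names as in
+   <asm/hardware/scoop.h>; the device array itself is hypothetical):
+
+	static struct scoop_pcmcia_config board_pcmcia_config = {
+		.devs		= &board_pcmcia_scoop[0],
+		.num_devs	= 1,
+	};
+
+	platform_scoop_config = &board_pcmcia_config;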
+*/ +struct scoop_pcmcia_config *platform_scoop_config; +EXPORT_SYMBOL(platform_scoop_config); + +struct scoop_dev { + void __iomem *base; + struct gpio_chip gpio; + spinlock_t scoop_lock; + unsigned short suspend_clr; + unsigned short suspend_set; + u32 scoop_gpwr; +}; + +void reset_scoop(struct device *dev) +{ + struct scoop_dev *sdev = dev_get_drvdata(dev); + + iowrite16(0x0100, sdev->base + SCOOP_MCR); /* 00 */ + iowrite16(0x0000, sdev->base + SCOOP_CDR); /* 04 */ + iowrite16(0x0000, sdev->base + SCOOP_CCR); /* 10 */ + iowrite16(0x0000, sdev->base + SCOOP_IMR); /* 18 */ + iowrite16(0x00FF, sdev->base + SCOOP_IRM); /* 14 */ + iowrite16(0x0000, sdev->base + SCOOP_ISR); /* 1C */ + iowrite16(0x0000, sdev->base + SCOOP_IRM); +} + +static void __scoop_gpio_set(struct scoop_dev *sdev, + unsigned offset, int value) +{ + unsigned short gpwr; + + gpwr = ioread16(sdev->base + SCOOP_GPWR); + if (value) + gpwr |= 1 << (offset + 1); + else + gpwr &= ~(1 << (offset + 1)); + iowrite16(gpwr, sdev->base + SCOOP_GPWR); +} + +static void scoop_gpio_set(struct gpio_chip *chip, unsigned offset, int value) +{ + struct scoop_dev *sdev = container_of(chip, struct scoop_dev, gpio); + unsigned long flags; + + spin_lock_irqsave(&sdev->scoop_lock, flags); + + __scoop_gpio_set(sdev, offset, value); + + spin_unlock_irqrestore(&sdev->scoop_lock, flags); +} + +static int scoop_gpio_get(struct gpio_chip *chip, unsigned offset) +{ + struct scoop_dev *sdev = container_of(chip, struct scoop_dev, gpio); + + /* XXX: I'm unsure, but it seems so */ + return ioread16(sdev->base + SCOOP_GPRR) & (1 << (offset + 1)); +} + +static int scoop_gpio_direction_input(struct gpio_chip *chip, + unsigned offset) +{ + struct scoop_dev *sdev = container_of(chip, struct scoop_dev, gpio); + unsigned long flags; + unsigned short gpcr; + + spin_lock_irqsave(&sdev->scoop_lock, flags); + + gpcr = ioread16(sdev->base + SCOOP_GPCR); + gpcr &= ~(1 << (offset + 1)); + iowrite16(gpcr, sdev->base + SCOOP_GPCR); + + spin_unlock_irqrestore(&sdev->scoop_lock, flags); + + return 0; +} + +static int scoop_gpio_direction_output(struct gpio_chip *chip, + unsigned offset, int value) +{ + struct scoop_dev *sdev = container_of(chip, struct scoop_dev, gpio); + unsigned long flags; + unsigned short gpcr; + + spin_lock_irqsave(&sdev->scoop_lock, flags); + + __scoop_gpio_set(sdev, offset, value); + + gpcr = ioread16(sdev->base + SCOOP_GPCR); + gpcr |= 1 << (offset + 1); + iowrite16(gpcr, sdev->base + SCOOP_GPCR); + + spin_unlock_irqrestore(&sdev->scoop_lock, flags); + + return 0; +} + +unsigned short read_scoop_reg(struct device *dev, unsigned short reg) +{ + struct scoop_dev *sdev = dev_get_drvdata(dev); + return ioread16(sdev->base + reg); +} + +void write_scoop_reg(struct device *dev, unsigned short reg, unsigned short data) +{ + struct scoop_dev *sdev = dev_get_drvdata(dev); + iowrite16(data, sdev->base + reg); +} + +EXPORT_SYMBOL(reset_scoop); +EXPORT_SYMBOL(read_scoop_reg); +EXPORT_SYMBOL(write_scoop_reg); + +#ifdef CONFIG_PM +static void check_scoop_reg(struct scoop_dev *sdev) +{ + unsigned short mcr; + + mcr = ioread16(sdev->base + SCOOP_MCR); + if ((mcr & 0x100) == 0) + iowrite16(0x0101, sdev->base + SCOOP_MCR); +} + +static int scoop_suspend(struct platform_device *dev, pm_message_t state) +{ + struct scoop_dev *sdev = platform_get_drvdata(dev); + + check_scoop_reg(sdev); + sdev->scoop_gpwr = ioread16(sdev->base + SCOOP_GPWR); + iowrite16((sdev->scoop_gpwr & ~sdev->suspend_clr) | sdev->suspend_set, sdev->base + SCOOP_GPWR); + + return 0; +} + +static int 
scoop_resume(struct platform_device *dev) +{ + struct scoop_dev *sdev = platform_get_drvdata(dev); + + check_scoop_reg(sdev); + iowrite16(sdev->scoop_gpwr, sdev->base + SCOOP_GPWR); + + return 0; +} +#else +#define scoop_suspend NULL +#define scoop_resume NULL +#endif + +static int __devinit scoop_probe(struct platform_device *pdev) +{ + struct scoop_dev *devptr; + struct scoop_config *inf; + struct resource *mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + int ret; + int temp; + + if (!mem) + return -EINVAL; + + devptr = kzalloc(sizeof(struct scoop_dev), GFP_KERNEL); + if (!devptr) + return -ENOMEM; + + spin_lock_init(&devptr->scoop_lock); + + inf = pdev->dev.platform_data; + devptr->base = ioremap(mem->start, resource_size(mem)); + + if (!devptr->base) { + ret = -ENOMEM; + goto err_ioremap; + } + + platform_set_drvdata(pdev, devptr); + + printk("Sharp Scoop Device found at 0x%08x -> 0x%8p\n",(unsigned int)mem->start, devptr->base); + + iowrite16(0x0140, devptr->base + SCOOP_MCR); + reset_scoop(&pdev->dev); + iowrite16(0x0000, devptr->base + SCOOP_CPR); + iowrite16(inf->io_dir & 0xffff, devptr->base + SCOOP_GPCR); + iowrite16(inf->io_out & 0xffff, devptr->base + SCOOP_GPWR); + + devptr->suspend_clr = inf->suspend_clr; + devptr->suspend_set = inf->suspend_set; + + devptr->gpio.base = -1; + + if (inf->gpio_base != 0) { + devptr->gpio.label = dev_name(&pdev->dev); + devptr->gpio.base = inf->gpio_base; + devptr->gpio.ngpio = 12; /* PA11 = 0, PA12 = 1, etc. up to PA22 = 11 */ + devptr->gpio.set = scoop_gpio_set; + devptr->gpio.get = scoop_gpio_get; + devptr->gpio.direction_input = scoop_gpio_direction_input; + devptr->gpio.direction_output = scoop_gpio_direction_output; + + ret = gpiochip_add(&devptr->gpio); + if (ret) + goto err_gpio; + } + + return 0; + + if (devptr->gpio.base != -1) + temp = gpiochip_remove(&devptr->gpio); +err_gpio: + platform_set_drvdata(pdev, NULL); +err_ioremap: + iounmap(devptr->base); + kfree(devptr); + + return ret; +} + +static int __devexit scoop_remove(struct platform_device *pdev) +{ + struct scoop_dev *sdev = platform_get_drvdata(pdev); + int ret; + + if (!sdev) + return -EINVAL; + + if (sdev->gpio.base != -1) { + ret = gpiochip_remove(&sdev->gpio); + if (ret) { + dev_err(&pdev->dev, "Can't remove gpio chip: %d\n", ret); + return ret; + } + } + + platform_set_drvdata(pdev, NULL); + iounmap(sdev->base); + kfree(sdev); + + return 0; +} + +static struct platform_driver scoop_driver = { + .probe = scoop_probe, + .remove = __devexit_p(scoop_remove), + .suspend = scoop_suspend, + .resume = scoop_resume, + .driver = { + .name = "sharp-scoop", + }, +}; + +static int __init scoop_init(void) +{ + return platform_driver_register(&scoop_driver); +} + +subsys_initcall(scoop_init); diff --git a/ANDROID_3.4.5/arch/arm/common/sharpsl_param.c b/ANDROID_3.4.5/arch/arm/common/sharpsl_param.c new file mode 100644 index 00000000..d56c9325 --- /dev/null +++ b/ANDROID_3.4.5/arch/arm/common/sharpsl_param.c @@ -0,0 +1,62 @@ +/* + * Hardware parameter area specific to Sharp SL series devices + * + * Copyright (c) 2005 Richard Purdie + * + * Based on Sharp's 2.4 kernel patches + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/string.h> +#include <asm/mach/sharpsl_param.h> + +/* + * Certain hardware parameters determined at the time of device manufacture, + * typically including LCD parameters are loaded by the bootloader at the + * address PARAM_BASE. As the kernel will overwrite them, we need to store + * them early in the boot process, then pass them to the appropriate drivers. + * Not all devices use all parameters but the format is common to all. + */ +#ifdef CONFIG_ARCH_SA1100 +#define PARAM_BASE 0xe8ffc000 +#else +#define PARAM_BASE 0xa0000a00 +#endif +#define MAGIC_CHG(a,b,c,d) ( ( d << 24 ) | ( c << 16 ) | ( b << 8 ) | a ) + +#define COMADJ_MAGIC MAGIC_CHG('C','M','A','D') +#define UUID_MAGIC MAGIC_CHG('U','U','I','D') +#define TOUCH_MAGIC MAGIC_CHG('T','U','C','H') +#define AD_MAGIC MAGIC_CHG('B','V','A','D') +#define PHAD_MAGIC MAGIC_CHG('P','H','A','D') + +struct sharpsl_param_info sharpsl_param; +EXPORT_SYMBOL(sharpsl_param); + +void sharpsl_save_param(void) +{ + memcpy(&sharpsl_param, (void *)PARAM_BASE, sizeof(struct sharpsl_param_info)); + + if (sharpsl_param.comadj_keyword != COMADJ_MAGIC) + sharpsl_param.comadj=-1; + + if (sharpsl_param.phad_keyword != PHAD_MAGIC) + sharpsl_param.phadadj=-1; + + if (sharpsl_param.uuid_keyword != UUID_MAGIC) + sharpsl_param.uuid[0]=-1; + + if (sharpsl_param.touch_keyword != TOUCH_MAGIC) + sharpsl_param.touch_xp=-1; + + if (sharpsl_param.adadj_keyword != AD_MAGIC) + sharpsl_param.adadj=-1; +} + + diff --git a/ANDROID_3.4.5/arch/arm/common/timer-sp.c b/ANDROID_3.4.5/arch/arm/common/timer-sp.c new file mode 100644 index 00000000..df13a3ff --- /dev/null +++ b/ANDROID_3.4.5/arch/arm/common/timer-sp.c @@ -0,0 +1,191 @@ +/* + * linux/arch/arm/common/timer-sp.c + * + * Copyright (C) 1999 - 2003 ARM Limited + * Copyright (C) 2000 Deep Blue Solutions Ltd + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#include <linux/clk.h> +#include <linux/clocksource.h> +#include <linux/clockchips.h> +#include <linux/err.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/io.h> + +#include <asm/sched_clock.h> +#include <asm/hardware/arm_timer.h> + +static long __init sp804_get_clock_rate(const char *name) +{ + struct clk *clk; + long rate; + int err; + + clk = clk_get_sys("sp804", name); + if (IS_ERR(clk)) { + pr_err("sp804: %s clock not found: %d\n", name, + (int)PTR_ERR(clk)); + return PTR_ERR(clk); + } + + err = clk_prepare(clk); + if (err) { + pr_err("sp804: %s clock failed to prepare: %d\n", name, err); + clk_put(clk); + return err; + } + + err = clk_enable(clk); + if (err) { + pr_err("sp804: %s clock failed to enable: %d\n", name, err); + clk_unprepare(clk); + clk_put(clk); + return err; + } + + rate = clk_get_rate(clk); + if (rate < 0) { + pr_err("sp804: %s clock failed to get rate: %ld\n", name, rate); + clk_disable(clk); + clk_unprepare(clk); + clk_put(clk); + } + + return rate; +} + +static void __iomem *sched_clock_base; + +static u32 sp804_read(void) +{ + return ~readl_relaxed(sched_clock_base + TIMER_VALUE); +} + +void __init __sp804_clocksource_and_sched_clock_init(void __iomem *base, + const char *name, + int use_sched_clock) +{ + long rate = sp804_get_clock_rate(name); + + if (rate < 0) + return; + + /* setup timer 0 as free-running clocksource */ + writel(0, base + TIMER_CTRL); + writel(0xffffffff, base + TIMER_LOAD); + writel(0xffffffff, base + TIMER_VALUE); + writel(TIMER_CTRL_32BIT | TIMER_CTRL_ENABLE | TIMER_CTRL_PERIODIC, + base + TIMER_CTRL); + + clocksource_mmio_init(base + TIMER_VALUE, name, + rate, 200, 32, clocksource_mmio_readl_down); + + if (use_sched_clock) { + sched_clock_base = base; + setup_sched_clock(sp804_read, 32, rate); + } +} + + +static void __iomem *clkevt_base; +static unsigned long clkevt_reload; + +/* + * IRQ handler for the timer + */ +static irqreturn_t sp804_timer_interrupt(int irq, void *dev_id) +{ + struct clock_event_device *evt = dev_id; + + /* clear the interrupt */ + writel(1, clkevt_base + TIMER_INTCLR); + + evt->event_handler(evt); + + return IRQ_HANDLED; +} + +static void sp804_set_mode(enum clock_event_mode mode, + struct clock_event_device *evt) +{ + unsigned long ctrl = TIMER_CTRL_32BIT | TIMER_CTRL_IE; + + writel(ctrl, clkevt_base + TIMER_CTRL); + + switch (mode) { + case CLOCK_EVT_MODE_PERIODIC: + writel(clkevt_reload, clkevt_base + TIMER_LOAD); + ctrl |= TIMER_CTRL_PERIODIC | TIMER_CTRL_ENABLE; + break; + + case CLOCK_EVT_MODE_ONESHOT: + /* period set, and timer enabled in 'next_event' hook */ + ctrl |= TIMER_CTRL_ONESHOT; + break; + + case CLOCK_EVT_MODE_UNUSED: + case CLOCK_EVT_MODE_SHUTDOWN: + default: + break; + } + + writel(ctrl, clkevt_base + TIMER_CTRL); +} + +static int sp804_set_next_event(unsigned long next, + struct clock_event_device *evt) +{ + unsigned long ctrl = readl(clkevt_base + TIMER_CTRL); + + writel(next, clkevt_base + TIMER_LOAD); + writel(ctrl | TIMER_CTRL_ENABLE, clkevt_base + TIMER_CTRL); + + return 0; +} + +static struct clock_event_device sp804_clockevent = { + .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, + .set_mode = sp804_set_mode, + .set_next_event = sp804_set_next_event, + .rating = 300, + .cpumask = cpu_all_mask, +}; + +static struct irqaction 
sp804_timer_irq = { + .name = "timer", + .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, + .handler = sp804_timer_interrupt, + .dev_id = &sp804_clockevent, +}; + +void __init sp804_clockevents_init(void __iomem *base, unsigned int irq, + const char *name) +{ + struct clock_event_device *evt = &sp804_clockevent; + long rate = sp804_get_clock_rate(name); + + if (rate < 0) + return; + + clkevt_base = base; + clkevt_reload = DIV_ROUND_CLOSEST(rate, HZ); + evt->name = name; + evt->irq = irq; + + setup_irq(irq, &sp804_timer_irq); + clockevents_config_and_register(evt, rate, 0xf, 0xffffffff); +} diff --git a/ANDROID_3.4.5/arch/arm/common/uengine.c b/ANDROID_3.4.5/arch/arm/common/uengine.c new file mode 100644 index 00000000..bef408f3 --- /dev/null +++ b/ANDROID_3.4.5/arch/arm/common/uengine.c @@ -0,0 +1,507 @@ +/* + * Generic library functions for the microengines found on the Intel + * IXP2000 series of network processors. + * + * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org> + * Dedicated to Marija Kulikova. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as + * published by the Free Software Foundation; either version 2.1 of the + * License, or (at your option) any later version. + */ + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/module.h> +#include <linux/string.h> +#include <linux/io.h> +#include <mach/hardware.h> +#include <asm/hardware/uengine.h> + +#if defined(CONFIG_ARCH_IXP2000) +#define IXP_UENGINE_CSR_VIRT_BASE IXP2000_UENGINE_CSR_VIRT_BASE +#define IXP_PRODUCT_ID IXP2000_PRODUCT_ID +#define IXP_MISC_CONTROL IXP2000_MISC_CONTROL +#define IXP_RESET1 IXP2000_RESET1 +#else +#if defined(CONFIG_ARCH_IXP23XX) +#define IXP_UENGINE_CSR_VIRT_BASE IXP23XX_UENGINE_CSR_VIRT_BASE +#define IXP_PRODUCT_ID IXP23XX_PRODUCT_ID +#define IXP_MISC_CONTROL IXP23XX_MISC_CONTROL +#define IXP_RESET1 IXP23XX_RESET1 +#else +#error unknown platform +#endif +#endif + +#define USTORE_ADDRESS 0x000 +#define USTORE_DATA_LOWER 0x004 +#define USTORE_DATA_UPPER 0x008 +#define CTX_ENABLES 0x018 +#define CC_ENABLE 0x01c +#define CSR_CTX_POINTER 0x020 +#define INDIRECT_CTX_STS 0x040 +#define ACTIVE_CTX_STS 0x044 +#define INDIRECT_CTX_SIG_EVENTS 0x048 +#define INDIRECT_CTX_WAKEUP_EVENTS 0x050 +#define NN_PUT 0x080 +#define NN_GET 0x084 +#define TIMESTAMP_LOW 0x0c0 +#define TIMESTAMP_HIGH 0x0c4 +#define T_INDEX_BYTE_INDEX 0x0f4 +#define LOCAL_CSR_STATUS 0x180 + +u32 ixp2000_uengine_mask; + +static void *ixp2000_uengine_csr_area(int uengine) +{ + return ((void *)IXP_UENGINE_CSR_VIRT_BASE) + (uengine << 10); +} + +/* + * LOCAL_CSR_STATUS=1 after a read or write to a microengine's CSR + * space means that the microengine we tried to access was also trying + * to access its own CSR space on the same clock cycle as we did. When + * this happens, we lose the arbitration process by default, and the + * read or write we tried to do was not actually performed, so we try + * again until it succeeds. 
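+ * (Hence the retry loops in the accessors below: each read or write is
+ * simply re-issued while a read-back of LOCAL_CSR_STATUS still reports
+ * a lost arbitration.)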
+ */ +u32 ixp2000_uengine_csr_read(int uengine, int offset) +{ + void *uebase; + u32 *local_csr_status; + u32 *reg; + u32 value; + + uebase = ixp2000_uengine_csr_area(uengine); + + local_csr_status = (u32 *)(uebase + LOCAL_CSR_STATUS); + reg = (u32 *)(uebase + offset); + do { + value = ixp2000_reg_read(reg); + } while (ixp2000_reg_read(local_csr_status) & 1); + + return value; +} +EXPORT_SYMBOL(ixp2000_uengine_csr_read); + +void ixp2000_uengine_csr_write(int uengine, int offset, u32 value) +{ + void *uebase; + u32 *local_csr_status; + u32 *reg; + + uebase = ixp2000_uengine_csr_area(uengine); + + local_csr_status = (u32 *)(uebase + LOCAL_CSR_STATUS); + reg = (u32 *)(uebase + offset); + do { + ixp2000_reg_write(reg, value); + } while (ixp2000_reg_read(local_csr_status) & 1); +} +EXPORT_SYMBOL(ixp2000_uengine_csr_write); + +void ixp2000_uengine_reset(u32 uengine_mask) +{ + u32 value; + + value = ixp2000_reg_read(IXP_RESET1) & ~ixp2000_uengine_mask; + + uengine_mask &= ixp2000_uengine_mask; + ixp2000_reg_wrb(IXP_RESET1, value | uengine_mask); + ixp2000_reg_wrb(IXP_RESET1, value); +} +EXPORT_SYMBOL(ixp2000_uengine_reset); + +void ixp2000_uengine_set_mode(int uengine, u32 mode) +{ + /* + * CTL_STR_PAR_EN: unconditionally enable parity checking on + * control store. + */ + mode |= 0x10000000; + ixp2000_uengine_csr_write(uengine, CTX_ENABLES, mode); + + /* + * Enable updating of condition codes. + */ + ixp2000_uengine_csr_write(uengine, CC_ENABLE, 0x00002000); + + /* + * Initialise other per-microengine registers. + */ + ixp2000_uengine_csr_write(uengine, NN_PUT, 0x00); + ixp2000_uengine_csr_write(uengine, NN_GET, 0x00); + ixp2000_uengine_csr_write(uengine, T_INDEX_BYTE_INDEX, 0); +} +EXPORT_SYMBOL(ixp2000_uengine_set_mode); + +static int make_even_parity(u32 x) +{ + return hweight32(x) & 1; +} + +static void ustore_write(int uengine, u64 insn) +{ + /* + * Generate even parity for top and bottom 20 bits. + */ + insn |= (u64)make_even_parity((insn >> 20) & 0x000fffff) << 41; + insn |= (u64)make_even_parity(insn & 0x000fffff) << 40; + + /* + * Write to microstore. The second write auto-increments + * the USTORE_ADDRESS index register. + */ + ixp2000_uengine_csr_write(uengine, USTORE_DATA_LOWER, (u32)insn); + ixp2000_uengine_csr_write(uengine, USTORE_DATA_UPPER, (u32)(insn >> 32)); +} + +void ixp2000_uengine_load_microcode(int uengine, u8 *ucode, int insns) +{ + int i; + + /* + * Start writing to microstore at address 0. + */ + ixp2000_uengine_csr_write(uengine, USTORE_ADDRESS, 0x80000000); + for (i = 0; i < insns; i++) { + u64 insn; + + insn = (((u64)ucode[0]) << 32) | + (((u64)ucode[1]) << 24) | + (((u64)ucode[2]) << 16) | + (((u64)ucode[3]) << 8) | + ((u64)ucode[4]); + ucode += 5; + + ustore_write(uengine, insn); + } + + /* + * Pad with a few NOPs at the end (to avoid the microengine + * aborting as it prefetches beyond the last instruction), unless + * we run off the end of the instruction store first, at which + * point the address register will wrap back to zero. + */ + for (i = 0; i < 4; i++) { + u32 addr; + + addr = ixp2000_uengine_csr_read(uengine, USTORE_ADDRESS); + if (addr == 0x80000000) + break; + ustore_write(uengine, 0xf0000c0300ULL); + } + + /* + * End programming. + */ + ixp2000_uengine_csr_write(uengine, USTORE_ADDRESS, 0x00000000); +} +EXPORT_SYMBOL(ixp2000_uengine_load_microcode); + +void ixp2000_uengine_init_context(int uengine, int context, int pc) +{ + /* + * Select the right context for indirect access. 
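+ * (CSR_CTX_POINTER steers the INDIRECT_* registers used below at the
+ * chosen context, so the writes that follow affect only that context.)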
+ */ + ixp2000_uengine_csr_write(uengine, CSR_CTX_POINTER, context); + + /* + * Initialise signal masks to immediately go to Ready state. + */ + ixp2000_uengine_csr_write(uengine, INDIRECT_CTX_SIG_EVENTS, 1); + ixp2000_uengine_csr_write(uengine, INDIRECT_CTX_WAKEUP_EVENTS, 1); + + /* + * Set program counter. + */ + ixp2000_uengine_csr_write(uengine, INDIRECT_CTX_STS, pc); +} +EXPORT_SYMBOL(ixp2000_uengine_init_context); + +void ixp2000_uengine_start_contexts(int uengine, u8 ctx_mask) +{ + u32 mask; + + /* + * Enable the specified context to go to Executing state. + */ + mask = ixp2000_uengine_csr_read(uengine, CTX_ENABLES); + mask |= ctx_mask << 8; + ixp2000_uengine_csr_write(uengine, CTX_ENABLES, mask); +} +EXPORT_SYMBOL(ixp2000_uengine_start_contexts); + +void ixp2000_uengine_stop_contexts(int uengine, u8 ctx_mask) +{ + u32 mask; + + /* + * Disable the Ready->Executing transition. Note that this + * does not stop the context until it voluntarily yields. + */ + mask = ixp2000_uengine_csr_read(uengine, CTX_ENABLES); + mask &= ~(ctx_mask << 8); + ixp2000_uengine_csr_write(uengine, CTX_ENABLES, mask); +} +EXPORT_SYMBOL(ixp2000_uengine_stop_contexts); + +static int check_ixp_type(struct ixp2000_uengine_code *c) +{ + u32 product_id; + u32 rev; + + product_id = ixp2000_reg_read(IXP_PRODUCT_ID); + if (((product_id >> 16) & 0x1f) != 0) + return 0; + + switch ((product_id >> 8) & 0xff) { +#ifdef CONFIG_ARCH_IXP2000 + case 0: /* IXP2800 */ + if (!(c->cpu_model_bitmask & 4)) + return 0; + break; + + case 1: /* IXP2850 */ + if (!(c->cpu_model_bitmask & 8)) + return 0; + break; + + case 2: /* IXP2400 */ + if (!(c->cpu_model_bitmask & 2)) + return 0; + break; +#endif + +#ifdef CONFIG_ARCH_IXP23XX + case 4: /* IXP23xx */ + if (!(c->cpu_model_bitmask & 0x3f0)) + return 0; + break; +#endif + + default: + return 0; + } + + rev = product_id & 0xff; + if (rev < c->cpu_min_revision || rev > c->cpu_max_revision) + return 0; + + return 1; +} + +static void generate_ucode(u8 *ucode, u32 *gpr_a, u32 *gpr_b) +{ + int offset; + int i; + + offset = 0; + + for (i = 0; i < 128; i++) { + u8 b3; + u8 b2; + u8 b1; + u8 b0; + + b3 = (gpr_a[i] >> 24) & 0xff; + b2 = (gpr_a[i] >> 16) & 0xff; + b1 = (gpr_a[i] >> 8) & 0xff; + b0 = gpr_a[i] & 0xff; + + /* immed[@ai, (b1 << 8) | b0] */ + /* 11110000 0000VVVV VVVV11VV VVVVVV00 1IIIIIII */ + ucode[offset++] = 0xf0; + ucode[offset++] = (b1 >> 4); + ucode[offset++] = (b1 << 4) | 0x0c | (b0 >> 6); + ucode[offset++] = (b0 << 2); + ucode[offset++] = 0x80 | i; + + /* immed_w1[@ai, (b3 << 8) | b2] */ + /* 11110100 0100VVVV VVVV11VV VVVVVV00 1IIIIIII */ + ucode[offset++] = 0xf4; + ucode[offset++] = 0x40 | (b3 >> 4); + ucode[offset++] = (b3 << 4) | 0x0c | (b2 >> 6); + ucode[offset++] = (b2 << 2); + ucode[offset++] = 0x80 | i; + } + + for (i = 0; i < 128; i++) { + u8 b3; + u8 b2; + u8 b1; + u8 b0; + + b3 = (gpr_b[i] >> 24) & 0xff; + b2 = (gpr_b[i] >> 16) & 0xff; + b1 = (gpr_b[i] >> 8) & 0xff; + b0 = gpr_b[i] & 0xff; + + /* immed[@bi, (b1 << 8) | b0] */ + /* 11110000 0000VVVV VVVV001I IIIIII11 VVVVVVVV */ + ucode[offset++] = 0xf0; + ucode[offset++] = (b1 >> 4); + ucode[offset++] = (b1 << 4) | 0x02 | (i >> 6); + ucode[offset++] = (i << 2) | 0x03; + ucode[offset++] = b0; + + /* immed_w1[@bi, (b3 << 8) | b2] */ + /* 11110100 0100VVVV VVVV001I IIIIII11 VVVVVVVV */ + ucode[offset++] = 0xf4; + ucode[offset++] = 0x40 | (b3 >> 4); + ucode[offset++] = (b3 << 4) | 0x02 | (i >> 6); + ucode[offset++] = (i << 2) | 0x03; + ucode[offset++] = b2; + } + + /* ctx_arb[kill] */ + ucode[offset++] = 0xe0; + 
ucode[offset++] = 0x00; + ucode[offset++] = 0x01; + ucode[offset++] = 0x00; + ucode[offset++] = 0x00; +} + +static int set_initial_registers(int uengine, struct ixp2000_uengine_code *c) +{ + int per_ctx_regs; + u32 *gpr_a; + u32 *gpr_b; + u8 *ucode; + int i; + + gpr_a = kzalloc(128 * sizeof(u32), GFP_KERNEL); + gpr_b = kzalloc(128 * sizeof(u32), GFP_KERNEL); + ucode = kmalloc(513 * 5, GFP_KERNEL); + if (gpr_a == NULL || gpr_b == NULL || ucode == NULL) { + kfree(ucode); + kfree(gpr_b); + kfree(gpr_a); + return 1; + } + + per_ctx_regs = 16; + if (c->uengine_parameters & IXP2000_UENGINE_4_CONTEXTS) + per_ctx_regs = 32; + + for (i = 0; i < 256; i++) { + struct ixp2000_reg_value *r = c->initial_reg_values + i; + u32 *bank; + int inc; + int j; + + if (r->reg == -1) + break; + + bank = (r->reg & 0x400) ? gpr_b : gpr_a; + inc = (r->reg & 0x80) ? 128 : per_ctx_regs; + + j = r->reg & 0x7f; + while (j < 128) { + bank[j] = r->value; + j += inc; + } + } + + generate_ucode(ucode, gpr_a, gpr_b); + ixp2000_uengine_load_microcode(uengine, ucode, 513); + ixp2000_uengine_init_context(uengine, 0, 0); + ixp2000_uengine_start_contexts(uengine, 0x01); + for (i = 0; i < 100; i++) { + u32 status; + + status = ixp2000_uengine_csr_read(uengine, ACTIVE_CTX_STS); + if (!(status & 0x80000000)) + break; + } + ixp2000_uengine_stop_contexts(uengine, 0x01); + + kfree(ucode); + kfree(gpr_b); + kfree(gpr_a); + + return !!(i == 100); +} + +int ixp2000_uengine_load(int uengine, struct ixp2000_uengine_code *c) +{ + int ctx; + + if (!check_ixp_type(c)) + return 1; + + if (!(ixp2000_uengine_mask & (1 << uengine))) + return 1; + + ixp2000_uengine_reset(1 << uengine); + ixp2000_uengine_set_mode(uengine, c->uengine_parameters); + if (set_initial_registers(uengine, c)) + return 1; + ixp2000_uengine_load_microcode(uengine, c->insns, c->num_insns); + + for (ctx = 0; ctx < 8; ctx++) + ixp2000_uengine_init_context(uengine, ctx, 0); + + return 0; +} +EXPORT_SYMBOL(ixp2000_uengine_load); + + +static int __init ixp2000_uengine_init(void) +{ + int uengine; + u32 value; + + /* + * Determine number of microengines present. + */ + switch ((ixp2000_reg_read(IXP_PRODUCT_ID) >> 8) & 0x1fff) { +#ifdef CONFIG_ARCH_IXP2000 + case 0: /* IXP2800 */ + case 1: /* IXP2850 */ + ixp2000_uengine_mask = 0x00ff00ff; + break; + + case 2: /* IXP2400 */ + ixp2000_uengine_mask = 0x000f000f; + break; +#endif + +#ifdef CONFIG_ARCH_IXP23XX + case 4: /* IXP23xx */ + ixp2000_uengine_mask = (*IXP23XX_EXP_CFG_FUSE >> 8) & 0xf; + break; +#endif + + default: + printk(KERN_INFO "Detected unknown IXP2000 model (%.8x)\n", + (unsigned int)ixp2000_reg_read(IXP_PRODUCT_ID)); + ixp2000_uengine_mask = 0x00000000; + break; + } + + /* + * Reset microengines. + */ + ixp2000_uengine_reset(ixp2000_uengine_mask); + + /* + * Synchronise timestamp counters across all microengines. 
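+ * (Judging from the code below, bit 7 of MISC_CONTROL gates these
+ * counters: it is cleared while each engine's TIMESTAMP registers are
+ * zeroed, then set again so all counters restart in lockstep.)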
+ */ + value = ixp2000_reg_read(IXP_MISC_CONTROL); + ixp2000_reg_wrb(IXP_MISC_CONTROL, value & ~0x80); + for (uengine = 0; uengine < 32; uengine++) { + if (ixp2000_uengine_mask & (1 << uengine)) { + ixp2000_uengine_csr_write(uengine, TIMESTAMP_LOW, 0); + ixp2000_uengine_csr_write(uengine, TIMESTAMP_HIGH, 0); + } + } + ixp2000_reg_wrb(IXP_MISC_CONTROL, value | 0x80); + + return 0; +} + +subsys_initcall(ixp2000_uengine_init); diff --git a/ANDROID_3.4.5/arch/arm/common/via82c505.c b/ANDROID_3.4.5/arch/arm/common/via82c505.c new file mode 100644 index 00000000..1171a501 --- /dev/null +++ b/ANDROID_3.4.5/arch/arm/common/via82c505.c @@ -0,0 +1,92 @@ +#include <linux/kernel.h> +#include <linux/pci.h> +#include <linux/interrupt.h> +#include <linux/mm.h> +#include <linux/init.h> +#include <linux/ioport.h> +#include <linux/io.h> + + +#include <asm/mach/pci.h> + +#define MAX_SLOTS 7 + +#define CONFIG_CMD(bus, devfn, where) (0x80000000 | (bus->number << 16) | (devfn << 8) | (where & ~3)) + +static int +via82c505_read_config(struct pci_bus *bus, unsigned int devfn, int where, + int size, u32 *value) +{ + outl(CONFIG_CMD(bus,devfn,where),0xCF8); + switch (size) { + case 1: + *value=inb(0xCFC + (where&3)); + break; + case 2: + *value=inw(0xCFC + (where&2)); + break; + case 4: + *value=inl(0xCFC); + break; + } + return PCIBIOS_SUCCESSFUL; +} + +static int +via82c505_write_config(struct pci_bus *bus, unsigned int devfn, int where, + int size, u32 value) +{ + outl(CONFIG_CMD(bus,devfn,where),0xCF8); + switch (size) { + case 1: + outb(value, 0xCFC + (where&3)); + break; + case 2: + outw(value, 0xCFC + (where&2)); + break; + case 4: + outl(value, 0xCFC); + break; + } + return PCIBIOS_SUCCESSFUL; +} + +static struct pci_ops via82c505_ops = { + .read = via82c505_read_config, + .write = via82c505_write_config, +}; + +void __init via82c505_preinit(void) +{ + printk(KERN_DEBUG "PCI: VIA 82c505\n"); + if (!request_region(0xA8,2,"via config")) { + printk(KERN_WARNING"VIA 82c505: Unable to request region 0xA8\n"); + return; + } + if (!request_region(0xCF8,8,"pci config")) { + printk(KERN_WARNING"VIA 82c505: Unable to request region 0xCF8\n"); + release_region(0xA8, 2); + return; + } + + /* Enable compatible Mode */ + outb(0x96,0xA8); + outb(0x18,0xA9); + outb(0x93,0xA8); + outb(0xd0,0xA9); + +} + +int __init via82c505_setup(int nr, struct pci_sys_data *sys) +{ + return (nr == 0); +} + +struct pci_bus * __init via82c505_scan_bus(int nr, struct pci_sys_data *sysdata) +{ + if (nr == 0) + return pci_scan_root_bus(NULL, 0, &via82c505_ops, sysdata, + &sysdata->resources); + + return NULL; +} diff --git a/ANDROID_3.4.5/arch/arm/common/vic.c b/ANDROID_3.4.5/arch/arm/common/vic.c new file mode 100644 index 00000000..7e288f96 --- /dev/null +++ b/ANDROID_3.4.5/arch/arm/common/vic.c @@ -0,0 +1,460 @@ +/* + * linux/arch/arm/common/vic.c + * + * Copyright (C) 1999 - 2003 ARM Limited + * Copyright (C) 2000 Deep Blue Solutions Ltd + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/io.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/syscore_ops.h>
+#include <linux/device.h>
+#include <linux/amba/bus.h>
+
+#include <asm/exception.h>
+#include <asm/mach/irq.h>
+#include <asm/hardware/vic.h>
+
+/**
+ * struct vic_device - VIC PM device
+ * @irq: The IRQ number for the base of the VIC.
+ * @base: The register base for the VIC.
+ * @resume_sources: A bitmask of interrupts for resume.
+ * @resume_irqs: The IRQs enabled for resume.
+ * @int_select: Save for VIC_INT_SELECT.
+ * @int_enable: Save for VIC_INT_ENABLE.
+ * @soft_int: Save for VIC_INT_SOFT.
+ * @protect: Save for VIC_PROTECT.
+ * @domain: The IRQ domain for the VIC.
+ */
+struct vic_device {
+	void __iomem	*base;
+	int		irq;
+	u32		resume_sources;
+	u32		resume_irqs;
+	u32		int_select;
+	u32		int_enable;
+	u32		soft_int;
+	u32		protect;
+	struct irq_domain *domain;
+};
+
+/* we cannot allocate memory when VICs are initially registered */
+static struct vic_device vic_devices[CONFIG_ARM_VIC_NR];
+
+static int vic_id;
+
+/**
+ * vic_init2 - common initialisation code
+ * @base: Base of the VIC.
+ *
+ * Common initialisation code for registration
+ * and resume.
+*/
+static void vic_init2(void __iomem *base)
+{
+	int i;
+
+	for (i = 0; i < 16; i++) {
+		void __iomem *reg = base + VIC_VECT_CNTL0 + (i * 4);
+		writel(VIC_VECT_CNTL_ENABLE | i, reg);
+	}
+
+	writel(32, base + VIC_PL190_DEF_VECT_ADDR);
+}
+
+#ifdef CONFIG_PM
+static void resume_one_vic(struct vic_device *vic)
+{
+	void __iomem *base = vic->base;
+
+	printk(KERN_DEBUG "%s: resuming vic at %p\n", __func__, base);
+
+	/* re-initialise static settings */
+	vic_init2(base);
+
+	writel(vic->int_select, base + VIC_INT_SELECT);
+	writel(vic->protect, base + VIC_PROTECT);
+
+	/* set the enabled ints and then clear the non-enabled */
+	writel(vic->int_enable, base + VIC_INT_ENABLE);
+	writel(~vic->int_enable, base + VIC_INT_ENABLE_CLEAR);
+
+	/* and the same for the soft-int register */
+
+	writel(vic->soft_int, base + VIC_INT_SOFT);
+	writel(~vic->soft_int, base + VIC_INT_SOFT_CLEAR);
+}
+
+static void vic_resume(void)
+{
+	int id;
+
+	for (id = vic_id - 1; id >= 0; id--)
+		resume_one_vic(vic_devices + id);
+}
+
+static void suspend_one_vic(struct vic_device *vic)
+{
+	void __iomem *base = vic->base;
+
+	printk(KERN_DEBUG "%s: suspending vic at %p\n", __func__, base);
+
+	vic->int_select = readl(base + VIC_INT_SELECT);
+	vic->int_enable = readl(base + VIC_INT_ENABLE);
+	vic->soft_int = readl(base + VIC_INT_SOFT);
+	vic->protect = readl(base + VIC_PROTECT);
+
+	/* set the interrupts (if any) that are used for
+	 * resuming the system */
+
+	writel(vic->resume_irqs, base + VIC_INT_ENABLE);
+	writel(~vic->resume_irqs, base + VIC_INT_ENABLE_CLEAR);
+}
+
+static int vic_suspend(void)
+{
+	int id;
+
+	for (id = 0; id < vic_id; id++)
+		suspend_one_vic(vic_devices + id);
+
+	return 0;
+}
+
+struct syscore_ops vic_syscore_ops = {
+	.suspend	= vic_suspend,
+	.resume		= vic_resume,
+};
+
+/**
+ * vic_pm_init - initcall to register VIC pm
+ *
+ * This is called via late_initcall() to register
+ * the resources for the VICs due to the early
+ * nature of VIC registration.
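+ * (Nothing is registered when vic_id is still zero, i.e. when no VIC
+ * was ever set up.)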
+*/ +static int __init vic_pm_init(void) +{ + if (vic_id > 0) + register_syscore_ops(&vic_syscore_ops); + + return 0; +} +late_initcall(vic_pm_init); +#endif /* CONFIG_PM */ + +/** + * vic_register() - Register a VIC. + * @base: The base address of the VIC. + * @irq: The base IRQ for the VIC. + * @resume_sources: bitmask of interrupts allowed for resume sources. + * @node: The device tree node associated with the VIC. + * + * Register the VIC with the system device tree so that it can be notified + * of suspend and resume requests and ensure that the correct actions are + * taken to re-instate the settings on resume. + * + * This also configures the IRQ domain for the VIC. + */ +static void __init vic_register(void __iomem *base, unsigned int irq, + u32 resume_sources, struct device_node *node) +{ + struct vic_device *v; + + if (vic_id >= ARRAY_SIZE(vic_devices)) { + printk(KERN_ERR "%s: too few VICs, increase CONFIG_ARM_VIC_NR\n", __func__); + return; + } + + v = &vic_devices[vic_id]; + v->base = base; + v->resume_sources = resume_sources; + v->irq = irq; + vic_id++; + v->domain = irq_domain_add_legacy(node, 32, irq, 0, + &irq_domain_simple_ops, v); +} + +static void vic_ack_irq(struct irq_data *d) +{ + void __iomem *base = irq_data_get_irq_chip_data(d); + unsigned int irq = d->hwirq; + writel(1 << irq, base + VIC_INT_ENABLE_CLEAR); + /* moreover, clear the soft-triggered, in case it was the reason */ + writel(1 << irq, base + VIC_INT_SOFT_CLEAR); +} + +static void vic_mask_irq(struct irq_data *d) +{ + void __iomem *base = irq_data_get_irq_chip_data(d); + unsigned int irq = d->hwirq; + writel(1 << irq, base + VIC_INT_ENABLE_CLEAR); +} + +static void vic_unmask_irq(struct irq_data *d) +{ + void __iomem *base = irq_data_get_irq_chip_data(d); + unsigned int irq = d->hwirq; + writel(1 << irq, base + VIC_INT_ENABLE); +} + +#if defined(CONFIG_PM) +static struct vic_device *vic_from_irq(unsigned int irq) +{ + struct vic_device *v = vic_devices; + unsigned int base_irq = irq & ~31; + int id; + + for (id = 0; id < vic_id; id++, v++) { + if (v->irq == base_irq) + return v; + } + + return NULL; +} + +static int vic_set_wake(struct irq_data *d, unsigned int on) +{ + struct vic_device *v = vic_from_irq(d->irq); + unsigned int off = d->hwirq; + u32 bit = 1 << off; + + if (!v) + return -EINVAL; + + if (!(bit & v->resume_sources)) + return -EINVAL; + + if (on) + v->resume_irqs |= bit; + else + v->resume_irqs &= ~bit; + + return 0; +} +#else +#define vic_set_wake NULL +#endif /* CONFIG_PM */ + +static struct irq_chip vic_chip = { + .name = "VIC", + .irq_ack = vic_ack_irq, + .irq_mask = vic_mask_irq, + .irq_unmask = vic_unmask_irq, + .irq_set_wake = vic_set_wake, +}; + +static void __init vic_disable(void __iomem *base) +{ + writel(0, base + VIC_INT_SELECT); + writel(0, base + VIC_INT_ENABLE); + writel(~0, base + VIC_INT_ENABLE_CLEAR); + writel(0, base + VIC_ITCR); + writel(~0, base + VIC_INT_SOFT_CLEAR); +} + +static void __init vic_clear_interrupts(void __iomem *base) +{ + unsigned int i; + + writel(0, base + VIC_PL190_VECT_ADDR); + for (i = 0; i < 19; i++) { + unsigned int value; + + value = readl(base + VIC_PL190_VECT_ADDR); + writel(value, base + VIC_PL190_VECT_ADDR); + } +} + +static void __init vic_set_irq_sources(void __iomem *base, + unsigned int irq_start, u32 vic_sources) +{ + unsigned int i; + + for (i = 0; i < 32; i++) { + if (vic_sources & (1 << i)) { + unsigned int irq = irq_start + i; + + irq_set_chip_and_handler(irq, &vic_chip, + handle_level_irq); + irq_set_chip_data(irq, base); + 
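+			/*
+			 * Mark the descriptor usable and eligible for
+			 * autoprobing now that the chip, handler and
+			 * chip data are all in place.
+			 */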
+			set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+		}
+	}
+}
+
+/*
+ * The PL190 cell from ARM has been modified by ST to handle 64 interrupts.
+ * The original cell has 32 interrupts, while the modified one has 64,
+ * replicating two blocks 0x00..0x1f in 0x20..0x3f. In that case
+ * the probe function is called twice, with base set to offset 000
+ * and 020 within the page. We call this the "second block".
+ */
+static void __init vic_init_st(void __iomem *base, unsigned int irq_start,
+			       u32 vic_sources, struct device_node *node)
+{
+	unsigned int i;
+	int vic_2nd_block = ((unsigned long)base & ~PAGE_MASK) != 0;
+
+	/* Disable all interrupts initially. */
+	vic_disable(base);
+
+	/*
+	 * Make sure we clear all existing interrupts. The vector registers
+	 * in this cell are after the second block of general registers,
+	 * so we can address them using standard offsets, but only from
+	 * the second base address, which is 0x20 in the page.
+	 */
+	if (vic_2nd_block) {
+		vic_clear_interrupts(base);
+
+		/* ST has 16 vectors as well, but we don't enable them for now */
+		for (i = 0; i < 16; i++) {
+			void __iomem *reg = base + VIC_VECT_CNTL0 + (i * 4);
+			writel(0, reg);
+		}
+
+		writel(32, base + VIC_PL190_DEF_VECT_ADDR);
+	}
+
+	vic_set_irq_sources(base, irq_start, vic_sources);
+	vic_register(base, irq_start, 0, node);
+}
+
+void __init __vic_init(void __iomem *base, unsigned int irq_start,
+		       u32 vic_sources, u32 resume_sources,
+		       struct device_node *node)
+{
+	unsigned int i;
+	u32 cellid = 0;
+	enum amba_vendor vendor;
+
+	/* Identify which VIC cell this one is, by reading the ID */
+	for (i = 0; i < 4; i++) {
+		void __iomem *addr;
+		addr = (void __iomem *)((u32)base & PAGE_MASK) + 0xfe0 + (i * 4);
+		cellid |= (readl(addr) & 0xff) << (8 * i);
+	}
+	vendor = (cellid >> 12) & 0xff;
+	printk(KERN_INFO "VIC @%p: id 0x%08x, vendor 0x%02x\n",
+	       base, cellid, vendor);
+
+	switch (vendor) {
+	case AMBA_VENDOR_ST:
+		vic_init_st(base, irq_start, vic_sources, node);
+		return;
+	default:
+		printk(KERN_WARNING "VIC: unknown vendor, continuing anyway\n");
+		/* fall through */
+	case AMBA_VENDOR_ARM:
+		break;
+	}
+
+	/* Disable all interrupts initially. */
+	vic_disable(base);
+
+	/* Make sure we clear all existing interrupts */
+	vic_clear_interrupts(base);
+
+	vic_init2(base);
+
+	vic_set_irq_sources(base, irq_start, vic_sources);
+
+	vic_register(base, irq_start, resume_sources, node);
+}
+
+/**
+ * vic_init() - initialise a vectored interrupt controller
+ * @base: iomem base address
+ * @irq_start: starting interrupt number, must be a multiple of 32
+ * @vic_sources: bitmask of interrupt sources to allow
+ * @resume_sources: bitmask of interrupt sources to allow for resume
+ */
+void __init vic_init(void __iomem *base, unsigned int irq_start,
+		     u32 vic_sources, u32 resume_sources)
+{
+	__vic_init(base, irq_start, vic_sources, resume_sources, NULL);
+}
+
+#ifdef CONFIG_OF
+int __init vic_of_init(struct device_node *node, struct device_node *parent)
+{
+	void __iomem *regs;
+	int irq_base;
+
+	if (WARN(parent, "non-root VICs are not supported"))
+		return -EINVAL;
+
+	regs = of_iomap(node, 0);
+	if (WARN_ON(!regs))
+		return -EIO;
+
+	irq_base = irq_alloc_descs(-1, 0, 32, numa_node_id());
+	if (WARN_ON(irq_base < 0))
+		goto out_unmap;
+
+	__vic_init(regs, irq_base, ~0, ~0, node);
+
+	return 0;
+
+ out_unmap:
+	iounmap(regs);
+
+	return -EIO;
+}
+#endif /* CONFIG_OF */
+
+/*
+ * Handle each interrupt in a single VIC. Returns non-zero if we've
+ * handled at least one interrupt. This reads the status register
+ * before handling each interrupt, which is necessary given that
+ * handle_IRQ may briefly re-enable interrupts for soft IRQ handling.
+ */
+static int handle_one_vic(struct vic_device *vic, struct pt_regs *regs)
+{
+	u32 stat, irq;
+	int handled = 0;
+
+	while ((stat = readl_relaxed(vic->base + VIC_IRQ_STATUS))) {
+		irq = ffs(stat) - 1;
+		handle_IRQ(irq_find_mapping(vic->domain, irq), regs);
+		handled = 1;
+	}
+
+	return handled;
+}
+
+/*
+ * Keep iterating over all registered VICs until there are no pending
+ * interrupts.
+ */
+asmlinkage void __exception_irq_entry vic_handle_irq(struct pt_regs *regs)
+{
+	int i, handled;
+
+	do {
+		for (i = 0, handled = 0; i < vic_id; ++i)
+			handled |= handle_one_vic(&vic_devices[i], regs);
+	} while (handled);
+}
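
Editor's note: the dispatch pattern in handle_one_vic()/vic_handle_irq() above is easy to model outside the kernel. The sketch below is a minimal, self-contained user-space rendition of the same loop: a hypothetical struct fake_vic stands in for a VIC, its pending bitmask plays the role of VIC_IRQ_STATUS, and ffs() selects the lowest pending source exactly as the real handler does. Every name in it (fake_vic, handle_one_fake_vic, dispatch, and so on) is invented for illustration; this is not kernel code.

/*
 * Minimal user-space model of the VIC dispatch loop above.  The
 * fake_vic struct and its pending mask stand in for the real
 * VIC_IRQ_STATUS register; all names here are illustrative only.
 */
#include <stdio.h>
#include <strings.h>	/* ffs() */

#define NR_FAKE_VICS 2

struct fake_vic {
	unsigned int pending;	/* models VIC_IRQ_STATUS */
	unsigned int irq_base;	/* models vic_device.irq */
};

static struct fake_vic vics[NR_FAKE_VICS] = {
	{ .pending = 0x00000005, .irq_base = 0 },	/* sources 0 and 2 */
	{ .pending = 0x80000000, .irq_base = 32 },	/* source 31 */
};

static void dispatch(unsigned int irq)
{
	printf("dispatch irq %u\n", irq);	/* stands in for handle_IRQ() */
}

/*
 * Mirrors handle_one_vic(): take the lowest pending source each pass,
 * re-reading the status so sources raised mid-loop are not missed.
 */
static int handle_one_fake_vic(struct fake_vic *vic)
{
	unsigned int stat;
	int handled = 0;

	while ((stat = vic->pending) != 0) {
		int src = ffs(stat) - 1;

		vic->pending &= ~(1u << src);	/* "ack" the source */
		dispatch(vic->irq_base + src);
		handled = 1;
	}

	return handled;
}

int main(void)
{
	int i, handled;

	/*
	 * Mirrors vic_handle_irq(): keep sweeping every controller
	 * until one full pass finds nothing pending.
	 */
	do {
		for (i = 0, handled = 0; i < NR_FAKE_VICS; i++)
			handled |= handle_one_fake_vic(&vics[i]);
	} while (handled);

	return 0;
}

Run as-is, it dispatches irqs 0 and 2 from the first controller and irq 63 from the second, then exits once a full sweep finds nothing pending, mirroring the do/while loop in vic_handle_irq().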