path: root/arch/blackfin/kernel/cplb-mpu
author		Srikant Patnaik	2015-01-11 12:28:04 +0530
committer	Srikant Patnaik	2015-01-11 12:28:04 +0530
commit		871480933a1c28f8a9fed4c4d34d06c439a7a422 (patch)
tree		8718f573808810c2a1e8cb8fb6ac469093ca2784 /arch/blackfin/kernel/cplb-mpu
parent		9d40ac5867b9aefe0722bc1f110b965ff294d30d (diff)
download	FOSSEE-netbook-kernel-source-871480933a1c28f8a9fed4c4d34d06c439a7a422.tar.gz
		FOSSEE-netbook-kernel-source-871480933a1c28f8a9fed4c4d34d06c439a7a422.tar.bz2
		FOSSEE-netbook-kernel-source-871480933a1c28f8a9fed4c4d34d06c439a7a422.zip
Moved, renamed, and deleted files
The original directory structure was scattered and unorganized. These changes reorganize it to match the standard kernel source layout.
Diffstat (limited to 'arch/blackfin/kernel/cplb-mpu')
-rw-r--r--	arch/blackfin/kernel/cplb-mpu/Makefile		 10
-rw-r--r--	arch/blackfin/kernel/cplb-mpu/cplbinit.c	102
-rw-r--r--	arch/blackfin/kernel/cplb-mpu/cplbmgr.c		379
3 files changed, 491 insertions, 0 deletions
diff --git a/arch/blackfin/kernel/cplb-mpu/Makefile b/arch/blackfin/kernel/cplb-mpu/Makefile
new file mode 100644
index 00000000..394d0b1b
--- /dev/null
+++ b/arch/blackfin/kernel/cplb-mpu/Makefile
@@ -0,0 +1,10 @@
+#
+# arch/blackfin/kernel/cplb-mpu/Makefile
+#
+
+obj-y := cplbinit.o cplbmgr.o
+
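+# cplbmgr.c runs in exception context where the DAG registers (I/L/M/B)
+# of the interrupted code are still live, so tell the compiler not to
+# touch them (see the warning at the top of cplbmgr.c).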
+CFLAGS_cplbmgr.o := -ffixed-I0 -ffixed-I1 -ffixed-I2 -ffixed-I3 \
+ -ffixed-L0 -ffixed-L1 -ffixed-L2 -ffixed-L3 \
+ -ffixed-M0 -ffixed-M1 -ffixed-M2 -ffixed-M3 \
+ -ffixed-B0 -ffixed-B1 -ffixed-B2 -ffixed-B3
diff --git a/arch/blackfin/kernel/cplb-mpu/cplbinit.c b/arch/blackfin/kernel/cplb-mpu/cplbinit.c
new file mode 100644
index 00000000..c15fd05f
--- /dev/null
+++ b/arch/blackfin/kernel/cplb-mpu/cplbinit.c
@@ -0,0 +1,102 @@
+/*
+ * Blackfin CPLB initialization
+ *
+ * Copyright 2008-2009 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/module.h>
+
+#include <asm/blackfin.h>
+#include <asm/cplb.h>
+#include <asm/cplbinit.h>
+#include <asm/mem_map.h>
+
+struct cplb_entry icplb_tbl[NR_CPUS][MAX_CPLBS];
+struct cplb_entry dcplb_tbl[NR_CPUS][MAX_CPLBS];
+
+int first_switched_icplb, first_switched_dcplb;
+int first_mask_dcplb;
+
+void __init generate_cplb_tables_cpu(unsigned int cpu)
+{
+ int i_d, i_i;
+ unsigned long addr;
+ unsigned long d_data, i_data;
+ unsigned long d_cache = 0, i_cache = 0;
+
+ printk(KERN_INFO "MPU: setting up cplb tables with memory protection\n");
+
+#ifdef CONFIG_BFIN_EXTMEM_ICACHEABLE
+ i_cache = CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
+#endif
+
+#ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
+ d_cache = CPLB_L1_CHBL;
+#ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH
+ d_cache |= CPLB_L1_AOW | CPLB_WT;
+#endif
+#endif
+
+ i_d = i_i = 0;
+
+ /* Set up the zero page. */
+ dcplb_tbl[cpu][i_d].addr = 0;
+ dcplb_tbl[cpu][i_d++].data = SDRAM_OOPS | PAGE_SIZE_1KB;
+
+ icplb_tbl[cpu][i_i].addr = 0;
+ icplb_tbl[cpu][i_i++].data = CPLB_VALID | i_cache | CPLB_USER_RD | PAGE_SIZE_1KB;
+
+ /* Cover kernel memory with 4M pages. */
+ addr = 0;
+ d_data = d_cache | CPLB_SUPV_WR | CPLB_VALID | PAGE_SIZE_4MB | CPLB_DIRTY;
+ i_data = i_cache | CPLB_VALID | CPLB_PORTPRIO | PAGE_SIZE_4MB;
+
+ for (; addr < memory_start; addr += 4 * 1024 * 1024) {
+ dcplb_tbl[cpu][i_d].addr = addr;
+ dcplb_tbl[cpu][i_d++].data = d_data;
+ icplb_tbl[cpu][i_i].addr = addr;
+ icplb_tbl[cpu][i_i++].data = i_data | (addr == 0 ? CPLB_USER_RD : 0);
+ }
+
+#ifdef CONFIG_ROMKERNEL
+ /* Cover kernel XIP flash area */
+ addr = CONFIG_ROM_BASE & ~(4 * 1024 * 1024 - 1);
+ dcplb_tbl[cpu][i_d].addr = addr;
+ dcplb_tbl[cpu][i_d++].data = d_data | CPLB_USER_RD;
+ icplb_tbl[cpu][i_i].addr = addr;
+ icplb_tbl[cpu][i_i++].data = i_data | CPLB_USER_RD;
+#endif
+
+ /* Cover L1 memory. One 4M area each for code and data is enough. */
+#if L1_DATA_A_LENGTH > 0 || L1_DATA_B_LENGTH > 0
+ dcplb_tbl[cpu][i_d].addr = get_l1_data_a_start_cpu(cpu);
+ dcplb_tbl[cpu][i_d++].data = L1_DMEMORY | PAGE_SIZE_4MB;
+#endif
+#if L1_CODE_LENGTH > 0
+ icplb_tbl[cpu][i_i].addr = get_l1_code_start_cpu(cpu);
+ icplb_tbl[cpu][i_i++].data = L1_IMEMORY | PAGE_SIZE_4MB;
+#endif
+
+ /* Cover L2 memory */
+#if L2_LENGTH > 0
+ dcplb_tbl[cpu][i_d].addr = L2_START;
+ dcplb_tbl[cpu][i_d++].data = L2_DMEMORY;
+ icplb_tbl[cpu][i_i].addr = L2_START;
+ icplb_tbl[cpu][i_i++].data = L2_IMEMORY;
+#endif
+
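+ /*
+ * Table layout: entries below first_mask_dcplb are the fixed kernel
+ * mappings set up above, the next (1 << page_mask_order) slots are
+ * reserved for the page-permission mask pages (see set_mask_dcplbs()
+ * in cplbmgr.c), and everything from first_switched_dcplb upwards is
+ * replaced on demand by the miss handlers.
+ */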
+ first_mask_dcplb = i_d;
+ first_switched_dcplb = i_d + (1 << page_mask_order);
+ first_switched_icplb = i_i;
+
+ while (i_d < MAX_CPLBS)
+ dcplb_tbl[cpu][i_d++].data = 0;
+ while (i_i < MAX_CPLBS)
+ icplb_tbl[cpu][i_i++].data = 0;
+}
+
+void __init generate_cplb_tables_all(void)
+{
+}
diff --git a/arch/blackfin/kernel/cplb-mpu/cplbmgr.c b/arch/blackfin/kernel/cplb-mpu/cplbmgr.c
new file mode 100644
index 00000000..b56bd851
--- /dev/null
+++ b/arch/blackfin/kernel/cplb-mpu/cplbmgr.c
@@ -0,0 +1,379 @@
+/*
+ * Blackfin CPLB exception handling for when the MPU is on
+ *
+ * Copyright 2008-2009 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/module.h>
+#include <linux/mm.h>
+
+#include <asm/blackfin.h>
+#include <asm/cacheflush.h>
+#include <asm/cplb.h>
+#include <asm/cplbinit.h>
+#include <asm/mmu_context.h>
+
+/*
+ * WARNING
+ *
+ * This file is compiled with certain -ffixed-reg options. We have to
+ * make sure not to call any functions here that could clobber these
+ * registers.
+ */
+
+int page_mask_nelts;
+int page_mask_order;
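+/*
+ * current_rwx_mask points at the current process's page-permission
+ * bitmaps: read bits first, write bits at +page_mask_nelts words and
+ * execute bits at +2 * page_mask_nelts words, one bit per page.
+ */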
+unsigned long *current_rwx_mask[NR_CPUS];
+
+int nr_dcplb_miss[NR_CPUS], nr_icplb_miss[NR_CPUS];
+int nr_icplb_supv_miss[NR_CPUS], nr_dcplb_prot[NR_CPUS];
+int nr_cplb_flush[NR_CPUS];
+
+#ifdef CONFIG_EXCPT_IRQ_SYSC_L1
+#define MGR_ATTR __attribute__((l1_text))
+#else
+#define MGR_ATTR
+#endif
+
+/*
+ * Given the contents of the status register, return the index of the
+ * CPLB that caused the fault.
+ */
+static inline int faulting_cplb_index(int status)
+{
+ int signbits = __builtin_bfin_norm_fr1x32(status & 0xFFFF);
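+ /*
+ * The low 16 bits of the status register form a one-hot mask of the
+ * CPLB entry that matched; norm counts the redundant sign bits, so
+ * 30 - signbits is the position of that single set bit.
+ */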
+ return 30 - signbits;
+}
+
+/*
+ * Given the contents of the status register and the DCPLB_DATA contents,
+ * return true if a write access should be permitted.
+ */
+static inline int write_permitted(int status, unsigned long data)
+{
+ if (status & FAULT_USERSUPV)
+ return !!(data & CPLB_SUPV_WR);
+ else
+ return !!(data & CPLB_USER_WR);
+}
+
+/* Counters to implement round-robin replacement. */
+static int icplb_rr_index[NR_CPUS], dcplb_rr_index[NR_CPUS];
+
+/*
+ * Find an ICPLB entry to be evicted and return its index.
+ */
+MGR_ATTR static int evict_one_icplb(unsigned int cpu)
+{
+ int i;
+ for (i = first_switched_icplb; i < MAX_CPLBS; i++)
+ if ((icplb_tbl[cpu][i].data & CPLB_VALID) == 0)
+ return i;
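+ /* No invalid entry found: fall back to round-robin replacement,
+ * keeping the index within the switched-entry range. */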
+ i = first_switched_icplb + icplb_rr_index[cpu];
+ if (i >= MAX_CPLBS) {
+ i -= MAX_CPLBS - first_switched_icplb;
+ icplb_rr_index[cpu] -= MAX_CPLBS - first_switched_icplb;
+ }
+ icplb_rr_index[cpu]++;
+ return i;
+}
+
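+/*
+ * Find a DCPLB entry to be evicted and return its index.
+ */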
+MGR_ATTR static int evict_one_dcplb(unsigned int cpu)
+{
+ int i;
+ for (i = first_switched_dcplb; i < MAX_CPLBS; i++)
+ if ((dcplb_tbl[cpu][i].data & CPLB_VALID) == 0)
+ return i;
+ i = first_switched_dcplb + dcplb_rr_index[cpu];
+ if (i >= MAX_CPLBS) {
+ i -= MAX_CPLBS - first_switched_dcplb;
+ dcplb_rr_index[cpu] -= MAX_CPLBS - first_switched_dcplb;
+ }
+ dcplb_rr_index[cpu]++;
+ return i;
+}
+
+MGR_ATTR static noinline int dcplb_miss(unsigned int cpu)
+{
+ unsigned long addr = bfin_read_DCPLB_FAULT_ADDR();
+ int status = bfin_read_DCPLB_STATUS();
+ unsigned long *mask;
+ int idx;
+ unsigned long d_data;
+
+ nr_dcplb_miss[cpu]++;
+
+ d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
+#ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
+ if (bfin_addr_dcacheable(addr)) {
+ d_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
+# ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH
+ d_data |= CPLB_L1_AOW | CPLB_WT;
+# endif
+ }
+#endif
+
+ if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
+ addr = L2_START;
+ d_data = L2_DMEMORY;
+ } else if (addr >= physical_mem_end) {
+ if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE) {
+#if defined(CONFIG_ROMFS_ON_MTD) && defined(CONFIG_MTD_ROM)
+ mask = current_rwx_mask[cpu];
+ if (mask) {
+ int page = (addr - (ASYNC_BANK0_BASE - _ramend)) >> PAGE_SHIFT;
+ int idx = page >> 5;
+ int bit = 1 << (page & 31);
+
+ if (mask[idx] & bit)
+ d_data |= CPLB_USER_RD;
+ }
+#endif
+ } else if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH
+ && (status & (FAULT_RW | FAULT_USERSUPV)) == FAULT_USERSUPV) {
+ addr &= ~(1 * 1024 * 1024 - 1);
+ d_data &= ~PAGE_SIZE_4KB;
+ d_data |= PAGE_SIZE_1MB;
+ } else
+ return CPLB_PROT_VIOL;
+ } else if (addr >= _ramend) {
+ d_data |= CPLB_USER_RD | CPLB_USER_WR;
+ if (reserved_mem_dcache_on)
+ d_data |= CPLB_L1_CHBL;
+ } else {
+ mask = current_rwx_mask[cpu];
+ if (mask) {
+ int page = addr >> PAGE_SHIFT;
+ int idx = page >> 5;
+ int bit = 1 << (page & 31);
+
+ if (mask[idx] & bit)
+ d_data |= CPLB_USER_RD;
+
+ mask += page_mask_nelts;
+ if (mask[idx] & bit)
+ d_data |= CPLB_USER_WR;
+ }
+ }
+ idx = evict_one_dcplb(cpu);
+
+ addr &= PAGE_MASK;
+ dcplb_tbl[cpu][idx].addr = addr;
+ dcplb_tbl[cpu][idx].data = d_data;
+
+ _disable_dcplb();
+ bfin_write32(DCPLB_DATA0 + idx * 4, d_data);
+ bfin_write32(DCPLB_ADDR0 + idx * 4, addr);
+ _enable_dcplb();
+
+ return 0;
+}
+
+MGR_ATTR static noinline int icplb_miss(unsigned int cpu)
+{
+ unsigned long addr = bfin_read_ICPLB_FAULT_ADDR();
+ int status = bfin_read_ICPLB_STATUS();
+ int idx;
+ unsigned long i_data;
+
+ nr_icplb_miss[cpu]++;
+
+ /* If inside the uncached DMA region, fault. */
+ if (addr >= _ramend - DMA_UNCACHED_REGION && addr < _ramend)
+ return CPLB_PROT_VIOL;
+
+ if (status & FAULT_USERSUPV)
+ nr_icplb_supv_miss[cpu]++;
+
+ /*
+ * First, try to find a CPLB that matches this address. If we
+ * find one, then the fact that we're in the miss handler means
+ * that the instruction crosses a page boundary.
+ */
+ for (idx = first_switched_icplb; idx < MAX_CPLBS; idx++) {
+ if (icplb_tbl[cpu][idx].data & CPLB_VALID) {
+ unsigned long this_addr = icplb_tbl[cpu][idx].addr;
+ if (this_addr <= addr && this_addr + PAGE_SIZE > addr) {
+ addr += PAGE_SIZE;
+ break;
+ }
+ }
+ }
+
+ i_data = CPLB_VALID | CPLB_PORTPRIO | PAGE_SIZE_4KB;
+
+#ifdef CONFIG_BFIN_EXTMEM_ICACHEABLE
+ /*
+ * Normal RAM, and possibly the reserved memory area, are
+ * cacheable.
+ */
+ if (addr < _ramend ||
+ (addr < physical_mem_end && reserved_mem_icache_on))
+ i_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
+#endif
+
+ if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
+ addr = L2_START;
+ i_data = L2_IMEMORY;
+ } else if (addr >= physical_mem_end) {
+ if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE) {
+ if (!(status & FAULT_USERSUPV)) {
+ unsigned long *mask = current_rwx_mask[cpu];
+
+ if (mask) {
+ int page = (addr - (ASYNC_BANK0_BASE - _ramend)) >> PAGE_SHIFT;
+ int idx = page >> 5;
+ int bit = 1 << (page & 31);
+
+ mask += 2 * page_mask_nelts;
+ if (mask[idx] & bit)
+ i_data |= CPLB_USER_RD;
+ }
+ }
+ } else if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH
+ && (status & FAULT_USERSUPV)) {
+ addr &= ~(1 * 1024 * 1024 - 1);
+ i_data &= ~PAGE_SIZE_4KB;
+ i_data |= PAGE_SIZE_1MB;
+ } else
+ return CPLB_PROT_VIOL;
+ } else if (addr >= _ramend) {
+ i_data |= CPLB_USER_RD;
+ if (reserved_mem_icache_on)
+ i_data |= CPLB_L1_CHBL;
+ } else {
+ /*
+ * Two cases to distinguish - a supervisor access must
+ * necessarily be for a module page; we grant it
+ * unconditionally (could do better here in the future).
+ * Otherwise, check the x bitmap of the current process.
+ */
+ if (!(status & FAULT_USERSUPV)) {
+ unsigned long *mask = current_rwx_mask[cpu];
+
+ if (mask) {
+ int page = addr >> PAGE_SHIFT;
+ int idx = page >> 5;
+ int bit = 1 << (page & 31);
+
+ mask += 2 * page_mask_nelts;
+ if (mask[idx] & bit)
+ i_data |= CPLB_USER_RD;
+ }
+ }
+ }
+ idx = evict_one_icplb(cpu);
+ addr &= PAGE_MASK;
+ icplb_tbl[cpu][idx].addr = addr;
+ icplb_tbl[cpu][idx].data = i_data;
+
+ _disable_icplb();
+ bfin_write32(ICPLB_DATA0 + idx * 4, i_data);
+ bfin_write32(ICPLB_ADDR0 + idx * 4, addr);
+ _enable_icplb();
+
+ return 0;
+}
+
+MGR_ATTR static noinline int dcplb_protection_fault(unsigned int cpu)
+{
+ int status = bfin_read_DCPLB_STATUS();
+
+ nr_dcplb_prot[cpu]++;
+
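+ /*
+ * The only fault repaired here is the first write to a clean,
+ * write-back page by an access that is actually allowed to write:
+ * mark the entry dirty and retry.  Anything else is a real
+ * protection violation.
+ */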
+ if (status & FAULT_RW) {
+ int idx = faulting_cplb_index(status);
+ unsigned long data = dcplb_tbl[cpu][idx].data;
+ if (!(data & CPLB_WT) && !(data & CPLB_DIRTY) &&
+ write_permitted(status, data)) {
+ data |= CPLB_DIRTY;
+ dcplb_tbl[cpu][idx].data = data;
+ bfin_write32(DCPLB_DATA0 + idx * 4, data);
+ return 0;
+ }
+ }
+ return CPLB_PROT_VIOL;
+}
+
+MGR_ATTR int cplb_hdr(int seqstat, struct pt_regs *regs)
+{
+ int cause = seqstat & 0x3f;
+ unsigned int cpu = raw_smp_processor_id();
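+ /* EXCAUSE values: 0x23 DCPLB protection violation, 0x26 DCPLB miss,
+ * 0x2C ICPLB miss. */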
+ switch (cause) {
+ case 0x23:
+ return dcplb_protection_fault(cpu);
+ case 0x2C:
+ return icplb_miss(cpu);
+ case 0x26:
+ return dcplb_miss(cpu);
+ default:
+ return 1;
+ }
+}
+
+void flush_switched_cplbs(unsigned int cpu)
+{
+ int i;
+ unsigned long flags;
+
+ nr_cplb_flush[cpu]++;
+
+ flags = hard_local_irq_save();
+ _disable_icplb();
+ for (i = first_switched_icplb; i < MAX_CPLBS; i++) {
+ icplb_tbl[cpu][i].data = 0;
+ bfin_write32(ICPLB_DATA0 + i * 4, 0);
+ }
+ _enable_icplb();
+
+ _disable_dcplb();
+ for (i = first_switched_dcplb; i < MAX_CPLBS; i++) {
+ dcplb_tbl[cpu][i].data = 0;
+ bfin_write32(DCPLB_DATA0 + i * 4, 0);
+ }
+ _enable_dcplb();
+ hard_local_irq_restore(flags);
+}
+
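+/*
+ * Map the page-permission bitmaps themselves through the reserved
+ * first_mask_dcplb..first_switched_dcplb slots so the miss handlers
+ * above can read them without taking a nested CPLB miss.  Passing a
+ * NULL mask simply clears current_rwx_mask for this CPU.
+ */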
+void set_mask_dcplbs(unsigned long *masks, unsigned int cpu)
+{
+ int i;
+ unsigned long addr = (unsigned long)masks;
+ unsigned long d_data;
+ unsigned long flags;
+
+ if (!masks) {
+ current_rwx_mask[cpu] = masks;
+ return;
+ }
+
+ flags = hard_local_irq_save();
+ current_rwx_mask[cpu] = masks;
+
+ if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
+ addr = L2_START;
+ d_data = L2_DMEMORY;
+ } else {
+ d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
+#ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
+ d_data |= CPLB_L1_CHBL;
+# ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH
+ d_data |= CPLB_L1_AOW | CPLB_WT;
+# endif
+#endif
+ }
+
+ _disable_dcplb();
+ for (i = first_mask_dcplb; i < first_switched_dcplb; i++) {
+ dcplb_tbl[cpu][i].addr = addr;
+ dcplb_tbl[cpu][i].data = d_data;
+ bfin_write32(DCPLB_DATA0 + i * 4, d_data);
+ bfin_write32(DCPLB_ADDR0 + i * 4, addr);
+ addr += PAGE_SIZE;
+ }
+ _enable_dcplb();
+ hard_local_irq_restore(flags);
+}