Diffstat (limited to 'ANDROID_3.4.5/arch/mips/mm')
-rw-r--r--  ANDROID_3.4.5/arch/mips/mm/Makefile      |    39
-rw-r--r--  ANDROID_3.4.5/arch/mips/mm/c-octeon.c    |   327
-rw-r--r--  ANDROID_3.4.5/arch/mips/mm/c-r3k.c       |   347
-rw-r--r--  ANDROID_3.4.5/arch/mips/mm/c-r4k.c       |  1468
-rw-r--r--  ANDROID_3.4.5/arch/mips/mm/c-tx39.c      |   440
-rw-r--r--  ANDROID_3.4.5/arch/mips/mm/cache.c       |   227
-rw-r--r--  ANDROID_3.4.5/arch/mips/mm/cerr-sb1.c    |   586
-rw-r--r--  ANDROID_3.4.5/arch/mips/mm/cex-gen.S     |    42
-rw-r--r--  ANDROID_3.4.5/arch/mips/mm/cex-oct.S     |    70
-rw-r--r--  ANDROID_3.4.5/arch/mips/mm/cex-sb1.S     |   175
-rw-r--r--  ANDROID_3.4.5/arch/mips/mm/dma-default.c |   351
-rw-r--r--  ANDROID_3.4.5/arch/mips/mm/extable.c     |    25
-rw-r--r--  ANDROID_3.4.5/arch/mips/mm/fault.c       |   313
-rw-r--r--  ANDROID_3.4.5/arch/mips/mm/gup.c         |   315
-rw-r--r--  ANDROID_3.4.5/arch/mips/mm/highmem.c     |   137
-rw-r--r--  ANDROID_3.4.5/arch/mips/mm/hugetlbpage.c |    98
-rw-r--r--  ANDROID_3.4.5/arch/mips/mm/init.c        |   487
-rw-r--r--  ANDROID_3.4.5/arch/mips/mm/ioremap.c     |   192
-rw-r--r--  ANDROID_3.4.5/arch/mips/mm/mmap.c        |   271
-rw-r--r--  ANDROID_3.4.5/arch/mips/mm/page.c        |   690
-rw-r--r--  ANDROID_3.4.5/arch/mips/mm/pgtable-32.c  |    70
-rw-r--r--  ANDROID_3.4.5/arch/mips/mm/pgtable-64.c  |    80
-rw-r--r--  ANDROID_3.4.5/arch/mips/mm/sc-ip22.c     |   176
-rw-r--r--  ANDROID_3.4.5/arch/mips/mm/sc-mips.c     |   145
-rw-r--r--  ANDROID_3.4.5/arch/mips/mm/sc-r5k.c      |   107
-rw-r--r--  ANDROID_3.4.5/arch/mips/mm/sc-rm7k.c     |   270
-rw-r--r--  ANDROID_3.4.5/arch/mips/mm/tlb-r3k.c     |   284
-rw-r--r--  ANDROID_3.4.5/arch/mips/mm/tlb-r4k.c     |   432
-rw-r--r--  ANDROID_3.4.5/arch/mips/mm/tlb-r8k.c     |   248
-rw-r--r--  ANDROID_3.4.5/arch/mips/mm/tlbex-fault.S |    28
-rw-r--r--  ANDROID_3.4.5/arch/mips/mm/tlbex.c       |  2160
-rw-r--r--  ANDROID_3.4.5/arch/mips/mm/uasm.c        |   699
32 files changed, 0 insertions(+), 11299 deletions(-)
diff --git a/ANDROID_3.4.5/arch/mips/mm/Makefile b/ANDROID_3.4.5/arch/mips/mm/Makefile
deleted file mode 100644
index 4aa20280..00000000
--- a/ANDROID_3.4.5/arch/mips/mm/Makefile
+++ /dev/null
@@ -1,39 +0,0 @@
-#
-# Makefile for the Linux/MIPS-specific parts of the memory manager.
-#
-
-obj-y += cache.o dma-default.o extable.o fault.o \
- gup.o init.o mmap.o page.o tlbex.o \
- tlbex-fault.o uasm.o
-
-obj-$(CONFIG_32BIT) += ioremap.o pgtable-32.o
-obj-$(CONFIG_64BIT) += pgtable-64.o
-obj-$(CONFIG_HIGHMEM) += highmem.o
-obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
-
-obj-$(CONFIG_CPU_LOONGSON2) += c-r4k.o cex-gen.o tlb-r4k.o
-obj-$(CONFIG_CPU_MIPS32) += c-r4k.o cex-gen.o tlb-r4k.o
-obj-$(CONFIG_CPU_MIPS64) += c-r4k.o cex-gen.o tlb-r4k.o
-obj-$(CONFIG_CPU_NEVADA) += c-r4k.o cex-gen.o tlb-r4k.o
-obj-$(CONFIG_CPU_R10000) += c-r4k.o cex-gen.o tlb-r4k.o
-obj-$(CONFIG_CPU_R3000) += c-r3k.o tlb-r3k.o
-obj-$(CONFIG_CPU_R4300) += c-r4k.o cex-gen.o tlb-r4k.o
-obj-$(CONFIG_CPU_R4X00) += c-r4k.o cex-gen.o tlb-r4k.o
-obj-$(CONFIG_CPU_R5000) += c-r4k.o cex-gen.o tlb-r4k.o
-obj-$(CONFIG_CPU_R5432) += c-r4k.o cex-gen.o tlb-r4k.o
-obj-$(CONFIG_CPU_R5500) += c-r4k.o cex-gen.o tlb-r4k.o
-obj-$(CONFIG_CPU_R8000) += c-r4k.o cex-gen.o tlb-r8k.o
-obj-$(CONFIG_CPU_RM7000) += c-r4k.o cex-gen.o tlb-r4k.o
-obj-$(CONFIG_CPU_RM9000) += c-r4k.o cex-gen.o tlb-r4k.o
-obj-$(CONFIG_CPU_SB1) += c-r4k.o cerr-sb1.o cex-sb1.o tlb-r4k.o
-obj-$(CONFIG_CPU_TX39XX) += c-tx39.o tlb-r3k.o
-obj-$(CONFIG_CPU_TX49XX) += c-r4k.o cex-gen.o tlb-r4k.o
-obj-$(CONFIG_CPU_VR41XX) += c-r4k.o cex-gen.o tlb-r4k.o
-obj-$(CONFIG_CPU_CAVIUM_OCTEON) += c-octeon.o cex-oct.o tlb-r4k.o
-obj-$(CONFIG_CPU_XLR) += c-r4k.o tlb-r4k.o cex-gen.o
-obj-$(CONFIG_CPU_XLP) += c-r4k.o tlb-r4k.o cex-gen.o
-
-obj-$(CONFIG_IP22_CPU_SCACHE) += sc-ip22.o
-obj-$(CONFIG_R5000_CPU_SCACHE) += sc-r5k.o
-obj-$(CONFIG_RM7000_CPU_SCACHE) += sc-rm7k.o
-obj-$(CONFIG_MIPS_CPU_SCACHE) += sc-mips.o
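A note on the deleted Makefile above: kbuild's "obj-$(CONFIG_FOO) += bar.o" lines expand into "obj-y" only when the config symbol is set, so exactly one cache/TLB implementation per CPU family is compiled in. At runtime, the selected file's *_cache_init() routine then fills in global function pointers (flush_cache_all, flush_icache_range, ...) that generic code calls. Below is a minimal user-space sketch of that dispatch pattern; the backend names (wt_*/wb_*, cache_init) are hypothetical stand-ins, not the kernel's actual declarations.

#include <stdio.h>

/* Global operation pointers, assigned once at init time (the kernel
 * does the same with flush_cache_all and friends). */
static void (*flush_all)(void);
static void (*flush_range)(unsigned long start, unsigned long end);

/* One possible backend, e.g. for a write-through cache. */
static void wt_flush_all(void) { puts("wt: flush all"); }
static void wt_flush_range(unsigned long s, unsigned long e)
{
	printf("wt: flush %#lx-%#lx\n", s, e);
}

/* A second backend with different behaviour. */
static void wb_flush_all(void) { puts("wb: flush all"); }
static void wb_flush_range(unsigned long s, unsigned long e)
{
	printf("wb: flush %#lx-%#lx\n", s, e);
}

/* Hypothetical probe: pick a backend once, like r4k_cache_init(). */
static void cache_init(int has_writeback)
{
	if (has_writeback) {
		flush_all = wb_flush_all;
		flush_range = wb_flush_range;
	} else {
		flush_all = wt_flush_all;
		flush_range = wt_flush_range;
	}
}

int main(void)
{
	cache_init(1);
	flush_all();                 /* callers never know the backend */
	flush_range(0x80000000UL, 0x80001000UL);
	return 0;
}

Callers such as the generic code in cache.c never know which backend is active; r3k_cache_init() and r4k_cache_init() in the files below assign these pointers the same way.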
diff --git a/ANDROID_3.4.5/arch/mips/mm/c-octeon.c b/ANDROID_3.4.5/arch/mips/mm/c-octeon.c
deleted file mode 100644
index 47037ec5..00000000
--- a/ANDROID_3.4.5/arch/mips/mm/c-octeon.c
+++ /dev/null
@@ -1,327 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2005-2007 Cavium Networks
- */
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/smp.h>
-#include <linux/mm.h>
-#include <linux/bitops.h>
-#include <linux/cpu.h>
-#include <linux/io.h>
-
-#include <asm/bcache.h>
-#include <asm/bootinfo.h>
-#include <asm/cacheops.h>
-#include <asm/cpu-features.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/r4kcache.h>
-#include <asm/mmu_context.h>
-#include <asm/war.h>
-
-#include <asm/octeon/octeon.h>
-
-unsigned long long cache_err_dcache[NR_CPUS];
-
-/**
- * Octeon automatically flushes the dcache on TLB changes, so
- * from Linux's viewpoint it acts much like a physically
- * tagged cache. No flushing is needed.
- *
- */
-static void octeon_flush_data_cache_page(unsigned long addr)
-{
- /* Nothing to do */
-}
-
-static inline void octeon_local_flush_icache(void)
-{
- asm volatile ("synci 0($0)");
-}
-
-/*
- * Flush local I-cache for the specified range.
- */
-static void local_octeon_flush_icache_range(unsigned long start,
- unsigned long end)
-{
- octeon_local_flush_icache();
-}
-
-/**
- * Flush caches as necessary for all cores affected by a
- * vma. If no vma is supplied, all cores are flushed.
- *
- * @vma: VMA to flush or NULL to flush all icaches.
- */
-static void octeon_flush_icache_all_cores(struct vm_area_struct *vma)
-{
- extern void octeon_send_ipi_single(int cpu, unsigned int action);
-#ifdef CONFIG_SMP
- int cpu;
- cpumask_t mask;
-#endif
-
- mb();
- octeon_local_flush_icache();
-#ifdef CONFIG_SMP
- preempt_disable();
- cpu = smp_processor_id();
-
- /*
- * If we have a vma structure, we only need to worry about
- * cores it has been used on
- */
- if (vma)
- mask = *mm_cpumask(vma->vm_mm);
- else
- mask = *cpu_online_mask;
- cpumask_clear_cpu(cpu, &mask);
- for_each_cpu(cpu, &mask)
- octeon_send_ipi_single(cpu, SMP_ICACHE_FLUSH);
-
- preempt_enable();
-#endif
-}
-
-
-/**
- * Called to flush the icache on all cores
- */
-static void octeon_flush_icache_all(void)
-{
- octeon_flush_icache_all_cores(NULL);
-}
-
-
-/**
- * Called to flush all memory associated with a memory
- * context.
- *
- * @mm: Memory context to flush
- */
-static void octeon_flush_cache_mm(struct mm_struct *mm)
-{
- /*
- * According to the R4K version of this file, CPUs without
- * dcache aliases don't need to do anything here
- */
-}
-
-
-/**
- * Flush a range of kernel addresses out of the icache
- *
- */
-static void octeon_flush_icache_range(unsigned long start, unsigned long end)
-{
- octeon_flush_icache_all_cores(NULL);
-}
-
-
-/**
- * Flush the icache for a trampoline. These are used for interrupt
- * and exception hooking.
- *
- * @addr: Address to flush
- */
-static void octeon_flush_cache_sigtramp(unsigned long addr)
-{
- struct vm_area_struct *vma;
-
- vma = find_vma(current->mm, addr);
- octeon_flush_icache_all_cores(vma);
-}
-
-
-/**
- * Flush a range out of a vma
- *
- * @vma: VMA to flush
- * @start: Start of the address range to flush
- * @end:   End of the address range to flush
- */
-static void octeon_flush_cache_range(struct vm_area_struct *vma,
- unsigned long start, unsigned long end)
-{
- if (vma->vm_flags & VM_EXEC)
- octeon_flush_icache_all_cores(vma);
-}
-
-
-/**
- * Flush a specific page of a vma
- *
- * @vma: VMA to flush page for
- * @page: Page to flush
- * @pfn:  Page frame number of @page
- */
-static void octeon_flush_cache_page(struct vm_area_struct *vma,
- unsigned long page, unsigned long pfn)
-{
- if (vma->vm_flags & VM_EXEC)
- octeon_flush_icache_all_cores(vma);
-}
-
-static void octeon_flush_kernel_vmap_range(unsigned long vaddr, int size)
-{
- BUG();
-}
-
-/**
- * Probe Octeon's caches
- *
- */
-static void __cpuinit probe_octeon(void)
-{
- unsigned long icache_size;
- unsigned long dcache_size;
- unsigned int config1;
- struct cpuinfo_mips *c = &current_cpu_data;
-
- config1 = read_c0_config1();
- switch (c->cputype) {
- case CPU_CAVIUM_OCTEON:
- case CPU_CAVIUM_OCTEON_PLUS:
- c->icache.linesz = 2 << ((config1 >> 19) & 7);
- c->icache.sets = 64 << ((config1 >> 22) & 7);
- c->icache.ways = 1 + ((config1 >> 16) & 7);
- c->icache.flags |= MIPS_CACHE_VTAG;
- icache_size =
- c->icache.sets * c->icache.ways * c->icache.linesz;
- c->icache.waybit = ffs(icache_size / c->icache.ways) - 1;
- c->dcache.linesz = 128;
- if (c->cputype == CPU_CAVIUM_OCTEON_PLUS)
- c->dcache.sets = 2; /* CN5XXX has two Dcache sets */
- else
- c->dcache.sets = 1; /* CN3XXX has one Dcache set */
- c->dcache.ways = 64;
- dcache_size =
- c->dcache.sets * c->dcache.ways * c->dcache.linesz;
- c->dcache.waybit = ffs(dcache_size / c->dcache.ways) - 1;
- c->options |= MIPS_CPU_PREFETCH;
- break;
-
- case CPU_CAVIUM_OCTEON2:
- c->icache.linesz = 2 << ((config1 >> 19) & 7);
- c->icache.sets = 8;
- c->icache.ways = 37;
- c->icache.flags |= MIPS_CACHE_VTAG;
- icache_size = c->icache.sets * c->icache.ways * c->icache.linesz;
-
- c->dcache.linesz = 128;
- c->dcache.ways = 32;
- c->dcache.sets = 8;
- dcache_size = c->dcache.sets * c->dcache.ways * c->dcache.linesz;
- c->options |= MIPS_CPU_PREFETCH;
- break;
-
- default:
- panic("Unsupported Cavium Networks CPU type");
- break;
- }
-
- /* compute a couple of other cache variables */
- c->icache.waysize = icache_size / c->icache.ways;
- c->dcache.waysize = dcache_size / c->dcache.ways;
-
- c->icache.sets = icache_size / (c->icache.linesz * c->icache.ways);
- c->dcache.sets = dcache_size / (c->dcache.linesz * c->dcache.ways);
-
- if (smp_processor_id() == 0) {
- pr_notice("Primary instruction cache %ldkB, %s, %d way, "
- "%d sets, linesize %d bytes.\n",
- icache_size >> 10,
- cpu_has_vtag_icache ?
- "virtually tagged" : "physically tagged",
- c->icache.ways, c->icache.sets, c->icache.linesz);
-
- pr_notice("Primary data cache %ldkB, %d-way, %d sets, "
- "linesize %d bytes.\n",
- dcache_size >> 10, c->dcache.ways,
- c->dcache.sets, c->dcache.linesz);
- }
-}
-
-
-/**
- * Setup the Octeon cache flush routines
- *
- */
-void __cpuinit octeon_cache_init(void)
-{
- extern unsigned long ebase;
- extern char except_vec2_octeon;
-
- memcpy((void *)(ebase + 0x100), &except_vec2_octeon, 0x80);
- octeon_flush_cache_sigtramp(ebase + 0x100);
-
- probe_octeon();
-
- shm_align_mask = PAGE_SIZE - 1;
-
- flush_cache_all = octeon_flush_icache_all;
- __flush_cache_all = octeon_flush_icache_all;
- flush_cache_mm = octeon_flush_cache_mm;
- flush_cache_page = octeon_flush_cache_page;
- flush_cache_range = octeon_flush_cache_range;
- flush_cache_sigtramp = octeon_flush_cache_sigtramp;
- flush_icache_all = octeon_flush_icache_all;
- flush_data_cache_page = octeon_flush_data_cache_page;
- flush_icache_range = octeon_flush_icache_range;
- local_flush_icache_range = local_octeon_flush_icache_range;
-
- __flush_kernel_vmap_range = octeon_flush_kernel_vmap_range;
-
- build_clear_page();
- build_copy_page();
-}
-
-/**
- * Handle a cache error exception
- */
-
-static void cache_parity_error_octeon(int non_recoverable)
-{
- unsigned long coreid = cvmx_get_core_num();
- uint64_t icache_err = read_octeon_c0_icacheerr();
-
- pr_err("Cache error exception:\n");
- pr_err("cp0_errorepc == %lx\n", read_c0_errorepc());
- if (icache_err & 1) {
- pr_err("CacheErr (Icache) == %llx\n",
- (unsigned long long)icache_err);
- write_octeon_c0_icacheerr(0);
- }
- if (cache_err_dcache[coreid] & 1) {
- pr_err("CacheErr (Dcache) == %llx\n",
- (unsigned long long)cache_err_dcache[coreid]);
- cache_err_dcache[coreid] = 0;
- }
-
- if (non_recoverable)
- panic("Can't handle cache error: nested exception");
-}
-
-/**
- * Called when the exception is recoverable
- */
-
-asmlinkage void cache_parity_error_octeon_recoverable(void)
-{
- cache_parity_error_octeon(0);
-}
-
-/**
- * Called when the exception is not recoverable
- */
-
-asmlinkage void cache_parity_error_octeon_non_recoverable(void)
-{
- cache_parity_error_octeon(1);
-}
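probe_octeon() above decodes the MIPS32 Config1 register: instruction-cache line size is 2 << IL (bits 21:19), sets per way is 64 << IS (bits 24:22), and associativity is IA + 1 (bits 18:16). Here is a standalone sketch of that arithmetic; the sample register value is made up, while the field layout is the one decoded above.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical Config1 value encoding IS=2, IL=4, IA=3. */
	uint32_t config1 = (2u << 22) | (4u << 19) | (3u << 16);

	unsigned linesz = 2u << ((config1 >> 19) & 7);   /* 2 << IL  */
	unsigned sets   = 64u << ((config1 >> 22) & 7);  /* 64 << IS */
	unsigned ways   = 1u + ((config1 >> 16) & 7);    /* IA + 1   */
	unsigned long size = (unsigned long)sets * ways * linesz;

	printf("linesz=%u sets=%u ways=%u size=%lukB\n",
	       linesz, sets, ways, size >> 10);
	return 0;
}

With IL=4, IS=2 and IA=3 this yields a 32kB, 4-way cache with 32-byte lines, matching the sets * ways * linesz computation in probe_octeon().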
diff --git a/ANDROID_3.4.5/arch/mips/mm/c-r3k.c b/ANDROID_3.4.5/arch/mips/mm/c-r3k.c
deleted file mode 100644
index 031c4c2c..00000000
--- a/ANDROID_3.4.5/arch/mips/mm/c-r3k.c
+++ /dev/null
@@ -1,347 +0,0 @@
-/*
- * r2300.c: R2000 and R3000 specific mmu/cache code.
- *
- * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
- *
- * with a lot of changes to make this thing work for R3000s
- * Tx39XX R4k style caches added. HK
- * Copyright (C) 1998, 1999, 2000 Harald Koerfgen
- * Copyright (C) 1998 Gleb Raiko & Vladimir Roganov
- * Copyright (C) 2001, 2004, 2007 Maciej W. Rozycki
- */
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/smp.h>
-#include <linux/mm.h>
-
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/mmu_context.h>
-#include <asm/isadep.h>
-#include <asm/io.h>
-#include <asm/bootinfo.h>
-#include <asm/cpu.h>
-
-static unsigned long icache_size, dcache_size; /* Size in bytes */
-static unsigned long icache_lsize, dcache_lsize; /* Line size in bytes */
-
-unsigned long __cpuinit r3k_cache_size(unsigned long ca_flags)
-{
- unsigned long flags, status, dummy, size;
- volatile unsigned long *p;
-
- p = (volatile unsigned long *) KSEG0;
-
- flags = read_c0_status();
-
- /* isolate cache space */
- write_c0_status((ca_flags|flags)&~ST0_IEC);
-
- *p = 0xa5a55a5a;
- dummy = *p;
- status = read_c0_status();
-
- if (dummy != 0xa5a55a5a || (status & ST0_CM)) {
- size = 0;
- } else {
- for (size = 128; size <= 0x40000; size <<= 1)
- *(p + size) = 0;
- *p = -1;
- for (size = 128;
- (size <= 0x40000) && (*(p + size) == 0);
- size <<= 1)
- ;
- if (size > 0x40000)
- size = 0;
- }
-
- write_c0_status(flags);
-
- return size * sizeof(*p);
-}
-
-unsigned long __cpuinit r3k_cache_lsize(unsigned long ca_flags)
-{
- unsigned long flags, status, lsize, i;
- volatile unsigned long *p;
-
- p = (volatile unsigned long *) KSEG0;
-
- flags = read_c0_status();
-
- /* isolate cache space */
- write_c0_status((ca_flags|flags)&~ST0_IEC);
-
- for (i = 0; i < 128; i++)
- *(p + i) = 0;
- *(volatile unsigned char *)p = 0;
- for (lsize = 1; lsize < 128; lsize <<= 1) {
- *(p + lsize);
- status = read_c0_status();
- if (!(status & ST0_CM))
- break;
- }
- for (i = 0; i < 128; i += lsize)
- *(volatile unsigned char *)(p + i) = 0;
-
- write_c0_status(flags);
-
- return lsize * sizeof(*p);
-}
-
-static void __cpuinit r3k_probe_cache(void)
-{
- dcache_size = r3k_cache_size(ST0_ISC);
- if (dcache_size)
- dcache_lsize = r3k_cache_lsize(ST0_ISC);
-
- icache_size = r3k_cache_size(ST0_ISC|ST0_SWC);
- if (icache_size)
- icache_lsize = r3k_cache_lsize(ST0_ISC|ST0_SWC);
-}
-
-static void r3k_flush_icache_range(unsigned long start, unsigned long end)
-{
- unsigned long size, i, flags;
- volatile unsigned char *p;
-
- size = end - start;
- if (size > icache_size || KSEGX(start) != KSEG0) {
- start = KSEG0;
- size = icache_size;
- }
- p = (char *)start;
-
- flags = read_c0_status();
-
- /* isolate cache space */
- write_c0_status((ST0_ISC|ST0_SWC|flags)&~ST0_IEC);
-
- for (i = 0; i < size; i += 0x080) {
- asm( "sb\t$0, 0x000(%0)\n\t"
- "sb\t$0, 0x004(%0)\n\t"
- "sb\t$0, 0x008(%0)\n\t"
- "sb\t$0, 0x00c(%0)\n\t"
- "sb\t$0, 0x010(%0)\n\t"
- "sb\t$0, 0x014(%0)\n\t"
- "sb\t$0, 0x018(%0)\n\t"
- "sb\t$0, 0x01c(%0)\n\t"
- "sb\t$0, 0x020(%0)\n\t"
- "sb\t$0, 0x024(%0)\n\t"
- "sb\t$0, 0x028(%0)\n\t"
- "sb\t$0, 0x02c(%0)\n\t"
- "sb\t$0, 0x030(%0)\n\t"
- "sb\t$0, 0x034(%0)\n\t"
- "sb\t$0, 0x038(%0)\n\t"
- "sb\t$0, 0x03c(%0)\n\t"
- "sb\t$0, 0x040(%0)\n\t"
- "sb\t$0, 0x044(%0)\n\t"
- "sb\t$0, 0x048(%0)\n\t"
- "sb\t$0, 0x04c(%0)\n\t"
- "sb\t$0, 0x050(%0)\n\t"
- "sb\t$0, 0x054(%0)\n\t"
- "sb\t$0, 0x058(%0)\n\t"
- "sb\t$0, 0x05c(%0)\n\t"
- "sb\t$0, 0x060(%0)\n\t"
- "sb\t$0, 0x064(%0)\n\t"
- "sb\t$0, 0x068(%0)\n\t"
- "sb\t$0, 0x06c(%0)\n\t"
- "sb\t$0, 0x070(%0)\n\t"
- "sb\t$0, 0x074(%0)\n\t"
- "sb\t$0, 0x078(%0)\n\t"
- "sb\t$0, 0x07c(%0)\n\t"
- : : "r" (p) );
- p += 0x080;
- }
-
- write_c0_status(flags);
-}
-
-static void r3k_flush_dcache_range(unsigned long start, unsigned long end)
-{
- unsigned long size, i, flags;
- volatile unsigned char *p;
-
- size = end - start;
- if (size > dcache_size || KSEGX(start) != KSEG0) {
- start = KSEG0;
- size = dcache_size;
- }
- p = (char *)start;
-
- flags = read_c0_status();
-
- /* isolate cache space */
- write_c0_status((ST0_ISC|flags)&~ST0_IEC);
-
- for (i = 0; i < size; i += 0x080) {
- asm( "sb\t$0, 0x000(%0)\n\t"
- "sb\t$0, 0x004(%0)\n\t"
- "sb\t$0, 0x008(%0)\n\t"
- "sb\t$0, 0x00c(%0)\n\t"
- "sb\t$0, 0x010(%0)\n\t"
- "sb\t$0, 0x014(%0)\n\t"
- "sb\t$0, 0x018(%0)\n\t"
- "sb\t$0, 0x01c(%0)\n\t"
- "sb\t$0, 0x020(%0)\n\t"
- "sb\t$0, 0x024(%0)\n\t"
- "sb\t$0, 0x028(%0)\n\t"
- "sb\t$0, 0x02c(%0)\n\t"
- "sb\t$0, 0x030(%0)\n\t"
- "sb\t$0, 0x034(%0)\n\t"
- "sb\t$0, 0x038(%0)\n\t"
- "sb\t$0, 0x03c(%0)\n\t"
- "sb\t$0, 0x040(%0)\n\t"
- "sb\t$0, 0x044(%0)\n\t"
- "sb\t$0, 0x048(%0)\n\t"
- "sb\t$0, 0x04c(%0)\n\t"
- "sb\t$0, 0x050(%0)\n\t"
- "sb\t$0, 0x054(%0)\n\t"
- "sb\t$0, 0x058(%0)\n\t"
- "sb\t$0, 0x05c(%0)\n\t"
- "sb\t$0, 0x060(%0)\n\t"
- "sb\t$0, 0x064(%0)\n\t"
- "sb\t$0, 0x068(%0)\n\t"
- "sb\t$0, 0x06c(%0)\n\t"
- "sb\t$0, 0x070(%0)\n\t"
- "sb\t$0, 0x074(%0)\n\t"
- "sb\t$0, 0x078(%0)\n\t"
- "sb\t$0, 0x07c(%0)\n\t"
- : : "r" (p) );
- p += 0x080;
- }
-
- write_c0_status(flags);
-}
-
-static inline void r3k_flush_cache_all(void)
-{
-}
-
-static inline void r3k___flush_cache_all(void)
-{
- r3k_flush_dcache_range(KSEG0, KSEG0 + dcache_size);
- r3k_flush_icache_range(KSEG0, KSEG0 + icache_size);
-}
-
-static void r3k_flush_cache_mm(struct mm_struct *mm)
-{
-}
-
-static void r3k_flush_cache_range(struct vm_area_struct *vma,
- unsigned long start, unsigned long end)
-{
-}
-
-static void r3k_flush_cache_page(struct vm_area_struct *vma,
- unsigned long addr, unsigned long pfn)
-{
- unsigned long kaddr = KSEG0ADDR(pfn << PAGE_SHIFT);
- int exec = vma->vm_flags & VM_EXEC;
- struct mm_struct *mm = vma->vm_mm;
- pgd_t *pgdp;
- pud_t *pudp;
- pmd_t *pmdp;
- pte_t *ptep;
-
- pr_debug("cpage[%08lx,%08lx]\n",
- cpu_context(smp_processor_id(), mm), addr);
-
- /* No ASID => no such page in the cache. */
- if (cpu_context(smp_processor_id(), mm) == 0)
- return;
-
- pgdp = pgd_offset(mm, addr);
- pudp = pud_offset(pgdp, addr);
- pmdp = pmd_offset(pudp, addr);
- ptep = pte_offset(pmdp, addr);
-
- /* Invalid => no such page in the cache. */
- if (!(pte_val(*ptep) & _PAGE_PRESENT))
- return;
-
- r3k_flush_dcache_range(kaddr, kaddr + PAGE_SIZE);
- if (exec)
- r3k_flush_icache_range(kaddr, kaddr + PAGE_SIZE);
-}
-
-static void local_r3k_flush_data_cache_page(void *addr)
-{
-}
-
-static void r3k_flush_data_cache_page(unsigned long addr)
-{
-}
-
-static void r3k_flush_cache_sigtramp(unsigned long addr)
-{
- unsigned long flags;
-
- pr_debug("csigtramp[%08lx]\n", addr);
-
- flags = read_c0_status();
-
- write_c0_status(flags&~ST0_IEC);
-
- /* Fill the TLB to avoid an exception with caches isolated. */
- asm( "lw\t$0, 0x000(%0)\n\t"
- "lw\t$0, 0x004(%0)\n\t"
- : : "r" (addr) );
-
- write_c0_status((ST0_ISC|ST0_SWC|flags)&~ST0_IEC);
-
- asm( "sb\t$0, 0x000(%0)\n\t"
- "sb\t$0, 0x004(%0)\n\t"
- : : "r" (addr) );
-
- write_c0_status(flags);
-}
-
-static void r3k_flush_kernel_vmap_range(unsigned long vaddr, int size)
-{
- BUG();
-}
-
-static void r3k_dma_cache_wback_inv(unsigned long start, unsigned long size)
-{
- /* Catch bad driver code */
- BUG_ON(size == 0);
-
- iob();
- r3k_flush_dcache_range(start, start + size);
-}
-
-void __cpuinit r3k_cache_init(void)
-{
- extern void build_clear_page(void);
- extern void build_copy_page(void);
-
- r3k_probe_cache();
-
- flush_cache_all = r3k_flush_cache_all;
- __flush_cache_all = r3k___flush_cache_all;
- flush_cache_mm = r3k_flush_cache_mm;
- flush_cache_range = r3k_flush_cache_range;
- flush_cache_page = r3k_flush_cache_page;
- flush_icache_range = r3k_flush_icache_range;
- local_flush_icache_range = r3k_flush_icache_range;
-
- __flush_kernel_vmap_range = r3k_flush_kernel_vmap_range;
-
- flush_cache_sigtramp = r3k_flush_cache_sigtramp;
- local_flush_data_cache_page = local_r3k_flush_data_cache_page;
- flush_data_cache_page = r3k_flush_data_cache_page;
-
- _dma_cache_wback_inv = r3k_dma_cache_wback_inv;
- _dma_cache_wback = r3k_dma_cache_wback_inv;
- _dma_cache_inv = r3k_dma_cache_wback_inv;
-
- printk("Primary instruction cache %ldkB, linesize %ld bytes.\n",
- icache_size >> 10, icache_lsize);
- printk("Primary data cache %ldkB, linesize %ld bytes.\n",
- dcache_size >> 10, dcache_lsize);
-
- build_clear_page();
- build_copy_page();
-}
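r3k_flush_cache_page() above turns a physical frame number into a cached kernel virtual address with KSEG0ADDR(pfn << PAGE_SHIFT). On 32-bit MIPS, KSEG0 (0x80000000) is a fixed, unmapped, cached 512MB window onto low physical memory, and KSEG1 (0xa0000000) is its uncached alias. A small sketch of that address arithmetic, assuming 4kB pages and a hypothetical frame number:

#include <stdio.h>

#define PAGE_SHIFT 12              /* assumed 4kB pages  */
#define KSEG0      0x80000000UL    /* unmapped, cached   */
#define KSEG1      0xa0000000UL    /* unmapped, uncached */

int main(void)
{
	unsigned long pfn  = 0x1234;             /* hypothetical frame */
	unsigned long phys = pfn << PAGE_SHIFT;  /* 0x01234000 */

	printf("phys  %#010lx\n", phys);
	printf("kseg0 %#010lx (cached view)\n",   KSEG0 | phys);
	printf("kseg1 %#010lx (uncached view)\n", KSEG1 | phys);
	return 0;
}

Because the KSEG0 mapping is fixed, no TLB entry is needed, which is why the R3000 code can flush a page through KSEG0 without touching the MMU.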
diff --git a/ANDROID_3.4.5/arch/mips/mm/c-r4k.c b/ANDROID_3.4.5/arch/mips/mm/c-r4k.c
deleted file mode 100644
index bda8eb26..00000000
--- a/ANDROID_3.4.5/arch/mips/mm/c-r4k.c
+++ /dev/null
@@ -1,1468 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
- * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org)
- * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
- */
-#include <linux/hardirq.h>
-#include <linux/init.h>
-#include <linux/highmem.h>
-#include <linux/kernel.h>
-#include <linux/linkage.h>
-#include <linux/sched.h>
-#include <linux/smp.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/bitops.h>
-
-#include <asm/bcache.h>
-#include <asm/bootinfo.h>
-#include <asm/cache.h>
-#include <asm/cacheops.h>
-#include <asm/cpu.h>
-#include <asm/cpu-features.h>
-#include <asm/io.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/r4kcache.h>
-#include <asm/sections.h>
-#include <asm/mmu_context.h>
-#include <asm/war.h>
-#include <asm/cacheflush.h> /* for run_uncached() */
-
-
-/*
- * Special Variant of smp_call_function for use by cache functions:
- *
- * o No return value
- * o collapses to normal function call on UP kernels
- * o collapses to normal function call on systems with a single shared
- * primary cache.
- * o doesn't disable interrupts on the local CPU
- */
-static inline void r4k_on_each_cpu(void (*func) (void *info), void *info)
-{
- preempt_disable();
-
-#if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
- smp_call_function(func, info, 1);
-#endif
- func(info);
- preempt_enable();
-}
-
-#if defined(CONFIG_MIPS_CMP)
-#define cpu_has_safe_index_cacheops 0
-#else
-#define cpu_has_safe_index_cacheops 1
-#endif
-
-/*
- * Must die.
- */
-static unsigned long icache_size __read_mostly;
-static unsigned long dcache_size __read_mostly;
-static unsigned long scache_size __read_mostly;
-
-/*
- * Dummy cache handling routines for machines without boardcaches
- */
-static void cache_noop(void) {}
-
-static struct bcache_ops no_sc_ops = {
- .bc_enable = (void *)cache_noop,
- .bc_disable = (void *)cache_noop,
- .bc_wback_inv = (void *)cache_noop,
- .bc_inv = (void *)cache_noop
-};
-
-struct bcache_ops *bcops = &no_sc_ops;
-
-#define cpu_is_r4600_v1_x() ((read_c0_prid() & 0xfffffff0) == 0x00002010)
-#define cpu_is_r4600_v2_x() ((read_c0_prid() & 0xfffffff0) == 0x00002020)
-
-#define R4600_HIT_CACHEOP_WAR_IMPL \
-do { \
- if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x()) \
- *(volatile unsigned long *)CKSEG1; \
- if (R4600_V1_HIT_CACHEOP_WAR) \
- __asm__ __volatile__("nop;nop;nop;nop"); \
-} while (0)
-
-static void (*r4k_blast_dcache_page)(unsigned long addr);
-
-static inline void r4k_blast_dcache_page_dc32(unsigned long addr)
-{
- R4600_HIT_CACHEOP_WAR_IMPL;
- blast_dcache32_page(addr);
-}
-
-static inline void r4k_blast_dcache_page_dc64(unsigned long addr)
-{
- R4600_HIT_CACHEOP_WAR_IMPL;
- blast_dcache64_page(addr);
-}
-
-static void __cpuinit r4k_blast_dcache_page_setup(void)
-{
- unsigned long dc_lsize = cpu_dcache_line_size();
-
- if (dc_lsize == 0)
- r4k_blast_dcache_page = (void *)cache_noop;
- else if (dc_lsize == 16)
- r4k_blast_dcache_page = blast_dcache16_page;
- else if (dc_lsize == 32)
- r4k_blast_dcache_page = r4k_blast_dcache_page_dc32;
- else if (dc_lsize == 64)
- r4k_blast_dcache_page = r4k_blast_dcache_page_dc64;
-}
-
-static void (* r4k_blast_dcache_page_indexed)(unsigned long addr);
-
-static void __cpuinit r4k_blast_dcache_page_indexed_setup(void)
-{
- unsigned long dc_lsize = cpu_dcache_line_size();
-
- if (dc_lsize == 0)
- r4k_blast_dcache_page_indexed = (void *)cache_noop;
- else if (dc_lsize == 16)
- r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed;
- else if (dc_lsize == 32)
- r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed;
- else if (dc_lsize == 64)
- r4k_blast_dcache_page_indexed = blast_dcache64_page_indexed;
-}
-
-static void (* r4k_blast_dcache)(void);
-
-static void __cpuinit r4k_blast_dcache_setup(void)
-{
- unsigned long dc_lsize = cpu_dcache_line_size();
-
- if (dc_lsize == 0)
- r4k_blast_dcache = (void *)cache_noop;
- else if (dc_lsize == 16)
- r4k_blast_dcache = blast_dcache16;
- else if (dc_lsize == 32)
- r4k_blast_dcache = blast_dcache32;
- else if (dc_lsize == 64)
- r4k_blast_dcache = blast_dcache64;
-}
-
-/* force code alignment (used for TX49XX_ICACHE_INDEX_INV_WAR) */
-#define JUMP_TO_ALIGN(order) \
- __asm__ __volatile__( \
- "b\t1f\n\t" \
- ".align\t" #order "\n\t" \
- "1:\n\t" \
- )
-#define CACHE32_UNROLL32_ALIGN JUMP_TO_ALIGN(10) /* 32 * 32 = 1024 */
-#define CACHE32_UNROLL32_ALIGN2 JUMP_TO_ALIGN(11)
-
-static inline void blast_r4600_v1_icache32(void)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- blast_icache32();
- local_irq_restore(flags);
-}
-
-static inline void tx49_blast_icache32(void)
-{
- unsigned long start = INDEX_BASE;
- unsigned long end = start + current_cpu_data.icache.waysize;
- unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
- unsigned long ws_end = current_cpu_data.icache.ways <<
- current_cpu_data.icache.waybit;
- unsigned long ws, addr;
-
- CACHE32_UNROLL32_ALIGN2;
- /* I'm in even chunk. blast odd chunks */
- for (ws = 0; ws < ws_end; ws += ws_inc)
- for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
- cache32_unroll32(addr|ws, Index_Invalidate_I);
- CACHE32_UNROLL32_ALIGN;
- /* I'm in odd chunk. blast even chunks */
- for (ws = 0; ws < ws_end; ws += ws_inc)
- for (addr = start; addr < end; addr += 0x400 * 2)
- cache32_unroll32(addr|ws, Index_Invalidate_I);
-}
-
-static inline void blast_icache32_r4600_v1_page_indexed(unsigned long page)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- blast_icache32_page_indexed(page);
- local_irq_restore(flags);
-}
-
-static inline void tx49_blast_icache32_page_indexed(unsigned long page)
-{
- unsigned long indexmask = current_cpu_data.icache.waysize - 1;
- unsigned long start = INDEX_BASE + (page & indexmask);
- unsigned long end = start + PAGE_SIZE;
- unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
- unsigned long ws_end = current_cpu_data.icache.ways <<
- current_cpu_data.icache.waybit;
- unsigned long ws, addr;
-
- CACHE32_UNROLL32_ALIGN2;
- /* I'm in even chunk. blast odd chunks */
- for (ws = 0; ws < ws_end; ws += ws_inc)
- for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
- cache32_unroll32(addr|ws, Index_Invalidate_I);
- CACHE32_UNROLL32_ALIGN;
- /* I'm in odd chunk. blast even chunks */
- for (ws = 0; ws < ws_end; ws += ws_inc)
- for (addr = start; addr < end; addr += 0x400 * 2)
- cache32_unroll32(addr|ws, Index_Invalidate_I);
-}
-
-static void (* r4k_blast_icache_page)(unsigned long addr);
-
-static void __cpuinit r4k_blast_icache_page_setup(void)
-{
- unsigned long ic_lsize = cpu_icache_line_size();
-
- if (ic_lsize == 0)
- r4k_blast_icache_page = (void *)cache_noop;
- else if (ic_lsize == 16)
- r4k_blast_icache_page = blast_icache16_page;
- else if (ic_lsize == 32)
- r4k_blast_icache_page = blast_icache32_page;
- else if (ic_lsize == 64)
- r4k_blast_icache_page = blast_icache64_page;
-}
-
-
-static void (* r4k_blast_icache_page_indexed)(unsigned long addr);
-
-static void __cpuinit r4k_blast_icache_page_indexed_setup(void)
-{
- unsigned long ic_lsize = cpu_icache_line_size();
-
- if (ic_lsize == 0)
- r4k_blast_icache_page_indexed = (void *)cache_noop;
- else if (ic_lsize == 16)
- r4k_blast_icache_page_indexed = blast_icache16_page_indexed;
- else if (ic_lsize == 32) {
- if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
- r4k_blast_icache_page_indexed =
- blast_icache32_r4600_v1_page_indexed;
- else if (TX49XX_ICACHE_INDEX_INV_WAR)
- r4k_blast_icache_page_indexed =
- tx49_blast_icache32_page_indexed;
- else
- r4k_blast_icache_page_indexed =
- blast_icache32_page_indexed;
- } else if (ic_lsize == 64)
- r4k_blast_icache_page_indexed = blast_icache64_page_indexed;
-}
-
-static void (* r4k_blast_icache)(void);
-
-static void __cpuinit r4k_blast_icache_setup(void)
-{
- unsigned long ic_lsize = cpu_icache_line_size();
-
- if (ic_lsize == 0)
- r4k_blast_icache = (void *)cache_noop;
- else if (ic_lsize == 16)
- r4k_blast_icache = blast_icache16;
- else if (ic_lsize == 32) {
- if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
- r4k_blast_icache = blast_r4600_v1_icache32;
- else if (TX49XX_ICACHE_INDEX_INV_WAR)
- r4k_blast_icache = tx49_blast_icache32;
- else
- r4k_blast_icache = blast_icache32;
- } else if (ic_lsize == 64)
- r4k_blast_icache = blast_icache64;
-}
-
-static void (* r4k_blast_scache_page)(unsigned long addr);
-
-static void __cpuinit r4k_blast_scache_page_setup(void)
-{
- unsigned long sc_lsize = cpu_scache_line_size();
-
- if (scache_size == 0)
- r4k_blast_scache_page = (void *)cache_noop;
- else if (sc_lsize == 16)
- r4k_blast_scache_page = blast_scache16_page;
- else if (sc_lsize == 32)
- r4k_blast_scache_page = blast_scache32_page;
- else if (sc_lsize == 64)
- r4k_blast_scache_page = blast_scache64_page;
- else if (sc_lsize == 128)
- r4k_blast_scache_page = blast_scache128_page;
-}
-
-static void (* r4k_blast_scache_page_indexed)(unsigned long addr);
-
-static void __cpuinit r4k_blast_scache_page_indexed_setup(void)
-{
- unsigned long sc_lsize = cpu_scache_line_size();
-
- if (scache_size == 0)
- r4k_blast_scache_page_indexed = (void *)cache_noop;
- else if (sc_lsize == 16)
- r4k_blast_scache_page_indexed = blast_scache16_page_indexed;
- else if (sc_lsize == 32)
- r4k_blast_scache_page_indexed = blast_scache32_page_indexed;
- else if (sc_lsize == 64)
- r4k_blast_scache_page_indexed = blast_scache64_page_indexed;
- else if (sc_lsize == 128)
- r4k_blast_scache_page_indexed = blast_scache128_page_indexed;
-}
-
-static void (* r4k_blast_scache)(void);
-
-static void __cpuinit r4k_blast_scache_setup(void)
-{
- unsigned long sc_lsize = cpu_scache_line_size();
-
- if (scache_size == 0)
- r4k_blast_scache = (void *)cache_noop;
- else if (sc_lsize == 16)
- r4k_blast_scache = blast_scache16;
- else if (sc_lsize == 32)
- r4k_blast_scache = blast_scache32;
- else if (sc_lsize == 64)
- r4k_blast_scache = blast_scache64;
- else if (sc_lsize == 128)
- r4k_blast_scache = blast_scache128;
-}
-
-static inline void local_r4k___flush_cache_all(void * args)
-{
-#if defined(CONFIG_CPU_LOONGSON2)
- r4k_blast_scache();
- return;
-#endif
- r4k_blast_dcache();
- r4k_blast_icache();
-
- switch (current_cpu_type()) {
- case CPU_R4000SC:
- case CPU_R4000MC:
- case CPU_R4400SC:
- case CPU_R4400MC:
- case CPU_R10000:
- case CPU_R12000:
- case CPU_R14000:
- r4k_blast_scache();
- }
-}
-
-static void r4k___flush_cache_all(void)
-{
- r4k_on_each_cpu(local_r4k___flush_cache_all, NULL);
-}
-
-static inline int has_valid_asid(const struct mm_struct *mm)
-{
-#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
- int i;
-
- for_each_online_cpu(i)
- if (cpu_context(i, mm))
- return 1;
-
- return 0;
-#else
- return cpu_context(smp_processor_id(), mm);
-#endif
-}
-
-static void r4k__flush_cache_vmap(void)
-{
- r4k_blast_dcache();
-}
-
-static void r4k__flush_cache_vunmap(void)
-{
- r4k_blast_dcache();
-}
-
-static inline void local_r4k_flush_cache_range(void * args)
-{
- struct vm_area_struct *vma = args;
- int exec = vma->vm_flags & VM_EXEC;
-
- if (!(has_valid_asid(vma->vm_mm)))
- return;
-
- r4k_blast_dcache();
- if (exec)
- r4k_blast_icache();
-}
-
-static void r4k_flush_cache_range(struct vm_area_struct *vma,
- unsigned long start, unsigned long end)
-{
- int exec = vma->vm_flags & VM_EXEC;
-
- if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc))
- r4k_on_each_cpu(local_r4k_flush_cache_range, vma);
-}
-
-static inline void local_r4k_flush_cache_mm(void * args)
-{
- struct mm_struct *mm = args;
-
- if (!has_valid_asid(mm))
- return;
-
- /*
- * Kludge alert. For obscure reasons R4000SC and R4400SC go nuts if we
- * only flush the primary caches but R10000 and R12000 behave sanely.
- * R4000SC and R4400SC indexed S-cache ops also invalidate primary
- * caches, so we can bail out early.
- */
- if (current_cpu_type() == CPU_R4000SC ||
- current_cpu_type() == CPU_R4000MC ||
- current_cpu_type() == CPU_R4400SC ||
- current_cpu_type() == CPU_R4400MC) {
- r4k_blast_scache();
- return;
- }
-
- r4k_blast_dcache();
-}
-
-static void r4k_flush_cache_mm(struct mm_struct *mm)
-{
- if (!cpu_has_dc_aliases)
- return;
-
- r4k_on_each_cpu(local_r4k_flush_cache_mm, mm);
-}
-
-struct flush_cache_page_args {
- struct vm_area_struct *vma;
- unsigned long addr;
- unsigned long pfn;
-};
-
-static inline void local_r4k_flush_cache_page(void *args)
-{
- struct flush_cache_page_args *fcp_args = args;
- struct vm_area_struct *vma = fcp_args->vma;
- unsigned long addr = fcp_args->addr;
- struct page *page = pfn_to_page(fcp_args->pfn);
- int exec = vma->vm_flags & VM_EXEC;
- struct mm_struct *mm = vma->vm_mm;
- int map_coherent = 0;
- pgd_t *pgdp;
- pud_t *pudp;
- pmd_t *pmdp;
- pte_t *ptep;
- void *vaddr;
-
- /*
- * If the mm owns no valid ASID yet, it cannot possibly have
- * gotten this page into the cache.
- */
- if (!has_valid_asid(mm))
- return;
-
- addr &= PAGE_MASK;
- pgdp = pgd_offset(mm, addr);
- pudp = pud_offset(pgdp, addr);
- pmdp = pmd_offset(pudp, addr);
- ptep = pte_offset(pmdp, addr);
-
- /*
- * If the page isn't marked valid, the page cannot possibly be
- * in the cache.
- */
- if (!(pte_present(*ptep)))
- return;
-
- if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID))
- vaddr = NULL;
- else {
- /*
- * Use kmap_coherent or kmap_atomic to do flushes for
- * another ASID than the current one.
- */
- map_coherent = (cpu_has_dc_aliases &&
- page_mapped(page) && !Page_dcache_dirty(page));
- if (map_coherent)
- vaddr = kmap_coherent(page, addr);
- else
- vaddr = kmap_atomic(page);
- addr = (unsigned long)vaddr;
- }
-
- if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
- r4k_blast_dcache_page(addr);
- if (exec && !cpu_icache_snoops_remote_store)
- r4k_blast_scache_page(addr);
- }
- if (exec) {
- if (vaddr && cpu_has_vtag_icache && mm == current->active_mm) {
- int cpu = smp_processor_id();
-
- if (cpu_context(cpu, mm) != 0)
- drop_mmu_context(mm, cpu);
- } else
- r4k_blast_icache_page(addr);
- }
-
- if (vaddr) {
- if (map_coherent)
- kunmap_coherent();
- else
- kunmap_atomic(vaddr);
- }
-}
-
-static void r4k_flush_cache_page(struct vm_area_struct *vma,
- unsigned long addr, unsigned long pfn)
-{
- struct flush_cache_page_args args;
-
- args.vma = vma;
- args.addr = addr;
- args.pfn = pfn;
-
- r4k_on_each_cpu(local_r4k_flush_cache_page, &args);
-}
-
-static inline void local_r4k_flush_data_cache_page(void * addr)
-{
- r4k_blast_dcache_page((unsigned long) addr);
-}
-
-static void r4k_flush_data_cache_page(unsigned long addr)
-{
- if (in_atomic())
- local_r4k_flush_data_cache_page((void *)addr);
- else
- r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr);
-}
-
-struct flush_icache_range_args {
- unsigned long start;
- unsigned long end;
-};
-
-static inline void local_r4k_flush_icache_range(unsigned long start, unsigned long end)
-{
- if (!cpu_has_ic_fills_f_dc) {
- if (end - start >= dcache_size) {
- r4k_blast_dcache();
- } else {
- R4600_HIT_CACHEOP_WAR_IMPL;
- protected_blast_dcache_range(start, end);
- }
- }
-
- if (end - start > icache_size)
- r4k_blast_icache();
- else
- protected_blast_icache_range(start, end);
-}
-
-static inline void local_r4k_flush_icache_range_ipi(void *args)
-{
- struct flush_icache_range_args *fir_args = args;
- unsigned long start = fir_args->start;
- unsigned long end = fir_args->end;
-
- local_r4k_flush_icache_range(start, end);
-}
-
-static void r4k_flush_icache_range(unsigned long start, unsigned long end)
-{
- struct flush_icache_range_args args;
-
- args.start = start;
- args.end = end;
-
- r4k_on_each_cpu(local_r4k_flush_icache_range_ipi, &args);
- instruction_hazard();
-}
-
-#ifdef CONFIG_DMA_NONCOHERENT
-
-static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
-{
- /* Catch bad driver code */
- BUG_ON(size == 0);
-
- if (cpu_has_inclusive_pcaches) {
- if (size >= scache_size)
- r4k_blast_scache();
- else
- blast_scache_range(addr, addr + size);
- __sync();
- return;
- }
-
- /*
- * Either no secondary cache or the available caches don't have the
- * subset property so we have to flush the primary caches
- * explicitly
- */
- if (cpu_has_safe_index_cacheops && size >= dcache_size) {
- r4k_blast_dcache();
- } else {
- R4600_HIT_CACHEOP_WAR_IMPL;
- blast_dcache_range(addr, addr + size);
- }
-
- bc_wback_inv(addr, size);
- __sync();
-}
-
-static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
-{
- /* Catch bad driver code */
- BUG_ON(size == 0);
-
- if (cpu_has_inclusive_pcaches) {
- if (size >= scache_size)
- r4k_blast_scache();
- else {
- unsigned long lsize = cpu_scache_line_size();
- unsigned long almask = ~(lsize - 1);
-
- /*
- * There is no clearly documented alignment requirement
- * for the cache instruction on MIPS processors and
- * some processors, among them the RM5200 and RM7000
- * QED processors will throw an address error for cache
- * hit ops with insufficient alignment. Solved by
- * aligning the address to cache line size.
- */
- cache_op(Hit_Writeback_Inv_SD, addr & almask);
- cache_op(Hit_Writeback_Inv_SD,
- (addr + size - 1) & almask);
- blast_inv_scache_range(addr, addr + size);
- }
- __sync();
- return;
- }
-
- if (cpu_has_safe_index_cacheops && size >= dcache_size) {
- r4k_blast_dcache();
- } else {
- unsigned long lsize = cpu_dcache_line_size();
- unsigned long almask = ~(lsize - 1);
-
- R4600_HIT_CACHEOP_WAR_IMPL;
- cache_op(Hit_Writeback_Inv_D, addr & almask);
- cache_op(Hit_Writeback_Inv_D, (addr + size - 1) & almask);
- blast_inv_dcache_range(addr, addr + size);
- }
-
- bc_inv(addr, size);
- __sync();
-}
-#endif /* CONFIG_DMA_NONCOHERENT */
-
-/*
- * While we're protected against bad userland addresses we don't care
- * very much about what happens in that case. Usually a segmentation
- * fault will dump the process later on anyway ...
- */
-static void local_r4k_flush_cache_sigtramp(void * arg)
-{
- unsigned long ic_lsize = cpu_icache_line_size();
- unsigned long dc_lsize = cpu_dcache_line_size();
- unsigned long sc_lsize = cpu_scache_line_size();
- unsigned long addr = (unsigned long) arg;
-
- R4600_HIT_CACHEOP_WAR_IMPL;
- if (dc_lsize)
- protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
- if (!cpu_icache_snoops_remote_store && scache_size)
- protected_writeback_scache_line(addr & ~(sc_lsize - 1));
- if (ic_lsize)
- protected_flush_icache_line(addr & ~(ic_lsize - 1));
- if (MIPS4K_ICACHE_REFILL_WAR) {
- __asm__ __volatile__ (
- ".set push\n\t"
- ".set noat\n\t"
- ".set mips3\n\t"
-#ifdef CONFIG_32BIT
- "la $at,1f\n\t"
-#endif
-#ifdef CONFIG_64BIT
- "dla $at,1f\n\t"
-#endif
- "cache %0,($at)\n\t"
- "nop; nop; nop\n"
- "1:\n\t"
- ".set pop"
- :
- : "i" (Hit_Invalidate_I));
- }
- if (MIPS_CACHE_SYNC_WAR)
- __asm__ __volatile__ ("sync");
-}
-
-static void r4k_flush_cache_sigtramp(unsigned long addr)
-{
- r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr);
-}
-
-static void r4k_flush_icache_all(void)
-{
- if (cpu_has_vtag_icache)
- r4k_blast_icache();
-}
-
-struct flush_kernel_vmap_range_args {
- unsigned long vaddr;
- int size;
-};
-
-static inline void local_r4k_flush_kernel_vmap_range(void *args)
-{
- struct flush_kernel_vmap_range_args *vmra = args;
- unsigned long vaddr = vmra->vaddr;
- int size = vmra->size;
-
- /*
- * Aliases only affect the primary caches so don't bother with
- * S-caches or T-caches.
- */
- if (cpu_has_safe_index_cacheops && size >= dcache_size)
- r4k_blast_dcache();
- else {
- R4600_HIT_CACHEOP_WAR_IMPL;
- blast_dcache_range(vaddr, vaddr + size);
- }
-}
-
-static void r4k_flush_kernel_vmap_range(unsigned long vaddr, int size)
-{
- struct flush_kernel_vmap_range_args args;
-
- args.vaddr = (unsigned long) vaddr;
- args.size = size;
-
- r4k_on_each_cpu(local_r4k_flush_kernel_vmap_range, &args);
-}
-
-static inline void rm7k_erratum31(void)
-{
- const unsigned long ic_lsize = 32;
- unsigned long addr;
-
- /* RM7000 erratum #31. The icache is screwed at startup. */
- write_c0_taglo(0);
- write_c0_taghi(0);
-
- for (addr = INDEX_BASE; addr <= INDEX_BASE + 4096; addr += ic_lsize) {
- __asm__ __volatile__ (
- ".set push\n\t"
- ".set noreorder\n\t"
- ".set mips3\n\t"
- "cache\t%1, 0(%0)\n\t"
- "cache\t%1, 0x1000(%0)\n\t"
- "cache\t%1, 0x2000(%0)\n\t"
- "cache\t%1, 0x3000(%0)\n\t"
- "cache\t%2, 0(%0)\n\t"
- "cache\t%2, 0x1000(%0)\n\t"
- "cache\t%2, 0x2000(%0)\n\t"
- "cache\t%2, 0x3000(%0)\n\t"
- "cache\t%1, 0(%0)\n\t"
- "cache\t%1, 0x1000(%0)\n\t"
- "cache\t%1, 0x2000(%0)\n\t"
- "cache\t%1, 0x3000(%0)\n\t"
- ".set pop\n"
- :
- : "r" (addr), "i" (Index_Store_Tag_I), "i" (Fill));
- }
-}
-
-static char *way_string[] __cpuinitdata = { NULL, "direct mapped", "2-way",
- "3-way", "4-way", "5-way", "6-way", "7-way", "8-way"
-};
-
-static void __cpuinit probe_pcache(void)
-{
- struct cpuinfo_mips *c = &current_cpu_data;
- unsigned int config = read_c0_config();
- unsigned int prid = read_c0_prid();
- unsigned long config1;
- unsigned int lsize;
-
- switch (c->cputype) {
- case CPU_R4600: /* QED style two way caches? */
- case CPU_R4700:
- case CPU_R5000:
- case CPU_NEVADA:
- icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
- c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
- c->icache.ways = 2;
- c->icache.waybit = __ffs(icache_size/2);
-
- dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
- c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
- c->dcache.ways = 2;
- c->dcache.waybit= __ffs(dcache_size/2);
-
- c->options |= MIPS_CPU_CACHE_CDEX_P;
- break;
-
- case CPU_R5432:
- case CPU_R5500:
- icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
- c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
- c->icache.ways = 2;
- c->icache.waybit= 0;
-
- dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
- c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
- c->dcache.ways = 2;
- c->dcache.waybit = 0;
-
- c->options |= MIPS_CPU_CACHE_CDEX_P | MIPS_CPU_PREFETCH;
- break;
-
- case CPU_TX49XX:
- icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
- c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
- c->icache.ways = 4;
- c->icache.waybit= 0;
-
- dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
- c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
- c->dcache.ways = 4;
- c->dcache.waybit = 0;
-
- c->options |= MIPS_CPU_CACHE_CDEX_P;
- c->options |= MIPS_CPU_PREFETCH;
- break;
-
- case CPU_R4000PC:
- case CPU_R4000SC:
- case CPU_R4000MC:
- case CPU_R4400PC:
- case CPU_R4400SC:
- case CPU_R4400MC:
- case CPU_R4300:
- icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
- c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
- c->icache.ways = 1;
- c->icache.waybit = 0; /* doesn't matter */
-
- dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
- c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
- c->dcache.ways = 1;
- c->dcache.waybit = 0; /* does not matter */
-
- c->options |= MIPS_CPU_CACHE_CDEX_P;
- break;
-
- case CPU_R10000:
- case CPU_R12000:
- case CPU_R14000:
- icache_size = 1 << (12 + ((config & R10K_CONF_IC) >> 29));
- c->icache.linesz = 64;
- c->icache.ways = 2;
- c->icache.waybit = 0;
-
- dcache_size = 1 << (12 + ((config & R10K_CONF_DC) >> 26));
- c->dcache.linesz = 32;
- c->dcache.ways = 2;
- c->dcache.waybit = 0;
-
- c->options |= MIPS_CPU_PREFETCH;
- break;
-
- case CPU_VR4133:
- write_c0_config(config & ~VR41_CONF_P4K);
- case CPU_VR4131:
- /* Workaround for cache instruction bug of VR4131 */
- if (c->processor_id == 0x0c80U || c->processor_id == 0x0c81U ||
- c->processor_id == 0x0c82U) {
- config |= 0x00400000U;
- if (c->processor_id == 0x0c80U)
- config |= VR41_CONF_BP;
- write_c0_config(config);
- } else
- c->options |= MIPS_CPU_CACHE_CDEX_P;
-
- icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
- c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
- c->icache.ways = 2;
- c->icache.waybit = __ffs(icache_size/2);
-
- dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
- c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
- c->dcache.ways = 2;
- c->dcache.waybit = __ffs(dcache_size/2);
- break;
-
- case CPU_VR41XX:
- case CPU_VR4111:
- case CPU_VR4121:
- case CPU_VR4122:
- case CPU_VR4181:
- case CPU_VR4181A:
- icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
- c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
- c->icache.ways = 1;
- c->icache.waybit = 0; /* doesn't matter */
-
- dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
- c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
- c->dcache.ways = 1;
- c->dcache.waybit = 0; /* does not matter */
-
- c->options |= MIPS_CPU_CACHE_CDEX_P;
- break;
-
- case CPU_RM7000:
- rm7k_erratum31();
-
- case CPU_RM9000:
- icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
- c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
- c->icache.ways = 4;
- c->icache.waybit = __ffs(icache_size / c->icache.ways);
-
- dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
- c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
- c->dcache.ways = 4;
- c->dcache.waybit = __ffs(dcache_size / c->dcache.ways);
-
-#if !defined(CONFIG_SMP) || !defined(RM9000_CDEX_SMP_WAR)
- c->options |= MIPS_CPU_CACHE_CDEX_P;
-#endif
- c->options |= MIPS_CPU_PREFETCH;
- break;
-
- case CPU_LOONGSON2:
- icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
- c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
- if (prid & 0x3)
- c->icache.ways = 4;
- else
- c->icache.ways = 2;
- c->icache.waybit = 0;
-
- dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
- c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
- if (prid & 0x3)
- c->dcache.ways = 4;
- else
- c->dcache.ways = 2;
- c->dcache.waybit = 0;
- break;
-
- default:
- if (!(config & MIPS_CONF_M))
- panic("Don't know how to probe P-caches on this cpu.");
-
- /*
- * So we seem to be a MIPS32 or MIPS64 CPU
- * So let's probe the I-cache ...
- */
- config1 = read_c0_config1();
-
- if ((lsize = ((config1 >> 19) & 7)))
- c->icache.linesz = 2 << lsize;
- else
- c->icache.linesz = lsize;
- c->icache.sets = 64 << ((config1 >> 22) & 7);
- c->icache.ways = 1 + ((config1 >> 16) & 7);
-
- icache_size = c->icache.sets *
- c->icache.ways *
- c->icache.linesz;
- c->icache.waybit = __ffs(icache_size/c->icache.ways);
-
- if (config & 0x8) /* VI bit */
- c->icache.flags |= MIPS_CACHE_VTAG;
-
- /*
- * Now probe the MIPS32 / MIPS64 data cache.
- */
- c->dcache.flags = 0;
-
- if ((lsize = ((config1 >> 10) & 7)))
- c->dcache.linesz = 2 << lsize;
- else
- c->dcache.linesz= lsize;
- c->dcache.sets = 64 << ((config1 >> 13) & 7);
- c->dcache.ways = 1 + ((config1 >> 7) & 7);
-
- dcache_size = c->dcache.sets *
- c->dcache.ways *
- c->dcache.linesz;
- c->dcache.waybit = __ffs(dcache_size/c->dcache.ways);
-
- c->options |= MIPS_CPU_PREFETCH;
- break;
- }
-
- /*
- * Processor configuration sanity check for the R4000SC erratum
- * #5. With page sizes larger than 32kB there is no possibility
- * to get a VCE exception anymore so we don't care about this
- * misconfiguration. The case is rather theoretical anyway;
- * presumably no vendor is shipping his hardware in the "bad"
- * configuration.
- */
- if ((prid & 0xff00) == PRID_IMP_R4000 && (prid & 0xff) < 0x40 &&
- !(config & CONF_SC) && c->icache.linesz != 16 &&
- PAGE_SIZE <= 0x8000)
- panic("Improper R4000SC processor configuration detected");
-
- /* compute a couple of other cache variables */
- c->icache.waysize = icache_size / c->icache.ways;
- c->dcache.waysize = dcache_size / c->dcache.ways;
-
- c->icache.sets = c->icache.linesz ?
- icache_size / (c->icache.linesz * c->icache.ways) : 0;
- c->dcache.sets = c->dcache.linesz ?
- dcache_size / (c->dcache.linesz * c->dcache.ways) : 0;
-
- /*
- * R10000 and R12000 P-caches are odd in a positive way. They're 32kB
- * 2-way virtually indexed, so they would normally suffer from
- * aliases, but magic in the hardware deals with that for us so
- * we don't need to take care ourselves.
- */
- switch (c->cputype) {
- case CPU_20KC:
- case CPU_25KF:
- case CPU_SB1:
- case CPU_SB1A:
- case CPU_XLR:
- c->dcache.flags |= MIPS_CACHE_PINDEX;
- break;
-
- case CPU_R10000:
- case CPU_R12000:
- case CPU_R14000:
- break;
-
- case CPU_24K:
- case CPU_34K:
- case CPU_74K:
- case CPU_1004K:
- if ((read_c0_config7() & (1 << 16))) {
- /* effectively physically indexed dcache,
- thus no virtual aliases. */
- c->dcache.flags |= MIPS_CACHE_PINDEX;
- break;
- }
- default:
- if (c->dcache.waysize > PAGE_SIZE)
- c->dcache.flags |= MIPS_CACHE_ALIASES;
- }
-
- switch (c->cputype) {
- case CPU_20KC:
- /*
- * Some older 20Kc chips don't have the 'VI' bit in
- * the config register.
- */
- c->icache.flags |= MIPS_CACHE_VTAG;
- break;
-
- case CPU_ALCHEMY:
- c->icache.flags |= MIPS_CACHE_IC_F_DC;
- break;
- }
-
-#ifdef CONFIG_CPU_LOONGSON2
- /*
- * LOONGSON2 has 4 way icache, but when using indexed cache op,
- * one op will act on all 4 ways
- */
- c->icache.ways = 1;
-#endif
-
- printk("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
- icache_size >> 10,
- c->icache.flags & MIPS_CACHE_VTAG ? "VIVT" : "VIPT",
- way_string[c->icache.ways], c->icache.linesz);
-
- printk("Primary data cache %ldkB, %s, %s, %s, linesize %d bytes\n",
- dcache_size >> 10, way_string[c->dcache.ways],
- (c->dcache.flags & MIPS_CACHE_PINDEX) ? "PIPT" : "VIPT",
- (c->dcache.flags & MIPS_CACHE_ALIASES) ?
- "cache aliases" : "no aliases",
- c->dcache.linesz);
-}
-
-/*
- * If you even _breathe_ on this function, look at the gcc output and make sure
- * it does not pop things on and off the stack for the cache sizing loop that
- * executes in KSEG1 space or else you will crash and burn badly. You have
- * been warned.
- */
-static int __cpuinit probe_scache(void)
-{
- unsigned long flags, addr, begin, end, pow2;
- unsigned int config = read_c0_config();
- struct cpuinfo_mips *c = &current_cpu_data;
-
- if (config & CONF_SC)
- return 0;
-
- begin = (unsigned long) &_stext;
- begin &= ~((4 * 1024 * 1024) - 1);
- end = begin + (4 * 1024 * 1024);
-
- /*
- * This is such a bitch, you'd think they would make it easy to do
- * this. Away you daemons of stupidity!
- */
- local_irq_save(flags);
-
- /* Fill each size-multiple cache line with a valid tag. */
- pow2 = (64 * 1024);
- for (addr = begin; addr < end; addr = (begin + pow2)) {
- unsigned long *p = (unsigned long *) addr;
- __asm__ __volatile__("nop" : : "r" (*p)); /* whee... */
- pow2 <<= 1;
- }
-
- /* Load first line with zero (therefore invalid) tag. */
- write_c0_taglo(0);
- write_c0_taghi(0);
- __asm__ __volatile__("nop; nop; nop; nop;"); /* avoid the hazard */
- cache_op(Index_Store_Tag_I, begin);
- cache_op(Index_Store_Tag_D, begin);
- cache_op(Index_Store_Tag_SD, begin);
-
- /* Now search for the wrap around point. */
- pow2 = (128 * 1024);
- for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) {
- cache_op(Index_Load_Tag_SD, addr);
- __asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... */
- if (!read_c0_taglo())
- break;
- pow2 <<= 1;
- }
- local_irq_restore(flags);
- addr -= begin;
-
- scache_size = addr;
- c->scache.linesz = 16 << ((config & R4K_CONF_SB) >> 22);
- c->scache.ways = 1;
- c->scache.waybit = 0; /* does not matter */
-
- return 1;
-}
-
-#if defined(CONFIG_CPU_LOONGSON2)
-static void __init loongson2_sc_init(void)
-{
- struct cpuinfo_mips *c = &current_cpu_data;
-
- scache_size = 512*1024;
- c->scache.linesz = 32;
- c->scache.ways = 4;
- c->scache.waybit = 0;
- c->scache.waysize = scache_size / (c->scache.ways);
- c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);
- pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
- scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);
-
- c->options |= MIPS_CPU_INCLUSIVE_CACHES;
-}
-#endif
-
-extern int r5k_sc_init(void);
-extern int rm7k_sc_init(void);
-extern int mips_sc_init(void);
-
-static void __cpuinit setup_scache(void)
-{
- struct cpuinfo_mips *c = &current_cpu_data;
- unsigned int config = read_c0_config();
- int sc_present = 0;
-
- /*
- * Do the probing thing on R4000SC and R4400SC processors. Other
- * processors don't have a S-cache that would be relevant to the
- * Linux memory management.
- */
- switch (c->cputype) {
- case CPU_R4000SC:
- case CPU_R4000MC:
- case CPU_R4400SC:
- case CPU_R4400MC:
- sc_present = run_uncached(probe_scache);
- if (sc_present)
- c->options |= MIPS_CPU_CACHE_CDEX_S;
- break;
-
- case CPU_R10000:
- case CPU_R12000:
- case CPU_R14000:
- scache_size = 0x80000 << ((config & R10K_CONF_SS) >> 16);
- c->scache.linesz = 64 << ((config >> 13) & 1);
- c->scache.ways = 2;
- c->scache.waybit= 0;
- sc_present = 1;
- break;
-
- case CPU_R5000:
- case CPU_NEVADA:
-#ifdef CONFIG_R5000_CPU_SCACHE
- r5k_sc_init();
-#endif
- return;
-
- case CPU_RM7000:
- case CPU_RM9000:
-#ifdef CONFIG_RM7000_CPU_SCACHE
- rm7k_sc_init();
-#endif
- return;
-
-#if defined(CONFIG_CPU_LOONGSON2)
- case CPU_LOONGSON2:
- loongson2_sc_init();
- return;
-#endif
- case CPU_XLP:
- /* don't need to worry about L2, fully coherent */
- return;
-
- default:
- if (c->isa_level == MIPS_CPU_ISA_M32R1 ||
- c->isa_level == MIPS_CPU_ISA_M32R2 ||
- c->isa_level == MIPS_CPU_ISA_M64R1 ||
- c->isa_level == MIPS_CPU_ISA_M64R2) {
-#ifdef CONFIG_MIPS_CPU_SCACHE
- if (mips_sc_init ()) {
- scache_size = c->scache.ways * c->scache.sets * c->scache.linesz;
- printk("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n",
- scache_size >> 10,
- way_string[c->scache.ways], c->scache.linesz);
- }
-#else
- if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
- panic("Dunno how to handle MIPS32 / MIPS64 second level cache");
-#endif
- return;
- }
- sc_present = 0;
- }
-
- if (!sc_present)
- return;
-
- /* compute a couple of other cache variables */
- c->scache.waysize = scache_size / c->scache.ways;
-
- c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);
-
- printk("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
- scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);
-
- c->options |= MIPS_CPU_INCLUSIVE_CACHES;
-}
-
-void au1x00_fixup_config_od(void)
-{
- /*
- * c0_config.od (bit 19) was write only (and read as 0)
- * on the early revisions of Alchemy SOCs. It disables the bus
- * transaction overlapping and needs to be set to fix various errata.
- */
- switch (read_c0_prid()) {
- case 0x00030100: /* Au1000 DA */
- case 0x00030201: /* Au1000 HA */
- case 0x00030202: /* Au1000 HB */
- case 0x01030200: /* Au1500 AB */
- /*
- * The Au1100 errata are actually silent about this bit, so we set it
- * just in case for those revisions that require it to be set according
- * to the (now gone) cpu table.
- */
- case 0x02030200: /* Au1100 AB */
- case 0x02030201: /* Au1100 BA */
- case 0x02030202: /* Au1100 BC */
- set_c0_config(1 << 19);
- break;
- }
-}
-
-/* CP0 hazard avoidance. */
-#define NXP_BARRIER() \
- __asm__ __volatile__( \
- ".set noreorder\n\t" \
- "nop; nop; nop; nop; nop; nop;\n\t" \
- ".set reorder\n\t")
-
-static void nxp_pr4450_fixup_config(void)
-{
- unsigned long config0;
-
- config0 = read_c0_config();
-
- /* clear all three cache coherency fields */
- config0 &= ~(0x7 | (7 << 25) | (7 << 28));
- config0 |= (((_page_cachable_default >> _CACHE_SHIFT) << 0) |
- ((_page_cachable_default >> _CACHE_SHIFT) << 25) |
- ((_page_cachable_default >> _CACHE_SHIFT) << 28));
- write_c0_config(config0);
- NXP_BARRIER();
-}
-
-static int __cpuinitdata cca = -1;
-
-static int __init cca_setup(char *str)
-{
- get_option(&str, &cca);
-
- return 1;
-}
-
-__setup("cca=", cca_setup);
-
-static void __cpuinit coherency_setup(void)
-{
- if (cca < 0 || cca > 7)
- cca = read_c0_config() & CONF_CM_CMASK;
- _page_cachable_default = cca << _CACHE_SHIFT;
-
- pr_debug("Using cache attribute %d\n", cca);
- change_c0_config(CONF_CM_CMASK, cca);
-
- /*
- * c0_status.cu=0 specifies that updates by the sc instruction use
- * the coherency mode specified by the TLB; 1 means cachable
- * coherent update on write will be used. Not all processors have
- * this bit, and some wire it to zero; others like Toshiba had the
- * silly idea of putting something else there ...
- */
- switch (current_cpu_type()) {
- case CPU_R4000PC:
- case CPU_R4000SC:
- case CPU_R4000MC:
- case CPU_R4400PC:
- case CPU_R4400SC:
- case CPU_R4400MC:
- clear_c0_config(CONF_CU);
- break;
- /*
- * We need to catch the early Alchemy SOCs with
- * the write-only c0_config.od bit and set it back to one on:
- * Au1000 rev DA, HA, HB; Au1100 AB, BA, BC; Au1500 AB
- */
- case CPU_ALCHEMY:
- au1x00_fixup_config_od();
- break;
-
- case PRID_IMP_PR4450:
- nxp_pr4450_fixup_config();
- break;
- }
-}
-
-#if defined(CONFIG_DMA_NONCOHERENT)
-
-static int __cpuinitdata coherentio;
-
-static int __init setcoherentio(char *str)
-{
- coherentio = 1;
-
- return 1;
-}
-
-__setup("coherentio", setcoherentio);
-#endif
-
-void __cpuinit r4k_cache_init(void)
-{
- extern void build_clear_page(void);
- extern void build_copy_page(void);
- extern char __weak except_vec2_generic;
- extern char __weak except_vec2_sb1;
- struct cpuinfo_mips *c = &current_cpu_data;
-
- switch (c->cputype) {
- case CPU_SB1:
- case CPU_SB1A:
- set_uncached_handler(0x100, &except_vec2_sb1, 0x80);
- break;
-
- default:
- set_uncached_handler(0x100, &except_vec2_generic, 0x80);
- break;
- }
-
- probe_pcache();
- setup_scache();
-
- r4k_blast_dcache_page_setup();
- r4k_blast_dcache_page_indexed_setup();
- r4k_blast_dcache_setup();
- r4k_blast_icache_page_setup();
- r4k_blast_icache_page_indexed_setup();
- r4k_blast_icache_setup();
- r4k_blast_scache_page_setup();
- r4k_blast_scache_page_indexed_setup();
- r4k_blast_scache_setup();
-
- /*
- * Some MIPS32 and MIPS64 processors have physically indexed caches.
- * This code supports virtually indexed processors and will be
- * unnecessarily inefficient on physically indexed processors.
- */
- if (c->dcache.linesz)
- shm_align_mask = max_t(unsigned long,
- c->dcache.sets * c->dcache.linesz - 1,
- PAGE_SIZE - 1);
- else
- shm_align_mask = PAGE_SIZE - 1;
-
- __flush_cache_vmap = r4k__flush_cache_vmap;
- __flush_cache_vunmap = r4k__flush_cache_vunmap;
-
- flush_cache_all = cache_noop;
- __flush_cache_all = r4k___flush_cache_all;
- flush_cache_mm = r4k_flush_cache_mm;
- flush_cache_page = r4k_flush_cache_page;
- flush_cache_range = r4k_flush_cache_range;
-
- __flush_kernel_vmap_range = r4k_flush_kernel_vmap_range;
-
- flush_cache_sigtramp = r4k_flush_cache_sigtramp;
- flush_icache_all = r4k_flush_icache_all;
- local_flush_data_cache_page = local_r4k_flush_data_cache_page;
- flush_data_cache_page = r4k_flush_data_cache_page;
- flush_icache_range = r4k_flush_icache_range;
- local_flush_icache_range = local_r4k_flush_icache_range;
-
-#if defined(CONFIG_DMA_NONCOHERENT)
- if (coherentio) {
- _dma_cache_wback_inv = (void *)cache_noop;
- _dma_cache_wback = (void *)cache_noop;
- _dma_cache_inv = (void *)cache_noop;
- } else {
- _dma_cache_wback_inv = r4k_dma_cache_wback_inv;
- _dma_cache_wback = r4k_dma_cache_wback_inv;
- _dma_cache_inv = r4k_dma_cache_inv;
- }
-#endif
-
- build_clear_page();
- build_copy_page();
-#if !defined(CONFIG_MIPS_CMP)
- local_r4k___flush_cache_all(NULL);
-#endif
- coherency_setup();
-}
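-
-/*
- * Illustrative sketch, not part of the original file: how the
- * shm_align_mask computation in r4k_cache_init() above plays out on a
- * hypothetical 32KB, 4-way dcache with 32-byte lines and 4KB pages.
- */
-static unsigned long r4k_alias_mask_example(void)
-{
- unsigned long sets = 32768 / 4 / 32; /* 256 sets per way */
- unsigned long linesz = 32;
-
- /* sets * linesz - 1 = 8191 exceeds PAGE_SIZE - 1, so shared
-    mappings get 8KB colour alignment to avoid dcache aliases */
- return max_t(unsigned long, sets * linesz - 1, PAGE_SIZE - 1);
-}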
diff --git a/ANDROID_3.4.5/arch/mips/mm/c-tx39.c b/ANDROID_3.4.5/arch/mips/mm/c-tx39.c
deleted file mode 100644
index 87d23cad..00000000
--- a/ANDROID_3.4.5/arch/mips/mm/c-tx39.c
+++ /dev/null
@@ -1,440 +0,0 @@
-/*
- * r2300.c: R2000 and R3000 specific mmu/cache code.
- *
- * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
- *
- * with a lot of changes to make this thing work for R3000s
- * Tx39XX R4k style caches added. HK
- * Copyright (C) 1998, 1999, 2000 Harald Koerfgen
- * Copyright (C) 1998 Gleb Raiko & Vladimir Roganov
- */
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/smp.h>
-#include <linux/mm.h>
-
-#include <asm/cacheops.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/mmu_context.h>
-#include <asm/isadep.h>
-#include <asm/io.h>
-#include <asm/bootinfo.h>
-#include <asm/cpu.h>
-
-/* For R3000 cores with R4000 style caches */
-static unsigned long icache_size, dcache_size; /* Size in bytes */
-
-#include <asm/r4kcache.h>
-
-extern int r3k_have_wired_reg; /* in tlb-r3k.c */
-
-/* This sequence is required to ensure icache is disabled immediately */
-#define TX39_STOP_STREAMING() \
-__asm__ __volatile__( \
- ".set push\n\t" \
- ".set noreorder\n\t" \
- "b 1f\n\t" \
- "nop\n\t" \
- "1:\n\t" \
- ".set pop" \
- )
-
-/* TX39H-style cache flush routines. */
-static void tx39h_flush_icache_all(void)
-{
- unsigned long flags, config;
-
- /* disable icache (set ICE#) */
- local_irq_save(flags);
- config = read_c0_conf();
- write_c0_conf(config & ~TX39_CONF_ICE);
- TX39_STOP_STREAMING();
- blast_icache16();
- write_c0_conf(config);
- local_irq_restore(flags);
-}
-
-static void tx39h_dma_cache_wback_inv(unsigned long addr, unsigned long size)
-{
- /* Catch bad driver code */
- BUG_ON(size == 0);
-
- iob();
- blast_inv_dcache_range(addr, addr + size);
-}
-
-
-/* TX39H2,TX39H3 */
-static inline void tx39_blast_dcache_page(unsigned long addr)
-{
- if (current_cpu_type() != CPU_TX3912)
- blast_dcache16_page(addr);
-}
-
-static inline void tx39_blast_dcache_page_indexed(unsigned long addr)
-{
- blast_dcache16_page_indexed(addr);
-}
-
-static inline void tx39_blast_dcache(void)
-{
- blast_dcache16();
-}
-
-static inline void tx39_blast_icache_page(unsigned long addr)
-{
- unsigned long flags, config;
- /* disable icache (set ICE#) */
- local_irq_save(flags);
- config = read_c0_conf();
- write_c0_conf(config & ~TX39_CONF_ICE);
- TX39_STOP_STREAMING();
- blast_icache16_page(addr);
- write_c0_conf(config);
- local_irq_restore(flags);
-}
-
-static inline void tx39_blast_icache_page_indexed(unsigned long addr)
-{
- unsigned long flags, config;
- /* disable icache (set ICE#) */
- local_irq_save(flags);
- config = read_c0_conf();
- write_c0_conf(config & ~TX39_CONF_ICE);
- TX39_STOP_STREAMING();
- blast_icache16_page_indexed(addr);
- write_c0_conf(config);
- local_irq_restore(flags);
-}
-
-static inline void tx39_blast_icache(void)
-{
- unsigned long flags, config;
- /* disable icache (set ICE#) */
- local_irq_save(flags);
- config = read_c0_conf();
- write_c0_conf(config & ~TX39_CONF_ICE);
- TX39_STOP_STREAMING();
- blast_icache16();
- write_c0_conf(config);
- local_irq_restore(flags);
-}
-
-static void tx39__flush_cache_vmap(void)
-{
- tx39_blast_dcache();
-}
-
-static void tx39__flush_cache_vunmap(void)
-{
- tx39_blast_dcache();
-}
-
-static inline void tx39_flush_cache_all(void)
-{
- if (!cpu_has_dc_aliases)
- return;
-
- tx39_blast_dcache();
-}
-
-static inline void tx39___flush_cache_all(void)
-{
- tx39_blast_dcache();
- tx39_blast_icache();
-}
-
-static void tx39_flush_cache_mm(struct mm_struct *mm)
-{
- if (!cpu_has_dc_aliases)
- return;
-
- if (cpu_context(smp_processor_id(), mm) != 0)
- tx39_blast_dcache();
-}
-
-static void tx39_flush_cache_range(struct vm_area_struct *vma,
- unsigned long start, unsigned long end)
-{
- if (!cpu_has_dc_aliases)
- return;
- if (!(cpu_context(smp_processor_id(), vma->vm_mm)))
- return;
-
- tx39_blast_dcache();
-}
-
-static void tx39_flush_cache_page(struct vm_area_struct *vma, unsigned long page, unsigned long pfn)
-{
- int exec = vma->vm_flags & VM_EXEC;
- struct mm_struct *mm = vma->vm_mm;
- pgd_t *pgdp;
- pud_t *pudp;
- pmd_t *pmdp;
- pte_t *ptep;
-
- /*
- * If the mm owns no valid ASID yet, it cannot possibly have
- * gotten this page into the cache.
- */
- if (cpu_context(smp_processor_id(), mm) == 0)
- return;
-
- page &= PAGE_MASK;
- pgdp = pgd_offset(mm, page);
- pudp = pud_offset(pgdp, page);
- pmdp = pmd_offset(pudp, page);
- ptep = pte_offset(pmdp, page);
-
- /*
- * If the page isn't marked valid, the page cannot possibly be
- * in the cache.
- */
- if (!(pte_val(*ptep) & _PAGE_PRESENT))
- return;
-
- /*
- * Doing flushes for another ASID than the current one is
- * too difficult since stupid R4k caches do a TLB translation
- * for every cache flush operation. So we do indexed flushes
- * in that case, which doesn't flush too much of the cache.
- */
- if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID)) {
- if (cpu_has_dc_aliases || exec)
- tx39_blast_dcache_page(page);
- if (exec)
- tx39_blast_icache_page(page);
-
- return;
- }
-
- /*
- * Do indexed flush, too much work to get the (possible) TLB refills
- * to work correctly.
- */
- if (cpu_has_dc_aliases || exec)
- tx39_blast_dcache_page_indexed(page);
- if (exec)
- tx39_blast_icache_page_indexed(page);
-}
-
-static void local_tx39_flush_data_cache_page(void * addr)
-{
- tx39_blast_dcache_page((unsigned long)addr);
-}
-
-static void tx39_flush_data_cache_page(unsigned long addr)
-{
- tx39_blast_dcache_page(addr);
-}
-
-static void tx39_flush_icache_range(unsigned long start, unsigned long end)
-{
- if (end - start > dcache_size)
- tx39_blast_dcache();
- else
- protected_blast_dcache_range(start, end);
-
- if (end - start > icache_size)
- tx39_blast_icache();
- else {
- unsigned long flags, config;
- /* disable icache (set ICE#) */
- local_irq_save(flags);
- config = read_c0_conf();
- write_c0_conf(config & ~TX39_CONF_ICE);
- TX39_STOP_STREAMING();
- protected_blast_icache_range(start, end);
- write_c0_conf(config);
- local_irq_restore(flags);
- }
-}
-
-static void tx39_flush_kernel_vmap_range(unsigned long vaddr, int size)
-{
- BUG();
-}
-
-static void tx39_dma_cache_wback_inv(unsigned long addr, unsigned long size)
-{
- unsigned long end;
-
- if (((size | addr) & (PAGE_SIZE - 1)) == 0) {
- end = addr + size;
- do {
- tx39_blast_dcache_page(addr);
- addr += PAGE_SIZE;
- } while(addr != end);
- } else if (size > dcache_size) {
- tx39_blast_dcache();
- } else {
- blast_dcache_range(addr, addr + size);
- }
-}
-
-static void tx39_dma_cache_inv(unsigned long addr, unsigned long size)
-{
- unsigned long end;
-
- if (((size | addr) & (PAGE_SIZE - 1)) == 0) {
- end = addr + size;
- do {
- tx39_blast_dcache_page(addr);
- addr += PAGE_SIZE;
- } while(addr != end);
- } else if (size > dcache_size) {
- tx39_blast_dcache();
- } else {
- blast_inv_dcache_range(addr, addr + size);
- }
-}
-
-static void tx39_flush_cache_sigtramp(unsigned long addr)
-{
- unsigned long ic_lsize = current_cpu_data.icache.linesz;
- unsigned long dc_lsize = current_cpu_data.dcache.linesz;
- unsigned long config;
- unsigned long flags;
-
- protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
-
- /* disable icache (set ICE#) */
- local_irq_save(flags);
- config = read_c0_conf();
- write_c0_conf(config & ~TX39_CONF_ICE);
- TX39_STOP_STREAMING();
- protected_flush_icache_line(addr & ~(ic_lsize - 1));
- write_c0_conf(config);
- local_irq_restore(flags);
-}
-
-static __init void tx39_probe_cache(void)
-{
- unsigned long config;
-
- config = read_c0_conf();
-
- icache_size = 1 << (10 + ((config & TX39_CONF_ICS_MASK) >>
- TX39_CONF_ICS_SHIFT));
- dcache_size = 1 << (10 + ((config & TX39_CONF_DCS_MASK) >>
- TX39_CONF_DCS_SHIFT));
-
- current_cpu_data.icache.linesz = 16;
- switch (current_cpu_type()) {
- case CPU_TX3912:
- current_cpu_data.icache.ways = 1;
- current_cpu_data.dcache.ways = 1;
- current_cpu_data.dcache.linesz = 4;
- break;
-
- case CPU_TX3927:
- current_cpu_data.icache.ways = 2;
- current_cpu_data.dcache.ways = 2;
- current_cpu_data.dcache.linesz = 16;
- break;
-
- case CPU_TX3922:
- default:
- current_cpu_data.icache.ways = 1;
- current_cpu_data.dcache.ways = 1;
- current_cpu_data.dcache.linesz = 16;
- break;
- }
-}
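-
-/*
- * Sketch, not from the original source: the config-register size
- * fields decoded above encode the cache size as a power of two.
- */
-static unsigned long tx39_cache_bytes(unsigned int size_field)
-{
- /* field value n means 1KB << n: 0 -> 1KB, 3 -> 8KB, ... */
- return 1UL << (10 + size_field);
-}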
-
-void __cpuinit tx39_cache_init(void)
-{
- extern void build_clear_page(void);
- extern void build_copy_page(void);
- unsigned long config;
-
- config = read_c0_conf();
- config &= ~TX39_CONF_WBON;
- write_c0_conf(config);
-
- tx39_probe_cache();
-
- switch (current_cpu_type()) {
- case CPU_TX3912:
- /* TX39/H core (write-through direct-mapped cache) */
- __flush_cache_vmap = tx39__flush_cache_vmap;
- __flush_cache_vunmap = tx39__flush_cache_vunmap;
- flush_cache_all = tx39h_flush_icache_all;
- __flush_cache_all = tx39h_flush_icache_all;
- flush_cache_mm = (void *) tx39h_flush_icache_all;
- flush_cache_range = (void *) tx39h_flush_icache_all;
- flush_cache_page = (void *) tx39h_flush_icache_all;
- flush_icache_range = (void *) tx39h_flush_icache_all;
- local_flush_icache_range = (void *) tx39h_flush_icache_all;
-
- flush_cache_sigtramp = (void *) tx39h_flush_icache_all;
- local_flush_data_cache_page = (void *) tx39h_flush_icache_all;
- flush_data_cache_page = (void *) tx39h_flush_icache_all;
-
- _dma_cache_wback_inv = tx39h_dma_cache_wback_inv;
-
- shm_align_mask = PAGE_SIZE - 1;
-
- break;
-
- case CPU_TX3922:
- case CPU_TX3927:
- default:
- /* TX39/H2,H3 core (write-back 2-way set-associative cache) */
- r3k_have_wired_reg = 1;
- write_c0_wired(0); /* the reset default is 8 */
- /* board-dependent init code may set WBON */
-
- __flush_cache_vmap = tx39__flush_cache_vmap;
- __flush_cache_vunmap = tx39__flush_cache_vunmap;
-
- flush_cache_all = tx39_flush_cache_all;
- __flush_cache_all = tx39___flush_cache_all;
- flush_cache_mm = tx39_flush_cache_mm;
- flush_cache_range = tx39_flush_cache_range;
- flush_cache_page = tx39_flush_cache_page;
- flush_icache_range = tx39_flush_icache_range;
- local_flush_icache_range = tx39_flush_icache_range;
-
- __flush_kernel_vmap_range = tx39_flush_kernel_vmap_range;
-
- flush_cache_sigtramp = tx39_flush_cache_sigtramp;
- local_flush_data_cache_page = local_tx39_flush_data_cache_page;
- flush_data_cache_page = tx39_flush_data_cache_page;
-
- _dma_cache_wback_inv = tx39_dma_cache_wback_inv;
- _dma_cache_wback = tx39_dma_cache_wback_inv;
- _dma_cache_inv = tx39_dma_cache_inv;
-
- shm_align_mask = max_t(unsigned long,
- (dcache_size / current_cpu_data.dcache.ways) - 1,
- PAGE_SIZE - 1);
-
- break;
- }
-
- current_cpu_data.icache.waysize = icache_size / current_cpu_data.icache.ways;
- current_cpu_data.dcache.waysize = dcache_size / current_cpu_data.dcache.ways;
-
- current_cpu_data.icache.sets =
- current_cpu_data.icache.waysize / current_cpu_data.icache.linesz;
- current_cpu_data.dcache.sets =
- current_cpu_data.dcache.waysize / current_cpu_data.dcache.linesz;
-
- if (current_cpu_data.dcache.waysize > PAGE_SIZE)
- current_cpu_data.dcache.flags |= MIPS_CACHE_ALIASES;
-
- current_cpu_data.icache.waybit = 0;
- current_cpu_data.dcache.waybit = 0;
-
- printk("Primary instruction cache %ldkB, linesize %d bytes\n",
- icache_size >> 10, current_cpu_data.icache.linesz);
- printk("Primary data cache %ldkB, linesize %d bytes\n",
- dcache_size >> 10, current_cpu_data.dcache.linesz);
-
- build_clear_page();
- build_copy_page();
- tx39h_flush_icache_all();
-}
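-
-/*
- * Worked example, not part of the original file (geometry is
- * hypothetical): a TX3927 reporting an 8KB, 2-way dcache with
- * 16-byte lines under 4KB pages.
- */
-static void tx39_geometry_example(void)
-{
- unsigned long dcache_bytes = 8192, ways = 2, linesz = 16;
- unsigned long waysize = dcache_bytes / ways; /* 4096 */
- unsigned long sets = waysize / linesz; /* 256 */
-
- /* waysize does not exceed PAGE_SIZE, so MIPS_CACHE_ALIASES stays
-    clear and shm_align_mask remains PAGE_SIZE - 1 */
- BUG_ON(waysize > PAGE_SIZE || !sets);
-}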
diff --git a/ANDROID_3.4.5/arch/mips/mm/cache.c b/ANDROID_3.4.5/arch/mips/mm/cache.c
deleted file mode 100644
index 829320c7..00000000
--- a/ANDROID_3.4.5/arch/mips/mm/cache.c
+++ /dev/null
@@ -1,227 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1994 - 2003, 06, 07 by Ralf Baechle (ralf@linux-mips.org)
- * Copyright (C) 2007 MIPS Technologies, Inc.
- */
-#include <linux/fs.h>
-#include <linux/fcntl.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/linkage.h>
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/syscalls.h>
-#include <linux/mm.h>
-
-#include <asm/cacheflush.h>
-#include <asm/processor.h>
-#include <asm/cpu.h>
-#include <asm/cpu-features.h>
-
-/* Cache operations. */
-void (*flush_cache_all)(void);
-void (*__flush_cache_all)(void);
-void (*flush_cache_mm)(struct mm_struct *mm);
-void (*flush_cache_range)(struct vm_area_struct *vma, unsigned long start,
- unsigned long end);
-void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page,
- unsigned long pfn);
-void (*flush_icache_range)(unsigned long start, unsigned long end);
-void (*local_flush_icache_range)(unsigned long start, unsigned long end);
-
-void (*__flush_cache_vmap)(void);
-void (*__flush_cache_vunmap)(void);
-
-void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size);
-void (*__invalidate_kernel_vmap_range)(unsigned long vaddr, int size);
-
-EXPORT_SYMBOL_GPL(__flush_kernel_vmap_range);
-
-/* MIPS specific cache operations */
-void (*flush_cache_sigtramp)(unsigned long addr);
-void (*local_flush_data_cache_page)(void * addr);
-void (*flush_data_cache_page)(unsigned long addr);
-void (*flush_icache_all)(void);
-
-EXPORT_SYMBOL_GPL(local_flush_data_cache_page);
-EXPORT_SYMBOL(flush_data_cache_page);
-
-#ifdef CONFIG_DMA_NONCOHERENT
-
-/* DMA cache operations. */
-void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
-void (*_dma_cache_wback)(unsigned long start, unsigned long size);
-void (*_dma_cache_inv)(unsigned long start, unsigned long size);
-
-EXPORT_SYMBOL(_dma_cache_wback_inv);
-
-#endif /* CONFIG_DMA_NONCOHERENT */
-
-/*
- * We could optimize the case where the cache argument is not BCACHE,
- * but that seems a very atypical use ...
- */
-SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
- unsigned int, cache)
-{
- if (bytes == 0)
- return 0;
- if (!access_ok(VERIFY_WRITE, (void __user *) addr, bytes))
- return -EFAULT;
-
- flush_icache_range(addr, addr + bytes);
-
- return 0;
-}
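-
-/*
- * Userspace view, for illustration only (not part of this file): a
- * JIT that has just written instructions into a buffer reaches the
- * handler above through the libc wrapper, e.g.
- *
- * #include <sys/cachectl.h>
- *
- * cacheflush(code_buf, code_len, ICACHE);
- *
- * which ends up in flush_icache_range() for the given span.
- */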
-
-void __flush_dcache_page(struct page *page)
-{
- struct address_space *mapping = page_mapping(page);
- unsigned long addr;
-
- if (PageHighMem(page))
- return;
- if (mapping && !mapping_mapped(mapping)) {
- SetPageDcacheDirty(page);
- return;
- }
-
- /*
- * We could delay the flush for the !page_mapping case too. But that
- * case is for exec env/arg pages and those are 99% certain to
- * get faulted into the TLB (and thus flushed) anyway.
- */
- addr = (unsigned long) page_address(page);
- flush_data_cache_page(addr);
-}
-
-EXPORT_SYMBOL(__flush_dcache_page);
-
-void __flush_anon_page(struct page *page, unsigned long vmaddr)
-{
- unsigned long addr = (unsigned long) page_address(page);
-
- if (pages_do_alias(addr, vmaddr)) {
- if (page_mapped(page) && !Page_dcache_dirty(page)) {
- void *kaddr;
-
- kaddr = kmap_coherent(page, vmaddr);
- flush_data_cache_page((unsigned long)kaddr);
- kunmap_coherent();
- } else
- flush_data_cache_page(addr);
- }
-}
-
-EXPORT_SYMBOL(__flush_anon_page);
-
-void __update_cache(struct vm_area_struct *vma, unsigned long address,
- pte_t pte)
-{
- struct page *page;
- unsigned long pfn, addr;
- int exec = (vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc;
-
- pfn = pte_pfn(pte);
- if (unlikely(!pfn_valid(pfn)))
- return;
- page = pfn_to_page(pfn);
- if (page_mapping(page) && Page_dcache_dirty(page)) {
- addr = (unsigned long) page_address(page);
- if (exec || pages_do_alias(addr, address & PAGE_MASK))
- flush_data_cache_page(addr);
- ClearPageDcacheDirty(page);
- }
-}
-
-unsigned long _page_cachable_default;
-EXPORT_SYMBOL(_page_cachable_default);
-
-static inline void setup_protection_map(void)
-{
- if (kernel_uses_smartmips_rixi) {
- protection_map[0] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
- protection_map[1] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
- protection_map[2] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
- protection_map[3] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
- protection_map[4] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
- protection_map[5] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
- protection_map[6] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
- protection_map[7] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
-
- protection_map[8] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
- protection_map[9] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
- protection_map[10] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE | _PAGE_NO_READ);
- protection_map[11] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
- protection_map[12] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
- protection_map[13] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
- protection_map[14] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE | _PAGE_NO_READ);
- protection_map[15] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);
-
- } else {
- protection_map[0] = PAGE_NONE;
- protection_map[1] = PAGE_READONLY;
- protection_map[2] = PAGE_COPY;
- protection_map[3] = PAGE_COPY;
- protection_map[4] = PAGE_READONLY;
- protection_map[5] = PAGE_READONLY;
- protection_map[6] = PAGE_COPY;
- protection_map[7] = PAGE_COPY;
- protection_map[8] = PAGE_NONE;
- protection_map[9] = PAGE_READONLY;
- protection_map[10] = PAGE_SHARED;
- protection_map[11] = PAGE_SHARED;
- protection_map[12] = PAGE_READONLY;
- protection_map[13] = PAGE_READONLY;
- protection_map[14] = PAGE_SHARED;
- protection_map[15] = PAGE_SHARED;
- }
-}
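-
-/*
- * Sketch, not in the original file: protection_map[] above is indexed
- * by the low vm_flags bits, entries 0-7 for private and 8-15 for
- * shared mappings, roughly as vm_get_page_prot() does it.
- */
-static pgprot_t example_vm_prot(unsigned long vm_flags)
-{
- /* bit 0 = VM_READ, bit 1 = VM_WRITE, bit 2 = VM_EXEC,
-    bit 3 = VM_SHARED */
- return protection_map[vm_flags &
-   (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
-}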
-
-void __cpuinit cpu_cache_init(void)
-{
- if (cpu_has_3k_cache) {
- extern void __weak r3k_cache_init(void);
-
- r3k_cache_init();
- }
- if (cpu_has_6k_cache) {
- extern void __weak r6k_cache_init(void);
-
- r6k_cache_init();
- }
- if (cpu_has_4k_cache) {
- extern void __weak r4k_cache_init(void);
-
- r4k_cache_init();
- }
- if (cpu_has_8k_cache) {
- extern void __weak r8k_cache_init(void);
-
- r8k_cache_init();
- }
- if (cpu_has_tx39_cache) {
- extern void __weak tx39_cache_init(void);
-
- tx39_cache_init();
- }
-
- if (cpu_has_octeon_cache) {
- extern void __weak octeon_cache_init(void);
-
- octeon_cache_init();
- }
-
- setup_protection_map();
-}
-
-int __weak __uncached_access(struct file *file, unsigned long addr)
-{
- if (file->f_flags & O_DSYNC)
- return 1;
-
- return addr >= __pa(high_memory);
-}
diff --git a/ANDROID_3.4.5/arch/mips/mm/cerr-sb1.c b/ANDROID_3.4.5/arch/mips/mm/cerr-sb1.c
deleted file mode 100644
index 3571090b..00000000
--- a/ANDROID_3.4.5/arch/mips/mm/cerr-sb1.c
+++ /dev/null
@@ -1,586 +0,0 @@
-/*
- * Copyright (C) 2001,2002,2003 Broadcom Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-#include <linux/sched.h>
-#include <asm/mipsregs.h>
-#include <asm/sibyte/sb1250.h>
-#include <asm/sibyte/sb1250_regs.h>
-
-#if !defined(CONFIG_SIBYTE_BUS_WATCHER) || defined(CONFIG_SIBYTE_BW_TRACE)
-#include <asm/io.h>
-#include <asm/sibyte/sb1250_scd.h>
-#endif
-
-/*
- * We'd like to dump the L2_ECC_TAG register on errors, but errata make
- * that unsafe... So for now we don't. (BCM1250/BCM112x erratum SOC-48.)
- */
-#undef DUMP_L2_ECC_TAG_ON_ERROR
-
-/* SB1 definitions */
-
-/* XXX should come from config1 XXX */
-#define SB1_CACHE_INDEX_MASK 0x1fe0
-
-#define CP0_ERRCTL_RECOVERABLE (1 << 31)
-#define CP0_ERRCTL_DCACHE (1 << 30)
-#define CP0_ERRCTL_ICACHE (1 << 29)
-#define CP0_ERRCTL_MULTIBUS (1 << 23)
-#define CP0_ERRCTL_MC_TLB (1 << 15)
-#define CP0_ERRCTL_MC_TIMEOUT (1 << 14)
-
-#define CP0_CERRI_TAG_PARITY (1 << 29)
-#define CP0_CERRI_DATA_PARITY (1 << 28)
-#define CP0_CERRI_EXTERNAL (1 << 26)
-
-#define CP0_CERRI_IDX_VALID(c) (!((c) & CP0_CERRI_EXTERNAL))
-#define CP0_CERRI_DATA (CP0_CERRI_DATA_PARITY)
-
-#define CP0_CERRD_MULTIPLE (1 << 31)
-#define CP0_CERRD_TAG_STATE (1 << 30)
-#define CP0_CERRD_TAG_ADDRESS (1 << 29)
-#define CP0_CERRD_DATA_SBE (1 << 28)
-#define CP0_CERRD_DATA_DBE (1 << 27)
-#define CP0_CERRD_EXTERNAL (1 << 26)
-#define CP0_CERRD_LOAD (1 << 25)
-#define CP0_CERRD_STORE (1 << 24)
-#define CP0_CERRD_FILLWB (1 << 23)
-#define CP0_CERRD_COHERENCY (1 << 22)
-#define CP0_CERRD_DUPTAG (1 << 21)
-
-#define CP0_CERRD_DPA_VALID(c) (!((c) & CP0_CERRD_EXTERNAL))
-#define CP0_CERRD_IDX_VALID(c) \
- (((c) & (CP0_CERRD_LOAD | CP0_CERRD_STORE)) ? (!((c) & CP0_CERRD_EXTERNAL)) : 0)
-#define CP0_CERRD_CAUSES \
- (CP0_CERRD_LOAD | CP0_CERRD_STORE | CP0_CERRD_FILLWB | CP0_CERRD_COHERENCY | CP0_CERRD_DUPTAG)
-#define CP0_CERRD_TYPES \
- (CP0_CERRD_TAG_STATE | CP0_CERRD_TAG_ADDRESS | CP0_CERRD_DATA_SBE | CP0_CERRD_DATA_DBE | CP0_CERRD_EXTERNAL)
-#define CP0_CERRD_DATA (CP0_CERRD_DATA_SBE | CP0_CERRD_DATA_DBE)
-
-static uint32_t extract_ic(unsigned short addr, int data);
-static uint32_t extract_dc(unsigned short addr, int data);
-
-static inline void breakout_errctl(unsigned int val)
-{
- if (val & CP0_ERRCTL_RECOVERABLE)
- printk(" recoverable");
- if (val & CP0_ERRCTL_DCACHE)
- printk(" dcache");
- if (val & CP0_ERRCTL_ICACHE)
- printk(" icache");
- if (val & CP0_ERRCTL_MULTIBUS)
- printk(" multiple-buserr");
- printk("\n");
-}
-
-static inline void breakout_cerri(unsigned int val)
-{
- if (val & CP0_CERRI_TAG_PARITY)
- printk(" tag-parity");
- if (val & CP0_CERRI_DATA_PARITY)
- printk(" data-parity");
- if (val & CP0_CERRI_EXTERNAL)
- printk(" external");
- printk("\n");
-}
-
-static inline void breakout_cerrd(unsigned int val)
-{
- switch (val & CP0_CERRD_CAUSES) {
- case CP0_CERRD_LOAD:
- printk(" load,");
- break;
- case CP0_CERRD_STORE:
- printk(" store,");
- break;
- case CP0_CERRD_FILLWB:
- printk(" fill/wb,");
- break;
- case CP0_CERRD_COHERENCY:
- printk(" coherency,");
- break;
- case CP0_CERRD_DUPTAG:
- printk(" duptags,");
- break;
- default:
- printk(" NO CAUSE,");
- break;
- }
- if (!(val & CP0_CERRD_TYPES))
- printk(" NO TYPE");
- else {
- if (val & CP0_CERRD_MULTIPLE)
- printk(" multi-err");
- if (val & CP0_CERRD_TAG_STATE)
- printk(" tag-state");
- if (val & CP0_CERRD_TAG_ADDRESS)
- printk(" tag-address");
- if (val & CP0_CERRD_DATA_SBE)
- printk(" data-SBE");
- if (val & CP0_CERRD_DATA_DBE)
- printk(" data-DBE");
- if (val & CP0_CERRD_EXTERNAL)
- printk(" external");
- }
- printk("\n");
-}
-
-#ifndef CONFIG_SIBYTE_BUS_WATCHER
-
-static void check_bus_watcher(void)
-{
- uint32_t status, l2_err, memio_err;
-#ifdef DUMP_L2_ECC_TAG_ON_ERROR
- uint64_t l2_tag;
-#endif
-
- /* Destructive read, clears register and interrupt */
- status = csr_in32(IOADDR(A_SCD_BUS_ERR_STATUS));
- /* Bit 31 is always on, but there's no #define for that */
- if (status & ~(1UL << 31)) {
- l2_err = csr_in32(IOADDR(A_BUS_L2_ERRORS));
-#ifdef DUMP_L2_ECC_TAG_ON_ERROR
- l2_tag = in64(IOADDR(A_L2_ECC_TAG));
-#endif
- memio_err = csr_in32(IOADDR(A_BUS_MEM_IO_ERRORS));
- printk("Bus watcher error counters: %08x %08x\n", l2_err, memio_err);
- printk("\nLast recorded signature:\n");
- printk("Request %02x from %d, answered by %d with Dcode %d\n",
- (unsigned int)(G_SCD_BERR_TID(status) & 0x3f),
- (int)(G_SCD_BERR_TID(status) >> 6),
- (int)G_SCD_BERR_RID(status),
- (int)G_SCD_BERR_DCODE(status));
-#ifdef DUMP_L2_ECC_TAG_ON_ERROR
- printk("Last L2 tag w/ bad ECC: %016llx\n", l2_tag);
-#endif
- } else {
- printk("Bus watcher indicates no error\n");
- }
-}
-#else
-extern void check_bus_watcher(void);
-#endif
-
-asmlinkage void sb1_cache_error(void)
-{
- uint32_t errctl, cerr_i, cerr_d, dpalo, dpahi, eepc, res;
- unsigned long long cerr_dpa;
-
-#ifdef CONFIG_SIBYTE_BW_TRACE
- /* Freeze the trace buffer now */
-#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
- csr_out32(M_BCM1480_SCD_TRACE_CFG_FREEZE, IOADDR(A_SCD_TRACE_CFG));
-#else
- csr_out32(M_SCD_TRACE_CFG_FREEZE, IOADDR(A_SCD_TRACE_CFG));
-#endif
- printk("Trace buffer frozen\n");
-#endif
-
- printk("Cache error exception on CPU %x:\n",
- (read_c0_prid() >> 25) & 0x7);
-
- __asm__ __volatile__ (
- " .set push\n\t"
- " .set mips64\n\t"
- " .set noat\n\t"
- " mfc0 %0, $26\n\t"
- " mfc0 %1, $27\n\t"
- " mfc0 %2, $27, 1\n\t"
- " dmfc0 $1, $27, 3\n\t"
- " dsrl32 %3, $1, 0 \n\t"
- " sll %4, $1, 0 \n\t"
- " mfc0 %5, $30\n\t"
- " .set pop"
- : "=r" (errctl), "=r" (cerr_i), "=r" (cerr_d),
- "=r" (dpahi), "=r" (dpalo), "=r" (eepc));
-
- cerr_dpa = (((uint64_t)dpahi) << 32) | dpalo;
- printk(" c0_errorepc == %08x\n", eepc);
- printk(" c0_errctl == %08x", errctl);
- breakout_errctl(errctl);
- if (errctl & CP0_ERRCTL_ICACHE) {
- printk(" c0_cerr_i == %08x", cerr_i);
- breakout_cerri(cerr_i);
- if (CP0_CERRI_IDX_VALID(cerr_i)) {
- /* Check index of EPC, allowing for delay slot */
- if (((eepc & SB1_CACHE_INDEX_MASK) != (cerr_i & SB1_CACHE_INDEX_MASK)) &&
- ((eepc & SB1_CACHE_INDEX_MASK) != ((cerr_i & SB1_CACHE_INDEX_MASK) - 4)))
- printk(" cerr_i idx doesn't match eepc\n");
- else {
- res = extract_ic(cerr_i & SB1_CACHE_INDEX_MASK,
- (cerr_i & CP0_CERRI_DATA) != 0);
- if (!(res & cerr_i))
- printk("...didn't see indicated icache problem\n");
- }
- }
- }
- if (errctl & CP0_ERRCTL_DCACHE) {
- printk(" c0_cerr_d == %08x", cerr_d);
- breakout_cerrd(cerr_d);
- if (CP0_CERRD_DPA_VALID(cerr_d)) {
- printk(" c0_cerr_dpa == %010llx\n", cerr_dpa);
- if (!CP0_CERRD_IDX_VALID(cerr_d)) {
- res = extract_dc(cerr_dpa & SB1_CACHE_INDEX_MASK,
- (cerr_d & CP0_CERRD_DATA) != 0);
- if (!(res & cerr_d))
- printk("...didn't see indicated dcache problem\n");
- } else {
- if ((cerr_dpa & SB1_CACHE_INDEX_MASK) != (cerr_d & SB1_CACHE_INDEX_MASK))
- printk(" cerr_d idx doesn't match cerr_dpa\n");
- else {
- res = extract_dc(cerr_d & SB1_CACHE_INDEX_MASK,
- (cerr_d & CP0_CERRD_DATA) != 0);
- if (!(res & cerr_d))
- printk("...didn't see indicated problem\n");
- }
- }
- }
- }
-
- check_bus_watcher();
-
- /*
- * Calling panic() when a fatal cache error occurs scrambles the
- * state of the system (and the cache), making it difficult to
- * investigate after the fact. However, if you just stall the CPU,
- * the other CPU may keep on running, which is typically very
- * undesirable.
- */
-#ifdef CONFIG_SB1_CERR_STALL
- while (1)
- ;
-#else
- panic("unhandled cache error");
-#endif
-}
-
-
-/* Parity lookup table. */
-static const uint8_t parity[256] = {
- 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
- 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
- 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
- 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
- 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
- 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
- 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
- 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
- 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
- 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
- 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
- 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
- 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
- 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
- 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
- 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0
-};
-
-/* Masks to select bits for Hamming parity, mask_72_64[i] for bit[i] */
-static const uint64_t mask_72_64[8] = {
- 0x0738C808099264FFULL,
- 0x38C808099264FF07ULL,
- 0xC808099264FF0738ULL,
- 0x08099264FF0738C8ULL,
- 0x099264FF0738C808ULL,
- 0x9264FF0738C80809ULL,
- 0x64FF0738C8080992ULL,
- 0xFF0738C808099264ULL
-};
-
-/* Calculate the parity on a range of bits */
-static char range_parity(uint64_t dword, int max, int min)
-{
- char parity = 0;
- int i;
- dword >>= min;
- for (i=max-min; i>=0; i--) {
- if (dword & 0x1)
- parity = !parity;
- dword >>= 1;
- }
- return parity;
-}
-
-/* Calculate the 4-bit even byte-parity for an instruction */
-static unsigned char inst_parity(uint32_t word)
-{
- int i, j;
- char parity = 0;
- for (j=0; j<4; j++) {
- char byte_parity = 0;
- for (i=0; i<8; i++) {
- if (word & 0x80000000)
- byte_parity = !byte_parity;
- word <<= 1;
- }
- parity <<= 1;
- parity |= byte_parity;
- }
- return parity;
-}
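-
-/*
- * Sketch, not in the original: an equivalent byte-parity computation
- * using the classic xor-shift fold instead of the bit-by-bit loop of
- * inst_parity() above; useful for sanity-checking that path.
- */
-static unsigned char inst_parity_fold(uint32_t word)
-{
- unsigned char p = 0;
- int j;
-
- for (j = 3; j >= 0; j--) {
-  uint32_t byte = (word >> (8 * j)) & 0xff;
-
-  byte ^= byte >> 4; /* fold 8 bits to 4 */
-  byte ^= byte >> 2; /* ... to 2 */
-  byte ^= byte >> 1; /* ... to 1 */
-  p = (p << 1) | (byte & 1);
- }
- return p;
-}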
-
-static uint32_t extract_ic(unsigned short addr, int data)
-{
- unsigned short way;
- int valid;
- uint32_t taghi, taglolo, taglohi;
- unsigned long long taglo, va;
- uint64_t tlo_tmp;
- uint8_t lru;
- int res = 0;
-
- printk("Icache index 0x%04x ", addr);
- for (way = 0; way < 4; way++) {
- /* Index-load-tag-I */
- __asm__ __volatile__ (
- " .set push \n\t"
- " .set noreorder \n\t"
- " .set mips64 \n\t"
- " .set noat \n\t"
- " cache 4, 0(%3) \n\t"
- " mfc0 %0, $29 \n\t"
- " dmfc0 $1, $28 \n\t"
- " dsrl32 %1, $1, 0 \n\t"
- " sll %2, $1, 0 \n\t"
- " .set pop"
- : "=r" (taghi), "=r" (taglohi), "=r" (taglolo)
- : "r" ((way << 13) | addr));
-
- taglo = ((unsigned long long)taglohi << 32) | taglolo;
- if (way == 0) {
- lru = (taghi >> 14) & 0xff;
- printk("[Bank %d Set 0x%02x] LRU > %d %d %d %d > MRU\n",
- ((addr >> 5) & 0x3), /* bank */
- ((addr >> 7) & 0x3f), /* index */
- (lru & 0x3),
- ((lru >> 2) & 0x3),
- ((lru >> 4) & 0x3),
- ((lru >> 6) & 0x3));
- }
- va = (taglo & 0xC0000FFFFFFFE000ULL) | addr;
- if ((taglo & (1 << 31)) && (((taglo >> 62) & 0x3) == 3))
- va |= 0x3FFFF00000000000ULL;
- valid = ((taghi >> 29) & 1);
- if (valid) {
- tlo_tmp = taglo & 0xfff3ff;
- if (((taglo >> 10) & 1) ^ range_parity(tlo_tmp, 23, 0)) {
- printk(" ** bad parity in VTag0/G/ASID\n");
- res |= CP0_CERRI_TAG_PARITY;
- }
- if (((taglo >> 11) & 1) ^ range_parity(taglo, 63, 24)) {
- printk(" ** bad parity in R/VTag1\n");
- res |= CP0_CERRI_TAG_PARITY;
- }
- }
- if (valid ^ ((taghi >> 27) & 1)) {
- printk(" ** bad parity for valid bit\n");
- res |= CP0_CERRI_TAG_PARITY;
- }
- printk(" %d [VA %016llx] [Vld? %d] raw tags: %08X-%016llX\n",
- way, va, valid, taghi, taglo);
-
- if (data) {
- uint32_t datahi, insta, instb;
- uint8_t predecode;
- int offset;
-
- /* (hit all banks and ways) */
- for (offset = 0; offset < 4; offset++) {
- /* Index-load-data-I */
- __asm__ __volatile__ (
- " .set push\n\t"
- " .set noreorder\n\t"
- " .set mips64\n\t"
- " .set noat\n\t"
- " cache 6, 0(%3) \n\t"
- " mfc0 %0, $29, 1\n\t"
- " dmfc0 $1, $28, 1\n\t"
- " dsrl32 %1, $1, 0 \n\t"
- " sll %2, $1, 0 \n\t"
- " .set pop \n"
- : "=r" (datahi), "=r" (insta), "=r" (instb)
- : "r" ((way << 13) | addr | (offset << 3)));
- predecode = (datahi >> 8) & 0xff;
- if (((datahi >> 16) & 1) != (uint32_t)range_parity(predecode, 7, 0)) {
- printk(" ** bad parity in predecode\n");
- res |= CP0_CERRI_DATA_PARITY;
- }
- /* XXXKW should/could check predecode bits themselves */
- if (((datahi >> 4) & 0xf) ^ inst_parity(insta)) {
- printk(" ** bad parity in instruction a\n");
- res |= CP0_CERRI_DATA_PARITY;
- }
- if ((datahi & 0xf) ^ inst_parity(instb)) {
- printk(" ** bad parity in instruction b\n");
- res |= CP0_CERRI_DATA_PARITY;
- }
- printk(" %05X-%08X%08X", datahi, insta, instb);
- }
- printk("\n");
- }
- }
- return res;
-}
-
-/* Compute the ECC for a data doubleword */
-static uint8_t dc_ecc(uint64_t dword)
-{
- uint64_t t;
- uint32_t w;
- uint8_t p;
- int i;
-
- p = 0;
- for (i = 7; i >= 0; i--)
- {
- p <<= 1;
- t = dword & mask_72_64[i];
- w = (uint32_t)(t >> 32);
- p ^= (parity[w>>24] ^ parity[(w>>16) & 0xFF]
- ^ parity[(w>>8) & 0xFF] ^ parity[w & 0xFF]);
- w = (uint32_t)(t & 0xFFFFFFFF);
- p ^= (parity[w>>24] ^ parity[(w>>16) & 0xFF]
- ^ parity[(w>>8) & 0xFF] ^ parity[w & 0xFF]);
- }
- return p;
-}
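-
-/*
- * Sketch, not part of the original file: how extract_dc() below
- * applies dc_ecc(). The XOR of recomputed and stored check bits forms
- * a syndrome; a syndrome of Hamming weight 1 is counted as a
- * single-bit error, anything heavier as a double-bit error.
- */
-static int classify_ecc(uint64_t data, uint8_t stored_ecc)
-{
- uint8_t syndrome = dc_ecc(data) ^ stored_ecc;
-
- if (!syndrome)
-  return 0; /* data and check bits agree */
- return (hweight8(syndrome) == 1) ? 1 : 2; /* 1 = SBE, 2 = DBE */
-}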
-
-struct dc_state {
- unsigned char val;
- char *name;
-};
-
-static struct dc_state dc_states[] = {
- { 0x00, "INVALID" },
- { 0x0f, "COH-SHD" },
- { 0x13, "NCO-E-C" },
- { 0x19, "NCO-E-D" },
- { 0x16, "COH-E-C" },
- { 0x1c, "COH-E-D" },
- { 0xff, "*ERROR*" }
-};
-
-#define DC_TAG_VALID(state) \
- (((state) == 0x0) || ((state) == 0xf) || ((state) == 0x13) || \
- ((state) == 0x19) || ((state) == 0x16) || ((state) == 0x1c))
-
-static char *dc_state_str(unsigned char state)
-{
- struct dc_state *dsc = dc_states;
- while (dsc->val != 0xff) {
- if (dsc->val == state)
- break;
- dsc++;
- }
- return dsc->name;
-}
-
-static uint32_t extract_dc(unsigned short addr, int data)
-{
- int valid, way;
- unsigned char state;
- uint32_t taghi, taglolo, taglohi;
- unsigned long long taglo, pa;
- uint8_t ecc, lru;
- int res = 0;
-
- printk("Dcache index 0x%04x ", addr);
- for (way = 0; way < 4; way++) {
- __asm__ __volatile__ (
- " .set push\n\t"
- " .set noreorder\n\t"
- " .set mips64\n\t"
- " .set noat\n\t"
- " cache 5, 0(%3)\n\t" /* Index-load-tag-D */
- " mfc0 %0, $29, 2\n\t"
- " dmfc0 $1, $28, 2\n\t"
- " dsrl32 %1, $1, 0\n\t"
- " sll %2, $1, 0\n\t"
- " .set pop"
- : "=r" (taghi), "=r" (taglohi), "=r" (taglolo)
- : "r" ((way << 13) | addr));
-
- taglo = ((unsigned long long)taglohi << 32) | taglolo;
- pa = (taglo & 0xFFFFFFE000ULL) | addr;
- if (way == 0) {
- lru = (taghi >> 14) & 0xff;
- printk("[Bank %d Set 0x%02x] LRU > %d %d %d %d > MRU\n",
- ((addr >> 11) & 0x2) | ((addr >> 5) & 1), /* bank */
- ((addr >> 6) & 0x3f), /* index */
- (lru & 0x3),
- ((lru >> 2) & 0x3),
- ((lru >> 4) & 0x3),
- ((lru >> 6) & 0x3));
- }
- state = (taghi >> 25) & 0x1f;
- valid = DC_TAG_VALID(state);
- printk(" %d [PA %010llx] [state %s (%02x)] raw tags: %08X-%016llX\n",
- way, pa, dc_state_str(state), state, taghi, taglo);
- if (valid) {
- if (((taglo >> 11) & 1) ^ range_parity(taglo, 39, 26)) {
- printk(" ** bad parity in PTag1\n");
- res |= CP0_CERRD_TAG_ADDRESS;
- }
- if (((taglo >> 10) & 1) ^ range_parity(taglo, 25, 13)) {
- printk(" ** bad parity in PTag0\n");
- res |= CP0_CERRD_TAG_ADDRESS;
- }
- } else {
- res |= CP0_CERRD_TAG_STATE;
- }
-
- if (data) {
- uint32_t datalohi, datalolo, datahi;
- unsigned long long datalo;
- int offset;
- char bad_ecc = 0;
-
- for (offset = 0; offset < 4; offset++) {
- /* Index-load-data-D */
- __asm__ __volatile__ (
- " .set push\n\t"
- " .set noreorder\n\t"
- " .set mips64\n\t"
- " .set noat\n\t"
- " cache 7, 0(%3)\n\t" /* Index-load-data-D */
- " mfc0 %0, $29, 3\n\t"
- " dmfc0 $1, $28, 3\n\t"
- " dsrl32 %1, $1, 0 \n\t"
- " sll %2, $1, 0 \n\t"
- " .set pop"
- : "=r" (datahi), "=r" (datalohi), "=r" (datalolo)
- : "r" ((way << 13) | addr | (offset << 3)));
- datalo = ((unsigned long long)datalohi << 32) | datalolo;
- ecc = dc_ecc(datalo);
- if (ecc != datahi) {
- int bits;
- bad_ecc |= 1 << (3-offset);
- ecc ^= datahi;
- bits = hweight8(ecc);
- res |= (bits == 1) ? CP0_CERRD_DATA_SBE : CP0_CERRD_DATA_DBE;
- }
- printk(" %02X-%016llX", datahi, datalo);
- }
- printk("\n");
- if (bad_ecc)
- printk(" dwords w/ bad ECC: %d %d %d %d\n",
- !!(bad_ecc & 8), !!(bad_ecc & 4),
- !!(bad_ecc & 2), !!(bad_ecc & 1));
- }
- }
- return res;
-}
diff --git a/ANDROID_3.4.5/arch/mips/mm/cex-gen.S b/ANDROID_3.4.5/arch/mips/mm/cex-gen.S
deleted file mode 100644
index e743622f..00000000
--- a/ANDROID_3.4.5/arch/mips/mm/cex-gen.S
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1995 - 1999 Ralf Baechle
- * Copyright (C) 1999 Silicon Graphics, Inc.
- *
- * Cache error handler
- */
-#include <asm/asm.h>
-#include <asm/regdef.h>
-#include <asm/mipsregs.h>
-#include <asm/stackframe.h>
-
-/*
- * Game over. Go to the button. Press gently. Swear where allowed by
- * legislation.
- */
- LEAF(except_vec2_generic)
- .set noreorder
- .set noat
- .set mips0
- /*
- * This is a very bad place to be. Our cache error
- * detection has triggered. If we have write-back data
- * in the cache, we may not be able to recover. As a
- * first-order desperate measure, turn off KSEG0 caching.
- */
- mfc0 k0,CP0_CONFIG
- li k1,~CONF_CM_CMASK
- and k0,k0,k1
- ori k0,k0,CONF_CM_UNCACHED
- mtc0 k0,CP0_CONFIG
- /* Give it a few cycles to sink in... */
- nop
- nop
- nop
-
- j cache_parity_error
- nop
- END(except_vec2_generic)
diff --git a/ANDROID_3.4.5/arch/mips/mm/cex-oct.S b/ANDROID_3.4.5/arch/mips/mm/cex-oct.S
deleted file mode 100644
index 3db8553f..00000000
--- a/ANDROID_3.4.5/arch/mips/mm/cex-oct.S
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2006 Cavium Networks
- * Cache error handler
- */
-
-#include <asm/asm.h>
-#include <asm/regdef.h>
-#include <asm/mipsregs.h>
-#include <asm/stackframe.h>
-
-/*
- * Handle cache error. Indicate to the second level handler whether
- * the exception is recoverable.
- */
- LEAF(except_vec2_octeon)
-
- .set push
- .set mips64r2
- .set noreorder
- .set noat
-
-
- /* due to an erratum we need to read the COP0 CacheErr (Dcache)
- * register before any cache/DRAM access */
-
- rdhwr k0, $0 /* get core_id */
- PTR_LA k1, cache_err_dcache
- sll k0, k0, 3
- PTR_ADDU k1, k0, k1 /* k1 = &cache_err_dcache[core_id] */
-
- dmfc0 k0, CP0_CACHEERR, 1
- sd k0, (k1)
- dmtc0 $0, CP0_CACHEERR, 1
-
- /* check whether this is a nested exception */
- mfc0 k1, CP0_STATUS
- andi k1, k1, ST0_EXL
- beqz k1, 1f
- nop
- j cache_parity_error_octeon_non_recoverable
- nop
-
- /* exception is recoverable */
-1: j handle_cache_err
- nop
-
- .set pop
- END(except_vec2_octeon)
-
- /* We need to jump to handle_cache_err so that the previous handler
- * can fit within 0x80 bytes. We also move from 0xFFFFFFFFAXXXXXXX
- * space (uncached) to the 0xFFFFFFFF8XXXXXXX space (cached). */
- LEAF(handle_cache_err)
- .set push
- .set noreorder
- .set noat
-
- SAVE_ALL
- KMODE
- jal cache_parity_error_octeon_recoverable
- nop
- j ret_from_exception
- nop
-
- .set pop
- END(handle_cache_err)
diff --git a/ANDROID_3.4.5/arch/mips/mm/cex-sb1.S b/ANDROID_3.4.5/arch/mips/mm/cex-sb1.S
deleted file mode 100644
index 89c412bc..00000000
--- a/ANDROID_3.4.5/arch/mips/mm/cex-sb1.S
+++ /dev/null
@@ -1,175 +0,0 @@
-/*
- * Copyright (C) 2001,2002,2003 Broadcom Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-#include <linux/init.h>
-
-#include <asm/asm.h>
-#include <asm/regdef.h>
-#include <asm/mipsregs.h>
-#include <asm/stackframe.h>
-#include <asm/cacheops.h>
-#include <asm/sibyte/board.h>
-
-#define C0_ERRCTL $26 /* CP0: Error info */
-#define C0_CERR_I $27 /* CP0: Icache error */
-#define C0_CERR_D $27,1 /* CP0: Dcache error */
-
- /*
- * Based on SiByte sample software cache-err/cerr.S
- * CVS revision 1.8. Only the 'unrecoverable' case
- * is changed.
- */
-
- .set mips64
- .set noreorder
- .set noat
-
- /*
- * sb1_cerr_vec: code to be copied to the Cache Error
- * Exception vector. The code must be pushed out to memory
- * (either by copying to Kseg0 and Kseg1 both, or by flushing
- * the L1 and L2) since it is fetched as 0xa0000100.
- *
- * NOTE: Be sure this handler is at most 28 instructions long
- * since the final 16 bytes of the exception vector memory
- * (0x170-0x17f) are used to preserve k0, k1, and ra.
- */
-
- __CPUINIT
-
-LEAF(except_vec2_sb1)
- /*
- * If this error is recoverable, we need to exit the handler
- * without having dirtied any registers. To do this,
- * save/restore k0 and k1 from low memory (Useg is direct
- * mapped while ERL=1). Note that we can't save to a
- * CPU-specific location without ruining a register in the
- * process. This means we are vulnerable to data corruption
- * whenever the handler is reentered by a second CPU.
- */
- sd k0,0x170($0)
- sd k1,0x178($0)
-
-#ifdef CONFIG_SB1_CEX_ALWAYS_FATAL
- j handle_vec2_sb1
- nop
-#else
- /*
- * M_ERRCTL_RECOVERABLE is bit 31, which makes it easy to tell
- * if we can fast-path out of here for a h/w-recovered error.
- */
- mfc0 k1,C0_ERRCTL
- bgtz k1,attempt_recovery
- sll k0,k1,1
-
-recovered_dcache:
- /*
- * Unlock CacheErr-D (which in turn unlocks CacheErr-DPA).
- * Ought to log the occurrence of this recovered dcache error.
- */
- b recovered
- mtc0 $0,C0_CERR_D
-
-attempt_recovery:
- /*
- * k0 has C0_ERRCTL << 1, which puts 'DC' at bit 31. Any
- * Dcache errors we can recover from will take more extensive
- * processing. For now, they are considered "unrecoverable".
- * Note that 'DC' becoming set (outside of ERL mode) will
- * cause 'IC' to clear; so if there's an Icache error, we'll
- * only find out about it if we recover from this error and
- * continue executing.
- */
- bltz k0,unrecoverable
- sll k0,1
-
- /*
- * k0 has C0_ERRCTL << 2, which puts 'IC' at bit 31. If an
- * Icache error isn't indicated, I'm not sure why we got here.
- * Consider that case "unrecoverable" for now.
- */
- bgez k0,unrecoverable
-
-attempt_icache_recovery:
- /*
- * External icache errors are due to uncorrectable ECC errors
- * in the L2 cache or Memory Controller and cannot be
- * recovered here.
- */
- mfc0 k0,C0_CERR_I /* delay slot */
- li k1,1 << 26 /* ICACHE_EXTERNAL */
- and k1,k0
- bnez k1,unrecoverable
- andi k0,0x1fe0
-
- /*
- * Since the error is internal, the 'IDX' field from
- * CacheErr-I is valid and we can just invalidate all blocks
- * in that set.
- */
- cache Index_Invalidate_I,(0<<13)(k0)
- cache Index_Invalidate_I,(1<<13)(k0)
- cache Index_Invalidate_I,(2<<13)(k0)
- cache Index_Invalidate_I,(3<<13)(k0)
-
- /* Ought to log this recovered icache error */
-
-recovered:
- /* Restore the saved registers */
- ld k0,0x170($0)
- ld k1,0x178($0)
- eret
-
-unrecoverable:
- /* Unrecoverable Icache or Dcache error; log it and/or fail */
- j handle_vec2_sb1
- nop
-#endif
-
-END(except_vec2_sb1)
-
- __FINIT
-
- LEAF(handle_vec2_sb1)
- mfc0 k0,CP0_CONFIG
- li k1,~CONF_CM_CMASK
- and k0,k0,k1
- ori k0,k0,CONF_CM_UNCACHED
- mtc0 k0,CP0_CONFIG
-
- SSNOP
- SSNOP
- SSNOP
- SSNOP
- bnezl $0, 1f
-1:
- mfc0 k0, CP0_STATUS
- sll k0, k0, 3 # check CU0 (kernel?)
- bltz k0, 2f
- nop
-
- /* Get a valid Kseg0 stack pointer. Any task's stack pointer
- * will do, although if we ever want to resume execution we
- * had better not have corrupted any state. */
- get_saved_sp
- move sp, k1
-
-2:
- j sb1_cache_error
- nop
-
- END(handle_vec2_sb1)
diff --git a/ANDROID_3.4.5/arch/mips/mm/dma-default.c b/ANDROID_3.4.5/arch/mips/mm/dma-default.c
deleted file mode 100644
index 3fab2046..00000000
--- a/ANDROID_3.4.5/arch/mips/mm/dma-default.c
+++ /dev/null
@@ -1,351 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com>
- * Copyright (C) 2000, 2001, 06 Ralf Baechle <ralf@linux-mips.org>
- * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
- */
-
-#include <linux/types.h>
-#include <linux/dma-mapping.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/scatterlist.h>
-#include <linux/string.h>
-#include <linux/gfp.h>
-#include <linux/highmem.h>
-
-#include <asm/cache.h>
-#include <asm/io.h>
-
-#include <dma-coherence.h>
-
-static inline struct page *dma_addr_to_page(struct device *dev,
- dma_addr_t dma_addr)
-{
- return pfn_to_page(
- plat_dma_addr_to_phys(dev, dma_addr) >> PAGE_SHIFT);
-}
-
-/*
- * A warning on terminology: Linux calls an uncached area "coherent";
- * MIPS terminology reserves "coherent" for memory areas whose
- * coherency is maintained by hardware.
- */
-
-static inline int cpu_is_noncoherent_r10000(struct device *dev)
-{
- return !plat_device_is_coherent(dev) &&
- (current_cpu_type() == CPU_R10000 ||
- current_cpu_type() == CPU_R12000);
-}
-
-static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
-{
- gfp_t dma_flag;
-
- /* ignore region specifiers */
- gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
-
-#ifdef CONFIG_ISA
- if (dev == NULL)
- dma_flag = __GFP_DMA;
- else
-#endif
-#if defined(CONFIG_ZONE_DMA32) && defined(CONFIG_ZONE_DMA)
- if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
- dma_flag = __GFP_DMA;
- else if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
- dma_flag = __GFP_DMA32;
- else
-#endif
-#if defined(CONFIG_ZONE_DMA32) && !defined(CONFIG_ZONE_DMA)
- if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
- dma_flag = __GFP_DMA32;
- else
-#endif
-#if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32)
- if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
- dma_flag = __GFP_DMA;
- else
-#endif
- dma_flag = 0;
-
- /* Don't invoke OOM killer */
- gfp |= __GFP_NORETRY;
-
- return gfp | dma_flag;
-}
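-
-/*
- * Illustration, not from the original source: with both DMA zones
- * configured, the zone flag chosen above depends only on the
- * device's coherent mask.
- */
-static gfp_t example_zone_flag(u64 coherent_mask)
-{
- if (coherent_mask < DMA_BIT_MASK(32))
-  return __GFP_DMA; /* e.g. a 24-bit ISA-style device */
- if (coherent_mask < DMA_BIT_MASK(64))
-  return __GFP_DMA32; /* e.g. a 32-bit PCI device */
- return 0; /* full 64-bit capable master */
-}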
-
-void *dma_alloc_noncoherent(struct device *dev, size_t size,
- dma_addr_t * dma_handle, gfp_t gfp)
-{
- void *ret;
-
- gfp = massage_gfp_flags(dev, gfp);
-
- ret = (void *) __get_free_pages(gfp, get_order(size));
-
- if (ret != NULL) {
- memset(ret, 0, size);
- *dma_handle = plat_map_dma_mem(dev, ret, size);
- }
-
- return ret;
-}
-EXPORT_SYMBOL(dma_alloc_noncoherent);
-
-static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t * dma_handle, gfp_t gfp, struct dma_attrs *attrs)
-{
- void *ret;
-
- if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
- return ret;
-
- gfp = massage_gfp_flags(dev, gfp);
-
- ret = (void *) __get_free_pages(gfp, get_order(size));
-
- if (ret) {
- memset(ret, 0, size);
- *dma_handle = plat_map_dma_mem(dev, ret, size);
-
- if (!plat_device_is_coherent(dev)) {
- dma_cache_wback_inv((unsigned long) ret, size);
- ret = UNCAC_ADDR(ret);
- }
- }
-
- return ret;
-}
-
-
-void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle)
-{
- plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);
- free_pages((unsigned long) vaddr, get_order(size));
-}
-EXPORT_SYMBOL(dma_free_noncoherent);
-
-static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle, struct dma_attrs *attrs)
-{
- unsigned long addr = (unsigned long) vaddr;
- int order = get_order(size);
-
- if (dma_release_from_coherent(dev, order, vaddr))
- return;
-
- plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);
-
- if (!plat_device_is_coherent(dev))
- addr = CAC_ADDR(addr);
-
- free_pages(addr, get_order(size));
-}
-
-static inline void __dma_sync_virtual(void *addr, size_t size,
- enum dma_data_direction direction)
-{
- switch (direction) {
- case DMA_TO_DEVICE:
- dma_cache_wback((unsigned long)addr, size);
- break;
-
- case DMA_FROM_DEVICE:
- dma_cache_inv((unsigned long)addr, size);
- break;
-
- case DMA_BIDIRECTIONAL:
- dma_cache_wback_inv((unsigned long)addr, size);
- break;
-
- default:
- BUG();
- }
-}
-
-/*
- * A single sg entry may refer to multiple physically contiguous
- * pages. But we still need to process highmem pages individually.
- * If highmem is not configured then the bulk of this loop gets
- * optimized out.
- */
-static inline void __dma_sync(struct page *page,
- unsigned long offset, size_t size, enum dma_data_direction direction)
-{
- size_t left = size;
-
- do {
- size_t len = left;
-
- if (PageHighMem(page)) {
- void *addr;
-
- if (offset + len > PAGE_SIZE) {
- if (offset >= PAGE_SIZE) {
- page += offset >> PAGE_SHIFT;
- offset &= ~PAGE_MASK;
- }
- len = PAGE_SIZE - offset;
- }
-
- addr = kmap_atomic(page);
- __dma_sync_virtual(addr + offset, len, direction);
- kunmap_atomic(addr);
- } else
- __dma_sync_virtual(page_address(page) + offset,
- size, direction);
- offset = 0;
- page++;
- left -= len;
- } while (left);
-}
-
-static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction direction, struct dma_attrs *attrs)
-{
- if (cpu_is_noncoherent_r10000(dev))
- __dma_sync(dma_addr_to_page(dev, dma_addr),
- dma_addr & ~PAGE_MASK, size, direction);
-
- plat_unmap_dma_mem(dev, dma_addr, size, direction);
-}
-
-static int mips_dma_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction direction, struct dma_attrs *attrs)
-{
- int i;
-
- for (i = 0; i < nents; i++, sg++) {
- if (!plat_device_is_coherent(dev))
- __dma_sync(sg_page(sg), sg->offset, sg->length,
- direction);
- sg->dma_address = plat_map_dma_mem_page(dev, sg_page(sg)) +
- sg->offset;
- }
-
- return nents;
-}
-
-static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size, enum dma_data_direction direction,
- struct dma_attrs *attrs)
-{
- if (!plat_device_is_coherent(dev))
- __dma_sync(page, offset, size, direction);
-
- return plat_map_dma_mem_page(dev, page) + offset;
-}
-
-static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
- int nhwentries, enum dma_data_direction direction,
- struct dma_attrs *attrs)
-{
- int i;
-
- for (i = 0; i < nhwentries; i++, sg++) {
- if (!plat_device_is_coherent(dev) &&
- direction != DMA_TO_DEVICE)
- __dma_sync(sg_page(sg), sg->offset, sg->length,
- direction);
- plat_unmap_dma_mem(dev, sg->dma_address, sg->length, direction);
- }
-}
-
-static void mips_dma_sync_single_for_cpu(struct device *dev,
- dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
-{
- if (cpu_is_noncoherent_r10000(dev))
- __dma_sync(dma_addr_to_page(dev, dma_handle),
- dma_handle & ~PAGE_MASK, size, direction);
-}
-
-static void mips_dma_sync_single_for_device(struct device *dev,
- dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
-{
- plat_extra_sync_for_device(dev);
- if (!plat_device_is_coherent(dev))
- __dma_sync(dma_addr_to_page(dev, dma_handle),
- dma_handle & ~PAGE_MASK, size, direction);
-}
-
-static void mips_dma_sync_sg_for_cpu(struct device *dev,
- struct scatterlist *sg, int nelems, enum dma_data_direction direction)
-{
- int i;
-
- /* Make sure that gcc doesn't leave the empty loop body. */
- for (i = 0; i < nelems; i++, sg++) {
- if (cpu_is_noncoherent_r10000(dev))
- __dma_sync(sg_page(sg), sg->offset, sg->length,
- direction);
- }
-}
-
-static void mips_dma_sync_sg_for_device(struct device *dev,
- struct scatterlist *sg, int nelems, enum dma_data_direction direction)
-{
- int i;
-
- /* Make sure that gcc doesn't leave the empty loop body. */
- for (i = 0; i < nelems; i++, sg++) {
- if (!plat_device_is_coherent(dev))
- __dma_sync(sg_page(sg), sg->offset, sg->length,
- direction);
- }
-}
-
-int mips_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- return plat_dma_mapping_error(dev, dma_addr);
-}
-
-int mips_dma_supported(struct device *dev, u64 mask)
-{
- return plat_dma_supported(dev, mask);
-}
-
-void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
-
- plat_extra_sync_for_device(dev);
- if (!plat_device_is_coherent(dev))
- __dma_sync_virtual(vaddr, size, direction);
-}
-
-EXPORT_SYMBOL(dma_cache_sync);
-
-static struct dma_map_ops mips_default_dma_map_ops = {
- .alloc = mips_dma_alloc_coherent,
- .free = mips_dma_free_coherent,
- .map_page = mips_dma_map_page,
- .unmap_page = mips_dma_unmap_page,
- .map_sg = mips_dma_map_sg,
- .unmap_sg = mips_dma_unmap_sg,
- .sync_single_for_cpu = mips_dma_sync_single_for_cpu,
- .sync_single_for_device = mips_dma_sync_single_for_device,
- .sync_sg_for_cpu = mips_dma_sync_sg_for_cpu,
- .sync_sg_for_device = mips_dma_sync_sg_for_device,
- .mapping_error = mips_dma_mapping_error,
- .dma_supported = mips_dma_supported
-};
-
-struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops;
-EXPORT_SYMBOL(mips_dma_map_ops);
-
-#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
-
-static int __init mips_dma_init(void)
-{
- dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
-
- return 0;
-}
-fs_initcall(mips_dma_init);
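-
-/*
- * Driver-side view, for illustration (not part of this file): a
- * streaming mapping made through the generic DMA API dispatches to
- * the ops table above, e.g.
- *
- * handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
- * ...device reads buf through handle...
- * dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
- *
- * which on a noncoherent platform writes the buffer back via
- * mips_dma_map_page() before the device can see it.
- */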
diff --git a/ANDROID_3.4.5/arch/mips/mm/extable.c b/ANDROID_3.4.5/arch/mips/mm/extable.c
deleted file mode 100644
index 9d25d2ba..00000000
--- a/ANDROID_3.4.5/arch/mips/mm/extable.c
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1997, 99, 2001 - 2004 Ralf Baechle <ralf@linux-mips.org>
- */
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <asm/branch.h>
-#include <asm/uaccess.h>
-
-int fixup_exception(struct pt_regs *regs)
-{
- const struct exception_table_entry *fixup;
-
- fixup = search_exception_tables(exception_epc(regs));
- if (fixup) {
- regs->cp0_epc = fixup->nextinsn;
-
- return 1;
- }
-
- return 0;
-}
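-
-/*
- * Sketch, heavily simplified and not from this file: how an entry
- * lands in the table searched above. The uaccess helpers emit a
- * record pairing each potentially-faulting load with fixup code,
- * roughly along these lines (names abbreviated from the real
- * asm/uaccess.h macros):
- *
- * __asm__ __volatile__(
- * "1: lw %1, (%3)\n"
- * "2:\n"
- * " .section .fixup,\"ax\"\n"
- * "3: li %0, %4\n"
- * " j 2b\n"
- * " .previous\n"
- * " .section __ex_table,\"a\"\n"
- * " PTR 1b, 3b\n"
- * " .previous\n"
- * : "=r" (err), "=r" (val)
- * : "0" (0), "r" (addr), "i" (-EFAULT));
- *
- * A fault at 1: then resumes at 3:, which is exactly the
- * regs->cp0_epc rewrite performed by fixup_exception().
- */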
diff --git a/ANDROID_3.4.5/arch/mips/mm/fault.c b/ANDROID_3.4.5/arch/mips/mm/fault.c
deleted file mode 100644
index c14f6dfe..00000000
--- a/ANDROID_3.4.5/arch/mips/mm/fault.c
+++ /dev/null
@@ -1,313 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1995 - 2000 by Ralf Baechle
- */
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/ptrace.h>
-#include <linux/mman.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/module.h>
-#include <linux/kprobes.h>
-#include <linux/perf_event.h>
-
-#include <asm/branch.h>
-#include <asm/mmu_context.h>
-#include <asm/uaccess.h>
-#include <asm/ptrace.h>
-#include <asm/highmem.h> /* For VMALLOC_END */
-#include <linux/kdebug.h>
-
-/*
- * This routine handles page faults. It determines the address,
- * and the problem, and then passes it off to one of the appropriate
- * routines.
- */
-asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, unsigned long write,
- unsigned long address)
-{
- struct vm_area_struct * vma = NULL;
- struct task_struct *tsk = current;
- struct mm_struct *mm = tsk->mm;
- const int field = sizeof(unsigned long) * 2;
- siginfo_t info;
- int fault;
- unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
- (write ? FAULT_FLAG_WRITE : 0);
-
-#if 0
- printk("Cpu%d[%s:%d:%0*lx:%ld:%0*lx]\n", raw_smp_processor_id(),
- current->comm, current->pid, field, address, write,
- field, regs->cp0_epc);
-#endif
-
-#ifdef CONFIG_KPROBES
- /*
- * This is to notify the fault handler of the kprobes. The
- * exception code is redundant as it is also carried in REGS,
- * but we pass it anyhow.
- */
- if (notify_die(DIE_PAGE_FAULT, "page fault", regs, -1,
- (regs->cp0_cause >> 2) & 0x1f, SIGSEGV) == NOTIFY_STOP)
- return;
-#endif
-
- info.si_code = SEGV_MAPERR;
-
- /*
- * We fault-in kernel-space virtual memory on-demand. The
- * 'reference' page table is init_mm.pgd.
- *
- * NOTE! We MUST NOT take any locks for this case. We may
- * be in an interrupt or a critical region, and should
- * only copy the information from the master page table,
- * nothing more.
- */
-#ifdef CONFIG_64BIT
-# define VMALLOC_FAULT_TARGET no_context
-#else
-# define VMALLOC_FAULT_TARGET vmalloc_fault
-#endif
-
- if (unlikely(address >= VMALLOC_START && address <= VMALLOC_END))
- goto VMALLOC_FAULT_TARGET;
-#ifdef MODULE_START
- if (unlikely(address >= MODULE_START && address < MODULE_END))
- goto VMALLOC_FAULT_TARGET;
-#endif
-
- /*
- * If we're in an interrupt or have no user
- * context, we must not take the fault..
- */
- if (in_atomic() || !mm)
- goto bad_area_nosemaphore;
-
-retry:
- down_read(&mm->mmap_sem);
- vma = find_vma(mm, address);
- if (!vma)
- goto bad_area;
- if (vma->vm_start <= address)
- goto good_area;
- if (!(vma->vm_flags & VM_GROWSDOWN))
- goto bad_area;
- if (expand_stack(vma, address))
- goto bad_area;
-/*
- * Ok, we have a good vm_area for this memory access, so
- * we can handle it..
- */
-good_area:
- info.si_code = SEGV_ACCERR;
-
- if (write) {
- if (!(vma->vm_flags & VM_WRITE))
- goto bad_area;
- } else {
- if (kernel_uses_smartmips_rixi) {
- if (address == regs->cp0_epc && !(vma->vm_flags & VM_EXEC)) {
-#if 0
- pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] XI violation\n",
- raw_smp_processor_id(),
- current->comm, current->pid,
- field, address, write,
- field, regs->cp0_epc);
-#endif
- goto bad_area;
- }
- if (!(vma->vm_flags & VM_READ)) {
-#if 0
- pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] RI violation\n",
- raw_smp_processor_id(),
- current->comm, current->pid,
- field, address, write,
- field, regs->cp0_epc);
-#endif
- goto bad_area;
- }
- } else {
- if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
- goto bad_area;
- }
- }
-
- /*
- * If for any reason at all we couldn't handle the fault,
- * make sure we exit gracefully rather than endlessly redo
- * the fault.
- */
- fault = handle_mm_fault(mm, vma, address, flags);
-
- if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
- return;
-
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
- if (unlikely(fault & VM_FAULT_ERROR)) {
- if (fault & VM_FAULT_OOM)
- goto out_of_memory;
- else if (fault & VM_FAULT_SIGBUS)
- goto do_sigbus;
- BUG();
- }
- if (flags & FAULT_FLAG_ALLOW_RETRY) {
- if (fault & VM_FAULT_MAJOR) {
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
- regs, address);
- tsk->maj_flt++;
- } else {
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
- regs, address);
- tsk->min_flt++;
- }
- if (fault & VM_FAULT_RETRY) {
- flags &= ~FAULT_FLAG_ALLOW_RETRY;
-
- /*
- * No need to up_read(&mm->mmap_sem) as we would
- * have already released it in __lock_page_or_retry
- * in mm/filemap.c.
- */
-
- goto retry;
- }
- }
-
- up_read(&mm->mmap_sem);
- return;
-
-/*
- * Something tried to access memory that isn't in our memory map..
- * Fix it, but check if it's kernel or user first..
- */
-bad_area:
- up_read(&mm->mmap_sem);
-
-bad_area_nosemaphore:
- /* User mode accesses just cause a SIGSEGV */
- if (user_mode(regs)) {
- tsk->thread.cp0_badvaddr = address;
- tsk->thread.error_code = write;
-#if 0
- printk("do_page_fault() #2: sending SIGSEGV to %s for "
- "invalid %s\n%0*lx (epc == %0*lx, ra == %0*lx)\n",
- tsk->comm,
- write ? "write access to" : "read access from",
- field, address,
- field, (unsigned long) regs->cp0_epc,
- field, (unsigned long) regs->regs[31]);
-#endif
- info.si_signo = SIGSEGV;
- info.si_errno = 0;
- /* info.si_code has been set above */
- info.si_addr = (void __user *) address;
- force_sig_info(SIGSEGV, &info, tsk);
- return;
- }
-
-no_context:
- /* Are we prepared to handle this kernel fault? */
- if (fixup_exception(regs)) {
- current->thread.cp0_baduaddr = address;
- return;
- }
-
- /*
- * Oops. The kernel tried to access some bad page. We'll have to
- * terminate things with extreme prejudice.
- */
- bust_spinlocks(1);
-
- printk(KERN_ALERT "CPU %d Unable to handle kernel paging request at "
- "virtual address %0*lx, epc == %0*lx, ra == %0*lx\n",
- raw_smp_processor_id(), field, address, field, regs->cp0_epc,
- field, regs->regs[31]);
- die("Oops", regs);
-
-out_of_memory:
- /*
-	 * We ran out of memory, call the OOM killer, and return to userspace
- * (which will retry the fault, or kill us if we got oom-killed).
- */
- up_read(&mm->mmap_sem);
- pagefault_out_of_memory();
- return;
-
-do_sigbus:
- up_read(&mm->mmap_sem);
-
- /* Kernel mode? Handle exceptions or die */
- if (!user_mode(regs))
- goto no_context;
- else
- /*
- * Send a sigbus, regardless of whether we were in kernel
- * or user mode.
- */
-#if 0
- printk("do_page_fault() #3: sending SIGBUS to %s for "
- "invalid %s\n%0*lx (epc == %0*lx, ra == %0*lx)\n",
- tsk->comm,
- write ? "write access to" : "read access from",
- field, address,
- field, (unsigned long) regs->cp0_epc,
- field, (unsigned long) regs->regs[31]);
-#endif
- tsk->thread.cp0_badvaddr = address;
- info.si_signo = SIGBUS;
- info.si_errno = 0;
- info.si_code = BUS_ADRERR;
- info.si_addr = (void __user *) address;
- force_sig_info(SIGBUS, &info, tsk);
-
- return;
-#ifndef CONFIG_64BIT
-vmalloc_fault:
- {
- /*
- * Synchronize this task's top level page-table
- * with the 'reference' page table.
- *
- * Do _not_ use "tsk" here. We might be inside
- * an interrupt in the middle of a task switch..
- */
- int offset = __pgd_offset(address);
- pgd_t *pgd, *pgd_k;
- pud_t *pud, *pud_k;
- pmd_t *pmd, *pmd_k;
- pte_t *pte_k;
-
- pgd = (pgd_t *) pgd_current[raw_smp_processor_id()] + offset;
- pgd_k = init_mm.pgd + offset;
-
- if (!pgd_present(*pgd_k))
- goto no_context;
- set_pgd(pgd, *pgd_k);
-
- pud = pud_offset(pgd, address);
- pud_k = pud_offset(pgd_k, address);
- if (!pud_present(*pud_k))
- goto no_context;
-
- pmd = pmd_offset(pud, address);
- pmd_k = pmd_offset(pud_k, address);
- if (!pmd_present(*pmd_k))
- goto no_context;
- set_pmd(pmd, *pmd_k);
-
- pte_k = pte_offset_kernel(pmd_k, address);
- if (!pte_present(*pte_k))
- goto no_context;
- return;
- }
-#endif
-}
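
The retry protocol in do_page_fault() above is subtle: the first handle_mm_fault() call carries FAULT_FLAG_ALLOW_RETRY, and if VM_FAULT_RETRY comes back, mmap_sem has already been released in __lock_page_or_retry(), so the handler clears the flag and loops exactly once more. Below is a stripped-down model of that single-retry shape; handle_mm_fault_stub() is a stand-in that forces one retry.

#include <stdio.h>

#define FAULT_FLAG_ALLOW_RETRY	0x01
#define VM_FAULT_RETRY		0x02

/* Stand-in: pretend the first attempt had to drop mmap_sem. */
static int handle_mm_fault_stub(unsigned int flags)
{
	static int calls;

	calls++;
	if (calls == 1 && (flags & FAULT_FLAG_ALLOW_RETRY))
		return VM_FAULT_RETRY;
	return 0;
}

int main(void)
{
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY;
	int fault;

retry:
	fault = handle_mm_fault_stub(flags);
	if (fault & VM_FAULT_RETRY) {
		/* mmap_sem was already dropped for us; retry exactly once,
		 * without the flag, so a second retry cannot loop forever. */
		flags &= ~FAULT_FLAG_ALLOW_RETRY;
		goto retry;
	}
	printf("fault handled after retry, flags now %#x\n", flags);
	return 0;
}
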
diff --git a/ANDROID_3.4.5/arch/mips/mm/gup.c b/ANDROID_3.4.5/arch/mips/mm/gup.c
deleted file mode 100644
index 33aadbcf..00000000
--- a/ANDROID_3.4.5/arch/mips/mm/gup.c
+++ /dev/null
@@ -1,315 +0,0 @@
-/*
- * Lockless get_user_pages_fast for MIPS
- *
- * Copyright (C) 2008 Nick Piggin
- * Copyright (C) 2008 Novell Inc.
- * Copyright (C) 2011 Ralf Baechle
- */
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/vmstat.h>
-#include <linux/highmem.h>
-#include <linux/swap.h>
-#include <linux/hugetlb.h>
-
-#include <asm/pgtable.h>
-
-static inline pte_t gup_get_pte(pte_t *ptep)
-{
-#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
- pte_t pte;
-
-retry:
- pte.pte_low = ptep->pte_low;
- smp_rmb();
- pte.pte_high = ptep->pte_high;
- smp_rmb();
- if (unlikely(pte.pte_low != ptep->pte_low))
- goto retry;
-
- return pte;
-#else
- return ACCESS_ONCE(*ptep);
-#endif
-}
-
-static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
- int write, struct page **pages, int *nr)
-{
- pte_t *ptep = pte_offset_map(&pmd, addr);
- do {
- pte_t pte = gup_get_pte(ptep);
- struct page *page;
-
- if (!pte_present(pte) ||
- pte_special(pte) || (write && !pte_write(pte))) {
- pte_unmap(ptep);
- return 0;
- }
- VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
- page = pte_page(pte);
- get_page(page);
- SetPageReferenced(page);
- pages[*nr] = page;
- (*nr)++;
-
- } while (ptep++, addr += PAGE_SIZE, addr != end);
-
- pte_unmap(ptep - 1);
- return 1;
-}
-
-static inline void get_head_page_multiple(struct page *page, int nr)
-{
- VM_BUG_ON(page != compound_head(page));
- VM_BUG_ON(page_count(page) == 0);
- atomic_add(nr, &page->_count);
- SetPageReferenced(page);
-}
-
-static int gup_huge_pmd(pmd_t pmd, unsigned long addr, unsigned long end,
- int write, struct page **pages, int *nr)
-{
- pte_t pte = *(pte_t *)&pmd;
- struct page *head, *page;
- int refs;
-
- if (write && !pte_write(pte))
- return 0;
- /* hugepages are never "special" */
- VM_BUG_ON(pte_special(pte));
- VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
-
- refs = 0;
- head = pte_page(pte);
- page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
- do {
- VM_BUG_ON(compound_head(page) != head);
- pages[*nr] = page;
- if (PageTail(page))
- get_huge_page_tail(page);
- (*nr)++;
- page++;
- refs++;
- } while (addr += PAGE_SIZE, addr != end);
-
- get_head_page_multiple(head, refs);
- return 1;
-}
-
-static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
- int write, struct page **pages, int *nr)
-{
- unsigned long next;
- pmd_t *pmdp;
-
- pmdp = pmd_offset(&pud, addr);
- do {
- pmd_t pmd = *pmdp;
-
- next = pmd_addr_end(addr, end);
- /*
- * The pmd_trans_splitting() check below explains why
- * pmdp_splitting_flush has to flush the tlb, to stop
- * this gup-fast code from running while we set the
- * splitting bit in the pmd. Returning zero will take
- * the slow path that will call wait_split_huge_page()
- * if the pmd is still in splitting state. gup-fast
- * can't because it has irq disabled and
- * wait_split_huge_page() would never return as the
- * tlb flush IPI wouldn't run.
- */
- if (pmd_none(pmd) || pmd_trans_splitting(pmd))
- return 0;
- if (unlikely(pmd_huge(pmd))) {
- if (!gup_huge_pmd(pmd, addr, next, write, pages,nr))
- return 0;
- } else {
- if (!gup_pte_range(pmd, addr, next, write, pages,nr))
- return 0;
- }
- } while (pmdp++, addr = next, addr != end);
-
- return 1;
-}
-
-static int gup_huge_pud(pud_t pud, unsigned long addr, unsigned long end,
- int write, struct page **pages, int *nr)
-{
- pte_t pte = *(pte_t *)&pud;
- struct page *head, *page;
- int refs;
-
- if (write && !pte_write(pte))
- return 0;
- /* hugepages are never "special" */
- VM_BUG_ON(pte_special(pte));
- VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
-
- refs = 0;
- head = pte_page(pte);
- page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
- do {
- VM_BUG_ON(compound_head(page) != head);
- pages[*nr] = page;
- (*nr)++;
- page++;
- refs++;
- } while (addr += PAGE_SIZE, addr != end);
-
- get_head_page_multiple(head, refs);
- return 1;
-}
-
-static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
- int write, struct page **pages, int *nr)
-{
- unsigned long next;
- pud_t *pudp;
-
- pudp = pud_offset(&pgd, addr);
- do {
- pud_t pud = *pudp;
-
- next = pud_addr_end(addr, end);
- if (pud_none(pud))
- return 0;
- if (unlikely(pud_huge(pud))) {
- if (!gup_huge_pud(pud, addr, next, write, pages,nr))
- return 0;
- } else {
- if (!gup_pmd_range(pud, addr, next, write, pages,nr))
- return 0;
- }
- } while (pudp++, addr = next, addr != end);
-
- return 1;
-}
-
-/*
- * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
- * back to the regular GUP.
- */
-int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
- struct page **pages)
-{
- struct mm_struct *mm = current->mm;
- unsigned long addr, len, end;
- unsigned long next;
- unsigned long flags;
- pgd_t *pgdp;
- int nr = 0;
-
- start &= PAGE_MASK;
- addr = start;
- len = (unsigned long) nr_pages << PAGE_SHIFT;
- end = start + len;
- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
- (void __user *)start, len)))
- return 0;
-
- /*
- * XXX: batch / limit 'nr', to avoid large irq off latency
- * needs some instrumenting to determine the common sizes used by
- * important workloads (eg. DB2), and whether limiting the batch
- * size will decrease performance.
- *
- * It seems like we're in the clear for the moment. Direct-IO is
- * the main guy that batches up lots of get_user_pages, and even
- * they are limited to 64-at-a-time which is not so many.
- */
- /*
- * This doesn't prevent pagetable teardown, but does prevent
- * the pagetables and pages from being freed.
- *
- * So long as we atomically load page table pointers versus teardown,
- * we can follow the address down to the page and take a ref on it.
- */
- local_irq_save(flags);
- pgdp = pgd_offset(mm, addr);
- do {
- pgd_t pgd = *pgdp;
-
- next = pgd_addr_end(addr, end);
- if (pgd_none(pgd))
- break;
- if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
- break;
- } while (pgdp++, addr = next, addr != end);
- local_irq_restore(flags);
-
- return nr;
-}
-
-/**
- * get_user_pages_fast() - pin user pages in memory
- * @start: starting user address
- * @nr_pages: number of pages from start to pin
- * @write: whether pages will be written to
- * @pages: array that receives pointers to the pages pinned.
- * Should be at least nr_pages long.
- *
- * Attempt to pin user pages in memory without taking mm->mmap_sem.
- * If not successful, it will fall back to taking the lock and
- * calling get_user_pages().
- *
- * Returns number of pages pinned. This may be fewer than the number
- * requested. If nr_pages is 0 or negative, returns 0. If no pages
- * were pinned, returns -errno.
- */
-int get_user_pages_fast(unsigned long start, int nr_pages, int write,
- struct page **pages)
-{
- struct mm_struct *mm = current->mm;
- unsigned long addr, len, end;
- unsigned long next;
- pgd_t *pgdp;
- int ret, nr = 0;
-
- start &= PAGE_MASK;
- addr = start;
- len = (unsigned long) nr_pages << PAGE_SHIFT;
-
- end = start + len;
- if (end < start)
- goto slow_irqon;
-
- /* XXX: batch / limit 'nr' */
- local_irq_disable();
- pgdp = pgd_offset(mm, addr);
- do {
- pgd_t pgd = *pgdp;
-
- next = pgd_addr_end(addr, end);
- if (pgd_none(pgd))
- goto slow;
- if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
- goto slow;
- } while (pgdp++, addr = next, addr != end);
- local_irq_enable();
-
- VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT);
- return nr;
-slow:
- local_irq_enable();
-
-slow_irqon:
- /* Try to get the remaining pages with get_user_pages */
- start += nr << PAGE_SHIFT;
- pages += nr;
-
- down_read(&mm->mmap_sem);
- ret = get_user_pages(current, mm, start,
- (end - start) >> PAGE_SHIFT,
- write, 0, pages, NULL);
- up_read(&mm->mmap_sem);
-
- /* Have to be a bit careful with return values */
- if (nr > 0) {
- if (ret < 0)
- ret = nr;
- else
- ret += nr;
- }
- return ret;
-}
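
The least obvious piece above is gup_get_pte(): with 64-bit PTEs on a 32-bit kernel a PTE cannot be loaded atomically, so the low half is read, then the high half, then the low half is re-checked, retrying if a concurrent writer moved it. Below is a user-space model of the same torn-read guard using C11 atomics; the field names mirror pte_low/pte_high, and the kernel's smp_rmb() barriers are approximated with acquire loads.

#include <stdatomic.h>
#include <stdio.h>

/* Split 64-bit PTE as two 32-bit halves, like pte_low/pte_high. */
struct pte {
	_Atomic unsigned int low;
	_Atomic unsigned int high;
};

/* Writers update low then high; re-check low after reading high and
 * retry if it changed, so we never return a mixed old/new value. */
static unsigned long long read_pte(struct pte *p)
{
	unsigned int lo, hi;

	do {
		lo = atomic_load_explicit(&p->low, memory_order_acquire);
		hi = atomic_load_explicit(&p->high, memory_order_acquire);
	} while (lo != atomic_load_explicit(&p->low, memory_order_acquire));

	return ((unsigned long long)hi << 32) | lo;
}

int main(void)
{
	struct pte p;

	atomic_init(&p.low, 0x1234);
	atomic_init(&p.high, 0x5678);
	printf("pte = %#llx\n", read_pte(&p));
	return 0;
}
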
diff --git a/ANDROID_3.4.5/arch/mips/mm/highmem.c b/ANDROID_3.4.5/arch/mips/mm/highmem.c
deleted file mode 100644
index aff57057..00000000
--- a/ANDROID_3.4.5/arch/mips/mm/highmem.c
+++ /dev/null
@@ -1,137 +0,0 @@
-#include <linux/module.h>
-#include <linux/highmem.h>
-#include <linux/sched.h>
-#include <linux/smp.h>
-#include <asm/fixmap.h>
-#include <asm/tlbflush.h>
-
-static pte_t *kmap_pte;
-
-unsigned long highstart_pfn, highend_pfn;
-
-void *kmap(struct page *page)
-{
- void *addr;
-
- might_sleep();
- if (!PageHighMem(page))
- return page_address(page);
- addr = kmap_high(page);
- flush_tlb_one((unsigned long)addr);
-
- return addr;
-}
-EXPORT_SYMBOL(kmap);
-
-void kunmap(struct page *page)
-{
- BUG_ON(in_interrupt());
- if (!PageHighMem(page))
- return;
- kunmap_high(page);
-}
-EXPORT_SYMBOL(kunmap);
-
-/*
- * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
- * no global lock is needed and because the kmap code must perform a global TLB
- * invalidation when the kmap pool wraps.
- *
- * However, when holding an atomic kmap it is not legal to sleep, so atomic
- * kmaps are appropriate for short, tight code paths only.
- */
-
-void *kmap_atomic(struct page *page)
-{
- unsigned long vaddr;
- int idx, type;
-
- /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
- pagefault_disable();
- if (!PageHighMem(page))
- return page_address(page);
-
- type = kmap_atomic_idx_push();
- idx = type + KM_TYPE_NR*smp_processor_id();
- vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-#ifdef CONFIG_DEBUG_HIGHMEM
- BUG_ON(!pte_none(*(kmap_pte - idx)));
-#endif
- set_pte(kmap_pte-idx, mk_pte(page, PAGE_KERNEL));
- local_flush_tlb_one((unsigned long)vaddr);
-
- return (void*) vaddr;
-}
-EXPORT_SYMBOL(kmap_atomic);
-
-void __kunmap_atomic(void *kvaddr)
-{
- unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
- int type;
-
- if (vaddr < FIXADDR_START) { // FIXME
- pagefault_enable();
- return;
- }
-
- type = kmap_atomic_idx();
-#ifdef CONFIG_DEBUG_HIGHMEM
- {
- int idx = type + KM_TYPE_NR * smp_processor_id();
-
- BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
-
- /*
-		 * force other mappings to Oops if they try to access
-		 * this pte without first remapping it
- */
- pte_clear(&init_mm, vaddr, kmap_pte-idx);
- local_flush_tlb_one(vaddr);
- }
-#endif
- kmap_atomic_idx_pop();
- pagefault_enable();
-}
-EXPORT_SYMBOL(__kunmap_atomic);
-
-/*
- * This is the same as kmap_atomic() but can map memory that doesn't
- * have a struct page associated with it.
- */
-void *kmap_atomic_pfn(unsigned long pfn)
-{
- unsigned long vaddr;
- int idx, type;
-
- pagefault_disable();
-
- type = kmap_atomic_idx_push();
- idx = type + KM_TYPE_NR*smp_processor_id();
- vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
- set_pte(kmap_pte-idx, pfn_pte(pfn, PAGE_KERNEL));
- flush_tlb_one(vaddr);
-
- return (void*) vaddr;
-}
-
-struct page *kmap_atomic_to_page(void *ptr)
-{
- unsigned long idx, vaddr = (unsigned long)ptr;
- pte_t *pte;
-
- if (vaddr < FIXADDR_START)
- return virt_to_page(ptr);
-
- idx = virt_to_fix(vaddr);
- pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
- return pte_page(*pte);
-}
-
-void __init kmap_init(void)
-{
- unsigned long kmap_vstart;
-
- /* cache the first kmap pte */
- kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
- kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
-}
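
kmap_atomic() above gets away without any lock because the mapping address is a pure function of the CPU and the nesting depth: kmap_atomic_idx_push() hands out the depth, and the fixmap slot is FIX_KMAP_BEGIN + depth + KM_TYPE_NR * cpu. A sketch of that slot arithmetic follows; FIXADDR_TOP, FIX_KMAP_BEGIN and KM_TYPE_NR are illustrative values here, the real ones are platform-defined.

#include <stdio.h>

#define PAGE_SHIFT	12
#define FIXADDR_TOP	0xfffe0000UL	/* illustrative */
#define FIX_KMAP_BEGIN	8		/* illustrative fixmap layout */
#define KM_TYPE_NR	20		/* nesting slots per CPU */

/* Same shape as the kernel's __fix_to_virt(). */
static unsigned long fix_to_virt(unsigned long idx)
{
	return FIXADDR_TOP - (idx << PAGE_SHIFT);
}

/* kmap_atomic()'s address choice: a pure function of (cpu, depth),
 * so no lock and no search are ever needed. */
static unsigned long kmap_slot(int cpu, int depth)
{
	unsigned long idx = depth + KM_TYPE_NR * (unsigned long)cpu;

	return fix_to_virt(FIX_KMAP_BEGIN + idx);
}

int main(void)
{
	printf("cpu0, depth 0 -> %#lx\n", kmap_slot(0, 0));
	printf("cpu1, depth 2 -> %#lx\n", kmap_slot(1, 2));
	return 0;
}
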
diff --git a/ANDROID_3.4.5/arch/mips/mm/hugetlbpage.c b/ANDROID_3.4.5/arch/mips/mm/hugetlbpage.c
deleted file mode 100644
index a7fee0df..00000000
--- a/ANDROID_3.4.5/arch/mips/mm/hugetlbpage.c
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * MIPS Huge TLB Page Support for Kernel.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
- * Copyright 2005, Embedded Alley Solutions, Inc.
- * Matt Porter <mporter@embeddedalley.com>
- * Copyright (C) 2008, 2009 Cavium Networks, Inc.
- */
-
-#include <linux/init.h>
-#include <linux/fs.h>
-#include <linux/mm.h>
-#include <linux/hugetlb.h>
-#include <linux/pagemap.h>
-#include <linux/err.h>
-#include <linux/sysctl.h>
-#include <asm/mman.h>
-#include <asm/tlb.h>
-#include <asm/tlbflush.h>
-
-pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr,
- unsigned long sz)
-{
- pgd_t *pgd;
- pud_t *pud;
- pte_t *pte = NULL;
-
- pgd = pgd_offset(mm, addr);
- pud = pud_alloc(mm, pgd, addr);
- if (pud)
- pte = (pte_t *)pmd_alloc(mm, pud, addr);
-
- return pte;
-}
-
-pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
-{
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd = NULL;
-
- pgd = pgd_offset(mm, addr);
- if (pgd_present(*pgd)) {
- pud = pud_offset(pgd, addr);
- if (pud_present(*pud))
- pmd = pmd_offset(pud, addr);
- }
- return (pte_t *) pmd;
-}
-
-int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
-{
- return 0;
-}
-
-/*
- * This function checks for proper alignment of input addr and len parameters.
- */
-int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
-{
- if (len & ~HPAGE_MASK)
- return -EINVAL;
- if (addr & ~HPAGE_MASK)
- return -EINVAL;
- return 0;
-}
-
-struct page *
-follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
-{
- return ERR_PTR(-EINVAL);
-}
-
-int pmd_huge(pmd_t pmd)
-{
- return (pmd_val(pmd) & _PAGE_HUGE) != 0;
-}
-
-int pud_huge(pud_t pud)
-{
- return (pud_val(pud) & _PAGE_HUGE) != 0;
-}
-
-struct page *
-follow_huge_pmd(struct mm_struct *mm, unsigned long address,
- pmd_t *pmd, int write)
-{
- struct page *page;
-
- page = pte_page(*(pte_t *)pmd);
- if (page)
- page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT);
- return page;
-}
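
follow_huge_pmd() above finds the 4 KB subpage of a huge page by indexing from the head page with (address & ~HPAGE_MASK) >> PAGE_SHIFT. Here is that arithmetic in isolation, assuming 16 MB huge pages purely for illustration (the MIPS huge page size actually follows from the base page size).

#include <stdio.h>

#define PAGE_SHIFT	12
#define HPAGE_SHIFT	24	/* 16 MB huge pages, purely illustrative */
#define HPAGE_MASK	(~((1UL << HPAGE_SHIFT) - 1))

int main(void)
{
	unsigned long address = 0x12345678UL;

	/* Index of the 4 KB subpage inside the huge page, exactly the
	 * "page += (address & ~HPAGE_MASK) >> PAGE_SHIFT" step above. */
	unsigned long subpage = (address & ~HPAGE_MASK) >> PAGE_SHIFT;

	printf("address %#lx -> subpage index %lu\n", address, subpage);
	return 0;
}
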
diff --git a/ANDROID_3.4.5/arch/mips/mm/init.c b/ANDROID_3.4.5/arch/mips/mm/init.c
deleted file mode 100644
index 1a85ba92..00000000
--- a/ANDROID_3.4.5/arch/mips/mm/init.c
+++ /dev/null
@@ -1,487 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1994 - 2000 Ralf Baechle
- * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
- * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
- */
-#include <linux/bug.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/smp.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/pagemap.h>
-#include <linux/ptrace.h>
-#include <linux/mman.h>
-#include <linux/mm.h>
-#include <linux/bootmem.h>
-#include <linux/highmem.h>
-#include <linux/swap.h>
-#include <linux/proc_fs.h>
-#include <linux/pfn.h>
-#include <linux/hardirq.h>
-#include <linux/gfp.h>
-
-#include <asm/asm-offsets.h>
-#include <asm/bootinfo.h>
-#include <asm/cachectl.h>
-#include <asm/cpu.h>
-#include <asm/dma.h>
-#include <asm/kmap_types.h>
-#include <asm/mmu_context.h>
-#include <asm/sections.h>
-#include <asm/pgtable.h>
-#include <asm/pgalloc.h>
-#include <asm/tlb.h>
-#include <asm/fixmap.h>
-
-/* Atomicity and interruptability */
-#ifdef CONFIG_MIPS_MT_SMTC
-
-#include <asm/mipsmtregs.h>
-
-#define ENTER_CRITICAL(flags) \
- { \
- unsigned int mvpflags; \
- local_irq_save(flags);\
- mvpflags = dvpe()
-#define EXIT_CRITICAL(flags) \
- evpe(mvpflags); \
- local_irq_restore(flags); \
- }
-#else
-
-#define ENTER_CRITICAL(flags) local_irq_save(flags)
-#define EXIT_CRITICAL(flags) local_irq_restore(flags)
-
-#endif /* CONFIG_MIPS_MT_SMTC */
-
-/*
- * We have up to 8 empty zeroed pages so we can map one of the right colour
- * when needed. This is necessary only on R4000 / R4400 SC and MC versions
- * where we have to avoid VCED / VECI exceptions for good performance at
- * any price.  Since the page is never written to after initialization we
- * don't have to care about aliases on other CPUs.
- */
-unsigned long empty_zero_page, zero_page_mask;
-EXPORT_SYMBOL_GPL(empty_zero_page);
-
-/*
- * Not static inline because used by IP27 special magic initialization code
- */
-unsigned long setup_zero_pages(void)
-{
- unsigned int order;
- unsigned long size;
- struct page *page;
-
- if (cpu_has_vce)
- order = 3;
- else
- order = 0;
-
- empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
- if (!empty_zero_page)
- panic("Oh boy, that early out of memory?");
-
- page = virt_to_page((void *)empty_zero_page);
- split_page(page, order);
- while (page < virt_to_page((void *)(empty_zero_page + (PAGE_SIZE << order)))) {
- SetPageReserved(page);
- page++;
- }
-
- size = PAGE_SIZE << order;
- zero_page_mask = (size - 1) & PAGE_MASK;
-
- return 1UL << order;
-}
-
-#ifdef CONFIG_MIPS_MT_SMTC
-static pte_t *kmap_coherent_pte;
-static void __init kmap_coherent_init(void)
-{
- unsigned long vaddr;
-
- /* cache the first coherent kmap pte */
- vaddr = __fix_to_virt(FIX_CMAP_BEGIN);
- kmap_coherent_pte = kmap_get_fixmap_pte(vaddr);
-}
-#else
-static inline void kmap_coherent_init(void) {}
-#endif
-
-void *kmap_coherent(struct page *page, unsigned long addr)
-{
- enum fixed_addresses idx;
- unsigned long vaddr, flags, entrylo;
- unsigned long old_ctx;
- pte_t pte;
- int tlbidx;
-
- BUG_ON(Page_dcache_dirty(page));
-
- inc_preempt_count();
- idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
-#ifdef CONFIG_MIPS_MT_SMTC
- idx += FIX_N_COLOURS * smp_processor_id() +
- (in_interrupt() ? (FIX_N_COLOURS * NR_CPUS) : 0);
-#else
- idx += in_interrupt() ? FIX_N_COLOURS : 0;
-#endif
- vaddr = __fix_to_virt(FIX_CMAP_END - idx);
- pte = mk_pte(page, PAGE_KERNEL);
-#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
- entrylo = pte.pte_high;
-#else
- entrylo = pte_to_entrylo(pte_val(pte));
-#endif
-
- ENTER_CRITICAL(flags);
- old_ctx = read_c0_entryhi();
- write_c0_entryhi(vaddr & (PAGE_MASK << 1));
- write_c0_entrylo0(entrylo);
- write_c0_entrylo1(entrylo);
-#ifdef CONFIG_MIPS_MT_SMTC
- set_pte(kmap_coherent_pte - (FIX_CMAP_END - idx), pte);
- /* preload TLB instead of local_flush_tlb_one() */
- mtc0_tlbw_hazard();
- tlb_probe();
- tlb_probe_hazard();
- tlbidx = read_c0_index();
- mtc0_tlbw_hazard();
- if (tlbidx < 0)
- tlb_write_random();
- else
- tlb_write_indexed();
-#else
- tlbidx = read_c0_wired();
- write_c0_wired(tlbidx + 1);
- write_c0_index(tlbidx);
- mtc0_tlbw_hazard();
- tlb_write_indexed();
-#endif
- tlbw_use_hazard();
- write_c0_entryhi(old_ctx);
- EXIT_CRITICAL(flags);
-
- return (void*) vaddr;
-}
-
-#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
-
-void kunmap_coherent(void)
-{
-#ifndef CONFIG_MIPS_MT_SMTC
- unsigned int wired;
- unsigned long flags, old_ctx;
-
- ENTER_CRITICAL(flags);
- old_ctx = read_c0_entryhi();
- wired = read_c0_wired() - 1;
- write_c0_wired(wired);
- write_c0_index(wired);
- write_c0_entryhi(UNIQUE_ENTRYHI(wired));
- write_c0_entrylo0(0);
- write_c0_entrylo1(0);
- mtc0_tlbw_hazard();
- tlb_write_indexed();
- tlbw_use_hazard();
- write_c0_entryhi(old_ctx);
- EXIT_CRITICAL(flags);
-#endif
- dec_preempt_count();
- preempt_check_resched();
-}
-
-void copy_user_highpage(struct page *to, struct page *from,
- unsigned long vaddr, struct vm_area_struct *vma)
-{
- void *vfrom, *vto;
-
- vto = kmap_atomic(to);
- if (cpu_has_dc_aliases &&
- page_mapped(from) && !Page_dcache_dirty(from)) {
- vfrom = kmap_coherent(from, vaddr);
- copy_page(vto, vfrom);
- kunmap_coherent();
- } else {
- vfrom = kmap_atomic(from);
- copy_page(vto, vfrom);
- kunmap_atomic(vfrom);
- }
- if ((!cpu_has_ic_fills_f_dc) ||
- pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
- flush_data_cache_page((unsigned long)vto);
- kunmap_atomic(vto);
- /* Make sure this page is cleared on other CPU's too before using it */
- smp_wmb();
-}
-
-void copy_to_user_page(struct vm_area_struct *vma,
- struct page *page, unsigned long vaddr, void *dst, const void *src,
- unsigned long len)
-{
- if (cpu_has_dc_aliases &&
- page_mapped(page) && !Page_dcache_dirty(page)) {
- void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
- memcpy(vto, src, len);
- kunmap_coherent();
- } else {
- memcpy(dst, src, len);
- if (cpu_has_dc_aliases)
- SetPageDcacheDirty(page);
- }
- if ((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc)
- flush_cache_page(vma, vaddr, page_to_pfn(page));
-}
-
-void copy_from_user_page(struct vm_area_struct *vma,
- struct page *page, unsigned long vaddr, void *dst, const void *src,
- unsigned long len)
-{
- if (cpu_has_dc_aliases &&
- page_mapped(page) && !Page_dcache_dirty(page)) {
- void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
- memcpy(dst, vfrom, len);
- kunmap_coherent();
- } else {
- memcpy(dst, src, len);
- if (cpu_has_dc_aliases)
- SetPageDcacheDirty(page);
- }
-}
-
-void __init fixrange_init(unsigned long start, unsigned long end,
- pgd_t *pgd_base)
-{
-#if defined(CONFIG_HIGHMEM) || defined(CONFIG_MIPS_MT_SMTC)
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
- int i, j, k;
- unsigned long vaddr;
-
- vaddr = start;
- i = __pgd_offset(vaddr);
- j = __pud_offset(vaddr);
- k = __pmd_offset(vaddr);
- pgd = pgd_base + i;
-
- for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
- pud = (pud_t *)pgd;
- for ( ; (j < PTRS_PER_PUD) && (vaddr < end); pud++, j++) {
- pmd = (pmd_t *)pud;
- for (; (k < PTRS_PER_PMD) && (vaddr < end); pmd++, k++) {
- if (pmd_none(*pmd)) {
- pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
- set_pmd(pmd, __pmd((unsigned long)pte));
- BUG_ON(pte != pte_offset_kernel(pmd, 0));
- }
- vaddr += PMD_SIZE;
- }
- k = 0;
- }
- j = 0;
- }
-#endif
-}
-
-#ifndef CONFIG_NEED_MULTIPLE_NODES
-int page_is_ram(unsigned long pagenr)
-{
- int i;
-
- for (i = 0; i < boot_mem_map.nr_map; i++) {
- unsigned long addr, end;
-
- switch (boot_mem_map.map[i].type) {
- case BOOT_MEM_RAM:
- case BOOT_MEM_INIT_RAM:
- break;
- default:
- /* not usable memory */
- continue;
- }
-
- addr = PFN_UP(boot_mem_map.map[i].addr);
- end = PFN_DOWN(boot_mem_map.map[i].addr +
- boot_mem_map.map[i].size);
-
- if (pagenr >= addr && pagenr < end)
- return 1;
- }
-
- return 0;
-}
-
-void __init paging_init(void)
-{
- unsigned long max_zone_pfns[MAX_NR_ZONES];
- unsigned long lastpfn __maybe_unused;
-
- pagetable_init();
-
-#ifdef CONFIG_HIGHMEM
- kmap_init();
-#endif
- kmap_coherent_init();
-
-#ifdef CONFIG_ZONE_DMA
- max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
-#endif
-#ifdef CONFIG_ZONE_DMA32
- max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
-#endif
- max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
- lastpfn = max_low_pfn;
-#ifdef CONFIG_HIGHMEM
- max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
- lastpfn = highend_pfn;
-
- if (cpu_has_dc_aliases && max_low_pfn != highend_pfn) {
- printk(KERN_WARNING "This processor doesn't support highmem."
- " %ldk highmem ignored\n",
- (highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10));
- max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn;
- lastpfn = max_low_pfn;
- }
-#endif
-
- free_area_init_nodes(max_zone_pfns);
-}
-
-#ifdef CONFIG_64BIT
-static struct kcore_list kcore_kseg0;
-#endif
-
-void __init mem_init(void)
-{
- unsigned long codesize, reservedpages, datasize, initsize;
- unsigned long tmp, ram;
-
-#ifdef CONFIG_HIGHMEM
-#ifdef CONFIG_DISCONTIGMEM
-#error "CONFIG_HIGHMEM and CONFIG_DISCONTIGMEM dont work together yet"
-#endif
- max_mapnr = highend_pfn ? highend_pfn : max_low_pfn;
-#else
- max_mapnr = max_low_pfn;
-#endif
- high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
-
- totalram_pages += free_all_bootmem();
- totalram_pages -= setup_zero_pages(); /* Setup zeroed pages. */
-
- reservedpages = ram = 0;
- for (tmp = 0; tmp < max_low_pfn; tmp++)
- if (page_is_ram(tmp) && pfn_valid(tmp)) {
- ram++;
- if (PageReserved(pfn_to_page(tmp)))
- reservedpages++;
- }
- num_physpages = ram;
-
-#ifdef CONFIG_HIGHMEM
- for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
- struct page *page = pfn_to_page(tmp);
-
- if (!page_is_ram(tmp)) {
- SetPageReserved(page);
- continue;
- }
- ClearPageReserved(page);
- init_page_count(page);
- __free_page(page);
- totalhigh_pages++;
- }
- totalram_pages += totalhigh_pages;
- num_physpages += totalhigh_pages;
-#endif
-
- codesize = (unsigned long) &_etext - (unsigned long) &_text;
- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
- initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
-
-#ifdef CONFIG_64BIT
- if ((unsigned long) &_text > (unsigned long) CKSEG0)
- /* The -4 is a hack so that user tools don't have to handle
- the overflow. */
- kclist_add(&kcore_kseg0, (void *) CKSEG0,
- 0x80000000 - 4, KCORE_TEXT);
-#endif
-
- printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
- "%ldk reserved, %ldk data, %ldk init, %ldk highmem)\n",
- nr_free_pages() << (PAGE_SHIFT-10),
- ram << (PAGE_SHIFT-10),
- codesize >> 10,
- reservedpages << (PAGE_SHIFT-10),
- datasize >> 10,
- initsize >> 10,
- totalhigh_pages << (PAGE_SHIFT-10));
-}
-#endif /* !CONFIG_NEED_MULTIPLE_NODES */
-
-void free_init_pages(const char *what, unsigned long begin, unsigned long end)
-{
- unsigned long pfn;
-
- for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
- struct page *page = pfn_to_page(pfn);
- void *addr = phys_to_virt(PFN_PHYS(pfn));
-
- ClearPageReserved(page);
- init_page_count(page);
- memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
- __free_page(page);
- totalram_pages++;
- }
- printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
-}
-
-#ifdef CONFIG_BLK_DEV_INITRD
-void free_initrd_mem(unsigned long start, unsigned long end)
-{
- free_init_pages("initrd memory",
- virt_to_phys((void *)start),
- virt_to_phys((void *)end));
-}
-#endif
-
-void __init_refok free_initmem(void)
-{
- prom_free_prom_memory();
- free_init_pages("unused kernel memory",
- __pa_symbol(&__init_begin),
- __pa_symbol(&__init_end));
-}
-
-#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
-unsigned long pgd_current[NR_CPUS];
-#endif
-/*
- * On 64-bit we've got three-level pagetables with a slightly
- * different layout ...
- */
-#define __page_aligned(order) __attribute__((__aligned__(PAGE_SIZE<<order)))
-
-/*
- * gcc 3.3 and older have trouble determining that PTRS_PER_PGD and PGD_ORDER
- * are constants. So we use the variants from asm-offset.h until that gcc
- * will officially be retired.
- */
-pgd_t swapper_pg_dir[_PTRS_PER_PGD] __page_aligned(_PGD_ORDER);
-#ifndef __PAGETABLE_PMD_FOLDED
-pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned(PMD_ORDER);
-#endif
-pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned(PTE_ORDER);
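
The zero-page colouring set up by setup_zero_pages() earlier in this file deserves a closer look: on VCE-affected CPUs an order-3 block of eight zeroed pages is allocated and zero_page_mask is derived from it, so that the copy with the right cache colour can later be picked by masking the user virtual address. A model of that selection, with illustrative addresses:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

int main(void)
{
	int order = 3;					/* the cpu_has_vce case */
	unsigned long empty_zero_page = 0x80100000UL;	/* illustrative */
	unsigned long size = PAGE_SIZE << order;
	unsigned long zero_page_mask = (size - 1) & PAGE_MASK;

	/* Pick the zero-page copy whose cache colour matches the user
	 * virtual address, as the kernel's ZERO_PAGE() macro does. */
	unsigned long vaddr = 0x7f004000UL;
	unsigned long zpage = empty_zero_page + (vaddr & zero_page_mask);

	printf("zero page for %#lx -> %#lx\n", vaddr, zpage);
	return 0;
}
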
diff --git a/ANDROID_3.4.5/arch/mips/mm/ioremap.c b/ANDROID_3.4.5/arch/mips/mm/ioremap.c
deleted file mode 100644
index cacfd31e..00000000
--- a/ANDROID_3.4.5/arch/mips/mm/ioremap.c
+++ /dev/null
@@ -1,192 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * (C) Copyright 1995 1996 Linus Torvalds
- * (C) Copyright 2001, 2002 Ralf Baechle
- */
-#include <linux/module.h>
-#include <asm/addrspace.h>
-#include <asm/byteorder.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include <asm/cacheflush.h>
-#include <asm/io.h>
-#include <asm/tlbflush.h>
-
-static inline void remap_area_pte(pte_t * pte, unsigned long address,
- phys_t size, phys_t phys_addr, unsigned long flags)
-{
- phys_t end;
- unsigned long pfn;
- pgprot_t pgprot = __pgprot(_PAGE_GLOBAL | _PAGE_PRESENT | __READABLE
- | __WRITEABLE | flags);
-
- address &= ~PMD_MASK;
- end = address + size;
- if (end > PMD_SIZE)
- end = PMD_SIZE;
- BUG_ON(address >= end);
- pfn = phys_addr >> PAGE_SHIFT;
- do {
- if (!pte_none(*pte)) {
- printk("remap_area_pte: page already exists\n");
- BUG();
- }
- set_pte(pte, pfn_pte(pfn, pgprot));
- address += PAGE_SIZE;
- pfn++;
- pte++;
- } while (address && (address < end));
-}
-
-static inline int remap_area_pmd(pmd_t * pmd, unsigned long address,
- phys_t size, phys_t phys_addr, unsigned long flags)
-{
- phys_t end;
-
- address &= ~PGDIR_MASK;
- end = address + size;
- if (end > PGDIR_SIZE)
- end = PGDIR_SIZE;
- phys_addr -= address;
- BUG_ON(address >= end);
- do {
- pte_t * pte = pte_alloc_kernel(pmd, address);
- if (!pte)
- return -ENOMEM;
- remap_area_pte(pte, address, end - address, address + phys_addr, flags);
- address = (address + PMD_SIZE) & PMD_MASK;
- pmd++;
- } while (address && (address < end));
- return 0;
-}
-
-static int remap_area_pages(unsigned long address, phys_t phys_addr,
- phys_t size, unsigned long flags)
-{
- int error;
- pgd_t * dir;
- unsigned long end = address + size;
-
- phys_addr -= address;
- dir = pgd_offset(&init_mm, address);
- flush_cache_all();
- BUG_ON(address >= end);
- do {
- pud_t *pud;
- pmd_t *pmd;
-
- error = -ENOMEM;
- pud = pud_alloc(&init_mm, dir, address);
- if (!pud)
- break;
- pmd = pmd_alloc(&init_mm, pud, address);
- if (!pmd)
- break;
- if (remap_area_pmd(pmd, address, end - address,
- phys_addr + address, flags))
- break;
- error = 0;
- address = (address + PGDIR_SIZE) & PGDIR_MASK;
- dir++;
- } while (address && (address < end));
- flush_tlb_all();
- return error;
-}
-
-/*
- * Generic mapping function (not visible outside):
- */
-
-/*
- * Remap an arbitrary physical address space into the kernel virtual
- * address space. Needed when the kernel wants to access high addresses
- * directly.
- *
- * NOTE! We need to allow non-page-aligned mappings too: we will obviously
- * have to convert them into an offset in a page-aligned mapping, but the
- * caller shouldn't need to know that small detail.
- */
-
-#define IS_LOW512(addr) (!((phys_t)(addr) & (phys_t) ~0x1fffffffULL))
-
-void __iomem * __ioremap(phys_t phys_addr, phys_t size, unsigned long flags)
-{
- struct vm_struct * area;
- unsigned long offset;
- phys_t last_addr;
- void * addr;
-
- phys_addr = fixup_bigphys_addr(phys_addr, size);
-
- /* Don't allow wraparound or zero size */
- last_addr = phys_addr + size - 1;
- if (!size || last_addr < phys_addr)
- return NULL;
-
- /*
- * Map uncached objects in the low 512mb of address space using KSEG1,
- * otherwise map using page tables.
- */
- if (IS_LOW512(phys_addr) && IS_LOW512(last_addr) &&
- flags == _CACHE_UNCACHED)
- return (void __iomem *) CKSEG1ADDR(phys_addr);
-
- /*
- * Don't allow anybody to remap normal RAM that we're using..
- */
- if (phys_addr < virt_to_phys(high_memory)) {
- char *t_addr, *t_end;
- struct page *page;
-
- t_addr = __va(phys_addr);
- t_end = t_addr + (size - 1);
-
- for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
- if(!PageReserved(page))
- return NULL;
- }
-
- /*
- * Mappings have to be page-aligned
- */
- offset = phys_addr & ~PAGE_MASK;
- phys_addr &= PAGE_MASK;
- size = PAGE_ALIGN(last_addr + 1) - phys_addr;
-
- /*
- * Ok, go for it..
- */
- area = get_vm_area(size, VM_IOREMAP);
- if (!area)
- return NULL;
- addr = area->addr;
- if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
- vunmap(addr);
- return NULL;
- }
-
- return (void __iomem *) (offset + (char *)addr);
-}
-
-#define IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == CKSEG1)
-
-void __iounmap(const volatile void __iomem *addr)
-{
- struct vm_struct *p;
-
- if (IS_KSEG1(addr))
- return;
-
- p = remove_vm_area((void *) (PAGE_MASK & (unsigned long __force) addr));
- if (!p)
- printk(KERN_ERR "iounmap: bad address %p\n", addr);
-
- kfree(p);
-}
-
-EXPORT_SYMBOL(__ioremap);
-EXPORT_SYMBOL(__iounmap);
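
__ioremap() above never builds page tables for uncached mappings of the low 512 MB of physical space: that window is identity-mapped through KSEG1, so CKSEG1ADDR() can compute the virtual address directly. Here are the check and the address math in isolation, shown with the 32-bit 0xa0000000 segment base (64-bit kernels use the sign-extended form).

#include <stdio.h>

#define CKSEG1		0xa0000000UL	/* 32-bit kernel segment base */
#define IS_LOW512(p)	(!((unsigned long long)(p) & ~0x1fffffffULL))
#define CKSEG1ADDR(p)	(((unsigned long)(p) & 0x1fffffffUL) | CKSEG1)

int main(void)
{
	unsigned long long phys = 0x1fc00000ULL;	/* e.g. a boot ROM */

	if (IS_LOW512(phys))
		printf("uncached KSEG1 mapping: %#lx\n", CKSEG1ADDR(phys));
	else
		printf("outside low 512 MB, needs page tables\n");
	return 0;
}
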
diff --git a/ANDROID_3.4.5/arch/mips/mm/mmap.c b/ANDROID_3.4.5/arch/mips/mm/mmap.c
deleted file mode 100644
index 302d779d..00000000
--- a/ANDROID_3.4.5/arch/mips/mm/mmap.c
+++ /dev/null
@@ -1,271 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2011 Wind River Systems,
- * written by Ralf Baechle <ralf@linux-mips.org>
- */
-#include <linux/compiler.h>
-#include <linux/errno.h>
-#include <linux/mm.h>
-#include <linux/mman.h>
-#include <linux/module.h>
-#include <linux/personality.h>
-#include <linux/random.h>
-#include <linux/sched.h>
-
-unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */
-EXPORT_SYMBOL(shm_align_mask);
-
-/* gap between mmap and stack */
-#define MIN_GAP (128*1024*1024UL)
-#define MAX_GAP ((TASK_SIZE)/6*5)
-
-static int mmap_is_legacy(void)
-{
- if (current->personality & ADDR_COMPAT_LAYOUT)
- return 1;
-
- if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
- return 1;
-
- return sysctl_legacy_va_layout;
-}
-
-static unsigned long mmap_base(unsigned long rnd)
-{
- unsigned long gap = rlimit(RLIMIT_STACK);
-
- if (gap < MIN_GAP)
- gap = MIN_GAP;
- else if (gap > MAX_GAP)
- gap = MAX_GAP;
-
- return PAGE_ALIGN(TASK_SIZE - gap - rnd);
-}
-
-static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr,
- unsigned long pgoff)
-{
- unsigned long base = addr & ~shm_align_mask;
- unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;
-
- if (base + off <= addr)
- return base + off;
-
- return base - off;
-}
-
-#define COLOUR_ALIGN(addr, pgoff) \
- ((((addr) + shm_align_mask) & ~shm_align_mask) + \
- (((pgoff) << PAGE_SHIFT) & shm_align_mask))
-
-enum mmap_allocation_direction {UP, DOWN};
-
-static unsigned long arch_get_unmapped_area_common(struct file *filp,
- unsigned long addr0, unsigned long len, unsigned long pgoff,
- unsigned long flags, enum mmap_allocation_direction dir)
-{
- struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
- unsigned long addr = addr0;
- int do_color_align;
-
- if (unlikely(len > TASK_SIZE))
- return -ENOMEM;
-
- if (flags & MAP_FIXED) {
- /* Even MAP_FIXED mappings must reside within TASK_SIZE */
- if (TASK_SIZE - len < addr)
- return -EINVAL;
-
- /*
- * We do not accept a shared mapping if it would violate
- * cache aliasing constraints.
- */
- if ((flags & MAP_SHARED) &&
- ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
- return -EINVAL;
- return addr;
- }
-
- do_color_align = 0;
- if (filp || (flags & MAP_SHARED))
- do_color_align = 1;
-
- /* requesting a specific address */
- if (addr) {
- if (do_color_align)
- addr = COLOUR_ALIGN(addr, pgoff);
- else
- addr = PAGE_ALIGN(addr);
-
- vma = find_vma(mm, addr);
- if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
- return addr;
- }
-
- if (dir == UP) {
- addr = mm->mmap_base;
- if (do_color_align)
- addr = COLOUR_ALIGN(addr, pgoff);
- else
- addr = PAGE_ALIGN(addr);
-
- for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
- /* At this point: (!vma || addr < vma->vm_end). */
- if (TASK_SIZE - len < addr)
- return -ENOMEM;
- if (!vma || addr + len <= vma->vm_start)
- return addr;
- addr = vma->vm_end;
- if (do_color_align)
- addr = COLOUR_ALIGN(addr, pgoff);
- }
- } else {
- /* check if free_area_cache is useful for us */
- if (len <= mm->cached_hole_size) {
- mm->cached_hole_size = 0;
- mm->free_area_cache = mm->mmap_base;
- }
-
- /*
- * either no address requested, or the mapping can't fit into
- * the requested address hole
- */
- addr = mm->free_area_cache;
- if (do_color_align) {
- unsigned long base =
- COLOUR_ALIGN_DOWN(addr - len, pgoff);
- addr = base + len;
- }
-
- /* make sure it can fit in the remaining address space */
- if (likely(addr > len)) {
- vma = find_vma(mm, addr - len);
- if (!vma || addr <= vma->vm_start) {
- /* cache the address as a hint for next time */
- return mm->free_area_cache = addr - len;
- }
- }
-
- if (unlikely(mm->mmap_base < len))
- goto bottomup;
-
- addr = mm->mmap_base - len;
- if (do_color_align)
- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
-
- do {
- /*
- * Lookup failure means no vma is above this address,
- * else if new region fits below vma->vm_start,
- * return with success:
- */
- vma = find_vma(mm, addr);
- if (likely(!vma || addr + len <= vma->vm_start)) {
- /* cache the address as a hint for next time */
- return mm->free_area_cache = addr;
- }
-
- /* remember the largest hole we saw so far */
- if (addr + mm->cached_hole_size < vma->vm_start)
- mm->cached_hole_size = vma->vm_start - addr;
-
- /* try just below the current vma->vm_start */
- addr = vma->vm_start - len;
- if (do_color_align)
- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
- } while (likely(len < vma->vm_start));
-
-bottomup:
- /*
- * A failed mmap() very likely causes application failure,
- * so fall back to the bottom-up function here. This scenario
- * can happen with large stack limits and large mmap()
- * allocations.
- */
- mm->cached_hole_size = ~0UL;
- mm->free_area_cache = TASK_UNMAPPED_BASE;
- addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
- /*
- * Restore the topdown base:
- */
- mm->free_area_cache = mm->mmap_base;
- mm->cached_hole_size = ~0UL;
-
- return addr;
- }
-}
-
-unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0,
- unsigned long len, unsigned long pgoff, unsigned long flags)
-{
- return arch_get_unmapped_area_common(filp,
- addr0, len, pgoff, flags, UP);
-}
-
-/*
- * There is no need to export this but sched.h declares the function as
- * extern so making it static here results in an error.
- */
-unsigned long arch_get_unmapped_area_topdown(struct file *filp,
- unsigned long addr0, unsigned long len, unsigned long pgoff,
- unsigned long flags)
-{
- return arch_get_unmapped_area_common(filp,
- addr0, len, pgoff, flags, DOWN);
-}
-
-void arch_pick_mmap_layout(struct mm_struct *mm)
-{
- unsigned long random_factor = 0UL;
-
- if (current->flags & PF_RANDOMIZE) {
- random_factor = get_random_int();
- random_factor = random_factor << PAGE_SHIFT;
- if (TASK_IS_32BIT_ADDR)
- random_factor &= 0xfffffful;
- else
- random_factor &= 0xffffffful;
- }
-
- if (mmap_is_legacy()) {
- mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
- mm->get_unmapped_area = arch_get_unmapped_area;
- mm->unmap_area = arch_unmap_area;
- } else {
- mm->mmap_base = mmap_base(random_factor);
- mm->get_unmapped_area = arch_get_unmapped_area_topdown;
- mm->unmap_area = arch_unmap_area_topdown;
- }
-}
-
-static inline unsigned long brk_rnd(void)
-{
- unsigned long rnd = get_random_int();
-
- rnd = rnd << PAGE_SHIFT;
- /* 8MB for 32bit, 256MB for 64bit */
- if (TASK_IS_32BIT_ADDR)
- rnd = rnd & 0x7ffffful;
- else
- rnd = rnd & 0xffffffful;
-
- return rnd;
-}
-
-unsigned long arch_randomize_brk(struct mm_struct *mm)
-{
- unsigned long base = mm->brk;
- unsigned long ret;
-
- ret = PAGE_ALIGN(base + brk_rnd());
-
- if (ret < mm->brk)
- return mm->brk;
-
- return ret;
-}
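
The core of the aliasing avoidance above is COLOUR_ALIGN(): the candidate address is rounded up to an alias-span boundary and then offset by the file offset's colour, so that every shared mapping of a given page lands on the same cache colour. The arithmetic in isolation, assuming a 32 KB alias span (shm_align_mask is really derived from the cache geometry at boot):

#include <stdio.h>

#define PAGE_SHIFT	12

/* 32 KB alias span, illustrative; really set from cache geometry. */
static unsigned long shm_align_mask = 0x7fff;

static unsigned long colour_align(unsigned long addr, unsigned long pgoff)
{
	/* Round up to an alias boundary, then add the file offset's
	 * colour, so vaddr and pgoff agree modulo the alias size. */
	return ((addr + shm_align_mask) & ~shm_align_mask) +
	       ((pgoff << PAGE_SHIFT) & shm_align_mask);
}

int main(void)
{
	unsigned long addr = colour_align(0x10001234UL, 3);

	printf("aligned to %#lx, colour consistent: %d\n", addr,
	       ((addr - (3UL << PAGE_SHIFT)) & shm_align_mask) == 0);
	return 0;
}
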
diff --git a/ANDROID_3.4.5/arch/mips/mm/page.c b/ANDROID_3.4.5/arch/mips/mm/page.c
deleted file mode 100644
index cc0b6268..00000000
--- a/ANDROID_3.4.5/arch/mips/mm/page.c
+++ /dev/null
@@ -1,690 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2003, 04, 05 Ralf Baechle (ralf@linux-mips.org)
- * Copyright (C) 2007 Maciej W. Rozycki
- * Copyright (C) 2008 Thiemo Seufer
- */
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/smp.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/proc_fs.h>
-
-#include <asm/bugs.h>
-#include <asm/cacheops.h>
-#include <asm/inst.h>
-#include <asm/io.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/prefetch.h>
-#include <asm/bootinfo.h>
-#include <asm/mipsregs.h>
-#include <asm/mmu_context.h>
-#include <asm/cpu.h>
-#include <asm/war.h>
-
-#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
-#include <asm/sibyte/sb1250.h>
-#include <asm/sibyte/sb1250_regs.h>
-#include <asm/sibyte/sb1250_dma.h>
-#endif
-
-#include <asm/uasm.h>
-
-/* Registers used in the assembled routines. */
-#define ZERO 0
-#define AT 2
-#define A0 4
-#define A1 5
-#define A2 6
-#define T0 8
-#define T1 9
-#define T2 10
-#define T3 11
-#define T9 25
-#define RA 31
-
-/* Handle labels (which must be positive integers). */
-enum label_id {
- label_clear_nopref = 1,
- label_clear_pref,
- label_copy_nopref,
- label_copy_pref_both,
- label_copy_pref_store,
-};
-
-UASM_L_LA(_clear_nopref)
-UASM_L_LA(_clear_pref)
-UASM_L_LA(_copy_nopref)
-UASM_L_LA(_copy_pref_both)
-UASM_L_LA(_copy_pref_store)
-
-/* We need one branch and therefore one relocation per target label. */
-static struct uasm_label __cpuinitdata labels[5];
-static struct uasm_reloc __cpuinitdata relocs[5];
-
-#define cpu_is_r4600_v1_x() ((read_c0_prid() & 0xfffffff0) == 0x00002010)
-#define cpu_is_r4600_v2_x() ((read_c0_prid() & 0xfffffff0) == 0x00002020)
-
-/*
- * Maximum sizes:
- *
- * R4000 128 bytes S-cache: 0x058 bytes
- * R4600 v1.7: 0x05c bytes
- * R4600 v2.0: 0x060 bytes
- * With prefetching, 16 word strides 0x120 bytes
- */
-
-static u32 clear_page_array[0x120 / 4];
-
-#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
-void clear_page_cpu(void *page) __attribute__((alias("clear_page_array")));
-#else
-void clear_page(void *page) __attribute__((alias("clear_page_array")));
-#endif
-
-EXPORT_SYMBOL(clear_page);
-
-/*
- * Maximum sizes:
- *
- * R4000 128 bytes S-cache: 0x11c bytes
- * R4600 v1.7: 0x080 bytes
- * R4600 v2.0: 0x07c bytes
- * With prefetching, 16 word strides 0x540 bytes
- */
-static u32 copy_page_array[0x540 / 4];
-
-#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
-void
-copy_page_cpu(void *to, void *from) __attribute__((alias("copy_page_array")));
-#else
-void copy_page(void *to, void *from) __attribute__((alias("copy_page_array")));
-#endif
-
-EXPORT_SYMBOL(copy_page);
-
-
-static int pref_bias_clear_store __cpuinitdata;
-static int pref_bias_copy_load __cpuinitdata;
-static int pref_bias_copy_store __cpuinitdata;
-
-static u32 pref_src_mode __cpuinitdata;
-static u32 pref_dst_mode __cpuinitdata;
-
-static int clear_word_size __cpuinitdata;
-static int copy_word_size __cpuinitdata;
-
-static int half_clear_loop_size __cpuinitdata;
-static int half_copy_loop_size __cpuinitdata;
-
-static int cache_line_size __cpuinitdata;
-#define cache_line_mask() (cache_line_size - 1)
-
-static inline void __cpuinit
-pg_addiu(u32 **buf, unsigned int reg1, unsigned int reg2, unsigned int off)
-{
- if (cpu_has_64bit_gp_regs && DADDI_WAR && r4k_daddiu_bug()) {
- if (off > 0x7fff) {
- uasm_i_lui(buf, T9, uasm_rel_hi(off));
- uasm_i_addiu(buf, T9, T9, uasm_rel_lo(off));
- } else
- uasm_i_addiu(buf, T9, ZERO, off);
- uasm_i_daddu(buf, reg1, reg2, T9);
- } else {
- if (off > 0x7fff) {
- uasm_i_lui(buf, T9, uasm_rel_hi(off));
- uasm_i_addiu(buf, T9, T9, uasm_rel_lo(off));
- UASM_i_ADDU(buf, reg1, reg2, T9);
- } else
- UASM_i_ADDIU(buf, reg1, reg2, off);
- }
-}
-
-static void __cpuinit set_prefetch_parameters(void)
-{
- if (cpu_has_64bit_gp_regs || cpu_has_64bit_zero_reg)
- clear_word_size = 8;
- else
- clear_word_size = 4;
-
- if (cpu_has_64bit_gp_regs)
- copy_word_size = 8;
- else
- copy_word_size = 4;
-
- /*
- * The pref's used here are using "streaming" hints, which cause the
- * copied data to be kicked out of the cache sooner. A page copy often
- * ends up copying a lot more data than is commonly used, so this seems
- * to make sense in terms of reducing cache pollution, but I've no real
- * performance data to back this up.
- */
- if (cpu_has_prefetch) {
- /*
- * XXX: Most prefetch bias values in here are based on
- * guesswork.
- */
- cache_line_size = cpu_dcache_line_size();
- switch (current_cpu_type()) {
- case CPU_R5500:
- case CPU_TX49XX:
- /* These processors only support the Pref_Load. */
- pref_bias_copy_load = 256;
- break;
-
- case CPU_RM9000:
- /*
-			 * As a workaround for erratum G105, which makes the
-			 * PrepareForStore hint unusable, we fall back to
-			 * StoreRetained on the RM9000.  Once it is known which
-			 * versions of the RM9000 are affected, we'll be able
-			 * to conditionalize this.
- */
-
- case CPU_R10000:
- case CPU_R12000:
- case CPU_R14000:
- /*
- * Those values have been experimentally tuned for an
- * Origin 200.
- */
- pref_bias_clear_store = 512;
- pref_bias_copy_load = 256;
- pref_bias_copy_store = 256;
- pref_src_mode = Pref_LoadStreamed;
- pref_dst_mode = Pref_StoreStreamed;
- break;
-
- case CPU_SB1:
- case CPU_SB1A:
- pref_bias_clear_store = 128;
- pref_bias_copy_load = 128;
- pref_bias_copy_store = 128;
- /*
- * SB1 pass1 Pref_LoadStreamed/Pref_StoreStreamed
- * hints are broken.
- */
- if (current_cpu_type() == CPU_SB1 &&
- (current_cpu_data.processor_id & 0xff) < 0x02) {
- pref_src_mode = Pref_Load;
- pref_dst_mode = Pref_Store;
- } else {
- pref_src_mode = Pref_LoadStreamed;
- pref_dst_mode = Pref_StoreStreamed;
- }
- break;
-
- default:
- pref_bias_clear_store = 128;
- pref_bias_copy_load = 256;
- pref_bias_copy_store = 128;
- pref_src_mode = Pref_LoadStreamed;
- pref_dst_mode = Pref_PrepareForStore;
- break;
- }
- } else {
- if (cpu_has_cache_cdex_s)
- cache_line_size = cpu_scache_line_size();
- else if (cpu_has_cache_cdex_p)
- cache_line_size = cpu_dcache_line_size();
- }
- /*
- * Too much unrolling will overflow the available space in
-	 * clear_page_array / copy_page_array.
- */
- half_clear_loop_size = min(16 * clear_word_size,
- max(cache_line_size >> 1,
- 4 * clear_word_size));
- half_copy_loop_size = min(16 * copy_word_size,
- max(cache_line_size >> 1,
- 4 * copy_word_size));
-}
-
-static void __cpuinit build_clear_store(u32 **buf, int off)
-{
- if (cpu_has_64bit_gp_regs || cpu_has_64bit_zero_reg) {
- uasm_i_sd(buf, ZERO, off, A0);
- } else {
- uasm_i_sw(buf, ZERO, off, A0);
- }
-}
-
-static inline void __cpuinit build_clear_pref(u32 **buf, int off)
-{
- if (off & cache_line_mask())
- return;
-
- if (pref_bias_clear_store) {
- uasm_i_pref(buf, pref_dst_mode, pref_bias_clear_store + off,
- A0);
- } else if (cache_line_size == (half_clear_loop_size << 1)) {
- if (cpu_has_cache_cdex_s) {
- uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0);
- } else if (cpu_has_cache_cdex_p) {
- if (R4600_V1_HIT_CACHEOP_WAR && cpu_is_r4600_v1_x()) {
- uasm_i_nop(buf);
- uasm_i_nop(buf);
- uasm_i_nop(buf);
- uasm_i_nop(buf);
- }
-
- if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
- uasm_i_lw(buf, ZERO, ZERO, AT);
-
- uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0);
- }
- }
-}
-
-void __cpuinit build_clear_page(void)
-{
- int off;
- u32 *buf = (u32 *)&clear_page_array;
- struct uasm_label *l = labels;
- struct uasm_reloc *r = relocs;
- int i;
-
- memset(labels, 0, sizeof(labels));
- memset(relocs, 0, sizeof(relocs));
-
- set_prefetch_parameters();
-
- /*
- * This algorithm makes the following assumptions:
- * - The prefetch bias is a multiple of 2 words.
- * - The prefetch bias is less than one page.
- */
- BUG_ON(pref_bias_clear_store % (2 * clear_word_size));
- BUG_ON(PAGE_SIZE < pref_bias_clear_store);
-
- off = PAGE_SIZE - pref_bias_clear_store;
- if (off > 0xffff || !pref_bias_clear_store)
- pg_addiu(&buf, A2, A0, off);
- else
- uasm_i_ori(&buf, A2, A0, off);
-
- if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
- uasm_i_lui(&buf, AT, 0xa000);
-
- off = cache_line_size ? min(8, pref_bias_clear_store / cache_line_size)
- * cache_line_size : 0;
- while (off) {
- build_clear_pref(&buf, -off);
- off -= cache_line_size;
- }
- uasm_l_clear_pref(&l, buf);
- do {
- build_clear_pref(&buf, off);
- build_clear_store(&buf, off);
- off += clear_word_size;
- } while (off < half_clear_loop_size);
- pg_addiu(&buf, A0, A0, 2 * off);
- off = -off;
- do {
- build_clear_pref(&buf, off);
- if (off == -clear_word_size)
- uasm_il_bne(&buf, &r, A0, A2, label_clear_pref);
- build_clear_store(&buf, off);
- off += clear_word_size;
- } while (off < 0);
-
- if (pref_bias_clear_store) {
- pg_addiu(&buf, A2, A0, pref_bias_clear_store);
- uasm_l_clear_nopref(&l, buf);
- off = 0;
- do {
- build_clear_store(&buf, off);
- off += clear_word_size;
- } while (off < half_clear_loop_size);
- pg_addiu(&buf, A0, A0, 2 * off);
- off = -off;
- do {
- if (off == -clear_word_size)
- uasm_il_bne(&buf, &r, A0, A2,
- label_clear_nopref);
- build_clear_store(&buf, off);
- off += clear_word_size;
- } while (off < 0);
- }
-
- uasm_i_jr(&buf, RA);
- uasm_i_nop(&buf);
-
- BUG_ON(buf > clear_page_array + ARRAY_SIZE(clear_page_array));
-
- uasm_resolve_relocs(relocs, labels);
-
- pr_debug("Synthesized clear page handler (%u instructions).\n",
- (u32)(buf - clear_page_array));
-
- pr_debug("\t.set push\n");
- pr_debug("\t.set noreorder\n");
- for (i = 0; i < (buf - clear_page_array); i++)
- pr_debug("\t.word 0x%08x\n", clear_page_array[i]);
- pr_debug("\t.set pop\n");
-}
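
Note that build_clear_page() above does not clear anything itself: it assembles a page clearer at boot, appending uasm-encoded stores, prefetches and a final jr ra to clear_page_array, which clear_page() is aliased to. A toy model of that emit-into-a-buffer pattern (the encodings below are placeholders, not real MIPS opcodes):

#include <stdint.h>
#include <stdio.h>

static uint32_t buf_storage[16];

/* Append one "instruction" and advance the cursor, like uasm_i_*(). */
static void emit(uint32_t **buf, uint32_t insn)
{
	*(*buf)++ = insn;
}

int main(void)
{
	uint32_t *buf = buf_storage;
	int off, word_size = 8, half_loop = 32;

	/* Unrolled store loop: one fake "sd zero, off(a0)" per word. */
	for (off = 0; off < half_loop; off += word_size)
		emit(&buf, 0xfc000000u | (uint32_t)off);
	emit(&buf, 0x03e00008u);	/* "jr ra" placeholder */

	printf("synthesized %ld instructions\n", (long)(buf - buf_storage));
	return 0;
}
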
-
-static void __cpuinit build_copy_load(u32 **buf, int reg, int off)
-{
- if (cpu_has_64bit_gp_regs) {
- uasm_i_ld(buf, reg, off, A1);
- } else {
- uasm_i_lw(buf, reg, off, A1);
- }
-}
-
-static void __cpuinit build_copy_store(u32 **buf, int reg, int off)
-{
- if (cpu_has_64bit_gp_regs) {
- uasm_i_sd(buf, reg, off, A0);
- } else {
- uasm_i_sw(buf, reg, off, A0);
- }
-}
-
-static inline void build_copy_load_pref(u32 **buf, int off)
-{
- if (off & cache_line_mask())
- return;
-
- if (pref_bias_copy_load)
- uasm_i_pref(buf, pref_src_mode, pref_bias_copy_load + off, A1);
-}
-
-static inline void build_copy_store_pref(u32 **buf, int off)
-{
- if (off & cache_line_mask())
- return;
-
- if (pref_bias_copy_store) {
- uasm_i_pref(buf, pref_dst_mode, pref_bias_copy_store + off,
- A0);
- } else if (cache_line_size == (half_copy_loop_size << 1)) {
- if (cpu_has_cache_cdex_s) {
- uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0);
- } else if (cpu_has_cache_cdex_p) {
- if (R4600_V1_HIT_CACHEOP_WAR && cpu_is_r4600_v1_x()) {
- uasm_i_nop(buf);
- uasm_i_nop(buf);
- uasm_i_nop(buf);
- uasm_i_nop(buf);
- }
-
- if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
- uasm_i_lw(buf, ZERO, ZERO, AT);
-
- uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0);
- }
- }
-}
-
-void __cpuinit build_copy_page(void)
-{
- int off;
- u32 *buf = (u32 *)&copy_page_array;
- struct uasm_label *l = labels;
- struct uasm_reloc *r = relocs;
- int i;
-
- memset(labels, 0, sizeof(labels));
- memset(relocs, 0, sizeof(relocs));
-
- set_prefetch_parameters();
-
- /*
- * This algorithm makes the following assumptions:
- * - All prefetch biases are multiples of 8 words.
- * - The prefetch biases are at most one page.
- * - The store prefetch bias isn't greater than the load
- * prefetch bias.
- */
- BUG_ON(pref_bias_copy_load % (8 * copy_word_size));
- BUG_ON(pref_bias_copy_store % (8 * copy_word_size));
- BUG_ON(PAGE_SIZE < pref_bias_copy_load);
- BUG_ON(pref_bias_copy_store > pref_bias_copy_load);
-
- off = PAGE_SIZE - pref_bias_copy_load;
- if (off > 0xffff || !pref_bias_copy_load)
- pg_addiu(&buf, A2, A0, off);
- else
- uasm_i_ori(&buf, A2, A0, off);
-
- if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
- uasm_i_lui(&buf, AT, 0xa000);
-
- off = cache_line_size ? min(8, pref_bias_copy_load / cache_line_size) *
- cache_line_size : 0;
- while (off) {
- build_copy_load_pref(&buf, -off);
- off -= cache_line_size;
- }
- off = cache_line_size ? min(8, pref_bias_copy_store / cache_line_size) *
- cache_line_size : 0;
- while (off) {
- build_copy_store_pref(&buf, -off);
- off -= cache_line_size;
- }
- uasm_l_copy_pref_both(&l, buf);
- do {
- build_copy_load_pref(&buf, off);
- build_copy_load(&buf, T0, off);
- build_copy_load_pref(&buf, off + copy_word_size);
- build_copy_load(&buf, T1, off + copy_word_size);
- build_copy_load_pref(&buf, off + 2 * copy_word_size);
- build_copy_load(&buf, T2, off + 2 * copy_word_size);
- build_copy_load_pref(&buf, off + 3 * copy_word_size);
- build_copy_load(&buf, T3, off + 3 * copy_word_size);
- build_copy_store_pref(&buf, off);
- build_copy_store(&buf, T0, off);
- build_copy_store_pref(&buf, off + copy_word_size);
- build_copy_store(&buf, T1, off + copy_word_size);
- build_copy_store_pref(&buf, off + 2 * copy_word_size);
- build_copy_store(&buf, T2, off + 2 * copy_word_size);
- build_copy_store_pref(&buf, off + 3 * copy_word_size);
- build_copy_store(&buf, T3, off + 3 * copy_word_size);
- off += 4 * copy_word_size;
- } while (off < half_copy_loop_size);
- pg_addiu(&buf, A1, A1, 2 * off);
- pg_addiu(&buf, A0, A0, 2 * off);
- off = -off;
- do {
- build_copy_load_pref(&buf, off);
- build_copy_load(&buf, T0, off);
- build_copy_load_pref(&buf, off + copy_word_size);
- build_copy_load(&buf, T1, off + copy_word_size);
- build_copy_load_pref(&buf, off + 2 * copy_word_size);
- build_copy_load(&buf, T2, off + 2 * copy_word_size);
- build_copy_load_pref(&buf, off + 3 * copy_word_size);
- build_copy_load(&buf, T3, off + 3 * copy_word_size);
- build_copy_store_pref(&buf, off);
- build_copy_store(&buf, T0, off);
- build_copy_store_pref(&buf, off + copy_word_size);
- build_copy_store(&buf, T1, off + copy_word_size);
- build_copy_store_pref(&buf, off + 2 * copy_word_size);
- build_copy_store(&buf, T2, off + 2 * copy_word_size);
- build_copy_store_pref(&buf, off + 3 * copy_word_size);
- if (off == -(4 * copy_word_size))
- uasm_il_bne(&buf, &r, A2, A0, label_copy_pref_both);
- build_copy_store(&buf, T3, off + 3 * copy_word_size);
- off += 4 * copy_word_size;
- } while (off < 0);
-
- if (pref_bias_copy_load - pref_bias_copy_store) {
- pg_addiu(&buf, A2, A0,
- pref_bias_copy_load - pref_bias_copy_store);
- uasm_l_copy_pref_store(&l, buf);
- off = 0;
- do {
- build_copy_load(&buf, T0, off);
- build_copy_load(&buf, T1, off + copy_word_size);
- build_copy_load(&buf, T2, off + 2 * copy_word_size);
- build_copy_load(&buf, T3, off + 3 * copy_word_size);
- build_copy_store_pref(&buf, off);
- build_copy_store(&buf, T0, off);
- build_copy_store_pref(&buf, off + copy_word_size);
- build_copy_store(&buf, T1, off + copy_word_size);
- build_copy_store_pref(&buf, off + 2 * copy_word_size);
- build_copy_store(&buf, T2, off + 2 * copy_word_size);
- build_copy_store_pref(&buf, off + 3 * copy_word_size);
- build_copy_store(&buf, T3, off + 3 * copy_word_size);
- off += 4 * copy_word_size;
- } while (off < half_copy_loop_size);
- pg_addiu(&buf, A1, A1, 2 * off);
- pg_addiu(&buf, A0, A0, 2 * off);
- off = -off;
- do {
- build_copy_load(&buf, T0, off);
- build_copy_load(&buf, T1, off + copy_word_size);
- build_copy_load(&buf, T2, off + 2 * copy_word_size);
- build_copy_load(&buf, T3, off + 3 * copy_word_size);
- build_copy_store_pref(&buf, off);
- build_copy_store(&buf, T0, off);
- build_copy_store_pref(&buf, off + copy_word_size);
- build_copy_store(&buf, T1, off + copy_word_size);
- build_copy_store_pref(&buf, off + 2 * copy_word_size);
- build_copy_store(&buf, T2, off + 2 * copy_word_size);
- build_copy_store_pref(&buf, off + 3 * copy_word_size);
- if (off == -(4 * copy_word_size))
- uasm_il_bne(&buf, &r, A2, A0,
- label_copy_pref_store);
- build_copy_store(&buf, T3, off + 3 * copy_word_size);
- off += 4 * copy_word_size;
- } while (off < 0);
- }
-
- if (pref_bias_copy_store) {
- pg_addiu(&buf, A2, A0, pref_bias_copy_store);
- uasm_l_copy_nopref(&l, buf);
- off = 0;
- do {
- build_copy_load(&buf, T0, off);
- build_copy_load(&buf, T1, off + copy_word_size);
- build_copy_load(&buf, T2, off + 2 * copy_word_size);
- build_copy_load(&buf, T3, off + 3 * copy_word_size);
- build_copy_store(&buf, T0, off);
- build_copy_store(&buf, T1, off + copy_word_size);
- build_copy_store(&buf, T2, off + 2 * copy_word_size);
- build_copy_store(&buf, T3, off + 3 * copy_word_size);
- off += 4 * copy_word_size;
- } while (off < half_copy_loop_size);
- pg_addiu(&buf, A1, A1, 2 * off);
- pg_addiu(&buf, A0, A0, 2 * off);
- off = -off;
- do {
- build_copy_load(&buf, T0, off);
- build_copy_load(&buf, T1, off + copy_word_size);
- build_copy_load(&buf, T2, off + 2 * copy_word_size);
- build_copy_load(&buf, T3, off + 3 * copy_word_size);
- build_copy_store(&buf, T0, off);
- build_copy_store(&buf, T1, off + copy_word_size);
- build_copy_store(&buf, T2, off + 2 * copy_word_size);
- if (off == -(4 * copy_word_size))
- uasm_il_bne(&buf, &r, A2, A0,
- label_copy_nopref);
- build_copy_store(&buf, T3, off + 3 * copy_word_size);
- off += 4 * copy_word_size;
- } while (off < 0);
- }
-
- uasm_i_jr(&buf, RA);
- uasm_i_nop(&buf);
-
- BUG_ON(buf > copy_page_array + ARRAY_SIZE(copy_page_array));
-
- uasm_resolve_relocs(relocs, labels);
-
- pr_debug("Synthesized copy page handler (%u instructions).\n",
- (u32)(buf - copy_page_array));
-
- pr_debug("\t.set push\n");
- pr_debug("\t.set noreorder\n");
- for (i = 0; i < (buf - copy_page_array); i++)
- pr_debug("\t.word 0x%08x\n", copy_page_array[i]);
- pr_debug("\t.set pop\n");
-}
-
-#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
-
-/*
- * Pad descriptors to cacheline, since each is exclusively owned by a
- * particular CPU.
- */
-struct dmadscr {
- u64 dscr_a;
- u64 dscr_b;
- u64 pad_a;
- u64 pad_b;
-} ____cacheline_aligned_in_smp page_descr[DM_NUM_CHANNELS];
-
-void sb1_dma_init(void)
-{
- int i;
-
- for (i = 0; i < DM_NUM_CHANNELS; i++) {
- const u64 base_val = CPHYSADDR((unsigned long)&page_descr[i]) |
- V_DM_DSCR_BASE_RINGSZ(1);
- void *base_reg = IOADDR(A_DM_REGISTER(i, R_DM_DSCR_BASE));
-
- __raw_writeq(base_val, base_reg);
- __raw_writeq(base_val | M_DM_DSCR_BASE_RESET, base_reg);
- __raw_writeq(base_val | M_DM_DSCR_BASE_ENABL, base_reg);
- }
-}
-
-void clear_page(void *page)
-{
- u64 to_phys = CPHYSADDR((unsigned long)page);
- unsigned int cpu = smp_processor_id();
-
-	/* If the page is not in KSEG0, fall back to the CPU version. */
- if ((long)KSEGX((unsigned long)page) != (long)CKSEG0)
- return clear_page_cpu(page);
-
- page_descr[cpu].dscr_a = to_phys | M_DM_DSCRA_ZERO_MEM |
- M_DM_DSCRA_L2C_DEST | M_DM_DSCRA_INTERRUPT;
- page_descr[cpu].dscr_b = V_DM_DSCRB_SRC_LENGTH(PAGE_SIZE);
- __raw_writeq(1, IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_COUNT)));
-
- /*
-	 * Don't really want to do it this way, but there's no
-	 * reliable way to detect completion other than polling.
- */
- while (!(__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE_DEBUG)))
- & M_DM_DSCR_BASE_INTERRUPT))
- ;
- __raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE)));
-}
-
-void copy_page(void *to, void *from)
-{
- u64 from_phys = CPHYSADDR((unsigned long)from);
- u64 to_phys = CPHYSADDR((unsigned long)to);
- unsigned int cpu = smp_processor_id();
-
-	/* If either page is not in KSEG0, fall back to the CPU version. */
- if ((long)KSEGX((unsigned long)to) != (long)CKSEG0
- || (long)KSEGX((unsigned long)from) != (long)CKSEG0)
- return copy_page_cpu(to, from);
-
- page_descr[cpu].dscr_a = to_phys | M_DM_DSCRA_L2C_DEST |
- M_DM_DSCRA_INTERRUPT;
- page_descr[cpu].dscr_b = from_phys | V_DM_DSCRB_SRC_LENGTH(PAGE_SIZE);
- __raw_writeq(1, IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_COUNT)));
-
- /*
-	 * Don't really want to do it this way, but there's no
-	 * reliable way to detect completion other than polling.
- */
- while (!(__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE_DEBUG)))
- & M_DM_DSCR_BASE_INTERRUPT))
- ;
- __raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE)));
-}
-
-#endif /* CONFIG_SIBYTE_DMA_PAGEOPS */
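
A note on the loop shape build_clear_page() synthesizes above: the page is cleared in two half-loops; the base register is bumped past both halves up front and the second half stores at negative offsets, so the loop-back test is a single pointer comparison whose delay slot stays filled. A rough user-space C sketch of that structure (SKETCH_PAGE_SIZE and SKETCH_HALF_LOOP are made-up stand-ins for PAGE_SIZE and half_clear_loop_size; all prefetch and cache-op handling is omitted):

	#include <stddef.h>
	#include <stdint.h>

	#define SKETCH_PAGE_SIZE 4096
	#define SKETCH_HALF_LOOP 64	/* bytes per half-loop, illustrative */

	static void clear_page_sketch(void *page)
	{
		uint64_t *p = page;
		uint64_t *end = (uint64_t *)((char *)page + SKETCH_PAGE_SIZE);

		do {
			ptrdiff_t off;

			/* First half: forward stores from offset 0. */
			for (off = 0; off < SKETCH_HALF_LOOP / 8; off++)
				p[off] = 0;
			/* Advance past both halves, then store at negative
			 * offsets; only one compare ends the iteration. */
			p += 2 * (SKETCH_HALF_LOOP / 8);
			for (off = -(SKETCH_HALF_LOOP / 8); off < 0; off++)
				p[off] = 0;
		} while (p < end);
	}

	int main(void)
	{
		static uint64_t buf[SKETCH_PAGE_SIZE / 8];

		clear_page_sketch(buf);
		return buf[SKETCH_PAGE_SIZE / 8 - 1] == 0 ? 0 : 1;
	}

build_copy_page() uses the same two-half structure with four registers in flight per step instead of one.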
diff --git a/ANDROID_3.4.5/arch/mips/mm/pgtable-32.c b/ANDROID_3.4.5/arch/mips/mm/pgtable-32.c
deleted file mode 100644
index adc6911b..00000000
--- a/ANDROID_3.4.5/arch/mips/mm/pgtable-32.c
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2003 by Ralf Baechle
- */
-#include <linux/init.h>
-#include <linux/mm.h>
-#include <linux/bootmem.h>
-#include <linux/highmem.h>
-#include <asm/fixmap.h>
-#include <asm/pgtable.h>
-#include <asm/pgalloc.h>
-
-void pgd_init(unsigned long page)
-{
- unsigned long *p = (unsigned long *) page;
- int i;
-
-	for (i = 0; i < USER_PTRS_PER_PGD; i += 8) {
- p[i + 0] = (unsigned long) invalid_pte_table;
- p[i + 1] = (unsigned long) invalid_pte_table;
- p[i + 2] = (unsigned long) invalid_pte_table;
- p[i + 3] = (unsigned long) invalid_pte_table;
- p[i + 4] = (unsigned long) invalid_pte_table;
- p[i + 5] = (unsigned long) invalid_pte_table;
- p[i + 6] = (unsigned long) invalid_pte_table;
- p[i + 7] = (unsigned long) invalid_pte_table;
- }
-}
-
-void __init pagetable_init(void)
-{
- unsigned long vaddr;
- pgd_t *pgd_base;
-#ifdef CONFIG_HIGHMEM
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
-#endif
-
- /* Initialize the entire pgd. */
- pgd_init((unsigned long)swapper_pg_dir);
- pgd_init((unsigned long)swapper_pg_dir
- + sizeof(pgd_t) * USER_PTRS_PER_PGD);
-
- pgd_base = swapper_pg_dir;
-
- /*
- * Fixed mappings:
- */
- vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
- fixrange_init(vaddr, vaddr + FIXADDR_SIZE, pgd_base);
-
-#ifdef CONFIG_HIGHMEM
- /*
- * Permanent kmaps:
- */
- vaddr = PKMAP_BASE;
- fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
-
- pgd = swapper_pg_dir + __pgd_offset(vaddr);
- pud = pud_offset(pgd, vaddr);
- pmd = pmd_offset(pud, vaddr);
- pte = pte_offset_kernel(pmd, vaddr);
- pkmap_page_table = pte;
-#endif
-}
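
The point of pgd_init() above is that every directory slot points at one shared invalid_pte_table, so a lookup of an unmapped address still walks valid memory and only fails at the leaf; pgtable-64.c below plays the same trick one level up with invalid_pmd_table. A self-contained model of the idea (table sizes are arbitrary, not the MIPS values):

	#include <stdint.h>
	#include <stdio.h>

	#define PTRS_PER_DIR 1024

	static uintptr_t invalid_tbl[1024];	/* all zero: no valid entries */

	static void dir_init(uintptr_t *dir)
	{
		int i;

		/* Same 8-way unrolling as pgd_init() above. */
		for (i = 0; i < PTRS_PER_DIR; i += 8) {
			dir[i + 0] = (uintptr_t)invalid_tbl;
			dir[i + 1] = (uintptr_t)invalid_tbl;
			dir[i + 2] = (uintptr_t)invalid_tbl;
			dir[i + 3] = (uintptr_t)invalid_tbl;
			dir[i + 4] = (uintptr_t)invalid_tbl;
			dir[i + 5] = (uintptr_t)invalid_tbl;
			dir[i + 6] = (uintptr_t)invalid_tbl;
			dir[i + 7] = (uintptr_t)invalid_tbl;
		}
	}

	int main(void)
	{
		static uintptr_t dir[PTRS_PER_DIR];

		dir_init(dir);
		printf("slot 0 -> %p\n", (void *)dir[0]);
		return 0;
	}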
diff --git a/ANDROID_3.4.5/arch/mips/mm/pgtable-64.c b/ANDROID_3.4.5/arch/mips/mm/pgtable-64.c
deleted file mode 100644
index cda4e300..00000000
--- a/ANDROID_3.4.5/arch/mips/mm/pgtable-64.c
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1999, 2000 by Silicon Graphics
- * Copyright (C) 2003 by Ralf Baechle
- */
-#include <linux/init.h>
-#include <linux/mm.h>
-#include <asm/fixmap.h>
-#include <asm/pgtable.h>
-#include <asm/pgalloc.h>
-
-void pgd_init(unsigned long page)
-{
- unsigned long *p, *end;
- unsigned long entry;
-
-#ifdef __PAGETABLE_PMD_FOLDED
- entry = (unsigned long)invalid_pte_table;
-#else
- entry = (unsigned long)invalid_pmd_table;
-#endif
-
- p = (unsigned long *) page;
- end = p + PTRS_PER_PGD;
-
- while (p < end) {
- p[0] = entry;
- p[1] = entry;
- p[2] = entry;
- p[3] = entry;
- p[4] = entry;
- p[5] = entry;
- p[6] = entry;
- p[7] = entry;
- p += 8;
- }
-}
-
-#ifndef __PAGETABLE_PMD_FOLDED
-void pmd_init(unsigned long addr, unsigned long pagetable)
-{
- unsigned long *p, *end;
-
- p = (unsigned long *) addr;
- end = p + PTRS_PER_PMD;
-
- while (p < end) {
- p[0] = pagetable;
- p[1] = pagetable;
- p[2] = pagetable;
- p[3] = pagetable;
- p[4] = pagetable;
- p[5] = pagetable;
- p[6] = pagetable;
- p[7] = pagetable;
- p += 8;
- }
-}
-#endif
-
-void __init pagetable_init(void)
-{
- unsigned long vaddr;
- pgd_t *pgd_base;
-
- /* Initialize the entire pgd. */
- pgd_init((unsigned long)swapper_pg_dir);
-#ifndef __PAGETABLE_PMD_FOLDED
- pmd_init((unsigned long)invalid_pmd_table, (unsigned long)invalid_pte_table);
-#endif
- pgd_base = swapper_pg_dir;
- /*
- * Fixed mappings:
- */
- vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
- fixrange_init(vaddr, vaddr + FIXADDR_SIZE, pgd_base);
-}
diff --git a/ANDROID_3.4.5/arch/mips/mm/sc-ip22.c b/ANDROID_3.4.5/arch/mips/mm/sc-ip22.c
deleted file mode 100644
index 1eb708ef..00000000
--- a/ANDROID_3.4.5/arch/mips/mm/sc-ip22.c
+++ /dev/null
@@ -1,176 +0,0 @@
-/*
- * sc-ip22.c: Indy cache management functions.
- *
- * Copyright (C) 1997, 2001 Ralf Baechle (ralf@gnu.org),
- * derived from r4xx0.c by David S. Miller (davem@davemloft.net).
- */
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-
-#include <asm/bcache.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/bootinfo.h>
-#include <asm/sgi/ip22.h>
-#include <asm/sgi/mc.h>
-
-/* Secondary cache size in bytes, if present. */
-static unsigned long scache_size;
-
-#undef DEBUG_CACHE
-
-#define SC_SIZE 0x00080000
-#define SC_LINE 32
-#define CI_MASK (SC_SIZE - SC_LINE)
-#define SC_INDEX(n) ((n) & CI_MASK)
-
-static inline void indy_sc_wipe(unsigned long first, unsigned long last)
-{
- unsigned long tmp;
-
- __asm__ __volatile__(
- ".set\tpush\t\t\t# indy_sc_wipe\n\t"
- ".set\tnoreorder\n\t"
- ".set\tmips3\n\t"
- ".set\tnoat\n\t"
- "mfc0\t%2, $12\n\t"
- "li\t$1, 0x80\t\t\t# Go 64 bit\n\t"
- "mtc0\t$1, $12\n\t"
-
- "dli\t$1, 0x9000000080000000\n\t"
- "or\t%0, $1\t\t\t# first line to flush\n\t"
- "or\t%1, $1\t\t\t# last line to flush\n\t"
- ".set\tat\n\t"
-
- "1:\tsw\t$0, 0(%0)\n\t"
- "bne\t%0, %1, 1b\n\t"
- " daddu\t%0, 32\n\t"
-
- "mtc0\t%2, $12\t\t\t# Back to 32 bit\n\t"
- "nop; nop; nop; nop;\n\t"
- ".set\tpop"
- : "=r" (first), "=r" (last), "=&r" (tmp)
- : "0" (first), "1" (last));
-}
-
-static void indy_sc_wback_invalidate(unsigned long addr, unsigned long size)
-{
- unsigned long first_line, last_line;
- unsigned long flags;
-
-#ifdef DEBUG_CACHE
- printk("indy_sc_wback_invalidate[%08lx,%08lx]", addr, size);
-#endif
-
- /* Catch bad driver code */
- BUG_ON(size == 0);
-
- /* Which lines to flush? */
- first_line = SC_INDEX(addr);
- last_line = SC_INDEX(addr + size - 1);
-
- local_irq_save(flags);
- if (first_line <= last_line) {
- indy_sc_wipe(first_line, last_line);
- goto out;
- }
-
- indy_sc_wipe(first_line, SC_SIZE - SC_LINE);
- indy_sc_wipe(0, last_line);
-out:
- local_irq_restore(flags);
-}
-
-static void indy_sc_enable(void)
-{
- unsigned long addr, tmp1, tmp2;
-
- /* This is really cool... */
-#ifdef DEBUG_CACHE
- printk("Enabling R4600 SCACHE\n");
-#endif
- __asm__ __volatile__(
- ".set\tpush\n\t"
- ".set\tnoreorder\n\t"
- ".set\tmips3\n\t"
- "mfc0\t%2, $12\n\t"
- "nop; nop; nop; nop;\n\t"
- "li\t%1, 0x80\n\t"
- "mtc0\t%1, $12\n\t"
- "nop; nop; nop; nop;\n\t"
- "li\t%0, 0x1\n\t"
- "dsll\t%0, 31\n\t"
- "lui\t%1, 0x9000\n\t"
- "dsll32\t%1, 0\n\t"
- "or\t%0, %1, %0\n\t"
- "sb\t$0, 0(%0)\n\t"
- "mtc0\t$0, $12\n\t"
- "nop; nop; nop; nop;\n\t"
- "mtc0\t%2, $12\n\t"
- "nop; nop; nop; nop;\n\t"
- ".set\tpop"
- : "=r" (tmp1), "=r" (tmp2), "=r" (addr));
-}
-
-static void indy_sc_disable(void)
-{
- unsigned long tmp1, tmp2, tmp3;
-
-#ifdef DEBUG_CACHE
- printk("Disabling R4600 SCACHE\n");
-#endif
- __asm__ __volatile__(
- ".set\tpush\n\t"
- ".set\tnoreorder\n\t"
- ".set\tmips3\n\t"
- "li\t%0, 0x1\n\t"
- "dsll\t%0, 31\n\t"
- "lui\t%1, 0x9000\n\t"
- "dsll32\t%1, 0\n\t"
- "or\t%0, %1, %0\n\t"
- "mfc0\t%2, $12\n\t"
- "nop; nop; nop; nop\n\t"
- "li\t%1, 0x80\n\t"
- "mtc0\t%1, $12\n\t"
- "nop; nop; nop; nop\n\t"
- "sh\t$0, 0(%0)\n\t"
- "mtc0\t$0, $12\n\t"
- "nop; nop; nop; nop\n\t"
- "mtc0\t%2, $12\n\t"
- "nop; nop; nop; nop\n\t"
- ".set\tpop"
- : "=r" (tmp1), "=r" (tmp2), "=r" (tmp3));
-}
-
-static inline int __init indy_sc_probe(void)
-{
- unsigned int size = ip22_eeprom_read(&sgimc->eeprom, 17);
- if (size == 0)
- return 0;
-
- size <<= PAGE_SHIFT;
- printk(KERN_INFO "R4600/R5000 SCACHE size %dK, linesize 32 bytes.\n",
- size >> 10);
- scache_size = size;
-
- return 1;
-}
-
-/* XXX Check with wje if the Indy caches can differentiate between
- writeback + invalidate and just invalidate. */
-static struct bcache_ops indy_sc_ops = {
- .bc_enable = indy_sc_enable,
- .bc_disable = indy_sc_disable,
- .bc_wback_inv = indy_sc_wback_invalidate,
- .bc_inv = indy_sc_wback_invalidate
-};
-
-void __cpuinit indy_sc_init(void)
-{
- if (indy_sc_probe()) {
- indy_sc_enable();
- bcops = &indy_sc_ops;
- }
-}
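
indy_sc_wback_invalidate() works on cache indices rather than addresses, so a range whose last index wraps below its first must be wiped as two spans; that is the first_line > last_line case above. The index arithmetic in isolation (plain C, runnable anywhere; the wipe() stub stands in for indy_sc_wipe()):

	#include <stdio.h>

	#define SC_SIZE		0x00080000
	#define SC_LINE		32
	#define CI_MASK		(SC_SIZE - SC_LINE)
	#define SC_INDEX(n)	((n) & CI_MASK)

	static void wipe(unsigned long first, unsigned long last)
	{
		printf("wipe lines 0x%05lx..0x%05lx\n", first, last);
	}

	static void wback_inv(unsigned long addr, unsigned long size)
	{
		unsigned long first = SC_INDEX(addr);
		unsigned long last = SC_INDEX(addr + size - 1);

		if (first <= last) {
			wipe(first, last);
			return;
		}
		/* End index wrapped past the top: split into two spans. */
		wipe(first, SC_SIZE - SC_LINE);
		wipe(0, last);
	}

	int main(void)
	{
		wback_inv(0x00001000, 0x100);	/* one span */
		wback_inv(0x0007fff0, 0x40);	/* wraps: two spans */
		return 0;
	}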
diff --git a/ANDROID_3.4.5/arch/mips/mm/sc-mips.c b/ANDROID_3.4.5/arch/mips/mm/sc-mips.c
deleted file mode 100644
index 93d937b4..00000000
--- a/ANDROID_3.4.5/arch/mips/mm/sc-mips.c
+++ /dev/null
@@ -1,145 +0,0 @@
-/*
- * Copyright (C) 2006 Chris Dearman (chris@mips.com),
- */
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-
-#include <asm/mipsregs.h>
-#include <asm/bcache.h>
-#include <asm/cacheops.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/mmu_context.h>
-#include <asm/r4kcache.h>
-
-/*
- * MIPS32/MIPS64 L2 cache handling
- */
-
-/*
- * Writeback and invalidate the secondary cache before DMA.
- */
-static void mips_sc_wback_inv(unsigned long addr, unsigned long size)
-{
- blast_scache_range(addr, addr + size);
-}
-
-/*
- * Invalidate the secondary cache before DMA.
- */
-static void mips_sc_inv(unsigned long addr, unsigned long size)
-{
- unsigned long lsize = cpu_scache_line_size();
- unsigned long almask = ~(lsize - 1);
-
- cache_op(Hit_Writeback_Inv_SD, addr & almask);
- cache_op(Hit_Writeback_Inv_SD, (addr + size - 1) & almask);
- blast_inv_scache_range(addr, addr + size);
-}
-
-static void mips_sc_enable(void)
-{
- /* L2 cache is permanently enabled */
-}
-
-static void mips_sc_disable(void)
-{
- /* L2 cache is permanently enabled */
-}
-
-static struct bcache_ops mips_sc_ops = {
- .bc_enable = mips_sc_enable,
- .bc_disable = mips_sc_disable,
- .bc_wback_inv = mips_sc_wback_inv,
- .bc_inv = mips_sc_inv
-};
-
-/*
- * Check whether the L2 cache controller is activated on a particular
- * platform. MTI's L2 controller and the L2 cache controller of Broadcom's
- * BMIPS cores both use bit 12 of c0_config2 as an "L2 Bypass" bit; when it
- * is set, the cache is disabled. However, there is no guarantee that this
- * holds on all platforms: the spec defines bits 12..15 as implementation
- * defined, so the function below will eventually have to be replaced by a
- * platform-specific probe.
- */
-static inline int mips_sc_is_activated(struct cpuinfo_mips *c)
-{
- unsigned int config2 = read_c0_config2();
- unsigned int tmp;
-
- /* Check the bypass bit (L2B) */
- switch (c->cputype) {
- case CPU_34K:
- case CPU_74K:
- case CPU_1004K:
- case CPU_BMIPS5000:
- if (config2 & (1 << 12))
- return 0;
- }
-
- tmp = (config2 >> 4) & 0x0f;
- if (0 < tmp && tmp <= 7)
- c->scache.linesz = 2 << tmp;
- else
- return 0;
- return 1;
-}
-
-static inline int __init mips_sc_probe(void)
-{
- struct cpuinfo_mips *c = &current_cpu_data;
- unsigned int config1, config2;
- unsigned int tmp;
-
- /* Mark as not present until probe completed */
- c->scache.flags |= MIPS_CACHE_NOT_PRESENT;
-
- /* Ignore anything but MIPSxx processors */
- if (c->isa_level != MIPS_CPU_ISA_M32R1 &&
- c->isa_level != MIPS_CPU_ISA_M32R2 &&
- c->isa_level != MIPS_CPU_ISA_M64R1 &&
- c->isa_level != MIPS_CPU_ISA_M64R2)
- return 0;
-
- /* Does this MIPS32/MIPS64 CPU have a config2 register? */
- config1 = read_c0_config1();
- if (!(config1 & MIPS_CONF_M))
- return 0;
-
- config2 = read_c0_config2();
-
- if (!mips_sc_is_activated(c))
- return 0;
-
- tmp = (config2 >> 8) & 0x0f;
-	if (tmp <= 7)
- c->scache.sets = 64 << tmp;
- else
- return 0;
-
- tmp = (config2 >> 0) & 0x0f;
-	if (tmp <= 7)
- c->scache.ways = tmp + 1;
- else
- return 0;
-
- c->scache.waysize = c->scache.sets * c->scache.linesz;
- c->scache.waybit = __ffs(c->scache.waysize);
-
- c->scache.flags &= ~MIPS_CACHE_NOT_PRESENT;
-
- return 1;
-}
-
-int __cpuinit mips_sc_init(void)
-{
- int found = mips_sc_probe();
- if (found) {
- mips_sc_enable();
- bcops = &mips_sc_ops;
- }
- return found;
-}
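
mips_sc_probe() and mips_sc_is_activated() decode the L2 geometry from c0_config2: SL (bits 7:4) gives the line size, SS (bits 11:8) the sets per way, SA (bits 3:0) the associativity. A worked decode of the same fields; the sample config2 value is invented for illustration:

	#include <stdio.h>

	int main(void)
	{
		unsigned int config2 = (2 << 8) | (3 << 4) | 3; /* hypothetical */
		unsigned int sl = (config2 >> 4) & 0x0f;
		unsigned int ss = (config2 >> 8) & 0x0f;
		unsigned int sa = (config2 >> 0) & 0x0f;
		unsigned int linesz, sets, ways;

		if (sl == 0 || sl > 7 || ss > 7 || sa > 7)
			return 1;	/* no L2 or unsupported encoding */

		linesz = 2 << sl;	/* 16 bytes for sl == 3 */
		sets = 64 << ss;	/* 256 per way for ss == 2 */
		ways = sa + 1;		/* 4 for sa == 3 */
		printf("L2: %u bytes\n", linesz * sets * ways);
		return 0;
	}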
diff --git a/ANDROID_3.4.5/arch/mips/mm/sc-r5k.c b/ANDROID_3.4.5/arch/mips/mm/sc-r5k.c
deleted file mode 100644
index 8d90ff25..00000000
--- a/ANDROID_3.4.5/arch/mips/mm/sc-r5k.c
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Copyright (C) 1997, 2001 Ralf Baechle (ralf@gnu.org),
- * derived from r4xx0.c by David S. Miller (davem@davemloft.net).
- */
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-
-#include <asm/mipsregs.h>
-#include <asm/bcache.h>
-#include <asm/cacheops.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/mmu_context.h>
-#include <asm/r4kcache.h>
-
-/* Secondary cache size in bytes, if present. */
-static unsigned long scache_size;
-
-#define SC_LINE 32
-#define SC_PAGE (128*SC_LINE)
-
-static inline void blast_r5000_scache(void)
-{
- unsigned long start = INDEX_BASE;
- unsigned long end = start + scache_size;
-
-	while (start < end) {
- cache_op(R5K_Page_Invalidate_S, start);
- start += SC_PAGE;
- }
-}
-
-static void r5k_dma_cache_inv_sc(unsigned long addr, unsigned long size)
-{
- unsigned long end, a;
-
- /* Catch bad driver code */
- BUG_ON(size == 0);
-
- if (size >= scache_size) {
- blast_r5000_scache();
- return;
- }
-
-	/*
-	 * On the R5000 secondary cache we cannot invalidate less than a
-	 * page at a time. The secondary cache is physically indexed,
-	 * write-through.
-	 */
- a = addr & ~(SC_PAGE - 1);
- end = (addr + size - 1) & ~(SC_PAGE - 1);
- while (a <= end) {
- cache_op(R5K_Page_Invalidate_S, a);
- a += SC_PAGE;
- }
-}
-
-static void r5k_sc_enable(void)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- set_c0_config(R5K_CONF_SE);
- blast_r5000_scache();
- local_irq_restore(flags);
-}
-
-static void r5k_sc_disable(void)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- blast_r5000_scache();
- clear_c0_config(R5K_CONF_SE);
- local_irq_restore(flags);
-}
-
-static inline int __init r5k_sc_probe(void)
-{
- unsigned long config = read_c0_config();
-
- if (config & CONF_SC)
-		return 0;
-
- scache_size = (512 * 1024) << ((config & R5K_CONF_SS) >> 20);
-
- printk("R5000 SCACHE size %ldkB, linesize 32 bytes.\n",
- scache_size >> 10);
-
- return 1;
-}
-
-static struct bcache_ops r5k_sc_ops = {
- .bc_enable = r5k_sc_enable,
- .bc_disable = r5k_sc_disable,
- .bc_wback_inv = r5k_dma_cache_inv_sc,
- .bc_inv = r5k_dma_cache_inv_sc
-};
-
-void __cpuinit r5k_sc_init(void)
-{
- if (r5k_sc_probe()) {
- r5k_sc_enable();
- bcops = &r5k_sc_ops;
- }
-}
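
Since the R5000 secondary cache is only invalidated a page at a time, r5k_dma_cache_inv_sc() rounds both ends of the range down to an SC_PAGE boundary and runs an inclusive loop. The rounding on its own, with an example range that straddles one boundary:

	#include <stdio.h>

	#define SC_LINE 32
	#define SC_PAGE (128 * SC_LINE)	/* 4096 bytes */

	int main(void)
	{
		unsigned long addr = 0x1f80, size = 0x100; /* crosses a boundary */
		unsigned long a = addr & ~(SC_PAGE - 1);
		unsigned long end = (addr + size - 1) & ~(SC_PAGE - 1);
		int n = 0;

		for (; a <= end; a += SC_PAGE)
			n++;	/* one R5K_Page_Invalidate_S per page */
		printf("%d page invalidates\n", n);	/* prints 2 */
		return 0;
	}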
diff --git a/ANDROID_3.4.5/arch/mips/mm/sc-rm7k.c b/ANDROID_3.4.5/arch/mips/mm/sc-rm7k.c
deleted file mode 100644
index 274af3be..00000000
--- a/ANDROID_3.4.5/arch/mips/mm/sc-rm7k.c
+++ /dev/null
@@ -1,270 +0,0 @@
-/*
- * sc-rm7k.c: RM7000 cache management functions.
- *
- * Copyright (C) 1997, 2001, 2003, 2004 Ralf Baechle (ralf@linux-mips.org)
- */
-
-#undef DEBUG
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/bitops.h>
-
-#include <asm/addrspace.h>
-#include <asm/bcache.h>
-#include <asm/cacheops.h>
-#include <asm/mipsregs.h>
-#include <asm/processor.h>
-#include <asm/sections.h>
-#include <asm/cacheflush.h> /* for run_uncached() */
-
-/* Primary cache parameters. */
-#define sc_lsize 32
-#define tc_pagesize (32*128)
-
-/* Secondary cache parameters. */
-#define scache_size (256*1024) /* Fixed to 256KiB on RM7000 */
-
-/* Tertiary cache parameters */
-#define tc_lsize 32
-
-extern unsigned long icache_way_size, dcache_way_size;
-static unsigned long tcache_size;
-
-#include <asm/r4kcache.h>
-
-static int rm7k_tcache_init;
-
-/*
- * Write back and invalidate the primary dcache before DMA.
- * (XXX These need to be fixed ...)
- */
-static void rm7k_sc_wback_inv(unsigned long addr, unsigned long size)
-{
- unsigned long end, a;
-
- pr_debug("rm7k_sc_wback_inv[%08lx,%08lx]", addr, size);
-
- /* Catch bad driver code */
- BUG_ON(size == 0);
-
- blast_scache_range(addr, addr + size);
-
- if (!rm7k_tcache_init)
- return;
-
- a = addr & ~(tc_pagesize - 1);
- end = (addr + size - 1) & ~(tc_pagesize - 1);
-	while (1) {
- invalidate_tcache_page(a); /* Page_Invalidate_T */
- if (a == end)
- break;
- a += tc_pagesize;
- }
-}
-
-static void rm7k_sc_inv(unsigned long addr, unsigned long size)
-{
- unsigned long end, a;
-
- pr_debug("rm7k_sc_inv[%08lx,%08lx]", addr, size);
-
- /* Catch bad driver code */
- BUG_ON(size == 0);
-
- blast_inv_scache_range(addr, addr + size);
-
- if (!rm7k_tcache_init)
- return;
-
- a = addr & ~(tc_pagesize - 1);
- end = (addr + size - 1) & ~(tc_pagesize - 1);
-	while (1) {
- invalidate_tcache_page(a); /* Page_Invalidate_T */
- if (a == end)
- break;
- a += tc_pagesize;
- }
-}
-
-static void blast_rm7k_tcache(void)
-{
- unsigned long start = CKSEG0ADDR(0);
- unsigned long end = start + tcache_size;
-
- write_c0_taglo(0);
-
- while (start < end) {
- cache_op(Page_Invalidate_T, start);
- start += tc_pagesize;
- }
-}
-
-/*
- * This function is executed in uncached address space.
- */
-static __cpuinit void __rm7k_tc_enable(void)
-{
- int i;
-
- set_c0_config(RM7K_CONF_TE);
-
- write_c0_taglo(0);
- write_c0_taghi(0);
-
- for (i = 0; i < tcache_size; i += tc_lsize)
- cache_op(Index_Store_Tag_T, CKSEG0ADDR(i));
-}
-
-static __cpuinit void rm7k_tc_enable(void)
-{
- if (read_c0_config() & RM7K_CONF_TE)
- return;
-
- BUG_ON(tcache_size == 0);
-
- run_uncached(__rm7k_tc_enable);
-}
-
-/*
- * This function is executed in uncached address space.
- */
-static __cpuinit void __rm7k_sc_enable(void)
-{
- int i;
-
- set_c0_config(RM7K_CONF_SE);
-
- write_c0_taglo(0);
- write_c0_taghi(0);
-
- for (i = 0; i < scache_size; i += sc_lsize)
- cache_op(Index_Store_Tag_SD, CKSEG0ADDR(i));
-}
-
-static __cpuinit void rm7k_sc_enable(void)
-{
- if (read_c0_config() & RM7K_CONF_SE)
- return;
-
- pr_info("Enabling secondary cache...\n");
- run_uncached(__rm7k_sc_enable);
-
- if (rm7k_tcache_init)
- rm7k_tc_enable();
-}
-
-static void rm7k_tc_disable(void)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- blast_rm7k_tcache();
- clear_c0_config(RM7K_CONF_TE);
-	local_irq_restore(flags);
-}
-
-static void rm7k_sc_disable(void)
-{
- clear_c0_config(RM7K_CONF_SE);
-
- if (rm7k_tcache_init)
- rm7k_tc_disable();
-}
-
-static struct bcache_ops rm7k_sc_ops = {
- .bc_enable = rm7k_sc_enable,
- .bc_disable = rm7k_sc_disable,
- .bc_wback_inv = rm7k_sc_wback_inv,
- .bc_inv = rm7k_sc_inv
-};
-
-/*
- * This is a probing function like the one found in c-r4k.c; we look for
- * the wrap-around point with different addresses.
- */
-static __cpuinit void __probe_tcache(void)
-{
- unsigned long flags, addr, begin, end, pow2;
-
- begin = (unsigned long) &_stext;
- begin &= ~((8 * 1024 * 1024) - 1);
- end = begin + (8 * 1024 * 1024);
-
- local_irq_save(flags);
-
- set_c0_config(RM7K_CONF_TE);
-
-	/* Fill lines at each candidate-size offset with a valid tag */
- pow2 = (256 * 1024);
- for (addr = begin; addr <= end; addr = (begin + pow2)) {
- unsigned long *p = (unsigned long *) addr;
- __asm__ __volatile__("nop" : : "r" (*p));
- pow2 <<= 1;
- }
-
-	/* Plant a zero tag in the first line, to check for later */
- write_c0_taglo(0);
- write_c0_taghi(0);
- cache_op(Index_Store_Tag_T, begin);
-
- /* Look for the wrap-around */
- pow2 = (512 * 1024);
- for (addr = begin + (512 * 1024); addr <= end; addr = begin + pow2) {
- cache_op(Index_Load_Tag_T, addr);
- if (!read_c0_taglo())
- break;
- pow2 <<= 1;
- }
-
- addr -= begin;
- tcache_size = addr;
-
- clear_c0_config(RM7K_CONF_TE);
-
- local_irq_restore(flags);
-}
-
-void __cpuinit rm7k_sc_init(void)
-{
- struct cpuinfo_mips *c = &current_cpu_data;
- unsigned int config = read_c0_config();
-
- if ((config & RM7K_CONF_SC))
- return;
-
- c->scache.linesz = sc_lsize;
- c->scache.ways = 4;
-	c->scache.waybit = __ffs(scache_size / c->scache.ways);
- c->scache.waysize = scache_size / c->scache.ways;
- c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);
- printk(KERN_INFO "Secondary cache size %dK, linesize %d bytes.\n",
- (scache_size >> 10), sc_lsize);
-
- if (!(config & RM7K_CONF_SE))
- rm7k_sc_enable();
-
- bcops = &rm7k_sc_ops;
-
- /*
- * While we're at it let's deal with the tertiary cache.
- */
-
- rm7k_tcache_init = 0;
- tcache_size = 0;
-
- if (config & RM7K_CONF_TC)
- return;
-
- /*
- * No efficient way to ask the hardware for the size of the tcache,
-	 * so we must probe for it.
- */
- run_uncached(__probe_tcache);
- rm7k_tc_enable();
- rm7k_tcache_init = 1;
- c->tcache.linesz = tc_lsize;
- c->tcache.ways = 1;
- pr_info("Tertiary cache size %ldK.\n", (tcache_size >> 10));
-}
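
__probe_tcache() sizes the tertiary cache by exploiting direct-mapped aliasing: prime lines at power-of-two offsets, plant a zero tag at the base, then probe at growing power-of-two offsets; the first offset whose tag reads back zero has wrapped onto the base line, so that offset is the cache size. A user-space model with a simulated tag array (the 1 MiB size is arbitrary):

	#include <stdio.h>

	#define LINE	32
	#define CACHE	(1024 * 1024)	/* pretend tertiary cache size */
	#define NLINES	(CACHE / LINE)

	static unsigned long tag[NLINES];

	static void store_tag(unsigned long addr, unsigned long t)
	{
		tag[(addr / LINE) % NLINES] = t;	/* direct-mapped */
	}

	static unsigned long load_tag(unsigned long addr)
	{
		return tag[(addr / LINE) % NLINES];
	}

	int main(void)
	{
		unsigned long pow2;

		/* Prime candidate lines with nonzero tags ... */
		for (pow2 = 256 * 1024; pow2 <= 8 * 1024 * 1024; pow2 <<= 1)
			store_tag(pow2, 1);
		/* ... then plant a zero tag on the base line. */
		store_tag(0, 0);

		/* First power-of-two offset that reads 0 aliases line 0. */
		for (pow2 = 512 * 1024; pow2 <= 8 * 1024 * 1024; pow2 <<= 1)
			if (load_tag(pow2) == 0)
				break;

		printf("probed size: %lu KiB\n", pow2 >> 10);
		return 0;
	}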
diff --git a/ANDROID_3.4.5/arch/mips/mm/tlb-r3k.c b/ANDROID_3.4.5/arch/mips/mm/tlb-r3k.c
deleted file mode 100644
index a63d1ed0..00000000
--- a/ANDROID_3.4.5/arch/mips/mm/tlb-r3k.c
+++ /dev/null
@@ -1,284 +0,0 @@
-/*
- * tlb-r3k.c: R2000 and R3000 specific MMU/cache code.
- *
- * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
- *
- * with a lot of changes to make this thing work for R3000s
- * Tx39XX R4k style caches added. HK
- * Copyright (C) 1998, 1999, 2000 Harald Koerfgen
- * Copyright (C) 1998 Gleb Raiko & Vladimir Roganov
- * Copyright (C) 2002 Ralf Baechle
- * Copyright (C) 2002 Maciej W. Rozycki
- */
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/smp.h>
-#include <linux/mm.h>
-
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/mmu_context.h>
-#include <asm/tlbmisc.h>
-#include <asm/isadep.h>
-#include <asm/io.h>
-#include <asm/bootinfo.h>
-#include <asm/cpu.h>
-
-#undef DEBUG_TLB
-
-extern void build_tlb_refill_handler(void);
-
-/* CP0 hazard avoidance. */
-#define BARRIER \
- __asm__ __volatile__( \
- ".set push\n\t" \
- ".set noreorder\n\t" \
- "nop\n\t" \
- ".set pop\n\t")
-
-int r3k_have_wired_reg; /* should be in cpu_data? */
-
-/* TLB operations. */
-void local_flush_tlb_all(void)
-{
- unsigned long flags;
- unsigned long old_ctx;
- int entry;
-
-#ifdef DEBUG_TLB
- printk("[tlball]");
-#endif
-
- local_irq_save(flags);
- old_ctx = read_c0_entryhi() & ASID_MASK;
- write_c0_entrylo0(0);
- entry = r3k_have_wired_reg ? read_c0_wired() : 8;
- for (; entry < current_cpu_data.tlbsize; entry++) {
- write_c0_index(entry << 8);
- write_c0_entryhi((entry | 0x80000) << 12);
- BARRIER;
- tlb_write_indexed();
- }
- write_c0_entryhi(old_ctx);
- local_irq_restore(flags);
-}
-
-void local_flush_tlb_mm(struct mm_struct *mm)
-{
- int cpu = smp_processor_id();
-
- if (cpu_context(cpu, mm) != 0) {
-#ifdef DEBUG_TLB
- printk("[tlbmm<%lu>]", (unsigned long)cpu_context(cpu, mm));
-#endif
- drop_mmu_context(mm, cpu);
- }
-}
-
-void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
- unsigned long end)
-{
- struct mm_struct *mm = vma->vm_mm;
- int cpu = smp_processor_id();
-
- if (cpu_context(cpu, mm) != 0) {
- unsigned long size, flags;
-
-#ifdef DEBUG_TLB
- printk("[tlbrange<%lu,0x%08lx,0x%08lx>]",
- cpu_context(cpu, mm) & ASID_MASK, start, end);
-#endif
- local_irq_save(flags);
- size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
- if (size <= current_cpu_data.tlbsize) {
- int oldpid = read_c0_entryhi() & ASID_MASK;
- int newpid = cpu_context(cpu, mm) & ASID_MASK;
-
- start &= PAGE_MASK;
- end += PAGE_SIZE - 1;
- end &= PAGE_MASK;
- while (start < end) {
- int idx;
-
- write_c0_entryhi(start | newpid);
- start += PAGE_SIZE; /* BARRIER */
- tlb_probe();
- idx = read_c0_index();
- write_c0_entrylo0(0);
- write_c0_entryhi(KSEG0);
- if (idx < 0) /* BARRIER */
- continue;
- tlb_write_indexed();
- }
- write_c0_entryhi(oldpid);
- } else {
- drop_mmu_context(mm, cpu);
- }
- local_irq_restore(flags);
- }
-}
-
-void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
-{
- unsigned long size, flags;
-
-#ifdef DEBUG_TLB
- printk("[tlbrange<%lu,0x%08lx,0x%08lx>]", start, end);
-#endif
- local_irq_save(flags);
- size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
- if (size <= current_cpu_data.tlbsize) {
- int pid = read_c0_entryhi();
-
- start &= PAGE_MASK;
- end += PAGE_SIZE - 1;
- end &= PAGE_MASK;
-
- while (start < end) {
- int idx;
-
- write_c0_entryhi(start);
- start += PAGE_SIZE; /* BARRIER */
- tlb_probe();
- idx = read_c0_index();
- write_c0_entrylo0(0);
- write_c0_entryhi(KSEG0);
- if (idx < 0) /* BARRIER */
- continue;
- tlb_write_indexed();
- }
- write_c0_entryhi(pid);
- } else {
- local_flush_tlb_all();
- }
- local_irq_restore(flags);
-}
-
-void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
-{
- int cpu = smp_processor_id();
-
- if (!vma || cpu_context(cpu, vma->vm_mm) != 0) {
- unsigned long flags;
- int oldpid, newpid, idx;
-
-#ifdef DEBUG_TLB
- printk("[tlbpage<%lu,0x%08lx>]", cpu_context(cpu, vma->vm_mm), page);
-#endif
- newpid = cpu_context(cpu, vma->vm_mm) & ASID_MASK;
- page &= PAGE_MASK;
- local_irq_save(flags);
- oldpid = read_c0_entryhi() & ASID_MASK;
- write_c0_entryhi(page | newpid);
- BARRIER;
- tlb_probe();
- idx = read_c0_index();
- write_c0_entrylo0(0);
- write_c0_entryhi(KSEG0);
- if (idx < 0) /* BARRIER */
- goto finish;
- tlb_write_indexed();
-
-finish:
- write_c0_entryhi(oldpid);
- local_irq_restore(flags);
- }
-}
-
-void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
-{
- unsigned long flags;
- int idx, pid;
-
- /*
-	 * Handle the debugger faulting in pages for the debuggee.
- */
- if (current->active_mm != vma->vm_mm)
- return;
-
- pid = read_c0_entryhi() & ASID_MASK;
-
-#ifdef DEBUG_TLB
-	if ((pid != (cpu_context(smp_processor_id(), vma->vm_mm) & ASID_MASK)) ||
-	    (cpu_context(smp_processor_id(), vma->vm_mm) == 0)) {
-		printk("update_mmu_cache: Wheee, bogus tlbpid mmpid=%lu tlbpid=%d\n",
-		       cpu_context(smp_processor_id(), vma->vm_mm), pid);
- }
-#endif
-
- local_irq_save(flags);
- address &= PAGE_MASK;
- write_c0_entryhi(address | pid);
- BARRIER;
- tlb_probe();
- idx = read_c0_index();
- write_c0_entrylo0(pte_val(pte));
- write_c0_entryhi(address | pid);
- if (idx < 0) { /* BARRIER */
- tlb_write_random();
- } else {
- tlb_write_indexed();
- }
- write_c0_entryhi(pid);
- local_irq_restore(flags);
-}
-
-void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
- unsigned long entryhi, unsigned long pagemask)
-{
- unsigned long flags;
- unsigned long old_ctx;
- static unsigned long wired = 0;
-
- if (r3k_have_wired_reg) { /* TX39XX */
- unsigned long old_pagemask;
- unsigned long w;
-
-#ifdef DEBUG_TLB
- printk("[tlbwired<entry lo0 %8x, hi %8x\n, pagemask %8x>]\n",
- entrylo0, entryhi, pagemask);
-#endif
-
- local_irq_save(flags);
- /* Save old context and create impossible VPN2 value */
- old_ctx = read_c0_entryhi() & ASID_MASK;
- old_pagemask = read_c0_pagemask();
- w = read_c0_wired();
- write_c0_wired(w + 1);
- write_c0_index(w << 8);
- write_c0_pagemask(pagemask);
- write_c0_entryhi(entryhi);
- write_c0_entrylo0(entrylo0);
- BARRIER;
- tlb_write_indexed();
-
- write_c0_entryhi(old_ctx);
- write_c0_pagemask(old_pagemask);
- local_flush_tlb_all();
- local_irq_restore(flags);
-
- } else if (wired < 8) {
-#ifdef DEBUG_TLB
- printk("[tlbwired<entry lo0 %8x, hi %8x\n>]\n",
- entrylo0, entryhi);
-#endif
-
- local_irq_save(flags);
- old_ctx = read_c0_entryhi() & ASID_MASK;
- write_c0_entrylo0(entrylo0);
- write_c0_entryhi(entryhi);
- write_c0_index(wired);
- wired++; /* BARRIER */
- tlb_write_indexed();
- write_c0_entryhi(old_ctx);
- local_flush_tlb_all();
- local_irq_restore(flags);
- }
-}
-
-void __cpuinit tlb_init(void)
-{
- local_flush_tlb_all();
-
- build_tlb_refill_handler();
-}
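
Note how little work local_flush_tlb_mm() does: it never touches TLB entries, it just drops the mm's (generation | ASID) context, so the mm gets a fresh ASID the next time it is scheduled and its stale entries become unreachable. A toy model of that scheme; the constants mirror the usual MIPS layout but are purely illustrative:

	#include <stdio.h>

	#define ASID_MASK		0xff
	#define ASID_INC		1
	#define ASID_FIRST_VERSION	0x100

	static unsigned long asid_cache = ASID_FIRST_VERSION;

	struct mm {
		unsigned long context;	/* generation | asid, 0 = none */
	};

	static void local_flush_tlb_all(void) { puts("full TLB flush"); }

	static void get_new_mmu_context(struct mm *mm)
	{
		unsigned long asid = asid_cache + ASID_INC;

		if (!(asid & ASID_MASK)) {	/* ASIDs exhausted */
			local_flush_tlb_all();	/* start a new generation */
			if (!asid)		/* counter wrapped to zero */
				asid = ASID_FIRST_VERSION;
		}
		asid_cache = mm->context = asid;
	}

	static void flush_tlb_mm(struct mm *mm)
	{
		mm->context = 0;	/* stale entries now unreachable */
	}

	int main(void)
	{
		struct mm a = { 0 };

		get_new_mmu_context(&a);
		printf("asid %lu\n", a.context & ASID_MASK);
		flush_tlb_mm(&a);
		get_new_mmu_context(&a);	/* fresh ASID on next use */
		printf("asid %lu\n", a.context & ASID_MASK);
		return 0;
	}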
diff --git a/ANDROID_3.4.5/arch/mips/mm/tlb-r4k.c b/ANDROID_3.4.5/arch/mips/mm/tlb-r4k.c
deleted file mode 100644
index d2572cb2..00000000
--- a/ANDROID_3.4.5/arch/mips/mm/tlb-r4k.c
+++ /dev/null
@@ -1,432 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
- * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
- * Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 2002 MIPS Technologies, Inc. All rights reserved.
- */
-#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/smp.h>
-#include <linux/mm.h>
-#include <linux/hugetlb.h>
-
-#include <asm/cpu.h>
-#include <asm/bootinfo.h>
-#include <asm/mmu_context.h>
-#include <asm/pgtable.h>
-#include <asm/tlbmisc.h>
-
-extern void build_tlb_refill_handler(void);
-
-/*
- * Make sure all entries differ. If they're not different
- * MIPS32 will take revenge ...
- */
-#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
-
-/* Atomicity and interruptibility */
-#ifdef CONFIG_MIPS_MT_SMTC
-
-#include <asm/smtc.h>
-#include <asm/mipsmtregs.h>
-
-#define ENTER_CRITICAL(flags) \
- { \
- unsigned int mvpflags; \
- local_irq_save(flags);\
- mvpflags = dvpe()
-#define EXIT_CRITICAL(flags) \
- evpe(mvpflags); \
- local_irq_restore(flags); \
- }
-#else
-
-#define ENTER_CRITICAL(flags) local_irq_save(flags)
-#define EXIT_CRITICAL(flags) local_irq_restore(flags)
-
-#endif /* CONFIG_MIPS_MT_SMTC */
-
-#if defined(CONFIG_CPU_LOONGSON2)
-/*
- * LOONGSON2 has a four-entry ITLB which is a subset of the DTLB;
- * unfortunately, the ITLB is not totally transparent to software.
- */
-#define FLUSH_ITLB write_c0_diag(4);
-
-#define FLUSH_ITLB_VM(vma) { if ((vma)->vm_flags & VM_EXEC) write_c0_diag(4); }
-
-#else
-
-#define FLUSH_ITLB
-#define FLUSH_ITLB_VM(vma)
-
-#endif
-
-void local_flush_tlb_all(void)
-{
- unsigned long flags;
- unsigned long old_ctx;
- int entry;
-
- ENTER_CRITICAL(flags);
- /* Save old context and create impossible VPN2 value */
- old_ctx = read_c0_entryhi();
- write_c0_entrylo0(0);
- write_c0_entrylo1(0);
-
- entry = read_c0_wired();
-
- /* Blast 'em all away. */
- while (entry < current_cpu_data.tlbsize) {
- /* Make sure all entries differ. */
- write_c0_entryhi(UNIQUE_ENTRYHI(entry));
- write_c0_index(entry);
- mtc0_tlbw_hazard();
- tlb_write_indexed();
- entry++;
- }
- tlbw_use_hazard();
- write_c0_entryhi(old_ctx);
- FLUSH_ITLB;
- EXIT_CRITICAL(flags);
-}
-
-/* All entries common to a mm share an asid. To effectively flush
- these entries, we just bump the asid. */
-void local_flush_tlb_mm(struct mm_struct *mm)
-{
- int cpu;
-
- preempt_disable();
-
- cpu = smp_processor_id();
-
- if (cpu_context(cpu, mm) != 0) {
- drop_mmu_context(mm, cpu);
- }
-
- preempt_enable();
-}
-
-void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
- unsigned long end)
-{
- struct mm_struct *mm = vma->vm_mm;
- int cpu = smp_processor_id();
-
- if (cpu_context(cpu, mm) != 0) {
- unsigned long size, flags;
- int huge = is_vm_hugetlb_page(vma);
-
- ENTER_CRITICAL(flags);
- if (huge) {
- start = round_down(start, HPAGE_SIZE);
- end = round_up(end, HPAGE_SIZE);
- size = (end - start) >> HPAGE_SHIFT;
- } else {
- start = round_down(start, PAGE_SIZE << 1);
- end = round_up(end, PAGE_SIZE << 1);
- size = (end - start) >> (PAGE_SHIFT + 1);
- }
- if (size <= current_cpu_data.tlbsize/2) {
- int oldpid = read_c0_entryhi();
- int newpid = cpu_asid(cpu, mm);
-
- while (start < end) {
- int idx;
-
- write_c0_entryhi(start | newpid);
- if (huge)
- start += HPAGE_SIZE;
- else
- start += (PAGE_SIZE << 1);
- mtc0_tlbw_hazard();
- tlb_probe();
- tlb_probe_hazard();
- idx = read_c0_index();
- write_c0_entrylo0(0);
- write_c0_entrylo1(0);
- if (idx < 0)
- continue;
- /* Make sure all entries differ. */
- write_c0_entryhi(UNIQUE_ENTRYHI(idx));
- mtc0_tlbw_hazard();
- tlb_write_indexed();
- }
- tlbw_use_hazard();
- write_c0_entryhi(oldpid);
- } else {
- drop_mmu_context(mm, cpu);
- }
- FLUSH_ITLB;
- EXIT_CRITICAL(flags);
- }
-}
-
-void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
-{
- unsigned long size, flags;
-
- ENTER_CRITICAL(flags);
- size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
- size = (size + 1) >> 1;
- if (size <= current_cpu_data.tlbsize / 2) {
- int pid = read_c0_entryhi();
-
- start &= (PAGE_MASK << 1);
- end += ((PAGE_SIZE << 1) - 1);
- end &= (PAGE_MASK << 1);
-
- while (start < end) {
- int idx;
-
- write_c0_entryhi(start);
- start += (PAGE_SIZE << 1);
- mtc0_tlbw_hazard();
- tlb_probe();
- tlb_probe_hazard();
- idx = read_c0_index();
- write_c0_entrylo0(0);
- write_c0_entrylo1(0);
- if (idx < 0)
- continue;
- /* Make sure all entries differ. */
- write_c0_entryhi(UNIQUE_ENTRYHI(idx));
- mtc0_tlbw_hazard();
- tlb_write_indexed();
- }
- tlbw_use_hazard();
- write_c0_entryhi(pid);
- } else {
- local_flush_tlb_all();
- }
- FLUSH_ITLB;
- EXIT_CRITICAL(flags);
-}
-
-void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
-{
- int cpu = smp_processor_id();
-
- if (cpu_context(cpu, vma->vm_mm) != 0) {
- unsigned long flags;
- int oldpid, newpid, idx;
-
- newpid = cpu_asid(cpu, vma->vm_mm);
- page &= (PAGE_MASK << 1);
- ENTER_CRITICAL(flags);
- oldpid = read_c0_entryhi();
- write_c0_entryhi(page | newpid);
- mtc0_tlbw_hazard();
- tlb_probe();
- tlb_probe_hazard();
- idx = read_c0_index();
- write_c0_entrylo0(0);
- write_c0_entrylo1(0);
- if (idx < 0)
- goto finish;
- /* Make sure all entries differ. */
- write_c0_entryhi(UNIQUE_ENTRYHI(idx));
- mtc0_tlbw_hazard();
- tlb_write_indexed();
- tlbw_use_hazard();
-
- finish:
- write_c0_entryhi(oldpid);
- FLUSH_ITLB_VM(vma);
- EXIT_CRITICAL(flags);
- }
-}
-
-/*
- * This one is only used for pages with the global bit set so we don't care
- * much about the ASID.
- */
-void local_flush_tlb_one(unsigned long page)
-{
- unsigned long flags;
- int oldpid, idx;
-
- ENTER_CRITICAL(flags);
- oldpid = read_c0_entryhi();
- page &= (PAGE_MASK << 1);
- write_c0_entryhi(page);
- mtc0_tlbw_hazard();
- tlb_probe();
- tlb_probe_hazard();
- idx = read_c0_index();
- write_c0_entrylo0(0);
- write_c0_entrylo1(0);
- if (idx >= 0) {
- /* Make sure all entries differ. */
- write_c0_entryhi(UNIQUE_ENTRYHI(idx));
- mtc0_tlbw_hazard();
- tlb_write_indexed();
- tlbw_use_hazard();
- }
- write_c0_entryhi(oldpid);
- FLUSH_ITLB;
- EXIT_CRITICAL(flags);
-}
-
-/*
- * We will need multiple versions of update_mmu_cache(), one that just
- * updates the TLB with the new pte(s), and another which also checks
- * for the R4k "end of page" hardware bug and applies the workaround.
- */
-void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
-{
- unsigned long flags;
- pgd_t *pgdp;
- pud_t *pudp;
- pmd_t *pmdp;
- pte_t *ptep;
- int idx, pid;
-
- /*
-	 * Handle the debugger faulting in pages for the debuggee.
- */
- if (current->active_mm != vma->vm_mm)
- return;
-
- ENTER_CRITICAL(flags);
-
- pid = read_c0_entryhi() & ASID_MASK;
- address &= (PAGE_MASK << 1);
- write_c0_entryhi(address | pid);
- pgdp = pgd_offset(vma->vm_mm, address);
- mtc0_tlbw_hazard();
- tlb_probe();
- tlb_probe_hazard();
- pudp = pud_offset(pgdp, address);
- pmdp = pmd_offset(pudp, address);
- idx = read_c0_index();
-#ifdef CONFIG_HUGETLB_PAGE
- /* this could be a huge page */
- if (pmd_huge(*pmdp)) {
- unsigned long lo;
- write_c0_pagemask(PM_HUGE_MASK);
- ptep = (pte_t *)pmdp;
- lo = pte_to_entrylo(pte_val(*ptep));
- write_c0_entrylo0(lo);
- write_c0_entrylo1(lo + (HPAGE_SIZE >> 7));
-
- mtc0_tlbw_hazard();
- if (idx < 0)
- tlb_write_random();
- else
- tlb_write_indexed();
- write_c0_pagemask(PM_DEFAULT_MASK);
- } else
-#endif
- {
- ptep = pte_offset_map(pmdp, address);
-
-#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
- write_c0_entrylo0(ptep->pte_high);
- ptep++;
- write_c0_entrylo1(ptep->pte_high);
-#else
- write_c0_entrylo0(pte_to_entrylo(pte_val(*ptep++)));
- write_c0_entrylo1(pte_to_entrylo(pte_val(*ptep)));
-#endif
- mtc0_tlbw_hazard();
- if (idx < 0)
- tlb_write_random();
- else
- tlb_write_indexed();
- }
- tlbw_use_hazard();
- FLUSH_ITLB_VM(vma);
- EXIT_CRITICAL(flags);
-}
-
-void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
- unsigned long entryhi, unsigned long pagemask)
-{
- unsigned long flags;
- unsigned long wired;
- unsigned long old_pagemask;
- unsigned long old_ctx;
-
- ENTER_CRITICAL(flags);
- /* Save old context and create impossible VPN2 value */
- old_ctx = read_c0_entryhi();
- old_pagemask = read_c0_pagemask();
- wired = read_c0_wired();
- write_c0_wired(wired + 1);
- write_c0_index(wired);
- tlbw_use_hazard(); /* What is the hazard here? */
- write_c0_pagemask(pagemask);
- write_c0_entryhi(entryhi);
- write_c0_entrylo0(entrylo0);
- write_c0_entrylo1(entrylo1);
- mtc0_tlbw_hazard();
- tlb_write_indexed();
- tlbw_use_hazard();
-
- write_c0_entryhi(old_ctx);
- tlbw_use_hazard(); /* What is the hazard here? */
- write_c0_pagemask(old_pagemask);
- local_flush_tlb_all();
- EXIT_CRITICAL(flags);
-}
-
-static int __cpuinitdata ntlb;
-static int __init set_ntlb(char *str)
-{
- get_option(&str, &ntlb);
- return 1;
-}
-
-__setup("ntlb=", set_ntlb);
-
-void __cpuinit tlb_init(void)
-{
- /*
- * You should never change this register:
- * - On R4600 1.7 the tlbp never hits for pages smaller than
- * the value in the c0_pagemask register.
- * - The entire mm handling assumes the c0_pagemask register to
- * be set to fixed-size pages.
- */
- write_c0_pagemask(PM_DEFAULT_MASK);
- write_c0_wired(0);
- if (current_cpu_type() == CPU_R10000 ||
- current_cpu_type() == CPU_R12000 ||
- current_cpu_type() == CPU_R14000)
- write_c0_framemask(0);
-
- if (kernel_uses_smartmips_rixi) {
- /*
- * Enable the no read, no exec bits, and enable large virtual
- * address.
- */
- u32 pg = PG_RIE | PG_XIE;
-#ifdef CONFIG_64BIT
- pg |= PG_ELPA;
-#endif
- write_c0_pagegrain(pg);
- }
-
- /* From this point on the ARC firmware is dead. */
- local_flush_tlb_all();
-
- /* Did I tell you that ARC SUCKS? */
-
- if (ntlb) {
- if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
- int wired = current_cpu_data.tlbsize - ntlb;
- write_c0_wired(wired);
- write_c0_index(wired-1);
- printk("Restricting TLB to %d entries\n", ntlb);
- } else
- printk("Ignoring invalid argument ntlb=%d\n", ntlb);
- }
-
- build_tlb_refill_handler();
-}
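
The recurring (PAGE_MASK << 1) rounding and halved loop counts above come from the R4000-style TLB mapping an even/odd page pair per entry: one VPN2 tags both pages, EntryLo0 translates the even one and EntryLo1 the odd one. The addressing arithmetic for a 4 KiB page, as a quick standalone check:

	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)
	#define PAGE_MASK	(~(PAGE_SIZE - 1))

	int main(void)
	{
		unsigned long va = 0x00403000;	/* arbitrary address */
		unsigned long vpn2 = va & (PAGE_MASK << 1);
		int odd = (va >> PAGE_SHIFT) & 1;

		/* Prints: VPN2 base 0x00402000, uses EntryLo1 */
		printf("VPN2 base 0x%08lx, uses EntryLo%d\n", vpn2, odd);
		return 0;
	}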
diff --git a/ANDROID_3.4.5/arch/mips/mm/tlb-r8k.c b/ANDROID_3.4.5/arch/mips/mm/tlb-r8k.c
deleted file mode 100644
index 91c2499f..00000000
--- a/ANDROID_3.4.5/arch/mips/mm/tlb-r8k.c
+++ /dev/null
@@ -1,248 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
- * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
- * Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 2002 MIPS Technologies, Inc. All rights reserved.
- */
-#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/smp.h>
-#include <linux/mm.h>
-
-#include <asm/cpu.h>
-#include <asm/bootinfo.h>
-#include <asm/mmu_context.h>
-#include <asm/pgtable.h>
-
-extern void build_tlb_refill_handler(void);
-
-#define TFP_TLB_SIZE 384
-#define TFP_TLB_SET_SHIFT 7
-
-/* CP0 hazard avoidance. */
-#define BARRIER __asm__ __volatile__(".set noreorder\n\t" \
- "nop; nop; nop; nop; nop; nop;\n\t" \
- ".set reorder\n\t")
-
-void local_flush_tlb_all(void)
-{
- unsigned long flags;
- unsigned long old_ctx;
- int entry;
-
- local_irq_save(flags);
- /* Save old context and create impossible VPN2 value */
- old_ctx = read_c0_entryhi();
- write_c0_entrylo(0);
-
- for (entry = 0; entry < TFP_TLB_SIZE; entry++) {
- write_c0_tlbset(entry >> TFP_TLB_SET_SHIFT);
- write_c0_vaddr(entry << PAGE_SHIFT);
- write_c0_entryhi(CKSEG0 + (entry << (PAGE_SHIFT + 1)));
- mtc0_tlbw_hazard();
- tlb_write();
- }
- tlbw_use_hazard();
- write_c0_entryhi(old_ctx);
- local_irq_restore(flags);
-}
-
-void local_flush_tlb_mm(struct mm_struct *mm)
-{
- int cpu = smp_processor_id();
-
- if (cpu_context(cpu, mm) != 0)
- drop_mmu_context(mm, cpu);
-}
-
-void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
- unsigned long end)
-{
- struct mm_struct *mm = vma->vm_mm;
- int cpu = smp_processor_id();
- unsigned long flags;
- int oldpid, newpid, size;
-
- if (!cpu_context(cpu, mm))
- return;
-
- size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
- size = (size + 1) >> 1;
-
- local_irq_save(flags);
-
- if (size > TFP_TLB_SIZE / 2) {
- drop_mmu_context(mm, cpu);
- goto out_restore;
- }
-
- oldpid = read_c0_entryhi();
- newpid = cpu_asid(cpu, mm);
-
- write_c0_entrylo(0);
-
- start &= PAGE_MASK;
- end += (PAGE_SIZE - 1);
- end &= PAGE_MASK;
- while (start < end) {
- signed long idx;
-
- write_c0_vaddr(start);
- write_c0_entryhi(start);
- start += PAGE_SIZE;
- tlb_probe();
- idx = read_c0_tlbset();
- if (idx < 0)
- continue;
-
- write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
- tlb_write();
- }
- write_c0_entryhi(oldpid);
-
-out_restore:
- local_irq_restore(flags);
-}
-
-/* Usable for KV1 addresses only! */
-void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
-{
- unsigned long size, flags;
-
- size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
- size = (size + 1) >> 1;
-
- if (size > TFP_TLB_SIZE / 2) {
- local_flush_tlb_all();
- return;
- }
-
- local_irq_save(flags);
-
- write_c0_entrylo(0);
-
- start &= PAGE_MASK;
- end += (PAGE_SIZE - 1);
- end &= PAGE_MASK;
- while (start < end) {
- signed long idx;
-
- write_c0_vaddr(start);
- write_c0_entryhi(start);
- start += PAGE_SIZE;
- tlb_probe();
- idx = read_c0_tlbset();
- if (idx < 0)
- continue;
-
- write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
- tlb_write();
- }
-
- local_irq_restore(flags);
-}
-
-void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
-{
- int cpu = smp_processor_id();
- unsigned long flags;
- int oldpid, newpid;
- signed long idx;
-
- if (!cpu_context(cpu, vma->vm_mm))
- return;
-
- newpid = cpu_asid(cpu, vma->vm_mm);
- page &= PAGE_MASK;
- local_irq_save(flags);
- oldpid = read_c0_entryhi();
- write_c0_vaddr(page);
- write_c0_entryhi(newpid);
- tlb_probe();
- idx = read_c0_tlbset();
- if (idx < 0)
- goto finish;
-
- write_c0_entrylo(0);
- write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
- tlb_write();
-
-finish:
- write_c0_entryhi(oldpid);
- local_irq_restore(flags);
-}
-
-/*
- * We will need multiple versions of update_mmu_cache(), one that just
- * updates the TLB with the new pte(s), and another which also checks
- * for the R4k "end of page" hardware bug and applies the workaround.
- */
-void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
-{
- unsigned long flags;
- pgd_t *pgdp;
- pmd_t *pmdp;
- pte_t *ptep;
- int pid;
-
- /*
-	 * Handle the debugger faulting in pages for the debuggee.
- */
- if (current->active_mm != vma->vm_mm)
- return;
-
- pid = read_c0_entryhi() & ASID_MASK;
-
- local_irq_save(flags);
- address &= PAGE_MASK;
- write_c0_vaddr(address);
- write_c0_entryhi(pid);
- pgdp = pgd_offset(vma->vm_mm, address);
- pmdp = pmd_offset(pgdp, address);
- ptep = pte_offset_map(pmdp, address);
- tlb_probe();
-
- write_c0_entrylo(pte_val(*ptep++) >> 6);
- tlb_write();
-
- write_c0_entryhi(pid);
- local_irq_restore(flags);
-}
-
-static void __cpuinit probe_tlb(unsigned long config)
-{
- struct cpuinfo_mips *c = &current_cpu_data;
-
- c->tlbsize = 3 * 128; /* 3 sets each 128 entries */
-}
-
-void __cpuinit tlb_init(void)
-{
- unsigned int config = read_c0_config();
- unsigned long status;
-
- probe_tlb(config);
-
- status = read_c0_status();
- status &= ~(ST0_UPS | ST0_KPS);
-#ifdef CONFIG_PAGE_SIZE_4KB
- status |= (TFP_PAGESIZE_4K << 32) | (TFP_PAGESIZE_4K << 36);
-#elif defined(CONFIG_PAGE_SIZE_8KB)
- status |= (TFP_PAGESIZE_8K << 32) | (TFP_PAGESIZE_8K << 36);
-#elif defined(CONFIG_PAGE_SIZE_16KB)
- status |= (TFP_PAGESIZE_16K << 32) | (TFP_PAGESIZE_16K << 36);
-#elif defined(CONFIG_PAGE_SIZE_64KB)
- status |= (TFP_PAGESIZE_64K << 32) | (TFP_PAGESIZE_64K << 36);
-#endif
- write_c0_status(status);
-
- write_c0_wired(0);
-
- local_flush_tlb_all();
-
- build_tlb_refill_handler();
-}
diff --git a/ANDROID_3.4.5/arch/mips/mm/tlbex-fault.S b/ANDROID_3.4.5/arch/mips/mm/tlbex-fault.S
deleted file mode 100644
index e99eaa1f..00000000
--- a/ANDROID_3.4.5/arch/mips/mm/tlbex-fault.S
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1999 Ralf Baechle
- * Copyright (C) 1999 Silicon Graphics, Inc.
- */
-#include <asm/mipsregs.h>
-#include <asm/page.h>
-#include <asm/regdef.h>
-#include <asm/stackframe.h>
-
- .macro tlb_do_page_fault, write
- NESTED(tlb_do_page_fault_\write, PT_SIZE, sp)
- SAVE_ALL
- MFC0 a2, CP0_BADVADDR
- KMODE
- move a0, sp
- REG_S a2, PT_BVADDR(sp)
- li a1, \write
- PTR_LA ra, ret_from_exception
- j do_page_fault
- END(tlb_do_page_fault_\write)
- .endm
-
- tlb_do_page_fault 0
- tlb_do_page_fault 1
diff --git a/ANDROID_3.4.5/arch/mips/mm/tlbex.c b/ANDROID_3.4.5/arch/mips/mm/tlbex.c
deleted file mode 100644
index 0bc485b3..00000000
--- a/ANDROID_3.4.5/arch/mips/mm/tlbex.c
+++ /dev/null
@@ -1,2160 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Synthesize TLB refill handlers at runtime.
- *
- * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
- * Copyright (C) 2005, 2007, 2008, 2009 Maciej W. Rozycki
- * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
- * Copyright (C) 2008, 2009 Cavium Networks, Inc.
- *
- * ... and the days got worse and worse and now you see
- * I've gone completely out of my mind.
- *
- * They're coming to take me away, haha
- * they're coming to take me away, hoho hihi haha
- * to the funny farm where code is beautiful all the time ...
- *
- * (Condolences to Napoleon XIV)
- */
-
-#include <linux/bug.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/smp.h>
-#include <linux/string.h>
-#include <linux/init.h>
-#include <linux/cache.h>
-
-#include <asm/cacheflush.h>
-#include <asm/pgtable.h>
-#include <asm/war.h>
-#include <asm/uasm.h>
-#include <asm/setup.h>
-
-/*
- * TLB load/store/modify handlers.
- *
- * Only the fastpath gets synthesized at runtime; the slowpath for
- * do_page_fault remains normal asm.
- */
-extern void tlb_do_page_fault_0(void);
-extern void tlb_do_page_fault_1(void);
-
-struct work_registers {
- int r1;
- int r2;
- int r3;
-};
-
-struct tlb_reg_save {
- unsigned long a;
- unsigned long b;
-} ____cacheline_aligned_in_smp;
-
-static struct tlb_reg_save handler_reg_save[NR_CPUS];
-
-static inline int r45k_bvahwbug(void)
-{
- /* XXX: We should probe for the presence of this bug, but we don't. */
- return 0;
-}
-
-static inline int r4k_250MHZhwbug(void)
-{
- /* XXX: We should probe for the presence of this bug, but we don't. */
- return 0;
-}
-
-static inline int __maybe_unused bcm1250_m3_war(void)
-{
- return BCM1250_M3_WAR;
-}
-
-static inline int __maybe_unused r10000_llsc_war(void)
-{
- return R10000_LLSC_WAR;
-}
-
-static int use_bbit_insns(void)
-{
- switch (current_cpu_type()) {
- case CPU_CAVIUM_OCTEON:
- case CPU_CAVIUM_OCTEON_PLUS:
- case CPU_CAVIUM_OCTEON2:
- return 1;
- default:
- return 0;
- }
-}
-
-static int use_lwx_insns(void)
-{
- switch (current_cpu_type()) {
- case CPU_CAVIUM_OCTEON2:
- return 1;
- default:
- return 0;
- }
-}
-#if defined(CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE) && \
- CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
-static bool scratchpad_available(void)
-{
- return true;
-}
-static int scratchpad_offset(int i)
-{
- /*
-	 * CVMSEG starts at address -32768 and extends for
-	 * CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE 128-byte cache lines.
- */
- i += 1; /* Kernel use starts at the top and works down. */
- return CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE * 128 - (8 * i) - 32768;
-}
-#else
-static bool scratchpad_available(void)
-{
- return false;
-}
-static int scratchpad_offset(int i)
-{
- BUG();
- /* Really unreachable, but evidently some GCC versions want this. */
- return 0;
-}
-#endif
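
A quick standalone check of the slot arithmetic (the helper name and the single-line CVMSEG size are made up for illustration; the real function uses CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE):

#include <stdio.h>

/* Mirrors scratchpad_offset() above for CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE == 1. */
static int scratchpad_offset_sketch(int i)
{
        i += 1;                 /* kernel slots grow down from the top */
        return 1 * 128 - (8 * i) - 32768;
}

int main(void)
{
        printf("%d\n", scratchpad_offset_sketch(0));    /* -32648 */
        printf("%d\n", scratchpad_offset_sketch(1));    /* -32656 */
        return 0;
}

Each kernel slot is one 64-bit word, allocated downward from the top of the CVMSEG region that starts at -32768.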
-/*
- * Found by experiment: at least some revisions of the 4Kc throw a
- * machine check exception under some circumstances, triggered by
- * invalid values in the index register. Delaying the tlbp instruction
- * until after the next branch, plus adding an additional nop in front
- * of tlbwi/tlbwr, avoids the invalid index register values. Nobody
- * knows why; it's not an issue caused by the core RTL.
- */
-static int __cpuinit m4kc_tlbp_war(void)
-{
- return (current_cpu_data.processor_id & 0xffff00) ==
- (PRID_COMP_MIPS | PRID_IMP_4KC);
-}
-
-/* Handle labels (which must be positive integers). */
-enum label_id {
- label_second_part = 1,
- label_leave,
- label_vmalloc,
- label_vmalloc_done,
- label_tlbw_hazard,
- label_split,
- label_tlbl_goaround1,
- label_tlbl_goaround2,
- label_nopage_tlbl,
- label_nopage_tlbs,
- label_nopage_tlbm,
- label_smp_pgtable_change,
- label_r3000_write_probe_fail,
- label_large_segbits_fault,
-#ifdef CONFIG_HUGETLB_PAGE
- label_tlb_huge_update,
-#endif
-};
-
-UASM_L_LA(_second_part)
-UASM_L_LA(_leave)
-UASM_L_LA(_vmalloc)
-UASM_L_LA(_vmalloc_done)
-UASM_L_LA(_tlbw_hazard)
-UASM_L_LA(_split)
-UASM_L_LA(_tlbl_goaround1)
-UASM_L_LA(_tlbl_goaround2)
-UASM_L_LA(_nopage_tlbl)
-UASM_L_LA(_nopage_tlbs)
-UASM_L_LA(_nopage_tlbm)
-UASM_L_LA(_smp_pgtable_change)
-UASM_L_LA(_r3000_write_probe_fail)
-UASM_L_LA(_large_segbits_fault)
-#ifdef CONFIG_HUGETLB_PAGE
-UASM_L_LA(_tlb_huge_update)
-#endif
-
-/*
- * For debug purposes.
- */
-static inline void dump_handler(const u32 *handler, int count)
-{
- int i;
-
- pr_debug("\t.set push\n");
- pr_debug("\t.set noreorder\n");
-
- for (i = 0; i < count; i++)
- pr_debug("\t%p\t.word 0x%08x\n", &handler[i], handler[i]);
-
- pr_debug("\t.set pop\n");
-}
-
-/* The only general purpose registers allowed in TLB handlers. */
-#define K0 26
-#define K1 27
-
-/* Some CP0 registers */
-#define C0_INDEX 0, 0
-#define C0_ENTRYLO0 2, 0
-#define C0_TCBIND 2, 2
-#define C0_ENTRYLO1 3, 0
-#define C0_CONTEXT 4, 0
-#define C0_PAGEMASK 5, 0
-#define C0_BADVADDR 8, 0
-#define C0_ENTRYHI 10, 0
-#define C0_EPC 14, 0
-#define C0_XCONTEXT 20, 0
-
-#ifdef CONFIG_64BIT
-# define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_XCONTEXT)
-#else
-# define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_CONTEXT)
-#endif
-
-/* The worst case length of the handler is around 18 instructions for
- * R3000-style TLBs and up to 63 instructions for R4000-style TLBs.
- * Maximum space available is 32 instructions for R3000 and 64
- * instructions for R4000.
- *
- * We deliberately chose a buffer size of 128, so we won't scribble
- * over anything important on overflow before we panic.
- */
-static u32 tlb_handler[128] __cpuinitdata;
-
-/* simply assume worst case size for labels and relocs */
-static struct uasm_label labels[128] __cpuinitdata;
-static struct uasm_reloc relocs[128] __cpuinitdata;
-
-static int check_for_high_segbits __cpuinitdata;
-
-static unsigned int kscratch_used_mask __cpuinitdata;
-
-static int __cpuinit allocate_kscratch(void)
-{
- int r;
- unsigned int a = cpu_data[0].kscratch_mask & ~kscratch_used_mask;
-
- r = ffs(a);
-
- if (r == 0)
- return -1;
-
- r--; /* make it zero based */
-
- kscratch_used_mask |= (1 << r);
-
- return r;
-}
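
A minimal userspace sketch of the same first-fit walk (the 0x3c mask is a made-up stand-in for cpu_data[0].kscratch_mask, meaning KScratch registers 2..5 are usable):

#include <stdio.h>
#include <strings.h>    /* ffs() */

static unsigned int kscratch_used;      /* mirrors kscratch_used_mask */

static int allocate_kscratch_sketch(unsigned int cpu_mask)
{
        int r = ffs(cpu_mask & ~kscratch_used);

        if (r == 0)
                return -1;              /* every usable register is taken */
        r--;                            /* ffs() is 1-based */
        kscratch_used |= 1u << r;
        return r;
}

int main(void)
{
        int i;

        for (i = 0; i < 5; i++)
                printf("%d ", allocate_kscratch_sketch(0x3c));  /* 2 3 4 5 -1 */
        printf("\n");
        return 0;
}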
-
-static int scratch_reg __cpuinitdata;
-static int pgd_reg __cpuinitdata;
-enum vmalloc64_mode {not_refill, refill_scratch, refill_noscratch};
-
-static struct work_registers __cpuinit build_get_work_registers(u32 **p)
-{
- struct work_registers r;
-
- int smp_processor_id_reg;
- int smp_processor_id_sel;
- int smp_processor_id_shift;
-
- if (scratch_reg > 0) {
- /* Save in CPU local C0_KScratch? */
- UASM_i_MTC0(p, 1, 31, scratch_reg);
- r.r1 = K0;
- r.r2 = K1;
- r.r3 = 1;
- return r;
- }
-
- if (num_possible_cpus() > 1) {
-#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
- smp_processor_id_shift = 51;
- smp_processor_id_reg = 20; /* XContext */
- smp_processor_id_sel = 0;
-#else
-# ifdef CONFIG_32BIT
- smp_processor_id_shift = 25;
- smp_processor_id_reg = 4; /* Context */
- smp_processor_id_sel = 0;
-# endif
-# ifdef CONFIG_64BIT
- smp_processor_id_shift = 26;
- smp_processor_id_reg = 4; /* Context */
- smp_processor_id_sel = 0;
-# endif
-#endif
- /* Get smp_processor_id */
- UASM_i_MFC0(p, K0, smp_processor_id_reg, smp_processor_id_sel);
- UASM_i_SRL_SAFE(p, K0, K0, smp_processor_id_shift);
-
- /* handler_reg_save index in K0 */
- UASM_i_SLL(p, K0, K0, ilog2(sizeof(struct tlb_reg_save)));
-
- UASM_i_LA(p, K1, (long)&handler_reg_save);
- UASM_i_ADDU(p, K0, K0, K1);
- } else {
- UASM_i_LA(p, K0, (long)&handler_reg_save);
- }
- /* K0 now points to save area, save $1 and $2 */
- UASM_i_SW(p, 1, offsetof(struct tlb_reg_save, a), K0);
- UASM_i_SW(p, 2, offsetof(struct tlb_reg_save, b), K0);
-
- r.r1 = K1;
- r.r2 = 1;
- r.r3 = 2;
- return r;
-}
-
-static void __cpuinit build_restore_work_registers(u32 **p)
-{
- if (scratch_reg > 0) {
- UASM_i_MFC0(p, 1, 31, scratch_reg);
- return;
- }
- /* K0 already points to save area, restore $1 and $2 */
- UASM_i_LW(p, 1, offsetof(struct tlb_reg_save, a), K0);
- UASM_i_LW(p, 2, offsetof(struct tlb_reg_save, b), K0);
-}
-
-#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
-
-/*
- * CONFIG_MIPS_PGD_C0_CONTEXT implies 64 bit and the absence of
- * pgd_current, so the R3000 handlers cannot be built under these
- * circumstances.
- *
- * Declare pgd_current here instead of including mmu_context.h to
- * avoid type conflicts for tlbmiss_handler_setup_pgd.
- */
-extern unsigned long pgd_current[];
-
-/*
- * The R3000 TLB handler is simple.
- */
-static void __cpuinit build_r3000_tlb_refill_handler(void)
-{
- long pgdc = (long)pgd_current;
- u32 *p;
-
- memset(tlb_handler, 0, sizeof(tlb_handler));
- p = tlb_handler;
-
- uasm_i_mfc0(&p, K0, C0_BADVADDR);
- uasm_i_lui(&p, K1, uasm_rel_hi(pgdc)); /* cp0 delay */
- uasm_i_lw(&p, K1, uasm_rel_lo(pgdc), K1);
- uasm_i_srl(&p, K0, K0, 22); /* load delay */
- uasm_i_sll(&p, K0, K0, 2);
- uasm_i_addu(&p, K1, K1, K0);
- uasm_i_mfc0(&p, K0, C0_CONTEXT);
- uasm_i_lw(&p, K1, 0, K1); /* cp0 delay */
- uasm_i_andi(&p, K0, K0, 0xffc); /* load delay */
- uasm_i_addu(&p, K1, K1, K0);
- uasm_i_lw(&p, K0, 0, K1);
- uasm_i_nop(&p); /* load delay */
- uasm_i_mtc0(&p, K0, C0_ENTRYLO0);
- uasm_i_mfc0(&p, K1, C0_EPC); /* cp0 delay */
- uasm_i_tlbwr(&p); /* cp0 delay */
- uasm_i_jr(&p, K1);
- uasm_i_rfe(&p); /* branch delay */
-
- if (p > tlb_handler + 32)
- panic("TLB refill handler space exceeded");
-
- pr_debug("Wrote TLB refill handler (%u instructions).\n",
- (unsigned int)(p - tlb_handler));
-
- memcpy((void *)ebase, tlb_handler, 0x80);
-
- dump_handler((u32 *)ebase, 32);
-}
-#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */
-
-/*
- * The R4000 TLB handler is much more complicated. We have two
- * consecutive handler areas with space for 32 instructions each.
- * Since they aren't used at the same time, we can overflow into the
- * other one. To keep things simple, we first assume linear space,
- * then relocate it to the final handler layout as needed.
- */
-static u32 final_handler[64] __cpuinitdata;
-
-/*
- * Hazards
- *
- * From the IDT errata for the QED RM5230 (Nevada), processor revision 1.0:
- * 2. A timing hazard exists for the TLBP instruction.
- *
- * stalling_instruction
- * TLBP
- *
- * The JTLB is being read for the TLBP throughout the stall generated by the
- * previous instruction. This is not really correct as the stalling instruction
- * can modify the address used to access the JTLB. The failure symptom is that
- * the TLBP instruction will use an address created for the stalling instruction
- * and not the address held in C0_ENHI and thus report the wrong results.
- *
- * The software work-around is to not allow the instruction preceding the TLBP
- * to stall - make it an NOP or some other instruction guaranteed not to stall.
- *
- * Erratum 2 will not be fixed. This erratum is also present on the R5000.
- *
- * As if we MIPS hackers wouldn't know how to nop pipelines happy ...
- */
-static void __cpuinit __maybe_unused build_tlb_probe_entry(u32 **p)
-{
- switch (current_cpu_type()) {
- /* Found by experiment: R4600 v2.0/R4700 needs this, too. */
- case CPU_R4600:
- case CPU_R4700:
- case CPU_R5000:
- case CPU_R5000A:
- case CPU_NEVADA:
- uasm_i_nop(p);
- uasm_i_tlbp(p);
- break;
-
- default:
- uasm_i_tlbp(p);
- break;
- }
-}
-
-/*
- * Write random or indexed TLB entry, taking care of the hazards from
- * the preceding mtc0 and the following eret.
- */
-enum tlb_write_entry { tlb_random, tlb_indexed };
-
-static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
- struct uasm_reloc **r,
- enum tlb_write_entry wmode)
-{
- void(*tlbw)(u32 **) = NULL;
-
- switch (wmode) {
- case tlb_random: tlbw = uasm_i_tlbwr; break;
- case tlb_indexed: tlbw = uasm_i_tlbwi; break;
- }
-
- if (cpu_has_mips_r2) {
- if (cpu_has_mips_r2_exec_hazard)
- uasm_i_ehb(p);
- tlbw(p);
- return;
- }
-
- switch (current_cpu_type()) {
- case CPU_R4000PC:
- case CPU_R4000SC:
- case CPU_R4000MC:
- case CPU_R4400PC:
- case CPU_R4400SC:
- case CPU_R4400MC:
- /*
- * This branch uses up a mtc0 hazard nop slot and saves
- * two nops after the tlbw instruction.
- */
- uasm_il_bgezl(p, r, 0, label_tlbw_hazard);
- tlbw(p);
- uasm_l_tlbw_hazard(l, *p);
- uasm_i_nop(p);
- break;
-
- case CPU_R4600:
- case CPU_R4700:
- case CPU_R5000:
- case CPU_R5000A:
- uasm_i_nop(p);
- tlbw(p);
- uasm_i_nop(p);
- break;
-
- case CPU_R4300:
- case CPU_5KC:
- case CPU_TX49XX:
- case CPU_PR4450:
- case CPU_XLR:
- uasm_i_nop(p);
- tlbw(p);
- break;
-
- case CPU_R10000:
- case CPU_R12000:
- case CPU_R14000:
- case CPU_4KC:
- case CPU_4KEC:
- case CPU_SB1:
- case CPU_SB1A:
- case CPU_4KSC:
- case CPU_20KC:
- case CPU_25KF:
- case CPU_BMIPS32:
- case CPU_BMIPS3300:
- case CPU_BMIPS4350:
- case CPU_BMIPS4380:
- case CPU_BMIPS5000:
- case CPU_LOONGSON2:
- case CPU_R5500:
- if (m4kc_tlbp_war())
- uasm_i_nop(p);
- case CPU_ALCHEMY:
- tlbw(p);
- break;
-
- case CPU_NEVADA:
- uasm_i_nop(p); /* QED specifies 2 nops hazard */
- /*
- * This branch uses up a mtc0 hazard nop slot and saves
- * a nop after the tlbw instruction.
- */
- uasm_il_bgezl(p, r, 0, label_tlbw_hazard);
- tlbw(p);
- uasm_l_tlbw_hazard(l, *p);
- break;
-
- case CPU_RM7000:
- uasm_i_nop(p);
- uasm_i_nop(p);
- uasm_i_nop(p);
- uasm_i_nop(p);
- tlbw(p);
- break;
-
- case CPU_RM9000:
- /*
- * When the JTLB is updated by tlbwi or tlbwr, a subsequent
- * use of the JTLB for instructions should not occur for 4
- * cpu cycles and use for data translations should not occur
- * for 3 cpu cycles.
- */
- uasm_i_ssnop(p);
- uasm_i_ssnop(p);
- uasm_i_ssnop(p);
- uasm_i_ssnop(p);
- tlbw(p);
- uasm_i_ssnop(p);
- uasm_i_ssnop(p);
- uasm_i_ssnop(p);
- uasm_i_ssnop(p);
- break;
-
- case CPU_VR4111:
- case CPU_VR4121:
- case CPU_VR4122:
- case CPU_VR4181:
- case CPU_VR4181A:
- uasm_i_nop(p);
- uasm_i_nop(p);
- tlbw(p);
- uasm_i_nop(p);
- uasm_i_nop(p);
- break;
-
- case CPU_VR4131:
- case CPU_VR4133:
- case CPU_R5432:
- uasm_i_nop(p);
- uasm_i_nop(p);
- tlbw(p);
- break;
-
- case CPU_JZRISC:
- tlbw(p);
- uasm_i_nop(p);
- break;
-
- default:
- panic("No TLB refill handler yet (CPU type: %d)",
- current_cpu_data.cputype);
- break;
- }
-}
-
-static __cpuinit __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
- unsigned int reg)
-{
- if (kernel_uses_smartmips_rixi) {
- UASM_i_SRL(p, reg, reg, ilog2(_PAGE_NO_EXEC));
- UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
- } else {
-#ifdef CONFIG_64BIT_PHYS_ADDR
- uasm_i_dsrl_safe(p, reg, reg, ilog2(_PAGE_GLOBAL));
-#else
- UASM_i_SRL(p, reg, reg, ilog2(_PAGE_GLOBAL));
-#endif
- }
-}
-
-#ifdef CONFIG_HUGETLB_PAGE
-
-static __cpuinit void build_restore_pagemask(u32 **p,
- struct uasm_reloc **r,
- unsigned int tmp,
- enum label_id lid,
- int restore_scratch)
-{
- if (restore_scratch) {
- /* Reset default page size */
- if (PM_DEFAULT_MASK >> 16) {
- uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
- uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
- uasm_i_mtc0(p, tmp, C0_PAGEMASK);
- uasm_il_b(p, r, lid);
- } else if (PM_DEFAULT_MASK) {
- uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
- uasm_i_mtc0(p, tmp, C0_PAGEMASK);
- uasm_il_b(p, r, lid);
- } else {
- uasm_i_mtc0(p, 0, C0_PAGEMASK);
- uasm_il_b(p, r, lid);
- }
- if (scratch_reg > 0)
- UASM_i_MFC0(p, 1, 31, scratch_reg);
- else
- UASM_i_LW(p, 1, scratchpad_offset(0), 0);
- } else {
- /* Reset default page size */
- if (PM_DEFAULT_MASK >> 16) {
- uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
- uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
- uasm_il_b(p, r, lid);
- uasm_i_mtc0(p, tmp, C0_PAGEMASK);
- } else if (PM_DEFAULT_MASK) {
- uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
- uasm_il_b(p, r, lid);
- uasm_i_mtc0(p, tmp, C0_PAGEMASK);
- } else {
- uasm_il_b(p, r, lid);
- uasm_i_mtc0(p, 0, C0_PAGEMASK);
- }
- }
-}
-
-static __cpuinit void build_huge_tlb_write_entry(u32 **p,
- struct uasm_label **l,
- struct uasm_reloc **r,
- unsigned int tmp,
- enum tlb_write_entry wmode,
- int restore_scratch)
-{
- /* Set huge page tlb entry size */
- uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16);
- uasm_i_ori(p, tmp, tmp, PM_HUGE_MASK & 0xffff);
- uasm_i_mtc0(p, tmp, C0_PAGEMASK);
-
- build_tlb_write_entry(p, l, r, wmode);
-
- build_restore_pagemask(p, r, tmp, label_leave, restore_scratch);
-}
-
-/*
- * Check if Huge PTE is present; if so, jump to LABEL.
- */
-static void __cpuinit
-build_is_huge_pte(u32 **p, struct uasm_reloc **r, unsigned int tmp,
- unsigned int pmd, int lid)
-{
- UASM_i_LW(p, tmp, 0, pmd);
- if (use_bbit_insns()) {
- uasm_il_bbit1(p, r, tmp, ilog2(_PAGE_HUGE), lid);
- } else {
- uasm_i_andi(p, tmp, tmp, _PAGE_HUGE);
- uasm_il_bnez(p, r, tmp, lid);
- }
-}
-
-static __cpuinit void build_huge_update_entries(u32 **p,
- unsigned int pte,
- unsigned int tmp)
-{
- int small_sequence;
-
- /*
- * A huge PTE describes an area the size of the
- * configured huge page size. This is twice the
- * size of the large TLB entry we intend to use.
- * A TLB entry half the size of the configured
- * huge page size is configured into entrylo0
- * and entrylo1 to cover the contiguous huge PTE
- * address space.
- */
- small_sequence = (HPAGE_SIZE >> 7) < 0x10000;
-
- /* We can clobber tmp. It isn't used after this. */
- if (!small_sequence)
- uasm_i_lui(p, tmp, HPAGE_SIZE >> (7 + 16));
-
- build_convert_pte_to_entrylo(p, pte);
- UASM_i_MTC0(p, pte, C0_ENTRYLO0); /* load it */
- /* convert to entrylo1 */
- if (small_sequence)
- UASM_i_ADDIU(p, pte, pte, HPAGE_SIZE >> 7);
- else
- UASM_i_ADDU(p, pte, pte, tmp);
-
- UASM_i_MTC0(p, pte, C0_ENTRYLO1); /* load it */
-}
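
For a feel of the numbers (assuming a hypothetical 2 MiB huge page; the constant is not taken from this tree):

/*
 * HPAGE_SIZE == 0x200000: each of entrylo0/entrylo1 covers half of
 * it, and the PFN field starts at bit 6 of entrylo, so the step
 * between the two entrylo values is
 *      (HPAGE_SIZE / 2) >> 6 == HPAGE_SIZE >> 7 == 0x4000
 * 0x4000 fits ADDIU's 16-bit immediate, so small_sequence holds;
 * larger huge pages fall back to the LUI/ADDU pair.
 */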
-
-static __cpuinit void build_huge_handler_tail(u32 **p,
- struct uasm_reloc **r,
- struct uasm_label **l,
- unsigned int pte,
- unsigned int ptr)
-{
-#ifdef CONFIG_SMP
- UASM_i_SC(p, pte, 0, ptr);
- uasm_il_beqz(p, r, pte, label_tlb_huge_update);
- UASM_i_LW(p, pte, 0, ptr); /* Needed because SC killed our PTE */
-#else
- UASM_i_SW(p, pte, 0, ptr);
-#endif
- build_huge_update_entries(p, pte, ptr);
- build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed, 0);
-}
-#endif /* CONFIG_HUGETLB_PAGE */
-
-#ifdef CONFIG_64BIT
-/*
- * TMP and PTR are scratch.
- * TMP will be clobbered, PTR will hold the pmd entry.
- */
-static void __cpuinit
-build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
- unsigned int tmp, unsigned int ptr)
-{
-#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
- long pgdc = (long)pgd_current;
-#endif
- /*
- * The vmalloc handling is not in the hotpath.
- */
- uasm_i_dmfc0(p, tmp, C0_BADVADDR);
-
- if (check_for_high_segbits) {
- /*
- * The kernel currently implicitly assumes that the
- * MIPS SEGBITS parameter for the processor is
- * (PGDIR_SHIFT+PGDIR_BITS) or less, and will never
- * allocate virtual addresses outside the maximum
- * range for SEGBITS = (PGDIR_SHIFT+PGDIR_BITS). But
- * that doesn't prevent user code from accessing the
- * higher xuseg addresses. Here, we make sure that
- * everything but the lower xuseg addresses goes down
- * the module_alloc/vmalloc path.
- */
- uasm_i_dsrl_safe(p, ptr, tmp, PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
- uasm_il_bnez(p, r, ptr, label_vmalloc);
- } else {
- uasm_il_bltz(p, r, tmp, label_vmalloc);
- }
- /* No uasm_i_nop needed here, since the next insn doesn't touch TMP. */
-
-#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
- if (pgd_reg != -1) {
- /* pgd is in pgd_reg */
- UASM_i_MFC0(p, ptr, 31, pgd_reg);
- } else {
- /*
- * &pgd << 11 stored in CONTEXT [23..63].
- */
- UASM_i_MFC0(p, ptr, C0_CONTEXT);
-
- /* Clear lower 23 bits of context. */
- uasm_i_dins(p, ptr, 0, 0, 23);
-
- /* 1 0 1 0 1 << 6 xkphys cached */
- uasm_i_ori(p, ptr, ptr, 0x540);
- uasm_i_drotr(p, ptr, ptr, 11);
- }
-#elif defined(CONFIG_SMP)
-# ifdef CONFIG_MIPS_MT_SMTC
- /*
- * SMTC uses TCBind value as "CPU" index
- */
- uasm_i_mfc0(p, ptr, C0_TCBIND);
- uasm_i_dsrl_safe(p, ptr, ptr, 19);
-# else
- /*
- * 64 bit SMP running in XKPHYS has smp_processor_id() << 3
- * stored in CONTEXT.
- */
- uasm_i_dmfc0(p, ptr, C0_CONTEXT);
- uasm_i_dsrl_safe(p, ptr, ptr, 23);
-# endif
- UASM_i_LA_mostly(p, tmp, pgdc);
- uasm_i_daddu(p, ptr, ptr, tmp);
- uasm_i_dmfc0(p, tmp, C0_BADVADDR);
- uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
-#else
- UASM_i_LA_mostly(p, ptr, pgdc);
- uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
-#endif
-
- uasm_l_vmalloc_done(l, *p);
-
- /* get pgd offset in bytes */
- uasm_i_dsrl_safe(p, tmp, tmp, PGDIR_SHIFT - 3);
-
- uasm_i_andi(p, tmp, tmp, (PTRS_PER_PGD - 1)<<3);
- uasm_i_daddu(p, ptr, ptr, tmp); /* add in pgd offset */
-#ifndef __PAGETABLE_PMD_FOLDED
- uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */
- uasm_i_ld(p, ptr, 0, ptr); /* get pmd pointer */
- uasm_i_dsrl_safe(p, tmp, tmp, PMD_SHIFT-3); /* get pmd offset in bytes */
- uasm_i_andi(p, tmp, tmp, (PTRS_PER_PMD - 1)<<3);
- uasm_i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */
-#endif
-}
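
The CONTEXT branch above packs the pointer recovery into three instructions; a standalone sketch with a made-up, page-aligned pgd physical address shows how ori plus drotr simultaneously undoes the << 11 and stamps an xkphys cached prefix on top:

#include <stdio.h>
#include <stdint.h>

static uint64_t drotr(uint64_t v, unsigned int n)
{
        return (v >> n) | (v << (64 - n));
}

int main(void)
{
        uint64_t pgd_phys = 0x1234000;          /* hypothetical pgd address */
        uint64_t context  = pgd_phys << 11;     /* as stored by setup_pgd */

        context &= ~((1ULL << 23) - 1);         /* uasm_i_dins: clear bits 0..22 */
        context |= 0x540;                       /* bits 6, 8 and 10 */
        context = drotr(context, 11);           /* bits 10/8/6 -> 63/61/59 */

        printf("%#llx\n", (unsigned long long)context); /* 0xa800000001234000 */
        return 0;
}

The rotated-in 10101 prefix is the xkphys segment with a cached CCA, so the result is a directly dereferenceable pointer to the pgd.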
-
-/*
- * BVADDR is the faulting address, PTR is scratch.
- * PTR will hold the pgd for vmalloc.
- */
-static void __cpuinit
-build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
- unsigned int bvaddr, unsigned int ptr,
- enum vmalloc64_mode mode)
-{
- long swpd = (long)swapper_pg_dir;
- int single_insn_swpd;
- int did_vmalloc_branch = 0;
-
- single_insn_swpd = uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd);
-
- uasm_l_vmalloc(l, *p);
-
- if (mode != not_refill && check_for_high_segbits) {
- if (single_insn_swpd) {
- uasm_il_bltz(p, r, bvaddr, label_vmalloc_done);
- uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
- did_vmalloc_branch = 1;
- /* fall through */
- } else {
- uasm_il_bgez(p, r, bvaddr, label_large_segbits_fault);
- }
- }
- if (!did_vmalloc_branch) {
- if (uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd)) {
- uasm_il_b(p, r, label_vmalloc_done);
- uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
- } else {
- UASM_i_LA_mostly(p, ptr, swpd);
- uasm_il_b(p, r, label_vmalloc_done);
- if (uasm_in_compat_space_p(swpd))
- uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(swpd));
- else
- uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(swpd));
- }
- }
- if (mode != not_refill && check_for_high_segbits) {
- uasm_l_large_segbits_fault(l, *p);
- /*
- * We get here if we are an xsseg address, or if we are
- * an xuseg address above the (PGDIR_SHIFT+PGDIR_BITS) boundary.
- *
- * Ignoring xsseg (assumed disabled, so an access would
- * generate an address error), the only remaining
- * possibility is the upper xuseg addresses. On processors with
- * TLB_SEGBITS <= PGDIR_SHIFT+PGDIR_BITS, these
- * addresses would have taken an address error. We try
- * to mimic that here by taking a load/istream page
- * fault.
- */
- UASM_i_LA(p, ptr, (unsigned long)tlb_do_page_fault_0);
- uasm_i_jr(p, ptr);
-
- if (mode == refill_scratch) {
- if (scratch_reg > 0)
- UASM_i_MFC0(p, 1, 31, scratch_reg);
- else
- UASM_i_LW(p, 1, scratchpad_offset(0), 0);
- } else {
- uasm_i_nop(p);
- }
- }
-}
-
-#else /* !CONFIG_64BIT */
-
-/*
- * TMP and PTR are scratch.
- * TMP will be clobbered, PTR will hold the pgd entry.
- */
-static void __cpuinit __maybe_unused
-build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
-{
- long pgdc = (long)pgd_current;
-
- /* 32 bit SMP has smp_processor_id() stored in CONTEXT. */
-#ifdef CONFIG_SMP
-#ifdef CONFIG_MIPS_MT_SMTC
- /*
- * SMTC uses TCBind value as "CPU" index
- */
- uasm_i_mfc0(p, ptr, C0_TCBIND);
- UASM_i_LA_mostly(p, tmp, pgdc);
- uasm_i_srl(p, ptr, ptr, 19);
-#else
- /*
- * smp_processor_id() << 3 is stored in CONTEXT.
- */
- uasm_i_mfc0(p, ptr, C0_CONTEXT);
- UASM_i_LA_mostly(p, tmp, pgdc);
- uasm_i_srl(p, ptr, ptr, 23);
-#endif
- uasm_i_addu(p, ptr, tmp, ptr);
-#else
- UASM_i_LA_mostly(p, ptr, pgdc);
-#endif
- uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
- uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
- uasm_i_srl(p, tmp, tmp, PGDIR_SHIFT); /* get pgd only bits */
- uasm_i_sll(p, tmp, tmp, PGD_T_LOG2);
- uasm_i_addu(p, ptr, ptr, tmp); /* add in pgd offset */
-}
-
-#endif /* !CONFIG_64BIT */
-
-static void __cpuinit build_adjust_context(u32 **p, unsigned int ctx)
-{
- unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12;
- unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1);
-
- switch (current_cpu_type()) {
- case CPU_VR41XX:
- case CPU_VR4111:
- case CPU_VR4121:
- case CPU_VR4122:
- case CPU_VR4131:
- case CPU_VR4181:
- case CPU_VR4181A:
- case CPU_VR4133:
- shift += 2;
- break;
-
- default:
- break;
- }
-
- if (shift)
- UASM_i_SRL(p, ctx, ctx, shift);
- uasm_i_andi(p, ctx, ctx, mask);
-}
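
Worked numbers for one common case (a 32-bit kernel with 4 KiB pages; other configurations shift differently):

/*
 * PTE_T_LOG2 == 2 (4-byte pte_t), PAGE_SHIFT == 12, PTRS_PER_PTE == 1024:
 *      shift = 4 - (2 + 1) + 12 - 12 = 1
 *      mask  = (1024 / 2 - 1) << (2 + 1) = 0xff8
 * BadVPN2 sits at bit 4 of c0_context, one bit per page pair, so
 * ">> 1, & 0xff8" yields the byte offset of the even/odd pte pair
 * within the page table.
 */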
-
-static void __cpuinit build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
-{
- /*
- * Bug workaround for the Nevada. It seems as if under certain
- * circumstances the move from cp0_context might produce a
- * bogus result when the mfc0 instruction and its consumer are
- * in different cachelines, or when a load instruction (probably
- * any memory reference) is between them.
- */
- switch (current_cpu_type()) {
- case CPU_NEVADA:
- UASM_i_LW(p, ptr, 0, ptr);
- GET_CONTEXT(p, tmp); /* get context reg */
- break;
-
- default:
- GET_CONTEXT(p, tmp); /* get context reg */
- UASM_i_LW(p, ptr, 0, ptr);
- break;
- }
-
- build_adjust_context(p, tmp);
- UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */
-}
-
-static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
- unsigned int ptep)
-{
- /*
- * 64-bit address support (36-bit on a 32-bit CPU) in a 32-bit
- * kernel is a special case. Only a few CPUs use it.
- */
-#ifdef CONFIG_64BIT_PHYS_ADDR
- if (cpu_has_64bits) {
- uasm_i_ld(p, tmp, 0, ptep); /* get even pte */
- uasm_i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
- if (kernel_uses_smartmips_rixi) {
- UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_NO_EXEC));
- UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_NO_EXEC));
- UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
- UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
- UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
- } else {
- uasm_i_dsrl_safe(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */
- UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
- uasm_i_dsrl_safe(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */
- }
- UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
- } else {
- int pte_off_even = sizeof(pte_t) / 2;
- int pte_off_odd = pte_off_even + sizeof(pte_t);
-
- /* The pte entries are pre-shifted */
- uasm_i_lw(p, tmp, pte_off_even, ptep); /* get even pte */
- UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
- uasm_i_lw(p, ptep, pte_off_odd, ptep); /* get odd pte */
- UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
- }
-#else
- UASM_i_LW(p, tmp, 0, ptep); /* get even pte */
- UASM_i_LW(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
- if (r45k_bvahwbug())
- build_tlb_probe_entry(p);
- if (kernel_uses_smartmips_rixi) {
- UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_NO_EXEC));
- UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_NO_EXEC));
- UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
- if (r4k_250MHZhwbug())
- UASM_i_MTC0(p, 0, C0_ENTRYLO0);
- UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
- UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
- } else {
- UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */
- if (r4k_250MHZhwbug())
- UASM_i_MTC0(p, 0, C0_ENTRYLO0);
- UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
- UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */
- if (r45k_bvahwbug())
- uasm_i_mfc0(p, tmp, C0_INDEX);
- }
- if (r4k_250MHZhwbug())
- UASM_i_MTC0(p, 0, C0_ENTRYLO1);
- UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
-#endif
-}
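
On the CONFIG_64BIT_PHYS_ADDR path for CPUs without 64-bit operations, the offsets select the upper half of each pte; a sketch of the assumed layout (not verified against every sub-configuration):

/*
 * sizeof(pte_t) == 8 here, so pte_off_even == 4 and pte_off_odd == 12:
 * each lw grabs the high word of a pte, which already holds the PFN
 * in entrylo format (the "pre-shifted" remark above), so no shift or
 * rotate is needed before the mtc0.
 */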
-
-struct mips_huge_tlb_info {
- int huge_pte;
- int restore_scratch;
-};
-
-static struct mips_huge_tlb_info __cpuinit
-build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
- struct uasm_reloc **r, unsigned int tmp,
- unsigned int ptr, int c0_scratch)
-{
- struct mips_huge_tlb_info rv;
- unsigned int even, odd;
- int vmalloc_branch_delay_filled = 0;
- const int scratch = 1; /* Our extra working register */
-
- rv.huge_pte = scratch;
- rv.restore_scratch = 0;
-
- if (check_for_high_segbits) {
- UASM_i_MFC0(p, tmp, C0_BADVADDR);
-
- if (pgd_reg != -1)
- UASM_i_MFC0(p, ptr, 31, pgd_reg);
- else
- UASM_i_MFC0(p, ptr, C0_CONTEXT);
-
- if (c0_scratch >= 0)
- UASM_i_MTC0(p, scratch, 31, c0_scratch);
- else
- UASM_i_SW(p, scratch, scratchpad_offset(0), 0);
-
- uasm_i_dsrl_safe(p, scratch, tmp,
- PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
- uasm_il_bnez(p, r, scratch, label_vmalloc);
-
- if (pgd_reg == -1) {
- vmalloc_branch_delay_filled = 1;
- /* Clear lower 23 bits of context. */
- uasm_i_dins(p, ptr, 0, 0, 23);
- }
- } else {
- if (pgd_reg != -1)
- UASM_i_MFC0(p, ptr, 31, pgd_reg);
- else
- UASM_i_MFC0(p, ptr, C0_CONTEXT);
-
- UASM_i_MFC0(p, tmp, C0_BADVADDR);
-
- if (c0_scratch >= 0)
- UASM_i_MTC0(p, scratch, 31, c0_scratch);
- else
- UASM_i_SW(p, scratch, scratchpad_offset(0), 0);
-
- if (pgd_reg == -1)
- /* Clear lower 23 bits of context. */
- uasm_i_dins(p, ptr, 0, 0, 23);
-
- uasm_il_bltz(p, r, tmp, label_vmalloc);
- }
-
- if (pgd_reg == -1) {
- vmalloc_branch_delay_filled = 1;
- /* 1 0 1 0 1 << 6 xkphys cached */
- uasm_i_ori(p, ptr, ptr, 0x540);
- uasm_i_drotr(p, ptr, ptr, 11);
- }
-
-#ifdef __PAGETABLE_PMD_FOLDED
-#define LOC_PTEP scratch
-#else
-#define LOC_PTEP ptr
-#endif
-
- if (!vmalloc_branch_delay_filled)
- /* get pgd offset in bytes */
- uasm_i_dsrl_safe(p, scratch, tmp, PGDIR_SHIFT - 3);
-
- uasm_l_vmalloc_done(l, *p);
-
- /*
- *                      tmp        ptr
- * fall-through case:   badvaddr   *pgd_current
- * vmalloc case:        badvaddr   swapper_pg_dir
- */
-
- if (vmalloc_branch_delay_filled)
- /* get pgd offset in bytes */
- uasm_i_dsrl_safe(p, scratch, tmp, PGDIR_SHIFT - 3);
-
-#ifdef __PAGETABLE_PMD_FOLDED
- GET_CONTEXT(p, tmp); /* get context reg */
-#endif
- uasm_i_andi(p, scratch, scratch, (PTRS_PER_PGD - 1) << 3);
-
- if (use_lwx_insns()) {
- UASM_i_LWX(p, LOC_PTEP, scratch, ptr);
- } else {
- uasm_i_daddu(p, ptr, ptr, scratch); /* add in pgd offset */
- uasm_i_ld(p, LOC_PTEP, 0, ptr); /* get pmd pointer */
- }
-
-#ifndef __PAGETABLE_PMD_FOLDED
- /* get pmd offset in bytes */
- uasm_i_dsrl_safe(p, scratch, tmp, PMD_SHIFT - 3);
- uasm_i_andi(p, scratch, scratch, (PTRS_PER_PMD - 1) << 3);
- GET_CONTEXT(p, tmp); /* get context reg */
-
- if (use_lwx_insns()) {
- UASM_i_LWX(p, scratch, scratch, ptr);
- } else {
- uasm_i_daddu(p, ptr, ptr, scratch); /* add in pmd offset */
- UASM_i_LW(p, scratch, 0, ptr);
- }
-#endif
- /* Adjust the context during the load latency. */
- build_adjust_context(p, tmp);
-
-#ifdef CONFIG_HUGETLB_PAGE
- uasm_il_bbit1(p, r, scratch, ilog2(_PAGE_HUGE), label_tlb_huge_update);
- /*
- * In the LWX case we don't want to do the load in the
- * delay slot. It cannot issue in the same cycle and may be
- * speculative and unneeded.
- */
- if (use_lwx_insns())
- uasm_i_nop(p);
-#endif /* CONFIG_HUGETLB_PAGE */
-
-
- /* build_update_entries */
- if (use_lwx_insns()) {
- even = ptr;
- odd = tmp;
- UASM_i_LWX(p, even, scratch, tmp);
- UASM_i_ADDIU(p, tmp, tmp, sizeof(pte_t));
- UASM_i_LWX(p, odd, scratch, tmp);
- } else {
- UASM_i_ADDU(p, ptr, scratch, tmp); /* add in offset */
- even = tmp;
- odd = ptr;
- UASM_i_LW(p, even, 0, ptr); /* get even pte */
- UASM_i_LW(p, odd, sizeof(pte_t), ptr); /* get odd pte */
- }
- if (kernel_uses_smartmips_rixi) {
- uasm_i_dsrl_safe(p, even, even, ilog2(_PAGE_NO_EXEC));
- uasm_i_dsrl_safe(p, odd, odd, ilog2(_PAGE_NO_EXEC));
- uasm_i_drotr(p, even, even,
- ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
- UASM_i_MTC0(p, even, C0_ENTRYLO0); /* load it */
- uasm_i_drotr(p, odd, odd,
- ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
- } else {
- uasm_i_dsrl_safe(p, even, even, ilog2(_PAGE_GLOBAL));
- UASM_i_MTC0(p, even, C0_ENTRYLO0); /* load it */
- uasm_i_dsrl_safe(p, odd, odd, ilog2(_PAGE_GLOBAL));
- }
- UASM_i_MTC0(p, odd, C0_ENTRYLO1); /* load it */
-
- if (c0_scratch >= 0) {
- UASM_i_MFC0(p, scratch, 31, c0_scratch);
- build_tlb_write_entry(p, l, r, tlb_random);
- uasm_l_leave(l, *p);
- rv.restore_scratch = 1;
- } else if (PAGE_SHIFT == 14 || PAGE_SHIFT == 13) {
- build_tlb_write_entry(p, l, r, tlb_random);
- uasm_l_leave(l, *p);
- UASM_i_LW(p, scratch, scratchpad_offset(0), 0);
- } else {
- UASM_i_LW(p, scratch, scratchpad_offset(0), 0);
- build_tlb_write_entry(p, l, r, tlb_random);
- uasm_l_leave(l, *p);
- rv.restore_scratch = 1;
- }
-
- uasm_i_eret(p); /* return from trap */
-
- return rv;
-}
-
-/*
- * For a 64-bit kernel, we are using the 64-bit XTLB refill exception
- * because EXL == 0. If we wrap, we can also use the 32 instruction
- * slots before the XTLB refill exception handler which belong to the
- * unused TLB refill exception.
- */
-#define MIPS64_REFILL_INSNS 32
-
-static void __cpuinit build_r4000_tlb_refill_handler(void)
-{
- u32 *p = tlb_handler;
- struct uasm_label *l = labels;
- struct uasm_reloc *r = relocs;
- u32 *f;
- unsigned int final_len;
- struct mips_huge_tlb_info htlb_info __maybe_unused;
- enum vmalloc64_mode vmalloc_mode __maybe_unused;
-
- memset(tlb_handler, 0, sizeof(tlb_handler));
- memset(labels, 0, sizeof(labels));
- memset(relocs, 0, sizeof(relocs));
- memset(final_handler, 0, sizeof(final_handler));
-
- if ((scratch_reg > 0 || scratchpad_available()) && use_bbit_insns()) {
- htlb_info = build_fast_tlb_refill_handler(&p, &l, &r, K0, K1,
- scratch_reg);
- vmalloc_mode = refill_scratch;
- } else {
- htlb_info.huge_pte = K0;
- htlb_info.restore_scratch = 0;
- vmalloc_mode = refill_noscratch;
- /*
- * create the plain linear handler
- */
- if (bcm1250_m3_war()) {
- unsigned int segbits = 44;
-
- uasm_i_dmfc0(&p, K0, C0_BADVADDR);
- uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
- uasm_i_xor(&p, K0, K0, K1);
- uasm_i_dsrl_safe(&p, K1, K0, 62);
- uasm_i_dsrl_safe(&p, K0, K0, 12 + 1);
- uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits);
- uasm_i_or(&p, K0, K0, K1);
- uasm_il_bnez(&p, &r, K0, label_leave);
- /* No need for uasm_i_nop */
- }
-
-#ifdef CONFIG_64BIT
- build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */
-#else
- build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
-#endif
-
-#ifdef CONFIG_HUGETLB_PAGE
- build_is_huge_pte(&p, &r, K0, K1, label_tlb_huge_update);
-#endif
-
- build_get_ptep(&p, K0, K1);
- build_update_entries(&p, K0, K1);
- build_tlb_write_entry(&p, &l, &r, tlb_random);
- uasm_l_leave(&l, p);
- uasm_i_eret(&p); /* return from trap */
- }
-#ifdef CONFIG_HUGETLB_PAGE
- uasm_l_tlb_huge_update(&l, p);
- build_huge_update_entries(&p, htlb_info.huge_pte, K1);
- build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random,
- htlb_info.restore_scratch);
-#endif
-
-#ifdef CONFIG_64BIT
- build_get_pgd_vmalloc64(&p, &l, &r, K0, K1, vmalloc_mode);
-#endif
-
- /*
- * Overflow check: For the 64bit handler, we need at least one
- * free instruction slot for the wrap-around branch. In worst
- * case, if the intended insertion point is a delay slot, we
- * need three, with the second nop'ed and the third being
- * unused.
- */
- /* Loongson2 ebase differs from r4k; we have more space */
-#if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2)
- if ((p - tlb_handler) > 64)
- panic("TLB refill handler space exceeded");
-#else
- if (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 1)
- || (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 3)
- && uasm_insn_has_bdelay(relocs,
- tlb_handler + MIPS64_REFILL_INSNS - 3)))
- panic("TLB refill handler space exceeded");
-#endif
-
- /*
- * Now fold the handler in the TLB refill handler space.
- */
-#if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2)
- f = final_handler;
- /* Simplest case, just copy the handler. */
- uasm_copy_handler(relocs, labels, tlb_handler, p, f);
- final_len = p - tlb_handler;
-#else /* CONFIG_64BIT */
- f = final_handler + MIPS64_REFILL_INSNS;
- if ((p - tlb_handler) <= MIPS64_REFILL_INSNS) {
- /* Just copy the handler. */
- uasm_copy_handler(relocs, labels, tlb_handler, p, f);
- final_len = p - tlb_handler;
- } else {
-#if defined(CONFIG_HUGETLB_PAGE)
- const enum label_id ls = label_tlb_huge_update;
-#else
- const enum label_id ls = label_vmalloc;
-#endif
- u32 *split;
- int ov = 0;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(labels) && labels[i].lab != ls; i++)
- ;
- BUG_ON(i == ARRAY_SIZE(labels));
- split = labels[i].addr;
-
- /*
- * See if we have overflown one way or the other.
- */
- if (split > tlb_handler + MIPS64_REFILL_INSNS ||
- split < p - MIPS64_REFILL_INSNS)
- ov = 1;
-
- if (ov) {
- /*
- * Split two instructions before the end. One
- * for the branch and one for the instruction
- * in the delay slot.
- */
- split = tlb_handler + MIPS64_REFILL_INSNS - 2;
-
- /*
- * If the branch would fall in a delay slot,
- * we must back up an additional instruction
- * so that it is no longer in a delay slot.
- */
- if (uasm_insn_has_bdelay(relocs, split - 1))
- split--;
- }
- /* Copy first part of the handler. */
- uasm_copy_handler(relocs, labels, tlb_handler, split, f);
- f += split - tlb_handler;
-
- if (ov) {
- /* Insert branch. */
- uasm_l_split(&l, final_handler);
- uasm_il_b(&f, &r, label_split);
- if (uasm_insn_has_bdelay(relocs, split))
- uasm_i_nop(&f);
- else {
- uasm_copy_handler(relocs, labels,
- split, split + 1, f);
- uasm_move_labels(labels, f, f + 1, -1);
- f++;
- split++;
- }
- }
-
- /* Copy the rest of the handler. */
- uasm_copy_handler(relocs, labels, split, p, final_handler);
- final_len = (f - (final_handler + MIPS64_REFILL_INSNS)) +
- (p - split);
- }
-#endif /* CONFIG_64BIT */
-
- uasm_resolve_relocs(relocs, labels);
- pr_debug("Wrote TLB refill handler (%u instructions).\n",
- final_len);
-
- memcpy((void *)ebase, final_handler, 0x100);
-
- dump_handler((u32 *)ebase, 64);
-}
-
-/*
- * 128 instructions for the fastpath handler is generous and should
- * never be exceeded.
- */
-#define FASTPATH_SIZE 128
-
-u32 handle_tlbl[FASTPATH_SIZE] __cacheline_aligned;
-u32 handle_tlbs[FASTPATH_SIZE] __cacheline_aligned;
-u32 handle_tlbm[FASTPATH_SIZE] __cacheline_aligned;
-#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
-u32 tlbmiss_handler_setup_pgd[16] __cacheline_aligned;
-
-static void __cpuinit build_r4000_setup_pgd(void)
-{
- const int a0 = 4;
- const int a1 = 5;
- u32 *p = tlbmiss_handler_setup_pgd;
- struct uasm_label *l = labels;
- struct uasm_reloc *r = relocs;
-
- memset(tlbmiss_handler_setup_pgd, 0, sizeof(tlbmiss_handler_setup_pgd));
- memset(labels, 0, sizeof(labels));
- memset(relocs, 0, sizeof(relocs));
-
- pgd_reg = allocate_kscratch();
-
- if (pgd_reg == -1) {
- /* PGD << 11 in c0_Context */
- /*
- * If it is a ckseg0 address, convert it to a physical
- * address. Shifting right by 29 and adding 4 will
- * result in zero for these addresses.
- */
- UASM_i_SRA(&p, a1, a0, 29);
- UASM_i_ADDIU(&p, a1, a1, 4);
- uasm_il_bnez(&p, &r, a1, label_tlbl_goaround1);
- uasm_i_nop(&p);
- uasm_i_dinsm(&p, a0, 0, 29, 64 - 29);
- uasm_l_tlbl_goaround1(&l, p);
- UASM_i_SLL(&p, a0, a0, 11);
- uasm_i_jr(&p, 31);
- UASM_i_MTC0(&p, a0, C0_CONTEXT);
- } else {
- /* PGD in c0_KScratch */
- uasm_i_jr(&p, 31);
- UASM_i_MTC0(&p, a0, 31, pgd_reg);
- }
- if (p - tlbmiss_handler_setup_pgd > ARRAY_SIZE(tlbmiss_handler_setup_pgd))
- panic("tlbmiss_handler_setup_pgd space exceeded");
- uasm_resolve_relocs(relocs, labels);
- pr_debug("Wrote tlbmiss_handler_setup_pgd (%u instructions).\n",
- (unsigned int)(p - tlbmiss_handler_setup_pgd));
-
- dump_handler(tlbmiss_handler_setup_pgd,
- ARRAY_SIZE(tlbmiss_handler_setup_pgd));
-}
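
The ckseg0 test above is compact enough to misread; a standalone sketch with a made-up pgd address:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        int64_t a0 = (int32_t)0x80123000;       /* sign-extended ckseg0 pointer */
        int64_t a1 = (a0 >> 29) + 4;            /* UASM_i_SRA, UASM_i_ADDIU */

        /* ckseg0 spans 0x80000000..0x9fffffff: >> 29 gives -4, so a1 == 0 */
        if (a1 == 0)
                a0 &= (1LL << 29) - 1;          /* uasm_i_dinsm: clear bits 29..63 */

        printf("%#llx\n", (unsigned long long)a0);      /* 0x123000, physical */
        return 0;
}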
-#endif
-
-static void __cpuinit
-iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr)
-{
-#ifdef CONFIG_SMP
-# ifdef CONFIG_64BIT_PHYS_ADDR
- if (cpu_has_64bits)
- uasm_i_lld(p, pte, 0, ptr);
- else
-# endif
- UASM_i_LL(p, pte, 0, ptr);
-#else
-# ifdef CONFIG_64BIT_PHYS_ADDR
- if (cpu_has_64bits)
- uasm_i_ld(p, pte, 0, ptr);
- else
-# endif
- UASM_i_LW(p, pte, 0, ptr);
-#endif
-}
-
-static void __cpuinit
-iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
- unsigned int mode)
-{
-#ifdef CONFIG_64BIT_PHYS_ADDR
- unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY);
-#endif
-
- uasm_i_ori(p, pte, pte, mode);
-#ifdef CONFIG_SMP
-# ifdef CONFIG_64BIT_PHYS_ADDR
- if (cpu_has_64bits)
- uasm_i_scd(p, pte, 0, ptr);
- else
-# endif
- UASM_i_SC(p, pte, 0, ptr);
-
- if (r10000_llsc_war())
- uasm_il_beqzl(p, r, pte, label_smp_pgtable_change);
- else
- uasm_il_beqz(p, r, pte, label_smp_pgtable_change);
-
-# ifdef CONFIG_64BIT_PHYS_ADDR
- if (!cpu_has_64bits) {
- /* no uasm_i_nop needed */
- uasm_i_ll(p, pte, sizeof(pte_t) / 2, ptr);
- uasm_i_ori(p, pte, pte, hwmode);
- uasm_i_sc(p, pte, sizeof(pte_t) / 2, ptr);
- uasm_il_beqz(p, r, pte, label_smp_pgtable_change);
- /* no uasm_i_nop needed */
- uasm_i_lw(p, pte, 0, ptr);
- } else
- uasm_i_nop(p);
-# else
- uasm_i_nop(p);
-# endif
-#else
-# ifdef CONFIG_64BIT_PHYS_ADDR
- if (cpu_has_64bits)
- uasm_i_sd(p, pte, 0, ptr);
- else
-# endif
- UASM_i_SW(p, pte, 0, ptr);
-
-# ifdef CONFIG_64BIT_PHYS_ADDR
- if (!cpu_has_64bits) {
- uasm_i_lw(p, pte, sizeof(pte_t) / 2, ptr);
- uasm_i_ori(p, pte, pte, hwmode);
- uasm_i_sw(p, pte, sizeof(pte_t) / 2, ptr);
- uasm_i_lw(p, pte, 0, ptr);
- }
-# endif
-#endif
-}
-
-/*
- * Check if PTE is present; if not, jump to LABEL. PTR points to
- * the page table where this PTE is located; PTE will be re-loaded
- * with its original value.
- */
-static void __cpuinit
-build_pte_present(u32 **p, struct uasm_reloc **r,
- int pte, int ptr, int scratch, enum label_id lid)
-{
- int t = scratch >= 0 ? scratch : pte;
-
- if (kernel_uses_smartmips_rixi) {
- if (use_bbit_insns()) {
- uasm_il_bbit0(p, r, pte, ilog2(_PAGE_PRESENT), lid);
- uasm_i_nop(p);
- } else {
- uasm_i_andi(p, t, pte, _PAGE_PRESENT);
- uasm_il_beqz(p, r, t, lid);
- if (pte == t)
- /* You lose the SMP race :-( */
- iPTE_LW(p, pte, ptr);
- }
- } else {
- uasm_i_andi(p, t, pte, _PAGE_PRESENT | _PAGE_READ);
- uasm_i_xori(p, t, t, _PAGE_PRESENT | _PAGE_READ);
- uasm_il_bnez(p, r, t, lid);
- if (pte == t)
- /* You lose the SMP race :-( */
- iPTE_LW(p, pte, ptr);
- }
-}
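
The andi/xori pair in the non-RIXI branch is a branch-unless-both-bits-set idiom; the same test in plain C (a sketch with stand-in flag values):

#define PRESENT 0x1     /* stand-ins for _PAGE_PRESENT / _PAGE_READ */
#define READ    0x2

/* Nonzero when the slow path must be taken: t ends up 0 only if both bits were set. */
static int pte_not_present(unsigned long pte)
{
        unsigned long t = pte & (PRESENT | READ);       /* uasm_i_andi */

        t ^= PRESENT | READ;                            /* uasm_i_xori */
        return t != 0;                                  /* uasm_il_bnez */
}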
-
-/* Make PTE valid, store result in PTR. */
-static void __cpuinit
-build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte,
- unsigned int ptr)
-{
- unsigned int mode = _PAGE_VALID | _PAGE_ACCESSED;
-
- iPTE_SW(p, r, pte, ptr, mode);
-}
-
-/*
- * Check if PTE can be written to; if not, branch to LABEL.
- * Regardless, restore PTE with the value from PTR when done.
- */
-static void __cpuinit
-build_pte_writable(u32 **p, struct uasm_reloc **r,
- unsigned int pte, unsigned int ptr, int scratch,
- enum label_id lid)
-{
- int t = scratch >= 0 ? scratch : pte;
-
- uasm_i_andi(p, t, pte, _PAGE_PRESENT | _PAGE_WRITE);
- uasm_i_xori(p, t, t, _PAGE_PRESENT | _PAGE_WRITE);
- uasm_il_bnez(p, r, t, lid);
- if (pte == t)
- /* You lose the SMP race :-( */
- iPTE_LW(p, pte, ptr);
- else
- uasm_i_nop(p);
-}
-
-/* Make PTE writable, update software status bits as well, then store
- * at PTR.
- */
-static void __cpuinit
-build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte,
- unsigned int ptr)
-{
- unsigned int mode = (_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID
- | _PAGE_DIRTY);
-
- iPTE_SW(p, r, pte, ptr, mode);
-}
-
-/*
- * Check if PTE can be modified; if not, branch to LABEL.
- * Regardless, restore PTE with the value from PTR when done.
- */
-static void __cpuinit
-build_pte_modifiable(u32 **p, struct uasm_reloc **r,
- unsigned int pte, unsigned int ptr, int scratch,
- enum label_id lid)
-{
- if (use_bbit_insns()) {
- uasm_il_bbit0(p, r, pte, ilog2(_PAGE_WRITE), lid);
- uasm_i_nop(p);
- } else {
- int t = scratch >= 0 ? scratch : pte;
- uasm_i_andi(p, t, pte, _PAGE_WRITE);
- uasm_il_beqz(p, r, t, lid);
- if (pte == t)
- /* You lose the SMP race :-( */
- iPTE_LW(p, pte, ptr);
- }
-}
-
-#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
-
-
-/*
- * R3000 style TLB load/store/modify handlers.
- */
-
-/*
- * This places the pte into ENTRYLO0 and writes it with tlbwi.
- * Then it returns.
- */
-static void __cpuinit
-build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp)
-{
- uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
- uasm_i_mfc0(p, tmp, C0_EPC); /* cp0 delay */
- uasm_i_tlbwi(p);
- uasm_i_jr(p, tmp);
- uasm_i_rfe(p); /* branch delay */
-}
-
-/*
- * This places the pte into ENTRYLO0 and writes it with tlbwi
- * or tlbwr as appropriate. This is because the index register
- * may have the probe fail bit set as a result of a trap on a
- * kseg2 access, i.e. without refill. Then it returns.
- */
-static void __cpuinit
-build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l,
- struct uasm_reloc **r, unsigned int pte,
- unsigned int tmp)
-{
- uasm_i_mfc0(p, tmp, C0_INDEX);
- uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
- uasm_il_bltz(p, r, tmp, label_r3000_write_probe_fail); /* cp0 delay */
- uasm_i_mfc0(p, tmp, C0_EPC); /* branch delay */
- uasm_i_tlbwi(p); /* cp0 delay */
- uasm_i_jr(p, tmp);
- uasm_i_rfe(p); /* branch delay */
- uasm_l_r3000_write_probe_fail(l, *p);
- uasm_i_tlbwr(p); /* cp0 delay */
- uasm_i_jr(p, tmp);
- uasm_i_rfe(p); /* branch delay */
-}
-
-static void __cpuinit
-build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
- unsigned int ptr)
-{
- long pgdc = (long)pgd_current;
-
- uasm_i_mfc0(p, pte, C0_BADVADDR);
- uasm_i_lui(p, ptr, uasm_rel_hi(pgdc)); /* cp0 delay */
- uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
- uasm_i_srl(p, pte, pte, 22); /* load delay */
- uasm_i_sll(p, pte, pte, 2);
- uasm_i_addu(p, ptr, ptr, pte);
- uasm_i_mfc0(p, pte, C0_CONTEXT);
- uasm_i_lw(p, ptr, 0, ptr); /* cp0 delay */
- uasm_i_andi(p, pte, pte, 0xffc); /* load delay */
- uasm_i_addu(p, ptr, ptr, pte);
- uasm_i_lw(p, pte, 0, ptr);
- uasm_i_tlbp(p); /* load delay */
-}
-
-static void __cpuinit build_r3000_tlb_load_handler(void)
-{
- u32 *p = handle_tlbl;
- struct uasm_label *l = labels;
- struct uasm_reloc *r = relocs;
-
- memset(handle_tlbl, 0, sizeof(handle_tlbl));
- memset(labels, 0, sizeof(labels));
- memset(relocs, 0, sizeof(relocs));
-
- build_r3000_tlbchange_handler_head(&p, K0, K1);
- build_pte_present(&p, &r, K0, K1, -1, label_nopage_tlbl);
- uasm_i_nop(&p); /* load delay */
- build_make_valid(&p, &r, K0, K1);
- build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);
-
- uasm_l_nopage_tlbl(&l, p);
- uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
- uasm_i_nop(&p);
-
- if ((p - handle_tlbl) > FASTPATH_SIZE)
- panic("TLB load handler fastpath space exceeded");
-
- uasm_resolve_relocs(relocs, labels);
- pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
- (unsigned int)(p - handle_tlbl));
-
- dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl));
-}
-
-static void __cpuinit build_r3000_tlb_store_handler(void)
-{
- u32 *p = handle_tlbs;
- struct uasm_label *l = labels;
- struct uasm_reloc *r = relocs;
-
- memset(handle_tlbs, 0, sizeof(handle_tlbs));
- memset(labels, 0, sizeof(labels));
- memset(relocs, 0, sizeof(relocs));
-
- build_r3000_tlbchange_handler_head(&p, K0, K1);
- build_pte_writable(&p, &r, K0, K1, -1, label_nopage_tlbs);
- uasm_i_nop(&p); /* load delay */
- build_make_write(&p, &r, K0, K1);
- build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);
-
- uasm_l_nopage_tlbs(&l, p);
- uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
- uasm_i_nop(&p);
-
- if ((p - handle_tlbs) > FASTPATH_SIZE)
- panic("TLB store handler fastpath space exceeded");
-
- uasm_resolve_relocs(relocs, labels);
- pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
- (unsigned int)(p - handle_tlbs));
-
- dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs));
-}
-
-static void __cpuinit build_r3000_tlb_modify_handler(void)
-{
- u32 *p = handle_tlbm;
- struct uasm_label *l = labels;
- struct uasm_reloc *r = relocs;
-
- memset(handle_tlbm, 0, sizeof(handle_tlbm));
- memset(labels, 0, sizeof(labels));
- memset(relocs, 0, sizeof(relocs));
-
- build_r3000_tlbchange_handler_head(&p, K0, K1);
- build_pte_modifiable(&p, &r, K0, K1, -1, label_nopage_tlbm);
- uasm_i_nop(&p); /* load delay */
- build_make_write(&p, &r, K0, K1);
- build_r3000_pte_reload_tlbwi(&p, K0, K1);
-
- uasm_l_nopage_tlbm(&l, p);
- uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
- uasm_i_nop(&p);
-
- if ((p - handle_tlbm) > FASTPATH_SIZE)
- panic("TLB modify handler fastpath space exceeded");
-
- uasm_resolve_relocs(relocs, labels);
- pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
- (unsigned int)(p - handle_tlbm));
-
- dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm));
-}
-#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */
-
-/*
- * R4000 style TLB load/store/modify handlers.
- */
-static struct work_registers __cpuinit
-build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
- struct uasm_reloc **r)
-{
- struct work_registers wr = build_get_work_registers(p);
-
-#ifdef CONFIG_64BIT
- build_get_pmde64(p, l, r, wr.r1, wr.r2); /* get pmd in ptr */
-#else
- build_get_pgde32(p, wr.r1, wr.r2); /* get pgd in ptr */
-#endif
-
-#ifdef CONFIG_HUGETLB_PAGE
- /*
- * For huge tlb entries, pmd doesn't contain an address but
- * instead contains the tlb pte. Check the _PAGE_HUGE bit and
- * see if we need to jump to huge tlb processing.
- */
- build_is_huge_pte(p, r, wr.r1, wr.r2, label_tlb_huge_update);
-#endif
-
- UASM_i_MFC0(p, wr.r1, C0_BADVADDR);
- UASM_i_LW(p, wr.r2, 0, wr.r2);
- UASM_i_SRL(p, wr.r1, wr.r1, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2);
- uasm_i_andi(p, wr.r1, wr.r1, (PTRS_PER_PTE - 1) << PTE_T_LOG2);
- UASM_i_ADDU(p, wr.r2, wr.r2, wr.r1);
-
-#ifdef CONFIG_SMP
- uasm_l_smp_pgtable_change(l, *p);
-#endif
- iPTE_LW(p, wr.r1, wr.r2); /* get even pte */
- if (!m4kc_tlbp_war())
- build_tlb_probe_entry(p);
- return wr;
-}
-
-static void __cpuinit
-build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
- struct uasm_reloc **r, unsigned int tmp,
- unsigned int ptr)
-{
- uasm_i_ori(p, ptr, ptr, sizeof(pte_t));
- uasm_i_xori(p, ptr, ptr, sizeof(pte_t));
- build_update_entries(p, tmp, ptr);
- build_tlb_write_entry(p, l, r, tlb_indexed);
- uasm_l_leave(l, *p);
- build_restore_work_registers(p);
- uasm_i_eret(p); /* return from trap */
-
-#ifdef CONFIG_64BIT
- build_get_pgd_vmalloc64(p, l, r, tmp, ptr, not_refill);
-#endif
-}
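
The ori/xori pair at the top of the tail is a mask-register-free round-down; in C terms (a sketch):

/*
 * "ptr |= n; ptr ^= n;" with n a power of two clears the bit set in
 * n, i.e. computes ptr & ~n. With n == sizeof(pte_t) this aligns ptr
 * down to the even pte of the even/odd pair before the entries are
 * reloaded.
 */
static unsigned long pte_pair_align(unsigned long ptr, unsigned long n)
{
        ptr |= n;       /* uasm_i_ori  */
        ptr ^= n;       /* uasm_i_xori */
        return ptr;
}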
-
-static void __cpuinit build_r4000_tlb_load_handler(void)
-{
- u32 *p = handle_tlbl;
- struct uasm_label *l = labels;
- struct uasm_reloc *r = relocs;
- struct work_registers wr;
-
- memset(handle_tlbl, 0, sizeof(handle_tlbl));
- memset(labels, 0, sizeof(labels));
- memset(relocs, 0, sizeof(relocs));
-
- if (bcm1250_m3_war()) {
- unsigned int segbits = 44;
-
- uasm_i_dmfc0(&p, K0, C0_BADVADDR);
- uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
- uasm_i_xor(&p, K0, K0, K1);
- uasm_i_dsrl_safe(&p, K1, K0, 62);
- uasm_i_dsrl_safe(&p, K0, K0, 12 + 1);
- uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits);
- uasm_i_or(&p, K0, K0, K1);
- uasm_il_bnez(&p, &r, K0, label_leave);
- /* No need for uasm_i_nop */
- }
-
- wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
- build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl);
- if (m4kc_tlbp_war())
- build_tlb_probe_entry(&p);
-
- if (kernel_uses_smartmips_rixi) {
- /*
- * If the page is not _PAGE_VALID, RI or XI could not
- * have triggered it. Skip the expensive test.
- */
- if (use_bbit_insns()) {
- uasm_il_bbit0(&p, &r, wr.r1, ilog2(_PAGE_VALID),
- label_tlbl_goaround1);
- } else {
- uasm_i_andi(&p, wr.r3, wr.r1, _PAGE_VALID);
- uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround1);
- }
- uasm_i_nop(&p);
-
- uasm_i_tlbr(&p);
- /* Examine entrylo 0 or 1 based on ptr. */
- if (use_bbit_insns()) {
- uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8);
- } else {
- uasm_i_andi(&p, wr.r3, wr.r2, sizeof(pte_t));
- uasm_i_beqz(&p, wr.r3, 8);
- }
- /* load it in the delay slot */
- UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO0);
- /* load it if ptr is odd */
- UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO1);
- /*
- * If the entryLo (now in wr.r3) is valid (bit 1), RI or
- * XI must have triggered it.
- */
- if (use_bbit_insns()) {
- uasm_il_bbit1(&p, &r, wr.r3, 1, label_nopage_tlbl);
- uasm_i_nop(&p);
- } else {
- uasm_i_andi(&p, wr.r3, wr.r3, 2);
- uasm_il_bnez(&p, &r, wr.r3, label_nopage_tlbl);
- uasm_i_nop(&p);
- }
- uasm_l_tlbl_goaround1(&l, p);
- }
- build_make_valid(&p, &r, wr.r1, wr.r2);
- build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
-
-#ifdef CONFIG_HUGETLB_PAGE
- /*
- * This is the entry point when build_r4000_tlbchange_handler_head
- * spots a huge page.
- */
- uasm_l_tlb_huge_update(&l, p);
- iPTE_LW(&p, wr.r1, wr.r2);
- build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl);
- build_tlb_probe_entry(&p);
-
- if (kernel_uses_smartmips_rixi) {
- /*
- * If the page is not _PAGE_VALID, RI or XI could not
- * have triggered it. Skip the expensive test.
- */
- if (use_bbit_insns()) {
- uasm_il_bbit0(&p, &r, wr.r1, ilog2(_PAGE_VALID),
- label_tlbl_goaround2);
- } else {
- uasm_i_andi(&p, wr.r3, wr.r1, _PAGE_VALID);
- uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2);
- }
- uasm_i_nop(&p);
-
- uasm_i_tlbr(&p);
- /* Examine entrylo 0 or 1 based on ptr. */
- if (use_bbit_insns()) {
- uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8);
- } else {
- uasm_i_andi(&p, wr.r3, wr.r2, sizeof(pte_t));
- uasm_i_beqz(&p, wr.r3, 8);
- }
- /* load it in the delay slot */
- UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO0);
- /* load it if ptr is odd */
- UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO1);
- /*
- * If the entryLo (now in wr.r3) is valid (bit 1), RI or
- * XI must have triggered it.
- */
- if (use_bbit_insns()) {
- uasm_il_bbit0(&p, &r, wr.r3, 1, label_tlbl_goaround2);
- } else {
- uasm_i_andi(&p, wr.r3, wr.r3, 2);
- uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2);
- }
- if (PM_DEFAULT_MASK == 0)
- uasm_i_nop(&p);
- /*
- * We clobbered C0_PAGEMASK, restore it. On the other branch
- * it is restored in build_huge_tlb_write_entry.
- */
- build_restore_pagemask(&p, &r, wr.r3, label_nopage_tlbl, 0);
-
- uasm_l_tlbl_goaround2(&l, p);
- }
- uasm_i_ori(&p, wr.r1, wr.r1, (_PAGE_ACCESSED | _PAGE_VALID));
- build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
-#endif
-
- uasm_l_nopage_tlbl(&l, p);
- build_restore_work_registers(&p);
- uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
- uasm_i_nop(&p);
-
- if ((p - handle_tlbl) > FASTPATH_SIZE)
- panic("TLB load handler fastpath space exceeded");
-
- uasm_resolve_relocs(relocs, labels);
- pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
- (unsigned int)(p - handle_tlbl));
-
- dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl));
-}
-
-static void __cpuinit build_r4000_tlb_store_handler(void)
-{
- u32 *p = handle_tlbs;
- struct uasm_label *l = labels;
- struct uasm_reloc *r = relocs;
- struct work_registers wr;
-
- memset(handle_tlbs, 0, sizeof(handle_tlbs));
- memset(labels, 0, sizeof(labels));
- memset(relocs, 0, sizeof(relocs));
-
- wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
- build_pte_writable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbs);
- if (m4kc_tlbp_war())
- build_tlb_probe_entry(&p);
- build_make_write(&p, &r, wr.r1, wr.r2);
- build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
-
-#ifdef CONFIG_HUGETLB_PAGE
- /*
- * This is the entry point when
- * build_r4000_tlbchange_handler_head spots a huge page.
- */
- uasm_l_tlb_huge_update(&l, p);
- iPTE_LW(&p, wr.r1, wr.r2);
- build_pte_writable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbs);
- build_tlb_probe_entry(&p);
- uasm_i_ori(&p, wr.r1, wr.r1,
- _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
- build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
-#endif
-
- uasm_l_nopage_tlbs(&l, p);
- build_restore_work_registers(&p);
- uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
- uasm_i_nop(&p);
-
- if ((p - handle_tlbs) > FASTPATH_SIZE)
- panic("TLB store handler fastpath space exceeded");
-
- uasm_resolve_relocs(relocs, labels);
- pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
- (unsigned int)(p - handle_tlbs));
-
- dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs));
-}
-
-static void __cpuinit build_r4000_tlb_modify_handler(void)
-{
- u32 *p = handle_tlbm;
- struct uasm_label *l = labels;
- struct uasm_reloc *r = relocs;
- struct work_registers wr;
-
- memset(handle_tlbm, 0, sizeof(handle_tlbm));
- memset(labels, 0, sizeof(labels));
- memset(relocs, 0, sizeof(relocs));
-
- wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
- build_pte_modifiable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbm);
- if (m4kc_tlbp_war())
- build_tlb_probe_entry(&p);
- /* Present and writable bits set, set accessed and dirty bits. */
- build_make_write(&p, &r, wr.r1, wr.r2);
- build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
-
-#ifdef CONFIG_HUGETLB_PAGE
- /*
- * This is the entry point when
- * build_r4000_tlbchange_handler_head spots a huge page.
- */
- uasm_l_tlb_huge_update(&l, p);
- iPTE_LW(&p, wr.r1, wr.r2);
- build_pte_modifiable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbm);
- build_tlb_probe_entry(&p);
- uasm_i_ori(&p, wr.r1, wr.r1,
- _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
- build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
-#endif
-
- uasm_l_nopage_tlbm(&l, p);
- build_restore_work_registers(&p);
- uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
- uasm_i_nop(&p);
-
- if ((p - handle_tlbm) > FASTPATH_SIZE)
- panic("TLB modify handler fastpath space exceeded");
-
- uasm_resolve_relocs(relocs, labels);
- pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
- (unsigned int)(p - handle_tlbm));
-
- dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm));
-}
-
-void __cpuinit build_tlb_refill_handler(void)
-{
- /*
- * The refill handler is generated per CPU; multi-node systems
- * may have local storage for it. The other handlers are only
- * needed once.
- */
- static int run_once = 0;
-
-#ifdef CONFIG_64BIT
- check_for_high_segbits = current_cpu_data.vmbits > (PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
-#endif
-
- switch (current_cpu_type()) {
- case CPU_R2000:
- case CPU_R3000:
- case CPU_R3000A:
- case CPU_R3081E:
- case CPU_TX3912:
- case CPU_TX3922:
- case CPU_TX3927:
-#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
- build_r3000_tlb_refill_handler();
- if (!run_once) {
- build_r3000_tlb_load_handler();
- build_r3000_tlb_store_handler();
- build_r3000_tlb_modify_handler();
- run_once++;
- }
-#else
- panic("No R3000 TLB refill handler");
-#endif
- break;
-
- case CPU_R6000:
- case CPU_R6000A:
- panic("No R6000 TLB refill handler yet");
- break;
-
- case CPU_R8000:
- panic("No R8000 TLB refill handler yet");
- break;
-
- default:
- if (!run_once) {
- scratch_reg = allocate_kscratch();
-#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
- build_r4000_setup_pgd();
-#endif
- build_r4000_tlb_load_handler();
- build_r4000_tlb_store_handler();
- build_r4000_tlb_modify_handler();
- run_once++;
- }
- build_r4000_tlb_refill_handler();
- }
-}
-
-void __cpuinit flush_tlb_handlers(void)
-{
- local_flush_icache_range((unsigned long)handle_tlbl,
- (unsigned long)handle_tlbl + sizeof(handle_tlbl));
- local_flush_icache_range((unsigned long)handle_tlbs,
- (unsigned long)handle_tlbs + sizeof(handle_tlbs));
- local_flush_icache_range((unsigned long)handle_tlbm,
- (unsigned long)handle_tlbm + sizeof(handle_tlbm));
-#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
- local_flush_icache_range((unsigned long)tlbmiss_handler_setup_pgd,
- (unsigned long)tlbmiss_handler_setup_pgd + sizeof(tlbmiss_handler_setup_pgd));
-#endif
-}
diff --git a/ANDROID_3.4.5/arch/mips/mm/uasm.c b/ANDROID_3.4.5/arch/mips/mm/uasm.c
deleted file mode 100644
index 5fa18515..00000000
--- a/ANDROID_3.4.5/arch/mips/mm/uasm.c
+++ /dev/null
@@ -1,699 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * A small micro-assembler. It is intentionally kept simple: it
- * supports only a subset of instructions and does not try to hide
- * pipeline effects like branch delay slots.
- *
- * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
- * Copyright (C) 2005, 2007 Maciej W. Rozycki
- * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/init.h>
-
-#include <asm/inst.h>
-#include <asm/elf.h>
-#include <asm/bugs.h>
-#include <asm/uasm.h>
-
-enum fields {
- RS = 0x001,
- RT = 0x002,
- RD = 0x004,
- RE = 0x008,
- SIMM = 0x010,
- UIMM = 0x020,
- BIMM = 0x040,
- JIMM = 0x080,
- FUNC = 0x100,
- SET = 0x200,
- SCIMM = 0x400
-};
-
-#define OP_MASK 0x3f
-#define OP_SH 26
-#define RS_MASK 0x1f
-#define RS_SH 21
-#define RT_MASK 0x1f
-#define RT_SH 16
-#define RD_MASK 0x1f
-#define RD_SH 11
-#define RE_MASK 0x1f
-#define RE_SH 6
-#define IMM_MASK 0xffff
-#define IMM_SH 0
-#define JIMM_MASK 0x3ffffff
-#define JIMM_SH 0
-#define FUNC_MASK 0x3f
-#define FUNC_SH 0
-#define SET_MASK 0x7
-#define SET_SH 0
-#define SCIMM_MASK 0xfffff
-#define SCIMM_SH 6
-
-enum opcode {
- insn_invalid,
- insn_addu, insn_addiu, insn_and, insn_andi, insn_beq,
- insn_beql, insn_bgez, insn_bgezl, insn_bltz, insn_bltzl,
- insn_bne, insn_cache, insn_daddu, insn_daddiu, insn_dmfc0,
- insn_dmtc0, insn_dsll, insn_dsll32, insn_dsra, insn_dsrl,
- insn_dsrl32, insn_drotr, insn_drotr32, insn_dsubu, insn_eret,
- insn_j, insn_jal, insn_jr, insn_ld, insn_ll, insn_lld,
- insn_lui, insn_lw, insn_mfc0, insn_mtc0, insn_or, insn_ori,
- insn_pref, insn_rfe, insn_sc, insn_scd, insn_sd, insn_sll,
- insn_sra, insn_srl, insn_rotr, insn_subu, insn_sw, insn_tlbp,
- insn_tlbr, insn_tlbwi, insn_tlbwr, insn_xor, insn_xori,
- insn_dins, insn_dinsm, insn_syscall, insn_bbit0, insn_bbit1,
- insn_lwx, insn_ldx
-};
-
-struct insn {
- enum opcode opcode;
- u32 match;
- enum fields fields;
-};
-
-/* This macro sets the non-variable bits of an instruction. */
-#define M(a, b, c, d, e, f) \
- ((a) << OP_SH \
- | (b) << RS_SH \
- | (c) << RT_SH \
- | (d) << RD_SH \
- | (e) << RE_SH \
- | (f) << FUNC_SH)
-
-static struct insn insn_table[] __uasminitdata = {
- { insn_addiu, M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
- { insn_addu, M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD },
- { insn_and, M(spec_op, 0, 0, 0, 0, and_op), RS | RT | RD },
- { insn_andi, M(andi_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
- { insn_beq, M(beq_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
- { insn_beql, M(beql_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
- { insn_bgez, M(bcond_op, 0, bgez_op, 0, 0, 0), RS | BIMM },
- { insn_bgezl, M(bcond_op, 0, bgezl_op, 0, 0, 0), RS | BIMM },
- { insn_bltz, M(bcond_op, 0, bltz_op, 0, 0, 0), RS | BIMM },
- { insn_bltzl, M(bcond_op, 0, bltzl_op, 0, 0, 0), RS | BIMM },
- { insn_bne, M(bne_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
- { insn_cache, M(cache_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
- { insn_daddiu, M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
- { insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD },
- { insn_dmfc0, M(cop0_op, dmfc_op, 0, 0, 0, 0), RT | RD | SET},
- { insn_dmtc0, M(cop0_op, dmtc_op, 0, 0, 0, 0), RT | RD | SET},
- { insn_dsll, M(spec_op, 0, 0, 0, 0, dsll_op), RT | RD | RE },
- { insn_dsll32, M(spec_op, 0, 0, 0, 0, dsll32_op), RT | RD | RE },
- { insn_dsra, M(spec_op, 0, 0, 0, 0, dsra_op), RT | RD | RE },
- { insn_dsrl, M(spec_op, 0, 0, 0, 0, dsrl_op), RT | RD | RE },
- { insn_dsrl32, M(spec_op, 0, 0, 0, 0, dsrl32_op), RT | RD | RE },
- { insn_drotr, M(spec_op, 1, 0, 0, 0, dsrl_op), RT | RD | RE },
- { insn_drotr32, M(spec_op, 1, 0, 0, 0, dsrl32_op), RT | RD | RE },
- { insn_dsubu, M(spec_op, 0, 0, 0, 0, dsubu_op), RS | RT | RD },
- { insn_eret, M(cop0_op, cop_op, 0, 0, 0, eret_op), 0 },
- { insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM },
- { insn_jal, M(jal_op, 0, 0, 0, 0, 0), JIMM },
- { insn_jr, M(spec_op, 0, 0, 0, 0, jr_op), RS },
- { insn_ld, M(ld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
- { insn_ll, M(ll_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
- { insn_lld, M(lld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
- { insn_lui, M(lui_op, 0, 0, 0, 0, 0), RT | SIMM },
- { insn_lw, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
- { insn_mfc0, M(cop0_op, mfc_op, 0, 0, 0, 0), RT | RD | SET},
- { insn_mtc0, M(cop0_op, mtc_op, 0, 0, 0, 0), RT | RD | SET},
- { insn_or, M(spec_op, 0, 0, 0, 0, or_op), RS | RT | RD },
- { insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
- { insn_pref, M(pref_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
- { insn_rfe, M(cop0_op, cop_op, 0, 0, 0, rfe_op), 0 },
- { insn_sc, M(sc_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
- { insn_scd, M(scd_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
- { insn_sd, M(sd_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
- { insn_sll, M(spec_op, 0, 0, 0, 0, sll_op), RT | RD | RE },
- { insn_sra, M(spec_op, 0, 0, 0, 0, sra_op), RT | RD | RE },
- { insn_srl, M(spec_op, 0, 0, 0, 0, srl_op), RT | RD | RE },
- { insn_rotr, M(spec_op, 1, 0, 0, 0, srl_op), RT | RD | RE },
- { insn_subu, M(spec_op, 0, 0, 0, 0, subu_op), RS | RT | RD },
- { insn_sw, M(sw_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
- { insn_tlbp, M(cop0_op, cop_op, 0, 0, 0, tlbp_op), 0 },
- { insn_tlbr, M(cop0_op, cop_op, 0, 0, 0, tlbr_op), 0 },
- { insn_tlbwi, M(cop0_op, cop_op, 0, 0, 0, tlbwi_op), 0 },
- { insn_tlbwr, M(cop0_op, cop_op, 0, 0, 0, tlbwr_op), 0 },
- { insn_xor, M(spec_op, 0, 0, 0, 0, xor_op), RS | RT | RD },
- { insn_xori, M(xori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
- { insn_dins, M(spec3_op, 0, 0, 0, 0, dins_op), RS | RT | RD | RE },
- { insn_dinsm, M(spec3_op, 0, 0, 0, 0, dinsm_op), RS | RT | RD | RE },
- { insn_syscall, M(spec_op, 0, 0, 0, 0, syscall_op), SCIMM},
- { insn_bbit0, M(lwc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
- { insn_bbit1, M(swc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
- { insn_lwx, M(spec3_op, 0, 0, 0, lwx_op, lx_op), RS | RT | RD },
- { insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD },
- { insn_invalid, 0, 0 }
-};
-
-#undef M
-
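
To see how the fixed and variable bits combine, consider addu: its
table entry M(spec_op, 0, 0, 0, 0, addu_op) contributes only the
function field, and build_insn() later ORs in the register fields via
the build_rs()/build_rt()/build_rd() helpers that follow. A worked
example, assuming spec_op == 0x00 and addu_op == 0x21 as defined in
<asm/inst.h>:

	u32 word = 0x00000021;	/* M(spec_op, 0, 0, 0, 0, addu_op) */

	word |= build_rs(3) | build_rt(4) | build_rd(2);
	/* word == 0x00641021, i.e. "addu $2, $3, $4" */
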
-static inline __uasminit u32 build_rs(u32 arg)
-{
- WARN(arg & ~RS_MASK, KERN_WARNING "Micro-assembler field overflow\n");
-
- return (arg & RS_MASK) << RS_SH;
-}
-
-static inline __uasminit u32 build_rt(u32 arg)
-{
- WARN(arg & ~RT_MASK, KERN_WARNING "Micro-assembler field overflow\n");
-
- return (arg & RT_MASK) << RT_SH;
-}
-
-static inline __uasminit u32 build_rd(u32 arg)
-{
- WARN(arg & ~RD_MASK, KERN_WARNING "Micro-assembler field overflow\n");
-
- return (arg & RD_MASK) << RD_SH;
-}
-
-static inline __uasminit u32 build_re(u32 arg)
-{
- WARN(arg & ~RE_MASK, KERN_WARNING "Micro-assembler field overflow\n");
-
- return (arg & RE_MASK) << RE_SH;
-}
-
-static inline __uasminit u32 build_simm(s32 arg)
-{
- WARN(arg > 0x7fff || arg < -0x8000,
- KERN_WARNING "Micro-assembler field overflow\n");
-
- return arg & 0xffff;
-}
-
-static inline __uasminit u32 build_uimm(u32 arg)
-{
- WARN(arg & ~IMM_MASK, KERN_WARNING "Micro-assembler field overflow\n");
-
- return arg & IMM_MASK;
-}
-
-static inline __uasminit u32 build_bimm(s32 arg)
-{
- WARN(arg > 0x1ffff || arg < -0x20000,
- KERN_WARNING "Micro-assembler field overflow\n");
-
- WARN(arg & 0x3, KERN_WARNING "Invalid micro-assembler branch target\n");
-
- return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 2) & 0x7fff);
-}
-
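
A worked example of the branch encoding above: the PC-relative byte
offset becomes a 15-bit word offset plus an explicit sign bit, so a
backward branch of -8 bytes encodes as follows:

	/* build_bimm(-8): -8 >> 2 == -2, masked to 0x7ffe; the value
	 * is negative, so bit 15 is set, giving 0xfffe, which is the
	 * standard 16-bit two's-complement encoding of a -2 word
	 * offset.
	 */
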
-static inline __uasminit u32 build_jimm(u32 arg)
-{
- WARN(arg & ~(JIMM_MASK << 2),
- KERN_WARNING "Micro-assembler field overflow\n");
-
- return (arg >> 2) & JIMM_MASK;
-}
-
-static inline __uasminit u32 build_scimm(u32 arg)
-{
- WARN(arg & ~SCIMM_MASK,
- KERN_WARNING "Micro-assembler field overflow\n");
-
- return (arg & SCIMM_MASK) << SCIMM_SH;
-}
-
-static inline __uasminit u32 build_func(u32 arg)
-{
- WARN(arg & ~FUNC_MASK, KERN_WARNING "Micro-assembler field overflow\n");
-
- return arg & FUNC_MASK;
-}
-
-static inline __uasminit u32 build_set(u32 arg)
-{
- WARN(arg & ~SET_MASK, KERN_WARNING "Micro-assembler field overflow\n");
-
- return arg & SET_MASK;
-}
-
-/*
- * The order of opcode arguments is implicitly left to right,
- * starting with RS and ending with FUNC or IMM.
- */
-static void __uasminit build_insn(u32 **buf, enum opcode opc, ...)
-{
- struct insn *ip = NULL;
- unsigned int i;
- va_list ap;
- u32 op;
-
- for (i = 0; insn_table[i].opcode != insn_invalid; i++)
- if (insn_table[i].opcode == opc) {
- ip = &insn_table[i];
- break;
- }
-
- if (!ip || (opc == insn_daddiu && r4k_daddiu_bug()))
- panic("Unsupported Micro-assembler instruction %d", opc);
-
- op = ip->match;
- va_start(ap, opc);
- if (ip->fields & RS)
- op |= build_rs(va_arg(ap, u32));
- if (ip->fields & RT)
- op |= build_rt(va_arg(ap, u32));
- if (ip->fields & RD)
- op |= build_rd(va_arg(ap, u32));
- if (ip->fields & RE)
- op |= build_re(va_arg(ap, u32));
- if (ip->fields & SIMM)
- op |= build_simm(va_arg(ap, s32));
- if (ip->fields & UIMM)
- op |= build_uimm(va_arg(ap, u32));
- if (ip->fields & BIMM)
- op |= build_bimm(va_arg(ap, s32));
- if (ip->fields & JIMM)
- op |= build_jimm(va_arg(ap, u32));
- if (ip->fields & FUNC)
- op |= build_func(va_arg(ap, u32));
- if (ip->fields & SET)
- op |= build_set(va_arg(ap, u32));
- if (ip->fields & SCIMM)
- op |= build_scimm(va_arg(ap, u32));
- va_end(ap);
-
- **buf = op;
- (*buf)++;
-}
-
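
In practice callers go through the uasm_i_* wrappers defined below, but
the effect is the same as calling build_insn() directly: fields are
consumed in RS, RT, SIMM order for an I-type instruction. A sketch
using the standard MIPS encoding of "addiu $sp, $sp, -32":

	u32 buf[1], *p = buf;

	uasm_i_addiu(&p, 29, 29, -32);	/* register 29 is $sp */
	/* buf[0] == 0x27bdffe0: op 0x09, rs 29, rt 29, imm 0xffe0 */
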
-#define I_u1u2u3(op) \
-Ip_u1u2u3(op) \
-{ \
- build_insn(buf, insn##op, a, b, c); \
-} \
-UASM_EXPORT_SYMBOL(uasm_i##op);
-
-#define I_u2u1u3(op) \
-Ip_u2u1u3(op) \
-{ \
- build_insn(buf, insn##op, b, a, c); \
-} \
-UASM_EXPORT_SYMBOL(uasm_i##op);
-
-#define I_u3u1u2(op) \
-Ip_u3u1u2(op) \
-{ \
- build_insn(buf, insn##op, b, c, a); \
-} \
-UASM_EXPORT_SYMBOL(uasm_i##op);
-
-#define I_u1u2s3(op) \
-Ip_u1u2s3(op) \
-{ \
- build_insn(buf, insn##op, a, b, c); \
-} \
-UASM_EXPORT_SYMBOL(uasm_i##op);
-
-#define I_u2s3u1(op) \
-Ip_u2s3u1(op) \
-{ \
- build_insn(buf, insn##op, c, a, b); \
-} \
-UASM_EXPORT_SYMBOL(uasm_i##op);
-
-#define I_u2u1s3(op) \
-Ip_u2u1s3(op) \
-{ \
- build_insn(buf, insn##op, b, a, c); \
-} \
-UASM_EXPORT_SYMBOL(uasm_i##op);
-
-#define I_u2u1msbu3(op) \
-Ip_u2u1msbu3(op) \
-{ \
- build_insn(buf, insn##op, b, a, c+d-1, c); \
-} \
-UASM_EXPORT_SYMBOL(uasm_i##op);
-
-#define I_u2u1msb32u3(op) \
-Ip_u2u1msbu3(op) \
-{ \
- build_insn(buf, insn##op, b, a, c+d-33, c); \
-} \
-UASM_EXPORT_SYMBOL(uasm_i##op);
-
-#define I_u1u2(op) \
-Ip_u1u2(op) \
-{ \
- build_insn(buf, insn##op, a, b); \
-} \
-UASM_EXPORT_SYMBOL(uasm_i##op);
-
-#define I_u1s2(op) \
-Ip_u1s2(op) \
-{ \
- build_insn(buf, insn##op, a, b); \
-} \
-UASM_EXPORT_SYMBOL(uasm_i##op);
-
-#define I_u1(op) \
-Ip_u1(op) \
-{ \
- build_insn(buf, insn##op, a); \
-} \
-UASM_EXPORT_SYMBOL(uasm_i##op);
-
-#define I_0(op) \
-Ip_0(op) \
-{ \
- build_insn(buf, insn##op); \
-} \
-UASM_EXPORT_SYMBOL(uasm_i##op);
-
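
The macros above only shuffle arguments so that the public helpers can
take the destination register first while build_insn() consumes fields
in RS, RT, RD order. As a sketch, I_u3u1u2(_addu) below expands to
roughly the following (the exact prototype comes from Ip_u3u1u2 in
<asm/uasm.h>):

	void uasm_i_addu(u32 **buf, unsigned int a, unsigned int b,
			 unsigned int c)
	{
		/* a is rd (destination), b is rs, c is rt */
		build_insn(buf, insn_addu, b, c, a);
	}
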
-I_u2u1s3(_addiu)
-I_u3u1u2(_addu)
-I_u2u1u3(_andi)
-I_u3u1u2(_and)
-I_u1u2s3(_beq)
-I_u1u2s3(_beql)
-I_u1s2(_bgez)
-I_u1s2(_bgezl)
-I_u1s2(_bltz)
-I_u1s2(_bltzl)
-I_u1u2s3(_bne)
-I_u2s3u1(_cache)
-I_u1u2u3(_dmfc0)
-I_u1u2u3(_dmtc0)
-I_u2u1s3(_daddiu)
-I_u3u1u2(_daddu)
-I_u2u1u3(_dsll)
-I_u2u1u3(_dsll32)
-I_u2u1u3(_dsra)
-I_u2u1u3(_dsrl)
-I_u2u1u3(_dsrl32)
-I_u2u1u3(_drotr)
-I_u2u1u3(_drotr32)
-I_u3u1u2(_dsubu)
-I_0(_eret)
-I_u1(_j)
-I_u1(_jal)
-I_u1(_jr)
-I_u2s3u1(_ld)
-I_u2s3u1(_ll)
-I_u2s3u1(_lld)
-I_u1s2(_lui)
-I_u2s3u1(_lw)
-I_u1u2u3(_mfc0)
-I_u1u2u3(_mtc0)
-I_u2u1u3(_ori)
-I_u3u1u2(_or)
-I_0(_rfe)
-I_u2s3u1(_sc)
-I_u2s3u1(_scd)
-I_u2s3u1(_sd)
-I_u2u1u3(_sll)
-I_u2u1u3(_sra)
-I_u2u1u3(_srl)
-I_u2u1u3(_rotr)
-I_u3u1u2(_subu)
-I_u2s3u1(_sw)
-I_0(_tlbp)
-I_0(_tlbr)
-I_0(_tlbwi)
-I_0(_tlbwr)
-I_u3u1u2(_xor)
-I_u2u1u3(_xori)
-I_u2u1msbu3(_dins)
-I_u2u1msb32u3(_dinsm)
-I_u1(_syscall)
-I_u1u2s3(_bbit0)
-I_u1u2s3(_bbit1)
-I_u3u1u2(_lwx)
-I_u3u1u2(_ldx)
-
-#ifdef CONFIG_CPU_CAVIUM_OCTEON
-#include <asm/octeon/octeon.h>
-void __uasminit uasm_i_pref(u32 **buf, unsigned int a, signed int b,
- unsigned int c)
-{
- if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X) && a <= 24 && a != 5)
- /*
- * As per erratum Core-14449, replace prefetches 0-4,
- * 6-24 with 'pref 28'.
- */
- build_insn(buf, insn_pref, c, 28, b);
- else
- build_insn(buf, insn_pref, c, a, b);
-}
-UASM_EXPORT_SYMBOL(uasm_i_pref);
-#else
-I_u2s3u1(_pref)
-#endif
-
-/* Handle labels. */
-void __uasminit uasm_build_label(struct uasm_label **lab, u32 *addr, int lid)
-{
- (*lab)->addr = addr;
- (*lab)->lab = lid;
- (*lab)++;
-}
-UASM_EXPORT_SYMBOL(uasm_build_label);
-
-int __uasminit uasm_in_compat_space_p(long addr)
-{
-	/* Is this address in 32-bit compat space? */
-#ifdef CONFIG_64BIT
- return (((addr) & 0xffffffff00000000L) == 0xffffffff00000000L);
-#else
- return 1;
-#endif
-}
-UASM_EXPORT_SYMBOL(uasm_in_compat_space_p);
-
-static int __uasminit uasm_rel_highest(long val)
-{
-#ifdef CONFIG_64BIT
- return ((((val + 0x800080008000L) >> 48) & 0xffff) ^ 0x8000) - 0x8000;
-#else
- return 0;
-#endif
-}
-
-static int __uasminit uasm_rel_higher(long val)
-{
-#ifdef CONFIG_64BIT
- return ((((val + 0x80008000L) >> 32) & 0xffff) ^ 0x8000) - 0x8000;
-#else
- return 0;
-#endif
-}
-
-int __uasminit uasm_rel_hi(long val)
-{
- return ((((val + 0x8000L) >> 16) & 0xffff) ^ 0x8000) - 0x8000;
-}
-UASM_EXPORT_SYMBOL(uasm_rel_hi);
-
-int __uasminit uasm_rel_lo(long val)
-{
- return ((val & 0xffff) ^ 0x8000) - 0x8000;
-}
-UASM_EXPORT_SYMBOL(uasm_rel_lo);
-
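
The XOR/subtract trick in uasm_rel_hi() and uasm_rel_lo() above keeps
the pair consistent when the low half is negative as a 16-bit
immediate. A worked example for val == 0x12349876:

	/* uasm_rel_lo(val) == 0x9876 sign-extended == -0x678a, so
	 * uasm_rel_hi(val) rounds up to 0x1235 to compensate:
	 *
	 *	(0x1235 << 16) + (-0x678a) == 0x12349876
	 *
	 * which is exactly what an emitted lui/addiu pair computes.
	 */
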
-void __uasminit UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr)
-{
- if (!uasm_in_compat_space_p(addr)) {
- uasm_i_lui(buf, rs, uasm_rel_highest(addr));
- if (uasm_rel_higher(addr))
- uasm_i_daddiu(buf, rs, rs, uasm_rel_higher(addr));
- if (uasm_rel_hi(addr)) {
- uasm_i_dsll(buf, rs, rs, 16);
- uasm_i_daddiu(buf, rs, rs, uasm_rel_hi(addr));
- uasm_i_dsll(buf, rs, rs, 16);
- } else
- uasm_i_dsll32(buf, rs, rs, 0);
- } else
- uasm_i_lui(buf, rs, uasm_rel_hi(addr));
-}
-UASM_EXPORT_SYMBOL(UASM_i_LA_mostly);
-
-void __uasminit UASM_i_LA(u32 **buf, unsigned int rs, long addr)
-{
- UASM_i_LA_mostly(buf, rs, addr);
- if (uasm_rel_lo(addr)) {
- if (!uasm_in_compat_space_p(addr))
- uasm_i_daddiu(buf, rs, rs, uasm_rel_lo(addr));
- else
- uasm_i_addiu(buf, rs, rs, uasm_rel_lo(addr));
- }
-}
-UASM_EXPORT_SYMBOL(UASM_i_LA);
-
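
For a 64-bit address outside compat space (and with nonzero higher and
hi parts), the two functions above emit the classic six-instruction
load sequence:

	/* lui    rs, highest(addr)
	 * daddiu rs, rs, higher(addr)
	 * dsll   rs, rs, 16
	 * daddiu rs, rs, hi(addr)
	 * dsll   rs, rs, 16
	 * daddiu rs, rs, lo(addr)
	 */
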
-/* Handle relocations. */
-void __uasminit
-uasm_r_mips_pc16(struct uasm_reloc **rel, u32 *addr, int lid)
-{
- (*rel)->addr = addr;
- (*rel)->type = R_MIPS_PC16;
- (*rel)->lab = lid;
- (*rel)++;
-}
-UASM_EXPORT_SYMBOL(uasm_r_mips_pc16);
-
-static inline void __uasminit
-__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
-{
- long laddr = (long)lab->addr;
- long raddr = (long)rel->addr;
-
- switch (rel->type) {
- case R_MIPS_PC16:
- *rel->addr |= build_bimm(laddr - (raddr + 4));
- break;
-
- default:
- panic("Unsupported Micro-assembler relocation %d",
- rel->type);
- }
-}
-
-void __uasminit
-uasm_resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
-{
- struct uasm_label *l;
-
- for (; rel->lab != UASM_LABEL_INVALID; rel++)
- for (l = lab; l->lab != UASM_LABEL_INVALID; l++)
- if (rel->lab == l->lab)
- __resolve_relocs(rel, l);
-}
-UASM_EXPORT_SYMBOL(uasm_resolve_relocs);
-
-void __uasminit
-uasm_move_relocs(struct uasm_reloc *rel, u32 *first, u32 *end, long off)
-{
- for (; rel->lab != UASM_LABEL_INVALID; rel++)
- if (rel->addr >= first && rel->addr < end)
- rel->addr += off;
-}
-UASM_EXPORT_SYMBOL(uasm_move_relocs);
-
-void __uasminit
-uasm_move_labels(struct uasm_label *lab, u32 *first, u32 *end, long off)
-{
- for (; lab->lab != UASM_LABEL_INVALID; lab++)
- if (lab->addr >= first && lab->addr < end)
- lab->addr += off;
-}
-UASM_EXPORT_SYMBOL(uasm_move_labels);
-
-void __uasminit
-uasm_copy_handler(struct uasm_reloc *rel, struct uasm_label *lab, u32 *first,
- u32 *end, u32 *target)
-{
- long off = (long)(target - first);
-
- memcpy(target, first, (end - first) * sizeof(u32));
-
- uasm_move_relocs(rel, first, end, off);
- uasm_move_labels(lab, first, end, off);
-}
-UASM_EXPORT_SYMBOL(uasm_copy_handler);
-
-int __uasminit uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr)
-{
- for (; rel->lab != UASM_LABEL_INVALID; rel++) {
- if (rel->addr == addr
- && (rel->type == R_MIPS_PC16
- || rel->type == R_MIPS_26))
- return 1;
- }
-
- return 0;
-}
-UASM_EXPORT_SYMBOL(uasm_insn_has_bdelay);
-
-/* Convenience functions for labeled branches. */
-void __uasminit
-uasm_il_bltz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
-{
- uasm_r_mips_pc16(r, *p, lid);
- uasm_i_bltz(p, reg, 0);
-}
-UASM_EXPORT_SYMBOL(uasm_il_bltz);
-
-void __uasminit
-uasm_il_b(u32 **p, struct uasm_reloc **r, int lid)
-{
- uasm_r_mips_pc16(r, *p, lid);
- uasm_i_b(p, 0);
-}
-UASM_EXPORT_SYMBOL(uasm_il_b);
-
-void __uasminit
-uasm_il_beqz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
-{
- uasm_r_mips_pc16(r, *p, lid);
- uasm_i_beqz(p, reg, 0);
-}
-UASM_EXPORT_SYMBOL(uasm_il_beqz);
-
-void __uasminit
-uasm_il_beqzl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
-{
- uasm_r_mips_pc16(r, *p, lid);
- uasm_i_beqzl(p, reg, 0);
-}
-UASM_EXPORT_SYMBOL(uasm_il_beqzl);
-
-void __uasminit
-uasm_il_bne(u32 **p, struct uasm_reloc **r, unsigned int reg1,
- unsigned int reg2, int lid)
-{
- uasm_r_mips_pc16(r, *p, lid);
- uasm_i_bne(p, reg1, reg2, 0);
-}
-UASM_EXPORT_SYMBOL(uasm_il_bne);
-
-void __uasminit
-uasm_il_bnez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
-{
- uasm_r_mips_pc16(r, *p, lid);
- uasm_i_bnez(p, reg, 0);
-}
-UASM_EXPORT_SYMBOL(uasm_il_bnez);
-
-void __uasminit
-uasm_il_bgezl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
-{
- uasm_r_mips_pc16(r, *p, lid);
- uasm_i_bgezl(p, reg, 0);
-}
-UASM_EXPORT_SYMBOL(uasm_il_bgezl);
-
-void __uasminit
-uasm_il_bgez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
-{
- uasm_r_mips_pc16(r, *p, lid);
- uasm_i_bgez(p, reg, 0);
-}
-UASM_EXPORT_SYMBOL(uasm_il_bgez);
-
-void __uasminit
-uasm_il_bbit0(u32 **p, struct uasm_reloc **r, unsigned int reg,
- unsigned int bit, int lid)
-{
- uasm_r_mips_pc16(r, *p, lid);
- uasm_i_bbit0(p, reg, bit, 0);
-}
-UASM_EXPORT_SYMBOL(uasm_il_bbit0);
-
-void __uasminit
-uasm_il_bbit1(u32 **p, struct uasm_reloc **r, unsigned int reg,
- unsigned int bit, int lid)
-{
- uasm_r_mips_pc16(r, *p, lid);
- uasm_i_bbit1(p, reg, bit, 0);
-}
-UASM_EXPORT_SYMBOL(uasm_il_bbit1);
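
A minimal end-to-end sketch of how tlbex.c drives this API: emit a
forward branch whose target is not yet known, mark the target with a
label, then patch the branch. The label id MY_LABEL, the buffer sizes
and build_stub() are hypothetical stand-ins; every function called is
from the source above or from <asm/uasm.h>:

	enum { MY_LABEL = 1 };			/* UASM_LABEL_INVALID is 0 */

	static u32 buf[32];
	static struct uasm_label labels[4];	/* zero-terminated */
	static struct uasm_reloc relocs[4];	/* zero-terminated */

	static void build_stub(void)
	{
		struct uasm_label *l = labels;
		struct uasm_reloc *r = relocs;
		u32 *p = buf;

		uasm_il_beqz(&p, &r, 2, MY_LABEL);	/* if ($2 == 0) goto out */
		uasm_i_nop(&p);				/* branch delay slot */
		uasm_i_addiu(&p, 2, 2, 1);		/* fall-through path */
		uasm_build_label(&l, p, MY_LABEL);	/* out: */
		uasm_i_jr(&p, 31);			/* return via $ra */
		uasm_i_nop(&p);
		uasm_resolve_relocs(relocs, labels);	/* fix up the beqz */
	}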