Diffstat (limited to 'ANDROID_3.4.5/arch/parisc/mm')
-rw-r--r--  ANDROID_3.4.5/arch/parisc/mm/Makefile      5
-rw-r--r--  ANDROID_3.4.5/arch/parisc/mm/fault.c     270
-rw-r--r--  ANDROID_3.4.5/arch/parisc/mm/init.c     1109
-rw-r--r--  ANDROID_3.4.5/arch/parisc/mm/ioremap.c    99
4 files changed, 0 insertions, 1483 deletions
diff --git a/ANDROID_3.4.5/arch/parisc/mm/Makefile b/ANDROID_3.4.5/arch/parisc/mm/Makefile
deleted file mode 100644
index 758ceefb..00000000
--- a/ANDROID_3.4.5/arch/parisc/mm/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# Makefile for arch/parisc/mm
-#
-
-obj-y := init.o fault.o ioremap.o
diff --git a/ANDROID_3.4.5/arch/parisc/mm/fault.c b/ANDROID_3.4.5/arch/parisc/mm/fault.c
deleted file mode 100644
index 18162ce4..00000000
--- a/ANDROID_3.4.5/arch/parisc/mm/fault.c
+++ /dev/null
@@ -1,270 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- *
- * Copyright (C) 1995, 1996, 1997, 1998 by Ralf Baechle
- * Copyright 1999 SuSE GmbH (Philipp Rumpf, prumpf@tux.org)
- * Copyright 1999 Hewlett Packard Co.
- *
- */
-
-#include <linux/mm.h>
-#include <linux/ptrace.h>
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-
-#include <asm/uaccess.h>
-#include <asm/traps.h>
-
-#define PRINT_USER_FAULTS /* (turn this on if you want user faults to be */
- /* dumped to the console via printk) */
-
-
-/* Various other important fields */
-#define bit22set(x) (x & 0x00000200)
-#define bits23_25set(x) (x & 0x000001c0)
-#define isGraphicsFlushRead(x) ((x & 0xfc003fdf) == 0x04001a80)
- /* extended opcode is 0x6a */
-
-#define BITSSET 0x1c0 /* for identifying LDCW */
-
-
-DEFINE_PER_CPU(struct exception_data, exception_data);
-
-/*
- * parisc_acctyp(unsigned long code, unsigned int inst) --
- * Given a PA-RISC memory access instruction, determine if the
- * instruction would perform a memory read or memory write
- * operation.
- *
- * This function assumes that the given instruction is a memory access
- * instruction (i.e. you should really only call it if you know that
- * the instruction has generated some sort of a memory access fault).
- *
- * Returns:
- * VM_READ if read operation
- * VM_WRITE if write operation
- * VM_EXEC if execute operation
- */
-static unsigned long
-parisc_acctyp(unsigned long code, unsigned int inst)
-{
- if (code == 6 || code == 16)
- return VM_EXEC;
-
- switch (inst & 0xf0000000) {
- case 0x40000000: /* load */
- case 0x50000000: /* new load */
- return VM_READ;
-
- case 0x60000000: /* store */
- case 0x70000000: /* new store */
- return VM_WRITE;
-
- case 0x20000000: /* coproc */
- case 0x30000000: /* coproc2 */
- if (bit22set(inst))
- return VM_WRITE;
- /* fall through */
- case 0x0: /* indexed/memory management */
- if (bit22set(inst)) {
- /*
- * Check for the 'Graphics Flush Read' instruction.
- * It resembles an FDC instruction, except for bits
- * 20 and 21. Any combination other than zero will
- * utilize the block mover functionality on some
- * older PA-RISC platforms. The case where a block
- * move is performed from VM to graphics IO space
- * should be treated as a READ.
- *
- * The significance of bits 20,21 in the FDC
- * instruction is:
- *
- * 00 Flush data cache (normal instruction behavior)
- * 01 Graphics flush write (IO space -> VM)
- * 10 Graphics flush read (VM -> IO space)
- * 11 Graphics flush read/write (VM <-> IO space)
- */
- if (isGraphicsFlushRead(inst))
- return VM_READ;
- return VM_WRITE;
- } else {
- /*
- * Check for LDCWX and LDCWS (semaphore instructions).
- * If bits 23 through 25 are all 1's it is one of
- * the above two instructions and is a write.
- *
- * Note: With the limited bits we are looking at,
- * this will also catch PROBEW and PROBEWI. However,
- * these should never get in here because they don't
- * generate exceptions of the type:
- * Data TLB miss fault/data page fault
- * Data memory protection trap
- */
- if (bits23_25set(inst) == BITSSET)
- return VM_WRITE;
- }
- return VM_READ; /* Default */
- }
- return VM_READ; /* Default */
-}
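The switch above classifies an access by the top four bits of the instruction word. A minimal userspace sketch of that top-level decode, with the VM_* flags stubbed out for illustration (the kernel's values differ) and the two test words purely hypothetical:

#include <stdio.h>

/* Stub flags for illustration only; not the kernel's VM_* values. */
#define VM_READ  0x1
#define VM_WRITE 0x2

/* Classify a PA-RISC access by the top four bits of the
 * instruction word, mirroring the switch in parisc_acctyp(). */
static unsigned long classify(unsigned int inst)
{
	switch (inst & 0xf0000000) {
	case 0x40000000: case 0x50000000: return VM_READ;  /* loads */
	case 0x60000000: case 0x70000000: return VM_WRITE; /* stores */
	default: return VM_READ; /* conservative default, as above */
	}
}

int main(void)
{
	unsigned int ld = 0x48010000; /* hypothetical load-class word */
	unsigned int st = 0x68010000; /* hypothetical store-class word */
	printf("%lx %lx\n", classify(ld), classify(st));
	return 0;
}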
-
-#undef bit22set
-#undef bits23_25set
-#undef isGraphicsFlushRead
-#undef BITSSET
-
-
-#if 0
-/* This is the treewalk to find a vma which is the highest that has
- * a start < addr. We're using find_vma_prev instead right now, but
- * we might want to use this at some point in the future. Probably
- * not, but I want it committed to CVS so I don't lose it :-)
- */
- while (tree != vm_avl_empty) {
- if (tree->vm_start > addr) {
- tree = tree->vm_avl_left;
- } else {
- prev = tree;
- if (prev->vm_next == NULL)
- break;
- if (prev->vm_next->vm_start > addr)
- break;
- tree = tree->vm_avl_right;
- }
- }
-#endif
-
-int fixup_exception(struct pt_regs *regs)
-{
- const struct exception_table_entry *fix;
-
- fix = search_exception_tables(regs->iaoq[0]);
- if (fix) {
- struct exception_data *d;
- d = &__get_cpu_var(exception_data);
- d->fault_ip = regs->iaoq[0];
- d->fault_space = regs->isr;
- d->fault_addr = regs->ior;
-
- regs->iaoq[0] = ((fix->fixup) & ~3);
- /*
- * NOTE: In some cases the faulting instruction
- * may be in the delay slot of a branch. We
- * don't want to take the branch, so we don't
- * increment iaoq[1], instead we set it to be
- * iaoq[0]+4, and clear the B bit in the PSW
- */
- regs->iaoq[1] = regs->iaoq[0] + 4;
- regs->gr[0] &= ~PSW_B; /* IPSW in gr[0] */
-
- return 1;
- }
-
- return 0;
-}
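fixup_exception() redirects a faulting kernel instruction to recovery code recorded in the exception table. A hedged sketch of the underlying idea, with hypothetical types; the real kernel keeps the table sorted and binary-searches it via search_exception_tables():

/* Each entry pairs a potentially faulting instruction address with
 * the address of its recovery (fixup) stub. */
struct ex_entry { unsigned long insn, fixup; };

/* Linear-search stand-in for search_exception_tables(). */
static const struct ex_entry *find_fixup(const struct ex_entry *tbl,
					 int n, unsigned long ip)
{
	int i;
	for (i = 0; i < n; i++)
		if (tbl[i].insn == ip)
			return &tbl[i];
	return 0; /* no fixup: the fault is fatal for kernel mode */
}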
-
-void do_page_fault(struct pt_regs *regs, unsigned long code,
- unsigned long address)
-{
- struct vm_area_struct *vma, *prev_vma;
- struct task_struct *tsk = current;
- struct mm_struct *mm = tsk->mm;
- unsigned long acc_type;
- int fault;
-
- if (in_atomic() || !mm)
- goto no_context;
-
- down_read(&mm->mmap_sem);
- vma = find_vma_prev(mm, address, &prev_vma);
- if (!vma || address < vma->vm_start)
- goto check_expansion;
-/*
- * Ok, we have a good vm_area for this memory access. We still need to
- * check the access permissions.
- */
-
-good_area:
-
- acc_type = parisc_acctyp(code,regs->iir);
-
- if ((vma->vm_flags & acc_type) != acc_type)
- goto bad_area;
-
- /*
- * If for any reason at all we couldn't handle the fault, make
- * sure we exit gracefully rather than endlessly redo the
- * fault.
- */
-
- fault = handle_mm_fault(mm, vma, address, (acc_type & VM_WRITE) ? FAULT_FLAG_WRITE : 0);
- if (unlikely(fault & VM_FAULT_ERROR)) {
- /*
- * We hit a shared mapping outside of the file, or some
- * other thing happened to us that made us unable to
- * handle the page fault gracefully.
- */
- if (fault & VM_FAULT_OOM)
- goto out_of_memory;
- else if (fault & VM_FAULT_SIGBUS)
- goto bad_area;
- BUG();
- }
- if (fault & VM_FAULT_MAJOR)
- current->maj_flt++;
- else
- current->min_flt++;
- up_read(&mm->mmap_sem);
- return;
-
-check_expansion:
- vma = prev_vma;
- if (vma && (expand_stack(vma, address) == 0))
- goto good_area;
-
-/*
- * Something tried to access memory that isn't in our memory map..
- */
-bad_area:
- up_read(&mm->mmap_sem);
-
- if (user_mode(regs)) {
- struct siginfo si;
-
-#ifdef PRINT_USER_FAULTS
- printk(KERN_DEBUG "\n");
- printk(KERN_DEBUG "do_page_fault() pid=%d command='%s' type=%lu address=0x%08lx\n",
- task_pid_nr(tsk), tsk->comm, code, address);
- if (vma) {
- printk(KERN_DEBUG "vm_start = 0x%08lx, vm_end = 0x%08lx\n",
- vma->vm_start, vma->vm_end);
- }
- show_regs(regs);
-#endif
- /* FIXME: actually we need to get the signo and code correct */
- si.si_signo = SIGSEGV;
- si.si_errno = 0;
- si.si_code = SEGV_MAPERR;
- si.si_addr = (void __user *) address;
- force_sig_info(SIGSEGV, &si, current);
- return;
- }
-
-no_context:
-
- if (!user_mode(regs) && fixup_exception(regs)) {
- return;
- }
-
- parisc_terminate("Bad Address (null pointer deref?)", regs, code, address);
-
- out_of_memory:
- up_read(&mm->mmap_sem);
- if (!user_mode(regs))
- goto no_context;
- pagefault_out_of_memory();
-}
diff --git a/ANDROID_3.4.5/arch/parisc/mm/init.c b/ANDROID_3.4.5/arch/parisc/mm/init.c
deleted file mode 100644
index 82f364e2..00000000
--- a/ANDROID_3.4.5/arch/parisc/mm/init.c
+++ /dev/null
@@ -1,1109 +0,0 @@
-/*
- * linux/arch/parisc/mm/init.c
- *
- * Copyright (C) 1995 Linus Torvalds
- * Copyright 1999 SuSE GmbH
- * changed by Philipp Rumpf
- * Copyright 1999 Philipp Rumpf (prumpf@tux.org)
- * Copyright 2004 Randolph Chung (tausq@debian.org)
- * Copyright 2006-2007 Helge Deller (deller@gmx.de)
- *
- */
-
-
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/bootmem.h>
-#include <linux/gfp.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/pci.h> /* for hppa_dma_ops and pcxl_dma_ops */
-#include <linux/initrd.h>
-#include <linux/swap.h>
-#include <linux/unistd.h>
-#include <linux/nodemask.h> /* for node_online_map */
-#include <linux/pagemap.h> /* for release_pages and page_cache_release */
-
-#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
-#include <asm/tlb.h>
-#include <asm/pdc_chassis.h>
-#include <asm/mmzone.h>
-#include <asm/sections.h>
-
-extern int data_start;
-
-#ifdef CONFIG_DISCONTIGMEM
-struct node_map_data node_data[MAX_NUMNODES] __read_mostly;
-unsigned char pfnnid_map[PFNNID_MAP_MAX] __read_mostly;
-#endif
-
-static struct resource data_resource = {
- .name = "Kernel data",
- .flags = IORESOURCE_BUSY | IORESOURCE_MEM,
-};
-
-static struct resource code_resource = {
- .name = "Kernel code",
- .flags = IORESOURCE_BUSY | IORESOURCE_MEM,
-};
-
-static struct resource pdcdata_resource = {
- .name = "PDC data (Page Zero)",
- .start = 0,
- .end = 0x9ff,
- .flags = IORESOURCE_BUSY | IORESOURCE_MEM,
-};
-
-static struct resource sysram_resources[MAX_PHYSMEM_RANGES] __read_mostly;
-
-/* The following array is initialized from the firmware specific
- * information retrieved in kernel/inventory.c.
- */
-
-physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES] __read_mostly;
-int npmem_ranges __read_mostly;
-
-#ifdef CONFIG_64BIT
-#define MAX_MEM (~0UL)
-#else /* !CONFIG_64BIT */
-#define MAX_MEM (3584U*1024U*1024U)
-#endif /* !CONFIG_64BIT */
-
-static unsigned long mem_limit __read_mostly = MAX_MEM;
-
-static void __init mem_limit_func(void)
-{
- char *cp, *end;
- unsigned long limit;
-
- /* We need this before __setup() functions are called */
-
- limit = MAX_MEM;
- for (cp = boot_command_line; *cp; ) {
- if (memcmp(cp, "mem=", 4) == 0) {
- cp += 4;
- limit = memparse(cp, &end);
- if (end != cp)
- break;
- cp = end;
- } else {
- while (*cp != ' ' && *cp)
- ++cp;
- while (*cp == ' ')
- ++cp;
- }
- }
-
- if (limit < mem_limit)
- mem_limit = limit;
-}
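The loop above leans on the kernel's memparse() for the size argument. A rough userspace approximation of its common K/M/G suffix handling (the real helper accepts further suffixes in later kernels):

#include <stdlib.h>

/* Userspace approximation of memparse(): parse a size with an
 * optional K/M/G suffix, advancing *retp past what was consumed. */
static unsigned long parse_size(const char *s, char **retp)
{
	unsigned long v = strtoul(s, retp, 0);
	switch (**retp) {
	case 'G': case 'g': v <<= 10; /* fall through */
	case 'M': case 'm': v <<= 10; /* fall through */
	case 'K': case 'k': v <<= 10; (*retp)++; break;
	}
	return v;
}

So for a command line containing "mem=512M", parse_size("512M", &end) yields 512 << 20 bytes, matching what mem_limit_func() feeds into mem_limit.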
-
-#define MAX_GAP (0x40000000UL >> PAGE_SHIFT)
-
-static void __init setup_bootmem(void)
-{
- unsigned long bootmap_size;
- unsigned long mem_max;
- unsigned long bootmap_pages;
- unsigned long bootmap_start_pfn;
- unsigned long bootmap_pfn;
-#ifndef CONFIG_DISCONTIGMEM
- physmem_range_t pmem_holes[MAX_PHYSMEM_RANGES - 1];
- int npmem_holes;
-#endif
- int i, sysram_resource_count;
-
- disable_sr_hashing(); /* Turn off space register hashing */
-
- /*
- * Sort the ranges. Since the number of ranges is typically
- * small, and performance is not an issue here, just do
- * a simple insertion sort.
- */
-
- for (i = 1; i < npmem_ranges; i++) {
- int j;
-
- for (j = i; j > 0; j--) {
- unsigned long tmp;
-
- if (pmem_ranges[j-1].start_pfn <
- pmem_ranges[j].start_pfn) {
-
- break;
- }
- tmp = pmem_ranges[j-1].start_pfn;
- pmem_ranges[j-1].start_pfn = pmem_ranges[j].start_pfn;
- pmem_ranges[j].start_pfn = tmp;
- tmp = pmem_ranges[j-1].pages;
- pmem_ranges[j-1].pages = pmem_ranges[j].pages;
- pmem_ranges[j].pages = tmp;
- }
- }
-
-#ifndef CONFIG_DISCONTIGMEM
- /*
- * Throw out ranges that are too far apart (controlled by
- * MAX_GAP).
- */
-
- for (i = 1; i < npmem_ranges; i++) {
- if (pmem_ranges[i].start_pfn -
- (pmem_ranges[i-1].start_pfn +
- pmem_ranges[i-1].pages) > MAX_GAP) {
- npmem_ranges = i;
- printk("Large gap in memory detected (%ld pages). "
- "Consider turning on CONFIG_DISCONTIGMEM\n",
- pmem_ranges[i].start_pfn -
- (pmem_ranges[i-1].start_pfn +
- pmem_ranges[i-1].pages));
- break;
- }
- }
-#endif
-
- if (npmem_ranges > 1) {
-
- /* Print the memory ranges */
-
- printk(KERN_INFO "Memory Ranges:\n");
-
- for (i = 0; i < npmem_ranges; i++) {
- unsigned long start;
- unsigned long size;
-
- size = (pmem_ranges[i].pages << PAGE_SHIFT);
- start = (pmem_ranges[i].start_pfn << PAGE_SHIFT);
- printk(KERN_INFO "%2d) Start 0x%016lx End 0x%016lx Size %6ld MB\n",
- i,start, start + (size - 1), size >> 20);
- }
- }
-
- sysram_resource_count = npmem_ranges;
- for (i = 0; i < sysram_resource_count; i++) {
- struct resource *res = &sysram_resources[i];
- res->name = "System RAM";
- res->start = pmem_ranges[i].start_pfn << PAGE_SHIFT;
- res->end = res->start + (pmem_ranges[i].pages << PAGE_SHIFT)-1;
- res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
- request_resource(&iomem_resource, res);
- }
-
- /*
- * For 32 bit kernels we limit the amount of memory we can
- * support, in order to preserve enough kernel address space
- * for other purposes. For 64 bit kernels we don't normally
- * limit the memory, but this mechanism can be used to
- * artificially limit the amount of memory (and it is written
- * to work with multiple memory ranges).
- */
-
- mem_limit_func(); /* check for "mem=" argument */
-
- mem_max = 0;
- num_physpages = 0;
- for (i = 0; i < npmem_ranges; i++) {
- unsigned long rsize;
-
- rsize = pmem_ranges[i].pages << PAGE_SHIFT;
- if ((mem_max + rsize) > mem_limit) {
- printk(KERN_WARNING "Memory truncated to %ld MB\n", mem_limit >> 20);
- if (mem_max == mem_limit)
- npmem_ranges = i;
- else {
- pmem_ranges[i].pages = (mem_limit >> PAGE_SHIFT)
- - (mem_max >> PAGE_SHIFT);
- npmem_ranges = i + 1;
- mem_max = mem_limit;
- }
- num_physpages += pmem_ranges[i].pages;
- break;
- }
- num_physpages += pmem_ranges[i].pages;
- mem_max += rsize;
- }
-
- printk(KERN_INFO "Total Memory: %ld MB\n",mem_max >> 20);
-
-#ifndef CONFIG_DISCONTIGMEM
- /* Merge the ranges, keeping track of the holes */
-
- {
- unsigned long end_pfn;
- unsigned long hole_pages;
-
- npmem_holes = 0;
- end_pfn = pmem_ranges[0].start_pfn + pmem_ranges[0].pages;
- for (i = 1; i < npmem_ranges; i++) {
-
- hole_pages = pmem_ranges[i].start_pfn - end_pfn;
- if (hole_pages) {
- pmem_holes[npmem_holes].start_pfn = end_pfn;
- pmem_holes[npmem_holes++].pages = hole_pages;
- end_pfn += hole_pages;
- }
- end_pfn += pmem_ranges[i].pages;
- }
-
- pmem_ranges[0].pages = end_pfn - pmem_ranges[0].start_pfn;
- npmem_ranges = 1;
- }
-#endif
-
- bootmap_pages = 0;
- for (i = 0; i < npmem_ranges; i++)
- bootmap_pages += bootmem_bootmap_pages(pmem_ranges[i].pages);
-
- bootmap_start_pfn = PAGE_ALIGN(__pa((unsigned long) &_end)) >> PAGE_SHIFT;
-
-#ifdef CONFIG_DISCONTIGMEM
- for (i = 0; i < MAX_PHYSMEM_RANGES; i++) {
- memset(NODE_DATA(i), 0, sizeof(pg_data_t));
- NODE_DATA(i)->bdata = &bootmem_node_data[i];
- }
- memset(pfnnid_map, 0xff, sizeof(pfnnid_map));
-
- for (i = 0; i < npmem_ranges; i++) {
- node_set_state(i, N_NORMAL_MEMORY);
- node_set_online(i);
- }
-#endif
-
- /*
- * Initialize and free the full range of memory in each range.
- * Note that the only writing these routines do is to the bootmap,
- * and we've made sure to locate the bootmap properly so that they
- * won't be writing over anything important.
- */
-
- bootmap_pfn = bootmap_start_pfn;
- max_pfn = 0;
- for (i = 0; i < npmem_ranges; i++) {
- unsigned long start_pfn;
- unsigned long npages;
-
- start_pfn = pmem_ranges[i].start_pfn;
- npages = pmem_ranges[i].pages;
-
- bootmap_size = init_bootmem_node(NODE_DATA(i),
- bootmap_pfn,
- start_pfn,
- (start_pfn + npages) );
- free_bootmem_node(NODE_DATA(i),
- (start_pfn << PAGE_SHIFT),
- (npages << PAGE_SHIFT) );
- bootmap_pfn += (bootmap_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
- if ((start_pfn + npages) > max_pfn)
- max_pfn = start_pfn + npages;
- }
-
- /* IOMMU is always used to access "high mem" on those boxes
- * that can support enough mem that a PCI device couldn't
- * directly DMA to any physical addresses.
- * ISA DMA support will need to revisit this.
- */
- max_low_pfn = max_pfn;
-
- /* bootmap sizing messed up? */
- BUG_ON((bootmap_pfn - bootmap_start_pfn) != bootmap_pages);
-
- /* reserve PAGE0 pdc memory, kernel text/data/bss & bootmap */
-
-#define PDC_CONSOLE_IO_IODC_SIZE 32768
-
- reserve_bootmem_node(NODE_DATA(0), 0UL,
- (unsigned long)(PAGE0->mem_free +
- PDC_CONSOLE_IO_IODC_SIZE), BOOTMEM_DEFAULT);
- reserve_bootmem_node(NODE_DATA(0), __pa((unsigned long)_text),
- (unsigned long)(_end - _text), BOOTMEM_DEFAULT);
- reserve_bootmem_node(NODE_DATA(0), (bootmap_start_pfn << PAGE_SHIFT),
- ((bootmap_pfn - bootmap_start_pfn) << PAGE_SHIFT),
- BOOTMEM_DEFAULT);
-
-#ifndef CONFIG_DISCONTIGMEM
-
- /* reserve the holes */
-
- for (i = 0; i < npmem_holes; i++) {
- reserve_bootmem_node(NODE_DATA(0),
- (pmem_holes[i].start_pfn << PAGE_SHIFT),
- (pmem_holes[i].pages << PAGE_SHIFT),
- BOOTMEM_DEFAULT);
- }
-#endif
-
-#ifdef CONFIG_BLK_DEV_INITRD
- if (initrd_start) {
- printk(KERN_INFO "initrd: %08lx-%08lx\n", initrd_start, initrd_end);
- if (__pa(initrd_start) < mem_max) {
- unsigned long initrd_reserve;
-
- if (__pa(initrd_end) > mem_max) {
- initrd_reserve = mem_max - __pa(initrd_start);
- } else {
- initrd_reserve = initrd_end - initrd_start;
- }
- initrd_below_start_ok = 1;
- printk(KERN_INFO "initrd: reserving %08lx-%08lx (mem_max %08lx)\n", __pa(initrd_start), __pa(initrd_start) + initrd_reserve, mem_max);
-
- reserve_bootmem_node(NODE_DATA(0), __pa(initrd_start),
- initrd_reserve, BOOTMEM_DEFAULT);
- }
- }
-#endif
-
- data_resource.start = virt_to_phys(&data_start);
- data_resource.end = virt_to_phys(_end) - 1;
- code_resource.start = virt_to_phys(_text);
- code_resource.end = virt_to_phys(&data_start)-1;
-
- /* We don't know which region the kernel will be in, so try
- * all of them.
- */
- for (i = 0; i < sysram_resource_count; i++) {
- struct resource *res = &sysram_resources[i];
- request_resource(res, &code_resource);
- request_resource(res, &data_resource);
- }
- request_resource(&sysram_resources[0], &pdcdata_resource);
-}
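To illustrate the non-DISCONTIGMEM merge step above: two discontiguous ranges collapse into one covering range plus a recorded hole, which is later handed back to reserve_bootmem_node(). A standalone sketch with made-up page counts:

#include <stdio.h>

struct range { unsigned long start_pfn, pages; };

int main(void)
{
	/* Two discontiguous ranges, as pmem_ranges[] might hold them. */
	struct range r[2] = { { 0, 100 }, { 120, 80 } };
	struct range hole;
	unsigned long end_pfn = r[0].start_pfn + r[0].pages;

	hole.start_pfn = end_pfn;              /* 100 */
	hole.pages = r[1].start_pfn - end_pfn; /*  20 */
	r[0].pages = r[1].start_pfn + r[1].pages - r[0].start_pfn; /* 200 */

	printf("merged: %lu pages, hole at pfn %lu (%lu pages)\n",
	       r[0].pages, hole.start_pfn, hole.pages);
	return 0;
}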
-
-static void __init map_pages(unsigned long start_vaddr,
- unsigned long start_paddr, unsigned long size,
- pgprot_t pgprot, int force)
-{
- pgd_t *pg_dir;
- pmd_t *pmd;
- pte_t *pg_table;
- unsigned long end_paddr;
- unsigned long start_pmd;
- unsigned long start_pte;
- unsigned long tmp1;
- unsigned long tmp2;
- unsigned long address;
- unsigned long vaddr;
- unsigned long ro_start;
- unsigned long ro_end;
- unsigned long fv_addr;
- unsigned long gw_addr;
- extern const unsigned long fault_vector_20;
- extern void * const linux_gateway_page;
-
- ro_start = __pa((unsigned long)_text);
- ro_end = __pa((unsigned long)&data_start);
- fv_addr = __pa((unsigned long)&fault_vector_20) & PAGE_MASK;
- gw_addr = __pa((unsigned long)&linux_gateway_page) & PAGE_MASK;
-
- end_paddr = start_paddr + size;
-
- pg_dir = pgd_offset_k(start_vaddr);
-
-#if PTRS_PER_PMD == 1
- start_pmd = 0;
-#else
- start_pmd = ((start_vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
-#endif
- start_pte = ((start_vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
-
- address = start_paddr;
- vaddr = start_vaddr;
- while (address < end_paddr) {
-#if PTRS_PER_PMD == 1
- pmd = (pmd_t *)__pa(pg_dir);
-#else
- pmd = (pmd_t *)pgd_address(*pg_dir);
-
- /*
- * pmd is physical at this point
- */
-
- if (!pmd) {
- pmd = (pmd_t *) alloc_bootmem_low_pages_node(NODE_DATA(0), PAGE_SIZE << PMD_ORDER);
- pmd = (pmd_t *) __pa(pmd);
- }
-
- pgd_populate(NULL, pg_dir, __va(pmd));
-#endif
- pg_dir++;
-
- /* now change pmd to kernel virtual addresses */
-
- pmd = (pmd_t *)__va(pmd) + start_pmd;
- for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++, pmd++) {
-
- /*
- * pg_table is physical at this point
- */
-
- pg_table = (pte_t *)pmd_address(*pmd);
- if (!pg_table) {
- pg_table = (pte_t *)
- alloc_bootmem_low_pages_node(NODE_DATA(0), PAGE_SIZE);
- pg_table = (pte_t *) __pa(pg_table);
- }
-
- pmd_populate_kernel(NULL, pmd, __va(pg_table));
-
- /* now change pg_table to kernel virtual addresses */
-
- pg_table = (pte_t *) __va(pg_table) + start_pte;
- for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) {
- pte_t pte;
-
- /*
- * Map the fault vector writable so we can
- * write the HPMC checksum.
- */
- if (force)
- pte = __mk_pte(address, pgprot);
- else if (core_kernel_text(vaddr) &&
- address != fv_addr)
- pte = __mk_pte(address, PAGE_KERNEL_EXEC);
- else
-#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
- if (address >= ro_start && address < ro_end
- && address != fv_addr
- && address != gw_addr)
- pte = __mk_pte(address, PAGE_KERNEL_RO);
- else
-#endif
- pte = __mk_pte(address, pgprot);
-
- if (address >= end_paddr) {
- if (force)
- break;
- else
- pte_val(pte) = 0;
- }
-
- set_pte(pg_table, pte);
-
- address += PAGE_SIZE;
- vaddr += PAGE_SIZE;
- }
- start_pte = 0;
-
- if (address >= end_paddr)
- break;
- }
- start_pmd = 0;
- }
-}
-
-void free_initmem(void)
-{
- unsigned long addr;
- unsigned long init_begin = (unsigned long)__init_begin;
- unsigned long init_end = (unsigned long)__init_end;
-
- /* The init text pages are marked R-X. We have to
- * flush the icache and mark them RW-
- *
- * This is tricky, because map_pages is in the init section.
- * Do a dummy remap of the data section first (the data
- * section is already PAGE_KERNEL) to pull in the TLB entries
- * for map_kernel */
- map_pages(init_begin, __pa(init_begin), init_end - init_begin,
- PAGE_KERNEL_RWX, 1);
- /* now remap at PAGE_KERNEL since the TLB is pre-primed to execute
- * map_pages */
- map_pages(init_begin, __pa(init_begin), init_end - init_begin,
- PAGE_KERNEL, 1);
-
- /* force the kernel to see the new TLB entries */
- __flush_tlb_range(0, init_begin, init_end);
- /* Attempt to catch anyone trying to execute code here
- * by filling the page with BRK insns.
- */
- memset((void *)init_begin, 0x00, init_end - init_begin);
- /* finally dump all the instructions which were cached, since the
- * pages are no longer executable */
- flush_icache_range(init_begin, init_end);
-
- for (addr = init_begin; addr < init_end; addr += PAGE_SIZE) {
- ClearPageReserved(virt_to_page(addr));
- init_page_count(virt_to_page(addr));
- free_page(addr);
- num_physpages++;
- totalram_pages++;
- }
-
- /* set up a new led state on systems shipped with an LED State panel */
- pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE);
-
- printk(KERN_INFO "Freeing unused kernel memory: %luk freed\n",
- (init_end - init_begin) >> 10);
-}
-
-
-#ifdef CONFIG_DEBUG_RODATA
-void mark_rodata_ro(void)
-{
- /* rodata memory was already mapped with KERNEL_RO access rights by
- pagetable_init() and map_pages(). No need to do additional stuff here */
- printk (KERN_INFO "Write protecting the kernel read-only data: %luk\n",
- (unsigned long)(__end_rodata - __start_rodata) >> 10);
-}
-#endif
-
-
-/*
- * Just an arbitrary offset to serve as a "hole" between mapping areas
- * (between top of physical memory and a potential pcxl dma mapping
- * area, and below the vmalloc mapping area).
- *
- * The current 32K value just means that there will be a 32K "hole"
- * between mapping areas. That means that any out-of-bounds memory
- * accesses will hopefully be caught. The vmalloc() routines leave
- * a hole of 4kB between each vmalloced area for the same reason.
- */
-
- /* Leave room for gateway page expansion */
-#if KERNEL_MAP_START < GATEWAY_PAGE_SIZE
-#error KERNEL_MAP_START is in gateway reserved region
-#endif
-#define MAP_START (KERNEL_MAP_START)
-
-#define VM_MAP_OFFSET (32*1024)
-#define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \
- & ~(VM_MAP_OFFSET-1)))
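SET_MAP_OFFSET() rounds an address up to the next VM_MAP_OFFSET boundary strictly above it, which is what leaves the guard hole between mapping areas. A quick demonstration (the input addresses are arbitrary):

#include <stdio.h>

#define VM_MAP_OFFSET (32*1024)
#define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \
				    & ~(VM_MAP_OFFSET-1)))

int main(void)
{
	/* Rounds up to the next 32K boundary strictly above x,
	 * leaving a guard hole of between 1 byte and 32K. */
	printf("%p\n", SET_MAP_OFFSET(0x10000000)); /* -> 0x10008000 */
	printf("%p\n", SET_MAP_OFFSET(0x10000001)); /* -> 0x10008000 */
	printf("%p\n", SET_MAP_OFFSET(0x10007fff)); /* -> 0x10008000 */
	return 0;
}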
-
-void *parisc_vmalloc_start __read_mostly;
-EXPORT_SYMBOL(parisc_vmalloc_start);
-
-#ifdef CONFIG_PA11
-unsigned long pcxl_dma_start __read_mostly;
-#endif
-
-void __init mem_init(void)
-{
- int codesize, reservedpages, datasize, initsize;
-
- /* Do sanity checks on page table constants */
- BUILD_BUG_ON(PTE_ENTRY_SIZE != sizeof(pte_t));
- BUILD_BUG_ON(PMD_ENTRY_SIZE != sizeof(pmd_t));
- BUILD_BUG_ON(PGD_ENTRY_SIZE != sizeof(pgd_t));
- BUILD_BUG_ON(PAGE_SHIFT + BITS_PER_PTE + BITS_PER_PMD + BITS_PER_PGD
- > BITS_PER_LONG);
-
- high_memory = __va((max_pfn << PAGE_SHIFT));
-
-#ifndef CONFIG_DISCONTIGMEM
- max_mapnr = page_to_pfn(virt_to_page(high_memory - 1)) + 1;
- totalram_pages += free_all_bootmem();
-#else
- {
- int i;
-
- for (i = 0; i < npmem_ranges; i++)
- totalram_pages += free_all_bootmem_node(NODE_DATA(i));
- }
-#endif
-
- codesize = (unsigned long)_etext - (unsigned long)_text;
- datasize = (unsigned long)_edata - (unsigned long)_etext;
- initsize = (unsigned long)__init_end - (unsigned long)__init_begin;
-
- reservedpages = 0;
-{
- unsigned long pfn;
-#ifdef CONFIG_DISCONTIGMEM
- int i;
-
- for (i = 0; i < npmem_ranges; i++) {
- for (pfn = node_start_pfn(i); pfn < node_end_pfn(i); pfn++) {
- if (PageReserved(pfn_to_page(pfn)))
- reservedpages++;
- }
- }
-#else /* !CONFIG_DISCONTIGMEM */
- for (pfn = 0; pfn < max_pfn; pfn++) {
- /*
- * Only count reserved RAM pages
- */
- if (PageReserved(pfn_to_page(pfn)))
- reservedpages++;
- }
-#endif
-}
-
-#ifdef CONFIG_PA11
- if (hppa_dma_ops == &pcxl_dma_ops) {
- pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START);
- parisc_vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start
- + PCXL_DMA_MAP_SIZE);
- } else {
- pcxl_dma_start = 0;
- parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);
- }
-#else
- parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);
-#endif
-
- printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init)\n",
- nr_free_pages() << (PAGE_SHIFT-10),
- num_physpages << (PAGE_SHIFT-10),
- codesize >> 10,
- reservedpages << (PAGE_SHIFT-10),
- datasize >> 10,
- initsize >> 10
- );
-
-#ifdef CONFIG_DEBUG_KERNEL /* double-sanity-check paranoia */
- printk("virtual kernel memory layout:\n"
- " vmalloc : 0x%p - 0x%p (%4ld MB)\n"
- " memory : 0x%p - 0x%p (%4ld MB)\n"
- " .init : 0x%p - 0x%p (%4ld kB)\n"
- " .data : 0x%p - 0x%p (%4ld kB)\n"
- " .text : 0x%p - 0x%p (%4ld kB)\n",
-
- (void*)VMALLOC_START, (void*)VMALLOC_END,
- (VMALLOC_END - VMALLOC_START) >> 20,
-
- __va(0), high_memory,
- ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,
-
- __init_begin, __init_end,
- ((unsigned long)__init_end - (unsigned long)__init_begin) >> 10,
-
- _etext, _edata,
- ((unsigned long)_edata - (unsigned long)_etext) >> 10,
-
- _text, _etext,
- ((unsigned long)_etext - (unsigned long)_text) >> 10);
-#endif
-}
-
-unsigned long *empty_zero_page __read_mostly;
-EXPORT_SYMBOL(empty_zero_page);
-
-void show_mem(unsigned int filter)
-{
- int i,free = 0,total = 0,reserved = 0;
- int shared = 0, cached = 0;
-
- printk(KERN_INFO "Mem-info:\n");
- show_free_areas(filter);
-#ifndef CONFIG_DISCONTIGMEM
- i = max_mapnr;
- while (i-- > 0) {
- total++;
- if (PageReserved(mem_map+i))
- reserved++;
- else if (PageSwapCache(mem_map+i))
- cached++;
- else if (!page_count(&mem_map[i]))
- free++;
- else
- shared += page_count(&mem_map[i]) - 1;
- }
-#else
- for (i = 0; i < npmem_ranges; i++) {
- int j;
-
- for (j = node_start_pfn(i); j < node_end_pfn(i); j++) {
- struct page *p;
- unsigned long flags;
-
- pgdat_resize_lock(NODE_DATA(i), &flags);
- p = nid_page_nr(i, j) - node_start_pfn(i);
-
- total++;
- if (PageReserved(p))
- reserved++;
- else if (PageSwapCache(p))
- cached++;
- else if (!page_count(p))
- free++;
- else
- shared += page_count(p) - 1;
- pgdat_resize_unlock(NODE_DATA(i), &flags);
- }
- }
-#endif
- printk(KERN_INFO "%d pages of RAM\n", total);
- printk(KERN_INFO "%d reserved pages\n", reserved);
- printk(KERN_INFO "%d pages shared\n", shared);
- printk(KERN_INFO "%d pages swap cached\n", cached);
-
-
-#ifdef CONFIG_DISCONTIGMEM
- {
- struct zonelist *zl;
- int i, j;
-
- for (i = 0; i < npmem_ranges; i++) {
- zl = node_zonelist(i, 0);
- for (j = 0; j < MAX_NR_ZONES; j++) {
- struct zoneref *z;
- struct zone *zone;
-
- printk("Zone list for zone %d on node %d: ", j, i);
- for_each_zone_zonelist(zone, z, zl, j)
- printk("[%d/%s] ", zone_to_nid(zone),
- zone->name);
- printk("\n");
- }
- }
- }
-#endif
-}
-
-/*
- * pagetable_init() sets up the page tables
- *
- * Note that gateway_init() places the Linux gateway page at page 0.
- * Since gateway pages cannot be dereferenced this has the desirable
- * side effect of trapping those pesky NULL-reference errors in the
- * kernel.
- */
-static void __init pagetable_init(void)
-{
- int range;
-
- /* Map each physical memory range to its kernel vaddr */
-
- for (range = 0; range < npmem_ranges; range++) {
- unsigned long start_paddr;
- unsigned long end_paddr;
- unsigned long size;
-
- start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT;
- end_paddr = start_paddr + (pmem_ranges[range].pages << PAGE_SHIFT);
- size = pmem_ranges[range].pages << PAGE_SHIFT;
-
- map_pages((unsigned long)__va(start_paddr), start_paddr,
- size, PAGE_KERNEL, 0);
- }
-
-#ifdef CONFIG_BLK_DEV_INITRD
- if (initrd_end && initrd_end > mem_limit) {
- printk(KERN_INFO "initrd: mapping %08lx-%08lx\n", initrd_start, initrd_end);
- map_pages(initrd_start, __pa(initrd_start),
- initrd_end - initrd_start, PAGE_KERNEL, 0);
- }
-#endif
-
- empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
- memset(empty_zero_page, 0, PAGE_SIZE);
-}
-
-static void __init gateway_init(void)
-{
- unsigned long linux_gateway_page_addr;
- /* FIXME: This is 'const' in order to trick the compiler
- into not treating it as DP-relative data. */
- extern void * const linux_gateway_page;
-
- linux_gateway_page_addr = LINUX_GATEWAY_ADDR & PAGE_MASK;
-
- /*
- * Setup Linux Gateway page.
- *
- * The Linux gateway page will reside in kernel space (on virtual
- * page 0), so it doesn't need to be aliased into user space.
- */
-
- map_pages(linux_gateway_page_addr, __pa(&linux_gateway_page),
- PAGE_SIZE, PAGE_GATEWAY, 1);
-}
-
-#ifdef CONFIG_HPUX
-void
-map_hpux_gateway_page(struct task_struct *tsk, struct mm_struct *mm)
-{
- pgd_t *pg_dir;
- pmd_t *pmd;
- pte_t *pg_table;
- unsigned long start_pmd;
- unsigned long start_pte;
- unsigned long address;
- unsigned long hpux_gw_page_addr;
- /* FIXME: This is 'const' in order to trick the compiler
- into not treating it as DP-relative data. */
- extern void * const hpux_gateway_page;
-
- hpux_gw_page_addr = HPUX_GATEWAY_ADDR & PAGE_MASK;
-
- /*
- * Setup HP-UX Gateway page.
- *
- * The HP-UX gateway page resides in the user address space,
- * so it needs to be aliased into each process.
- */
-
- pg_dir = pgd_offset(mm,hpux_gw_page_addr);
-
-#if PTRS_PER_PMD == 1
- start_pmd = 0;
-#else
- start_pmd = ((hpux_gw_page_addr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
-#endif
- start_pte = ((hpux_gw_page_addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
-
- address = __pa(&hpux_gateway_page);
-#if PTRS_PER_PMD == 1
- pmd = (pmd_t *)__pa(pg_dir);
-#else
- pmd = (pmd_t *) pgd_address(*pg_dir);
-
- /*
- * pmd is physical at this point
- */
-
- if (!pmd) {
- pmd = (pmd_t *) get_zeroed_page(GFP_KERNEL);
- pmd = (pmd_t *) __pa(pmd);
- }
-
- __pgd_val_set(*pg_dir, PxD_FLAG_PRESENT | PxD_FLAG_VALID | (unsigned long) pmd);
-#endif
- /* now change pmd to kernel virtual addresses */
-
- pmd = (pmd_t *)__va(pmd) + start_pmd;
-
- /*
- * pg_table is physical at this point
- */
-
- pg_table = (pte_t *) pmd_address(*pmd);
- if (!pg_table)
- pg_table = (pte_t *) __pa(get_zeroed_page(GFP_KERNEL));
-
- __pmd_val_set(*pmd, PxD_FLAG_PRESENT | PxD_FLAG_VALID | (unsigned long) pg_table);
-
- /* now change pg_table to kernel virtual addresses */
-
- pg_table = (pte_t *) __va(pg_table) + start_pte;
- set_pte(pg_table, __mk_pte(address, PAGE_GATEWAY));
-}
-EXPORT_SYMBOL(map_hpux_gateway_page);
-#endif
-
-void __init paging_init(void)
-{
- int i;
-
- setup_bootmem();
- pagetable_init();
- gateway_init();
- flush_cache_all_local(); /* start with known state */
- flush_tlb_all_local(NULL);
-
- for (i = 0; i < npmem_ranges; i++) {
- unsigned long zones_size[MAX_NR_ZONES] = { 0, };
-
- zones_size[ZONE_NORMAL] = pmem_ranges[i].pages;
-
-#ifdef CONFIG_DISCONTIGMEM
- /* Need to initialize the pfnnid_map before we can initialize
- the zone */
- {
- int j;
- for (j = (pmem_ranges[i].start_pfn >> PFNNID_SHIFT);
- j <= ((pmem_ranges[i].start_pfn + pmem_ranges[i].pages) >> PFNNID_SHIFT);
- j++) {
- pfnnid_map[j] = i;
- }
- }
-#endif
-
- free_area_init_node(i, zones_size,
- pmem_ranges[i].start_pfn, NULL);
- }
-}
-
-#ifdef CONFIG_PA20
-
-/*
- * Currently, all PA20 chips have 18 bit protection IDs, which is the
- * limiting factor (space ids are 32 bits).
- */
-
-#define NR_SPACE_IDS 262144
-
-#else
-
-/*
- * Currently we have a one-to-one relationship between space IDs and
- * protection IDs. Older parisc chips (PCXS, PCXT, PCXL, PCXL2) only
- * support 15 bit protection IDs, so that is the limiting factor.
- * PCXT' has 18 bit protection IDs, but only 16 bit spaceids, so it's
- * probably not worth the effort for a special case here.
- */
-
-#define NR_SPACE_IDS 32768
-
-#endif /* !CONFIG_PA20 */
-
-#define RECYCLE_THRESHOLD (NR_SPACE_IDS / 2)
-#define SID_ARRAY_SIZE (NR_SPACE_IDS / (8 * sizeof(long)))
-
-static unsigned long space_id[SID_ARRAY_SIZE] = { 1 }; /* disallow space 0 */
-static unsigned long dirty_space_id[SID_ARRAY_SIZE];
-static unsigned long space_id_index;
-static unsigned long free_space_ids = NR_SPACE_IDS - 1;
-static unsigned long dirty_space_ids = 0;
-
-static DEFINE_SPINLOCK(sid_lock);
-
-unsigned long alloc_sid(void)
-{
- unsigned long index;
-
- spin_lock(&sid_lock);
-
- if (free_space_ids == 0) {
- if (dirty_space_ids != 0) {
- spin_unlock(&sid_lock);
- flush_tlb_all(); /* flush_tlb_all() calls recycle_sids() */
- spin_lock(&sid_lock);
- }
- BUG_ON(free_space_ids == 0);
- }
-
- free_space_ids--;
-
- index = find_next_zero_bit(space_id, NR_SPACE_IDS, space_id_index);
- space_id[index >> SHIFT_PER_LONG] |= (1L << (index & (BITS_PER_LONG - 1)));
- space_id_index = index;
-
- spin_unlock(&sid_lock);
-
- return index << SPACEID_SHIFT;
-}
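alloc_sid() claims the first zero bit in the space_id bitmap via find_next_zero_bit(). A self-contained first-fit sketch of the same idea, for illustration only; the kernel helper scans word-at-a-time and is far faster:

/* First-fit bitmap allocator sketch: find and claim the first zero
 * bit at or after 'start' in an nbits-wide map. Returns the bit
 * index, or -1 if the map is full. */
static long bitmap_alloc_first(unsigned long *map, long nbits, long start)
{
	long i;
	for (i = start; i < nbits; i++) {
		unsigned long *w = &map[i / (8 * sizeof(long))];
		unsigned long m = 1UL << (i % (8 * sizeof(long)));
		if (!(*w & m)) {
			*w |= m; /* mark the space id as in use */
			return i;
		}
	}
	return -1;
}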
-
-void free_sid(unsigned long spaceid)
-{
- unsigned long index = spaceid >> SPACEID_SHIFT;
- unsigned long *dirty_space_offset;
-
- dirty_space_offset = dirty_space_id + (index >> SHIFT_PER_LONG);
- index &= (BITS_PER_LONG - 1);
-
- spin_lock(&sid_lock);
-
- BUG_ON(*dirty_space_offset & (1L << index)); /* attempt to free space id twice */
-
- *dirty_space_offset |= (1L << index);
- dirty_space_ids++;
-
- spin_unlock(&sid_lock);
-}
-
-
-#ifdef CONFIG_SMP
-static void get_dirty_sids(unsigned long *ndirtyptr,unsigned long *dirty_array)
-{
- int i;
-
- /* NOTE: sid_lock must be held upon entry */
-
- *ndirtyptr = dirty_space_ids;
- if (dirty_space_ids != 0) {
- for (i = 0; i < SID_ARRAY_SIZE; i++) {
- dirty_array[i] = dirty_space_id[i];
- dirty_space_id[i] = 0;
- }
- dirty_space_ids = 0;
- }
-
- return;
-}
-
-static void recycle_sids(unsigned long ndirty,unsigned long *dirty_array)
-{
- int i;
-
- /* NOTE: sid_lock must be held upon entry */
-
- if (ndirty != 0) {
- for (i = 0; i < SID_ARRAY_SIZE; i++) {
- space_id[i] ^= dirty_array[i];
- }
-
- free_space_ids += ndirty;
- space_id_index = 0;
- }
-}
-
-#else /* CONFIG_SMP */
-
-static void recycle_sids(void)
-{
- int i;
-
- /* NOTE: sid_lock must be held upon entry */
-
- if (dirty_space_ids != 0) {
- for (i = 0; i < SID_ARRAY_SIZE; i++) {
- space_id[i] ^= dirty_space_id[i];
- dirty_space_id[i] = 0;
- }
-
- free_space_ids += dirty_space_ids;
- dirty_space_ids = 0;
- space_id_index = 0;
- }
-}
-#endif
-
-/*
- * flush_tlb_all() calls recycle_sids(), since whenever the entire tlb is
- * purged, we can safely reuse the space ids that were released but
- * not flushed from the tlb.
- */
-
-#ifdef CONFIG_SMP
-
-static unsigned long recycle_ndirty;
-static unsigned long recycle_dirty_array[SID_ARRAY_SIZE];
-static unsigned int recycle_inuse;
-
-void flush_tlb_all(void)
-{
- int do_recycle;
-
- do_recycle = 0;
- spin_lock(&sid_lock);
- if (dirty_space_ids > RECYCLE_THRESHOLD) {
- BUG_ON(recycle_inuse); /* FIXME: Use a semaphore/wait queue here */
- get_dirty_sids(&recycle_ndirty,recycle_dirty_array);
- recycle_inuse++;
- do_recycle++;
- }
- spin_unlock(&sid_lock);
- on_each_cpu(flush_tlb_all_local, NULL, 1);
- if (do_recycle) {
- spin_lock(&sid_lock);
- recycle_sids(recycle_ndirty,recycle_dirty_array);
- recycle_inuse = 0;
- spin_unlock(&sid_lock);
- }
-}
-#else
-void flush_tlb_all(void)
-{
- spin_lock(&sid_lock);
- flush_tlb_all_local(NULL);
- recycle_sids();
- spin_unlock(&sid_lock);
-}
-#endif
-
-#ifdef CONFIG_BLK_DEV_INITRD
-void free_initrd_mem(unsigned long start, unsigned long end)
-{
- if (start >= end)
- return;
- printk(KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
- for (; start < end; start += PAGE_SIZE) {
- ClearPageReserved(virt_to_page(start));
- init_page_count(virt_to_page(start));
- free_page(start);
- num_physpages++;
- totalram_pages++;
- }
-}
-#endif
diff --git a/ANDROID_3.4.5/arch/parisc/mm/ioremap.c b/ANDROID_3.4.5/arch/parisc/mm/ioremap.c
deleted file mode 100644
index 838d0259..00000000
--- a/ANDROID_3.4.5/arch/parisc/mm/ioremap.c
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * arch/parisc/mm/ioremap.c
- *
- * (C) Copyright 1995 1996 Linus Torvalds
- * (C) Copyright 2001-2006 Helge Deller <deller@gmx.de>
- * (C) Copyright 2005 Kyle McMartin <kyle@parisc-linux.org>
- */
-
-#include <linux/vmalloc.h>
-#include <linux/errno.h>
-#include <linux/module.h>
-#include <linux/io.h>
-#include <asm/pgalloc.h>
-
-/*
- * Generic mapping function (not visible outside):
- */
-
-/*
- * Remap an arbitrary physical address space into the kernel virtual
- * address space.
- *
- * NOTE! We need to allow non-page-aligned mappings too: we will obviously
- * have to convert them into an offset in a page-aligned mapping, but the
- * caller shouldn't need to know that small detail.
- */
-void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
-{
- void __iomem *addr;
- struct vm_struct *area;
- unsigned long offset, last_addr;
- pgprot_t pgprot;
-
-#ifdef CONFIG_EISA
- unsigned long end = phys_addr + size - 1;
- /* Support EISA addresses */
- if ((phys_addr >= 0x00080000 && end < 0x000fffff) ||
- (phys_addr >= 0x00500000 && end < 0x03bfffff)) {
- phys_addr |= F_EXTEND(0xfc000000);
- flags |= _PAGE_NO_CACHE;
- }
-#endif
-
- /* Don't allow wraparound or zero size */
- last_addr = phys_addr + size - 1;
- if (!size || last_addr < phys_addr)
- return NULL;
-
- /*
- * Don't allow anybody to remap normal RAM that we're using..
- */
- if (phys_addr < virt_to_phys(high_memory)) {
- char *t_addr, *t_end;
- struct page *page;
-
- t_addr = __va(phys_addr);
- t_end = t_addr + (size - 1);
-
- for (page = virt_to_page(t_addr);
- page <= virt_to_page(t_end); page++) {
- if(!PageReserved(page))
- return NULL;
- }
- }
-
- pgprot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY |
- _PAGE_ACCESSED | flags);
-
- /*
- * Mappings have to be page-aligned
- */
- offset = phys_addr & ~PAGE_MASK;
- phys_addr &= PAGE_MASK;
- size = PAGE_ALIGN(last_addr + 1) - phys_addr;
-
- /*
- * Ok, go for it..
- */
- area = get_vm_area(size, VM_IOREMAP);
- if (!area)
- return NULL;
-
- addr = (void __iomem *) area->addr;
- if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
- phys_addr, pgprot)) {
- vfree(addr);
- return NULL;
- }
-
- return (void __iomem *) (offset + (char __iomem *)addr);
-}
-EXPORT_SYMBOL(__ioremap);
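The offset bookkeeping in __ioremap() is what lets callers pass non-page-aligned addresses. A worked example of the arithmetic, assuming a hypothetical 4 KB page size:

#include <stdio.h>

#define PAGE_SIZE  4096UL /* assumed for illustration */
#define PAGE_MASK  (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(a) (((a) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	unsigned long phys = 0xf0001010UL, size = 0x1234UL;
	unsigned long last = phys + size - 1;
	unsigned long off  = phys & ~PAGE_MASK;

	phys &= PAGE_MASK;
	size  = PAGE_ALIGN(last + 1) - phys;
	/* maps 2 pages at 0xf0001000; the caller gets base + 0x10 */
	printf("base=%#lx pages=%lu offset=%#lx\n",
	       phys, size / PAGE_SIZE, off);
	return 0;
}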
-
-void iounmap(const volatile void __iomem *addr)
-{
- if (addr > high_memory)
- return vfree((void *) (PAGE_MASK & (unsigned long __force) addr));
-}
-EXPORT_SYMBOL(iounmap);