author    Srikant Patnaik  2015-01-11 12:28:04 +0530
committer Srikant Patnaik  2015-01-11 12:28:04 +0530
commit    871480933a1c28f8a9fed4c4d34d06c439a7a422 (patch)
tree      8718f573808810c2a1e8cb8fb6ac469093ca2784 /ANDROID_3.4.5/arch/microblaze/mm
parent    9d40ac5867b9aefe0722bc1f110b965ff294d30d (diff)
download  FOSSEE-netbook-kernel-source-871480933a1c28f8a9fed4c4d34d06c439a7a422.tar.gz
          FOSSEE-netbook-kernel-source-871480933a1c28f8a9fed4c4d34d06c439a7a422.tar.bz2
          FOSSEE-netbook-kernel-source-871480933a1c28f8a9fed4c4d34d06c439a7a422.zip
Moved, renamed, and deleted files
The original directory structure was scattered and unorganized. These changes reorganize the tree to match the standard kernel source layout.
Diffstat (limited to 'ANDROID_3.4.5/arch/microblaze/mm')
-rw-r--r--  ANDROID_3.4.5/arch/microblaze/mm/Makefile         8
-rw-r--r--  ANDROID_3.4.5/arch/microblaze/mm/consistent.c   255
-rw-r--r--  ANDROID_3.4.5/arch/microblaze/mm/fault.c        277
-rw-r--r--  ANDROID_3.4.5/arch/microblaze/mm/highmem.c       88
-rw-r--r--  ANDROID_3.4.5/arch/microblaze/mm/init.c         502
-rw-r--r--  ANDROID_3.4.5/arch/microblaze/mm/mmu_context.c   70
-rw-r--r--  ANDROID_3.4.5/arch/microblaze/mm/pgtable.c      262
7 files changed, 0 insertions, 1462 deletions
diff --git a/ANDROID_3.4.5/arch/microblaze/mm/Makefile b/ANDROID_3.4.5/arch/microblaze/mm/Makefile
deleted file mode 100644
index 7313bd8a..00000000
--- a/ANDROID_3.4.5/arch/microblaze/mm/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
-#
-# Makefile
-#
-
-obj-y := consistent.o init.o
-
-obj-$(CONFIG_MMU) += pgtable.o mmu_context.o fault.o
-obj-$(CONFIG_HIGHMEM) += highmem.o
diff --git a/ANDROID_3.4.5/arch/microblaze/mm/consistent.c b/ANDROID_3.4.5/arch/microblaze/mm/consistent.c
deleted file mode 100644
index a1e2e18e..00000000
--- a/ANDROID_3.4.5/arch/microblaze/mm/consistent.c
+++ /dev/null
@@ -1,255 +0,0 @@
-/*
- * Microblaze support for cache consistent memory.
- * Copyright (C) 2010 Michal Simek <monstr@monstr.eu>
- * Copyright (C) 2010 PetaLogix
- * Copyright (C) 2005 John Williams <jwilliams@itee.uq.edu.au>
- *
- * Based on PowerPC version derived from arch/arm/mm/consistent.c
- * Copyright (C) 2001 Dan Malek (dmalek@jlc.net)
- * Copyright (C) 2000 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/ptrace.h>
-#include <linux/mman.h>
-#include <linux/mm.h>
-#include <linux/swap.h>
-#include <linux/stddef.h>
-#include <linux/vmalloc.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/bootmem.h>
-#include <linux/highmem.h>
-#include <linux/pci.h>
-#include <linux/interrupt.h>
-#include <linux/gfp.h>
-
-#include <asm/pgalloc.h>
-#include <linux/io.h>
-#include <linux/hardirq.h>
-#include <asm/mmu_context.h>
-#include <asm/mmu.h>
-#include <linux/uaccess.h>
-#include <asm/pgtable.h>
-#include <asm/cpuinfo.h>
-#include <asm/tlbflush.h>
-
-#ifndef CONFIG_MMU
-/* I have to use dcache values because I can't rely on the RAM size */
-# define UNCACHED_SHADOW_MASK (cpuinfo.dcache_high - cpuinfo.dcache_base + 1)
-#endif
-
-/*
- * Consistent memory allocators. Used for DMA devices that want to
- * share uncached memory with the processor core.
- * My crufty no-MMU approach is simple. In the HW platform we can optionally
- * mirror the DDR up above the processor cacheable region. So, memory accessed
- * in this mirror region will not be cached. It's allocated from the same
- * pool as normal memory, but the handle we return is shifted up into the
- * uncached region. This will no doubt cause big problems if memory allocated
- * here is not also freed properly. -- JW
- */
-void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *dma_handle)
-{
- unsigned long order, vaddr;
- void *ret;
- unsigned int i, err = 0;
- struct page *page, *end;
-
-#ifdef CONFIG_MMU
- phys_addr_t pa;
- struct vm_struct *area;
- unsigned long va;
-#endif
-
- if (in_interrupt())
- BUG();
-
- /* Only allocate page size areas. */
- size = PAGE_ALIGN(size);
- order = get_order(size);
-
- vaddr = __get_free_pages(gfp, order);
- if (!vaddr)
- return NULL;
-
- /*
- * we need to ensure that there are no cachelines in use,
- * or worse dirty in this area.
- */
- flush_dcache_range(virt_to_phys((void *)vaddr),
- virt_to_phys((void *)vaddr) + size);
-
-#ifndef CONFIG_MMU
- ret = (void *)vaddr;
- /*
- * Here's the magic! Note if the uncached shadow is not implemented,
- * it's up to the calling code to also test that condition and make
- * other arrangements, such as manually flushing the cache and so on.
- */
-# ifdef CONFIG_XILINX_UNCACHED_SHADOW
- ret = (void *)((unsigned) ret | UNCACHED_SHADOW_MASK);
-# endif
- if ((unsigned int)ret > cpuinfo.dcache_base &&
- (unsigned int)ret < cpuinfo.dcache_high)
- printk(KERN_WARNING
- "ERROR: Your cache coherent area is CACHED!!!\n");
-
- /* dma_handle is same as physical (shadowed) address */
- *dma_handle = (dma_addr_t)ret;
-#else
- /* Allocate some common virtual space to map the new pages. */
- area = get_vm_area(size, VM_ALLOC);
- if (!area) {
- free_pages(vaddr, order);
- return NULL;
- }
- va = (unsigned long) area->addr;
- ret = (void *)va;
-
- /* This gives us the real physical address of the first page. */
- *dma_handle = pa = virt_to_bus((void *)vaddr);
-#endif
-
- /*
- * free wasted pages. We skip the first page since we know
- * that it will have count = 1 and won't require freeing.
- * We also mark the pages in use as reserved so that
- * remap_page_range works.
- */
- page = virt_to_page(vaddr);
- end = page + (1 << order);
-
- split_page(page, order);
-
- for (i = 0; i < size && err == 0; i += PAGE_SIZE) {
-#ifdef CONFIG_MMU
- /* MS: This is the whole magic - use cache inhibit pages */
- err = map_page(va + i, pa + i, _PAGE_KERNEL | _PAGE_NO_CACHE);
-#endif
-
- SetPageReserved(page);
- page++;
- }
-
- /* Free the otherwise unused pages. */
- while (page < end) {
- __free_page(page);
- page++;
- }
-
- if (err) {
- free_pages(vaddr, order);
- return NULL;
- }
-
- return ret;
-}
-EXPORT_SYMBOL(consistent_alloc);
-
-/*
- * free page(s) as defined by the above mapping.
- */
-void consistent_free(size_t size, void *vaddr)
-{
- struct page *page;
-
- if (in_interrupt())
- BUG();
-
- size = PAGE_ALIGN(size);
-
-#ifndef CONFIG_MMU
- /* Clear SHADOW_MASK bit in address, and free as per usual */
-# ifdef CONFIG_XILINX_UNCACHED_SHADOW
- vaddr = (void *)((unsigned)vaddr & ~UNCACHED_SHADOW_MASK);
-# endif
- page = virt_to_page(vaddr);
-
- do {
- ClearPageReserved(page);
- __free_page(page);
- page++;
- } while (size -= PAGE_SIZE);
-#else
- do {
- pte_t *ptep;
- unsigned long pfn;
-
- ptep = pte_offset_kernel(pmd_offset(pgd_offset_k(
- (unsigned int)vaddr),
- (unsigned int)vaddr),
- (unsigned int)vaddr);
- if (!pte_none(*ptep) && pte_present(*ptep)) {
- pfn = pte_pfn(*ptep);
- pte_clear(&init_mm, (unsigned int)vaddr, ptep);
- if (pfn_valid(pfn)) {
- page = pfn_to_page(pfn);
-
- ClearPageReserved(page);
- __free_page(page);
- }
- }
- vaddr += PAGE_SIZE;
- } while (size -= PAGE_SIZE);
-
- /* flush tlb */
- flush_tlb_all();
-#endif
-}
-EXPORT_SYMBOL(consistent_free);
-
-/*
- * make an area consistent.
- */
-void consistent_sync(void *vaddr, size_t size, int direction)
-{
- unsigned long start;
- unsigned long end;
-
- start = (unsigned long)vaddr;
-
- /* Convert start address back down to unshadowed memory region */
-#ifdef CONFIG_XILINX_UNCACHED_SHADOW
- start &= ~UNCACHED_SHADOW_MASK;
-#endif
- end = start + size;
-
- switch (direction) {
- case PCI_DMA_NONE:
- BUG();
- case PCI_DMA_FROMDEVICE: /* invalidate only */
- invalidate_dcache_range(start, end);
- break;
- case PCI_DMA_TODEVICE: /* writeback only */
- flush_dcache_range(start, end);
- break;
- case PCI_DMA_BIDIRECTIONAL: /* writeback and invalidate */
- flush_dcache_range(start, end);
- break;
- }
-}
-EXPORT_SYMBOL(consistent_sync);
-
-/*
- * consistent_sync_page makes memory consistent. Identical
- * to consistent_sync, but takes a struct page instead of a
- * virtual address
- */
-void consistent_sync_page(struct page *page, unsigned long offset,
- size_t size, int direction)
-{
- unsigned long start = (unsigned long)page_address(page) + offset;
- consistent_sync((void *)start, size, direction);
-}
-EXPORT_SYMBOL(consistent_sync_page);
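As a reading aid for the file above: consistent_alloc() hands back an uncached kernel virtual address plus a bus handle, and consistent_free() must be given the same size so it walks the same pages. A minimal driver-style sketch of that pairing, assuming a hypothetical my_dev with a single one-page buffer (the my_dev_* names and the buffer size are illustrative, not from this tree; the prototypes come from the arch headers):

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/types.h>

#define MY_BUF_SIZE 4096			/* one page; consistent_alloc() page-aligns anyway */

static void *my_buf;				/* uncached CPU address */
static dma_addr_t my_dma;			/* bus address for the DMA engine */

static int my_dev_setup(void)
{
	my_buf = consistent_alloc(GFP_KERNEL, MY_BUF_SIZE, &my_dma);
	if (!my_buf)
		return -ENOMEM;
	return 0;
}

static void my_dev_teardown(void)
{
	/* size must match the allocation: consistent_free() frees page by page */
	consistent_free(MY_BUF_SIZE, my_buf);
}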
diff --git a/ANDROID_3.4.5/arch/microblaze/mm/fault.c b/ANDROID_3.4.5/arch/microblaze/mm/fault.c
deleted file mode 100644
index c38a2658..00000000
--- a/ANDROID_3.4.5/arch/microblaze/mm/fault.c
+++ /dev/null
@@ -1,277 +0,0 @@
-/*
- * arch/microblaze/mm/fault.c
- *
- * Copyright (C) 2007 Xilinx, Inc. All rights reserved.
- *
- * Derived from "arch/ppc/mm/fault.c"
- * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
- *
- * Derived from "arch/i386/mm/fault.c"
- * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
- *
- * Modified by Cort Dougan and Paul Mackerras.
- *
- * This file is subject to the terms and conditions of the GNU General
- * Public License. See the file COPYING in the main directory of this
- * archive for more details.
- *
- */
-
-#include <linux/module.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/ptrace.h>
-#include <linux/mman.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/mmu.h>
-#include <asm/mmu_context.h>
-#include <linux/uaccess.h>
-#include <asm/exceptions.h>
-
-static unsigned long pte_misses; /* updated by do_page_fault() */
-static unsigned long pte_errors; /* updated by do_page_fault() */
-
-/*
- * Check whether the instruction at regs->pc is a store using
- * an update addressing form which will update r1.
- */
-static int store_updates_sp(struct pt_regs *regs)
-{
- unsigned int inst;
-
- if (get_user(inst, (unsigned int __user *)regs->pc))
- return 0;
- /* check for 1 in the rD field */
- if (((inst >> 21) & 0x1f) != 1)
- return 0;
- /* check for store opcodes */
- if ((inst & 0xd0000000) == 0xd0000000)
- return 1;
- return 0;
-}
-
-
-/*
- * bad_page_fault is called when we have a bad access from the kernel.
- * It is called from do_page_fault above and from some of the procedures
- * in traps.c.
- */
-void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
-{
- const struct exception_table_entry *fixup;
-/* MS: no context */
- /* Are we prepared to handle this fault? */
- fixup = search_exception_tables(regs->pc);
- if (fixup) {
- regs->pc = fixup->fixup;
- return;
- }
-
- /* kernel has accessed a bad area */
- die("kernel access of bad area", regs, sig);
-}
-
-/*
- * The error_code parameter is ESR for a data fault,
- * 0 for an instruction fault.
- */
-void do_page_fault(struct pt_regs *regs, unsigned long address,
- unsigned long error_code)
-{
- struct vm_area_struct *vma;
- struct mm_struct *mm = current->mm;
- siginfo_t info;
- int code = SEGV_MAPERR;
- int is_write = error_code & ESR_S;
- int fault;
-
- regs->ear = address;
- regs->esr = error_code;
-
- /* On a kernel SLB miss we can only check for a valid exception entry */
- if (unlikely(kernel_mode(regs) && (address >= TASK_SIZE))) {
- printk(KERN_WARNING "kernel task_size exceed");
- _exception(SIGSEGV, regs, code, address);
- }
-
- /* for instr TLB miss and instr storage exception ESR_S is undefined */
- if ((error_code & 0x13) == 0x13 || (error_code & 0x11) == 0x11)
- is_write = 0;
-
- if (unlikely(in_atomic() || !mm)) {
- if (kernel_mode(regs))
- goto bad_area_nosemaphore;
-
- /* in_atomic() in user mode is really bad,
- as is current->mm == NULL. */
- printk(KERN_EMERG "Page fault in user mode with "
- "in_atomic(), mm = %p\n", mm);
- printk(KERN_EMERG "r15 = %lx MSR = %lx\n",
- regs->r15, regs->msr);
- die("Weird page fault", regs, SIGSEGV);
- }
-
- /* When running in the kernel we expect faults to occur only to
- * addresses in user space. All other faults represent errors in the
- * kernel and should generate an OOPS. Unfortunately, in the case of an
- * erroneous fault occurring in a code path which already holds mmap_sem
- * we will deadlock attempting to validate the fault against the
- * address space. Luckily the kernel only validly references user
- * space from well defined areas of code, which are listed in the
- * exceptions table.
- *
- * As the vast majority of faults will be valid we will only perform
- * the source reference check when there is a possibility of a deadlock.
- * Attempt to lock the address space, if we cannot we then validate the
- * source. If this is invalid we can skip the address space check,
- * thus avoiding the deadlock.
- */
- if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
- if (kernel_mode(regs) && !search_exception_tables(regs->pc))
- goto bad_area_nosemaphore;
-
- down_read(&mm->mmap_sem);
- }
-
- vma = find_vma(mm, address);
- if (unlikely(!vma))
- goto bad_area;
-
- if (vma->vm_start <= address)
- goto good_area;
-
- if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
- goto bad_area;
-
- if (unlikely(!is_write))
- goto bad_area;
-
- /*
- * N.B. The ABI allows programs to access up to
- * a few hundred bytes below the stack pointer (TBD).
- * The kernel signal delivery code writes up to about 1.5kB
- * below the stack pointer (r1) before decrementing it.
- * The exec code can write slightly over 640kB to the stack
- * before setting the user r1. Thus we allow the stack to
- * expand to 1MB without further checks.
- */
- if (unlikely(address + 0x100000 < vma->vm_end)) {
-
- /* get user regs even if this fault is in kernel mode */
- struct pt_regs *uregs = current->thread.regs;
- if (uregs == NULL)
- goto bad_area;
-
- /*
- * A user-mode access to an address a long way below
- * the stack pointer is only valid if the instruction
- * is one which would update the stack pointer to the
- * address accessed if the instruction completed,
- * i.e. either stwu rs,n(r1) or stwux rs,r1,rb
- * (or the byte, halfword, float or double forms).
- *
- * If we don't check this then any write to the area
- * between the last mapped region and the stack will
- * expand the stack rather than segfaulting.
- */
- if (address + 2048 < uregs->r1
- && (kernel_mode(regs) || !store_updates_sp(regs)))
- goto bad_area;
- }
- if (expand_stack(vma, address))
- goto bad_area;
-
-good_area:
- code = SEGV_ACCERR;
-
- /* a write */
- if (unlikely(is_write)) {
- if (unlikely(!(vma->vm_flags & VM_WRITE)))
- goto bad_area;
- /* a read */
- } else {
- /* protection fault */
- if (unlikely(error_code & 0x08000000))
- goto bad_area;
- if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC))))
- goto bad_area;
- }
-
- /*
- * If for any reason at all we couldn't handle the fault,
- * make sure we exit gracefully rather than endlessly redo
- * the fault.
- */
- fault = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
- if (unlikely(fault & VM_FAULT_ERROR)) {
- if (fault & VM_FAULT_OOM)
- goto out_of_memory;
- else if (fault & VM_FAULT_SIGBUS)
- goto do_sigbus;
- BUG();
- }
- if (unlikely(fault & VM_FAULT_MAJOR))
- current->maj_flt++;
- else
- current->min_flt++;
- up_read(&mm->mmap_sem);
- /*
- * keep track of tlb+htab misses that are good addrs but
- * just need pte's created via handle_mm_fault()
- * -- Cort
- */
- pte_misses++;
- return;
-
-bad_area:
- up_read(&mm->mmap_sem);
-
-bad_area_nosemaphore:
- pte_errors++;
-
- /* User mode accesses cause a SIGSEGV */
- if (user_mode(regs)) {
- _exception(SIGSEGV, regs, code, address);
-/* info.si_signo = SIGSEGV;
- info.si_errno = 0;
- info.si_code = code;
- info.si_addr = (void *) address;
- force_sig_info(SIGSEGV, &info, current);*/
- return;
- }
-
- bad_page_fault(regs, address, SIGSEGV);
- return;
-
-/*
- * We ran out of memory, or some other thing happened to us that made
- * us unable to handle the page fault gracefully.
- */
-out_of_memory:
- up_read(&mm->mmap_sem);
- if (!user_mode(regs))
- bad_page_fault(regs, address, SIGKILL);
- else
- pagefault_out_of_memory();
- return;
-
-do_sigbus:
- up_read(&mm->mmap_sem);
- if (user_mode(regs)) {
- info.si_signo = SIGBUS;
- info.si_errno = 0;
- info.si_code = BUS_ADRERR;
- info.si_addr = (void __user *)address;
- force_sig_info(SIGBUS, &info, current);
- return;
- }
- bad_page_fault(regs, address, SIGBUS);
-}
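One detail worth pulling out of do_page_fault() above: the stack-growth heuristic only trusts a far-below-SP access when the faulting instruction is a store that writes r1, which store_updates_sp() detects from the rD field in bits 25..21 and the 0xd0000000 opcode pattern. A standalone sketch of the same decode, runnable as plain userspace C (the sample instruction word is an assumption chosen to exercise the masks, not taken from a real trace):

#include <stdio.h>

/* Mirrors the two checks in store_updates_sp(): the destination register
 * must be r1 (the MicroBlaze stack pointer) and the opcode must match
 * the store-class pattern. */
static int looks_like_sp_updating_store(unsigned int inst)
{
	if (((inst >> 21) & 0x1f) != 1)		/* rD field, bits 25..21 */
		return 0;
	return (inst & 0xd0000000) == 0xd0000000;
}

int main(void)
{
	unsigned int sample = 0xd0000000 | (1u << 21);	/* assumed example word */

	printf("sp-updating store: %d\n", looks_like_sp_updating_store(sample));
	return 0;
}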
diff --git a/ANDROID_3.4.5/arch/microblaze/mm/highmem.c b/ANDROID_3.4.5/arch/microblaze/mm/highmem.c
deleted file mode 100644
index 7d78838e..00000000
--- a/ANDROID_3.4.5/arch/microblaze/mm/highmem.c
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * highmem.c: virtual kernel memory mappings for high memory
- *
- * PowerPC version, stolen from the i386 version.
- *
- * Used in CONFIG_HIGHMEM systems for memory pages which
- * are not addressable by direct kernel virtual addresses.
- *
- * Copyright (C) 1999 Gerhard Wichert, Siemens AG
- * Gerhard.Wichert@pdb.siemens.de
- *
- *
- * Redesigned the x86 32-bit VM architecture to deal with
- * up to 16 Terabytes of physical memory. With current x86 CPUs
- * we now support up to 64 Gigabytes physical RAM.
- *
- * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
- *
- * Reworked for PowerPC by various contributors. Moved from
- * highmem.h by Benjamin Herrenschmidt (c) 2009 IBM Corp.
- */
-
-#include <linux/highmem.h>
-#include <linux/module.h>
-
-/*
- * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
- * gives a more generic (and caching) interface. But kmap_atomic can
- * be used in IRQ contexts, so in some (very limited) cases we need
- * it.
- */
-#include <asm/tlbflush.h>
-
-void *kmap_atomic_prot(struct page *page, pgprot_t prot)
-{
-
- unsigned long vaddr;
- int idx, type;
-
- /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
- pagefault_disable();
- if (!PageHighMem(page))
- return page_address(page);
-
-
- type = kmap_atomic_idx_push();
- idx = type + KM_TYPE_NR*smp_processor_id();
- vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-#ifdef CONFIG_DEBUG_HIGHMEM
- BUG_ON(!pte_none(*(kmap_pte-idx)));
-#endif
- set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot));
- local_flush_tlb_page(NULL, vaddr);
-
- return (void *) vaddr;
-}
-EXPORT_SYMBOL(kmap_atomic_prot);
-
-void __kunmap_atomic(void *kvaddr)
-{
- unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
- int type;
-
- if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
- pagefault_enable();
- return;
- }
-
- type = kmap_atomic_idx();
-#ifdef CONFIG_DEBUG_HIGHMEM
- {
- unsigned int idx;
-
- idx = type + KM_TYPE_NR * smp_processor_id();
- BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
-
- /*
- * force other mappings to Oops if they try to access
- * this pte without first remapping it
- */
- pte_clear(&init_mm, vaddr, kmap_pte-idx);
- local_flush_tlb_page(NULL, vaddr);
- }
-#endif
- kmap_atomic_idx_pop();
- pagefault_enable();
-}
-EXPORT_SYMBOL(__kunmap_atomic);
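For context, the usual consumer pattern for the pair above is a short-lived mapping from a context that must not sleep; in this 3.4-era tree, kmap_atomic(page) resolves to kmap_atomic_prot(page, kmap_prot), and kunmap_atomic() lands in __kunmap_atomic(). A hedged sketch (the helper name is illustrative):

#include <linux/highmem.h>
#include <linux/string.h>

/* Illustrative only: zero one possibly-highmem page through the
 * per-CPU atomic kmap window implemented above. */
static void my_clear_page(struct page *page)
{
	void *addr = kmap_atomic(page);		/* pagefault_disable() + fixmap PTE */

	memset(addr, 0, PAGE_SIZE);
	kunmap_atomic(addr);			/* pops the kmap slot, re-enables pagefaults */
}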
diff --git a/ANDROID_3.4.5/arch/microblaze/mm/init.c b/ANDROID_3.4.5/arch/microblaze/mm/init.c
deleted file mode 100644
index ce808230..00000000
--- a/ANDROID_3.4.5/arch/microblaze/mm/init.c
+++ /dev/null
@@ -1,502 +0,0 @@
-/*
- * Copyright (C) 2007-2008 Michal Simek <monstr@monstr.eu>
- * Copyright (C) 2006 Atmark Techno, Inc.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#include <linux/bootmem.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/memblock.h>
-#include <linux/mm.h> /* mem_init */
-#include <linux/initrd.h>
-#include <linux/pagemap.h>
-#include <linux/pfn.h>
-#include <linux/slab.h>
-#include <linux/swap.h>
-#include <linux/export.h>
-
-#include <asm/page.h>
-#include <asm/mmu_context.h>
-#include <asm/pgalloc.h>
-#include <asm/sections.h>
-#include <asm/tlb.h>
-#include <asm/fixmap.h>
-
-/* Use for MMU and noMMU because of PCI generic code */
-int mem_init_done;
-
-#ifndef CONFIG_MMU
-unsigned int __page_offset;
-EXPORT_SYMBOL(__page_offset);
-
-#else
-static int init_bootmem_done;
-#endif /* CONFIG_MMU */
-
-char *klimit = _end;
-
-/*
- * Initialize the bootmem system and give it all the memory we
- * have available.
- */
-unsigned long memory_start;
-EXPORT_SYMBOL(memory_start);
-unsigned long memory_size;
-EXPORT_SYMBOL(memory_size);
-unsigned long lowmem_size;
-
-#ifdef CONFIG_HIGHMEM
-pte_t *kmap_pte;
-EXPORT_SYMBOL(kmap_pte);
-pgprot_t kmap_prot;
-EXPORT_SYMBOL(kmap_prot);
-
-static inline pte_t *virt_to_kpte(unsigned long vaddr)
-{
- return pte_offset_kernel(pmd_offset(pgd_offset_k(vaddr),
- vaddr), vaddr);
-}
-
-static void __init highmem_init(void)
-{
- pr_debug("%x\n", (u32)PKMAP_BASE);
- map_page(PKMAP_BASE, 0, 0); /* XXX gross */
- pkmap_page_table = virt_to_kpte(PKMAP_BASE);
-
- kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
- kmap_prot = PAGE_KERNEL;
-}
-
-static unsigned long highmem_setup(void)
-{
- unsigned long pfn;
- unsigned long reservedpages = 0;
-
- for (pfn = max_low_pfn; pfn < max_pfn; ++pfn) {
- struct page *page = pfn_to_page(pfn);
-
- /* FIXME not sure about */
- if (memblock_is_reserved(pfn << PAGE_SHIFT))
- continue;
- ClearPageReserved(page);
- init_page_count(page);
- __free_page(page);
- totalhigh_pages++;
- reservedpages++;
- }
- totalram_pages += totalhigh_pages;
- printk(KERN_INFO "High memory: %luk\n",
- totalhigh_pages << (PAGE_SHIFT-10));
-
- return reservedpages;
-}
-#endif /* CONFIG_HIGHMEM */
-
-/*
- * paging_init() sets up the page tables - in fact we've already done this.
- */
-static void __init paging_init(void)
-{
- unsigned long zones_size[MAX_NR_ZONES];
-#ifdef CONFIG_MMU
- int idx;
-
- /* Setup fixmaps */
- for (idx = 0; idx < __end_of_fixed_addresses; idx++)
- clear_fixmap(idx);
-#endif
-
- /* Clear all zones */
- memset(zones_size, 0, sizeof(zones_size));
-
-#ifdef CONFIG_HIGHMEM
- highmem_init();
-
- zones_size[ZONE_DMA] = max_low_pfn;
- zones_size[ZONE_HIGHMEM] = max_pfn;
-#else
- zones_size[ZONE_DMA] = max_pfn;
-#endif
-
- /* We don't have holes in memory map */
- free_area_init_nodes(zones_size);
-}
-
-void __init setup_memory(void)
-{
- unsigned long map_size;
- struct memblock_region *reg;
-
-#ifndef CONFIG_MMU
- u32 kernel_align_start, kernel_align_size;
-
- /* Find the main memory region containing the kernel */
- for_each_memblock(memory, reg) {
- memory_start = (u32)reg->base;
- lowmem_size = reg->size;
- if ((memory_start <= (u32)_text) &&
- ((u32)_text <= (memory_start + lowmem_size - 1))) {
- memory_size = lowmem_size;
- PAGE_OFFSET = memory_start;
- printk(KERN_INFO "%s: Main mem: 0x%x, "
- "size 0x%08x\n", __func__, (u32) memory_start,
- (u32) memory_size);
- break;
- }
- }
-
- if (!memory_start || !memory_size) {
- panic("%s: Missing memory setting 0x%08x, size=0x%08x\n",
- __func__, (u32) memory_start, (u32) memory_size);
- }
-
- /* reserve the region containing the kernel */
- kernel_align_start = PAGE_DOWN((u32)_text);
- /* ALIGN can be removed because _end in vmlinux.lds.S is aligned */
- kernel_align_size = PAGE_UP((u32)klimit) - kernel_align_start;
- printk(KERN_INFO "%s: kernel addr:0x%08x-0x%08x size=0x%08x\n",
- __func__, kernel_align_start, kernel_align_start
- + kernel_align_size, kernel_align_size);
- memblock_reserve(kernel_align_start, kernel_align_size);
-#endif
- /*
- * Kernel:
- * start: base phys address of kernel - page align
- * end: base phys address of kernel - page align
- *
- * min_low_pfn - the first page (mm/bootmem.c - node_boot_start)
- * max_low_pfn
- * max_mapnr - the first unused page (mm/bootmem.c - node_low_pfn)
- * num_physpages - number of all pages
- */
-
- /* memory start is from the kernel end (aligned) to higher addr */
- min_low_pfn = memory_start >> PAGE_SHIFT; /* minimum for allocation */
- /* RAM is assumed contiguous */
- num_physpages = max_mapnr = memory_size >> PAGE_SHIFT;
- max_low_pfn = ((u64)memory_start + (u64)lowmem_size) >> PAGE_SHIFT;
- max_pfn = ((u64)memory_start + (u64)memory_size) >> PAGE_SHIFT;
-
- printk(KERN_INFO "%s: max_mapnr: %#lx\n", __func__, max_mapnr);
- printk(KERN_INFO "%s: min_low_pfn: %#lx\n", __func__, min_low_pfn);
- printk(KERN_INFO "%s: max_low_pfn: %#lx\n", __func__, max_low_pfn);
- printk(KERN_INFO "%s: max_pfn: %#lx\n", __func__, max_pfn);
-
- /*
- * Find an area to use for the bootmem bitmap.
- * We look for the first area which is at least
- * 128kB in length (128kB is enough for a bitmap
- * for 4GB of memory, using 4kB pages), plus 1 page
- * (in case the address isn't page-aligned).
- */
- map_size = init_bootmem_node(NODE_DATA(0),
- PFN_UP(TOPHYS((u32)klimit)), min_low_pfn, max_low_pfn);
- memblock_reserve(PFN_UP(TOPHYS((u32)klimit)) << PAGE_SHIFT, map_size);
-
- /* Add active regions with valid PFNs */
- for_each_memblock(memory, reg) {
- unsigned long start_pfn, end_pfn;
-
- start_pfn = memblock_region_memory_base_pfn(reg);
- end_pfn = memblock_region_memory_end_pfn(reg);
- memblock_set_node(start_pfn << PAGE_SHIFT,
- (end_pfn - start_pfn) << PAGE_SHIFT, 0);
- }
-
- /* the free bootmem region is all of main memory */
- free_bootmem_with_active_regions(0, max_low_pfn);
-
- /* reserve allocated blocks */
- for_each_memblock(reserved, reg) {
- unsigned long top = reg->base + reg->size - 1;
-
- pr_debug("reserved - 0x%08x-0x%08x, %lx, %lx\n",
- (u32) reg->base, (u32) reg->size, top,
- memory_start + lowmem_size - 1);
-
- if (top <= (memory_start + lowmem_size - 1)) {
- reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
- } else if (reg->base < (memory_start + lowmem_size - 1)) {
- unsigned long trunc_size = memory_start + lowmem_size -
- reg->base;
- reserve_bootmem(reg->base, trunc_size, BOOTMEM_DEFAULT);
- }
- }
-
- /* XXX need to clip this if using highmem? */
- sparse_memory_present_with_active_regions(0);
-
-#ifdef CONFIG_MMU
- init_bootmem_done = 1;
-#endif
- paging_init();
-}
-
-void free_init_pages(char *what, unsigned long begin, unsigned long end)
-{
- unsigned long addr;
-
- for (addr = begin; addr < end; addr += PAGE_SIZE) {
- ClearPageReserved(virt_to_page(addr));
- init_page_count(virt_to_page(addr));
- free_page(addr);
- totalram_pages++;
- }
- printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
-}
-
-#ifdef CONFIG_BLK_DEV_INITRD
-void free_initrd_mem(unsigned long start, unsigned long end)
-{
- int pages = 0;
- for (; start < end; start += PAGE_SIZE) {
- ClearPageReserved(virt_to_page(start));
- init_page_count(virt_to_page(start));
- free_page(start);
- totalram_pages++;
- pages++;
- }
- printk(KERN_NOTICE "Freeing initrd memory: %dk freed\n",
- (int)(pages * (PAGE_SIZE / 1024)));
-}
-#endif
-
-void free_initmem(void)
-{
- free_init_pages("unused kernel memory",
- (unsigned long)(&__init_begin),
- (unsigned long)(&__init_end));
-}
-
-void __init mem_init(void)
-{
- pg_data_t *pgdat;
- unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;
-
- high_memory = (void *)__va(memory_start + lowmem_size - 1);
-
- /* this will put all memory onto the freelists */
- totalram_pages += free_all_bootmem();
-
- for_each_online_pgdat(pgdat) {
- unsigned long i;
- struct page *page;
-
- for (i = 0; i < pgdat->node_spanned_pages; i++) {
- if (!pfn_valid(pgdat->node_start_pfn + i))
- continue;
- page = pgdat_page_nr(pgdat, i);
- if (PageReserved(page))
- reservedpages++;
- }
- }
-
-#ifdef CONFIG_HIGHMEM
- reservedpages -= highmem_setup();
-#endif
-
- codesize = (unsigned long)&_sdata - (unsigned long)&_stext;
- datasize = (unsigned long)&_edata - (unsigned long)&_sdata;
- initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
- bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;
-
- pr_info("Memory: %luk/%luk available (%luk kernel code, "
- "%luk reserved, %luk data, %luk bss, %luk init)\n",
- nr_free_pages() << (PAGE_SHIFT-10),
- num_physpages << (PAGE_SHIFT-10),
- codesize >> 10,
- reservedpages << (PAGE_SHIFT-10),
- datasize >> 10,
- bsssize >> 10,
- initsize >> 10);
-
-#ifdef CONFIG_MMU
- pr_info("Kernel virtual memory layout:\n");
- pr_info(" * 0x%08lx..0x%08lx : fixmap\n", FIXADDR_START, FIXADDR_TOP);
-#ifdef CONFIG_HIGHMEM
- pr_info(" * 0x%08lx..0x%08lx : highmem PTEs\n",
- PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
-#endif /* CONFIG_HIGHMEM */
- pr_info(" * 0x%08lx..0x%08lx : early ioremap\n",
- ioremap_bot, ioremap_base);
- pr_info(" * 0x%08lx..0x%08lx : vmalloc & ioremap\n",
- (unsigned long)VMALLOC_START, VMALLOC_END);
-#endif
- mem_init_done = 1;
-}
-
-#ifndef CONFIG_MMU
-int page_is_ram(unsigned long pfn)
-{
- return __range_ok(pfn, 0);
-}
-#else
-int page_is_ram(unsigned long pfn)
-{
- return pfn < max_low_pfn;
-}
-
-/*
- * Check for command-line options that affect what MMU_init will do.
- */
-static void mm_cmdline_setup(void)
-{
- unsigned long maxmem = 0;
- char *p = cmd_line;
-
- /* Look for mem= option on command line */
- p = strstr(cmd_line, "mem=");
- if (p) {
- p += 4;
- maxmem = memparse(p, &p);
- if (maxmem && memory_size > maxmem) {
- memory_size = maxmem;
- memblock.memory.regions[0].size = memory_size;
- }
- }
-}
-
-/*
- * MMU_init_hw does the chip-specific initialization of the MMU hardware.
- */
-static void __init mmu_init_hw(void)
-{
- /*
- * The Zone Protection Register (ZPR) defines how protection will
- * be applied to every page which is a member of a given zone. At
- * present, we utilize only two of the zones.
- * The zone index bits (of ZSEL) in the PTE are used for software
- * indicators, except the LSB. For user access, zone 1 is used,
- * for kernel access, zone 0 is used. We set all but zone 1
- * to zero, allowing only kernel access as indicated in the PTE.
- * For zone 1, we set a 01 binary (a value of 10 will not work)
- * to allow user access as indicated in the PTE. This also allows
- * kernel access as indicated in the PTE.
- */
- __asm__ __volatile__ ("ori r11, r0, 0x10000000;" \
- "mts rzpr, r11;"
- : : : "r11");
-}
-
-/*
- * MMU_init sets up the basic memory mappings for the kernel,
- * including both RAM and possibly some I/O regions,
- * and sets up the page tables and the MMU hardware ready to go.
- */
-
-/* called from head.S */
-asmlinkage void __init mmu_init(void)
-{
- unsigned int kstart, ksize;
-
- if (!memblock.reserved.cnt) {
- printk(KERN_EMERG "Error memory count\n");
- machine_restart(NULL);
- }
-
- if ((u32) memblock.memory.regions[0].size < 0x400000) {
- printk(KERN_EMERG "Memory must be greater than 4MB\n");
- machine_restart(NULL);
- }
-
- if ((u32) memblock.memory.regions[0].size < kernel_tlb) {
- printk(KERN_EMERG "Kernel size is greater than memory node\n");
- machine_restart(NULL);
- }
-
- /* Find main memory where the kernel is */
- memory_start = (u32) memblock.memory.regions[0].base;
- lowmem_size = memory_size = (u32) memblock.memory.regions[0].size;
-
- if (lowmem_size > CONFIG_LOWMEM_SIZE) {
- lowmem_size = CONFIG_LOWMEM_SIZE;
-#ifndef CONFIG_HIGHMEM
- memory_size = lowmem_size;
-#endif
- }
-
- mm_cmdline_setup(); /* FIXME parse args from command line - not used */
-
- /*
- * Map out the kernel text/data/bss from the available physical
- * memory.
- */
- kstart = __pa(CONFIG_KERNEL_START); /* kernel start */
- /* kernel size */
- ksize = PAGE_ALIGN(((u32)_end - (u32)CONFIG_KERNEL_START));
- memblock_reserve(kstart, ksize);
-
-#if defined(CONFIG_BLK_DEV_INITRD)
- /* Remove the init RAM disk from the available memory. */
-/* if (initrd_start) {
- mem_pieces_remove(&phys_avail, __pa(initrd_start),
- initrd_end - initrd_start, 1);
- }*/
-#endif /* CONFIG_BLK_DEV_INITRD */
-
- /* Initialize the MMU hardware */
- mmu_init_hw();
-
- /* Map in all of RAM starting at CONFIG_KERNEL_START */
- mapin_ram();
-
- /* Extend vmalloc and ioremap area as big as possible */
-#ifdef CONFIG_HIGHMEM
- ioremap_base = ioremap_bot = PKMAP_BASE;
-#else
- ioremap_base = ioremap_bot = FIXADDR_START;
-#endif
-
- /* Initialize the context management stuff */
- mmu_context_init();
-
- /* Shortly after that, the entire linear mapping will be available */
- /* This also ensures that the unflattened device tree is allocated
- * within the 768MB limit */
- memblock_set_current_limit(memory_start + lowmem_size - 1);
-}
-
-/* This is only called until mem_init is done. */
-void __init *early_get_page(void)
-{
- void *p;
- if (init_bootmem_done) {
- p = alloc_bootmem_pages(PAGE_SIZE);
- } else {
- /*
- * Mem start + kernel_tlb -> here is limit
- * because of mem mapping from head.S
- */
- p = __va(memblock_alloc_base(PAGE_SIZE, PAGE_SIZE,
- memory_start + kernel_tlb));
- }
- return p;
-}
-
-#endif /* CONFIG_MMU */
-
-void * __init_refok alloc_maybe_bootmem(size_t size, gfp_t mask)
-{
- if (mem_init_done)
- return kmalloc(size, mask);
- else
- return alloc_bootmem(size);
-}
-
-void * __init_refok zalloc_maybe_bootmem(size_t size, gfp_t mask)
-{
- void *p;
-
- if (mem_init_done)
- p = kzalloc(size, mask);
- else {
- p = alloc_bootmem(size);
- if (p)
- memset(p, 0, size);
- }
- return p;
-}
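To make the mem= handling in mm_cmdline_setup() above concrete, here is a self-contained sketch of the same clipping flow, with a simplified stand-in for the kernel's memparse(); the sample command line and RAM size are assumptions for illustration:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Simplified stand-in for the kernel's memparse(): a number with an
 * optional K/M/G suffix. */
static unsigned long my_memparse(const char *p, char **end)
{
	unsigned long val = strtoul(p, end, 0);

	switch (**end) {
	case 'G': case 'g': val <<= 10; /* fall through */
	case 'M': case 'm': val <<= 10; /* fall through */
	case 'K': case 'k': val <<= 10; (*end)++;
	}
	return val;
}

int main(void)
{
	char cmd_line[] = "console=ttyUL0 mem=64M";	/* assumed example */
	unsigned long memory_size = 128UL << 20;	/* assumed 128MB of RAM */
	char *p = strstr(cmd_line, "mem=");

	if (p) {
		unsigned long maxmem = my_memparse(p + 4, &p);

		if (maxmem && memory_size > maxmem)
			memory_size = maxmem;	/* clip, as mm_cmdline_setup() does */
	}
	printf("memory_size = %lu MB\n", memory_size >> 20);
	return 0;
}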
diff --git a/ANDROID_3.4.5/arch/microblaze/mm/mmu_context.c b/ANDROID_3.4.5/arch/microblaze/mm/mmu_context.c
deleted file mode 100644
index 26ff82f4..00000000
--- a/ANDROID_3.4.5/arch/microblaze/mm/mmu_context.c
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * This file contains the routines for handling the MMU.
- *
- * Copyright (C) 2007 Xilinx, Inc. All rights reserved.
- *
- * Derived from arch/ppc/mm/4xx_mmu.c:
- * -- paulus
- *
- * Derived from arch/ppc/mm/init.c:
- * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
- *
- * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
- * and Cort Dougan (PReP) (cort@cs.nmt.edu)
- * Copyright (C) 1996 Paul Mackerras
- * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
- *
- * Derived from "arch/i386/mm/init.c"
- * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- */
-
-#include <linux/mm.h>
-#include <linux/init.h>
-
-#include <asm/tlbflush.h>
-#include <asm/mmu_context.h>
-
-mm_context_t next_mmu_context;
-unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
-atomic_t nr_free_contexts;
-struct mm_struct *context_mm[LAST_CONTEXT+1];
-
-/*
- * Initialize the context management stuff.
- */
-void __init mmu_context_init(void)
-{
- /*
- * The use of context zero is reserved for the kernel.
- * This code assumes FIRST_CONTEXT < 32.
- */
- context_map[0] = (1 << FIRST_CONTEXT) - 1;
- next_mmu_context = FIRST_CONTEXT;
- atomic_set(&nr_free_contexts, LAST_CONTEXT - FIRST_CONTEXT + 1);
-}
-
-/*
- * Steal a context from a task that has one at the moment.
- *
- * This isn't an LRU system, it just frees up each context in
- * turn (sort-of pseudo-random replacement :). This would be the
- * place to implement an LRU scheme if anyone were motivated to do it.
- */
-void steal_context(void)
-{
- struct mm_struct *mm;
-
- /* free up context `next_mmu_context' */
- /* if we shouldn't free context 0, don't... */
- if (next_mmu_context < FIRST_CONTEXT)
- next_mmu_context = FIRST_CONTEXT;
- mm = context_mm[next_mmu_context];
- flush_tlb_mm(mm);
- destroy_context(mm);
-}
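The bitmap and counters above are consumed by the context allocator in the asm/mmu_context headers; steal_context() is its fallback when no context is free. A hedged, standalone sketch of the find-first-zero allocation idea the map supports, simplified to a single 32-bit word (the real map spans LAST_CONTEXT+1 bits):

#include <stdio.h>

static unsigned int context_map;	/* bit n set => context n in use */

/* Illustrative only: hand out the lowest free context number, or -1
 * when all 32 are taken (the point where steal_context() would be
 * invoked in the real allocator). */
static int my_alloc_context(void)
{
	int ctx;

	for (ctx = 0; ctx < 32; ctx++) {
		if (!(context_map & (1u << ctx))) {
			context_map |= 1u << ctx;
			return ctx;
		}
	}
	return -1;
}

int main(void)
{
	/* reserve context 0, as mmu_context_init() does (assuming FIRST_CONTEXT == 1) */
	context_map = (1 << 1) - 1;
	printf("first user context: %d\n", my_alloc_context());	/* prints 1 */
	return 0;
}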
diff --git a/ANDROID_3.4.5/arch/microblaze/mm/pgtable.c b/ANDROID_3.4.5/arch/microblaze/mm/pgtable.c
deleted file mode 100644
index d1c06d07..00000000
--- a/ANDROID_3.4.5/arch/microblaze/mm/pgtable.c
+++ /dev/null
@@ -1,262 +0,0 @@
-/*
- * This file contains the routines setting up the linux page tables.
- *
- * Copyright (C) 2008 Michal Simek
- * Copyright (C) 2008 PetaLogix
- *
- * Copyright (C) 2007 Xilinx, Inc. All rights reserved.
- *
- * Derived from arch/ppc/mm/pgtable.c:
- * -- paulus
- *
- * Derived from arch/ppc/mm/init.c:
- * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
- *
- * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
- * and Cort Dougan (PReP) (cort@cs.nmt.edu)
- * Copyright (C) 1996 Paul Mackerras
- * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
- *
- * Derived from "arch/i386/mm/init.c"
- * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
- *
- * This file is subject to the terms and conditions of the GNU General
- * Public License. See the file COPYING in the main directory of this
- * archive for more details.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/vmalloc.h>
-#include <linux/init.h>
-
-#include <asm/pgtable.h>
-#include <asm/pgalloc.h>
-#include <linux/io.h>
-#include <asm/mmu.h>
-#include <asm/sections.h>
-#include <asm/fixmap.h>
-
-#define flush_HPTE(X, va, pg) _tlbie(va)
-
-unsigned long ioremap_base;
-unsigned long ioremap_bot;
-EXPORT_SYMBOL(ioremap_bot);
-
-#ifndef CONFIG_SMP
-struct pgtable_cache_struct quicklists;
-#endif
-
-static void __iomem *__ioremap(phys_addr_t addr, unsigned long size,
- unsigned long flags)
-{
- unsigned long v, i;
- phys_addr_t p;
- int err;
-
- /*
- * Choose an address to map it to.
- * Once the vmalloc system is running, we use it.
- * Before then, we use space going down from ioremap_base
- * (ioremap_bot records where we're up to).
- */
- p = addr & PAGE_MASK;
- size = PAGE_ALIGN(addr + size) - p;
-
- /*
- * Don't allow anybody to remap normal RAM that we're using.
- * mem_init() sets high_memory so only do the check after that.
- *
- * However, allow remap of rootfs: TBD
- */
- if (mem_init_done &&
- p >= memory_start && p < virt_to_phys(high_memory) &&
- !(p >= virt_to_phys((unsigned long)&__bss_stop) &&
- p < virt_to_phys((unsigned long)__bss_stop))) {
- printk(KERN_WARNING "__ioremap(): phys addr "PTE_FMT
- " is RAM lr %pf\n", (unsigned long)p,
- __builtin_return_address(0));
- return NULL;
- }
-
- if (size == 0)
- return NULL;
-
- /*
- * Is it already mapped? If the whole area is mapped then we're
- * done, otherwise remap it since we want to keep the virt addrs for
- * each request contiguous.
- *
- * We make the assumption here that if the bottom and top
- * of the range we want are mapped then it's mapped to the
- * same virt address (and this is contiguous).
- * -- Cort
- */
-
- if (mem_init_done) {
- struct vm_struct *area;
- area = get_vm_area(size, VM_IOREMAP);
- if (area == NULL)
- return NULL;
- v = (unsigned long) area->addr;
- } else {
- v = (ioremap_bot -= size);
- }
-
- if ((flags & _PAGE_PRESENT) == 0)
- flags |= _PAGE_KERNEL;
- if (flags & _PAGE_NO_CACHE)
- flags |= _PAGE_GUARDED;
-
- err = 0;
- for (i = 0; i < size && err == 0; i += PAGE_SIZE)
- err = map_page(v + i, p + i, flags);
- if (err) {
- if (mem_init_done)
- vfree((void *)v);
- return NULL;
- }
-
- return (void __iomem *) (v + ((unsigned long)addr & ~PAGE_MASK));
-}
-
-void __iomem *ioremap(phys_addr_t addr, unsigned long size)
-{
- return __ioremap(addr, size, _PAGE_NO_CACHE);
-}
-EXPORT_SYMBOL(ioremap);
-
-void iounmap(void *addr)
-{
- if (addr > high_memory && (unsigned long) addr < ioremap_bot)
- vfree((void *) (PAGE_MASK & (unsigned long) addr));
-}
-EXPORT_SYMBOL(iounmap);
-
-
-int map_page(unsigned long va, phys_addr_t pa, int flags)
-{
- pmd_t *pd;
- pte_t *pg;
- int err = -ENOMEM;
- /* Use upper 10 bits of VA to index the first level map */
- pd = pmd_offset(pgd_offset_k(va), va);
- /* Use middle 10 bits of VA to index the second-level map */
- pg = pte_alloc_kernel(pd, va); /* from powerpc - pgtable.c */
- /* pg = pte_alloc_kernel(&init_mm, pd, va); */
-
- if (pg != NULL) {
- err = 0;
- set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT,
- __pgprot(flags)));
- if (unlikely(mem_init_done))
- flush_HPTE(0, va, pmd_val(*pd));
- /* flush_HPTE(0, va, pg); */
- }
- return err;
-}
-
-/*
- * Map in all of physical memory starting at CONFIG_KERNEL_START.
- */
-void __init mapin_ram(void)
-{
- unsigned long v, p, s, f;
-
- v = CONFIG_KERNEL_START;
- p = memory_start;
- for (s = 0; s < lowmem_size; s += PAGE_SIZE) {
- f = _PAGE_PRESENT | _PAGE_ACCESSED |
- _PAGE_SHARED | _PAGE_HWEXEC;
- if ((char *) v < _stext || (char *) v >= _etext)
- f |= _PAGE_WRENABLE;
- else
- /* On the MicroBlaze, no user access
- forces R/W kernel access */
- f |= _PAGE_USER;
- map_page(v, p, f);
- v += PAGE_SIZE;
- p += PAGE_SIZE;
- }
-}
-
-/* is x a power of 2? */
-#define is_power_of_2(x) ((x) != 0 && (((x) & ((x) - 1)) == 0))
-
-/* Scan the real Linux page tables and return a PTE pointer for
- * a virtual address in a context.
- * Returns true (1) if PTE was found, zero otherwise. The pointer to
- * the PTE pointer is unmodified if PTE is not found.
- */
-static int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep)
-{
- pgd_t *pgd;
- pmd_t *pmd;
- pte_t *pte;
- int retval = 0;
-
- pgd = pgd_offset(mm, addr & PAGE_MASK);
- if (pgd) {
- pmd = pmd_offset(pgd, addr & PAGE_MASK);
- if (pmd_present(*pmd)) {
- pte = pte_offset_kernel(pmd, addr & PAGE_MASK);
- if (pte) {
- retval = 1;
- *ptep = pte;
- }
- }
- }
- return retval;
-}
-
-/* Find physical address for this virtual address. Normally used by
- * I/O functions, but anyone can call it.
- */
-unsigned long iopa(unsigned long addr)
-{
- unsigned long pa;
-
- pte_t *pte;
- struct mm_struct *mm;
-
- /* Allow mapping of user addresses (within the thread)
- * for DMA if necessary.
- */
- if (addr < TASK_SIZE)
- mm = current->mm;
- else
- mm = &init_mm;
-
- pa = 0;
- if (get_pteptr(mm, addr, &pte))
- pa = (pte_val(*pte) & PAGE_MASK) | (addr & ~PAGE_MASK);
-
- return pa;
-}
-
-__init_refok pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
- unsigned long address)
-{
- pte_t *pte;
- if (mem_init_done) {
- pte = (pte_t *)__get_free_page(GFP_KERNEL |
- __GFP_REPEAT | __GFP_ZERO);
- } else {
- pte = (pte_t *)early_get_page();
- if (pte)
- clear_page(pte);
- }
- return pte;
-}
-
-void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags)
-{
- unsigned long address = __fix_to_virt(idx);
-
- if (idx >= __end_of_fixed_addresses)
- BUG();
-
- map_page(address, phys, pgprot_val(flags));
-}
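Closing the loop on pgtable.c above: ioremap() funnels into __ioremap() with _PAGE_NO_CACHE (which also implies _PAGE_GUARDED), and iounmap() — note its void * prototype in this file — vfree()s ranges that came from vmalloc space. A sketch of the consumer side, with an assumed device base, size, and register offset:

#include <linux/errno.h>
#include <linux/io.h>

#define MY_DEV_BASE	0x84000000UL	/* assumed device physical base */
#define MY_DEV_SIZE	0x1000UL	/* one page of registers */
#define MY_REG_CTRL	0x04		/* assumed control-register offset */

static void __iomem *my_regs;

static int my_dev_map(void)
{
	my_regs = ioremap(MY_DEV_BASE, MY_DEV_SIZE);	/* uncached + guarded mapping */
	if (!my_regs)
		return -ENOMEM;
	writel(0x1, my_regs + MY_REG_CTRL);		/* example register poke */
	return 0;
}

static void my_dev_unmap(void)
{
	/* cast matches this file's iounmap(void *addr) prototype */
	iounmap((void __force *)my_regs);
}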