author      Srikant Patnaik    2015-01-11 12:28:04 +0530
committer   Srikant Patnaik    2015-01-11 12:28:04 +0530
commit      871480933a1c28f8a9fed4c4d34d06c439a7a422
tree        8718f573808810c2a1e8cb8fb6ac469093ca2784   /arch/xtensa/mm
parent      9d40ac5867b9aefe0722bc1f110b965ff294d30d
Moved, renamed, and deleted files

The original directory structure was scattered and unorganized.
These changes reorganize it to match the standard kernel source layout.
Diffstat (limited to 'arch/xtensa/mm')

-rw-r--r--   arch/xtensa/mm/Makefile      6
-rw-r--r--   arch/xtensa/mm/cache.c     255
-rw-r--r--   arch/xtensa/mm/fault.c     236
-rw-r--r--   arch/xtensa/mm/init.c      243
-rw-r--r--   arch/xtensa/mm/misc.S      447
-rw-r--r--   arch/xtensa/mm/mmu.c        68
-rw-r--r--   arch/xtensa/mm/tlb.c       143

7 files changed, 1398 insertions(+), 0 deletions(-)
diff --git a/arch/xtensa/mm/Makefile b/arch/xtensa/mm/Makefile
new file mode 100644
index 00000000..f0b646d2
--- /dev/null
+++ b/arch/xtensa/mm/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for the Linux/Xtensa-specific parts of the memory manager.
+#
+
+obj-y := init.o cache.o misc.o
+obj-$(CONFIG_MMU) += fault.o mmu.o tlb.o
diff --git a/arch/xtensa/mm/cache.c b/arch/xtensa/mm/cache.c
new file mode 100644
index 00000000..85df4655
--- /dev/null
+++ b/arch/xtensa/mm/cache.c
@@ -0,0 +1,255 @@
+/*
+ * arch/xtensa/mm/cache.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001-2006 Tensilica Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ * Joe Taylor
+ * Marc Gauthier
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/bootmem.h>
+#include <linux/swap.h>
+#include <linux/pagemap.h>
+
+#include <asm/bootparam.h>
+#include <asm/mmu_context.h>
+#include <asm/tlb.h>
+#include <asm/tlbflush.h>
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+
+//#define printd(x...) printk(x)
+#define printd(x...) do { } while(0)
+
+/*
+ * Note:
+ * The kernel provides one architecture bit PG_arch_1 in the page flags that
+ * can be used for cache coherency.
+ *
+ * I$-D$ coherency.
+ *
+ * The Xtensa architecture doesn't keep the instruction cache coherent with
+ * the data cache. We use the architecture bit to indicate if the caches
+ * are coherent. The kernel clears this bit whenever a page is added to the
+ * page cache. At that time, the caches might not be in sync. We, therefore,
+ * define this flag as 'clean' if set.
+ *
+ * D-cache aliasing.
+ *
+ * With cache aliasing, we have to always flush the cache when pages are
+ * unmapped (see tlb_start_vma()). So, we use this flag to indicate a dirty
+ * page.
+ *
+ *
+ *
+ */
+
+#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
+
+/*
+ * Any time the kernel writes to a user page cache page, or it is about to
+ * read from a page cache page this routine is called.
+ *
+ */
+
+void flush_dcache_page(struct page *page)
+{
+	struct address_space *mapping = page_mapping(page);
+
+	/*
+	 * If we have a mapping but the page is not mapped to user-space
+	 * yet, we simply mark this page dirty and defer flushing the
+	 * caches until update_mmu().
+	 */
+
+	if (mapping && !mapping_mapped(mapping)) {
+		if (!test_bit(PG_arch_1, &page->flags))
+			set_bit(PG_arch_1, &page->flags);
+		return;
+
+	} else {
+
+		unsigned long phys = page_to_phys(page);
+		unsigned long temp = page->index << PAGE_SHIFT;
+		unsigned long alias = !(DCACHE_ALIAS_EQ(temp, phys));
+		unsigned long virt;
+
+		/*
+		 * Flush the page in kernel space and user space.
+		 * Note that we can omit that step if aliasing is not
+		 * an issue, but we do have to synchronize I$ and D$
+		 * if we have a mapping.
+		 */
+
+		if (!alias && !mapping)
+			return;
+
+		__flush_invalidate_dcache_page((long)page_address(page));
+
+		virt = TLBTEMP_BASE_1 + (temp & DCACHE_ALIAS_MASK);
+
+		if (alias)
+			__flush_invalidate_dcache_page_alias(virt, phys);
+
+		if (mapping)
+			__invalidate_icache_page_alias(virt, phys);
+	}
+
+	/* There shouldn't be an entry in the cache for this page anymore. */
+}
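The aliasing test above is pure cache-color arithmetic: two mappings of the same physical page can land in different cache lines only if they differ in the index bits above PAGE_SHIFT. A minimal user-space sketch of that check, assuming a hypothetical 16 KiB way size and 4 KiB pages (the real constants and the DCACHE_ALIAS_EQ macro come from <asm/page.h>, which is not part of this diff):

#include <stdio.h>

#define PAGE_SHIFT        12
#define PAGE_SIZE         (1UL << PAGE_SHIFT)
#define DCACHE_WAY_SIZE   (4 * PAGE_SIZE)           /* assumed: 16 KiB way */
#define DCACHE_ALIAS_MASK (DCACHE_WAY_SIZE - PAGE_SIZE)

/* Two addresses can alias only when their "color" bits differ. */
static int dcache_alias_eq(unsigned long a, unsigned long b)
{
	return ((a ^ b) & DCACHE_ALIAS_MASK) == 0;
}

int main(void)
{
	unsigned long virt = 0x10003000, phys = 0x0000b000;

	printf("color(virt)=%lu color(phys)=%lu same color: %s\n",
	       (virt & DCACHE_ALIAS_MASK) >> PAGE_SHIFT,
	       (phys & DCACHE_ALIAS_MASK) >> PAGE_SHIFT,
	       dcache_alias_eq(virt, phys) ? "yes (no alias flush needed)"
					   : "no (must flush alias)");
	return 0;
}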
+
+
+/*
+ * For now, flush the whole cache. FIXME??
+ */
+
+void flush_cache_range(struct vm_area_struct* vma,
+		       unsigned long start, unsigned long end)
+{
+	__flush_invalidate_dcache_all();
+	__invalidate_icache_all();
+}
+
+/*
+ * Remove any entry in the cache for this page.
+ *
+ * Note that this function is only called for user pages, so use the
+ * alias versions of the cache flush functions.
+ */
+
+void flush_cache_page(struct vm_area_struct* vma, unsigned long address,
+		      unsigned long pfn)
+{
+	/* Note that we have to use the 'alias' address to avoid multi-hit */
+
+	unsigned long phys = page_to_phys(pfn_to_page(pfn));
+	unsigned long virt = TLBTEMP_BASE_1 + (address & DCACHE_ALIAS_MASK);
+
+	__flush_invalidate_dcache_page_alias(virt, phys);
+	__invalidate_icache_page_alias(virt, phys);
+}
+
+#endif
+
+void
+update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
+{
+	unsigned long pfn = pte_pfn(*ptep);
+	struct page *page;
+
+	if (!pfn_valid(pfn))
+		return;
+
+	page = pfn_to_page(pfn);
+
+	/* Invalidate old entry in TLBs */
+
+	invalidate_itlb_mapping(addr);
+	invalidate_dtlb_mapping(addr);
+
+#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
+
+	if (!PageReserved(page) && test_bit(PG_arch_1, &page->flags)) {
+
+		unsigned long vaddr = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK);
+		unsigned long paddr = (unsigned long) page_address(page);
+		unsigned long phys = page_to_phys(page);
+
+		__flush_invalidate_dcache_page(paddr);
+
+		__flush_invalidate_dcache_page_alias(vaddr, phys);
+		__invalidate_icache_page_alias(vaddr, phys);
+
+		clear_bit(PG_arch_1, &page->flags);
+	}
+#else
+	if (!PageReserved(page) && !test_bit(PG_arch_1, &page->flags)
+	    && (vma->vm_flags & VM_EXEC) != 0) {
+		unsigned long paddr = (unsigned long) page_address(page);
+		__flush_dcache_page(paddr);
+		__invalidate_icache_page(paddr);
+		set_bit(PG_arch_1, &page->flags);
+	}
+#endif
+}
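The PG_arch_1 handshake between flush_dcache_page() and update_mmu_cache() is easy to lose track of. A toy user-space model of just the protocol, with a plain int standing in for the page-flags bit and printf standing in for the flush primitives (illustration only, not kernel code):

#include <stdio.h>

static int pg_arch_1;	/* toy stand-in for the PG_arch_1 page flag */

static void flush_dcache_page_model(int mapped_to_user)
{
	if (!mapped_to_user) {
		pg_arch_1 = 1;	/* mark dirty, defer the real flush */
		printf("deferred: page marked dirty\n");
	} else {
		printf("flush now: kernel address + user alias\n");
	}
}

static void update_mmu_cache_model(void)
{
	if (pg_arch_1) {	/* the deferred work becomes due here */
		printf("flushing D$ and invalidating I$ alias\n");
		pg_arch_1 = 0;
	}
}

int main(void)
{
	flush_dcache_page_model(0);	/* page-cache write, no user mapping yet */
	update_mmu_cache_model();	/* first user mapping faults the page in */
	return 0;
}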
+
+/*
+ * access_process_vm() has called get_user_pages(), which has done a
+ * flush_dcache_page() on the page.
+ */
+
+#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
+
+void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+		unsigned long vaddr, void *dst, const void *src,
+		unsigned long len)
+{
+	unsigned long phys = page_to_phys(page);
+	unsigned long alias = !(DCACHE_ALIAS_EQ(vaddr, phys));
+
+	/* Flush and invalidate user page if aliased. */
+
+	if (alias) {
+		unsigned long temp = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
+		__flush_invalidate_dcache_page_alias(temp, phys);
+	}
+
+	/* Copy data */
+
+	memcpy(dst, src, len);
+
+	/*
+	 * Flush and invalidate kernel page if aliased and synchronize
+	 * data and instruction caches for executable pages.
+	 */
+
+	if (alias) {
+		unsigned long temp = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
+
+		__flush_invalidate_dcache_range((unsigned long) dst, len);
+		if ((vma->vm_flags & VM_EXEC) != 0) {
+			__invalidate_icache_page_alias(temp, phys);
+		}
+
+	} else if ((vma->vm_flags & VM_EXEC) != 0) {
+		__flush_dcache_range((unsigned long)dst, len);
+		__invalidate_icache_range((unsigned long) dst, len);
+	}
+}
+
+extern void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
+		unsigned long vaddr, void *dst, const void *src,
+		unsigned long len)
+{
+	unsigned long phys = page_to_phys(page);
+	unsigned long alias = !(DCACHE_ALIAS_EQ(vaddr, phys));
+
+	/*
+	 * Flush user page if aliased.
+	 * (Note: a simple flush would be sufficient)
+	 */
+
+	if (alias) {
+		unsigned long temp = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
+		__flush_invalidate_dcache_page_alias(temp, phys);
+	}
+
+	memcpy(dst, src, len);
+}
+
+#endif
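The fault handler in the next file is the producer of the SIGSEGV/SIGBUS siginfo that user space eventually observes. For orientation, here is the consumer side as a small self-contained program (standard POSIX, nothing xtensa-specific); the si_addr it prints is the excvaddr value do_page_fault() stores:

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void segv_handler(int sig, siginfo_t *info, void *ctx)
{
	/* si_addr carries the faulting address filled in by the kernel. */
	printf("SIGSEGV at %p, si_code=%d\n", info->si_addr, info->si_code);
	_exit(0);
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = segv_handler;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGSEGV, &sa, NULL);

	*(volatile int *)0x10 = 42;	/* unmapped address -> SEGV_MAPERR */
	return 0;
}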
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
new file mode 100644
index 00000000..b17885a0
--- /dev/null
+++ b/arch/xtensa/mm/fault.c
@@ -0,0 +1,236 @@
+// TODO VM_EXEC flag work-around, cache aliasing
+/*
+ * arch/xtensa/mm/fault.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
+ */
+
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/hardirq.h>
+#include <asm/mmu_context.h>
+#include <asm/cacheflush.h>
+#include <asm/hardirq.h>
+#include <asm/uaccess.h>
+#include <asm/pgalloc.h>
+
+unsigned long asid_cache = ASID_USER_FIRST;
+void bad_page_fault(struct pt_regs*, unsigned long, int);
+
+#undef DEBUG_PAGE_FAULT
+
+/*
+ * This routine handles page faults. It determines the address,
+ * and the problem, and then passes it off to one of the appropriate
+ * routines.
+ *
+ * Note: does not handle Miss and MultiHit.
+ */
+
+void do_page_fault(struct pt_regs *regs)
+{
+	struct vm_area_struct * vma;
+	struct mm_struct *mm = current->mm;
+	unsigned int exccause = regs->exccause;
+	unsigned int address = regs->excvaddr;
+	siginfo_t info;
+
+	int is_write, is_exec;
+	int fault;
+
+	info.si_code = SEGV_MAPERR;
+
+	/* We fault-in kernel-space virtual memory on-demand. The
+	 * 'reference' page table is init_mm.pgd.
+	 */
+	if (address >= TASK_SIZE && !user_mode(regs))
+		goto vmalloc_fault;
+
+	/* If we're in an interrupt or have no user
+	 * context, we must not take the fault..
+	 */
+	if (in_atomic() || !mm) {
+		bad_page_fault(regs, address, SIGSEGV);
+		return;
+	}
+
+	is_write = (exccause == EXCCAUSE_STORE_CACHE_ATTRIBUTE) ? 1 : 0;
+	is_exec = (exccause == EXCCAUSE_ITLB_PRIVILEGE ||
+		   exccause == EXCCAUSE_ITLB_MISS ||
+		   exccause == EXCCAUSE_FETCH_CACHE_ATTRIBUTE) ? 1 : 0;
+
+#ifdef DEBUG_PAGE_FAULT
+	printk("[%s:%d:%08x:%d:%08x:%s%s]\n", current->comm, current->pid,
+	       address, exccause, regs->pc, is_write? "w":"", is_exec? "x":"");
+#endif
+
+	down_read(&mm->mmap_sem);
+	vma = find_vma(mm, address);
+
+	if (!vma)
+		goto bad_area;
+	if (vma->vm_start <= address)
+		goto good_area;
+	if (!(vma->vm_flags & VM_GROWSDOWN))
+		goto bad_area;
+	if (expand_stack(vma, address))
+		goto bad_area;
+
+	/* Ok, we have a good vm_area for this memory access, so
+	 * we can handle it..
+	 */
+
+good_area:
+	info.si_code = SEGV_ACCERR;
+
+	if (is_write) {
+		if (!(vma->vm_flags & VM_WRITE))
+			goto bad_area;
+	} else if (is_exec) {
+		if (!(vma->vm_flags & VM_EXEC))
+			goto bad_area;
+	} else	/* Allow read even from write-only pages. */
+		if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
+			goto bad_area;
+
+	/* If for any reason at all we couldn't handle the fault,
+	 * make sure we exit gracefully rather than endlessly redo
+	 * the fault.
+	 */
+	fault = handle_mm_fault(mm, vma, address,
+				is_write ? FAULT_FLAG_WRITE : 0);
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM)
+			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGBUS)
+			goto do_sigbus;
+		BUG();
+	}
+	if (fault & VM_FAULT_MAJOR)
+		current->maj_flt++;
+	else
+		current->min_flt++;
+
+	up_read(&mm->mmap_sem);
+	return;
+
+	/* Something tried to access memory that isn't in our memory map..
+	 * Fix it, but check if it's kernel or user first..
+	 */
+bad_area:
+	up_read(&mm->mmap_sem);
+	if (user_mode(regs)) {
+		current->thread.bad_vaddr = address;
+		current->thread.error_code = is_write;
+		info.si_signo = SIGSEGV;
+		info.si_errno = 0;
+		/* info.si_code has been set above */
+		info.si_addr = (void *) address;
+		force_sig_info(SIGSEGV, &info, current);
+		return;
+	}
+	bad_page_fault(regs, address, SIGSEGV);
+	return;
+
+
+	/* We ran out of memory, or some other thing happened to us that made
+	 * us unable to handle the page fault gracefully.
+	 */
+out_of_memory:
+	up_read(&mm->mmap_sem);
+	if (!user_mode(regs))
+		bad_page_fault(regs, address, SIGKILL);
+	else
+		pagefault_out_of_memory();
+	return;
+
+do_sigbus:
+	up_read(&mm->mmap_sem);
+
+	/* Send a sigbus, regardless of whether we were in kernel
+	 * or user mode.
+	 */
+	current->thread.bad_vaddr = address;
+	info.si_signo = SIGBUS;
+	info.si_errno = 0;
+	info.si_code = BUS_ADRERR;
+	info.si_addr = (void *) address;
+	force_sig_info(SIGBUS, &info, current);
+
+	/* Kernel mode? Handle exceptions or die */
+	if (!user_mode(regs))
+		bad_page_fault(regs, address, SIGBUS);
+	return;
+
+vmalloc_fault:
+	{
+		/* Synchronize this task's top level page-table
+		 * with the 'reference' page table.
+		 */
+		struct mm_struct *act_mm = current->active_mm;
+		int index = pgd_index(address);
+		pgd_t *pgd, *pgd_k;
+		pmd_t *pmd, *pmd_k;
+		pte_t *pte_k;
+
+		if (act_mm == NULL)
+			goto bad_page_fault;
+
+		pgd = act_mm->pgd + index;
+		pgd_k = init_mm.pgd + index;
+
+		if (!pgd_present(*pgd_k))
+			goto bad_page_fault;
+
+		pgd_val(*pgd) = pgd_val(*pgd_k);
+
+		pmd = pmd_offset(pgd, address);
+		pmd_k = pmd_offset(pgd_k, address);
+		if (!pmd_present(*pmd) || !pmd_present(*pmd_k))
+			goto bad_page_fault;
+
+		pmd_val(*pmd) = pmd_val(*pmd_k);
+		pte_k = pte_offset_kernel(pmd_k, address);
+
+		if (!pte_present(*pte_k))
+			goto bad_page_fault;
+		return;
+	}
+bad_page_fault:
+	bad_page_fault(regs, address, SIGKILL);
+	return;
+}
+
+
+void
+bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
+{
+	extern void die(const char*, struct pt_regs*, long);
+	const struct exception_table_entry *entry;
+
+	/* Are we prepared to handle this kernel fault? */
+	if ((entry = search_exception_tables(regs->pc)) != NULL) {
+#ifdef DEBUG_PAGE_FAULT
+		printk(KERN_DEBUG "%s: Exception at pc=%#010lx (%lx)\n",
+		       current->comm, regs->pc, entry->fixup);
+#endif
+		current->thread.bad_uaddr = address;
+		regs->pc = entry->fixup;
+		return;
+	}
+
+	/* Oops. The kernel tried to access some bad page. We'll have to
+	 * terminate things with extreme prejudice.
+	 */
+	printk(KERN_ALERT "Unable to handle kernel paging request at virtual "
+	       "address %08lx\n pc = %08lx, ra = %08lx\n",
+	       address, regs->pc, regs->areg[0]);
+	die("Oops", regs, sig);
+	do_exit(sig);
+}
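bad_page_fault() above recovers from faults in whitelisted kernel instructions by looking the faulting PC up in the exception table and branching to the associated fixup. A stripped-down user-space illustration of that lookup (the addresses are hypothetical and the real table is embedded in the kernel image and searched by search_exception_tables()):

#include <stdio.h>

struct exception_table_entry {
	unsigned long insn;	/* PC that may legitimately fault */
	unsigned long fixup;	/* where to resume if it does */
};

static const struct exception_table_entry extable[] = {
	{ 0xd0001234, 0xd0005678 },	/* hypothetical entries */
};

static const struct exception_table_entry *
search_extable(unsigned long pc)
{
	unsigned int i;

	for (i = 0; i < sizeof(extable) / sizeof(extable[0]); i++)
		if (extable[i].insn == pc)
			return &extable[i];
	return NULL;	/* no fixup: the fault is fatal (die()) */
}

int main(void)
{
	const struct exception_table_entry *e = search_extable(0xd0001234);

	if (e)
		printf("fixup found: resume at 0x%lx\n", e->fixup);
	else
		printf("no fixup: Oops\n");
	return 0;
}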
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
new file mode 100644
index 00000000..ba150e5d
--- /dev/null
+++ b/arch/xtensa/mm/init.c
@@ -0,0 +1,243 @@
+/*
+ * arch/xtensa/mm/init.c
+ *
+ * Derived from MIPS, PPC.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
+ * Marc Gauthier
+ * Kevin Chea
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/bootmem.h>
+#include <linux/gfp.h>
+#include <linux/swap.h>
+#include <linux/mman.h>
+#include <linux/nodemask.h>
+#include <linux/mm.h>
+
+#include <asm/bootparam.h>
+#include <asm/page.h>
+
+/* References to section boundaries */
+
+extern char _ftext, _etext, _fdata, _edata, _rodata_end;
+extern char __init_begin, __init_end;
+
+/*
+ * mem_reserve(start, end, must_exist)
+ *
+ * Reserve some memory from the memory pool.
+ *
+ * Parameters:
+ *  start	Start of region,
+ *  end		End of region,
+ *  must_exist	Must exist in memory pool.
+ *
+ * Returns:
+ *  0 (memory area couldn't be mapped)
+ * -1 (success)
+ */
+
+int __init mem_reserve(unsigned long start, unsigned long end, int must_exist)
+{
+	int i;
+
+	if (start == end)
+		return 0;
+
+	start = start & PAGE_MASK;
+	end = PAGE_ALIGN(end);
+
+	for (i = 0; i < sysmem.nr_banks; i++)
+		if (start < sysmem.bank[i].end
+		    && end >= sysmem.bank[i].start)
+			break;
+
+	if (i == sysmem.nr_banks) {
+		if (must_exist)
+			printk (KERN_WARNING "mem_reserve: [0x%0lx, 0x%0lx) "
+				"not in any region!\n", start, end);
+		return 0;
+	}
+
+	if (start > sysmem.bank[i].start) {
+		if (end < sysmem.bank[i].end) {
+			/* split entry */
+			if (sysmem.nr_banks >= SYSMEM_BANKS_MAX)
+				panic("meminfo overflow\n");
+			sysmem.bank[sysmem.nr_banks].start = end;
+			sysmem.bank[sysmem.nr_banks].end = sysmem.bank[i].end;
+			sysmem.nr_banks++;
+		}
+		sysmem.bank[i].end = start;
+	} else {
+		if (end < sysmem.bank[i].end)
+			sysmem.bank[i].start = end;
+		else {
+			/* remove entry */
+			sysmem.nr_banks--;
+			sysmem.bank[i].start = sysmem.bank[sysmem.nr_banks].start;
+			sysmem.bank[i].end = sysmem.bank[sysmem.nr_banks].end;
+		}
+	}
+	return -1;
+}
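mem_reserve() carves a hole out of one sysmem bank and may split the bank in two. A compact user-space re-creation of just that splitting arithmetic, useful for convincing yourself of the three cases (interior split, trim at the tail, trim at the head); the bank and hole addresses are made up for the demo:

#include <stdio.h>

struct bank { unsigned long start, end; };

int main(void)
{
	struct bank b = { 0x00000000, 0x08000000 };	/* one 128 MiB bank */
	unsigned long rs = 0x01000000, re = 0x01100000;	/* hole to reserve */

	if (rs > b.start && re < b.end)		/* interior hole: split */
		printf("split: [%#lx,%#lx) and [%#lx,%#lx)\n",
		       b.start, rs, re, b.end);
	else if (rs > b.start)			/* hole reaches the end */
		printf("trim tail: [%#lx,%#lx)\n", b.start, rs);
	else if (re < b.end)			/* hole starts at the front */
		printf("trim head: [%#lx,%#lx)\n", re, b.end);
	else
		printf("bank removed entirely\n");
	return 0;
}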
+
+
+/*
+ * Initialize the bootmem system and give it all the memory we have available.
+ */
+
+void __init bootmem_init(void)
+{
+	unsigned long pfn;
+	unsigned long bootmap_start, bootmap_size;
+	int i;
+
+	max_low_pfn = max_pfn = 0;
+	min_low_pfn = ~0;
+
+	for (i=0; i < sysmem.nr_banks; i++) {
+		pfn = PAGE_ALIGN(sysmem.bank[i].start) >> PAGE_SHIFT;
+		if (pfn < min_low_pfn)
+			min_low_pfn = pfn;
+		pfn = PAGE_ALIGN(sysmem.bank[i].end - 1) >> PAGE_SHIFT;
+		if (pfn > max_pfn)
+			max_pfn = pfn;
+	}
+
+	if (min_low_pfn > max_pfn)
+		panic("No memory found!\n");
+
+	max_low_pfn = max_pfn < MAX_MEM_PFN >> PAGE_SHIFT ?
+		max_pfn : MAX_MEM_PFN >> PAGE_SHIFT;
+
+	/* Find an area to use for the bootmem bitmap. */
+
+	bootmap_size = bootmem_bootmap_pages(max_low_pfn - min_low_pfn);
+	bootmap_size <<= PAGE_SHIFT;
+	bootmap_start = ~0;
+
+	for (i=0; i<sysmem.nr_banks; i++)
+		if (sysmem.bank[i].end - sysmem.bank[i].start >= bootmap_size) {
+			bootmap_start = sysmem.bank[i].start;
+			break;
+		}
+
+	if (bootmap_start == ~0UL)
+		panic("Cannot find %ld bytes for bootmap\n", bootmap_size);
+
+	/* Reserve the bootmem bitmap area */
+
+	mem_reserve(bootmap_start, bootmap_start + bootmap_size, 1);
+	bootmap_size = init_bootmem_node(NODE_DATA(0),
+					 bootmap_start >> PAGE_SHIFT,
+					 min_low_pfn,
+					 max_low_pfn);
+
+	/* Add all remaining memory pieces into the bootmem map */
+
+	for (i=0; i<sysmem.nr_banks; i++)
+		free_bootmem(sysmem.bank[i].start,
+			     sysmem.bank[i].end - sysmem.bank[i].start);
+
+}
+
+
+void __init zones_init(void)
+{
+	unsigned long zones_size[MAX_NR_ZONES];
+	int i;
+
+	/* All pages are DMA-able, so we put them all in the DMA zone. */
+
+	zones_size[ZONE_DMA] = max_low_pfn - ARCH_PFN_OFFSET;
+	for (i = 1; i < MAX_NR_ZONES; i++)
+		zones_size[i] = 0;
+
+#ifdef CONFIG_HIGHMEM
+	zones_size[ZONE_HIGHMEM] = max_pfn - max_low_pfn;
+#endif
+
+	free_area_init_node(0, zones_size, ARCH_PFN_OFFSET, NULL);
+}
+
+/*
+ * Initialize memory pages.
+ */
+
+void __init mem_init(void)
+{
+	unsigned long codesize, reservedpages, datasize, initsize;
+	unsigned long highmemsize, tmp, ram;
+
+	max_mapnr = num_physpages = max_low_pfn - ARCH_PFN_OFFSET;
+	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
+	highmemsize = 0;
+
+#ifdef CONFIG_HIGHMEM
+#error HIGHMEM not implemented in init.c
+#endif
+
+	totalram_pages += free_all_bootmem();
+
+	reservedpages = ram = 0;
+	for (tmp = 0; tmp < max_mapnr; tmp++) {
+		ram++;
+		if (PageReserved(mem_map+tmp))
+			reservedpages++;
+	}
+
+	codesize = (unsigned long) &_etext - (unsigned long) &_ftext;
+	datasize = (unsigned long) &_edata - (unsigned long) &_fdata;
+	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
+
+	printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, "
+	       "%ldk data, %ldk init, %ldk highmem)\n",
+	       nr_free_pages() << (PAGE_SHIFT-10),
+	       ram << (PAGE_SHIFT-10),
+	       codesize >> 10,
+	       reservedpages << (PAGE_SHIFT-10),
+	       datasize >> 10,
+	       initsize >> 10,
+	       highmemsize >> 10);
+}
+
+void
+free_reserved_mem(void *start, void *end)
+{
+	for (; start < end; start += PAGE_SIZE) {
+		ClearPageReserved(virt_to_page(start));
+		init_page_count(virt_to_page(start));
+		free_page((unsigned long)start);
+		totalram_pages++;
+	}
+}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+extern int initrd_is_mapped;
+
+void free_initrd_mem(unsigned long start, unsigned long end)
+{
+	if (initrd_is_mapped) {
+		free_reserved_mem((void*)start, (void*)end);
+		printk ("Freeing initrd memory: %ldk freed\n", (end-start)>>10);
+	}
+}
+#endif
+
+void free_initmem(void)
+{
+	free_reserved_mem(&__init_begin, &__init_end);
+	printk("Freeing unused kernel memory: %dk freed\n",
+	       (&__init_end - &__init_begin) >> 10);
+}
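bootmem_init() above sizes the boot allocator's bitmap at one bit per page; bootmem_bootmap_pages() is essentially a ceiling division. The arithmetic for a 128 MiB machine with 4 KiB pages, runnable as a sanity check (assuming the standard one-bit-per-page bitmap layout):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
	unsigned long ram_bytes = 128UL << 20;		/* 128 MiB */
	unsigned long pages = ram_bytes >> PAGE_SHIFT;	/* 32768 pages */
	unsigned long bitmap_bytes = (pages + 7) / 8;	/* 4096 bytes */
	unsigned long bitmap_pages =
		(bitmap_bytes + PAGE_SIZE - 1) / PAGE_SIZE;

	/* 128 MiB of RAM costs a single 4 KiB page of bootmem bitmap. */
	printf("%lu pages -> %lu bitmap page(s)\n", pages, bitmap_pages);
	return 0;
}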
diff --git a/arch/xtensa/mm/misc.S b/arch/xtensa/mm/misc.S
new file mode 100644
index 00000000..b048406d
--- /dev/null
+++ b/arch/xtensa/mm/misc.S
@@ -0,0 +1,447 @@
+/*
+ * arch/xtensa/mm/misc.S
+ *
+ * Miscellaneous assembly functions.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2007 Tensilica Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ */
+
+
+#include <linux/linkage.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/asmmacro.h>
+#include <asm/cacheasm.h>
+#include <asm/tlbflush.h>
+
+
+/*
+ * clear_page and clear_user_page are the same for non-cache-aliased configs.
+ *
+ * clear_page (unsigned long page)
+ *                    a2
+ */
+
+ENTRY(clear_page)
+	entry	a1, 16
+
+	movi	a3, 0
+	__loopi	a2, a7, PAGE_SIZE, 32
+	s32i	a3, a2, 0
+	s32i	a3, a2, 4
+	s32i	a3, a2, 8
+	s32i	a3, a2, 12
+	s32i	a3, a2, 16
+	s32i	a3, a2, 20
+	s32i	a3, a2, 24
+	s32i	a3, a2, 28
+	__endla	a2, a7, 32
+
+	retw
+
+/*
+ * copy_page and copy_user_page are the same for non-cache-aliased configs.
+ *
+ * copy_page (void *to, void *from)
+ *                 a2        a3
+ */
+
+ENTRY(copy_page)
+	entry	a1, 16
+
+	__loopi	a2, a4, PAGE_SIZE, 32
+
+	l32i	a8, a3, 0
+	l32i	a9, a3, 4
+	s32i	a8, a2, 0
+	s32i	a9, a2, 4
+
+	l32i	a8, a3, 8
+	l32i	a9, a3, 12
+	s32i	a8, a2, 8
+	s32i	a9, a2, 12
+
+	l32i	a8, a3, 16
+	l32i	a9, a3, 20
+	s32i	a8, a2, 16
+	s32i	a9, a2, 20
+
+	l32i	a8, a3, 24
+	l32i	a9, a3, 28
+	s32i	a8, a2, 24
+	s32i	a9, a2, 28
+
+	addi	a2, a2, 32
+	addi	a3, a3, 32
+
+	__endl	a2, a4
+
+	retw
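For readers not fluent in Xtensa assembly: copy_page above is a word copy unrolled to 32 bytes per iteration of the zero-overhead __loopi loop. An equivalent (unoptimized) C rendering of the same loop structure, with PAGE_SIZE assumed to be the common 4 KiB configuration:

#define PAGE_SIZE 4096UL	/* assumed, as in the common Xtensa config */

/* Same shape as the assembly: eight word-sized moves per iteration. */
static void copy_page_c(void *to, const void *from)
{
	unsigned long *dst = to;
	const unsigned long *src = from;
	unsigned long n;

	for (n = 0; n < PAGE_SIZE / sizeof(*dst); n += 8) {
		dst[n + 0] = src[n + 0];
		dst[n + 1] = src[n + 1];
		dst[n + 2] = src[n + 2];
		dst[n + 3] = src[n + 3];
		dst[n + 4] = src[n + 4];
		dst[n + 5] = src[n + 5];
		dst[n + 6] = src[n + 6];
		dst[n + 7] = src[n + 7];
	}
}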
+
+#ifdef CONFIG_MMU
+/*
+ * If we have to deal with cache aliasing, we use temporary memory mappings
+ * to ensure that the source and destination pages have the same color as
+ * the virtual address. We use way 0 and 1 for temporary mappings in such cases.
+ *
+ * The temporary DTLB entries shouldn't be flushed by interrupts, but are
+ * flushed by preemptive task switches. Special code in the
+ * fast_second_level_miss handler re-establishes the temporary mapping.
+ * It requires that the PPNs for the destination and source addresses are
+ * in a6, and a7, respectively.
+ */
+
+/* TLB miss exceptions are treated specially in the following region */
+
+ENTRY(__tlbtemp_mapping_start)
+
+#if (DCACHE_WAY_SIZE > PAGE_SIZE)
+
+/*
+ * clear_user_page (void *addr, unsigned long vaddr, struct page *page)
+ *                        a2                  a3                  a4
+ */
+
+ENTRY(clear_user_page)
+	entry	a1, 32
+
+	/* Mark page dirty and determine alias. */
+
+	movi	a7, (1 << PG_ARCH_1)
+	l32i	a5, a4, PAGE_FLAGS
+	xor	a6, a2, a3
+	extui	a3, a3, PAGE_SHIFT, DCACHE_ALIAS_ORDER
+	extui	a6, a6, PAGE_SHIFT, DCACHE_ALIAS_ORDER
+	or	a5, a5, a7
+	slli	a3, a3, PAGE_SHIFT
+	s32i	a5, a4, PAGE_FLAGS
+
+	/* Skip setting up a temporary DTLB if not aliased. */
+
+	beqz	a6, 1f
+
+	/* Invalidate kernel page. */
+
+	mov	a10, a2
+	call8	__invalidate_dcache_page
+
+	/* Setup a temporary DTLB with the color of the VPN */
+
+	movi	a4, -PAGE_OFFSET + (PAGE_KERNEL | _PAGE_HW_WRITE)
+	movi	a5, TLBTEMP_BASE_1		# virt
+	add	a6, a2, a4			# ppn
+	add	a2, a5, a3			# add 'color'
+
+	wdtlb	a6, a2
+	dsync
+
+1:	movi	a3, 0
+	__loopi	a2, a7, PAGE_SIZE, 32
+	s32i	a3, a2, 0
+	s32i	a3, a2, 4
+	s32i	a3, a2, 8
+	s32i	a3, a2, 12
+	s32i	a3, a2, 16
+	s32i	a3, a2, 20
+	s32i	a3, a2, 24
+	s32i	a3, a2, 28
+	__endla	a2, a7, 32
+
+	bnez	a6, 1f
+	retw
+
+	/* We need to invalidate the temporary idtlb entry, if any. */
+
+1:	addi	a2, a2, -PAGE_SIZE
+	idtlb	a2
+	dsync
+
+	retw
+
+/*
+ * copy_user_page (void *to, void *from, unsigned long vaddr, struct page *page)
+ *                       a2        a3                  a4                  a5
+ */
+
+ENTRY(copy_user_page)
+
+	entry	a1, 32
+
+	/* Mark page dirty and determine alias for destination. */
+
+	movi	a8, (1 << PG_ARCH_1)
+	l32i	a9, a5, PAGE_FLAGS
+	xor	a6, a2, a4
+	xor	a7, a3, a4
+	extui	a4, a4, PAGE_SHIFT, DCACHE_ALIAS_ORDER
+	extui	a6, a6, PAGE_SHIFT, DCACHE_ALIAS_ORDER
+	extui	a7, a7, PAGE_SHIFT, DCACHE_ALIAS_ORDER
+	or	a9, a9, a8
+	slli	a4, a4, PAGE_SHIFT
+	s32i	a9, a5, PAGE_FLAGS
+	movi	a5, -PAGE_OFFSET + (PAGE_KERNEL | _PAGE_HW_WRITE)
+
+	beqz	a6, 1f
+
+	/* Invalidate dcache */
+
+	mov	a10, a2
+	call8	__invalidate_dcache_page
+
+	/* Setup a temporary DTLB with a matching color. */
+
+	movi	a8, TLBTEMP_BASE_1		# base
+	add	a6, a2, a5			# ppn
+	add	a2, a8, a4			# add 'color'
+
+	wdtlb	a6, a2
+	dsync
+
+	/* Skip setting up a temporary DTLB for destination if not aliased. */
+
+1:	beqz	a7, 1f
+
+	/* Setup a temporary DTLB with a matching color. */
+
+	movi	a8, TLBTEMP_BASE_2		# base
+	add	a7, a3, a5			# ppn
+	add	a3, a8, a4
+	addi	a8, a3, 1			# way1
+
+	wdtlb	a7, a8
+	dsync
+
+1:	__loopi	a2, a4, PAGE_SIZE, 32
+
+	l32i	a8, a3, 0
+	l32i	a9, a3, 4
+	s32i	a8, a2, 0
+	s32i	a9, a2, 4
+
+	l32i	a8, a3, 8
+	l32i	a9, a3, 12
+	s32i	a8, a2, 8
+	s32i	a9, a2, 12
+
+	l32i	a8, a3, 16
+	l32i	a9, a3, 20
+	s32i	a8, a2, 16
+	s32i	a9, a2, 20
+
+	l32i	a8, a3, 24
+	l32i	a9, a3, 28
+	s32i	a8, a2, 24
+	s32i	a9, a2, 28
+
+	addi	a2, a2, 32
+	addi	a3, a3, 32
+
+	__endl	a2, a4
+
+	/* We need to invalidate any temporary mapping! */
+
+	bnez	a6, 1f
+	bnez	a7, 2f
+	retw
+
+1:	addi	a2, a2, -PAGE_SIZE
+	idtlb	a2
+	dsync
+	bnez	a7, 2f
+	retw
+
+2:	addi	a3, a3, -PAGE_SIZE+1
+	idtlb	a3
+	dsync
+
+	retw
+
+#endif
+
+#if (DCACHE_WAY_SIZE > PAGE_SIZE)
+
+/*
+ * void __flush_invalidate_dcache_page_alias (addr, phys)
+ *                                             a2    a3
+ */
+
+ENTRY(__flush_invalidate_dcache_page_alias)
+	entry	sp, 16
+
+	movi	a7, 0			# required for exception handler
+	addi	a6, a3, (PAGE_KERNEL | _PAGE_HW_WRITE)
+	mov	a4, a2
+	wdtlb	a6, a2
+	dsync
+
+	___flush_invalidate_dcache_page a2 a3
+
+	idtlb	a4
+	dsync
+
+	retw
+
+#endif
+
+ENTRY(__tlbtemp_mapping_itlb)
+
+#if (ICACHE_WAY_SIZE > PAGE_SIZE)
+
+ENTRY(__invalidate_icache_page_alias)
+	entry	sp, 16
+
+	addi	a6, a3, (PAGE_KERNEL_EXEC | _PAGE_HW_WRITE)
+	mov	a4, a2
+	witlb	a6, a2
+	isync
+
+	___invalidate_icache_page a2 a3
+
+	iitlb	a4
+	isync
+	retw
+
+#endif
+
+/* End of special treatment in tlb miss exception */
+
+ENTRY(__tlbtemp_mapping_end)
+#endif /* CONFIG_MMU */
+
+/*
+ * void __invalidate_icache_page(ulong start)
+ */
+
+ENTRY(__invalidate_icache_page)
+	entry	sp, 16
+
+	___invalidate_icache_page a2 a3
+	isync
+
+	retw
+
+/*
+ * void __invalidate_dcache_page(ulong start)
+ */
+
+ENTRY(__invalidate_dcache_page)
+	entry	sp, 16
+
+	___invalidate_dcache_page a2 a3
+	dsync
+
+	retw
+
+/*
+ * void __flush_invalidate_dcache_page(ulong start)
+ */
+
+ENTRY(__flush_invalidate_dcache_page)
+	entry	sp, 16
+
+	___flush_invalidate_dcache_page a2 a3
+
+	dsync
+	retw
+
+/*
+ * void __flush_dcache_page(ulong start)
+ */
+
+ENTRY(__flush_dcache_page)
+	entry	sp, 16
+
+	___flush_dcache_page a2 a3
+
+	dsync
+	retw
+
+/*
+ * void __invalidate_icache_range(ulong start, ulong size)
+ */
+
+ENTRY(__invalidate_icache_range)
+	entry	sp, 16
+
+	___invalidate_icache_range a2 a3 a4
+	isync
+
+	retw
+
+/*
+ * void __flush_invalidate_dcache_range(ulong start, ulong size)
+ */
+
+ENTRY(__flush_invalidate_dcache_range)
+	entry	sp, 16
+
+	___flush_invalidate_dcache_range a2 a3 a4
+	dsync
+
+	retw
+
+/*
+ * void _flush_dcache_range(ulong start, ulong size)
+ */
+
+ENTRY(__flush_dcache_range)
+	entry	sp, 16
+
+	___flush_dcache_range a2 a3 a4
+	dsync
+
+	retw
+
+/*
+ * void _invalidate_dcache_range(ulong start, ulong size)
+ */
+
+ENTRY(__invalidate_dcache_range)
+	entry	sp, 16
+
+	___invalidate_dcache_range a2 a3 a4
+
+	retw
+
+/*
+ * void _invalidate_icache_all(void)
+ */
+
+ENTRY(__invalidate_icache_all)
+	entry	sp, 16
+
+	___invalidate_icache_all a2 a3
+	isync
+
+	retw
+
+/*
+ * void _flush_invalidate_dcache_all(void)
+ */
+
+ENTRY(__flush_invalidate_dcache_all)
+	entry	sp, 16
+
+	___flush_invalidate_dcache_all a2 a3
+	dsync
+
+	retw
+
+/*
+ * void _invalidate_dcache_all(void)
+ */
+
+ENTRY(__invalidate_dcache_all)
+	entry	sp, 16
+
+	___invalidate_dcache_all a2 a3
+	dsync
+
+	retw
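The assembly entry points above implement the C cache-maintenance API used by the rest of arch/xtensa. For orientation, their C-side declarations, gleaned from the comments above, look roughly like this (the authoritative versions live in <asm/cacheflush.h>, which is not part of this diff):

/* Per-page operations (page-aligned kernel virtual address). */
void __invalidate_icache_page(unsigned long start);
void __invalidate_dcache_page(unsigned long start);
void __flush_invalidate_dcache_page(unsigned long start);
void __flush_dcache_page(unsigned long start);

/* Ranged operations. */
void __invalidate_icache_range(unsigned long start, unsigned long size);
void __invalidate_dcache_range(unsigned long start, unsigned long size);
void __flush_invalidate_dcache_range(unsigned long start, unsigned long size);
void __flush_dcache_range(unsigned long start, unsigned long size);

/* Whole-cache operations. */
void __invalidate_icache_all(void);
void __invalidate_dcache_all(void);
void __flush_invalidate_dcache_all(void);

/* Alias variants take the temporary virtual address plus the physical one. */
void __flush_invalidate_dcache_page_alias(unsigned long virt, unsigned long phys);
void __invalidate_icache_page_alias(unsigned long virt, unsigned long phys);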
diff --git a/arch/xtensa/mm/mmu.c b/arch/xtensa/mm/mmu.c
new file mode 100644
index 00000000..ca81654f
--- /dev/null
+++ b/arch/xtensa/mm/mmu.c
@@ -0,0 +1,68 @@
+/*
+ * xtensa mmu stuff
+ *
+ * Extracted from init.c
+ */
+#include <linux/percpu.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/cache.h>
+
+#include <asm/tlb.h>
+#include <asm/tlbflush.h>
+#include <asm/mmu_context.h>
+#include <asm/page.h>
+
+void __init paging_init(void)
+{
+	memset(swapper_pg_dir, 0, PAGE_SIZE);
+}
+
+/*
+ * Flush the mmu and reset associated register to default values.
+ */
+void __init init_mmu(void)
+{
+	/* Writing zeros to the <t>TLBCFG special registers ensures
+	 * that valid values exist in the register. For existing
+	 * PGSZID<w> fields, zero selects the first element of the
+	 * page-size array. For nonexistent PGSZID<w> fields, zero is
+	 * the best value to write. Also, when changing PGSZID<w>
+	 * fields, the corresponding TLB must be flushed.
+	 */
+	set_itlbcfg_register(0);
+	set_dtlbcfg_register(0);
+	flush_tlb_all();
+
+	/* Set rasid register to a known value. */
+
+	set_rasid_register(ASID_USER_FIRST);
+
+	/* Set PTEVADDR special register to the start of the page
+	 * table, which is in kernel mappable space (ie. not
+	 * statically mapped). This register's value is undefined on
+	 * reset.
+	 */
+	set_ptevaddr_register(PGTABLE_START);
+}
+
+struct kmem_cache *pgtable_cache __read_mostly;
+
+static void pgd_ctor(void *addr)
+{
+	pte_t *ptep = (pte_t *)addr;
+	int i;
+
+	for (i = 0; i < 1024; i++, ptep++)
+		pte_clear(NULL, 0, ptep);
+
+}
+
+void __init pgtable_cache_init(void)
+{
+	pgtable_cache = kmem_cache_create("pgd",
+			PAGE_SIZE, PAGE_SIZE,
+			SLAB_HWCACHE_ALIGN,
+			pgd_ctor);
+}
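A slab cache with a constructor means a freshly allocated page directory arrives pre-cleared, and objects returned to the cache skip re-clearing on reuse. The expected consumers are pgd_alloc()/pgd_free() in <asm/pgalloc.h>, which this diff does not include; a plausible sketch, assuming that pairing:

/* Sketch only: the real definitions live in <asm/pgalloc.h>,
 * not in this commit. */
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	/* pgd_ctor() already ran: all 1024 entries are pte_none(). */
	return kmem_cache_alloc(pgtable_cache, GFP_KERNEL);
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	kmem_cache_free(pgtable_cache, pgd);
}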
diff --git a/arch/xtensa/mm/tlb.c b/arch/xtensa/mm/tlb.c
new file mode 100644
index 00000000..e2700b21
--- /dev/null
+++ b/arch/xtensa/mm/tlb.c
@@ -0,0 +1,143 @@
+/*
+ * arch/xtensa/mm/tlb.c
+ *
+ * Logic that manipulates the Xtensa MMU. Derived from MIPS.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2003 Tensilica Inc.
+ *
+ * Joe Taylor
+ * Chris Zankel <chris@zankel.net>
+ * Marc Gauthier
+ */
+
+#include <linux/mm.h>
+#include <asm/processor.h>
+#include <asm/mmu_context.h>
+#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
+
+
+static inline void __flush_itlb_all (void)
+{
+	int w, i;
+
+	for (w = 0; w < ITLB_ARF_WAYS; w++) {
+		for (i = 0; i < (1 << XCHAL_ITLB_ARF_ENTRIES_LOG2); i++) {
+			int e = w + (i << PAGE_SHIFT);
+			invalidate_itlb_entry_no_isync(e);
+		}
+	}
+	asm volatile ("isync\n");
+}
+
+static inline void __flush_dtlb_all (void)
+{
+	int w, i;
+
+	for (w = 0; w < DTLB_ARF_WAYS; w++) {
+		for (i = 0; i < (1 << XCHAL_DTLB_ARF_ENTRIES_LOG2); i++) {
+			int e = w + (i << PAGE_SHIFT);
+			invalidate_dtlb_entry_no_isync(e);
+		}
+	}
+	asm volatile ("isync\n");
+}
+
+
+void flush_tlb_all (void)
+{
+	__flush_itlb_all();
+	__flush_dtlb_all();
+}
+
+/* If mm is current, we simply assign the current task a new ASID, thus,
+ * invalidating all previous tlb entries. If mm is someone else's user mapping,
+ * we invalidate the context, thus, when that user mapping is swapped in,
+ * a new context will be assigned to it.
+ */
+
+void flush_tlb_mm(struct mm_struct *mm)
+{
+	if (mm == current->active_mm) {
+		unsigned long flags;
+		local_save_flags(flags);
+		__get_new_mmu_context(mm);
+		__load_mmu_context(mm);
+		local_irq_restore(flags);
+	}
+	else
+		mm->context = 0;
+}
+
+#define _ITLB_ENTRIES (ITLB_ARF_WAYS << XCHAL_ITLB_ARF_ENTRIES_LOG2)
+#define _DTLB_ENTRIES (DTLB_ARF_WAYS << XCHAL_DTLB_ARF_ENTRIES_LOG2)
+#if _ITLB_ENTRIES > _DTLB_ENTRIES
+# define _TLB_ENTRIES _ITLB_ENTRIES
+#else
+# define _TLB_ENTRIES _DTLB_ENTRIES
+#endif
+
+void flush_tlb_range (struct vm_area_struct *vma,
+		      unsigned long start, unsigned long end)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	unsigned long flags;
+
+	if (mm->context == NO_CONTEXT)
+		return;
+
+#if 0
+	printk("[tlbrange<%02lx,%08lx,%08lx>]\n",
+	       (unsigned long)mm->context, start, end);
+#endif
+	local_save_flags(flags);
+
+	if (end-start + (PAGE_SIZE-1) <= _TLB_ENTRIES << PAGE_SHIFT) {
+		int oldpid = get_rasid_register();
+		set_rasid_register (ASID_INSERT(mm->context));
+		start &= PAGE_MASK;
+		if (vma->vm_flags & VM_EXEC)
+			while(start < end) {
+				invalidate_itlb_mapping(start);
+				invalidate_dtlb_mapping(start);
+				start += PAGE_SIZE;
+			}
+		else
+			while(start < end) {
+				invalidate_dtlb_mapping(start);
+				start += PAGE_SIZE;
+			}
+
+		set_rasid_register(oldpid);
+	} else {
+		flush_tlb_mm(mm);
+	}
+	local_irq_restore(flags);
+}
+
+void flush_tlb_page (struct vm_area_struct *vma, unsigned long page)
+{
+	struct mm_struct* mm = vma->vm_mm;
+	unsigned long flags;
+	int oldpid;
+
+	if (mm->context == NO_CONTEXT)
+		return;
+
+	local_save_flags(flags);
+
+	oldpid = get_rasid_register();
+
+	if (vma->vm_flags & VM_EXEC)
+		invalidate_itlb_mapping(page);
+	invalidate_dtlb_mapping(page);
+
+	set_rasid_register(oldpid);
+
+	local_irq_restore(flags);
+}
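flush_tlb_range() above switches from per-page invalidation to a full-mm flush once the range exceeds what the auto-refill ways can hold. The break-even computation, runnable stand-alone with an assumed geometry (four ways of four entries each is made up for the demo; the real values come from the core's XCHAL configuration):

#include <stdio.h>

#define PAGE_SHIFT 12
#define ITLB_ARF_WAYS 4			/* assumed demo geometry */
#define XCHAL_ITLB_ARF_ENTRIES_LOG2 2
#define DTLB_ARF_WAYS 4
#define XCHAL_DTLB_ARF_ENTRIES_LOG2 2

#define _ITLB_ENTRIES (ITLB_ARF_WAYS << XCHAL_ITLB_ARF_ENTRIES_LOG2)
#define _DTLB_ENTRIES (DTLB_ARF_WAYS << XCHAL_DTLB_ARF_ENTRIES_LOG2)
#define _TLB_ENTRIES (_ITLB_ENTRIES > _DTLB_ENTRIES ? _ITLB_ENTRIES \
						    : _DTLB_ENTRIES)

int main(void)
{
	unsigned long start = 0x10000000, end = 0x10020000;	/* 128 KiB */
	unsigned long span = end - start + ((1UL << PAGE_SHIFT) - 1);

	if (span <= (unsigned long)_TLB_ENTRIES << PAGE_SHIFT)
		printf("per-page invalidation (%lu pages)\n",
		       span >> PAGE_SHIFT);
	else
		printf("cheaper to flush the whole mm\n");
	return 0;
}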