path: root/ANDROID_3.4.5/arch/s390/mm
Diffstat (limited to 'ANDROID_3.4.5/arch/s390/mm')
-rw-r--r--  ANDROID_3.4.5/arch/s390/mm/Makefile          9
-rw-r--r--  ANDROID_3.4.5/arch/s390/mm/cmm.c           497
-rw-r--r--  ANDROID_3.4.5/arch/s390/mm/extmem.c        778
-rw-r--r--  ANDROID_3.4.5/arch/s390/mm/fault.c         664
-rw-r--r--  ANDROID_3.4.5/arch/s390/mm/gup.c           236
-rw-r--r--  ANDROID_3.4.5/arch/s390/mm/hugetlbpage.c   130
-rw-r--r--  ANDROID_3.4.5/arch/s390/mm/init.c          262
-rw-r--r--  ANDROID_3.4.5/arch/s390/mm/maccess.c       168
-rw-r--r--  ANDROID_3.4.5/arch/s390/mm/mmap.c          178
-rw-r--r--  ANDROID_3.4.5/arch/s390/mm/page-states.c   114
-rw-r--r--  ANDROID_3.4.5/arch/s390/mm/pageattr.c       62
-rw-r--r--  ANDROID_3.4.5/arch/s390/mm/pgtable.c       871
-rw-r--r--  ANDROID_3.4.5/arch/s390/mm/vmem.c          388
13 files changed, 0 insertions, 4357 deletions
diff --git a/ANDROID_3.4.5/arch/s390/mm/Makefile b/ANDROID_3.4.5/arch/s390/mm/Makefile
deleted file mode 100644
index d98fe900..00000000
--- a/ANDROID_3.4.5/arch/s390/mm/Makefile
+++ /dev/null
@@ -1,9 +0,0 @@
-#
-# Makefile for the linux s390-specific parts of the memory manager.
-#
-
-obj-y := init.o fault.o extmem.o mmap.o vmem.o pgtable.o maccess.o \
- page-states.o gup.o
-obj-$(CONFIG_CMM) += cmm.o
-obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
-obj-$(CONFIG_DEBUG_SET_MODULE_RONX) += pageattr.o
diff --git a/ANDROID_3.4.5/arch/s390/mm/cmm.c b/ANDROID_3.4.5/arch/s390/mm/cmm.c
deleted file mode 100644
index 1f1dba9d..00000000
--- a/ANDROID_3.4.5/arch/s390/mm/cmm.c
+++ /dev/null
@@ -1,497 +0,0 @@
-/*
- * Collaborative memory management interface.
- *
- * Copyright IBM Corp 2003,2010
- * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
- *
- */
-
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/gfp.h>
-#include <linux/sched.h>
-#include <linux/sysctl.h>
-#include <linux/ctype.h>
-#include <linux/swap.h>
-#include <linux/kthread.h>
-#include <linux/oom.h>
-#include <linux/suspend.h>
-#include <linux/uaccess.h>
-
-#include <asm/pgalloc.h>
-#include <asm/diag.h>
-
-#ifdef CONFIG_CMM_IUCV
-static char *cmm_default_sender = "VMRMSVM";
-#endif
-static char *sender;
-module_param(sender, charp, 0400);
-MODULE_PARM_DESC(sender,
- "Guest name that may send SMSG messages (default VMRMSVM)");
-
-#include "../../../drivers/s390/net/smsgiucv.h"
-
-#define CMM_NR_PAGES ((PAGE_SIZE / sizeof(unsigned long)) - 2)
-
-struct cmm_page_array {
- struct cmm_page_array *next;
- unsigned long index;
- unsigned long pages[CMM_NR_PAGES];
-};
-
-static long cmm_pages;
-static long cmm_timed_pages;
-static volatile long cmm_pages_target;
-static volatile long cmm_timed_pages_target;
-static long cmm_timeout_pages;
-static long cmm_timeout_seconds;
-static int cmm_suspended;
-
-static struct cmm_page_array *cmm_page_list;
-static struct cmm_page_array *cmm_timed_page_list;
-static DEFINE_SPINLOCK(cmm_lock);
-
-static struct task_struct *cmm_thread_ptr;
-static DECLARE_WAIT_QUEUE_HEAD(cmm_thread_wait);
-static DEFINE_TIMER(cmm_timer, NULL, 0, 0);
-
-static void cmm_timer_fn(unsigned long);
-static void cmm_set_timer(void);
-
-static long cmm_alloc_pages(long nr, long *counter,
- struct cmm_page_array **list)
-{
- struct cmm_page_array *pa, *npa;
- unsigned long addr;
-
- while (nr) {
- addr = __get_free_page(GFP_NOIO);
- if (!addr)
- break;
- spin_lock(&cmm_lock);
- pa = *list;
- if (!pa || pa->index >= CMM_NR_PAGES) {
- /* Need a new page for the page list. */
- spin_unlock(&cmm_lock);
- npa = (struct cmm_page_array *)
- __get_free_page(GFP_NOIO);
- if (!npa) {
- free_page(addr);
- break;
- }
- spin_lock(&cmm_lock);
- pa = *list;
- if (!pa || pa->index >= CMM_NR_PAGES) {
- npa->next = pa;
- npa->index = 0;
- pa = npa;
- *list = pa;
- } else
- free_page((unsigned long) npa);
- }
- diag10_range(addr >> PAGE_SHIFT, 1);
- pa->pages[pa->index++] = addr;
- (*counter)++;
- spin_unlock(&cmm_lock);
- nr--;
- }
- return nr;
-}
-
-static long cmm_free_pages(long nr, long *counter, struct cmm_page_array **list)
-{
- struct cmm_page_array *pa;
- unsigned long addr;
-
- spin_lock(&cmm_lock);
- pa = *list;
- while (nr) {
- if (!pa || pa->index <= 0)
- break;
- addr = pa->pages[--pa->index];
- if (pa->index == 0) {
- pa = pa->next;
- free_page((unsigned long) *list);
- *list = pa;
- }
- free_page(addr);
- (*counter)--;
- nr--;
- }
- spin_unlock(&cmm_lock);
- return nr;
-}
-
-static int cmm_oom_notify(struct notifier_block *self,
- unsigned long dummy, void *parm)
-{
- unsigned long *freed = parm;
- long nr = 256;
-
- nr = cmm_free_pages(nr, &cmm_timed_pages, &cmm_timed_page_list);
- if (nr > 0)
- nr = cmm_free_pages(nr, &cmm_pages, &cmm_page_list);
- cmm_pages_target = cmm_pages;
- cmm_timed_pages_target = cmm_timed_pages;
- *freed += 256 - nr;
- return NOTIFY_OK;
-}
-
-static struct notifier_block cmm_oom_nb = {
- .notifier_call = cmm_oom_notify,
-};
-
-static int cmm_thread(void *dummy)
-{
- int rc;
-
- while (1) {
- rc = wait_event_interruptible(cmm_thread_wait,
- (!cmm_suspended && (cmm_pages != cmm_pages_target ||
- cmm_timed_pages != cmm_timed_pages_target)) ||
- kthread_should_stop());
- if (kthread_should_stop() || rc == -ERESTARTSYS) {
- cmm_pages_target = cmm_pages;
- cmm_timed_pages_target = cmm_timed_pages;
- break;
- }
- if (cmm_pages_target > cmm_pages) {
- if (cmm_alloc_pages(1, &cmm_pages, &cmm_page_list))
- cmm_pages_target = cmm_pages;
- } else if (cmm_pages_target < cmm_pages) {
- cmm_free_pages(1, &cmm_pages, &cmm_page_list);
- }
- if (cmm_timed_pages_target > cmm_timed_pages) {
- if (cmm_alloc_pages(1, &cmm_timed_pages,
- &cmm_timed_page_list))
- cmm_timed_pages_target = cmm_timed_pages;
- } else if (cmm_timed_pages_target < cmm_timed_pages) {
- cmm_free_pages(1, &cmm_timed_pages,
- &cmm_timed_page_list);
- }
- if (cmm_timed_pages > 0 && !timer_pending(&cmm_timer))
- cmm_set_timer();
- }
- return 0;
-}
-
-static void cmm_kick_thread(void)
-{
- wake_up(&cmm_thread_wait);
-}
-
-static void cmm_set_timer(void)
-{
- if (cmm_timed_pages_target <= 0 || cmm_timeout_seconds <= 0) {
- if (timer_pending(&cmm_timer))
- del_timer(&cmm_timer);
- return;
- }
- if (timer_pending(&cmm_timer)) {
- if (mod_timer(&cmm_timer, jiffies + cmm_timeout_seconds*HZ))
- return;
- }
- cmm_timer.function = cmm_timer_fn;
- cmm_timer.data = 0;
- cmm_timer.expires = jiffies + cmm_timeout_seconds*HZ;
- add_timer(&cmm_timer);
-}
-
-static void cmm_timer_fn(unsigned long ignored)
-{
- long nr;
-
- nr = cmm_timed_pages_target - cmm_timeout_pages;
- if (nr < 0)
- cmm_timed_pages_target = 0;
- else
- cmm_timed_pages_target = nr;
- cmm_kick_thread();
- cmm_set_timer();
-}
-
-static void cmm_set_pages(long nr)
-{
- cmm_pages_target = nr;
- cmm_kick_thread();
-}
-
-static long cmm_get_pages(void)
-{
- return cmm_pages;
-}
-
-static void cmm_add_timed_pages(long nr)
-{
- cmm_timed_pages_target += nr;
- cmm_kick_thread();
-}
-
-static long cmm_get_timed_pages(void)
-{
- return cmm_timed_pages;
-}
-
-static void cmm_set_timeout(long nr, long seconds)
-{
- cmm_timeout_pages = nr;
- cmm_timeout_seconds = seconds;
- cmm_set_timer();
-}
-
-static int cmm_skip_blanks(char *cp, char **endp)
-{
- char *str;
-
- for (str = cp; *str == ' ' || *str == '\t'; str++)
- ;
- *endp = str;
- return str != cp;
-}
-
-static struct ctl_table cmm_table[];
-
-static int cmm_pages_handler(ctl_table *ctl, int write, void __user *buffer,
- size_t *lenp, loff_t *ppos)
-{
- char buf[16], *p;
- long nr;
- int len;
-
- if (!*lenp || (*ppos && !write)) {
- *lenp = 0;
- return 0;
- }
-
- if (write) {
- len = *lenp;
- if (copy_from_user(buf, buffer,
- len > sizeof(buf) ? sizeof(buf) : len))
- return -EFAULT;
- buf[sizeof(buf) - 1] = '\0';
- cmm_skip_blanks(buf, &p);
- nr = simple_strtoul(p, &p, 0);
- if (ctl == &cmm_table[0])
- cmm_set_pages(nr);
- else
- cmm_add_timed_pages(nr);
- } else {
- if (ctl == &cmm_table[0])
- nr = cmm_get_pages();
- else
- nr = cmm_get_timed_pages();
- len = sprintf(buf, "%ld\n", nr);
- if (len > *lenp)
- len = *lenp;
- if (copy_to_user(buffer, buf, len))
- return -EFAULT;
- }
- *lenp = len;
- *ppos += len;
- return 0;
-}
-
-static int cmm_timeout_handler(ctl_table *ctl, int write, void __user *buffer,
- size_t *lenp, loff_t *ppos)
-{
- char buf[64], *p;
- long nr, seconds;
- int len;
-
- if (!*lenp || (*ppos && !write)) {
- *lenp = 0;
- return 0;
- }
-
- if (write) {
- len = *lenp;
- if (copy_from_user(buf, buffer,
- len > sizeof(buf) ? sizeof(buf) : len))
- return -EFAULT;
- buf[sizeof(buf) - 1] = '\0';
- cmm_skip_blanks(buf, &p);
- nr = simple_strtoul(p, &p, 0);
- cmm_skip_blanks(p, &p);
- seconds = simple_strtoul(p, &p, 0);
- cmm_set_timeout(nr, seconds);
- } else {
- len = sprintf(buf, "%ld %ld\n",
- cmm_timeout_pages, cmm_timeout_seconds);
- if (len > *lenp)
- len = *lenp;
- if (copy_to_user(buffer, buf, len))
- return -EFAULT;
- }
- *lenp = len;
- *ppos += len;
- return 0;
-}
-
-static struct ctl_table cmm_table[] = {
- {
- .procname = "cmm_pages",
- .mode = 0644,
- .proc_handler = cmm_pages_handler,
- },
- {
- .procname = "cmm_timed_pages",
- .mode = 0644,
- .proc_handler = cmm_pages_handler,
- },
- {
- .procname = "cmm_timeout",
- .mode = 0644,
- .proc_handler = cmm_timeout_handler,
- },
- { }
-};
-
-static struct ctl_table cmm_dir_table[] = {
- {
- .procname = "vm",
- .maxlen = 0,
- .mode = 0555,
- .child = cmm_table,
- },
- { }
-};
-
-#ifdef CONFIG_CMM_IUCV
-#define SMSG_PREFIX "CMM"
-static void cmm_smsg_target(const char *from, char *msg)
-{
- long nr, seconds;
-
- if (strlen(sender) > 0 && strcmp(from, sender) != 0)
- return;
- if (!cmm_skip_blanks(msg + strlen(SMSG_PREFIX), &msg))
- return;
- if (strncmp(msg, "SHRINK", 6) == 0) {
- if (!cmm_skip_blanks(msg + 6, &msg))
- return;
- nr = simple_strtoul(msg, &msg, 0);
- cmm_skip_blanks(msg, &msg);
- if (*msg == '\0')
- cmm_set_pages(nr);
- } else if (strncmp(msg, "RELEASE", 7) == 0) {
- if (!cmm_skip_blanks(msg + 7, &msg))
- return;
- nr = simple_strtoul(msg, &msg, 0);
- cmm_skip_blanks(msg, &msg);
- if (*msg == '\0')
- cmm_add_timed_pages(nr);
- } else if (strncmp(msg, "REUSE", 5) == 0) {
- if (!cmm_skip_blanks(msg + 5, &msg))
- return;
- nr = simple_strtoul(msg, &msg, 0);
- if (!cmm_skip_blanks(msg, &msg))
- return;
- seconds = simple_strtoul(msg, &msg, 0);
- cmm_skip_blanks(msg, &msg);
- if (*msg == '\0')
- cmm_set_timeout(nr, seconds);
- }
-}
-#endif
-
-static struct ctl_table_header *cmm_sysctl_header;
-
-static int cmm_suspend(void)
-{
- cmm_suspended = 1;
- cmm_free_pages(cmm_pages, &cmm_pages, &cmm_page_list);
- cmm_free_pages(cmm_timed_pages, &cmm_timed_pages, &cmm_timed_page_list);
- return 0;
-}
-
-static int cmm_resume(void)
-{
- cmm_suspended = 0;
- cmm_kick_thread();
- return 0;
-}
-
-static int cmm_power_event(struct notifier_block *this,
- unsigned long event, void *ptr)
-{
- switch (event) {
- case PM_POST_HIBERNATION:
- return cmm_resume();
- case PM_HIBERNATION_PREPARE:
- return cmm_suspend();
- default:
- return NOTIFY_DONE;
- }
-}
-
-static struct notifier_block cmm_power_notifier = {
- .notifier_call = cmm_power_event,
-};
-
-static int __init cmm_init(void)
-{
- int rc = -ENOMEM;
-
- cmm_sysctl_header = register_sysctl_table(cmm_dir_table);
- if (!cmm_sysctl_header)
- goto out_sysctl;
-#ifdef CONFIG_CMM_IUCV
- /* convert sender to uppercase characters */
- if (sender) {
- int len = strlen(sender);
- while (len--)
- sender[len] = toupper(sender[len]);
- } else {
- sender = cmm_default_sender;
- }
-
- rc = smsg_register_callback(SMSG_PREFIX, cmm_smsg_target);
- if (rc < 0)
- goto out_smsg;
-#endif
- rc = register_oom_notifier(&cmm_oom_nb);
- if (rc < 0)
- goto out_oom_notify;
- rc = register_pm_notifier(&cmm_power_notifier);
- if (rc)
- goto out_pm;
- cmm_thread_ptr = kthread_run(cmm_thread, NULL, "cmmthread");
- rc = IS_ERR(cmm_thread_ptr) ? PTR_ERR(cmm_thread_ptr) : 0;
- if (rc)
- goto out_kthread;
- return 0;
-
-out_kthread:
- unregister_pm_notifier(&cmm_power_notifier);
-out_pm:
- unregister_oom_notifier(&cmm_oom_nb);
-out_oom_notify:
-#ifdef CONFIG_CMM_IUCV
- smsg_unregister_callback(SMSG_PREFIX, cmm_smsg_target);
-out_smsg:
-#endif
- unregister_sysctl_table(cmm_sysctl_header);
-out_sysctl:
- del_timer_sync(&cmm_timer);
- return rc;
-}
-module_init(cmm_init);
-
-static void __exit cmm_exit(void)
-{
- unregister_sysctl_table(cmm_sysctl_header);
-#ifdef CONFIG_CMM_IUCV
- smsg_unregister_callback(SMSG_PREFIX, cmm_smsg_target);
-#endif
- unregister_pm_notifier(&cmm_power_notifier);
- unregister_oom_notifier(&cmm_oom_nb);
- kthread_stop(cmm_thread_ptr);
- del_timer_sync(&cmm_timer);
- cmm_free_pages(cmm_pages, &cmm_pages, &cmm_page_list);
- cmm_free_pages(cmm_timed_pages, &cmm_timed_pages, &cmm_timed_page_list);
-}
-module_exit(cmm_exit);
-
-MODULE_LICENSE("GPL");
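
The list handling in cmm_alloc_pages() and cmm_free_pages() above is a stack of
fixed-size arrays: the head cmm_page_array holds up to CMM_NR_PAGES addresses
(510 on a 64-bit box), and a fresh node is chained in front once the head fills
up; cmm_lock is dropped around the node allocation, which is why the head is
re-checked after the lock is retaken. A stand-alone sketch of the same scheme,
with hypothetical names, user-space malloc()/free() standing in for
__get_free_page()/free_page(), and the locking and diag10 call omitted:

    /*
     * Sketch of the cmm.c page-list bookkeeping; all names here are
     * hypothetical, not kernel API.
     */
    #include <stdlib.h>

    #define NR_SLOTS 510	/* CMM_NR_PAGES on 64-bit: 4096/8 - 2 */

    struct page_array {
    	struct page_array *next;
    	unsigned long index;		/* number of used slots */
    	void *pages[NR_SLOTS];
    };

    /* Push one page; chain a fresh node in front when the head is full. */
    static int push_page(struct page_array **list, void *page)
    {
    	struct page_array *pa = *list;

    	if (!pa || pa->index >= NR_SLOTS) {
    		struct page_array *npa = calloc(1, sizeof(*npa));

    		if (!npa)
    			return -1;
    		npa->next = pa;
    		*list = pa = npa;
    	}
    	pa->pages[pa->index++] = page;
    	return 0;
    }

    /* Pop one page; release a node once its last slot is drained. */
    static void *pop_page(struct page_array **list)
    {
    	struct page_array *pa = *list;
    	void *page;

    	if (!pa || pa->index == 0)
    		return NULL;
    	page = pa->pages[--pa->index];
    	if (pa->index == 0) {
    		*list = pa->next;
    		free(pa);
    	}
    	return page;
    }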
diff --git a/ANDROID_3.4.5/arch/s390/mm/extmem.c b/ANDROID_3.4.5/arch/s390/mm/extmem.c
deleted file mode 100644
index 075ddada..00000000
--- a/ANDROID_3.4.5/arch/s390/mm/extmem.c
+++ /dev/null
@@ -1,778 +0,0 @@
-/*
- * File...........: arch/s390/mm/extmem.c
- * Author(s)......: Carsten Otte <cotte@de.ibm.com>
- * Rob M van der Heij <rvdheij@nl.ibm.com>
- * Steven Shultz <shultzss@us.ibm.com>
- * Bugreports.to..: <Linux390@de.ibm.com>
- * (C) IBM Corporation 2002-2004
- */
-
-#define KMSG_COMPONENT "extmem"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/spinlock.h>
-#include <linux/list.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/bootmem.h>
-#include <linux/ctype.h>
-#include <linux/ioport.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/ebcdic.h>
-#include <asm/errno.h>
-#include <asm/extmem.h>
-#include <asm/cpcmd.h>
-#include <asm/setup.h>
-
-#define DCSS_LOADSHR 0x00
-#define DCSS_LOADNSR 0x04
-#define DCSS_PURGESEG 0x08
-#define DCSS_FINDSEG 0x0c
-#define DCSS_LOADNOLY 0x10
-#define DCSS_SEGEXT 0x18
-#define DCSS_LOADSHRX 0x20
-#define DCSS_LOADNSRX 0x24
-#define DCSS_FINDSEGX 0x2c
-#define DCSS_SEGEXTX 0x38
-#define DCSS_FINDSEGA 0x0c
-
-struct qrange {
- unsigned long start; /* last byte type */
- unsigned long end; /* last byte reserved */
-};
-
-struct qout64 {
- unsigned long segstart;
- unsigned long segend;
- int segcnt;
- int segrcnt;
- struct qrange range[6];
-};
-
-#ifdef CONFIG_64BIT
-struct qrange_old {
- unsigned int start; /* last byte type */
- unsigned int end; /* last byte reserved */
-};
-
-/* output area format for the Diag x'64' old subcode x'18' */
-struct qout64_old {
- int segstart;
- int segend;
- int segcnt;
- int segrcnt;
- struct qrange_old range[6];
-};
-#endif
-
-struct qin64 {
- char qopcode;
- char rsrv1[3];
- char qrcode;
- char rsrv2[3];
- char qname[8];
- unsigned int qoutptr;
- short int qoutlen;
-};
-
-struct dcss_segment {
- struct list_head list;
- char dcss_name[8];
- char res_name[15];
- unsigned long start_addr;
- unsigned long end;
- atomic_t ref_count;
- int do_nonshared;
- unsigned int vm_segtype;
- struct qrange range[6];
- int segcnt;
- struct resource *res;
-};
-
-static DEFINE_MUTEX(dcss_lock);
-static LIST_HEAD(dcss_list);
-static char *segtype_string[] = { "SW", "EW", "SR", "ER", "SN", "EN", "SC",
- "EW/EN-MIXED" };
-static int loadshr_scode, loadnsr_scode, findseg_scode;
-static int segext_scode, purgeseg_scode;
-static int scode_set;
-
-/* set correct Diag x'64' subcodes. */
-static int
-dcss_set_subcodes(void)
-{
-#ifdef CONFIG_64BIT
- char *name = kmalloc(8 * sizeof(char), GFP_KERNEL | GFP_DMA);
- unsigned long rx, ry;
- int rc;
-
- if (name == NULL)
- return -ENOMEM;
-
- rx = (unsigned long) name;
- ry = DCSS_FINDSEGX;
-
- strcpy(name, "dummy");
- asm volatile(
- " diag %0,%1,0x64\n"
- "0: ipm %2\n"
- " srl %2,28\n"
- " j 2f\n"
- "1: la %2,3\n"
- "2:\n"
- EX_TABLE(0b, 1b)
- : "+d" (rx), "+d" (ry), "=d" (rc) : : "cc");
-
- kfree(name);
- /* Diag x'64' new subcodes are supported, set to new subcodes */
- if (rc != 3) {
- loadshr_scode = DCSS_LOADSHRX;
- loadnsr_scode = DCSS_LOADNSRX;
- purgeseg_scode = DCSS_PURGESEG;
- findseg_scode = DCSS_FINDSEGX;
- segext_scode = DCSS_SEGEXTX;
- return 0;
- }
-#endif
- /* Diag x'64' new subcodes are not supported, set to old subcodes */
- loadshr_scode = DCSS_LOADNOLY;
- loadnsr_scode = DCSS_LOADNSR;
- purgeseg_scode = DCSS_PURGESEG;
- findseg_scode = DCSS_FINDSEG;
- segext_scode = DCSS_SEGEXT;
- return 0;
-}
-
-/*
- * Create the 8-byte, EBCDIC VM segment name from
- * an ASCII name.
- */
-static void
-dcss_mkname(char *name, char *dcss_name)
-{
- int i;
-
- for (i = 0; i < 8; i++) {
- if (name[i] == '\0')
- break;
- dcss_name[i] = toupper(name[i]);
-	}
- for (; i < 8; i++)
- dcss_name[i] = ' ';
- ASCEBC(dcss_name, 8);
-}
-
-
-/*
- * search all segments in dcss_list, and return the one
- * named *name. If not found, return NULL.
- */
-static struct dcss_segment *
-segment_by_name (char *name)
-{
- char dcss_name[9];
- struct list_head *l;
- struct dcss_segment *tmp, *retval = NULL;
-
- BUG_ON(!mutex_is_locked(&dcss_lock));
- dcss_mkname (name, dcss_name);
- list_for_each (l, &dcss_list) {
- tmp = list_entry (l, struct dcss_segment, list);
- if (memcmp(tmp->dcss_name, dcss_name, 8) == 0) {
- retval = tmp;
- break;
- }
- }
- return retval;
-}
-
-
-/*
- * Perform a function on a dcss segment.
- */
-static inline int
-dcss_diag(int *func, void *parameter,
- unsigned long *ret1, unsigned long *ret2)
-{
- unsigned long rx, ry;
- int rc;
-
- if (scode_set == 0) {
- rc = dcss_set_subcodes();
- if (rc < 0)
- return rc;
- scode_set = 1;
- }
- rx = (unsigned long) parameter;
- ry = (unsigned long) *func;
-
-#ifdef CONFIG_64BIT
- /* 64-bit Diag x'64' new subcode, keep in 64-bit addressing mode */
- if (*func > DCSS_SEGEXT)
- asm volatile(
- " diag %0,%1,0x64\n"
- " ipm %2\n"
- " srl %2,28\n"
- : "+d" (rx), "+d" (ry), "=d" (rc) : : "cc");
- /* 31-bit Diag x'64' old subcode, switch to 31-bit addressing mode */
- else
- asm volatile(
- " sam31\n"
- " diag %0,%1,0x64\n"
- " sam64\n"
- " ipm %2\n"
- " srl %2,28\n"
- : "+d" (rx), "+d" (ry), "=d" (rc) : : "cc");
-#else
- asm volatile(
- " diag %0,%1,0x64\n"
- " ipm %2\n"
- " srl %2,28\n"
- : "+d" (rx), "+d" (ry), "=d" (rc) : : "cc");
-#endif
- *ret1 = rx;
- *ret2 = ry;
- return rc;
-}
-
-static inline int
-dcss_diag_translate_rc (int vm_rc) {
- if (vm_rc == 44)
- return -ENOENT;
- return -EIO;
-}
-
-
-/* do a diag to get info about a segment.
- * fills the start_addr, end and vm_segtype fields
- */
-static int
-query_segment_type (struct dcss_segment *seg)
-{
- unsigned long dummy, vmrc;
- int diag_cc, rc, i;
- struct qout64 *qout;
- struct qin64 *qin;
-
- qin = kmalloc(sizeof(*qin), GFP_KERNEL | GFP_DMA);
- qout = kmalloc(sizeof(*qout), GFP_KERNEL | GFP_DMA);
- if ((qin == NULL) || (qout == NULL)) {
- rc = -ENOMEM;
- goto out_free;
- }
-
- /* initialize diag input parameters */
- qin->qopcode = DCSS_FINDSEGA;
- qin->qoutptr = (unsigned long) qout;
- qin->qoutlen = sizeof(struct qout64);
- memcpy (qin->qname, seg->dcss_name, 8);
-
- diag_cc = dcss_diag(&segext_scode, qin, &dummy, &vmrc);
-
- if (diag_cc < 0) {
- rc = diag_cc;
- goto out_free;
- }
- if (diag_cc > 1) {
- pr_warning("Querying a DCSS type failed with rc=%ld\n", vmrc);
- rc = dcss_diag_translate_rc (vmrc);
- goto out_free;
- }
-
-#ifdef CONFIG_64BIT
- /* Only old format of output area of Diagnose x'64' is supported,
- copy data for the new format. */
- if (segext_scode == DCSS_SEGEXT) {
- struct qout64_old *qout_old;
- qout_old = kzalloc(sizeof(*qout_old), GFP_KERNEL | GFP_DMA);
- if (qout_old == NULL) {
- rc = -ENOMEM;
- goto out_free;
- }
- memcpy(qout_old, qout, sizeof(struct qout64_old));
- qout->segstart = (unsigned long) qout_old->segstart;
- qout->segend = (unsigned long) qout_old->segend;
- qout->segcnt = qout_old->segcnt;
- qout->segrcnt = qout_old->segrcnt;
-
- if (qout->segcnt > 6)
- qout->segrcnt = 6;
- for (i = 0; i < qout->segrcnt; i++) {
- qout->range[i].start =
- (unsigned long) qout_old->range[i].start;
- qout->range[i].end =
- (unsigned long) qout_old->range[i].end;
- }
- kfree(qout_old);
- }
-#endif
- if (qout->segcnt > 6) {
- rc = -EOPNOTSUPP;
- goto out_free;
- }
-
- if (qout->segcnt == 1) {
- seg->vm_segtype = qout->range[0].start & 0xff;
- } else {
- /* multi-part segment. only one type supported here:
- - all parts are contiguous
- - all parts are either EW or EN type
- - maximum 6 parts allowed */
- unsigned long start = qout->segstart >> PAGE_SHIFT;
- for (i=0; i<qout->segcnt; i++) {
- if (((qout->range[i].start & 0xff) != SEG_TYPE_EW) &&
- ((qout->range[i].start & 0xff) != SEG_TYPE_EN)) {
- rc = -EOPNOTSUPP;
- goto out_free;
- }
- if (start != qout->range[i].start >> PAGE_SHIFT) {
- rc = -EOPNOTSUPP;
- goto out_free;
- }
- start = (qout->range[i].end >> PAGE_SHIFT) + 1;
- }
- seg->vm_segtype = SEG_TYPE_EWEN;
- }
-
- /* analyze diag output and update seg */
- seg->start_addr = qout->segstart;
- seg->end = qout->segend;
-
- memcpy (seg->range, qout->range, 6*sizeof(struct qrange));
- seg->segcnt = qout->segcnt;
-
- rc = 0;
-
- out_free:
- kfree(qin);
- kfree(qout);
- return rc;
-}
-
-/*
- * get info about a segment
- * possible return values:
- * -ENOSYS : we are not running on VM
- * -EIO : could not perform query diagnose
- * -ENOENT : no such segment
- * -EOPNOTSUPP: multi-part segment cannot be used with linux
- * -ENOMEM : out of memory
- * 0 .. 6 : type of segment as defined in include/asm-s390/extmem.h
- */
-int
-segment_type (char* name)
-{
- int rc;
- struct dcss_segment seg;
-
- if (!MACHINE_IS_VM)
- return -ENOSYS;
-
- dcss_mkname(name, seg.dcss_name);
- rc = query_segment_type (&seg);
- if (rc < 0)
- return rc;
- return seg.vm_segtype;
-}
-
-/*
- * check if segment collides with other segments that are currently loaded
- * returns 1 if this is the case, 0 if no collision was found
- */
-static int
-segment_overlaps_others (struct dcss_segment *seg)
-{
- struct list_head *l;
- struct dcss_segment *tmp;
-
- BUG_ON(!mutex_is_locked(&dcss_lock));
- list_for_each(l, &dcss_list) {
- tmp = list_entry(l, struct dcss_segment, list);
- if ((tmp->start_addr >> 20) > (seg->end >> 20))
- continue;
- if ((tmp->end >> 20) < (seg->start_addr >> 20))
- continue;
- if (seg == tmp)
- continue;
- return 1;
- }
- return 0;
-}
-
-/*
- * real segment loading function, called from segment_load
- */
-static int
-__segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long *end)
-{
- unsigned long start_addr, end_addr, dummy;
- struct dcss_segment *seg;
- int rc, diag_cc;
-
- start_addr = end_addr = 0;
- seg = kmalloc(sizeof(*seg), GFP_KERNEL | GFP_DMA);
- if (seg == NULL) {
- rc = -ENOMEM;
- goto out;
- }
- dcss_mkname (name, seg->dcss_name);
- rc = query_segment_type (seg);
- if (rc < 0)
- goto out_free;
-
- if (loadshr_scode == DCSS_LOADSHRX) {
- if (segment_overlaps_others(seg)) {
- rc = -EBUSY;
- goto out_free;
- }
- }
-
- rc = vmem_add_mapping(seg->start_addr, seg->end - seg->start_addr + 1);
-
- if (rc)
- goto out_free;
-
- seg->res = kzalloc(sizeof(struct resource), GFP_KERNEL);
- if (seg->res == NULL) {
- rc = -ENOMEM;
- goto out_shared;
- }
- seg->res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
- seg->res->start = seg->start_addr;
- seg->res->end = seg->end;
- memcpy(&seg->res_name, seg->dcss_name, 8);
- EBCASC(seg->res_name, 8);
- seg->res_name[8] = '\0';
- strncat(seg->res_name, " (DCSS)", 7);
- seg->res->name = seg->res_name;
- rc = seg->vm_segtype;
- if (rc == SEG_TYPE_SC ||
- ((rc == SEG_TYPE_SR || rc == SEG_TYPE_ER) && !do_nonshared))
- seg->res->flags |= IORESOURCE_READONLY;
- if (request_resource(&iomem_resource, seg->res)) {
- rc = -EBUSY;
- kfree(seg->res);
- goto out_shared;
- }
-
- if (do_nonshared)
- diag_cc = dcss_diag(&loadnsr_scode, seg->dcss_name,
- &start_addr, &end_addr);
- else
- diag_cc = dcss_diag(&loadshr_scode, seg->dcss_name,
- &start_addr, &end_addr);
- if (diag_cc < 0) {
- dcss_diag(&purgeseg_scode, seg->dcss_name,
- &dummy, &dummy);
- rc = diag_cc;
- goto out_resource;
- }
- if (diag_cc > 1) {
- pr_warning("Loading DCSS %s failed with rc=%ld\n", name,
- end_addr);
- rc = dcss_diag_translate_rc(end_addr);
- dcss_diag(&purgeseg_scode, seg->dcss_name,
- &dummy, &dummy);
- goto out_resource;
- }
- seg->start_addr = start_addr;
- seg->end = end_addr;
- seg->do_nonshared = do_nonshared;
- atomic_set(&seg->ref_count, 1);
- list_add(&seg->list, &dcss_list);
- *addr = seg->start_addr;
- *end = seg->end;
- if (do_nonshared)
- pr_info("DCSS %s of range %p to %p and type %s loaded as "
- "exclusive-writable\n", name, (void*) seg->start_addr,
- (void*) seg->end, segtype_string[seg->vm_segtype]);
- else {
- pr_info("DCSS %s of range %p to %p and type %s loaded in "
- "shared access mode\n", name, (void*) seg->start_addr,
- (void*) seg->end, segtype_string[seg->vm_segtype]);
- }
- goto out;
- out_resource:
- release_resource(seg->res);
- kfree(seg->res);
- out_shared:
- vmem_remove_mapping(seg->start_addr, seg->end - seg->start_addr + 1);
- out_free:
- kfree(seg);
- out:
- return rc;
-}
-
-/*
- * this function loads a DCSS segment
- * name : name of the DCSS
- * do_nonshared : 0 indicates that the dcss should be shared with other linux images
- * 1 indicates that the dcss should be exclusive for this linux image
- * addr : will be filled with start address of the segment
- * end : will be filled with end address of the segment
- * return values:
- * -ENOSYS : we are not running on VM
- * -EIO : could not perform query or load diagnose
- * -ENOENT : no such segment
- * -EOPNOTSUPP: multi-part segment cannot be used with linux
- * -ENOSPC : segment cannot be used (overlaps with storage)
- * -EBUSY : segment can temporarily not be used (overlaps with dcss)
- * -ERANGE : segment cannot be used (exceeds kernel mapping range)
- * -EPERM : segment is currently loaded with incompatible permissions
- * -ENOMEM : out of memory
- * 0 .. 6 : type of segment as defined in include/asm-s390/extmem.h
- */
-int
-segment_load (char *name, int do_nonshared, unsigned long *addr,
- unsigned long *end)
-{
- struct dcss_segment *seg;
- int rc;
-
- if (!MACHINE_IS_VM)
- return -ENOSYS;
-
- mutex_lock(&dcss_lock);
- seg = segment_by_name (name);
- if (seg == NULL)
- rc = __segment_load (name, do_nonshared, addr, end);
- else {
- if (do_nonshared == seg->do_nonshared) {
- atomic_inc(&seg->ref_count);
- *addr = seg->start_addr;
- *end = seg->end;
- rc = seg->vm_segtype;
- } else {
- *addr = *end = 0;
- rc = -EPERM;
- }
- }
- mutex_unlock(&dcss_lock);
- return rc;
-}
-
-/*
- * this function modifies the shared state of a DCSS segment.
- * name : name of the DCSS
- * do_nonshared : 0 indicates that the dcss should be shared with other linux images
- * 1 indicates that the dcss should be exclusive for this linux image
- * return values:
- * -EIO : could not perform load diagnose (segment gone!)
- * -ENOENT : no such segment (segment gone!)
- * -EAGAIN : segment is in use by other exploiters, try later
- * -EINVAL : no segment with the given name is currently loaded - name invalid
- * -EBUSY : segment can temporarily not be used (overlaps with dcss)
- * 0 : operation succeeded
- */
-int
-segment_modify_shared (char *name, int do_nonshared)
-{
- struct dcss_segment *seg;
- unsigned long start_addr, end_addr, dummy;
- int rc, diag_cc;
-
- start_addr = end_addr = 0;
- mutex_lock(&dcss_lock);
- seg = segment_by_name (name);
- if (seg == NULL) {
- rc = -EINVAL;
- goto out_unlock;
- }
- if (do_nonshared == seg->do_nonshared) {
- pr_info("DCSS %s is already in the requested access "
- "mode\n", name);
- rc = 0;
- goto out_unlock;
- }
- if (atomic_read (&seg->ref_count) != 1) {
- pr_warning("DCSS %s is in use and cannot be reloaded\n",
- name);
- rc = -EAGAIN;
- goto out_unlock;
- }
- release_resource(seg->res);
- if (do_nonshared)
- seg->res->flags &= ~IORESOURCE_READONLY;
- else
- if (seg->vm_segtype == SEG_TYPE_SR ||
- seg->vm_segtype == SEG_TYPE_ER)
- seg->res->flags |= IORESOURCE_READONLY;
-
- if (request_resource(&iomem_resource, seg->res)) {
- pr_warning("DCSS %s overlaps with used memory resources "
- "and cannot be reloaded\n", name);
- rc = -EBUSY;
- kfree(seg->res);
- goto out_del_mem;
- }
-
- dcss_diag(&purgeseg_scode, seg->dcss_name, &dummy, &dummy);
- if (do_nonshared)
- diag_cc = dcss_diag(&loadnsr_scode, seg->dcss_name,
- &start_addr, &end_addr);
- else
- diag_cc = dcss_diag(&loadshr_scode, seg->dcss_name,
- &start_addr, &end_addr);
- if (diag_cc < 0) {
- rc = diag_cc;
- goto out_del_res;
- }
- if (diag_cc > 1) {
- pr_warning("Reloading DCSS %s failed with rc=%ld\n", name,
- end_addr);
- rc = dcss_diag_translate_rc(end_addr);
- goto out_del_res;
- }
- seg->start_addr = start_addr;
- seg->end = end_addr;
- seg->do_nonshared = do_nonshared;
- rc = 0;
- goto out_unlock;
- out_del_res:
- release_resource(seg->res);
- kfree(seg->res);
- out_del_mem:
- vmem_remove_mapping(seg->start_addr, seg->end - seg->start_addr + 1);
- list_del(&seg->list);
- dcss_diag(&purgeseg_scode, seg->dcss_name, &dummy, &dummy);
- kfree(seg);
- out_unlock:
- mutex_unlock(&dcss_lock);
- return rc;
-}
-
-/*
- * Decrease the use count of a DCSS segment and remove
- * it from the address space if nobody is using it
- * any longer.
- */
-void
-segment_unload(char *name)
-{
- unsigned long dummy;
- struct dcss_segment *seg;
-
- if (!MACHINE_IS_VM)
- return;
-
- mutex_lock(&dcss_lock);
- seg = segment_by_name (name);
- if (seg == NULL) {
- pr_err("Unloading unknown DCSS %s failed\n", name);
- goto out_unlock;
- }
- if (atomic_dec_return(&seg->ref_count) != 0)
- goto out_unlock;
- release_resource(seg->res);
- kfree(seg->res);
- vmem_remove_mapping(seg->start_addr, seg->end - seg->start_addr + 1);
- list_del(&seg->list);
- dcss_diag(&purgeseg_scode, seg->dcss_name, &dummy, &dummy);
- kfree(seg);
-out_unlock:
- mutex_unlock(&dcss_lock);
-}
-
-/*
- * save segment content permanently
- */
-void
-segment_save(char *name)
-{
- struct dcss_segment *seg;
- char cmd1[160];
- char cmd2[80];
- int i, response;
-
- if (!MACHINE_IS_VM)
- return;
-
- mutex_lock(&dcss_lock);
- seg = segment_by_name (name);
-
- if (seg == NULL) {
- pr_err("Saving unknown DCSS %s failed\n", name);
- goto out;
- }
-
- sprintf(cmd1, "DEFSEG %s", name);
- for (i=0; i<seg->segcnt; i++) {
- sprintf(cmd1+strlen(cmd1), " %lX-%lX %s",
- seg->range[i].start >> PAGE_SHIFT,
- seg->range[i].end >> PAGE_SHIFT,
- segtype_string[seg->range[i].start & 0xff]);
- }
- sprintf(cmd2, "SAVESEG %s", name);
- response = 0;
- cpcmd(cmd1, NULL, 0, &response);
- if (response) {
- pr_err("Saving a DCSS failed with DEFSEG response code "
- "%i\n", response);
- goto out;
- }
- cpcmd(cmd2, NULL, 0, &response);
- if (response) {
- pr_err("Saving a DCSS failed with SAVESEG response code "
- "%i\n", response);
- goto out;
- }
-out:
- mutex_unlock(&dcss_lock);
-}
-
-/*
- * print appropriate error message for segment_load()/segment_type()
- * return code
- */
-void segment_warning(int rc, char *seg_name)
-{
- switch (rc) {
- case -ENOENT:
- pr_err("DCSS %s cannot be loaded or queried\n", seg_name);
- break;
- case -ENOSYS:
- pr_err("DCSS %s cannot be loaded or queried without "
- "z/VM\n", seg_name);
- break;
- case -EIO:
- pr_err("Loading or querying DCSS %s resulted in a "
- "hardware error\n", seg_name);
- break;
- case -EOPNOTSUPP:
- pr_err("DCSS %s has multiple page ranges and cannot be "
- "loaded or queried\n", seg_name);
- break;
- case -ENOSPC:
- pr_err("DCSS %s overlaps with used storage and cannot "
- "be loaded\n", seg_name);
- break;
- case -EBUSY:
- pr_err("%s needs used memory resources and cannot be "
- "loaded or queried\n", seg_name);
- break;
- case -EPERM:
- pr_err("DCSS %s is already loaded in a different access "
- "mode\n", seg_name);
- break;
- case -ENOMEM:
- pr_err("There is not enough memory to load or query "
- "DCSS %s\n", seg_name);
- break;
- case -ERANGE:
- pr_err("DCSS %s exceeds the kernel mapping range (%lu) "
- "and cannot be loaded\n", seg_name, VMEM_MAX_PHYS);
- break;
- default:
- break;
- }
-}
-
-EXPORT_SYMBOL(segment_load);
-EXPORT_SYMBOL(segment_unload);
-EXPORT_SYMBOL(segment_save);
-EXPORT_SYMBOL(segment_type);
-EXPORT_SYMBOL(segment_modify_shared);
-EXPORT_SYMBOL(segment_warning);
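
For callers, the exported interface above reduces to segment_load() and
segment_unload(), with segment_warning() for error reporting. A minimal
kernel-side sketch of a DCSS consumer, with "MYSEG" as a placeholder segment
name and error handling abbreviated:

    /*
     * Sketch of a DCSS consumer built on the interface exported above.
     */
    #include <asm/extmem.h>

    static unsigned long dcss_start, dcss_end;

    static int use_dcss(void)
    {
    	int type;

    	/* do_nonshared == 0: request a shared mapping */
    	type = segment_load("MYSEG", 0, &dcss_start, &dcss_end);
    	if (type < 0) {
    		segment_warning(type, "MYSEG");
    		return type;
    	}
    	/* ... use the memory between dcss_start and dcss_end ... */
    	segment_unload("MYSEG");
    	return 0;
    }

On success segment_load() returns the segment type (0..6), so any negative
return is an error code suitable for segment_warning().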
diff --git a/ANDROID_3.4.5/arch/s390/mm/fault.c b/ANDROID_3.4.5/arch/s390/mm/fault.c
deleted file mode 100644
index 4e668600..00000000
--- a/ANDROID_3.4.5/arch/s390/mm/fault.c
+++ /dev/null
@@ -1,664 +0,0 @@
-/*
- * arch/s390/mm/fault.c
- *
- * S390 version
- * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
- * Author(s): Hartmut Penner (hp@de.ibm.com)
- * Ulrich Weigand (uweigand@de.ibm.com)
- *
- * Derived from "arch/i386/mm/fault.c"
- * Copyright (C) 1995 Linus Torvalds
- */
-
-#include <linux/kernel_stat.h>
-#include <linux/perf_event.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/ptrace.h>
-#include <linux/mman.h>
-#include <linux/mm.h>
-#include <linux/compat.h>
-#include <linux/smp.h>
-#include <linux/kdebug.h>
-#include <linux/init.h>
-#include <linux/console.h>
-#include <linux/module.h>
-#include <linux/hardirq.h>
-#include <linux/kprobes.h>
-#include <linux/uaccess.h>
-#include <linux/hugetlb.h>
-#include <asm/asm-offsets.h>
-#include <asm/pgtable.h>
-#include <asm/irq.h>
-#include <asm/mmu_context.h>
-#include <asm/facility.h>
-#include "../kernel/entry.h"
-
-#ifndef CONFIG_64BIT
-#define __FAIL_ADDR_MASK 0x7ffff000
-#define __SUBCODE_MASK 0x0200
-#define __PF_RES_FIELD 0ULL
-#else /* CONFIG_64BIT */
-#define __FAIL_ADDR_MASK -4096L
-#define __SUBCODE_MASK 0x0600
-#define __PF_RES_FIELD 0x8000000000000000ULL
-#endif /* CONFIG_64BIT */
-
-#define VM_FAULT_BADCONTEXT 0x010000
-#define VM_FAULT_BADMAP 0x020000
-#define VM_FAULT_BADACCESS 0x040000
-
-static unsigned long store_indication;
-
-void fault_init(void)
-{
- if (test_facility(2) && test_facility(75))
- store_indication = 0xc00;
-}
-
-static inline int notify_page_fault(struct pt_regs *regs)
-{
- int ret = 0;
-
- /* kprobe_running() needs smp_processor_id() */
- if (kprobes_built_in() && !user_mode(regs)) {
- preempt_disable();
- if (kprobe_running() && kprobe_fault_handler(regs, 14))
- ret = 1;
- preempt_enable();
- }
- return ret;
-}
-
-
-/*
- * Unlock any spinlocks which will prevent us from getting the
- * message out.
- */
-void bust_spinlocks(int yes)
-{
- if (yes) {
- oops_in_progress = 1;
- } else {
- int loglevel_save = console_loglevel;
- console_unblank();
- oops_in_progress = 0;
- /*
- * OK, the message is on the console. Now we call printk()
- * without oops_in_progress set so that printk will give klogd
- * a poke. Hold onto your hats...
- */
- console_loglevel = 15;
- printk(" ");
- console_loglevel = loglevel_save;
- }
-}
-
-/*
- * Returns the address space associated with the fault.
- * Returns 0 for kernel space and 1 for user space.
- */
-static inline int user_space_fault(unsigned long trans_exc_code)
-{
- /*
- * The lowest two bits of the translation exception
- * identification indicate which paging table was used.
- */
- trans_exc_code &= 3;
- if (trans_exc_code == 2)
- /* Access via secondary space, set_fs setting decides */
- return current->thread.mm_segment.ar4;
- if (user_mode == HOME_SPACE_MODE)
- /* User space if the access has been done via home space. */
- return trans_exc_code == 3;
- /*
- * If the user space is not the home space the kernel runs in home
- * space. Access via secondary space has already been covered,
- * access via primary space or access register is from user space
- * and access via home space is from the kernel.
- */
- return trans_exc_code != 3;
-}
-
-static inline void report_user_fault(struct pt_regs *regs, long signr)
-{
- if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
- return;
- if (!unhandled_signal(current, signr))
- return;
- if (!printk_ratelimit())
- return;
- printk(KERN_ALERT "User process fault: interruption code 0x%X ",
- regs->int_code);
- print_vma_addr(KERN_CONT "in ", regs->psw.addr & PSW_ADDR_INSN);
- printk(KERN_CONT "\n");
- printk(KERN_ALERT "failing address: %lX\n",
- regs->int_parm_long & __FAIL_ADDR_MASK);
- show_regs(regs);
-}
-
-/*
- * Send SIGSEGV to task. This is an external routine
- * to keep the stack usage of do_page_fault small.
- */
-static noinline void do_sigsegv(struct pt_regs *regs, int si_code)
-{
- struct siginfo si;
-
- report_user_fault(regs, SIGSEGV);
- si.si_signo = SIGSEGV;
- si.si_code = si_code;
- si.si_addr = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK);
- force_sig_info(SIGSEGV, &si, current);
-}
-
-static noinline void do_no_context(struct pt_regs *regs)
-{
- const struct exception_table_entry *fixup;
- unsigned long address;
-
- /* Are we prepared to handle this kernel fault? */
- fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
- if (fixup) {
- regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE;
- return;
- }
-
- /*
- * Oops. The kernel tried to access some bad page. We'll have to
- * terminate things with extreme prejudice.
- */
- address = regs->int_parm_long & __FAIL_ADDR_MASK;
- if (!user_space_fault(regs->int_parm_long))
- printk(KERN_ALERT "Unable to handle kernel pointer dereference"
- " at virtual kernel address %p\n", (void *)address);
- else
- printk(KERN_ALERT "Unable to handle kernel paging request"
- " at virtual user address %p\n", (void *)address);
-
- die(regs, "Oops");
- do_exit(SIGKILL);
-}
-
-static noinline void do_low_address(struct pt_regs *regs)
-{
- /* Low-address protection hit in kernel mode means
- NULL pointer write access in kernel mode. */
- if (regs->psw.mask & PSW_MASK_PSTATE) {
- /* Low-address protection hit in user mode 'cannot happen'. */
- die (regs, "Low-address protection");
- do_exit(SIGKILL);
- }
-
- do_no_context(regs);
-}
-
-static noinline void do_sigbus(struct pt_regs *regs)
-{
- struct task_struct *tsk = current;
- struct siginfo si;
-
- /*
- * Send a sigbus, regardless of whether we were in kernel
- * or user mode.
- */
- si.si_signo = SIGBUS;
- si.si_errno = 0;
- si.si_code = BUS_ADRERR;
- si.si_addr = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK);
- force_sig_info(SIGBUS, &si, tsk);
-}
-
-static noinline void do_fault_error(struct pt_regs *regs, int fault)
-{
- int si_code;
-
- switch (fault) {
- case VM_FAULT_BADACCESS:
- case VM_FAULT_BADMAP:
- /* Bad memory access. Check if it is kernel or user space. */
- if (regs->psw.mask & PSW_MASK_PSTATE) {
- /* User mode accesses just cause a SIGSEGV */
- si_code = (fault == VM_FAULT_BADMAP) ?
- SEGV_MAPERR : SEGV_ACCERR;
- do_sigsegv(regs, si_code);
- return;
- }
- case VM_FAULT_BADCONTEXT:
- do_no_context(regs);
- break;
- default: /* fault & VM_FAULT_ERROR */
- if (fault & VM_FAULT_OOM) {
- if (!(regs->psw.mask & PSW_MASK_PSTATE))
- do_no_context(regs);
- else
- pagefault_out_of_memory();
- } else if (fault & VM_FAULT_SIGBUS) {
- /* Kernel mode? Handle exceptions or die */
- if (!(regs->psw.mask & PSW_MASK_PSTATE))
- do_no_context(regs);
- else
- do_sigbus(regs);
- } else
- BUG();
- break;
- }
-}
-
-/*
- * This routine handles page faults. It determines the address,
- * and the problem, and then passes it off to one of the appropriate
- * routines.
- *
- * interruption code (int_code):
- * 04 Protection -> Write-Protection (suppression)
- * 10 Segment translation -> Not present (nullification)
- * 11 Page translation -> Not present (nullification)
- * 3b Region third trans. -> Not present (nullification)
- */
-static inline int do_exception(struct pt_regs *regs, int access)
-{
- struct task_struct *tsk;
- struct mm_struct *mm;
- struct vm_area_struct *vma;
- unsigned long trans_exc_code;
- unsigned long address;
- unsigned int flags;
- int fault;
-
- if (notify_page_fault(regs))
- return 0;
-
- tsk = current;
- mm = tsk->mm;
- trans_exc_code = regs->int_parm_long;
-
- /*
- * Verify that the fault happened in user space, that
- * we are not in an interrupt and that there is a
- * user context.
- */
- fault = VM_FAULT_BADCONTEXT;
- if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
- goto out;
-
- address = trans_exc_code & __FAIL_ADDR_MASK;
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
- flags = FAULT_FLAG_ALLOW_RETRY;
- if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
- flags |= FAULT_FLAG_WRITE;
- down_read(&mm->mmap_sem);
-
-#ifdef CONFIG_PGSTE
- if (test_tsk_thread_flag(current, TIF_SIE) && S390_lowcore.gmap) {
- address = __gmap_fault(address,
- (struct gmap *) S390_lowcore.gmap);
- if (address == -EFAULT) {
- fault = VM_FAULT_BADMAP;
- goto out_up;
- }
- if (address == -ENOMEM) {
- fault = VM_FAULT_OOM;
- goto out_up;
- }
- }
-#endif
-
-retry:
- fault = VM_FAULT_BADMAP;
- vma = find_vma(mm, address);
- if (!vma)
- goto out_up;
-
- if (unlikely(vma->vm_start > address)) {
- if (!(vma->vm_flags & VM_GROWSDOWN))
- goto out_up;
- if (expand_stack(vma, address))
- goto out_up;
- }
-
- /*
- * Ok, we have a good vm_area for this memory access, so
- * we can handle it..
- */
- fault = VM_FAULT_BADACCESS;
- if (unlikely(!(vma->vm_flags & access)))
- goto out_up;
-
- if (is_vm_hugetlb_page(vma))
- address &= HPAGE_MASK;
- /*
- * If for any reason at all we couldn't handle the fault,
- * make sure we exit gracefully rather than endlessly redo
- * the fault.
- */
- fault = handle_mm_fault(mm, vma, address, flags);
- if (unlikely(fault & VM_FAULT_ERROR))
- goto out_up;
-
- /*
- * Major/minor page fault accounting is only done on the
- * initial attempt. If we go through a retry, it is extremely
- * likely that the page will be found in page cache at that point.
- */
- if (flags & FAULT_FLAG_ALLOW_RETRY) {
- if (fault & VM_FAULT_MAJOR) {
- tsk->maj_flt++;
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
- regs, address);
- } else {
- tsk->min_flt++;
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
- regs, address);
- }
- if (fault & VM_FAULT_RETRY) {
- /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
- * of starvation. */
- flags &= ~FAULT_FLAG_ALLOW_RETRY;
- down_read(&mm->mmap_sem);
- goto retry;
- }
- }
- /*
- * The instruction that caused the program check will
- * be repeated. Don't signal single step via SIGTRAP.
- */
- clear_tsk_thread_flag(tsk, TIF_PER_TRAP);
- fault = 0;
-out_up:
- up_read(&mm->mmap_sem);
-out:
- return fault;
-}
-
-void __kprobes do_protection_exception(struct pt_regs *regs)
-{
- unsigned long trans_exc_code;
- int fault;
-
- trans_exc_code = regs->int_parm_long;
- /* Protection exception is suppressing, decrement psw address. */
- regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
- /*
- * Check for low-address protection. This needs to be treated
- * as a special case because the translation exception code
- * field is not guaranteed to contain valid data in this case.
- */
- if (unlikely(!(trans_exc_code & 4))) {
- do_low_address(regs);
- return;
- }
- fault = do_exception(regs, VM_WRITE);
- if (unlikely(fault))
- do_fault_error(regs, fault);
-}
-
-void __kprobes do_dat_exception(struct pt_regs *regs)
-{
- int access, fault;
-
- access = VM_READ | VM_EXEC | VM_WRITE;
- fault = do_exception(regs, access);
- if (unlikely(fault))
- do_fault_error(regs, fault);
-}
-
-#ifdef CONFIG_64BIT
-void __kprobes do_asce_exception(struct pt_regs *regs)
-{
- struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
- unsigned long trans_exc_code;
-
- trans_exc_code = regs->int_parm_long;
- if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
- goto no_context;
-
- down_read(&mm->mmap_sem);
- vma = find_vma(mm, trans_exc_code & __FAIL_ADDR_MASK);
- up_read(&mm->mmap_sem);
-
- if (vma) {
- update_mm(mm, current);
- return;
- }
-
- /* User mode accesses just cause a SIGSEGV */
- if (regs->psw.mask & PSW_MASK_PSTATE) {
- do_sigsegv(regs, SEGV_MAPERR);
- return;
- }
-
-no_context:
- do_no_context(regs);
-}
-#endif
-
-int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write)
-{
- struct pt_regs regs;
- int access, fault;
-
- regs.psw.mask = psw_kernel_bits | PSW_MASK_DAT | PSW_MASK_MCHECK;
- if (!irqs_disabled())
- regs.psw.mask |= PSW_MASK_IO | PSW_MASK_EXT;
- regs.psw.addr = (unsigned long) __builtin_return_address(0);
- regs.psw.addr |= PSW_ADDR_AMODE;
- regs.int_code = pgm_int_code;
- regs.int_parm_long = (uaddr & PAGE_MASK) | 2;
- access = write ? VM_WRITE : VM_READ;
- fault = do_exception(&regs, access);
- if (unlikely(fault)) {
- if (fault & VM_FAULT_OOM)
- return -EFAULT;
- else if (fault & VM_FAULT_SIGBUS)
- do_sigbus(&regs);
- }
- return fault ? -EFAULT : 0;
-}
-
-#ifdef CONFIG_PFAULT
-/*
- * 'pfault' pseudo page fault routines.
- */
-static int pfault_disable;
-
-static int __init nopfault(char *str)
-{
- pfault_disable = 1;
- return 1;
-}
-
-__setup("nopfault", nopfault);
-
-struct pfault_refbk {
- u16 refdiagc;
- u16 reffcode;
- u16 refdwlen;
- u16 refversn;
- u64 refgaddr;
- u64 refselmk;
- u64 refcmpmk;
- u64 reserved;
-} __attribute__ ((packed, aligned(8)));
-
-int pfault_init(void)
-{
- struct pfault_refbk refbk = {
- .refdiagc = 0x258,
- .reffcode = 0,
- .refdwlen = 5,
- .refversn = 2,
- .refgaddr = __LC_CURRENT_PID,
- .refselmk = 1ULL << 48,
- .refcmpmk = 1ULL << 48,
- .reserved = __PF_RES_FIELD };
- int rc;
-
- if (pfault_disable)
- return -1;
- asm volatile(
- " diag %1,%0,0x258\n"
- "0: j 2f\n"
- "1: la %0,8\n"
- "2:\n"
- EX_TABLE(0b,1b)
- : "=d" (rc) : "a" (&refbk), "m" (refbk) : "cc");
- return rc;
-}
-
-void pfault_fini(void)
-{
- struct pfault_refbk refbk = {
- .refdiagc = 0x258,
- .reffcode = 1,
- .refdwlen = 5,
- .refversn = 2,
- };
-
- if (pfault_disable)
- return;
- asm volatile(
- " diag %0,0,0x258\n"
- "0:\n"
- EX_TABLE(0b,0b)
- : : "a" (&refbk), "m" (refbk) : "cc");
-}
-
-static DEFINE_SPINLOCK(pfault_lock);
-static LIST_HEAD(pfault_list);
-
-static void pfault_interrupt(struct ext_code ext_code,
- unsigned int param32, unsigned long param64)
-{
- struct task_struct *tsk;
- __u16 subcode;
- pid_t pid;
-
- /*
- * Get the external interruption subcode & pfault
- * initial/completion signal bit. VM stores this
- * in the 'cpu address' field associated with the
- * external interrupt.
- */
- subcode = ext_code.subcode;
- if ((subcode & 0xff00) != __SUBCODE_MASK)
- return;
- kstat_cpu(smp_processor_id()).irqs[EXTINT_PFL]++;
- if (subcode & 0x0080) {
- /* Get the token (= pid of the affected task). */
- pid = sizeof(void *) == 4 ? param32 : param64;
- rcu_read_lock();
- tsk = find_task_by_pid_ns(pid, &init_pid_ns);
- if (tsk)
- get_task_struct(tsk);
- rcu_read_unlock();
- if (!tsk)
- return;
- } else {
- tsk = current;
- }
- spin_lock(&pfault_lock);
- if (subcode & 0x0080) {
- /* signal bit is set -> a page has been swapped in by VM */
- if (tsk->thread.pfault_wait == 1) {
- /* Initial interrupt was faster than the completion
- * interrupt. pfault_wait is valid. Set pfault_wait
- * back to zero and wake up the process. This can
- * safely be done because the task is still sleeping
- * and can't produce new pfaults. */
- tsk->thread.pfault_wait = 0;
- list_del(&tsk->thread.list);
- wake_up_process(tsk);
- put_task_struct(tsk);
- } else {
- /* Completion interrupt was faster than initial
- * interrupt. Set pfault_wait to -1 so the initial
- * interrupt doesn't put the task to sleep.
- * If the task is not running, ignore the completion
- * interrupt since it must be a leftover of a PFAULT
- * CANCEL operation which didn't remove all pending
- * completion interrupts. */
- if (tsk->state == TASK_RUNNING)
- tsk->thread.pfault_wait = -1;
- }
- put_task_struct(tsk);
- } else {
- /* signal bit not set -> a real page is missing. */
- if (tsk->thread.pfault_wait == 1) {
- /* Already on the list with a reference: put to sleep */
- set_task_state(tsk, TASK_UNINTERRUPTIBLE);
- set_tsk_need_resched(tsk);
- } else if (tsk->thread.pfault_wait == -1) {
- /* Completion interrupt was faster than the initial
- * interrupt (pfault_wait == -1). Set pfault_wait
- * back to zero and exit. */
- tsk->thread.pfault_wait = 0;
- } else {
- /* Initial interrupt arrived before completion
- * interrupt. Let the task sleep.
- * An extra task reference is needed since a different
- * cpu may set the task state to TASK_RUNNING again
- * before the scheduler is reached. */
- get_task_struct(tsk);
- tsk->thread.pfault_wait = 1;
- list_add(&tsk->thread.list, &pfault_list);
- set_task_state(tsk, TASK_UNINTERRUPTIBLE);
- set_tsk_need_resched(tsk);
- }
- }
- spin_unlock(&pfault_lock);
-}
-
-static int __cpuinit pfault_cpu_notify(struct notifier_block *self,
- unsigned long action, void *hcpu)
-{
- struct thread_struct *thread, *next;
- struct task_struct *tsk;
-
- switch (action) {
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- spin_lock_irq(&pfault_lock);
- list_for_each_entry_safe(thread, next, &pfault_list, list) {
- thread->pfault_wait = 0;
- list_del(&thread->list);
- tsk = container_of(thread, struct task_struct, thread);
- wake_up_process(tsk);
- put_task_struct(tsk);
- }
- spin_unlock_irq(&pfault_lock);
- break;
- default:
- break;
- }
- return NOTIFY_OK;
-}
-
-static int __init pfault_irq_init(void)
-{
- int rc;
-
- rc = register_external_interrupt(0x2603, pfault_interrupt);
- if (rc)
- goto out_extint;
- rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP;
- if (rc)
- goto out_pfault;
- service_subclass_irq_register();
- hotcpu_notifier(pfault_cpu_notify, 0);
- return 0;
-
-out_pfault:
- unregister_external_interrupt(0x2603, pfault_interrupt);
-out_extint:
- pfault_disable = 1;
- return rc;
-}
-early_initcall(pfault_irq_init);
-
-#endif /* CONFIG_PFAULT */
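
A note on the dispatch in do_fault_error() above: the private
VM_FAULT_BADCONTEXT/BADMAP/BADACCESS codes are chosen above the generic
VM_FAULT_* bits, so do_exception() can return either kind of result in a
single int. A self-contained sketch of that classification (the string
labels are illustrative, not kernel API):

    /*
     * Sketch of the fault classification behind do_fault_error().
     */
    #define VM_FAULT_BADCONTEXT	0x010000	/* no usable mm, in_atomic() */
    #define VM_FAULT_BADMAP		0x020000	/* no vma for the address */
    #define VM_FAULT_BADACCESS	0x040000	/* vma lacks the needed right */

    static const char *fault_class(int fault)
    {
    	if (fault & VM_FAULT_BADACCESS)
    		return "bad access";	/* user mode: SIGSEGV, SEGV_ACCERR */
    	if (fault & VM_FAULT_BADMAP)
    		return "bad map";	/* user mode: SIGSEGV, SEGV_MAPERR */
    	if (fault & VM_FAULT_BADCONTEXT)
    		return "bad context";	/* always the kernel oops path */
    	return "generic";		/* bits from handle_mm_fault() */
    }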
diff --git a/ANDROID_3.4.5/arch/s390/mm/gup.c b/ANDROID_3.4.5/arch/s390/mm/gup.c
deleted file mode 100644
index 65cb06e2..00000000
--- a/ANDROID_3.4.5/arch/s390/mm/gup.c
+++ /dev/null
@@ -1,236 +0,0 @@
-/*
- * Lockless get_user_pages_fast for s390
- *
- * Copyright IBM Corp. 2010
- * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
- */
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/hugetlb.h>
-#include <linux/vmstat.h>
-#include <linux/pagemap.h>
-#include <linux/rwsem.h>
-#include <asm/pgtable.h>
-
-/*
- * The performance-critical leaf functions are made noinline; otherwise gcc
- * inlines everything into a single function, which results in too much
- * register pressure.
- */
-static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
- unsigned long end, int write, struct page **pages, int *nr)
-{
- unsigned long mask;
- pte_t *ptep, pte;
- struct page *page;
-
- mask = (write ? _PAGE_RO : 0) | _PAGE_INVALID | _PAGE_SPECIAL;
-
- ptep = ((pte_t *) pmd_deref(pmd)) + pte_index(addr);
- do {
- pte = *ptep;
- barrier();
- if ((pte_val(pte) & mask) != 0)
- return 0;
- VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
- page = pte_page(pte);
- if (!page_cache_get_speculative(page))
- return 0;
- if (unlikely(pte_val(pte) != pte_val(*ptep))) {
- put_page(page);
- return 0;
- }
- pages[*nr] = page;
- (*nr)++;
-
- } while (ptep++, addr += PAGE_SIZE, addr != end);
-
- return 1;
-}
-
-static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
- unsigned long end, int write, struct page **pages, int *nr)
-{
- unsigned long mask, result;
- struct page *head, *page, *tail;
- int refs;
-
- result = write ? 0 : _SEGMENT_ENTRY_RO;
- mask = result | _SEGMENT_ENTRY_INV;
- if ((pmd_val(pmd) & mask) != result)
- return 0;
- VM_BUG_ON(!pfn_valid(pmd_val(pmd) >> PAGE_SHIFT));
-
- refs = 0;
- head = pmd_page(pmd);
- page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
- tail = page;
- do {
- VM_BUG_ON(compound_head(page) != head);
- pages[*nr] = page;
- (*nr)++;
- page++;
- refs++;
- } while (addr += PAGE_SIZE, addr != end);
-
- if (!page_cache_add_speculative(head, refs)) {
- *nr -= refs;
- return 0;
- }
-
- if (unlikely(pmd_val(pmd) != pmd_val(*pmdp))) {
- *nr -= refs;
- while (refs--)
- put_page(head);
- return 0;
- }
-
- /*
- * Any tail pages need their mapcount reference taken before we
- * return.
- */
- while (refs--) {
- if (PageTail(tail))
- get_huge_page_tail(tail);
- tail++;
- }
-
- return 1;
-}
-
-
-static inline int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr,
- unsigned long end, int write, struct page **pages, int *nr)
-{
- unsigned long next;
- pmd_t *pmdp, pmd;
-
- pmdp = (pmd_t *) pudp;
-#ifdef CONFIG_64BIT
- if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
- pmdp = (pmd_t *) pud_deref(pud);
- pmdp += pmd_index(addr);
-#endif
- do {
- pmd = *pmdp;
- barrier();
- next = pmd_addr_end(addr, end);
- if (pmd_none(pmd))
- return 0;
- if (unlikely(pmd_huge(pmd))) {
- if (!gup_huge_pmd(pmdp, pmd, addr, next,
- write, pages, nr))
- return 0;
- } else if (!gup_pte_range(pmdp, pmd, addr, next,
- write, pages, nr))
- return 0;
- } while (pmdp++, addr = next, addr != end);
-
- return 1;
-}
-
-static inline int gup_pud_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr,
- unsigned long end, int write, struct page **pages, int *nr)
-{
- unsigned long next;
- pud_t *pudp, pud;
-
- pudp = (pud_t *) pgdp;
-#ifdef CONFIG_64BIT
- if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
- pudp = (pud_t *) pgd_deref(pgd);
- pudp += pud_index(addr);
-#endif
- do {
- pud = *pudp;
- barrier();
- next = pud_addr_end(addr, end);
- if (pud_none(pud))
- return 0;
- if (!gup_pmd_range(pudp, pud, addr, next, write, pages, nr))
- return 0;
- } while (pudp++, addr = next, addr != end);
-
- return 1;
-}
-
-/**
- * get_user_pages_fast() - pin user pages in memory
- * @start: starting user address
- * @nr_pages: number of pages from start to pin
- * @write: whether pages will be written to
- * @pages: array that receives pointers to the pages pinned.
- * Should be at least nr_pages long.
- *
- * Attempt to pin user pages in memory without taking mm->mmap_sem.
- * If not successful, it will fall back to taking the lock and
- * calling get_user_pages().
- *
- * Returns number of pages pinned. This may be fewer than the number
- * requested. If nr_pages is 0 or negative, returns 0. If no pages
- * were pinned, returns -errno.
- */
-int get_user_pages_fast(unsigned long start, int nr_pages, int write,
- struct page **pages)
-{
- struct mm_struct *mm = current->mm;
- unsigned long addr, len, end;
- unsigned long next;
- pgd_t *pgdp, pgd;
- int nr = 0;
-
- start &= PAGE_MASK;
- addr = start;
- len = (unsigned long) nr_pages << PAGE_SHIFT;
- end = start + len;
- if (end < start)
- goto slow_irqon;
-
- /*
- * local_irq_disable() doesn't prevent pagetable teardown, but does
- * prevent the pagetables from being freed on s390.
- *
- * So long as we atomically load page table pointers versus teardown,
- * we can follow the address down to the page and take a ref on it.
- */
- local_irq_disable();
- pgdp = pgd_offset(mm, addr);
- do {
- pgd = *pgdp;
- barrier();
- next = pgd_addr_end(addr, end);
- if (pgd_none(pgd))
- goto slow;
- if (!gup_pud_range(pgdp, pgd, addr, next, write, pages, &nr))
- goto slow;
- } while (pgdp++, addr = next, addr != end);
- local_irq_enable();
-
- VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT);
- return nr;
-
- {
- int ret;
-slow:
- local_irq_enable();
-slow_irqon:
- /* Try to get the remaining pages with get_user_pages */
- start += nr << PAGE_SHIFT;
- pages += nr;
-
- down_read(&mm->mmap_sem);
- ret = get_user_pages(current, mm, start,
- (end - start) >> PAGE_SHIFT, write, 0, pages, NULL);
- up_read(&mm->mmap_sem);
-
- /* Have to be a bit careful with return values */
- if (nr > 0) {
- if (ret < 0)
- ret = nr;
- else
- ret += nr;
- }
-
- return ret;
- }
-}
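
A kernel-side sketch of how the interface above is typically consumed, with
buf, nr_pages and pages[] as hypothetical caller state; every page actually
pinned must later be released with put_page():

    /*
     * Sketch of a get_user_pages_fast() caller.
     */
    #include <linux/mm.h>

    static int pin_user_buffer(unsigned long buf, int nr_pages,
    			   struct page **pages)
    {
    	int i, got;

    	got = get_user_pages_fast(buf, nr_pages, 1 /* write */, pages);
    	if (got < 0)
    		return got;
    	/* ... hand the pinned pages to DMA or copy through them ... */
    	for (i = 0; i < got; i++)
    		put_page(pages[i]);
    	return got == nr_pages ? 0 : -EFAULT;
    }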
diff --git a/ANDROID_3.4.5/arch/s390/mm/hugetlbpage.c b/ANDROID_3.4.5/arch/s390/mm/hugetlbpage.c
deleted file mode 100644
index 597bb2d2..00000000
--- a/ANDROID_3.4.5/arch/s390/mm/hugetlbpage.c
+++ /dev/null
@@ -1,130 +0,0 @@
-/*
- * IBM System z Huge TLB Page Support for Kernel.
- *
- * Copyright 2007 IBM Corp.
- * Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
- */
-
-#include <linux/mm.h>
-#include <linux/hugetlb.h>
-
-
-void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *pteptr, pte_t pteval)
-{
- pmd_t *pmdp = (pmd_t *) pteptr;
- unsigned long mask;
-
- if (!MACHINE_HAS_HPAGE) {
- pteptr = (pte_t *) pte_page(pteval)[1].index;
- mask = pte_val(pteval) &
- (_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO);
- pte_val(pteval) = (_SEGMENT_ENTRY + __pa(pteptr)) | mask;
- }
-
- pmd_val(*pmdp) = pte_val(pteval);
-}
-
-int arch_prepare_hugepage(struct page *page)
-{
- unsigned long addr = page_to_phys(page);
- pte_t pte;
- pte_t *ptep;
- int i;
-
- if (MACHINE_HAS_HPAGE)
- return 0;
-
- ptep = (pte_t *) pte_alloc_one(&init_mm, addr);
- if (!ptep)
- return -ENOMEM;
-
- pte = mk_pte(page, PAGE_RW);
- for (i = 0; i < PTRS_PER_PTE; i++) {
- set_pte_at(&init_mm, addr + i * PAGE_SIZE, ptep + i, pte);
- pte_val(pte) += PAGE_SIZE;
- }
- page[1].index = (unsigned long) ptep;
- return 0;
-}
-
-void arch_release_hugepage(struct page *page)
-{
- pte_t *ptep;
-
- if (MACHINE_HAS_HPAGE)
- return;
-
- ptep = (pte_t *) page[1].index;
- if (!ptep)
- return;
- page_table_free(&init_mm, (unsigned long *) ptep);
- page[1].index = 0;
-}
-
-pte_t *huge_pte_alloc(struct mm_struct *mm,
- unsigned long addr, unsigned long sz)
-{
- pgd_t *pgdp;
- pud_t *pudp;
- pmd_t *pmdp = NULL;
-
- pgdp = pgd_offset(mm, addr);
- pudp = pud_alloc(mm, pgdp, addr);
- if (pudp)
- pmdp = pmd_alloc(mm, pudp, addr);
- return (pte_t *) pmdp;
-}
-
-pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
-{
- pgd_t *pgdp;
- pud_t *pudp;
- pmd_t *pmdp = NULL;
-
- pgdp = pgd_offset(mm, addr);
- if (pgd_present(*pgdp)) {
- pudp = pud_offset(pgdp, addr);
- if (pud_present(*pudp))
- pmdp = pmd_offset(pudp, addr);
- }
- return (pte_t *) pmdp;
-}
-
-int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
-{
- return 0;
-}
-
-struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
- int write)
-{
- return ERR_PTR(-EINVAL);
-}
-
-int pmd_huge(pmd_t pmd)
-{
- if (!MACHINE_HAS_HPAGE)
- return 0;
-
- return !!(pmd_val(pmd) & _SEGMENT_ENTRY_LARGE);
-}
-
-int pud_huge(pud_t pud)
-{
- return 0;
-}
-
-struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
- pmd_t *pmdp, int write)
-{
- struct page *page;
-
- if (!MACHINE_HAS_HPAGE)
- return NULL;
-
- page = pmd_page(*pmdp);
- if (page)
- page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT);
- return page;
-}
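
follow_huge_pmd() indexes into the compound page with (address & ~HPAGE_MASK) >> PAGE_SHIFT. A small sketch of that subpage arithmetic, assuming the s390 values PAGE_SHIFT = 12 and HPAGE_SHIFT = 20:

#include <stdio.h>

/* Sketch of the subpage arithmetic in follow_huge_pmd(): the index of
 * the 4K page inside a 1M segment-backed huge page. User-space
 * illustration only. */
#define PAGE_SHIFT	12
#define HPAGE_SHIFT	20
#define HPAGE_MASK	(~((1UL << HPAGE_SHIFT) - 1))

int main(void)
{
	unsigned long address = 0x12345678;
	unsigned long subpage = (address & ~HPAGE_MASK) >> PAGE_SHIFT;

	/* 0x12345678 & 0xfffff = 0x45678; 0x45678 >> 12 = 69 */
	printf("address %#lx -> subpage index %lu\n", address, subpage);
	return 0;
}
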
diff --git a/ANDROID_3.4.5/arch/s390/mm/init.c b/ANDROID_3.4.5/arch/s390/mm/init.c
deleted file mode 100644
index 2bea0605..00000000
--- a/ANDROID_3.4.5/arch/s390/mm/init.c
+++ /dev/null
@@ -1,262 +0,0 @@
-/*
- * arch/s390/mm/init.c
- *
- * S390 version
- * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
- * Author(s): Hartmut Penner (hp@de.ibm.com)
- *
- * Derived from "arch/i386/mm/init.c"
- * Copyright (C) 1995 Linus Torvalds
- */
-
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/ptrace.h>
-#include <linux/mman.h>
-#include <linux/mm.h>
-#include <linux/swap.h>
-#include <linux/smp.h>
-#include <linux/init.h>
-#include <linux/pagemap.h>
-#include <linux/bootmem.h>
-#include <linux/pfn.h>
-#include <linux/poison.h>
-#include <linux/initrd.h>
-#include <linux/export.h>
-#include <linux/gfp.h>
-#include <asm/processor.h>
-#include <asm/uaccess.h>
-#include <asm/pgtable.h>
-#include <asm/pgalloc.h>
-#include <asm/dma.h>
-#include <asm/lowcore.h>
-#include <asm/tlb.h>
-#include <asm/tlbflush.h>
-#include <asm/sections.h>
-#include <asm/ctl_reg.h>
-
-pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE)));
-
-unsigned long empty_zero_page, zero_page_mask;
-EXPORT_SYMBOL(empty_zero_page);
-
-static unsigned long setup_zero_pages(void)
-{
- struct cpuid cpu_id;
- unsigned int order;
- unsigned long size;
- struct page *page;
- int i;
-
- get_cpu_id(&cpu_id);
- switch (cpu_id.machine) {
- case 0x9672: /* g5 */
- case 0x2064: /* z900 */
- case 0x2066: /* z900 */
- case 0x2084: /* z990 */
- case 0x2086: /* z990 */
- case 0x2094: /* z9-109 */
- case 0x2096: /* z9-109 */
- order = 0;
- break;
- case 0x2097: /* z10 */
- case 0x2098: /* z10 */
- default:
- order = 2;
- break;
- }
-
- empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
- if (!empty_zero_page)
- panic("Out of memory in setup_zero_pages");
-
- page = virt_to_page((void *) empty_zero_page);
- split_page(page, order);
- for (i = 1 << order; i > 0; i--) {
- SetPageReserved(page);
- page++;
- }
-
- size = PAGE_SIZE << order;
- zero_page_mask = (size - 1) & PAGE_MASK;
-
- return 1UL << order;
-}
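
setup_zero_pages() sizes the zero-page block by CPU model and derives zero_page_mask so that consecutive virtual addresses are served by different cache-colored copies of the zero page. A user-space sketch of the selection arithmetic; the base address is hypothetical:

#include <stdio.h>

/* Sketch, assuming 4K pages: how zero_page_mask spreads reads across
 * the 1 << order copies of the zero page, as the s390 ZERO_PAGE()
 * macro does. Illustration only, not the kernel macro. */
#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

int main(void)
{
	unsigned int order = 2;			/* z10 and later in the code above */
	unsigned long size = PAGE_SIZE << order;
	unsigned long zero_page_mask = (size - 1) & PAGE_MASK;
	unsigned long empty_zero_page = 0x100000;	/* hypothetical base */
	unsigned long vaddr;

	for (vaddr = 0; vaddr < 6 * PAGE_SIZE; vaddr += PAGE_SIZE)
		printf("vaddr %#lx -> zero page at %#lx\n", vaddr,
		       empty_zero_page + (vaddr & zero_page_mask));
	return 0;
}
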
-
-/*
- * paging_init() sets up the page tables
- */
-void __init paging_init(void)
-{
- unsigned long max_zone_pfns[MAX_NR_ZONES];
- unsigned long pgd_type, asce_bits;
-
- init_mm.pgd = swapper_pg_dir;
-#ifdef CONFIG_64BIT
- if (VMALLOC_END > (1UL << 42)) {
- asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
- pgd_type = _REGION2_ENTRY_EMPTY;
- } else {
- asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
- pgd_type = _REGION3_ENTRY_EMPTY;
- }
-#else
- asce_bits = _ASCE_TABLE_LENGTH;
- pgd_type = _SEGMENT_ENTRY_EMPTY;
-#endif
- S390_lowcore.kernel_asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
- clear_table((unsigned long *) init_mm.pgd, pgd_type,
- sizeof(unsigned long)*2048);
- vmem_map_init();
-
- /* enable virtual mapping in kernel mode */
- __ctl_load(S390_lowcore.kernel_asce, 1, 1);
- __ctl_load(S390_lowcore.kernel_asce, 7, 7);
- __ctl_load(S390_lowcore.kernel_asce, 13, 13);
- arch_local_irq_restore(4UL << (BITS_PER_LONG - 8));
-
- atomic_set(&init_mm.context.attach_count, 1);
-
- sparse_memory_present_with_active_regions(MAX_NUMNODES);
- sparse_init();
- memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
- max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
- max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
- free_area_init_nodes(max_zone_pfns);
- fault_init();
-}
-
-void __init mem_init(void)
-{
- unsigned long codesize, reservedpages, datasize, initsize;
-
- max_mapnr = num_physpages = max_low_pfn;
- high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
-
-	/* Set up guest page hinting */
- cmma_init();
-
- /* this will put all low memory onto the freelists */
- totalram_pages += free_all_bootmem();
- totalram_pages -= setup_zero_pages(); /* Setup zeroed pages. */
-
- reservedpages = 0;
-
- codesize = (unsigned long) &_etext - (unsigned long) &_text;
- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
- initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
- printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
- nr_free_pages() << (PAGE_SHIFT-10),
- max_mapnr << (PAGE_SHIFT-10),
- codesize >> 10,
- reservedpages << (PAGE_SHIFT-10),
-	       datasize >> 10,
- initsize >> 10);
- printk("Write protected kernel read-only data: %#lx - %#lx\n",
- (unsigned long)&_stext,
- PFN_ALIGN((unsigned long)&_eshared) - 1);
-}
-
-#ifdef CONFIG_DEBUG_PAGEALLOC
-void kernel_map_pages(struct page *page, int numpages, int enable)
-{
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
- unsigned long address;
- int i;
-
- for (i = 0; i < numpages; i++) {
- address = page_to_phys(page + i);
- pgd = pgd_offset_k(address);
- pud = pud_offset(pgd, address);
- pmd = pmd_offset(pud, address);
- pte = pte_offset_kernel(pmd, address);
- if (!enable) {
- __ptep_ipte(address, pte);
- pte_val(*pte) = _PAGE_TYPE_EMPTY;
- continue;
- }
- *pte = mk_pte_phys(address, __pgprot(_PAGE_TYPE_RW));
- /* Flush cpu write queue. */
- mb();
- }
-}
-#endif
-
-void free_init_pages(char *what, unsigned long begin, unsigned long end)
-{
- unsigned long addr = begin;
-
- if (begin >= end)
- return;
- for (; addr < end; addr += PAGE_SIZE) {
- ClearPageReserved(virt_to_page(addr));
- init_page_count(virt_to_page(addr));
- memset((void *)(addr & PAGE_MASK), POISON_FREE_INITMEM,
- PAGE_SIZE);
- free_page(addr);
- totalram_pages++;
- }
- printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
-}
-
-void free_initmem(void)
-{
- free_init_pages("unused kernel memory",
- (unsigned long)&__init_begin,
- (unsigned long)&__init_end);
-}
-
-#ifdef CONFIG_BLK_DEV_INITRD
-void free_initrd_mem(unsigned long start, unsigned long end)
-{
- free_init_pages("initrd memory", start, end);
-}
-#endif
-
-#ifdef CONFIG_MEMORY_HOTPLUG
-int arch_add_memory(int nid, u64 start, u64 size)
-{
- unsigned long zone_start_pfn, zone_end_pfn, nr_pages;
- unsigned long start_pfn = PFN_DOWN(start);
- unsigned long size_pages = PFN_DOWN(size);
- struct zone *zone;
- int rc;
-
- rc = vmem_add_mapping(start, size);
- if (rc)
- return rc;
- for_each_zone(zone) {
- if (zone_idx(zone) != ZONE_MOVABLE) {
- /* Add range within existing zone limits */
- zone_start_pfn = zone->zone_start_pfn;
- zone_end_pfn = zone->zone_start_pfn +
- zone->spanned_pages;
- } else {
- /* Add remaining range to ZONE_MOVABLE */
- zone_start_pfn = start_pfn;
- zone_end_pfn = start_pfn + size_pages;
- }
- if (start_pfn < zone_start_pfn || start_pfn >= zone_end_pfn)
- continue;
- nr_pages = (start_pfn + size_pages > zone_end_pfn) ?
- zone_end_pfn - start_pfn : size_pages;
- rc = __add_pages(nid, zone, start_pfn, nr_pages);
- if (rc)
- break;
- start_pfn += nr_pages;
- size_pages -= nr_pages;
- if (!size_pages)
- break;
- }
- if (rc)
- vmem_remove_mapping(start, size);
- return rc;
-}
-#endif /* CONFIG_MEMORY_HOTPLUG */
diff --git a/ANDROID_3.4.5/arch/s390/mm/maccess.c b/ANDROID_3.4.5/arch/s390/mm/maccess.c
deleted file mode 100644
index e1335dc2..00000000
--- a/ANDROID_3.4.5/arch/s390/mm/maccess.c
+++ /dev/null
@@ -1,168 +0,0 @@
-/*
- * Access kernel memory without faulting -- s390 specific implementation.
- *
- * Copyright IBM Corp. 2009
- *
- * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
- *
- */
-
-#include <linux/uaccess.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/gfp.h>
-#include <asm/ctl_reg.h>
-
-/*
- * This function writes to kernel memory bypassing DAT and possible
- * write protection. It copies one to four bytes from src to dst
- * using the stura instruction.
- * Returns the number of bytes copied or -EFAULT.
- */
-static long probe_kernel_write_odd(void *dst, const void *src, size_t size)
-{
- unsigned long count, aligned;
- int offset, mask;
- int rc = -EFAULT;
-
- aligned = (unsigned long) dst & ~3UL;
- offset = (unsigned long) dst & 3;
- count = min_t(unsigned long, 4 - offset, size);
- mask = (0xf << (4 - count)) & 0xf;
- mask >>= offset;
- asm volatile(
- " bras 1,0f\n"
- " icm 0,0,0(%3)\n"
- "0: l 0,0(%1)\n"
- " lra %1,0(%1)\n"
- "1: ex %2,0(1)\n"
- "2: stura 0,%1\n"
- " la %0,0\n"
- "3:\n"
- EX_TABLE(0b,3b) EX_TABLE(1b,3b) EX_TABLE(2b,3b)
- : "+d" (rc), "+a" (aligned)
- : "a" (mask), "a" (src) : "cc", "memory", "0", "1");
- return rc ? rc : count;
-}
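
The mask fed to the ex/icm pair above selects which bytes of the aligned word the stura store may change. A user-space sketch of that mask arithmetic (icm_mask() is a hypothetical stand-in, illustration only):

#include <stdio.h>
#include <stddef.h>

/* Compute the insert-character mask used by probe_kernel_write_odd():
 * the leftmost "count" bits of the low nibble, shifted to the byte
 * offset of dst within its aligned word. */
static unsigned int icm_mask(unsigned long dst, size_t size, size_t *count)
{
	unsigned int offset = dst & 3;
	unsigned int mask;

	*count = (4 - offset < size) ? 4 - offset : size;	/* bytes this round */
	mask = (0xf << (4 - *count)) & 0xf;
	return mask >> offset;
}

int main(void)
{
	size_t count;
	unsigned int mask = icm_mask(0x1002, 1, &count);

	printf("count=%zu mask=%#x\n", count, mask);	/* count=1 mask=0x2 */
	return 0;
}
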
-
-long probe_kernel_write(void *dst, const void *src, size_t size)
-{
- long copied = 0;
-
- while (size) {
- copied = probe_kernel_write_odd(dst, src, size);
- if (copied < 0)
- break;
- dst += copied;
- src += copied;
- size -= copied;
- }
- return copied < 0 ? -EFAULT : 0;
-}
-
-static int __memcpy_real(void *dest, void *src, size_t count)
-{
- register unsigned long _dest asm("2") = (unsigned long) dest;
- register unsigned long _len1 asm("3") = (unsigned long) count;
- register unsigned long _src asm("4") = (unsigned long) src;
- register unsigned long _len2 asm("5") = (unsigned long) count;
- int rc = -EFAULT;
-
- asm volatile (
- "0: mvcle %1,%2,0x0\n"
- "1: jo 0b\n"
- " lhi %0,0x0\n"
- "2:\n"
- EX_TABLE(1b,2b)
- : "+d" (rc), "+d" (_dest), "+d" (_src), "+d" (_len1),
- "+d" (_len2), "=m" (*((long *) dest))
- : "m" (*((long *) src))
- : "cc", "memory");
- return rc;
-}
-
-/*
- * Copy memory in real mode (kernel to kernel)
- */
-int memcpy_real(void *dest, void *src, size_t count)
-{
- unsigned long flags;
- int rc;
-
- if (!count)
- return 0;
- local_irq_save(flags);
- __arch_local_irq_stnsm(0xfbUL);
- rc = __memcpy_real(dest, src, count);
- local_irq_restore(flags);
- return rc;
-}
-
-/*
- * Copy memory to absolute zero
- */
-void copy_to_absolute_zero(void *dest, void *src, size_t count)
-{
- unsigned long cr0;
-
- BUG_ON((unsigned long) dest + count >= sizeof(struct _lowcore));
- preempt_disable();
- __ctl_store(cr0, 0, 0);
- __ctl_clear_bit(0, 28); /* disable lowcore protection */
- memcpy_real(dest + store_prefix(), src, count);
- __ctl_load(cr0, 0, 0);
- preempt_enable();
-}
-
-/*
- * Copy memory from kernel (real) to user (virtual)
- */
-int copy_to_user_real(void __user *dest, void *src, size_t count)
-{
- int offs = 0, size, rc;
- char *buf;
-
- buf = (char *) __get_free_page(GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
- rc = -EFAULT;
- while (offs < count) {
- size = min(PAGE_SIZE, count - offs);
- if (memcpy_real(buf, src + offs, size))
- goto out;
- if (copy_to_user(dest + offs, buf, size))
- goto out;
- offs += size;
- }
- rc = 0;
-out:
- free_page((unsigned long) buf);
- return rc;
-}
-
-/*
- * Copy memory from user (virtual) to kernel (real)
- */
-int copy_from_user_real(void *dest, void __user *src, size_t count)
-{
- int offs = 0, size, rc;
- char *buf;
-
- buf = (char *) __get_free_page(GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
- rc = -EFAULT;
- while (offs < count) {
- size = min(PAGE_SIZE, count - offs);
- if (copy_from_user(buf, src + offs, size))
- goto out;
- if (memcpy_real(dest + offs, buf, size))
- goto out;
- offs += size;
- }
- rc = 0;
-out:
- free_page((unsigned long) buf);
- return rc;
-}
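
Both copy_to_user_real() and copy_from_user_real() stage the transfer through a single kernel page, because memcpy_real() operates on real addresses while copy_{to,from}_user() needs virtual ones. A user-space sketch of the same bounce-buffer loop (chunked_copy() is a hypothetical stand-in):

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE	4096UL

/* Move data in page-sized chunks through a staging buffer, mirroring
 * the offs/size loop in the two helpers above. */
static int chunked_copy(char *dest, const char *src, size_t count)
{
	char buf[PAGE_SIZE];
	size_t offs = 0, size;

	while (offs < count) {
		size = count - offs < PAGE_SIZE ? count - offs : PAGE_SIZE;
		memcpy(buf, src + offs, size);	/* stage 1: into the buffer */
		memcpy(dest + offs, buf, size);	/* stage 2: out of the buffer */
		offs += size;
	}
	return 0;
}

int main(void)
{
	char src[10000], dest[10000];

	memset(src, 0x5a, sizeof(src));
	chunked_copy(dest, src, sizeof(src));
	printf("copied ok: %d\n", memcmp(src, dest, sizeof(src)) == 0);
	return 0;
}
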
diff --git a/ANDROID_3.4.5/arch/s390/mm/mmap.c b/ANDROID_3.4.5/arch/s390/mm/mmap.c
deleted file mode 100644
index 2857c484..00000000
--- a/ANDROID_3.4.5/arch/s390/mm/mmap.c
+++ /dev/null
@@ -1,178 +0,0 @@
-/*
- * linux/arch/s390/mm/mmap.c
- *
- * flexible mmap layout support
- *
- * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- *
- * Started by Ingo Molnar <mingo@elte.hu>
- */
-
-#include <linux/personality.h>
-#include <linux/mm.h>
-#include <linux/mman.h>
-#include <linux/module.h>
-#include <linux/random.h>
-#include <linux/compat.h>
-#include <asm/pgalloc.h>
-
-static unsigned long stack_maxrandom_size(void)
-{
- if (!(current->flags & PF_RANDOMIZE))
- return 0;
- if (current->personality & ADDR_NO_RANDOMIZE)
- return 0;
- return STACK_RND_MASK << PAGE_SHIFT;
-}
-
-/*
- * Top of mmap area (just below the process stack).
- *
- * Leave at least a ~32 MB hole.
- */
-#define MIN_GAP (32*1024*1024)
-#define MAX_GAP (STACK_TOP/6*5)
-
-static inline int mmap_is_legacy(void)
-{
- if (current->personality & ADDR_COMPAT_LAYOUT)
- return 1;
- if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
- return 1;
- return sysctl_legacy_va_layout;
-}
-
-static unsigned long mmap_rnd(void)
-{
- if (!(current->flags & PF_RANDOMIZE))
- return 0;
- /* 8MB randomization for mmap_base */
- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
-}
-
-static inline unsigned long mmap_base(void)
-{
- unsigned long gap = rlimit(RLIMIT_STACK);
-
- if (gap < MIN_GAP)
- gap = MIN_GAP;
- else if (gap > MAX_GAP)
- gap = MAX_GAP;
- gap &= PAGE_MASK;
- return STACK_TOP - stack_maxrandom_size() - mmap_rnd() - gap;
-}
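
mmap_base() clamps the stack rlimit into [MIN_GAP, MAX_GAP] and subtracts the page-aligned gap, plus the randomization terms, from STACK_TOP. A sketch of the clamping with a hypothetical STACK_TOP and the randomization omitted:

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define STACK_TOP	(1UL << 42)	/* hypothetical example value */
#define MIN_GAP		(32UL * 1024 * 1024)
#define MAX_GAP		(STACK_TOP / 6 * 5)

/* Same gap clamping as mmap_base() above, randomization omitted. */
static unsigned long mmap_base(unsigned long stack_rlimit)
{
	unsigned long gap = stack_rlimit;

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;
	gap &= PAGE_MASK;
	return STACK_TOP - gap;
}

int main(void)
{
	/* An 8M stack rlimit is clamped up to the 32M minimum gap. */
	printf("%#lx\n", mmap_base(8UL * 1024 * 1024));
	return 0;
}
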
-
-#ifndef CONFIG_64BIT
-
-/*
- * This function, called very early during the creation of a new
- * process VM image, sets up which VM layout function to use:
- */
-void arch_pick_mmap_layout(struct mm_struct *mm)
-{
- /*
- * Fall back to the standard layout if the personality
- * bit is set, or if the expected stack growth is unlimited:
- */
- if (mmap_is_legacy()) {
- mm->mmap_base = TASK_UNMAPPED_BASE;
- mm->get_unmapped_area = arch_get_unmapped_area;
- mm->unmap_area = arch_unmap_area;
- } else {
- mm->mmap_base = mmap_base();
- mm->get_unmapped_area = arch_get_unmapped_area_topdown;
- mm->unmap_area = arch_unmap_area_topdown;
- }
-}
-
-#else
-
-int s390_mmap_check(unsigned long addr, unsigned long len)
-{
- if (!is_compat_task() &&
- len >= TASK_SIZE && TASK_SIZE < (1UL << 53))
- return crst_table_upgrade(current->mm, 1UL << 53);
- return 0;
-}
-
-static unsigned long
-s390_get_unmapped_area(struct file *filp, unsigned long addr,
- unsigned long len, unsigned long pgoff, unsigned long flags)
-{
- struct mm_struct *mm = current->mm;
- unsigned long area;
- int rc;
-
- area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
- if (!(area & ~PAGE_MASK))
- return area;
- if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) {
- /* Upgrade the page table to 4 levels and retry. */
- rc = crst_table_upgrade(mm, 1UL << 53);
- if (rc)
- return (unsigned long) rc;
- area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
- }
- return area;
-}
-
-static unsigned long
-s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
- const unsigned long len, const unsigned long pgoff,
- const unsigned long flags)
-{
- struct mm_struct *mm = current->mm;
- unsigned long area;
- int rc;
-
- area = arch_get_unmapped_area_topdown(filp, addr, len, pgoff, flags);
- if (!(area & ~PAGE_MASK))
- return area;
- if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) {
- /* Upgrade the page table to 4 levels and retry. */
- rc = crst_table_upgrade(mm, 1UL << 53);
- if (rc)
- return (unsigned long) rc;
- area = arch_get_unmapped_area_topdown(filp, addr, len,
- pgoff, flags);
- }
- return area;
-}
-/*
- * This function, called very early during the creation of a new
- * process VM image, sets up which VM layout function to use:
- */
-void arch_pick_mmap_layout(struct mm_struct *mm)
-{
- /*
- * Fall back to the standard layout if the personality
- * bit is set, or if the expected stack growth is unlimited:
- */
- if (mmap_is_legacy()) {
- mm->mmap_base = TASK_UNMAPPED_BASE;
- mm->get_unmapped_area = s390_get_unmapped_area;
- mm->unmap_area = arch_unmap_area;
- } else {
- mm->mmap_base = mmap_base();
- mm->get_unmapped_area = s390_get_unmapped_area_topdown;
- mm->unmap_area = arch_unmap_area_topdown;
- }
-}
-
-#endif
diff --git a/ANDROID_3.4.5/arch/s390/mm/page-states.c b/ANDROID_3.4.5/arch/s390/mm/page-states.c
deleted file mode 100644
index a90d45e9..00000000
--- a/ANDROID_3.4.5/arch/s390/mm/page-states.c
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
- * Copyright IBM Corp. 2008
- *
- * Guest page hinting for unused pages.
- *
- * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
- */
-
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/types.h>
-#include <linux/mm.h>
-#include <linux/gfp.h>
-#include <linux/init.h>
-
-#define ESSA_SET_STABLE 1
-#define ESSA_SET_UNUSED 2
-
-static int cmma_flag = 1;
-
-static int __init cmma(char *str)
-{
- char *parm;
-
- parm = strstrip(str);
- if (strcmp(parm, "yes") == 0 || strcmp(parm, "on") == 0) {
- cmma_flag = 1;
- return 1;
- }
- cmma_flag = 0;
- if (strcmp(parm, "no") == 0 || strcmp(parm, "off") == 0)
- return 1;
- return 0;
-}
-__setup("cmma=", cmma);
-
-void __init cmma_init(void)
-{
- register unsigned long tmp asm("0") = 0;
- register int rc asm("1") = -EOPNOTSUPP;
-
- if (!cmma_flag)
- return;
- asm volatile(
- " .insn rrf,0xb9ab0000,%1,%1,0,0\n"
- "0: la %0,0\n"
- "1:\n"
- EX_TABLE(0b,1b)
- : "+&d" (rc), "+&d" (tmp));
- if (rc)
- cmma_flag = 0;
-}
-
-static inline void set_page_unstable(struct page *page, int order)
-{
- int i, rc;
-
- for (i = 0; i < (1 << order); i++)
- asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
- : "=&d" (rc)
- : "a" (page_to_phys(page + i)),
- "i" (ESSA_SET_UNUSED));
-}
-
-void arch_free_page(struct page *page, int order)
-{
- if (!cmma_flag)
- return;
- set_page_unstable(page, order);
-}
-
-static inline void set_page_stable(struct page *page, int order)
-{
- int i, rc;
-
- for (i = 0; i < (1 << order); i++)
- asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
- : "=&d" (rc)
- : "a" (page_to_phys(page + i)),
- "i" (ESSA_SET_STABLE));
-}
-
-void arch_alloc_page(struct page *page, int order)
-{
- if (!cmma_flag)
- return;
- set_page_stable(page, order);
-}
-
-void arch_set_page_states(int make_stable)
-{
- unsigned long flags, order, t;
- struct list_head *l;
- struct page *page;
- struct zone *zone;
-
- if (!cmma_flag)
- return;
- if (make_stable)
- drain_local_pages(NULL);
- for_each_populated_zone(zone) {
- spin_lock_irqsave(&zone->lock, flags);
- for_each_migratetype_order(order, t) {
- list_for_each(l, &zone->free_area[order].free_list[t]) {
- page = list_entry(l, struct page, lru);
- if (make_stable)
- set_page_stable(page, order);
- else
- set_page_unstable(page, order);
- }
- }
- spin_unlock_irqrestore(&zone->lock, flags);
- }
-}
diff --git a/ANDROID_3.4.5/arch/s390/mm/pageattr.c b/ANDROID_3.4.5/arch/s390/mm/pageattr.c
deleted file mode 100644
index b36537a5..00000000
--- a/ANDROID_3.4.5/arch/s390/mm/pageattr.c
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright IBM Corp. 2011
- * Author(s): Jan Glauber <jang@linux.vnet.ibm.com>
- */
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/hugetlb.h>
-#include <asm/cacheflush.h>
-#include <asm/pgtable.h>
-
-static void change_page_attr(unsigned long addr, int numpages,
- pte_t (*set) (pte_t))
-{
- pte_t *ptep, pte;
- pmd_t *pmdp;
- pud_t *pudp;
- pgd_t *pgdp;
- int i;
-
- for (i = 0; i < numpages; i++) {
- pgdp = pgd_offset(&init_mm, addr);
- pudp = pud_offset(pgdp, addr);
- pmdp = pmd_offset(pudp, addr);
- if (pmd_huge(*pmdp)) {
- WARN_ON_ONCE(1);
- continue;
- }
- ptep = pte_offset_kernel(pmdp, addr);
-
- pte = *ptep;
- pte = set(pte);
- __ptep_ipte(addr, ptep);
- *ptep = pte;
- addr += PAGE_SIZE;
- }
-}
-
-int set_memory_ro(unsigned long addr, int numpages)
-{
- change_page_attr(addr, numpages, pte_wrprotect);
- return 0;
-}
-EXPORT_SYMBOL_GPL(set_memory_ro);
-
-int set_memory_rw(unsigned long addr, int numpages)
-{
- change_page_attr(addr, numpages, pte_mkwrite);
- return 0;
-}
-EXPORT_SYMBOL_GPL(set_memory_rw);
-
-/* not possible */
-int set_memory_nx(unsigned long addr, int numpages)
-{
- return 0;
-}
-EXPORT_SYMBOL_GPL(set_memory_nx);
-
-int set_memory_x(unsigned long addr, int numpages)
-{
- return 0;
-}
diff --git a/ANDROID_3.4.5/arch/s390/mm/pgtable.c b/ANDROID_3.4.5/arch/s390/mm/pgtable.c
deleted file mode 100644
index 6e765bf0..00000000
--- a/ANDROID_3.4.5/arch/s390/mm/pgtable.c
+++ /dev/null
@@ -1,871 +0,0 @@
-/*
- * Copyright IBM Corp. 2007,2011
- * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
- */
-
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/gfp.h>
-#include <linux/mm.h>
-#include <linux/swap.h>
-#include <linux/smp.h>
-#include <linux/highmem.h>
-#include <linux/pagemap.h>
-#include <linux/spinlock.h>
-#include <linux/module.h>
-#include <linux/quicklist.h>
-#include <linux/rcupdate.h>
-#include <linux/slab.h>
-
-#include <asm/pgtable.h>
-#include <asm/pgalloc.h>
-#include <asm/tlb.h>
-#include <asm/tlbflush.h>
-#include <asm/mmu_context.h>
-
-#ifndef CONFIG_64BIT
-#define ALLOC_ORDER 1
-#define FRAG_MASK 0x0f
-#else
-#define ALLOC_ORDER 2
-#define FRAG_MASK 0x03
-#endif
-
-unsigned long *crst_table_alloc(struct mm_struct *mm)
-{
- struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
-
- if (!page)
- return NULL;
- return (unsigned long *) page_to_phys(page);
-}
-
-void crst_table_free(struct mm_struct *mm, unsigned long *table)
-{
- free_pages((unsigned long) table, ALLOC_ORDER);
-}
-
-#ifdef CONFIG_64BIT
-int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
-{
- unsigned long *table, *pgd;
- unsigned long entry;
-
- BUG_ON(limit > (1UL << 53));
-repeat:
- table = crst_table_alloc(mm);
- if (!table)
- return -ENOMEM;
- spin_lock_bh(&mm->page_table_lock);
- if (mm->context.asce_limit < limit) {
- pgd = (unsigned long *) mm->pgd;
- if (mm->context.asce_limit <= (1UL << 31)) {
- entry = _REGION3_ENTRY_EMPTY;
- mm->context.asce_limit = 1UL << 42;
- mm->context.asce_bits = _ASCE_TABLE_LENGTH |
- _ASCE_USER_BITS |
- _ASCE_TYPE_REGION3;
- } else {
- entry = _REGION2_ENTRY_EMPTY;
- mm->context.asce_limit = 1UL << 53;
- mm->context.asce_bits = _ASCE_TABLE_LENGTH |
- _ASCE_USER_BITS |
- _ASCE_TYPE_REGION2;
- }
- crst_table_init(table, entry);
- pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
- mm->pgd = (pgd_t *) table;
- mm->task_size = mm->context.asce_limit;
- table = NULL;
- }
- spin_unlock_bh(&mm->page_table_lock);
- if (table)
- crst_table_free(mm, table);
- if (mm->context.asce_limit < limit)
- goto repeat;
- update_mm(mm, current);
- return 0;
-}
-
-void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
-{
- pgd_t *pgd;
-
- if (mm->context.asce_limit <= limit)
- return;
- __tlb_flush_mm(mm);
- while (mm->context.asce_limit > limit) {
- pgd = mm->pgd;
- switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
- case _REGION_ENTRY_TYPE_R2:
- mm->context.asce_limit = 1UL << 42;
- mm->context.asce_bits = _ASCE_TABLE_LENGTH |
- _ASCE_USER_BITS |
- _ASCE_TYPE_REGION3;
- break;
- case _REGION_ENTRY_TYPE_R3:
- mm->context.asce_limit = 1UL << 31;
- mm->context.asce_bits = _ASCE_TABLE_LENGTH |
- _ASCE_USER_BITS |
- _ASCE_TYPE_SEGMENT;
- break;
- default:
- BUG();
- }
- mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
- mm->task_size = mm->context.asce_limit;
- crst_table_free(mm, (unsigned long *) pgd);
- }
- update_mm(mm, current);
-}
-#endif
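
crst_table_upgrade() and crst_table_downgrade() step between three address-space limits, each served by a different top-level table type. A sketch of that mapping (labels follow the constants used above; illustration only):

#include <stdio.h>

/* Which top-level table type covers a given address-space limit, as
 * chosen during upgrade/downgrade above. */
static const char *asce_type(unsigned long limit)
{
	if (limit <= 1UL << 31)
		return "segment table (2G)";
	if (limit <= 1UL << 42)
		return "region-third table (4T)";
	if (limit <= 1UL << 53)
		return "region-second table (8P)";
	return "region-first table";
}

int main(void)
{
	printf("%s\n", asce_type(1UL << 31));
	printf("%s\n", asce_type(1UL << 42));
	printf("%s\n", asce_type(1UL << 53));
	return 0;
}
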
-
-#ifdef CONFIG_PGSTE
-
-/**
- * gmap_alloc - allocate a guest address space
- * @mm: pointer to the parent mm_struct
- *
- * Returns a guest address space structure.
- */
-struct gmap *gmap_alloc(struct mm_struct *mm)
-{
- struct gmap *gmap;
- struct page *page;
- unsigned long *table;
-
- gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
- if (!gmap)
- goto out;
- INIT_LIST_HEAD(&gmap->crst_list);
- gmap->mm = mm;
- page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
- if (!page)
- goto out_free;
- list_add(&page->lru, &gmap->crst_list);
- table = (unsigned long *) page_to_phys(page);
- crst_table_init(table, _REGION1_ENTRY_EMPTY);
- gmap->table = table;
- gmap->asce = _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH |
- _ASCE_USER_BITS | __pa(table);
- list_add(&gmap->list, &mm->context.gmap_list);
- return gmap;
-
-out_free:
- kfree(gmap);
-out:
- return NULL;
-}
-EXPORT_SYMBOL_GPL(gmap_alloc);
-
-static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table)
-{
- struct gmap_pgtable *mp;
- struct gmap_rmap *rmap;
- struct page *page;
-
- if (*table & _SEGMENT_ENTRY_INV)
- return 0;
- page = pfn_to_page(*table >> PAGE_SHIFT);
- mp = (struct gmap_pgtable *) page->index;
- list_for_each_entry(rmap, &mp->mapper, list) {
- if (rmap->entry != table)
- continue;
- list_del(&rmap->list);
- kfree(rmap);
- break;
- }
- *table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
- return 1;
-}
-
-static void gmap_flush_tlb(struct gmap *gmap)
-{
- if (MACHINE_HAS_IDTE)
- __tlb_flush_idte((unsigned long) gmap->table |
- _ASCE_TYPE_REGION1);
- else
- __tlb_flush_global();
-}
-
-/**
- * gmap_free - free a guest address space
- * @gmap: pointer to the guest address space structure
- */
-void gmap_free(struct gmap *gmap)
-{
- struct page *page, *next;
- unsigned long *table;
- int i;
-
- /* Flush tlb. */
- if (MACHINE_HAS_IDTE)
- __tlb_flush_idte((unsigned long) gmap->table |
- _ASCE_TYPE_REGION1);
- else
- __tlb_flush_global();
-
- /* Free all segment & region tables. */
- down_read(&gmap->mm->mmap_sem);
- spin_lock(&gmap->mm->page_table_lock);
- list_for_each_entry_safe(page, next, &gmap->crst_list, lru) {
- table = (unsigned long *) page_to_phys(page);
- if ((*table & _REGION_ENTRY_TYPE_MASK) == 0)
- /* Remove gmap rmap structures for segment table. */
- for (i = 0; i < PTRS_PER_PMD; i++, table++)
- gmap_unlink_segment(gmap, table);
- __free_pages(page, ALLOC_ORDER);
- }
- spin_unlock(&gmap->mm->page_table_lock);
- up_read(&gmap->mm->mmap_sem);
- list_del(&gmap->list);
- kfree(gmap);
-}
-EXPORT_SYMBOL_GPL(gmap_free);
-
-/**
- * gmap_enable - switch primary space to the guest address space
- * @gmap: pointer to the guest address space structure
- */
-void gmap_enable(struct gmap *gmap)
-{
- S390_lowcore.gmap = (unsigned long) gmap;
-}
-EXPORT_SYMBOL_GPL(gmap_enable);
-
-/**
- * gmap_disable - switch back to the standard primary address space
- * @gmap: pointer to the guest address space structure
- */
-void gmap_disable(struct gmap *gmap)
-{
- S390_lowcore.gmap = 0UL;
-}
-EXPORT_SYMBOL_GPL(gmap_disable);
-
-/*
- * gmap_alloc_table is assumed to be called with mmap_sem held
- */
-static int gmap_alloc_table(struct gmap *gmap,
- unsigned long *table, unsigned long init)
-{
- struct page *page;
- unsigned long *new;
-
-	/* since we don't free the gmap table until gmap_free we can unlock */
- spin_unlock(&gmap->mm->page_table_lock);
- page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
- spin_lock(&gmap->mm->page_table_lock);
- if (!page)
- return -ENOMEM;
- new = (unsigned long *) page_to_phys(page);
- crst_table_init(new, init);
- if (*table & _REGION_ENTRY_INV) {
- list_add(&page->lru, &gmap->crst_list);
- *table = (unsigned long) new | _REGION_ENTRY_LENGTH |
- (*table & _REGION_ENTRY_TYPE_MASK);
- } else
- __free_pages(page, ALLOC_ORDER);
- return 0;
-}
-
-/**
- * gmap_unmap_segment - unmap segment from the guest address space
- * @gmap: pointer to the guest address space structure
- * @to: address in the guest address space
- * @len: length of the memory area to unmap
- *
- * Returns 0 if the unmap succeeded, -EINVAL if not.
- */
-int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
-{
- unsigned long *table;
- unsigned long off;
- int flush;
-
- if ((to | len) & (PMD_SIZE - 1))
- return -EINVAL;
- if (len == 0 || to + len < to)
- return -EINVAL;
-
- flush = 0;
- down_read(&gmap->mm->mmap_sem);
- spin_lock(&gmap->mm->page_table_lock);
- for (off = 0; off < len; off += PMD_SIZE) {
- /* Walk the guest addr space page table */
- table = gmap->table + (((to + off) >> 53) & 0x7ff);
- if (*table & _REGION_ENTRY_INV)
- goto out;
- table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
- table = table + (((to + off) >> 42) & 0x7ff);
- if (*table & _REGION_ENTRY_INV)
- goto out;
- table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
- table = table + (((to + off) >> 31) & 0x7ff);
- if (*table & _REGION_ENTRY_INV)
- goto out;
- table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
- table = table + (((to + off) >> 20) & 0x7ff);
-
- /* Clear segment table entry in guest address space. */
- flush |= gmap_unlink_segment(gmap, table);
- *table = _SEGMENT_ENTRY_INV;
- }
-out:
- spin_unlock(&gmap->mm->page_table_lock);
- up_read(&gmap->mm->mmap_sem);
- if (flush)
- gmap_flush_tlb(gmap);
- return 0;
-}
-EXPORT_SYMBOL_GPL(gmap_unmap_segment);
-
-/**
- * gmap_map_segment - map a segment to the guest address space
- * @gmap: pointer to the guest address space structure
- * @from: source address in the parent address space
- * @to: target address in the guest address space
- * @len: length of the memory area to map
- *
- * Returns 0 if the map succeeded, -EINVAL or -ENOMEM if not.
- */
-int gmap_map_segment(struct gmap *gmap, unsigned long from,
- unsigned long to, unsigned long len)
-{
- unsigned long *table;
- unsigned long off;
- int flush;
-
- if ((from | to | len) & (PMD_SIZE - 1))
- return -EINVAL;
- if (len == 0 || from + len > PGDIR_SIZE ||
- from + len < from || to + len < to)
- return -EINVAL;
-
- flush = 0;
- down_read(&gmap->mm->mmap_sem);
- spin_lock(&gmap->mm->page_table_lock);
- for (off = 0; off < len; off += PMD_SIZE) {
- /* Walk the gmap address space page table */
- table = gmap->table + (((to + off) >> 53) & 0x7ff);
- if ((*table & _REGION_ENTRY_INV) &&
- gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY))
- goto out_unmap;
- table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
- table = table + (((to + off) >> 42) & 0x7ff);
- if ((*table & _REGION_ENTRY_INV) &&
- gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY))
- goto out_unmap;
- table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
- table = table + (((to + off) >> 31) & 0x7ff);
- if ((*table & _REGION_ENTRY_INV) &&
- gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY))
- goto out_unmap;
- table = (unsigned long *) (*table & _REGION_ENTRY_ORIGIN);
- table = table + (((to + off) >> 20) & 0x7ff);
-
- /* Store 'from' address in an invalid segment table entry. */
- flush |= gmap_unlink_segment(gmap, table);
- *table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | (from + off);
- }
- spin_unlock(&gmap->mm->page_table_lock);
- up_read(&gmap->mm->mmap_sem);
- if (flush)
- gmap_flush_tlb(gmap);
- return 0;
-
-out_unmap:
- spin_unlock(&gmap->mm->page_table_lock);
- up_read(&gmap->mm->mmap_sem);
- gmap_unmap_segment(gmap, to, len);
- return -ENOMEM;
-}
-EXPORT_SYMBOL_GPL(gmap_map_segment);
-
-/*
- * this function is assumed to be called with mmap_sem held
- */
-unsigned long __gmap_fault(unsigned long address, struct gmap *gmap)
-{
- unsigned long *table, vmaddr, segment;
- struct mm_struct *mm;
- struct gmap_pgtable *mp;
- struct gmap_rmap *rmap;
- struct vm_area_struct *vma;
- struct page *page;
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
-
- current->thread.gmap_addr = address;
- mm = gmap->mm;
- /* Walk the gmap address space page table */
- table = gmap->table + ((address >> 53) & 0x7ff);
- if (unlikely(*table & _REGION_ENTRY_INV))
- return -EFAULT;
- table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
- table = table + ((address >> 42) & 0x7ff);
- if (unlikely(*table & _REGION_ENTRY_INV))
- return -EFAULT;
- table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
- table = table + ((address >> 31) & 0x7ff);
- if (unlikely(*table & _REGION_ENTRY_INV))
- return -EFAULT;
- table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
- table = table + ((address >> 20) & 0x7ff);
-
- /* Convert the gmap address to an mm address. */
- segment = *table;
- if (likely(!(segment & _SEGMENT_ENTRY_INV))) {
- page = pfn_to_page(segment >> PAGE_SHIFT);
- mp = (struct gmap_pgtable *) page->index;
- return mp->vmaddr | (address & ~PMD_MASK);
- } else if (segment & _SEGMENT_ENTRY_RO) {
- vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
- vma = find_vma(mm, vmaddr);
- if (!vma || vma->vm_start > vmaddr)
- return -EFAULT;
-
- /* Walk the parent mm page table */
- pgd = pgd_offset(mm, vmaddr);
- pud = pud_alloc(mm, pgd, vmaddr);
- if (!pud)
- return -ENOMEM;
- pmd = pmd_alloc(mm, pud, vmaddr);
- if (!pmd)
- return -ENOMEM;
- if (!pmd_present(*pmd) &&
- __pte_alloc(mm, vma, pmd, vmaddr))
- return -ENOMEM;
- /* pmd now points to a valid segment table entry. */
- rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT);
- if (!rmap)
- return -ENOMEM;
- /* Link gmap segment table entry location to page table. */
- page = pmd_page(*pmd);
- mp = (struct gmap_pgtable *) page->index;
- rmap->entry = table;
- spin_lock(&mm->page_table_lock);
- list_add(&rmap->list, &mp->mapper);
- spin_unlock(&mm->page_table_lock);
- /* Set gmap segment table entry to page table. */
- *table = pmd_val(*pmd) & PAGE_MASK;
- return vmaddr | (address & ~PMD_MASK);
- }
- return -EFAULT;
-}
-
-unsigned long gmap_fault(unsigned long address, struct gmap *gmap)
-{
- unsigned long rc;
-
- down_read(&gmap->mm->mmap_sem);
- rc = __gmap_fault(address, gmap);
- up_read(&gmap->mm->mmap_sem);
-
- return rc;
-}
-EXPORT_SYMBOL_GPL(gmap_fault);
-
-void gmap_discard(unsigned long from, unsigned long to, struct gmap *gmap)
-{
- unsigned long *table, address, size;
- struct vm_area_struct *vma;
- struct gmap_pgtable *mp;
- struct page *page;
-
- down_read(&gmap->mm->mmap_sem);
- address = from;
- while (address < to) {
- /* Walk the gmap address space page table */
- table = gmap->table + ((address >> 53) & 0x7ff);
- if (unlikely(*table & _REGION_ENTRY_INV)) {
- address = (address + PMD_SIZE) & PMD_MASK;
- continue;
- }
- table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
- table = table + ((address >> 42) & 0x7ff);
- if (unlikely(*table & _REGION_ENTRY_INV)) {
- address = (address + PMD_SIZE) & PMD_MASK;
- continue;
- }
- table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
- table = table + ((address >> 31) & 0x7ff);
- if (unlikely(*table & _REGION_ENTRY_INV)) {
- address = (address + PMD_SIZE) & PMD_MASK;
- continue;
- }
- table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
- table = table + ((address >> 20) & 0x7ff);
- if (unlikely(*table & _SEGMENT_ENTRY_INV)) {
- address = (address + PMD_SIZE) & PMD_MASK;
- continue;
- }
- page = pfn_to_page(*table >> PAGE_SHIFT);
- mp = (struct gmap_pgtable *) page->index;
- vma = find_vma(gmap->mm, mp->vmaddr);
- size = min(to - address, PMD_SIZE - (address & ~PMD_MASK));
- zap_page_range(vma, mp->vmaddr | (address & ~PMD_MASK),
- size, NULL);
- address = (address + PMD_SIZE) & PMD_MASK;
- }
- up_read(&gmap->mm->mmap_sem);
-}
-EXPORT_SYMBOL_GPL(gmap_discard);
-
-void gmap_unmap_notifier(struct mm_struct *mm, unsigned long *table)
-{
- struct gmap_rmap *rmap, *next;
- struct gmap_pgtable *mp;
- struct page *page;
- int flush;
-
- flush = 0;
- spin_lock(&mm->page_table_lock);
- page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
- mp = (struct gmap_pgtable *) page->index;
- list_for_each_entry_safe(rmap, next, &mp->mapper, list) {
- *rmap->entry =
- _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
- list_del(&rmap->list);
- kfree(rmap);
- flush = 1;
- }
- spin_unlock(&mm->page_table_lock);
- if (flush)
- __tlb_flush_global();
-}
-
-static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
- unsigned long vmaddr)
-{
- struct page *page;
- unsigned long *table;
- struct gmap_pgtable *mp;
-
- page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
- if (!page)
- return NULL;
- mp = kmalloc(sizeof(*mp), GFP_KERNEL|__GFP_REPEAT);
- if (!mp) {
- __free_page(page);
- return NULL;
- }
- pgtable_page_ctor(page);
- mp->vmaddr = vmaddr & PMD_MASK;
- INIT_LIST_HEAD(&mp->mapper);
- page->index = (unsigned long) mp;
- atomic_set(&page->_mapcount, 3);
- table = (unsigned long *) page_to_phys(page);
- clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2);
- clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
- return table;
-}
-
-static inline void page_table_free_pgste(unsigned long *table)
-{
- struct page *page;
- struct gmap_pgtable *mp;
-
- page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
- mp = (struct gmap_pgtable *) page->index;
- BUG_ON(!list_empty(&mp->mapper));
- pgtable_page_dtor(page);
- atomic_set(&page->_mapcount, -1);
- kfree(mp);
- __free_page(page);
-}
-
-#else /* CONFIG_PGSTE */
-
-static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
- unsigned long vmaddr)
-{
- return NULL;
-}
-
-static inline void page_table_free_pgste(unsigned long *table)
-{
-}
-
-static inline void gmap_unmap_notifier(struct mm_struct *mm,
- unsigned long *table)
-{
-}
-
-#endif /* CONFIG_PGSTE */
-
-static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
-{
- unsigned int old, new;
-
- do {
- old = atomic_read(v);
- new = old ^ bits;
- } while (atomic_cmpxchg(v, old, new) != old);
- return new;
-}
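
atomic_xor_bits() is a compare-and-swap loop that xors a bit mask into page->_mapcount and returns the new value. A user-space equivalent using C11 atomics (sketch only):

#include <stdatomic.h>
#include <stdio.h>

/* Xor a bit mask into an atomic counter and return the new value,
 * retrying on contention; atomic_compare_exchange_weak() reloads
 * "old" on failure, so the loop always works on a fresh value. */
static unsigned int atomic_xor_bits(atomic_uint *v, unsigned int bits)
{
	unsigned int old = atomic_load(v);
	unsigned int new;

	do {
		new = old ^ bits;
	} while (!atomic_compare_exchange_weak(v, &old, new));
	return new;
}

int main(void)
{
	atomic_uint v = 1;

	printf("%#x\n", atomic_xor_bits(&v, 0x3));	/* 1 ^ 3 = 0x2 */
	return 0;
}
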
-
-/*
- * page table entry allocation/free routines.
- */
-unsigned long *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr)
-{
- struct page *page;
- unsigned long *table;
- unsigned int mask, bit;
-
- if (mm_has_pgste(mm))
- return page_table_alloc_pgste(mm, vmaddr);
- /* Allocate fragments of a 4K page as 1K/2K page table */
- spin_lock_bh(&mm->context.list_lock);
- mask = FRAG_MASK;
- if (!list_empty(&mm->context.pgtable_list)) {
- page = list_first_entry(&mm->context.pgtable_list,
- struct page, lru);
- table = (unsigned long *) page_to_phys(page);
- mask = atomic_read(&page->_mapcount);
- mask = mask | (mask >> 4);
- }
- if ((mask & FRAG_MASK) == FRAG_MASK) {
- spin_unlock_bh(&mm->context.list_lock);
- page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
- if (!page)
- return NULL;
- pgtable_page_ctor(page);
- atomic_set(&page->_mapcount, 1);
- table = (unsigned long *) page_to_phys(page);
- clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
- spin_lock_bh(&mm->context.list_lock);
- list_add(&page->lru, &mm->context.pgtable_list);
- } else {
- for (bit = 1; mask & bit; bit <<= 1)
- table += PTRS_PER_PTE;
- mask = atomic_xor_bits(&page->_mapcount, bit);
- if ((mask & FRAG_MASK) == FRAG_MASK)
- list_del(&page->lru);
- }
- spin_unlock_bh(&mm->context.list_lock);
- return table;
-}
-
-void page_table_free(struct mm_struct *mm, unsigned long *table)
-{
- struct page *page;
- unsigned int bit, mask;
-
- if (mm_has_pgste(mm)) {
- gmap_unmap_notifier(mm, table);
- return page_table_free_pgste(table);
- }
- /* Free 1K/2K page table fragment of a 4K page */
- page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
- bit = 1 << ((__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t)));
- spin_lock_bh(&mm->context.list_lock);
- if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
- list_del(&page->lru);
- mask = atomic_xor_bits(&page->_mapcount, bit);
- if (mask & FRAG_MASK)
- list_add(&page->lru, &mm->context.pgtable_list);
- spin_unlock_bh(&mm->context.list_lock);
- if (mask == 0) {
- pgtable_page_dtor(page);
- atomic_set(&page->_mapcount, -1);
- __free_page(page);
- }
-}
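
page_table_alloc()/page_table_free() carve each 4K page into page-table fragments tracked as allocation bits in page->_mapcount; on 64-bit, FRAG_MASK is 0x03, i.e. two 2K fragments per page. A user-space sketch of the bit bookkeeping, with a plain integer standing in for _mapcount:

#include <stdio.h>

#define FRAG_MASK	0x03	/* 64-bit value from the code above */

/* Find the first free fragment in a page's allocation mask, mirroring
 * the "for (bit = 1; mask & bit; bit <<= 1)" walk in page_table_alloc(). */
static int alloc_fragment(unsigned int *mask)
{
	unsigned int bit;
	int index = 0;

	if ((*mask & FRAG_MASK) == FRAG_MASK)
		return -1;	/* page full, a fresh page is needed */
	for (bit = 1; *mask & bit; bit <<= 1)
		index++;	/* skip fragments already in use */
	*mask ^= bit;		/* mark this fragment as allocated */
	return index;
}

int main(void)
{
	unsigned int mask = 0;

	printf("%d %d %d\n", alloc_fragment(&mask),
	       alloc_fragment(&mask), alloc_fragment(&mask));	/* 0 1 -1 */
	return 0;
}
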
-
-static void __page_table_free_rcu(void *table, unsigned bit)
-{
- struct page *page;
-
- if (bit == FRAG_MASK)
- return page_table_free_pgste(table);
- /* Free 1K/2K page table fragment of a 4K page */
- page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
- if (atomic_xor_bits(&page->_mapcount, bit) == 0) {
- pgtable_page_dtor(page);
- atomic_set(&page->_mapcount, -1);
- __free_page(page);
- }
-}
-
-void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table)
-{
- struct mm_struct *mm;
- struct page *page;
- unsigned int bit, mask;
-
- mm = tlb->mm;
- if (mm_has_pgste(mm)) {
- gmap_unmap_notifier(mm, table);
- table = (unsigned long *) (__pa(table) | FRAG_MASK);
- tlb_remove_table(tlb, table);
- return;
- }
- bit = 1 << ((__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t)));
- page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
- spin_lock_bh(&mm->context.list_lock);
- if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
- list_del(&page->lru);
- mask = atomic_xor_bits(&page->_mapcount, bit | (bit << 4));
- if (mask & FRAG_MASK)
- list_add_tail(&page->lru, &mm->context.pgtable_list);
- spin_unlock_bh(&mm->context.list_lock);
- table = (unsigned long *) (__pa(table) | (bit << 4));
- tlb_remove_table(tlb, table);
-}
-
-void __tlb_remove_table(void *_table)
-{
- const unsigned long mask = (FRAG_MASK << 4) | FRAG_MASK;
- void *table = (void *)((unsigned long) _table & ~mask);
- unsigned type = (unsigned long) _table & mask;
-
- if (type)
- __page_table_free_rcu(table, type);
- else
- free_pages((unsigned long) table, ALLOC_ORDER);
-}
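
page_table_free_rcu() and __tlb_remove_table() smuggle the fragment-type bits in the low bits of the aligned table address until the grace period has passed. A sketch of that encode/decode, using the 64-bit masks and a hypothetical address:

#include <stdio.h>

#define FRAG_MASK	0x03UL
#define TYPE_MASK	((FRAG_MASK << 4) | FRAG_MASK)

int main(void)
{
	unsigned long table = 0x100800;			/* 2K aligned, hypothetical */
	unsigned long tagged = table | (0x1UL << 4);	/* encode fragment bit 1 */

	/* __tlb_remove_table() splits the tagged pointer back apart: */
	printf("table %#lx type %#lx\n",
	       tagged & ~TYPE_MASK, tagged & TYPE_MASK);
	return 0;
}
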
-
-static void tlb_remove_table_smp_sync(void *arg)
-{
- /* Simply deliver the interrupt */
-}
-
-static void tlb_remove_table_one(void *table)
-{
- /*
- * This isn't an RCU grace period and hence the page-tables cannot be
- * assumed to be actually RCU-freed.
- *
- * It is however sufficient for software page-table walkers that rely
- * on IRQ disabling. See the comment near struct mmu_table_batch.
- */
- smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
- __tlb_remove_table(table);
-}
-
-static void tlb_remove_table_rcu(struct rcu_head *head)
-{
- struct mmu_table_batch *batch;
- int i;
-
- batch = container_of(head, struct mmu_table_batch, rcu);
-
- for (i = 0; i < batch->nr; i++)
- __tlb_remove_table(batch->tables[i]);
-
- free_page((unsigned long)batch);
-}
-
-void tlb_table_flush(struct mmu_gather *tlb)
-{
- struct mmu_table_batch **batch = &tlb->batch;
-
- if (*batch) {
- __tlb_flush_mm(tlb->mm);
- call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
- *batch = NULL;
- }
-}
-
-void tlb_remove_table(struct mmu_gather *tlb, void *table)
-{
- struct mmu_table_batch **batch = &tlb->batch;
-
- if (*batch == NULL) {
- *batch = (struct mmu_table_batch *)
- __get_free_page(GFP_NOWAIT | __GFP_NOWARN);
- if (*batch == NULL) {
- __tlb_flush_mm(tlb->mm);
- tlb_remove_table_one(table);
- return;
- }
- (*batch)->nr = 0;
- }
- (*batch)->tables[(*batch)->nr++] = table;
- if ((*batch)->nr == MAX_TABLE_BATCH)
- tlb_table_flush(tlb);
-}
-
-/*
- * switch on pgstes for the current userspace process (for kvm)
- */
-int s390_enable_sie(void)
-{
- struct task_struct *tsk = current;
- struct mm_struct *mm, *old_mm;
-
-	/* Do we have a switched amode? If not, we cannot do sie */
- if (user_mode == HOME_SPACE_MODE)
- return -EINVAL;
-
-	/* Do we have pgstes? If yes, we are done */
- if (mm_has_pgste(tsk->mm))
- return 0;
-
-	/* let's check if we are allowed to replace the mm */
- task_lock(tsk);
- if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
-#ifdef CONFIG_AIO
- !hlist_empty(&tsk->mm->ioctx_list) ||
-#endif
- tsk->mm != tsk->active_mm) {
- task_unlock(tsk);
- return -EINVAL;
- }
- task_unlock(tsk);
-
-	/* we copy the mm and let dup_mm create the page tables with pgstes */
- tsk->mm->context.alloc_pgste = 1;
- mm = dup_mm(tsk);
- tsk->mm->context.alloc_pgste = 0;
- if (!mm)
- return -ENOMEM;
-
-	/* Now let's check again if something happened */
- task_lock(tsk);
- if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
-#ifdef CONFIG_AIO
- !hlist_empty(&tsk->mm->ioctx_list) ||
-#endif
- tsk->mm != tsk->active_mm) {
- mmput(mm);
- task_unlock(tsk);
- return -EINVAL;
- }
-
- /* ok, we are alone. No ptrace, no threads, etc. */
- old_mm = tsk->mm;
- tsk->mm = tsk->active_mm = mm;
- preempt_disable();
- update_mm(mm, tsk);
- atomic_inc(&mm->context.attach_count);
- atomic_dec(&old_mm->context.attach_count);
- cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
- preempt_enable();
- task_unlock(tsk);
- mmput(old_mm);
- return 0;
-}
-EXPORT_SYMBOL_GPL(s390_enable_sie);
-
-#if defined(CONFIG_DEBUG_PAGEALLOC) && defined(CONFIG_HIBERNATION)
-bool kernel_page_present(struct page *page)
-{
- unsigned long addr;
- int cc;
-
- addr = page_to_phys(page);
- asm volatile(
- " lra %1,0(%1)\n"
- " ipm %0\n"
- " srl %0,28"
- : "=d" (cc), "+a" (addr) : : "cc");
- return cc == 0;
-}
-#endif /* CONFIG_HIBERNATION && CONFIG_DEBUG_PAGEALLOC */
diff --git a/ANDROID_3.4.5/arch/s390/mm/vmem.c b/ANDROID_3.4.5/arch/s390/mm/vmem.c
deleted file mode 100644
index 4799383e..00000000
--- a/ANDROID_3.4.5/arch/s390/mm/vmem.c
+++ /dev/null
@@ -1,388 +0,0 @@
-/*
- * arch/s390/mm/vmem.c
- *
- * Copyright IBM Corp. 2006
- * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
- */
-
-#include <linux/bootmem.h>
-#include <linux/pfn.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/list.h>
-#include <linux/hugetlb.h>
-#include <linux/slab.h>
-#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
-#include <asm/setup.h>
-#include <asm/tlbflush.h>
-#include <asm/sections.h>
-
-static DEFINE_MUTEX(vmem_mutex);
-
-struct memory_segment {
- struct list_head list;
- unsigned long start;
- unsigned long size;
-};
-
-static LIST_HEAD(mem_segs);
-
-static void __ref *vmem_alloc_pages(unsigned int order)
-{
- if (slab_is_available())
- return (void *)__get_free_pages(GFP_KERNEL, order);
- return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
-}
-
-static inline pud_t *vmem_pud_alloc(void)
-{
- pud_t *pud = NULL;
-
-#ifdef CONFIG_64BIT
- pud = vmem_alloc_pages(2);
- if (!pud)
- return NULL;
- clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
-#endif
- return pud;
-}
-
-static inline pmd_t *vmem_pmd_alloc(void)
-{
- pmd_t *pmd = NULL;
-
-#ifdef CONFIG_64BIT
- pmd = vmem_alloc_pages(2);
- if (!pmd)
- return NULL;
- clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
-#endif
- return pmd;
-}
-
-static pte_t __ref *vmem_pte_alloc(unsigned long address)
-{
- pte_t *pte;
-
- if (slab_is_available())
- pte = (pte_t *) page_table_alloc(&init_mm, address);
- else
- pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t));
- if (!pte)
- return NULL;
- clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY,
- PTRS_PER_PTE * sizeof(pte_t));
- return pte;
-}
-
-/*
- * Add a physical memory range to the 1:1 mapping.
- */
-static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
-{
- unsigned long address;
- pgd_t *pg_dir;
- pud_t *pu_dir;
- pmd_t *pm_dir;
- pte_t *pt_dir;
- pte_t pte;
- int ret = -ENOMEM;
-
- for (address = start; address < start + size; address += PAGE_SIZE) {
- pg_dir = pgd_offset_k(address);
- if (pgd_none(*pg_dir)) {
- pu_dir = vmem_pud_alloc();
- if (!pu_dir)
- goto out;
- pgd_populate(&init_mm, pg_dir, pu_dir);
- }
-
- pu_dir = pud_offset(pg_dir, address);
- if (pud_none(*pu_dir)) {
- pm_dir = vmem_pmd_alloc();
- if (!pm_dir)
- goto out;
- pud_populate(&init_mm, pu_dir, pm_dir);
- }
-
- pte = mk_pte_phys(address, __pgprot(ro ? _PAGE_RO : 0));
- pm_dir = pmd_offset(pu_dir, address);
-
-#ifdef __s390x__
- if (MACHINE_HAS_HPAGE && !(address & ~HPAGE_MASK) &&
- (address + HPAGE_SIZE <= start + size) &&
- (address >= HPAGE_SIZE)) {
- pte_val(pte) |= _SEGMENT_ENTRY_LARGE;
- pmd_val(*pm_dir) = pte_val(pte);
- address += HPAGE_SIZE - PAGE_SIZE;
- continue;
- }
-#endif
- if (pmd_none(*pm_dir)) {
- pt_dir = vmem_pte_alloc(address);
- if (!pt_dir)
- goto out;
- pmd_populate(&init_mm, pm_dir, pt_dir);
- }
-
- pt_dir = pte_offset_kernel(pm_dir, address);
- *pt_dir = pte;
- }
- ret = 0;
-out:
- flush_tlb_kernel_range(start, start + size);
- return ret;
-}
-
-/*
- * Remove a physical memory range from the 1:1 mapping.
- * Currently only invalidates page table entries.
- */
-static void vmem_remove_range(unsigned long start, unsigned long size)
-{
- unsigned long address;
- pgd_t *pg_dir;
- pud_t *pu_dir;
- pmd_t *pm_dir;
- pte_t *pt_dir;
- pte_t pte;
-
- pte_val(pte) = _PAGE_TYPE_EMPTY;
- for (address = start; address < start + size; address += PAGE_SIZE) {
- pg_dir = pgd_offset_k(address);
- pu_dir = pud_offset(pg_dir, address);
- if (pud_none(*pu_dir))
- continue;
- pm_dir = pmd_offset(pu_dir, address);
- if (pmd_none(*pm_dir))
- continue;
-
- if (pmd_huge(*pm_dir)) {
- pmd_clear(pm_dir);
- address += HPAGE_SIZE - PAGE_SIZE;
- continue;
- }
-
- pt_dir = pte_offset_kernel(pm_dir, address);
- *pt_dir = pte;
- }
- flush_tlb_kernel_range(start, start + size);
-}
-
-/*
- * Add a backed mem_map array to the virtual mem_map array.
- */
-int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
-{
- unsigned long address, start_addr, end_addr;
- pgd_t *pg_dir;
- pud_t *pu_dir;
- pmd_t *pm_dir;
- pte_t *pt_dir;
- pte_t pte;
- int ret = -ENOMEM;
-
- start_addr = (unsigned long) start;
- end_addr = (unsigned long) (start + nr);
-
- for (address = start_addr; address < end_addr; address += PAGE_SIZE) {
- pg_dir = pgd_offset_k(address);
- if (pgd_none(*pg_dir)) {
- pu_dir = vmem_pud_alloc();
- if (!pu_dir)
- goto out;
- pgd_populate(&init_mm, pg_dir, pu_dir);
- }
-
- pu_dir = pud_offset(pg_dir, address);
- if (pud_none(*pu_dir)) {
- pm_dir = vmem_pmd_alloc();
- if (!pm_dir)
- goto out;
- pud_populate(&init_mm, pu_dir, pm_dir);
- }
-
- pm_dir = pmd_offset(pu_dir, address);
- if (pmd_none(*pm_dir)) {
- pt_dir = vmem_pte_alloc(address);
- if (!pt_dir)
- goto out;
- pmd_populate(&init_mm, pm_dir, pt_dir);
- }
-
- pt_dir = pte_offset_kernel(pm_dir, address);
- if (pte_none(*pt_dir)) {
- unsigned long new_page;
-
-			new_page = __pa(vmem_alloc_pages(0));
- if (!new_page)
- goto out;
- pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL);
- *pt_dir = pte;
- }
- }
- memset(start, 0, nr * sizeof(struct page));
- ret = 0;
-out:
- flush_tlb_kernel_range(start_addr, end_addr);
- return ret;
-}
-
-/*
- * Add memory segment to the segment list if it doesn't overlap with
- * an already present segment.
- */
-static int insert_memory_segment(struct memory_segment *seg)
-{
- struct memory_segment *tmp;
-
- if (seg->start + seg->size > VMEM_MAX_PHYS ||
- seg->start + seg->size < seg->start)
- return -ERANGE;
-
- list_for_each_entry(tmp, &mem_segs, list) {
- if (seg->start >= tmp->start + tmp->size)
- continue;
- if (seg->start + seg->size <= tmp->start)
- continue;
- return -ENOSPC;
- }
- list_add(&seg->list, &mem_segs);
- return 0;
-}
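
The loop in insert_memory_segment() rejects a new segment as soon as it intersects an existing one. The test reads more naturally inverted: two half-open ranges are disjoint exactly when one ends at or before the start of the other. A sketch:

#include <stdio.h>

/* Same overlap test as the list walk in insert_memory_segment(),
 * written as a standalone predicate over half-open ranges. */
static int overlaps(unsigned long a, unsigned long alen,
		    unsigned long b, unsigned long blen)
{
	if (a >= b + blen)
		return 0;	/* a starts after b ends */
	if (a + alen <= b)
		return 0;	/* a ends before b starts */
	return 1;
}

int main(void)
{
	printf("%d\n", overlaps(0x1000, 0x1000, 0x2000, 0x1000));	/* 0 */
	printf("%d\n", overlaps(0x1000, 0x2000, 0x2000, 0x1000));	/* 1 */
	return 0;
}
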
-
-/*
- * Remove memory segment from the segment list.
- */
-static void remove_memory_segment(struct memory_segment *seg)
-{
- list_del(&seg->list);
-}
-
-static void __remove_shared_memory(struct memory_segment *seg)
-{
- remove_memory_segment(seg);
- vmem_remove_range(seg->start, seg->size);
-}
-
-int vmem_remove_mapping(unsigned long start, unsigned long size)
-{
- struct memory_segment *seg;
- int ret;
-
- mutex_lock(&vmem_mutex);
-
- ret = -ENOENT;
- list_for_each_entry(seg, &mem_segs, list) {
- if (seg->start == start && seg->size == size)
- break;
- }
-
- if (seg->start != start || seg->size != size)
- goto out;
-
- ret = 0;
- __remove_shared_memory(seg);
- kfree(seg);
-out:
- mutex_unlock(&vmem_mutex);
- return ret;
-}
-
-int vmem_add_mapping(unsigned long start, unsigned long size)
-{
- struct memory_segment *seg;
- int ret;
-
- mutex_lock(&vmem_mutex);
- ret = -ENOMEM;
- seg = kzalloc(sizeof(*seg), GFP_KERNEL);
- if (!seg)
- goto out;
- seg->start = start;
- seg->size = size;
-
- ret = insert_memory_segment(seg);
- if (ret)
- goto out_free;
-
- ret = vmem_add_mem(start, size, 0);
- if (ret)
- goto out_remove;
- goto out;
-
-out_remove:
- __remove_shared_memory(seg);
-out_free:
- kfree(seg);
-out:
- mutex_unlock(&vmem_mutex);
- return ret;
-}
-
-/*
- * Map the whole physical memory to virtual memory (identity mapping).
- * We reserve enough space in the vmalloc area for the vmemmap to hotplug
- * additional memory segments.
- */
-void __init vmem_map_init(void)
-{
- unsigned long ro_start, ro_end;
- unsigned long start, end;
- int i;
-
- ro_start = ((unsigned long)&_stext) & PAGE_MASK;
- ro_end = PFN_ALIGN((unsigned long)&_eshared);
- for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
- if (memory_chunk[i].type == CHUNK_CRASHK ||
- memory_chunk[i].type == CHUNK_OLDMEM)
- continue;
- start = memory_chunk[i].addr;
- end = memory_chunk[i].addr + memory_chunk[i].size;
- if (start >= ro_end || end <= ro_start)
- vmem_add_mem(start, end - start, 0);
- else if (start >= ro_start && end <= ro_end)
- vmem_add_mem(start, end - start, 1);
- else if (start >= ro_start) {
- vmem_add_mem(start, ro_end - start, 1);
- vmem_add_mem(ro_end, end - ro_end, 0);
- } else if (end < ro_end) {
- vmem_add_mem(start, ro_start - start, 0);
- vmem_add_mem(ro_start, end - ro_start, 1);
- } else {
- vmem_add_mem(start, ro_start - start, 0);
- vmem_add_mem(ro_start, ro_end - ro_start, 1);
- vmem_add_mem(ro_end, end - ro_end, 0);
- }
- }
-}
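
vmem_map_init() cuts every memory chunk against the read-only range [ro_start, ro_end) so that kernel text and shared read-only data end up write-protected while everything else stays read-write. A user-space sketch of the five-way split, with add() just printing what would be mapped:

#include <stdio.h>

static void add(unsigned long start, unsigned long size, int ro)
{
	printf("  map %#lx-%#lx %s\n", start, start + size, ro ? "ro" : "rw");
}

/* Same if/else chain as vmem_map_init() above, over one chunk. */
static void split(unsigned long start, unsigned long end,
		  unsigned long ro_start, unsigned long ro_end)
{
	if (start >= ro_end || end <= ro_start)
		add(start, end - start, 0);
	else if (start >= ro_start && end <= ro_end)
		add(start, end - start, 1);
	else if (start >= ro_start) {
		add(start, ro_end - start, 1);
		add(ro_end, end - ro_end, 0);
	} else if (end < ro_end) {
		add(start, ro_start - start, 0);
		add(ro_start, end - ro_start, 1);
	} else {
		add(start, ro_start - start, 0);
		add(ro_start, ro_end - ro_start, 1);
		add(ro_end, end - ro_end, 0);
	}
}

int main(void)
{
	/* A chunk that straddles the whole read-only range. */
	split(0x0, 0x800000, 0x100000, 0x300000);
	return 0;
}
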
-
-/*
- * Convert memory chunk array to a memory segment list so there is a single
- * list that contains both r/w memory and shared memory segments.
- */
-static int __init vmem_convert_memory_chunk(void)
-{
- struct memory_segment *seg;
- int i;
-
- mutex_lock(&vmem_mutex);
- for (i = 0; i < MEMORY_CHUNKS; i++) {
- if (!memory_chunk[i].size)
- continue;
- if (memory_chunk[i].type == CHUNK_CRASHK ||
- memory_chunk[i].type == CHUNK_OLDMEM)
- continue;
- seg = kzalloc(sizeof(*seg), GFP_KERNEL);
- if (!seg)
- panic("Out of memory...\n");
- seg->start = memory_chunk[i].addr;
- seg->size = memory_chunk[i].size;
- insert_memory_segment(seg);
- }
- mutex_unlock(&vmem_mutex);
- return 0;
-}
-
-core_initcall(vmem_convert_memory_chunk);