Diffstat (limited to 'arch/s390/mm')
-rw-r--r--  arch/s390/mm/Makefile          |    7
-rw-r--r--  arch/s390/mm/cmm.c             |  200
-rw-r--r--  arch/s390/mm/dump_pagetables.c |  246
-rw-r--r--  arch/s390/mm/extable.c         |   81
-rw-r--r--  arch/s390/mm/extmem.c          |  416
-rw-r--r--  arch/s390/mm/fault.c           |  843
-rw-r--r--  arch/s390/mm/gup.c             |  246
-rw-r--r--  arch/s390/mm/hugetlbpage.c     |  235
-rw-r--r--  arch/s390/mm/init.c            |  267
-rw-r--r--  arch/s390/mm/maccess.c         |  204
-rw-r--r--  arch/s390/mm/mem_detect.c      |   65
-rw-r--r--  arch/s390/mm/mmap.c            |  117
-rw-r--r--  arch/s390/mm/page-states.c     |  114
-rw-r--r--  arch/s390/mm/pageattr.c        |  146
-rw-r--r--  arch/s390/mm/pgtable.c         | 1442
-rw-r--r--  arch/s390/mm/vmem.c            |  266
16 files changed, 3954 insertions(+), 941 deletions(-)
diff --git a/arch/s390/mm/Makefile b/arch/s390/mm/Makefile
index 66401930f83..839592ca265 100644
--- a/arch/s390/mm/Makefile
+++ b/arch/s390/mm/Makefile
@@ -2,6 +2,9 @@
# Makefile for the linux s390-specific parts of the memory manager.
#
-obj-y := init.o fault.o extmem.o mmap.o vmem.o pgtable.o
-obj-$(CONFIG_CMM) += cmm.o
+obj-y := init.o fault.o extmem.o mmap.o vmem.o pgtable.o maccess.o
+obj-y += page-states.o gup.o extable.o pageattr.o mem_detect.o
+obj-$(CONFIG_CMM) += cmm.o
+obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
+obj-$(CONFIG_S390_PTDUMP) += dump_pagetables.o
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c
index 413c240cbca..79ddd580d60 100644
--- a/arch/s390/mm/cmm.c
+++ b/arch/s390/mm/cmm.c
@@ -1,29 +1,32 @@
/*
- * arch/s390/mm/cmm.c
+ * Collaborative memory management interface.
*
- * S390 version
- * Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
- * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ * Copyright IBM Corp 2003, 2010
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
*
- * Collaborative memory management interface.
*/
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/module.h>
+#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/swap.h>
#include <linux/kthread.h>
#include <linux/oom.h>
+#include <linux/suspend.h>
+#include <linux/uaccess.h>
#include <asm/pgalloc.h>
-#include <asm/uaccess.h>
#include <asm/diag.h>
-static char *sender = "VMRMSVM";
+#ifdef CONFIG_CMM_IUCV
+static char *cmm_default_sender = "VMRMSVM";
+#endif
+static char *sender;
module_param(sender, charp, 0400);
MODULE_PARM_DESC(sender,
"Guest name that may send SMSG messages (default VMRMSVM)");
@@ -44,20 +47,21 @@ static volatile long cmm_pages_target;
static volatile long cmm_timed_pages_target;
static long cmm_timeout_pages;
static long cmm_timeout_seconds;
+static int cmm_suspended;
static struct cmm_page_array *cmm_page_list;
static struct cmm_page_array *cmm_timed_page_list;
static DEFINE_SPINLOCK(cmm_lock);
static struct task_struct *cmm_thread_ptr;
-static wait_queue_head_t cmm_thread_wait;
-static struct timer_list cmm_timer;
+static DECLARE_WAIT_QUEUE_HEAD(cmm_thread_wait);
+static DEFINE_TIMER(cmm_timer, NULL, 0, 0);
static void cmm_timer_fn(unsigned long);
static void cmm_set_timer(void);
-static long
-cmm_alloc_pages(long nr, long *counter, struct cmm_page_array **list)
+static long cmm_alloc_pages(long nr, long *counter,
+ struct cmm_page_array **list)
{
struct cmm_page_array *pa, *npa;
unsigned long addr;
@@ -87,7 +91,7 @@ cmm_alloc_pages(long nr, long *counter, struct cmm_page_array **list)
} else
free_page((unsigned long) npa);
}
- diag10(addr);
+ diag10_range(addr >> PAGE_SHIFT, 1);
pa->pages[pa->index++] = addr;
(*counter)++;
spin_unlock(&cmm_lock);
@@ -96,8 +100,7 @@ cmm_alloc_pages(long nr, long *counter, struct cmm_page_array **list)
return nr;
}
-static long
-cmm_free_pages(long nr, long *counter, struct cmm_page_array **list)
+static long cmm_free_pages(long nr, long *counter, struct cmm_page_array **list)
{
struct cmm_page_array *pa;
unsigned long addr;
@@ -137,19 +140,18 @@ static int cmm_oom_notify(struct notifier_block *self,
}
static struct notifier_block cmm_oom_nb = {
- .notifier_call = cmm_oom_notify
+ .notifier_call = cmm_oom_notify,
};
-static int
-cmm_thread(void *dummy)
+static int cmm_thread(void *dummy)
{
int rc;
while (1) {
rc = wait_event_interruptible(cmm_thread_wait,
- (cmm_pages != cmm_pages_target ||
- cmm_timed_pages != cmm_timed_pages_target ||
- kthread_should_stop()));
+ (!cmm_suspended && (cmm_pages != cmm_pages_target ||
+ cmm_timed_pages != cmm_timed_pages_target)) ||
+ kthread_should_stop());
if (kthread_should_stop() || rc == -ERESTARTSYS) {
cmm_pages_target = cmm_pages;
cmm_timed_pages_target = cmm_timed_pages;
@@ -167,7 +169,7 @@ cmm_thread(void *dummy)
cmm_timed_pages_target = cmm_timed_pages;
} else if (cmm_timed_pages_target < cmm_timed_pages) {
cmm_free_pages(1, &cmm_timed_pages,
- &cmm_timed_page_list);
+ &cmm_timed_page_list);
}
if (cmm_timed_pages > 0 && !timer_pending(&cmm_timer))
cmm_set_timer();
@@ -175,14 +177,12 @@ cmm_thread(void *dummy)
return 0;
}
-static void
-cmm_kick_thread(void)
+static void cmm_kick_thread(void)
{
wake_up(&cmm_thread_wait);
}
-static void
-cmm_set_timer(void)
+static void cmm_set_timer(void)
{
if (cmm_timed_pages_target <= 0 || cmm_timeout_seconds <= 0) {
if (timer_pending(&cmm_timer))
@@ -199,8 +199,7 @@ cmm_set_timer(void)
add_timer(&cmm_timer);
}
-static void
-cmm_timer_fn(unsigned long ignored)
+static void cmm_timer_fn(unsigned long ignored)
{
long nr;
@@ -213,61 +212,53 @@ cmm_timer_fn(unsigned long ignored)
cmm_set_timer();
}
-void
-cmm_set_pages(long nr)
+static void cmm_set_pages(long nr)
{
cmm_pages_target = nr;
cmm_kick_thread();
}
-long
-cmm_get_pages(void)
+static long cmm_get_pages(void)
{
return cmm_pages;
}
-void
-cmm_add_timed_pages(long nr)
+static void cmm_add_timed_pages(long nr)
{
cmm_timed_pages_target += nr;
cmm_kick_thread();
}
-long
-cmm_get_timed_pages(void)
+static long cmm_get_timed_pages(void)
{
return cmm_timed_pages;
}
-void
-cmm_set_timeout(long nr, long seconds)
+static void cmm_set_timeout(long nr, long seconds)
{
cmm_timeout_pages = nr;
cmm_timeout_seconds = seconds;
cmm_set_timer();
}
-static int
-cmm_skip_blanks(char *cp, char **endp)
+static int cmm_skip_blanks(char *cp, char **endp)
{
char *str;
- for (str = cp; *str == ' ' || *str == '\t'; str++);
+ for (str = cp; *str == ' ' || *str == '\t'; str++)
+ ;
*endp = str;
return str != cp;
}
-#ifdef CONFIG_CMM_PROC
-
static struct ctl_table cmm_table[];
-static int
-cmm_pages_handler(ctl_table *ctl, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+static int cmm_pages_handler(struct ctl_table *ctl, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
{
char buf[16], *p;
+ unsigned int len;
long nr;
- int len;
if (!*lenp || (*ppos && !write)) {
*lenp = 0;
@@ -302,13 +293,12 @@ cmm_pages_handler(ctl_table *ctl, int write, struct file *filp,
return 0;
}
-static int
-cmm_timeout_handler(ctl_table *ctl, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+static int cmm_timeout_handler(struct ctl_table *ctl, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
{
char buf[64], *p;
long nr, seconds;
- int len;
+ unsigned int len;
if (!*lenp || (*ppos && !write)) {
*lenp = 0;
@@ -343,37 +333,34 @@ static struct ctl_table cmm_table[] = {
{
.procname = "cmm_pages",
.mode = 0644,
- .proc_handler = &cmm_pages_handler,
+ .proc_handler = cmm_pages_handler,
},
{
.procname = "cmm_timed_pages",
.mode = 0644,
- .proc_handler = &cmm_pages_handler,
+ .proc_handler = cmm_pages_handler,
},
{
.procname = "cmm_timeout",
.mode = 0644,
- .proc_handler = &cmm_timeout_handler,
+ .proc_handler = cmm_timeout_handler,
},
- { .ctl_name = 0 }
+ { }
};
static struct ctl_table cmm_dir_table[] = {
{
- .ctl_name = CTL_VM,
.procname = "vm",
.maxlen = 0,
.mode = 0555,
.child = cmm_table,
},
- { .ctl_name = 0 }
+ { }
};
-#endif
#ifdef CONFIG_CMM_IUCV
#define SMSG_PREFIX "CMM"
-static void
-cmm_smsg_target(char *from, char *msg)
+static void cmm_smsg_target(const char *from, char *msg)
{
long nr, seconds;
@@ -411,17 +398,55 @@ cmm_smsg_target(char *from, char *msg)
static struct ctl_table_header *cmm_sysctl_header;
-static int
-cmm_init (void)
+static int cmm_suspend(void)
+{
+ cmm_suspended = 1;
+ cmm_free_pages(cmm_pages, &cmm_pages, &cmm_page_list);
+ cmm_free_pages(cmm_timed_pages, &cmm_timed_pages, &cmm_timed_page_list);
+ return 0;
+}
+
+static int cmm_resume(void)
+{
+ cmm_suspended = 0;
+ cmm_kick_thread();
+ return 0;
+}
+
+static int cmm_power_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ switch (event) {
+ case PM_POST_HIBERNATION:
+ return cmm_resume();
+ case PM_HIBERNATION_PREPARE:
+ return cmm_suspend();
+ default:
+ return NOTIFY_DONE;
+ }
+}
+
+static struct notifier_block cmm_power_notifier = {
+ .notifier_call = cmm_power_event,
+};
+
+static int __init cmm_init(void)
{
int rc = -ENOMEM;
-#ifdef CONFIG_CMM_PROC
cmm_sysctl_header = register_sysctl_table(cmm_dir_table);
if (!cmm_sysctl_header)
- goto out;
-#endif
+ goto out_sysctl;
#ifdef CONFIG_CMM_IUCV
+ /* convert sender to uppercase characters */
+ if (sender) {
+ int len = strlen(sender);
+ while (len--)
+ sender[len] = toupper(sender[len]);
+ } else {
+ sender = cmm_default_sender;
+ }
+
rc = smsg_register_callback(SMSG_PREFIX, cmm_smsg_target);
if (rc < 0)
goto out_smsg;
@@ -429,51 +454,42 @@ cmm_init (void)
rc = register_oom_notifier(&cmm_oom_nb);
if (rc < 0)
goto out_oom_notify;
- init_waitqueue_head(&cmm_thread_wait);
- init_timer(&cmm_timer);
+ rc = register_pm_notifier(&cmm_power_notifier);
+ if (rc)
+ goto out_pm;
cmm_thread_ptr = kthread_run(cmm_thread, NULL, "cmmthread");
- rc = IS_ERR(cmm_thread_ptr) ? PTR_ERR(cmm_thread_ptr) : 0;
- if (!rc)
- goto out;
- /*
- * kthread_create failed. undo all the stuff from above again.
- */
- unregister_oom_notifier(&cmm_oom_nb);
+ if (!IS_ERR(cmm_thread_ptr))
+ return 0;
+ rc = PTR_ERR(cmm_thread_ptr);
+ unregister_pm_notifier(&cmm_power_notifier);
+out_pm:
+ unregister_oom_notifier(&cmm_oom_nb);
out_oom_notify:
#ifdef CONFIG_CMM_IUCV
smsg_unregister_callback(SMSG_PREFIX, cmm_smsg_target);
out_smsg:
#endif
-#ifdef CONFIG_CMM_PROC
unregister_sysctl_table(cmm_sysctl_header);
-#endif
-out:
+out_sysctl:
+ del_timer_sync(&cmm_timer);
return rc;
}
+module_init(cmm_init);
-static void
-cmm_exit(void)
+static void __exit cmm_exit(void)
{
- kthread_stop(cmm_thread_ptr);
- unregister_oom_notifier(&cmm_oom_nb);
- cmm_free_pages(cmm_pages, &cmm_pages, &cmm_page_list);
- cmm_free_pages(cmm_timed_pages, &cmm_timed_pages, &cmm_timed_page_list);
-#ifdef CONFIG_CMM_PROC
unregister_sysctl_table(cmm_sysctl_header);
-#endif
#ifdef CONFIG_CMM_IUCV
smsg_unregister_callback(SMSG_PREFIX, cmm_smsg_target);
#endif
+ unregister_pm_notifier(&cmm_power_notifier);
+ unregister_oom_notifier(&cmm_oom_nb);
+ kthread_stop(cmm_thread_ptr);
+ del_timer_sync(&cmm_timer);
+ cmm_free_pages(cmm_pages, &cmm_pages, &cmm_page_list);
+ cmm_free_pages(cmm_timed_pages, &cmm_timed_pages, &cmm_timed_page_list);
}
-
-module_init(cmm_init);
module_exit(cmm_exit);
-EXPORT_SYMBOL(cmm_set_pages);
-EXPORT_SYMBOL(cmm_get_pages);
-EXPORT_SYMBOL(cmm_add_timed_pages);
-EXPORT_SYMBOL(cmm_get_timed_pages);
-EXPORT_SYMBOL(cmm_set_timeout);
-
MODULE_LICENSE("GPL");
diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c
new file mode 100644
index 00000000000..46d517c3c76
--- /dev/null
+++ b/arch/s390/mm/dump_pagetables.c
@@ -0,0 +1,246 @@
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <asm/sections.h>
+#include <asm/pgtable.h>
+
+static unsigned long max_addr;
+
+struct addr_marker {
+ unsigned long start_address;
+ const char *name;
+};
+
+enum address_markers_idx {
+ IDENTITY_NR = 0,
+ KERNEL_START_NR,
+ KERNEL_END_NR,
+ VMEMMAP_NR,
+ VMALLOC_NR,
+#ifdef CONFIG_64BIT
+ MODULES_NR,
+#endif
+};
+
+static struct addr_marker address_markers[] = {
+ [IDENTITY_NR] = {0, "Identity Mapping"},
+ [KERNEL_START_NR] = {(unsigned long)&_stext, "Kernel Image Start"},
+ [KERNEL_END_NR] = {(unsigned long)&_end, "Kernel Image End"},
+ [VMEMMAP_NR] = {0, "vmemmap Area"},
+ [VMALLOC_NR] = {0, "vmalloc Area"},
+#ifdef CONFIG_64BIT
+ [MODULES_NR] = {0, "Modules Area"},
+#endif
+ { -1, NULL }
+};
+
+struct pg_state {
+ int level;
+ unsigned int current_prot;
+ unsigned long start_address;
+ unsigned long current_address;
+ const struct addr_marker *marker;
+};
+
+static void print_prot(struct seq_file *m, unsigned int pr, int level)
+{
+ static const char * const level_name[] =
+ { "ASCE", "PGD", "PUD", "PMD", "PTE" };
+
+ seq_printf(m, "%s ", level_name[level]);
+ if (pr & _PAGE_INVALID) {
+ seq_printf(m, "I\n");
+ return;
+ }
+ seq_printf(m, "%s", pr & _PAGE_PROTECT ? "RO " : "RW ");
+ seq_printf(m, "%s", pr & _PAGE_CO ? "CO " : " ");
+ seq_putc(m, '\n');
+}
+
+static void note_page(struct seq_file *m, struct pg_state *st,
+ unsigned int new_prot, int level)
+{
+ static const char units[] = "KMGTPE";
+ int width = sizeof(unsigned long) * 2;
+ const char *unit = units;
+ unsigned int prot, cur;
+ unsigned long delta;
+
+ /*
+ * If we have a "break" in the series, we need to flush the state
+ * that we have now. "break" is either changing perms, levels or
+ * address space marker.
+ */
+ prot = new_prot;
+ cur = st->current_prot;
+
+ if (!st->level) {
+ /* First entry */
+ st->current_prot = new_prot;
+ st->level = level;
+ st->marker = address_markers;
+ seq_printf(m, "---[ %s ]---\n", st->marker->name);
+ } else if (prot != cur || level != st->level ||
+ st->current_address >= st->marker[1].start_address) {
+ /* Print the actual finished series */
+ seq_printf(m, "0x%0*lx-0x%0*lx",
+ width, st->start_address,
+ width, st->current_address);
+ delta = (st->current_address - st->start_address) >> 10;
+ while (!(delta & 0x3ff) && unit[1]) {
+ delta >>= 10;
+ unit++;
+ }
+ seq_printf(m, "%9lu%c ", delta, *unit);
+ print_prot(m, st->current_prot, st->level);
+ if (st->current_address >= st->marker[1].start_address) {
+ st->marker++;
+ seq_printf(m, "---[ %s ]---\n", st->marker->name);
+ }
+ st->start_address = st->current_address;
+ st->current_prot = new_prot;
+ st->level = level;
+ }
+}
+
+/*
+ * The actual page table walker functions. In order to keep the
+ * implementation of print_prot() short, we only check and pass
+ * _PAGE_INVALID and _PAGE_PROTECT flags to note_page() if a region,
+ * segment or page table entry is invalid or read-only.
+ * After all it's just a hint that the current level being walked
+ * contains an invalid or read-only entry.
+ */
+static void walk_pte_level(struct seq_file *m, struct pg_state *st,
+ pmd_t *pmd, unsigned long addr)
+{
+ unsigned int prot;
+ pte_t *pte;
+ int i;
+
+ for (i = 0; i < PTRS_PER_PTE && addr < max_addr; i++) {
+ st->current_address = addr;
+ pte = pte_offset_kernel(pmd, addr);
+ prot = pte_val(*pte) & (_PAGE_PROTECT | _PAGE_INVALID);
+ note_page(m, st, prot, 4);
+ addr += PAGE_SIZE;
+ }
+}
+
+#ifdef CONFIG_64BIT
+#define _PMD_PROT_MASK (_SEGMENT_ENTRY_PROTECT | _SEGMENT_ENTRY_CO)
+#else
+#define _PMD_PROT_MASK 0
+#endif
+
+static void walk_pmd_level(struct seq_file *m, struct pg_state *st,
+ pud_t *pud, unsigned long addr)
+{
+ unsigned int prot;
+ pmd_t *pmd;
+ int i;
+
+ for (i = 0; i < PTRS_PER_PMD && addr < max_addr; i++) {
+ st->current_address = addr;
+ pmd = pmd_offset(pud, addr);
+ if (!pmd_none(*pmd)) {
+ if (pmd_large(*pmd)) {
+ prot = pmd_val(*pmd) & _PMD_PROT_MASK;
+ note_page(m, st, prot, 3);
+ } else
+ walk_pte_level(m, st, pmd, addr);
+ } else
+ note_page(m, st, _PAGE_INVALID, 3);
+ addr += PMD_SIZE;
+ }
+}
+
+#ifdef CONFIG_64BIT
+#define _PUD_PROT_MASK (_REGION3_ENTRY_RO | _REGION3_ENTRY_CO)
+#else
+#define _PUD_PROT_MASK 0
+#endif
+
+static void walk_pud_level(struct seq_file *m, struct pg_state *st,
+ pgd_t *pgd, unsigned long addr)
+{
+ unsigned int prot;
+ pud_t *pud;
+ int i;
+
+ for (i = 0; i < PTRS_PER_PUD && addr < max_addr; i++) {
+ st->current_address = addr;
+ pud = pud_offset(pgd, addr);
+ if (!pud_none(*pud))
+ if (pud_large(*pud)) {
+ prot = pud_val(*pud) & _PUD_PROT_MASK;
+ note_page(m, st, prot, 2);
+ } else
+ walk_pmd_level(m, st, pud, addr);
+ else
+ note_page(m, st, _PAGE_INVALID, 2);
+ addr += PUD_SIZE;
+ }
+}
+
+static void walk_pgd_level(struct seq_file *m)
+{
+ unsigned long addr = 0;
+ struct pg_state st;
+ pgd_t *pgd;
+ int i;
+
+ memset(&st, 0, sizeof(st));
+ for (i = 0; i < PTRS_PER_PGD && addr < max_addr; i++) {
+ st.current_address = addr;
+ pgd = pgd_offset_k(addr);
+ if (!pgd_none(*pgd))
+ walk_pud_level(m, &st, pgd, addr);
+ else
+ note_page(m, &st, _PAGE_INVALID, 1);
+ addr += PGDIR_SIZE;
+ }
+ /* Flush out the last page */
+ st.current_address = max_addr;
+ note_page(m, &st, 0, 0);
+}
+
+static int ptdump_show(struct seq_file *m, void *v)
+{
+ walk_pgd_level(m);
+ return 0;
+}
+
+static int ptdump_open(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, ptdump_show, NULL);
+}
+
+static const struct file_operations ptdump_fops = {
+ .open = ptdump_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int pt_dump_init(void)
+{
+ /*
+ * Figure out the maximum virtual address being accessible with the
+ * kernel ASCE. We need this to keep the page table walker functions
+ * from accessing non-existent entries.
+ */
+#ifdef CONFIG_32BIT
+ max_addr = 1UL << 31;
+#else
+ max_addr = (S390_lowcore.kernel_asce & _REGION_ENTRY_TYPE_MASK) >> 2;
+ max_addr = 1UL << (max_addr * 11 + 31);
+ address_markers[MODULES_NR].start_address = MODULES_VADDR;
+#endif
+ address_markers[VMEMMAP_NR].start_address = (unsigned long) vmemmap;
+ address_markers[VMALLOC_NR].start_address = VMALLOC_START;
+ debugfs_create_file("kernel_page_tables", 0400, NULL, NULL, &ptdump_fops);
+ return 0;
+}
+device_initcall(pt_dump_init);
diff --git a/arch/s390/mm/extable.c b/arch/s390/mm/extable.c
new file mode 100644
index 00000000000..4d1ee88864e
--- /dev/null
+++ b/arch/s390/mm/extable.c
@@ -0,0 +1,81 @@
+#include <linux/module.h>
+#include <linux/sort.h>
+#include <asm/uaccess.h>
+
+/*
+ * Search one exception table for an entry corresponding to the
+ * given instruction address, and return the address of the entry,
+ * or NULL if none is found.
+ * We use a binary search, and thus we assume that the table is
+ * already sorted.
+ */
+const struct exception_table_entry *
+search_extable(const struct exception_table_entry *first,
+ const struct exception_table_entry *last,
+ unsigned long value)
+{
+ const struct exception_table_entry *mid;
+ unsigned long addr;
+
+ while (first <= last) {
+ mid = ((last - first) >> 1) + first;
+ addr = extable_insn(mid);
+ if (addr < value)
+ first = mid + 1;
+ else if (addr > value)
+ last = mid - 1;
+ else
+ return mid;
+ }
+ return NULL;
+}
+
+/*
+ * The exception table needs to be sorted so that the binary
+ * search that we use to find entries in it works properly.
+ * This is used both for the kernel exception table and for
+ * the exception tables of modules that get loaded.
+ *
+ */
+static int cmp_ex(const void *a, const void *b)
+{
+ const struct exception_table_entry *x = a, *y = b;
+
+ /* This compare is only valid after normalization. */
+ return x->insn - y->insn;
+}
+
+void sort_extable(struct exception_table_entry *start,
+ struct exception_table_entry *finish)
+{
+ struct exception_table_entry *p;
+ int i;
+
+ /* Normalize entries to being relative to the start of the section */
+ for (p = start, i = 0; p < finish; p++, i += 8)
+ p->insn += i;
+ sort(start, finish - start, sizeof(*start), cmp_ex, NULL);
+ /* Denormalize all entries */
+ for (p = start, i = 0; p < finish; p++, i += 8)
+ p->insn -= i;
+}
+
+#ifdef CONFIG_MODULES
+/*
+ * If the exception table is sorted, any referring to the module init
+ * will be at the beginning or the end.
+ */
+void trim_init_extable(struct module *m)
+{
+ /* Trim the beginning */
+ while (m->num_exentries &&
+ within_module_init(extable_insn(&m->extable[0]), m)) {
+ m->extable++;
+ m->num_exentries--;
+ }
+ /* Trim the end */
+ while (m->num_exentries &&
+ within_module_init(extable_insn(&m->extable[m->num_exentries-1]), m))
+ m->num_exentries--;
+}
+#endif /* CONFIG_MODULES */
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
index 880b0ebf894..519bba716cc 100644
--- a/arch/s390/mm/extmem.c
+++ b/arch/s390/mm/extmem.c
@@ -1,12 +1,14 @@
/*
- * File...........: arch/s390/mm/extmem.c
* Author(s)......: Carsten Otte <cotte@de.ibm.com>
* Rob M van der Heij <rvdheij@nl.ibm.com>
* Steven Shultz <shultzss@us.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
- * (C) IBM Corporation 2002-2004
+ * Copyright IBM Corp. 2002, 2004
*/
+#define KMSG_COMPONENT "extmem"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/spinlock.h>
@@ -24,39 +26,46 @@
#include <asm/cpcmd.h>
#include <asm/setup.h>
-#define DCSS_DEBUG /* Debug messages on/off */
-
-#define DCSS_NAME "extmem"
-#ifdef DCSS_DEBUG
-#define PRINT_DEBUG(x...) printk(KERN_DEBUG DCSS_NAME " debug:" x)
-#else
-#define PRINT_DEBUG(x...) do {} while (0)
-#endif
-#define PRINT_INFO(x...) printk(KERN_INFO DCSS_NAME " info:" x)
-#define PRINT_WARN(x...) printk(KERN_WARNING DCSS_NAME " warning:" x)
-#define PRINT_ERR(x...) printk(KERN_ERR DCSS_NAME " error:" x)
-
-
#define DCSS_LOADSHR 0x00
#define DCSS_LOADNSR 0x04
#define DCSS_PURGESEG 0x08
#define DCSS_FINDSEG 0x0c
#define DCSS_LOADNOLY 0x10
#define DCSS_SEGEXT 0x18
+#define DCSS_LOADSHRX 0x20
+#define DCSS_LOADNSRX 0x24
+#define DCSS_FINDSEGX 0x2c
+#define DCSS_SEGEXTX 0x38
#define DCSS_FINDSEGA 0x0c
struct qrange {
- unsigned int start; // 3byte start address, 1 byte type
- unsigned int end; // 3byte end address, 1 byte reserved
+ unsigned long start; /* last byte type */
+ unsigned long end; /* last byte reserved */
};
struct qout64 {
+ unsigned long segstart;
+ unsigned long segend;
+ int segcnt;
+ int segrcnt;
+ struct qrange range[6];
+};
+
+#ifdef CONFIG_64BIT
+struct qrange_old {
+ unsigned int start; /* last byte type */
+ unsigned int end; /* last byte reserved */
+};
+
+/* output area format for the Diag x'64' old subcode x'18' */
+struct qout64_old {
int segstart;
int segend;
int segcnt;
int segrcnt;
- struct qrange range[6];
+ struct qrange_old range[6];
};
+#endif
struct qin64 {
char qopcode;
@@ -86,6 +95,55 @@ static DEFINE_MUTEX(dcss_lock);
static LIST_HEAD(dcss_list);
static char *segtype_string[] = { "SW", "EW", "SR", "ER", "SN", "EN", "SC",
"EW/EN-MIXED" };
+static int loadshr_scode, loadnsr_scode, findseg_scode;
+static int segext_scode, purgeseg_scode;
+static int scode_set;
+
+/* set correct Diag x'64' subcodes. */
+static int
+dcss_set_subcodes(void)
+{
+#ifdef CONFIG_64BIT
+ char *name = kmalloc(8 * sizeof(char), GFP_KERNEL | GFP_DMA);
+ unsigned long rx, ry;
+ int rc;
+
+ if (name == NULL)
+ return -ENOMEM;
+
+ rx = (unsigned long) name;
+ ry = DCSS_FINDSEGX;
+
+ strcpy(name, "dummy");
+ asm volatile(
+ " diag %0,%1,0x64\n"
+ "0: ipm %2\n"
+ " srl %2,28\n"
+ " j 2f\n"
+ "1: la %2,3\n"
+ "2:\n"
+ EX_TABLE(0b, 1b)
+ : "+d" (rx), "+d" (ry), "=d" (rc) : : "cc");
+
+ kfree(name);
+ /* Diag x'64' new subcodes are supported, set to new subcodes */
+ if (rc != 3) {
+ loadshr_scode = DCSS_LOADSHRX;
+ loadnsr_scode = DCSS_LOADNSRX;
+ purgeseg_scode = DCSS_PURGESEG;
+ findseg_scode = DCSS_FINDSEGX;
+ segext_scode = DCSS_SEGEXTX;
+ return 0;
+ }
+#endif
+ /* Diag x'64' new subcodes are not supported, set to old subcodes */
+ loadshr_scode = DCSS_LOADNOLY;
+ loadnsr_scode = DCSS_LOADNSR;
+ purgeseg_scode = DCSS_PURGESEG;
+ findseg_scode = DCSS_FINDSEG;
+ segext_scode = DCSS_SEGEXT;
+ return 0;
+}
/*
* Create the 8 bytes, ebcdic VM segment name from
@@ -135,25 +193,45 @@ segment_by_name (char *name)
* Perform a function on a dcss segment.
*/
static inline int
-dcss_diag (__u8 func, void *parameter,
+dcss_diag(int *func, void *parameter,
unsigned long *ret1, unsigned long *ret2)
{
unsigned long rx, ry;
int rc;
+ if (scode_set == 0) {
+ rc = dcss_set_subcodes();
+ if (rc < 0)
+ return rc;
+ scode_set = 1;
+ }
rx = (unsigned long) parameter;
- ry = (unsigned long) func;
- asm volatile(
+ ry = (unsigned long) *func;
+
#ifdef CONFIG_64BIT
- " sam31\n"
- " diag %0,%1,0x64\n"
- " sam64\n"
+ /* 64-bit Diag x'64' new subcode, keep in 64-bit addressing mode */
+ if (*func > DCSS_SEGEXT)
+ asm volatile(
+ " diag %0,%1,0x64\n"
+ " ipm %2\n"
+ " srl %2,28\n"
+ : "+d" (rx), "+d" (ry), "=d" (rc) : : "cc");
+ /* 31-bit Diag x'64' old subcode, switch to 31-bit addressing mode */
+ else
+ asm volatile(
+ " sam31\n"
+ " diag %0,%1,0x64\n"
+ " sam64\n"
+ " ipm %2\n"
+ " srl %2,28\n"
+ : "+d" (rx), "+d" (ry), "=d" (rc) : : "cc");
#else
+ asm volatile(
" diag %0,%1,0x64\n"
-#endif
" ipm %2\n"
" srl %2,28\n"
: "+d" (rx), "+d" (ry), "=d" (rc) : : "cc");
+#endif
*ret1 = rx;
*ret2 = ry;
return rc;
@@ -173,12 +251,13 @@ dcss_diag_translate_rc (int vm_rc) {
static int
query_segment_type (struct dcss_segment *seg)
{
- struct qin64 *qin = kmalloc (sizeof(struct qin64), GFP_DMA);
- struct qout64 *qout = kmalloc (sizeof(struct qout64), GFP_DMA);
-
- int diag_cc, rc, i;
unsigned long dummy, vmrc;
+ int diag_cc, rc, i;
+ struct qout64 *qout;
+ struct qin64 *qin;
+ qin = kmalloc(sizeof(*qin), GFP_KERNEL | GFP_DMA);
+ qout = kmalloc(sizeof(*qout), GFP_KERNEL | GFP_DMA);
if ((qin == NULL) || (qout == NULL)) {
rc = -ENOMEM;
goto out_free;
@@ -190,16 +269,47 @@ query_segment_type (struct dcss_segment *seg)
qin->qoutlen = sizeof(struct qout64);
memcpy (qin->qname, seg->dcss_name, 8);
- diag_cc = dcss_diag (DCSS_SEGEXT, qin, &dummy, &vmrc);
+ diag_cc = dcss_diag(&segext_scode, qin, &dummy, &vmrc);
+ if (diag_cc < 0) {
+ rc = diag_cc;
+ goto out_free;
+ }
if (diag_cc > 1) {
- PRINT_WARN ("segment_type: diag returned error %ld\n", vmrc);
+ pr_warning("Querying a DCSS type failed with rc=%ld\n", vmrc);
rc = dcss_diag_translate_rc (vmrc);
goto out_free;
}
+#ifdef CONFIG_64BIT
+ /* Only old format of output area of Diagnose x'64' is supported,
+ copy data for the new format. */
+ if (segext_scode == DCSS_SEGEXT) {
+ struct qout64_old *qout_old;
+ qout_old = kzalloc(sizeof(*qout_old), GFP_KERNEL | GFP_DMA);
+ if (qout_old == NULL) {
+ rc = -ENOMEM;
+ goto out_free;
+ }
+ memcpy(qout_old, qout, sizeof(struct qout64_old));
+ qout->segstart = (unsigned long) qout_old->segstart;
+ qout->segend = (unsigned long) qout_old->segend;
+ qout->segcnt = qout_old->segcnt;
+ qout->segrcnt = qout_old->segrcnt;
+
+ if (qout->segcnt > 6)
+ qout->segrcnt = 6;
+ for (i = 0; i < qout->segrcnt; i++) {
+ qout->range[i].start =
+ (unsigned long) qout_old->range[i].start;
+ qout->range[i].end =
+ (unsigned long) qout_old->range[i].end;
+ }
+ kfree(qout_old);
+ }
+#endif
if (qout->segcnt > 6) {
- rc = -ENOTSUPP;
+ rc = -EOPNOTSUPP;
goto out_free;
}
@@ -214,11 +324,11 @@ query_segment_type (struct dcss_segment *seg)
for (i=0; i<qout->segcnt; i++) {
if (((qout->range[i].start & 0xff) != SEG_TYPE_EW) &&
((qout->range[i].start & 0xff) != SEG_TYPE_EN)) {
- rc = -ENOTSUPP;
+ rc = -EOPNOTSUPP;
goto out_free;
}
if (start != qout->range[i].start >> PAGE_SHIFT) {
- rc = -ENOTSUPP;
+ rc = -EOPNOTSUPP;
goto out_free;
}
start = (qout->range[i].end >> PAGE_SHIFT) + 1;
@@ -247,8 +357,7 @@ query_segment_type (struct dcss_segment *seg)
* -ENOSYS : we are not running on VM
* -EIO : could not perform query diagnose
* -ENOENT : no such segment
- * -ENOTSUPP: multi-part segment cannot be used with linux
- * -ENOSPC : segment cannot be used (overlaps with storage)
+ * -EOPNOTSUPP: multi-part segment cannot be used with linux
* -ENOMEM : out of memory
* 0 .. 6 : type of segment as defined in include/asm-s390/extmem.h
*/
@@ -269,15 +378,41 @@ segment_type (char* name)
}
/*
+ * check if segment collides with other segments that are currently loaded
+ * returns 1 if this is the case, 0 if no collision was found
+ */
+static int
+segment_overlaps_others (struct dcss_segment *seg)
+{
+ struct list_head *l;
+ struct dcss_segment *tmp;
+
+ BUG_ON(!mutex_is_locked(&dcss_lock));
+ list_for_each(l, &dcss_list) {
+ tmp = list_entry(l, struct dcss_segment, list);
+ if ((tmp->start_addr >> 20) > (seg->end >> 20))
+ continue;
+ if ((tmp->end >> 20) < (seg->start_addr >> 20))
+ continue;
+ if (seg == tmp)
+ continue;
+ return 1;
+ }
+ return 0;
+}
+
+/*
* real segment loading function, called from segment_load
*/
static int
__segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long *end)
{
- struct dcss_segment *seg = kmalloc(sizeof(struct dcss_segment),
- GFP_DMA);
- int dcss_command, rc, diag_cc;
+ unsigned long start_addr, end_addr, dummy;
+ struct dcss_segment *seg;
+ int rc, diag_cc;
+ start_addr = end_addr = 0;
+ seg = kmalloc(sizeof(*seg), GFP_KERNEL | GFP_DMA);
if (seg == NULL) {
rc = -ENOMEM;
goto out;
@@ -287,24 +422,17 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
if (rc < 0)
goto out_free;
- rc = add_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1);
+ if (loadshr_scode == DCSS_LOADSHRX) {
+ if (segment_overlaps_others(seg)) {
+ rc = -EBUSY;
+ goto out_free;
+ }
+ }
- switch (rc) {
- case 0:
- break;
- case -ENOSPC:
- PRINT_WARN("segment_load: not loading segment %s - overlaps "
- "storage/segment\n", name);
- goto out_free;
- case -ERANGE:
- PRINT_WARN("segment_load: not loading segment %s - exceeds "
- "kernel mapping range\n", name);
- goto out_free;
- default:
- PRINT_WARN("segment_load: not loading segment %s (rc: %d)\n",
- name, rc);
+ rc = vmem_add_mapping(seg->start_addr, seg->end - seg->start_addr + 1);
+
+ if (rc)
goto out_free;
- }
seg->res = kzalloc(sizeof(struct resource), GFP_KERNEL);
if (seg->res == NULL) {
@@ -330,42 +458,47 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
}
if (do_nonshared)
- dcss_command = DCSS_LOADNSR;
+ diag_cc = dcss_diag(&loadnsr_scode, seg->dcss_name,
+ &start_addr, &end_addr);
else
- dcss_command = DCSS_LOADNOLY;
-
- diag_cc = dcss_diag(dcss_command, seg->dcss_name,
- &seg->start_addr, &seg->end);
+ diag_cc = dcss_diag(&loadshr_scode, seg->dcss_name,
+ &start_addr, &end_addr);
+ if (diag_cc < 0) {
+ dcss_diag(&purgeseg_scode, seg->dcss_name,
+ &dummy, &dummy);
+ rc = diag_cc;
+ goto out_resource;
+ }
if (diag_cc > 1) {
- PRINT_WARN ("segment_load: could not load segment %s - "
- "diag returned error (%ld)\n",name,seg->end);
- rc = dcss_diag_translate_rc (seg->end);
- dcss_diag(DCSS_PURGESEG, seg->dcss_name,
- &seg->start_addr, &seg->end);
+ pr_warning("Loading DCSS %s failed with rc=%ld\n", name,
+ end_addr);
+ rc = dcss_diag_translate_rc(end_addr);
+ dcss_diag(&purgeseg_scode, seg->dcss_name,
+ &dummy, &dummy);
goto out_resource;
}
+ seg->start_addr = start_addr;
+ seg->end = end_addr;
seg->do_nonshared = do_nonshared;
atomic_set(&seg->ref_count, 1);
list_add(&seg->list, &dcss_list);
*addr = seg->start_addr;
*end = seg->end;
if (do_nonshared)
- PRINT_INFO ("segment_load: loaded segment %s range %p .. %p "
- "type %s in non-shared mode\n", name,
- (void*)seg->start_addr, (void*)seg->end,
- segtype_string[seg->vm_segtype]);
+ pr_info("DCSS %s of range %p to %p and type %s loaded as "
+ "exclusive-writable\n", name, (void*) seg->start_addr,
+ (void*) seg->end, segtype_string[seg->vm_segtype]);
else {
- PRINT_INFO ("segment_load: loaded segment %s range %p .. %p "
- "type %s in shared mode\n", name,
- (void*)seg->start_addr, (void*)seg->end,
- segtype_string[seg->vm_segtype]);
+ pr_info("DCSS %s of range %p to %p and type %s loaded in "
+ "shared access mode\n", name, (void*) seg->start_addr,
+ (void*) seg->end, segtype_string[seg->vm_segtype]);
}
goto out;
out_resource:
release_resource(seg->res);
kfree(seg->res);
out_shared:
- remove_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1);
+ vmem_remove_mapping(seg->start_addr, seg->end - seg->start_addr + 1);
out_free:
kfree(seg);
out:
@@ -383,7 +516,7 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
* -ENOSYS : we are not running on VM
* -EIO : could not perform query or load diagnose
* -ENOENT : no such segment
- * -ENOTSUPP: multi-part segment cannot be used with linux
+ * -EOPNOTSUPP: multi-part segment cannot be used with linux
* -ENOSPC : segment cannot be used (overlaps with storage)
* -EBUSY : segment can temporarily not be used (overlaps with dcss)
* -ERANGE : segment cannot be used (exceeds kernel mapping range)
@@ -437,9 +570,10 @@ int
segment_modify_shared (char *name, int do_nonshared)
{
struct dcss_segment *seg;
- unsigned long dummy;
- int dcss_command, rc, diag_cc;
+ unsigned long start_addr, end_addr, dummy;
+ int rc, diag_cc;
+ start_addr = end_addr = 0;
mutex_lock(&dcss_lock);
seg = segment_by_name (name);
if (seg == NULL) {
@@ -447,50 +581,62 @@ segment_modify_shared (char *name, int do_nonshared)
goto out_unlock;
}
if (do_nonshared == seg->do_nonshared) {
- PRINT_INFO ("segment_modify_shared: not reloading segment %s"
- " - already in requested mode\n",name);
+ pr_info("DCSS %s is already in the requested access "
+ "mode\n", name);
rc = 0;
goto out_unlock;
}
if (atomic_read (&seg->ref_count) != 1) {
- PRINT_WARN ("segment_modify_shared: not reloading segment %s - "
- "segment is in use by other driver(s)\n",name);
+ pr_warning("DCSS %s is in use and cannot be reloaded\n",
+ name);
rc = -EAGAIN;
goto out_unlock;
}
release_resource(seg->res);
- if (do_nonshared) {
- dcss_command = DCSS_LOADNSR;
+ if (do_nonshared)
seg->res->flags &= ~IORESOURCE_READONLY;
- } else {
- dcss_command = DCSS_LOADNOLY;
+ else
if (seg->vm_segtype == SEG_TYPE_SR ||
seg->vm_segtype == SEG_TYPE_ER)
seg->res->flags |= IORESOURCE_READONLY;
- }
+
if (request_resource(&iomem_resource, seg->res)) {
- PRINT_WARN("segment_modify_shared: could not reload segment %s"
- " - overlapping resources\n", name);
+ pr_warning("DCSS %s overlaps with used memory resources "
+ "and cannot be reloaded\n", name);
rc = -EBUSY;
kfree(seg->res);
- goto out_del;
+ goto out_del_mem;
+ }
+
+ dcss_diag(&purgeseg_scode, seg->dcss_name, &dummy, &dummy);
+ if (do_nonshared)
+ diag_cc = dcss_diag(&loadnsr_scode, seg->dcss_name,
+ &start_addr, &end_addr);
+ else
+ diag_cc = dcss_diag(&loadshr_scode, seg->dcss_name,
+ &start_addr, &end_addr);
+ if (diag_cc < 0) {
+ rc = diag_cc;
+ goto out_del_res;
}
- dcss_diag(DCSS_PURGESEG, seg->dcss_name, &dummy, &dummy);
- diag_cc = dcss_diag(dcss_command, seg->dcss_name,
- &seg->start_addr, &seg->end);
if (diag_cc > 1) {
- PRINT_WARN ("segment_modify_shared: could not reload segment %s"
- " - diag returned error (%ld)\n",name,seg->end);
- rc = dcss_diag_translate_rc (seg->end);
- goto out_del;
+ pr_warning("Reloading DCSS %s failed with rc=%ld\n", name,
+ end_addr);
+ rc = dcss_diag_translate_rc(end_addr);
+ goto out_del_res;
}
+ seg->start_addr = start_addr;
+ seg->end = end_addr;
seg->do_nonshared = do_nonshared;
rc = 0;
goto out_unlock;
- out_del:
- remove_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1);
+ out_del_res:
+ release_resource(seg->res);
+ kfree(seg->res);
+ out_del_mem:
+ vmem_remove_mapping(seg->start_addr, seg->end - seg->start_addr + 1);
list_del(&seg->list);
- dcss_diag(DCSS_PURGESEG, seg->dcss_name, &dummy, &dummy);
+ dcss_diag(&purgeseg_scode, seg->dcss_name, &dummy, &dummy);
kfree(seg);
out_unlock:
mutex_unlock(&dcss_lock);
@@ -514,17 +660,16 @@ segment_unload(char *name)
mutex_lock(&dcss_lock);
seg = segment_by_name (name);
if (seg == NULL) {
- PRINT_ERR ("could not find segment %s in segment_unload, "
- "please report to linux390@de.ibm.com\n",name);
+ pr_err("Unloading unknown DCSS %s failed\n", name);
goto out_unlock;
}
if (atomic_dec_return(&seg->ref_count) != 0)
goto out_unlock;
release_resource(seg->res);
kfree(seg->res);
- remove_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1);
+ vmem_remove_mapping(seg->start_addr, seg->end - seg->start_addr + 1);
list_del(&seg->list);
- dcss_diag(DCSS_PURGESEG, seg->dcss_name, &dummy, &dummy);
+ dcss_diag(&purgeseg_scode, seg->dcss_name, &dummy, &dummy);
kfree(seg);
out_unlock:
mutex_unlock(&dcss_lock);
@@ -537,8 +682,6 @@ void
segment_save(char *name)
{
struct dcss_segment *seg;
- int startpfn = 0;
- int endpfn = 0;
char cmd1[160];
char cmd2[80];
int i, response;
@@ -550,16 +693,13 @@ segment_save(char *name)
seg = segment_by_name (name);
if (seg == NULL) {
- PRINT_ERR("could not find segment %s in segment_save, please "
- "report to linux390@de.ibm.com\n", name);
+ pr_err("Saving unknown DCSS %s failed\n", name);
goto out;
}
- startpfn = seg->start_addr >> PAGE_SHIFT;
- endpfn = (seg->end) >> PAGE_SHIFT;
sprintf(cmd1, "DEFSEG %s", name);
for (i=0; i<seg->segcnt; i++) {
- sprintf(cmd1+strlen(cmd1), " %X-%X %s",
+ sprintf(cmd1+strlen(cmd1), " %lX-%lX %s",
seg->range[i].start >> PAGE_SHIFT,
seg->range[i].end >> PAGE_SHIFT,
segtype_string[seg->range[i].start & 0xff]);
@@ -568,22 +708,70 @@ segment_save(char *name)
response = 0;
cpcmd(cmd1, NULL, 0, &response);
if (response) {
- PRINT_ERR("segment_save: DEFSEG failed with response code %i\n",
- response);
+ pr_err("Saving a DCSS failed with DEFSEG response code "
+ "%i\n", response);
goto out;
}
cpcmd(cmd2, NULL, 0, &response);
if (response) {
- PRINT_ERR("segment_save: SAVESEG failed with response code %i\n",
- response);
+ pr_err("Saving a DCSS failed with SAVESEG response code "
+ "%i\n", response);
goto out;
}
out:
mutex_unlock(&dcss_lock);
}
+/*
+ * print appropriate error message for segment_load()/segment_type()
+ * return code
+ */
+void segment_warning(int rc, char *seg_name)
+{
+ switch (rc) {
+ case -ENOENT:
+ pr_err("DCSS %s cannot be loaded or queried\n", seg_name);
+ break;
+ case -ENOSYS:
+ pr_err("DCSS %s cannot be loaded or queried without "
+ "z/VM\n", seg_name);
+ break;
+ case -EIO:
+ pr_err("Loading or querying DCSS %s resulted in a "
+ "hardware error\n", seg_name);
+ break;
+ case -EOPNOTSUPP:
+ pr_err("DCSS %s has multiple page ranges and cannot be "
+ "loaded or queried\n", seg_name);
+ break;
+ case -ENOSPC:
+ pr_err("DCSS %s overlaps with used storage and cannot "
+ "be loaded\n", seg_name);
+ break;
+ case -EBUSY:
+ pr_err("%s needs used memory resources and cannot be "
+ "loaded or queried\n", seg_name);
+ break;
+ case -EPERM:
+ pr_err("DCSS %s is already loaded in a different access "
+ "mode\n", seg_name);
+ break;
+ case -ENOMEM:
+ pr_err("There is not enough memory to load or query "
+ "DCSS %s\n", seg_name);
+ break;
+ case -ERANGE:
+ pr_err("DCSS %s exceeds the kernel mapping range (%lu) "
+ "and cannot be loaded\n", seg_name, VMEM_MAX_PHYS);
+ break;
+ default:
+ break;
+ }
+}
+
EXPORT_SYMBOL(segment_load);
EXPORT_SYMBOL(segment_unload);
EXPORT_SYMBOL(segment_save);
EXPORT_SYMBOL(segment_type);
EXPORT_SYMBOL(segment_modify_shared);
+EXPORT_SYMBOL(segment_warning);
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index ed13d429a48..3f3b35403d0 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -1,8 +1,6 @@
/*
- * arch/s390/mm/fault.c
- *
* S390 version
- * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 1999
* Author(s): Hartmut Penner (hp@de.ibm.com)
* Ulrich Weigand (uweigand@de.ibm.com)
*
@@ -10,6 +8,8 @@
* Copyright (C) 1995 Linus Torvalds
*/
+#include <linux/kernel_stat.h>
+#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
@@ -19,60 +19,64 @@
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
+#include <linux/compat.h>
#include <linux/smp.h>
#include <linux/kdebug.h>
-#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/module.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
-
-#include <asm/system.h>
+#include <linux/hugetlb.h>
+#include <asm/asm-offsets.h>
#include <asm/pgtable.h>
-#include <asm/s390_ext.h>
+#include <asm/irq.h>
#include <asm/mmu_context.h>
+#include <asm/facility.h>
+#include "../kernel/entry.h"
#ifndef CONFIG_64BIT
#define __FAIL_ADDR_MASK 0x7ffff000
-#define __FIXUP_MASK 0x7fffffff
#define __SUBCODE_MASK 0x0200
#define __PF_RES_FIELD 0ULL
#else /* CONFIG_64BIT */
#define __FAIL_ADDR_MASK -4096L
-#define __FIXUP_MASK ~0L
#define __SUBCODE_MASK 0x0600
#define __PF_RES_FIELD 0x8000000000000000ULL
#endif /* CONFIG_64BIT */
-#ifdef CONFIG_SYSCTL
-extern int sysctl_userprocess_debug;
-#endif
+#define VM_FAULT_BADCONTEXT 0x010000
+#define VM_FAULT_BADMAP 0x020000
+#define VM_FAULT_BADACCESS 0x040000
+#define VM_FAULT_SIGNAL 0x080000
+#define VM_FAULT_PFAULT 0x100000
-extern void die(const char *,struct pt_regs *,long);
+static unsigned long store_indication __read_mostly;
+
+#ifdef CONFIG_64BIT
+static int __init fault_init(void)
+{
+ if (test_facility(75))
+ store_indication = 0xc00;
+ return 0;
+}
+early_initcall(fault_init);
+#endif
-#ifdef CONFIG_KPROBES
-static inline int notify_page_fault(struct pt_regs *regs, long err)
+static inline int notify_page_fault(struct pt_regs *regs)
{
int ret = 0;
/* kprobe_running() needs smp_processor_id() */
- if (!user_mode(regs)) {
+ if (kprobes_built_in() && !user_mode(regs)) {
preempt_disable();
if (kprobe_running() && kprobe_fault_handler(regs, 14))
ret = 1;
preempt_enable();
}
-
return ret;
}
-#else
-static inline int notify_page_fault(struct pt_regs *regs, long err)
-{
- return 0;
-}
-#endif
/*
@@ -100,76 +104,197 @@ void bust_spinlocks(int yes)
/*
* Returns the address space associated with the fault.
- * Returns 0 for kernel space, 1 for user space and
- * 2 for code execution in user space with noexec=on.
+ * Returns 0 for kernel space and 1 for user space.
*/
-static inline int check_space(struct task_struct *tsk)
+static inline int user_space_fault(struct pt_regs *regs)
{
+ unsigned long trans_exc_code;
+
/*
- * The lowest two bits of S390_lowcore.trans_exc_code
- * indicate which paging table was used.
+ * The lowest two bits of the translation exception
+ * identification indicate which paging table was used.
*/
- int desc = S390_lowcore.trans_exc_code & 3;
-
- if (desc == 3) /* Home Segment Table Descriptor */
- return switch_amode == 0;
- if (desc == 2) /* Secondary Segment Table Descriptor */
- return tsk->thread.mm_segment.ar4;
-#ifdef CONFIG_S390_SWITCH_AMODE
- if (unlikely(desc == 1)) { /* STD determined via access register */
- /* %a0 always indicates primary space. */
- if (S390_lowcore.exc_access_id != 0) {
- save_access_regs(tsk->thread.acrs);
- /*
- * An alet of 0 indicates primary space.
- * An alet of 1 indicates secondary space.
- * Any other alet values generate an
- * alen-translation exception.
- */
- if (tsk->thread.acrs[S390_lowcore.exc_access_id])
- return tsk->thread.mm_segment.ar4;
- }
+ trans_exc_code = regs->int_parm_long & 3;
+ if (trans_exc_code == 3) /* home space -> kernel */
+ return 0;
+ if (user_mode(regs))
+ return 1;
+ if (trans_exc_code == 2) /* secondary space -> set_fs */
+ return current->thread.mm_segment.ar4;
+ if (current->flags & PF_VCPU)
+ return 1;
+ return 0;
+}
+
+static int bad_address(void *p)
+{
+ unsigned long dummy;
+
+ return probe_kernel_address((unsigned long *)p, dummy);
+}
+
+#ifdef CONFIG_64BIT
+static void dump_pagetable(unsigned long asce, unsigned long address)
+{
+ unsigned long *table = __va(asce & PAGE_MASK);
+
+ pr_alert("AS:%016lx ", asce);
+ switch (asce & _ASCE_TYPE_MASK) {
+ case _ASCE_TYPE_REGION1:
+ table = table + ((address >> 53) & 0x7ff);
+ if (bad_address(table))
+ goto bad;
+ pr_cont("R1:%016lx ", *table);
+ if (*table & _REGION_ENTRY_INVALID)
+ goto out;
+ table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+ /* fallthrough */
+ case _ASCE_TYPE_REGION2:
+ table = table + ((address >> 42) & 0x7ff);
+ if (bad_address(table))
+ goto bad;
+ pr_cont("R2:%016lx ", *table);
+ if (*table & _REGION_ENTRY_INVALID)
+ goto out;
+ table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+ /* fallthrough */
+ case _ASCE_TYPE_REGION3:
+ table = table + ((address >> 31) & 0x7ff);
+ if (bad_address(table))
+ goto bad;
+ pr_cont("R3:%016lx ", *table);
+ if (*table & (_REGION_ENTRY_INVALID | _REGION3_ENTRY_LARGE))
+ goto out;
+ table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+ /* fallthrough */
+ case _ASCE_TYPE_SEGMENT:
+ table = table + ((address >> 20) & 0x7ff);
+ if (bad_address(table))
+ goto bad;
+ pr_cont(KERN_CONT "S:%016lx ", *table);
+ if (*table & (_SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_LARGE))
+ goto out;
+ table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
+ }
+ table = table + ((address >> 12) & 0xff);
+ if (bad_address(table))
+ goto bad;
+ pr_cont("P:%016lx ", *table);
+out:
+ pr_cont("\n");
+ return;
+bad:
+ pr_cont("BAD\n");
+}
+
+#else /* CONFIG_64BIT */
+
+static void dump_pagetable(unsigned long asce, unsigned long address)
+{
+ unsigned long *table = __va(asce & PAGE_MASK);
+
+ pr_alert("AS:%08lx ", asce);
+ table = table + ((address >> 20) & 0x7ff);
+ if (bad_address(table))
+ goto bad;
+ pr_cont("S:%08lx ", *table);
+ if (*table & _SEGMENT_ENTRY_INVALID)
+ goto out;
+ table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
+ table = table + ((address >> 12) & 0xff);
+ if (bad_address(table))
+ goto bad;
+ pr_cont("P:%08lx ", *table);
+out:
+ pr_cont("\n");
+ return;
+bad:
+ pr_cont("BAD\n");
+}
+
+#endif /* CONFIG_64BIT */
+
+static void dump_fault_info(struct pt_regs *regs)
+{
+ unsigned long asce;
+
+ pr_alert("Fault in ");
+ switch (regs->int_parm_long & 3) {
+ case 3:
+ pr_cont("home space ");
+ break;
+ case 2:
+ pr_cont("secondary space ");
+ break;
+ case 1:
+ pr_cont("access register ");
+ break;
+ case 0:
+ pr_cont("primary space ");
+ break;
+ }
+ pr_cont("mode while using ");
+ if (!user_space_fault(regs)) {
+ asce = S390_lowcore.kernel_asce;
+ pr_cont("kernel ");
+ }
+#ifdef CONFIG_PGSTE
+ else if ((current->flags & PF_VCPU) && S390_lowcore.gmap) {
+ struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;
+ asce = gmap->asce;
+ pr_cont("gmap ");
}
#endif
- /* Primary Segment Table Descriptor */
- return switch_amode << s390_noexec;
+ else {
+ asce = S390_lowcore.user_asce;
+ pr_cont("user ");
+ }
+ pr_cont("ASCE.\n");
+ dump_pagetable(asce, regs->int_parm_long & __FAIL_ADDR_MASK);
+}
+
+static inline void report_user_fault(struct pt_regs *regs, long signr)
+{
+ if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
+ return;
+ if (!unhandled_signal(current, signr))
+ return;
+ if (!printk_ratelimit())
+ return;
+ printk(KERN_ALERT "User process fault: interruption code 0x%X ",
+ regs->int_code);
+ print_vma_addr(KERN_CONT "in ", regs->psw.addr & PSW_ADDR_INSN);
+ printk(KERN_CONT "\n");
+ printk(KERN_ALERT "failing address: %016lx TEID: %016lx\n",
+ regs->int_parm_long & __FAIL_ADDR_MASK, regs->int_parm_long);
+ dump_fault_info(regs);
+ show_regs(regs);
}
/*
* Send SIGSEGV to task. This is an external routine
* to keep the stack usage of do_page_fault small.
*/
-static void do_sigsegv(struct pt_regs *regs, unsigned long error_code,
- int si_code, unsigned long address)
+static noinline void do_sigsegv(struct pt_regs *regs, int si_code)
{
struct siginfo si;
-#if defined(CONFIG_SYSCTL) || defined(CONFIG_PROCESS_DEBUG)
-#if defined(CONFIG_SYSCTL)
- if (sysctl_userprocess_debug)
-#endif
- {
- printk("User process fault: interruption code 0x%lX\n",
- error_code);
- printk("failing address: %lX\n", address);
- show_regs(regs);
- }
-#endif
+ report_user_fault(regs, SIGSEGV);
si.si_signo = SIGSEGV;
si.si_code = si_code;
- si.si_addr = (void __user *) address;
+ si.si_addr = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK);
force_sig_info(SIGSEGV, &si, current);
}
-static void do_no_context(struct pt_regs *regs, unsigned long error_code,
- unsigned long address)
+static noinline void do_no_context(struct pt_regs *regs)
{
const struct exception_table_entry *fixup;
+ unsigned long address;
/* Are we prepared to handle this kernel fault? */
- fixup = search_exception_tables(regs->psw.addr & __FIXUP_MASK);
+ fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
if (fixup) {
- regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE;
+ regs->psw.addr = extable_fixup(fixup) | PSW_ADDR_AMODE;
return;
}
@@ -177,319 +302,283 @@ static void do_no_context(struct pt_regs *regs, unsigned long error_code,
* Oops. The kernel tried to access some bad page. We'll have to
* terminate things with extreme prejudice.
*/
- if (check_space(current) == 0)
+ address = regs->int_parm_long & __FAIL_ADDR_MASK;
+ if (!user_space_fault(regs))
printk(KERN_ALERT "Unable to handle kernel pointer dereference"
- " at virtual kernel address %p\n", (void *)address);
+ " in virtual kernel address space\n");
else
printk(KERN_ALERT "Unable to handle kernel paging request"
- " at virtual user address %p\n", (void *)address);
-
- die("Oops", regs, error_code);
+ " in virtual user address space\n");
+ printk(KERN_ALERT "failing address: %016lx TEID: %016lx\n",
+ regs->int_parm_long & __FAIL_ADDR_MASK, regs->int_parm_long);
+ dump_fault_info(regs);
+ die(regs, "Oops");
do_exit(SIGKILL);
}
-static void do_low_address(struct pt_regs *regs, unsigned long error_code)
+static noinline void do_low_address(struct pt_regs *regs)
{
/* Low-address protection hit in kernel mode means
NULL pointer write access in kernel mode. */
if (regs->psw.mask & PSW_MASK_PSTATE) {
/* Low-address protection hit in user mode 'cannot happen'. */
- die ("Low-address protection", regs, error_code);
+ die (regs, "Low-address protection");
do_exit(SIGKILL);
}
- do_no_context(regs, error_code, 0);
+ do_no_context(regs);
}
-/*
- * We ran out of memory, or some other thing happened to us that made
- * us unable to handle the page fault gracefully.
- */
-static int do_out_of_memory(struct pt_regs *regs, unsigned long error_code,
- unsigned long address)
+static noinline void do_sigbus(struct pt_regs *regs)
{
struct task_struct *tsk = current;
- struct mm_struct *mm = tsk->mm;
-
- up_read(&mm->mmap_sem);
- if (is_global_init(tsk)) {
- yield();
- down_read(&mm->mmap_sem);
- return 1;
- }
- printk("VM: killing process %s\n", tsk->comm);
- if (regs->psw.mask & PSW_MASK_PSTATE)
- do_group_exit(SIGKILL);
- do_no_context(regs, error_code, address);
- return 0;
-}
-
-static void do_sigbus(struct pt_regs *regs, unsigned long error_code,
- unsigned long address)
-{
- struct task_struct *tsk = current;
- struct mm_struct *mm = tsk->mm;
+ struct siginfo si;
- up_read(&mm->mmap_sem);
/*
* Send a sigbus, regardless of whether we were in kernel
* or user mode.
*/
- tsk->thread.prot_addr = address;
- tsk->thread.trap_no = error_code;
- force_sig(SIGBUS, tsk);
-
- /* Kernel mode? Handle exceptions or die */
- if (!(regs->psw.mask & PSW_MASK_PSTATE))
- do_no_context(regs, error_code, address);
+ si.si_signo = SIGBUS;
+ si.si_errno = 0;
+ si.si_code = BUS_ADRERR;
+ si.si_addr = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK);
+ force_sig_info(SIGBUS, &si, tsk);
}
-#ifdef CONFIG_S390_EXEC_PROTECT
-extern long sys_sigreturn(struct pt_regs *regs);
-extern long sys_rt_sigreturn(struct pt_regs *regs);
-extern long sys32_sigreturn(struct pt_regs *regs);
-extern long sys32_rt_sigreturn(struct pt_regs *regs);
-
-static int signal_return(struct mm_struct *mm, struct pt_regs *regs,
- unsigned long address, unsigned long error_code)
+static noinline void do_fault_error(struct pt_regs *regs, int fault)
{
- u16 instruction;
- int rc;
-#ifdef CONFIG_COMPAT
- int compat;
-#endif
-
- pagefault_disable();
- rc = __get_user(instruction, (u16 __user *) regs->psw.addr);
- pagefault_enable();
- if (rc)
- return -EFAULT;
+ int si_code;
- up_read(&mm->mmap_sem);
- clear_tsk_thread_flag(current, TIF_SINGLE_STEP);
-#ifdef CONFIG_COMPAT
- compat = test_tsk_thread_flag(current, TIF_31BIT);
- if (compat && instruction == 0x0a77)
- sys32_sigreturn(regs);
- else if (compat && instruction == 0x0aad)
- sys32_rt_sigreturn(regs);
- else
-#endif
- if (instruction == 0x0a77)
- sys_sigreturn(regs);
- else if (instruction == 0x0aad)
- sys_rt_sigreturn(regs);
- else {
- current->thread.prot_addr = address;
- current->thread.trap_no = error_code;
- do_sigsegv(regs, error_code, SEGV_MAPERR, address);
+ switch (fault) {
+ case VM_FAULT_BADACCESS:
+ case VM_FAULT_BADMAP:
+ /* Bad memory access. Check if it is kernel or user space. */
+ if (user_mode(regs)) {
+ /* User mode accesses just cause a SIGSEGV */
+ si_code = (fault == VM_FAULT_BADMAP) ?
+ SEGV_MAPERR : SEGV_ACCERR;
+ do_sigsegv(regs, si_code);
+ return;
+ }
+ case VM_FAULT_BADCONTEXT:
+ case VM_FAULT_PFAULT:
+ do_no_context(regs);
+ break;
+ case VM_FAULT_SIGNAL:
+ if (!user_mode(regs))
+ do_no_context(regs);
+ break;
+ default: /* fault & VM_FAULT_ERROR */
+ if (fault & VM_FAULT_OOM) {
+ if (!user_mode(regs))
+ do_no_context(regs);
+ else
+ pagefault_out_of_memory();
+ } else if (fault & VM_FAULT_SIGBUS) {
+ /* Kernel mode? Handle exceptions or die */
+ if (!user_mode(regs))
+ do_no_context(regs);
+ else
+ do_sigbus(regs);
+ } else
+ BUG();
+ break;
}
- return 0;
}
-#endif /* CONFIG_S390_EXEC_PROTECT */
/*
* This routine handles page faults. It determines the address,
* and the problem, and then passes it off to one of the appropriate
* routines.
*
- * error_code:
+ * interruption code (int_code):
 * 04 Protection -> Write-Protection (suppression)
* 10 Segment translation -> Not present (nullification)
* 11 Page translation -> Not present (nullification)
* 3b Region third trans. -> Not present (nullification)
*/
-static inline void
-do_exception(struct pt_regs *regs, unsigned long error_code, int write)
+static inline int do_exception(struct pt_regs *regs, int access)
{
+#ifdef CONFIG_PGSTE
+ struct gmap *gmap;
+#endif
struct task_struct *tsk;
struct mm_struct *mm;
struct vm_area_struct *vma;
+ unsigned long trans_exc_code;
unsigned long address;
- int space;
- int si_code;
+ unsigned int flags;
int fault;
- if (notify_page_fault(regs, error_code))
- return;
-
tsk = current;
- mm = tsk->mm;
+ /*
+ * The instruction that caused the program check has
+ * been nullified. Don't signal single step via SIGTRAP.
+ */
+ clear_pt_regs_flag(regs, PIF_PER_TRAP);
- /* get the failing address and the affected space */
- address = S390_lowcore.trans_exc_code & __FAIL_ADDR_MASK;
- space = check_space(tsk);
+ if (notify_page_fault(regs))
+ return 0;
+
+ mm = tsk->mm;
+ trans_exc_code = regs->int_parm_long;
/*
* Verify that the fault happened in user space, that
* we are not in an interrupt and that there is a
* user context.
*/
- if (unlikely(space == 0 || in_atomic() || !mm))
- goto no_context;
-
- /*
- * When we get here, the fault happened in the current
- * task's user address space, so we can switch on the
- * interrupts again and then search the VMAs
- */
- local_irq_enable();
-
+ fault = VM_FAULT_BADCONTEXT;
+ if (unlikely(!user_space_fault(regs) || in_atomic() || !mm))
+ goto out;
+
+ address = trans_exc_code & __FAIL_ADDR_MASK;
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+ flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+ if (user_mode(regs))
+ flags |= FAULT_FLAG_USER;
+ if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
+ flags |= FAULT_FLAG_WRITE;
down_read(&mm->mmap_sem);
- si_code = SEGV_MAPERR;
+#ifdef CONFIG_PGSTE
+ gmap = (struct gmap *)
+ ((current->flags & PF_VCPU) ? S390_lowcore.gmap : 0);
+ if (gmap) {
+ address = __gmap_fault(address, gmap);
+ if (address == -EFAULT) {
+ fault = VM_FAULT_BADMAP;
+ goto out_up;
+ }
+ if (address == -ENOMEM) {
+ fault = VM_FAULT_OOM;
+ goto out_up;
+ }
+ if (gmap->pfault_enabled)
+ flags |= FAULT_FLAG_RETRY_NOWAIT;
+ }
+#endif
+
+retry:
+ fault = VM_FAULT_BADMAP;
vma = find_vma(mm, address);
if (!vma)
- goto bad_area;
-
-#ifdef CONFIG_S390_EXEC_PROTECT
- if (unlikely((space == 2) && !(vma->vm_flags & VM_EXEC)))
- if (!signal_return(mm, regs, address, error_code))
- /*
- * signal_return() has done an up_read(&mm->mmap_sem)
- * if it returns 0.
- */
- return;
-#endif
+ goto out_up;
- if (vma->vm_start <= address)
- goto good_area;
- if (!(vma->vm_flags & VM_GROWSDOWN))
- goto bad_area;
- if (expand_stack(vma, address))
- goto bad_area;
-/*
- * Ok, we have a good vm_area for this memory access, so
- * we can handle it..
- */
-good_area:
- si_code = SEGV_ACCERR;
- if (!write) {
- /* page not present, check vm flags */
- if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
- goto bad_area;
- } else {
- if (!(vma->vm_flags & VM_WRITE))
- goto bad_area;
+ if (unlikely(vma->vm_start > address)) {
+ if (!(vma->vm_flags & VM_GROWSDOWN))
+ goto out_up;
+ if (expand_stack(vma, address))
+ goto out_up;
}
-survive:
+ /*
+ * Ok, we have a good vm_area for this memory access, so
+ * we can handle it..
+ */
+ fault = VM_FAULT_BADACCESS;
+ if (unlikely(!(vma->vm_flags & access)))
+ goto out_up;
+
+ if (is_vm_hugetlb_page(vma))
+ address &= HPAGE_MASK;
/*
* If for any reason at all we couldn't handle the fault,
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
- fault = handle_mm_fault(mm, vma, address, write);
- if (unlikely(fault & VM_FAULT_ERROR)) {
- if (fault & VM_FAULT_OOM) {
- if (do_out_of_memory(regs, error_code, address))
- goto survive;
- return;
- } else if (fault & VM_FAULT_SIGBUS) {
- do_sigbus(regs, error_code, address);
- return;
- }
- BUG();
+ fault = handle_mm_fault(mm, vma, address, flags);
+ /* No reason to continue if interrupted by SIGKILL. */
+ if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
+ fault = VM_FAULT_SIGNAL;
+ goto out;
}
- if (fault & VM_FAULT_MAJOR)
- tsk->maj_flt++;
- else
- tsk->min_flt++;
+ if (unlikely(fault & VM_FAULT_ERROR))
+ goto out_up;
- up_read(&mm->mmap_sem);
/*
- * The instruction that caused the program check will
- * be repeated. Don't signal single step via SIGTRAP.
+ * Major/minor page fault accounting is only done on the
+ * initial attempt. If we go through a retry, it is extremely
+ * likely that the page will be found in page cache at that point.
*/
- clear_tsk_thread_flag(tsk, TIF_SINGLE_STEP);
- return;
-
-/*
- * Something tried to access memory that isn't in our memory map..
- * Fix it, but check if it's kernel or user first..
- */
-bad_area:
- up_read(&mm->mmap_sem);
-
- /* User mode accesses just cause a SIGSEGV */
- if (regs->psw.mask & PSW_MASK_PSTATE) {
- tsk->thread.prot_addr = address;
- tsk->thread.trap_no = error_code;
- do_sigsegv(regs, error_code, si_code, address);
- return;
+ if (flags & FAULT_FLAG_ALLOW_RETRY) {
+ if (fault & VM_FAULT_MAJOR) {
+ tsk->maj_flt++;
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
+ regs, address);
+ } else {
+ tsk->min_flt++;
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
+ regs, address);
+ }
+ if (fault & VM_FAULT_RETRY) {
+#ifdef CONFIG_PGSTE
+ if (gmap && (flags & FAULT_FLAG_RETRY_NOWAIT)) {
+ /* FAULT_FLAG_RETRY_NOWAIT has been set,
+ * mmap_sem has not been released */
+ current->thread.gmap_pfault = 1;
+ fault = VM_FAULT_PFAULT;
+ goto out_up;
+ }
+#endif
+ /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
+ * of starvation. */
+ flags &= ~(FAULT_FLAG_ALLOW_RETRY |
+ FAULT_FLAG_RETRY_NOWAIT);
+ flags |= FAULT_FLAG_TRIED;
+ down_read(&mm->mmap_sem);
+ goto retry;
+ }
}
-
-no_context:
- do_no_context(regs, error_code, address);
+ fault = 0;
+out_up:
+ up_read(&mm->mmap_sem);
+out:
+ return fault;
}
-void __kprobes do_protection_exception(struct pt_regs *regs,
- unsigned long error_code)
+void __kprobes do_protection_exception(struct pt_regs *regs)
{
- /* Protection exception is supressing, decrement psw address. */
- regs->psw.addr -= (error_code >> 16);
+ unsigned long trans_exc_code;
+ int fault;
+
+ trans_exc_code = regs->int_parm_long;
+ /*
+ * Protection exceptions are suppressing, decrement psw address.
+	 * The exceptions to this rule are aborted transactions; for these
+ * the PSW already points to the correct location.
+ */
+ if (!(regs->int_code & 0x200))
+ regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
/*
* Check for low-address protection. This needs to be treated
* as a special case because the translation exception code
* field is not guaranteed to contain valid data in this case.
*/
- if (unlikely(!(S390_lowcore.trans_exc_code & 4))) {
- do_low_address(regs, error_code);
+ if (unlikely(!(trans_exc_code & 4))) {
+ do_low_address(regs);
return;
}
- do_exception(regs, 4, 1);
+ fault = do_exception(regs, VM_WRITE);
+ if (unlikely(fault))
+ do_fault_error(regs, fault);
}
-void __kprobes do_dat_exception(struct pt_regs *regs, unsigned long error_code)
+void __kprobes do_dat_exception(struct pt_regs *regs)
{
- do_exception(regs, error_code & 0xff, 0);
-}
-
-#ifdef CONFIG_64BIT
-void __kprobes do_asce_exception(struct pt_regs *regs, unsigned long error_code)
-{
- struct mm_struct *mm;
- struct vm_area_struct *vma;
- unsigned long address;
- int space;
+ int access, fault;
- mm = current->mm;
- address = S390_lowcore.trans_exc_code & __FAIL_ADDR_MASK;
- space = check_space(current);
-
- if (unlikely(space == 0 || in_atomic() || !mm))
- goto no_context;
-
- local_irq_enable();
-
- down_read(&mm->mmap_sem);
- vma = find_vma(mm, address);
- up_read(&mm->mmap_sem);
-
- if (vma) {
- update_mm(mm, current);
- return;
- }
-
- /* User mode accesses just cause a SIGSEGV */
- if (regs->psw.mask & PSW_MASK_PSTATE) {
- current->thread.prot_addr = address;
- current->thread.trap_no = error_code;
- do_sigsegv(regs, error_code, SEGV_MAPERR, address);
- return;
- }
-
-no_context:
- do_no_context(regs, error_code, address);
+ access = VM_READ | VM_EXEC | VM_WRITE;
+ fault = do_exception(regs, access);
+ if (unlikely(fault))
+ do_fault_error(regs, fault);
}
-#endif
#ifdef CONFIG_PFAULT
/*
 * 'pfault' pseudo page fault routines.
*/
-static ext_int_info_t ext_int_pfault;
-static int pfault_disable = 0;
+static int pfault_disable;
static int __init nopfault(char *str)
{
@@ -499,25 +588,31 @@ static int __init nopfault(char *str)
__setup("nopfault", nopfault);
-typedef struct {
- __u16 refdiagc;
- __u16 reffcode;
- __u16 refdwlen;
- __u16 refversn;
- __u64 refgaddr;
- __u64 refselmk;
- __u64 refcmpmk;
- __u64 reserved;
-} __attribute__ ((packed, aligned(8))) pfault_refbk_t;
+struct pfault_refbk {
+ u16 refdiagc;
+ u16 reffcode;
+ u16 refdwlen;
+ u16 refversn;
+ u64 refgaddr;
+ u64 refselmk;
+ u64 refcmpmk;
+ u64 reserved;
+} __attribute__ ((packed, aligned(8)));
int pfault_init(void)
{
- pfault_refbk_t refbk =
- { 0x258, 0, 5, 2, __LC_CURRENT, 1ULL << 48, 1ULL << 48,
- __PF_RES_FIELD };
+ struct pfault_refbk refbk = {
+ .refdiagc = 0x258,
+ .reffcode = 0,
+ .refdwlen = 5,
+ .refversn = 2,
+ .refgaddr = __LC_CURRENT_PID,
+ .refselmk = 1ULL << 48,
+ .refcmpmk = 1ULL << 48,
+ .reserved = __PF_RES_FIELD };
int rc;
- if (!MACHINE_IS_VM || pfault_disable)
+ if (pfault_disable)
return -1;
asm volatile(
" diag %1,%0,0x258\n"
@@ -526,18 +621,20 @@ int pfault_init(void)
"2:\n"
EX_TABLE(0b,1b)
: "=d" (rc) : "a" (&refbk), "m" (refbk) : "cc");
- __ctl_set_bit(0, 9);
return rc;
}
void pfault_fini(void)
{
- pfault_refbk_t refbk =
- { 0x258, 1, 5, 2, 0ULL, 0ULL, 0ULL, 0ULL };
-
- if (!MACHINE_IS_VM || pfault_disable)
+ struct pfault_refbk refbk = {
+ .refdiagc = 0x258,
+ .reffcode = 1,
+ .refdwlen = 5,
+ .refversn = 2,
+ };
+
+ if (pfault_disable)
return;
- __ctl_clear_bit(0,9);
asm volatile(
" diag %0,0,0x258\n"
"0:\n"
@@ -545,10 +642,15 @@ void pfault_fini(void)
: : "a" (&refbk), "m" (refbk) : "cc");
}
-static void pfault_interrupt(__u16 error_code)
+static DEFINE_SPINLOCK(pfault_lock);
+static LIST_HEAD(pfault_list);
+
+static void pfault_interrupt(struct ext_code ext_code,
+ unsigned int param32, unsigned long param64)
{
struct task_struct *tsk;
__u16 subcode;
+ pid_t pid;
/*
* Get the external interruption subcode & pfault
@@ -556,63 +658,118 @@ static void pfault_interrupt(__u16 error_code)
* in the 'cpu address' field associated with the
* external interrupt.
*/
- subcode = S390_lowcore.cpu_addr;
+ subcode = ext_code.subcode;
if ((subcode & 0xff00) != __SUBCODE_MASK)
return;
-
- /*
- * Get the token (= address of the task structure of the affected task).
- */
- tsk = *(struct task_struct **) __LC_PFAULT_INTPARM;
-
+ inc_irq_stat(IRQEXT_PFL);
+ /* Get the token (= pid of the affected task). */
+ pid = sizeof(void *) == 4 ? param32 : param64;
+ rcu_read_lock();
+ tsk = find_task_by_pid_ns(pid, &init_pid_ns);
+ if (tsk)
+ get_task_struct(tsk);
+ rcu_read_unlock();
+ if (!tsk)
+ return;
+ spin_lock(&pfault_lock);
if (subcode & 0x0080) {
/* signal bit is set -> a page has been swapped in by VM */
- if (xchg(&tsk->thread.pfault_wait, -1) != 0) {
+ if (tsk->thread.pfault_wait == 1) {
/* Initial interrupt was faster than the completion
* interrupt. pfault_wait is valid. Set pfault_wait
* back to zero and wake up the process. This can
* safely be done because the task is still sleeping
* and can't produce new pfaults. */
tsk->thread.pfault_wait = 0;
+ list_del(&tsk->thread.list);
wake_up_process(tsk);
put_task_struct(tsk);
+ } else {
+ /* Completion interrupt was faster than initial
+ * interrupt. Set pfault_wait to -1 so the initial
+ * interrupt doesn't put the task to sleep.
+ * If the task is not running, ignore the completion
+ * interrupt since it must be a leftover of a PFAULT
+ * CANCEL operation which didn't remove all pending
+ * completion interrupts. */
+ if (tsk->state == TASK_RUNNING)
+ tsk->thread.pfault_wait = -1;
}
} else {
/* signal bit not set -> a real page is missing. */
- get_task_struct(tsk);
- set_task_state(tsk, TASK_UNINTERRUPTIBLE);
- if (xchg(&tsk->thread.pfault_wait, 1) != 0) {
+ if (WARN_ON_ONCE(tsk != current))
+ goto out;
+ if (tsk->thread.pfault_wait == 1) {
+ /* Already on the list with a reference: put to sleep */
+ __set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+ set_tsk_need_resched(tsk);
+ } else if (tsk->thread.pfault_wait == -1) {
/* Completion interrupt was faster than the initial
- * interrupt (swapped in a -1 for pfault_wait). Set
- * pfault_wait back to zero and exit. This can be
- * done safely because tsk is running in kernel
- * mode and can't produce new pfaults. */
+ * interrupt (pfault_wait == -1). Set pfault_wait
+ * back to zero and exit. */
tsk->thread.pfault_wait = 0;
- set_task_state(tsk, TASK_RUNNING);
- put_task_struct(tsk);
- } else
+ } else {
+ /* Initial interrupt arrived before completion
+ * interrupt. Let the task sleep.
+ * An extra task reference is needed since a different
+ * cpu may set the task state to TASK_RUNNING again
+ * before the scheduler is reached. */
+ get_task_struct(tsk);
+ tsk->thread.pfault_wait = 1;
+ list_add(&tsk->thread.list, &pfault_list);
+ __set_task_state(tsk, TASK_UNINTERRUPTIBLE);
set_tsk_need_resched(tsk);
+ }
}
+out:
+ spin_unlock(&pfault_lock);
+ put_task_struct(tsk);
}
-void __init pfault_irq_init(void)
+static int pfault_cpu_notify(struct notifier_block *self, unsigned long action,
+ void *hcpu)
{
- if (!MACHINE_IS_VM)
- return;
+ struct thread_struct *thread, *next;
+ struct task_struct *tsk;
- /*
- * Try to get pfault pseudo page faults going.
- */
- if (register_early_external_interrupt(0x2603, pfault_interrupt,
- &ext_int_pfault) != 0)
- panic("Couldn't request external interrupt 0x2603");
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_DEAD:
+ spin_lock_irq(&pfault_lock);
+ list_for_each_entry_safe(thread, next, &pfault_list, list) {
+ thread->pfault_wait = 0;
+ list_del(&thread->list);
+ tsk = container_of(thread, struct task_struct, thread);
+ wake_up_process(tsk);
+ put_task_struct(tsk);
+ }
+ spin_unlock_irq(&pfault_lock);
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
- if (pfault_init() == 0)
- return;
+static int __init pfault_irq_init(void)
+{
+ int rc;
- /* Tough luck, no pfault. */
+ rc = register_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
+ if (rc)
+ goto out_extint;
+ rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP;
+ if (rc)
+ goto out_pfault;
+ irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
+ hotcpu_notifier(pfault_cpu_notify, 0);
+ return 0;
+
+out_pfault:
+ unregister_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
+out_extint:
pfault_disable = 1;
- unregister_early_external_interrupt(0x2603, pfault_interrupt,
- &ext_int_pfault);
+ return rc;
}
-#endif
+early_initcall(pfault_irq_init);
+
+#endif /* CONFIG_PFAULT */
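Note on the new calling convention: do_exception() above now returns a fault code instead of signalling errors itself, and its callers (do_protection_exception(), do_dat_exception()) hand anything non-zero to do_fault_error(), which is defined in an earlier part of fault.c and not shown in this hunk. The sketch below only illustrates how such a dispatcher can consume the special VM_FAULT_* codes; example_fault_error() is a hypothetical name, not the kernel's implementation.

/* Illustrative sketch only -- not the actual do_fault_error(). */
static void example_fault_error(struct pt_regs *regs, int fault)
{
	switch (fault) {
	case VM_FAULT_BADMAP:		/* no vma for the faulting address */
	case VM_FAULT_BADACCESS:	/* vma found, access type not allowed */
		if (user_mode(regs)) {
			force_sig(SIGSEGV, current);	/* user space: SIGSEGV */
			return;
		}
		/* kernel fault: exception table fixup or oops */
		break;
	case VM_FAULT_BADCONTEXT:	/* in_atomic(), missing mm, ... */
	case VM_FAULT_SIGNAL:		/* interrupted by a fatal signal */
		break;
	default:			/* VM_FAULT_OOM, VM_FAULT_SIGBUS, ... */
		break;
	}
}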
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
new file mode 100644
index 00000000000..639fce46400
--- /dev/null
+++ b/arch/s390/mm/gup.c
@@ -0,0 +1,246 @@
+/*
+ * Lockless get_user_pages_fast for s390
+ *
+ * Copyright IBM Corp. 2010
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/hugetlb.h>
+#include <linux/vmstat.h>
+#include <linux/pagemap.h>
+#include <linux/rwsem.h>
+#include <asm/pgtable.h>
+
+/*
+ * The performance critical leaf functions are made noinline; otherwise gcc
+ * inlines everything into a single function, which results in too much
+ * register pressure.
+ */
+static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
+ unsigned long end, int write, struct page **pages, int *nr)
+{
+ unsigned long mask;
+ pte_t *ptep, pte;
+ struct page *page;
+
+ mask = (write ? _PAGE_PROTECT : 0) | _PAGE_INVALID | _PAGE_SPECIAL;
+
+ ptep = ((pte_t *) pmd_deref(pmd)) + pte_index(addr);
+ do {
+ pte = *ptep;
+ barrier();
+ if ((pte_val(pte) & mask) != 0)
+ return 0;
+ VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
+ page = pte_page(pte);
+ if (!page_cache_get_speculative(page))
+ return 0;
+ if (unlikely(pte_val(pte) != pte_val(*ptep))) {
+ put_page(page);
+ return 0;
+ }
+ pages[*nr] = page;
+ (*nr)++;
+
+ } while (ptep++, addr += PAGE_SIZE, addr != end);
+
+ return 1;
+}
+
+static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
+ unsigned long end, int write, struct page **pages, int *nr)
+{
+ unsigned long mask, result;
+ struct page *head, *page, *tail;
+ int refs;
+
+ result = write ? 0 : _SEGMENT_ENTRY_PROTECT;
+ mask = result | _SEGMENT_ENTRY_INVALID;
+ if ((pmd_val(pmd) & mask) != result)
+ return 0;
+ VM_BUG_ON(!pfn_valid(pmd_val(pmd) >> PAGE_SHIFT));
+
+ refs = 0;
+ head = pmd_page(pmd);
+ page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
+ tail = page;
+ do {
+ VM_BUG_ON(compound_head(page) != head);
+ pages[*nr] = page;
+ (*nr)++;
+ page++;
+ refs++;
+ } while (addr += PAGE_SIZE, addr != end);
+
+ if (!page_cache_add_speculative(head, refs)) {
+ *nr -= refs;
+ return 0;
+ }
+
+ if (unlikely(pmd_val(pmd) != pmd_val(*pmdp))) {
+ *nr -= refs;
+ while (refs--)
+ put_page(head);
+ return 0;
+ }
+
+ /*
+	 * Any tail pages need their mapcount reference taken before we
+ * return.
+ */
+ while (refs--) {
+ if (PageTail(tail))
+ get_huge_page_tail(tail);
+ tail++;
+ }
+
+ return 1;
+}
+
+
+static inline int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr,
+ unsigned long end, int write, struct page **pages, int *nr)
+{
+ unsigned long next;
+ pmd_t *pmdp, pmd;
+
+ pmdp = (pmd_t *) pudp;
+#ifdef CONFIG_64BIT
+ if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
+ pmdp = (pmd_t *) pud_deref(pud);
+ pmdp += pmd_index(addr);
+#endif
+ do {
+ pmd = *pmdp;
+ barrier();
+ next = pmd_addr_end(addr, end);
+ /*
+ * The pmd_trans_splitting() check below explains why
+ * pmdp_splitting_flush() has to serialize with
+ * smp_call_function() against our disabled IRQs, to stop
+ * this gup-fast code from running while we set the
+ * splitting bit in the pmd. Returning zero will take
+ * the slow path that will call wait_split_huge_page()
+ * if the pmd is still in splitting state.
+ */
+ if (pmd_none(pmd) || pmd_trans_splitting(pmd))
+ return 0;
+ if (unlikely(pmd_large(pmd))) {
+ if (!gup_huge_pmd(pmdp, pmd, addr, next,
+ write, pages, nr))
+ return 0;
+ } else if (!gup_pte_range(pmdp, pmd, addr, next,
+ write, pages, nr))
+ return 0;
+ } while (pmdp++, addr = next, addr != end);
+
+ return 1;
+}
+
+static inline int gup_pud_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr,
+ unsigned long end, int write, struct page **pages, int *nr)
+{
+ unsigned long next;
+ pud_t *pudp, pud;
+
+ pudp = (pud_t *) pgdp;
+#ifdef CONFIG_64BIT
+ if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
+ pudp = (pud_t *) pgd_deref(pgd);
+ pudp += pud_index(addr);
+#endif
+ do {
+ pud = *pudp;
+ barrier();
+ next = pud_addr_end(addr, end);
+ if (pud_none(pud))
+ return 0;
+ if (!gup_pmd_range(pudp, pud, addr, next, write, pages, nr))
+ return 0;
+ } while (pudp++, addr = next, addr != end);
+
+ return 1;
+}
+
+/*
+ * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
+ * back to the regular GUP.
+ */
+int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
+ struct page **pages)
+{
+ struct mm_struct *mm = current->mm;
+ unsigned long addr, len, end;
+ unsigned long next, flags;
+ pgd_t *pgdp, pgd;
+ int nr = 0;
+
+ start &= PAGE_MASK;
+ addr = start;
+ len = (unsigned long) nr_pages << PAGE_SHIFT;
+ end = start + len;
+ if ((end <= start) || (end > TASK_SIZE))
+ return 0;
+ /*
+ * local_irq_save() doesn't prevent pagetable teardown, but does
+ * prevent the pagetables from being freed on s390.
+ *
+ * So long as we atomically load page table pointers versus teardown,
+	 * we can follow the address down to the page and take a ref on it.
+ */
+ local_irq_save(flags);
+ pgdp = pgd_offset(mm, addr);
+ do {
+ pgd = *pgdp;
+ barrier();
+ next = pgd_addr_end(addr, end);
+ if (pgd_none(pgd))
+ break;
+ if (!gup_pud_range(pgdp, pgd, addr, next, write, pages, &nr))
+ break;
+ } while (pgdp++, addr = next, addr != end);
+ local_irq_restore(flags);
+
+ return nr;
+}
+
+/**
+ * get_user_pages_fast() - pin user pages in memory
+ * @start: starting user address
+ * @nr_pages: number of pages from start to pin
+ * @write: whether pages will be written to
+ * @pages: array that receives pointers to the pages pinned.
+ * Should be at least nr_pages long.
+ *
+ * Attempt to pin user pages in memory without taking mm->mmap_sem.
+ * If not successful, it will fall back to taking the lock and
+ * calling get_user_pages().
+ *
+ * Returns number of pages pinned. This may be fewer than the number
+ * requested. If nr_pages is 0 or negative, returns 0. If no pages
+ * were pinned, returns -errno.
+ */
+int get_user_pages_fast(unsigned long start, int nr_pages, int write,
+ struct page **pages)
+{
+ struct mm_struct *mm = current->mm;
+ int nr, ret;
+
+ start &= PAGE_MASK;
+ nr = __get_user_pages_fast(start, nr_pages, write, pages);
+ if (nr == nr_pages)
+ return nr;
+
+ /* Try to get the remaining pages with get_user_pages */
+ start += nr << PAGE_SHIFT;
+ pages += nr;
+ down_read(&mm->mmap_sem);
+ ret = get_user_pages(current, mm, start,
+ nr_pages - nr, write, 0, pages, NULL);
+ up_read(&mm->mmap_sem);
+ /* Have to be a bit careful with return values */
+ if (nr > 0)
+ ret = (ret < 0) ? nr : ret + nr;
+ return ret;
+}
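As a usage illustration for the new gup.c (not part of the patch): a caller typically pins a user buffer with get_user_pages_fast(), works on the returned struct page pointers, and drops every reference with put_page(). The helper below is hypothetical and only sketches that pattern.

#include <linux/mm.h>

/* Hypothetical helper: pin, use and release a user buffer. */
static int example_pin_user_buffer(unsigned long uaddr, int nr_pages,
				   struct page **pages)
{
	int i, pinned;

	pinned = get_user_pages_fast(uaddr, nr_pages, 1 /* write */, pages);
	if (pinned < 0)
		return pinned;		/* nothing was pinned */

	/* ... access pages[0..pinned-1], e.g. via kmap_atomic() ... */

	for (i = 0; i < pinned; i++)
		put_page(pages[i]);	/* drop the references taken above */
	return pinned == nr_pages ? 0 : -EFAULT;
}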
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
new file mode 100644
index 00000000000..0ff66a7e29b
--- /dev/null
+++ b/arch/s390/mm/hugetlbpage.c
@@ -0,0 +1,235 @@
+/*
+ * IBM System z Huge TLB Page Support for Kernel.
+ *
+ * Copyright IBM Corp. 2007
+ * Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
+ */
+
+#include <linux/mm.h>
+#include <linux/hugetlb.h>
+
+static inline pmd_t __pte_to_pmd(pte_t pte)
+{
+ int none, young, prot;
+ pmd_t pmd;
+
+ /*
+ * Convert encoding pte bits pmd bits
+ * .IR...wrdytp ..R...I...y.
+ * empty .10...000000 -> ..0...1...0.
+ * prot-none, clean, old .11...000001 -> ..0...1...1.
+ * prot-none, clean, young .11...000101 -> ..1...1...1.
+ * prot-none, dirty, old .10...001001 -> ..0...1...1.
+ * prot-none, dirty, young .10...001101 -> ..1...1...1.
+ * read-only, clean, old .11...010001 -> ..1...1...0.
+ * read-only, clean, young .01...010101 -> ..1...0...1.
+ * read-only, dirty, old .11...011001 -> ..1...1...0.
+ * read-only, dirty, young .01...011101 -> ..1...0...1.
+ * read-write, clean, old .11...110001 -> ..0...1...0.
+ * read-write, clean, young .01...110101 -> ..0...0...1.
+ * read-write, dirty, old .10...111001 -> ..0...1...0.
+ * read-write, dirty, young .00...111101 -> ..0...0...1.
+ * Huge ptes are dirty by definition, a clean pte is made dirty
+ * by the conversion.
+ */
+ if (pte_present(pte)) {
+ pmd_val(pmd) = pte_val(pte) & PAGE_MASK;
+ if (pte_val(pte) & _PAGE_INVALID)
+ pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
+ none = (pte_val(pte) & _PAGE_PRESENT) &&
+ !(pte_val(pte) & _PAGE_READ) &&
+ !(pte_val(pte) & _PAGE_WRITE);
+ prot = (pte_val(pte) & _PAGE_PROTECT) &&
+ !(pte_val(pte) & _PAGE_WRITE);
+ young = pte_val(pte) & _PAGE_YOUNG;
+ if (none || young)
+ pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
+ if (prot || (none && young))
+ pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
+ } else
+ pmd_val(pmd) = _SEGMENT_ENTRY_INVALID;
+ return pmd;
+}
+
+static inline pte_t __pmd_to_pte(pmd_t pmd)
+{
+ pte_t pte;
+
+ /*
+ * Convert encoding pmd bits pte bits
+ * ..R...I...y. .IR...wrdytp
+ * empty ..0...1...0. -> .10...000000
+ * prot-none, old ..0...1...1. -> .10...001001
+ * prot-none, young ..1...1...1. -> .10...001101
+ * read-only, old ..1...1...0. -> .11...011001
+ * read-only, young ..1...0...1. -> .01...011101
+ * read-write, old ..0...1...0. -> .10...111001
+ * read-write, young ..0...0...1. -> .00...111101
+ * Huge ptes are dirty by definition
+ */
+ if (pmd_present(pmd)) {
+ pte_val(pte) = _PAGE_PRESENT | _PAGE_LARGE | _PAGE_DIRTY |
+ (pmd_val(pmd) & PAGE_MASK);
+ if (pmd_val(pmd) & _SEGMENT_ENTRY_INVALID)
+ pte_val(pte) |= _PAGE_INVALID;
+ if (pmd_prot_none(pmd)) {
+ if (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT)
+ pte_val(pte) |= _PAGE_YOUNG;
+ } else {
+ pte_val(pte) |= _PAGE_READ;
+ if (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT)
+ pte_val(pte) |= _PAGE_PROTECT;
+ else
+ pte_val(pte) |= _PAGE_WRITE;
+ if (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG)
+ pte_val(pte) |= _PAGE_YOUNG;
+ }
+ } else
+ pte_val(pte) = _PAGE_INVALID;
+ return pte;
+}
+
+void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte)
+{
+ pmd_t pmd;
+
+ pmd = __pte_to_pmd(pte);
+ if (!MACHINE_HAS_HPAGE) {
+ pmd_val(pmd) &= ~_SEGMENT_ENTRY_ORIGIN;
+ pmd_val(pmd) |= pte_page(pte)[1].index;
+ } else
+ pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO;
+ *(pmd_t *) ptep = pmd;
+}
+
+pte_t huge_ptep_get(pte_t *ptep)
+{
+ unsigned long origin;
+ pmd_t pmd;
+
+ pmd = *(pmd_t *) ptep;
+ if (!MACHINE_HAS_HPAGE && pmd_present(pmd)) {
+ origin = pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN;
+ pmd_val(pmd) &= ~_SEGMENT_ENTRY_ORIGIN;
+ pmd_val(pmd) |= *(unsigned long *) origin;
+ }
+ return __pmd_to_pte(pmd);
+}
+
+pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ pmd_t *pmdp = (pmd_t *) ptep;
+ pte_t pte = huge_ptep_get(ptep);
+
+ pmdp_flush_direct(mm, addr, pmdp);
+ pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
+ return pte;
+}
+
+int arch_prepare_hugepage(struct page *page)
+{
+ unsigned long addr = page_to_phys(page);
+ pte_t pte;
+ pte_t *ptep;
+ int i;
+
+ if (MACHINE_HAS_HPAGE)
+ return 0;
+
+ ptep = (pte_t *) pte_alloc_one(&init_mm, addr);
+ if (!ptep)
+ return -ENOMEM;
+
+ pte_val(pte) = addr;
+ for (i = 0; i < PTRS_PER_PTE; i++) {
+ set_pte_at(&init_mm, addr + i * PAGE_SIZE, ptep + i, pte);
+ pte_val(pte) += PAGE_SIZE;
+ }
+ page[1].index = (unsigned long) ptep;
+ return 0;
+}
+
+void arch_release_hugepage(struct page *page)
+{
+ pte_t *ptep;
+
+ if (MACHINE_HAS_HPAGE)
+ return;
+
+ ptep = (pte_t *) page[1].index;
+ if (!ptep)
+ return;
+ clear_table((unsigned long *) ptep, _PAGE_INVALID,
+ PTRS_PER_PTE * sizeof(pte_t));
+ page_table_free(&init_mm, (unsigned long *) ptep);
+ page[1].index = 0;
+}
+
+pte_t *huge_pte_alloc(struct mm_struct *mm,
+ unsigned long addr, unsigned long sz)
+{
+ pgd_t *pgdp;
+ pud_t *pudp;
+ pmd_t *pmdp = NULL;
+
+ pgdp = pgd_offset(mm, addr);
+ pudp = pud_alloc(mm, pgdp, addr);
+ if (pudp)
+ pmdp = pmd_alloc(mm, pudp, addr);
+ return (pte_t *) pmdp;
+}
+
+pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
+{
+ pgd_t *pgdp;
+ pud_t *pudp;
+ pmd_t *pmdp = NULL;
+
+ pgdp = pgd_offset(mm, addr);
+ if (pgd_present(*pgdp)) {
+ pudp = pud_offset(pgdp, addr);
+ if (pud_present(*pudp))
+ pmdp = pmd_offset(pudp, addr);
+ }
+ return (pte_t *) pmdp;
+}
+
+int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+{
+ return 0;
+}
+
+struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
+ int write)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+int pmd_huge(pmd_t pmd)
+{
+ if (!MACHINE_HAS_HPAGE)
+ return 0;
+
+ return !!(pmd_val(pmd) & _SEGMENT_ENTRY_LARGE);
+}
+
+int pud_huge(pud_t pud)
+{
+ return 0;
+}
+
+struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+ pmd_t *pmdp, int write)
+{
+ struct page *page;
+
+ if (!MACHINE_HAS_HPAGE)
+ return NULL;
+
+ page = pmd_page(*pmdp);
+ if (page)
+ page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT);
+ return page;
+}
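Worked note on the hugetlb code above (illustrative, derived from the code itself): on s390 a huge page is mapped by a segment table (pmd) entry, so the hugetlb "pte" helpers really convert between pte and pmd encodings. On machines without the hardware large-page facility (MACHINE_HAS_HPAGE is false), arch_prepare_hugepage() builds a software fallback: it pre-fills a page table with PTRS_PER_PTE consecutive ptes covering the huge page and stashes that table's address in page[1].index, which set_huge_pte_at() then plants in the segment entry in place of a large-page origin and huge_ptep_get() unpicks again.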
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 8053245fe25..0c1073ed1e8 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -1,8 +1,6 @@
/*
- * arch/s390/mm/init.c
- *
* S390 version
- * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 1999
* Author(s): Hartmut Penner (hp@de.ibm.com)
*
* Derived from "arch/i386/mm/init.c"
@@ -23,11 +21,13 @@
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
+#include <linux/memory.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/initrd.h>
+#include <linux/export.h>
+#include <linux/gfp.h>
#include <asm/processor.h>
-#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
@@ -36,68 +36,60 @@
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
-
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
+#include <asm/ctl_reg.h>
+#include <asm/sclp.h>
pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE)));
-char empty_zero_page[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
-void show_mem(void)
+unsigned long empty_zero_page, zero_page_mask;
+EXPORT_SYMBOL(empty_zero_page);
+
+static void __init setup_zero_pages(void)
{
- int i, total = 0, reserved = 0;
- int shared = 0, cached = 0;
+ struct cpuid cpu_id;
+ unsigned int order;
struct page *page;
+ int i;
- printk("Mem-info:\n");
- show_free_areas();
- printk("Free swap: %6ldkB\n", nr_swap_pages << (PAGE_SHIFT - 10));
- i = max_mapnr;
- while (i-- > 0) {
- if (!pfn_valid(i))
- continue;
- page = pfn_to_page(i);
- total++;
- if (PageReserved(page))
- reserved++;
- else if (PageSwapCache(page))
- cached++;
- else if (page_count(page))
- shared += page_count(page) - 1;
+ get_cpu_id(&cpu_id);
+ switch (cpu_id.machine) {
+ case 0x9672: /* g5 */
+ case 0x2064: /* z900 */
+ case 0x2066: /* z900 */
+ case 0x2084: /* z990 */
+ case 0x2086: /* z990 */
+ case 0x2094: /* z9-109 */
+ case 0x2096: /* z9-109 */
+ order = 0;
+ break;
+ case 0x2097: /* z10 */
+ case 0x2098: /* z10 */
+ case 0x2817: /* z196 */
+ case 0x2818: /* z196 */
+ order = 2;
+ break;
+ case 0x2827: /* zEC12 */
+ case 0x2828: /* zEC12 */
+ default:
+ order = 5;
+ break;
}
- printk("%d pages of RAM\n", total);
- printk("%d reserved pages\n", reserved);
- printk("%d pages shared\n", shared);
- printk("%d pages swap cached\n", cached);
-
- printk("%lu pages dirty\n", global_page_state(NR_FILE_DIRTY));
- printk("%lu pages writeback\n", global_page_state(NR_WRITEBACK));
- printk("%lu pages mapped\n", global_page_state(NR_FILE_MAPPED));
- printk("%lu pages slab\n",
- global_page_state(NR_SLAB_RECLAIMABLE) +
- global_page_state(NR_SLAB_UNRECLAIMABLE));
- printk("%lu pages pagetables\n", global_page_state(NR_PAGETABLE));
-}
-
-static void __init setup_ro_region(void)
-{
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
- pte_t new_pte;
- unsigned long address, end;
-
- address = ((unsigned long)&_stext) & PAGE_MASK;
- end = PFN_ALIGN((unsigned long)&_eshared);
-
- for (; address < end; address += PAGE_SIZE) {
- pgd = pgd_offset_k(address);
- pud = pud_offset(pgd, address);
- pmd = pmd_offset(pud, address);
- pte = pte_offset_kernel(pmd, address);
- new_pte = mk_pte_phys(address, __pgprot(_PAGE_RO));
- *pte = new_pte;
+ /* Limit number of empty zero pages for small memory sizes */
+ if (order > 2 && totalram_pages <= 16384)
+ order = 2;
+
+ empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
+ if (!empty_zero_page)
+ panic("Out of memory in setup_zero_pages");
+
+ page = virt_to_page((void *) empty_zero_page);
+ split_page(page, order);
+ for (i = 1 << order; i > 0; i--) {
+ mark_page_reserved(page);
+ page++;
}
+
+ zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}
/*
@@ -105,122 +97,135 @@ static void __init setup_ro_region(void)
*/
void __init paging_init(void)
{
- static const int ssm_mask = 0x04000000L;
unsigned long max_zone_pfns[MAX_NR_ZONES];
- unsigned long pgd_type;
+ unsigned long pgd_type, asce_bits;
init_mm.pgd = swapper_pg_dir;
- S390_lowcore.kernel_asce = __pa(init_mm.pgd) & PAGE_MASK;
#ifdef CONFIG_64BIT
- /* A three level page table (4TB) is enough for the kernel space. */
- S390_lowcore.kernel_asce |= _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
- pgd_type = _REGION3_ENTRY_EMPTY;
+ if (VMALLOC_END > (1UL << 42)) {
+ asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
+ pgd_type = _REGION2_ENTRY_EMPTY;
+ } else {
+ asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
+ pgd_type = _REGION3_ENTRY_EMPTY;
+ }
#else
- S390_lowcore.kernel_asce |= _ASCE_TABLE_LENGTH;
+ asce_bits = _ASCE_TABLE_LENGTH;
pgd_type = _SEGMENT_ENTRY_EMPTY;
#endif
+ S390_lowcore.kernel_asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
clear_table((unsigned long *) init_mm.pgd, pgd_type,
sizeof(unsigned long)*2048);
vmem_map_init();
- setup_ro_region();
/* enable virtual mapping in kernel mode */
__ctl_load(S390_lowcore.kernel_asce, 1, 1);
__ctl_load(S390_lowcore.kernel_asce, 7, 7);
__ctl_load(S390_lowcore.kernel_asce, 13, 13);
- __raw_local_irq_ssm(ssm_mask);
+ arch_local_irq_restore(4UL << (BITS_PER_LONG - 8));
+ sparse_memory_present_with_active_regions(MAX_NUMNODES);
+ sparse_init();
memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
-#ifdef CONFIG_ZONE_DMA
max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
-#endif
max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
free_area_init_nodes(max_zone_pfns);
}
void __init mem_init(void)
{
- unsigned long codesize, reservedpages, datasize, initsize;
+ if (MACHINE_HAS_TLB_LC)
+ cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
+ cpumask_set_cpu(0, mm_cpumask(&init_mm));
+ atomic_set(&init_mm.context.attach_count, 1);
- max_mapnr = num_physpages = max_low_pfn;
+ max_mapnr = max_low_pfn;
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
- /* clear the zero-page */
- memset(empty_zero_page, 0, PAGE_SIZE);
+ /* Setup guest page hinting */
+ cmma_init();
/* this will put all low memory onto the freelists */
- totalram_pages += free_all_bootmem();
-
- reservedpages = 0;
-
- codesize = (unsigned long) &_etext - (unsigned long) &_text;
- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
- initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
- printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
- (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
- max_mapnr << (PAGE_SHIFT-10),
- codesize >> 10,
- reservedpages << (PAGE_SHIFT-10),
- datasize >>10,
- initsize >> 10);
+ free_all_bootmem();
+ setup_zero_pages(); /* Setup zeroed pages. */
+
+ mem_init_print_info(NULL);
printk("Write protected kernel read-only data: %#lx - %#lx\n",
(unsigned long)&_stext,
PFN_ALIGN((unsigned long)&_eshared) - 1);
}
-#ifdef CONFIG_DEBUG_PAGEALLOC
-void kernel_map_pages(struct page *page, int numpages, int enable)
+void free_initmem(void)
+{
+ free_initmem_default(POISON_FREE_INITMEM);
+}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+void __init free_initrd_mem(unsigned long start, unsigned long end)
{
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
- unsigned long address;
- int i;
+ free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
+ "initrd");
+}
+#endif
- for (i = 0; i < numpages; i++) {
- address = page_to_phys(page + i);
- pgd = pgd_offset_k(address);
- pud = pud_offset(pgd, address);
- pmd = pmd_offset(pud, address);
- pte = pte_offset_kernel(pmd, address);
- if (!enable) {
- ptep_invalidate(&init_mm, address, pte);
- continue;
+#ifdef CONFIG_MEMORY_HOTPLUG
+int arch_add_memory(int nid, u64 start, u64 size)
+{
+ unsigned long zone_start_pfn, zone_end_pfn, nr_pages;
+ unsigned long start_pfn = PFN_DOWN(start);
+ unsigned long size_pages = PFN_DOWN(size);
+ struct zone *zone;
+ int rc;
+
+ rc = vmem_add_mapping(start, size);
+ if (rc)
+ return rc;
+ for_each_zone(zone) {
+ if (zone_idx(zone) != ZONE_MOVABLE) {
+ /* Add range within existing zone limits */
+ zone_start_pfn = zone->zone_start_pfn;
+ zone_end_pfn = zone->zone_start_pfn +
+ zone->spanned_pages;
+ } else {
+ /* Add remaining range to ZONE_MOVABLE */
+ zone_start_pfn = start_pfn;
+ zone_end_pfn = start_pfn + size_pages;
}
- *pte = mk_pte_phys(address, __pgprot(_PAGE_TYPE_RW));
- /* Flush cpu write queue. */
- mb();
+ if (start_pfn < zone_start_pfn || start_pfn >= zone_end_pfn)
+ continue;
+ nr_pages = (start_pfn + size_pages > zone_end_pfn) ?
+ zone_end_pfn - start_pfn : size_pages;
+ rc = __add_pages(nid, zone, start_pfn, nr_pages);
+ if (rc)
+ break;
+ start_pfn += nr_pages;
+ size_pages -= nr_pages;
+ if (!size_pages)
+ break;
}
+ if (rc)
+ vmem_remove_mapping(start, size);
+ return rc;
}
-#endif
-void free_initmem(void)
+unsigned long memory_block_size_bytes(void)
{
- unsigned long addr;
-
- addr = (unsigned long)(&__init_begin);
- for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
- ClearPageReserved(virt_to_page(addr));
- init_page_count(virt_to_page(addr));
- memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
- free_page(addr);
- totalram_pages++;
- }
- printk ("Freeing unused kernel memory: %ldk freed\n",
- ((unsigned long)&__init_end - (unsigned long)&__init_begin) >> 10);
+ /*
+	 * Make sure the memory block size is always greater than
+	 * or equal to the memory increment size.
+ */
+ return max_t(unsigned long, MIN_MEMORY_BLOCK_SIZE, sclp_get_rzm());
}
-#ifdef CONFIG_BLK_DEV_INITRD
-void free_initrd_mem(unsigned long start, unsigned long end)
+#ifdef CONFIG_MEMORY_HOTREMOVE
+int arch_remove_memory(u64 start, u64 size)
{
- if (start < end)
- printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
- for (; start < end; start += PAGE_SIZE) {
- ClearPageReserved(virt_to_page(start));
- init_page_count(virt_to_page(start));
- free_page(start);
- totalram_pages++;
- }
+ /*
+ * There is no hardware or firmware interface which could trigger a
+ * hot memory remove on s390. So there is nothing that needs to be
+ * implemented.
+ */
+ return -EBUSY;
}
#endif
+#endif /* CONFIG_MEMORY_HOTPLUG */
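A quick worked example for setup_zero_pages() above (illustrative, assuming 4 KB pages): on a machine class that selects order = 5, 1 << 5 = 32 zero pages (128 KB) are allocated and zero_page_mask becomes ((4096 << 5) - 1) & PAGE_MASK = 0x1f000, so the read-fault path can hand out one of 32 differently cache-colored zero pages depending on the faulting address.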
diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c
new file mode 100644
index 00000000000..2a2e35416d2
--- /dev/null
+++ b/arch/s390/mm/maccess.c
@@ -0,0 +1,204 @@
+/*
+ * Access kernel memory without faulting -- s390 specific implementation.
+ *
+ * Copyright IBM Corp. 2009
+ *
+ * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
+ *
+ */
+
+#include <linux/uaccess.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/gfp.h>
+#include <linux/cpu.h>
+#include <asm/ctl_reg.h>
+#include <asm/io.h>
+
+/*
+ * This function writes to kernel memory bypassing DAT and possible
+ * write protection. It copies one to four bytes from src to dst
+ * using the stura instruction.
+ * Returns the number of bytes copied or -EFAULT.
+ */
+static long probe_kernel_write_odd(void *dst, const void *src, size_t size)
+{
+ unsigned long count, aligned;
+ int offset, mask;
+ int rc = -EFAULT;
+
+ aligned = (unsigned long) dst & ~3UL;
+ offset = (unsigned long) dst & 3;
+ count = min_t(unsigned long, 4 - offset, size);
+ mask = (0xf << (4 - count)) & 0xf;
+ mask >>= offset;
+ asm volatile(
+ " bras 1,0f\n"
+ " icm 0,0,0(%3)\n"
+ "0: l 0,0(%1)\n"
+ " lra %1,0(%1)\n"
+ "1: ex %2,0(1)\n"
+ "2: stura 0,%1\n"
+ " la %0,0\n"
+ "3:\n"
+ EX_TABLE(0b,3b) EX_TABLE(1b,3b) EX_TABLE(2b,3b)
+ : "+d" (rc), "+a" (aligned)
+ : "a" (mask), "a" (src) : "cc", "memory", "0", "1");
+ return rc ? rc : count;
+}
+
+long probe_kernel_write(void *dst, const void *src, size_t size)
+{
+ long copied = 0;
+
+ while (size) {
+ copied = probe_kernel_write_odd(dst, src, size);
+ if (copied < 0)
+ break;
+ dst += copied;
+ src += copied;
+ size -= copied;
+ }
+ return copied < 0 ? -EFAULT : 0;
+}
+
+static int __memcpy_real(void *dest, void *src, size_t count)
+{
+ register unsigned long _dest asm("2") = (unsigned long) dest;
+ register unsigned long _len1 asm("3") = (unsigned long) count;
+ register unsigned long _src asm("4") = (unsigned long) src;
+ register unsigned long _len2 asm("5") = (unsigned long) count;
+ int rc = -EFAULT;
+
+ asm volatile (
+ "0: mvcle %1,%2,0x0\n"
+ "1: jo 0b\n"
+ " lhi %0,0x0\n"
+ "2:\n"
+ EX_TABLE(1b,2b)
+ : "+d" (rc), "+d" (_dest), "+d" (_src), "+d" (_len1),
+ "+d" (_len2), "=m" (*((long *) dest))
+ : "m" (*((long *) src))
+ : "cc", "memory");
+ return rc;
+}
+
+/*
+ * Copy memory in real mode (kernel to kernel)
+ */
+int memcpy_real(void *dest, void *src, size_t count)
+{
+ unsigned long flags;
+ int rc;
+
+ if (!count)
+ return 0;
+ local_irq_save(flags);
+ __arch_local_irq_stnsm(0xfbUL);
+ rc = __memcpy_real(dest, src, count);
+ local_irq_restore(flags);
+ return rc;
+}
+
+/*
+ * Copy memory in absolute mode (kernel to kernel)
+ */
+void memcpy_absolute(void *dest, void *src, size_t count)
+{
+ unsigned long cr0, flags, prefix;
+
+ flags = arch_local_irq_save();
+ __ctl_store(cr0, 0, 0);
+ __ctl_clear_bit(0, 28); /* disable lowcore protection */
+ prefix = store_prefix();
+ if (prefix) {
+ local_mcck_disable();
+ set_prefix(0);
+ memcpy(dest, src, count);
+ set_prefix(prefix);
+ local_mcck_enable();
+ } else {
+ memcpy(dest, src, count);
+ }
+ __ctl_load(cr0, 0, 0);
+ arch_local_irq_restore(flags);
+}
+
+/*
+ * Copy memory from kernel (real) to user (virtual)
+ */
+int copy_to_user_real(void __user *dest, void *src, unsigned long count)
+{
+ int offs = 0, size, rc;
+ char *buf;
+
+ buf = (char *) __get_free_page(GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ rc = -EFAULT;
+ while (offs < count) {
+ size = min(PAGE_SIZE, count - offs);
+ if (memcpy_real(buf, src + offs, size))
+ goto out;
+ if (copy_to_user(dest + offs, buf, size))
+ goto out;
+ offs += size;
+ }
+ rc = 0;
+out:
+ free_page((unsigned long) buf);
+ return rc;
+}
+
+/*
+ * Check if physical address is within prefix or zero page
+ */
+static int is_swapped(unsigned long addr)
+{
+ unsigned long lc;
+ int cpu;
+
+ if (addr < sizeof(struct _lowcore))
+ return 1;
+ for_each_online_cpu(cpu) {
+ lc = (unsigned long) lowcore_ptr[cpu];
+ if (addr > lc + sizeof(struct _lowcore) - 1 || addr < lc)
+ continue;
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * Convert a physical pointer for /dev/mem access
+ *
+ * For swapped prefix pages a new buffer is returned that contains a copy of
+ * the absolute memory. The buffer is at most one page in size.
+ */
+void *xlate_dev_mem_ptr(unsigned long addr)
+{
+ void *bounce = (void *) addr;
+ unsigned long size;
+
+ get_online_cpus();
+ preempt_disable();
+ if (is_swapped(addr)) {
+ size = PAGE_SIZE - (addr & ~PAGE_MASK);
+ bounce = (void *) __get_free_page(GFP_ATOMIC);
+ if (bounce)
+ memcpy_absolute(bounce, (void *) addr, size);
+ }
+ preempt_enable();
+ put_online_cpus();
+ return bounce;
+}
+
+/*
+ * Free converted buffer for /dev/mem access (if necessary)
+ */
+void unxlate_dev_mem_ptr(unsigned long addr, void *buf)
+{
+ if ((void *) addr != buf)
+ free_page((unsigned long) buf);
+}
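For illustration (not part of the patch), a caller that needs to modify potentially write-protected kernel memory would use the probe_kernel_write() implementation above roughly as sketched below; example_poke_text() is a hypothetical helper.

#include <linux/types.h>
#include <linux/uaccess.h>

/* Hypothetical helper: store a 2-byte opcode into kernel text. */
static int example_poke_text(void *addr, u16 opcode)
{
	/* returns 0 on success, -EFAULT if the store faulted */
	return probe_kernel_write(addr, &opcode, sizeof(opcode));
}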
diff --git a/arch/s390/mm/mem_detect.c b/arch/s390/mm/mem_detect.c
new file mode 100644
index 00000000000..5535cfe0ee1
--- /dev/null
+++ b/arch/s390/mm/mem_detect.c
@@ -0,0 +1,65 @@
+/*
+ * Copyright IBM Corp. 2008, 2009
+ *
+ * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/memblock.h>
+#include <linux/init.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <asm/ipl.h>
+#include <asm/sclp.h>
+#include <asm/setup.h>
+
+#define ADDR2G (1ULL << 31)
+
+#define CHUNK_READ_WRITE 0
+#define CHUNK_READ_ONLY 1
+
+static inline void memblock_physmem_add(phys_addr_t start, phys_addr_t size)
+{
+ memblock_add_range(&memblock.memory, start, size, 0, 0);
+ memblock_add_range(&memblock.physmem, start, size, 0, 0);
+}
+
+void __init detect_memory_memblock(void)
+{
+ unsigned long long memsize, rnmax, rzm;
+ unsigned long addr, size;
+ int type;
+
+ rzm = sclp_get_rzm();
+ rnmax = sclp_get_rnmax();
+ memsize = rzm * rnmax;
+ if (!rzm)
+ rzm = 1ULL << 17;
+ if (IS_ENABLED(CONFIG_32BIT)) {
+ rzm = min(ADDR2G, rzm);
+ memsize = min(ADDR2G, memsize);
+ }
+ max_physmem_end = memsize;
+ addr = 0;
+ /* keep memblock lists close to the kernel */
+ memblock_set_bottom_up(true);
+ do {
+ size = 0;
+ type = tprot(addr);
+ do {
+ size += rzm;
+ if (max_physmem_end && addr + size >= max_physmem_end)
+ break;
+ } while (type == tprot(addr + size));
+ if (type == CHUNK_READ_WRITE || type == CHUNK_READ_ONLY) {
+ if (max_physmem_end && (addr + size > max_physmem_end))
+ size = max_physmem_end - addr;
+ memblock_physmem_add(addr, size);
+ }
+ addr += size;
+ } while (addr < max_physmem_end);
+ memblock_set_bottom_up(false);
+ if (!max_physmem_end)
+ max_physmem_end = memblock_end_of_DRAM();
+}
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index 5932a824547..9b436c21195 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -1,6 +1,4 @@
/*
- * linux/arch/s390/mm/mmap.c
- *
* flexible mmap layout support
*
* Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
@@ -26,41 +24,61 @@
#include <linux/personality.h>
#include <linux/mm.h>
+#include <linux/mman.h>
#include <linux/module.h>
+#include <linux/random.h>
+#include <linux/compat.h>
#include <asm/pgalloc.h>
+static unsigned long stack_maxrandom_size(void)
+{
+ if (!(current->flags & PF_RANDOMIZE))
+ return 0;
+ if (current->personality & ADDR_NO_RANDOMIZE)
+ return 0;
+ return STACK_RND_MASK << PAGE_SHIFT;
+}
+
/*
* Top of mmap area (just below the process stack).
*
- * Leave an at least ~128 MB hole.
+ * Leave at least a ~32 MB hole.
*/
-#define MIN_GAP (128*1024*1024)
-#define MAX_GAP (TASK_SIZE/6*5)
+#define MIN_GAP (32*1024*1024)
+#define MAX_GAP (STACK_TOP/6*5)
+
+static inline int mmap_is_legacy(void)
+{
+ if (current->personality & ADDR_COMPAT_LAYOUT)
+ return 1;
+ if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
+ return 1;
+ return sysctl_legacy_va_layout;
+}
+
+static unsigned long mmap_rnd(void)
+{
+ if (!(current->flags & PF_RANDOMIZE))
+ return 0;
+ /* 8MB randomization for mmap_base */
+ return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
+}
+
+static unsigned long mmap_base_legacy(void)
+{
+ return TASK_UNMAPPED_BASE + mmap_rnd();
+}
static inline unsigned long mmap_base(void)
{
- unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
+ unsigned long gap = rlimit(RLIMIT_STACK);
if (gap < MIN_GAP)
gap = MIN_GAP;
else if (gap > MAX_GAP)
gap = MAX_GAP;
-
- return TASK_SIZE - (gap & PAGE_MASK);
-}
-
-static inline int mmap_is_legacy(void)
-{
-#ifdef CONFIG_64BIT
- /*
- * Force standard allocation for 64 bit programs.
- */
- if (!test_thread_flag(TIF_31BIT))
- return 1;
-#endif
- return sysctl_legacy_va_layout ||
- (current->personality & ADDR_COMPAT_LAYOUT) ||
- current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY;
+ gap &= PAGE_MASK;
+ return STACK_TOP - stack_maxrandom_size() - mmap_rnd() - gap;
}
#ifndef CONFIG_64BIT
@@ -76,55 +94,69 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
* bit is set, or if the expected stack growth is unlimited:
*/
if (mmap_is_legacy()) {
- mm->mmap_base = TASK_UNMAPPED_BASE;
+ mm->mmap_base = mmap_base_legacy();
mm->get_unmapped_area = arch_get_unmapped_area;
- mm->unmap_area = arch_unmap_area;
} else {
mm->mmap_base = mmap_base();
mm->get_unmapped_area = arch_get_unmapped_area_topdown;
- mm->unmap_area = arch_unmap_area_topdown;
}
}
-EXPORT_SYMBOL_GPL(arch_pick_mmap_layout);
#else
+int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
+{
+ if (is_compat_task() || (TASK_SIZE >= (1UL << 53)))
+ return 0;
+ if (!(flags & MAP_FIXED))
+ addr = 0;
+ if ((addr + len) >= TASK_SIZE)
+ return crst_table_upgrade(current->mm, 1UL << 53);
+ return 0;
+}
+
static unsigned long
s390_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags)
{
struct mm_struct *mm = current->mm;
+ unsigned long area;
int rc;
- addr = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
- if (addr & ~PAGE_MASK)
- return addr;
- if (unlikely(mm->context.asce_limit < addr + len)) {
- rc = crst_table_upgrade(mm, addr + len);
+ area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
+ if (!(area & ~PAGE_MASK))
+ return area;
+ if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) {
+ /* Upgrade the page table to 4 levels and retry. */
+ rc = crst_table_upgrade(mm, 1UL << 53);
if (rc)
return (unsigned long) rc;
+ area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
}
- return addr;
+ return area;
}
static unsigned long
-s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
const unsigned long len, const unsigned long pgoff,
const unsigned long flags)
{
struct mm_struct *mm = current->mm;
- unsigned long addr = addr0;
+ unsigned long area;
int rc;
- addr = arch_get_unmapped_area_topdown(filp, addr, len, pgoff, flags);
- if (addr & ~PAGE_MASK)
- return addr;
- if (unlikely(mm->context.asce_limit < addr + len)) {
- rc = crst_table_upgrade(mm, addr + len);
+ area = arch_get_unmapped_area_topdown(filp, addr, len, pgoff, flags);
+ if (!(area & ~PAGE_MASK))
+ return area;
+ if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) {
+ /* Upgrade the page table to 4 levels and retry. */
+ rc = crst_table_upgrade(mm, 1UL << 53);
if (rc)
return (unsigned long) rc;
+ area = arch_get_unmapped_area_topdown(filp, addr, len,
+ pgoff, flags);
}
- return addr;
+ return area;
}
/*
* This function, called very early during the creation of a new
@@ -137,15 +169,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
* bit is set, or if the expected stack growth is unlimited:
*/
if (mmap_is_legacy()) {
- mm->mmap_base = TASK_UNMAPPED_BASE;
+ mm->mmap_base = mmap_base_legacy();
mm->get_unmapped_area = s390_get_unmapped_area;
- mm->unmap_area = arch_unmap_area;
} else {
mm->mmap_base = mmap_base();
mm->get_unmapped_area = s390_get_unmapped_area_topdown;
- mm->unmap_area = arch_unmap_area_topdown;
}
}
-EXPORT_SYMBOL_GPL(arch_pick_mmap_layout);
#endif
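Worked example for the new mmap_base() above (illustrative numbers): with the default 8 MB RLIMIT_STACK and randomization disabled, the gap is clamped up to MIN_GAP = 32 MB, stack_maxrandom_size() and mmap_rnd() are both 0, and the top-down base becomes STACK_TOP - 32 MB. If RLIMIT_STACK is unlimited or ADDR_COMPAT_LAYOUT is set, mmap_is_legacy() picks the bottom-up layout instead, starting at TASK_UNMAPPED_BASE plus up to ~8 MB of randomization when PF_RANDOMIZE is set.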
diff --git a/arch/s390/mm/page-states.c b/arch/s390/mm/page-states.c
new file mode 100644
index 00000000000..a90d45e9dfb
--- /dev/null
+++ b/arch/s390/mm/page-states.c
@@ -0,0 +1,114 @@
+/*
+ * Copyright IBM Corp. 2008
+ *
+ * Guest page hinting for unused pages.
+ *
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/gfp.h>
+#include <linux/init.h>
+
+#define ESSA_SET_STABLE 1
+#define ESSA_SET_UNUSED 2
+
+static int cmma_flag = 1;
+
+static int __init cmma(char *str)
+{
+ char *parm;
+
+ parm = strstrip(str);
+ if (strcmp(parm, "yes") == 0 || strcmp(parm, "on") == 0) {
+ cmma_flag = 1;
+ return 1;
+ }
+ cmma_flag = 0;
+ if (strcmp(parm, "no") == 0 || strcmp(parm, "off") == 0)
+ return 1;
+ return 0;
+}
+__setup("cmma=", cmma);
+
+void __init cmma_init(void)
+{
+ register unsigned long tmp asm("0") = 0;
+ register int rc asm("1") = -EOPNOTSUPP;
+
+ if (!cmma_flag)
+ return;
+ asm volatile(
+ " .insn rrf,0xb9ab0000,%1,%1,0,0\n"
+ "0: la %0,0\n"
+ "1:\n"
+ EX_TABLE(0b,1b)
+ : "+&d" (rc), "+&d" (tmp));
+ if (rc)
+ cmma_flag = 0;
+}
+
+static inline void set_page_unstable(struct page *page, int order)
+{
+ int i, rc;
+
+ for (i = 0; i < (1 << order); i++)
+ asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
+ : "=&d" (rc)
+ : "a" (page_to_phys(page + i)),
+ "i" (ESSA_SET_UNUSED));
+}
+
+void arch_free_page(struct page *page, int order)
+{
+ if (!cmma_flag)
+ return;
+ set_page_unstable(page, order);
+}
+
+static inline void set_page_stable(struct page *page, int order)
+{
+ int i, rc;
+
+ for (i = 0; i < (1 << order); i++)
+ asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
+ : "=&d" (rc)
+ : "a" (page_to_phys(page + i)),
+ "i" (ESSA_SET_STABLE));
+}
+
+void arch_alloc_page(struct page *page, int order)
+{
+ if (!cmma_flag)
+ return;
+ set_page_stable(page, order);
+}
+
+void arch_set_page_states(int make_stable)
+{
+ unsigned long flags, order, t;
+ struct list_head *l;
+ struct page *page;
+ struct zone *zone;
+
+ if (!cmma_flag)
+ return;
+ if (make_stable)
+ drain_local_pages(NULL);
+ for_each_populated_zone(zone) {
+ spin_lock_irqsave(&zone->lock, flags);
+ for_each_migratetype_order(order, t) {
+ list_for_each(l, &zone->free_area[order].free_list[t]) {
+ page = list_entry(l, struct page, lru);
+ if (make_stable)
+ set_page_stable(page, order);
+ else
+ set_page_unstable(page, order);
+ }
+ }
+ spin_unlock_irqrestore(&zone->lock, flags);
+ }
+}
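Usage note (illustrative): the __setup() handler above wires guest page hinting to the kernel command line, so a guest can boot with e.g. "cmma=on" (or "yes") to enable it and "cmma=off" (or "no") to disable it; cmma_init() then probes the ESSA instruction through the exception table and quietly clears cmma_flag when the facility is unavailable.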
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
new file mode 100644
index 00000000000..8400f494623
--- /dev/null
+++ b/arch/s390/mm/pageattr.c
@@ -0,0 +1,146 @@
+/*
+ * Copyright IBM Corp. 2011
+ * Author(s): Jan Glauber <jang@linux.vnet.ibm.com>
+ */
+#include <linux/hugetlb.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <asm/cacheflush.h>
+#include <asm/pgtable.h>
+#include <asm/page.h>
+
+#if PAGE_DEFAULT_KEY
+static inline unsigned long sske_frame(unsigned long addr, unsigned char skey)
+{
+ asm volatile(".insn rrf,0xb22b0000,%[skey],%[addr],9,0"
+ : [addr] "+a" (addr) : [skey] "d" (skey));
+ return addr;
+}
+
+void __storage_key_init_range(unsigned long start, unsigned long end)
+{
+ unsigned long boundary, size;
+
+ while (start < end) {
+ if (MACHINE_HAS_EDAT1) {
+ /* set storage keys for a 1MB frame */
+ size = 1UL << 20;
+ boundary = (start + size) & ~(size - 1);
+ if (boundary <= end) {
+ do {
+ start = sske_frame(start, PAGE_DEFAULT_KEY);
+ } while (start < boundary);
+ continue;
+ }
+ }
+ page_set_storage_key(start, PAGE_DEFAULT_KEY, 0);
+ start += PAGE_SIZE;
+ }
+}
+#endif
+
+static pte_t *walk_page_table(unsigned long addr)
+{
+ pgd_t *pgdp;
+ pud_t *pudp;
+ pmd_t *pmdp;
+ pte_t *ptep;
+
+ pgdp = pgd_offset_k(addr);
+ if (pgd_none(*pgdp))
+ return NULL;
+ pudp = pud_offset(pgdp, addr);
+ if (pud_none(*pudp) || pud_large(*pudp))
+ return NULL;
+ pmdp = pmd_offset(pudp, addr);
+ if (pmd_none(*pmdp) || pmd_large(*pmdp))
+ return NULL;
+ ptep = pte_offset_kernel(pmdp, addr);
+ if (pte_none(*ptep))
+ return NULL;
+ return ptep;
+}
+
+static void change_page_attr(unsigned long addr, int numpages,
+ pte_t (*set) (pte_t))
+{
+ pte_t *ptep, pte;
+ int i;
+
+ for (i = 0; i < numpages; i++) {
+ ptep = walk_page_table(addr);
+ if (WARN_ON_ONCE(!ptep))
+ break;
+ pte = *ptep;
+ pte = set(pte);
+ __ptep_ipte(addr, ptep);
+ *ptep = pte;
+ addr += PAGE_SIZE;
+ }
+}
+
+int set_memory_ro(unsigned long addr, int numpages)
+{
+ change_page_attr(addr, numpages, pte_wrprotect);
+ return 0;
+}
+
+int set_memory_rw(unsigned long addr, int numpages)
+{
+ change_page_attr(addr, numpages, pte_mkwrite);
+ return 0;
+}
+
+/* not possible */
+int set_memory_nx(unsigned long addr, int numpages)
+{
+ return 0;
+}
+
+int set_memory_x(unsigned long addr, int numpages)
+{
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+void kernel_map_pages(struct page *page, int numpages, int enable)
+{
+ unsigned long address;
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+ int i;
+
+ for (i = 0; i < numpages; i++) {
+ address = page_to_phys(page + i);
+ pgd = pgd_offset_k(address);
+ pud = pud_offset(pgd, address);
+ pmd = pmd_offset(pud, address);
+ pte = pte_offset_kernel(pmd, address);
+ if (!enable) {
+ __ptep_ipte(address, pte);
+ pte_val(*pte) = _PAGE_INVALID;
+ continue;
+ }
+ pte_val(*pte) = __pa(address);
+ }
+}
+
+#ifdef CONFIG_HIBERNATION
+bool kernel_page_present(struct page *page)
+{
+ unsigned long addr;
+ int cc;
+
+ addr = page_to_phys(page);
+ asm volatile(
+ " lra %1,0(%1)\n"
+ " ipm %0\n"
+ " srl %0,28"
+ : "=d" (cc), "+a" (addr) : : "cc");
+ return cc == 0;
+}
+#endif /* CONFIG_HIBERNATION */
+
+#endif /* CONFIG_DEBUG_PAGEALLOC */
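As an illustration of the new page attribute interface (not part of the patch), a caller that needs to modify a read-only, page-aligned kernel region could flip it writable and back as in the hypothetical sketch below; the prototypes are assumed to live in asm/cacheflush.h.

#include <asm/cacheflush.h>	/* assumed home of set_memory_ro()/_rw() */

/* Hypothetical helper: temporarily open a read-only region for patching. */
static void example_patch_ro_region(unsigned long addr, int numpages,
				    void (*do_patch)(void))
{
	set_memory_rw(addr, numpages);	/* drop write protection */
	do_patch();			/* ... modify the region ... */
	set_memory_ro(addr, numpages);	/* re-protect */
}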
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index fd072013f88..37b8241ec78 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -1,24 +1,24 @@
/*
- * arch/s390/mm/pgtable.c
- *
- * Copyright IBM Corp. 2007
+ * Copyright IBM Corp. 2007, 2011
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
+#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
-#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/quicklist.h>
+#include <linux/rcupdate.h>
+#include <linux/slab.h>
+#include <linux/swapops.h>
-#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
@@ -27,62 +27,52 @@
#ifndef CONFIG_64BIT
#define ALLOC_ORDER 1
-#define TABLES_PER_PAGE 4
-#define FRAG_MASK 15UL
-#define SECOND_HALVES 10UL
+#define FRAG_MASK 0x0f
#else
#define ALLOC_ORDER 2
-#define TABLES_PER_PAGE 2
-#define FRAG_MASK 3UL
-#define SECOND_HALVES 2UL
+#define FRAG_MASK 0x03
#endif
-unsigned long *crst_table_alloc(struct mm_struct *mm, int noexec)
+
+unsigned long *crst_table_alloc(struct mm_struct *mm)
{
struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
if (!page)
return NULL;
- page->index = 0;
- if (noexec) {
- struct page *shadow = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
- if (!shadow) {
- __free_pages(page, ALLOC_ORDER);
- return NULL;
- }
- page->index = page_to_phys(shadow);
- }
- spin_lock(&mm->page_table_lock);
- list_add(&page->lru, &mm->context.crst_list);
- spin_unlock(&mm->page_table_lock);
return (unsigned long *) page_to_phys(page);
}
void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
- unsigned long *shadow = get_shadow_table(table);
- struct page *page = virt_to_page(table);
-
- spin_lock(&mm->page_table_lock);
- list_del(&page->lru);
- spin_unlock(&mm->page_table_lock);
- if (shadow)
- free_pages((unsigned long) shadow, ALLOC_ORDER);
free_pages((unsigned long) table, ALLOC_ORDER);
}
#ifdef CONFIG_64BIT
+static void __crst_table_upgrade(void *arg)
+{
+ struct mm_struct *mm = arg;
+
+ if (current->active_mm == mm) {
+ clear_user_asce();
+ set_user_asce(mm);
+ }
+ __tlb_flush_local();
+}
+
int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
{
unsigned long *table, *pgd;
unsigned long entry;
+ int flush;
BUG_ON(limit > (1UL << 53));
+ flush = 0;
repeat:
- table = crst_table_alloc(mm, mm->context.noexec);
+ table = crst_table_alloc(mm);
if (!table)
return -ENOMEM;
- spin_lock(&mm->page_table_lock);
+ spin_lock_bh(&mm->page_table_lock);
if (mm->context.asce_limit < limit) {
pgd = (unsigned long *) mm->pgd;
if (mm->context.asce_limit <= (1UL << 31)) {
@@ -101,14 +91,17 @@ repeat:
crst_table_init(table, entry);
pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
mm->pgd = (pgd_t *) table;
+ mm->task_size = mm->context.asce_limit;
table = NULL;
+ flush = 1;
}
- spin_unlock(&mm->page_table_lock);
+ spin_unlock_bh(&mm->page_table_lock);
if (table)
crst_table_free(mm, table);
if (mm->context.asce_limit < limit)
goto repeat;
- update_mm(mm, current);
+ if (flush)
+ on_each_cpu(__crst_table_upgrade, mm, 0);
return 0;
}
@@ -116,9 +109,10 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
{
pgd_t *pgd;
- if (mm->context.asce_limit <= limit)
- return;
- __tlb_flush_mm(mm);
+ if (current->active_mm == mm) {
+ clear_user_asce();
+ __tlb_flush_mm(mm);
+ }
while (mm->context.asce_limit > limit) {
pgd = mm->pgd;
switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
@@ -138,93 +132,1365 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
BUG();
}
mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
+ mm->task_size = mm->context.asce_limit;
crst_table_free(mm, (unsigned long *) pgd);
}
- update_mm(mm, current);
+ if (current->active_mm == mm)
+ set_user_asce(mm);
}
#endif
+#ifdef CONFIG_PGSTE
+
+/**
+ * gmap_alloc - allocate a guest address space
+ * @mm: pointer to the parent mm_struct
+ *
+ * Returns a guest address space structure.
+ */
+struct gmap *gmap_alloc(struct mm_struct *mm)
+{
+ struct gmap *gmap;
+ struct page *page;
+ unsigned long *table;
+
+ gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
+ if (!gmap)
+ goto out;
+ INIT_LIST_HEAD(&gmap->crst_list);
+ gmap->mm = mm;
+ page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
+ if (!page)
+ goto out_free;
+ list_add(&page->lru, &gmap->crst_list);
+ table = (unsigned long *) page_to_phys(page);
+ crst_table_init(table, _REGION1_ENTRY_EMPTY);
+ gmap->table = table;
+ gmap->asce = _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH |
+ _ASCE_USER_BITS | __pa(table);
+ list_add(&gmap->list, &mm->context.gmap_list);
+ return gmap;
+
+out_free:
+ kfree(gmap);
+out:
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(gmap_alloc);
+
+static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table)
+{
+ struct gmap_pgtable *mp;
+ struct gmap_rmap *rmap;
+ struct page *page;
+
+ if (*table & _SEGMENT_ENTRY_INVALID)
+ return 0;
+ page = pfn_to_page(*table >> PAGE_SHIFT);
+ mp = (struct gmap_pgtable *) page->index;
+ list_for_each_entry(rmap, &mp->mapper, list) {
+ if (rmap->entry != table)
+ continue;
+ list_del(&rmap->list);
+ kfree(rmap);
+ break;
+ }
+ *table = mp->vmaddr | _SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_PROTECT;
+ return 1;
+}
+
+static void gmap_flush_tlb(struct gmap *gmap)
+{
+ if (MACHINE_HAS_IDTE)
+ __tlb_flush_asce(gmap->mm, (unsigned long) gmap->table |
+ _ASCE_TYPE_REGION1);
+ else
+ __tlb_flush_global();
+}
+
+/**
+ * gmap_free - free a guest address space
+ * @gmap: pointer to the guest address space structure
+ */
+void gmap_free(struct gmap *gmap)
+{
+ struct page *page, *next;
+ unsigned long *table;
+ int i;
+
+ /* Flush tlb. */
+ if (MACHINE_HAS_IDTE)
+ __tlb_flush_asce(gmap->mm, (unsigned long) gmap->table |
+ _ASCE_TYPE_REGION1);
+ else
+ __tlb_flush_global();
+
+ /* Free all segment & region tables. */
+ down_read(&gmap->mm->mmap_sem);
+ spin_lock(&gmap->mm->page_table_lock);
+ list_for_each_entry_safe(page, next, &gmap->crst_list, lru) {
+ table = (unsigned long *) page_to_phys(page);
+ if ((*table & _REGION_ENTRY_TYPE_MASK) == 0)
+ /* Remove gmap rmap structures for segment table. */
+ for (i = 0; i < PTRS_PER_PMD; i++, table++)
+ gmap_unlink_segment(gmap, table);
+ __free_pages(page, ALLOC_ORDER);
+ }
+ spin_unlock(&gmap->mm->page_table_lock);
+ up_read(&gmap->mm->mmap_sem);
+ list_del(&gmap->list);
+ kfree(gmap);
+}
+EXPORT_SYMBOL_GPL(gmap_free);
+
+/**
+ * gmap_enable - switch primary space to the guest address space
+ * @gmap: pointer to the guest address space structure
+ */
+void gmap_enable(struct gmap *gmap)
+{
+ S390_lowcore.gmap = (unsigned long) gmap;
+}
+EXPORT_SYMBOL_GPL(gmap_enable);
+
+/**
+ * gmap_disable - switch back to the standard primary address space
+ * @gmap: pointer to the guest address space structure
+ */
+void gmap_disable(struct gmap *gmap)
+{
+ S390_lowcore.gmap = 0UL;
+}
+EXPORT_SYMBOL_GPL(gmap_disable);
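
For orientation, the four calls above form the gmap lifecycle. A minimal caller sketch, illustrative only and not part of the patch; kernel context and the example_* names are assumptions:

/* Hypothetical caller sketch, not part of the patch. */
static int example_guest_setup(struct mm_struct *mm)
{
	struct gmap *gmap;

	gmap = gmap_alloc(mm);		/* create a guest address space */
	if (!gmap)
		return -ENOMEM;
	gmap_enable(gmap);		/* switch primary space to the guest */
	/* ... run the guest ... */
	gmap_disable(gmap);		/* back to the standard address space */
	gmap_free(gmap);		/* drop the region/segment tables */
	return 0;
}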
+
/*
- * page table entry allocation/free routines.
+ * gmap_alloc_table is assumed to be called with mmap_sem held
*/
-unsigned long *page_table_alloc(struct mm_struct *mm)
+static int gmap_alloc_table(struct gmap *gmap,
+ unsigned long *table, unsigned long init)
+ __releases(&gmap->mm->page_table_lock)
+ __acquires(&gmap->mm->page_table_lock)
{
struct page *page;
+ unsigned long *new;
+
+	/* Since we don't free the gmap table until gmap_free we can unlock */
+ spin_unlock(&gmap->mm->page_table_lock);
+ page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
+ spin_lock(&gmap->mm->page_table_lock);
+ if (!page)
+ return -ENOMEM;
+ new = (unsigned long *) page_to_phys(page);
+ crst_table_init(new, init);
+ if (*table & _REGION_ENTRY_INVALID) {
+ list_add(&page->lru, &gmap->crst_list);
+ *table = (unsigned long) new | _REGION_ENTRY_LENGTH |
+ (*table & _REGION_ENTRY_TYPE_MASK);
+ } else
+ __free_pages(page, ALLOC_ORDER);
+ return 0;
+}
+
+/**
+ * gmap_unmap_segment - unmap segment from the guest address space
+ * @gmap: pointer to the guest address space structure
+ * @to: address in the guest address space
+ * @len: length of the memory area to unmap
+ *
+ * Returns 0 if the unmap succeeded, -EINVAL if not.
+ */
+int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
+{
unsigned long *table;
- unsigned long bits;
+ unsigned long off;
+ int flush;
+
+ if ((to | len) & (PMD_SIZE - 1))
+ return -EINVAL;
+ if (len == 0 || to + len < to)
+ return -EINVAL;
+
+ flush = 0;
+ down_read(&gmap->mm->mmap_sem);
+ spin_lock(&gmap->mm->page_table_lock);
+ for (off = 0; off < len; off += PMD_SIZE) {
+ /* Walk the guest addr space page table */
+ table = gmap->table + (((to + off) >> 53) & 0x7ff);
+ if (*table & _REGION_ENTRY_INVALID)
+ goto out;
+ table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+ table = table + (((to + off) >> 42) & 0x7ff);
+ if (*table & _REGION_ENTRY_INVALID)
+ goto out;
+ table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+ table = table + (((to + off) >> 31) & 0x7ff);
+ if (*table & _REGION_ENTRY_INVALID)
+ goto out;
+ table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+ table = table + (((to + off) >> 20) & 0x7ff);
+
+ /* Clear segment table entry in guest address space. */
+ flush |= gmap_unlink_segment(gmap, table);
+ *table = _SEGMENT_ENTRY_INVALID;
+ }
+out:
+ spin_unlock(&gmap->mm->page_table_lock);
+ up_read(&gmap->mm->mmap_sem);
+ if (flush)
+ gmap_flush_tlb(gmap);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(gmap_unmap_segment);
+
+/**
+ * gmap_map_segment - map a segment to the guest address space
+ * @gmap: pointer to the guest address space structure
+ * @from: source address in the parent address space
+ * @to: target address in the guest address space
+ * @len: length of the memory area to map
+ *
+ * Returns 0 if the map succeeded, -EINVAL or -ENOMEM if not.
+ */
+int gmap_map_segment(struct gmap *gmap, unsigned long from,
+ unsigned long to, unsigned long len)
+{
+ unsigned long *table;
+ unsigned long off;
+ int flush;
+
+ if ((from | to | len) & (PMD_SIZE - 1))
+ return -EINVAL;
+ if (len == 0 || from + len > TASK_MAX_SIZE ||
+ from + len < from || to + len < to)
+ return -EINVAL;
+
+ flush = 0;
+ down_read(&gmap->mm->mmap_sem);
+ spin_lock(&gmap->mm->page_table_lock);
+ for (off = 0; off < len; off += PMD_SIZE) {
+ /* Walk the gmap address space page table */
+ table = gmap->table + (((to + off) >> 53) & 0x7ff);
+ if ((*table & _REGION_ENTRY_INVALID) &&
+ gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY))
+ goto out_unmap;
+ table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+ table = table + (((to + off) >> 42) & 0x7ff);
+ if ((*table & _REGION_ENTRY_INVALID) &&
+ gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY))
+ goto out_unmap;
+ table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+ table = table + (((to + off) >> 31) & 0x7ff);
+ if ((*table & _REGION_ENTRY_INVALID) &&
+ gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY))
+ goto out_unmap;
+ table = (unsigned long *) (*table & _REGION_ENTRY_ORIGIN);
+ table = table + (((to + off) >> 20) & 0x7ff);
+
+ /* Store 'from' address in an invalid segment table entry. */
+ flush |= gmap_unlink_segment(gmap, table);
+ *table = (from + off) | (_SEGMENT_ENTRY_INVALID |
+ _SEGMENT_ENTRY_PROTECT);
+ }
+ spin_unlock(&gmap->mm->page_table_lock);
+ up_read(&gmap->mm->mmap_sem);
+ if (flush)
+ gmap_flush_tlb(gmap);
+ return 0;
+
+out_unmap:
+ spin_unlock(&gmap->mm->page_table_lock);
+ up_read(&gmap->mm->mmap_sem);
+ gmap_unmap_segment(gmap, to, len);
+ return -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(gmap_map_segment);
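
A usage sketch for the two segment calls above, illustrative only; the addresses, sizes and example_* name are made up, and all three values must be 1 MB (PMD_SIZE) aligned as the checks above enforce:

/* Illustrative only: map 16 MB of the host mm at 'host_addr' (assumed
 * 1 MB aligned) to guest address 0x1000000, then tear it down again. */
static int example_map(struct gmap *gmap, unsigned long host_addr)
{
	int rc;

	rc = gmap_map_segment(gmap, host_addr, 0x1000000UL, 16UL << 20);
	if (rc)
		return rc;		/* -EINVAL or -ENOMEM */
	/* ... use the mapping ... */
	return gmap_unmap_segment(gmap, 0x1000000UL, 16UL << 20);
}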
+
+static unsigned long *gmap_table_walk(unsigned long address, struct gmap *gmap)
+{
+ unsigned long *table;
+
+ table = gmap->table + ((address >> 53) & 0x7ff);
+ if (unlikely(*table & _REGION_ENTRY_INVALID))
+ return ERR_PTR(-EFAULT);
+ table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+ table = table + ((address >> 42) & 0x7ff);
+ if (unlikely(*table & _REGION_ENTRY_INVALID))
+ return ERR_PTR(-EFAULT);
+ table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+ table = table + ((address >> 31) & 0x7ff);
+ if (unlikely(*table & _REGION_ENTRY_INVALID))
+ return ERR_PTR(-EFAULT);
+ table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+ table = table + ((address >> 20) & 0x7ff);
+ return table;
+}
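
The shift/mask arithmetic above indexes the four table levels: bits 63-53 select the region-first entry, 52-42 the region-second, 41-31 the region-third and 30-20 the segment entry, each table holding 2048 entries (hence the 0x7ff mask). A stand-alone illustration of the index computation, plain user-space C with no assumptions beyond the shifts shown above:

#include <stdio.h>

/* Print the four table indices gmap_table_walk() would use. */
int main(void)
{
	unsigned long address = 0x0000123456789abcUL;	/* arbitrary example */

	printf("region-1 index: %lu\n", (address >> 53) & 0x7ff);
	printf("region-2 index: %lu\n", (address >> 42) & 0x7ff);
	printf("region-3 index: %lu\n", (address >> 31) & 0x7ff);
	printf("segment index:  %lu\n", (address >> 20) & 0x7ff);
	return 0;
}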
+
+/**
+ * __gmap_translate - translate a guest address to a user space address
+ * @address: guest address
+ * @gmap: pointer to guest mapping meta data structure
+ *
+ * Returns the user space address that corresponds to the guest address or
+ * -EFAULT if no such mapping exists.
+ * This function does not establish potentially missing page table entries.
+ * The mmap_sem of the mm that belongs to the address space must be held
+ * when this function gets called.
+ */
+unsigned long __gmap_translate(unsigned long address, struct gmap *gmap)
+{
+ unsigned long *segment_ptr, vmaddr, segment;
+ struct gmap_pgtable *mp;
+ struct page *page;
+
+ current->thread.gmap_addr = address;
+ segment_ptr = gmap_table_walk(address, gmap);
+ if (IS_ERR(segment_ptr))
+ return PTR_ERR(segment_ptr);
+ /* Convert the gmap address to an mm address. */
+ segment = *segment_ptr;
+ if (!(segment & _SEGMENT_ENTRY_INVALID)) {
+ page = pfn_to_page(segment >> PAGE_SHIFT);
+ mp = (struct gmap_pgtable *) page->index;
+ return mp->vmaddr | (address & ~PMD_MASK);
+ } else if (segment & _SEGMENT_ENTRY_PROTECT) {
+ vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
+ return vmaddr | (address & ~PMD_MASK);
+ }
+ return -EFAULT;
+}
+EXPORT_SYMBOL_GPL(__gmap_translate);
+
+/**
+ * gmap_translate - translate a guest address to a user space address
+ * @address: guest address
+ * @gmap: pointer to guest mapping meta data structure
+ *
+ * Returns the user space address that corresponds to the guest address or
+ * -EFAULT if no such mapping exists.
+ * This function does not establish potentially missing page table entries.
+ */
+unsigned long gmap_translate(unsigned long address, struct gmap *gmap)
+{
+ unsigned long rc;
- bits = mm->context.noexec ? 3UL : 1UL;
+ down_read(&gmap->mm->mmap_sem);
+ rc = __gmap_translate(address, gmap);
+ up_read(&gmap->mm->mmap_sem);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(gmap_translate);
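
Since the return value is an unsigned long that can carry -EFAULT, a caller is expected to test it with IS_ERR_VALUE(), as gmap_ipte_notify() further down does. A hedged usage sketch; the example_* name is hypothetical:

/* Illustrative only: resolve a guest address to its host (user space)
 * address without establishing anything that is missing. */
static unsigned long example_translate(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;

	vmaddr = gmap_translate(gaddr, gmap);
	if (IS_ERR_VALUE(vmaddr))
		return 0;	/* no mapping: gmap_translate() returned -EFAULT */
	return vmaddr;
}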
+
+static int gmap_connect_pgtable(unsigned long address, unsigned long segment,
+ unsigned long *segment_ptr, struct gmap *gmap)
+{
+ unsigned long vmaddr;
+ struct vm_area_struct *vma;
+ struct gmap_pgtable *mp;
+ struct gmap_rmap *rmap;
+ struct mm_struct *mm;
+ struct page *page;
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+
+ mm = gmap->mm;
+ vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
+ vma = find_vma(mm, vmaddr);
+ if (!vma || vma->vm_start > vmaddr)
+ return -EFAULT;
+ /* Walk the parent mm page table */
+ pgd = pgd_offset(mm, vmaddr);
+ pud = pud_alloc(mm, pgd, vmaddr);
+ if (!pud)
+ return -ENOMEM;
+ pmd = pmd_alloc(mm, pud, vmaddr);
+ if (!pmd)
+ return -ENOMEM;
+ if (!pmd_present(*pmd) &&
+ __pte_alloc(mm, vma, pmd, vmaddr))
+ return -ENOMEM;
+ /* large pmds cannot yet be handled */
+ if (pmd_large(*pmd))
+ return -EFAULT;
+ /* pmd now points to a valid segment table entry. */
+ rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT);
+ if (!rmap)
+ return -ENOMEM;
+ /* Link gmap segment table entry location to page table. */
+ page = pmd_page(*pmd);
+ mp = (struct gmap_pgtable *) page->index;
+ rmap->gmap = gmap;
+ rmap->entry = segment_ptr;
+ rmap->vmaddr = address & PMD_MASK;
spin_lock(&mm->page_table_lock);
- page = NULL;
+ if (*segment_ptr == segment) {
+ list_add(&rmap->list, &mp->mapper);
+ /* Set gmap segment table entry to page table. */
+ *segment_ptr = pmd_val(*pmd) & PAGE_MASK;
+ rmap = NULL;
+ }
+ spin_unlock(&mm->page_table_lock);
+ kfree(rmap);
+ return 0;
+}
+
+static void gmap_disconnect_pgtable(struct mm_struct *mm, unsigned long *table)
+{
+ struct gmap_rmap *rmap, *next;
+ struct gmap_pgtable *mp;
+ struct page *page;
+ int flush;
+
+ flush = 0;
+ spin_lock(&mm->page_table_lock);
+ page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
+ mp = (struct gmap_pgtable *) page->index;
+ list_for_each_entry_safe(rmap, next, &mp->mapper, list) {
+ *rmap->entry = mp->vmaddr | (_SEGMENT_ENTRY_INVALID |
+ _SEGMENT_ENTRY_PROTECT);
+ list_del(&rmap->list);
+ kfree(rmap);
+ flush = 1;
+ }
+ spin_unlock(&mm->page_table_lock);
+ if (flush)
+ __tlb_flush_global();
+}
+
+/*
+ * This function is assumed to be called with mmap_sem held.
+ */
+unsigned long __gmap_fault(unsigned long address, struct gmap *gmap)
+{
+ unsigned long *segment_ptr, segment;
+ struct gmap_pgtable *mp;
+ struct page *page;
+ int rc;
+
+ current->thread.gmap_addr = address;
+ segment_ptr = gmap_table_walk(address, gmap);
+ if (IS_ERR(segment_ptr))
+ return -EFAULT;
+ /* Convert the gmap address to an mm address. */
+ while (1) {
+ segment = *segment_ptr;
+ if (!(segment & _SEGMENT_ENTRY_INVALID)) {
+ /* Page table is present */
+ page = pfn_to_page(segment >> PAGE_SHIFT);
+ mp = (struct gmap_pgtable *) page->index;
+ return mp->vmaddr | (address & ~PMD_MASK);
+ }
+ if (!(segment & _SEGMENT_ENTRY_PROTECT))
+ /* Nothing mapped in the gmap address space. */
+ break;
+ rc = gmap_connect_pgtable(address, segment, segment_ptr, gmap);
+ if (rc)
+ return rc;
+ }
+ return -EFAULT;
+}
+
+unsigned long gmap_fault(unsigned long address, struct gmap *gmap)
+{
+ unsigned long rc;
+
+ down_read(&gmap->mm->mmap_sem);
+ rc = __gmap_fault(address, gmap);
+ up_read(&gmap->mm->mmap_sem);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(gmap_fault);
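
Unlike gmap_translate(), gmap_fault() also connects a missing page table via gmap_connect_pgtable() and retries the walk. A sketch of a hypothetical fault path, illustrative only:

/* Illustrative only: resolve a guest fault address, connecting the
 * backing page table if necessary. */
static int example_handle_guest_fault(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;

	vmaddr = gmap_fault(gaddr, gmap);
	if (IS_ERR_VALUE(vmaddr))
		return (int) vmaddr;	/* -EFAULT or -ENOMEM */
	/* vmaddr is now the host address backing gaddr */
	return 0;
}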
+
+static void gmap_zap_swap_entry(swp_entry_t entry, struct mm_struct *mm)
+{
+ if (!non_swap_entry(entry))
+ dec_mm_counter(mm, MM_SWAPENTS);
+ else if (is_migration_entry(entry)) {
+ struct page *page = migration_entry_to_page(entry);
+
+ if (PageAnon(page))
+ dec_mm_counter(mm, MM_ANONPAGES);
+ else
+ dec_mm_counter(mm, MM_FILEPAGES);
+ }
+ free_swap_and_cache(entry);
+}
+
+/*
+ * The mm->mmap_sem lock must be held.
+ */
+static void gmap_zap_unused(struct mm_struct *mm, unsigned long address)
+{
+ unsigned long ptev, pgstev;
+ spinlock_t *ptl;
+ pgste_t pgste;
+ pte_t *ptep, pte;
+
+ ptep = get_locked_pte(mm, address, &ptl);
+ if (unlikely(!ptep))
+ return;
+ pte = *ptep;
+ if (!pte_swap(pte))
+ goto out_pte;
+ /* Zap unused and logically-zero pages */
+ pgste = pgste_get_lock(ptep);
+ pgstev = pgste_val(pgste);
+ ptev = pte_val(pte);
+ if (((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED) ||
+ ((pgstev & _PGSTE_GPS_ZERO) && (ptev & _PAGE_INVALID))) {
+ gmap_zap_swap_entry(pte_to_swp_entry(pte), mm);
+ pte_clear(mm, address, ptep);
+ }
+ pgste_set_unlock(ptep, pgste);
+out_pte:
+ pte_unmap_unlock(*ptep, ptl);
+}
+
+/*
+ * This function is assumed to be called with mmap_sem held.
+ */
+void __gmap_zap(unsigned long address, struct gmap *gmap)
+{
+ unsigned long *table, *segment_ptr;
+ unsigned long segment, pgstev, ptev;
+ struct gmap_pgtable *mp;
+ struct page *page;
+
+ segment_ptr = gmap_table_walk(address, gmap);
+ if (IS_ERR(segment_ptr))
+ return;
+ segment = *segment_ptr;
+ if (segment & _SEGMENT_ENTRY_INVALID)
+ return;
+ page = pfn_to_page(segment >> PAGE_SHIFT);
+ mp = (struct gmap_pgtable *) page->index;
+ address = mp->vmaddr | (address & ~PMD_MASK);
+ /* Page table is present */
+ table = (unsigned long *)(segment & _SEGMENT_ENTRY_ORIGIN);
+ table = table + ((address >> 12) & 0xff);
+ pgstev = table[PTRS_PER_PTE];
+ ptev = table[0];
+ /* quick check, checked again with locks held */
+ if (((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED) ||
+ ((pgstev & _PGSTE_GPS_ZERO) && (ptev & _PAGE_INVALID)))
+ gmap_zap_unused(gmap->mm, address);
+}
+EXPORT_SYMBOL_GPL(__gmap_zap);
+
+void gmap_discard(unsigned long from, unsigned long to, struct gmap *gmap)
+{
+
+ unsigned long *table, address, size;
+ struct vm_area_struct *vma;
+ struct gmap_pgtable *mp;
+ struct page *page;
+
+ down_read(&gmap->mm->mmap_sem);
+ address = from;
+ while (address < to) {
+ /* Walk the gmap address space page table */
+ table = gmap->table + ((address >> 53) & 0x7ff);
+ if (unlikely(*table & _REGION_ENTRY_INVALID)) {
+ address = (address + PMD_SIZE) & PMD_MASK;
+ continue;
+ }
+ table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+ table = table + ((address >> 42) & 0x7ff);
+ if (unlikely(*table & _REGION_ENTRY_INVALID)) {
+ address = (address + PMD_SIZE) & PMD_MASK;
+ continue;
+ }
+ table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+ table = table + ((address >> 31) & 0x7ff);
+ if (unlikely(*table & _REGION_ENTRY_INVALID)) {
+ address = (address + PMD_SIZE) & PMD_MASK;
+ continue;
+ }
+ table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+ table = table + ((address >> 20) & 0x7ff);
+ if (unlikely(*table & _SEGMENT_ENTRY_INVALID)) {
+ address = (address + PMD_SIZE) & PMD_MASK;
+ continue;
+ }
+ page = pfn_to_page(*table >> PAGE_SHIFT);
+ mp = (struct gmap_pgtable *) page->index;
+ vma = find_vma(gmap->mm, mp->vmaddr);
+ size = min(to - address, PMD_SIZE - (address & ~PMD_MASK));
+ zap_page_range(vma, mp->vmaddr | (address & ~PMD_MASK),
+ size, NULL);
+ address = (address + PMD_SIZE) & PMD_MASK;
+ }
+ up_read(&gmap->mm->mmap_sem);
+}
+EXPORT_SYMBOL_GPL(gmap_discard);
+
+static LIST_HEAD(gmap_notifier_list);
+static DEFINE_SPINLOCK(gmap_notifier_lock);
+
+/**
+ * gmap_register_ipte_notifier - register a pte invalidation callback
+ * @nb: pointer to the gmap notifier block
+ */
+void gmap_register_ipte_notifier(struct gmap_notifier *nb)
+{
+ spin_lock(&gmap_notifier_lock);
+ list_add(&nb->list, &gmap_notifier_list);
+ spin_unlock(&gmap_notifier_lock);
+}
+EXPORT_SYMBOL_GPL(gmap_register_ipte_notifier);
+
+/**
+ * gmap_unregister_ipte_notifier - remove a pte invalidation callback
+ * @nb: pointer to the gmap notifier block
+ */
+void gmap_unregister_ipte_notifier(struct gmap_notifier *nb)
+{
+ spin_lock(&gmap_notifier_lock);
+ list_del_init(&nb->list);
+ spin_unlock(&gmap_notifier_lock);
+}
+EXPORT_SYMBOL_GPL(gmap_unregister_ipte_notifier);
+
+/**
+ * gmap_ipte_notify - mark a range of ptes for invalidation notification
+ * @gmap: pointer to guest mapping meta data structure
+ * @start: virtual address in the guest address space
+ * @len: size of area
+ *
+ * Returns 0 if a gmap mapping exists for each page in the given range and
+ * the invalidation notification could be set. If the gmap mapping is missing
+ * for one or more pages -EFAULT is returned. If no memory could be allocated
+ * -ENOMEM is returned. This function establishes missing page table entries.
+ */
+int gmap_ipte_notify(struct gmap *gmap, unsigned long start, unsigned long len)
+{
+ unsigned long addr;
+ spinlock_t *ptl;
+ pte_t *ptep, entry;
+ pgste_t pgste;
+ int rc = 0;
+
+ if ((start & ~PAGE_MASK) || (len & ~PAGE_MASK))
+ return -EINVAL;
+ down_read(&gmap->mm->mmap_sem);
+ while (len) {
+ /* Convert gmap address and connect the page tables */
+ addr = __gmap_fault(start, gmap);
+ if (IS_ERR_VALUE(addr)) {
+ rc = addr;
+ break;
+ }
+ /* Get the page mapped */
+ if (fixup_user_fault(current, gmap->mm, addr, FAULT_FLAG_WRITE)) {
+ rc = -EFAULT;
+ break;
+ }
+ /* Walk the process page table, lock and get pte pointer */
+ ptep = get_locked_pte(gmap->mm, addr, &ptl);
+ if (unlikely(!ptep))
+ continue;
+ /* Set notification bit in the pgste of the pte */
+ entry = *ptep;
+ if ((pte_val(entry) & (_PAGE_INVALID | _PAGE_PROTECT)) == 0) {
+ pgste = pgste_get_lock(ptep);
+ pgste_val(pgste) |= PGSTE_IN_BIT;
+ pgste_set_unlock(ptep, pgste);
+ start += PAGE_SIZE;
+ len -= PAGE_SIZE;
+ }
+ spin_unlock(ptl);
+ }
+ up_read(&gmap->mm->mmap_sem);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(gmap_ipte_notify);
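
A sketch of how a consumer might hook the notifier, illustrative only; the gmap_notifier fields (->list, ->notifier_call) and the callback signature are inferred from gmap_do_ipte_notify() below, and the example_* names are hypothetical:

/* Callback invoked when a pte backing guest address 'gaddr' is
 * invalidated (signature as used by gmap_do_ipte_notify() below). */
static void example_invalidation_cb(struct gmap *gmap, unsigned long gaddr)
{
	/* react to the invalidation, e.g. kick the vcpu */
}

static struct gmap_notifier example_nb = {
	.notifier_call = example_invalidation_cb,
};

static int example_watch(struct gmap *gmap, unsigned long gaddr)
{
	gmap_register_ipte_notifier(&example_nb);
	/* arm notification for one 4K guest page */
	return gmap_ipte_notify(gmap, gaddr & PAGE_MASK, PAGE_SIZE);
}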
+
+/**
+ * gmap_do_ipte_notify - call all invalidation callbacks for a specific pte.
+ * @mm: pointer to the process mm_struct
+ * @pte: pointer to the page table entry
+ *
+ * This function is assumed to be called with the page table lock held
+ * for the pte to notify.
+ */
+void gmap_do_ipte_notify(struct mm_struct *mm, pte_t *pte)
+{
+ unsigned long segment_offset;
+ struct gmap_notifier *nb;
+ struct gmap_pgtable *mp;
+ struct gmap_rmap *rmap;
+ struct page *page;
+
+ segment_offset = ((unsigned long) pte) & (255 * sizeof(pte_t));
+ segment_offset = segment_offset * (4096 / sizeof(pte_t));
+ page = pfn_to_page(__pa(pte) >> PAGE_SHIFT);
+ mp = (struct gmap_pgtable *) page->index;
+ spin_lock(&gmap_notifier_lock);
+ list_for_each_entry(rmap, &mp->mapper, list) {
+ list_for_each_entry(nb, &gmap_notifier_list, list)
+ nb->notifier_call(rmap->gmap,
+ rmap->vmaddr + segment_offset);
+ }
+ spin_unlock(&gmap_notifier_lock);
+}
+EXPORT_SYMBOL_GPL(gmap_do_ipte_notify);
+
+static inline int page_table_with_pgste(struct page *page)
+{
+ return atomic_read(&page->_mapcount) == 0;
+}
+
+static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
+ unsigned long vmaddr)
+{
+ struct page *page;
+ unsigned long *table;
+ struct gmap_pgtable *mp;
+
+ page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
+ if (!page)
+ return NULL;
+ mp = kmalloc(sizeof(*mp), GFP_KERNEL|__GFP_REPEAT);
+ if (!mp) {
+ __free_page(page);
+ return NULL;
+ }
+ if (!pgtable_page_ctor(page)) {
+ kfree(mp);
+ __free_page(page);
+ return NULL;
+ }
+ mp->vmaddr = vmaddr & PMD_MASK;
+ INIT_LIST_HEAD(&mp->mapper);
+ page->index = (unsigned long) mp;
+ atomic_set(&page->_mapcount, 0);
+ table = (unsigned long *) page_to_phys(page);
+ clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
+ clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
+ return table;
+}
+
+static inline void page_table_free_pgste(unsigned long *table)
+{
+ struct page *page;
+ struct gmap_pgtable *mp;
+
+ page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
+ mp = (struct gmap_pgtable *) page->index;
+ BUG_ON(!list_empty(&mp->mapper));
+ pgtable_page_dtor(page);
+ atomic_set(&page->_mapcount, -1);
+ kfree(mp);
+ __free_page(page);
+}
+
+static inline unsigned long page_table_reset_pte(struct mm_struct *mm, pmd_t *pmd,
+ unsigned long addr, unsigned long end, bool init_skey)
+{
+ pte_t *start_pte, *pte;
+ spinlock_t *ptl;
+ pgste_t pgste;
+
+ start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
+ pte = start_pte;
+ do {
+ pgste = pgste_get_lock(pte);
+ pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK;
+ if (init_skey) {
+ unsigned long address;
+
+ pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT |
+ PGSTE_GR_BIT | PGSTE_GC_BIT);
+
+ /* skip invalid and not writable pages */
+ if (pte_val(*pte) & _PAGE_INVALID ||
+ !(pte_val(*pte) & _PAGE_WRITE)) {
+ pgste_set_unlock(pte, pgste);
+ continue;
+ }
+
+ address = pte_val(*pte) & PAGE_MASK;
+ page_set_storage_key(address, PAGE_DEFAULT_KEY, 1);
+ }
+ pgste_set_unlock(pte, pgste);
+ } while (pte++, addr += PAGE_SIZE, addr != end);
+ pte_unmap_unlock(start_pte, ptl);
+
+ return addr;
+}
+
+static inline unsigned long page_table_reset_pmd(struct mm_struct *mm, pud_t *pud,
+ unsigned long addr, unsigned long end, bool init_skey)
+{
+ unsigned long next;
+ pmd_t *pmd;
+
+ pmd = pmd_offset(pud, addr);
+ do {
+ next = pmd_addr_end(addr, end);
+ if (pmd_none_or_clear_bad(pmd))
+ continue;
+ next = page_table_reset_pte(mm, pmd, addr, next, init_skey);
+ } while (pmd++, addr = next, addr != end);
+
+ return addr;
+}
+
+static inline unsigned long page_table_reset_pud(struct mm_struct *mm, pgd_t *pgd,
+ unsigned long addr, unsigned long end, bool init_skey)
+{
+ unsigned long next;
+ pud_t *pud;
+
+ pud = pud_offset(pgd, addr);
+ do {
+ next = pud_addr_end(addr, end);
+ if (pud_none_or_clear_bad(pud))
+ continue;
+ next = page_table_reset_pmd(mm, pud, addr, next, init_skey);
+ } while (pud++, addr = next, addr != end);
+
+ return addr;
+}
+
+void page_table_reset_pgste(struct mm_struct *mm, unsigned long start,
+ unsigned long end, bool init_skey)
+{
+ unsigned long addr, next;
+ pgd_t *pgd;
+
+ down_write(&mm->mmap_sem);
+ if (init_skey && mm_use_skey(mm))
+ goto out_up;
+ addr = start;
+ pgd = pgd_offset(mm, addr);
+ do {
+ next = pgd_addr_end(addr, end);
+ if (pgd_none_or_clear_bad(pgd))
+ continue;
+ next = page_table_reset_pud(mm, pgd, addr, next, init_skey);
+ } while (pgd++, addr = next, addr != end);
+ if (init_skey)
+ current->mm->context.use_skey = 1;
+out_up:
+ up_write(&mm->mmap_sem);
+}
+EXPORT_SYMBOL(page_table_reset_pgste);
+
+int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
+ unsigned long key, bool nq)
+{
+ spinlock_t *ptl;
+ pgste_t old, new;
+ pte_t *ptep;
+
+ down_read(&mm->mmap_sem);
+ ptep = get_locked_pte(current->mm, addr, &ptl);
+ if (unlikely(!ptep)) {
+ up_read(&mm->mmap_sem);
+ return -EFAULT;
+ }
+
+ new = old = pgste_get_lock(ptep);
+ pgste_val(new) &= ~(PGSTE_GR_BIT | PGSTE_GC_BIT |
+ PGSTE_ACC_BITS | PGSTE_FP_BIT);
+ pgste_val(new) |= (key & (_PAGE_CHANGED | _PAGE_REFERENCED)) << 48;
+ pgste_val(new) |= (key & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
+ if (!(pte_val(*ptep) & _PAGE_INVALID)) {
+ unsigned long address, bits, skey;
+
+ address = pte_val(*ptep) & PAGE_MASK;
+ skey = (unsigned long) page_get_storage_key(address);
+ bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
+ skey = key & (_PAGE_ACC_BITS | _PAGE_FP_BIT);
+ /* Set storage key ACC and FP */
+ page_set_storage_key(address, skey, !nq);
+ /* Merge host changed & referenced into pgste */
+ pgste_val(new) |= bits << 52;
+ }
+ /* changing the guest storage key is considered a change of the page */
+ if ((pgste_val(new) ^ pgste_val(old)) &
+ (PGSTE_ACC_BITS | PGSTE_FP_BIT | PGSTE_GR_BIT | PGSTE_GC_BIT))
+ pgste_val(new) |= PGSTE_UC_BIT;
+
+ pgste_set_unlock(ptep, new);
+ pte_unmap_unlock(*ptep, ptl);
+ up_read(&mm->mmap_sem);
+ return 0;
+}
+EXPORT_SYMBOL(set_guest_storage_key);
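
A hedged usage sketch, illustrative only; 'key' is assumed to already be in the architected storage-key layout that the shifts above expect, and the example_* name is hypothetical:

/* Illustrative only: apply a guest-provided storage key to the page
 * backing address 'addr' in the current mm. */
static int example_set_key(unsigned long addr, unsigned long key)
{
	/* nq == true selects the nonquiescing key update (passed as !nq) */
	return set_guest_storage_key(current->mm, addr, key, true);
}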
+
+#else /* CONFIG_PGSTE */
+
+static inline int page_table_with_pgste(struct page *page)
+{
+ return 0;
+}
+
+static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
+ unsigned long vmaddr)
+{
+ return NULL;
+}
+
+void page_table_reset_pgste(struct mm_struct *mm, unsigned long start,
+ unsigned long end, bool init_skey)
+{
+}
+
+static inline void page_table_free_pgste(unsigned long *table)
+{
+}
+
+static inline void gmap_disconnect_pgtable(struct mm_struct *mm,
+ unsigned long *table)
+{
+}
+
+#endif /* CONFIG_PGSTE */
+
+static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
+{
+ unsigned int old, new;
+
+ do {
+ old = atomic_read(v);
+ new = old ^ bits;
+ } while (atomic_cmpxchg(v, old, new) != old);
+ return new;
+}
+
+/*
+ * page table entry allocation/free routines.
+ */
+unsigned long *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr)
+{
+ unsigned long *uninitialized_var(table);
+ struct page *uninitialized_var(page);
+ unsigned int mask, bit;
+
+ if (mm_has_pgste(mm))
+ return page_table_alloc_pgste(mm, vmaddr);
+ /* Allocate fragments of a 4K page as 1K/2K page table */
+ spin_lock_bh(&mm->context.list_lock);
+ mask = FRAG_MASK;
if (!list_empty(&mm->context.pgtable_list)) {
page = list_first_entry(&mm->context.pgtable_list,
struct page, lru);
- if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1))
- page = NULL;
+ table = (unsigned long *) page_to_phys(page);
+ mask = atomic_read(&page->_mapcount);
+ mask = mask | (mask >> 4);
}
- if (!page) {
- spin_unlock(&mm->page_table_lock);
+ if ((mask & FRAG_MASK) == FRAG_MASK) {
+ spin_unlock_bh(&mm->context.list_lock);
page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
if (!page)
return NULL;
- pgtable_page_ctor(page);
- page->flags &= ~FRAG_MASK;
+ if (!pgtable_page_ctor(page)) {
+ __free_page(page);
+ return NULL;
+ }
+ atomic_set(&page->_mapcount, 1);
table = (unsigned long *) page_to_phys(page);
- clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
- spin_lock(&mm->page_table_lock);
+ clear_table(table, _PAGE_INVALID, PAGE_SIZE);
+ spin_lock_bh(&mm->context.list_lock);
list_add(&page->lru, &mm->context.pgtable_list);
+ } else {
+ for (bit = 1; mask & bit; bit <<= 1)
+ table += PTRS_PER_PTE;
+ mask = atomic_xor_bits(&page->_mapcount, bit);
+ if ((mask & FRAG_MASK) == FRAG_MASK)
+ list_del(&page->lru);
}
- table = (unsigned long *) page_to_phys(page);
- while (page->flags & bits) {
- table += 256;
- bits <<= 1;
- }
- page->flags |= bits;
- if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1))
- list_move_tail(&page->lru, &mm->context.pgtable_list);
- spin_unlock(&mm->page_table_lock);
+ spin_unlock_bh(&mm->context.list_lock);
return table;
}
void page_table_free(struct mm_struct *mm, unsigned long *table)
{
struct page *page;
- unsigned long bits;
+ unsigned int bit, mask;
- bits = mm->context.noexec ? 3UL : 1UL;
- bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
- spin_lock(&mm->page_table_lock);
- page->flags ^= bits;
- if (page->flags & FRAG_MASK) {
- /* Page now has some free pgtable fragments. */
- list_move(&page->lru, &mm->context.pgtable_list);
- page = NULL;
- } else
- /* All fragments of the 4K page have been freed. */
+ if (page_table_with_pgste(page)) {
+ gmap_disconnect_pgtable(mm, table);
+ return page_table_free_pgste(table);
+ }
+ /* Free 1K/2K page table fragment of a 4K page */
+ bit = 1 << ((__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t)));
+ spin_lock_bh(&mm->context.list_lock);
+ if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
list_del(&page->lru);
- spin_unlock(&mm->page_table_lock);
- if (page) {
+ mask = atomic_xor_bits(&page->_mapcount, bit);
+ if (mask & FRAG_MASK)
+ list_add(&page->lru, &mm->context.pgtable_list);
+ spin_unlock_bh(&mm->context.list_lock);
+ if (mask == 0) {
pgtable_page_dtor(page);
+ atomic_set(&page->_mapcount, -1);
__free_page(page);
}
}
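
The two routines above hand out 1K/2K page-table fragments of a 4K page and track their state in the low bits of page->_mapcount: the low nibble records fragments in use, the next nibble (bit << 4) marks fragments queued for RCU freeing. A stand-alone illustration of that bookkeeping, plain user-space C; the FRAG_MASK value and the non-atomic xor are simplifications, not taken from this patch:

#include <stdio.h>

#define FRAG_MASK 0x03		/* illustrative: two fragments per page */

/* atomic_xor_bits() without the atomicity, for demonstration only. */
static unsigned int xor_bits(unsigned int *v, unsigned int bits)
{
	*v ^= bits;
	return *v;
}

int main(void)
{
	unsigned int mapcount = 0;

	xor_bits(&mapcount, 1);			/* allocate fragment 0 */
	xor_bits(&mapcount, 2);			/* allocate fragment 1 */
	printf("in use: %#x\n", mapcount & FRAG_MASK);
	xor_bits(&mapcount, 1 | (1 << 4));	/* free fragment 0 via RCU */
	printf("in use: %#x pending: %#x\n",
	       mapcount & FRAG_MASK, (mapcount >> 4) & FRAG_MASK);
	return 0;
}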
-void disable_noexec(struct mm_struct *mm, struct task_struct *tsk)
+static void __page_table_free_rcu(void *table, unsigned bit)
{
struct page *page;
- spin_lock(&mm->page_table_lock);
- /* Free shadow region and segment tables. */
- list_for_each_entry(page, &mm->context.crst_list, lru)
- if (page->index) {
- free_pages((unsigned long) page->index, ALLOC_ORDER);
- page->index = 0;
+ if (bit == FRAG_MASK)
+ return page_table_free_pgste(table);
+ /* Free 1K/2K page table fragment of a 4K page */
+ page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
+ if (atomic_xor_bits(&page->_mapcount, bit) == 0) {
+ pgtable_page_dtor(page);
+ atomic_set(&page->_mapcount, -1);
+ __free_page(page);
+ }
+}
+
+void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table)
+{
+ struct mm_struct *mm;
+ struct page *page;
+ unsigned int bit, mask;
+
+ mm = tlb->mm;
+ page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
+ if (page_table_with_pgste(page)) {
+ gmap_disconnect_pgtable(mm, table);
+ table = (unsigned long *) (__pa(table) | FRAG_MASK);
+ tlb_remove_table(tlb, table);
+ return;
+ }
+ bit = 1 << ((__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t)));
+ spin_lock_bh(&mm->context.list_lock);
+ if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
+ list_del(&page->lru);
+ mask = atomic_xor_bits(&page->_mapcount, bit | (bit << 4));
+ if (mask & FRAG_MASK)
+ list_add_tail(&page->lru, &mm->context.pgtable_list);
+ spin_unlock_bh(&mm->context.list_lock);
+ table = (unsigned long *) (__pa(table) | (bit << 4));
+ tlb_remove_table(tlb, table);
+}
+
+static void __tlb_remove_table(void *_table)
+{
+ const unsigned long mask = (FRAG_MASK << 4) | FRAG_MASK;
+ void *table = (void *)((unsigned long) _table & ~mask);
+ unsigned type = (unsigned long) _table & mask;
+
+ if (type)
+ __page_table_free_rcu(table, type);
+ else
+ free_pages((unsigned long) table, ALLOC_ORDER);
+}
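
Because page tables are naturally aligned, the low bits of the address handed to tlb_remove_table() are free to carry a type tag, which __tlb_remove_table() above strips again: a type of 0 means a full crst page, non-zero identifies a fragment (or a pgste table, tagged with FRAG_MASK itself). A stand-alone illustration, plain user-space C; the FRAG_MASK value and the address are made up:

#include <stdio.h>
#include <stdint.h>

#define FRAG_MASK 0x03UL	/* illustrative value */

int main(void)
{
	uintptr_t table = 0x12345000;			/* fake, 4K aligned */
	uintptr_t tagged = table | (0x1UL << 4);	/* tag fragment bit 1 */
	uintptr_t mask = (FRAG_MASK << 4) | FRAG_MASK;

	printf("table: %#lx type: %#lx\n",
	       (unsigned long)(tagged & ~mask),
	       (unsigned long)(tagged & mask));
	return 0;
}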
+
+static void tlb_remove_table_smp_sync(void *arg)
+{
+ /* Simply deliver the interrupt */
+}
+
+static void tlb_remove_table_one(void *table)
+{
+ /*
+ * This isn't an RCU grace period and hence the page-tables cannot be
+ * assumed to be actually RCU-freed.
+ *
+ * It is however sufficient for software page-table walkers that rely
+ * on IRQ disabling. See the comment near struct mmu_table_batch.
+ */
+ smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
+ __tlb_remove_table(table);
+}
+
+static void tlb_remove_table_rcu(struct rcu_head *head)
+{
+ struct mmu_table_batch *batch;
+ int i;
+
+ batch = container_of(head, struct mmu_table_batch, rcu);
+
+ for (i = 0; i < batch->nr; i++)
+ __tlb_remove_table(batch->tables[i]);
+
+ free_page((unsigned long)batch);
+}
+
+void tlb_table_flush(struct mmu_gather *tlb)
+{
+ struct mmu_table_batch **batch = &tlb->batch;
+
+ if (*batch) {
+ call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
+ *batch = NULL;
+ }
+}
+
+void tlb_remove_table(struct mmu_gather *tlb, void *table)
+{
+ struct mmu_table_batch **batch = &tlb->batch;
+
+ tlb->mm->context.flush_mm = 1;
+ if (*batch == NULL) {
+ *batch = (struct mmu_table_batch *)
+ __get_free_page(GFP_NOWAIT | __GFP_NOWARN);
+ if (*batch == NULL) {
+ __tlb_flush_mm_lazy(tlb->mm);
+ tlb_remove_table_one(table);
+ return;
}
- /* "Free" second halves of page tables. */
- list_for_each_entry(page, &mm->context.pgtable_list, lru)
- page->flags &= ~SECOND_HALVES;
- spin_unlock(&mm->page_table_lock);
- mm->context.noexec = 0;
- update_mm(mm, tsk);
+ (*batch)->nr = 0;
+ }
+ (*batch)->tables[(*batch)->nr++] = table;
+ if ((*batch)->nr == MAX_TABLE_BATCH)
+ tlb_flush_mmu(tlb);
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline void thp_split_vma(struct vm_area_struct *vma)
+{
+ unsigned long addr;
+
+ for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE)
+ follow_page(vma, addr, FOLL_SPLIT);
+}
+
+static inline void thp_split_mm(struct mm_struct *mm)
+{
+ struct vm_area_struct *vma;
+
+ for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
+ thp_split_vma(vma);
+ vma->vm_flags &= ~VM_HUGEPAGE;
+ vma->vm_flags |= VM_NOHUGEPAGE;
+ }
+ mm->def_flags |= VM_NOHUGEPAGE;
+}
+#else
+static inline void thp_split_mm(struct mm_struct *mm)
+{
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+static unsigned long page_table_realloc_pmd(struct mmu_gather *tlb,
+ struct mm_struct *mm, pud_t *pud,
+ unsigned long addr, unsigned long end)
+{
+ unsigned long next, *table, *new;
+ struct page *page;
+ pmd_t *pmd;
+
+ pmd = pmd_offset(pud, addr);
+ do {
+ next = pmd_addr_end(addr, end);
+again:
+ if (pmd_none_or_clear_bad(pmd))
+ continue;
+ table = (unsigned long *) pmd_deref(*pmd);
+ page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
+ if (page_table_with_pgste(page))
+ continue;
+ /* Allocate new page table with pgstes */
+ new = page_table_alloc_pgste(mm, addr);
+ if (!new)
+ return -ENOMEM;
+
+ spin_lock(&mm->page_table_lock);
+ if (likely((unsigned long *) pmd_deref(*pmd) == table)) {
+ /* Nuke pmd entry pointing to the "short" page table */
+ pmdp_flush_lazy(mm, addr, pmd);
+ pmd_clear(pmd);
+ /* Copy ptes from old table to new table */
+ memcpy(new, table, PAGE_SIZE/2);
+ clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
+ /* Establish new table */
+ pmd_populate(mm, pmd, (pte_t *) new);
+ /* Free old table with rcu, there might be a walker! */
+ page_table_free_rcu(tlb, table);
+ new = NULL;
+ }
+ spin_unlock(&mm->page_table_lock);
+ if (new) {
+ page_table_free_pgste(new);
+ goto again;
+ }
+ } while (pmd++, addr = next, addr != end);
+
+ return addr;
+}
+
+static unsigned long page_table_realloc_pud(struct mmu_gather *tlb,
+ struct mm_struct *mm, pgd_t *pgd,
+ unsigned long addr, unsigned long end)
+{
+ unsigned long next;
+ pud_t *pud;
+
+ pud = pud_offset(pgd, addr);
+ do {
+ next = pud_addr_end(addr, end);
+ if (pud_none_or_clear_bad(pud))
+ continue;
+ next = page_table_realloc_pmd(tlb, mm, pud, addr, next);
+ if (unlikely(IS_ERR_VALUE(next)))
+ return next;
+ } while (pud++, addr = next, addr != end);
+
+ return addr;
+}
+
+static unsigned long page_table_realloc(struct mmu_gather *tlb, struct mm_struct *mm,
+ unsigned long addr, unsigned long end)
+{
+ unsigned long next;
+ pgd_t *pgd;
+
+ pgd = pgd_offset(mm, addr);
+ do {
+ next = pgd_addr_end(addr, end);
+ if (pgd_none_or_clear_bad(pgd))
+ continue;
+ next = page_table_realloc_pud(tlb, mm, pgd, addr, next);
+ if (unlikely(IS_ERR_VALUE(next)))
+ return next;
+ } while (pgd++, addr = next, addr != end);
+
+ return 0;
+}
+
+/*
+ * Switch on pgstes for the userspace process of the caller (needed for KVM).
+ */
+int s390_enable_sie(void)
+{
+ struct task_struct *tsk = current;
+ struct mm_struct *mm = tsk->mm;
+ struct mmu_gather tlb;
+
+	/* Do we have pgstes? If yes, we are done. */
+ if (mm_has_pgste(tsk->mm))
+ return 0;
+
+ down_write(&mm->mmap_sem);
+ /* split thp mappings and disable thp for future mappings */
+ thp_split_mm(mm);
+ /* Reallocate the page tables with pgstes */
+ tlb_gather_mmu(&tlb, mm, 0, TASK_SIZE);
+ if (!page_table_realloc(&tlb, mm, 0, TASK_SIZE))
+ mm->context.has_pgste = 1;
+ tlb_finish_mmu(&tlb, 0, TASK_SIZE);
+ up_write(&mm->mmap_sem);
+ return mm->context.has_pgste ? 0 : -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(s390_enable_sie);
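
A sketch of a hypothetical caller, e.g. a VM creation path, illustrative only; the example_* name is made up:

/* Illustrative only: what a VM-creation path would do for the current
 * process before setting up gmap/SIE state. */
static int example_vm_init(void)
{
	int rc;

	rc = s390_enable_sie();		/* reallocate page tables with pgstes */
	if (rc)
		return rc;		/* -ENOMEM if the reallocation failed */
	/* current->mm now has pgstes; gmap_alloc() etc. may follow */
	return 0;
}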
+
+/*
+ * Enable storage key handling from now on and initialize the storage
+ * keys with the default key.
+ */
+void s390_enable_skey(void)
+{
+ page_table_reset_pgste(current->mm, 0, TASK_SIZE, true);
+}
+EXPORT_SYMBOL_GPL(s390_enable_skey);
+
+/*
+ * Test and clear the dirty state of a guest page.
+ */
+bool gmap_test_and_clear_dirty(unsigned long address, struct gmap *gmap)
+{
+ pte_t *pte;
+ spinlock_t *ptl;
+ bool dirty = false;
+
+ pte = get_locked_pte(gmap->mm, address, &ptl);
+ if (unlikely(!pte))
+ return false;
+
+ if (ptep_test_and_clear_user_dirty(gmap->mm, address, pte))
+ dirty = true;
+
+ spin_unlock(ptl);
+ return dirty;
+}
+EXPORT_SYMBOL_GPL(gmap_test_and_clear_dirty);
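
A sketch of a hypothetical dirty-logging loop built on the helper above, illustrative only; the example_* name and the one-bit-per-4K-page bitmap convention are assumptions:

/* Illustrative only: collect dirty bits for a guest range into a
 * caller-provided bitmap. */
static void example_sync_dirty_log(struct gmap *gmap, unsigned long start,
				   unsigned long npages, unsigned long *bitmap)
{
	unsigned long i;

	for (i = 0; i < npages; i++)
		if (gmap_test_and_clear_dirty(start + i * PAGE_SIZE, gmap))
			set_bit(i, bitmap);
}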
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+int pmdp_clear_flush_young(struct vm_area_struct *vma, unsigned long address,
+ pmd_t *pmdp)
+{
+ VM_BUG_ON(address & ~HPAGE_PMD_MASK);
+	/*
+	 * No need to flush the TLB: on s390 the reference bits are kept
+	 * in the storage key and never in the TLB.
+	 */
+ return pmdp_test_and_clear_young(vma, address, pmdp);
+}
+
+int pmdp_set_access_flags(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp,
+ pmd_t entry, int dirty)
+{
+ VM_BUG_ON(address & ~HPAGE_PMD_MASK);
+
+ if (pmd_same(*pmdp, entry))
+ return 0;
+ pmdp_invalidate(vma, address, pmdp);
+ set_pmd_at(vma->vm_mm, address, pmdp, entry);
+ return 1;
+}
+
+static void pmdp_splitting_flush_sync(void *arg)
+{
+ /* Simply deliver the interrupt */
+}
+
+void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
+ pmd_t *pmdp)
+{
+ VM_BUG_ON(address & ~HPAGE_PMD_MASK);
+ if (!test_and_set_bit(_SEGMENT_ENTRY_SPLIT_BIT,
+ (unsigned long *) pmdp)) {
+ /* need to serialize against gup-fast (IRQ disabled) */
+ smp_call_function(pmdp_splitting_flush_sync, NULL, 1);
+ }
+}
+
+void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
+ pgtable_t pgtable)
+{
+ struct list_head *lh = (struct list_head *) pgtable;
+
+ assert_spin_locked(pmd_lockptr(mm, pmdp));
+
+ /* FIFO */
+ if (!pmd_huge_pte(mm, pmdp))
+ INIT_LIST_HEAD(lh);
+ else
+ list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
+ pmd_huge_pte(mm, pmdp) = pgtable;
+}
+
+pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
+{
+ struct list_head *lh;
+ pgtable_t pgtable;
+ pte_t *ptep;
+
+ assert_spin_locked(pmd_lockptr(mm, pmdp));
+
+ /* FIFO */
+ pgtable = pmd_huge_pte(mm, pmdp);
+ lh = (struct list_head *) pgtable;
+ if (list_empty(lh))
+ pmd_huge_pte(mm, pmdp) = NULL;
+ else {
+ pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
+ list_del(lh);
+ }
+ ptep = (pte_t *) pgtable;
+ pte_val(*ptep) = _PAGE_INVALID;
+ ptep++;
+ pte_val(*ptep) = _PAGE_INVALID;
+ return pgtable;
}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 35d90a4720f..fe9012a49aa 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -1,6 +1,4 @@
/*
- * arch/s390/mm/vmem.c
- *
* Copyright IBM Corp. 2006
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
*/
@@ -10,10 +8,14 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/list.h>
+#include <linux/hugetlb.h>
+#include <linux/slab.h>
+#include <linux/memblock.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
+#include <asm/sections.h>
static DEFINE_MUTEX(vmem_mutex);
@@ -25,43 +27,6 @@ struct memory_segment {
static LIST_HEAD(mem_segs);
-void __meminit memmap_init(unsigned long size, int nid, unsigned long zone,
- unsigned long start_pfn)
-{
- struct page *start, *end;
- struct page *map_start, *map_end;
- int i;
-
- start = pfn_to_page(start_pfn);
- end = start + size;
-
- for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
- unsigned long cstart, cend;
-
- cstart = PFN_DOWN(memory_chunk[i].addr);
- cend = cstart + PFN_DOWN(memory_chunk[i].size);
-
- map_start = mem_map + cstart;
- map_end = mem_map + cend;
-
- if (map_start < start)
- map_start = start;
- if (map_end > end)
- map_end = end;
-
- map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1))
- / sizeof(struct page);
- map_end += ((PFN_ALIGN((unsigned long) map_end)
- - (unsigned long) map_end)
- / sizeof(struct page));
-
- if (map_start < map_end)
- memmap_init_zone((unsigned long)(map_end - map_start),
- nid, zone, page_to_pfn(map_start),
- MEMMAP_EARLY);
- }
-}
-
static void __ref *vmem_alloc_pages(unsigned int order)
{
if (slab_is_available())
@@ -77,8 +42,7 @@ static inline pud_t *vmem_pud_alloc(void)
pud = vmem_alloc_pages(2);
if (!pud)
return NULL;
- pud_val(*pud) = _REGION3_ENTRY_EMPTY;
- memcpy(pud + 1, pud, (PTRS_PER_PUD - 1)*sizeof(pud_t));
+ clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
#endif
return pud;
}
@@ -91,22 +55,23 @@ static inline pmd_t *vmem_pmd_alloc(void)
pmd = vmem_alloc_pages(2);
if (!pmd)
return NULL;
- clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE*4);
+ clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
#endif
return pmd;
}
-static pte_t __init_refok *vmem_pte_alloc(void)
+static pte_t __ref *vmem_pte_alloc(unsigned long address)
{
pte_t *pte;
if (slab_is_available())
- pte = (pte_t *) page_table_alloc(&init_mm);
+ pte = (pte_t *) page_table_alloc(&init_mm, address);
else
- pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t));
+ pte = alloc_bootmem_align(PTRS_PER_PTE * sizeof(pte_t),
+ PTRS_PER_PTE * sizeof(pte_t));
if (!pte)
return NULL;
- clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY,
+ clear_table((unsigned long *) pte, _PAGE_INVALID,
PTRS_PER_PTE * sizeof(pte_t));
return pte;
}
@@ -114,48 +79,67 @@ static pte_t __init_refok *vmem_pte_alloc(void)
/*
* Add a physical memory range to the 1:1 mapping.
*/
-static int vmem_add_range(unsigned long start, unsigned long size)
+static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
{
- unsigned long address;
+ unsigned long end = start + size;
+ unsigned long address = start;
pgd_t *pg_dir;
pud_t *pu_dir;
pmd_t *pm_dir;
pte_t *pt_dir;
- pte_t pte;
int ret = -ENOMEM;
- for (address = start; address < start + size; address += PAGE_SIZE) {
+ while (address < end) {
pg_dir = pgd_offset_k(address);
if (pgd_none(*pg_dir)) {
pu_dir = vmem_pud_alloc();
if (!pu_dir)
goto out;
- pgd_populate_kernel(&init_mm, pg_dir, pu_dir);
+ pgd_populate(&init_mm, pg_dir, pu_dir);
}
-
pu_dir = pud_offset(pg_dir, address);
+#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC)
+ if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address &&
+ !(address & ~PUD_MASK) && (address + PUD_SIZE <= end)) {
+ pud_val(*pu_dir) = __pa(address) |
+ _REGION_ENTRY_TYPE_R3 | _REGION3_ENTRY_LARGE |
+ (ro ? _REGION_ENTRY_PROTECT : 0);
+ address += PUD_SIZE;
+ continue;
+ }
+#endif
if (pud_none(*pu_dir)) {
pm_dir = vmem_pmd_alloc();
if (!pm_dir)
goto out;
- pud_populate_kernel(&init_mm, pu_dir, pm_dir);
+ pud_populate(&init_mm, pu_dir, pm_dir);
}
-
pm_dir = pmd_offset(pu_dir, address);
+#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC)
+ if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address &&
+ !(address & ~PMD_MASK) && (address + PMD_SIZE <= end)) {
+ pmd_val(*pm_dir) = __pa(address) |
+ _SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE |
+ _SEGMENT_ENTRY_YOUNG |
+ (ro ? _SEGMENT_ENTRY_PROTECT : 0);
+ address += PMD_SIZE;
+ continue;
+ }
+#endif
if (pmd_none(*pm_dir)) {
- pt_dir = vmem_pte_alloc();
+ pt_dir = vmem_pte_alloc(address);
if (!pt_dir)
goto out;
- pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
+ pmd_populate(&init_mm, pm_dir, pt_dir);
}
pt_dir = pte_offset_kernel(pm_dir, address);
- pte = pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL);
- *pt_dir = pte;
+ pte_val(*pt_dir) = __pa(address) |
+ pgprot_val(ro ? PAGE_KERNEL_RO : PAGE_KERNEL);
+ address += PAGE_SIZE;
}
ret = 0;
out:
- flush_tlb_kernel_range(start, start + size);
return ret;
}
@@ -165,55 +149,67 @@ out:
*/
static void vmem_remove_range(unsigned long start, unsigned long size)
{
- unsigned long address;
+ unsigned long end = start + size;
+ unsigned long address = start;
pgd_t *pg_dir;
pud_t *pu_dir;
pmd_t *pm_dir;
pte_t *pt_dir;
pte_t pte;
- pte_val(pte) = _PAGE_TYPE_EMPTY;
- for (address = start; address < start + size; address += PAGE_SIZE) {
+ pte_val(pte) = _PAGE_INVALID;
+ while (address < end) {
pg_dir = pgd_offset_k(address);
+ if (pgd_none(*pg_dir)) {
+ address += PGDIR_SIZE;
+ continue;
+ }
pu_dir = pud_offset(pg_dir, address);
- if (pud_none(*pu_dir))
+ if (pud_none(*pu_dir)) {
+ address += PUD_SIZE;
+ continue;
+ }
+ if (pud_large(*pu_dir)) {
+ pud_clear(pu_dir);
+ address += PUD_SIZE;
continue;
+ }
pm_dir = pmd_offset(pu_dir, address);
- if (pmd_none(*pm_dir))
+ if (pmd_none(*pm_dir)) {
+ address += PMD_SIZE;
+ continue;
+ }
+ if (pmd_large(*pm_dir)) {
+ pmd_clear(pm_dir);
+ address += PMD_SIZE;
continue;
+ }
pt_dir = pte_offset_kernel(pm_dir, address);
*pt_dir = pte;
+ address += PAGE_SIZE;
}
- flush_tlb_kernel_range(start, start + size);
+ flush_tlb_kernel_range(start, end);
}
/*
* Add a backed mem_map array to the virtual mem_map array.
*/
-static int vmem_add_mem_map(unsigned long start, unsigned long size)
+int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
- unsigned long address, start_addr, end_addr;
- struct page *map_start, *map_end;
+ unsigned long address = start;
pgd_t *pg_dir;
pud_t *pu_dir;
pmd_t *pm_dir;
pte_t *pt_dir;
- pte_t pte;
int ret = -ENOMEM;
- map_start = VMEM_MAP + PFN_DOWN(start);
- map_end = VMEM_MAP + PFN_DOWN(start + size);
-
- start_addr = (unsigned long) map_start & PAGE_MASK;
- end_addr = PFN_ALIGN((unsigned long) map_end);
-
- for (address = start_addr; address < end_addr; address += PAGE_SIZE) {
+ for (address = start; address < end;) {
pg_dir = pgd_offset_k(address);
if (pgd_none(*pg_dir)) {
pu_dir = vmem_pud_alloc();
if (!pu_dir)
goto out;
- pgd_populate_kernel(&init_mm, pg_dir, pu_dir);
+ pgd_populate(&init_mm, pg_dir, pu_dir);
}
pu_dir = pud_offset(pg_dir, address);
@@ -221,15 +217,38 @@ static int vmem_add_mem_map(unsigned long start, unsigned long size)
pm_dir = vmem_pmd_alloc();
if (!pm_dir)
goto out;
- pud_populate_kernel(&init_mm, pu_dir, pm_dir);
+ pud_populate(&init_mm, pu_dir, pm_dir);
}
pm_dir = pmd_offset(pu_dir, address);
if (pmd_none(*pm_dir)) {
- pt_dir = vmem_pte_alloc();
+#ifdef CONFIG_64BIT
+			/* Use 1MB frames for vmemmap if available. We always
+			 * use large frames even if they are only partially
+			 * used. Otherwise we would also end up with page
+			 * tables, since vmemmap_populate gets called for
+			 * each section separately. */
+ if (MACHINE_HAS_EDAT1) {
+ void *new_page;
+
+ new_page = vmemmap_alloc_block(PMD_SIZE, node);
+ if (!new_page)
+ goto out;
+ pmd_val(*pm_dir) = __pa(new_page) |
+ _SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE |
+ _SEGMENT_ENTRY_CO;
+ address = (address + PMD_SIZE) & PMD_MASK;
+ continue;
+ }
+#endif
+ pt_dir = vmem_pte_alloc(address);
if (!pt_dir)
goto out;
- pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
+ pmd_populate(&init_mm, pm_dir, pt_dir);
+ } else if (pmd_large(*pm_dir)) {
+ address = (address + PMD_SIZE) & PMD_MASK;
+ continue;
}
pt_dir = pte_offset_kernel(pm_dir, address);
@@ -239,24 +258,19 @@ static int vmem_add_mem_map(unsigned long start, unsigned long size)
new_page =__pa(vmem_alloc_pages(0));
if (!new_page)
goto out;
- pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL);
- *pt_dir = pte;
+ pte_val(*pt_dir) =
+ __pa(new_page) | pgprot_val(PAGE_KERNEL);
}
+ address += PAGE_SIZE;
}
+ memset((void *)start, 0, end - start);
ret = 0;
out:
- flush_tlb_kernel_range(start_addr, end_addr);
return ret;
}
-static int vmem_add_mem(unsigned long start, unsigned long size)
+void vmemmap_free(unsigned long start, unsigned long end)
{
- int ret;
-
- ret = vmem_add_mem_map(start, size);
- if (ret)
- return ret;
- return vmem_add_range(start, size);
}
/*
@@ -267,7 +281,7 @@ static int insert_memory_segment(struct memory_segment *seg)
{
struct memory_segment *tmp;
- if (seg->start + seg->size >= VMEM_MAX_PHYS ||
+ if (seg->start + seg->size > VMEM_MAX_PHYS ||
seg->start + seg->size < seg->start)
return -ERANGE;
@@ -296,7 +310,7 @@ static void __remove_shared_memory(struct memory_segment *seg)
vmem_remove_range(seg->start, seg->size);
}
-int remove_shared_memory(unsigned long start, unsigned long size)
+int vmem_remove_mapping(unsigned long start, unsigned long size)
{
struct memory_segment *seg;
int ret;
@@ -320,11 +334,9 @@ out:
return ret;
}
-int add_shared_memory(unsigned long start, unsigned long size)
+int vmem_add_mapping(unsigned long start, unsigned long size)
{
struct memory_segment *seg;
- struct page *page;
- unsigned long pfn, num_pfn, end_pfn;
int ret;
mutex_lock(&vmem_mutex);
@@ -339,24 +351,9 @@ int add_shared_memory(unsigned long start, unsigned long size)
if (ret)
goto out_free;
- ret = vmem_add_mem(start, size);
+ ret = vmem_add_mem(start, size, 0);
if (ret)
goto out_remove;
-
- pfn = PFN_DOWN(start);
- num_pfn = PFN_DOWN(size);
- end_pfn = pfn + num_pfn;
-
- page = pfn_to_page(pfn);
- memset(page, 0, num_pfn * sizeof(struct page));
-
- for (; pfn < end_pfn; pfn++) {
- page = pfn_to_page(pfn);
- init_page_count(page);
- reset_page_mapcount(page);
- SetPageReserved(page);
- INIT_LIST_HEAD(&page->lru);
- }
goto out;
out_remove:
@@ -375,34 +372,49 @@ out:
*/
void __init vmem_map_init(void)
{
- int i;
-
- INIT_LIST_HEAD(&init_mm.context.crst_list);
- INIT_LIST_HEAD(&init_mm.context.pgtable_list);
- init_mm.context.noexec = 0;
- NODE_DATA(0)->node_mem_map = VMEM_MAP;
- for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++)
- vmem_add_mem(memory_chunk[i].addr, memory_chunk[i].size);
+ unsigned long ro_start, ro_end;
+ struct memblock_region *reg;
+ phys_addr_t start, end;
+
+ ro_start = PFN_ALIGN((unsigned long)&_stext);
+ ro_end = (unsigned long)&_eshared & PAGE_MASK;
+ for_each_memblock(memory, reg) {
+ start = reg->base;
+ end = reg->base + reg->size - 1;
+ if (start >= ro_end || end <= ro_start)
+ vmem_add_mem(start, end - start, 0);
+ else if (start >= ro_start && end <= ro_end)
+ vmem_add_mem(start, end - start, 1);
+ else if (start >= ro_start) {
+ vmem_add_mem(start, ro_end - start, 1);
+ vmem_add_mem(ro_end, end - ro_end, 0);
+ } else if (end < ro_end) {
+ vmem_add_mem(start, ro_start - start, 0);
+ vmem_add_mem(ro_start, end - ro_start, 1);
+ } else {
+ vmem_add_mem(start, ro_start - start, 0);
+ vmem_add_mem(ro_start, ro_end - ro_start, 1);
+ vmem_add_mem(ro_end, end - ro_end, 0);
+ }
+ }
}
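
The chain of conditions above splits each memblock region against the read-only range [ro_start, ro_end). A stand-alone illustration of the same case analysis, plain user-space C; the addresses are made up and an exclusive end is used for simplicity:

#include <stdio.h>

/* Classify one region against [ro_start, ro_end) and print which
 * sub-ranges would be mapped read-only vs. read-write. */
static void classify(unsigned long start, unsigned long end,
		     unsigned long ro_start, unsigned long ro_end)
{
	if (start >= ro_end || end <= ro_start)
		printf("rw: %#lx-%#lx\n", start, end);
	else if (start >= ro_start && end <= ro_end)
		printf("ro: %#lx-%#lx\n", start, end);
	else if (start >= ro_start)
		printf("ro: %#lx-%#lx rw: %#lx-%#lx\n",
		       start, ro_end, ro_end, end);
	else if (end < ro_end)
		printf("rw: %#lx-%#lx ro: %#lx-%#lx\n",
		       start, ro_start, ro_start, end);
	else
		printf("rw: %#lx-%#lx ro: %#lx-%#lx rw: %#lx-%#lx\n",
		       start, ro_start, ro_start, ro_end, ro_end, end);
}

int main(void)
{
	/* example: 256 MB of memory, kernel text read-only at 1-8 MB */
	classify(0x0, 0x10000000, 0x100000, 0x800000);
	return 0;
}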
/*
- * Convert memory chunk array to a memory segment list so there is a single
- * list that contains both r/w memory and shared memory segments.
+ * Convert memblock.memory to a memory segment list so there is a single
+ * list that contains all memory segments.
*/
static int __init vmem_convert_memory_chunk(void)
{
+ struct memblock_region *reg;
struct memory_segment *seg;
- int i;
mutex_lock(&vmem_mutex);
- for (i = 0; i < MEMORY_CHUNKS; i++) {
- if (!memory_chunk[i].size)
- continue;
+ for_each_memblock(memory, reg) {
seg = kzalloc(sizeof(*seg), GFP_KERNEL);
if (!seg)
panic("Out of memory...\n");
- seg->start = memory_chunk[i].addr;
- seg->size = memory_chunk[i].size;
+ seg->start = reg->base;
+ seg->size = reg->size;
insert_memory_segment(seg);
}
mutex_unlock(&vmem_mutex);