Diffstat (limited to 'arch/s390/mm')
-rw-r--r--  arch/s390/mm/Makefile            |   10
-rw-r--r--  arch/s390/mm/cmm.c               |   24
-rw-r--r--  arch/s390/mm/dump_pagetables.c   |  246
-rw-r--r--  arch/s390/mm/extable.c           |   81
-rw-r--r--  arch/s390/mm/extmem.c            |    9
-rw-r--r--  arch/s390/mm/fault.c             |  664
-rw-r--r--  arch/s390/mm/gup.c               |  139
-rw-r--r--  arch/s390/mm/hugetlbpage.c       |  137
-rw-r--r--  arch/s390/mm/init.c              |  181
-rw-r--r--  arch/s390/mm/maccess.c           |  133
-rw-r--r--  arch/s390/mm/mem_detect.c        |   65
-rw-r--r--  arch/s390/mm/mmap.c              |   78
-rw-r--r--  arch/s390/mm/pageattr.c          |  146
-rw-r--r--  arch/s390/mm/pgtable.c           | 1590
-rw-r--r--  arch/s390/mm/vmem.c              |  178
15 files changed, 2840 insertions(+), 841 deletions(-)
diff --git a/arch/s390/mm/Makefile b/arch/s390/mm/Makefile
index 6fbc6f3fbdf..839592ca265 100644
--- a/arch/s390/mm/Makefile
+++ b/arch/s390/mm/Makefile
@@ -2,7 +2,9 @@
# Makefile for the linux s390-specific parts of the memory manager.
#
-obj-y := init.o fault.o extmem.o mmap.o vmem.o pgtable.o maccess.o \
- page-states.o gup.o
-obj-$(CONFIG_CMM) += cmm.o
-obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
+obj-y := init.o fault.o extmem.o mmap.o vmem.o pgtable.o maccess.o
+obj-y += page-states.o gup.o extable.o pageattr.o mem_detect.o
+
+obj-$(CONFIG_CMM) += cmm.o
+obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
+obj-$(CONFIG_S390_PTDUMP) += dump_pagetables.o
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c
index c66ffd8dbbb..79ddd580d60 100644
--- a/arch/s390/mm/cmm.c
+++ b/arch/s390/mm/cmm.c
@@ -1,7 +1,7 @@
/*
* Collaborative memory management interface.
*
- * Copyright IBM Corp 2003,2010
+ * Copyright IBM Corp 2003, 2010
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
*
*/
@@ -91,7 +91,7 @@ static long cmm_alloc_pages(long nr, long *counter,
} else
free_page((unsigned long) npa);
}
- diag10(addr);
+ diag10_range(addr >> PAGE_SHIFT, 1);
pa->pages[pa->index++] = addr;
(*counter)++;
spin_unlock(&cmm_lock);
@@ -253,12 +253,12 @@ static int cmm_skip_blanks(char *cp, char **endp)
static struct ctl_table cmm_table[];
-static int cmm_pages_handler(ctl_table *ctl, int write, void __user *buffer,
- size_t *lenp, loff_t *ppos)
+static int cmm_pages_handler(struct ctl_table *ctl, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
{
char buf[16], *p;
+ unsigned int len;
long nr;
- int len;
if (!*lenp || (*ppos && !write)) {
*lenp = 0;
@@ -293,12 +293,12 @@ static int cmm_pages_handler(ctl_table *ctl, int write, void __user *buffer,
return 0;
}
-static int cmm_timeout_handler(ctl_table *ctl, int write, void __user *buffer,
- size_t *lenp, loff_t *ppos)
+static int cmm_timeout_handler(struct ctl_table *ctl, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
{
char buf[64], *p;
long nr, seconds;
- int len;
+ unsigned int len;
if (!*lenp || (*ppos && !write)) {
*lenp = 0;
@@ -458,12 +458,10 @@ static int __init cmm_init(void)
if (rc)
goto out_pm;
cmm_thread_ptr = kthread_run(cmm_thread, NULL, "cmmthread");
- rc = IS_ERR(cmm_thread_ptr) ? PTR_ERR(cmm_thread_ptr) : 0;
- if (rc)
- goto out_kthread;
- return 0;
+ if (!IS_ERR(cmm_thread_ptr))
+ return 0;
-out_kthread:
+ rc = PTR_ERR(cmm_thread_ptr);
unregister_pm_notifier(&cmm_power_notifier);
out_pm:
unregister_oom_notifier(&cmm_oom_nb);
diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c
new file mode 100644
index 00000000000..46d517c3c76
--- /dev/null
+++ b/arch/s390/mm/dump_pagetables.c
@@ -0,0 +1,246 @@
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <asm/sections.h>
+#include <asm/pgtable.h>
+
+static unsigned long max_addr;
+
+struct addr_marker {
+ unsigned long start_address;
+ const char *name;
+};
+
+enum address_markers_idx {
+ IDENTITY_NR = 0,
+ KERNEL_START_NR,
+ KERNEL_END_NR,
+ VMEMMAP_NR,
+ VMALLOC_NR,
+#ifdef CONFIG_64BIT
+ MODULES_NR,
+#endif
+};
+
+static struct addr_marker address_markers[] = {
+ [IDENTITY_NR] = {0, "Identity Mapping"},
+ [KERNEL_START_NR] = {(unsigned long)&_stext, "Kernel Image Start"},
+ [KERNEL_END_NR] = {(unsigned long)&_end, "Kernel Image End"},
+ [VMEMMAP_NR] = {0, "vmemmap Area"},
+ [VMALLOC_NR] = {0, "vmalloc Area"},
+#ifdef CONFIG_64BIT
+ [MODULES_NR] = {0, "Modules Area"},
+#endif
+ { -1, NULL }
+};
+
+struct pg_state {
+ int level;
+ unsigned int current_prot;
+ unsigned long start_address;
+ unsigned long current_address;
+ const struct addr_marker *marker;
+};
+
+static void print_prot(struct seq_file *m, unsigned int pr, int level)
+{
+ static const char * const level_name[] =
+ { "ASCE", "PGD", "PUD", "PMD", "PTE" };
+
+ seq_printf(m, "%s ", level_name[level]);
+ if (pr & _PAGE_INVALID) {
+ seq_printf(m, "I\n");
+ return;
+ }
+ seq_printf(m, "%s", pr & _PAGE_PROTECT ? "RO " : "RW ");
+ seq_printf(m, "%s", pr & _PAGE_CO ? "CO " : " ");
+ seq_putc(m, '\n');
+}
+
+static void note_page(struct seq_file *m, struct pg_state *st,
+ unsigned int new_prot, int level)
+{
+ static const char units[] = "KMGTPE";
+ int width = sizeof(unsigned long) * 2;
+ const char *unit = units;
+ unsigned int prot, cur;
+ unsigned long delta;
+
+ /*
+ * If we have a "break" in the series, we need to flush the state
+ * that we have now. "break" is either changing perms, levels or
+ * address space marker.
+ */
+ prot = new_prot;
+ cur = st->current_prot;
+
+ if (!st->level) {
+ /* First entry */
+ st->current_prot = new_prot;
+ st->level = level;
+ st->marker = address_markers;
+ seq_printf(m, "---[ %s ]---\n", st->marker->name);
+ } else if (prot != cur || level != st->level ||
+ st->current_address >= st->marker[1].start_address) {
+ /* Print the actual finished series */
+ seq_printf(m, "0x%0*lx-0x%0*lx",
+ width, st->start_address,
+ width, st->current_address);
+ delta = (st->current_address - st->start_address) >> 10;
+ while (!(delta & 0x3ff) && unit[1]) {
+ delta >>= 10;
+ unit++;
+ }
+ seq_printf(m, "%9lu%c ", delta, *unit);
+ print_prot(m, st->current_prot, st->level);
+ if (st->current_address >= st->marker[1].start_address) {
+ st->marker++;
+ seq_printf(m, "---[ %s ]---\n", st->marker->name);
+ }
+ st->start_address = st->current_address;
+ st->current_prot = new_prot;
+ st->level = level;
+ }
+}
+
+/*
+ * The actual page table walker functions. In order to keep the
+ * implementation of print_prot() short, we only check and pass
+ * _PAGE_INVALID and _PAGE_PROTECT flags to note_page() if a region,
+ * segment or page table entry is invalid or read-only.
+ * After all it's just a hint that the current level being walked
+ * contains an invalid or read-only entry.
+ */
+static void walk_pte_level(struct seq_file *m, struct pg_state *st,
+ pmd_t *pmd, unsigned long addr)
+{
+ unsigned int prot;
+ pte_t *pte;
+ int i;
+
+ for (i = 0; i < PTRS_PER_PTE && addr < max_addr; i++) {
+ st->current_address = addr;
+ pte = pte_offset_kernel(pmd, addr);
+ prot = pte_val(*pte) & (_PAGE_PROTECT | _PAGE_INVALID);
+ note_page(m, st, prot, 4);
+ addr += PAGE_SIZE;
+ }
+}
+
+#ifdef CONFIG_64BIT
+#define _PMD_PROT_MASK (_SEGMENT_ENTRY_PROTECT | _SEGMENT_ENTRY_CO)
+#else
+#define _PMD_PROT_MASK 0
+#endif
+
+static void walk_pmd_level(struct seq_file *m, struct pg_state *st,
+ pud_t *pud, unsigned long addr)
+{
+ unsigned int prot;
+ pmd_t *pmd;
+ int i;
+
+ for (i = 0; i < PTRS_PER_PMD && addr < max_addr; i++) {
+ st->current_address = addr;
+ pmd = pmd_offset(pud, addr);
+ if (!pmd_none(*pmd)) {
+ if (pmd_large(*pmd)) {
+ prot = pmd_val(*pmd) & _PMD_PROT_MASK;
+ note_page(m, st, prot, 3);
+ } else
+ walk_pte_level(m, st, pmd, addr);
+ } else
+ note_page(m, st, _PAGE_INVALID, 3);
+ addr += PMD_SIZE;
+ }
+}
+
+#ifdef CONFIG_64BIT
+#define _PUD_PROT_MASK (_REGION3_ENTRY_RO | _REGION3_ENTRY_CO)
+#else
+#define _PUD_PROT_MASK 0
+#endif
+
+static void walk_pud_level(struct seq_file *m, struct pg_state *st,
+ pgd_t *pgd, unsigned long addr)
+{
+ unsigned int prot;
+ pud_t *pud;
+ int i;
+
+ for (i = 0; i < PTRS_PER_PUD && addr < max_addr; i++) {
+ st->current_address = addr;
+ pud = pud_offset(pgd, addr);
+ if (!pud_none(*pud))
+ if (pud_large(*pud)) {
+ prot = pud_val(*pud) & _PUD_PROT_MASK;
+ note_page(m, st, prot, 2);
+ } else
+ walk_pmd_level(m, st, pud, addr);
+ else
+ note_page(m, st, _PAGE_INVALID, 2);
+ addr += PUD_SIZE;
+ }
+}
+
+static void walk_pgd_level(struct seq_file *m)
+{
+ unsigned long addr = 0;
+ struct pg_state st;
+ pgd_t *pgd;
+ int i;
+
+ memset(&st, 0, sizeof(st));
+ for (i = 0; i < PTRS_PER_PGD && addr < max_addr; i++) {
+ st.current_address = addr;
+ pgd = pgd_offset_k(addr);
+ if (!pgd_none(*pgd))
+ walk_pud_level(m, &st, pgd, addr);
+ else
+ note_page(m, &st, _PAGE_INVALID, 1);
+ addr += PGDIR_SIZE;
+ }
+ /* Flush out the last page */
+ st.current_address = max_addr;
+ note_page(m, &st, 0, 0);
+}
+
+static int ptdump_show(struct seq_file *m, void *v)
+{
+ walk_pgd_level(m);
+ return 0;
+}
+
+static int ptdump_open(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, ptdump_show, NULL);
+}
+
+static const struct file_operations ptdump_fops = {
+ .open = ptdump_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int pt_dump_init(void)
+{
+ /*
+ * Figure out the maximum virtual address being accessible with the
+ * kernel ASCE. We need this to keep the page table walker functions
+ * from accessing non-existent entries.
+ */
+#ifdef CONFIG_32BIT
+ max_addr = 1UL << 31;
+#else
+ max_addr = (S390_lowcore.kernel_asce & _REGION_ENTRY_TYPE_MASK) >> 2;
+ max_addr = 1UL << (max_addr * 11 + 31);
+ address_markers[MODULES_NR].start_address = MODULES_VADDR;
+#endif
+ address_markers[VMEMMAP_NR].start_address = (unsigned long) vmemmap;
+ address_markers[VMALLOC_NR].start_address = VMALLOC_START;
+ debugfs_create_file("kernel_page_tables", 0400, NULL, NULL, &ptdump_fops);
+ return 0;
+}
+device_initcall(pt_dump_init);
diff --git a/arch/s390/mm/extable.c b/arch/s390/mm/extable.c
new file mode 100644
index 00000000000..4d1ee88864e
--- /dev/null
+++ b/arch/s390/mm/extable.c
@@ -0,0 +1,81 @@
+#include <linux/module.h>
+#include <linux/sort.h>
+#include <asm/uaccess.h>
+
+/*
+ * Search one exception table for an entry corresponding to the
+ * given instruction address, and return the address of the entry,
+ * or NULL if none is found.
+ * We use a binary search, and thus we assume that the table is
+ * already sorted.
+ */
+const struct exception_table_entry *
+search_extable(const struct exception_table_entry *first,
+ const struct exception_table_entry *last,
+ unsigned long value)
+{
+ const struct exception_table_entry *mid;
+ unsigned long addr;
+
+ while (first <= last) {
+ mid = ((last - first) >> 1) + first;
+ addr = extable_insn(mid);
+ if (addr < value)
+ first = mid + 1;
+ else if (addr > value)
+ last = mid - 1;
+ else
+ return mid;
+ }
+ return NULL;
+}
+
+/*
+ * The exception table needs to be sorted so that the binary
+ * search that we use to find entries in it works properly.
+ * This is used both for the kernel exception table and for
+ * the exception tables of modules that get loaded.
+ *
+ */
+static int cmp_ex(const void *a, const void *b)
+{
+ const struct exception_table_entry *x = a, *y = b;
+
+ /* This compare is only valid after normalization. */
+ return x->insn - y->insn;
+}
+
+void sort_extable(struct exception_table_entry *start,
+ struct exception_table_entry *finish)
+{
+ struct exception_table_entry *p;
+ int i;
+
+ /* Normalize entries to being relative to the start of the section */
+ for (p = start, i = 0; p < finish; p++, i += 8)
+ p->insn += i;
+ sort(start, finish - start, sizeof(*start), cmp_ex, NULL);
+ /* Denormalize all entries */
+ for (p = start, i = 0; p < finish; p++, i += 8)
+ p->insn -= i;
+}
+
+#ifdef CONFIG_MODULES
+/*
+ * If the exception table is sorted, any referring to the module init
+ * will be at the beginning or the end.
+ */
+void trim_init_extable(struct module *m)
+{
+ /* Trim the beginning */
+ while (m->num_exentries &&
+ within_module_init(extable_insn(&m->extable[0]), m)) {
+ m->extable++;
+ m->num_exentries--;
+ }
+ /* Trim the end */
+ while (m->num_exentries &&
+ within_module_init(extable_insn(&m->extable[m->num_exentries-1]), m))
+ m->num_exentries--;
+}
+#endif /* CONFIG_MODULES */
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
index 3cc95dd0a3a..519bba716cc 100644
--- a/arch/s390/mm/extmem.c
+++ b/arch/s390/mm/extmem.c
@@ -1,10 +1,9 @@
/*
- * File...........: arch/s390/mm/extmem.c
* Author(s)......: Carsten Otte <cotte@de.ibm.com>
* Rob M van der Heij <rvdheij@nl.ibm.com>
* Steven Shultz <shultzss@us.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
- * (C) IBM Corporation 2002-2004
+ * Copyright IBM Corp. 2002, 2004
*/
#define KMSG_COMPONENT "extmem"
@@ -412,6 +411,7 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
struct dcss_segment *seg;
int rc, diag_cc;
+ start_addr = end_addr = 0;
seg = kmalloc(sizeof(*seg), GFP_KERNEL | GFP_DMA);
if (seg == NULL) {
rc = -ENOMEM;
@@ -573,6 +573,7 @@ segment_modify_shared (char *name, int do_nonshared)
unsigned long start_addr, end_addr, dummy;
int rc, diag_cc;
+ start_addr = end_addr = 0;
mutex_lock(&dcss_lock);
seg = segment_by_name (name);
if (seg == NULL) {
@@ -681,8 +682,6 @@ void
segment_save(char *name)
{
struct dcss_segment *seg;
- int startpfn = 0;
- int endpfn = 0;
char cmd1[160];
char cmd2[80];
int i, response;
@@ -698,8 +697,6 @@ segment_save(char *name)
goto out;
}
- startpfn = seg->start_addr >> PAGE_SHIFT;
- endpfn = (seg->end) >> PAGE_SHIFT;
sprintf(cmd1, "DEFSEG %s", name);
for (i=0; i<seg->segcnt; i++) {
sprintf(cmd1+strlen(cmd1), " %lX-%lX %s",
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index fe5701e9efb..3f3b35403d0 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -1,8 +1,6 @@
/*
- * arch/s390/mm/fault.c
- *
* S390 version
- * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 1999
* Author(s): Hartmut Penner (hp@de.ibm.com)
* Ulrich Weigand (uweigand@de.ibm.com)
*
@@ -10,6 +8,7 @@
* Copyright (C) 1995 Linus Torvalds
*/
+#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/sched.h>
@@ -31,11 +30,10 @@
#include <linux/uaccess.h>
#include <linux/hugetlb.h>
#include <asm/asm-offsets.h>
-#include <asm/system.h>
#include <asm/pgtable.h>
-#include <asm/s390_ext.h>
+#include <asm/irq.h>
#include <asm/mmu_context.h>
-#include <asm/compat.h>
+#include <asm/facility.h>
#include "../kernel/entry.h"
#ifndef CONFIG_64BIT
@@ -51,14 +49,20 @@
#define VM_FAULT_BADCONTEXT 0x010000
#define VM_FAULT_BADMAP 0x020000
#define VM_FAULT_BADACCESS 0x040000
+#define VM_FAULT_SIGNAL 0x080000
+#define VM_FAULT_PFAULT 0x100000
-static unsigned long store_indication;
+static unsigned long store_indication __read_mostly;
-void fault_init(void)
+#ifdef CONFIG_64BIT
+static int __init fault_init(void)
{
- if (test_facility(2) && test_facility(75))
+ if (test_facility(75))
store_indication = 0xc00;
+ return 0;
}
+early_initcall(fault_init);
+#endif
static inline int notify_page_fault(struct pt_regs *regs)
{
@@ -102,30 +106,154 @@ void bust_spinlocks(int yes)
* Returns the address space associated with the fault.
* Returns 0 for kernel space and 1 for user space.
*/
-static inline int user_space_fault(unsigned long trans_exc_code)
+static inline int user_space_fault(struct pt_regs *regs)
{
+ unsigned long trans_exc_code;
+
/*
* The lowest two bits of the translation exception
* identification indicate which paging table was used.
*/
- trans_exc_code &= 3;
- if (trans_exc_code == 2)
- /* Access via secondary space, set_fs setting decides */
+ trans_exc_code = regs->int_parm_long & 3;
+ if (trans_exc_code == 3) /* home space -> kernel */
+ return 0;
+ if (user_mode(regs))
+ return 1;
+ if (trans_exc_code == 2) /* secondary space -> set_fs */
return current->thread.mm_segment.ar4;
- if (user_mode == HOME_SPACE_MODE)
- /* User space if the access has been done via home space. */
- return trans_exc_code == 3;
- /*
- * If the user space is not the home space the kernel runs in home
- * space. Access via secondary space has already been covered,
- * access via primary space or access register is from user space
- * and access via home space is from the kernel.
- */
- return trans_exc_code != 3;
+ if (current->flags & PF_VCPU)
+ return 1;
+ return 0;
+}
+
+static int bad_address(void *p)
+{
+ unsigned long dummy;
+
+ return probe_kernel_address((unsigned long *)p, dummy);
+}
+
+#ifdef CONFIG_64BIT
+static void dump_pagetable(unsigned long asce, unsigned long address)
+{
+ unsigned long *table = __va(asce & PAGE_MASK);
+
+ pr_alert("AS:%016lx ", asce);
+ switch (asce & _ASCE_TYPE_MASK) {
+ case _ASCE_TYPE_REGION1:
+ table = table + ((address >> 53) & 0x7ff);
+ if (bad_address(table))
+ goto bad;
+ pr_cont("R1:%016lx ", *table);
+ if (*table & _REGION_ENTRY_INVALID)
+ goto out;
+ table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+ /* fallthrough */
+ case _ASCE_TYPE_REGION2:
+ table = table + ((address >> 42) & 0x7ff);
+ if (bad_address(table))
+ goto bad;
+ pr_cont("R2:%016lx ", *table);
+ if (*table & _REGION_ENTRY_INVALID)
+ goto out;
+ table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+ /* fallthrough */
+ case _ASCE_TYPE_REGION3:
+ table = table + ((address >> 31) & 0x7ff);
+ if (bad_address(table))
+ goto bad;
+ pr_cont("R3:%016lx ", *table);
+ if (*table & (_REGION_ENTRY_INVALID | _REGION3_ENTRY_LARGE))
+ goto out;
+ table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+ /* fallthrough */
+ case _ASCE_TYPE_SEGMENT:
+ table = table + ((address >> 20) & 0x7ff);
+ if (bad_address(table))
+ goto bad;
+ pr_cont(KERN_CONT "S:%016lx ", *table);
+ if (*table & (_SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_LARGE))
+ goto out;
+ table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
+ }
+ table = table + ((address >> 12) & 0xff);
+ if (bad_address(table))
+ goto bad;
+ pr_cont("P:%016lx ", *table);
+out:
+ pr_cont("\n");
+ return;
+bad:
+ pr_cont("BAD\n");
}
-static inline void report_user_fault(struct pt_regs *regs, long int_code,
- int signr, unsigned long address)
+#else /* CONFIG_64BIT */
+
+static void dump_pagetable(unsigned long asce, unsigned long address)
+{
+ unsigned long *table = __va(asce & PAGE_MASK);
+
+ pr_alert("AS:%08lx ", asce);
+ table = table + ((address >> 20) & 0x7ff);
+ if (bad_address(table))
+ goto bad;
+ pr_cont("S:%08lx ", *table);
+ if (*table & _SEGMENT_ENTRY_INVALID)
+ goto out;
+ table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
+ table = table + ((address >> 12) & 0xff);
+ if (bad_address(table))
+ goto bad;
+ pr_cont("P:%08lx ", *table);
+out:
+ pr_cont("\n");
+ return;
+bad:
+ pr_cont("BAD\n");
+}
+
+#endif /* CONFIG_64BIT */
+
+static void dump_fault_info(struct pt_regs *regs)
+{
+ unsigned long asce;
+
+ pr_alert("Fault in ");
+ switch (regs->int_parm_long & 3) {
+ case 3:
+ pr_cont("home space ");
+ break;
+ case 2:
+ pr_cont("secondary space ");
+ break;
+ case 1:
+ pr_cont("access register ");
+ break;
+ case 0:
+ pr_cont("primary space ");
+ break;
+ }
+ pr_cont("mode while using ");
+ if (!user_space_fault(regs)) {
+ asce = S390_lowcore.kernel_asce;
+ pr_cont("kernel ");
+ }
+#ifdef CONFIG_PGSTE
+ else if ((current->flags & PF_VCPU) && S390_lowcore.gmap) {
+ struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;
+ asce = gmap->asce;
+ pr_cont("gmap ");
+ }
+#endif
+ else {
+ asce = S390_lowcore.user_asce;
+ pr_cont("user ");
+ }
+ pr_cont("ASCE.\n");
+ dump_pagetable(asce, regs->int_parm_long & __FAIL_ADDR_MASK);
+}
+
+static inline void report_user_fault(struct pt_regs *regs, long signr)
{
if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
return;
@@ -133,10 +261,13 @@ static inline void report_user_fault(struct pt_regs *regs, long int_code,
return;
if (!printk_ratelimit())
return;
- printk("User process fault: interruption code 0x%lX ", int_code);
+ printk(KERN_ALERT "User process fault: interruption code 0x%X ",
+ regs->int_code);
print_vma_addr(KERN_CONT "in ", regs->psw.addr & PSW_ADDR_INSN);
- printk("\n");
- printk("failing address: %lX\n", address);
+ printk(KERN_CONT "\n");
+ printk(KERN_ALERT "failing address: %016lx TEID: %016lx\n",
+ regs->int_parm_long & __FAIL_ADDR_MASK, regs->int_parm_long);
+ dump_fault_info(regs);
show_regs(regs);
}
@@ -144,24 +275,18 @@ static inline void report_user_fault(struct pt_regs *regs, long int_code,
* Send SIGSEGV to task. This is an external routine
* to keep the stack usage of do_page_fault small.
*/
-static noinline void do_sigsegv(struct pt_regs *regs, long int_code,
- int si_code, unsigned long trans_exc_code)
+static noinline void do_sigsegv(struct pt_regs *regs, int si_code)
{
struct siginfo si;
- unsigned long address;
- address = trans_exc_code & __FAIL_ADDR_MASK;
- current->thread.prot_addr = address;
- current->thread.trap_no = int_code;
- report_user_fault(regs, int_code, SIGSEGV, address);
+ report_user_fault(regs, SIGSEGV);
si.si_signo = SIGSEGV;
si.si_code = si_code;
- si.si_addr = (void __user *) address;
+ si.si_addr = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK);
force_sig_info(SIGSEGV, &si, current);
}
-static noinline void do_no_context(struct pt_regs *regs, long int_code,
- unsigned long trans_exc_code)
+static noinline void do_no_context(struct pt_regs *regs)
{
const struct exception_table_entry *fixup;
unsigned long address;
@@ -169,7 +294,7 @@ static noinline void do_no_context(struct pt_regs *regs, long int_code,
/* Are we prepared to handle this kernel fault? */
fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
if (fixup) {
- regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE;
+ regs->psw.addr = extable_fixup(fixup) | PSW_ADDR_AMODE;
return;
}
@@ -177,115 +302,84 @@ static noinline void do_no_context(struct pt_regs *regs, long int_code,
* Oops. The kernel tried to access some bad page. We'll have to
* terminate things with extreme prejudice.
*/
- address = trans_exc_code & __FAIL_ADDR_MASK;
- if (!user_space_fault(trans_exc_code))
+ address = regs->int_parm_long & __FAIL_ADDR_MASK;
+ if (!user_space_fault(regs))
printk(KERN_ALERT "Unable to handle kernel pointer dereference"
- " at virtual kernel address %p\n", (void *)address);
+ " in virtual kernel address space\n");
else
printk(KERN_ALERT "Unable to handle kernel paging request"
- " at virtual user address %p\n", (void *)address);
-
- die("Oops", regs, int_code);
+ " in virtual user address space\n");
+ printk(KERN_ALERT "failing address: %016lx TEID: %016lx\n",
+ regs->int_parm_long & __FAIL_ADDR_MASK, regs->int_parm_long);
+ dump_fault_info(regs);
+ die(regs, "Oops");
do_exit(SIGKILL);
}
-static noinline void do_low_address(struct pt_regs *regs, long int_code,
- unsigned long trans_exc_code)
+static noinline void do_low_address(struct pt_regs *regs)
{
/* Low-address protection hit in kernel mode means
NULL pointer write access in kernel mode. */
if (regs->psw.mask & PSW_MASK_PSTATE) {
/* Low-address protection hit in user mode 'cannot happen'. */
- die ("Low-address protection", regs, int_code);
+ die (regs, "Low-address protection");
do_exit(SIGKILL);
}
- do_no_context(regs, int_code, trans_exc_code);
+ do_no_context(regs);
}
-static noinline void do_sigbus(struct pt_regs *regs, long int_code,
- unsigned long trans_exc_code)
+static noinline void do_sigbus(struct pt_regs *regs)
{
struct task_struct *tsk = current;
- unsigned long address;
struct siginfo si;
/*
* Send a sigbus, regardless of whether we were in kernel
* or user mode.
*/
- address = trans_exc_code & __FAIL_ADDR_MASK;
- tsk->thread.prot_addr = address;
- tsk->thread.trap_no = int_code;
si.si_signo = SIGBUS;
si.si_errno = 0;
si.si_code = BUS_ADRERR;
- si.si_addr = (void __user *) address;
+ si.si_addr = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK);
force_sig_info(SIGBUS, &si, tsk);
}
-#ifdef CONFIG_S390_EXEC_PROTECT
-static noinline int signal_return(struct pt_regs *regs, long int_code,
- unsigned long trans_exc_code)
-{
- u16 instruction;
- int rc;
-
- rc = __get_user(instruction, (u16 __user *) regs->psw.addr);
-
- if (!rc && instruction == 0x0a77) {
- clear_tsk_thread_flag(current, TIF_SINGLE_STEP);
- if (is_compat_task())
- sys32_sigreturn();
- else
- sys_sigreturn();
- } else if (!rc && instruction == 0x0aad) {
- clear_tsk_thread_flag(current, TIF_SINGLE_STEP);
- if (is_compat_task())
- sys32_rt_sigreturn();
- else
- sys_rt_sigreturn();
- } else
- do_sigsegv(regs, int_code, SEGV_MAPERR, trans_exc_code);
- return 0;
-}
-#endif /* CONFIG_S390_EXEC_PROTECT */
-
-static noinline void do_fault_error(struct pt_regs *regs, long int_code,
- unsigned long trans_exc_code, int fault)
+static noinline void do_fault_error(struct pt_regs *regs, int fault)
{
int si_code;
switch (fault) {
case VM_FAULT_BADACCESS:
-#ifdef CONFIG_S390_EXEC_PROTECT
- if ((regs->psw.mask & PSW_MASK_ASC) == PSW_ASC_SECONDARY &&
- (trans_exc_code & 3) == 0) {
- signal_return(regs, int_code, trans_exc_code);
- break;
- }
-#endif /* CONFIG_S390_EXEC_PROTECT */
case VM_FAULT_BADMAP:
/* Bad memory access. Check if it is kernel or user space. */
- if (regs->psw.mask & PSW_MASK_PSTATE) {
+ if (user_mode(regs)) {
/* User mode accesses just cause a SIGSEGV */
si_code = (fault == VM_FAULT_BADMAP) ?
SEGV_MAPERR : SEGV_ACCERR;
- do_sigsegv(regs, int_code, si_code, trans_exc_code);
+ do_sigsegv(regs, si_code);
return;
}
case VM_FAULT_BADCONTEXT:
- do_no_context(regs, int_code, trans_exc_code);
+ case VM_FAULT_PFAULT:
+ do_no_context(regs);
+ break;
+ case VM_FAULT_SIGNAL:
+ if (!user_mode(regs))
+ do_no_context(regs);
break;
default: /* fault & VM_FAULT_ERROR */
- if (fault & VM_FAULT_OOM)
- pagefault_out_of_memory();
- else if (fault & VM_FAULT_SIGBUS) {
+ if (fault & VM_FAULT_OOM) {
+ if (!user_mode(regs))
+ do_no_context(regs);
+ else
+ pagefault_out_of_memory();
+ } else if (fault & VM_FAULT_SIGBUS) {
/* Kernel mode? Handle exceptions or die */
- if (!(regs->psw.mask & PSW_MASK_PSTATE))
- do_no_context(regs, int_code, trans_exc_code);
+ if (!user_mode(regs))
+ do_no_context(regs);
else
- do_sigbus(regs, int_code, trans_exc_code);
+ do_sigbus(regs);
} else
BUG();
break;
@@ -303,20 +397,31 @@ static noinline void do_fault_error(struct pt_regs *regs, long int_code,
* 11 Page translation -> Not present (nullification)
* 3b Region third trans. -> Not present (nullification)
*/
-static inline int do_exception(struct pt_regs *regs, int access,
- unsigned long trans_exc_code)
+static inline int do_exception(struct pt_regs *regs, int access)
{
+#ifdef CONFIG_PGSTE
+ struct gmap *gmap;
+#endif
struct task_struct *tsk;
struct mm_struct *mm;
struct vm_area_struct *vma;
+ unsigned long trans_exc_code;
unsigned long address;
- int fault, write;
+ unsigned int flags;
+ int fault;
+
+ tsk = current;
+ /*
+ * The instruction that caused the program check has
+ * been nullified. Don't signal single step via SIGTRAP.
+ */
+ clear_pt_regs_flag(regs, PIF_PER_TRAP);
if (notify_page_fault(regs))
return 0;
- tsk = current;
mm = tsk->mm;
+ trans_exc_code = regs->int_parm_long;
/*
* Verify that the fault happened in user space, that
@@ -324,13 +429,37 @@ static inline int do_exception(struct pt_regs *regs, int access,
* user context.
*/
fault = VM_FAULT_BADCONTEXT;
- if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
+ if (unlikely(!user_space_fault(regs) || in_atomic() || !mm))
goto out;
address = trans_exc_code & __FAIL_ADDR_MASK;
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+ flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+ if (user_mode(regs))
+ flags |= FAULT_FLAG_USER;
+ if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
+ flags |= FAULT_FLAG_WRITE;
down_read(&mm->mmap_sem);
+#ifdef CONFIG_PGSTE
+ gmap = (struct gmap *)
+ ((current->flags & PF_VCPU) ? S390_lowcore.gmap : 0);
+ if (gmap) {
+ address = __gmap_fault(address, gmap);
+ if (address == -EFAULT) {
+ fault = VM_FAULT_BADMAP;
+ goto out_up;
+ }
+ if (address == -ENOMEM) {
+ fault = VM_FAULT_OOM;
+ goto out_up;
+ }
+ if (gmap->pfault_enabled)
+ flags |= FAULT_FLAG_RETRY_NOWAIT;
+ }
+#endif
+
+retry:
fault = VM_FAULT_BADMAP;
vma = find_vma(mm, address);
if (!vma)
@@ -358,27 +487,49 @@ static inline int do_exception(struct pt_regs *regs, int access,
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
- write = (access == VM_WRITE ||
- (trans_exc_code & store_indication) == 0x400) ?
- FAULT_FLAG_WRITE : 0;
- fault = handle_mm_fault(mm, vma, address, write);
+ fault = handle_mm_fault(mm, vma, address, flags);
+ /* No reason to continue if interrupted by SIGKILL. */
+ if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
+ fault = VM_FAULT_SIGNAL;
+ goto out;
+ }
if (unlikely(fault & VM_FAULT_ERROR))
goto out_up;
- if (fault & VM_FAULT_MAJOR) {
- tsk->maj_flt++;
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
- regs, address);
- } else {
- tsk->min_flt++;
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
- regs, address);
- }
/*
- * The instruction that caused the program check will
- * be repeated. Don't signal single step via SIGTRAP.
+ * Major/minor page fault accounting is only done on the
+ * initial attempt. If we go through a retry, it is extremely
+ * likely that the page will be found in page cache at that point.
*/
- clear_tsk_thread_flag(tsk, TIF_SINGLE_STEP);
+ if (flags & FAULT_FLAG_ALLOW_RETRY) {
+ if (fault & VM_FAULT_MAJOR) {
+ tsk->maj_flt++;
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
+ regs, address);
+ } else {
+ tsk->min_flt++;
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
+ regs, address);
+ }
+ if (fault & VM_FAULT_RETRY) {
+#ifdef CONFIG_PGSTE
+ if (gmap && (flags & FAULT_FLAG_RETRY_NOWAIT)) {
+ /* FAULT_FLAG_RETRY_NOWAIT has been set,
+ * mmap_sem has not been released */
+ current->thread.gmap_pfault = 1;
+ fault = VM_FAULT_PFAULT;
+ goto out_up;
+ }
+#endif
+ /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
+ * of starvation. */
+ flags &= ~(FAULT_FLAG_ALLOW_RETRY |
+ FAULT_FLAG_RETRY_NOWAIT);
+ flags |= FAULT_FLAG_TRIED;
+ down_read(&mm->mmap_sem);
+ goto retry;
+ }
+ }
fault = 0;
out_up:
up_read(&mm->mmap_sem);
@@ -386,102 +537,48 @@ out:
return fault;
}
-void __kprobes do_protection_exception(struct pt_regs *regs, long pgm_int_code,
- unsigned long trans_exc_code)
+void __kprobes do_protection_exception(struct pt_regs *regs)
{
+ unsigned long trans_exc_code;
int fault;
- /* Protection exception is supressing, decrement psw address. */
- regs->psw.addr -= (pgm_int_code >> 16);
+ trans_exc_code = regs->int_parm_long;
+ /*
+ * Protection exceptions are suppressing, decrement psw address.
+ * The exception to this rule are aborted transactions, for these
+ * the PSW already points to the correct location.
+ */
+ if (!(regs->int_code & 0x200))
+ regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
/*
* Check for low-address protection. This needs to be treated
* as a special case because the translation exception code
* field is not guaranteed to contain valid data in this case.
*/
if (unlikely(!(trans_exc_code & 4))) {
- do_low_address(regs, pgm_int_code, trans_exc_code);
+ do_low_address(regs);
return;
}
- fault = do_exception(regs, VM_WRITE, trans_exc_code);
+ fault = do_exception(regs, VM_WRITE);
if (unlikely(fault))
- do_fault_error(regs, 4, trans_exc_code, fault);
+ do_fault_error(regs, fault);
}
-void __kprobes do_dat_exception(struct pt_regs *regs, long pgm_int_code,
- unsigned long trans_exc_code)
+void __kprobes do_dat_exception(struct pt_regs *regs)
{
int access, fault;
access = VM_READ | VM_EXEC | VM_WRITE;
-#ifdef CONFIG_S390_EXEC_PROTECT
- if ((regs->psw.mask & PSW_MASK_ASC) == PSW_ASC_SECONDARY &&
- (trans_exc_code & 3) == 0)
- access = VM_EXEC;
-#endif
- fault = do_exception(regs, access, trans_exc_code);
+ fault = do_exception(regs, access);
if (unlikely(fault))
- do_fault_error(regs, pgm_int_code & 255, trans_exc_code, fault);
-}
-
-#ifdef CONFIG_64BIT
-void __kprobes do_asce_exception(struct pt_regs *regs, long pgm_int_code,
- unsigned long trans_exc_code)
-{
- struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
-
- if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
- goto no_context;
-
- down_read(&mm->mmap_sem);
- vma = find_vma(mm, trans_exc_code & __FAIL_ADDR_MASK);
- up_read(&mm->mmap_sem);
-
- if (vma) {
- update_mm(mm, current);
- return;
- }
-
- /* User mode accesses just cause a SIGSEGV */
- if (regs->psw.mask & PSW_MASK_PSTATE) {
- do_sigsegv(regs, pgm_int_code, SEGV_MAPERR, trans_exc_code);
- return;
- }
-
-no_context:
- do_no_context(regs, pgm_int_code, trans_exc_code);
-}
-#endif
-
-int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write)
-{
- struct pt_regs regs;
- int access, fault;
-
- regs.psw.mask = psw_kernel_bits;
- if (!irqs_disabled())
- regs.psw.mask |= PSW_MASK_IO | PSW_MASK_EXT;
- regs.psw.addr = (unsigned long) __builtin_return_address(0);
- regs.psw.addr |= PSW_ADDR_AMODE;
- uaddr &= PAGE_MASK;
- access = write ? VM_WRITE : VM_READ;
- fault = do_exception(&regs, access, uaddr | 2);
- if (unlikely(fault)) {
- if (fault & VM_FAULT_OOM) {
- pagefault_out_of_memory();
- fault = 0;
- } else if (fault & VM_FAULT_SIGBUS)
- do_sigbus(&regs, pgm_int_code, uaddr);
- }
- return fault ? -EFAULT : 0;
+ do_fault_error(regs, fault);
}
#ifdef CONFIG_PFAULT
/*
* 'pfault' pseudo page faults routines.
*/
-static ext_int_info_t ext_int_pfault;
-static int pfault_disable = 0;
+static int pfault_disable;
static int __init nopfault(char *str)
{
@@ -491,25 +588,31 @@ static int __init nopfault(char *str)
__setup("nopfault", nopfault);
-typedef struct {
- __u16 refdiagc;
- __u16 reffcode;
- __u16 refdwlen;
- __u16 refversn;
- __u64 refgaddr;
- __u64 refselmk;
- __u64 refcmpmk;
- __u64 reserved;
-} __attribute__ ((packed, aligned(8))) pfault_refbk_t;
+struct pfault_refbk {
+ u16 refdiagc;
+ u16 reffcode;
+ u16 refdwlen;
+ u16 refversn;
+ u64 refgaddr;
+ u64 refselmk;
+ u64 refcmpmk;
+ u64 reserved;
+} __attribute__ ((packed, aligned(8)));
int pfault_init(void)
{
- pfault_refbk_t refbk =
- { 0x258, 0, 5, 2, __LC_CURRENT, 1ULL << 48, 1ULL << 48,
- __PF_RES_FIELD };
+ struct pfault_refbk refbk = {
+ .refdiagc = 0x258,
+ .reffcode = 0,
+ .refdwlen = 5,
+ .refversn = 2,
+ .refgaddr = __LC_CURRENT_PID,
+ .refselmk = 1ULL << 48,
+ .refcmpmk = 1ULL << 48,
+ .reserved = __PF_RES_FIELD };
int rc;
- if (!MACHINE_IS_VM || pfault_disable)
+ if (pfault_disable)
return -1;
asm volatile(
" diag %1,%0,0x258\n"
@@ -518,18 +621,20 @@ int pfault_init(void)
"2:\n"
EX_TABLE(0b,1b)
: "=d" (rc) : "a" (&refbk), "m" (refbk) : "cc");
- __ctl_set_bit(0, 9);
return rc;
}
void pfault_fini(void)
{
- pfault_refbk_t refbk =
- { 0x258, 1, 5, 2, 0ULL, 0ULL, 0ULL, 0ULL };
-
- if (!MACHINE_IS_VM || pfault_disable)
+ struct pfault_refbk refbk = {
+ .refdiagc = 0x258,
+ .reffcode = 1,
+ .refdwlen = 5,
+ .refversn = 2,
+ };
+
+ if (pfault_disable)
return;
- __ctl_clear_bit(0,9);
asm volatile(
" diag %0,0,0x258\n"
"0:\n"
@@ -537,11 +642,15 @@ void pfault_fini(void)
: : "a" (&refbk), "m" (refbk) : "cc");
}
-static void pfault_interrupt(unsigned int ext_int_code,
+static DEFINE_SPINLOCK(pfault_lock);
+static LIST_HEAD(pfault_list);
+
+static void pfault_interrupt(struct ext_code ext_code,
unsigned int param32, unsigned long param64)
{
struct task_struct *tsk;
__u16 subcode;
+ pid_t pid;
/*
* Get the external interruption subcode & pfault
@@ -549,67 +658,118 @@ static void pfault_interrupt(unsigned int ext_int_code,
* in the 'cpu address' field associated with the
* external interrupt.
*/
- subcode = ext_int_code >> 16;
+ subcode = ext_code.subcode;
if ((subcode & 0xff00) != __SUBCODE_MASK)
return;
-
- /*
- * Get the token (= address of the task structure of the affected task).
- */
-#ifdef CONFIG_64BIT
- tsk = *(struct task_struct **) param64;
-#else
- tsk = *(struct task_struct **) param32;
-#endif
-
+ inc_irq_stat(IRQEXT_PFL);
+ /* Get the token (= pid of the affected task). */
+ pid = sizeof(void *) == 4 ? param32 : param64;
+ rcu_read_lock();
+ tsk = find_task_by_pid_ns(pid, &init_pid_ns);
+ if (tsk)
+ get_task_struct(tsk);
+ rcu_read_unlock();
+ if (!tsk)
+ return;
+ spin_lock(&pfault_lock);
if (subcode & 0x0080) {
/* signal bit is set -> a page has been swapped in by VM */
- if (xchg(&tsk->thread.pfault_wait, -1) != 0) {
+ if (tsk->thread.pfault_wait == 1) {
/* Initial interrupt was faster than the completion
* interrupt. pfault_wait is valid. Set pfault_wait
* back to zero and wake up the process. This can
* safely be done because the task is still sleeping
* and can't produce new pfaults. */
tsk->thread.pfault_wait = 0;
+ list_del(&tsk->thread.list);
wake_up_process(tsk);
put_task_struct(tsk);
+ } else {
+ /* Completion interrupt was faster than initial
+ * interrupt. Set pfault_wait to -1 so the initial
+ * interrupt doesn't put the task to sleep.
+ * If the task is not running, ignore the completion
+ * interrupt since it must be a leftover of a PFAULT
+ * CANCEL operation which didn't remove all pending
+ * completion interrupts. */
+ if (tsk->state == TASK_RUNNING)
+ tsk->thread.pfault_wait = -1;
}
} else {
/* signal bit not set -> a real page is missing. */
- get_task_struct(tsk);
- set_task_state(tsk, TASK_UNINTERRUPTIBLE);
- if (xchg(&tsk->thread.pfault_wait, 1) != 0) {
+ if (WARN_ON_ONCE(tsk != current))
+ goto out;
+ if (tsk->thread.pfault_wait == 1) {
+ /* Already on the list with a reference: put to sleep */
+ __set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+ set_tsk_need_resched(tsk);
+ } else if (tsk->thread.pfault_wait == -1) {
/* Completion interrupt was faster than the initial
- * interrupt (swapped in a -1 for pfault_wait). Set
- * pfault_wait back to zero and exit. This can be
- * done safely because tsk is running in kernel
- * mode and can't produce new pfaults. */
+ * interrupt (pfault_wait == -1). Set pfault_wait
+ * back to zero and exit. */
tsk->thread.pfault_wait = 0;
- set_task_state(tsk, TASK_RUNNING);
- put_task_struct(tsk);
- } else
+ } else {
+ /* Initial interrupt arrived before completion
+ * interrupt. Let the task sleep.
+ * An extra task reference is needed since a different
+ * cpu may set the task state to TASK_RUNNING again
+ * before the scheduler is reached. */
+ get_task_struct(tsk);
+ tsk->thread.pfault_wait = 1;
+ list_add(&tsk->thread.list, &pfault_list);
+ __set_task_state(tsk, TASK_UNINTERRUPTIBLE);
set_tsk_need_resched(tsk);
+ }
}
+out:
+ spin_unlock(&pfault_lock);
+ put_task_struct(tsk);
}
-void __init pfault_irq_init(void)
+static int pfault_cpu_notify(struct notifier_block *self, unsigned long action,
+ void *hcpu)
{
- if (!MACHINE_IS_VM)
- return;
+ struct thread_struct *thread, *next;
+ struct task_struct *tsk;
- /*
- * Try to get pfault pseudo page faults going.
- */
- if (register_early_external_interrupt(0x2603, pfault_interrupt,
- &ext_int_pfault) != 0)
- panic("Couldn't request external interrupt 0x2603");
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_DEAD:
+ spin_lock_irq(&pfault_lock);
+ list_for_each_entry_safe(thread, next, &pfault_list, list) {
+ thread->pfault_wait = 0;
+ list_del(&thread->list);
+ tsk = container_of(thread, struct task_struct, thread);
+ wake_up_process(tsk);
+ put_task_struct(tsk);
+ }
+ spin_unlock_irq(&pfault_lock);
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
- if (pfault_init() == 0)
- return;
+static int __init pfault_irq_init(void)
+{
+ int rc;
- /* Tough luck, no pfault. */
+ rc = register_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
+ if (rc)
+ goto out_extint;
+ rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP;
+ if (rc)
+ goto out_pfault;
+ irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
+ hotcpu_notifier(pfault_cpu_notify, 0);
+ return 0;
+
+out_pfault:
+ unregister_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
+out_extint:
pfault_disable = 1;
- unregister_early_external_interrupt(0x2603, pfault_interrupt,
- &ext_int_pfault);
+ return rc;
}
-#endif
+early_initcall(pfault_irq_init);
+
+#endif /* CONFIG_PFAULT */
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
index 38e641cdd97..639fce46400 100644
--- a/arch/s390/mm/gup.c
+++ b/arch/s390/mm/gup.c
@@ -20,18 +20,17 @@
static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
unsigned long end, int write, struct page **pages, int *nr)
{
- unsigned long mask, result;
+ unsigned long mask;
pte_t *ptep, pte;
struct page *page;
- result = write ? 0 : _PAGE_RO;
- mask = result | _PAGE_INVALID | _PAGE_SPECIAL;
+ mask = (write ? _PAGE_PROTECT : 0) | _PAGE_INVALID | _PAGE_SPECIAL;
ptep = ((pte_t *) pmd_deref(pmd)) + pte_index(addr);
do {
pte = *ptep;
barrier();
- if ((pte_val(pte) & mask) != result)
+ if ((pte_val(pte) & mask) != 0)
return 0;
VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
page = pte_page(pte);
@@ -53,11 +52,11 @@ static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
unsigned long end, int write, struct page **pages, int *nr)
{
unsigned long mask, result;
- struct page *head, *page;
+ struct page *head, *page, *tail;
int refs;
- result = write ? 0 : _SEGMENT_ENTRY_RO;
- mask = result | _SEGMENT_ENTRY_INV;
+ result = write ? 0 : _SEGMENT_ENTRY_PROTECT;
+ mask = result | _SEGMENT_ENTRY_INVALID;
if ((pmd_val(pmd) & mask) != result)
return 0;
VM_BUG_ON(!pfn_valid(pmd_val(pmd) >> PAGE_SHIFT));
@@ -65,6 +64,7 @@ static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
refs = 0;
head = pmd_page(pmd);
page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
+ tail = page;
do {
VM_BUG_ON(compound_head(page) != head);
pages[*nr] = page;
@@ -82,6 +82,17 @@ static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
*nr -= refs;
while (refs--)
put_page(head);
+ return 0;
+ }
+
+ /*
+ * Any tail page need their mapcount reference taken before we
+ * return.
+ */
+ while (refs--) {
+ if (PageTail(tail))
+ get_huge_page_tail(tail);
+ tail++;
}
return 1;
@@ -104,9 +115,18 @@ static inline int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr,
pmd = *pmdp;
barrier();
next = pmd_addr_end(addr, end);
- if (pmd_none(pmd))
+ /*
+ * The pmd_trans_splitting() check below explains why
+ * pmdp_splitting_flush() has to serialize with
+ * smp_call_function() against our disabled IRQs, to stop
+ * this gup-fast code from running while we set the
+ * splitting bit in the pmd. Returning zero will take
+ * the slow path that will call wait_split_huge_page()
+ * if the pmd is still in splitting state.
+ */
+ if (pmd_none(pmd) || pmd_trans_splitting(pmd))
return 0;
- if (unlikely(pmd_huge(pmd))) {
+ if (unlikely(pmd_large(pmd))) {
if (!gup_huge_pmd(pmdp, pmd, addr, next,
write, pages, nr))
return 0;
@@ -143,28 +163,16 @@ static inline int gup_pud_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr,
return 1;
}
-/**
- * get_user_pages_fast() - pin user pages in memory
- * @start: starting user address
- * @nr_pages: number of pages from start to pin
- * @write: whether pages will be written to
- * @pages: array that receives pointers to the pages pinned.
- * Should be at least nr_pages long.
- *
- * Attempt to pin user pages in memory without taking mm->mmap_sem.
- * If not successful, it will fall back to taking the lock and
- * calling get_user_pages().
- *
- * Returns number of pages pinned. This may be fewer than the number
- * requested. If nr_pages is 0 or negative, returns 0. If no pages
- * were pinned, returns -errno.
+/*
+ * Like get_user_pages_fast() except its IRQ-safe in that it won't fall
+ * back to the regular GUP.
*/
-int get_user_pages_fast(unsigned long start, int nr_pages, int write,
- struct page **pages)
+int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
+ struct page **pages)
{
struct mm_struct *mm = current->mm;
unsigned long addr, len, end;
- unsigned long next;
+ unsigned long next, flags;
pgd_t *pgdp, pgd;
int nr = 0;
@@ -172,54 +180,67 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
addr = start;
len = (unsigned long) nr_pages << PAGE_SHIFT;
end = start + len;
- if (end < start)
- goto slow_irqon;
-
+ if ((end <= start) || (end > TASK_SIZE))
+ return 0;
/*
- * local_irq_disable() doesn't prevent pagetable teardown, but does
+ * local_irq_save() doesn't prevent pagetable teardown, but does
* prevent the pagetables from being freed on s390.
*
* So long as we atomically load page table pointers versus teardown,
* we can follow the address down to the the page and take a ref on it.
*/
- local_irq_disable();
+ local_irq_save(flags);
pgdp = pgd_offset(mm, addr);
do {
pgd = *pgdp;
barrier();
next = pgd_addr_end(addr, end);
if (pgd_none(pgd))
- goto slow;
+ break;
if (!gup_pud_range(pgdp, pgd, addr, next, write, pages, &nr))
- goto slow;
+ break;
} while (pgdp++, addr = next, addr != end);
- local_irq_enable();
+ local_irq_restore(flags);
- VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT);
return nr;
+}
- {
- int ret;
-slow:
- local_irq_enable();
-slow_irqon:
- /* Try to get the remaining pages with get_user_pages */
- start += nr << PAGE_SHIFT;
- pages += nr;
-
- down_read(&mm->mmap_sem);
- ret = get_user_pages(current, mm, start,
- (end - start) >> PAGE_SHIFT, write, 0, pages, NULL);
- up_read(&mm->mmap_sem);
-
- /* Have to be a bit careful with return values */
- if (nr > 0) {
- if (ret < 0)
- ret = nr;
- else
- ret += nr;
- }
+/**
+ * get_user_pages_fast() - pin user pages in memory
+ * @start: starting user address
+ * @nr_pages: number of pages from start to pin
+ * @write: whether pages will be written to
+ * @pages: array that receives pointers to the pages pinned.
+ * Should be at least nr_pages long.
+ *
+ * Attempt to pin user pages in memory without taking mm->mmap_sem.
+ * If not successful, it will fall back to taking the lock and
+ * calling get_user_pages().
+ *
+ * Returns number of pages pinned. This may be fewer than the number
+ * requested. If nr_pages is 0 or negative, returns 0. If no pages
+ * were pinned, returns -errno.
+ */
+int get_user_pages_fast(unsigned long start, int nr_pages, int write,
+ struct page **pages)
+{
+ struct mm_struct *mm = current->mm;
+ int nr, ret;
- return ret;
- }
+ start &= PAGE_MASK;
+ nr = __get_user_pages_fast(start, nr_pages, write, pages);
+ if (nr == nr_pages)
+ return nr;
+
+ /* Try to get the remaining pages with get_user_pages */
+ start += nr << PAGE_SHIFT;
+ pages += nr;
+ down_read(&mm->mmap_sem);
+ ret = get_user_pages(current, mm, start,
+ nr_pages - nr, write, 0, pages, NULL);
+ up_read(&mm->mmap_sem);
+ /* Have to be a bit careful with return values */
+ if (nr > 0)
+ ret = (ret < 0) ? nr : ret + nr;
+ return ret;
}
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index 639cd21f221..0ff66a7e29b 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -1,38 +1,131 @@
/*
* IBM System z Huge TLB Page Support for Kernel.
*
- * Copyright 2007 IBM Corp.
+ * Copyright IBM Corp. 2007
* Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
*/
#include <linux/mm.h>
#include <linux/hugetlb.h>
+static inline pmd_t __pte_to_pmd(pte_t pte)
+{
+ int none, young, prot;
+ pmd_t pmd;
+
+ /*
+ * Convert encoding pte bits pmd bits
+ * .IR...wrdytp ..R...I...y.
+ * empty .10...000000 -> ..0...1...0.
+ * prot-none, clean, old .11...000001 -> ..0...1...1.
+ * prot-none, clean, young .11...000101 -> ..1...1...1.
+ * prot-none, dirty, old .10...001001 -> ..0...1...1.
+ * prot-none, dirty, young .10...001101 -> ..1...1...1.
+ * read-only, clean, old .11...010001 -> ..1...1...0.
+ * read-only, clean, young .01...010101 -> ..1...0...1.
+ * read-only, dirty, old .11...011001 -> ..1...1...0.
+ * read-only, dirty, young .01...011101 -> ..1...0...1.
+ * read-write, clean, old .11...110001 -> ..0...1...0.
+ * read-write, clean, young .01...110101 -> ..0...0...1.
+ * read-write, dirty, old .10...111001 -> ..0...1...0.
+ * read-write, dirty, young .00...111101 -> ..0...0...1.
+ * Huge ptes are dirty by definition, a clean pte is made dirty
+ * by the conversion.
+ */
+ if (pte_present(pte)) {
+ pmd_val(pmd) = pte_val(pte) & PAGE_MASK;
+ if (pte_val(pte) & _PAGE_INVALID)
+ pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
+ none = (pte_val(pte) & _PAGE_PRESENT) &&
+ !(pte_val(pte) & _PAGE_READ) &&
+ !(pte_val(pte) & _PAGE_WRITE);
+ prot = (pte_val(pte) & _PAGE_PROTECT) &&
+ !(pte_val(pte) & _PAGE_WRITE);
+ young = pte_val(pte) & _PAGE_YOUNG;
+ if (none || young)
+ pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
+ if (prot || (none && young))
+ pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
+ } else
+ pmd_val(pmd) = _SEGMENT_ENTRY_INVALID;
+ return pmd;
+}
+
+static inline pte_t __pmd_to_pte(pmd_t pmd)
+{
+ pte_t pte;
+
+ /*
+ * Convert encoding pmd bits pte bits
+ * ..R...I...y. .IR...wrdytp
+ * empty ..0...1...0. -> .10...000000
+ * prot-none, old ..0...1...1. -> .10...001001
+ * prot-none, young ..1...1...1. -> .10...001101
+ * read-only, old ..1...1...0. -> .11...011001
+ * read-only, young ..1...0...1. -> .01...011101
+ * read-write, old ..0...1...0. -> .10...111001
+ * read-write, young ..0...0...1. -> .00...111101
+ * Huge ptes are dirty by definition
+ */
+ if (pmd_present(pmd)) {
+ pte_val(pte) = _PAGE_PRESENT | _PAGE_LARGE | _PAGE_DIRTY |
+ (pmd_val(pmd) & PAGE_MASK);
+ if (pmd_val(pmd) & _SEGMENT_ENTRY_INVALID)
+ pte_val(pte) |= _PAGE_INVALID;
+ if (pmd_prot_none(pmd)) {
+ if (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT)
+ pte_val(pte) |= _PAGE_YOUNG;
+ } else {
+ pte_val(pte) |= _PAGE_READ;
+ if (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT)
+ pte_val(pte) |= _PAGE_PROTECT;
+ else
+ pte_val(pte) |= _PAGE_WRITE;
+ if (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG)
+ pte_val(pte) |= _PAGE_YOUNG;
+ }
+ } else
+ pte_val(pte) = _PAGE_INVALID;
+ return pte;
+}
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *pteptr, pte_t pteval)
+ pte_t *ptep, pte_t pte)
{
- pmd_t *pmdp = (pmd_t *) pteptr;
- pte_t shadow_pteval = pteval;
- unsigned long mask;
+ pmd_t pmd;
+ pmd = __pte_to_pmd(pte);
if (!MACHINE_HAS_HPAGE) {
- pteptr = (pte_t *) pte_page(pteval)[1].index;
- mask = pte_val(pteval) &
- (_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO);
- pte_val(pteval) = (_SEGMENT_ENTRY + __pa(pteptr)) | mask;
- if (mm->context.noexec) {
- pteptr += PTRS_PER_PTE;
- pte_val(shadow_pteval) =
- (_SEGMENT_ENTRY + __pa(pteptr)) | mask;
- }
- }
+ pmd_val(pmd) &= ~_SEGMENT_ENTRY_ORIGIN;
+ pmd_val(pmd) |= pte_page(pte)[1].index;
+ } else
+ pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO;
+ *(pmd_t *) ptep = pmd;
+}
- pmd_val(*pmdp) = pte_val(pteval);
- if (mm->context.noexec) {
- pmdp = get_shadow_table(pmdp);
- pmd_val(*pmdp) = pte_val(shadow_pteval);
+pte_t huge_ptep_get(pte_t *ptep)
+{
+ unsigned long origin;
+ pmd_t pmd;
+
+ pmd = *(pmd_t *) ptep;
+ if (!MACHINE_HAS_HPAGE && pmd_present(pmd)) {
+ origin = pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN;
+ pmd_val(pmd) &= ~_SEGMENT_ENTRY_ORIGIN;
+ pmd_val(pmd) |= *(unsigned long *) origin;
}
+ return __pmd_to_pte(pmd);
+}
+
+pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ pmd_t *pmdp = (pmd_t *) ptep;
+ pte_t pte = huge_ptep_get(ptep);
+
+ pmdp_flush_direct(mm, addr, pmdp);
+ pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
+ return pte;
}
int arch_prepare_hugepage(struct page *page)
@@ -45,11 +138,11 @@ int arch_prepare_hugepage(struct page *page)
if (MACHINE_HAS_HPAGE)
return 0;
- ptep = (pte_t *) pte_alloc_one(&init_mm, address);
+ ptep = (pte_t *) pte_alloc_one(&init_mm, addr);
if (!ptep)
return -ENOMEM;
- pte = mk_pte(page, PAGE_RW);
+ pte_val(pte) = addr;
for (i = 0; i < PTRS_PER_PTE; i++) {
set_pte_at(&init_mm, addr + i * PAGE_SIZE, ptep + i, pte);
pte_val(pte) += PAGE_SIZE;
@@ -68,6 +161,8 @@ void arch_release_hugepage(struct page *page)
ptep = (pte_t *) page[1].index;
if (!ptep)
return;
+ clear_table((unsigned long *) ptep, _PAGE_INVALID,
+ PTRS_PER_PTE * sizeof(pte_t));
page_table_free(&init_mm, (unsigned long *) ptep);
page[1].index = 0;
}
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index bb409332a48..0c1073ed1e8 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -1,8 +1,6 @@
/*
- * arch/s390/mm/init.c
- *
* S390 version
- * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 1999
* Author(s): Hartmut Penner (hp@de.ibm.com)
*
* Derived from "arch/i386/mm/init.c"
@@ -23,12 +21,13 @@
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
+#include <linux/memory.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/initrd.h>
+#include <linux/export.h>
#include <linux/gfp.h>
#include <asm/processor.h>
-#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
@@ -37,17 +36,18 @@
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
+#include <asm/ctl_reg.h>
+#include <asm/sclp.h>
pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE)));
unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL(empty_zero_page);
-static unsigned long setup_zero_pages(void)
+static void __init setup_zero_pages(void)
{
struct cpuid cpu_id;
unsigned int order;
- unsigned long size;
struct page *page;
int i;
@@ -64,10 +64,19 @@ static unsigned long setup_zero_pages(void)
break;
case 0x2097: /* z10 */
case 0x2098: /* z10 */
- default:
+ case 0x2817: /* z196 */
+ case 0x2818: /* z196 */
order = 2;
break;
+ case 0x2827: /* zEC12 */
+ case 0x2828: /* zEC12 */
+ default:
+ order = 5;
+ break;
}
+ /* Limit number of empty zero pages for small memory sizes */
+ if (order > 2 && totalram_pages <= 16384)
+ order = 2;
empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
if (!empty_zero_page)
@@ -76,14 +85,11 @@ static unsigned long setup_zero_pages(void)
page = virt_to_page((void *) empty_zero_page);
split_page(page, order);
for (i = 1 << order; i > 0; i--) {
- SetPageReserved(page);
+ mark_page_reserved(page);
page++;
}
- size = PAGE_SIZE << order;
- zero_page_mask = (size - 1) & PAGE_MASK;
-
- return 1UL << order;
+ zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}
/*
@@ -92,18 +98,22 @@ static unsigned long setup_zero_pages(void)
void __init paging_init(void)
{
unsigned long max_zone_pfns[MAX_NR_ZONES];
- unsigned long pgd_type;
+ unsigned long pgd_type, asce_bits;
init_mm.pgd = swapper_pg_dir;
- S390_lowcore.kernel_asce = __pa(init_mm.pgd) & PAGE_MASK;
#ifdef CONFIG_64BIT
- /* A three level page table (4TB) is enough for the kernel space. */
- S390_lowcore.kernel_asce |= _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
- pgd_type = _REGION3_ENTRY_EMPTY;
+ if (VMALLOC_END > (1UL << 42)) {
+ asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
+ pgd_type = _REGION2_ENTRY_EMPTY;
+ } else {
+ asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
+ pgd_type = _REGION3_ENTRY_EMPTY;
+ }
#else
- S390_lowcore.kernel_asce |= _ASCE_TABLE_LENGTH;
+ asce_bits = _ASCE_TABLE_LENGTH;
pgd_type = _SEGMENT_ENTRY_EMPTY;
#endif
+ S390_lowcore.kernel_asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
clear_table((unsigned long *) init_mm.pgd, pgd_type,
sizeof(unsigned long)*2048);
vmem_map_init();
@@ -114,123 +124,108 @@ void __init paging_init(void)
__ctl_load(S390_lowcore.kernel_asce, 13, 13);
arch_local_irq_restore(4UL << (BITS_PER_LONG - 8));
- atomic_set(&init_mm.context.attach_count, 1);
-
sparse_memory_present_with_active_regions(MAX_NUMNODES);
sparse_init();
memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
-#ifdef CONFIG_ZONE_DMA
max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
-#endif
max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
free_area_init_nodes(max_zone_pfns);
- fault_init();
}
void __init mem_init(void)
{
- unsigned long codesize, reservedpages, datasize, initsize;
+ if (MACHINE_HAS_TLB_LC)
+ cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
+ cpumask_set_cpu(0, mm_cpumask(&init_mm));
+ atomic_set(&init_mm.context.attach_count, 1);
- max_mapnr = num_physpages = max_low_pfn;
+ max_mapnr = max_low_pfn;
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
/* Setup guest page hinting */
cmma_init();
/* this will put all low memory onto the freelists */
- totalram_pages += free_all_bootmem();
- totalram_pages -= setup_zero_pages(); /* Setup zeroed pages. */
-
- reservedpages = 0;
-
- codesize = (unsigned long) &_etext - (unsigned long) &_text;
- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
- initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
- printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
- nr_free_pages() << (PAGE_SHIFT-10),
- max_mapnr << (PAGE_SHIFT-10),
- codesize >> 10,
- reservedpages << (PAGE_SHIFT-10),
- datasize >>10,
- initsize >> 10);
+ free_all_bootmem();
+ setup_zero_pages(); /* Setup zeroed pages. */
+
+ mem_init_print_info(NULL);
printk("Write protected kernel read-only data: %#lx - %#lx\n",
(unsigned long)&_stext,
PFN_ALIGN((unsigned long)&_eshared) - 1);
}
-#ifdef CONFIG_DEBUG_PAGEALLOC
-void kernel_map_pages(struct page *page, int numpages, int enable)
-{
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
- unsigned long address;
- int i;
-
- for (i = 0; i < numpages; i++) {
- address = page_to_phys(page + i);
- pgd = pgd_offset_k(address);
- pud = pud_offset(pgd, address);
- pmd = pmd_offset(pud, address);
- pte = pte_offset_kernel(pmd, address);
- if (!enable) {
- ptep_invalidate(&init_mm, address, pte);
- continue;
- }
- *pte = mk_pte_phys(address, __pgprot(_PAGE_TYPE_RW));
- /* Flush cpu write queue. */
- mb();
- }
-}
-#endif
-
-void free_init_pages(char *what, unsigned long begin, unsigned long end)
-{
- unsigned long addr = begin;
-
- if (begin >= end)
- return;
- for (; addr < end; addr += PAGE_SIZE) {
- ClearPageReserved(virt_to_page(addr));
- init_page_count(virt_to_page(addr));
- memset((void *)(addr & PAGE_MASK), POISON_FREE_INITMEM,
- PAGE_SIZE);
- free_page(addr);
- totalram_pages++;
- }
- printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
-}
-
void free_initmem(void)
{
- free_init_pages("unused kernel memory",
- (unsigned long)&__init_begin,
- (unsigned long)&__init_end);
+ free_initmem_default(POISON_FREE_INITMEM);
}
#ifdef CONFIG_BLK_DEV_INITRD
-void free_initrd_mem(unsigned long start, unsigned long end)
+void __init free_initrd_mem(unsigned long start, unsigned long end)
{
- free_init_pages("initrd memory", start, end);
+ free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
+ "initrd");
}
#endif
#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size)
{
- struct pglist_data *pgdat;
+ unsigned long zone_start_pfn, zone_end_pfn, nr_pages;
+ unsigned long start_pfn = PFN_DOWN(start);
+ unsigned long size_pages = PFN_DOWN(size);
struct zone *zone;
int rc;
- pgdat = NODE_DATA(nid);
- zone = pgdat->node_zones + ZONE_MOVABLE;
rc = vmem_add_mapping(start, size);
if (rc)
return rc;
- rc = __add_pages(nid, zone, PFN_DOWN(start), PFN_DOWN(size));
+ for_each_zone(zone) {
+ if (zone_idx(zone) != ZONE_MOVABLE) {
+ /* Add range within existing zone limits */
+ zone_start_pfn = zone->zone_start_pfn;
+ zone_end_pfn = zone->zone_start_pfn +
+ zone->spanned_pages;
+ } else {
+ /* Add remaining range to ZONE_MOVABLE */
+ zone_start_pfn = start_pfn;
+ zone_end_pfn = start_pfn + size_pages;
+ }
+ if (start_pfn < zone_start_pfn || start_pfn >= zone_end_pfn)
+ continue;
+ nr_pages = (start_pfn + size_pages > zone_end_pfn) ?
+ zone_end_pfn - start_pfn : size_pages;
+ rc = __add_pages(nid, zone, start_pfn, nr_pages);
+ if (rc)
+ break;
+ start_pfn += nr_pages;
+ size_pages -= nr_pages;
+ if (!size_pages)
+ break;
+ }
if (rc)
vmem_remove_mapping(start, size);
return rc;
}
+
+unsigned long memory_block_size_bytes(void)
+{
+ /*
+	 * Make sure the memory block size is always greater
+	 * than or equal to the memory increment size.
+ */
+ return max_t(unsigned long, MIN_MEMORY_BLOCK_SIZE, sclp_get_rzm());
+}
+
+#ifdef CONFIG_MEMORY_HOTREMOVE
+int arch_remove_memory(u64 start, u64 size)
+{
+ /*
+ * There is no hardware or firmware interface which could trigger a
+ * hot memory remove on s390. So there is nothing that needs to be
+ * implemented.
+ */
+ return -EBUSY;
+}
+#endif
#endif /* CONFIG_MEMORY_HOTPLUG */
diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c
index 71a4b0d34be..2a2e35416d2 100644
--- a/arch/s390/mm/maccess.c
+++ b/arch/s390/mm/maccess.c
@@ -11,7 +11,10 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
-#include <asm/system.h>
+#include <linux/gfp.h>
+#include <linux/cpu.h>
+#include <asm/ctl_reg.h>
+#include <asm/io.h>
/*
* This function writes to kernel memory bypassing DAT and possible
@@ -19,7 +22,7 @@
* using the stura instruction.
* Returns the number of bytes copied or -EFAULT.
*/
-static long probe_kernel_write_odd(void *dst, void *src, size_t size)
+static long probe_kernel_write_odd(void *dst, const void *src, size_t size)
{
unsigned long count, aligned;
int offset, mask;
@@ -45,7 +48,7 @@ static long probe_kernel_write_odd(void *dst, void *src, size_t size)
return rc ? rc : count;
}
-long probe_kernel_write(void *dst, void *src, size_t size)
+long probe_kernel_write(void *dst, const void *src, size_t size)
{
long copied = 0;
@@ -60,18 +63,14 @@ long probe_kernel_write(void *dst, void *src, size_t size)
return copied < 0 ? -EFAULT : 0;
}
-int memcpy_real(void *dest, void *src, size_t count)
+static int __memcpy_real(void *dest, void *src, size_t count)
{
register unsigned long _dest asm("2") = (unsigned long) dest;
register unsigned long _len1 asm("3") = (unsigned long) count;
register unsigned long _src asm("4") = (unsigned long) src;
register unsigned long _len2 asm("5") = (unsigned long) count;
- unsigned long flags;
int rc = -EFAULT;
- if (!count)
- return 0;
- flags = __arch_local_irq_stnsm(0xf8UL);
asm volatile (
"0: mvcle %1,%2,0x0\n"
"1: jo 0b\n"
@@ -82,6 +81,124 @@ int memcpy_real(void *dest, void *src, size_t count)
"+d" (_len2), "=m" (*((long *) dest))
: "m" (*((long *) src))
: "cc", "memory");
+ return rc;
+}
+
+/*
+ * Copy memory in real mode (kernel to kernel)
+ */
+int memcpy_real(void *dest, void *src, size_t count)
+{
+ unsigned long flags;
+ int rc;
+
+ if (!count)
+ return 0;
+ local_irq_save(flags);
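+	/* Clear the DAT bit (PSW system mask bit 5) so the copy uses real addresses */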
+ __arch_local_irq_stnsm(0xfbUL);
+ rc = __memcpy_real(dest, src, count);
+ local_irq_restore(flags);
+ return rc;
+}
+
+/*
+ * Copy memory in absolute mode (kernel to kernel)
+ */
+void memcpy_absolute(void *dest, void *src, size_t count)
+{
+ unsigned long cr0, flags, prefix;
+
+ flags = arch_local_irq_save();
+ __ctl_store(cr0, 0, 0);
+ __ctl_clear_bit(0, 28); /* disable lowcore protection */
+ prefix = store_prefix();
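+	/*
+	 * With the prefix register set to 0, real and absolute addresses
+	 * coincide, so the plain memcpy below operates on absolute storage.
+	 */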
+ if (prefix) {
+ local_mcck_disable();
+ set_prefix(0);
+ memcpy(dest, src, count);
+ set_prefix(prefix);
+ local_mcck_enable();
+ } else {
+ memcpy(dest, src, count);
+ }
+ __ctl_load(cr0, 0, 0);
arch_local_irq_restore(flags);
+}
+
+/*
+ * Copy memory from kernel (real) to user (virtual)
+ */
+int copy_to_user_real(void __user *dest, void *src, unsigned long count)
+{
+ int offs = 0, size, rc;
+ char *buf;
+
+ buf = (char *) __get_free_page(GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ rc = -EFAULT;
+ while (offs < count) {
+ size = min(PAGE_SIZE, count - offs);
+ if (memcpy_real(buf, src + offs, size))
+ goto out;
+ if (copy_to_user(dest + offs, buf, size))
+ goto out;
+ offs += size;
+ }
+ rc = 0;
+out:
+ free_page((unsigned long) buf);
return rc;
}
+
+/*
+ * Check if physical address is within prefix or zero page
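+ *
+ * Prefixing swaps the lowcore-sized block at absolute address zero with
+ * each CPU's prefix area, so accesses to either range would hit the wrong
+ * memory when done through a normal mapping.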
+ */
+static int is_swapped(unsigned long addr)
+{
+ unsigned long lc;
+ int cpu;
+
+ if (addr < sizeof(struct _lowcore))
+ return 1;
+ for_each_online_cpu(cpu) {
+ lc = (unsigned long) lowcore_ptr[cpu];
+ if (addr > lc + sizeof(struct _lowcore) - 1 || addr < lc)
+ continue;
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * Convert a physical pointer for /dev/mem access
+ *
+ * For swapped prefix pages a new buffer is returned that contains a copy of
+ * the absolute memory. The buffer is at most one page in size.
+ */
+void *xlate_dev_mem_ptr(unsigned long addr)
+{
+ void *bounce = (void *) addr;
+ unsigned long size;
+
+ get_online_cpus();
+ preempt_disable();
+ if (is_swapped(addr)) {
+ size = PAGE_SIZE - (addr & ~PAGE_MASK);
+ bounce = (void *) __get_free_page(GFP_ATOMIC);
+ if (bounce)
+ memcpy_absolute(bounce, (void *) addr, size);
+ }
+ preempt_enable();
+ put_online_cpus();
+ return bounce;
+}
+
+/*
+ * Free converted buffer for /dev/mem access (if necessary)
+ */
+void unxlate_dev_mem_ptr(unsigned long addr, void *buf)
+{
+ if ((void *) addr != buf)
+ free_page((unsigned long) buf);
+}
diff --git a/arch/s390/mm/mem_detect.c b/arch/s390/mm/mem_detect.c
new file mode 100644
index 00000000000..5535cfe0ee1
--- /dev/null
+++ b/arch/s390/mm/mem_detect.c
@@ -0,0 +1,65 @@
+/*
+ * Copyright IBM Corp. 2008, 2009
+ *
+ * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/memblock.h>
+#include <linux/init.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <asm/ipl.h>
+#include <asm/sclp.h>
+#include <asm/setup.h>
+
+#define ADDR2G (1ULL << 31)
+
+#define CHUNK_READ_WRITE 0
+#define CHUNK_READ_ONLY 1
+
+static inline void memblock_physmem_add(phys_addr_t start, phys_addr_t size)
+{
+ memblock_add_range(&memblock.memory, start, size, 0, 0);
+ memblock_add_range(&memblock.physmem, start, size, 0, 0);
+}
+
+void __init detect_memory_memblock(void)
+{
+ unsigned long long memsize, rnmax, rzm;
+ unsigned long addr, size;
+ int type;
+
+ rzm = sclp_get_rzm();
+ rnmax = sclp_get_rnmax();
+ memsize = rzm * rnmax;
+ if (!rzm)
+ rzm = 1ULL << 17;
+ if (IS_ENABLED(CONFIG_32BIT)) {
+ rzm = min(ADDR2G, rzm);
+ memsize = min(ADDR2G, memsize);
+ }
+ max_physmem_end = memsize;
+ addr = 0;
+ /* keep memblock lists close to the kernel */
+ memblock_set_bottom_up(true);
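+	/*
+	 * Probe storage one increment (rzm) at a time with tprot() and merge
+	 * consecutive increments that report the same access state into a
+	 * single chunk before registering it with memblock.
+	 */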
+ do {
+ size = 0;
+ type = tprot(addr);
+ do {
+ size += rzm;
+ if (max_physmem_end && addr + size >= max_physmem_end)
+ break;
+ } while (type == tprot(addr + size));
+ if (type == CHUNK_READ_WRITE || type == CHUNK_READ_ONLY) {
+ if (max_physmem_end && (addr + size > max_physmem_end))
+ size = max_physmem_end - addr;
+ memblock_physmem_add(addr, size);
+ }
+ addr += size;
+ } while (addr < max_physmem_end);
+ memblock_set_bottom_up(false);
+ if (!max_physmem_end)
+ max_physmem_end = memblock_end_of_DRAM();
+}
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index 869efbaed3e..9b436c21195 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -1,6 +1,4 @@
/*
- * linux/arch/s390/mm/mmap.c
- *
* flexible mmap layout support
*
* Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
@@ -26,18 +24,51 @@
#include <linux/personality.h>
#include <linux/mm.h>
+#include <linux/mman.h>
#include <linux/module.h>
+#include <linux/random.h>
+#include <linux/compat.h>
#include <asm/pgalloc.h>
-#include <asm/compat.h>
+
+static unsigned long stack_maxrandom_size(void)
+{
+ if (!(current->flags & PF_RANDOMIZE))
+ return 0;
+ if (current->personality & ADDR_NO_RANDOMIZE)
+ return 0;
+ return STACK_RND_MASK << PAGE_SHIFT;
+}
/*
* Top of mmap area (just below the process stack).
*
- * Leave an at least ~128 MB hole.
+ * Leave at least a ~32 MB hole.
*/
-#define MIN_GAP (128*1024*1024)
+#define MIN_GAP (32*1024*1024)
#define MAX_GAP (STACK_TOP/6*5)
+static inline int mmap_is_legacy(void)
+{
+ if (current->personality & ADDR_COMPAT_LAYOUT)
+ return 1;
+ if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
+ return 1;
+ return sysctl_legacy_va_layout;
+}
+
+static unsigned long mmap_rnd(void)
+{
+ if (!(current->flags & PF_RANDOMIZE))
+ return 0;
+ /* 8MB randomization for mmap_base */
+ return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
+}
+
+static unsigned long mmap_base_legacy(void)
+{
+ return TASK_UNMAPPED_BASE + mmap_rnd();
+}
+
static inline unsigned long mmap_base(void)
{
unsigned long gap = rlimit(RLIMIT_STACK);
@@ -46,22 +77,8 @@ static inline unsigned long mmap_base(void)
gap = MIN_GAP;
else if (gap > MAX_GAP)
gap = MAX_GAP;
-
- return STACK_TOP - (gap & PAGE_MASK);
-}
-
-static inline int mmap_is_legacy(void)
-{
-#ifdef CONFIG_64BIT
- /*
- * Force standard allocation for 64 bit programs.
- */
- if (!is_compat_task())
- return 1;
-#endif
- return sysctl_legacy_va_layout ||
- (current->personality & ADDR_COMPAT_LAYOUT) ||
- rlimit(RLIMIT_STACK) == RLIM_INFINITY;
+ gap &= PAGE_MASK;
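+	/*
+	 * Place mmap_base below STACK_TOP, leaving room for the maximum stack
+	 * and mmap randomization on top of the rlimit-sized gap.
+	 */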
+ return STACK_TOP - stack_maxrandom_size() - mmap_rnd() - gap;
}
#ifndef CONFIG_64BIT
@@ -77,23 +94,23 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
* bit is set, or if the expected stack growth is unlimited:
*/
if (mmap_is_legacy()) {
- mm->mmap_base = TASK_UNMAPPED_BASE;
+ mm->mmap_base = mmap_base_legacy();
mm->get_unmapped_area = arch_get_unmapped_area;
- mm->unmap_area = arch_unmap_area;
} else {
mm->mmap_base = mmap_base();
mm->get_unmapped_area = arch_get_unmapped_area_topdown;
- mm->unmap_area = arch_unmap_area_topdown;
}
}
-EXPORT_SYMBOL_GPL(arch_pick_mmap_layout);
#else
-int s390_mmap_check(unsigned long addr, unsigned long len)
+int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
{
- if (!is_compat_task() &&
- len >= TASK_SIZE && TASK_SIZE < (1UL << 53))
+ if (is_compat_task() || (TASK_SIZE >= (1UL << 53)))
+ return 0;
+ if (!(flags & MAP_FIXED))
+ addr = 0;
+ if ((addr + len) >= TASK_SIZE)
return crst_table_upgrade(current->mm, 1UL << 53);
return 0;
}
@@ -152,15 +169,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
* bit is set, or if the expected stack growth is unlimited:
*/
if (mmap_is_legacy()) {
- mm->mmap_base = TASK_UNMAPPED_BASE;
+ mm->mmap_base = mmap_base_legacy();
mm->get_unmapped_area = s390_get_unmapped_area;
- mm->unmap_area = arch_unmap_area;
} else {
mm->mmap_base = mmap_base();
mm->get_unmapped_area = s390_get_unmapped_area_topdown;
- mm->unmap_area = arch_unmap_area_topdown;
}
}
-EXPORT_SYMBOL_GPL(arch_pick_mmap_layout);
#endif
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
new file mode 100644
index 00000000000..8400f494623
--- /dev/null
+++ b/arch/s390/mm/pageattr.c
@@ -0,0 +1,146 @@
+/*
+ * Copyright IBM Corp. 2011
+ * Author(s): Jan Glauber <jang@linux.vnet.ibm.com>
+ */
+#include <linux/hugetlb.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <asm/cacheflush.h>
+#include <asm/pgtable.h>
+#include <asm/page.h>
+
+#if PAGE_DEFAULT_KEY
+static inline unsigned long sske_frame(unsigned long addr, unsigned char skey)
+{
+ asm volatile(".insn rrf,0xb22b0000,%[skey],%[addr],9,0"
+ : [addr] "+a" (addr) : [skey] "d" (skey));
+ return addr;
+}
+
+void __storage_key_init_range(unsigned long start, unsigned long end)
+{
+ unsigned long boundary, size;
+
+ while (start < end) {
+ if (MACHINE_HAS_EDAT1) {
+ /* set storage keys for a 1MB frame */
+ size = 1UL << 20;
+ boundary = (start + size) & ~(size - 1);
+ if (boundary <= end) {
+ do {
+ start = sske_frame(start, PAGE_DEFAULT_KEY);
+ } while (start < boundary);
+ continue;
+ }
+ }
+ page_set_storage_key(start, PAGE_DEFAULT_KEY, 0);
+ start += PAGE_SIZE;
+ }
+}
+#endif
+
+static pte_t *walk_page_table(unsigned long addr)
+{
+ pgd_t *pgdp;
+ pud_t *pudp;
+ pmd_t *pmdp;
+ pte_t *ptep;
+
+ pgdp = pgd_offset_k(addr);
+ if (pgd_none(*pgdp))
+ return NULL;
+ pudp = pud_offset(pgdp, addr);
+ if (pud_none(*pudp) || pud_large(*pudp))
+ return NULL;
+ pmdp = pmd_offset(pudp, addr);
+ if (pmd_none(*pmdp) || pmd_large(*pmdp))
+ return NULL;
+ ptep = pte_offset_kernel(pmdp, addr);
+ if (pte_none(*ptep))
+ return NULL;
+ return ptep;
+}
+
+static void change_page_attr(unsigned long addr, int numpages,
+ pte_t (*set) (pte_t))
+{
+ pte_t *ptep, pte;
+ int i;
+
+ for (i = 0; i < numpages; i++) {
+ ptep = walk_page_table(addr);
+ if (WARN_ON_ONCE(!ptep))
+ break;
+ pte = *ptep;
+ pte = set(pte);
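+		/*
+		 * Invalidate the old entry (and flush the TLB) with IPTE before
+		 * storing the modified pte; a valid, attached pte must not
+		 * simply be overwritten in place.
+		 */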
+ __ptep_ipte(addr, ptep);
+ *ptep = pte;
+ addr += PAGE_SIZE;
+ }
+}
+
+int set_memory_ro(unsigned long addr, int numpages)
+{
+ change_page_attr(addr, numpages, pte_wrprotect);
+ return 0;
+}
+
+int set_memory_rw(unsigned long addr, int numpages)
+{
+ change_page_attr(addr, numpages, pte_mkwrite);
+ return 0;
+}
+
+/* not possible: s390 has no per-page execute protection, so these are no-ops */
+int set_memory_nx(unsigned long addr, int numpages)
+{
+ return 0;
+}
+
+int set_memory_x(unsigned long addr, int numpages)
+{
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+void kernel_map_pages(struct page *page, int numpages, int enable)
+{
+ unsigned long address;
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+ int i;
+
+ for (i = 0; i < numpages; i++) {
+ address = page_to_phys(page + i);
+ pgd = pgd_offset_k(address);
+ pud = pud_offset(pgd, address);
+ pmd = pmd_offset(pud, address);
+ pte = pte_offset_kernel(pmd, address);
+ if (!enable) {
+ __ptep_ipte(address, pte);
+ pte_val(*pte) = _PAGE_INVALID;
+ continue;
+ }
+ pte_val(*pte) = __pa(address);
+ }
+}
+
+#ifdef CONFIG_HIBERNATION
+bool kernel_page_present(struct page *page)
+{
+ unsigned long addr;
+ int cc;
+
+ addr = page_to_phys(page);
+ asm volatile(
+ " lra %1,0(%1)\n"
+ " ipm %0\n"
+ " srl %0,28"
+ : "=d" (cc), "+a" (addr) : : "cc");
+ return cc == 0;
+}
+#endif /* CONFIG_HIBERNATION */
+
+#endif /* CONFIG_DEBUG_PAGEALLOC */
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 0c719c61972..37b8241ec78 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -1,5 +1,5 @@
/*
- * Copyright IBM Corp. 2007,2009
+ * Copyright IBM Corp. 2007, 2011
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
@@ -16,188 +16,60 @@
#include <linux/module.h>
#include <linux/quicklist.h>
#include <linux/rcupdate.h>
+#include <linux/slab.h>
+#include <linux/swapops.h>
-#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
-struct rcu_table_freelist {
- struct rcu_head rcu;
- struct mm_struct *mm;
- unsigned int pgt_index;
- unsigned int crst_index;
- unsigned long *table[0];
-};
-
-#define RCU_FREELIST_SIZE \
- ((PAGE_SIZE - sizeof(struct rcu_table_freelist)) \
- / sizeof(unsigned long))
-
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-static DEFINE_PER_CPU(struct rcu_table_freelist *, rcu_table_freelist);
-
-static void __page_table_free(struct mm_struct *mm, unsigned long *table);
-static void __crst_table_free(struct mm_struct *mm, unsigned long *table);
-
-static struct rcu_table_freelist *rcu_table_freelist_get(struct mm_struct *mm)
-{
- struct rcu_table_freelist **batchp = &__get_cpu_var(rcu_table_freelist);
- struct rcu_table_freelist *batch = *batchp;
-
- if (batch)
- return batch;
- batch = (struct rcu_table_freelist *) __get_free_page(GFP_ATOMIC);
- if (batch) {
- batch->mm = mm;
- batch->pgt_index = 0;
- batch->crst_index = RCU_FREELIST_SIZE;
- *batchp = batch;
- }
- return batch;
-}
-
-static void rcu_table_freelist_callback(struct rcu_head *head)
-{
- struct rcu_table_freelist *batch =
- container_of(head, struct rcu_table_freelist, rcu);
-
- while (batch->pgt_index > 0)
- __page_table_free(batch->mm, batch->table[--batch->pgt_index]);
- while (batch->crst_index < RCU_FREELIST_SIZE)
- __crst_table_free(batch->mm, batch->table[batch->crst_index++]);
- free_page((unsigned long) batch);
-}
-
-void rcu_table_freelist_finish(void)
-{
- struct rcu_table_freelist *batch = __get_cpu_var(rcu_table_freelist);
-
- if (!batch)
- return;
- call_rcu(&batch->rcu, rcu_table_freelist_callback);
- __get_cpu_var(rcu_table_freelist) = NULL;
-}
-
-static void smp_sync(void *arg)
-{
-}
-
#ifndef CONFIG_64BIT
#define ALLOC_ORDER 1
-#define TABLES_PER_PAGE 4
-#define FRAG_MASK 15UL
-#define SECOND_HALVES 10UL
-
-void clear_table_pgstes(unsigned long *table)
-{
- clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/4);
- memset(table + 256, 0, PAGE_SIZE/4);
- clear_table(table + 512, _PAGE_TYPE_EMPTY, PAGE_SIZE/4);
- memset(table + 768, 0, PAGE_SIZE/4);
-}
-
+#define FRAG_MASK 0x0f
#else
#define ALLOC_ORDER 2
-#define TABLES_PER_PAGE 2
-#define FRAG_MASK 3UL
-#define SECOND_HALVES 2UL
-
-void clear_table_pgstes(unsigned long *table)
-{
- clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2);
- memset(table + 256, 0, PAGE_SIZE/2);
-}
-
+#define FRAG_MASK 0x03
#endif
-unsigned long VMALLOC_START = VMALLOC_END - VMALLOC_SIZE;
-EXPORT_SYMBOL(VMALLOC_START);
-static int __init parse_vmalloc(char *arg)
-{
- if (!arg)
- return -EINVAL;
- VMALLOC_START = (VMALLOC_END - memparse(arg, &arg)) & PAGE_MASK;
- return 0;
-}
-early_param("vmalloc", parse_vmalloc);
-
-unsigned long *crst_table_alloc(struct mm_struct *mm, int noexec)
+unsigned long *crst_table_alloc(struct mm_struct *mm)
{
struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
if (!page)
return NULL;
- page->index = 0;
- if (noexec) {
- struct page *shadow = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
- if (!shadow) {
- __free_pages(page, ALLOC_ORDER);
- return NULL;
- }
- page->index = page_to_phys(shadow);
- }
- spin_lock_bh(&mm->context.list_lock);
- list_add(&page->lru, &mm->context.crst_list);
- spin_unlock_bh(&mm->context.list_lock);
return (unsigned long *) page_to_phys(page);
}
-static void __crst_table_free(struct mm_struct *mm, unsigned long *table)
-{
- unsigned long *shadow = get_shadow_table(table);
-
- if (shadow)
- free_pages((unsigned long) shadow, ALLOC_ORDER);
- free_pages((unsigned long) table, ALLOC_ORDER);
-}
-
void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
- struct page *page = virt_to_page(table);
-
- spin_lock_bh(&mm->context.list_lock);
- list_del(&page->lru);
- spin_unlock_bh(&mm->context.list_lock);
- __crst_table_free(mm, table);
+ free_pages((unsigned long) table, ALLOC_ORDER);
}
-void crst_table_free_rcu(struct mm_struct *mm, unsigned long *table)
+#ifdef CONFIG_64BIT
+static void __crst_table_upgrade(void *arg)
{
- struct rcu_table_freelist *batch;
- struct page *page = virt_to_page(table);
+ struct mm_struct *mm = arg;
- spin_lock_bh(&mm->context.list_lock);
- list_del(&page->lru);
- spin_unlock_bh(&mm->context.list_lock);
- if (atomic_read(&mm->mm_users) < 2 &&
- cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
- __crst_table_free(mm, table);
- return;
+ if (current->active_mm == mm) {
+ clear_user_asce();
+ set_user_asce(mm);
}
- batch = rcu_table_freelist_get(mm);
- if (!batch) {
- smp_call_function(smp_sync, NULL, 1);
- __crst_table_free(mm, table);
- return;
- }
- batch->table[--batch->crst_index] = table;
- if (batch->pgt_index >= batch->crst_index)
- rcu_table_freelist_finish();
+ __tlb_flush_local();
}
-#ifdef CONFIG_64BIT
int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
{
unsigned long *table, *pgd;
unsigned long entry;
+ int flush;
BUG_ON(limit > (1UL << 53));
+ flush = 0;
repeat:
- table = crst_table_alloc(mm, mm->context.noexec);
+ table = crst_table_alloc(mm);
if (!table)
return -ENOMEM;
spin_lock_bh(&mm->page_table_lock);
@@ -221,13 +93,15 @@ repeat:
mm->pgd = (pgd_t *) table;
mm->task_size = mm->context.asce_limit;
table = NULL;
+ flush = 1;
}
spin_unlock_bh(&mm->page_table_lock);
if (table)
crst_table_free(mm, table);
if (mm->context.asce_limit < limit)
goto repeat;
- update_mm(mm, current);
+ if (flush)
+ on_each_cpu(__crst_table_upgrade, mm, 0);
return 0;
}
@@ -235,9 +109,10 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
{
pgd_t *pgd;
- if (mm->context.asce_limit <= limit)
- return;
- __tlb_flush_mm(mm);
+ if (current->active_mm == mm) {
+ clear_user_asce();
+ __tlb_flush_mm(mm);
+ }
while (mm->context.asce_limit > limit) {
pgd = mm->pgd;
switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
@@ -260,141 +135,1228 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
mm->task_size = mm->context.asce_limit;
crst_table_free(mm, (unsigned long *) pgd);
}
- update_mm(mm, current);
+ if (current->active_mm == mm)
+ set_user_asce(mm);
}
#endif
+#ifdef CONFIG_PGSTE
+
+/**
+ * gmap_alloc - allocate a guest address space
+ * @mm: pointer to the parent mm_struct
+ *
+ * Returns a guest address space structure or NULL if the allocation failed.
+ */
+struct gmap *gmap_alloc(struct mm_struct *mm)
+{
+ struct gmap *gmap;
+ struct page *page;
+ unsigned long *table;
+
+ gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
+ if (!gmap)
+ goto out;
+ INIT_LIST_HEAD(&gmap->crst_list);
+ gmap->mm = mm;
+ page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
+ if (!page)
+ goto out_free;
+ list_add(&page->lru, &gmap->crst_list);
+ table = (unsigned long *) page_to_phys(page);
+ crst_table_init(table, _REGION1_ENTRY_EMPTY);
+ gmap->table = table;
+ gmap->asce = _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH |
+ _ASCE_USER_BITS | __pa(table);
+ list_add(&gmap->list, &mm->context.gmap_list);
+ return gmap;
+
+out_free:
+ kfree(gmap);
+out:
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(gmap_alloc);
+
+static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table)
+{
+ struct gmap_pgtable *mp;
+ struct gmap_rmap *rmap;
+ struct page *page;
+
+ if (*table & _SEGMENT_ENTRY_INVALID)
+ return 0;
+ page = pfn_to_page(*table >> PAGE_SHIFT);
+ mp = (struct gmap_pgtable *) page->index;
+ list_for_each_entry(rmap, &mp->mapper, list) {
+ if (rmap->entry != table)
+ continue;
+ list_del(&rmap->list);
+ kfree(rmap);
+ break;
+ }
+ *table = mp->vmaddr | _SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_PROTECT;
+ return 1;
+}
+
+static void gmap_flush_tlb(struct gmap *gmap)
+{
+ if (MACHINE_HAS_IDTE)
+ __tlb_flush_asce(gmap->mm, (unsigned long) gmap->table |
+ _ASCE_TYPE_REGION1);
+ else
+ __tlb_flush_global();
+}
+
+/**
+ * gmap_free - free a guest address space
+ * @gmap: pointer to the guest address space structure
+ */
+void gmap_free(struct gmap *gmap)
+{
+ struct page *page, *next;
+ unsigned long *table;
+ int i;
+
+ /* Flush tlb. */
+ if (MACHINE_HAS_IDTE)
+ __tlb_flush_asce(gmap->mm, (unsigned long) gmap->table |
+ _ASCE_TYPE_REGION1);
+ else
+ __tlb_flush_global();
+
+ /* Free all segment & region tables. */
+ down_read(&gmap->mm->mmap_sem);
+ spin_lock(&gmap->mm->page_table_lock);
+ list_for_each_entry_safe(page, next, &gmap->crst_list, lru) {
+ table = (unsigned long *) page_to_phys(page);
+ if ((*table & _REGION_ENTRY_TYPE_MASK) == 0)
+ /* Remove gmap rmap structures for segment table. */
+ for (i = 0; i < PTRS_PER_PMD; i++, table++)
+ gmap_unlink_segment(gmap, table);
+ __free_pages(page, ALLOC_ORDER);
+ }
+ spin_unlock(&gmap->mm->page_table_lock);
+ up_read(&gmap->mm->mmap_sem);
+ list_del(&gmap->list);
+ kfree(gmap);
+}
+EXPORT_SYMBOL_GPL(gmap_free);
+
+/**
+ * gmap_enable - switch primary space to the guest address space
+ * @gmap: pointer to the guest address space structure
+ */
+void gmap_enable(struct gmap *gmap)
+{
+ S390_lowcore.gmap = (unsigned long) gmap;
+}
+EXPORT_SYMBOL_GPL(gmap_enable);
+
+/**
+ * gmap_disable - switch back to the standard primary address space
+ * @gmap: pointer to the guest address space structure
+ */
+void gmap_disable(struct gmap *gmap)
+{
+ S390_lowcore.gmap = 0UL;
+}
+EXPORT_SYMBOL_GPL(gmap_disable);
+
/*
- * page table entry allocation/free routines.
+ * gmap_alloc_table is assumed to be called with mmap_sem held
+ */
+static int gmap_alloc_table(struct gmap *gmap,
+ unsigned long *table, unsigned long init)
+ __releases(&gmap->mm->page_table_lock)
+ __acquires(&gmap->mm->page_table_lock)
+{
+ struct page *page;
+ unsigned long *new;
+
+	/* Since we don't free the gmap table until gmap_free() we can unlock */
+ spin_unlock(&gmap->mm->page_table_lock);
+ page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
+ spin_lock(&gmap->mm->page_table_lock);
+ if (!page)
+ return -ENOMEM;
+ new = (unsigned long *) page_to_phys(page);
+ crst_table_init(new, init);
+ if (*table & _REGION_ENTRY_INVALID) {
+ list_add(&page->lru, &gmap->crst_list);
+ *table = (unsigned long) new | _REGION_ENTRY_LENGTH |
+ (*table & _REGION_ENTRY_TYPE_MASK);
+ } else
+ __free_pages(page, ALLOC_ORDER);
+ return 0;
+}
+
+/**
+ * gmap_unmap_segment - unmap segment from the guest address space
+ * @gmap: pointer to the guest address space structure
+ * @to: address in the guest address space
+ * @len: length of the memory area to unmap
+ *
+ * Returns 0 if the unmap succeeded, -EINVAL if not.
+ */
+int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
+{
+ unsigned long *table;
+ unsigned long off;
+ int flush;
+
+ if ((to | len) & (PMD_SIZE - 1))
+ return -EINVAL;
+ if (len == 0 || to + len < to)
+ return -EINVAL;
+
+ flush = 0;
+ down_read(&gmap->mm->mmap_sem);
+ spin_lock(&gmap->mm->page_table_lock);
+ for (off = 0; off < len; off += PMD_SIZE) {
+ /* Walk the guest addr space page table */
+ table = gmap->table + (((to + off) >> 53) & 0x7ff);
+ if (*table & _REGION_ENTRY_INVALID)
+ goto out;
+ table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+ table = table + (((to + off) >> 42) & 0x7ff);
+ if (*table & _REGION_ENTRY_INVALID)
+ goto out;
+ table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+ table = table + (((to + off) >> 31) & 0x7ff);
+ if (*table & _REGION_ENTRY_INVALID)
+ goto out;
+ table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+ table = table + (((to + off) >> 20) & 0x7ff);
+
+ /* Clear segment table entry in guest address space. */
+ flush |= gmap_unlink_segment(gmap, table);
+ *table = _SEGMENT_ENTRY_INVALID;
+ }
+out:
+ spin_unlock(&gmap->mm->page_table_lock);
+ up_read(&gmap->mm->mmap_sem);
+ if (flush)
+ gmap_flush_tlb(gmap);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(gmap_unmap_segment);
+
+/**
+ * gmap_map_segment - map a segment to the guest address space
+ * @gmap: pointer to the guest address space structure
+ * @from: source address in the parent address space
+ * @to: target address in the guest address space
+ * @len: length of the memory area to map
+ *
+ * Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not.
+ */
+int gmap_map_segment(struct gmap *gmap, unsigned long from,
+ unsigned long to, unsigned long len)
+{
+ unsigned long *table;
+ unsigned long off;
+ int flush;
+
+ if ((from | to | len) & (PMD_SIZE - 1))
+ return -EINVAL;
+ if (len == 0 || from + len > TASK_MAX_SIZE ||
+ from + len < from || to + len < to)
+ return -EINVAL;
+
+ flush = 0;
+ down_read(&gmap->mm->mmap_sem);
+ spin_lock(&gmap->mm->page_table_lock);
+ for (off = 0; off < len; off += PMD_SIZE) {
+ /* Walk the gmap address space page table */
+ table = gmap->table + (((to + off) >> 53) & 0x7ff);
+ if ((*table & _REGION_ENTRY_INVALID) &&
+ gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY))
+ goto out_unmap;
+ table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+ table = table + (((to + off) >> 42) & 0x7ff);
+ if ((*table & _REGION_ENTRY_INVALID) &&
+ gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY))
+ goto out_unmap;
+ table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+ table = table + (((to + off) >> 31) & 0x7ff);
+ if ((*table & _REGION_ENTRY_INVALID) &&
+ gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY))
+ goto out_unmap;
+ table = (unsigned long *) (*table & _REGION_ENTRY_ORIGIN);
+ table = table + (((to + off) >> 20) & 0x7ff);
+
+ /* Store 'from' address in an invalid segment table entry. */
+ flush |= gmap_unlink_segment(gmap, table);
+ *table = (from + off) | (_SEGMENT_ENTRY_INVALID |
+ _SEGMENT_ENTRY_PROTECT);
+ }
+ spin_unlock(&gmap->mm->page_table_lock);
+ up_read(&gmap->mm->mmap_sem);
+ if (flush)
+ gmap_flush_tlb(gmap);
+ return 0;
+
+out_unmap:
+ spin_unlock(&gmap->mm->page_table_lock);
+ up_read(&gmap->mm->mmap_sem);
+ gmap_unmap_segment(gmap, to, len);
+ return -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(gmap_map_segment);
+
+static unsigned long *gmap_table_walk(unsigned long address, struct gmap *gmap)
+{
+ unsigned long *table;
+
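+	/*
+	 * Walk the four-level guest table: the region-first, region-second,
+	 * region-third and segment table indexes are 11 address bits each
+	 * (2048 entries per table); a segment entry maps 1 MB of guest memory.
+	 */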
+ table = gmap->table + ((address >> 53) & 0x7ff);
+ if (unlikely(*table & _REGION_ENTRY_INVALID))
+ return ERR_PTR(-EFAULT);
+ table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+ table = table + ((address >> 42) & 0x7ff);
+ if (unlikely(*table & _REGION_ENTRY_INVALID))
+ return ERR_PTR(-EFAULT);
+ table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+ table = table + ((address >> 31) & 0x7ff);
+ if (unlikely(*table & _REGION_ENTRY_INVALID))
+ return ERR_PTR(-EFAULT);
+ table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+ table = table + ((address >> 20) & 0x7ff);
+ return table;
+}
+
+/**
+ * __gmap_translate - translate a guest address to a user space address
+ * @address: guest address
+ * @gmap: pointer to guest mapping meta data structure
+ *
+ * Returns user space address which corresponds to the guest address or
+ * -EFAULT if no such mapping exists.
+ * This function does not establish potentially missing page table entries.
+ * The mmap_sem of the mm that belongs to the address space must be held
+ * when this function gets called.
+ */
+unsigned long __gmap_translate(unsigned long address, struct gmap *gmap)
+{
+ unsigned long *segment_ptr, vmaddr, segment;
+ struct gmap_pgtable *mp;
+ struct page *page;
+
+ current->thread.gmap_addr = address;
+ segment_ptr = gmap_table_walk(address, gmap);
+ if (IS_ERR(segment_ptr))
+ return PTR_ERR(segment_ptr);
+ /* Convert the gmap address to an mm address. */
+ segment = *segment_ptr;
+ if (!(segment & _SEGMENT_ENTRY_INVALID)) {
+ page = pfn_to_page(segment >> PAGE_SHIFT);
+ mp = (struct gmap_pgtable *) page->index;
+ return mp->vmaddr | (address & ~PMD_MASK);
+ } else if (segment & _SEGMENT_ENTRY_PROTECT) {
+ vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
+ return vmaddr | (address & ~PMD_MASK);
+ }
+ return -EFAULT;
+}
+EXPORT_SYMBOL_GPL(__gmap_translate);
+
+/**
+ * gmap_translate - translate a guest address to a user space address
+ * @address: guest address
+ * @gmap: pointer to guest mapping meta data structure
+ *
+ * Returns user space address which corresponds to the guest address or
+ * -EFAULT if no such mapping exists.
+ * This function does not establish potentially missing page table entries.
+ */
+unsigned long gmap_translate(unsigned long address, struct gmap *gmap)
+{
+ unsigned long rc;
+
+ down_read(&gmap->mm->mmap_sem);
+ rc = __gmap_translate(address, gmap);
+ up_read(&gmap->mm->mmap_sem);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(gmap_translate);
+
+static int gmap_connect_pgtable(unsigned long address, unsigned long segment,
+ unsigned long *segment_ptr, struct gmap *gmap)
+{
+ unsigned long vmaddr;
+ struct vm_area_struct *vma;
+ struct gmap_pgtable *mp;
+ struct gmap_rmap *rmap;
+ struct mm_struct *mm;
+ struct page *page;
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+
+ mm = gmap->mm;
+ vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
+ vma = find_vma(mm, vmaddr);
+ if (!vma || vma->vm_start > vmaddr)
+ return -EFAULT;
+ /* Walk the parent mm page table */
+ pgd = pgd_offset(mm, vmaddr);
+ pud = pud_alloc(mm, pgd, vmaddr);
+ if (!pud)
+ return -ENOMEM;
+ pmd = pmd_alloc(mm, pud, vmaddr);
+ if (!pmd)
+ return -ENOMEM;
+ if (!pmd_present(*pmd) &&
+ __pte_alloc(mm, vma, pmd, vmaddr))
+ return -ENOMEM;
+ /* large pmds cannot yet be handled */
+ if (pmd_large(*pmd))
+ return -EFAULT;
+ /* pmd now points to a valid segment table entry. */
+ rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT);
+ if (!rmap)
+ return -ENOMEM;
+ /* Link gmap segment table entry location to page table. */
+ page = pmd_page(*pmd);
+ mp = (struct gmap_pgtable *) page->index;
+ rmap->gmap = gmap;
+ rmap->entry = segment_ptr;
+ rmap->vmaddr = address & PMD_MASK;
+ spin_lock(&mm->page_table_lock);
+ if (*segment_ptr == segment) {
+ list_add(&rmap->list, &mp->mapper);
+ /* Set gmap segment table entry to page table. */
+ *segment_ptr = pmd_val(*pmd) & PAGE_MASK;
+ rmap = NULL;
+ }
+ spin_unlock(&mm->page_table_lock);
+ kfree(rmap);
+ return 0;
+}
+
+static void gmap_disconnect_pgtable(struct mm_struct *mm, unsigned long *table)
+{
+ struct gmap_rmap *rmap, *next;
+ struct gmap_pgtable *mp;
+ struct page *page;
+ int flush;
+
+ flush = 0;
+ spin_lock(&mm->page_table_lock);
+ page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
+ mp = (struct gmap_pgtable *) page->index;
+ list_for_each_entry_safe(rmap, next, &mp->mapper, list) {
+ *rmap->entry = mp->vmaddr | (_SEGMENT_ENTRY_INVALID |
+ _SEGMENT_ENTRY_PROTECT);
+ list_del(&rmap->list);
+ kfree(rmap);
+ flush = 1;
+ }
+ spin_unlock(&mm->page_table_lock);
+ if (flush)
+ __tlb_flush_global();
+}
+
+/*
+ * this function is assumed to be called with mmap_sem held
+ */
+unsigned long __gmap_fault(unsigned long address, struct gmap *gmap)
+{
+ unsigned long *segment_ptr, segment;
+ struct gmap_pgtable *mp;
+ struct page *page;
+ int rc;
+
+ current->thread.gmap_addr = address;
+ segment_ptr = gmap_table_walk(address, gmap);
+ if (IS_ERR(segment_ptr))
+ return -EFAULT;
+ /* Convert the gmap address to an mm address. */
+ while (1) {
+ segment = *segment_ptr;
+ if (!(segment & _SEGMENT_ENTRY_INVALID)) {
+ /* Page table is present */
+ page = pfn_to_page(segment >> PAGE_SHIFT);
+ mp = (struct gmap_pgtable *) page->index;
+ return mp->vmaddr | (address & ~PMD_MASK);
+ }
+ if (!(segment & _SEGMENT_ENTRY_PROTECT))
+ /* Nothing mapped in the gmap address space. */
+ break;
+ rc = gmap_connect_pgtable(address, segment, segment_ptr, gmap);
+ if (rc)
+ return rc;
+ }
+ return -EFAULT;
+}
+
+unsigned long gmap_fault(unsigned long address, struct gmap *gmap)
+{
+ unsigned long rc;
+
+ down_read(&gmap->mm->mmap_sem);
+ rc = __gmap_fault(address, gmap);
+ up_read(&gmap->mm->mmap_sem);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(gmap_fault);
+
+static void gmap_zap_swap_entry(swp_entry_t entry, struct mm_struct *mm)
+{
+ if (!non_swap_entry(entry))
+ dec_mm_counter(mm, MM_SWAPENTS);
+ else if (is_migration_entry(entry)) {
+ struct page *page = migration_entry_to_page(entry);
+
+ if (PageAnon(page))
+ dec_mm_counter(mm, MM_ANONPAGES);
+ else
+ dec_mm_counter(mm, MM_FILEPAGES);
+ }
+ free_swap_and_cache(entry);
+}
+
+/*
+ * The mm->mmap_sem lock must be held
+ */
+static void gmap_zap_unused(struct mm_struct *mm, unsigned long address)
+{
+ unsigned long ptev, pgstev;
+ spinlock_t *ptl;
+ pgste_t pgste;
+ pte_t *ptep, pte;
+
+ ptep = get_locked_pte(mm, address, &ptl);
+ if (unlikely(!ptep))
+ return;
+ pte = *ptep;
+ if (!pte_swap(pte))
+ goto out_pte;
+ /* Zap unused and logically-zero pages */
+ pgste = pgste_get_lock(ptep);
+ pgstev = pgste_val(pgste);
+ ptev = pte_val(pte);
+ if (((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED) ||
+ ((pgstev & _PGSTE_GPS_ZERO) && (ptev & _PAGE_INVALID))) {
+ gmap_zap_swap_entry(pte_to_swp_entry(pte), mm);
+ pte_clear(mm, address, ptep);
+ }
+ pgste_set_unlock(ptep, pgste);
+out_pte:
+ pte_unmap_unlock(*ptep, ptl);
+}
+
+/*
+ * this function is assumed to be called with mmap_sem held
+ */
+void __gmap_zap(unsigned long address, struct gmap *gmap)
+{
+ unsigned long *table, *segment_ptr;
+ unsigned long segment, pgstev, ptev;
+ struct gmap_pgtable *mp;
+ struct page *page;
+
+ segment_ptr = gmap_table_walk(address, gmap);
+ if (IS_ERR(segment_ptr))
+ return;
+ segment = *segment_ptr;
+ if (segment & _SEGMENT_ENTRY_INVALID)
+ return;
+ page = pfn_to_page(segment >> PAGE_SHIFT);
+ mp = (struct gmap_pgtable *) page->index;
+ address = mp->vmaddr | (address & ~PMD_MASK);
+ /* Page table is present */
+ table = (unsigned long *)(segment & _SEGMENT_ENTRY_ORIGIN);
+ table = table + ((address >> 12) & 0xff);
+ pgstev = table[PTRS_PER_PTE];
+ ptev = table[0];
+ /* quick check, checked again with locks held */
+ if (((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED) ||
+ ((pgstev & _PGSTE_GPS_ZERO) && (ptev & _PAGE_INVALID)))
+ gmap_zap_unused(gmap->mm, address);
+}
+EXPORT_SYMBOL_GPL(__gmap_zap);
+
+void gmap_discard(unsigned long from, unsigned long to, struct gmap *gmap)
+{
+ unsigned long *table, address, size;
+ struct vm_area_struct *vma;
+ struct gmap_pgtable *mp;
+ struct page *page;
+
+ down_read(&gmap->mm->mmap_sem);
+ address = from;
+ while (address < to) {
+ /* Walk the gmap address space page table */
+ table = gmap->table + ((address >> 53) & 0x7ff);
+ if (unlikely(*table & _REGION_ENTRY_INVALID)) {
+ address = (address + PMD_SIZE) & PMD_MASK;
+ continue;
+ }
+ table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+ table = table + ((address >> 42) & 0x7ff);
+ if (unlikely(*table & _REGION_ENTRY_INVALID)) {
+ address = (address + PMD_SIZE) & PMD_MASK;
+ continue;
+ }
+ table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+ table = table + ((address >> 31) & 0x7ff);
+ if (unlikely(*table & _REGION_ENTRY_INVALID)) {
+ address = (address + PMD_SIZE) & PMD_MASK;
+ continue;
+ }
+ table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+ table = table + ((address >> 20) & 0x7ff);
+ if (unlikely(*table & _SEGMENT_ENTRY_INVALID)) {
+ address = (address + PMD_SIZE) & PMD_MASK;
+ continue;
+ }
+ page = pfn_to_page(*table >> PAGE_SHIFT);
+ mp = (struct gmap_pgtable *) page->index;
+ vma = find_vma(gmap->mm, mp->vmaddr);
+ size = min(to - address, PMD_SIZE - (address & ~PMD_MASK));
+ zap_page_range(vma, mp->vmaddr | (address & ~PMD_MASK),
+ size, NULL);
+ address = (address + PMD_SIZE) & PMD_MASK;
+ }
+ up_read(&gmap->mm->mmap_sem);
+}
+EXPORT_SYMBOL_GPL(gmap_discard);
+
+static LIST_HEAD(gmap_notifier_list);
+static DEFINE_SPINLOCK(gmap_notifier_lock);
+
+/**
+ * gmap_register_ipte_notifier - register a pte invalidation callback
+ * @nb: pointer to the gmap notifier block
+ */
+void gmap_register_ipte_notifier(struct gmap_notifier *nb)
+{
+ spin_lock(&gmap_notifier_lock);
+ list_add(&nb->list, &gmap_notifier_list);
+ spin_unlock(&gmap_notifier_lock);
+}
+EXPORT_SYMBOL_GPL(gmap_register_ipte_notifier);
+
+/**
+ * gmap_unregister_ipte_notifier - remove a pte invalidation callback
+ * @nb: pointer to the gmap notifier block
+ */
+void gmap_unregister_ipte_notifier(struct gmap_notifier *nb)
+{
+ spin_lock(&gmap_notifier_lock);
+ list_del_init(&nb->list);
+ spin_unlock(&gmap_notifier_lock);
+}
+EXPORT_SYMBOL_GPL(gmap_unregister_ipte_notifier);
+
+/**
+ * gmap_ipte_notify - mark a range of ptes for invalidation notification
+ * @gmap: pointer to guest mapping meta data structure
+ * @start: virtual address in the guest address space
+ * @len: size of area
+ *
+ * Returns 0 if for each page in the given range a gmap mapping exists and
+ * the invalidation notification could be set. If the gmap mapping is missing
+ * for one or more pages -EFAULT is returned. If no memory could be allocated
+ * -ENOMEM is returned. This function establishes missing page table entries.
+ */
+int gmap_ipte_notify(struct gmap *gmap, unsigned long start, unsigned long len)
+{
+ unsigned long addr;
+ spinlock_t *ptl;
+ pte_t *ptep, entry;
+ pgste_t pgste;
+ int rc = 0;
+
+ if ((start & ~PAGE_MASK) || (len & ~PAGE_MASK))
+ return -EINVAL;
+ down_read(&gmap->mm->mmap_sem);
+ while (len) {
+ /* Convert gmap address and connect the page tables */
+ addr = __gmap_fault(start, gmap);
+ if (IS_ERR_VALUE(addr)) {
+ rc = addr;
+ break;
+ }
+ /* Get the page mapped */
+ if (fixup_user_fault(current, gmap->mm, addr, FAULT_FLAG_WRITE)) {
+ rc = -EFAULT;
+ break;
+ }
+ /* Walk the process page table, lock and get pte pointer */
+ ptep = get_locked_pte(gmap->mm, addr, &ptl);
+ if (unlikely(!ptep))
+ continue;
+ /* Set notification bit in the pgste of the pte */
+ entry = *ptep;
+ if ((pte_val(entry) & (_PAGE_INVALID | _PAGE_PROTECT)) == 0) {
+ pgste = pgste_get_lock(ptep);
+ pgste_val(pgste) |= PGSTE_IN_BIT;
+ pgste_set_unlock(ptep, pgste);
+ start += PAGE_SIZE;
+ len -= PAGE_SIZE;
+ }
+ spin_unlock(ptl);
+ }
+ up_read(&gmap->mm->mmap_sem);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(gmap_ipte_notify);
+
+/**
+ * gmap_do_ipte_notify - call all invalidation callbacks for a specific pte.
+ * @mm: pointer to the process mm_struct
+ * @pte: pointer to the page table entry
+ *
+ * This function is assumed to be called with the page table lock held
+ * for the pte to notify.
*/
-unsigned long *page_table_alloc(struct mm_struct *mm)
+void gmap_do_ipte_notify(struct mm_struct *mm, pte_t *pte)
+{
+ unsigned long segment_offset;
+ struct gmap_notifier *nb;
+ struct gmap_pgtable *mp;
+ struct gmap_rmap *rmap;
+ struct page *page;
+
+ segment_offset = ((unsigned long) pte) & (255 * sizeof(pte_t));
+ segment_offset = segment_offset * (4096 / sizeof(pte_t));
+ page = pfn_to_page(__pa(pte) >> PAGE_SHIFT);
+ mp = (struct gmap_pgtable *) page->index;
+ spin_lock(&gmap_notifier_lock);
+ list_for_each_entry(rmap, &mp->mapper, list) {
+ list_for_each_entry(nb, &gmap_notifier_list, list)
+ nb->notifier_call(rmap->gmap,
+ rmap->vmaddr + segment_offset);
+ }
+ spin_unlock(&gmap_notifier_lock);
+}
+EXPORT_SYMBOL_GPL(gmap_do_ipte_notify);
+
+static inline int page_table_with_pgste(struct page *page)
+{
+ return atomic_read(&page->_mapcount) == 0;
+}
+
+static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
+ unsigned long vmaddr)
{
struct page *page;
unsigned long *table;
- unsigned long bits;
+ struct gmap_pgtable *mp;
- bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL;
+ page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
+ if (!page)
+ return NULL;
+ mp = kmalloc(sizeof(*mp), GFP_KERNEL|__GFP_REPEAT);
+ if (!mp) {
+ __free_page(page);
+ return NULL;
+ }
+ if (!pgtable_page_ctor(page)) {
+ kfree(mp);
+ __free_page(page);
+ return NULL;
+ }
+ mp->vmaddr = vmaddr & PMD_MASK;
+ INIT_LIST_HEAD(&mp->mapper);
+ page->index = (unsigned long) mp;
+ atomic_set(&page->_mapcount, 0);
+ table = (unsigned long *) page_to_phys(page);
+ clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
+ clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
+ return table;
+}
+
+static inline void page_table_free_pgste(unsigned long *table)
+{
+ struct page *page;
+ struct gmap_pgtable *mp;
+
+ page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
+ mp = (struct gmap_pgtable *) page->index;
+ BUG_ON(!list_empty(&mp->mapper));
+ pgtable_page_dtor(page);
+ atomic_set(&page->_mapcount, -1);
+ kfree(mp);
+ __free_page(page);
+}
+
+static inline unsigned long page_table_reset_pte(struct mm_struct *mm, pmd_t *pmd,
+ unsigned long addr, unsigned long end, bool init_skey)
+{
+ pte_t *start_pte, *pte;
+ spinlock_t *ptl;
+ pgste_t pgste;
+
+ start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
+ pte = start_pte;
+ do {
+ pgste = pgste_get_lock(pte);
+ pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK;
+ if (init_skey) {
+ unsigned long address;
+
+ pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT |
+ PGSTE_GR_BIT | PGSTE_GC_BIT);
+
+			/* skip invalid and non-writable pages */
+ if (pte_val(*pte) & _PAGE_INVALID ||
+ !(pte_val(*pte) & _PAGE_WRITE)) {
+ pgste_set_unlock(pte, pgste);
+ continue;
+ }
+
+ address = pte_val(*pte) & PAGE_MASK;
+ page_set_storage_key(address, PAGE_DEFAULT_KEY, 1);
+ }
+ pgste_set_unlock(pte, pgste);
+ } while (pte++, addr += PAGE_SIZE, addr != end);
+ pte_unmap_unlock(start_pte, ptl);
+
+ return addr;
+}
+
+static inline unsigned long page_table_reset_pmd(struct mm_struct *mm, pud_t *pud,
+ unsigned long addr, unsigned long end, bool init_skey)
+{
+ unsigned long next;
+ pmd_t *pmd;
+
+ pmd = pmd_offset(pud, addr);
+ do {
+ next = pmd_addr_end(addr, end);
+ if (pmd_none_or_clear_bad(pmd))
+ continue;
+ next = page_table_reset_pte(mm, pmd, addr, next, init_skey);
+ } while (pmd++, addr = next, addr != end);
+
+ return addr;
+}
+
+static inline unsigned long page_table_reset_pud(struct mm_struct *mm, pgd_t *pgd,
+ unsigned long addr, unsigned long end, bool init_skey)
+{
+ unsigned long next;
+ pud_t *pud;
+
+ pud = pud_offset(pgd, addr);
+ do {
+ next = pud_addr_end(addr, end);
+ if (pud_none_or_clear_bad(pud))
+ continue;
+ next = page_table_reset_pmd(mm, pud, addr, next, init_skey);
+ } while (pud++, addr = next, addr != end);
+
+ return addr;
+}
+
+void page_table_reset_pgste(struct mm_struct *mm, unsigned long start,
+ unsigned long end, bool init_skey)
+{
+ unsigned long addr, next;
+ pgd_t *pgd;
+
+ down_write(&mm->mmap_sem);
+ if (init_skey && mm_use_skey(mm))
+ goto out_up;
+ addr = start;
+ pgd = pgd_offset(mm, addr);
+ do {
+ next = pgd_addr_end(addr, end);
+ if (pgd_none_or_clear_bad(pgd))
+ continue;
+ next = page_table_reset_pud(mm, pgd, addr, next, init_skey);
+ } while (pgd++, addr = next, addr != end);
+ if (init_skey)
+ current->mm->context.use_skey = 1;
+out_up:
+ up_write(&mm->mmap_sem);
+}
+EXPORT_SYMBOL(page_table_reset_pgste);
+
+int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
+ unsigned long key, bool nq)
+{
+ spinlock_t *ptl;
+ pgste_t old, new;
+ pte_t *ptep;
+
+ down_read(&mm->mmap_sem);
+ ptep = get_locked_pte(current->mm, addr, &ptl);
+ if (unlikely(!ptep)) {
+ up_read(&mm->mmap_sem);
+ return -EFAULT;
+ }
+
+ new = old = pgste_get_lock(ptep);
+ pgste_val(new) &= ~(PGSTE_GR_BIT | PGSTE_GC_BIT |
+ PGSTE_ACC_BITS | PGSTE_FP_BIT);
+ pgste_val(new) |= (key & (_PAGE_CHANGED | _PAGE_REFERENCED)) << 48;
+ pgste_val(new) |= (key & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
+ if (!(pte_val(*ptep) & _PAGE_INVALID)) {
+ unsigned long address, bits, skey;
+
+ address = pte_val(*ptep) & PAGE_MASK;
+ skey = (unsigned long) page_get_storage_key(address);
+ bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
+ skey = key & (_PAGE_ACC_BITS | _PAGE_FP_BIT);
+ /* Set storage key ACC and FP */
+ page_set_storage_key(address, skey, !nq);
+ /* Merge host changed & referenced into pgste */
+ pgste_val(new) |= bits << 52;
+ }
+ /* changing the guest storage key is considered a change of the page */
+ if ((pgste_val(new) ^ pgste_val(old)) &
+ (PGSTE_ACC_BITS | PGSTE_FP_BIT | PGSTE_GR_BIT | PGSTE_GC_BIT))
+ pgste_val(new) |= PGSTE_UC_BIT;
+
+ pgste_set_unlock(ptep, new);
+ pte_unmap_unlock(*ptep, ptl);
+ up_read(&mm->mmap_sem);
+ return 0;
+}
+EXPORT_SYMBOL(set_guest_storage_key);
+
+#else /* CONFIG_PGSTE */
+
+static inline int page_table_with_pgste(struct page *page)
+{
+ return 0;
+}
+
+static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
+ unsigned long vmaddr)
+{
+ return NULL;
+}
+
+void page_table_reset_pgste(struct mm_struct *mm, unsigned long start,
+ unsigned long end, bool init_skey)
+{
+}
+
+static inline void page_table_free_pgste(unsigned long *table)
+{
+}
+
+static inline void gmap_disconnect_pgtable(struct mm_struct *mm,
+ unsigned long *table)
+{
+}
+
+#endif /* CONFIG_PGSTE */
+
+static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
+{
+ unsigned int old, new;
+
+ do {
+ old = atomic_read(v);
+ new = old ^ bits;
+ } while (atomic_cmpxchg(v, old, new) != old);
+ return new;
+}
+
+/*
+ * page table entry allocation/free routines.
+ */
+unsigned long *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr)
+{
+ unsigned long *uninitialized_var(table);
+ struct page *uninitialized_var(page);
+ unsigned int mask, bit;
+
+ if (mm_has_pgste(mm))
+ return page_table_alloc_pgste(mm, vmaddr);
+ /* Allocate fragments of a 4K page as 1K/2K page table */
spin_lock_bh(&mm->context.list_lock);
- page = NULL;
+ mask = FRAG_MASK;
if (!list_empty(&mm->context.pgtable_list)) {
page = list_first_entry(&mm->context.pgtable_list,
struct page, lru);
- if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1))
- page = NULL;
+ table = (unsigned long *) page_to_phys(page);
+ mask = atomic_read(&page->_mapcount);
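+		/*
+		 * The low nibble of _mapcount tracks which 1K/2K fragments are
+		 * in use, the high nibble fragments waiting for an RCU grace
+		 * period; fold them so pending fragments are not handed out.
+		 */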
+ mask = mask | (mask >> 4);
}
- if (!page) {
+ if ((mask & FRAG_MASK) == FRAG_MASK) {
spin_unlock_bh(&mm->context.list_lock);
page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
if (!page)
return NULL;
- pgtable_page_ctor(page);
- page->flags &= ~FRAG_MASK;
+ if (!pgtable_page_ctor(page)) {
+ __free_page(page);
+ return NULL;
+ }
+ atomic_set(&page->_mapcount, 1);
table = (unsigned long *) page_to_phys(page);
- if (mm->context.has_pgste)
- clear_table_pgstes(table);
- else
- clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
+ clear_table(table, _PAGE_INVALID, PAGE_SIZE);
spin_lock_bh(&mm->context.list_lock);
list_add(&page->lru, &mm->context.pgtable_list);
+ } else {
+ for (bit = 1; mask & bit; bit <<= 1)
+ table += PTRS_PER_PTE;
+ mask = atomic_xor_bits(&page->_mapcount, bit);
+ if ((mask & FRAG_MASK) == FRAG_MASK)
+ list_del(&page->lru);
}
- table = (unsigned long *) page_to_phys(page);
- while (page->flags & bits) {
- table += 256;
- bits <<= 1;
- }
- page->flags |= bits;
- if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1))
- list_move_tail(&page->lru, &mm->context.pgtable_list);
spin_unlock_bh(&mm->context.list_lock);
return table;
}
-static void __page_table_free(struct mm_struct *mm, unsigned long *table)
+void page_table_free(struct mm_struct *mm, unsigned long *table)
{
struct page *page;
- unsigned long bits;
+ unsigned int bit, mask;
- bits = ((unsigned long) table) & 15;
- table = (unsigned long *)(((unsigned long) table) ^ bits);
page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
- page->flags ^= bits;
- if (!(page->flags & FRAG_MASK)) {
+ if (page_table_with_pgste(page)) {
+ gmap_disconnect_pgtable(mm, table);
+ return page_table_free_pgste(table);
+ }
+ /* Free 1K/2K page table fragment of a 4K page */
+ bit = 1 << ((__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t)));
+ spin_lock_bh(&mm->context.list_lock);
+ if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
+ list_del(&page->lru);
+ mask = atomic_xor_bits(&page->_mapcount, bit);
+ if (mask & FRAG_MASK)
+ list_add(&page->lru, &mm->context.pgtable_list);
+ spin_unlock_bh(&mm->context.list_lock);
+ if (mask == 0) {
pgtable_page_dtor(page);
+ atomic_set(&page->_mapcount, -1);
__free_page(page);
}
}
-void page_table_free(struct mm_struct *mm, unsigned long *table)
+static void __page_table_free_rcu(void *table, unsigned bit)
{
struct page *page;
- unsigned long bits;
- bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL;
- bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
+ if (bit == FRAG_MASK)
+ return page_table_free_pgste(table);
+ /* Free 1K/2K page table fragment of a 4K page */
page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
- spin_lock_bh(&mm->context.list_lock);
- page->flags ^= bits;
- if (page->flags & FRAG_MASK) {
- /* Page now has some free pgtable fragments. */
- list_move(&page->lru, &mm->context.pgtable_list);
- page = NULL;
- } else
- /* All fragments of the 4K page have been freed. */
- list_del(&page->lru);
- spin_unlock_bh(&mm->context.list_lock);
- if (page) {
+ if (atomic_xor_bits(&page->_mapcount, bit) == 0) {
pgtable_page_dtor(page);
+ atomic_set(&page->_mapcount, -1);
__free_page(page);
}
}
-void page_table_free_rcu(struct mm_struct *mm, unsigned long *table)
+void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table)
{
- struct rcu_table_freelist *batch;
+ struct mm_struct *mm;
struct page *page;
- unsigned long bits;
+ unsigned int bit, mask;
- if (atomic_read(&mm->mm_users) < 2 &&
- cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
- page_table_free(mm, table);
- return;
- }
- batch = rcu_table_freelist_get(mm);
- if (!batch) {
- smp_call_function(smp_sync, NULL, 1);
- page_table_free(mm, table);
+ mm = tlb->mm;
+ page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
+ if (page_table_with_pgste(page)) {
+ gmap_disconnect_pgtable(mm, table);
+ table = (unsigned long *) (__pa(table) | FRAG_MASK);
+ tlb_remove_table(tlb, table);
return;
}
- bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL;
- bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
- page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
+ bit = 1 << ((__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t)));
spin_lock_bh(&mm->context.list_lock);
- /* Delayed freeing with rcu prevents reuse of pgtable fragments */
- list_del_init(&page->lru);
+ if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
+ list_del(&page->lru);
+ mask = atomic_xor_bits(&page->_mapcount, bit | (bit << 4));
+ if (mask & FRAG_MASK)
+ list_add_tail(&page->lru, &mm->context.pgtable_list);
spin_unlock_bh(&mm->context.list_lock);
- table = (unsigned long *)(((unsigned long) table) | bits);
- batch->table[batch->pgt_index++] = table;
- if (batch->pgt_index >= batch->crst_index)
- rcu_table_freelist_finish();
+ table = (unsigned long *) (__pa(table) | (bit << 4));
+ tlb_remove_table(tlb, table);
+}
+
+static void __tlb_remove_table(void *_table)
+{
+ const unsigned long mask = (FRAG_MASK << 4) | FRAG_MASK;
+ void *table = (void *)((unsigned long) _table & ~mask);
+ unsigned type = (unsigned long) _table & mask;
+
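+	/*
+	 * The low byte of the table pointer encodes what to free: 0 for a
+	 * full crst table, FRAG_MASK for a page table with pgstes, otherwise
+	 * the fragment bit shifted into the upper nibble.
+	 */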
+ if (type)
+ __page_table_free_rcu(table, type);
+ else
+ free_pages((unsigned long) table, ALLOC_ORDER);
+}
+
+static void tlb_remove_table_smp_sync(void *arg)
+{
+ /* Simply deliver the interrupt */
+}
+
+static void tlb_remove_table_one(void *table)
+{
+ /*
+ * This isn't an RCU grace period and hence the page-tables cannot be
+ * assumed to be actually RCU-freed.
+ *
+ * It is however sufficient for software page-table walkers that rely
+ * on IRQ disabling. See the comment near struct mmu_table_batch.
+ */
+ smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
+ __tlb_remove_table(table);
+}
+
+static void tlb_remove_table_rcu(struct rcu_head *head)
+{
+ struct mmu_table_batch *batch;
+ int i;
+
+ batch = container_of(head, struct mmu_table_batch, rcu);
+
+ for (i = 0; i < batch->nr; i++)
+ __tlb_remove_table(batch->tables[i]);
+
+ free_page((unsigned long)batch);
}
-void disable_noexec(struct mm_struct *mm, struct task_struct *tsk)
+void tlb_table_flush(struct mmu_gather *tlb)
{
+ struct mmu_table_batch **batch = &tlb->batch;
+
+ if (*batch) {
+ call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
+ *batch = NULL;
+ }
+}
+
+void tlb_remove_table(struct mmu_gather *tlb, void *table)
+{
+ struct mmu_table_batch **batch = &tlb->batch;
+
+ tlb->mm->context.flush_mm = 1;
+ if (*batch == NULL) {
+ *batch = (struct mmu_table_batch *)
+ __get_free_page(GFP_NOWAIT | __GFP_NOWARN);
+ if (*batch == NULL) {
+ __tlb_flush_mm_lazy(tlb->mm);
+ tlb_remove_table_one(table);
+ return;
+ }
+ (*batch)->nr = 0;
+ }
+ (*batch)->tables[(*batch)->nr++] = table;
+ if ((*batch)->nr == MAX_TABLE_BATCH)
+ tlb_flush_mmu(tlb);
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline void thp_split_vma(struct vm_area_struct *vma)
+{
+ unsigned long addr;
+
+ for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE)
+ follow_page(vma, addr, FOLL_SPLIT);
+}
+
+static inline void thp_split_mm(struct mm_struct *mm)
+{
+ struct vm_area_struct *vma;
+
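+	/*
+	 * Split all transparent huge pages of the mm and forbid new ones:
+	 * page tables with pgstes cannot be connected to large pmds, see
+	 * gmap_connect_pgtable().
+	 */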
+ for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
+ thp_split_vma(vma);
+ vma->vm_flags &= ~VM_HUGEPAGE;
+ vma->vm_flags |= VM_NOHUGEPAGE;
+ }
+ mm->def_flags |= VM_NOHUGEPAGE;
+}
+#else
+static inline void thp_split_mm(struct mm_struct *mm)
+{
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+static unsigned long page_table_realloc_pmd(struct mmu_gather *tlb,
+ struct mm_struct *mm, pud_t *pud,
+ unsigned long addr, unsigned long end)
+{
+ unsigned long next, *table, *new;
struct page *page;
+ pmd_t *pmd;
- spin_lock_bh(&mm->context.list_lock);
- /* Free shadow region and segment tables. */
- list_for_each_entry(page, &mm->context.crst_list, lru)
- if (page->index) {
- free_pages((unsigned long) page->index, ALLOC_ORDER);
- page->index = 0;
+ pmd = pmd_offset(pud, addr);
+ do {
+ next = pmd_addr_end(addr, end);
+again:
+ if (pmd_none_or_clear_bad(pmd))
+ continue;
+ table = (unsigned long *) pmd_deref(*pmd);
+ page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
+ if (page_table_with_pgste(page))
+ continue;
+ /* Allocate new page table with pgstes */
+ new = page_table_alloc_pgste(mm, addr);
+ if (!new)
+ return -ENOMEM;
+
+ spin_lock(&mm->page_table_lock);
+ if (likely((unsigned long *) pmd_deref(*pmd) == table)) {
+ /* Nuke pmd entry pointing to the "short" page table */
+ pmdp_flush_lazy(mm, addr, pmd);
+ pmd_clear(pmd);
+ /* Copy ptes from old table to new table */
+ memcpy(new, table, PAGE_SIZE/2);
+ clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
+ /* Establish new table */
+ pmd_populate(mm, pmd, (pte_t *) new);
+ /* Free old table with rcu, there might be a walker! */
+ page_table_free_rcu(tlb, table);
+ new = NULL;
}
- /* "Free" second halves of page tables. */
- list_for_each_entry(page, &mm->context.pgtable_list, lru)
- page->flags &= ~SECOND_HALVES;
- spin_unlock_bh(&mm->context.list_lock);
- mm->context.noexec = 0;
- update_mm(mm, tsk);
+ spin_unlock(&mm->page_table_lock);
+ if (new) {
+ page_table_free_pgste(new);
+ goto again;
+ }
+ } while (pmd++, addr = next, addr != end);
+
+ return addr;
+}
+
+static unsigned long page_table_realloc_pud(struct mmu_gather *tlb,
+ struct mm_struct *mm, pgd_t *pgd,
+ unsigned long addr, unsigned long end)
+{
+ unsigned long next;
+ pud_t *pud;
+
+ pud = pud_offset(pgd, addr);
+ do {
+ next = pud_addr_end(addr, end);
+ if (pud_none_or_clear_bad(pud))
+ continue;
+ next = page_table_realloc_pmd(tlb, mm, pud, addr, next);
+ if (unlikely(IS_ERR_VALUE(next)))
+ return next;
+ } while (pud++, addr = next, addr != end);
+
+ return addr;
+}
+
+static unsigned long page_table_realloc(struct mmu_gather *tlb, struct mm_struct *mm,
+ unsigned long addr, unsigned long end)
+{
+ unsigned long next;
+ pgd_t *pgd;
+
+ pgd = pgd_offset(mm, addr);
+ do {
+ next = pgd_addr_end(addr, end);
+ if (pgd_none_or_clear_bad(pgd))
+ continue;
+ next = page_table_realloc_pud(tlb, mm, pgd, addr, next);
+ if (unlikely(IS_ERR_VALUE(next)))
+ return next;
+ } while (pgd++, addr = next, addr != end);
+
+ return 0;
}
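Note: page_table_realloc() walks every populated pmd and swaps the old 2K page-table fragment for a freshly allocated pgste-capable table. The comment block below sketches the layout this relies on (assuming 64-bit s390 with 256 eight-byte ptes per table); it is an annotation, not part of the patch:

/*
 * page table without pgstes: a 4K page holds two independent 2K
 * fragments of 256 ptes each:
 *     [ 256 pte_t | 256 pte_t ]
 *
 * page table with pgstes: the same 4K page holds one table plus its
 * guest storage extensions:
 *     [ 256 pte_t | 256 pgste ]
 *
 * memcpy(new, table, PAGE_SIZE/2) therefore copies exactly the 256
 * pte_t entries (256 * 8 bytes = 2K) of the old fragment; the pgste
 * half of the new page is set up by the allocator.
 */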
/*
@@ -403,74 +1365,132 @@ void disable_noexec(struct mm_struct *mm, struct task_struct *tsk)
int s390_enable_sie(void)
{
struct task_struct *tsk = current;
- struct mm_struct *mm, *old_mm;
-
- /* Do we have switched amode? If no, we cannot do sie */
- if (user_mode == HOME_SPACE_MODE)
- return -EINVAL;
+ struct mm_struct *mm = tsk->mm;
+ struct mmu_gather tlb;
/* Do we have pgstes? if yes, we are done */
- if (tsk->mm->context.has_pgste)
+ if (mm_has_pgste(tsk->mm))
return 0;
- /* lets check if we are allowed to replace the mm */
- task_lock(tsk);
- if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
-#ifdef CONFIG_AIO
- !hlist_empty(&tsk->mm->ioctx_list) ||
-#endif
- tsk->mm != tsk->active_mm) {
- task_unlock(tsk);
- return -EINVAL;
- }
- task_unlock(tsk);
+ down_write(&mm->mmap_sem);
+ /* split thp mappings and disable thp for future mappings */
+ thp_split_mm(mm);
+ /* Reallocate the page tables with pgstes */
+ tlb_gather_mmu(&tlb, mm, 0, TASK_SIZE);
+ if (!page_table_realloc(&tlb, mm, 0, TASK_SIZE))
+ mm->context.has_pgste = 1;
+ tlb_finish_mmu(&tlb, 0, TASK_SIZE);
+ up_write(&mm->mmap_sem);
+ return mm->context.has_pgste ? 0 : -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(s390_enable_sie);
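Note: with this rework s390_enable_sie() converts the current mm in place: THP mappings are split and disabled first (the pgste-based tracking works per pte), then every page table is reallocated with pgstes under mmap_sem held for writing. A hedged caller sketch with illustrative naming (the real caller sits in KVM's VM creation path):

static int create_guest_mm(void)
{
	int rc;

	rc = s390_enable_sie();	/* 0 on success, -ENOMEM if realloc failed */
	if (rc)
		return rc;
	/* from here on every page table of current->mm carries pgstes */
	return 0;
}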
- /* we copy the mm and let dup_mm create the page tables with_pgstes */
- tsk->mm->context.alloc_pgste = 1;
- mm = dup_mm(tsk);
- tsk->mm->context.alloc_pgste = 0;
- if (!mm)
- return -ENOMEM;
+/*
+ * Enable storage key handling from now on and initialize the storage
+ * keys with the default key.
+ */
+void s390_enable_skey(void)
+{
+ page_table_reset_pgste(current->mm, 0, TASK_SIZE, true);
+}
+EXPORT_SYMBOL_GPL(s390_enable_skey);
- /* Now lets check again if something happened */
- task_lock(tsk);
- if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
-#ifdef CONFIG_AIO
- !hlist_empty(&tsk->mm->ioctx_list) ||
-#endif
- tsk->mm != tsk->active_mm) {
- mmput(mm);
- task_unlock(tsk);
- return -EINVAL;
+/*
+ * Test and clear the dirty state of a guest page
+ */
+bool gmap_test_and_clear_dirty(unsigned long address, struct gmap *gmap)
+{
+ pte_t *pte;
+ spinlock_t *ptl;
+ bool dirty = false;
+
+ pte = get_locked_pte(gmap->mm, address, &ptl);
+ if (unlikely(!pte))
+ return false;
+
+ if (ptep_test_and_clear_user_dirty(gmap->mm, address, pte))
+ dirty = true;
+
+ spin_unlock(ptl);
+ return dirty;
+}
+EXPORT_SYMBOL_GPL(gmap_test_and_clear_dirty);
+
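Note: gmap_test_and_clear_dirty() is the building block for guest dirty logging: it looks up the pte behind a guest address and tests and clears the dirty state tracked for that page. An illustrative (not upstream) scan over a guest range, assuming a 4K-page dirty bitmap:

static void scan_dirty_pages(struct gmap *gmap, unsigned long size,
			     unsigned long *bitmap)
{
	unsigned long addr;

	for (addr = 0; addr < size; addr += PAGE_SIZE)
		if (gmap_test_and_clear_dirty(addr, gmap))
			set_bit(addr >> PAGE_SHIFT, bitmap);
}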
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+int pmdp_clear_flush_young(struct vm_area_struct *vma, unsigned long address,
+ pmd_t *pmdp)
+{
+ VM_BUG_ON(address & ~HPAGE_PMD_MASK);
+ /* No need to flush the TLB: on s390 the reference bits live in the
+ * storage key and are never cached in the TLB. */
+ return pmdp_test_and_clear_young(vma, address, pmdp);
+}
+
+int pmdp_set_access_flags(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp,
+ pmd_t entry, int dirty)
+{
+ VM_BUG_ON(address & ~HPAGE_PMD_MASK);
+
+ if (pmd_same(*pmdp, entry))
+ return 0;
+ pmdp_invalidate(vma, address, pmdp);
+ set_pmd_at(vma->vm_mm, address, pmdp, entry);
+ return 1;
+}
+
+static void pmdp_splitting_flush_sync(void *arg)
+{
+ /* Simply deliver the interrupt */
+}
+
+void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
+ pmd_t *pmdp)
+{
+ VM_BUG_ON(address & ~HPAGE_PMD_MASK);
+ if (!test_and_set_bit(_SEGMENT_ENTRY_SPLIT_BIT,
+ (unsigned long *) pmdp)) {
+ /* need to serialize against gup-fast (IRQ disabled) */
+ smp_call_function(pmdp_splitting_flush_sync, NULL, 1);
}
+}
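Note: the split bit plus the broadcast IPI give the same guarantee as the table-freeing path above, this time for gup-fast: a lockless walker running with IRQs off either finishes before the IPI completes or already sees the splitting bit and bails out. A condensed, hypothetical check (the real logic lives in arch/s390/mm/gup.c):

static inline int gup_fast_pmd_usable(pmd_t pmd)
{
	/* none or currently being split: fall back to the sleeping GUP path */
	if (pmd_none(pmd) || pmd_trans_splitting(pmd))
		return 0;
	return 1;
}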
- /* ok, we are alone. No ptrace, no threads, etc. */
- old_mm = tsk->mm;
- tsk->mm = tsk->active_mm = mm;
- preempt_disable();
- update_mm(mm, tsk);
- atomic_inc(&mm->context.attach_count);
- atomic_dec(&old_mm->context.attach_count);
- cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
- preempt_enable();
- task_unlock(tsk);
- mmput(old_mm);
- return 0;
+void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
+ pgtable_t pgtable)
+{
+ struct list_head *lh = (struct list_head *) pgtable;
+
+ assert_spin_locked(pmd_lockptr(mm, pmdp));
+
+ /* FIFO */
+ if (!pmd_huge_pte(mm, pmdp))
+ INIT_LIST_HEAD(lh);
+ else
+ list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
+ pmd_huge_pte(mm, pmdp) = pgtable;
}
-EXPORT_SYMBOL_GPL(s390_enable_sie);
-#if defined(CONFIG_DEBUG_PAGEALLOC) && defined(CONFIG_HIBERNATION)
-bool kernel_page_present(struct page *page)
+pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
- unsigned long addr;
- int cc;
+ struct list_head *lh;
+ pgtable_t pgtable;
+ pte_t *ptep;
+
+ assert_spin_locked(pmd_lockptr(mm, pmdp));
- addr = page_to_phys(page);
- asm volatile(
- " lra %1,0(%1)\n"
- " ipm %0\n"
- " srl %0,28"
- : "=d" (cc), "+a" (addr) : : "cc");
- return cc == 0;
+ /* FIFO */
+ pgtable = pmd_huge_pte(mm, pmdp);
+ lh = (struct list_head *) pgtable;
+ if (list_empty(lh))
+ pmd_huge_pte(mm, pmdp) = NULL;
+ else {
+ pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
+ list_del(lh);
+ }
+ ptep = (pte_t *) pgtable;
+ pte_val(*ptep) = _PAGE_INVALID;
+ ptep++;
+ pte_val(*ptep) = _PAGE_INVALID;
+ return pgtable;
}
-#endif /* CONFIG_HIBERNATION && CONFIG_DEBUG_PAGEALLOC */
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
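Note: the deposit/withdraw pair stores the pre-allocated pte table's list linkage inside the table itself: the struct list_head occupies the first two pte slots, which is why pgtable_trans_huge_withdraw() re-marks exactly two ptes as _PAGE_INVALID before handing the table back. A hypothetical compile-time check of that size relation (not in the patch):

static inline void pgtable_deposit_list_fits(void)
{
	/* two 8-byte pte_t slots must be able to hold the 16-byte list_head */
	BUILD_BUG_ON(sizeof(struct list_head) > 2 * sizeof(pte_t));
}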
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 34c43f23b28..fe9012a49aa 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -1,6 +1,4 @@
/*
- * arch/s390/mm/vmem.c
- *
* Copyright IBM Corp. 2006
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
*/
@@ -12,6 +10,7 @@
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
+#include <linux/memblock.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
@@ -61,17 +60,18 @@ static inline pmd_t *vmem_pmd_alloc(void)
return pmd;
}
-static pte_t __ref *vmem_pte_alloc(void)
+static pte_t __ref *vmem_pte_alloc(unsigned long address)
{
pte_t *pte;
if (slab_is_available())
- pte = (pte_t *) page_table_alloc(&init_mm);
+ pte = (pte_t *) page_table_alloc(&init_mm, address);
else
- pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t));
+ pte = alloc_bootmem_align(PTRS_PER_PTE * sizeof(pte_t),
+ PTRS_PER_PTE * sizeof(pte_t));
if (!pte)
return NULL;
- clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY,
+ clear_table((unsigned long *) pte, _PAGE_INVALID,
PTRS_PER_PTE * sizeof(pte_t));
return pte;
}
@@ -81,57 +81,65 @@ static pte_t __ref *vmem_pte_alloc(void)
*/
static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
{
- unsigned long address;
+ unsigned long end = start + size;
+ unsigned long address = start;
pgd_t *pg_dir;
pud_t *pu_dir;
pmd_t *pm_dir;
pte_t *pt_dir;
- pte_t pte;
int ret = -ENOMEM;
- for (address = start; address < start + size; address += PAGE_SIZE) {
+ while (address < end) {
pg_dir = pgd_offset_k(address);
if (pgd_none(*pg_dir)) {
pu_dir = vmem_pud_alloc();
if (!pu_dir)
goto out;
- pgd_populate_kernel(&init_mm, pg_dir, pu_dir);
+ pgd_populate(&init_mm, pg_dir, pu_dir);
}
-
pu_dir = pud_offset(pg_dir, address);
+#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC)
+ if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address &&
+ !(address & ~PUD_MASK) && (address + PUD_SIZE <= end)) {
+ pud_val(*pu_dir) = __pa(address) |
+ _REGION_ENTRY_TYPE_R3 | _REGION3_ENTRY_LARGE |
+ (ro ? _REGION_ENTRY_PROTECT : 0);
+ address += PUD_SIZE;
+ continue;
+ }
+#endif
if (pud_none(*pu_dir)) {
pm_dir = vmem_pmd_alloc();
if (!pm_dir)
goto out;
- pud_populate_kernel(&init_mm, pu_dir, pm_dir);
+ pud_populate(&init_mm, pu_dir, pm_dir);
}
-
- pte = mk_pte_phys(address, __pgprot(ro ? _PAGE_RO : 0));
pm_dir = pmd_offset(pu_dir, address);
-
-#ifdef __s390x__
- if (MACHINE_HAS_HPAGE && !(address & ~HPAGE_MASK) &&
- (address + HPAGE_SIZE <= start + size) &&
- (address >= HPAGE_SIZE)) {
- pte_val(pte) |= _SEGMENT_ENTRY_LARGE;
- pmd_val(*pm_dir) = pte_val(pte);
- address += HPAGE_SIZE - PAGE_SIZE;
+#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC)
+ if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address &&
+ !(address & ~PMD_MASK) && (address + PMD_SIZE <= end)) {
+ pmd_val(*pm_dir) = __pa(address) |
+ _SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE |
+ _SEGMENT_ENTRY_YOUNG |
+ (ro ? _SEGMENT_ENTRY_PROTECT : 0);
+ address += PMD_SIZE;
continue;
}
#endif
if (pmd_none(*pm_dir)) {
- pt_dir = vmem_pte_alloc();
+ pt_dir = vmem_pte_alloc(address);
if (!pt_dir)
goto out;
- pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
+ pmd_populate(&init_mm, pm_dir, pt_dir);
}
pt_dir = pte_offset_kernel(pm_dir, address);
- *pt_dir = pte;
+ pte_val(*pt_dir) = __pa(address) |
+ pgprot_val(ro ? PAGE_KERNEL_RO : PAGE_KERNEL);
+ address += PAGE_SIZE;
}
ret = 0;
out:
- flush_tlb_kernel_range(start, start + size);
return ret;
}
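Note: vmem_add_mem() now prefers 2G region-third (EDAT2) and 1M segment (EDAT1) mappings for the identity map when the hardware supports them. A hypothetical helper, equivalent to the inline checks above, spells out when a large block is eligible:

static inline bool can_map_large(unsigned long addr, unsigned long end,
				 unsigned long block_size, bool has_facility)
{
	/* facility present, block naturally aligned and fully inside the
	 * range, and address 0 itself is never mapped large */
	return has_facility && addr &&
	       !(addr & (block_size - 1)) &&
	       addr + block_size <= end;
}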
@@ -141,58 +149,67 @@ out:
*/
static void vmem_remove_range(unsigned long start, unsigned long size)
{
- unsigned long address;
+ unsigned long end = start + size;
+ unsigned long address = start;
pgd_t *pg_dir;
pud_t *pu_dir;
pmd_t *pm_dir;
pte_t *pt_dir;
pte_t pte;
- pte_val(pte) = _PAGE_TYPE_EMPTY;
- for (address = start; address < start + size; address += PAGE_SIZE) {
+ pte_val(pte) = _PAGE_INVALID;
+ while (address < end) {
pg_dir = pgd_offset_k(address);
+ if (pgd_none(*pg_dir)) {
+ address += PGDIR_SIZE;
+ continue;
+ }
pu_dir = pud_offset(pg_dir, address);
- if (pud_none(*pu_dir))
+ if (pud_none(*pu_dir)) {
+ address += PUD_SIZE;
+ continue;
+ }
+ if (pud_large(*pu_dir)) {
+ pud_clear(pu_dir);
+ address += PUD_SIZE;
continue;
+ }
pm_dir = pmd_offset(pu_dir, address);
- if (pmd_none(*pm_dir))
+ if (pmd_none(*pm_dir)) {
+ address += PMD_SIZE;
continue;
-
- if (pmd_huge(*pm_dir)) {
- pmd_clear_kernel(pm_dir);
- address += HPAGE_SIZE - PAGE_SIZE;
+ }
+ if (pmd_large(*pm_dir)) {
+ pmd_clear(pm_dir);
+ address += PMD_SIZE;
continue;
}
-
pt_dir = pte_offset_kernel(pm_dir, address);
*pt_dir = pte;
+ address += PAGE_SIZE;
}
- flush_tlb_kernel_range(start, start + size);
+ flush_tlb_kernel_range(start, end);
}
/*
* Add a backed mem_map array to the virtual mem_map array.
*/
-int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
+int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
- unsigned long address, start_addr, end_addr;
+ unsigned long address = start;
pgd_t *pg_dir;
pud_t *pu_dir;
pmd_t *pm_dir;
pte_t *pt_dir;
- pte_t pte;
int ret = -ENOMEM;
- start_addr = (unsigned long) start;
- end_addr = (unsigned long) (start + nr);
-
- for (address = start_addr; address < end_addr; address += PAGE_SIZE) {
+ for (address = start; address < end;) {
pg_dir = pgd_offset_k(address);
if (pgd_none(*pg_dir)) {
pu_dir = vmem_pud_alloc();
if (!pu_dir)
goto out;
- pgd_populate_kernel(&init_mm, pg_dir, pu_dir);
+ pgd_populate(&init_mm, pg_dir, pu_dir);
}
pu_dir = pud_offset(pg_dir, address);
@@ -200,15 +217,38 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
pm_dir = vmem_pmd_alloc();
if (!pm_dir)
goto out;
- pud_populate_kernel(&init_mm, pu_dir, pm_dir);
+ pud_populate(&init_mm, pu_dir, pm_dir);
}
pm_dir = pmd_offset(pu_dir, address);
if (pmd_none(*pm_dir)) {
- pt_dir = vmem_pte_alloc();
+#ifdef CONFIG_64BIT
+ /* Use 1MB frames for vmemmap if available. We always
+ * use large frames even if they are only partially
+ * used.
+ * Otherwise we would also end up with page tables,
+ * since vmemmap_populate gets called for each section
+ * separately. */
+ if (MACHINE_HAS_EDAT1) {
+ void *new_page;
+
+ new_page = vmemmap_alloc_block(PMD_SIZE, node);
+ if (!new_page)
+ goto out;
+ pmd_val(*pm_dir) = __pa(new_page) |
+ _SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE |
+ _SEGMENT_ENTRY_CO;
+ address = (address + PMD_SIZE) & PMD_MASK;
+ continue;
+ }
+#endif
+ pt_dir = vmem_pte_alloc(address);
if (!pt_dir)
goto out;
- pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
+ pmd_populate(&init_mm, pm_dir, pt_dir);
+ } else if (pmd_large(*pm_dir)) {
+ address = (address + PMD_SIZE) & PMD_MASK;
+ continue;
}
pt_dir = pte_offset_kernel(pm_dir, address);
@@ -218,17 +258,21 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
new_page =__pa(vmem_alloc_pages(0));
if (!new_page)
goto out;
- pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL);
- *pt_dir = pte;
+ pte_val(*pt_dir) =
+ __pa(new_page) | pgprot_val(PAGE_KERNEL);
}
+ address += PAGE_SIZE;
}
- memset(start, 0, nr * sizeof(struct page));
+ memset((void *)start, 0, end - start);
ret = 0;
out:
- flush_tlb_kernel_range(start_addr, end_addr);
return ret;
}
+void vmemmap_free(unsigned long start, unsigned long end)
+{
+}
+
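Note: using full 1M segment frames for the vmemmap keeps page-table overhead down even though a section's array may only partially fill the last frame. Rough numbers, assuming 4K pages and a 64-byte struct page (both assumptions, not taken from the patch):

/*
 * one 1M vmemmap frame holds 1M / 64 = 16384 struct pages,
 * which in turn describe 16384 * 4K = 64M of physical memory,
 * so at most part of a single 1M frame per range is left unused.
 */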
/*
* Add memory segment to the segment list if it doesn't overlap with
* an already present segment.
@@ -329,14 +373,14 @@ out:
void __init vmem_map_init(void)
{
unsigned long ro_start, ro_end;
- unsigned long start, end;
- int i;
-
- ro_start = ((unsigned long)&_stext) & PAGE_MASK;
- ro_end = PFN_ALIGN((unsigned long)&_eshared);
- for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
- start = memory_chunk[i].addr;
- end = memory_chunk[i].addr + memory_chunk[i].size;
+ struct memblock_region *reg;
+ phys_addr_t start, end;
+
+ ro_start = PFN_ALIGN((unsigned long)&_stext);
+ ro_end = (unsigned long)&_eshared & PAGE_MASK;
+ for_each_memblock(memory, reg) {
+ start = reg->base;
+ end = reg->base + reg->size - 1;
if (start >= ro_end || end <= ro_start)
vmem_add_mem(start, end - start, 0);
else if (start >= ro_start && end <= ro_end)
@@ -356,23 +400,21 @@ void __init vmem_map_init(void)
}
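Note: vmem_map_init() splits every memblock region around the read-only kernel text/rodata area so that only that area is mapped with the protection bit set. A hedged picture of the general case (the partial-overlap branches fall outside the lines shown in this hunk):

/*
 *   region:  [start ................................. end)
 *   kernel:           [ro_start ......... ro_end)
 *   result:  [  rw   |          ro        |  rw        )
 *
 * each piece is passed to vmem_add_mem() with the matching 'ro' flag
 */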
/*
- * Convert memory chunk array to a memory segment list so there is a single
- * list that contains both r/w memory and shared memory segments.
+ * Convert memblock.memory to a memory segment list so there is a single
+ * list that contains all memory segments.
*/
static int __init vmem_convert_memory_chunk(void)
{
+ struct memblock_region *reg;
struct memory_segment *seg;
- int i;
mutex_lock(&vmem_mutex);
- for (i = 0; i < MEMORY_CHUNKS; i++) {
- if (!memory_chunk[i].size)
- continue;
+ for_each_memblock(memory, reg) {
seg = kzalloc(sizeof(*seg), GFP_KERNEL);
if (!seg)
panic("Out of memory...\n");
- seg->start = memory_chunk[i].addr;
- seg->size = memory_chunk[i].size;
+ seg->start = reg->base;
+ seg->size = reg->size;
insert_memory_segment(seg);
}
mutex_unlock(&vmem_mutex);