Diffstat (limited to 'arch/s390/mm')
 arch/s390/mm/cmm.c         |  12
 arch/s390/mm/fault.c       | 263
 arch/s390/mm/gup.c         |  83
 arch/s390/mm/hugetlbpage.c |  10
 arch/s390/mm/init.c        |   7
 arch/s390/mm/maccess.c     |  28
 arch/s390/mm/mem_detect.c  | 130
 arch/s390/mm/mmap.c        |  21
 arch/s390/mm/pageattr.c    |   4
 arch/s390/mm/pgtable.c     | 302
 arch/s390/mm/vmem.c        |  32
 11 files changed, 537 insertions(+), 355 deletions(-)
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c index 9d84a1feefe..79ddd580d60 100644 --- a/arch/s390/mm/cmm.c +++ b/arch/s390/mm/cmm.c @@ -253,12 +253,12 @@ static int cmm_skip_blanks(char *cp, char **endp)  static struct ctl_table cmm_table[]; -static int cmm_pages_handler(ctl_table *ctl, int write, void __user *buffer, -			     size_t *lenp, loff_t *ppos) +static int cmm_pages_handler(struct ctl_table *ctl, int write, +			     void __user *buffer, size_t *lenp, loff_t *ppos)  {  	char buf[16], *p; +	unsigned int len;  	long nr; -	int len;  	if (!*lenp || (*ppos && !write)) {  		*lenp = 0; @@ -293,12 +293,12 @@ static int cmm_pages_handler(ctl_table *ctl, int write, void __user *buffer,  	return 0;  } -static int cmm_timeout_handler(ctl_table *ctl, int write,  void __user *buffer, -			       size_t *lenp, loff_t *ppos) +static int cmm_timeout_handler(struct ctl_table *ctl, int write, +			       void __user *buffer, size_t *lenp, loff_t *ppos)  {  	char buf[64], *p;  	long nr, seconds; -	int len; +	unsigned int len;  	if (!*lenp || (*ppos && !write)) {  		*lenp = 0; diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index fc6679210d8..3f3b35403d0 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -50,6 +50,7 @@  #define VM_FAULT_BADMAP		0x020000  #define VM_FAULT_BADACCESS	0x040000  #define VM_FAULT_SIGNAL		0x080000 +#define VM_FAULT_PFAULT		0x100000  static unsigned long store_indication __read_mostly; @@ -105,26 +106,151 @@ void bust_spinlocks(int yes)   * Returns the address space associated with the fault.   * Returns 0 for kernel space and 1 for user space.   */ -static inline int user_space_fault(unsigned long trans_exc_code) +static inline int user_space_fault(struct pt_regs *regs)  { +	unsigned long trans_exc_code; +  	/*  	 * The lowest two bits of the translation exception  	 * identification indicate which paging table was used.  	 */ -	trans_exc_code &= 3; -	if (trans_exc_code == 2) -		/* Access via secondary space, set_fs setting decides */ +	trans_exc_code = regs->int_parm_long & 3; +	if (trans_exc_code == 3) /* home space -> kernel */ +		return 0; +	if (user_mode(regs)) +		return 1; +	if (trans_exc_code == 2) /* secondary space -> set_fs */  		return current->thread.mm_segment.ar4; -	if (s390_user_mode == HOME_SPACE_MODE) -		/* User space if the access has been done via home space. */ -		return trans_exc_code == 3; -	/* -	 * If the user space is not the home space the kernel runs in home -	 * space. Access via secondary space has already been covered, -	 * access via primary space or access register is from user space -	 * and access via home space is from the kernel. 
-	 */ -	return trans_exc_code != 3; +	if (current->flags & PF_VCPU) +		return 1; +	return 0; +} + +static int bad_address(void *p) +{ +	unsigned long dummy; + +	return probe_kernel_address((unsigned long *)p, dummy); +} + +#ifdef CONFIG_64BIT +static void dump_pagetable(unsigned long asce, unsigned long address) +{ +	unsigned long *table = __va(asce & PAGE_MASK); + +	pr_alert("AS:%016lx ", asce); +	switch (asce & _ASCE_TYPE_MASK) { +	case _ASCE_TYPE_REGION1: +		table = table + ((address >> 53) & 0x7ff); +		if (bad_address(table)) +			goto bad; +		pr_cont("R1:%016lx ", *table); +		if (*table & _REGION_ENTRY_INVALID) +			goto out; +		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); +		/* fallthrough */ +	case _ASCE_TYPE_REGION2: +		table = table + ((address >> 42) & 0x7ff); +		if (bad_address(table)) +			goto bad; +		pr_cont("R2:%016lx ", *table); +		if (*table & _REGION_ENTRY_INVALID) +			goto out; +		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); +		/* fallthrough */ +	case _ASCE_TYPE_REGION3: +		table = table + ((address >> 31) & 0x7ff); +		if (bad_address(table)) +			goto bad; +		pr_cont("R3:%016lx ", *table); +		if (*table & (_REGION_ENTRY_INVALID | _REGION3_ENTRY_LARGE)) +			goto out; +		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); +		/* fallthrough */ +	case _ASCE_TYPE_SEGMENT: +		table = table + ((address >> 20) & 0x7ff); +		if (bad_address(table)) +			goto bad; +		pr_cont(KERN_CONT "S:%016lx ", *table); +		if (*table & (_SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_LARGE)) +			goto out; +		table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN); +	} +	table = table + ((address >> 12) & 0xff); +	if (bad_address(table)) +		goto bad; +	pr_cont("P:%016lx ", *table); +out: +	pr_cont("\n"); +	return; +bad: +	pr_cont("BAD\n"); +} + +#else /* CONFIG_64BIT */ + +static void dump_pagetable(unsigned long asce, unsigned long address) +{ +	unsigned long *table = __va(asce & PAGE_MASK); + +	pr_alert("AS:%08lx ", asce); +	table = table + ((address >> 20) & 0x7ff); +	if (bad_address(table)) +		goto bad; +	pr_cont("S:%08lx ", *table); +	if (*table & _SEGMENT_ENTRY_INVALID) +		goto out; +	table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN); +	table = table + ((address >> 12) & 0xff); +	if (bad_address(table)) +		goto bad; +	pr_cont("P:%08lx ", *table); +out: +	pr_cont("\n"); +	return; +bad: +	pr_cont("BAD\n"); +} + +#endif /* CONFIG_64BIT */ + +static void dump_fault_info(struct pt_regs *regs) +{ +	unsigned long asce; + +	pr_alert("Fault in "); +	switch (regs->int_parm_long & 3) { +	case 3: +		pr_cont("home space "); +		break; +	case 2: +		pr_cont("secondary space "); +		break; +	case 1: +		pr_cont("access register "); +		break; +	case 0: +		pr_cont("primary space "); +		break; +	} +	pr_cont("mode while using "); +	if (!user_space_fault(regs)) { +		asce = S390_lowcore.kernel_asce; +		pr_cont("kernel "); +	} +#ifdef CONFIG_PGSTE +	else if ((current->flags & PF_VCPU) && S390_lowcore.gmap) { +		struct gmap *gmap = (struct gmap *)S390_lowcore.gmap; +		asce = gmap->asce; +		pr_cont("gmap "); +	} +#endif +	else { +		asce = S390_lowcore.user_asce; +		pr_cont("user "); +	} +	pr_cont("ASCE.\n"); +	dump_pagetable(asce, regs->int_parm_long & __FAIL_ADDR_MASK);  }  static inline void report_user_fault(struct pt_regs *regs, long signr) @@ -139,8 +265,9 @@ static inline void report_user_fault(struct pt_regs *regs, long signr)  	       regs->int_code);  	print_vma_addr(KERN_CONT "in ", regs->psw.addr & PSW_ADDR_INSN);  	printk(KERN_CONT "\n"); -	printk(KERN_ALERT "failing address: 
%lX\n", -	       regs->int_parm_long & __FAIL_ADDR_MASK); +	printk(KERN_ALERT "failing address: %016lx TEID: %016lx\n", +	       regs->int_parm_long & __FAIL_ADDR_MASK, regs->int_parm_long); +	dump_fault_info(regs);  	show_regs(regs);  } @@ -176,13 +303,15 @@ static noinline void do_no_context(struct pt_regs *regs)  	 * terminate things with extreme prejudice.  	 */  	address = regs->int_parm_long & __FAIL_ADDR_MASK; -	if (!user_space_fault(regs->int_parm_long)) +	if (!user_space_fault(regs))  		printk(KERN_ALERT "Unable to handle kernel pointer dereference" -		       " at virtual kernel address %p\n", (void *)address); +		       " in virtual kernel address space\n");  	else  		printk(KERN_ALERT "Unable to handle kernel paging request" -		       " at virtual user address %p\n", (void *)address); - +		       " in virtual user address space\n"); +	printk(KERN_ALERT "failing address: %016lx TEID: %016lx\n", +	       regs->int_parm_long & __FAIL_ADDR_MASK, regs->int_parm_long); +	dump_fault_info(regs);  	die(regs, "Oops");  	do_exit(SIGKILL);  } @@ -232,6 +361,7 @@ static noinline void do_fault_error(struct pt_regs *regs, int fault)  			return;  		}  	case VM_FAULT_BADCONTEXT: +	case VM_FAULT_PFAULT:  		do_no_context(regs);  		break;  	case VM_FAULT_SIGNAL: @@ -269,6 +399,9 @@ static noinline void do_fault_error(struct pt_regs *regs, int fault)   */  static inline int do_exception(struct pt_regs *regs, int access)  { +#ifdef CONFIG_PGSTE +	struct gmap *gmap; +#endif  	struct task_struct *tsk;  	struct mm_struct *mm;  	struct vm_area_struct *vma; @@ -282,7 +415,7 @@ static inline int do_exception(struct pt_regs *regs, int access)  	 * The instruction that caused the program check has  	 * been nullified. Don't signal single step via SIGTRAP.  	 */ -	clear_tsk_thread_flag(tsk, TIF_PER_TRAP); +	clear_pt_regs_flag(regs, PIF_PER_TRAP);  	if (notify_page_fault(regs))  		return 0; @@ -296,7 +429,7 @@ static inline int do_exception(struct pt_regs *regs, int access)  	 * user context.  	 */  	fault = VM_FAULT_BADCONTEXT; -	if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm)) +	if (unlikely(!user_space_fault(regs) || in_atomic() || !mm))  		goto out;  	address = trans_exc_code & __FAIL_ADDR_MASK; @@ -309,9 +442,10 @@ static inline int do_exception(struct pt_regs *regs, int access)  	down_read(&mm->mmap_sem);  #ifdef CONFIG_PGSTE -	if ((current->flags & PF_VCPU) && S390_lowcore.gmap) { -		address = __gmap_fault(address, -				     (struct gmap *) S390_lowcore.gmap); +	gmap = (struct gmap *) +		((current->flags & PF_VCPU) ? S390_lowcore.gmap : 0); +	if (gmap) { +		address = __gmap_fault(address, gmap);  		if (address == -EFAULT) {  			fault = VM_FAULT_BADMAP;  			goto out_up; @@ -320,6 +454,8 @@ static inline int do_exception(struct pt_regs *regs, int access)  			fault = VM_FAULT_OOM;  			goto out_up;  		} +		if (gmap->pfault_enabled) +			flags |= FAULT_FLAG_RETRY_NOWAIT;  	}  #endif @@ -376,9 +512,19 @@ retry:  				      regs, address);  		}  		if (fault & VM_FAULT_RETRY) { +#ifdef CONFIG_PGSTE +			if (gmap && (flags & FAULT_FLAG_RETRY_NOWAIT)) { +				/* FAULT_FLAG_RETRY_NOWAIT has been set, +				 * mmap_sem has not been released */ +				current->thread.gmap_pfault = 1; +				fault = VM_FAULT_PFAULT; +				goto out_up; +			} +#endif  			/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk  			 * of starvation. 
*/ -			flags &= ~FAULT_FLAG_ALLOW_RETRY; +			flags &= ~(FAULT_FLAG_ALLOW_RETRY | +				   FAULT_FLAG_RETRY_NOWAIT);  			flags |= FAULT_FLAG_TRIED;  			down_read(&mm->mmap_sem);  			goto retry; @@ -428,67 +574,6 @@ void __kprobes do_dat_exception(struct pt_regs *regs)  		do_fault_error(regs, fault);  } -#ifdef CONFIG_64BIT -void __kprobes do_asce_exception(struct pt_regs *regs) -{ -	struct mm_struct *mm = current->mm; -	struct vm_area_struct *vma; -	unsigned long trans_exc_code; - -	/* -	 * The instruction that caused the program check has -	 * been nullified. Don't signal single step via SIGTRAP. -	 */ -	clear_tsk_thread_flag(current, TIF_PER_TRAP); - -	trans_exc_code = regs->int_parm_long; -	if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm)) -		goto no_context; - -	down_read(&mm->mmap_sem); -	vma = find_vma(mm, trans_exc_code & __FAIL_ADDR_MASK); -	up_read(&mm->mmap_sem); - -	if (vma) { -		update_mm(mm, current); -		return; -	} - -	/* User mode accesses just cause a SIGSEGV */ -	if (user_mode(regs)) { -		do_sigsegv(regs, SEGV_MAPERR); -		return; -	} - -no_context: -	do_no_context(regs); -} -#endif - -int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write) -{ -	struct pt_regs regs; -	int access, fault; - -	/* Emulate a uaccess fault from kernel mode. */ -	regs.psw.mask = psw_kernel_bits | PSW_MASK_DAT | PSW_MASK_MCHECK; -	if (!irqs_disabled()) -		regs.psw.mask |= PSW_MASK_IO | PSW_MASK_EXT; -	regs.psw.addr = (unsigned long) __builtin_return_address(0); -	regs.psw.addr |= PSW_ADDR_AMODE; -	regs.int_code = pgm_int_code; -	regs.int_parm_long = (uaddr & PAGE_MASK) | 2; -	access = write ? VM_WRITE : VM_READ; -	fault = do_exception(®s, access); -	/* -	 * Since the fault happened in kernel mode while performing a uaccess -	 * all we need to do now is emulating a fixup in case "fault" is not -	 * zero. -	 * For the calling uaccess functions this results always in -EFAULT. -	 */ -	return fault ? -EFAULT : 0; -} -  #ifdef CONFIG_PFAULT   /*   * 'pfault' pseudo page faults routines. @@ -669,7 +754,7 @@ static int __init pfault_irq_init(void)  {  	int rc; -	rc = register_external_interrupt(0x2603, pfault_interrupt); +	rc = register_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);  	if (rc)  		goto out_extint;  	rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP; @@ -680,7 +765,7 @@ static int __init pfault_irq_init(void)  	return 0;  out_pfault: -	unregister_external_interrupt(0x2603, pfault_interrupt); +	unregister_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);  out_extint:  	pfault_disable = 1;  	return rc; diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c index 5d758db27bd..639fce46400 100644 --- a/arch/s390/mm/gup.c +++ b/arch/s390/mm/gup.c @@ -180,9 +180,15 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,  	addr = start;  	len = (unsigned long) nr_pages << PAGE_SHIFT;  	end = start + len; -	if ((end < start) || (end > TASK_SIZE)) +	if ((end <= start) || (end > TASK_SIZE))  		return 0; - +	/* +	 * local_irq_save() doesn't prevent pagetable teardown, but does +	 * prevent the pagetables from being freed on s390. +	 * +	 * So long as we atomically load page table pointers versus teardown, +	 * we can follow the address down to the the page and take a ref on it. 
+	 */  	local_irq_save(flags);  	pgdp = pgd_offset(mm, addr);  	do { @@ -219,63 +225,22 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,  			struct page **pages)  {  	struct mm_struct *mm = current->mm; -	unsigned long addr, len, end; -	unsigned long next; -	pgd_t *pgdp, pgd; -	int nr = 0; +	int nr, ret;  	start &= PAGE_MASK; -	addr = start; -	len = (unsigned long) nr_pages << PAGE_SHIFT; -	end = start + len; -	if ((end < start) || (end > TASK_SIZE)) -		goto slow_irqon; - -	/* -	 * local_irq_disable() doesn't prevent pagetable teardown, but does -	 * prevent the pagetables from being freed on s390. -	 * -	 * So long as we atomically load page table pointers versus teardown, -	 * we can follow the address down to the the page and take a ref on it. -	 */ -	local_irq_disable(); -	pgdp = pgd_offset(mm, addr); -	do { -		pgd = *pgdp; -		barrier(); -		next = pgd_addr_end(addr, end); -		if (pgd_none(pgd)) -			goto slow; -		if (!gup_pud_range(pgdp, pgd, addr, next, write, pages, &nr)) -			goto slow; -	} while (pgdp++, addr = next, addr != end); -	local_irq_enable(); - -	VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT); -	return nr; - -	{ -		int ret; -slow: -		local_irq_enable(); -slow_irqon: -		/* Try to get the remaining pages with get_user_pages */ -		start += nr << PAGE_SHIFT; -		pages += nr; - -		down_read(&mm->mmap_sem); -		ret = get_user_pages(current, mm, start, -			(end - start) >> PAGE_SHIFT, write, 0, pages, NULL); -		up_read(&mm->mmap_sem); - -		/* Have to be a bit careful with return values */ -		if (nr > 0) { -			if (ret < 0) -				ret = nr; -			else -				ret += nr; -		} - -		return ret; -	} +	nr = __get_user_pages_fast(start, nr_pages, write, pages); +	if (nr == nr_pages) +		return nr; + +	/* Try to get the remaining pages with get_user_pages */ +	start += nr << PAGE_SHIFT; +	pages += nr; +	down_read(&mm->mmap_sem); +	ret = get_user_pages(current, mm, start, +			     nr_pages - nr, write, 0, pages, NULL); +	up_read(&mm->mmap_sem); +	/* Have to be a bit careful with return values */ +	if (nr > 0) +		ret = (ret < 0) ? 
nr : ret + nr; +	return ret;  } diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c index d261c62e40a..0ff66a7e29b 100644 --- a/arch/s390/mm/hugetlbpage.c +++ b/arch/s390/mm/hugetlbpage.c @@ -123,10 +123,7 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm,  	pmd_t *pmdp = (pmd_t *) ptep;  	pte_t pte = huge_ptep_get(ptep); -	if (MACHINE_HAS_IDTE) -		__pmd_idte(addr, pmdp); -	else -		__pmd_csp(pmdp); +	pmdp_flush_direct(mm, addr, pmdp);  	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;  	return pte;  } @@ -223,11 +220,6 @@ int pud_huge(pud_t pud)  	return 0;  } -int pmd_huge_support(void) -{ -	return 1; -} -  struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,  			     pmd_t *pmdp, int write)  { diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index ad446b0c55b..0c1073ed1e8 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c @@ -124,8 +124,6 @@ void __init paging_init(void)  	__ctl_load(S390_lowcore.kernel_asce, 13, 13);  	arch_local_irq_restore(4UL << (BITS_PER_LONG - 8)); -	atomic_set(&init_mm.context.attach_count, 1); -  	sparse_memory_present_with_active_regions(MAX_NUMNODES);  	sparse_init();  	memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); @@ -136,6 +134,11 @@ void __init paging_init(void)  void __init mem_init(void)  { +	if (MACHINE_HAS_TLB_LC) +		cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask); +	cpumask_set_cpu(0, mm_cpumask(&init_mm)); +	atomic_set(&init_mm.context.attach_count, 1); +          max_mapnr = max_low_pfn;          high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c index d1e0e0c7a7e..2a2e35416d2 100644 --- a/arch/s390/mm/maccess.c +++ b/arch/s390/mm/maccess.c @@ -128,7 +128,7 @@ void memcpy_absolute(void *dest, void *src, size_t count)  /*   * Copy memory from kernel (real) to user (virtual)   */ -int copy_to_user_real(void __user *dest, void *src, size_t count) +int copy_to_user_real(void __user *dest, void *src, unsigned long count)  {  	int offs = 0, size, rc;  	char *buf; @@ -152,32 +152,6 @@ out:  }  /* - * Copy memory from user (virtual) to kernel (real) - */ -int copy_from_user_real(void *dest, void __user *src, size_t count) -{ -	int offs = 0, size, rc; -	char *buf; - -	buf = (char *) __get_free_page(GFP_KERNEL); -	if (!buf) -		return -ENOMEM; -	rc = -EFAULT; -	while (offs < count) { -		size = min(PAGE_SIZE, count - offs); -		if (copy_from_user(buf, src + offs, size)) -			goto out; -		if (memcpy_real(dest + offs, buf, size)) -			goto out; -		offs += size; -	} -	rc = 0; -out: -	free_page((unsigned long) buf); -	return rc; -} - -/*   * Check if physical address is within prefix or zero page   */  static int is_swapped(unsigned long addr) diff --git a/arch/s390/mm/mem_detect.c b/arch/s390/mm/mem_detect.c index cca388253a3..5535cfe0ee1 100644 --- a/arch/s390/mm/mem_detect.c +++ b/arch/s390/mm/mem_detect.c @@ -6,130 +6,60 @@  #include <linux/kernel.h>  #include <linux/module.h> +#include <linux/memblock.h> +#include <linux/init.h> +#include <linux/debugfs.h> +#include <linux/seq_file.h>  #include <asm/ipl.h>  #include <asm/sclp.h>  #include <asm/setup.h>  #define ADDR2G (1ULL << 31) -static void find_memory_chunks(struct mem_chunk chunk[], unsigned long maxsize) +#define CHUNK_READ_WRITE 0 +#define CHUNK_READ_ONLY  1 + +static inline void memblock_physmem_add(phys_addr_t start, phys_addr_t size) +{ +	memblock_add_range(&memblock.memory, start, size, 0, 0); +	memblock_add_range(&memblock.physmem, start, size, 0, 0); +} + +void __init 
detect_memory_memblock(void)  {  	unsigned long long memsize, rnmax, rzm; -	unsigned long addr = 0, size; -	int i = 0, type; +	unsigned long addr, size; +	int type;  	rzm = sclp_get_rzm();  	rnmax = sclp_get_rnmax();  	memsize = rzm * rnmax;  	if (!rzm)  		rzm = 1ULL << 17; -	if (sizeof(long) == 4) { +	if (IS_ENABLED(CONFIG_32BIT)) {  		rzm = min(ADDR2G, rzm); -		memsize = memsize ? min(ADDR2G, memsize) : ADDR2G; +		memsize = min(ADDR2G, memsize);  	} -	if (maxsize) -		memsize = memsize ? min((unsigned long)memsize, maxsize) : maxsize; +	max_physmem_end = memsize; +	addr = 0; +	/* keep memblock lists close to the kernel */ +	memblock_set_bottom_up(true);  	do {  		size = 0;  		type = tprot(addr);  		do {  			size += rzm; -			if (memsize && addr + size >= memsize) +			if (max_physmem_end && addr + size >= max_physmem_end)  				break;  		} while (type == tprot(addr + size));  		if (type == CHUNK_READ_WRITE || type == CHUNK_READ_ONLY) { -			if (memsize && (addr + size > memsize)) -				size = memsize - addr; -			chunk[i].addr = addr; -			chunk[i].size = size; -			chunk[i].type = type; -			i++; +			if (max_physmem_end && (addr + size > max_physmem_end)) +				size = max_physmem_end - addr; +			memblock_physmem_add(addr, size);  		}  		addr += size; -	} while (addr < memsize && i < MEMORY_CHUNKS); -} - -/** - * detect_memory_layout - fill mem_chunk array with memory layout data - * @chunk: mem_chunk array to be filled - * @maxsize: maximum address where memory detection should stop - * - * Fills the passed in memory chunk array with the memory layout of the - * machine. The array must have a size of at least MEMORY_CHUNKS and will - * be fully initialized afterwards. - * If the maxsize paramater has a value > 0 memory detection will stop at - * that address. It is guaranteed that all chunks have an ending address - * that is smaller than maxsize. - * If maxsize is 0 all memory will be detected. - */ -void detect_memory_layout(struct mem_chunk chunk[], unsigned long maxsize) -{ -	unsigned long flags, flags_dat, cr0; - -	memset(chunk, 0, MEMORY_CHUNKS * sizeof(struct mem_chunk)); -	/* -	 * Disable IRQs, DAT and low address protection so tprot does the -	 * right thing and we don't get scheduled away with low address -	 * protection disabled. -	 */ -	local_irq_save(flags); -	flags_dat = __arch_local_irq_stnsm(0xfb); -	/* -	 * In case DAT was enabled, make sure chunk doesn't reside in vmalloc -	 * space. We have disabled DAT and any access to vmalloc area will -	 * cause an exception. -	 * If DAT was disabled we are called from early ipl code. -	 */ -	if (test_bit(5, &flags_dat)) { -		if (WARN_ON_ONCE(is_vmalloc_or_module_addr(chunk))) -			goto out; -	} -	__ctl_store(cr0, 0, 0); -	__ctl_clear_bit(0, 28); -	find_memory_chunks(chunk, maxsize); -	__ctl_load(cr0, 0, 0); -out: -	__arch_local_irq_ssm(flags_dat); -	local_irq_restore(flags); -} -EXPORT_SYMBOL(detect_memory_layout); - -/* - * Create memory hole with given address and size. 
- */ -void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr, -		     unsigned long size) -{ -	int i; - -	for (i = 0; i < MEMORY_CHUNKS; i++) { -		struct mem_chunk *chunk = &mem_chunk[i]; - -		if (chunk->size == 0) -			continue; -		if (addr > chunk->addr + chunk->size) -			continue; -		if (addr + size <= chunk->addr) -			continue; -		/* Split */ -		if ((addr > chunk->addr) && -		    (addr + size < chunk->addr + chunk->size)) { -			struct mem_chunk *new = chunk + 1; - -			memmove(new, chunk, (MEMORY_CHUNKS-i-1) * sizeof(*new)); -			new->addr = addr + size; -			new->size = chunk->addr + chunk->size - new->addr; -			chunk->size = addr - chunk->addr; -			continue; -		} else if ((addr <= chunk->addr) && -			   (addr + size >= chunk->addr + chunk->size)) { -			memmove(chunk, chunk + 1, (MEMORY_CHUNKS-i-1) * sizeof(*chunk)); -			memset(&mem_chunk[MEMORY_CHUNKS-1], 0, sizeof(*chunk)); -		} else if (addr + size < chunk->addr + chunk->size) { -			chunk->size =  chunk->addr + chunk->size - addr - size; -			chunk->addr = addr + size; -		} else if (addr > chunk->addr) { -			chunk->size = addr - chunk->addr; -		} -	} +	} while (addr < max_physmem_end); +	memblock_set_bottom_up(false); +	if (!max_physmem_end) +		max_physmem_end = memblock_end_of_DRAM();  } diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c index 40023290ee5..9b436c21195 100644 --- a/arch/s390/mm/mmap.c +++ b/arch/s390/mm/mmap.c @@ -64,6 +64,11 @@ static unsigned long mmap_rnd(void)  	return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;  } +static unsigned long mmap_base_legacy(void) +{ +	return TASK_UNMAPPED_BASE + mmap_rnd(); +} +  static inline unsigned long mmap_base(void)  {  	unsigned long gap = rlimit(RLIMIT_STACK); @@ -89,7 +94,7 @@ void arch_pick_mmap_layout(struct mm_struct *mm)  	 * bit is set, or if the expected stack growth is unlimited:  	 */  	if (mmap_is_legacy()) { -		mm->mmap_base = TASK_UNMAPPED_BASE; +		mm->mmap_base = mmap_base_legacy();  		mm->get_unmapped_area = arch_get_unmapped_area;  	} else {  		mm->mmap_base = mmap_base(); @@ -101,18 +106,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)  int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)  { -	int rc; -  	if (is_compat_task() || (TASK_SIZE >= (1UL << 53)))  		return 0;  	if (!(flags & MAP_FIXED))  		addr = 0; -	if ((addr + len) >= TASK_SIZE) { -		rc = crst_table_upgrade(current->mm, 1UL << 53); -		if (rc) -			return rc; -		update_mm(current->mm, current); -	} +	if ((addr + len) >= TASK_SIZE) +		return crst_table_upgrade(current->mm, 1UL << 53);  	return 0;  } @@ -132,7 +131,6 @@ s390_get_unmapped_area(struct file *filp, unsigned long addr,  		rc = crst_table_upgrade(mm, 1UL << 53);  		if (rc)  			return (unsigned long) rc; -		update_mm(mm, current);  		area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);  	}  	return area; @@ -155,7 +153,6 @@ s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,  		rc = crst_table_upgrade(mm, 1UL << 53);  		if (rc)  			return (unsigned long) rc; -		update_mm(mm, current);  		area = arch_get_unmapped_area_topdown(filp, addr, len,  						      pgoff, flags);  	} @@ -172,7 +169,7 @@ void arch_pick_mmap_layout(struct mm_struct *mm)  	 * bit is set, or if the expected stack growth is unlimited:  	 */  	if (mmap_is_legacy()) { -		mm->mmap_base = TASK_UNMAPPED_BASE; +		mm->mmap_base = mmap_base_legacy();  		mm->get_unmapped_area = s390_get_unmapped_area;  	} else {  		mm->mmap_base = mmap_base(); diff --git a/arch/s390/mm/pageattr.c 
b/arch/s390/mm/pageattr.c index 990397420e6..8400f494623 100644 --- a/arch/s390/mm/pageattr.c +++ b/arch/s390/mm/pageattr.c @@ -9,6 +9,7 @@  #include <asm/pgtable.h>  #include <asm/page.h> +#if PAGE_DEFAULT_KEY  static inline unsigned long sske_frame(unsigned long addr, unsigned char skey)  {  	asm volatile(".insn rrf,0xb22b0000,%[skey],%[addr],9,0" @@ -16,7 +17,7 @@ static inline unsigned long sske_frame(unsigned long addr, unsigned char skey)  	return addr;  } -void storage_key_init_range(unsigned long start, unsigned long end) +void __storage_key_init_range(unsigned long start, unsigned long end)  {  	unsigned long boundary, size; @@ -36,6 +37,7 @@ void storage_key_init_range(unsigned long start, unsigned long end)  		start += PAGE_SIZE;  	}  } +#endif  static pte_t *walk_page_table(unsigned long addr)  { diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index de8cbc30dcd..37b8241ec78 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -17,6 +17,7 @@  #include <linux/quicklist.h>  #include <linux/rcupdate.h>  #include <linux/slab.h> +#include <linux/swapops.h>  #include <asm/pgtable.h>  #include <asm/pgalloc.h> @@ -48,12 +49,25 @@ void crst_table_free(struct mm_struct *mm, unsigned long *table)  }  #ifdef CONFIG_64BIT +static void __crst_table_upgrade(void *arg) +{ +	struct mm_struct *mm = arg; + +	if (current->active_mm == mm) { +		clear_user_asce(); +		set_user_asce(mm); +	} +	__tlb_flush_local(); +} +  int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)  {  	unsigned long *table, *pgd;  	unsigned long entry; +	int flush;  	BUG_ON(limit > (1UL << 53)); +	flush = 0;  repeat:  	table = crst_table_alloc(mm);  	if (!table) @@ -79,12 +93,15 @@ repeat:  		mm->pgd = (pgd_t *) table;  		mm->task_size = mm->context.asce_limit;  		table = NULL; +		flush = 1;  	}  	spin_unlock_bh(&mm->page_table_lock);  	if (table)  		crst_table_free(mm, table);  	if (mm->context.asce_limit < limit)  		goto repeat; +	if (flush) +		on_each_cpu(__crst_table_upgrade, mm, 0);  	return 0;  } @@ -92,6 +109,10 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)  {  	pgd_t *pgd; +	if (current->active_mm == mm) { +		clear_user_asce(); +		__tlb_flush_mm(mm); +	}  	while (mm->context.asce_limit > limit) {  		pgd = mm->pgd;  		switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) { @@ -114,6 +135,8 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)  		mm->task_size = mm->context.asce_limit;  		crst_table_free(mm, (unsigned long *) pgd);  	} +	if (current->active_mm == mm) +		set_user_asce(mm);  }  #endif @@ -179,7 +202,7 @@ static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table)  static void gmap_flush_tlb(struct gmap *gmap)  {  	if (MACHINE_HAS_IDTE) -		__tlb_flush_idte((unsigned long) gmap->table | +		__tlb_flush_asce(gmap->mm, (unsigned long) gmap->table |  				 _ASCE_TYPE_REGION1);  	else  		__tlb_flush_global(); @@ -198,7 +221,7 @@ void gmap_free(struct gmap *gmap)  	/* Flush tlb. */  	if (MACHINE_HAS_IDTE) -		__tlb_flush_idte((unsigned long) gmap->table | +		__tlb_flush_asce(gmap->mm, (unsigned long) gmap->table |  				 _ASCE_TYPE_REGION1);  	else  		__tlb_flush_global(); @@ -275,7 +298,7 @@ static int gmap_alloc_table(struct gmap *gmap,   * @addr: address in the guest address space   * @len: length of the memory area to unmap   * - * Returns 0 if the unmap succeded, -EINVAL if not. + * Returns 0 if the unmap succeeded, -EINVAL if not.   
*/  int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)  { @@ -326,7 +349,7 @@ EXPORT_SYMBOL_GPL(gmap_unmap_segment);   * @from: source address in the parent address space   * @to: target address in the guest address space   * - * Returns 0 if the mmap succeded, -EINVAL or -ENOMEM if not. + * Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not.   */  int gmap_map_segment(struct gmap *gmap, unsigned long from,  		     unsigned long to, unsigned long len) @@ -486,6 +509,9 @@ static int gmap_connect_pgtable(unsigned long address, unsigned long segment,  	if (!pmd_present(*pmd) &&  	    __pte_alloc(mm, vma, pmd, vmaddr))  		return -ENOMEM; +	/* large pmds cannot yet be handled */ +	if (pmd_large(*pmd)) +		return -EFAULT;  	/* pmd now points to a valid segment table entry. */  	rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT);  	if (!rmap) @@ -576,6 +602,82 @@ unsigned long gmap_fault(unsigned long address, struct gmap *gmap)  }  EXPORT_SYMBOL_GPL(gmap_fault); +static void gmap_zap_swap_entry(swp_entry_t entry, struct mm_struct *mm) +{ +	if (!non_swap_entry(entry)) +		dec_mm_counter(mm, MM_SWAPENTS); +	else if (is_migration_entry(entry)) { +		struct page *page = migration_entry_to_page(entry); + +		if (PageAnon(page)) +			dec_mm_counter(mm, MM_ANONPAGES); +		else +			dec_mm_counter(mm, MM_FILEPAGES); +	} +	free_swap_and_cache(entry); +} + +/** + * The mm->mmap_sem lock must be held + */ +static void gmap_zap_unused(struct mm_struct *mm, unsigned long address) +{ +	unsigned long ptev, pgstev; +	spinlock_t *ptl; +	pgste_t pgste; +	pte_t *ptep, pte; + +	ptep = get_locked_pte(mm, address, &ptl); +	if (unlikely(!ptep)) +		return; +	pte = *ptep; +	if (!pte_swap(pte)) +		goto out_pte; +	/* Zap unused and logically-zero pages */ +	pgste = pgste_get_lock(ptep); +	pgstev = pgste_val(pgste); +	ptev = pte_val(pte); +	if (((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED) || +	    ((pgstev & _PGSTE_GPS_ZERO) && (ptev & _PAGE_INVALID))) { +		gmap_zap_swap_entry(pte_to_swp_entry(pte), mm); +		pte_clear(mm, address, ptep); +	} +	pgste_set_unlock(ptep, pgste); +out_pte: +	pte_unmap_unlock(*ptep, ptl); +} + +/* + * this function is assumed to be called with mmap_sem held + */ +void __gmap_zap(unsigned long address, struct gmap *gmap) +{ +	unsigned long *table, *segment_ptr; +	unsigned long segment, pgstev, ptev; +	struct gmap_pgtable *mp; +	struct page *page; + +	segment_ptr = gmap_table_walk(address, gmap); +	if (IS_ERR(segment_ptr)) +		return; +	segment = *segment_ptr; +	if (segment & _SEGMENT_ENTRY_INVALID) +		return; +	page = pfn_to_page(segment >> PAGE_SHIFT); +	mp = (struct gmap_pgtable *) page->index; +	address = mp->vmaddr | (address & ~PMD_MASK); +	/* Page table is present */ +	table = (unsigned long *)(segment & _SEGMENT_ENTRY_ORIGIN); +	table = table + ((address >> 12) & 0xff); +	pgstev = table[PTRS_PER_PTE]; +	ptev = table[0]; +	/* quick check, checked again with locks held */ +	if (((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED) || +	    ((pgstev & _PGSTE_GPS_ZERO) && (ptev & _PAGE_INVALID))) +		gmap_zap_unused(gmap->mm, address); +} +EXPORT_SYMBOL_GPL(__gmap_zap); +  void gmap_discard(unsigned long from, unsigned long to, struct gmap *gmap)  { @@ -653,7 +755,7 @@ EXPORT_SYMBOL_GPL(gmap_unregister_ipte_notifier);  /**   * gmap_ipte_notify - mark a range of ptes for invalidation notification   * @gmap: pointer to guest mapping meta data structure - * @address: virtual address in the guest address space + * @start: virtual address in the 
guest address space   * @len: size of area   *   * Returns 0 if for each page in the given range a gmap mapping exists and @@ -707,13 +809,12 @@ EXPORT_SYMBOL_GPL(gmap_ipte_notify);  /**   * gmap_do_ipte_notify - call all invalidation callbacks for a specific pte.   * @mm: pointer to the process mm_struct - * @addr: virtual address in the process address space   * @pte: pointer to the page table entry   *   * This function is assumed to be called with the page table lock held   * for the pte to notify.   */ -void gmap_do_ipte_notify(struct mm_struct *mm, unsigned long addr, pte_t *pte) +void gmap_do_ipte_notify(struct mm_struct *mm, pte_t *pte)  {  	unsigned long segment_offset;  	struct gmap_notifier *nb; @@ -733,6 +834,7 @@ void gmap_do_ipte_notify(struct mm_struct *mm, unsigned long addr, pte_t *pte)  	}  	spin_unlock(&gmap_notifier_lock);  } +EXPORT_SYMBOL_GPL(gmap_do_ipte_notify);  static inline int page_table_with_pgste(struct page *page)  { @@ -754,15 +856,18 @@ static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,  		__free_page(page);  		return NULL;  	} -	pgtable_page_ctor(page); +	if (!pgtable_page_ctor(page)) { +		kfree(mp); +		__free_page(page); +		return NULL; +	}  	mp->vmaddr = vmaddr & PMD_MASK;  	INIT_LIST_HEAD(&mp->mapper);  	page->index = (unsigned long) mp;  	atomic_set(&page->_mapcount, 0);  	table = (unsigned long *) page_to_phys(page);  	clear_table(table, _PAGE_INVALID, PAGE_SIZE/2); -	clear_table(table + PTRS_PER_PTE, PGSTE_HR_BIT | PGSTE_HC_BIT, -		    PAGE_SIZE/2); +	clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);  	return table;  } @@ -780,6 +885,99 @@ static inline void page_table_free_pgste(unsigned long *table)  	__free_page(page);  } +static inline unsigned long page_table_reset_pte(struct mm_struct *mm, pmd_t *pmd, +			unsigned long addr, unsigned long end, bool init_skey) +{ +	pte_t *start_pte, *pte; +	spinlock_t *ptl; +	pgste_t pgste; + +	start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl); +	pte = start_pte; +	do { +		pgste = pgste_get_lock(pte); +		pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK; +		if (init_skey) { +			unsigned long address; + +			pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT | +					      PGSTE_GR_BIT | PGSTE_GC_BIT); + +			/* skip invalid and not writable pages */ +			if (pte_val(*pte) & _PAGE_INVALID || +			    !(pte_val(*pte) & _PAGE_WRITE)) { +				pgste_set_unlock(pte, pgste); +				continue; +			} + +			address = pte_val(*pte) & PAGE_MASK; +			page_set_storage_key(address, PAGE_DEFAULT_KEY, 1); +		} +		pgste_set_unlock(pte, pgste); +	} while (pte++, addr += PAGE_SIZE, addr != end); +	pte_unmap_unlock(start_pte, ptl); + +	return addr; +} + +static inline unsigned long page_table_reset_pmd(struct mm_struct *mm, pud_t *pud, +			unsigned long addr, unsigned long end, bool init_skey) +{ +	unsigned long next; +	pmd_t *pmd; + +	pmd = pmd_offset(pud, addr); +	do { +		next = pmd_addr_end(addr, end); +		if (pmd_none_or_clear_bad(pmd)) +			continue; +		next = page_table_reset_pte(mm, pmd, addr, next, init_skey); +	} while (pmd++, addr = next, addr != end); + +	return addr; +} + +static inline unsigned long page_table_reset_pud(struct mm_struct *mm, pgd_t *pgd, +			unsigned long addr, unsigned long end, bool init_skey) +{ +	unsigned long next; +	pud_t *pud; + +	pud = pud_offset(pgd, addr); +	do { +		next = pud_addr_end(addr, end); +		if (pud_none_or_clear_bad(pud)) +			continue; +		next = page_table_reset_pmd(mm, pud, addr, next, init_skey); +	} while (pud++, addr = next, addr != end); + +	return addr; +} + +void 
page_table_reset_pgste(struct mm_struct *mm, unsigned long start, +			    unsigned long end, bool init_skey) +{ +	unsigned long addr, next; +	pgd_t *pgd; + +	down_write(&mm->mmap_sem); +	if (init_skey && mm_use_skey(mm)) +		goto out_up; +	addr = start; +	pgd = pgd_offset(mm, addr); +	do { +		next = pgd_addr_end(addr, end); +		if (pgd_none_or_clear_bad(pgd)) +			continue; +		next = page_table_reset_pud(mm, pgd, addr, next, init_skey); +	} while (pgd++, addr = next, addr != end); +	if (init_skey) +		current->mm->context.use_skey = 1; +out_up: +	up_write(&mm->mmap_sem); +} +EXPORT_SYMBOL(page_table_reset_pgste); +  int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,  			  unsigned long key, bool nq)  { @@ -814,7 +1012,7 @@ int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,  	/* changing the guest storage key is considered a change of the page */  	if ((pgste_val(new) ^ pgste_val(old)) &  	    (PGSTE_ACC_BITS | PGSTE_FP_BIT | PGSTE_GR_BIT | PGSTE_GC_BIT)) -		pgste_val(new) |= PGSTE_HC_BIT; +		pgste_val(new) |= PGSTE_UC_BIT;  	pgste_set_unlock(ptep, new);  	pte_unmap_unlock(*ptep, ptl); @@ -836,6 +1034,11 @@ static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,  	return NULL;  } +void page_table_reset_pgste(struct mm_struct *mm, unsigned long start, +			    unsigned long end, bool init_skey) +{ +} +  static inline void page_table_free_pgste(unsigned long *table)  {  } @@ -884,7 +1087,10 @@ unsigned long *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr)  		page = alloc_page(GFP_KERNEL|__GFP_REPEAT);  		if (!page)  			return NULL; -		pgtable_page_ctor(page); +		if (!pgtable_page_ctor(page)) { +			__free_page(page); +			return NULL; +		}  		atomic_set(&page->_mapcount, 1);  		table = (unsigned long *) page_to_phys(page);  		clear_table(table, _PAGE_INVALID, PAGE_SIZE); @@ -1087,10 +1293,9 @@ again:  			continue;  		/* Allocate new page table with pgstes */  		new = page_table_alloc_pgste(mm, addr); -		if (!new) { -			mm->context.has_pgste = 0; -			continue; -		} +		if (!new) +			return -ENOMEM; +  		spin_lock(&mm->page_table_lock);  		if (likely((unsigned long *) pmd_deref(*pmd) == table)) {  			/* Nuke pmd entry pointing to the "short" page table */ @@ -1128,13 +1333,15 @@ static unsigned long page_table_realloc_pud(struct mmu_gather *tlb,  		if (pud_none_or_clear_bad(pud))  			continue;  		next = page_table_realloc_pmd(tlb, mm, pud, addr, next); +		if (unlikely(IS_ERR_VALUE(next))) +			return next;  	} while (pud++, addr = next, addr != end);  	return addr;  } -static void page_table_realloc(struct mmu_gather *tlb, struct mm_struct *mm, -			       unsigned long addr, unsigned long end) +static unsigned long page_table_realloc(struct mmu_gather *tlb, struct mm_struct *mm, +					unsigned long addr, unsigned long end)  {  	unsigned long next;  	pgd_t *pgd; @@ -1145,7 +1352,11 @@ static void page_table_realloc(struct mmu_gather *tlb, struct mm_struct *mm,  		if (pgd_none_or_clear_bad(pgd))  			continue;  		next = page_table_realloc_pud(tlb, mm, pgd, addr, next); +		if (unlikely(IS_ERR_VALUE(next))) +			return next;  	} while (pgd++, addr = next, addr != end); + +	return 0;  }  /* @@ -1157,10 +1368,6 @@ int s390_enable_sie(void)  	struct mm_struct *mm = tsk->mm;  	struct mmu_gather tlb; -	/* Do we have switched amode? If no, we cannot do sie */ -	if (s390_user_mode == HOME_SPACE_MODE) -		return -EINVAL; -  	/* Do we have pgstes? 
if yes, we are done */  	if (mm_has_pgste(tsk->mm))  		return 0; @@ -1169,15 +1376,46 @@ int s390_enable_sie(void)  	/* split thp mappings and disable thp for future mappings */  	thp_split_mm(mm);  	/* Reallocate the page tables with pgstes */ -	mm->context.has_pgste = 1;  	tlb_gather_mmu(&tlb, mm, 0, TASK_SIZE); -	page_table_realloc(&tlb, mm, 0, TASK_SIZE); +	if (!page_table_realloc(&tlb, mm, 0, TASK_SIZE)) +		mm->context.has_pgste = 1;  	tlb_finish_mmu(&tlb, 0, TASK_SIZE);  	up_write(&mm->mmap_sem);  	return mm->context.has_pgste ? 0 : -ENOMEM;  }  EXPORT_SYMBOL_GPL(s390_enable_sie); +/* + * Enable storage key handling from now on and initialize the storage + * keys with the default key. + */ +void s390_enable_skey(void) +{ +	page_table_reset_pgste(current->mm, 0, TASK_SIZE, true); +} +EXPORT_SYMBOL_GPL(s390_enable_skey); + +/* + * Test and reset if a guest page is dirty + */ +bool gmap_test_and_clear_dirty(unsigned long address, struct gmap *gmap) +{ +	pte_t *pte; +	spinlock_t *ptl; +	bool dirty = false; + +	pte = get_locked_pte(gmap->mm, address, &ptl); +	if (unlikely(!pte)) +		return false; + +	if (ptep_test_and_clear_user_dirty(gmap->mm, address, pte)) +		dirty = true; + +	spin_unlock(ptl); +	return dirty; +} +EXPORT_SYMBOL_GPL(gmap_test_and_clear_dirty); +  #ifdef CONFIG_TRANSPARENT_HUGEPAGE  int pmdp_clear_flush_young(struct vm_area_struct *vma, unsigned long address,  			   pmd_t *pmdp) @@ -1222,14 +1460,14 @@ void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,  {  	struct list_head *lh = (struct list_head *) pgtable; -	assert_spin_locked(&mm->page_table_lock); +	assert_spin_locked(pmd_lockptr(mm, pmdp));  	/* FIFO */ -	if (!mm->pmd_huge_pte) +	if (!pmd_huge_pte(mm, pmdp))  		INIT_LIST_HEAD(lh);  	else -		list_add(lh, (struct list_head *) mm->pmd_huge_pte); -	mm->pmd_huge_pte = pgtable; +		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp)); +	pmd_huge_pte(mm, pmdp) = pgtable;  }  pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp) @@ -1238,15 +1476,15 @@ pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)  	pgtable_t pgtable;  	pte_t *ptep; -	assert_spin_locked(&mm->page_table_lock); +	assert_spin_locked(pmd_lockptr(mm, pmdp));  	/* FIFO */ -	pgtable = mm->pmd_huge_pte; +	pgtable = pmd_huge_pte(mm, pmdp);  	lh = (struct list_head *) pgtable;  	if (list_empty(lh)) -		mm->pmd_huge_pte = NULL; +		pmd_huge_pte(mm, pmdp) = NULL;  	else { -		mm->pmd_huge_pte = (pgtable_t) lh->next; +		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;  		list_del(lh);  	}  	ptep = (pte_t *) pgtable; diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c index bcfb70b60be..fe9012a49aa 100644 --- a/arch/s390/mm/vmem.c +++ b/arch/s390/mm/vmem.c @@ -10,6 +10,7 @@  #include <linux/list.h>  #include <linux/hugetlb.h>  #include <linux/slab.h> +#include <linux/memblock.h>  #include <asm/pgalloc.h>  #include <asm/pgtable.h>  #include <asm/setup.h> @@ -66,7 +67,8 @@ static pte_t __ref *vmem_pte_alloc(unsigned long address)  	if (slab_is_available())  		pte = (pte_t *) page_table_alloc(&init_mm, address);  	else -		pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t)); +		pte = alloc_bootmem_align(PTRS_PER_PTE * sizeof(pte_t), +					  PTRS_PER_PTE * sizeof(pte_t));  	if (!pte)  		return NULL;  	clear_table((unsigned long *) pte, _PAGE_INVALID, @@ -138,7 +140,6 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)  	}  	ret = 0;  out: -	flush_tlb_kernel_range(start, end);  	return ret;  } @@ -265,7 +266,6 @@ int __meminit 
vmemmap_populate(unsigned long start, unsigned long end, int node)  	memset((void *)start, 0, end - start);  	ret = 0;  out: -	flush_tlb_kernel_range(start, end);  	return ret;  } @@ -373,16 +373,14 @@ out:  void __init vmem_map_init(void)  {  	unsigned long ro_start, ro_end; -	unsigned long start, end; -	int i; +	struct memblock_region *reg; +	phys_addr_t start, end;  	ro_start = PFN_ALIGN((unsigned long)&_stext);  	ro_end = (unsigned long)&_eshared & PAGE_MASK; -	for (i = 0; i < MEMORY_CHUNKS; i++) { -		if (!memory_chunk[i].size) -			continue; -		start = memory_chunk[i].addr; -		end = memory_chunk[i].addr + memory_chunk[i].size; +	for_each_memblock(memory, reg) { +		start = reg->base; +		end = reg->base + reg->size - 1;  		if (start >= ro_end || end <= ro_start)  			vmem_add_mem(start, end - start, 0);  		else if (start >= ro_start && end <= ro_end) @@ -402,23 +400,21 @@ void __init vmem_map_init(void)  }  /* - * Convert memory chunk array to a memory segment list so there is a single - * list that contains both r/w memory and shared memory segments. + * Convert memblock.memory  to a memory segment list so there is a single + * list that contains all memory segments.   */  static int __init vmem_convert_memory_chunk(void)  { +	struct memblock_region *reg;  	struct memory_segment *seg; -	int i;  	mutex_lock(&vmem_mutex); -	for (i = 0; i < MEMORY_CHUNKS; i++) { -		if (!memory_chunk[i].size) -			continue; +	for_each_memblock(memory, reg) {  		seg = kzalloc(sizeof(*seg), GFP_KERNEL);  		if (!seg)  			panic("Out of memory...\n"); -		seg->start = memory_chunk[i].addr; -		seg->size = memory_chunk[i].size; +		seg->start = reg->base; +		seg->size = reg->size;  		insert_memory_segment(seg);  	}  	mutex_unlock(&vmem_mutex);  | 
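Note on the fault.c hunk above: the new dump_pagetable()/dump_fault_info() helpers walk the faulting address space one table level at a time and print one entry per level. Illustrative only, not part of the patch: a minimal userspace C sketch of the 64-bit address decomposition those helpers use, with the shift/mask values taken directly from the hunk (11-bit indices for the region and segment levels, 8 bits for the page index); the sample address is an arbitrary placeholder.

#include <stdio.h>

int main(void)
{
	unsigned long address = 0x3ffff80a000UL;	/* arbitrary example address */

	/* Region-first, region-second, region-third and segment indices are
	 * 11 bits wide; the page index is 8 bits (256 entries per page table,
	 * matching the masks used in dump_pagetable() above). */
	printf("R1 index: %lu\n", (address >> 53) & 0x7ff);
	printf("R2 index: %lu\n", (address >> 42) & 0x7ff);
	printf("R3 index: %lu\n", (address >> 31) & 0x7ff);
	printf("SX index: %lu\n", (address >> 20) & 0x7ff);
	printf("PX index: %lu\n", (address >> 12) & 0xff);
	return 0;
}

The kernel routine additionally starts the walk at the level selected by the ASCE type bits and stops early on invalid or large entries, as the switch/fallthrough structure in the hunk shows.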
