Diffstat (limited to 'arch/s390/kernel/vdso.c')
-rw-r--r--  arch/s390/kernel/vdso.c  |  64
1 file changed, 21 insertions(+), 43 deletions(-)
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index e3150dd2fe7..61364909678 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -25,12 +25,12 @@
 #include <linux/compat.h>
 #include <asm/asm-offsets.h>
 #include <asm/pgtable.h>
-#include <asm/system.h>
 #include <asm/processor.h>
 #include <asm/mmu.h>
 #include <asm/mmu_context.h>
 #include <asm/sections.h>
 #include <asm/vdso.h>
+#include <asm/facility.h>
 
 #if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT)
 extern char vdso32_start, vdso32_end;
@@ -63,7 +63,7 @@ static int __init vdso_setup(char *s)
 	else if (strncmp(s, "off", 4) == 0)
 		vdso_enabled = 0;
 	else {
-		rc = strict_strtoul(s, 0, &val);
+		rc = kstrtoul(s, 0, &val);
 		vdso_enabled = rc ? 0 : !!val;
 	}
 	return !rc;
@@ -84,23 +84,16 @@ struct vdso_data *vdso_data = &vdso_data_store.data;
  */
 static void vdso_init_data(struct vdso_data *vd)
 {
-	vd->ectg_available = user_mode != HOME_SPACE_MODE && test_facility(31);
+	vd->ectg_available = test_facility(31);
 }
 
 #ifdef CONFIG_64BIT
 /*
- * Setup per cpu vdso data page.
- */
-static void vdso_init_per_cpu_data(int cpu, struct vdso_per_cpu_data *vpcd)
-{
-}
-
-/*
  * Allocate/free per cpu vdso data.
  */
 #define SEGMENT_ORDER	2
 
-int vdso_alloc_per_cpu(int cpu, struct _lowcore *lowcore)
+int vdso_alloc_per_cpu(struct _lowcore *lowcore)
 {
 	unsigned long segment_table, page_table, page_frame;
 	u32 *psal, *aste;
@@ -108,7 +101,7 @@ int vdso_alloc_per_cpu(int cpu, struct _lowcore *lowcore)
 	lowcore->vdso_per_cpu_data = __LC_PASTE;
 
-	if (user_mode == HOME_SPACE_MODE || !vdso_enabled)
+	if (!vdso_enabled)
 		return 0;
 
 	segment_table = __get_free_pages(GFP_KERNEL, SEGMENT_ORDER);
@@ -119,11 +112,11 @@ int vdso_alloc_per_cpu(int cpu, struct _lowcore *lowcore)
 	clear_table((unsigned long *) segment_table, _SEGMENT_ENTRY_EMPTY,
 		    PAGE_SIZE << SEGMENT_ORDER);
-	clear_table((unsigned long *) page_table, _PAGE_TYPE_EMPTY,
+	clear_table((unsigned long *) page_table, _PAGE_INVALID,
 		    256*sizeof(unsigned long));
 
 	*(unsigned long *) segment_table = _SEGMENT_ENTRY + page_table;
-	*(unsigned long *) page_table = _PAGE_RO + page_frame;
+	*(unsigned long *) page_table = _PAGE_PROTECT + page_frame;
 
 	psal = (u32 *) (page_table + 256*sizeof(unsigned long));
 	aste = psal + 32;
@@ -132,14 +125,13 @@ int vdso_alloc_per_cpu(int cpu, struct _lowcore *lowcore)
 		psal[i] = 0x80000000;
 
 	lowcore->paste[4] = (u32)(addr_t) psal;
-	psal[0] = 0x20000000;
+	psal[0] = 0x02000000;
 	psal[2] = (u32)(addr_t) aste;
 	*(unsigned long *) (aste + 2) = segment_table +
 		_ASCE_TABLE_LENGTH + _ASCE_USER_BITS + _ASCE_TYPE_SEGMENT;
 	aste[4] = (u32)(addr_t) psal;
 	lowcore->vdso_per_cpu_data = page_frame;
-	vdso_init_per_cpu_data(cpu, (struct vdso_per_cpu_data *) page_frame);
 	return 0;
 
 out:
@@ -149,12 +141,12 @@ out:
 	free_pages(segment_table, SEGMENT_ORDER);
 	return -ENOMEM;
 }
 
-void vdso_free_per_cpu(int cpu, struct _lowcore *lowcore)
+void vdso_free_per_cpu(struct _lowcore *lowcore)
 {
 	unsigned long segment_table, page_table, page_frame;
 	u32 *psal, *aste;
 
-	if (user_mode == HOME_SPACE_MODE || !vdso_enabled)
+	if (!vdso_enabled)
 		return;
 
 	psal = (u32 *)(addr_t) lowcore->paste[4];
@@ -168,19 +160,15 @@ void vdso_free_per_cpu(int cpu, struct _lowcore *lowcore)
 	free_pages(segment_table, SEGMENT_ORDER);
 }
 
-static void __vdso_init_cr5(void *dummy)
+static void vdso_init_cr5(void)
 {
 	unsigned long cr5;
 
+	if (!vdso_enabled)
+		return;
 	cr5 = offsetof(struct _lowcore, paste);
 	__ctl_load(cr5, 5, 5);
 }
-
-static void vdso_init_cr5(void)
-{
-	if (user_mode != HOME_SPACE_MODE && vdso_enabled)
-		on_each_cpu(__vdso_init_cr5, NULL, 1);
-}
 #endif /* CONFIG_64BIT */
 
 /*
@@ -203,7 +191,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 	if (!uses_interp)
 		return 0;
 
-	vdso_base = mm->mmap_base;
 #ifdef CONFIG_64BIT
 	vdso_pagelist = vdso64_pagelist;
 	vdso_pages = vdso64_pages;
@@ -233,8 +220,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 	 * fail and end up putting it elsewhere.
 	 */
 	down_write(&mm->mmap_sem);
-	vdso_base = get_unmapped_area(NULL, vdso_base,
-				      vdso_pages << PAGE_SHIFT, 0, 0);
+	vdso_base = get_unmapped_area(NULL, 0, vdso_pages << PAGE_SHIFT, 0, 0);
 	if (IS_ERR_VALUE(vdso_base)) {
 		rc = vdso_base;
 		goto out_up;
 	}
@@ -255,17 +241,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 	 * on the "data" page of the vDSO or you'll stop getting kernel
 	 * updates and your nice userland gettimeofday will be totally dead.
 	 * It's fine to use that for setting breakpoints in the vDSO code
-	 * pages though
-	 *
-	 * Make sure the vDSO gets into every core dump.
-	 * Dumping its contents makes post-mortem fully interpretable later
-	 * without matching up the same kernel and hardware config to see
-	 * what PC values meant.
+	 * pages though.
 	 */
 	rc = install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
 				     VM_READ|VM_EXEC|
-				     VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
-				     VM_ALWAYSDUMP,
+				     VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
 				     vdso_pagelist);
 	if (rc)
 		current->mm->context.vdso_base = 0;
@@ -324,10 +304,8 @@ static int __init vdso_init(void)
 	}
 	vdso64_pagelist[vdso64_pages - 1] = virt_to_page(vdso_data);
 	vdso64_pagelist[vdso64_pages] = NULL;
-#ifndef CONFIG_SMP
-	if (vdso_alloc_per_cpu(0, &S390_lowcore))
+	if (vdso_alloc_per_cpu(&S390_lowcore))
 		BUG();
-#endif
 	vdso_init_cr5();
 #endif /* CONFIG_64BIT */
@@ -337,19 +315,19 @@ static int __init vdso_init(void)
 	return 0;
 }
-arch_initcall(vdso_init);
+early_initcall(vdso_init);
 
-int in_gate_area_no_task(unsigned long addr)
+int in_gate_area_no_mm(unsigned long addr)
 {
 	return 0;
 }
 
-int in_gate_area(struct task_struct *task, unsigned long addr)
+int in_gate_area(struct mm_struct *mm, unsigned long addr)
 {
 	return 0;
 }
 
-struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
+struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
 {
 	return NULL;
 }
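For reference, a minimal user-space sketch of the "vdso=" parameter handling that the vdso_setup() hunk above switches from strict_strtoul() to kstrtoul(). This is not part of the patch: it substitutes the standard C strtoul() with explicit errno/end-pointer checking for the kernel's kstrtoul(), and the main() harness with its sample inputs is purely illustrative.

/*
 * Illustration only: "on"/"off" are matched explicitly; anything else is
 * parsed as a number and vdso_enabled becomes 0 on parse error, otherwise
 * 0 or 1 depending on the value (mirrors the patched vdso_setup() logic).
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static unsigned long vdso_enabled = 1;

static int vdso_setup(const char *s)
{
	unsigned long val;
	char *end;
	int rc = 0;

	if (strncmp(s, "on", 3) == 0)
		vdso_enabled = 1;
	else if (strncmp(s, "off", 4) == 0)
		vdso_enabled = 0;
	else {
		/* stand-in for kstrtoul(s, 0, &val) */
		errno = 0;
		val = strtoul(s, &end, 0);
		rc = (errno || *end != '\0') ? -EINVAL : 0;
		vdso_enabled = rc ? 0 : !!val;
	}
	return !rc;
}

int main(void)
{
	const char *samples[] = { "on", "off", "1", "0", "2", "bogus" };

	for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		vdso_setup(samples[i]);
		printf("vdso=%-5s -> vdso_enabled=%lu\n",
		       samples[i], vdso_enabled);
	}
	return 0;
}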
