Diffstat (limited to 'arch/s390/kernel')
79 files changed, 9129 insertions, 7087 deletions
diff --git a/arch/s390/kernel/.gitignore b/arch/s390/kernel/.gitignore
new file mode 100644
index 00000000000..c5f676c3c22
--- /dev/null
+++ b/arch/s390/kernel/.gitignore
@@ -0,0 +1 @@
+vmlinux.lds
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 7d9ec924e7e..a95c4ca9961 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -14,34 +14,41 @@ endif
 CFLAGS_smp.o	:= -Wno-nonnull

 #
+# Disable tailcall optimizations for stack / callchain walking functions
+# since this might generate broken code when accessing register 15 and
+# passing its content to other functions.
+#
+CFLAGS_stacktrace.o	+= -fno-optimize-sibling-calls
+CFLAGS_dumpstack.o	+= -fno-optimize-sibling-calls
+
+#
 # Pass UTS_MACHINE for user_regset definition
 #
 CFLAGS_ptrace.o		+= -DUTS_MACHINE='"$(UTS_MACHINE)"'

 CFLAGS_sysinfo.o	+= -Iinclude/math-emu -Iarch/s390/math-emu -w

-obj-y	:= bitmap.o traps.o time.o process.o base.o early.o setup.o vtime.o \
-	   processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o \
-	   debug.o irq.o ipl.o dis.o diag.o mem_detect.o sclp.o vdso.o \
-	   sysinfo.o jump_label.o
+obj-y	:= traps.o time.o process.o base.o early.o setup.o vtime.o
+obj-y	+= processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
+obj-y	+= debug.o irq.o ipl.o dis.o diag.o sclp.o vdso.o
+obj-y	+= sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
+obj-y	+= dumpstack.o

 obj-y	+= $(if $(CONFIG_64BIT),entry64.o,entry.o)
 obj-y	+= $(if $(CONFIG_64BIT),reipl64.o,reipl.o)
+obj-y	+= $(if $(CONFIG_64BIT),relocate_kernel64.o,relocate_kernel.o)

-extra-y				+= head.o init_task.o vmlinux.lds
+extra-y				+= head.o vmlinux.lds
 extra-y				+= $(if $(CONFIG_64BIT),head64.o,head31.o)

 obj-$(CONFIG_MODULES)		+= s390_ksyms.o module.o
 obj-$(CONFIG_SMP)		+= smp.o
 obj-$(CONFIG_SCHED_BOOK)	+= topology.o
-obj-$(CONFIG_SMP)		+= $(if $(CONFIG_64BIT),switch_cpu64.o, \
-						switch_cpu.o)
 obj-$(CONFIG_HIBERNATION)	+= suspend.o swsusp_asm64.o
 obj-$(CONFIG_AUDIT)		+= audit.o
 compat-obj-$(CONFIG_AUDIT)	+= compat_audit.o
-obj-$(CONFIG_COMPAT)		+= compat_linux.o compat_signal.o \
-					compat_wrapper.o compat_exec_domain.o \
-					$(compat-obj-y)
+obj-$(CONFIG_COMPAT)		+= compat_linux.o compat_signal.o
+obj-$(CONFIG_COMPAT)		+= compat_wrapper.o $(compat-obj-y)

 obj-$(CONFIG_STACKTRACE)	+= stacktrace.o
 obj-$(CONFIG_KPROBES)		+= kprobes.o
@@ -51,10 +58,11 @@ obj-$(CONFIG_FUNCTION_GRAPH_TRACER)	+= ftrace.o
 obj-$(CONFIG_FTRACE_SYSCALLS)	+= ftrace.o
 obj-$(CONFIG_CRASH_DUMP)	+= crash_dump.o

-# Kexec part
-S390_KEXEC_OBJS := machine_kexec.o crash.o
-S390_KEXEC_OBJS += $(if $(CONFIG_64BIT),relocate_kernel64.o,relocate_kernel.o)
-obj-$(CONFIG_KEXEC) += $(S390_KEXEC_OBJS)
+ifdef CONFIG_64BIT
+obj-$(CONFIG_PERF_EVENTS)	+= perf_event.o perf_cpum_cf.o perf_cpum_sf.o \
+				   perf_cpum_cf_events.o
+obj-y				+= runtime_instr.o cache.o
+endif

 # vdso
 obj-$(CONFIG_64BIT)		+= vdso64/
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 6e6a72e66d6..afe1715a4eb 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -7,9 +7,10 @@
 #define ASM_OFFSETS_C

 #include <linux/kbuild.h>
+#include <linux/kvm_host.h>
 #include <linux/sched.h>
+#include <asm/cputime.h>
 #include <asm/vdso.h>
-#include <asm/sigp.h>
 #include <asm/pgtable.h>

 /*
@@ -35,6 +36,7 @@ int main(void)
	DEFINE(__TI_task, offsetof(struct thread_info, task));
	DEFINE(__TI_domain, offsetof(struct thread_info, exec_domain));
	DEFINE(__TI_flags, offsetof(struct thread_info, flags));
+	DEFINE(__TI_sysc_table, offsetof(struct thread_info,
sys_call_table)); DEFINE(__TI_cpu, offsetof(struct thread_info, cpu)); DEFINE(__TI_precount, offsetof(struct thread_info, preempt_count)); DEFINE(__TI_user_timer, offsetof(struct thread_info, user_timer)); @@ -46,7 +48,9 @@ int main(void) DEFINE(__PT_GPRS, offsetof(struct pt_regs, gprs)); DEFINE(__PT_ORIG_GPR2, offsetof(struct pt_regs, orig_gpr2)); DEFINE(__PT_INT_CODE, offsetof(struct pt_regs, int_code)); + DEFINE(__PT_INT_PARM, offsetof(struct pt_regs, int_parm)); DEFINE(__PT_INT_PARM_LONG, offsetof(struct pt_regs, int_parm_long)); + DEFINE(__PT_FLAGS, offsetof(struct pt_regs, flags)); DEFINE(__PT_SIZE, sizeof(struct pt_regs)); BLANK(); DEFINE(__SF_BACKCHAIN, offsetof(struct stack_frame, back_chain)); @@ -62,56 +66,63 @@ int main(void) DEFINE(__VDSO_WTOM_NSEC, offsetof(struct vdso_data, wtom_clock_nsec)); DEFINE(__VDSO_TIMEZONE, offsetof(struct vdso_data, tz_minuteswest)); DEFINE(__VDSO_ECTG_OK, offsetof(struct vdso_data, ectg_available)); - DEFINE(__VDSO_NTP_MULT, offsetof(struct vdso_data, ntp_mult)); + DEFINE(__VDSO_TK_MULT, offsetof(struct vdso_data, tk_mult)); + DEFINE(__VDSO_TK_SHIFT, offsetof(struct vdso_data, tk_shift)); DEFINE(__VDSO_ECTG_BASE, offsetof(struct vdso_per_cpu_data, ectg_timer_base)); DEFINE(__VDSO_ECTG_USER, offsetof(struct vdso_per_cpu_data, ectg_user_time)); /* constants used by the vdso */ DEFINE(__CLOCK_REALTIME, CLOCK_REALTIME); DEFINE(__CLOCK_MONOTONIC, CLOCK_MONOTONIC); + DEFINE(__CLOCK_THREAD_CPUTIME_ID, CLOCK_THREAD_CPUTIME_ID); DEFINE(__CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC); BLANK(); - /* constants for SIGP */ - DEFINE(__SIGP_STOP, sigp_stop); - DEFINE(__SIGP_RESTART, sigp_restart); - DEFINE(__SIGP_SENSE, sigp_sense); - DEFINE(__SIGP_INITIAL_CPU_RESET, sigp_initial_cpu_reset); - BLANK(); + /* idle data offsets */ + DEFINE(__CLOCK_IDLE_ENTER, offsetof(struct s390_idle_data, clock_idle_enter)); + DEFINE(__CLOCK_IDLE_EXIT, offsetof(struct s390_idle_data, clock_idle_exit)); + DEFINE(__TIMER_IDLE_ENTER, offsetof(struct s390_idle_data, timer_idle_enter)); + DEFINE(__TIMER_IDLE_EXIT, offsetof(struct s390_idle_data, timer_idle_exit)); /* lowcore offsets */ DEFINE(__LC_EXT_PARAMS, offsetof(struct _lowcore, ext_params)); - DEFINE(__LC_CPU_ADDRESS, offsetof(struct _lowcore, cpu_addr)); + DEFINE(__LC_EXT_CPU_ADDR, offsetof(struct _lowcore, ext_cpu_addr)); DEFINE(__LC_EXT_INT_CODE, offsetof(struct _lowcore, ext_int_code)); DEFINE(__LC_SVC_ILC, offsetof(struct _lowcore, svc_ilc)); DEFINE(__LC_SVC_INT_CODE, offsetof(struct _lowcore, svc_code)); DEFINE(__LC_PGM_ILC, offsetof(struct _lowcore, pgm_ilc)); DEFINE(__LC_PGM_INT_CODE, offsetof(struct _lowcore, pgm_code)); DEFINE(__LC_TRANS_EXC_CODE, offsetof(struct _lowcore, trans_exc_code)); - DEFINE(__LC_PER_CAUSE, offsetof(struct _lowcore, per_perc_atmid)); + DEFINE(__LC_MON_CLASS_NR, offsetof(struct _lowcore, mon_class_num)); + DEFINE(__LC_PER_CODE, offsetof(struct _lowcore, per_code)); + DEFINE(__LC_PER_ATMID, offsetof(struct _lowcore, per_atmid)); DEFINE(__LC_PER_ADDRESS, offsetof(struct _lowcore, per_address)); - DEFINE(__LC_PER_PAID, offsetof(struct _lowcore, per_access_id)); - DEFINE(__LC_AR_MODE_ID, offsetof(struct _lowcore, ar_access_id)); + DEFINE(__LC_EXC_ACCESS_ID, offsetof(struct _lowcore, exc_access_id)); + DEFINE(__LC_PER_ACCESS_ID, offsetof(struct _lowcore, per_access_id)); + DEFINE(__LC_OP_ACCESS_ID, offsetof(struct _lowcore, op_access_id)); + DEFINE(__LC_AR_MODE_ID, offsetof(struct _lowcore, ar_mode_id)); + DEFINE(__LC_MON_CODE, offsetof(struct _lowcore, monitor_code)); DEFINE(__LC_SUBCHANNEL_ID, 
offsetof(struct _lowcore, subchannel_id)); DEFINE(__LC_SUBCHANNEL_NR, offsetof(struct _lowcore, subchannel_nr)); DEFINE(__LC_IO_INT_PARM, offsetof(struct _lowcore, io_int_parm)); DEFINE(__LC_IO_INT_WORD, offsetof(struct _lowcore, io_int_word)); DEFINE(__LC_STFL_FAC_LIST, offsetof(struct _lowcore, stfl_fac_list)); DEFINE(__LC_MCCK_CODE, offsetof(struct _lowcore, mcck_interruption_code)); - DEFINE(__LC_DUMP_REIPL, offsetof(struct _lowcore, ipib)); - BLANK(); - DEFINE(__LC_RST_NEW_PSW, offsetof(struct _lowcore, restart_psw)); + DEFINE(__LC_MCCK_EXT_DAM_CODE, offsetof(struct _lowcore, external_damage_code)); DEFINE(__LC_RST_OLD_PSW, offsetof(struct _lowcore, restart_old_psw)); DEFINE(__LC_EXT_OLD_PSW, offsetof(struct _lowcore, external_old_psw)); DEFINE(__LC_SVC_OLD_PSW, offsetof(struct _lowcore, svc_old_psw)); DEFINE(__LC_PGM_OLD_PSW, offsetof(struct _lowcore, program_old_psw)); DEFINE(__LC_MCK_OLD_PSW, offsetof(struct _lowcore, mcck_old_psw)); DEFINE(__LC_IO_OLD_PSW, offsetof(struct _lowcore, io_old_psw)); + DEFINE(__LC_RST_NEW_PSW, offsetof(struct _lowcore, restart_psw)); DEFINE(__LC_EXT_NEW_PSW, offsetof(struct _lowcore, external_new_psw)); DEFINE(__LC_SVC_NEW_PSW, offsetof(struct _lowcore, svc_new_psw)); DEFINE(__LC_PGM_NEW_PSW, offsetof(struct _lowcore, program_new_psw)); DEFINE(__LC_MCK_NEW_PSW, offsetof(struct _lowcore, mcck_new_psw)); DEFINE(__LC_IO_NEW_PSW, offsetof(struct _lowcore, io_new_psw)); + BLANK(); DEFINE(__LC_SAVE_AREA_SYNC, offsetof(struct _lowcore, save_area_sync)); DEFINE(__LC_SAVE_AREA_ASYNC, offsetof(struct _lowcore, save_area_async)); DEFINE(__LC_SAVE_AREA_RESTART, offsetof(struct _lowcore, save_area_restart)); + DEFINE(__LC_CPU_FLAGS, offsetof(struct _lowcore, cpu_flags)); DEFINE(__LC_RETURN_PSW, offsetof(struct _lowcore, return_psw)); DEFINE(__LC_RETURN_MCCK_PSW, offsetof(struct _lowcore, return_mcck_psw)); DEFINE(__LC_SYNC_ENTER_TIMER, offsetof(struct _lowcore, sync_enter_timer)); @@ -129,12 +140,18 @@ int main(void) DEFINE(__LC_KERNEL_STACK, offsetof(struct _lowcore, kernel_stack)); DEFINE(__LC_ASYNC_STACK, offsetof(struct _lowcore, async_stack)); DEFINE(__LC_PANIC_STACK, offsetof(struct _lowcore, panic_stack)); + DEFINE(__LC_RESTART_STACK, offsetof(struct _lowcore, restart_stack)); + DEFINE(__LC_RESTART_FN, offsetof(struct _lowcore, restart_fn)); + DEFINE(__LC_RESTART_DATA, offsetof(struct _lowcore, restart_data)); + DEFINE(__LC_RESTART_SOURCE, offsetof(struct _lowcore, restart_source)); + DEFINE(__LC_KERNEL_ASCE, offsetof(struct _lowcore, kernel_asce)); DEFINE(__LC_USER_ASCE, offsetof(struct _lowcore, user_asce)); DEFINE(__LC_INT_CLOCK, offsetof(struct _lowcore, int_clock)); DEFINE(__LC_MCCK_CLOCK, offsetof(struct _lowcore, mcck_clock)); DEFINE(__LC_MACHINE_FLAGS, offsetof(struct _lowcore, machine_flags)); DEFINE(__LC_FTRACE_FUNC, offsetof(struct _lowcore, ftrace_func)); - DEFINE(__LC_IRB, offsetof(struct _lowcore, irb)); + DEFINE(__LC_DUMP_REIPL, offsetof(struct _lowcore, ipib)); + BLANK(); DEFINE(__LC_CPU_TIMER_SAVE_AREA, offsetof(struct _lowcore, cpu_timer_save_area)); DEFINE(__LC_CLOCK_COMP_SAVE_AREA, offsetof(struct _lowcore, clock_comp_save_area)); DEFINE(__LC_PSW_SAVE_AREA, offsetof(struct _lowcore, psw_save_area)); @@ -146,6 +163,8 @@ int main(void) #ifdef CONFIG_32BIT DEFINE(SAVE_AREA_BASE, offsetof(struct _lowcore, extended_save_area_addr)); #else /* CONFIG_32BIT */ + DEFINE(__LC_DATA_EXC_CODE, offsetof(struct _lowcore, data_exc_code)); + DEFINE(__LC_MCCK_FAIL_STOR_ADDR, offsetof(struct _lowcore, failing_storage_address)); DEFINE(__LC_EXT_PARAMS2, 
offsetof(struct _lowcore, ext_params2)); DEFINE(SAVE_AREA_BASE, offsetof(struct _lowcore, floating_pt_save_area)); DEFINE(__LC_PASTE, offsetof(struct _lowcore, paste)); @@ -153,7 +172,11 @@ int main(void) DEFINE(__LC_LAST_BREAK, offsetof(struct _lowcore, breaking_event_addr)); DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data)); DEFINE(__LC_GMAP, offsetof(struct _lowcore, gmap)); + DEFINE(__LC_PGM_TDB, offsetof(struct _lowcore, pgm_tdb)); + DEFINE(__THREAD_trap_tdb, offsetof(struct task_struct, thread.trap_tdb)); DEFINE(__GMAP_ASCE, offsetof(struct gmap, asce)); + DEFINE(__SIE_PROG0C, offsetof(struct kvm_s390_sie_block, prog0c)); + DEFINE(__SIE_PROG20, offsetof(struct kvm_s390_sie_block, prog20)); #endif /* CONFIG_32BIT */ return 0; } diff --git a/arch/s390/kernel/base.S b/arch/s390/kernel/base.S index 3aa4d00aaf5..797a823a227 100644 --- a/arch/s390/kernel/base.S +++ b/arch/s390/kernel/base.S @@ -1,7 +1,7 @@ /* * arch/s390/kernel/base.S * - * Copyright IBM Corp. 2006,2007 + * Copyright IBM Corp. 2006, 2007 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> * Michael Holzheu <holzheu@de.ibm.com> */ @@ -9,6 +9,7 @@ #include <linux/linkage.h> #include <asm/asm-offsets.h> #include <asm/ptrace.h> +#include <asm/sigp.h> #ifdef CONFIG_64BIT @@ -88,6 +89,9 @@ ENTRY(diag308_reset) stctg %c0,%c15,0(%r4) larl %r4,.Lfpctl # Floating point control register stfpc 0(%r4) + larl %r4,.Lcontinue_psw # Save PSW flags + epsw %r2,%r3 + stm %r2,%r3,0(%r4) larl %r4,.Lrestart_psw # Setup restart PSW at absolute 0 lghi %r3,0 lg %r4,0(%r4) # Save PSW @@ -97,17 +101,26 @@ ENTRY(diag308_reset) .Lrestart_part2: lhi %r0,0 # Load r0 with zero lhi %r1,2 # Use mode 2 = ESAME (dump) - sigp %r1,%r0,0x12 # Switch to ESAME mode + sigp %r1,%r0,SIGP_SET_ARCHITECTURE # Switch to ESAME mode sam64 # Switch to 64 bit addressing mode larl %r4,.Lctlregs # Restore control registers lctlg %c0,%c15,0(%r4) larl %r4,.Lfpctl # Restore floating point ctl register lfpc 0(%r4) + larl %r4,.Lcontinue_psw # Restore PSW flags + lpswe 0(%r4) +.Lcontinue: br %r14 .align 16 .Lrestart_psw: .long 0x00080000,0x80000000 + .Lrestart_part2 + .section .data..nosave,"aw",@progbits +.align 8 +.Lcontinue_psw: + .quad 0,.Lcontinue + .previous + .section .bss .align 8 .Lctlregs: diff --git a/arch/s390/kernel/bitmap.c b/arch/s390/kernel/bitmap.c deleted file mode 100644 index 3ae4757b006..00000000000 --- a/arch/s390/kernel/bitmap.c +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Bitmaps for set_bit, clear_bit, test_and_set_bit, ... - * See include/asm/{bitops.h|posix_types.h} for details - * - * Copyright IBM Corp. 
1999,2009 - * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>, - */ - -#include <linux/bitops.h> -#include <linux/module.h> - -const char _oi_bitmap[] = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80 }; -EXPORT_SYMBOL(_oi_bitmap); - -const char _ni_bitmap[] = { 0xfe, 0xfd, 0xfb, 0xf7, 0xef, 0xdf, 0xbf, 0x7f }; -EXPORT_SYMBOL(_ni_bitmap); - -const char _zb_findmap[] = { - 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4, - 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5, - 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4, - 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,6, - 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4, - 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5, - 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4, - 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,7, - 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4, - 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5, - 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4, - 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,6, - 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4, - 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5, - 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4, - 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,8 }; -EXPORT_SYMBOL(_zb_findmap); - -const char _sb_findmap[] = { - 8,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, - 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, - 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, - 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, - 6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, - 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, - 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, - 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, - 7,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, - 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, - 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, - 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, - 6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, - 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, - 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, - 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 }; -EXPORT_SYMBOL(_sb_findmap); diff --git a/arch/s390/kernel/cache.c b/arch/s390/kernel/cache.c new file mode 100644 index 00000000000..c0b03c28d15 --- /dev/null +++ b/arch/s390/kernel/cache.c @@ -0,0 +1,389 @@ +/* + * Extract CPU cache information and expose them via sysfs. + * + * Copyright IBM Corp. 
2012 + * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> + */ + +#include <linux/notifier.h> +#include <linux/seq_file.h> +#include <linux/init.h> +#include <linux/list.h> +#include <linux/slab.h> +#include <linux/cpu.h> +#include <asm/facility.h> + +struct cache { + unsigned long size; + unsigned int line_size; + unsigned int associativity; + unsigned int nr_sets; + unsigned int level : 3; + unsigned int type : 2; + unsigned int private : 1; + struct list_head list; +}; + +struct cache_dir { + struct kobject *kobj; + struct cache_index_dir *index; +}; + +struct cache_index_dir { + struct kobject kobj; + int cpu; + struct cache *cache; + struct cache_index_dir *next; +}; + +enum { + CACHE_SCOPE_NOTEXISTS, + CACHE_SCOPE_PRIVATE, + CACHE_SCOPE_SHARED, + CACHE_SCOPE_RESERVED, +}; + +enum { + CACHE_TYPE_SEPARATE, + CACHE_TYPE_DATA, + CACHE_TYPE_INSTRUCTION, + CACHE_TYPE_UNIFIED, +}; + +enum { + EXTRACT_TOPOLOGY, + EXTRACT_LINE_SIZE, + EXTRACT_SIZE, + EXTRACT_ASSOCIATIVITY, +}; + +enum { + CACHE_TI_UNIFIED = 0, + CACHE_TI_DATA = 0, + CACHE_TI_INSTRUCTION, +}; + +struct cache_info { + unsigned char : 4; + unsigned char scope : 2; + unsigned char type : 2; +}; + +#define CACHE_MAX_LEVEL 8 + +union cache_topology { + struct cache_info ci[CACHE_MAX_LEVEL]; + unsigned long long raw; +}; + +static const char * const cache_type_string[] = { + "Data", + "Instruction", + "Unified", +}; + +static struct cache_dir *cache_dir_cpu[NR_CPUS]; +static LIST_HEAD(cache_list); + +void show_cacheinfo(struct seq_file *m) +{ + struct cache *cache; + int index = 0; + + list_for_each_entry(cache, &cache_list, list) { + seq_printf(m, "cache%-11d: ", index); + seq_printf(m, "level=%d ", cache->level); + seq_printf(m, "type=%s ", cache_type_string[cache->type]); + seq_printf(m, "scope=%s ", cache->private ? 
"Private" : "Shared"); + seq_printf(m, "size=%luK ", cache->size >> 10); + seq_printf(m, "line_size=%u ", cache->line_size); + seq_printf(m, "associativity=%d", cache->associativity); + seq_puts(m, "\n"); + index++; + } +} + +static inline unsigned long ecag(int ai, int li, int ti) +{ + unsigned long cmd, val; + + cmd = ai << 4 | li << 1 | ti; + asm volatile(".insn rsy,0xeb000000004c,%0,0,0(%1)" /* ecag */ + : "=d" (val) : "a" (cmd)); + return val; +} + +static int __init cache_add(int level, int private, int type) +{ + struct cache *cache; + int ti; + + cache = kzalloc(sizeof(*cache), GFP_KERNEL); + if (!cache) + return -ENOMEM; + if (type == CACHE_TYPE_INSTRUCTION) + ti = CACHE_TI_INSTRUCTION; + else + ti = CACHE_TI_UNIFIED; + cache->size = ecag(EXTRACT_SIZE, level, ti); + cache->line_size = ecag(EXTRACT_LINE_SIZE, level, ti); + cache->associativity = ecag(EXTRACT_ASSOCIATIVITY, level, ti); + cache->nr_sets = cache->size / cache->associativity; + cache->nr_sets /= cache->line_size; + cache->private = private; + cache->level = level + 1; + cache->type = type - 1; + list_add_tail(&cache->list, &cache_list); + return 0; +} + +static void __init cache_build_info(void) +{ + struct cache *cache, *next; + union cache_topology ct; + int level, private, rc; + + ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0); + for (level = 0; level < CACHE_MAX_LEVEL; level++) { + switch (ct.ci[level].scope) { + case CACHE_SCOPE_SHARED: + private = 0; + break; + case CACHE_SCOPE_PRIVATE: + private = 1; + break; + default: + return; + } + if (ct.ci[level].type == CACHE_TYPE_SEPARATE) { + rc = cache_add(level, private, CACHE_TYPE_DATA); + rc |= cache_add(level, private, CACHE_TYPE_INSTRUCTION); + } else { + rc = cache_add(level, private, ct.ci[level].type); + } + if (rc) + goto error; + } + return; +error: + list_for_each_entry_safe(cache, next, &cache_list, list) { + list_del(&cache->list); + kfree(cache); + } +} + +static struct cache_dir *cache_create_cache_dir(int cpu) +{ + struct cache_dir *cache_dir; + struct kobject *kobj = NULL; + struct device *dev; + + dev = get_cpu_device(cpu); + if (!dev) + goto out; + kobj = kobject_create_and_add("cache", &dev->kobj); + if (!kobj) + goto out; + cache_dir = kzalloc(sizeof(*cache_dir), GFP_KERNEL); + if (!cache_dir) + goto out; + cache_dir->kobj = kobj; + cache_dir_cpu[cpu] = cache_dir; + return cache_dir; +out: + kobject_put(kobj); + return NULL; +} + +static struct cache_index_dir *kobj_to_cache_index_dir(struct kobject *kobj) +{ + return container_of(kobj, struct cache_index_dir, kobj); +} + +static void cache_index_release(struct kobject *kobj) +{ + struct cache_index_dir *index; + + index = kobj_to_cache_index_dir(kobj); + kfree(index); +} + +static ssize_t cache_index_show(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + struct kobj_attribute *kobj_attr; + + kobj_attr = container_of(attr, struct kobj_attribute, attr); + return kobj_attr->show(kobj, kobj_attr, buf); +} + +#define DEFINE_CACHE_ATTR(_name, _format, _value) \ +static ssize_t cache_##_name##_show(struct kobject *kobj, \ + struct kobj_attribute *attr, \ + char *buf) \ +{ \ + struct cache_index_dir *index; \ + \ + index = kobj_to_cache_index_dir(kobj); \ + return sprintf(buf, _format, _value); \ +} \ +static struct kobj_attribute cache_##_name##_attr = \ + __ATTR(_name, 0444, cache_##_name##_show, NULL); + +DEFINE_CACHE_ATTR(size, "%luK\n", index->cache->size >> 10); +DEFINE_CACHE_ATTR(coherency_line_size, "%u\n", index->cache->line_size); +DEFINE_CACHE_ATTR(number_of_sets, "%u\n", 
index->cache->nr_sets); +DEFINE_CACHE_ATTR(ways_of_associativity, "%u\n", index->cache->associativity); +DEFINE_CACHE_ATTR(type, "%s\n", cache_type_string[index->cache->type]); +DEFINE_CACHE_ATTR(level, "%d\n", index->cache->level); + +static ssize_t shared_cpu_map_func(struct kobject *kobj, int type, char *buf) +{ + struct cache_index_dir *index; + int len; + + index = kobj_to_cache_index_dir(kobj); + len = type ? + cpulist_scnprintf(buf, PAGE_SIZE - 2, cpumask_of(index->cpu)) : + cpumask_scnprintf(buf, PAGE_SIZE - 2, cpumask_of(index->cpu)); + len += sprintf(&buf[len], "\n"); + return len; +} + +static ssize_t shared_cpu_map_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + return shared_cpu_map_func(kobj, 0, buf); +} +static struct kobj_attribute cache_shared_cpu_map_attr = + __ATTR(shared_cpu_map, 0444, shared_cpu_map_show, NULL); + +static ssize_t shared_cpu_list_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + return shared_cpu_map_func(kobj, 1, buf); +} +static struct kobj_attribute cache_shared_cpu_list_attr = + __ATTR(shared_cpu_list, 0444, shared_cpu_list_show, NULL); + +static struct attribute *cache_index_default_attrs[] = { + &cache_type_attr.attr, + &cache_size_attr.attr, + &cache_number_of_sets_attr.attr, + &cache_ways_of_associativity_attr.attr, + &cache_level_attr.attr, + &cache_coherency_line_size_attr.attr, + &cache_shared_cpu_map_attr.attr, + &cache_shared_cpu_list_attr.attr, + NULL, +}; + +static const struct sysfs_ops cache_index_ops = { + .show = cache_index_show, +}; + +static struct kobj_type cache_index_type = { + .sysfs_ops = &cache_index_ops, + .release = cache_index_release, + .default_attrs = cache_index_default_attrs, +}; + +static int cache_create_index_dir(struct cache_dir *cache_dir, + struct cache *cache, int index, int cpu) +{ + struct cache_index_dir *index_dir; + int rc; + + index_dir = kzalloc(sizeof(*index_dir), GFP_KERNEL); + if (!index_dir) + return -ENOMEM; + index_dir->cache = cache; + index_dir->cpu = cpu; + rc = kobject_init_and_add(&index_dir->kobj, &cache_index_type, + cache_dir->kobj, "index%d", index); + if (rc) + goto out; + index_dir->next = cache_dir->index; + cache_dir->index = index_dir; + return 0; +out: + kfree(index_dir); + return rc; +} + +static int cache_add_cpu(int cpu) +{ + struct cache_dir *cache_dir; + struct cache *cache; + int rc, index = 0; + + if (list_empty(&cache_list)) + return 0; + cache_dir = cache_create_cache_dir(cpu); + if (!cache_dir) + return -ENOMEM; + list_for_each_entry(cache, &cache_list, list) { + if (!cache->private) + break; + rc = cache_create_index_dir(cache_dir, cache, index, cpu); + if (rc) + return rc; + index++; + } + return 0; +} + +static void cache_remove_cpu(int cpu) +{ + struct cache_index_dir *index, *next; + struct cache_dir *cache_dir; + + cache_dir = cache_dir_cpu[cpu]; + if (!cache_dir) + return; + index = cache_dir->index; + while (index) { + next = index->next; + kobject_put(&index->kobj); + index = next; + } + kobject_put(cache_dir->kobj); + kfree(cache_dir); + cache_dir_cpu[cpu] = NULL; +} + +static int cache_hotplug(struct notifier_block *nfb, unsigned long action, + void *hcpu) +{ + int cpu = (long)hcpu; + int rc = 0; + + switch (action & ~CPU_TASKS_FROZEN) { + case CPU_ONLINE: + rc = cache_add_cpu(cpu); + if (rc) + cache_remove_cpu(cpu); + break; + case CPU_DEAD: + cache_remove_cpu(cpu); + break; + } + return rc ? 
NOTIFY_BAD : NOTIFY_OK; +} + +static int __init cache_init(void) +{ + int cpu; + + if (!test_facility(34)) + return 0; + cache_build_info(); + + cpu_notifier_register_begin(); + for_each_online_cpu(cpu) + cache_add_cpu(cpu); + __hotcpu_notifier(cache_hotplug, 0); + cpu_notifier_register_done(); + return 0; +} +device_initcall(cache_init); diff --git a/arch/s390/kernel/compat_exec_domain.c b/arch/s390/kernel/compat_exec_domain.c deleted file mode 100644 index 914d49444f9..00000000000 --- a/arch/s390/kernel/compat_exec_domain.c +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Support for 32-bit Linux for S390 personality. - * - * Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation - * Author(s): Gerhard Tonn (ton@de.ibm.com) - * - * - */ - -#include <linux/kernel.h> -#include <linux/init.h> -#include <linux/personality.h> -#include <linux/sched.h> - -static struct exec_domain s390_exec_domain; - -static int __init s390_init (void) -{ - s390_exec_domain.name = "Linux/s390"; - s390_exec_domain.handler = NULL; - s390_exec_domain.pers_low = PER_LINUX32; - s390_exec_domain.pers_high = PER_LINUX32; - s390_exec_domain.signal_map = default_exec_domain.signal_map; - s390_exec_domain.signal_invmap = default_exec_domain.signal_invmap; - register_exec_domain(&s390_exec_domain); - return 0; -} - -__initcall(s390_init); diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c index ab64bdbab2a..ca38139423a 100644 --- a/arch/s390/kernel/compat_linux.c +++ b/arch/s390/kernel/compat_linux.c @@ -1,8 +1,6 @@ /* - * arch/s390x/kernel/linux32.c - * * S390 version - * Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Copyright IBM Corp. 2000 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), * Gerhard Tonn (ton@de.ibm.com) * Thomas Spatzier (tspat@de.ibm.com) @@ -60,10 +58,6 @@ #include "compat_linux.h" -u32 psw32_user_bits = PSW32_MASK_DAT | PSW32_MASK_IO | PSW32_MASK_EXT | - PSW32_DEFAULT_KEY | PSW32_MASK_BASE | PSW32_MASK_MCHECK | - PSW32_MASK_PSTATE | PSW32_ASC_HOME; - /* For this source file, we want overflow handling. 
*/ #undef high2lowuid @@ -92,92 +86,111 @@ u32 psw32_user_bits = PSW32_MASK_DAT | PSW32_MASK_IO | PSW32_MASK_EXT | #define SET_STAT_UID(stat, uid) (stat).st_uid = high2lowuid(uid) #define SET_STAT_GID(stat, gid) (stat).st_gid = high2lowgid(gid) -asmlinkage long sys32_chown16(const char __user * filename, u16 user, u16 group) +COMPAT_SYSCALL_DEFINE3(s390_chown16, const char __user *, filename, + u16, user, u16, group) { return sys_chown(filename, low2highuid(user), low2highgid(group)); } -asmlinkage long sys32_lchown16(const char __user * filename, u16 user, u16 group) +COMPAT_SYSCALL_DEFINE3(s390_lchown16, const char __user *, + filename, u16, user, u16, group) { return sys_lchown(filename, low2highuid(user), low2highgid(group)); } -asmlinkage long sys32_fchown16(unsigned int fd, u16 user, u16 group) +COMPAT_SYSCALL_DEFINE3(s390_fchown16, unsigned int, fd, u16, user, u16, group) { return sys_fchown(fd, low2highuid(user), low2highgid(group)); } -asmlinkage long sys32_setregid16(u16 rgid, u16 egid) +COMPAT_SYSCALL_DEFINE2(s390_setregid16, u16, rgid, u16, egid) { return sys_setregid(low2highgid(rgid), low2highgid(egid)); } -asmlinkage long sys32_setgid16(u16 gid) +COMPAT_SYSCALL_DEFINE1(s390_setgid16, u16, gid) { return sys_setgid((gid_t)gid); } -asmlinkage long sys32_setreuid16(u16 ruid, u16 euid) +COMPAT_SYSCALL_DEFINE2(s390_setreuid16, u16, ruid, u16, euid) { return sys_setreuid(low2highuid(ruid), low2highuid(euid)); } -asmlinkage long sys32_setuid16(u16 uid) +COMPAT_SYSCALL_DEFINE1(s390_setuid16, u16, uid) { return sys_setuid((uid_t)uid); } -asmlinkage long sys32_setresuid16(u16 ruid, u16 euid, u16 suid) +COMPAT_SYSCALL_DEFINE3(s390_setresuid16, u16, ruid, u16, euid, u16, suid) { return sys_setresuid(low2highuid(ruid), low2highuid(euid), - low2highuid(suid)); + low2highuid(suid)); } -asmlinkage long sys32_getresuid16(u16 __user *ruid, u16 __user *euid, u16 __user *suid) +COMPAT_SYSCALL_DEFINE3(s390_getresuid16, u16 __user *, ruidp, + u16 __user *, euidp, u16 __user *, suidp) { + const struct cred *cred = current_cred(); int retval; + u16 ruid, euid, suid; + + ruid = high2lowuid(from_kuid_munged(cred->user_ns, cred->uid)); + euid = high2lowuid(from_kuid_munged(cred->user_ns, cred->euid)); + suid = high2lowuid(from_kuid_munged(cred->user_ns, cred->suid)); - if (!(retval = put_user(high2lowuid(current->cred->uid), ruid)) && - !(retval = put_user(high2lowuid(current->cred->euid), euid))) - retval = put_user(high2lowuid(current->cred->suid), suid); + if (!(retval = put_user(ruid, ruidp)) && + !(retval = put_user(euid, euidp))) + retval = put_user(suid, suidp); return retval; } -asmlinkage long sys32_setresgid16(u16 rgid, u16 egid, u16 sgid) +COMPAT_SYSCALL_DEFINE3(s390_setresgid16, u16, rgid, u16, egid, u16, sgid) { return sys_setresgid(low2highgid(rgid), low2highgid(egid), - low2highgid(sgid)); + low2highgid(sgid)); } -asmlinkage long sys32_getresgid16(u16 __user *rgid, u16 __user *egid, u16 __user *sgid) +COMPAT_SYSCALL_DEFINE3(s390_getresgid16, u16 __user *, rgidp, + u16 __user *, egidp, u16 __user *, sgidp) { + const struct cred *cred = current_cred(); int retval; + u16 rgid, egid, sgid; - if (!(retval = put_user(high2lowgid(current->cred->gid), rgid)) && - !(retval = put_user(high2lowgid(current->cred->egid), egid))) - retval = put_user(high2lowgid(current->cred->sgid), sgid); + rgid = high2lowgid(from_kgid_munged(cred->user_ns, cred->gid)); + egid = high2lowgid(from_kgid_munged(cred->user_ns, cred->egid)); + sgid = high2lowgid(from_kgid_munged(cred->user_ns, cred->sgid)); + + if 
(!(retval = put_user(rgid, rgidp)) && + !(retval = put_user(egid, egidp))) + retval = put_user(sgid, sgidp); return retval; } -asmlinkage long sys32_setfsuid16(u16 uid) +COMPAT_SYSCALL_DEFINE1(s390_setfsuid16, u16, uid) { return sys_setfsuid((uid_t)uid); } -asmlinkage long sys32_setfsgid16(u16 gid) +COMPAT_SYSCALL_DEFINE1(s390_setfsgid16, u16, gid) { return sys_setfsgid((gid_t)gid); } static int groups16_to_user(u16 __user *grouplist, struct group_info *group_info) { + struct user_namespace *user_ns = current_user_ns(); int i; u16 group; + kgid_t kgid; for (i = 0; i < group_info->ngroups; i++) { - group = (u16)GROUP_AT(group_info, i); + kgid = GROUP_AT(group_info, i); + group = (u16)from_kgid_munged(user_ns, kgid); if (put_user(group, grouplist+i)) return -EFAULT; } @@ -187,43 +200,51 @@ static int groups16_to_user(u16 __user *grouplist, struct group_info *group_info static int groups16_from_user(struct group_info *group_info, u16 __user *grouplist) { + struct user_namespace *user_ns = current_user_ns(); int i; u16 group; + kgid_t kgid; for (i = 0; i < group_info->ngroups; i++) { if (get_user(group, grouplist+i)) return -EFAULT; - GROUP_AT(group_info, i) = (gid_t)group; + + kgid = make_kgid(user_ns, (gid_t)group); + if (!gid_valid(kgid)) + return -EINVAL; + + GROUP_AT(group_info, i) = kgid; } return 0; } -asmlinkage long sys32_getgroups16(int gidsetsize, u16 __user *grouplist) +COMPAT_SYSCALL_DEFINE2(s390_getgroups16, int, gidsetsize, u16 __user *, grouplist) { + const struct cred *cred = current_cred(); int i; if (gidsetsize < 0) return -EINVAL; - get_group_info(current->cred->group_info); - i = current->cred->group_info->ngroups; + get_group_info(cred->group_info); + i = cred->group_info->ngroups; if (gidsetsize) { if (i > gidsetsize) { i = -EINVAL; goto out; } - if (groups16_to_user(grouplist, current->cred->group_info)) { + if (groups16_to_user(grouplist, cred->group_info)) { i = -EFAULT; goto out; } } out: - put_group_info(current->cred->group_info); + put_group_info(cred->group_info); return i; } -asmlinkage long sys32_setgroups16(int gidsetsize, u16 __user *grouplist) +COMPAT_SYSCALL_DEFINE2(s390_setgroups16, int, gidsetsize, u16 __user *, grouplist) { struct group_info *group_info; int retval; @@ -248,257 +269,65 @@ asmlinkage long sys32_setgroups16(int gidsetsize, u16 __user *grouplist) return retval; } -asmlinkage long sys32_getuid16(void) +COMPAT_SYSCALL_DEFINE0(s390_getuid16) { - return high2lowuid(current->cred->uid); + return high2lowuid(from_kuid_munged(current_user_ns(), current_uid())); } -asmlinkage long sys32_geteuid16(void) +COMPAT_SYSCALL_DEFINE0(s390_geteuid16) { - return high2lowuid(current->cred->euid); + return high2lowuid(from_kuid_munged(current_user_ns(), current_euid())); } -asmlinkage long sys32_getgid16(void) +COMPAT_SYSCALL_DEFINE0(s390_getgid16) { - return high2lowgid(current->cred->gid); + return high2lowgid(from_kgid_munged(current_user_ns(), current_gid())); } -asmlinkage long sys32_getegid16(void) +COMPAT_SYSCALL_DEFINE0(s390_getegid16) { - return high2lowgid(current->cred->egid); + return high2lowgid(from_kgid_munged(current_user_ns(), current_egid())); } -/* - * sys32_ipc() is the de-multiplexer for the SysV IPC calls in 32bit emulation. - * - * This is really horribly ugly. 
- */ #ifdef CONFIG_SYSVIPC -asmlinkage long sys32_ipc(u32 call, int first, int second, int third, u32 ptr) +COMPAT_SYSCALL_DEFINE5(s390_ipc, uint, call, int, first, compat_ulong_t, second, + compat_ulong_t, third, compat_uptr_t, ptr) { if (call >> 16) /* hack for backward compatibility */ return -EINVAL; - switch (call) { - case SEMTIMEDOP: - return compat_sys_semtimedop(first, compat_ptr(ptr), - second, compat_ptr(third)); - case SEMOP: - /* struct sembuf is the same on 32 and 64bit :)) */ - return sys_semtimedop(first, compat_ptr(ptr), - second, NULL); - case SEMGET: - return sys_semget(first, second, third); - case SEMCTL: - return compat_sys_semctl(first, second, third, - compat_ptr(ptr)); - case MSGSND: - return compat_sys_msgsnd(first, second, third, - compat_ptr(ptr)); - case MSGRCV: - return compat_sys_msgrcv(first, second, 0, third, - 0, compat_ptr(ptr)); - case MSGGET: - return sys_msgget((key_t) first, second); - case MSGCTL: - return compat_sys_msgctl(first, second, compat_ptr(ptr)); - case SHMAT: - return compat_sys_shmat(first, second, third, - 0, compat_ptr(ptr)); - case SHMDT: - return sys_shmdt(compat_ptr(ptr)); - case SHMGET: - return sys_shmget(first, (unsigned)second, third); - case SHMCTL: - return compat_sys_shmctl(first, second, compat_ptr(ptr)); - } - - return -ENOSYS; + return compat_sys_ipc(call, first, second, third, ptr, third); } #endif -asmlinkage long sys32_truncate64(const char __user * path, unsigned long high, unsigned long low) -{ - if ((int)high < 0) - return -EINVAL; - else - return sys_truncate(path, (high << 32) | low); -} - -asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long high, unsigned long low) +COMPAT_SYSCALL_DEFINE3(s390_truncate64, const char __user *, path, u32, high, u32, low) { - if ((int)high < 0) - return -EINVAL; - else - return sys_ftruncate(fd, (high << 32) | low); -} - -asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid, - struct compat_timespec __user *interval) -{ - struct timespec t; - int ret; - mm_segment_t old_fs = get_fs (); - - set_fs (KERNEL_DS); - ret = sys_sched_rr_get_interval(pid, - (struct timespec __force __user *) &t); - set_fs (old_fs); - if (put_compat_timespec(&t, interval)) - return -EFAULT; - return ret; + return sys_truncate(path, (unsigned long)high << 32 | low); } -asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set, - compat_sigset_t __user *oset, size_t sigsetsize) +COMPAT_SYSCALL_DEFINE3(s390_ftruncate64, unsigned int, fd, u32, high, u32, low) { - sigset_t s; - compat_sigset_t s32; - int ret; - mm_segment_t old_fs = get_fs(); - - if (set) { - if (copy_from_user (&s32, set, sizeof(compat_sigset_t))) - return -EFAULT; - s.sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32); - } - set_fs (KERNEL_DS); - ret = sys_rt_sigprocmask(how, - set ? (sigset_t __force __user *) &s : NULL, - oset ? 
(sigset_t __force __user *) &s : NULL, - sigsetsize); - set_fs (old_fs); - if (ret) return ret; - if (oset) { - s32.sig[1] = (s.sig[0] >> 32); - s32.sig[0] = s.sig[0]; - if (copy_to_user (oset, &s32, sizeof(compat_sigset_t))) - return -EFAULT; - } - return 0; + return sys_ftruncate(fd, (unsigned long)high << 32 | low); } -asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set, - size_t sigsetsize) -{ - sigset_t s; - compat_sigset_t s32; - int ret; - mm_segment_t old_fs = get_fs(); - - set_fs (KERNEL_DS); - ret = sys_rt_sigpending((sigset_t __force __user *) &s, sigsetsize); - set_fs (old_fs); - if (!ret) { - s32.sig[1] = (s.sig[0] >> 32); - s32.sig[0] = s.sig[0]; - if (copy_to_user (set, &s32, sizeof(compat_sigset_t))) - return -EFAULT; - } - return ret; -} - -asmlinkage long -sys32_rt_sigqueueinfo(int pid, int sig, compat_siginfo_t __user *uinfo) -{ - siginfo_t info; - int ret; - mm_segment_t old_fs = get_fs(); - - if (copy_siginfo_from_user32(&info, uinfo)) - return -EFAULT; - set_fs (KERNEL_DS); - ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force __user *) &info); - set_fs (old_fs); - return ret; -} - -/* - * sys32_execve() executes a new program after the asm stub has set - * things up for us. This should basically do what I want it to. - */ -asmlinkage long sys32_execve(const char __user *name, compat_uptr_t __user *argv, - compat_uptr_t __user *envp) -{ - struct pt_regs *regs = task_pt_regs(current); - char *filename; - long rc; - - filename = getname(name); - rc = PTR_ERR(filename); - if (IS_ERR(filename)) - return rc; - rc = compat_do_execve(filename, argv, envp, regs); - if (rc) - goto out; - current->thread.fp_regs.fpc=0; - asm volatile("sfpc %0,0" : : "d" (0)); - rc = regs->gprs[2]; -out: - putname(filename); - return rc; -} - -asmlinkage long sys32_pread64(unsigned int fd, char __user *ubuf, - size_t count, u32 poshi, u32 poslo) +COMPAT_SYSCALL_DEFINE5(s390_pread64, unsigned int, fd, char __user *, ubuf, + compat_size_t, count, u32, high, u32, low) { if ((compat_ssize_t) count < 0) return -EINVAL; - return sys_pread64(fd, ubuf, count, ((loff_t)AA(poshi) << 32) | AA(poslo)); + return sys_pread64(fd, ubuf, count, (unsigned long)high << 32 | low); } -asmlinkage long sys32_pwrite64(unsigned int fd, const char __user *ubuf, - size_t count, u32 poshi, u32 poslo) +COMPAT_SYSCALL_DEFINE5(s390_pwrite64, unsigned int, fd, const char __user *, ubuf, + compat_size_t, count, u32, high, u32, low) { if ((compat_ssize_t) count < 0) return -EINVAL; - return sys_pwrite64(fd, ubuf, count, ((loff_t)AA(poshi) << 32) | AA(poslo)); + return sys_pwrite64(fd, ubuf, count, (unsigned long)high << 32 | low); } -asmlinkage compat_ssize_t sys32_readahead(int fd, u32 offhi, u32 offlo, s32 count) +COMPAT_SYSCALL_DEFINE4(s390_readahead, int, fd, u32, high, u32, low, s32, count) { - return sys_readahead(fd, ((loff_t)AA(offhi) << 32) | AA(offlo), count); -} - -asmlinkage long sys32_sendfile(int out_fd, int in_fd, compat_off_t __user *offset, size_t count) -{ - mm_segment_t old_fs = get_fs(); - int ret; - off_t of; - - if (offset && get_user(of, offset)) - return -EFAULT; - - set_fs(KERNEL_DS); - ret = sys_sendfile(out_fd, in_fd, - offset ? 
(off_t __force __user *) &of : NULL, count); - set_fs(old_fs); - - if (offset && put_user(of, offset)) - return -EFAULT; - - return ret; -} - -asmlinkage long sys32_sendfile64(int out_fd, int in_fd, - compat_loff_t __user *offset, s32 count) -{ - mm_segment_t old_fs = get_fs(); - int ret; - loff_t lof; - - if (offset && get_user(lof, offset)) - return -EFAULT; - - set_fs(KERNEL_DS); - ret = sys_sendfile64(out_fd, in_fd, - offset ? (loff_t __force __user *) &lof : NULL, - count); - set_fs(old_fs); - - if (offset && put_user(lof, offset)) - return -EFAULT; - - return ret; + return sys_readahead(fd, (unsigned long)high << 32 | low, count); } struct stat64_emu31 { @@ -537,8 +366,8 @@ static int cp_stat64(struct stat64_emu31 __user *ubuf, struct kstat *stat) tmp.__st_ino = (u32)stat->ino; tmp.st_mode = stat->mode; tmp.st_nlink = (unsigned int)stat->nlink; - tmp.st_uid = stat->uid; - tmp.st_gid = stat->gid; + tmp.st_uid = from_kuid_munged(current_user_ns(), stat->uid); + tmp.st_gid = from_kgid_munged(current_user_ns(), stat->gid); tmp.st_rdev = huge_encode_dev(stat->rdev); tmp.st_size = stat->size; tmp.st_blksize = (u32)stat->blksize; @@ -550,7 +379,7 @@ static int cp_stat64(struct stat64_emu31 __user *ubuf, struct kstat *stat) return copy_to_user(ubuf,&tmp,sizeof(tmp)) ? -EFAULT : 0; } -asmlinkage long sys32_stat64(const char __user * filename, struct stat64_emu31 __user * statbuf) +COMPAT_SYSCALL_DEFINE2(s390_stat64, const char __user *, filename, struct stat64_emu31 __user *, statbuf) { struct kstat stat; int ret = vfs_stat(filename, &stat); @@ -559,7 +388,7 @@ asmlinkage long sys32_stat64(const char __user * filename, struct stat64_emu31 _ return ret; } -asmlinkage long sys32_lstat64(const char __user * filename, struct stat64_emu31 __user * statbuf) +COMPAT_SYSCALL_DEFINE2(s390_lstat64, const char __user *, filename, struct stat64_emu31 __user *, statbuf) { struct kstat stat; int ret = vfs_lstat(filename, &stat); @@ -568,7 +397,7 @@ asmlinkage long sys32_lstat64(const char __user * filename, struct stat64_emu31 return ret; } -asmlinkage long sys32_fstat64(unsigned long fd, struct stat64_emu31 __user * statbuf) +COMPAT_SYSCALL_DEFINE2(s390_fstat64, unsigned int, fd, struct stat64_emu31 __user *, statbuf) { struct kstat stat; int ret = vfs_fstat(fd, &stat); @@ -577,8 +406,8 @@ asmlinkage long sys32_fstat64(unsigned long fd, struct stat64_emu31 __user * sta return ret; } -asmlinkage long sys32_fstatat64(unsigned int dfd, const char __user *filename, - struct stat64_emu31 __user* statbuf, int flag) +COMPAT_SYSCALL_DEFINE4(s390_fstatat64, unsigned int, dfd, const char __user *, filename, + struct stat64_emu31 __user *, statbuf, int, flag) { struct kstat stat; int error; @@ -604,7 +433,7 @@ struct mmap_arg_struct_emu31 { compat_ulong_t offset; }; -asmlinkage unsigned long old32_mmap(struct mmap_arg_struct_emu31 __user *arg) +COMPAT_SYSCALL_DEFINE1(s390_old_mmap, struct mmap_arg_struct_emu31 __user *, arg) { struct mmap_arg_struct_emu31 a; @@ -612,22 +441,20 @@ asmlinkage unsigned long old32_mmap(struct mmap_arg_struct_emu31 __user *arg) return -EFAULT; if (a.offset & ~PAGE_MASK) return -EINVAL; - a.addr = (unsigned long) compat_ptr(a.addr); return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT); } -asmlinkage long sys32_mmap2(struct mmap_arg_struct_emu31 __user *arg) +COMPAT_SYSCALL_DEFINE1(s390_mmap2, struct mmap_arg_struct_emu31 __user *, arg) { struct mmap_arg_struct_emu31 a; if (copy_from_user(&a, arg, sizeof(a))) return -EFAULT; - a.addr = (unsigned long) 
compat_ptr(a.addr); return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset); } -asmlinkage long sys32_read(unsigned int fd, char __user * buf, size_t count) +COMPAT_SYSCALL_DEFINE3(s390_read, unsigned int, fd, char __user *, buf, compat_size_t, count) { if ((compat_ssize_t) count < 0) return -EINVAL; @@ -635,7 +462,7 @@ asmlinkage long sys32_read(unsigned int fd, char __user * buf, size_t count) return sys_read(fd, buf, count); } -asmlinkage long sys32_write(unsigned int fd, const char __user * buf, size_t count) +COMPAT_SYSCALL_DEFINE3(s390_write, unsigned int, fd, const char __user *, buf, compat_size_t, count) { if ((compat_ssize_t) count < 0) return -EINVAL; @@ -649,14 +476,13 @@ asmlinkage long sys32_write(unsigned int fd, const char __user * buf, size_t cou * because the 31 bit values differ from the 64 bit values. */ -asmlinkage long -sys32_fadvise64(int fd, loff_t offset, size_t len, int advise) +COMPAT_SYSCALL_DEFINE5(s390_fadvise64, int, fd, u32, high, u32, low, compat_size_t, len, int, advise) { if (advise == 4) advise = POSIX_FADV_DONTNEED; else if (advise == 5) advise = POSIX_FADV_NOREUSE; - return sys_fadvise64(fd, offset, len, advise); + return sys_fadvise64(fd, (unsigned long)high << 32 | low, len, advise); } struct fadvise64_64_args { @@ -666,8 +492,7 @@ struct fadvise64_64_args { int advice; }; -asmlinkage long -sys32_fadvise64_64(struct fadvise64_64_args __user *args) +COMPAT_SYSCALL_DEFINE1(s390_fadvise64_64, struct fadvise64_64_args __user *, args) { struct fadvise64_64_args a; @@ -679,3 +504,17 @@ sys32_fadvise64_64(struct fadvise64_64_args __user *args) a.advice = POSIX_FADV_NOREUSE; return sys_fadvise64_64(a.fd, a.offset, a.len, a.advice); } + +COMPAT_SYSCALL_DEFINE6(s390_sync_file_range, int, fd, u32, offhigh, u32, offlow, + u32, nhigh, u32, nlow, unsigned int, flags) +{ + return sys_sync_file_range(fd, ((loff_t)offhigh << 32) + offlow, + ((u64)nhigh << 32) + nlow, flags); +} + +COMPAT_SYSCALL_DEFINE6(s390_fallocate, int, fd, int, mode, u32, offhigh, u32, offlow, + u32, lenhigh, u32, lenlow) +{ + return sys_fallocate(fd, mode, ((loff_t)offhigh << 32) + offlow, + ((u64)lenhigh << 32) + lenlow); +} diff --git a/arch/s390/kernel/compat_linux.h b/arch/s390/kernel/compat_linux.h index 9635d759c2b..70d4b7c4bea 100644 --- a/arch/s390/kernel/compat_linux.h +++ b/arch/s390/kernel/compat_linux.h @@ -17,81 +17,6 @@ struct ipc_kludge_32 { __s32 msgtyp; }; -struct old_sigaction32 { - __u32 sa_handler; /* Really a pointer, but need to deal with 32 bits */ - compat_old_sigset_t sa_mask; /* A 32 bit mask */ - __u32 sa_flags; - __u32 sa_restorer; /* Another 32 bit pointer */ -}; - -typedef struct compat_siginfo { - int si_signo; - int si_errno; - int si_code; - - union { - int _pad[((128/sizeof(int)) - 3)]; - - /* kill() */ - struct { - pid_t _pid; /* sender's pid */ - uid_t _uid; /* sender's uid */ - } _kill; - - /* POSIX.1b timers */ - struct { - compat_timer_t _tid; /* timer id */ - int _overrun; /* overrun count */ - compat_sigval_t _sigval; /* same as below */ - int _sys_private; /* not to be passed to user */ - } _timer; - - /* POSIX.1b signals */ - struct { - pid_t _pid; /* sender's pid */ - uid_t _uid; /* sender's uid */ - compat_sigval_t _sigval; - } _rt; - - /* SIGCHLD */ - struct { - pid_t _pid; /* which child */ - uid_t _uid; /* sender's uid */ - int _status;/* exit code */ - compat_clock_t _utime; - compat_clock_t _stime; - } _sigchld; - - /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */ - struct { - __u32 _addr; /* faulting insn/memory ref. 
- pointer */ - } _sigfault; - - /* SIGPOLL */ - struct { - int _band; /* POLL_IN, POLL_OUT, POLL_MSG */ - int _fd; - } _sigpoll; - } _sifields; -} compat_siginfo_t; - -/* - * How these fields are to be accessed. - */ -#define si_pid _sifields._kill._pid -#define si_uid _sifields._kill._uid -#define si_status _sifields._sigchld._status -#define si_utime _sifields._sigchld._utime -#define si_stime _sifields._sigchld._stime -#define si_value _sifields._rt._sigval -#define si_int _sifields._rt._sigval.sival_int -#define si_ptr _sifields._rt._sigval.sival_ptr -#define si_addr _sifields._sigfault._addr -#define si_band _sifields._sigpoll._band -#define si_fd _sifields._sigpoll._fd -#define si_tid _sifields._timer._tid -#define si_overrun _sifields._timer._overrun - /* asm/sigcontext.h */ typedef union { @@ -102,6 +27,7 @@ typedef union typedef struct { unsigned int fpc; + unsigned int pad; freg_t32 fprs[__NUM_FPRS]; } _s390_fp_regs32; @@ -136,92 +62,59 @@ struct sigcontext32 }; /* asm/signal.h */ -struct sigaction32 { - __u32 sa_handler; /* pointer */ - __u32 sa_flags; - __u32 sa_restorer; /* pointer */ - compat_sigset_t sa_mask; /* mask last for extensibility */ -}; - -typedef struct { - __u32 ss_sp; /* pointer */ - int ss_flags; - compat_size_t ss_size; -} stack_t32; /* asm/ucontext.h */ struct ucontext32 { __u32 uc_flags; __u32 uc_link; /* pointer */ - stack_t32 uc_stack; + compat_stack_t uc_stack; _sigregs32 uc_mcontext; - compat_sigset_t uc_sigmask; /* mask last for extensibility */ + compat_sigset_t uc_sigmask; + /* Allow for uc_sigmask growth. Glibc uses a 1024-bit sigset_t. */ + unsigned char __unused[128 - sizeof(compat_sigset_t)]; }; struct stat64_emu31; struct mmap_arg_struct_emu31; struct fadvise64_64_args; -struct old_sigaction32; -struct old_sigaction32; -long sys32_chown16(const char __user * filename, u16 user, u16 group); -long sys32_lchown16(const char __user * filename, u16 user, u16 group); -long sys32_fchown16(unsigned int fd, u16 user, u16 group); -long sys32_setregid16(u16 rgid, u16 egid); -long sys32_setgid16(u16 gid); -long sys32_setreuid16(u16 ruid, u16 euid); -long sys32_setuid16(u16 uid); -long sys32_setresuid16(u16 ruid, u16 euid, u16 suid); -long sys32_getresuid16(u16 __user *ruid, u16 __user *euid, u16 __user *suid); -long sys32_setresgid16(u16 rgid, u16 egid, u16 sgid); -long sys32_getresgid16(u16 __user *rgid, u16 __user *egid, u16 __user *sgid); -long sys32_setfsuid16(u16 uid); -long sys32_setfsgid16(u16 gid); -long sys32_getgroups16(int gidsetsize, u16 __user *grouplist); -long sys32_setgroups16(int gidsetsize, u16 __user *grouplist); -long sys32_getuid16(void); -long sys32_geteuid16(void); -long sys32_getgid16(void); -long sys32_getegid16(void); -long sys32_ipc(u32 call, int first, int second, int third, u32 ptr); -long sys32_truncate64(const char __user * path, unsigned long high, - unsigned long low); -long sys32_ftruncate64(unsigned int fd, unsigned long high, unsigned long low); -long sys32_sched_rr_get_interval(compat_pid_t pid, - struct compat_timespec __user *interval); -long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set, - compat_sigset_t __user *oset, size_t sigsetsize); -long sys32_rt_sigpending(compat_sigset_t __user *set, size_t sigsetsize); -long sys32_rt_sigqueueinfo(int pid, int sig, compat_siginfo_t __user *uinfo); -long sys32_execve(const char __user *name, compat_uptr_t __user *argv, - compat_uptr_t __user *envp); -long sys32_init_module(void __user *umod, unsigned long len, - const char __user *uargs); -long 
sys32_delete_module(const char __user *name_user, unsigned int flags); -long sys32_pread64(unsigned int fd, char __user *ubuf, size_t count, - u32 poshi, u32 poslo); -long sys32_pwrite64(unsigned int fd, const char __user *ubuf, - size_t count, u32 poshi, u32 poslo); -compat_ssize_t sys32_readahead(int fd, u32 offhi, u32 offlo, s32 count); -long sys32_sendfile(int out_fd, int in_fd, compat_off_t __user *offset, - size_t count); -long sys32_sendfile64(int out_fd, int in_fd, compat_loff_t __user *offset, - s32 count); -long sys32_stat64(const char __user * filename, struct stat64_emu31 __user * statbuf); -long sys32_lstat64(const char __user * filename, - struct stat64_emu31 __user * statbuf); -long sys32_fstat64(unsigned long fd, struct stat64_emu31 __user * statbuf); -long sys32_fstatat64(unsigned int dfd, const char __user *filename, - struct stat64_emu31 __user* statbuf, int flag); -unsigned long old32_mmap(struct mmap_arg_struct_emu31 __user *arg); -long sys32_mmap2(struct mmap_arg_struct_emu31 __user *arg); -long sys32_read(unsigned int fd, char __user * buf, size_t count); -long sys32_write(unsigned int fd, const char __user * buf, size_t count); -long sys32_fadvise64(int fd, loff_t offset, size_t len, int advise); -long sys32_fadvise64_64(struct fadvise64_64_args __user *args); -long sys32_sigaction(int sig, const struct old_sigaction32 __user *act, - struct old_sigaction32 __user *oact); -long sys32_rt_sigaction(int sig, const struct sigaction32 __user *act, - struct sigaction32 __user *oact, size_t sigsetsize); -long sys32_sigaltstack(const stack_t32 __user *uss, stack_t32 __user *uoss); +long compat_sys_s390_chown16(const char __user *filename, u16 user, u16 group); +long compat_sys_s390_lchown16(const char __user *filename, u16 user, u16 group); +long compat_sys_s390_fchown16(unsigned int fd, u16 user, u16 group); +long compat_sys_s390_setregid16(u16 rgid, u16 egid); +long compat_sys_s390_setgid16(u16 gid); +long compat_sys_s390_setreuid16(u16 ruid, u16 euid); +long compat_sys_s390_setuid16(u16 uid); +long compat_sys_s390_setresuid16(u16 ruid, u16 euid, u16 suid); +long compat_sys_s390_getresuid16(u16 __user *ruid, u16 __user *euid, u16 __user *suid); +long compat_sys_s390_setresgid16(u16 rgid, u16 egid, u16 sgid); +long compat_sys_s390_getresgid16(u16 __user *rgid, u16 __user *egid, u16 __user *sgid); +long compat_sys_s390_setfsuid16(u16 uid); +long compat_sys_s390_setfsgid16(u16 gid); +long compat_sys_s390_getgroups16(int gidsetsize, u16 __user *grouplist); +long compat_sys_s390_setgroups16(int gidsetsize, u16 __user *grouplist); +long compat_sys_s390_getuid16(void); +long compat_sys_s390_geteuid16(void); +long compat_sys_s390_getgid16(void); +long compat_sys_s390_getegid16(void); +long compat_sys_s390_truncate64(const char __user *path, u32 high, u32 low); +long compat_sys_s390_ftruncate64(unsigned int fd, u32 high, u32 low); +long compat_sys_s390_pread64(unsigned int fd, char __user *ubuf, compat_size_t count, u32 high, u32 low); +long compat_sys_s390_pwrite64(unsigned int fd, const char __user *ubuf, compat_size_t count, u32 high, u32 low); +long compat_sys_s390_readahead(int fd, u32 high, u32 low, s32 count); +long compat_sys_s390_stat64(const char __user *filename, struct stat64_emu31 __user *statbuf); +long compat_sys_s390_lstat64(const char __user *filename, struct stat64_emu31 __user *statbuf); +long compat_sys_s390_fstat64(unsigned int fd, struct stat64_emu31 __user *statbuf); +long compat_sys_s390_fstatat64(unsigned int dfd, const char __user *filename, struct 
stat64_emu31 __user *statbuf, int flag); +long compat_sys_s390_old_mmap(struct mmap_arg_struct_emu31 __user *arg); +long compat_sys_s390_mmap2(struct mmap_arg_struct_emu31 __user *arg); +long compat_sys_s390_read(unsigned int fd, char __user * buf, compat_size_t count); +long compat_sys_s390_write(unsigned int fd, const char __user * buf, compat_size_t count); +long compat_sys_s390_fadvise64(int fd, u32 high, u32 low, compat_size_t len, int advise); +long compat_sys_s390_fadvise64_64(struct fadvise64_64_args __user *args); +long compat_sys_s390_sync_file_range(int fd, u32 offhigh, u32 offlow, u32 nhigh, u32 nlow, unsigned int flags); +long compat_sys_s390_fallocate(int fd, int mode, u32 offhigh, u32 offlow, u32 lenhigh, u32 lenlow); +long compat_sys_sigreturn(void); +long compat_sys_rt_sigreturn(void); + #endif /* _ASM_S390X_S390_H */ diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c index 6fe78c2f95d..f204d692036 100644 --- a/arch/s390/kernel/compat_signal.c +++ b/arch/s390/kernel/compat_signal.c @@ -1,7 +1,5 @@ /* - * arch/s390/kernel/compat_signal.c - * - * Copyright (C) IBM Corp. 2000,2006 + * Copyright IBM Corp. 2000, 2006 * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com) * Gerhard Tonn (ton@de.ibm.com) * @@ -27,12 +25,11 @@ #include <asm/ucontext.h> #include <asm/uaccess.h> #include <asm/lowcore.h> +#include <asm/switch_to.h> #include "compat_linux.h" #include "compat_ptrace.h" #include "entry.h" -#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) - typedef struct { __u8 callee_used_stack[__SIGNAL_FRAMESIZE32]; @@ -52,13 +49,10 @@ typedef struct __u32 gprs_high[NUM_GPRS]; } rt_sigframe32; -int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from) +int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from) { int err; - if (!access_ok (VERIFY_WRITE, to, sizeof(compat_siginfo_t))) - return -EFAULT; - /* If you change siginfo_t structure, please be sure this code is fixed accordingly. It should never copy any pad contained in the structure @@ -105,7 +99,7 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from) break; } } - return err; + return err ? -EFAULT : 0; } int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from) @@ -113,9 +107,6 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from) int err; u32 tmp; - if (!access_ok (VERIFY_READ, from, sizeof(compat_siginfo_t))) - return -EFAULT; - err = __get_user(to->si_signo, &from->si_signo); err |= __get_user(to->si_errno, &from->si_errno); err |= __get_user(to->si_code, &from->si_code); @@ -157,175 +148,72 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from) break; } } - return err; -} - -asmlinkage long -sys32_sigaction(int sig, const struct old_sigaction32 __user *act, - struct old_sigaction32 __user *oact) -{ - struct k_sigaction new_ka, old_ka; - unsigned long sa_handler, sa_restorer; - int ret; - - if (act) { - compat_old_sigset_t mask; - if (!access_ok(VERIFY_READ, act, sizeof(*act)) || - __get_user(sa_handler, &act->sa_handler) || - __get_user(sa_restorer, &act->sa_restorer) || - __get_user(new_ka.sa.sa_flags, &act->sa_flags) || - __get_user(mask, &act->sa_mask)) - return -EFAULT; - new_ka.sa.sa_handler = (__sighandler_t) sa_handler; - new_ka.sa.sa_restorer = (void (*)(void)) sa_restorer; - siginitset(&new_ka.sa.sa_mask, mask); - } - - ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? 
&old_ka : NULL); - - if (!ret && oact) { - sa_handler = (unsigned long) old_ka.sa.sa_handler; - sa_restorer = (unsigned long) old_ka.sa.sa_restorer; - if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) || - __put_user(sa_handler, &oact->sa_handler) || - __put_user(sa_restorer, &oact->sa_restorer) || - __put_user(old_ka.sa.sa_flags, &oact->sa_flags) || - __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask)) - return -EFAULT; - } - - return ret; -} - -asmlinkage long -sys32_rt_sigaction(int sig, const struct sigaction32 __user *act, - struct sigaction32 __user *oact, size_t sigsetsize) -{ - struct k_sigaction new_ka, old_ka; - unsigned long sa_handler; - int ret; - compat_sigset_t set32; - - /* XXX: Don't preclude handling different sized sigset_t's. */ - if (sigsetsize != sizeof(compat_sigset_t)) - return -EINVAL; - - if (act) { - ret = get_user(sa_handler, &act->sa_handler); - ret |= __copy_from_user(&set32, &act->sa_mask, - sizeof(compat_sigset_t)); - new_ka.sa.sa_mask.sig[0] = - set32.sig[0] | (((long)set32.sig[1]) << 32); - ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags); - - if (ret) - return -EFAULT; - new_ka.sa.sa_handler = (__sighandler_t) sa_handler; - } - - ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); - - if (!ret && oact) { - set32.sig[1] = (old_ka.sa.sa_mask.sig[0] >> 32); - set32.sig[0] = old_ka.sa.sa_mask.sig[0]; - ret = put_user((unsigned long)old_ka.sa.sa_handler, &oact->sa_handler); - ret |= __copy_to_user(&oact->sa_mask, &set32, - sizeof(compat_sigset_t)); - ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags); - } - - return ret; -} - -asmlinkage long -sys32_sigaltstack(const stack_t32 __user *uss, stack_t32 __user *uoss) -{ - struct pt_regs *regs = task_pt_regs(current); - stack_t kss, koss; - unsigned long ss_sp; - int ret, err = 0; - mm_segment_t old_fs = get_fs(); - - if (uss) { - if (!access_ok(VERIFY_READ, uss, sizeof(*uss))) - return -EFAULT; - err |= __get_user(ss_sp, &uss->ss_sp); - err |= __get_user(kss.ss_size, &uss->ss_size); - err |= __get_user(kss.ss_flags, &uss->ss_flags); - if (err) - return -EFAULT; - kss.ss_sp = (void __user *) ss_sp; - } - - set_fs (KERNEL_DS); - ret = do_sigaltstack((stack_t __force __user *) (uss ? &kss : NULL), - (stack_t __force __user *) (uoss ? &koss : NULL), - regs->gprs[15]); - set_fs (old_fs); - - if (!ret && uoss) { - if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss))) - return -EFAULT; - ss_sp = (unsigned long) koss.ss_sp; - err |= __put_user(ss_sp, &uoss->ss_sp); - err |= __put_user(koss.ss_size, &uoss->ss_size); - err |= __put_user(koss.ss_flags, &uoss->ss_flags); - if (err) - return -EFAULT; - } - return ret; + return err ? 
-EFAULT : 0; } static int save_sigregs32(struct pt_regs *regs, _sigregs32 __user *sregs) { - _s390_regs_common32 regs32; - int err, i; + _sigregs32 user_sregs; + int i; - regs32.psw.mask = psw32_user_bits | - ((__u32)(regs->psw.mask >> 32) & PSW32_MASK_USER); - regs32.psw.addr = (__u32) regs->psw.addr | + user_sregs.regs.psw.mask = (__u32)(regs->psw.mask >> 32); + user_sregs.regs.psw.mask &= PSW32_MASK_USER | PSW32_MASK_RI; + user_sregs.regs.psw.mask |= PSW32_USER_BITS; + user_sregs.regs.psw.addr = (__u32) regs->psw.addr | (__u32)(regs->psw.mask & PSW_MASK_BA); for (i = 0; i < NUM_GPRS; i++) - regs32.gprs[i] = (__u32) regs->gprs[i]; + user_sregs.regs.gprs[i] = (__u32) regs->gprs[i]; save_access_regs(current->thread.acrs); - memcpy(regs32.acrs, current->thread.acrs, sizeof(regs32.acrs)); - err = __copy_to_user(&sregs->regs, &regs32, sizeof(regs32)); - if (err) - return err; - save_fp_regs(&current->thread.fp_regs); - /* s390_fp_regs and _s390_fp_regs32 are the same ! */ - return __copy_to_user(&sregs->fpregs, &current->thread.fp_regs, - sizeof(_s390_fp_regs32)); + memcpy(&user_sregs.regs.acrs, current->thread.acrs, + sizeof(user_sregs.regs.acrs)); + save_fp_ctl(&current->thread.fp_regs.fpc); + save_fp_regs(current->thread.fp_regs.fprs); + memcpy(&user_sregs.fpregs, &current->thread.fp_regs, + sizeof(user_sregs.fpregs)); + if (__copy_to_user(sregs, &user_sregs, sizeof(_sigregs32))) + return -EFAULT; + return 0; } static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs) { - _s390_regs_common32 regs32; - int err, i; + _sigregs32 user_sregs; + int i; /* Always make any pending restarted system call return -EINTR */ current_thread_info()->restart_block.fn = do_no_restart_syscall; - err = __copy_from_user(&regs32, &sregs->regs, sizeof(regs32)); - if (err) - return err; - regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) | - (__u64)(regs32.psw.mask & PSW32_MASK_USER) << 32 | - (__u64)(regs32.psw.addr & PSW32_ADDR_AMODE); - regs->psw.addr = (__u64)(regs32.psw.addr & PSW32_ADDR_INSN); + if (__copy_from_user(&user_sregs, &sregs->regs, sizeof(user_sregs))) + return -EFAULT; + + if (!is_ri_task(current) && (user_sregs.regs.psw.mask & PSW32_MASK_RI)) + return -EINVAL; + + /* Loading the floating-point-control word can fail. Do that first. */ + if (restore_fp_ctl(&user_sregs.fpregs.fpc)) + return -EINVAL; + + /* Use regs->psw.mask instead of PSW_USER_BITS to preserve PER bit. */ + regs->psw.mask = (regs->psw.mask & ~(PSW_MASK_USER | PSW_MASK_RI)) | + (__u64)(user_sregs.regs.psw.mask & PSW32_MASK_USER) << 32 | + (__u64)(user_sregs.regs.psw.mask & PSW32_MASK_RI) << 32 | + (__u64)(user_sregs.regs.psw.addr & PSW32_ADDR_AMODE); + /* Check for invalid user address space control. 
*/ + if ((regs->psw.mask & PSW_MASK_ASC) == PSW_ASC_HOME) + regs->psw.mask = PSW_ASC_PRIMARY | + (regs->psw.mask & ~PSW_MASK_ASC); + regs->psw.addr = (__u64)(user_sregs.regs.psw.addr & PSW32_ADDR_INSN); for (i = 0; i < NUM_GPRS; i++) - regs->gprs[i] = (__u64) regs32.gprs[i]; - memcpy(current->thread.acrs, regs32.acrs, sizeof(current->thread.acrs)); + regs->gprs[i] = (__u64) user_sregs.regs.gprs[i]; + memcpy(&current->thread.acrs, &user_sregs.regs.acrs, + sizeof(current->thread.acrs)); restore_access_regs(current->thread.acrs); - err = __copy_from_user(&current->thread.fp_regs, &sregs->fpregs, - sizeof(_s390_fp_regs32)); - current->thread.fp_regs.fpc &= FPC_VALID_MASK; - if (err) - return err; + memcpy(&current->thread.fp_regs, &user_sregs.fpregs, + sizeof(current->thread.fp_regs)); - restore_fp_regs(&current->thread.fp_regs); - clear_thread_flag(TIF_SYSCALL); /* No longer in a system call */ + restore_fp_regs(current->thread.fp_regs.fprs); + clear_pt_regs_flag(regs, PIF_SYSCALL); /* No longer in a system call */ return 0; } @@ -336,34 +224,31 @@ static int save_sigregs_gprs_high(struct pt_regs *regs, __u32 __user *uregs) for (i = 0; i < NUM_GPRS; i++) gprs_high[i] = regs->gprs[i] >> 32; - - return __copy_to_user(uregs, &gprs_high, sizeof(gprs_high)); + if (__copy_to_user(uregs, &gprs_high, sizeof(gprs_high))) + return -EFAULT; + return 0; } static int restore_sigregs_gprs_high(struct pt_regs *regs, __u32 __user *uregs) { __u32 gprs_high[NUM_GPRS]; - int err, i; + int i; - err = __copy_from_user(&gprs_high, uregs, sizeof(gprs_high)); - if (err) - return err; + if (__copy_from_user(&gprs_high, uregs, sizeof(gprs_high))) + return -EFAULT; for (i = 0; i < NUM_GPRS; i++) *(__u32 *)&regs->gprs[i] = gprs_high[i]; return 0; } -asmlinkage long sys32_sigreturn(void) +COMPAT_SYSCALL_DEFINE0(sigreturn) { struct pt_regs *regs = task_pt_regs(current); sigframe32 __user *frame = (sigframe32 __user *)regs->gprs[15]; sigset_t set; - if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) - goto badframe; if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE32)) goto badframe; - sigdelsetmask(&set, ~_BLOCKABLE); set_current_blocked(&set); if (restore_sigregs32(regs, &frame->sregs)) goto badframe; @@ -375,35 +260,21 @@ badframe: return 0; } -asmlinkage long sys32_rt_sigreturn(void) +COMPAT_SYSCALL_DEFINE0(rt_sigreturn) { struct pt_regs *regs = task_pt_regs(current); rt_sigframe32 __user *frame = (rt_sigframe32 __user *)regs->gprs[15]; sigset_t set; - stack_t st; - __u32 ss_sp; - int err; - mm_segment_t old_fs = get_fs(); - if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) - goto badframe; if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) goto badframe; - sigdelsetmask(&set, ~_BLOCKABLE); set_current_blocked(&set); if (restore_sigregs32(regs, &frame->uc.uc_mcontext)) goto badframe; if (restore_sigregs_gprs_high(regs, frame->gprs_high)) goto badframe; - err = __get_user(ss_sp, &frame->uc.uc_stack.ss_sp); - st.ss_sp = compat_ptr(ss_sp); - err |= __get_user(st.ss_size, &frame->uc.uc_stack.ss_size); - err |= __get_user(st.ss_flags, &frame->uc.uc_stack.ss_flags); - if (err) + if (compat_restore_altstack(&frame->uc.uc_stack)) goto badframe; - set_fs (KERNEL_DS); - do_sigaltstack((stack_t __force __user *)&st, NULL, regs->gprs[15]); - set_fs (old_fs); return regs->gprs[2]; badframe: force_sig(SIGSEGV, current); @@ -436,13 +307,6 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size) sp = current->sas_ss_sp + current->sas_ss_size; } - /* This is the legacy signal stack switching. 
*/ - else if (!user_mode(regs) && - !(ka->sa.sa_flags & SA_RESTORER) && - ka->sa.sa_restorer) { - sp = (unsigned long) ka->sa.sa_restorer; - } - return (void __user *)((sp - frame_size) & -8ul); } @@ -460,8 +324,6 @@ static int setup_frame32(int sig, struct k_sigaction *ka, sigset_t *set, struct pt_regs * regs) { sigframe32 __user *frame = get_sigframe(ka, regs, sizeof(sigframe32)); - if (!access_ok(VERIFY_WRITE, frame, sizeof(sigframe32))) - goto give_sigsegv; if (frame == (void __user *) -1UL) goto give_sigsegv; @@ -479,9 +341,9 @@ static int setup_frame32(int sig, struct k_sigaction *ka, /* Set up to return from userspace. If provided, use a stub already in userspace. */ if (ka->sa.sa_flags & SA_RESTORER) { - regs->gprs[14] = (__u64) ka->sa.sa_restorer | PSW32_ADDR_AMODE; + regs->gprs[14] = (__u64 __force) ka->sa.sa_restorer | PSW32_ADDR_AMODE; } else { - regs->gprs[14] = (__u64) frame->retcode | PSW32_ADDR_AMODE; + regs->gprs[14] = (__u64 __force) frame->retcode | PSW32_ADDR_AMODE; if (__put_user(S390_SYSCALL_OPCODE | __NR_sigreturn, (u16 __force __user *)(frame->retcode))) goto give_sigsegv; @@ -493,7 +355,10 @@ static int setup_frame32(int sig, struct k_sigaction *ka, /* Set up registers for signal handler */ regs->gprs[15] = (__force __u64) frame; - regs->psw.mask |= PSW_MASK_BA; /* force amode 31 */ + /* Force 31 bit amode and default user address space control. */ + regs->psw.mask = PSW_MASK_BA | + (PSW_USER_BITS & PSW_MASK_ASC) | + (regs->psw.mask & ~PSW_MASK_ASC); regs->psw.addr = (__force __u64) ka->sa.sa_handler; regs->gprs[2] = map_signal(sig); @@ -506,6 +371,7 @@ static int setup_frame32(int sig, struct k_sigaction *ka, /* set extra registers only for synchronous signals */ regs->gprs[4] = regs->int_code & 127; regs->gprs[5] = regs->int_parm_long; + regs->gprs[6] = task_thread_info(current)->last_break; } /* Place signal number on stack to allow backtrace from handler. */ @@ -523,8 +389,6 @@ static int setup_rt_frame32(int sig, struct k_sigaction *ka, siginfo_t *info, { int err = 0; rt_sigframe32 __user *frame = get_sigframe(ka, regs, sizeof(rt_sigframe32)); - if (!access_ok(VERIFY_WRITE, frame, sizeof(rt_sigframe32))) - goto give_sigsegv; if (frame == (void __user *) -1UL) goto give_sigsegv; @@ -535,10 +399,7 @@ static int setup_rt_frame32(int sig, struct k_sigaction *ka, siginfo_t *info, /* Create the ucontext. */ err |= __put_user(UC_EXTENDED, &frame->uc.uc_flags); err |= __put_user(0, &frame->uc.uc_link); - err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp); - err |= __put_user(sas_ss_flags(regs->gprs[15]), - &frame->uc.uc_stack.ss_flags); - err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); + err |= __compat_save_altstack(&frame->uc.uc_stack, regs->gprs[15]); err |= save_sigregs32(regs, &frame->uc.uc_mcontext); err |= save_sigregs_gprs_high(regs, frame->gprs_high); err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); @@ -548,11 +409,12 @@ static int setup_rt_frame32(int sig, struct k_sigaction *ka, siginfo_t *info, /* Set up to return from userspace. If provided, use a stub already in userspace. 
*/ if (ka->sa.sa_flags & SA_RESTORER) { - regs->gprs[14] = (__u64) ka->sa.sa_restorer | PSW32_ADDR_AMODE; + regs->gprs[14] = (__u64 __force) ka->sa.sa_restorer | PSW32_ADDR_AMODE; } else { - regs->gprs[14] = (__u64) frame->retcode | PSW32_ADDR_AMODE; - err |= __put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn, - (u16 __force __user *)(frame->retcode)); + regs->gprs[14] = (__u64 __force) frame->retcode | PSW32_ADDR_AMODE; + if (__put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn, + (u16 __force __user *)(frame->retcode))) + goto give_sigsegv; } /* Set up backchain. */ @@ -561,12 +423,16 @@ static int setup_rt_frame32(int sig, struct k_sigaction *ka, siginfo_t *info, /* Set up registers for signal handler */ regs->gprs[15] = (__force __u64) frame; - regs->psw.mask |= PSW_MASK_BA; /* force amode 31 */ - regs->psw.addr = (__u64) ka->sa.sa_handler; + /* Force 31 bit amode and default user address space control. */ + regs->psw.mask = PSW_MASK_BA | + (PSW_USER_BITS & PSW_MASK_ASC) | + (regs->psw.mask & ~PSW_MASK_ASC); + regs->psw.addr = (__u64 __force) ka->sa.sa_handler; regs->gprs[2] = map_signal(sig); regs->gprs[3] = (__force __u64) &frame->info; regs->gprs[4] = (__force __u64) &frame->uc; + regs->gprs[5] = task_thread_info(current)->last_break; return 0; give_sigsegv: @@ -578,10 +444,9 @@ give_sigsegv: * OK, we're invoking a handler */ -int handle_signal32(unsigned long sig, struct k_sigaction *ka, +void handle_signal32(unsigned long sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *oldset, struct pt_regs *regs) { - sigset_t blocked; int ret; /* Set up the stack frame */ @@ -590,11 +455,8 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka, else ret = setup_frame32(sig, ka, oldset, regs); if (ret) - return ret; - sigorsets(&blocked, &current->blocked, &ka->sa.sa_mask); - if (!(ka->sa.sa_flags & SA_NODEFER)) - sigaddset(&blocked, sig); - set_current_blocked(&blocked); - return 0; + return; + signal_delivered(sig, info, ka, regs, + test_thread_flag(TIF_SINGLE_STEP)); } diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S deleted file mode 100644 index 18c51df9fe0..00000000000 --- a/arch/s390/kernel/compat_wrapper.S +++ /dev/null @@ -1,1649 +0,0 @@ -/* -* arch/s390/kernel/compat_wrapper.S -* wrapper for 31 bit compatible system calls. -* -* Copyright (C) IBM Corp. 
2000,2006 -* Author(s): Gerhard Tonn (ton@de.ibm.com), -* Thomas Spatzier (tspat@de.ibm.com) -*/ - -#include <linux/linkage.h> - -ENTRY(sys32_exit_wrapper) - lgfr %r2,%r2 # int - jg sys_exit # branch to sys_exit - -ENTRY(sys32_read_wrapper) - llgfr %r2,%r2 # unsigned int - llgtr %r3,%r3 # char * - llgfr %r4,%r4 # size_t - jg sys32_read # branch to sys_read - -ENTRY(sys32_write_wrapper) - llgfr %r2,%r2 # unsigned int - llgtr %r3,%r3 # const char * - llgfr %r4,%r4 # size_t - jg sys32_write # branch to system call - -ENTRY(sys32_open_wrapper) - llgtr %r2,%r2 # const char * - lgfr %r3,%r3 # int - lgfr %r4,%r4 # int - jg sys_open # branch to system call - -ENTRY(sys32_close_wrapper) - llgfr %r2,%r2 # unsigned int - jg sys_close # branch to system call - -ENTRY(sys32_creat_wrapper) - llgtr %r2,%r2 # const char * - lgfr %r3,%r3 # int - jg sys_creat # branch to system call - -ENTRY(sys32_link_wrapper) - llgtr %r2,%r2 # const char * - llgtr %r3,%r3 # const char * - jg sys_link # branch to system call - -ENTRY(sys32_unlink_wrapper) - llgtr %r2,%r2 # const char * - jg sys_unlink # branch to system call - -ENTRY(sys32_chdir_wrapper) - llgtr %r2,%r2 # const char * - jg sys_chdir # branch to system call - -ENTRY(sys32_time_wrapper) - llgtr %r2,%r2 # int * - jg compat_sys_time # branch to system call - -ENTRY(sys32_mknod_wrapper) - llgtr %r2,%r2 # const char * - lgfr %r3,%r3 # int - llgfr %r4,%r4 # dev - jg sys_mknod # branch to system call - -ENTRY(sys32_chmod_wrapper) - llgtr %r2,%r2 # const char * - llgfr %r3,%r3 # mode_t - jg sys_chmod # branch to system call - -ENTRY(sys32_lchown16_wrapper) - llgtr %r2,%r2 # const char * - llgfr %r3,%r3 # __kernel_old_uid_emu31_t - llgfr %r4,%r4 # __kernel_old_uid_emu31_t - jg sys32_lchown16 # branch to system call - -ENTRY(sys32_lseek_wrapper) - llgfr %r2,%r2 # unsigned int - lgfr %r3,%r3 # off_t - llgfr %r4,%r4 # unsigned int - jg sys_lseek # branch to system call - -#sys32_getpid_wrapper # void - -ENTRY(sys32_mount_wrapper) - llgtr %r2,%r2 # char * - llgtr %r3,%r3 # char * - llgtr %r4,%r4 # char * - llgfr %r5,%r5 # unsigned long - llgtr %r6,%r6 # void * - jg compat_sys_mount # branch to system call - -ENTRY(sys32_oldumount_wrapper) - llgtr %r2,%r2 # char * - jg sys_oldumount # branch to system call - -ENTRY(sys32_setuid16_wrapper) - llgfr %r2,%r2 # __kernel_old_uid_emu31_t - jg sys32_setuid16 # branch to system call - -#sys32_getuid16_wrapper # void - -ENTRY(sys32_ptrace_wrapper) - lgfr %r2,%r2 # long - lgfr %r3,%r3 # long - llgtr %r4,%r4 # long - llgfr %r5,%r5 # long - jg compat_sys_ptrace # branch to system call - -ENTRY(sys32_alarm_wrapper) - llgfr %r2,%r2 # unsigned int - jg sys_alarm # branch to system call - -ENTRY(compat_sys_utime_wrapper) - llgtr %r2,%r2 # char * - llgtr %r3,%r3 # struct compat_utimbuf * - jg compat_sys_utime # branch to system call - -ENTRY(sys32_access_wrapper) - llgtr %r2,%r2 # const char * - lgfr %r3,%r3 # int - jg sys_access # branch to system call - -ENTRY(sys32_nice_wrapper) - lgfr %r2,%r2 # int - jg sys_nice # branch to system call - -#sys32_sync_wrapper # void - -ENTRY(sys32_kill_wrapper) - lgfr %r2,%r2 # int - lgfr %r3,%r3 # int - jg sys_kill # branch to system call - -ENTRY(sys32_rename_wrapper) - llgtr %r2,%r2 # const char * - llgtr %r3,%r3 # const char * - jg sys_rename # branch to system call - -ENTRY(sys32_mkdir_wrapper) - llgtr %r2,%r2 # const char * - lgfr %r3,%r3 # int - jg sys_mkdir # branch to system call - -ENTRY(sys32_rmdir_wrapper) - llgtr %r2,%r2 # const char * - jg sys_rmdir # branch to system call - 
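
Each of these deleted stubs follows one pattern: widen every 31-bit user-space argument to a clean 64-bit register value, then tail-branch (jg) to the real handler. lgfr sign-extends a signed int, llgfr zero-extends an unsigned int, and llgtr additionally clears the top 33 bits so a 31-bit user pointer cannot carry stray high-order bits into the kernel. A stub branches to a generic compat_sys_* implementation when the argument layout itself differs between 31-bit and 64-bit (structures containing longs or pointers), and straight to the native sys_* when only register widths need fixing. A minimal C sketch of the three conversions follows; the helper names are hypothetical, not kernel API:

/* What lgfr / llgfr / llgtr do to a syscall argument, expressed in C.
 * Illustrative helpers only. */
static inline long wrap_sign_extend(u32 reg)		/* lgfr */
{
	return (long)(s32)reg;
}

static inline unsigned long wrap_zero_extend(u32 reg)	/* llgfr */
{
	return (unsigned long)reg;
}

static inline void __user *wrap_31bit_ptr(unsigned long reg)	/* llgtr */
{
	/* keep only the low 31 bits: a 31-bit address, high bits cleared */
	return (void __user *)(reg & 0x7fffffffUL);
}
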
-ENTRY(sys32_dup_wrapper) - llgfr %r2,%r2 # unsigned int - jg sys_dup # branch to system call - -ENTRY(sys32_pipe_wrapper) - llgtr %r2,%r2 # u32 * - jg sys_pipe # branch to system call - -ENTRY(compat_sys_times_wrapper) - llgtr %r2,%r2 # struct compat_tms * - jg compat_sys_times # branch to system call - -ENTRY(sys32_brk_wrapper) - llgtr %r2,%r2 # unsigned long - jg sys_brk # branch to system call - -ENTRY(sys32_setgid16_wrapper) - llgfr %r2,%r2 # __kernel_old_gid_emu31_t - jg sys32_setgid16 # branch to system call - -#sys32_getgid16_wrapper # void - -ENTRY(sys32_signal_wrapper) - lgfr %r2,%r2 # int - llgtr %r3,%r3 # __sighandler_t - jg sys_signal - -#sys32_geteuid16_wrapper # void - -#sys32_getegid16_wrapper # void - -ENTRY(sys32_acct_wrapper) - llgtr %r2,%r2 # char * - jg sys_acct # branch to system call - -ENTRY(sys32_umount_wrapper) - llgtr %r2,%r2 # char * - lgfr %r3,%r3 # int - jg sys_umount # branch to system call - -ENTRY(compat_sys_ioctl_wrapper) - llgfr %r2,%r2 # unsigned int - llgfr %r3,%r3 # unsigned int - llgfr %r4,%r4 # unsigned int - jg compat_sys_ioctl # branch to system call - -ENTRY(compat_sys_fcntl_wrapper) - llgfr %r2,%r2 # unsigned int - llgfr %r3,%r3 # unsigned int - llgfr %r4,%r4 # unsigned long - jg compat_sys_fcntl # branch to system call - -ENTRY(sys32_setpgid_wrapper) - lgfr %r2,%r2 # pid_t - lgfr %r3,%r3 # pid_t - jg sys_setpgid # branch to system call - -ENTRY(sys32_umask_wrapper) - lgfr %r2,%r2 # int - jg sys_umask # branch to system call - -ENTRY(sys32_chroot_wrapper) - llgtr %r2,%r2 # char * - jg sys_chroot # branch to system call - -ENTRY(sys32_ustat_wrapper) - llgfr %r2,%r2 # dev_t - llgtr %r3,%r3 # struct ustat * - jg compat_sys_ustat - -ENTRY(sys32_dup2_wrapper) - llgfr %r2,%r2 # unsigned int - llgfr %r3,%r3 # unsigned int - jg sys_dup2 # branch to system call - -#sys32_getppid_wrapper # void - -#sys32_getpgrp_wrapper # void - -#sys32_setsid_wrapper # void - -ENTRY(sys32_sigaction_wrapper) - lgfr %r2,%r2 # int - llgtr %r3,%r3 # const struct old_sigaction * - llgtr %r4,%r4 # struct old_sigaction32 * - jg sys32_sigaction # branch to system call - -ENTRY(sys32_setreuid16_wrapper) - llgfr %r2,%r2 # __kernel_old_uid_emu31_t - llgfr %r3,%r3 # __kernel_old_uid_emu31_t - jg sys32_setreuid16 # branch to system call - -ENTRY(sys32_setregid16_wrapper) - llgfr %r2,%r2 # __kernel_old_gid_emu31_t - llgfr %r3,%r3 # __kernel_old_gid_emu31_t - jg sys32_setregid16 # branch to system call - -ENTRY(sys_sigsuspend_wrapper) - lgfr %r2,%r2 # int - lgfr %r3,%r3 # int - llgfr %r4,%r4 # old_sigset_t - jg sys_sigsuspend - -ENTRY(compat_sys_sigpending_wrapper) - llgtr %r2,%r2 # compat_old_sigset_t * - jg compat_sys_sigpending # branch to system call - -ENTRY(sys32_sethostname_wrapper) - llgtr %r2,%r2 # char * - lgfr %r3,%r3 # int - jg sys_sethostname # branch to system call - -ENTRY(compat_sys_setrlimit_wrapper) - llgfr %r2,%r2 # unsigned int - llgtr %r3,%r3 # struct rlimit_emu31 * - jg compat_sys_setrlimit # branch to system call - -ENTRY(compat_sys_old_getrlimit_wrapper) - llgfr %r2,%r2 # unsigned int - llgtr %r3,%r3 # struct rlimit_emu31 * - jg compat_sys_old_getrlimit # branch to system call - -ENTRY(compat_sys_getrlimit_wrapper) - llgfr %r2,%r2 # unsigned int - llgtr %r3,%r3 # struct rlimit_emu31 * - jg compat_sys_getrlimit # branch to system call - -ENTRY(sys32_mmap2_wrapper) - llgtr %r2,%r2 # struct mmap_arg_struct_emu31 * - jg sys32_mmap2 # branch to system call - -ENTRY(compat_sys_getrusage_wrapper) - lgfr %r2,%r2 # int - llgtr %r3,%r3 # struct rusage_emu31 * - jg 
compat_sys_getrusage # branch to system call - -ENTRY(compat_sys_gettimeofday_wrapper) - llgtr %r2,%r2 # struct timeval_emu31 * - llgtr %r3,%r3 # struct timezone * - jg compat_sys_gettimeofday # branch to system call - -ENTRY(compat_sys_settimeofday_wrapper) - llgtr %r2,%r2 # struct timeval_emu31 * - llgtr %r3,%r3 # struct timezone * - jg compat_sys_settimeofday # branch to system call - -ENTRY(sys32_getgroups16_wrapper) - lgfr %r2,%r2 # int - llgtr %r3,%r3 # __kernel_old_gid_emu31_t * - jg sys32_getgroups16 # branch to system call - -ENTRY(sys32_setgroups16_wrapper) - lgfr %r2,%r2 # int - llgtr %r3,%r3 # __kernel_old_gid_emu31_t * - jg sys32_setgroups16 # branch to system call - -ENTRY(sys32_symlink_wrapper) - llgtr %r2,%r2 # const char * - llgtr %r3,%r3 # const char * - jg sys_symlink # branch to system call - -ENTRY(sys32_readlink_wrapper) - llgtr %r2,%r2 # const char * - llgtr %r3,%r3 # char * - lgfr %r4,%r4 # int - jg sys_readlink # branch to system call - -ENTRY(sys32_uselib_wrapper) - llgtr %r2,%r2 # const char * - jg sys_uselib # branch to system call - -ENTRY(sys32_swapon_wrapper) - llgtr %r2,%r2 # const char * - lgfr %r3,%r3 # int - jg sys_swapon # branch to system call - -ENTRY(sys32_reboot_wrapper) - lgfr %r2,%r2 # int - lgfr %r3,%r3 # int - llgfr %r4,%r4 # unsigned int - llgtr %r5,%r5 # void * - jg sys_reboot # branch to system call - -ENTRY(old32_readdir_wrapper) - llgfr %r2,%r2 # unsigned int - llgtr %r3,%r3 # void * - llgfr %r4,%r4 # unsigned int - jg compat_sys_old_readdir # branch to system call - -ENTRY(old32_mmap_wrapper) - llgtr %r2,%r2 # struct mmap_arg_struct_emu31 * - jg old32_mmap # branch to system call - -ENTRY(sys32_munmap_wrapper) - llgfr %r2,%r2 # unsigned long - llgfr %r3,%r3 # size_t - jg sys_munmap # branch to system call - -ENTRY(sys32_truncate_wrapper) - llgtr %r2,%r2 # const char * - lgfr %r3,%r3 # long - jg sys_truncate # branch to system call - -ENTRY(sys32_ftruncate_wrapper) - llgfr %r2,%r2 # unsigned int - llgfr %r3,%r3 # unsigned long - jg sys_ftruncate # branch to system call - -ENTRY(sys32_fchmod_wrapper) - llgfr %r2,%r2 # unsigned int - llgfr %r3,%r3 # mode_t - jg sys_fchmod # branch to system call - -ENTRY(sys32_fchown16_wrapper) - llgfr %r2,%r2 # unsigned int - llgfr %r3,%r3 # compat_uid_t - llgfr %r4,%r4 # compat_uid_t - jg sys32_fchown16 # branch to system call - -ENTRY(sys32_getpriority_wrapper) - lgfr %r2,%r2 # int - lgfr %r3,%r3 # int - jg sys_getpriority # branch to system call - -ENTRY(sys32_setpriority_wrapper) - lgfr %r2,%r2 # int - lgfr %r3,%r3 # int - lgfr %r4,%r4 # int - jg sys_setpriority # branch to system call - -ENTRY(compat_sys_statfs_wrapper) - llgtr %r2,%r2 # char * - llgtr %r3,%r3 # struct compat_statfs * - jg compat_sys_statfs # branch to system call - -ENTRY(compat_sys_fstatfs_wrapper) - llgfr %r2,%r2 # unsigned int - llgtr %r3,%r3 # struct compat_statfs * - jg compat_sys_fstatfs # branch to system call - -ENTRY(compat_sys_socketcall_wrapper) - lgfr %r2,%r2 # int - llgtr %r3,%r3 # u32 * - jg compat_sys_socketcall # branch to system call - -ENTRY(sys32_syslog_wrapper) - lgfr %r2,%r2 # int - llgtr %r3,%r3 # char * - lgfr %r4,%r4 # int - jg sys_syslog # branch to system call - -ENTRY(compat_sys_setitimer_wrapper) - lgfr %r2,%r2 # int - llgtr %r3,%r3 # struct itimerval_emu31 * - llgtr %r4,%r4 # struct itimerval_emu31 * - jg compat_sys_setitimer # branch to system call - -ENTRY(compat_sys_getitimer_wrapper) - lgfr %r2,%r2 # int - llgtr %r3,%r3 # struct itimerval_emu31 * - jg compat_sys_getitimer # branch to system call - 
-ENTRY(compat_sys_newstat_wrapper) - llgtr %r2,%r2 # char * - llgtr %r3,%r3 # struct stat_emu31 * - jg compat_sys_newstat # branch to system call - -ENTRY(compat_sys_newlstat_wrapper) - llgtr %r2,%r2 # char * - llgtr %r3,%r3 # struct stat_emu31 * - jg compat_sys_newlstat # branch to system call - -ENTRY(compat_sys_newfstat_wrapper) - llgfr %r2,%r2 # unsigned int - llgtr %r3,%r3 # struct stat_emu31 * - jg compat_sys_newfstat # branch to system call - -#sys32_vhangup_wrapper # void - -ENTRY(compat_sys_wait4_wrapper) - lgfr %r2,%r2 # pid_t - llgtr %r3,%r3 # unsigned int * - lgfr %r4,%r4 # int - llgtr %r5,%r5 # struct rusage * - jg compat_sys_wait4 # branch to system call - -ENTRY(sys32_swapoff_wrapper) - llgtr %r2,%r2 # const char * - jg sys_swapoff # branch to system call - -ENTRY(compat_sys_sysinfo_wrapper) - llgtr %r2,%r2 # struct sysinfo_emu31 * - jg compat_sys_sysinfo # branch to system call - -ENTRY(sys32_ipc_wrapper) - llgfr %r2,%r2 # uint - lgfr %r3,%r3 # int - lgfr %r4,%r4 # int - lgfr %r5,%r5 # int - llgfr %r6,%r6 # u32 - jg sys32_ipc # branch to system call - -ENTRY(sys32_fsync_wrapper) - llgfr %r2,%r2 # unsigned int - jg sys_fsync # branch to system call - -#sys32_sigreturn_wrapper # done in sigreturn_glue - -#sys32_clone_wrapper # done in clone_glue - -ENTRY(sys32_setdomainname_wrapper) - llgtr %r2,%r2 # char * - lgfr %r3,%r3 # int - jg sys_setdomainname # branch to system call - -ENTRY(sys32_newuname_wrapper) - llgtr %r2,%r2 # struct new_utsname * - jg sys_newuname # branch to system call - -ENTRY(compat_sys_adjtimex_wrapper) - llgtr %r2,%r2 # struct compat_timex * - jg compat_sys_adjtimex # branch to system call - -ENTRY(sys32_mprotect_wrapper) - llgtr %r2,%r2 # unsigned long (actually pointer - llgfr %r3,%r3 # size_t - llgfr %r4,%r4 # unsigned long - jg sys_mprotect # branch to system call - -ENTRY(compat_sys_sigprocmask_wrapper) - lgfr %r2,%r2 # int - llgtr %r3,%r3 # compat_old_sigset_t * - llgtr %r4,%r4 # compat_old_sigset_t * - jg compat_sys_sigprocmask # branch to system call - -ENTRY(sys_init_module_wrapper) - llgtr %r2,%r2 # void * - llgfr %r3,%r3 # unsigned long - llgtr %r4,%r4 # char * - jg sys_init_module # branch to system call - -ENTRY(sys_delete_module_wrapper) - llgtr %r2,%r2 # const char * - llgfr %r3,%r3 # unsigned int - jg sys_delete_module # branch to system call - -ENTRY(sys32_quotactl_wrapper) - llgfr %r2,%r2 # unsigned int - llgtr %r3,%r3 # const char * - llgfr %r4,%r4 # qid_t - llgtr %r5,%r5 # caddr_t - jg sys_quotactl # branch to system call - -ENTRY(sys32_getpgid_wrapper) - lgfr %r2,%r2 # pid_t - jg sys_getpgid # branch to system call - -ENTRY(sys32_fchdir_wrapper) - llgfr %r2,%r2 # unsigned int - jg sys_fchdir # branch to system call - -ENTRY(sys32_bdflush_wrapper) - lgfr %r2,%r2 # int - lgfr %r3,%r3 # long - jg sys_bdflush # branch to system call - -ENTRY(sys32_sysfs_wrapper) - lgfr %r2,%r2 # int - llgfr %r3,%r3 # unsigned long - llgfr %r4,%r4 # unsigned long - jg sys_sysfs # branch to system call - -ENTRY(sys32_personality_wrapper) - llgfr %r2,%r2 # unsigned int - jg sys_s390_personality # branch to system call - -ENTRY(sys32_setfsuid16_wrapper) - llgfr %r2,%r2 # __kernel_old_uid_emu31_t - jg sys32_setfsuid16 # branch to system call - -ENTRY(sys32_setfsgid16_wrapper) - llgfr %r2,%r2 # __kernel_old_gid_emu31_t - jg sys32_setfsgid16 # branch to system call - -ENTRY(sys32_llseek_wrapper) - llgfr %r2,%r2 # unsigned int - llgfr %r3,%r3 # unsigned long - llgfr %r4,%r4 # unsigned long - llgtr %r5,%r5 # loff_t * - llgfr %r6,%r6 # unsigned int - jg sys_llseek 
# branch to system call - -ENTRY(sys32_getdents_wrapper) - llgfr %r2,%r2 # unsigned int - llgtr %r3,%r3 # void * - llgfr %r4,%r4 # unsigned int - jg compat_sys_getdents # branch to system call - -ENTRY(compat_sys_select_wrapper) - lgfr %r2,%r2 # int - llgtr %r3,%r3 # compat_fd_set * - llgtr %r4,%r4 # compat_fd_set * - llgtr %r5,%r5 # compat_fd_set * - llgtr %r6,%r6 # struct compat_timeval * - jg compat_sys_select # branch to system call - -ENTRY(sys32_flock_wrapper) - llgfr %r2,%r2 # unsigned int - llgfr %r3,%r3 # unsigned int - jg sys_flock # branch to system call - -ENTRY(sys32_msync_wrapper) - llgfr %r2,%r2 # unsigned long - llgfr %r3,%r3 # size_t - lgfr %r4,%r4 # int - jg sys_msync # branch to system call - -ENTRY(compat_sys_readv_wrapper) - lgfr %r2,%r2 # int - llgtr %r3,%r3 # const struct compat_iovec * - llgfr %r4,%r4 # unsigned long - jg compat_sys_readv # branch to system call - -ENTRY(compat_sys_writev_wrapper) - lgfr %r2,%r2 # int - llgtr %r3,%r3 # const struct compat_iovec * - llgfr %r4,%r4 # unsigned long - jg compat_sys_writev # branch to system call - -ENTRY(sys32_getsid_wrapper) - lgfr %r2,%r2 # pid_t - jg sys_getsid # branch to system call - -ENTRY(sys32_fdatasync_wrapper) - llgfr %r2,%r2 # unsigned int - jg sys_fdatasync # branch to system call - -ENTRY(sys32_mlock_wrapper) - llgfr %r2,%r2 # unsigned long - llgfr %r3,%r3 # size_t - jg sys_mlock # branch to system call - -ENTRY(sys32_munlock_wrapper) - llgfr %r2,%r2 # unsigned long - llgfr %r3,%r3 # size_t - jg sys_munlock # branch to system call - -ENTRY(sys32_mlockall_wrapper) - lgfr %r2,%r2 # int - jg sys_mlockall # branch to system call - -#sys32_munlockall_wrapper # void - -ENTRY(sys32_sched_setparam_wrapper) - lgfr %r2,%r2 # pid_t - llgtr %r3,%r3 # struct sched_param * - jg sys_sched_setparam # branch to system call - -ENTRY(sys32_sched_getparam_wrapper) - lgfr %r2,%r2 # pid_t - llgtr %r3,%r3 # struct sched_param * - jg sys_sched_getparam # branch to system call - -ENTRY(sys32_sched_setscheduler_wrapper) - lgfr %r2,%r2 # pid_t - lgfr %r3,%r3 # int - llgtr %r4,%r4 # struct sched_param * - jg sys_sched_setscheduler # branch to system call - -ENTRY(sys32_sched_getscheduler_wrapper) - lgfr %r2,%r2 # pid_t - jg sys_sched_getscheduler # branch to system call - -#sys32_sched_yield_wrapper # void - -ENTRY(sys32_sched_get_priority_max_wrapper) - lgfr %r2,%r2 # int - jg sys_sched_get_priority_max # branch to system call - -ENTRY(sys32_sched_get_priority_min_wrapper) - lgfr %r2,%r2 # int - jg sys_sched_get_priority_min # branch to system call - -ENTRY(sys32_sched_rr_get_interval_wrapper) - lgfr %r2,%r2 # pid_t - llgtr %r3,%r3 # struct compat_timespec * - jg sys32_sched_rr_get_interval # branch to system call - -ENTRY(compat_sys_nanosleep_wrapper) - llgtr %r2,%r2 # struct compat_timespec * - llgtr %r3,%r3 # struct compat_timespec * - jg compat_sys_nanosleep # branch to system call - -ENTRY(sys32_mremap_wrapper) - llgfr %r2,%r2 # unsigned long - llgfr %r3,%r3 # unsigned long - llgfr %r4,%r4 # unsigned long - llgfr %r5,%r5 # unsigned long - llgfr %r6,%r6 # unsigned long - jg sys_mremap # branch to system call - -ENTRY(sys32_setresuid16_wrapper) - llgfr %r2,%r2 # __kernel_old_uid_emu31_t - llgfr %r3,%r3 # __kernel_old_uid_emu31_t - llgfr %r4,%r4 # __kernel_old_uid_emu31_t - jg sys32_setresuid16 # branch to system call - -ENTRY(sys32_getresuid16_wrapper) - llgtr %r2,%r2 # __kernel_old_uid_emu31_t * - llgtr %r3,%r3 # __kernel_old_uid_emu31_t * - llgtr %r4,%r4 # __kernel_old_uid_emu31_t * - jg sys32_getresuid16 # branch to system call 
- -ENTRY(sys32_poll_wrapper) - llgtr %r2,%r2 # struct pollfd * - llgfr %r3,%r3 # unsigned int - lgfr %r4,%r4 # long - jg sys_poll # branch to system call - -ENTRY(sys32_setresgid16_wrapper) - llgfr %r2,%r2 # __kernel_old_gid_emu31_t - llgfr %r3,%r3 # __kernel_old_gid_emu31_t - llgfr %r4,%r4 # __kernel_old_gid_emu31_t - jg sys32_setresgid16 # branch to system call - -ENTRY(sys32_getresgid16_wrapper) - llgtr %r2,%r2 # __kernel_old_gid_emu31_t * - llgtr %r3,%r3 # __kernel_old_gid_emu31_t * - llgtr %r4,%r4 # __kernel_old_gid_emu31_t * - jg sys32_getresgid16 # branch to system call - -ENTRY(sys32_prctl_wrapper) - lgfr %r2,%r2 # int - llgfr %r3,%r3 # unsigned long - llgfr %r4,%r4 # unsigned long - llgfr %r5,%r5 # unsigned long - llgfr %r6,%r6 # unsigned long - jg sys_prctl # branch to system call - -#sys32_rt_sigreturn_wrapper # done in rt_sigreturn_glue - -ENTRY(sys32_rt_sigaction_wrapper) - lgfr %r2,%r2 # int - llgtr %r3,%r3 # const struct sigaction_emu31 * - llgtr %r4,%r4 # const struct sigaction_emu31 * - llgfr %r5,%r5 # size_t - jg sys32_rt_sigaction # branch to system call - -ENTRY(sys32_rt_sigprocmask_wrapper) - lgfr %r2,%r2 # int - llgtr %r3,%r3 # old_sigset_emu31 * - llgtr %r4,%r4 # old_sigset_emu31 * - llgfr %r5,%r5 # size_t - jg sys32_rt_sigprocmask # branch to system call - -ENTRY(sys32_rt_sigpending_wrapper) - llgtr %r2,%r2 # sigset_emu31 * - llgfr %r3,%r3 # size_t - jg sys32_rt_sigpending # branch to system call - -ENTRY(compat_sys_rt_sigtimedwait_wrapper) - llgtr %r2,%r2 # const sigset_emu31_t * - llgtr %r3,%r3 # siginfo_emu31_t * - llgtr %r4,%r4 # const struct compat_timespec * - llgfr %r5,%r5 # size_t - jg compat_sys_rt_sigtimedwait # branch to system call - -ENTRY(sys32_rt_sigqueueinfo_wrapper) - lgfr %r2,%r2 # int - lgfr %r3,%r3 # int - llgtr %r4,%r4 # siginfo_emu31_t * - jg sys32_rt_sigqueueinfo # branch to system call - -ENTRY(compat_sys_rt_sigsuspend_wrapper) - llgtr %r2,%r2 # compat_sigset_t * - llgfr %r3,%r3 # compat_size_t - jg compat_sys_rt_sigsuspend - -ENTRY(sys32_pread64_wrapper) - llgfr %r2,%r2 # unsigned int - llgtr %r3,%r3 # char * - llgfr %r4,%r4 # size_t - llgfr %r5,%r5 # u32 - llgfr %r6,%r6 # u32 - jg sys32_pread64 # branch to system call - -ENTRY(sys32_pwrite64_wrapper) - llgfr %r2,%r2 # unsigned int - llgtr %r3,%r3 # const char * - llgfr %r4,%r4 # size_t - llgfr %r5,%r5 # u32 - llgfr %r6,%r6 # u32 - jg sys32_pwrite64 # branch to system call - -ENTRY(sys32_chown16_wrapper) - llgtr %r2,%r2 # const char * - llgfr %r3,%r3 # __kernel_old_uid_emu31_t - llgfr %r4,%r4 # __kernel_old_gid_emu31_t - jg sys32_chown16 # branch to system call - -ENTRY(sys32_getcwd_wrapper) - llgtr %r2,%r2 # char * - llgfr %r3,%r3 # unsigned long - jg sys_getcwd # branch to system call - -ENTRY(sys32_capget_wrapper) - llgtr %r2,%r2 # cap_user_header_t - llgtr %r3,%r3 # cap_user_data_t - jg sys_capget # branch to system call - -ENTRY(sys32_capset_wrapper) - llgtr %r2,%r2 # cap_user_header_t - llgtr %r3,%r3 # const cap_user_data_t - jg sys_capset # branch to system call - -ENTRY(sys32_sigaltstack_wrapper) - llgtr %r2,%r2 # const stack_emu31_t * - llgtr %r3,%r3 # stack_emu31_t * - jg sys32_sigaltstack - -ENTRY(sys32_sendfile_wrapper) - lgfr %r2,%r2 # int - lgfr %r3,%r3 # int - llgtr %r4,%r4 # __kernel_off_emu31_t * - llgfr %r5,%r5 # size_t - jg sys32_sendfile # branch to system call - -#sys32_vfork_wrapper # done in vfork_glue - -ENTRY(sys32_truncate64_wrapper) - llgtr %r2,%r2 # const char * - llgfr %r3,%r3 # unsigned long - llgfr %r4,%r4 # unsigned long - jg sys32_truncate64 # branch to 
system call - -ENTRY(sys32_ftruncate64_wrapper) - llgfr %r2,%r2 # unsigned int - llgfr %r3,%r3 # unsigned long - llgfr %r4,%r4 # unsigned long - jg sys32_ftruncate64 # branch to system call - -ENTRY(sys32_lchown_wrapper) - llgtr %r2,%r2 # const char * - llgfr %r3,%r3 # uid_t - llgfr %r4,%r4 # gid_t - jg sys_lchown # branch to system call - -#sys32_getuid_wrapper # void -#sys32_getgid_wrapper # void -#sys32_geteuid_wrapper # void -#sys32_getegid_wrapper # void - -ENTRY(sys32_setreuid_wrapper) - llgfr %r2,%r2 # uid_t - llgfr %r3,%r3 # uid_t - jg sys_setreuid # branch to system call - -ENTRY(sys32_setregid_wrapper) - llgfr %r2,%r2 # gid_t - llgfr %r3,%r3 # gid_t - jg sys_setregid # branch to system call - -ENTRY(sys32_getgroups_wrapper) - lgfr %r2,%r2 # int - llgtr %r3,%r3 # gid_t * - jg sys_getgroups # branch to system call - -ENTRY(sys32_setgroups_wrapper) - lgfr %r2,%r2 # int - llgtr %r3,%r3 # gid_t * - jg sys_setgroups # branch to system call - -ENTRY(sys32_fchown_wrapper) - llgfr %r2,%r2 # unsigned int - llgfr %r3,%r3 # uid_t - llgfr %r4,%r4 # gid_t - jg sys_fchown # branch to system call - -ENTRY(sys32_setresuid_wrapper) - llgfr %r2,%r2 # uid_t - llgfr %r3,%r3 # uid_t - llgfr %r4,%r4 # uid_t - jg sys_setresuid # branch to system call - -ENTRY(sys32_getresuid_wrapper) - llgtr %r2,%r2 # uid_t * - llgtr %r3,%r3 # uid_t * - llgtr %r4,%r4 # uid_t * - jg sys_getresuid # branch to system call - -ENTRY(sys32_setresgid_wrapper) - llgfr %r2,%r2 # gid_t - llgfr %r3,%r3 # gid_t - llgfr %r4,%r4 # gid_t - jg sys_setresgid # branch to system call - -ENTRY(sys32_getresgid_wrapper) - llgtr %r2,%r2 # gid_t * - llgtr %r3,%r3 # gid_t * - llgtr %r4,%r4 # gid_t * - jg sys_getresgid # branch to system call - -ENTRY(sys32_chown_wrapper) - llgtr %r2,%r2 # const char * - llgfr %r3,%r3 # uid_t - llgfr %r4,%r4 # gid_t - jg sys_chown # branch to system call - -ENTRY(sys32_setuid_wrapper) - llgfr %r2,%r2 # uid_t - jg sys_setuid # branch to system call - -ENTRY(sys32_setgid_wrapper) - llgfr %r2,%r2 # gid_t - jg sys_setgid # branch to system call - -ENTRY(sys32_setfsuid_wrapper) - llgfr %r2,%r2 # uid_t - jg sys_setfsuid # branch to system call - -ENTRY(sys32_setfsgid_wrapper) - llgfr %r2,%r2 # gid_t - jg sys_setfsgid # branch to system call - -ENTRY(sys32_pivot_root_wrapper) - llgtr %r2,%r2 # const char * - llgtr %r3,%r3 # const char * - jg sys_pivot_root # branch to system call - -ENTRY(sys32_mincore_wrapper) - llgfr %r2,%r2 # unsigned long - llgfr %r3,%r3 # size_t - llgtr %r4,%r4 # unsigned char * - jg sys_mincore # branch to system call - -ENTRY(sys32_madvise_wrapper) - llgfr %r2,%r2 # unsigned long - llgfr %r3,%r3 # size_t - lgfr %r4,%r4 # int - jg sys_madvise # branch to system call - -ENTRY(sys32_getdents64_wrapper) - llgfr %r2,%r2 # unsigned int - llgtr %r3,%r3 # void * - llgfr %r4,%r4 # unsigned int - jg sys_getdents64 # branch to system call - -ENTRY(compat_sys_fcntl64_wrapper) - llgfr %r2,%r2 # unsigned int - llgfr %r3,%r3 # unsigned int - llgfr %r4,%r4 # unsigned long - jg compat_sys_fcntl64 # branch to system call - -ENTRY(sys32_stat64_wrapper) - llgtr %r2,%r2 # char * - llgtr %r3,%r3 # struct stat64 * - jg sys32_stat64 # branch to system call - -ENTRY(sys32_lstat64_wrapper) - llgtr %r2,%r2 # char * - llgtr %r3,%r3 # struct stat64 * - jg sys32_lstat64 # branch to system call - -ENTRY(sys32_stime_wrapper) - llgtr %r2,%r2 # long * - jg compat_sys_stime # branch to system call - -ENTRY(sys32_sysctl_wrapper) - llgtr %r2,%r2 # struct compat_sysctl_args * - jg compat_sys_sysctl - -ENTRY(sys32_fstat64_wrapper) - 
llgfr %r2,%r2 # unsigned long - llgtr %r3,%r3 # struct stat64 * - jg sys32_fstat64 # branch to system call - -ENTRY(compat_sys_futex_wrapper) - llgtr %r2,%r2 # u32 * - lgfr %r3,%r3 # int - lgfr %r4,%r4 # int - llgtr %r5,%r5 # struct compat_timespec * - llgtr %r6,%r6 # u32 * - lgf %r0,164(%r15) # int - stg %r0,160(%r15) - jg compat_sys_futex # branch to system call - -ENTRY(sys32_setxattr_wrapper) - llgtr %r2,%r2 # char * - llgtr %r3,%r3 # char * - llgtr %r4,%r4 # void * - llgfr %r5,%r5 # size_t - lgfr %r6,%r6 # int - jg sys_setxattr - -ENTRY(sys32_lsetxattr_wrapper) - llgtr %r2,%r2 # char * - llgtr %r3,%r3 # char * - llgtr %r4,%r4 # void * - llgfr %r5,%r5 # size_t - lgfr %r6,%r6 # int - jg sys_lsetxattr - -ENTRY(sys32_fsetxattr_wrapper) - lgfr %r2,%r2 # int - llgtr %r3,%r3 # char * - llgtr %r4,%r4 # void * - llgfr %r5,%r5 # size_t - lgfr %r6,%r6 # int - jg sys_fsetxattr - -ENTRY(sys32_getxattr_wrapper) - llgtr %r2,%r2 # char * - llgtr %r3,%r3 # char * - llgtr %r4,%r4 # void * - llgfr %r5,%r5 # size_t - jg sys_getxattr - -ENTRY(sys32_lgetxattr_wrapper) - llgtr %r2,%r2 # char * - llgtr %r3,%r3 # char * - llgtr %r4,%r4 # void * - llgfr %r5,%r5 # size_t - jg sys_lgetxattr - -ENTRY(sys32_fgetxattr_wrapper) - lgfr %r2,%r2 # int - llgtr %r3,%r3 # char * - llgtr %r4,%r4 # void * - llgfr %r5,%r5 # size_t - jg sys_fgetxattr - -ENTRY(sys32_listxattr_wrapper) - llgtr %r2,%r2 # char * - llgtr %r3,%r3 # char * - llgfr %r4,%r4 # size_t - jg sys_listxattr - -ENTRY(sys32_llistxattr_wrapper) - llgtr %r2,%r2 # char * - llgtr %r3,%r3 # char * - llgfr %r4,%r4 # size_t - jg sys_llistxattr - -ENTRY(sys32_flistxattr_wrapper) - lgfr %r2,%r2 # int - llgtr %r3,%r3 # char * - llgfr %r4,%r4 # size_t - jg sys_flistxattr - -ENTRY(sys32_removexattr_wrapper) - llgtr %r2,%r2 # char * - llgtr %r3,%r3 # char * - jg sys_removexattr - -ENTRY(sys32_lremovexattr_wrapper) - llgtr %r2,%r2 # char * - llgtr %r3,%r3 # char * - jg sys_lremovexattr - -ENTRY(sys32_fremovexattr_wrapper) - lgfr %r2,%r2 # int - llgtr %r3,%r3 # char * - jg sys_fremovexattr - -ENTRY(sys32_sched_setaffinity_wrapper) - lgfr %r2,%r2 # int - llgfr %r3,%r3 # unsigned int - llgtr %r4,%r4 # unsigned long * - jg compat_sys_sched_setaffinity - -ENTRY(sys32_sched_getaffinity_wrapper) - lgfr %r2,%r2 # int - llgfr %r3,%r3 # unsigned int - llgtr %r4,%r4 # unsigned long * - jg compat_sys_sched_getaffinity - -ENTRY(sys32_exit_group_wrapper) - lgfr %r2,%r2 # int - jg sys_exit_group # branch to system call - -ENTRY(sys32_set_tid_address_wrapper) - llgtr %r2,%r2 # int * - jg sys_set_tid_address # branch to system call - -ENTRY(sys_epoll_create_wrapper) - lgfr %r2,%r2 # int - jg sys_epoll_create # branch to system call - -ENTRY(sys_epoll_ctl_wrapper) - lgfr %r2,%r2 # int - lgfr %r3,%r3 # int - lgfr %r4,%r4 # int - llgtr %r5,%r5 # struct epoll_event * - jg sys_epoll_ctl # branch to system call - -ENTRY(sys_epoll_wait_wrapper) - lgfr %r2,%r2 # int - llgtr %r3,%r3 # struct epoll_event * - lgfr %r4,%r4 # int - lgfr %r5,%r5 # int - jg sys_epoll_wait # branch to system call - -ENTRY(sys32_lookup_dcookie_wrapper) - sllg %r2,%r2,32 # get high word of 64bit dcookie - or %r2,%r3 # get low word of 64bit dcookie - llgtr %r3,%r4 # char * - llgfr %r4,%r5 # size_t - jg sys_lookup_dcookie - -ENTRY(sys32_fadvise64_wrapper) - lgfr %r2,%r2 # int - sllg %r3,%r3,32 # get high word of 64bit loff_t - or %r3,%r4 # get low word of 64bit loff_t - llgfr %r4,%r5 # size_t (unsigned long) - lgfr %r5,%r6 # int - jg sys32_fadvise64 - -ENTRY(sys32_fadvise64_64_wrapper) - llgtr %r2,%r2 # struct 
fadvise64_64_args * - jg sys32_fadvise64_64 - -ENTRY(sys32_clock_settime_wrapper) - lgfr %r2,%r2 # clockid_t (int) - llgtr %r3,%r3 # struct compat_timespec * - jg compat_sys_clock_settime - -ENTRY(sys32_clock_gettime_wrapper) - lgfr %r2,%r2 # clockid_t (int) - llgtr %r3,%r3 # struct compat_timespec * - jg compat_sys_clock_gettime - -ENTRY(sys32_clock_getres_wrapper) - lgfr %r2,%r2 # clockid_t (int) - llgtr %r3,%r3 # struct compat_timespec * - jg compat_sys_clock_getres - -ENTRY(sys32_clock_nanosleep_wrapper) - lgfr %r2,%r2 # clockid_t (int) - lgfr %r3,%r3 # int - llgtr %r4,%r4 # struct compat_timespec * - llgtr %r5,%r5 # struct compat_timespec * - jg compat_sys_clock_nanosleep - -ENTRY(sys32_timer_create_wrapper) - lgfr %r2,%r2 # timer_t (int) - llgtr %r3,%r3 # struct compat_sigevent * - llgtr %r4,%r4 # timer_t * - jg compat_sys_timer_create - -ENTRY(sys32_timer_settime_wrapper) - lgfr %r2,%r2 # timer_t (int) - lgfr %r3,%r3 # int - llgtr %r4,%r4 # struct compat_itimerspec * - llgtr %r5,%r5 # struct compat_itimerspec * - jg compat_sys_timer_settime - -ENTRY(sys32_timer_gettime_wrapper) - lgfr %r2,%r2 # timer_t (int) - llgtr %r3,%r3 # struct compat_itimerspec * - jg compat_sys_timer_gettime - -ENTRY(sys32_timer_getoverrun_wrapper) - lgfr %r2,%r2 # timer_t (int) - jg sys_timer_getoverrun - -ENTRY(sys32_timer_delete_wrapper) - lgfr %r2,%r2 # timer_t (int) - jg sys_timer_delete - -ENTRY(sys32_io_setup_wrapper) - llgfr %r2,%r2 # unsigned int - llgtr %r3,%r3 # u32 * - jg compat_sys_io_setup - -ENTRY(sys32_io_destroy_wrapper) - llgfr %r2,%r2 # (aio_context_t) u32 - jg sys_io_destroy - -ENTRY(sys32_io_getevents_wrapper) - llgfr %r2,%r2 # (aio_context_t) u32 - lgfr %r3,%r3 # long - lgfr %r4,%r4 # long - llgtr %r5,%r5 # struct io_event * - llgtr %r6,%r6 # struct compat_timespec * - jg compat_sys_io_getevents - -ENTRY(sys32_io_submit_wrapper) - llgfr %r2,%r2 # (aio_context_t) u32 - lgfr %r3,%r3 # long - llgtr %r4,%r4 # struct iocb ** - jg compat_sys_io_submit - -ENTRY(sys32_io_cancel_wrapper) - llgfr %r2,%r2 # (aio_context_t) u32 - llgtr %r3,%r3 # struct iocb * - llgtr %r4,%r4 # struct io_event * - jg sys_io_cancel - -ENTRY(compat_sys_statfs64_wrapper) - llgtr %r2,%r2 # const char * - llgfr %r3,%r3 # compat_size_t - llgtr %r4,%r4 # struct compat_statfs64 * - jg compat_sys_statfs64 - -ENTRY(compat_sys_fstatfs64_wrapper) - llgfr %r2,%r2 # unsigned int fd - llgfr %r3,%r3 # compat_size_t - llgtr %r4,%r4 # struct compat_statfs64 * - jg compat_sys_fstatfs64 - -ENTRY(compat_sys_mq_open_wrapper) - llgtr %r2,%r2 # const char * - lgfr %r3,%r3 # int - llgfr %r4,%r4 # mode_t - llgtr %r5,%r5 # struct compat_mq_attr * - jg compat_sys_mq_open - -ENTRY(sys32_mq_unlink_wrapper) - llgtr %r2,%r2 # const char * - jg sys_mq_unlink - -ENTRY(compat_sys_mq_timedsend_wrapper) - lgfr %r2,%r2 # mqd_t - llgtr %r3,%r3 # const char * - llgfr %r4,%r4 # size_t - llgfr %r5,%r5 # unsigned int - llgtr %r6,%r6 # const struct compat_timespec * - jg compat_sys_mq_timedsend - -ENTRY(compat_sys_mq_timedreceive_wrapper) - lgfr %r2,%r2 # mqd_t - llgtr %r3,%r3 # char * - llgfr %r4,%r4 # size_t - llgtr %r5,%r5 # unsigned int * - llgtr %r6,%r6 # const struct compat_timespec * - jg compat_sys_mq_timedreceive - -ENTRY(compat_sys_mq_notify_wrapper) - lgfr %r2,%r2 # mqd_t - llgtr %r3,%r3 # struct compat_sigevent * - jg compat_sys_mq_notify - -ENTRY(compat_sys_mq_getsetattr_wrapper) - lgfr %r2,%r2 # mqd_t - llgtr %r3,%r3 # struct compat_mq_attr * - llgtr %r4,%r4 # struct compat_mq_attr * - jg compat_sys_mq_getsetattr - 
-ENTRY(compat_sys_add_key_wrapper) - llgtr %r2,%r2 # const char * - llgtr %r3,%r3 # const char * - llgtr %r4,%r4 # const void * - llgfr %r5,%r5 # size_t - llgfr %r6,%r6 # (key_serial_t) u32 - jg sys_add_key - -ENTRY(compat_sys_request_key_wrapper) - llgtr %r2,%r2 # const char * - llgtr %r3,%r3 # const char * - llgtr %r4,%r4 # const void * - llgfr %r5,%r5 # (key_serial_t) u32 - jg sys_request_key - -ENTRY(sys32_remap_file_pages_wrapper) - llgfr %r2,%r2 # unsigned long - llgfr %r3,%r3 # unsigned long - llgfr %r4,%r4 # unsigned long - llgfr %r5,%r5 # unsigned long - llgfr %r6,%r6 # unsigned long - jg sys_remap_file_pages - -ENTRY(compat_sys_waitid_wrapper) - lgfr %r2,%r2 # int - lgfr %r3,%r3 # pid_t - llgtr %r4,%r4 # siginfo_emu31_t * - lgfr %r5,%r5 # int - llgtr %r6,%r6 # struct rusage_emu31 * - jg compat_sys_waitid - -ENTRY(compat_sys_kexec_load_wrapper) - llgfr %r2,%r2 # unsigned long - llgfr %r3,%r3 # unsigned long - llgtr %r4,%r4 # struct kexec_segment * - llgfr %r5,%r5 # unsigned long - jg compat_sys_kexec_load - -ENTRY(sys_ioprio_set_wrapper) - lgfr %r2,%r2 # int - lgfr %r3,%r3 # int - lgfr %r4,%r4 # int - jg sys_ioprio_set - -ENTRY(sys_ioprio_get_wrapper) - lgfr %r2,%r2 # int - lgfr %r3,%r3 # int - jg sys_ioprio_get - -ENTRY(sys_inotify_add_watch_wrapper) - lgfr %r2,%r2 # int - llgtr %r3,%r3 # const char * - llgfr %r4,%r4 # u32 - jg sys_inotify_add_watch - -ENTRY(sys_inotify_rm_watch_wrapper) - lgfr %r2,%r2 # int - llgfr %r3,%r3 # u32 - jg sys_inotify_rm_watch - -ENTRY(compat_sys_openat_wrapper) - llgfr %r2,%r2 # unsigned int - llgtr %r3,%r3 # const char * - lgfr %r4,%r4 # int - lgfr %r5,%r5 # int - jg compat_sys_openat - -ENTRY(sys_mkdirat_wrapper) - lgfr %r2,%r2 # int - llgtr %r3,%r3 # const char * - lgfr %r4,%r4 # int - jg sys_mkdirat - -ENTRY(sys_mknodat_wrapper) - lgfr %r2,%r2 # int - llgtr %r3,%r3 # const char * - lgfr %r4,%r4 # int - llgfr %r5,%r5 # unsigned int - jg sys_mknodat - -ENTRY(sys_fchownat_wrapper) - lgfr %r2,%r2 # int - llgtr %r3,%r3 # const char * - llgfr %r4,%r4 # uid_t - llgfr %r5,%r5 # gid_t - lgfr %r6,%r6 # int - jg sys_fchownat - -ENTRY(compat_sys_futimesat_wrapper) - llgfr %r2,%r2 # unsigned int - llgtr %r3,%r3 # char * - llgtr %r4,%r4 # struct timeval * - jg compat_sys_futimesat - -ENTRY(sys32_fstatat64_wrapper) - llgfr %r2,%r2 # unsigned int - llgtr %r3,%r3 # char * - llgtr %r4,%r4 # struct stat64 * - lgfr %r5,%r5 # int - jg sys32_fstatat64 - -ENTRY(sys_unlinkat_wrapper) - lgfr %r2,%r2 # int - llgtr %r3,%r3 # const char * - lgfr %r4,%r4 # int - jg sys_unlinkat - -ENTRY(sys_renameat_wrapper) - lgfr %r2,%r2 # int - llgtr %r3,%r3 # const char * - lgfr %r4,%r4 # int - llgtr %r5,%r5 # const char * - jg sys_renameat - -ENTRY(sys_linkat_wrapper) - lgfr %r2,%r2 # int - llgtr %r3,%r3 # const char * - lgfr %r4,%r4 # int - llgtr %r5,%r5 # const char * - lgfr %r6,%r6 # int - jg sys_linkat - -ENTRY(sys_symlinkat_wrapper) - llgtr %r2,%r2 # const char * - lgfr %r3,%r3 # int - llgtr %r4,%r4 # const char * - jg sys_symlinkat - -ENTRY(sys_readlinkat_wrapper) - lgfr %r2,%r2 # int - llgtr %r3,%r3 # const char * - llgtr %r4,%r4 # char * - lgfr %r5,%r5 # int - jg sys_readlinkat - -ENTRY(sys_fchmodat_wrapper) - lgfr %r2,%r2 # int - llgtr %r3,%r3 # const char * - llgfr %r4,%r4 # mode_t - jg sys_fchmodat - -ENTRY(sys_faccessat_wrapper) - lgfr %r2,%r2 # int - llgtr %r3,%r3 # const char * - lgfr %r4,%r4 # int - jg sys_faccessat - -ENTRY(compat_sys_pselect6_wrapper) - lgfr %r2,%r2 # int - llgtr %r3,%r3 # fd_set * - llgtr %r4,%r4 # fd_set * - llgtr %r5,%r5 # fd_set * - llgtr %r6,%r6 # 
struct timespec * - llgt %r0,164(%r15) # void * - stg %r0,160(%r15) - jg compat_sys_pselect6 - -ENTRY(compat_sys_ppoll_wrapper) - llgtr %r2,%r2 # struct pollfd * - llgfr %r3,%r3 # unsigned int - llgtr %r4,%r4 # struct timespec * - llgtr %r5,%r5 # const sigset_t * - llgfr %r6,%r6 # size_t - jg compat_sys_ppoll - -ENTRY(sys_unshare_wrapper) - llgfr %r2,%r2 # unsigned long - jg sys_unshare - -ENTRY(compat_sys_set_robust_list_wrapper) - llgtr %r2,%r2 # struct compat_robust_list_head * - llgfr %r3,%r3 # size_t - jg compat_sys_set_robust_list - -ENTRY(compat_sys_get_robust_list_wrapper) - lgfr %r2,%r2 # int - llgtr %r3,%r3 # compat_uptr_t_t * - llgtr %r4,%r4 # compat_size_t * - jg compat_sys_get_robust_list - -ENTRY(sys_splice_wrapper) - lgfr %r2,%r2 # int - llgtr %r3,%r3 # loff_t * - lgfr %r4,%r4 # int - llgtr %r5,%r5 # loff_t * - llgfr %r6,%r6 # size_t - llgf %r0,164(%r15) # unsigned int - stg %r0,160(%r15) - jg sys_splice - -ENTRY(sys_sync_file_range_wrapper) - lgfr %r2,%r2 # int - sllg %r3,%r3,32 # get high word of 64bit loff_t - or %r3,%r4 # get low word of 64bit loff_t - sllg %r4,%r5,32 # get high word of 64bit loff_t - or %r4,%r6 # get low word of 64bit loff_t - llgf %r5,164(%r15) # unsigned int - jg sys_sync_file_range - -ENTRY(sys_tee_wrapper) - lgfr %r2,%r2 # int - lgfr %r3,%r3 # int - llgfr %r4,%r4 # size_t - llgfr %r5,%r5 # unsigned int - jg sys_tee - -ENTRY(compat_sys_vmsplice_wrapper) - lgfr %r2,%r2 # int - llgtr %r3,%r3 # compat_iovec * - llgfr %r4,%r4 # unsigned int - llgfr %r5,%r5 # unsigned int - jg compat_sys_vmsplice - -ENTRY(sys_getcpu_wrapper) - llgtr %r2,%r2 # unsigned * - llgtr %r3,%r3 # unsigned * - llgtr %r4,%r4 # struct getcpu_cache * - jg sys_getcpu - -ENTRY(compat_sys_epoll_pwait_wrapper) - lgfr %r2,%r2 # int - llgtr %r3,%r3 # struct compat_epoll_event * - lgfr %r4,%r4 # int - lgfr %r5,%r5 # int - llgtr %r6,%r6 # compat_sigset_t * - llgf %r0,164(%r15) # compat_size_t - stg %r0,160(%r15) - jg compat_sys_epoll_pwait - -ENTRY(compat_sys_utimes_wrapper) - llgtr %r2,%r2 # char * - llgtr %r3,%r3 # struct compat_timeval * - jg compat_sys_utimes - -ENTRY(compat_sys_utimensat_wrapper) - llgfr %r2,%r2 # unsigned int - llgtr %r3,%r3 # char * - llgtr %r4,%r4 # struct compat_timespec * - lgfr %r5,%r5 # int - jg compat_sys_utimensat - -ENTRY(compat_sys_signalfd_wrapper) - lgfr %r2,%r2 # int - llgtr %r3,%r3 # compat_sigset_t * - llgfr %r4,%r4 # compat_size_t - jg compat_sys_signalfd - -ENTRY(sys_eventfd_wrapper) - llgfr %r2,%r2 # unsigned int - jg sys_eventfd - -ENTRY(sys_fallocate_wrapper) - lgfr %r2,%r2 # int - lgfr %r3,%r3 # int - sllg %r4,%r4,32 # get high word of 64bit loff_t - lr %r4,%r5 # get low word of 64bit loff_t - sllg %r5,%r6,32 # get high word of 64bit loff_t - l %r5,164(%r15) # get low word of 64bit loff_t - jg sys_fallocate - -ENTRY(sys_timerfd_create_wrapper) - lgfr %r2,%r2 # int - lgfr %r3,%r3 # int - jg sys_timerfd_create - -ENTRY(compat_sys_timerfd_settime_wrapper) - lgfr %r2,%r2 # int - lgfr %r3,%r3 # int - llgtr %r4,%r4 # struct compat_itimerspec * - llgtr %r5,%r5 # struct compat_itimerspec * - jg compat_sys_timerfd_settime - -ENTRY(compat_sys_timerfd_gettime_wrapper) - lgfr %r2,%r2 # int - llgtr %r3,%r3 # struct compat_itimerspec * - jg compat_sys_timerfd_gettime - -ENTRY(compat_sys_signalfd4_wrapper) - lgfr %r2,%r2 # int - llgtr %r3,%r3 # compat_sigset_t * - llgfr %r4,%r4 # compat_size_t - lgfr %r5,%r5 # int - jg compat_sys_signalfd4 - -ENTRY(sys_eventfd2_wrapper) - llgfr %r2,%r2 # unsigned int - lgfr %r3,%r3 # int - jg sys_eventfd2 - 
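
Two further conventions recur in these stubs. A 64-bit loff_t arrives from 31-bit user space split into two 32-bit register halves and is reassembled with sllg plus or (see sys_sync_file_range_wrapper and sys_fallocate_wrapper above), and any argument beyond the five passed in registers is fetched from the caller's stack frame at 164(%r15), widened, and re-stored into the 64-bit frame slot at 160(%r15) before the branch (see compat_sys_epoll_pwait_wrapper). A C sketch of the offset reassembly, with an illustrative helper name:

/* Equivalent of "sllg %rX,%rX,32 ; or %rX,%rY": merge the high and
 * low 32-bit halves of a 64-bit file offset. Illustrative only. */
static inline loff_t wrap_merge_loff(u32 high, u32 low)
{
	return ((loff_t)high << 32) | low;
}
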
-ENTRY(sys_inotify_init1_wrapper) - lgfr %r2,%r2 # int - jg sys_inotify_init1 - -ENTRY(sys_pipe2_wrapper) - llgtr %r2,%r2 # u32 * - lgfr %r3,%r3 # int - jg sys_pipe2 # branch to system call - -ENTRY(sys_dup3_wrapper) - llgfr %r2,%r2 # unsigned int - llgfr %r3,%r3 # unsigned int - lgfr %r4,%r4 # int - jg sys_dup3 # branch to system call - -ENTRY(sys_epoll_create1_wrapper) - lgfr %r2,%r2 # int - jg sys_epoll_create1 # branch to system call - -ENTRY(sys32_readahead_wrapper) - lgfr %r2,%r2 # int - llgfr %r3,%r3 # u32 - llgfr %r4,%r4 # u32 - lgfr %r5,%r5 # s32 - jg sys32_readahead # branch to system call - -ENTRY(sys32_sendfile64_wrapper) - lgfr %r2,%r2 # int - lgfr %r3,%r3 # int - llgtr %r4,%r4 # compat_loff_t * - lgfr %r5,%r5 # s32 - jg sys32_sendfile64 # branch to system call - -ENTRY(sys_tkill_wrapper) - lgfr %r2,%r2 # pid_t - lgfr %r3,%r3 # int - jg sys_tkill # branch to system call - -ENTRY(sys_tgkill_wrapper) - lgfr %r2,%r2 # pid_t - lgfr %r3,%r3 # pid_t - lgfr %r4,%r4 # int - jg sys_tgkill # branch to system call - -ENTRY(compat_sys_keyctl_wrapper) - llgfr %r2,%r2 # u32 - llgfr %r3,%r3 # u32 - llgfr %r4,%r4 # u32 - llgfr %r5,%r5 # u32 - llgfr %r6,%r6 # u32 - jg compat_sys_keyctl # branch to system call - -ENTRY(compat_sys_preadv_wrapper) - llgfr %r2,%r2 # unsigned long - llgtr %r3,%r3 # compat_iovec * - llgfr %r4,%r4 # unsigned long - llgfr %r5,%r5 # u32 - llgfr %r6,%r6 # u32 - jg compat_sys_preadv # branch to system call - -ENTRY(compat_sys_pwritev_wrapper) - llgfr %r2,%r2 # unsigned long - llgtr %r3,%r3 # compat_iovec * - llgfr %r4,%r4 # unsigned long - llgfr %r5,%r5 # u32 - llgfr %r6,%r6 # u32 - jg compat_sys_pwritev # branch to system call - -ENTRY(compat_sys_rt_tgsigqueueinfo_wrapper) - lgfr %r2,%r2 # compat_pid_t - lgfr %r3,%r3 # compat_pid_t - lgfr %r4,%r4 # int - llgtr %r5,%r5 # struct compat_siginfo * - jg compat_sys_rt_tgsigqueueinfo_wrapper # branch to system call - -ENTRY(sys_perf_event_open_wrapper) - llgtr %r2,%r2 # const struct perf_event_attr * - lgfr %r3,%r3 # pid_t - lgfr %r4,%r4 # int - lgfr %r5,%r5 # int - llgfr %r6,%r6 # unsigned long - jg sys_perf_event_open # branch to system call - -ENTRY(sys_clone_wrapper) - llgfr %r2,%r2 # unsigned long - llgfr %r3,%r3 # unsigned long - llgtr %r4,%r4 # int * - llgtr %r5,%r5 # int * - jg sys_clone # branch to system call - -ENTRY(sys32_execve_wrapper) - llgtr %r2,%r2 # char * - llgtr %r3,%r3 # compat_uptr_t * - llgtr %r4,%r4 # compat_uptr_t * - jg sys32_execve # branch to system call - -ENTRY(sys_fanotify_init_wrapper) - llgfr %r2,%r2 # unsigned int - llgfr %r3,%r3 # unsigned int - jg sys_fanotify_init # branch to system call - -ENTRY(sys_fanotify_mark_wrapper) - lgfr %r2,%r2 # int - llgfr %r3,%r3 # unsigned int - sllg %r4,%r4,32 # get high word of 64bit mask - lr %r4,%r5 # get low word of 64bit mask - llgfr %r5,%r6 # unsigned int - llgt %r6,164(%r15) # char * - jg sys_fanotify_mark # branch to system call - -ENTRY(sys_prlimit64_wrapper) - lgfr %r2,%r2 # pid_t - llgfr %r3,%r3 # unsigned int - llgtr %r4,%r4 # const struct rlimit64 __user * - llgtr %r5,%r5 # struct rlimit64 __user * - jg sys_prlimit64 # branch to system call - -ENTRY(sys_name_to_handle_at_wrapper) - lgfr %r2,%r2 # int - llgtr %r3,%r3 # const char __user * - llgtr %r4,%r4 # struct file_handle __user * - llgtr %r5,%r5 # int __user * - lgfr %r6,%r6 # int - jg sys_name_to_handle_at - -ENTRY(compat_sys_open_by_handle_at_wrapper) - lgfr %r2,%r2 # int - llgtr %r3,%r3 # struct file_handle __user * - lgfr %r4,%r4 # int - jg compat_sys_open_by_handle_at - 
-ENTRY(compat_sys_clock_adjtime_wrapper) - lgfr %r2,%r2 # clockid_t (int) - llgtr %r3,%r3 # struct compat_timex __user * - jg compat_sys_clock_adjtime - -ENTRY(sys_syncfs_wrapper) - lgfr %r2,%r2 # int - jg sys_syncfs - -ENTRY(sys_setns_wrapper) - lgfr %r2,%r2 # int - lgfr %r3,%r3 # int - jg sys_setns - -ENTRY(compat_sys_process_vm_readv_wrapper) - lgfr %r2,%r2 # compat_pid_t - llgtr %r3,%r3 # struct compat_iovec __user * - llgfr %r4,%r4 # unsigned long - llgtr %r5,%r5 # struct compat_iovec __user * - llgfr %r6,%r6 # unsigned long - llgf %r0,164(%r15) # unsigned long - stg %r0,160(%r15) - jg sys_process_vm_readv - -ENTRY(compat_sys_process_vm_writev_wrapper) - lgfr %r2,%r2 # compat_pid_t - llgtr %r3,%r3 # struct compat_iovec __user * - llgfr %r4,%r4 # unsigned long - llgtr %r5,%r5 # struct compat_iovec __user * - llgfr %r6,%r6 # unsigned long - llgf %r0,164(%r15) # unsigned long - stg %r0,160(%r15) - jg sys_process_vm_writev diff --git a/arch/s390/kernel/compat_wrapper.c b/arch/s390/kernel/compat_wrapper.c new file mode 100644 index 00000000000..45cdb37aa6f --- /dev/null +++ b/arch/s390/kernel/compat_wrapper.c @@ -0,0 +1,216 @@ +/* + * Compat system call wrappers. + * + * Copyright IBM Corp. 2014 + */ + +#include <linux/syscalls.h> +#include <linux/compat.h> +#include "entry.h" + +#define COMPAT_SYSCALL_WRAP1(name, ...) \ + COMPAT_SYSCALL_WRAPx(1, _##name, __VA_ARGS__) +#define COMPAT_SYSCALL_WRAP2(name, ...) \ + COMPAT_SYSCALL_WRAPx(2, _##name, __VA_ARGS__) +#define COMPAT_SYSCALL_WRAP3(name, ...) \ + COMPAT_SYSCALL_WRAPx(3, _##name, __VA_ARGS__) +#define COMPAT_SYSCALL_WRAP4(name, ...) \ + COMPAT_SYSCALL_WRAPx(4, _##name, __VA_ARGS__) +#define COMPAT_SYSCALL_WRAP5(name, ...) \ + COMPAT_SYSCALL_WRAPx(5, _##name, __VA_ARGS__) +#define COMPAT_SYSCALL_WRAP6(name, ...) \ + COMPAT_SYSCALL_WRAPx(6, _##name, __VA_ARGS__) + +#define __SC_COMPAT_TYPE(t, a) \ + __typeof(__builtin_choose_expr(sizeof(t) > 4, 0L, (t)0)) a + +#define __SC_COMPAT_CAST(t, a) \ +({ \ + long __ReS = a; \ + \ + BUILD_BUG_ON((sizeof(t) > 4) && !__TYPE_IS_L(t) && \ + !__TYPE_IS_UL(t) && !__TYPE_IS_PTR(t)); \ + if (__TYPE_IS_L(t)) \ + __ReS = (s32)a; \ + if (__TYPE_IS_UL(t)) \ + __ReS = (u32)a; \ + if (__TYPE_IS_PTR(t)) \ + __ReS = a & 0x7fffffff; \ + (t)__ReS; \ +}) + +/* + * The COMPAT_SYSCALL_WRAP macro generates system call wrappers to be used by + * compat tasks. These wrappers will only be used for system calls where only + * the system call arguments need sign or zero extension or zeroing of the upper + * 33 bits of pointers. + * Note: since the wrapper function will afterwards call a system call which + * again performs zero and sign extension for all system call arguments with + * a size of less than eight bytes, these compat wrappers only touch those + * system call arguments with a size of eight bytes ((unsigned) long and + * pointers). Zero and sign extension for e.g. int parameters will be done by + * the regular system call wrappers. + */ +#define COMPAT_SYSCALL_WRAPx(x, name, ...) 
\ + asmlinkage long sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \ + asmlinkage long compat_sys##name(__MAP(x,__SC_COMPAT_TYPE,__VA_ARGS__));\ + asmlinkage long compat_sys##name(__MAP(x,__SC_COMPAT_TYPE,__VA_ARGS__)) \ + { \ + return sys##name(__MAP(x,__SC_COMPAT_CAST,__VA_ARGS__)); \ + } + +COMPAT_SYSCALL_WRAP1(exit, int, error_code); +COMPAT_SYSCALL_WRAP1(close, unsigned int, fd); +COMPAT_SYSCALL_WRAP2(creat, const char __user *, pathname, umode_t, mode); +COMPAT_SYSCALL_WRAP2(link, const char __user *, oldname, const char __user *, newname); +COMPAT_SYSCALL_WRAP1(unlink, const char __user *, pathname); +COMPAT_SYSCALL_WRAP1(chdir, const char __user *, filename); +COMPAT_SYSCALL_WRAP3(mknod, const char __user *, filename, umode_t, mode, unsigned, dev); +COMPAT_SYSCALL_WRAP2(chmod, const char __user *, filename, umode_t, mode); +COMPAT_SYSCALL_WRAP1(oldumount, char __user *, name); +COMPAT_SYSCALL_WRAP1(alarm, unsigned int, seconds); +COMPAT_SYSCALL_WRAP2(access, const char __user *, filename, int, mode); +COMPAT_SYSCALL_WRAP1(nice, int, increment); +COMPAT_SYSCALL_WRAP2(kill, int, pid, int, sig); +COMPAT_SYSCALL_WRAP2(rename, const char __user *, oldname, const char __user *, newname); +COMPAT_SYSCALL_WRAP2(mkdir, const char __user *, pathname, umode_t, mode); +COMPAT_SYSCALL_WRAP1(rmdir, const char __user *, pathname); +COMPAT_SYSCALL_WRAP1(dup, unsigned int, fildes); +COMPAT_SYSCALL_WRAP1(pipe, int __user *, fildes); +COMPAT_SYSCALL_WRAP1(brk, unsigned long, brk); +COMPAT_SYSCALL_WRAP2(signal, int, sig, __sighandler_t, handler); +COMPAT_SYSCALL_WRAP1(acct, const char __user *, name); +COMPAT_SYSCALL_WRAP2(umount, char __user *, name, int, flags); +COMPAT_SYSCALL_WRAP2(setpgid, pid_t, pid, pid_t, pgid); +COMPAT_SYSCALL_WRAP1(umask, int, mask); +COMPAT_SYSCALL_WRAP1(chroot, const char __user *, filename); +COMPAT_SYSCALL_WRAP2(dup2, unsigned int, oldfd, unsigned int, newfd); +COMPAT_SYSCALL_WRAP3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask); +COMPAT_SYSCALL_WRAP2(sethostname, char __user *, name, int, len); +COMPAT_SYSCALL_WRAP2(symlink, const char __user *, old, const char __user *, new); +COMPAT_SYSCALL_WRAP3(readlink, const char __user *, path, char __user *, buf, int, bufsiz); +COMPAT_SYSCALL_WRAP1(uselib, const char __user *, library); +COMPAT_SYSCALL_WRAP2(swapon, const char __user *, specialfile, int, swap_flags); +COMPAT_SYSCALL_WRAP4(reboot, int, magic1, int, magic2, unsigned int, cmd, void __user *, arg); +COMPAT_SYSCALL_WRAP2(munmap, unsigned long, addr, size_t, len); +COMPAT_SYSCALL_WRAP2(fchmod, unsigned int, fd, umode_t, mode); +COMPAT_SYSCALL_WRAP2(getpriority, int, which, int, who); +COMPAT_SYSCALL_WRAP3(setpriority, int, which, int, who, int, niceval); +COMPAT_SYSCALL_WRAP3(syslog, int, type, char __user *, buf, int, len); +COMPAT_SYSCALL_WRAP1(swapoff, const char __user *, specialfile); +COMPAT_SYSCALL_WRAP1(fsync, unsigned int, fd); +COMPAT_SYSCALL_WRAP2(setdomainname, char __user *, name, int, len); +COMPAT_SYSCALL_WRAP1(newuname, struct new_utsname __user *, name); +COMPAT_SYSCALL_WRAP3(mprotect, unsigned long, start, size_t, len, unsigned long, prot); +COMPAT_SYSCALL_WRAP3(init_module, void __user *, umod, unsigned long, len, const char __user *, uargs); +COMPAT_SYSCALL_WRAP2(delete_module, const char __user *, name_user, unsigned int, flags); +COMPAT_SYSCALL_WRAP4(quotactl, unsigned int, cmd, const char __user *, special, qid_t, id, void __user *, addr); +COMPAT_SYSCALL_WRAP1(getpgid, pid_t, pid); +COMPAT_SYSCALL_WRAP1(fchdir, unsigned int, fd); 
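Taking one entry from this list, COMPAT_SYSCALL_WRAP1(brk, unsigned long, brk), the macros above expand to roughly the following (hand-expanded here for illustration; the real expansion is produced by the preprocessor):

	asmlinkage long sys_brk(unsigned long brk);

	/* __SC_COMPAT_TYPE: an 8-byte parameter is declared as plain "long",
	 * so the upper 32 bits may still hold garbage from user space here. */
	asmlinkage long compat_sys_brk(long brk)
	{
		/* __SC_COMPAT_CAST: __TYPE_IS_UL, so keep only the low 32 bits */
		return sys_brk((unsigned long)(u32)brk);
	}

For a pointer argument such as COMPAT_SYSCALL_WRAP1(pipe, int __user *, fildes), the cast becomes "fildes & 0x7fffffff" instead, mirroring what the old llgtr-based assembler wrappers did. The wrapper list continues below.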
+COMPAT_SYSCALL_WRAP2(bdflush, int, func, long, data); +COMPAT_SYSCALL_WRAP3(sysfs, int, option, unsigned long, arg1, unsigned long, arg2); +COMPAT_SYSCALL_WRAP1(s390_personality, unsigned int, personality); +COMPAT_SYSCALL_WRAP5(llseek, unsigned int, fd, unsigned long, high, unsigned long, low, loff_t __user *, result, unsigned int, whence); +COMPAT_SYSCALL_WRAP2(flock, unsigned int, fd, unsigned int, cmd); +COMPAT_SYSCALL_WRAP3(msync, unsigned long, start, size_t, len, int, flags); +COMPAT_SYSCALL_WRAP1(getsid, pid_t, pid); +COMPAT_SYSCALL_WRAP1(fdatasync, unsigned int, fd); +COMPAT_SYSCALL_WRAP2(mlock, unsigned long, start, size_t, len); +COMPAT_SYSCALL_WRAP2(munlock, unsigned long, start, size_t, len); +COMPAT_SYSCALL_WRAP1(mlockall, int, flags); +COMPAT_SYSCALL_WRAP2(sched_setparam, pid_t, pid, struct sched_param __user *, param); +COMPAT_SYSCALL_WRAP2(sched_getparam, pid_t, pid, struct sched_param __user *, param); +COMPAT_SYSCALL_WRAP3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param); +COMPAT_SYSCALL_WRAP1(sched_getscheduler, pid_t, pid); +COMPAT_SYSCALL_WRAP1(sched_get_priority_max, int, policy); +COMPAT_SYSCALL_WRAP1(sched_get_priority_min, int, policy); +COMPAT_SYSCALL_WRAP5(mremap, unsigned long, addr, unsigned long, old_len, unsigned long, new_len, unsigned long, flags, unsigned long, new_addr); +COMPAT_SYSCALL_WRAP3(poll, struct pollfd __user *, ufds, unsigned int, nfds, int, timeout); +COMPAT_SYSCALL_WRAP5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, unsigned long, arg4, unsigned long, arg5); +COMPAT_SYSCALL_WRAP2(getcwd, char __user *, buf, unsigned long, size); +COMPAT_SYSCALL_WRAP2(capget, cap_user_header_t, header, cap_user_data_t, dataptr); +COMPAT_SYSCALL_WRAP2(capset, cap_user_header_t, header, const cap_user_data_t, data); +COMPAT_SYSCALL_WRAP3(lchown, const char __user *, filename, uid_t, user, gid_t, group); +COMPAT_SYSCALL_WRAP2(setreuid, uid_t, ruid, uid_t, euid); +COMPAT_SYSCALL_WRAP2(setregid, gid_t, rgid, gid_t, egid); +COMPAT_SYSCALL_WRAP2(getgroups, int, gidsetsize, gid_t __user *, grouplist); +COMPAT_SYSCALL_WRAP2(setgroups, int, gidsetsize, gid_t __user *, grouplist); +COMPAT_SYSCALL_WRAP3(fchown, unsigned int, fd, uid_t, user, gid_t, group); +COMPAT_SYSCALL_WRAP3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid); +COMPAT_SYSCALL_WRAP3(getresuid, uid_t __user *, ruid, uid_t __user *, euid, uid_t __user *, suid); +COMPAT_SYSCALL_WRAP3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid); +COMPAT_SYSCALL_WRAP3(getresgid, gid_t __user *, rgid, gid_t __user *, egid, gid_t __user *, sgid); +COMPAT_SYSCALL_WRAP3(chown, const char __user *, filename, uid_t, user, gid_t, group); +COMPAT_SYSCALL_WRAP1(setuid, uid_t, uid); +COMPAT_SYSCALL_WRAP1(setgid, gid_t, gid); +COMPAT_SYSCALL_WRAP1(setfsuid, uid_t, uid); +COMPAT_SYSCALL_WRAP1(setfsgid, gid_t, gid); +COMPAT_SYSCALL_WRAP2(pivot_root, const char __user *, new_root, const char __user *, put_old); +COMPAT_SYSCALL_WRAP3(mincore, unsigned long, start, size_t, len, unsigned char __user *, vec); +COMPAT_SYSCALL_WRAP3(madvise, unsigned long, start, size_t, len, int, behavior); +COMPAT_SYSCALL_WRAP5(setxattr, const char __user *, path, const char __user *, name, const void __user *, value, size_t, size, int, flags); +COMPAT_SYSCALL_WRAP5(lsetxattr, const char __user *, path, const char __user *, name, const void __user *, value, size_t, size, int, flags); +COMPAT_SYSCALL_WRAP5(fsetxattr, int, fd, const char __user *, name, const void __user *, value, size_t, size, int, flags); 
+COMPAT_SYSCALL_WRAP3(getdents64, unsigned int, fd, struct linux_dirent64 __user *, dirent, unsigned int, count); +COMPAT_SYSCALL_WRAP4(getxattr, const char __user *, path, const char __user *, name, void __user *, value, size_t, size); +COMPAT_SYSCALL_WRAP4(lgetxattr, const char __user *, path, const char __user *, name, void __user *, value, size_t, size); +COMPAT_SYSCALL_WRAP4(fgetxattr, int, fd, const char __user *, name, void __user *, value, size_t, size); +COMPAT_SYSCALL_WRAP3(listxattr, const char __user *, path, char __user *, list, size_t, size); +COMPAT_SYSCALL_WRAP3(llistxattr, const char __user *, path, char __user *, list, size_t, size); +COMPAT_SYSCALL_WRAP3(flistxattr, int, fd, char __user *, list, size_t, size); +COMPAT_SYSCALL_WRAP2(removexattr, const char __user *, path, const char __user *, name); +COMPAT_SYSCALL_WRAP2(lremovexattr, const char __user *, path, const char __user *, name); +COMPAT_SYSCALL_WRAP2(fremovexattr, int, fd, const char __user *, name); +COMPAT_SYSCALL_WRAP1(exit_group, int, error_code); +COMPAT_SYSCALL_WRAP1(set_tid_address, int __user *, tidptr); +COMPAT_SYSCALL_WRAP1(epoll_create, int, size); +COMPAT_SYSCALL_WRAP4(epoll_ctl, int, epfd, int, op, int, fd, struct epoll_event __user *, event); +COMPAT_SYSCALL_WRAP4(epoll_wait, int, epfd, struct epoll_event __user *, events, int, maxevents, int, timeout); +COMPAT_SYSCALL_WRAP1(timer_getoverrun, timer_t, timer_id); +COMPAT_SYSCALL_WRAP1(timer_delete, compat_timer_t, compat_timer_id); +COMPAT_SYSCALL_WRAP1(io_destroy, aio_context_t, ctx); +COMPAT_SYSCALL_WRAP3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb, struct io_event __user *, result); +COMPAT_SYSCALL_WRAP1(mq_unlink, const char __user *, name); +COMPAT_SYSCALL_WRAP5(add_key, const char __user *, tp, const char __user *, dsc, const void __user *, pld, size_t, len, key_serial_t, id); +COMPAT_SYSCALL_WRAP4(request_key, const char __user *, tp, const char __user *, dsc, const char __user *, info, key_serial_t, id); +COMPAT_SYSCALL_WRAP5(remap_file_pages, unsigned long, start, unsigned long, size, unsigned long, prot, unsigned long, pgoff, unsigned long, flags); +COMPAT_SYSCALL_WRAP3(ioprio_set, int, which, int, who, int, ioprio); +COMPAT_SYSCALL_WRAP2(ioprio_get, int, which, int, who); +COMPAT_SYSCALL_WRAP3(inotify_add_watch, int, fd, const char __user *, path, u32, mask); +COMPAT_SYSCALL_WRAP2(inotify_rm_watch, int, fd, __s32, wd); +COMPAT_SYSCALL_WRAP3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode); +COMPAT_SYSCALL_WRAP4(mknodat, int, dfd, const char __user *, filename, umode_t, mode, unsigned, dev); +COMPAT_SYSCALL_WRAP5(fchownat, int, dfd, const char __user *, filename, uid_t, user, gid_t, group, int, flag); +COMPAT_SYSCALL_WRAP3(unlinkat, int, dfd, const char __user *, pathname, int, flag); +COMPAT_SYSCALL_WRAP4(renameat, int, olddfd, const char __user *, oldname, int, newdfd, const char __user *, newname); +COMPAT_SYSCALL_WRAP5(linkat, int, olddfd, const char __user *, oldname, int, newdfd, const char __user *, newname, int, flags); +COMPAT_SYSCALL_WRAP3(symlinkat, const char __user *, oldname, int, newdfd, const char __user *, newname); +COMPAT_SYSCALL_WRAP4(readlinkat, int, dfd, const char __user *, path, char __user *, buf, int, bufsiz); +COMPAT_SYSCALL_WRAP3(fchmodat, int, dfd, const char __user *, filename, umode_t, mode); +COMPAT_SYSCALL_WRAP3(faccessat, int, dfd, const char __user *, filename, int, mode); +COMPAT_SYSCALL_WRAP1(unshare, unsigned long, unshare_flags); +COMPAT_SYSCALL_WRAP6(splice, int, 
fd_in, loff_t __user *, off_in, int, fd_out, loff_t __user *, off_out, size_t, len, unsigned int, flags); +COMPAT_SYSCALL_WRAP4(tee, int, fdin, int, fdout, size_t, len, unsigned int, flags); +COMPAT_SYSCALL_WRAP3(getcpu, unsigned __user *, cpu, unsigned __user *, node, struct getcpu_cache __user *, cache); +COMPAT_SYSCALL_WRAP1(eventfd, unsigned int, count); +COMPAT_SYSCALL_WRAP2(timerfd_create, int, clockid, int, flags); +COMPAT_SYSCALL_WRAP2(eventfd2, unsigned int, count, int, flags); +COMPAT_SYSCALL_WRAP1(inotify_init1, int, flags); +COMPAT_SYSCALL_WRAP2(pipe2, int __user *, fildes, int, flags); +COMPAT_SYSCALL_WRAP3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags); +COMPAT_SYSCALL_WRAP1(epoll_create1, int, flags); +COMPAT_SYSCALL_WRAP2(tkill, int, pid, int, sig); +COMPAT_SYSCALL_WRAP3(tgkill, int, tgid, int, pid, int, sig); +COMPAT_SYSCALL_WRAP5(perf_event_open, struct perf_event_attr __user *, attr_uptr, pid_t, pid, int, cpu, int, group_fd, unsigned long, flags); +COMPAT_SYSCALL_WRAP5(clone, unsigned long, newsp, unsigned long, clone_flags, int __user *, parent_tidptr, int __user *, child_tidptr, int, tls_val); +COMPAT_SYSCALL_WRAP2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags); +COMPAT_SYSCALL_WRAP4(prlimit64, pid_t, pid, unsigned int, resource, const struct rlimit64 __user *, new_rlim, struct rlimit64 __user *, old_rlim); +COMPAT_SYSCALL_WRAP5(name_to_handle_at, int, dfd, const char __user *, name, struct file_handle __user *, handle, int __user *, mnt_id, int, flag); +COMPAT_SYSCALL_WRAP1(syncfs, int, fd); +COMPAT_SYSCALL_WRAP2(setns, int, fd, int, nstype); +COMPAT_SYSCALL_WRAP2(s390_runtime_instr, int, command, int, signum); +COMPAT_SYSCALL_WRAP5(kcmp, pid_t, pid1, pid_t, pid2, int, type, unsigned long, idx1, unsigned long, idx2); +COMPAT_SYSCALL_WRAP3(finit_module, int, fd, const char __user *, uargs, int, flags); +COMPAT_SYSCALL_WRAP3(sched_setattr, pid_t, pid, struct sched_attr __user *, attr, unsigned int, flags); +COMPAT_SYSCALL_WRAP4(sched_getattr, pid_t, pid, struct sched_attr __user *, attr, unsigned int, size, unsigned int, flags); +COMPAT_SYSCALL_WRAP5(renameat2, int, olddfd, const char __user *, oldname, int, newdfd, const char __user *, newname, unsigned int, flags); diff --git a/arch/s390/kernel/cpcmd.c b/arch/s390/kernel/cpcmd.c index 3e8b8816f30..d7b0c4d2788 100644 --- a/arch/s390/kernel/cpcmd.c +++ b/arch/s390/kernel/cpcmd.c @@ -1,8 +1,6 @@ /* - * arch/s390/kernel/cpcmd.c - * * S390 version - * Copyright IBM Corp. 1999,2007 + * Copyright IBM Corp. 1999, 2007 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), * Christian Borntraeger (cborntra@de.ibm.com), */ @@ -18,7 +16,6 @@ #include <linux/string.h> #include <asm/ebcdic.h> #include <asm/cpcmd.h> -#include <asm/system.h> #include <asm/io.h> static DEFINE_SPINLOCK(cpcmd_lock); diff --git a/arch/s390/kernel/crash.c b/arch/s390/kernel/crash.c deleted file mode 100644 index 8cc7c9fa64f..00000000000 --- a/arch/s390/kernel/crash.c +++ /dev/null @@ -1,16 +0,0 @@ -/* - * arch/s390/kernel/crash.c - * - * (C) Copyright IBM Corp. 
2005 - * - * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> - * - */ - -#include <linux/threads.h> -#include <linux/kexec.h> -#include <linux/reboot.h> - -void machine_crash_shutdown(struct pt_regs *regs) -{ -} diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c index 39f8fd4438f..a3b9150e680 100644 --- a/arch/s390/kernel/crash_dump.c +++ b/arch/s390/kernel/crash_dump.c @@ -11,59 +11,262 @@ #include <linux/module.h> #include <linux/gfp.h> #include <linux/slab.h> -#include <linux/crash_dump.h> #include <linux/bootmem.h> #include <linux/elf.h> +#include <linux/memblock.h> +#include <asm/os_info.h> +#include <asm/elf.h> #include <asm/ipl.h> +#include <asm/sclp.h> #define PTR_ADD(x, y) (((char *) (x)) + ((unsigned long) (y))) #define PTR_SUB(x, y) (((char *) (x)) - ((unsigned long) (y))) #define PTR_DIFF(x, y) ((unsigned long)(((char *) (x)) - ((unsigned long) (y)))) +static struct memblock_region oldmem_region; + +static struct memblock_type oldmem_type = { + .cnt = 1, + .max = 1, + .total_size = 0, + .regions = &oldmem_region, +}; + +#define for_each_dump_mem_range(i, nid, p_start, p_end, p_nid) \ + for (i = 0, __next_mem_range(&i, nid, &memblock.physmem, \ + &oldmem_type, p_start, \ + p_end, p_nid); \ + i != (u64)ULLONG_MAX; \ + __next_mem_range(&i, nid, &memblock.physmem, \ + &oldmem_type, \ + p_start, p_end, p_nid)) + +struct dump_save_areas dump_save_areas; + /* - * Copy one page from "oldmem" + * Allocate and add a save area for a CPU + */ +struct save_area *dump_save_area_create(int cpu) +{ + struct save_area **save_areas, *save_area; + + save_area = kmalloc(sizeof(*save_area), GFP_KERNEL); + if (!save_area) + return NULL; + if (cpu + 1 > dump_save_areas.count) { + dump_save_areas.count = cpu + 1; + save_areas = krealloc(dump_save_areas.areas, + dump_save_areas.count * sizeof(void *), + GFP_KERNEL | __GFP_ZERO); + if (!save_areas) { + kfree(save_area); + return NULL; + } + dump_save_areas.areas = save_areas; + } + dump_save_areas.areas[cpu] = save_area; + return save_area; +} + +/* + * Return physical address for virtual address + */ +static inline void *load_real_addr(void *addr) +{ + unsigned long real_addr; + + asm volatile( + " lra %0,0(%1)\n" + " jz 0f\n" + " la %0,0\n" + "0:" + : "=a" (real_addr) : "a" (addr) : "cc"); + return (void *)real_addr; +} + +/* + * Copy real to virtual or real memory + */ +static int copy_from_realmem(void *dest, void *src, size_t count) +{ + unsigned long size; + + if (!count) + return 0; + if (!is_vmalloc_or_module_addr(dest)) + return memcpy_real(dest, src, count); + do { + size = min(count, PAGE_SIZE - (__pa(dest) & ~PAGE_MASK)); + if (memcpy_real(load_real_addr(dest), src, size)) + return -EFAULT; + count -= size; + dest += size; + src += size; + } while (count); + return 0; +} + +/* + * Pointer to ELF header in new kernel + */ +static void *elfcorehdr_newmem; + +/* + * Copy one page from zfcpdump "oldmem" + * + * For pages below HSA size memory from the HSA is copied. Otherwise + * real memory copy is used. + */ +static ssize_t copy_oldmem_page_zfcpdump(char *buf, size_t csize, + unsigned long src, int userbuf) +{ + int rc; + + if (src < sclp_get_hsa_size()) { + rc = memcpy_hsa(buf, src, csize, userbuf); + } else { + if (userbuf) + rc = copy_to_user_real((void __force __user *) buf, + (void *) src, csize); + else + rc = memcpy_real(buf, (void *) src, csize); + } + return rc ? 
rc : csize; +} + +/* + * Copy one page from kdump "oldmem" * * For the kdump reserved memory this functions performs a swap operation: * - [OLDMEM_BASE - OLDMEM_BASE + OLDMEM_SIZE] is mapped to [0 - OLDMEM_SIZE]. * - [0 - OLDMEM_SIZE] is mapped to [OLDMEM_BASE - OLDMEM_BASE + OLDMEM_SIZE] */ -ssize_t copy_oldmem_page(unsigned long pfn, char *buf, - size_t csize, unsigned long offset, int userbuf) -{ - unsigned long src; +static ssize_t copy_oldmem_page_kdump(char *buf, size_t csize, + unsigned long src, int userbuf) - if (!csize) - return 0; +{ + int rc; - src = (pfn << PAGE_SHIFT) + offset; if (src < OLDMEM_SIZE) src += OLDMEM_BASE; else if (src > OLDMEM_BASE && src < OLDMEM_BASE + OLDMEM_SIZE) src -= OLDMEM_BASE; if (userbuf) - copy_to_user_real((void __force __user *) buf, (void *) src, - csize); + rc = copy_to_user_real((void __force __user *) buf, + (void *) src, csize); + else + rc = copy_from_realmem(buf, (void *) src, csize); + return (rc == 0) ? rc : csize; +} + +/* + * Copy one page from "oldmem" + */ +ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize, + unsigned long offset, int userbuf) +{ + unsigned long src; + + if (!csize) + return 0; + src = (pfn << PAGE_SHIFT) + offset; + if (OLDMEM_BASE) + return copy_oldmem_page_kdump(buf, csize, src, userbuf); + else + return copy_oldmem_page_zfcpdump(buf, csize, src, userbuf); +} + +/* + * Remap "oldmem" for kdump + * + * For the kdump reserved memory this functions performs a swap operation: + * [0 - OLDMEM_SIZE] is mapped to [OLDMEM_BASE - OLDMEM_BASE + OLDMEM_SIZE] + */ +static int remap_oldmem_pfn_range_kdump(struct vm_area_struct *vma, + unsigned long from, unsigned long pfn, + unsigned long size, pgprot_t prot) +{ + unsigned long size_old; + int rc; + + if (pfn < OLDMEM_SIZE >> PAGE_SHIFT) { + size_old = min(size, OLDMEM_SIZE - (pfn << PAGE_SHIFT)); + rc = remap_pfn_range(vma, from, + pfn + (OLDMEM_BASE >> PAGE_SHIFT), + size_old, prot); + if (rc || size == size_old) + return rc; + size -= size_old; + from += size_old; + pfn += size_old >> PAGE_SHIFT; + } + return remap_pfn_range(vma, from, pfn, size, prot); +} + +/* + * Remap "oldmem" for zfcpdump + * + * We only map available memory above HSA size. Memory below HSA size + * is read on demand using the copy_oldmem_page() function. 
+ */ +static int remap_oldmem_pfn_range_zfcpdump(struct vm_area_struct *vma, + unsigned long from, + unsigned long pfn, + unsigned long size, pgprot_t prot) +{ + unsigned long hsa_end = sclp_get_hsa_size(); + unsigned long size_hsa; + + if (pfn < hsa_end >> PAGE_SHIFT) { + size_hsa = min(size, hsa_end - (pfn << PAGE_SHIFT)); + if (size == size_hsa) + return 0; + size -= size_hsa; + from += size_hsa; + pfn += size_hsa >> PAGE_SHIFT; + } + return remap_pfn_range(vma, from, pfn, size, prot); +} + +/* + * Remap "oldmem" for kdump or zfcpdump + */ +int remap_oldmem_pfn_range(struct vm_area_struct *vma, unsigned long from, + unsigned long pfn, unsigned long size, pgprot_t prot) +{ + if (OLDMEM_BASE) + return remap_oldmem_pfn_range_kdump(vma, from, pfn, size, prot); else - memcpy_real(buf, (void *) src, csize); - return csize; + return remap_oldmem_pfn_range_zfcpdump(vma, from, pfn, size, + prot); } /* * Copy memory from old kernel */ -static int copy_from_oldmem(void *dest, void *src, size_t count) +int copy_from_oldmem(void *dest, void *src, size_t count) { unsigned long copied = 0; int rc; - if ((unsigned long) src < OLDMEM_SIZE) { - copied = min(count, OLDMEM_SIZE - (unsigned long) src); - rc = memcpy_real(dest, src + OLDMEM_BASE, copied); - if (rc) - return rc; + if (OLDMEM_BASE) { + if ((unsigned long) src < OLDMEM_SIZE) { + copied = min(count, OLDMEM_SIZE - (unsigned long) src); + rc = copy_from_realmem(dest, src + OLDMEM_BASE, copied); + if (rc) + return rc; + } + } else { + unsigned long hsa_end = sclp_get_hsa_size(); + if ((unsigned long) src < hsa_end) { + copied = min(count, hsa_end - (unsigned long) src); + rc = memcpy_hsa(dest, (unsigned long) src, copied, 0); + if (rc) + return rc; + } } - return memcpy_real(dest + copied, src + copied, count - copied); + return copy_from_realmem(dest + copied, src + copied, count - copied); } /* @@ -80,19 +283,6 @@ static void *kzalloc_panic(int len) } /* - * Get memory layout and create hole for oldmem - */ -static struct mem_chunk *get_memory_layout(void) -{ - struct mem_chunk *chunk_array; - - chunk_array = kzalloc_panic(MEMORY_CHUNKS * sizeof(struct mem_chunk)); - detect_memory_layout(chunk_array); - create_mem_hole(chunk_array, OLDMEM_BASE, OLDMEM_SIZE, CHUNK_CRASHK); - return chunk_array; -} - -/* * Initialize ELF note */ static void *nt_init(void *buf, Elf64_Word type, void *desc, int d_len, @@ -225,28 +415,44 @@ static void *nt_prpsinfo(void *ptr) } /* - * Initialize vmcoreinfo note (new kernel) + * Get vmcoreinfo using lowcore->vmcore_info (new kernel) */ -static void *nt_vmcoreinfo(void *ptr) +static void *get_vmcoreinfo_old(unsigned long *size) { char nt_name[11], *vmcoreinfo; Elf64_Nhdr note; void *addr; if (copy_from_oldmem(&addr, &S390_lowcore.vmcore_info, sizeof(addr))) - return ptr; + return NULL; memset(nt_name, 0, sizeof(nt_name)); if (copy_from_oldmem(&note, addr, sizeof(note))) - return ptr; + return NULL; if (copy_from_oldmem(nt_name, addr + sizeof(note), sizeof(nt_name) - 1)) - return ptr; + return NULL; if (strcmp(nt_name, "VMCOREINFO") != 0) - return ptr; - vmcoreinfo = kzalloc_panic(note.n_descsz + 1); + return NULL; + vmcoreinfo = kzalloc_panic(note.n_descsz); if (copy_from_oldmem(vmcoreinfo, addr + 24, note.n_descsz)) + return NULL; + *size = note.n_descsz; + return vmcoreinfo; +} + +/* + * Initialize vmcoreinfo note (new kernel) + */ +static void *nt_vmcoreinfo(void *ptr) +{ + unsigned long size; + void *vmcoreinfo; + + vmcoreinfo = os_info_old_entry(OS_INFO_VMCOREINFO, &size); + if (!vmcoreinfo) + vmcoreinfo = 
get_vmcoreinfo_old(&size); + if (!vmcoreinfo) return ptr; - vmcoreinfo[note.n_descsz + 1] = 0; - return nt_init(ptr, 0, vmcoreinfo, note.n_descsz, "VMCOREINFO"); + return nt_init(ptr, 0, vmcoreinfo, size, "VMCOREINFO"); } /* @@ -277,8 +483,8 @@ static int get_cpu_cnt(void) { int i, cpus = 0; - for (i = 0; zfcpdump_save_areas[i]; i++) { - if (zfcpdump_save_areas[i]->pref_reg == 0) + for (i = 0; i < dump_save_areas.count; i++) { + if (dump_save_areas.areas[i]->pref_reg == 0) continue; cpus++; } @@ -290,60 +496,33 @@ static int get_cpu_cnt(void) */ static int get_mem_chunk_cnt(void) { - struct mem_chunk *chunk_array, *mem_chunk; - int i, cnt = 0; + int cnt = 0; + u64 idx; - chunk_array = get_memory_layout(); - for (i = 0; i < MEMORY_CHUNKS; i++) { - mem_chunk = &chunk_array[i]; - if (chunk_array[i].type != CHUNK_READ_WRITE && - chunk_array[i].type != CHUNK_READ_ONLY) - continue; - if (mem_chunk->size == 0) - continue; + for_each_dump_mem_range(idx, NUMA_NO_NODE, NULL, NULL, NULL) cnt++; - } - kfree(chunk_array); return cnt; } /* - * Relocate pointer in order to allow vmcore code access the data - */ -static inline unsigned long relocate(unsigned long addr) -{ - return OLDMEM_BASE + addr; -} - -/* * Initialize ELF loads (new kernel) */ -static int loads_init(Elf64_Phdr *phdr, u64 loads_offset) +static void loads_init(Elf64_Phdr *phdr, u64 loads_offset) { - struct mem_chunk *chunk_array, *mem_chunk; - int i; + phys_addr_t start, end; + u64 idx; - chunk_array = get_memory_layout(); - for (i = 0; i < MEMORY_CHUNKS; i++) { - mem_chunk = &chunk_array[i]; - if (mem_chunk->size == 0) - break; - if (chunk_array[i].type != CHUNK_READ_WRITE && - chunk_array[i].type != CHUNK_READ_ONLY) - continue; - else - phdr->p_filesz = mem_chunk->size; + for_each_dump_mem_range(idx, NUMA_NO_NODE, &start, &end, NULL) { + phdr->p_filesz = end - start; phdr->p_type = PT_LOAD; - phdr->p_offset = mem_chunk->addr; - phdr->p_vaddr = mem_chunk->addr; - phdr->p_paddr = mem_chunk->addr; - phdr->p_memsz = mem_chunk->size; + phdr->p_offset = start; + phdr->p_vaddr = start; + phdr->p_paddr = start; + phdr->p_memsz = end - start; phdr->p_flags = PF_R | PF_W | PF_X; phdr->p_align = PAGE_SIZE; phdr++; } - kfree(chunk_array); - return i; } /* @@ -357,8 +536,8 @@ static void *notes_init(Elf64_Phdr *phdr, void *ptr, u64 notes_offset) ptr = nt_prpsinfo(ptr); - for (i = 0; zfcpdump_save_areas[i]; i++) { - sa = zfcpdump_save_areas[i]; + for (i = 0; i < dump_save_areas.count; i++) { + sa = dump_save_areas.areas[i]; if (sa->pref_reg == 0) continue; ptr = fill_cpu_elf_notes(ptr, sa); @@ -366,7 +545,7 @@ static void *notes_init(Elf64_Phdr *phdr, void *ptr, u64 notes_offset) ptr = nt_vmcoreinfo(ptr); memset(phdr, 0, sizeof(*phdr)); phdr->p_type = PT_NOTE; - phdr->p_offset = relocate(notes_offset); + phdr->p_offset = notes_offset; phdr->p_filesz = (unsigned long) PTR_SUB(ptr, ptr_start); phdr->p_memsz = phdr->p_filesz; return ptr; @@ -375,7 +554,7 @@ static void *notes_init(Elf64_Phdr *phdr, void *ptr, u64 notes_offset) /* * Create ELF core header (new kernel) */ -static void s390_elf_corehdr_create(char **elfcorebuf, size_t *elfcorebuf_sz) +int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size) { Elf64_Phdr *phdr_notes, *phdr_loads; int mem_chunk_cnt; @@ -383,6 +562,23 @@ static void s390_elf_corehdr_create(char **elfcorebuf, size_t *elfcorebuf_sz) u32 alloc_size; u64 hdr_off; + /* If we are not in kdump or zfcpdump mode return */ + if (!OLDMEM_BASE && ipl_info.type != IPL_TYPE_FCP_DUMP) + return 0; + /* If elfcorehdr= has 
been passed via cmdline, we use that one */ + if (elfcorehdr_addr != ELFCORE_ADDR_MAX) + return 0; + /* If we cannot get HSA size for zfcpdump return error */ + if (ipl_info.type == IPL_TYPE_FCP_DUMP && !sclp_get_hsa_size()) + return -ENODEV; + + /* For kdump, exclude previous crashkernel memory */ + if (OLDMEM_BASE) { + oldmem_region.base = OLDMEM_BASE; + oldmem_region.size = OLDMEM_SIZE; + oldmem_type.total_size = OLDMEM_SIZE; + } + mem_chunk_cnt = get_mem_chunk_cnt(); alloc_size = 0x1000 + get_cpu_cnt() * 0x300 + @@ -400,27 +596,52 @@ static void s390_elf_corehdr_create(char **elfcorebuf, size_t *elfcorebuf_sz) ptr = notes_init(phdr_notes, ptr, ((unsigned long) hdr) + hdr_off); /* Init loads */ hdr_off = PTR_DIFF(ptr, hdr); - loads_init(phdr_loads, ((unsigned long) hdr) + hdr_off); - *elfcorebuf_sz = hdr_off; - *elfcorebuf = (void *) relocate((unsigned long) hdr); - BUG_ON(*elfcorebuf_sz > alloc_size); + loads_init(phdr_loads, hdr_off); + *addr = (unsigned long long) hdr; + elfcorehdr_newmem = hdr; + *size = (unsigned long long) hdr_off; + BUG_ON(elfcorehdr_size > alloc_size); + return 0; } /* - * Create kdump ELF core header in new kernel, if it has not been passed via - * the "elfcorehdr" kernel parameter + * Free ELF core header (new kernel) */ -static int setup_kdump_elfcorehdr(void) +void elfcorehdr_free(unsigned long long addr) { - size_t elfcorebuf_sz; - char *elfcorebuf; + if (!elfcorehdr_newmem) + return; + kfree((void *)(unsigned long)addr); +} - if (!OLDMEM_BASE || is_kdump_kernel()) - return -EINVAL; - s390_elf_corehdr_create(&elfcorebuf, &elfcorebuf_sz); - elfcorehdr_addr = (unsigned long long) elfcorebuf; - elfcorehdr_size = elfcorebuf_sz; - return 0; +/* + * Read from ELF header + */ +ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos) +{ + void *src = (void *)(unsigned long)*ppos; + + src = elfcorehdr_newmem ? src : src - OLDMEM_BASE; + memcpy(buf, src, count); + *ppos += count; + return count; } -subsys_initcall(setup_kdump_elfcorehdr); +/* + * Read from ELF notes data + */ +ssize_t elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos) +{ + void *src = (void *)(unsigned long)*ppos; + int rc; + + if (elfcorehdr_newmem) { + memcpy(buf, src, count); + } else { + rc = copy_from_oldmem(buf, src, count); + if (rc) + return rc; + } + *ppos += count; + return count; +} diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c index 6848828b962..ee8390da6ea 100644 --- a/arch/s390/kernel/debug.c +++ b/arch/s390/kernel/debug.c @@ -1,9 +1,8 @@ /* - * arch/s390/kernel/debug.c * S/390 debug facility * - * Copyright (C) 1999, 2000 IBM Deutschland Entwicklung GmbH, - * IBM Corporation + * Copyright IBM Corp. 
1999, 2012 + * * Author(s): Michael Holzheu (holzheu@de.ibm.com), * Holger Smolinski (Holger.Smolinski@de.ibm.com) * @@ -111,6 +110,7 @@ struct debug_view debug_raw_view = { NULL, NULL }; +EXPORT_SYMBOL(debug_raw_view); struct debug_view debug_hex_ascii_view = { "hex_ascii", @@ -120,6 +120,7 @@ struct debug_view debug_hex_ascii_view = { NULL, NULL }; +EXPORT_SYMBOL(debug_hex_ascii_view); static struct debug_view debug_level_view = { "level", @@ -156,6 +157,7 @@ struct debug_view debug_sprintf_view = { NULL, NULL }; +EXPORT_SYMBOL(debug_sprintf_view); /* used by dump analysis tools to determine version of debug feature */ static unsigned int __used debug_feature_version = __DEBUG_FEATURE_VERSION; @@ -167,6 +169,7 @@ static debug_info_t *debug_area_last = NULL; static DEFINE_MUTEX(debug_mutex); static int initialized; +static int debug_critical; static const struct file_operations debug_file_ops = { .owner = THIS_MODULE, @@ -608,7 +611,7 @@ debug_open(struct inode *inode, struct file *file) debug_info_t *debug_info, *debug_info_snapshot; mutex_lock(&debug_mutex); - debug_info = file->f_path.dentry->d_inode->i_private; + debug_info = file_inode(file)->i_private; /* find debug view */ for (i = 0; i < DEBUG_MAX_VIEWS; i++) { if (!debug_info->views[i]) @@ -730,6 +733,7 @@ debug_info_t *debug_register(const char *name, int pages_per_area, return debug_register_mode(name, pages_per_area, nr_areas, buf_size, S_IRUSR | S_IWUSR, 0, 0); } +EXPORT_SYMBOL(debug_register); /* * debug_unregister: @@ -748,6 +752,7 @@ debug_unregister(debug_info_t * id) out: return; } +EXPORT_SYMBOL(debug_unregister); /* * debug_set_size: @@ -810,7 +815,7 @@ debug_set_level(debug_info_t* id, int new_level) } spin_unlock_irqrestore(&id->lock,flags); } - +EXPORT_SYMBOL(debug_set_level); /* * proceed_active_entry: @@ -862,7 +867,7 @@ static inline void debug_finish_entry(debug_info_t * id, debug_entry_t* active, int level, int exception) { - active->id.stck = get_clock(); + active->id.stck = get_tod_clock_fast(); active->id.fields.cpuid = smp_processor_id(); active->caller = __builtin_return_address(0); active->id.fields.exception = exception; @@ -884,7 +889,7 @@ static int debug_active=1; * if debug_active is already off */ static int -s390dbf_procactive(ctl_table *table, int write, +s390dbf_procactive(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { if (!write || debug_stoppable || !debug_active) @@ -930,7 +935,12 @@ debug_stop_all(void) if (debug_stoppable) debug_active = 0; } +EXPORT_SYMBOL(debug_stop_all); +void debug_set_critical(void) +{ + debug_critical = 1; +} /* * debug_event_common: @@ -945,7 +955,11 @@ debug_event_common(debug_info_t * id, int level, const void *buf, int len) if (!debug_active || !id->areas) return NULL; - spin_lock_irqsave(&id->lock, flags); + if (debug_critical) { + if (!spin_trylock_irqsave(&id->lock, flags)) + return NULL; + } else + spin_lock_irqsave(&id->lock, flags); active = get_active_entry(id); memset(DEBUG_DATA(active), 0, id->buf_size); memcpy(DEBUG_DATA(active), buf, min(len, id->buf_size)); @@ -954,6 +968,7 @@ debug_event_common(debug_info_t * id, int level, const void *buf, int len) return active; } +EXPORT_SYMBOL(debug_event_common); /* * debug_exception_common: @@ -968,7 +983,11 @@ debug_entry_t if (!debug_active || !id->areas) return NULL; - spin_lock_irqsave(&id->lock, flags); + if (debug_critical) { + if (!spin_trylock_irqsave(&id->lock, flags)) + return NULL; + } else + spin_lock_irqsave(&id->lock, flags); active = get_active_entry(id); 
memset(DEBUG_DATA(active), 0, id->buf_size); memcpy(DEBUG_DATA(active), buf, min(len, id->buf_size)); @@ -977,6 +996,7 @@ debug_entry_t return active; } +EXPORT_SYMBOL(debug_exception_common); /* * counts arguments in format string for sprintf view @@ -1013,7 +1033,11 @@ debug_sprintf_event(debug_info_t* id, int level,char *string,...) return NULL; numargs=debug_count_numargs(string); - spin_lock_irqsave(&id->lock, flags); + if (debug_critical) { + if (!spin_trylock_irqsave(&id->lock, flags)) + return NULL; + } else + spin_lock_irqsave(&id->lock, flags); active = get_active_entry(id); curr_event=(debug_sprintf_entry_t *) DEBUG_DATA(active); va_start(ap,string); @@ -1026,6 +1050,7 @@ debug_sprintf_event(debug_info_t* id, int level,char *string,...) return active; } +EXPORT_SYMBOL(debug_sprintf_event); /* * debug_sprintf_exception: @@ -1047,7 +1072,11 @@ debug_sprintf_exception(debug_info_t* id, int level,char *string,...) numargs=debug_count_numargs(string); - spin_lock_irqsave(&id->lock, flags); + if (debug_critical) { + if (!spin_trylock_irqsave(&id->lock, flags)) + return NULL; + } else + spin_lock_irqsave(&id->lock, flags); active = get_active_entry(id); curr_event=(debug_sprintf_entry_t *)DEBUG_DATA(active); va_start(ap,string); @@ -1060,25 +1089,7 @@ debug_sprintf_exception(debug_info_t* id, int level,char *string,...) return active; } - -/* - * debug_init: - * - is called exactly once to initialize the debug feature - */ - -static int -__init debug_init(void) -{ - int rc = 0; - - s390dbf_sysctl_header = register_sysctl_table(s390dbf_dir_table); - mutex_lock(&debug_mutex); - debug_debugfs_root_entry = debugfs_create_dir(DEBUG_DIR_ROOT,NULL); - initialized = 1; - mutex_unlock(&debug_mutex); - - return rc; -} +EXPORT_SYMBOL(debug_sprintf_exception); /* * debug_register_view: @@ -1116,16 +1127,18 @@ debug_register_view(debug_info_t * id, struct debug_view *view) if (i == DEBUG_MAX_VIEWS) { pr_err("Registering view %s/%s would exceed the maximum " "number of views %i\n", id->name, view->name, i); - debugfs_remove(pde); rc = -1; } else { id->views[i] = view; id->debugfs_entries[i] = pde; } spin_unlock_irqrestore(&id->lock, flags); + if (rc) + debugfs_remove(pde); out: return rc; } +EXPORT_SYMBOL(debug_register_view); /* * debug_unregister_view: @@ -1134,9 +1147,9 @@ out: int debug_unregister_view(debug_info_t * id, struct debug_view *view) { - int rc = 0; - int i; + struct dentry *dentry = NULL; unsigned long flags; + int i, rc = 0; if (!id) goto out; @@ -1148,13 +1161,16 @@ debug_unregister_view(debug_info_t * id, struct debug_view *view) if (i == DEBUG_MAX_VIEWS) rc = -1; else { - debugfs_remove(id->debugfs_entries[i]); + dentry = id->debugfs_entries[i]; id->views[i] = NULL; + id->debugfs_entries[i] = NULL; } spin_unlock_irqrestore(&id->lock, flags); + debugfs_remove(dentry); out: return rc; } +EXPORT_SYMBOL(debug_unregister_view); static inline char * debug_get_user_string(const char __user *user_buf, size_t user_len) @@ -1428,10 +1444,10 @@ debug_hex_ascii_format_fn(debug_info_t * id, struct debug_view *view, rc += sprintf(out_buf + rc, "| "); for (i = 0; i < id->buf_size; i++) { unsigned char c = in_buf[i]; - if (!isprint(c)) - rc += sprintf(out_buf + rc, "."); - else + if (isascii(c) && isprint(c)) rc += sprintf(out_buf + rc, "%c", c); + else + rc += sprintf(out_buf + rc, "."); } rc += sprintf(out_buf + rc, "\n"); return rc; @@ -1464,6 +1480,7 @@ debug_dflt_header_fn(debug_info_t * id, struct debug_view *view, except_str, entry->id.fields.cpuid, (void *) caller); return rc; } 
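The functions made visible through the EXPORT_SYMBOL() additions in this hunk (the bulk export block at the end of the file is deleted further down) form the module-facing API of the s390 debug facility. A minimal usage sketch of that API; the "sample" name and the sizes are invented for illustration only:

	#include <asm/debug.h>

	static debug_info_t *sample_dbf;

	static int __init sample_init(void)
	{
		/* 1 page per area, 4 areas, room for format string plus args */
		sample_dbf = debug_register("sample", 1, 4, 8 * sizeof(long));
		if (!sample_dbf)
			return -ENOMEM;
		debug_register_view(sample_dbf, &debug_sprintf_view);
		debug_set_level(sample_dbf, 3);
		/* After debug_set_critical(), the event functions above only
		 * try-lock and may drop an entry rather than deadlock while
		 * a dump is in progress. */
		debug_sprintf_event(sample_dbf, 1, "init done, rc=%d", 0);
		return 0;
	}

	static void __exit sample_exit(void)
	{
		debug_unregister(sample_dbf);
	}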
+EXPORT_SYMBOL(debug_dflt_header_fn); /* * prints debug data sprintf-formated: @@ -1512,33 +1529,16 @@ out: } /* - * clean up module + * debug_init: + * - is called exactly once to initialize the debug feature */ -static void __exit debug_exit(void) +static int __init debug_init(void) { - debugfs_remove(debug_debugfs_root_entry); - unregister_sysctl_table(s390dbf_sysctl_header); - return; + s390dbf_sysctl_header = register_sysctl_table(s390dbf_dir_table); + mutex_lock(&debug_mutex); + debug_debugfs_root_entry = debugfs_create_dir(DEBUG_DIR_ROOT, NULL); + initialized = 1; + mutex_unlock(&debug_mutex); + return 0; } - -/* - * module definitions - */ postcore_initcall(debug_init); -module_exit(debug_exit); -MODULE_LICENSE("GPL"); - -EXPORT_SYMBOL(debug_register); -EXPORT_SYMBOL(debug_unregister); -EXPORT_SYMBOL(debug_set_level); -EXPORT_SYMBOL(debug_stop_all); -EXPORT_SYMBOL(debug_register_view); -EXPORT_SYMBOL(debug_unregister_view); -EXPORT_SYMBOL(debug_event_common); -EXPORT_SYMBOL(debug_exception_common); -EXPORT_SYMBOL(debug_hex_ascii_view); -EXPORT_SYMBOL(debug_raw_view); -EXPORT_SYMBOL(debug_dflt_header_fn); -EXPORT_SYMBOL(debug_sprintf_view); -EXPORT_SYMBOL(debug_sprintf_exception); -EXPORT_SYMBOL(debug_sprintf_event); diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c index e2f847599c8..993efe6a887 100644 --- a/arch/s390/kernel/dis.c +++ b/arch/s390/kernel/dis.c @@ -1,6 +1,4 @@ /* - * arch/s390/kernel/dis.c - * * Disassemble s390 instructions. * * Copyright IBM Corp. 2007 @@ -24,8 +22,8 @@ #include <linux/kprobes.h> #include <linux/kdebug.h> -#include <asm/system.h> #include <asm/uaccess.h> +#include <asm/dis.h> #include <asm/io.h> #include <linux/atomic.h> #include <asm/mathemu.h> @@ -40,17 +38,6 @@ #define ONELONG "%016lx: " #endif /* CONFIG_64BIT */ -#define OPERAND_GPR 0x1 /* Operand printed as %rx */ -#define OPERAND_FPR 0x2 /* Operand printed as %fx */ -#define OPERAND_AR 0x4 /* Operand printed as %ax */ -#define OPERAND_CR 0x8 /* Operand printed as %cx */ -#define OPERAND_DISP 0x10 /* Operand printed as displacement */ -#define OPERAND_BASE 0x20 /* Operand printed as base register */ -#define OPERAND_INDEX 0x40 /* Operand printed as index register */ -#define OPERAND_PCREL 0x80 /* Operand printed as pc-relative symbol */ -#define OPERAND_SIGNED 0x100 /* Operand printed as signed value */ -#define OPERAND_LENGTH 0x200 /* Operand printed as length (+1) */ - enum { UNUSED, /* Indicates the end of the operand list */ R_8, /* GPR starting at position 8 */ @@ -86,22 +73,29 @@ enum { U4_12, /* 4 bit unsigned value starting at 12 */ U4_16, /* 4 bit unsigned value starting at 16 */ U4_20, /* 4 bit unsigned value starting at 20 */ + U4_24, /* 4 bit unsigned value starting at 24 */ + U4_28, /* 4 bit unsigned value starting at 28 */ U4_32, /* 4 bit unsigned value starting at 32 */ + U4_36, /* 4 bit unsigned value starting at 36 */ U8_8, /* 8 bit unsigned value starting at 8 */ U8_16, /* 8 bit unsigned value starting at 16 */ U8_24, /* 8 bit unsigned value starting at 24 */ U8_32, /* 8 bit unsigned value starting at 32 */ I8_8, /* 8 bit signed value starting at 8 */ I8_32, /* 8 bit signed value starting at 32 */ + J12_12, /* PC relative offset at 12 */ I16_16, /* 16 bit signed value starting at 16 */ I16_32, /* 32 bit signed value starting at 16 */ U16_16, /* 16 bit unsigned value starting at 16 */ U16_32, /* 32 bit unsigned value starting at 16 */ J16_16, /* PC relative jump offset at 16 */ + J16_32, /* PC relative offset at 16 */ + I24_24, /* 24 bit signed value starting at 24 
*/ J32_16, /* PC relative long offset at 16 */ I32_16, /* 32 bit signed value starting at 16 */ U32_16, /* 32 bit unsigned value starting at 16 */ M_16, /* 4 bit optional mask starting at 16 */ + M_20, /* 4 bit optional mask starting at 20 */ RO_28, /* optional GPR starting at position 28 */ }; @@ -112,6 +106,8 @@ enum { enum { INSTR_INVALID, INSTR_E, + INSTR_IE_UU, + INSTR_MII_UPI, INSTR_RIE_R0IU, INSTR_RIE_R0UU, INSTR_RIE_RRP, INSTR_RIE_RRPU, INSTR_RIE_RRUUU, INSTR_RIE_RUPI, INSTR_RIE_RUPU, INSTR_RIE_RRI0, INSTR_RIL_RI, INSTR_RIL_RP, INSTR_RIL_RU, INSTR_RIL_UP, @@ -121,13 +117,15 @@ enum { INSTR_RRE_FF, INSTR_RRE_FR, INSTR_RRE_R0, INSTR_RRE_RA, INSTR_RRE_RF, INSTR_RRE_RR, INSTR_RRE_RR_OPT, INSTR_RRF_0UFF, INSTR_RRF_F0FF, INSTR_RRF_F0FF2, INSTR_RRF_F0FR, - INSTR_RRF_FFRU, INSTR_RRF_FUFF, INSTR_RRF_M0RR, INSTR_RRF_R0RR, - INSTR_RRF_R0RR2, INSTR_RRF_RURR, INSTR_RRF_U0FF, INSTR_RRF_U0RF, - INSTR_RRF_U0RR, INSTR_RRF_UUFF, INSTR_RRR_F0FF, INSTR_RRS_RRRDU, + INSTR_RRF_FFRU, INSTR_RRF_FUFF, INSTR_RRF_FUFF2, INSTR_RRF_M0RR, + INSTR_RRF_R0RR, INSTR_RRF_R0RR2, INSTR_RRF_RMRR, INSTR_RRF_RURR, + INSTR_RRF_U0FF, INSTR_RRF_U0RF, INSTR_RRF_U0RR, INSTR_RRF_UUFF, + INSTR_RRF_UUFR, INSTR_RRF_UURF, + INSTR_RRR_F0FF, INSTR_RRS_RRRDU, INSTR_RR_FF, INSTR_RR_R0, INSTR_RR_RR, INSTR_RR_U0, INSTR_RR_UR, INSTR_RSE_CCRD, INSTR_RSE_RRRD, INSTR_RSE_RURD, INSTR_RSI_RRP, - INSTR_RSL_R0RD, + INSTR_RSL_LRDFU, INSTR_RSL_R0RD, INSTR_RSY_AARD, INSTR_RSY_CCRD, INSTR_RSY_RRRD, INSTR_RSY_RURD, INSTR_RSY_RDRM, INSTR_RS_AARD, INSTR_RS_CCRD, INSTR_RS_R0RD, INSTR_RS_RRRD, @@ -139,6 +137,7 @@ enum { INSTR_SIL_RDI, INSTR_SIL_RDU, INSTR_SIY_IRD, INSTR_SIY_URD, INSTR_SI_URD, + INSTR_SMI_U0RDP, INSTR_SSE_RDRD, INSTR_SSF_RRDRD, INSTR_SSF_RRDRD2, INSTR_SS_L0RDRD, INSTR_SS_LIRDRD, INSTR_SS_LLRDRD, INSTR_SS_RRRDRD, @@ -146,19 +145,7 @@ enum { INSTR_S_00, INSTR_S_RD, }; -struct operand { - int bits; /* The number of bits in the operand. */ - int shift; /* The number of bits to shift. */ - int flags; /* One bit syntax flags. 
*/ -}; - -struct insn { - const char name[5]; - unsigned char opfrag; - unsigned char format; -}; - -static const struct operand operands[] = +static const struct s390_operand operands[] = { [UNUSED] = { 0, 0, 0 }, [R_8] = { 4, 8, OPERAND_GPR }, @@ -194,31 +181,42 @@ static const struct operand operands[] = [U4_12] = { 4, 12, 0 }, [U4_16] = { 4, 16, 0 }, [U4_20] = { 4, 20, 0 }, + [U4_24] = { 4, 24, 0 }, + [U4_28] = { 4, 28, 0 }, [U4_32] = { 4, 32, 0 }, + [U4_36] = { 4, 36, 0 }, [U8_8] = { 8, 8, 0 }, [U8_16] = { 8, 16, 0 }, [U8_24] = { 8, 24, 0 }, [U8_32] = { 8, 32, 0 }, + [J12_12] = { 12, 12, OPERAND_PCREL }, [I16_16] = { 16, 16, OPERAND_SIGNED }, [U16_16] = { 16, 16, 0 }, [U16_32] = { 16, 32, 0 }, [J16_16] = { 16, 16, OPERAND_PCREL }, + [J16_32] = { 16, 32, OPERAND_PCREL }, [I16_32] = { 16, 32, OPERAND_SIGNED }, + [I24_24] = { 24, 24, OPERAND_SIGNED }, [J32_16] = { 32, 16, OPERAND_PCREL }, [I32_16] = { 32, 16, OPERAND_SIGNED }, [U32_16] = { 32, 16, 0 }, [M_16] = { 4, 16, 0 }, + [M_20] = { 4, 20, 0 }, [RO_28] = { 4, 28, OPERAND_GPR } }; static const unsigned char formats[][7] = { [INSTR_E] = { 0xff, 0,0,0,0,0,0 }, + [INSTR_IE_UU] = { 0xff, U4_24,U4_28,0,0,0,0 }, + [INSTR_MII_UPI] = { 0xff, U4_8,J12_12,I24_24 }, + [INSTR_RIE_R0IU] = { 0xff, R_8,I16_16,U4_32,0,0,0 }, [INSTR_RIE_R0UU] = { 0xff, R_8,U16_16,U4_32,0,0,0 }, + [INSTR_RIE_RRI0] = { 0xff, R_8,R_12,I16_16,0,0,0 }, [INSTR_RIE_RRPU] = { 0xff, R_8,R_12,U4_32,J16_16,0,0 }, [INSTR_RIE_RRP] = { 0xff, R_8,R_12,J16_16,0,0,0 }, [INSTR_RIE_RRUUU] = { 0xff, R_8,R_12,U8_16,U8_24,U8_32,0 }, [INSTR_RIE_RUPI] = { 0xff, R_8,I8_32,U4_12,J16_16,0,0 }, - [INSTR_RIE_RRI0] = { 0xff, R_8,R_12,I16_16,0,0,0 }, + [INSTR_RIE_RUPU] = { 0xff, R_8,U8_32,U4_12,J16_16,0,0 }, [INSTR_RIL_RI] = { 0x0f, R_8,I32_16,0,0,0,0 }, [INSTR_RIL_RP] = { 0x0f, R_8,J32_16,0,0,0,0 }, [INSTR_RIL_RU] = { 0x0f, R_8,U32_16,0,0,0,0 }, @@ -248,14 +246,18 @@ static const unsigned char formats[][7] = { [INSTR_RRF_F0FR] = { 0xff, F_24,F_16,R_28,0,0,0 }, [INSTR_RRF_FFRU] = { 0xff, F_24,F_16,R_28,U4_20,0,0 }, [INSTR_RRF_FUFF] = { 0xff, F_24,F_16,F_28,U4_20,0,0 }, + [INSTR_RRF_FUFF2] = { 0xff, F_24,F_28,F_16,U4_20,0,0 }, [INSTR_RRF_M0RR] = { 0xff, R_24,R_28,M_16,0,0,0 }, [INSTR_RRF_R0RR] = { 0xff, R_24,R_16,R_28,0,0,0 }, [INSTR_RRF_R0RR2] = { 0xff, R_24,R_28,R_16,0,0,0 }, + [INSTR_RRF_RMRR] = { 0xff, R_24,R_16,R_28,M_20,0,0 }, [INSTR_RRF_RURR] = { 0xff, R_24,R_28,R_16,U4_20,0,0 }, [INSTR_RRF_U0FF] = { 0xff, F_24,U4_16,F_28,0,0,0 }, [INSTR_RRF_U0RF] = { 0xff, R_24,U4_16,F_28,0,0,0 }, [INSTR_RRF_U0RR] = { 0xff, R_24,R_28,U4_16,0,0,0 }, [INSTR_RRF_UUFF] = { 0xff, F_24,U4_16,F_28,U4_20,0,0 }, + [INSTR_RRF_UUFR] = { 0xff, F_24,U4_16,R_28,U4_20,0,0 }, + [INSTR_RRF_UURF] = { 0xff, R_24,U4_16,F_28,U4_20,0,0 }, [INSTR_RRR_F0FF] = { 0xff, F_24,F_28,F_16,0,0,0 }, [INSTR_RRS_RRRDU] = { 0xff, R_8,R_12,U4_32,D_20,B_16,0 }, [INSTR_RR_FF] = { 0xff, F_8,F_12,0,0,0,0 }, @@ -267,12 +269,13 @@ static const unsigned char formats[][7] = { [INSTR_RSE_RRRD] = { 0xff, R_8,R_12,D_20,B_16,0,0 }, [INSTR_RSE_RURD] = { 0xff, R_8,U4_12,D_20,B_16,0,0 }, [INSTR_RSI_RRP] = { 0xff, R_8,R_12,J16_16,0,0,0 }, + [INSTR_RSL_LRDFU] = { 0xff, F_32,D_20,L4_8,B_16,U4_36,0 }, [INSTR_RSL_R0RD] = { 0xff, D_20,L4_8,B_16,0,0,0 }, [INSTR_RSY_AARD] = { 0xff, A_8,A_12,D20_20,B_16,0,0 }, [INSTR_RSY_CCRD] = { 0xff, C_8,C_12,D20_20,B_16,0,0 }, + [INSTR_RSY_RDRM] = { 0xff, R_8,D20_20,B_16,U4_12,0,0 }, [INSTR_RSY_RRRD] = { 0xff, R_8,R_12,D20_20,B_16,0,0 }, [INSTR_RSY_RURD] = { 0xff, R_8,U4_12,D20_20,B_16,0,0 }, - [INSTR_RSY_RDRM] = { 0xff, 
R_8,D20_20,B_16,U4_12,0,0 }, [INSTR_RS_AARD] = { 0xff, A_8,A_12,D_20,B_16,0,0 }, [INSTR_RS_CCRD] = { 0xff, C_8,C_12,D_20,B_16,0,0 }, [INSTR_RS_R0RD] = { 0xff, R_8,D_20,B_16,0,0,0 }, @@ -292,9 +295,10 @@ static const unsigned char formats[][7] = { [INSTR_SIY_IRD] = { 0xff, D20_20,B_16,I8_8,0,0,0 }, [INSTR_SIY_URD] = { 0xff, D20_20,B_16,U8_8,0,0,0 }, [INSTR_SI_URD] = { 0xff, D_20,B_16,U8_8,0,0,0 }, + [INSTR_SMI_U0RDP] = { 0xff, U4_8,J16_32,D_20,B_16,0,0 }, [INSTR_SSE_RDRD] = { 0xff, D_20,B_16,D_36,B_32,0,0 }, - [INSTR_SSF_RRDRD] = { 0x00, D_20,B_16,D_36,B_32,R_8,0 }, - [INSTR_SSF_RRDRD2]= { 0x00, R_8,D_20,B_16,D_36,B_32,0 }, + [INSTR_SSF_RRDRD] = { 0x0f, D_20,B_16,D_36,B_32,R_8,0 }, + [INSTR_SSF_RRDRD2]= { 0x0f, R_8,D_20,B_16,D_36,B_32,0 }, [INSTR_SS_L0RDRD] = { 0xff, D_20,L8_8,B_16,D_36,B_32,0 }, [INSTR_SS_LIRDRD] = { 0xff, D_20,L4_8,B_16,D_36,B_32,U4_12 }, [INSTR_SS_LLRDRD] = { 0xff, D_20,L4_8,B_16,D_36,L4_12,B_32 }, @@ -307,36 +311,157 @@ static const unsigned char formats[][7] = { enum { LONG_INSN_ALGHSIK, + LONG_INSN_ALHHHR, + LONG_INSN_ALHHLR, LONG_INSN_ALHSIK, + LONG_INSN_ALSIHN, + LONG_INSN_CDFBRA, + LONG_INSN_CDGBRA, + LONG_INSN_CDGTRA, + LONG_INSN_CDLFBR, + LONG_INSN_CDLFTR, + LONG_INSN_CDLGBR, + LONG_INSN_CDLGTR, + LONG_INSN_CEFBRA, + LONG_INSN_CEGBRA, + LONG_INSN_CELFBR, + LONG_INSN_CELGBR, + LONG_INSN_CFDBRA, + LONG_INSN_CFEBRA, + LONG_INSN_CFXBRA, + LONG_INSN_CGDBRA, + LONG_INSN_CGDTRA, + LONG_INSN_CGEBRA, + LONG_INSN_CGXBRA, + LONG_INSN_CGXTRA, + LONG_INSN_CLFDBR, + LONG_INSN_CLFDTR, + LONG_INSN_CLFEBR, LONG_INSN_CLFHSI, + LONG_INSN_CLFXBR, + LONG_INSN_CLFXTR, + LONG_INSN_CLGDBR, + LONG_INSN_CLGDTR, + LONG_INSN_CLGEBR, LONG_INSN_CLGFRL, LONG_INSN_CLGHRL, LONG_INSN_CLGHSI, + LONG_INSN_CLGXBR, + LONG_INSN_CLGXTR, LONG_INSN_CLHHSI, + LONG_INSN_CXFBRA, + LONG_INSN_CXGBRA, + LONG_INSN_CXGTRA, + LONG_INSN_CXLFBR, + LONG_INSN_CXLFTR, + LONG_INSN_CXLGBR, + LONG_INSN_CXLGTR, + LONG_INSN_FIDBRA, + LONG_INSN_FIEBRA, + LONG_INSN_FIXBRA, + LONG_INSN_LDXBRA, + LONG_INSN_LEDBRA, + LONG_INSN_LEXBRA, + LONG_INSN_LLGFAT, LONG_INSN_LLGFRL, LONG_INSN_LLGHRL, + LONG_INSN_LLGTAT, LONG_INSN_POPCNT, + LONG_INSN_RIEMIT, + LONG_INSN_RINEXT, + LONG_INSN_RISBGN, LONG_INSN_RISBHG, LONG_INSN_RISBLG, + LONG_INSN_SLHHHR, + LONG_INSN_SLHHLR, + LONG_INSN_TABORT, + LONG_INSN_TBEGIN, + LONG_INSN_TBEGINC, + LONG_INSN_PCISTG, + LONG_INSN_MPCIFC, + LONG_INSN_STPCIFC, + LONG_INSN_PCISTB, }; static char *long_insn_name[] = { [LONG_INSN_ALGHSIK] = "alghsik", + [LONG_INSN_ALHHHR] = "alhhhr", + [LONG_INSN_ALHHLR] = "alhhlr", [LONG_INSN_ALHSIK] = "alhsik", + [LONG_INSN_ALSIHN] = "alsihn", + [LONG_INSN_CDFBRA] = "cdfbra", + [LONG_INSN_CDGBRA] = "cdgbra", + [LONG_INSN_CDGTRA] = "cdgtra", + [LONG_INSN_CDLFBR] = "cdlfbr", + [LONG_INSN_CDLFTR] = "cdlftr", + [LONG_INSN_CDLGBR] = "cdlgbr", + [LONG_INSN_CDLGTR] = "cdlgtr", + [LONG_INSN_CEFBRA] = "cefbra", + [LONG_INSN_CEGBRA] = "cegbra", + [LONG_INSN_CELFBR] = "celfbr", + [LONG_INSN_CELGBR] = "celgbr", + [LONG_INSN_CFDBRA] = "cfdbra", + [LONG_INSN_CFEBRA] = "cfebra", + [LONG_INSN_CFXBRA] = "cfxbra", + [LONG_INSN_CGDBRA] = "cgdbra", + [LONG_INSN_CGDTRA] = "cgdtra", + [LONG_INSN_CGEBRA] = "cgebra", + [LONG_INSN_CGXBRA] = "cgxbra", + [LONG_INSN_CGXTRA] = "cgxtra", + [LONG_INSN_CLFDBR] = "clfdbr", + [LONG_INSN_CLFDTR] = "clfdtr", + [LONG_INSN_CLFEBR] = "clfebr", [LONG_INSN_CLFHSI] = "clfhsi", + [LONG_INSN_CLFXBR] = "clfxbr", + [LONG_INSN_CLFXTR] = "clfxtr", + [LONG_INSN_CLGDBR] = "clgdbr", + [LONG_INSN_CLGDTR] = "clgdtr", + [LONG_INSN_CLGEBR] = "clgebr", [LONG_INSN_CLGFRL] = 
"clgfrl", [LONG_INSN_CLGHRL] = "clghrl", [LONG_INSN_CLGHSI] = "clghsi", + [LONG_INSN_CLGXBR] = "clgxbr", + [LONG_INSN_CLGXTR] = "clgxtr", [LONG_INSN_CLHHSI] = "clhhsi", + [LONG_INSN_CXFBRA] = "cxfbra", + [LONG_INSN_CXGBRA] = "cxgbra", + [LONG_INSN_CXGTRA] = "cxgtra", + [LONG_INSN_CXLFBR] = "cxlfbr", + [LONG_INSN_CXLFTR] = "cxlftr", + [LONG_INSN_CXLGBR] = "cxlgbr", + [LONG_INSN_CXLGTR] = "cxlgtr", + [LONG_INSN_FIDBRA] = "fidbra", + [LONG_INSN_FIEBRA] = "fiebra", + [LONG_INSN_FIXBRA] = "fixbra", + [LONG_INSN_LDXBRA] = "ldxbra", + [LONG_INSN_LEDBRA] = "ledbra", + [LONG_INSN_LEXBRA] = "lexbra", + [LONG_INSN_LLGFAT] = "llgfat", [LONG_INSN_LLGFRL] = "llgfrl", [LONG_INSN_LLGHRL] = "llghrl", + [LONG_INSN_LLGTAT] = "llgtat", [LONG_INSN_POPCNT] = "popcnt", + [LONG_INSN_RIEMIT] = "riemit", + [LONG_INSN_RINEXT] = "rinext", + [LONG_INSN_RISBGN] = "risbgn", [LONG_INSN_RISBHG] = "risbhg", - [LONG_INSN_RISBLG] = "risblk", + [LONG_INSN_RISBLG] = "risblg", + [LONG_INSN_SLHHHR] = "slhhhr", + [LONG_INSN_SLHHLR] = "slhhlr", + [LONG_INSN_TABORT] = "tabort", + [LONG_INSN_TBEGIN] = "tbegin", + [LONG_INSN_TBEGINC] = "tbeginc", + [LONG_INSN_PCISTG] = "pcistg", + [LONG_INSN_MPCIFC] = "mpcifc", + [LONG_INSN_STPCIFC] = "stpcifc", + [LONG_INSN_PCISTB] = "pcistb", }; -static struct insn opcode[] = { +static struct s390_insn opcode[] = { #ifdef CONFIG_64BIT + { "bprp", 0xc5, INSTR_MII_UPI }, + { "bpp", 0xc7, INSTR_SMI_U0RDP }, + { "trtr", 0xd0, INSTR_SS_L0RDRD }, { "lmd", 0xef, INSTR_SS_RRRDRD3 }, #endif { "spm", 0x04, INSTR_RR_R0 }, @@ -371,7 +496,6 @@ static struct insn opcode[] = { { "lcdr", 0x23, INSTR_RR_FF }, { "hdr", 0x24, INSTR_RR_FF }, { "ldxr", 0x25, INSTR_RR_FF }, - { "lrdr", 0x25, INSTR_RR_FF }, { "mxr", 0x26, INSTR_RR_FF }, { "mxdr", 0x27, INSTR_RR_FF }, { "ldr", 0x28, INSTR_RR_FF }, @@ -388,7 +512,6 @@ static struct insn opcode[] = { { "lcer", 0x33, INSTR_RR_FF }, { "her", 0x34, INSTR_RR_FF }, { "ledr", 0x35, INSTR_RR_FF }, - { "lrer", 0x35, INSTR_RR_FF }, { "axr", 0x36, INSTR_RR_FF }, { "sxr", 0x37, INSTR_RR_FF }, { "ler", 0x38, INSTR_RR_FF }, @@ -396,7 +519,6 @@ static struct insn opcode[] = { { "aer", 0x3a, INSTR_RR_FF }, { "ser", 0x3b, INSTR_RR_FF }, { "mder", 0x3c, INSTR_RR_FF }, - { "mer", 0x3c, INSTR_RR_FF }, { "der", 0x3d, INSTR_RR_FF }, { "aur", 0x3e, INSTR_RR_FF }, { "sur", 0x3f, INSTR_RR_FF }, @@ -447,7 +569,6 @@ static struct insn opcode[] = { { "ae", 0x7a, INSTR_RX_FRRD }, { "se", 0x7b, INSTR_RX_FRRD }, { "mde", 0x7c, INSTR_RX_FRRD }, - { "me", 0x7c, INSTR_RX_FRRD }, { "de", 0x7d, INSTR_RX_FRRD }, { "au", 0x7e, INSTR_RX_FRRD }, { "su", 0x7f, INSTR_RX_FRRD }, @@ -525,11 +646,11 @@ static struct insn opcode[] = { { "", 0, INSTR_INVALID } }; -static struct insn opcode_01[] = { +static struct s390_insn opcode_01[] = { #ifdef CONFIG_64BIT - { "sam64", 0x0e, INSTR_E }, - { "pfpo", 0x0a, INSTR_E }, { "ptff", 0x04, INSTR_E }, + { "pfpo", 0x0a, INSTR_E }, + { "sam64", 0x0e, INSTR_E }, #endif { "pr", 0x01, INSTR_E }, { "upt", 0x02, INSTR_E }, @@ -541,7 +662,7 @@ static struct insn opcode_01[] = { { "", 0, INSTR_INVALID } }; -static struct insn opcode_a5[] = { +static struct s390_insn opcode_a5[] = { #ifdef CONFIG_64BIT { "iihh", 0x00, INSTR_RI_RU }, { "iihl", 0x01, INSTR_RI_RU }, @@ -563,7 +684,7 @@ static struct insn opcode_a5[] = { { "", 0, INSTR_INVALID } }; -static struct insn opcode_a7[] = { +static struct s390_insn opcode_a7[] = { #ifdef CONFIG_64BIT { "tmhh", 0x02, INSTR_RI_RU }, { "tmhl", 0x03, INSTR_RI_RU }, @@ -585,18 +706,41 @@ static struct insn opcode_a7[] = { { "", 0, INSTR_INVALID } }; 
-static struct insn opcode_b2[] = { +static struct s390_insn opcode_aa[] = { +#ifdef CONFIG_64BIT + { { 0, LONG_INSN_RINEXT }, 0x00, INSTR_RI_RI }, + { "rion", 0x01, INSTR_RI_RI }, + { "tric", 0x02, INSTR_RI_RI }, + { "rioff", 0x03, INSTR_RI_RI }, + { { 0, LONG_INSN_RIEMIT }, 0x04, INSTR_RI_RI }, +#endif + { "", 0, INSTR_INVALID } +}; + +static struct s390_insn opcode_b2[] = { #ifdef CONFIG_64BIT - { "sske", 0x2b, INSTR_RRF_M0RR }, { "stckf", 0x7c, INSTR_S_RD }, - { "cu21", 0xa6, INSTR_RRF_M0RR }, - { "cuutf", 0xa6, INSTR_RRF_M0RR }, - { "cu12", 0xa7, INSTR_RRF_M0RR }, - { "cutfu", 0xa7, INSTR_RRF_M0RR }, + { "lpp", 0x80, INSTR_S_RD }, + { "lcctl", 0x84, INSTR_S_RD }, + { "lpctl", 0x85, INSTR_S_RD }, + { "qsi", 0x86, INSTR_S_RD }, + { "lsctl", 0x87, INSTR_S_RD }, + { "qctri", 0x8e, INSTR_S_RD }, { "stfle", 0xb0, INSTR_S_RD }, { "lpswe", 0xb2, INSTR_S_RD }, + { "srnmb", 0xb8, INSTR_S_RD }, { "srnmt", 0xb9, INSTR_S_RD }, { "lfas", 0xbd, INSTR_S_RD }, + { "scctr", 0xe0, INSTR_RRE_RR }, + { "spctr", 0xe1, INSTR_RRE_RR }, + { "ecctr", 0xe4, INSTR_RRE_RR }, + { "epctr", 0xe5, INSTR_RRE_RR }, + { "ppa", 0xe8, INSTR_RRF_U0RR }, + { "etnd", 0xec, INSTR_RRE_R0 }, + { "ecpga", 0xed, INSTR_RRE_RR }, + { "tend", 0xf8, INSTR_S_00 }, + { "niai", 0xfa, INSTR_IE_UU }, + { { 0, LONG_INSN_TABORT }, 0xfc, INSTR_S_RD }, #endif { "stidp", 0x02, INSTR_S_RD }, { "sck", 0x04, INSTR_S_RD }, @@ -615,6 +759,7 @@ static struct insn opcode_b2[] = { { "pc", 0x18, INSTR_S_RD }, { "sac", 0x19, INSTR_S_RD }, { "cfc", 0x1a, INSTR_S_RD }, + { "servc", 0x20, INSTR_RRE_RR }, { "ipte", 0x21, INSTR_RRE_RR }, { "ipm", 0x22, INSTR_RRE_R0 }, { "ivsk", 0x23, INSTR_RRE_RR }, @@ -625,9 +770,9 @@ static struct insn opcode_b2[] = { { "pt", 0x28, INSTR_RRE_RR }, { "iske", 0x29, INSTR_RRE_RR }, { "rrbe", 0x2a, INSTR_RRE_RR }, - { "sske", 0x2b, INSTR_RRE_RR }, + { "sske", 0x2b, INSTR_RRF_M0RR }, { "tb", 0x2c, INSTR_RRE_0R }, - { "dxr", 0x2d, INSTR_RRE_F0 }, + { "dxr", 0x2d, INSTR_RRE_FF }, { "pgin", 0x2e, INSTR_RRE_RR }, { "pgout", 0x2f, INSTR_RRE_RR }, { "csch", 0x30, INSTR_S_00 }, @@ -645,8 +790,8 @@ static struct insn opcode_b2[] = { { "schm", 0x3c, INSTR_S_00 }, { "bakr", 0x40, INSTR_RRE_RR }, { "cksm", 0x41, INSTR_RRE_RR }, - { "sqdr", 0x44, INSTR_RRE_F0 }, - { "sqer", 0x45, INSTR_RRE_F0 }, + { "sqdr", 0x44, INSTR_RRE_FF }, + { "sqer", 0x45, INSTR_RRE_FF }, { "stura", 0x46, INSTR_RRE_RR }, { "msta", 0x47, INSTR_RRE_R0 }, { "palb", 0x48, INSTR_RRE_00 }, @@ -672,20 +817,19 @@ static struct insn opcode_b2[] = { { "rp", 0x77, INSTR_S_RD }, { "stcke", 0x78, INSTR_S_RD }, { "sacf", 0x79, INSTR_S_RD }, - { "spp", 0x80, INSTR_S_RD }, { "stsi", 0x7d, INSTR_S_RD }, { "srnm", 0x99, INSTR_S_RD }, { "stfpc", 0x9c, INSTR_S_RD }, { "lfpc", 0x9d, INSTR_S_RD }, { "tre", 0xa5, INSTR_RRE_RR }, - { "cuutf", 0xa6, INSTR_RRE_RR }, - { "cutfu", 0xa7, INSTR_RRE_RR }, + { "cuutf", 0xa6, INSTR_RRF_M0RR }, + { "cutfu", 0xa7, INSTR_RRF_M0RR }, { "stfl", 0xb1, INSTR_S_RD }, { "trap4", 0xff, INSTR_S_RD }, { "", 0, INSTR_INVALID } }; -static struct insn opcode_b3[] = { +static struct s390_insn opcode_b3[] = { #ifdef CONFIG_64BIT { "maylr", 0x38, INSTR_RRF_F0FF }, { "mylr", 0x39, INSTR_RRF_F0FF }, @@ -693,72 +837,87 @@ static struct insn opcode_b3[] = { { "myr", 0x3b, INSTR_RRF_F0FF }, { "mayhr", 0x3c, INSTR_RRF_F0FF }, { "myhr", 0x3d, INSTR_RRF_F0FF }, - { "cegbr", 0xa4, INSTR_RRE_RR }, - { "cdgbr", 0xa5, INSTR_RRE_RR }, - { "cxgbr", 0xa6, INSTR_RRE_RR }, - { "cgebr", 0xa8, INSTR_RRF_U0RF }, - { "cgdbr", 0xa9, INSTR_RRF_U0RF }, - { "cgxbr", 0xaa, INSTR_RRF_U0RF }, 
- { "cfer", 0xb8, INSTR_RRF_U0RF }, - { "cfdr", 0xb9, INSTR_RRF_U0RF }, - { "cfxr", 0xba, INSTR_RRF_U0RF }, - { "cegr", 0xc4, INSTR_RRE_RR }, - { "cdgr", 0xc5, INSTR_RRE_RR }, - { "cxgr", 0xc6, INSTR_RRE_RR }, - { "cger", 0xc8, INSTR_RRF_U0RF }, - { "cgdr", 0xc9, INSTR_RRF_U0RF }, - { "cgxr", 0xca, INSTR_RRF_U0RF }, { "lpdfr", 0x70, INSTR_RRE_FF }, { "lndfr", 0x71, INSTR_RRE_FF }, { "cpsdr", 0x72, INSTR_RRF_F0FF2 }, { "lcdfr", 0x73, INSTR_RRE_FF }, + { "sfasr", 0x85, INSTR_RRE_R0 }, + { { 0, LONG_INSN_CELFBR }, 0x90, INSTR_RRF_UUFR }, + { { 0, LONG_INSN_CDLFBR }, 0x91, INSTR_RRF_UUFR }, + { { 0, LONG_INSN_CXLFBR }, 0x92, INSTR_RRF_UURF }, + { { 0, LONG_INSN_CEFBRA }, 0x94, INSTR_RRF_UUFR }, + { { 0, LONG_INSN_CDFBRA }, 0x95, INSTR_RRF_UUFR }, + { { 0, LONG_INSN_CXFBRA }, 0x96, INSTR_RRF_UURF }, + { { 0, LONG_INSN_CFEBRA }, 0x98, INSTR_RRF_UURF }, + { { 0, LONG_INSN_CFDBRA }, 0x99, INSTR_RRF_UURF }, + { { 0, LONG_INSN_CFXBRA }, 0x9a, INSTR_RRF_UUFR }, + { { 0, LONG_INSN_CLFEBR }, 0x9c, INSTR_RRF_UURF }, + { { 0, LONG_INSN_CLFDBR }, 0x9d, INSTR_RRF_UURF }, + { { 0, LONG_INSN_CLFXBR }, 0x9e, INSTR_RRF_UUFR }, + { { 0, LONG_INSN_CELGBR }, 0xa0, INSTR_RRF_UUFR }, + { { 0, LONG_INSN_CDLGBR }, 0xa1, INSTR_RRF_UUFR }, + { { 0, LONG_INSN_CXLGBR }, 0xa2, INSTR_RRF_UURF }, + { { 0, LONG_INSN_CEGBRA }, 0xa4, INSTR_RRF_UUFR }, + { { 0, LONG_INSN_CDGBRA }, 0xa5, INSTR_RRF_UUFR }, + { { 0, LONG_INSN_CXGBRA }, 0xa6, INSTR_RRF_UURF }, + { { 0, LONG_INSN_CGEBRA }, 0xa8, INSTR_RRF_UURF }, + { { 0, LONG_INSN_CGDBRA }, 0xa9, INSTR_RRF_UURF }, + { { 0, LONG_INSN_CGXBRA }, 0xaa, INSTR_RRF_UUFR }, + { { 0, LONG_INSN_CLGEBR }, 0xac, INSTR_RRF_UURF }, + { { 0, LONG_INSN_CLGDBR }, 0xad, INSTR_RRF_UURF }, + { { 0, LONG_INSN_CLGXBR }, 0xae, INSTR_RRF_UUFR }, { "ldgr", 0xc1, INSTR_RRE_FR }, + { "cegr", 0xc4, INSTR_RRE_FR }, + { "cdgr", 0xc5, INSTR_RRE_FR }, + { "cxgr", 0xc6, INSTR_RRE_FR }, + { "cger", 0xc8, INSTR_RRF_U0RF }, + { "cgdr", 0xc9, INSTR_RRF_U0RF }, + { "cgxr", 0xca, INSTR_RRF_U0RF }, { "lgdr", 0xcd, INSTR_RRE_RF }, - { "adtr", 0xd2, INSTR_RRR_F0FF }, - { "axtr", 0xda, INSTR_RRR_F0FF }, - { "cdtr", 0xe4, INSTR_RRE_FF }, - { "cxtr", 0xec, INSTR_RRE_FF }, + { "mdtra", 0xd0, INSTR_RRF_FUFF2 }, + { "ddtra", 0xd1, INSTR_RRF_FUFF2 }, + { "adtra", 0xd2, INSTR_RRF_FUFF2 }, + { "sdtra", 0xd3, INSTR_RRF_FUFF2 }, + { "ldetr", 0xd4, INSTR_RRF_0UFF }, + { "ledtr", 0xd5, INSTR_RRF_UUFF }, + { "ltdtr", 0xd6, INSTR_RRE_FF }, + { "fidtr", 0xd7, INSTR_RRF_UUFF }, + { "mxtra", 0xd8, INSTR_RRF_FUFF2 }, + { "dxtra", 0xd9, INSTR_RRF_FUFF2 }, + { "axtra", 0xda, INSTR_RRF_FUFF2 }, + { "sxtra", 0xdb, INSTR_RRF_FUFF2 }, + { "lxdtr", 0xdc, INSTR_RRF_0UFF }, + { "ldxtr", 0xdd, INSTR_RRF_UUFF }, + { "ltxtr", 0xde, INSTR_RRE_FF }, + { "fixtr", 0xdf, INSTR_RRF_UUFF }, { "kdtr", 0xe0, INSTR_RRE_FF }, - { "kxtr", 0xe8, INSTR_RRE_FF }, - { "cedtr", 0xf4, INSTR_RRE_FF }, - { "cextr", 0xfc, INSTR_RRE_FF }, - { "cdgtr", 0xf1, INSTR_RRE_FR }, - { "cxgtr", 0xf9, INSTR_RRE_FR }, - { "cdstr", 0xf3, INSTR_RRE_FR }, - { "cxstr", 0xfb, INSTR_RRE_FR }, - { "cdutr", 0xf2, INSTR_RRE_FR }, - { "cxutr", 0xfa, INSTR_RRE_FR }, - { "cgdtr", 0xe1, INSTR_RRF_U0RF }, - { "cgxtr", 0xe9, INSTR_RRF_U0RF }, - { "csdtr", 0xe3, INSTR_RRE_RF }, - { "csxtr", 0xeb, INSTR_RRE_RF }, + { { 0, LONG_INSN_CGDTRA }, 0xe1, INSTR_RRF_UURF }, { "cudtr", 0xe2, INSTR_RRE_RF }, - { "cuxtr", 0xea, INSTR_RRE_RF }, - { "ddtr", 0xd1, INSTR_RRR_F0FF }, - { "dxtr", 0xd9, INSTR_RRR_F0FF }, + { "csdtr", 0xe3, INSTR_RRE_RF }, + { "cdtr", 0xe4, INSTR_RRE_FF }, { "eedtr", 0xe5, INSTR_RRE_RF }, 
- { "eextr", 0xed, INSTR_RRE_RF }, { "esdtr", 0xe7, INSTR_RRE_RF }, + { "kxtr", 0xe8, INSTR_RRE_FF }, + { { 0, LONG_INSN_CGXTRA }, 0xe9, INSTR_RRF_UUFR }, + { "cuxtr", 0xea, INSTR_RRE_RF }, + { "csxtr", 0xeb, INSTR_RRE_RF }, + { "cxtr", 0xec, INSTR_RRE_FF }, + { "eextr", 0xed, INSTR_RRE_RF }, { "esxtr", 0xef, INSTR_RRE_RF }, - { "iedtr", 0xf6, INSTR_RRF_F0FR }, - { "iextr", 0xfe, INSTR_RRF_F0FR }, - { "ltdtr", 0xd6, INSTR_RRE_FF }, - { "ltxtr", 0xde, INSTR_RRE_FF }, - { "fidtr", 0xd7, INSTR_RRF_UUFF }, - { "fixtr", 0xdf, INSTR_RRF_UUFF }, - { "ldetr", 0xd4, INSTR_RRF_0UFF }, - { "lxdtr", 0xdc, INSTR_RRF_0UFF }, - { "ledtr", 0xd5, INSTR_RRF_UUFF }, - { "ldxtr", 0xdd, INSTR_RRF_UUFF }, - { "mdtr", 0xd0, INSTR_RRR_F0FF }, - { "mxtr", 0xd8, INSTR_RRR_F0FF }, + { { 0, LONG_INSN_CDGTRA }, 0xf1, INSTR_RRF_UUFR }, + { "cdutr", 0xf2, INSTR_RRE_FR }, + { "cdstr", 0xf3, INSTR_RRE_FR }, + { "cedtr", 0xf4, INSTR_RRE_FF }, { "qadtr", 0xf5, INSTR_RRF_FUFF }, - { "qaxtr", 0xfd, INSTR_RRF_FUFF }, + { "iedtr", 0xf6, INSTR_RRF_F0FR }, { "rrdtr", 0xf7, INSTR_RRF_FFRU }, + { { 0, LONG_INSN_CXGTRA }, 0xf9, INSTR_RRF_UURF }, + { "cxutr", 0xfa, INSTR_RRE_FR }, + { "cxstr", 0xfb, INSTR_RRE_FR }, + { "cextr", 0xfc, INSTR_RRE_FF }, + { "qaxtr", 0xfd, INSTR_RRF_FUFF }, + { "iextr", 0xfe, INSTR_RRF_F0FR }, { "rrxtr", 0xff, INSTR_RRF_FFRU }, - { "sfasr", 0x85, INSTR_RRE_R0 }, - { "sdtr", 0xd3, INSTR_RRR_F0FF }, - { "sxtr", 0xdb, INSTR_RRR_F0FF }, #endif { "lpebr", 0x00, INSTR_RRE_FF }, { "lnebr", 0x01, INSTR_RRE_FF }, @@ -805,10 +964,10 @@ static struct insn opcode_b3[] = { { "lnxbr", 0x41, INSTR_RRE_FF }, { "ltxbr", 0x42, INSTR_RRE_FF }, { "lcxbr", 0x43, INSTR_RRE_FF }, - { "ledbr", 0x44, INSTR_RRE_FF }, - { "ldxbr", 0x45, INSTR_RRE_FF }, - { "lexbr", 0x46, INSTR_RRE_FF }, - { "fixbr", 0x47, INSTR_RRF_U0FF }, + { { 0, LONG_INSN_LEDBRA }, 0x44, INSTR_RRF_UUFF }, + { { 0, LONG_INSN_LDXBRA }, 0x45, INSTR_RRF_UUFF }, + { { 0, LONG_INSN_LEXBRA }, 0x46, INSTR_RRF_UUFF }, + { { 0, LONG_INSN_FIXBRA }, 0x47, INSTR_RRF_UUFF }, { "kxbr", 0x48, INSTR_RRE_FF }, { "cxbr", 0x49, INSTR_RRE_FF }, { "axbr", 0x4a, INSTR_RRE_FF }, @@ -818,24 +977,24 @@ static struct insn opcode_b3[] = { { "tbedr", 0x50, INSTR_RRF_U0FF }, { "tbdr", 0x51, INSTR_RRF_U0FF }, { "diebr", 0x53, INSTR_RRF_FUFF }, - { "fiebr", 0x57, INSTR_RRF_U0FF }, - { "thder", 0x58, INSTR_RRE_RR }, - { "thdr", 0x59, INSTR_RRE_RR }, + { { 0, LONG_INSN_FIEBRA }, 0x57, INSTR_RRF_UUFF }, + { "thder", 0x58, INSTR_RRE_FF }, + { "thdr", 0x59, INSTR_RRE_FF }, { "didbr", 0x5b, INSTR_RRF_FUFF }, - { "fidbr", 0x5f, INSTR_RRF_U0FF }, + { { 0, LONG_INSN_FIDBRA }, 0x5f, INSTR_RRF_UUFF }, { "lpxr", 0x60, INSTR_RRE_FF }, { "lnxr", 0x61, INSTR_RRE_FF }, { "ltxr", 0x62, INSTR_RRE_FF }, { "lcxr", 0x63, INSTR_RRE_FF }, - { "lxr", 0x65, INSTR_RRE_RR }, + { "lxr", 0x65, INSTR_RRE_FF }, { "lexr", 0x66, INSTR_RRE_FF }, - { "fixr", 0x67, INSTR_RRF_U0FF }, + { "fixr", 0x67, INSTR_RRE_FF }, { "cxr", 0x69, INSTR_RRE_FF }, - { "lzer", 0x74, INSTR_RRE_R0 }, - { "lzdr", 0x75, INSTR_RRE_R0 }, - { "lzxr", 0x76, INSTR_RRE_R0 }, - { "fier", 0x77, INSTR_RRF_U0FF }, - { "fidr", 0x7f, INSTR_RRF_U0FF }, + { "lzer", 0x74, INSTR_RRE_F0 }, + { "lzdr", 0x75, INSTR_RRE_F0 }, + { "lzxr", 0x76, INSTR_RRE_F0 }, + { "fier", 0x77, INSTR_RRE_FF }, + { "fidr", 0x7f, INSTR_RRE_FF }, { "sfpc", 0x84, INSTR_RRE_RR_OPT }, { "efpc", 0x8c, INSTR_RRE_RR_OPT }, { "cefbr", 0x94, INSTR_RRE_RF }, @@ -844,13 +1003,16 @@ static struct insn opcode_b3[] = { { "cfebr", 0x98, INSTR_RRF_U0RF }, { "cfdbr", 0x99, INSTR_RRF_U0RF }, { "cfxbr", 0x9a, 
INSTR_RRF_U0RF }, - { "cefr", 0xb4, INSTR_RRE_RF }, - { "cdfr", 0xb5, INSTR_RRE_RF }, - { "cxfr", 0xb6, INSTR_RRE_RF }, + { "cefr", 0xb4, INSTR_RRE_FR }, + { "cdfr", 0xb5, INSTR_RRE_FR }, + { "cxfr", 0xb6, INSTR_RRE_FR }, + { "cfer", 0xb8, INSTR_RRF_U0RF }, + { "cfdr", 0xb9, INSTR_RRF_U0RF }, + { "cfxr", 0xba, INSTR_RRF_U0RF }, { "", 0, INSTR_INVALID } }; -static struct insn opcode_b9[] = { +static struct s390_insn opcode_b9[] = { #ifdef CONFIG_64BIT { "lpgr", 0x00, INSTR_RRE_RR }, { "lngr", 0x01, INSTR_RRE_RR }, @@ -888,7 +1050,23 @@ static struct insn opcode_b9[] = { { "lhr", 0x27, INSTR_RRE_RR }, { "cgfr", 0x30, INSTR_RRE_RR }, { "clgfr", 0x31, INSTR_RRE_RR }, + { "cfdtr", 0x41, INSTR_RRF_UURF }, + { { 0, LONG_INSN_CLGDTR }, 0x42, INSTR_RRF_UURF }, + { { 0, LONG_INSN_CLFDTR }, 0x43, INSTR_RRF_UURF }, { "bctgr", 0x46, INSTR_RRE_RR }, + { "cfxtr", 0x49, INSTR_RRF_UURF }, + { { 0, LONG_INSN_CLGXTR }, 0x4a, INSTR_RRF_UUFR }, + { { 0, LONG_INSN_CLFXTR }, 0x4b, INSTR_RRF_UUFR }, + { "cdftr", 0x51, INSTR_RRF_UUFR }, + { { 0, LONG_INSN_CDLGTR }, 0x52, INSTR_RRF_UUFR }, + { { 0, LONG_INSN_CDLFTR }, 0x53, INSTR_RRF_UUFR }, + { "cxftr", 0x59, INSTR_RRF_UURF }, + { { 0, LONG_INSN_CXLGTR }, 0x5a, INSTR_RRF_UURF }, + { { 0, LONG_INSN_CXLFTR }, 0x5b, INSTR_RRF_UUFR }, + { "cgrt", 0x60, INSTR_RRF_U0RR }, + { "clgrt", 0x61, INSTR_RRF_U0RR }, + { "crt", 0x72, INSTR_RRF_U0RR }, + { "clrt", 0x73, INSTR_RRF_U0RR }, { "ngr", 0x80, INSTR_RRE_RR }, { "ogr", 0x81, INSTR_RRE_RR }, { "xgr", 0x82, INSTR_RRE_RR }, @@ -901,32 +1079,34 @@ static struct insn opcode_b9[] = { { "slbgr", 0x89, INSTR_RRE_RR }, { "cspg", 0x8a, INSTR_RRE_RR }, { "idte", 0x8e, INSTR_RRF_R0RR }, + { "crdte", 0x8f, INSTR_RRF_RMRR }, { "llcr", 0x94, INSTR_RRE_RR }, { "llhr", 0x95, INSTR_RRE_RR }, { "esea", 0x9d, INSTR_RRE_R0 }, + { "ptf", 0xa2, INSTR_RRE_R0 }, { "lptea", 0xaa, INSTR_RRF_RURR }, + { "rrbm", 0xae, INSTR_RRE_RR }, + { "pfmf", 0xaf, INSTR_RRE_RR }, { "cu14", 0xb0, INSTR_RRF_M0RR }, { "cu24", 0xb1, INSTR_RRF_M0RR }, - { "cu41", 0xb2, INSTR_RRF_M0RR }, - { "cu42", 0xb3, INSTR_RRF_M0RR }, - { "crt", 0x72, INSTR_RRF_U0RR }, - { "cgrt", 0x60, INSTR_RRF_U0RR }, - { "clrt", 0x73, INSTR_RRF_U0RR }, - { "clgrt", 0x61, INSTR_RRF_U0RR }, - { "ptf", 0xa2, INSTR_RRE_R0 }, - { "pfmf", 0xaf, INSTR_RRE_RR }, - { "trte", 0xbf, INSTR_RRF_M0RR }, + { "cu41", 0xb2, INSTR_RRE_RR }, + { "cu42", 0xb3, INSTR_RRE_RR }, { "trtre", 0xbd, INSTR_RRF_M0RR }, + { "srstu", 0xbe, INSTR_RRE_RR }, + { "trte", 0xbf, INSTR_RRF_M0RR }, { "ahhhr", 0xc8, INSTR_RRF_R0RR2 }, { "shhhr", 0xc9, INSTR_RRF_R0RR2 }, - { "alhhh", 0xca, INSTR_RRF_R0RR2 }, - { "alhhl", 0xca, INSTR_RRF_R0RR2 }, - { "slhhh", 0xcb, INSTR_RRF_R0RR2 }, - { "chhr ", 0xcd, INSTR_RRE_RR }, + { { 0, LONG_INSN_ALHHHR }, 0xca, INSTR_RRF_R0RR2 }, + { { 0, LONG_INSN_SLHHHR }, 0xcb, INSTR_RRF_R0RR2 }, + { "chhr", 0xcd, INSTR_RRE_RR }, { "clhhr", 0xcf, INSTR_RRE_RR }, + { { 0, LONG_INSN_PCISTG }, 0xd0, INSTR_RRE_RR }, + { "pcilg", 0xd2, INSTR_RRE_RR }, + { "rpcit", 0xd3, INSTR_RRE_RR }, { "ahhlr", 0xd8, INSTR_RRF_R0RR2 }, { "shhlr", 0xd9, INSTR_RRF_R0RR2 }, - { "slhhl", 0xdb, INSTR_RRF_R0RR2 }, + { { 0, LONG_INSN_ALHHLR }, 0xda, INSTR_RRF_R0RR2 }, + { { 0, LONG_INSN_SLHHLR }, 0xdb, INSTR_RRF_R0RR2 }, { "chlr", 0xdd, INSTR_RRE_RR }, { "clhlr", 0xdf, INSTR_RRE_RR }, { { 0, LONG_INSN_POPCNT }, 0xe1, INSTR_RRE_RR }, @@ -954,13 +1134,9 @@ static struct insn opcode_b9[] = { { "kimd", 0x3e, INSTR_RRE_RR }, { "klmd", 0x3f, INSTR_RRE_RR }, { "epsw", 0x8d, INSTR_RRE_RR }, - { "trtt", 0x90, INSTR_RRE_RR }, { "trtt", 0x90, 
INSTR_RRF_M0RR }, - { "trto", 0x91, INSTR_RRE_RR }, { "trto", 0x91, INSTR_RRF_M0RR }, - { "trot", 0x92, INSTR_RRE_RR }, { "trot", 0x92, INSTR_RRF_M0RR }, - { "troo", 0x93, INSTR_RRE_RR }, { "troo", 0x93, INSTR_RRF_M0RR }, { "mlr", 0x96, INSTR_RRE_RR }, { "dlr", 0x97, INSTR_RRE_RR }, @@ -969,7 +1145,7 @@ static struct insn opcode_b9[] = { { "", 0, INSTR_INVALID } }; -static struct insn opcode_c0[] = { +static struct s390_insn opcode_c0[] = { #ifdef CONFIG_64BIT { "lgfi", 0x01, INSTR_RIL_RI }, { "xihf", 0x06, INSTR_RIL_RU }, @@ -989,8 +1165,10 @@ static struct insn opcode_c0[] = { { "", 0, INSTR_INVALID } }; -static struct insn opcode_c2[] = { +static struct s390_insn opcode_c2[] = { #ifdef CONFIG_64BIT + { "msgfi", 0x00, INSTR_RIL_RI }, + { "msfi", 0x01, INSTR_RIL_RI }, { "slgfi", 0x04, INSTR_RIL_RU }, { "slfi", 0x05, INSTR_RIL_RU }, { "agfi", 0x08, INSTR_RIL_RI }, @@ -1001,71 +1179,69 @@ static struct insn opcode_c2[] = { { "cfi", 0x0d, INSTR_RIL_RI }, { "clgfi", 0x0e, INSTR_RIL_RU }, { "clfi", 0x0f, INSTR_RIL_RU }, - { "msfi", 0x01, INSTR_RIL_RI }, - { "msgfi", 0x00, INSTR_RIL_RI }, #endif { "", 0, INSTR_INVALID } }; -static struct insn opcode_c4[] = { +static struct s390_insn opcode_c4[] = { #ifdef CONFIG_64BIT - { "lrl", 0x0d, INSTR_RIL_RP }, + { "llhrl", 0x02, INSTR_RIL_RP }, + { "lghrl", 0x04, INSTR_RIL_RP }, + { "lhrl", 0x05, INSTR_RIL_RP }, + { { 0, LONG_INSN_LLGHRL }, 0x06, INSTR_RIL_RP }, + { "sthrl", 0x07, INSTR_RIL_RP }, { "lgrl", 0x08, INSTR_RIL_RP }, + { "stgrl", 0x0b, INSTR_RIL_RP }, { "lgfrl", 0x0c, INSTR_RIL_RP }, - { "lhrl", 0x05, INSTR_RIL_RP }, - { "lghrl", 0x04, INSTR_RIL_RP }, + { "lrl", 0x0d, INSTR_RIL_RP }, { { 0, LONG_INSN_LLGFRL }, 0x0e, INSTR_RIL_RP }, - { "llhrl", 0x02, INSTR_RIL_RP }, - { { 0, LONG_INSN_LLGHRL }, 0x06, INSTR_RIL_RP }, { "strl", 0x0f, INSTR_RIL_RP }, - { "stgrl", 0x0b, INSTR_RIL_RP }, - { "sthrl", 0x07, INSTR_RIL_RP }, #endif { "", 0, INSTR_INVALID } }; -static struct insn opcode_c6[] = { +static struct s390_insn opcode_c6[] = { #ifdef CONFIG_64BIT - { "crl", 0x0d, INSTR_RIL_RP }, - { "cgrl", 0x08, INSTR_RIL_RP }, - { "cgfrl", 0x0c, INSTR_RIL_RP }, - { "chrl", 0x05, INSTR_RIL_RP }, + { "exrl", 0x00, INSTR_RIL_RP }, + { "pfdrl", 0x02, INSTR_RIL_UP }, { "cghrl", 0x04, INSTR_RIL_RP }, - { "clrl", 0x0f, INSTR_RIL_RP }, + { "chrl", 0x05, INSTR_RIL_RP }, + { { 0, LONG_INSN_CLGHRL }, 0x06, INSTR_RIL_RP }, + { "clhrl", 0x07, INSTR_RIL_RP }, + { "cgrl", 0x08, INSTR_RIL_RP }, { "clgrl", 0x0a, INSTR_RIL_RP }, + { "cgfrl", 0x0c, INSTR_RIL_RP }, + { "crl", 0x0d, INSTR_RIL_RP }, { { 0, LONG_INSN_CLGFRL }, 0x0e, INSTR_RIL_RP }, - { "clhrl", 0x07, INSTR_RIL_RP }, - { { 0, LONG_INSN_CLGHRL }, 0x06, INSTR_RIL_RP }, - { "pfdrl", 0x02, INSTR_RIL_UP }, - { "exrl", 0x00, INSTR_RIL_RP }, + { "clrl", 0x0f, INSTR_RIL_RP }, #endif { "", 0, INSTR_INVALID } }; -static struct insn opcode_c8[] = { +static struct s390_insn opcode_c8[] = { #ifdef CONFIG_64BIT { "mvcos", 0x00, INSTR_SSF_RRDRD }, { "ectg", 0x01, INSTR_SSF_RRDRD }, { "csst", 0x02, INSTR_SSF_RRDRD }, { "lpd", 0x04, INSTR_SSF_RRDRD2 }, - { "lpdg ", 0x05, INSTR_SSF_RRDRD2 }, + { "lpdg", 0x05, INSTR_SSF_RRDRD2 }, #endif { "", 0, INSTR_INVALID } }; -static struct insn opcode_cc[] = { +static struct s390_insn opcode_cc[] = { #ifdef CONFIG_64BIT { "brcth", 0x06, INSTR_RIL_RP }, { "aih", 0x08, INSTR_RIL_RI }, { "alsih", 0x0a, INSTR_RIL_RI }, - { "alsih", 0x0b, INSTR_RIL_RI }, + { { 0, LONG_INSN_ALSIHN }, 0x0b, INSTR_RIL_RI }, { "cih", 0x0d, INSTR_RIL_RI }, - { "clih ", 0x0f, INSTR_RIL_RI }, + { "clih", 0x0f, INSTR_RIL_RI 
}, #endif { "", 0, INSTR_INVALID } }; -static struct insn opcode_e3[] = { +static struct s390_insn opcode_e3[] = { #ifdef CONFIG_64BIT { "ltg", 0x02, INSTR_RXY_RRRD }, { "lrag", 0x03, INSTR_RXY_RRRD }, @@ -1094,11 +1270,15 @@ static struct insn opcode_e3[] = { { "cg", 0x20, INSTR_RXY_RRRD }, { "clg", 0x21, INSTR_RXY_RRRD }, { "stg", 0x24, INSTR_RXY_RRRD }, + { "ntstg", 0x25, INSTR_RXY_RRRD }, { "cvdy", 0x26, INSTR_RXY_RRRD }, { "cvdg", 0x2e, INSTR_RXY_RRRD }, { "strvg", 0x2f, INSTR_RXY_RRRD }, { "cgf", 0x30, INSTR_RXY_RRRD }, { "clgf", 0x31, INSTR_RXY_RRRD }, + { "ltgf", 0x32, INSTR_RXY_RRRD }, + { "cgh", 0x34, INSTR_RXY_RRRD }, + { "pfd", 0x36, INSTR_RXY_URRD }, { "strvh", 0x3f, INSTR_RXY_RRRD }, { "bctg", 0x46, INSTR_RXY_RRRD }, { "sty", 0x50, INSTR_RXY_RRRD }, @@ -1111,21 +1291,25 @@ static struct insn opcode_e3[] = { { "cy", 0x59, INSTR_RXY_RRRD }, { "ay", 0x5a, INSTR_RXY_RRRD }, { "sy", 0x5b, INSTR_RXY_RRRD }, + { "mfy", 0x5c, INSTR_RXY_RRRD }, { "aly", 0x5e, INSTR_RXY_RRRD }, { "sly", 0x5f, INSTR_RXY_RRRD }, { "sthy", 0x70, INSTR_RXY_RRRD }, { "lay", 0x71, INSTR_RXY_RRRD }, { "stcy", 0x72, INSTR_RXY_RRRD }, { "icy", 0x73, INSTR_RXY_RRRD }, + { "laey", 0x75, INSTR_RXY_RRRD }, { "lb", 0x76, INSTR_RXY_RRRD }, { "lgb", 0x77, INSTR_RXY_RRRD }, { "lhy", 0x78, INSTR_RXY_RRRD }, { "chy", 0x79, INSTR_RXY_RRRD }, { "ahy", 0x7a, INSTR_RXY_RRRD }, { "shy", 0x7b, INSTR_RXY_RRRD }, + { "mhy", 0x7c, INSTR_RXY_RRRD }, { "ng", 0x80, INSTR_RXY_RRRD }, { "og", 0x81, INSTR_RXY_RRRD }, { "xg", 0x82, INSTR_RXY_RRRD }, + { "lgat", 0x85, INSTR_RXY_RRRD }, { "mlg", 0x86, INSTR_RXY_RRRD }, { "dlg", 0x87, INSTR_RXY_RRRD }, { "alcg", 0x88, INSTR_RXY_RRRD }, @@ -1136,22 +1320,22 @@ static struct insn opcode_e3[] = { { "llgh", 0x91, INSTR_RXY_RRRD }, { "llc", 0x94, INSTR_RXY_RRRD }, { "llh", 0x95, INSTR_RXY_RRRD }, - { "cgh", 0x34, INSTR_RXY_RRRD }, - { "laey", 0x75, INSTR_RXY_RRRD }, - { "ltgf", 0x32, INSTR_RXY_RRRD }, - { "mfy", 0x5c, INSTR_RXY_RRRD }, - { "mhy", 0x7c, INSTR_RXY_RRRD }, - { "pfd", 0x36, INSTR_RXY_URRD }, + { { 0, LONG_INSN_LLGTAT }, 0x9c, INSTR_RXY_RRRD }, + { { 0, LONG_INSN_LLGFAT }, 0x9d, INSTR_RXY_RRRD }, + { "lat", 0x9f, INSTR_RXY_RRRD }, { "lbh", 0xc0, INSTR_RXY_RRRD }, { "llch", 0xc2, INSTR_RXY_RRRD }, { "stch", 0xc3, INSTR_RXY_RRRD }, { "lhh", 0xc4, INSTR_RXY_RRRD }, { "llhh", 0xc6, INSTR_RXY_RRRD }, { "sthh", 0xc7, INSTR_RXY_RRRD }, + { "lfhat", 0xc8, INSTR_RXY_RRRD }, { "lfh", 0xca, INSTR_RXY_RRRD }, { "stfh", 0xcb, INSTR_RXY_RRRD }, { "chf", 0xcd, INSTR_RXY_RRRD }, { "clhf", 0xcf, INSTR_RXY_RRRD }, + { { 0, LONG_INSN_MPCIFC }, 0xd0, INSTR_RXY_RRRD }, + { { 0, LONG_INSN_STPCIFC }, 0xd4, INSTR_RXY_RRRD }, #endif { "lrv", 0x1e, INSTR_RXY_RRRD }, { "lrvh", 0x1f, INSTR_RXY_RRRD }, @@ -1163,18 +1347,20 @@ static struct insn opcode_e3[] = { { "", 0, INSTR_INVALID } }; -static struct insn opcode_e5[] = { +static struct s390_insn opcode_e5[] = { #ifdef CONFIG_64BIT { "strag", 0x02, INSTR_SSE_RDRD }, + { "mvhhi", 0x44, INSTR_SIL_RDI }, + { "mvghi", 0x48, INSTR_SIL_RDI }, + { "mvhi", 0x4c, INSTR_SIL_RDI }, { "chhsi", 0x54, INSTR_SIL_RDI }, - { "chsi", 0x5c, INSTR_SIL_RDI }, - { "cghsi", 0x58, INSTR_SIL_RDI }, { { 0, LONG_INSN_CLHHSI }, 0x55, INSTR_SIL_RDU }, - { { 0, LONG_INSN_CLFHSI }, 0x5d, INSTR_SIL_RDU }, + { "cghsi", 0x58, INSTR_SIL_RDI }, { { 0, LONG_INSN_CLGHSI }, 0x59, INSTR_SIL_RDU }, - { "mvhhi", 0x44, INSTR_SIL_RDI }, - { "mvhi", 0x4c, INSTR_SIL_RDI }, - { "mvghi", 0x48, INSTR_SIL_RDI }, + { "chsi", 0x5c, INSTR_SIL_RDI }, + { { 0, LONG_INSN_CLFHSI }, 0x5d, INSTR_SIL_RDU }, + { { 0, 
LONG_INSN_TBEGIN }, 0x60, INSTR_SIL_RDU }, + { { 0, LONG_INSN_TBEGINC }, 0x61, INSTR_SIL_RDU }, #endif { "lasp", 0x00, INSTR_SSE_RDRD }, { "tprot", 0x01, INSTR_SSE_RDRD }, @@ -1183,7 +1369,7 @@ static struct insn opcode_e5[] = { { "", 0, INSTR_INVALID } }; -static struct insn opcode_eb[] = { +static struct s390_insn opcode_eb[] = { #ifdef CONFIG_64BIT { "lmg", 0x04, INSTR_RSY_RRRD }, { "srag", 0x0a, INSTR_RSY_RRRD }, @@ -1195,9 +1381,11 @@ static struct insn opcode_eb[] = { { "rllg", 0x1c, INSTR_RSY_RRRD }, { "clmh", 0x20, INSTR_RSY_RURD }, { "clmy", 0x21, INSTR_RSY_RURD }, + { "clt", 0x23, INSTR_RSY_RURD }, { "stmg", 0x24, INSTR_RSY_RRRD }, { "stctg", 0x25, INSTR_RSY_CCRD }, { "stmh", 0x26, INSTR_RSY_RRRD }, + { "clgt", 0x2b, INSTR_RSY_RURD }, { "stcmh", 0x2c, INSTR_RSY_RURD }, { "stcmy", 0x2d, INSTR_RSY_RURD }, { "lctlg", 0x2f, INSTR_RSY_CCRD }, @@ -1206,13 +1394,17 @@ static struct insn opcode_eb[] = { { "cdsg", 0x3e, INSTR_RSY_RRRD }, { "bxhg", 0x44, INSTR_RSY_RRRD }, { "bxleg", 0x45, INSTR_RSY_RRRD }, + { "ecag", 0x4c, INSTR_RSY_RRRD }, { "tmy", 0x51, INSTR_SIY_URD }, { "mviy", 0x52, INSTR_SIY_URD }, { "niy", 0x54, INSTR_SIY_URD }, { "cliy", 0x55, INSTR_SIY_URD }, { "oiy", 0x56, INSTR_SIY_URD }, { "xiy", 0x57, INSTR_SIY_URD }, - { "icmh", 0x80, INSTR_RSE_RURD }, + { "asi", 0x6a, INSTR_SIY_IRD }, + { "alsi", 0x6e, INSTR_SIY_IRD }, + { "agsi", 0x7a, INSTR_SIY_IRD }, + { "algsi", 0x7e, INSTR_SIY_IRD }, { "icmh", 0x80, INSTR_RSY_RURD }, { "icmy", 0x81, INSTR_RSY_RURD }, { "clclu", 0x8f, INSTR_RSY_RRRD }, @@ -1221,11 +1413,8 @@ static struct insn opcode_eb[] = { { "lmy", 0x98, INSTR_RSY_RRRD }, { "lamy", 0x9a, INSTR_RSY_AARD }, { "stamy", 0x9b, INSTR_RSY_AARD }, - { "asi", 0x6a, INSTR_SIY_IRD }, - { "agsi", 0x7a, INSTR_SIY_IRD }, - { "alsi", 0x6e, INSTR_SIY_IRD }, - { "algsi", 0x7e, INSTR_SIY_IRD }, - { "ecag", 0x4c, INSTR_RSY_RRRD }, + { { 0, LONG_INSN_PCISTB }, 0xd0, INSTR_RSY_RRRD }, + { "sic", 0xd1, INSTR_RSY_RRRD }, { "srak", 0xdc, INSTR_RSY_RRRD }, { "slak", 0xdd, INSTR_RSY_RRRD }, { "srlk", 0xde, INSTR_RSY_RRRD }, @@ -1244,6 +1433,9 @@ static struct insn opcode_eb[] = { { "lax", 0xf7, INSTR_RSY_RRRD }, { "laa", 0xf8, INSTR_RSY_RRRD }, { "laal", 0xfa, INSTR_RSY_RRRD }, + { "lric", 0x60, INSTR_RSY_RDRM }, + { "stric", 0x61, INSTR_RSY_RDRM }, + { "mric", 0x62, INSTR_RSY_RDRM }, #endif { "rll", 0x1d, INSTR_RSY_RRRD }, { "mvclu", 0x8e, INSTR_RSY_RRRD }, @@ -1251,45 +1443,46 @@ static struct insn opcode_eb[] = { { "", 0, INSTR_INVALID } }; -static struct insn opcode_ec[] = { +static struct s390_insn opcode_ec[] = { #ifdef CONFIG_64BIT { "brxhg", 0x44, INSTR_RIE_RRP }, { "brxlg", 0x45, INSTR_RIE_RRP }, - { "crb", 0xf6, INSTR_RRS_RRRDU }, - { "cgrb", 0xe4, INSTR_RRS_RRRDU }, - { "crj", 0x76, INSTR_RIE_RRPU }, + { { 0, LONG_INSN_RISBLG }, 0x51, INSTR_RIE_RRUUU }, + { "rnsbg", 0x54, INSTR_RIE_RRUUU }, + { "risbg", 0x55, INSTR_RIE_RRUUU }, + { "rosbg", 0x56, INSTR_RIE_RRUUU }, + { "rxsbg", 0x57, INSTR_RIE_RRUUU }, + { { 0, LONG_INSN_RISBGN }, 0x59, INSTR_RIE_RRUUU }, + { { 0, LONG_INSN_RISBHG }, 0x5D, INSTR_RIE_RRUUU }, { "cgrj", 0x64, INSTR_RIE_RRPU }, - { "cib", 0xfe, INSTR_RIS_RURDI }, - { "cgib", 0xfc, INSTR_RIS_RURDI }, - { "cij", 0x7e, INSTR_RIE_RUPI }, - { "cgij", 0x7c, INSTR_RIE_RUPI }, - { "cit", 0x72, INSTR_RIE_R0IU }, + { "clgrj", 0x65, INSTR_RIE_RRPU }, { "cgit", 0x70, INSTR_RIE_R0IU }, - { "clrb", 0xf7, INSTR_RRS_RRRDU }, - { "clgrb", 0xe5, INSTR_RRS_RRRDU }, + { "clgit", 0x71, INSTR_RIE_R0UU }, + { "cit", 0x72, INSTR_RIE_R0IU }, + { "clfit", 0x73, INSTR_RIE_R0UU }, + { "crj", 
0x76, INSTR_RIE_RRPU }, { "clrj", 0x77, INSTR_RIE_RRPU }, - { "clgrj", 0x65, INSTR_RIE_RRPU }, - { "clib", 0xff, INSTR_RIS_RURDU }, - { "clgib", 0xfd, INSTR_RIS_RURDU }, - { "clij", 0x7f, INSTR_RIE_RUPU }, + { "cgij", 0x7c, INSTR_RIE_RUPI }, { "clgij", 0x7d, INSTR_RIE_RUPU }, - { "clfit", 0x73, INSTR_RIE_R0UU }, - { "clgit", 0x71, INSTR_RIE_R0UU }, - { "rnsbg", 0x54, INSTR_RIE_RRUUU }, - { "rxsbg", 0x57, INSTR_RIE_RRUUU }, - { "rosbg", 0x56, INSTR_RIE_RRUUU }, - { "risbg", 0x55, INSTR_RIE_RRUUU }, - { { 0, LONG_INSN_RISBLG }, 0x51, INSTR_RIE_RRUUU }, - { { 0, LONG_INSN_RISBHG }, 0x5D, INSTR_RIE_RRUUU }, + { "cij", 0x7e, INSTR_RIE_RUPI }, + { "clij", 0x7f, INSTR_RIE_RUPU }, { "ahik", 0xd8, INSTR_RIE_RRI0 }, { "aghik", 0xd9, INSTR_RIE_RRI0 }, { { 0, LONG_INSN_ALHSIK }, 0xda, INSTR_RIE_RRI0 }, { { 0, LONG_INSN_ALGHSIK }, 0xdb, INSTR_RIE_RRI0 }, + { "cgrb", 0xe4, INSTR_RRS_RRRDU }, + { "clgrb", 0xe5, INSTR_RRS_RRRDU }, + { "crb", 0xf6, INSTR_RRS_RRRDU }, + { "clrb", 0xf7, INSTR_RRS_RRRDU }, + { "cgib", 0xfc, INSTR_RIS_RURDI }, + { "clgib", 0xfd, INSTR_RIS_RURDU }, + { "cib", 0xfe, INSTR_RIS_RURDI }, + { "clib", 0xff, INSTR_RIS_RURDU }, #endif { "", 0, INSTR_INVALID } }; -static struct insn opcode_ed[] = { +static struct s390_insn opcode_ed[] = { #ifdef CONFIG_64BIT { "mayl", 0x38, INSTR_RXF_FRRDF }, { "myl", 0x39, INSTR_RXF_FRRDF }, @@ -1297,20 +1490,24 @@ static struct insn opcode_ed[] = { { "my", 0x3b, INSTR_RXF_FRRDF }, { "mayh", 0x3c, INSTR_RXF_FRRDF }, { "myh", 0x3d, INSTR_RXF_FRRDF }, - { "ley", 0x64, INSTR_RXY_FRRD }, - { "ldy", 0x65, INSTR_RXY_FRRD }, - { "stey", 0x66, INSTR_RXY_FRRD }, - { "stdy", 0x67, INSTR_RXY_FRRD }, { "sldt", 0x40, INSTR_RXF_FRRDF }, - { "slxt", 0x48, INSTR_RXF_FRRDF }, { "srdt", 0x41, INSTR_RXF_FRRDF }, + { "slxt", 0x48, INSTR_RXF_FRRDF }, { "srxt", 0x49, INSTR_RXF_FRRDF }, { "tdcet", 0x50, INSTR_RXE_FRRD }, - { "tdcdt", 0x54, INSTR_RXE_FRRD }, - { "tdcxt", 0x58, INSTR_RXE_FRRD }, { "tdget", 0x51, INSTR_RXE_FRRD }, + { "tdcdt", 0x54, INSTR_RXE_FRRD }, { "tdgdt", 0x55, INSTR_RXE_FRRD }, + { "tdcxt", 0x58, INSTR_RXE_FRRD }, { "tdgxt", 0x59, INSTR_RXE_FRRD }, + { "ley", 0x64, INSTR_RXY_FRRD }, + { "ldy", 0x65, INSTR_RXY_FRRD }, + { "stey", 0x66, INSTR_RXY_FRRD }, + { "stdy", 0x67, INSTR_RXY_FRRD }, + { "czdt", 0xa8, INSTR_RSL_LRDFU }, + { "czxt", 0xa9, INSTR_RSL_LRDFU }, + { "cdzt", 0xaa, INSTR_RSL_LRDFU }, + { "cxzt", 0xab, INSTR_RSL_LRDFU }, #endif { "ldeb", 0x04, INSTR_RXE_FRRD }, { "lxdb", 0x05, INSTR_RXE_FRRD }, @@ -1353,7 +1550,7 @@ static struct insn opcode_ed[] = { /* Extracts an operand value from an instruction. 
*/ static unsigned int extract_operand(unsigned char *code, - const struct operand *operand) + const struct s390_operand *operand) { unsigned int val; int bits; @@ -1389,16 +1586,11 @@ static unsigned int extract_operand(unsigned char *code, return val; } -static inline int insn_length(unsigned char code) -{ - return ((((int) code + 64) >> 7) + 1) << 1; -} - -static struct insn *find_insn(unsigned char *code) +struct s390_insn *find_insn(unsigned char *code) { unsigned char opfrag = code[1]; unsigned char opmask; - struct insn *table; + struct s390_insn *table; switch (code[0]) { case 0x01: @@ -1410,6 +1602,9 @@ static struct insn *find_insn(unsigned char *code) case 0xa7: table = opcode_a7; break; + case 0xaa: + table = opcode_aa; + break; case 0xb2: table = opcode_b2; break; @@ -1470,11 +1665,39 @@ static struct insn *find_insn(unsigned char *code) return NULL; } +/** + * insn_to_mnemonic - decode an s390 instruction + * @instruction: instruction to decode + * @buf: buffer to fill with mnemonic + * @len: length of buffer + * + * Decode the instruction at @instruction and store the corresponding + * mnemonic into @buf of length @len. + * @buf is left unchanged if the instruction could not be decoded. + * Returns: + * %0 on success, %-ENOENT if the instruction was not found. + */ +int insn_to_mnemonic(unsigned char *instruction, char *buf, unsigned int len) +{ + struct s390_insn *insn; + + insn = find_insn(instruction); + if (!insn) + return -ENOENT; + if (insn->name[0] == '\0') + snprintf(buf, len, "%s", + long_insn_name[(int) insn->name[1]]); + else + snprintf(buf, len, "%.5s", insn->name); + return 0; +} +EXPORT_SYMBOL_GPL(insn_to_mnemonic); + static int print_insn(char *buffer, unsigned char *code, unsigned long addr) { - struct insn *insn; + struct s390_insn *insn; const unsigned char *ops; - const struct operand *operand; + const struct s390_operand *operand; unsigned int value; char separator; char *ptr; @@ -1533,7 +1756,7 @@ static int print_insn(char *buffer, unsigned char *code, unsigned long addr) void show_code(struct pt_regs *regs) { - char *mode = (regs->psw.mask & PSW_MASK_PSTATE) ? "User" : "Krnl"; + char *mode = user_mode(regs) ? "User" : "Krnl"; unsigned char code[64]; char buffer[64], *ptr; mm_segment_t old_fs; @@ -1542,7 +1765,7 @@ void show_code(struct pt_regs *regs) /* Get a snapshot of the 64 bytes surrounding the fault address. */ old_fs = get_fs(); - set_fs((regs->psw.mask & PSW_MASK_PSTATE) ? USER_DS : KERNEL_DS); + set_fs(user_mode(regs) ? USER_DS : KERNEL_DS); for (start = 32; start && regs->psw.addr >= 34 - start; start -= 2) { addr = regs->psw.addr - 34 + start; if (__copy_from_user(code + start - 2, @@ -1603,3 +1826,28 @@ void show_code(struct pt_regs *regs) } printk("\n"); } + +void print_fn_code(unsigned char *code, unsigned long len) +{ + char buffer[64], *ptr; + int opsize, i; + + while (len) { + ptr = buffer; + opsize = insn_length(*code); + if (opsize > len) + break; + ptr += sprintf(ptr, "%p: ", code); + for (i = 0; i < opsize; i++) + ptr += sprintf(ptr, "%02x", code[i]); + *ptr++ = '\t'; + if (i < 4) + *ptr++ = '\t'; + ptr += print_insn(ptr, code, (unsigned long) code); + *ptr++ = '\n'; + *ptr++ = 0; + printk(buffer); + code += opsize; + len -= opsize; + } +} diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c new file mode 100644 index 00000000000..acb412442e5 --- /dev/null +++ b/arch/s390/kernel/dumpstack.c @@ -0,0 +1,217 @@ +/* + * Stack dumping functions + * + * Copyright IBM Corp. 
1999, 2013
+ */
+
+#include <linux/kallsyms.h>
+#include <linux/hardirq.h>
+#include <linux/kprobes.h>
+#include <linux/utsname.h>
+#include <linux/export.h>
+#include <linux/kdebug.h>
+#include <linux/ptrace.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <asm/processor.h>
+#include <asm/debug.h>
+#include <asm/dis.h>
+#include <asm/ipl.h>
+
+#ifndef CONFIG_64BIT
+#define LONG "%08lx "
+#define FOURLONG "%08lx %08lx %08lx %08lx\n"
+static int kstack_depth_to_print = 12;
+#else /* CONFIG_64BIT */
+#define LONG "%016lx "
+#define FOURLONG "%016lx %016lx %016lx %016lx\n"
+static int kstack_depth_to_print = 20;
+#endif /* CONFIG_64BIT */
+
+/*
+ * For show_trace we have three different stacks to consider:
+ *   - the panic stack which is used if the kernel stack has overflowed
+ *   - the asynchronous interrupt stack (cpu related)
+ *   - the synchronous kernel stack (process related)
+ * The stack trace can start at any of the three stacks and can potentially
+ * touch all of them. The order is: panic stack, async stack, sync stack.
+ */
+static unsigned long
+__show_trace(unsigned long sp, unsigned long low, unsigned long high)
+{
+	struct stack_frame *sf;
+	struct pt_regs *regs;
+	unsigned long addr;
+
+	while (1) {
+		sp = sp & PSW_ADDR_INSN;
+		if (sp < low || sp > high - sizeof(*sf))
+			return sp;
+		sf = (struct stack_frame *) sp;
+		addr = sf->gprs[8] & PSW_ADDR_INSN;
+		printk("([<%016lx>] %pSR)\n", addr, (void *)addr);
+		/* Follow the backchain. */
+		while (1) {
+			low = sp;
+			sp = sf->back_chain & PSW_ADDR_INSN;
+			if (!sp)
+				break;
+			if (sp <= low || sp > high - sizeof(*sf))
+				return sp;
+			sf = (struct stack_frame *) sp;
+			addr = sf->gprs[8] & PSW_ADDR_INSN;
+			printk(" [<%016lx>] %pSR\n", addr, (void *)addr);
+		}
+		/* Zero backchain detected, check for interrupt frame. */
+		sp = (unsigned long) (sf + 1);
+		if (sp <= low || sp > high - sizeof(*regs))
+			return sp;
+		regs = (struct pt_regs *) sp;
+		addr = regs->psw.addr & PSW_ADDR_INSN;
+		printk(" [<%016lx>] %pSR\n", addr, (void *)addr);
+		low = sp;
+		sp = regs->gprs[15];
+	}
+}
+
+static void show_trace(struct task_struct *task, unsigned long *stack)
+{
+	const unsigned long frame_size =
+		STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
+	register unsigned long __r15 asm ("15");
+	unsigned long sp;
+
+	sp = (unsigned long) stack;
+	if (!sp)
+		sp = task ? task->thread.ksp : __r15;
+	printk("Call Trace:\n");
+#ifdef CONFIG_CHECK_STACK
+	sp = __show_trace(sp,
+			  S390_lowcore.panic_stack + frame_size - 4096,
+			  S390_lowcore.panic_stack + frame_size);
+#endif
+	sp = __show_trace(sp,
+			  S390_lowcore.async_stack + frame_size - ASYNC_SIZE,
+			  S390_lowcore.async_stack + frame_size);
+	if (task)
+		__show_trace(sp, (unsigned long) task_stack_page(task),
+			     (unsigned long) task_stack_page(task) + THREAD_SIZE);
+	else
+		__show_trace(sp, S390_lowcore.thread_info,
+			     S390_lowcore.thread_info + THREAD_SIZE);
+	if (!task)
+		task = current;
+	debug_show_held_locks(task);
+}
+
+void show_stack(struct task_struct *task, unsigned long *sp)
+{
+	register unsigned long *__r15 asm ("15");
+	unsigned long *stack;
+	int i;
+
+	if (!sp)
+		stack = task ? (unsigned long *) task->thread.ksp : __r15;
+	else
+		stack = sp;
+
+	for (i = 0; i < kstack_depth_to_print; i++) {
+		if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
+			break;
+		if ((i * sizeof(long) % 32) == 0)
+			printk("%s ", i == 0 ?
"" : "\n"); + printk(LONG, *stack++); + } + printk("\n"); + show_trace(task, sp); +} + +static void show_last_breaking_event(struct pt_regs *regs) +{ +#ifdef CONFIG_64BIT + printk("Last Breaking-Event-Address:\n"); + printk(" [<%016lx>] %pSR\n", regs->args[0], (void *)regs->args[0]); +#endif +} + +static inline int mask_bits(struct pt_regs *regs, unsigned long bits) +{ + return (regs->psw.mask & bits) / ((~bits + 1) & bits); +} + +void show_registers(struct pt_regs *regs) +{ + char *mode; + + mode = user_mode(regs) ? "User" : "Krnl"; + printk("%s PSW : %p %p", mode, (void *)regs->psw.mask, (void *)regs->psw.addr); + if (!user_mode(regs)) + printk(" (%pSR)", (void *)regs->psw.addr); + printk("\n"); + printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x " + "P:%x AS:%x CC:%x PM:%x", mask_bits(regs, PSW_MASK_PER), + mask_bits(regs, PSW_MASK_DAT), mask_bits(regs, PSW_MASK_IO), + mask_bits(regs, PSW_MASK_EXT), mask_bits(regs, PSW_MASK_KEY), + mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT), + mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC), + mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM)); +#ifdef CONFIG_64BIT + printk(" EA:%x", mask_bits(regs, PSW_MASK_EA | PSW_MASK_BA)); +#endif + printk("\n%s GPRS: " FOURLONG, mode, + regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]); + printk(" " FOURLONG, + regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]); + printk(" " FOURLONG, + regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]); + printk(" " FOURLONG, + regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]); + show_code(regs); +} + +void show_regs(struct pt_regs *regs) +{ + show_regs_print_info(KERN_DEFAULT); + show_registers(regs); + /* Show stack backtrace if pt_regs is from kernel mode */ + if (!user_mode(regs)) + show_trace(NULL, (unsigned long *) regs->gprs[15]); + show_last_breaking_event(regs); +} + +static DEFINE_SPINLOCK(die_lock); + +void die(struct pt_regs *regs, const char *str) +{ + static int die_counter; + + oops_enter(); + lgr_info_log(); + debug_stop_all(); + console_verbose(); + spin_lock_irq(&die_lock); + bust_spinlocks(1); + printk("%s: %04x [#%d] ", str, regs->int_code & 0xffff, ++die_counter); +#ifdef CONFIG_PREEMPT + printk("PREEMPT "); +#endif +#ifdef CONFIG_SMP + printk("SMP "); +#endif +#ifdef CONFIG_DEBUG_PAGEALLOC + printk("DEBUG_PAGEALLOC"); +#endif + printk("\n"); + notify_die(DIE_OOPS, str, regs, 0, regs->int_code & 0xffff, SIGSEGV); + print_modules(); + show_regs(regs); + bust_spinlocks(0); + add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); + spin_unlock_irq(&die_lock); + if (in_interrupt()) + panic("Fatal exception in interrupt"); + if (panic_on_oops) + panic("Fatal exception: panic_on_oops"); + oops_exit(); + do_exit(SIGSEGV); +} diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index 52098d6dfaa..0dff972a169 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c @@ -1,6 +1,4 @@ /* - * arch/s390/kernel/early.c - * * Copyright IBM Corp. 2007, 2009 * Author(s): Hongjie Yang <hongjie@us.ibm.com>, * Heiko Carstens <heiko.carstens@de.ibm.com> @@ -29,6 +27,7 @@ #include <asm/sysinfo.h> #include <asm/cpcmd.h> #include <asm/sclp.h> +#include <asm/facility.h> #include "entry.h" /* @@ -48,10 +47,10 @@ static void __init reset_tod_clock(void) { u64 time; - if (store_clock(&time) == 0) + if (store_tod_clock(&time) == 0) return; /* TOD clock not running. Set the clock to Unix Epoch. 
*/ - if (set_clock(TOD_UNIX_EPOCH) != 0 || store_clock(&time) != 0) + if (set_tod_clock(TOD_UNIX_EPOCH) != 0 || store_tod_clock(&time) != 0) disabled_wait(0); sched_clock_base_cc = TOD_UNIX_EPOCH; @@ -174,7 +173,7 @@ static noinline __init void create_kernel_nss(void) } /* re-initialize cputime accounting. */ - sched_clock_base_cc = get_clock(); + sched_clock_base_cc = get_tod_clock(); S390_lowcore.last_update_clock = sched_clock_base_cc; S390_lowcore.last_update_timer = 0x7fffffffffffffffULL; S390_lowcore.user_timer = 0; @@ -207,6 +206,7 @@ static noinline __init void clear_bss_section(void) */ static noinline __init void init_kernel_storage_key(void) { +#if PAGE_DEFAULT_KEY unsigned long end_pfn, init_pfn; end_pfn = PFN_UP(__pa(&_end)); @@ -214,38 +214,63 @@ static noinline __init void init_kernel_storage_key(void) for (init_pfn = 0 ; init_pfn < end_pfn; init_pfn++) page_set_storage_key(init_pfn << PAGE_SHIFT, PAGE_DEFAULT_KEY, 0); +#endif } -static __initdata struct sysinfo_3_2_2 vmms __aligned(PAGE_SIZE); +static __initdata char sysinfo_page[PAGE_SIZE] __aligned(PAGE_SIZE); static noinline __init void detect_machine_type(void) { + struct sysinfo_3_2_2 *vmms = (struct sysinfo_3_2_2 *)&sysinfo_page; + /* Check current-configuration-level */ - if ((stsi(NULL, 0, 0, 0) >> 28) <= 2) { + if (stsi(NULL, 0, 0, 0) <= 2) { S390_lowcore.machine_flags |= MACHINE_FLAG_LPAR; return; } /* Get virtual-machine cpu information. */ - if (stsi(&vmms, 3, 2, 2) == -ENOSYS || !vmms.count) + if (stsi(vmms, 3, 2, 2) || !vmms->count) return; /* Running under KVM? If not we assume z/VM */ - if (!memcmp(vmms.vm[0].cpi, "\xd2\xe5\xd4", 3)) + if (!memcmp(vmms->vm[0].cpi, "\xd2\xe5\xd4", 3)) S390_lowcore.machine_flags |= MACHINE_FLAG_KVM; else S390_lowcore.machine_flags |= MACHINE_FLAG_VM; } -static __init void early_pgm_check_handler(void) +static __init void setup_topology(void) +{ +#ifdef CONFIG_64BIT + int max_mnest; + + if (!test_facility(11)) + return; + S390_lowcore.machine_flags |= MACHINE_FLAG_TOPOLOGY; + for (max_mnest = 6; max_mnest > 1; max_mnest--) { + if (stsi(&sysinfo_page, 15, 1, max_mnest) == 0) + break; + } + topology_max_mnest = max_mnest; +#endif +} + +static void early_pgm_check_handler(void) { - unsigned long addr; const struct exception_table_entry *fixup; + unsigned long cr0, cr0_new; + unsigned long addr; addr = S390_lowcore.program_old_psw.addr; fixup = search_exception_tables(addr & PSW_ADDR_INSN); if (!fixup) disabled_wait(0); - S390_lowcore.program_old_psw.addr = fixup->fixup | PSW_ADDR_AMODE; + /* Disable low address protection before storing into lowcore. 
*/
+	__ctl_store(cr0, 0, 0);
+	cr0_new = cr0 & ~(1UL << 28);
+	__ctl_load(cr0_new, 0, 0);
+	S390_lowcore.program_old_psw.addr = extable_fixup(fixup)|PSW_ADDR_AMODE;
+	__ctl_load(cr0, 0, 0);
 }
 
 static noinline __init void setup_lowcore_early(void)
@@ -262,35 +287,8 @@ static noinline __init void setup_lowcore_early(void)
 
 static noinline __init void setup_facility_list(void)
 {
-	unsigned long nr;
-
-	S390_lowcore.stfl_fac_list = 0;
-	asm volatile(
-		"	.insn	s,0xb2b10000,0(0)\n" /* stfl */
-		"0:\n"
-		EX_TABLE(0b,0b) : "=m" (S390_lowcore.stfl_fac_list));
-	memcpy(&S390_lowcore.stfle_fac_list, &S390_lowcore.stfl_fac_list, 4);
-	nr = 4;	/* # bytes stored by stfl */
-	if (test_facility(7)) {
-		/* More facility bits available with stfle */
-		register unsigned long reg0 asm("0") = MAX_FACILITY_BIT/64 - 1;
-		asm volatile(".insn s,0xb2b00000,%0" /* stfle */
-			     : "=m" (S390_lowcore.stfle_fac_list), "+d" (reg0)
-			     : : "cc");
-		nr = (reg0 + 1) * 8;	/* # bytes stored by stfle */
-	}
-	memset((char *) S390_lowcore.stfle_fac_list + nr, 0,
-	       MAX_FACILITY_BIT/8 - nr);
-}
-
-static noinline __init void setup_hpage(void)
-{
-#ifndef CONFIG_DEBUG_PAGEALLOC
-	if (!test_facility(2) || !test_facility(8))
-		return;
-	S390_lowcore.machine_flags |= MACHINE_FLAG_HPAGE;
-	__ctl_set_bit(0, 23);
-#endif
+	stfle(S390_lowcore.stfle_fac_list,
+	      ARRAY_SIZE(S390_lowcore.stfle_fac_list));
 }
 
 static __init void detect_mvpg(void)
@@ -380,18 +378,22 @@ static __init void detect_diag44(void)
 static __init void detect_machine_facilities(void)
 {
 #ifdef CONFIG_64BIT
+	if (test_facility(8)) {
+		S390_lowcore.machine_flags |= MACHINE_FLAG_EDAT1;
+		__ctl_set_bit(0, 23);
+	}
+	if (test_facility(78))
+		S390_lowcore.machine_flags |= MACHINE_FLAG_EDAT2;
 	if (test_facility(3))
 		S390_lowcore.machine_flags |= MACHINE_FLAG_IDTE;
-	if (test_facility(8))
-		S390_lowcore.machine_flags |= MACHINE_FLAG_PFMF;
-	if (test_facility(11))
-		S390_lowcore.machine_flags |= MACHINE_FLAG_TOPOLOGY;
-	if (test_facility(27))
-		S390_lowcore.machine_flags |= MACHINE_FLAG_MVCOS;
 	if (test_facility(40))
-		S390_lowcore.machine_flags |= MACHINE_FLAG_SPP;
-	if (test_facility(25))
-		S390_lowcore.machine_flags |= MACHINE_FLAG_STCKF;
+		S390_lowcore.machine_flags |= MACHINE_FLAG_LPP;
+	if (test_facility(50) && test_facility(73))
+		S390_lowcore.machine_flags |= MACHINE_FLAG_TE;
+	if (test_facility(66))
+		S390_lowcore.machine_flags |= MACHINE_FLAG_RRBM;
+	if (test_facility(51))
+		S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC;
 #endif
 }
 
@@ -461,7 +463,6 @@ static void __init setup_boot_command_line(void)
 	append_to_cmdline(append_ipl_scpdata);
 }
 
-
 /*
  * Save ipl parameters, clear bss memory, initialize storage keys
  * and create a kernel NSS at startup if the SAVESYS= parm is defined
 */
 void __init startup_init(void)
 {
 	init_kernel_storage_key();
 	lockdep_init();
 	lockdep_off();
-	sort_main_extable();
 	setup_lowcore_early();
 	setup_facility_list();
 	detect_machine_type();
@@ -488,9 +488,8 @@ void __init startup_init(void)
 	detect_diag9c();
 	detect_diag44();
 	detect_machine_facilities();
-	setup_hpage();
-	sclp_facilities_detect();
-	detect_memory_layout(memory_chunk);
+	setup_topology();
+	sclp_early_detect();
 #ifdef CONFIG_DYNAMIC_FTRACE
 	S390_lowcore.ftrace_func = (unsigned long)ftrace_caller;
 #endif
diff --git a/arch/s390/kernel/ebcdic.c b/arch/s390/kernel/ebcdic.c
index cc0dc609d73..b971c6be629 100644
--- a/arch/s390/kernel/ebcdic.c
+++ b/arch/s390/kernel/ebcdic.c
@@ -1,10 +1,9 @@
 /*
- * arch/s390/kernel/ebcdic.c
  * EBCDIC -> ASCII, ASCII -> EBCDIC,
 * upper to lower case
(EBCDIC) conversion tables. * * S390 version - * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Copyright IBM Corp. 1999 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> * Martin Peschke <peschke@fh-brandenburg.de> */ diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index 3705700ed37..70203265196 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -1,8 +1,7 @@ /* - * arch/s390/kernel/entry.S * S390 low-level entry points. * - * Copyright (C) IBM Corp. 1999,2006 + * Copyright IBM Corp. 1999, 2012 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), * Hartmut Penner (hp@de.ibm.com), * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), @@ -11,6 +10,7 @@ #include <linux/init.h> #include <linux/linkage.h> +#include <asm/processor.h> #include <asm/cache.h> #include <asm/errno.h> #include <asm/ptrace.h> @@ -18,6 +18,8 @@ #include <asm/asm-offsets.h> #include <asm/unistd.h> #include <asm/page.h> +#include <asm/sigp.h> +#include <asm/irq.h> __PT_R0 = __PT_GPRS __PT_R1 = __PT_GPRS + 4 @@ -36,15 +38,15 @@ __PT_R13 = __PT_GPRS + 524 __PT_R14 = __PT_GPRS + 56 __PT_R15 = __PT_GPRS + 60 -_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ - _TIF_MCCK_PENDING | _TIF_PER_TRAP ) -_TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ - _TIF_MCCK_PENDING) -_TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \ - _TIF_SYSCALL_TRACEPOINT) - STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER STACK_SIZE = 1 << STACK_SHIFT +STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE + +_TIF_WORK = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED) +_TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \ + _TIF_SYSCALL_TRACEPOINT) +_CIF_WORK = (_CIF_MCCK_PENDING | _CIF_ASCE) +_PIF_WORK = (_PIF_PER_TRAP) #define BASED(name) name-system_call(%r13) @@ -97,22 +99,22 @@ STACK_SIZE = 1 << STACK_SHIFT sra %r14,\shift jnz 1f CHECK_STACK 1<<\shift,\savearea + ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) j 2f 1: l %r15,\stack # load target stack -2: ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) - la %r11,STACK_FRAME_OVERHEAD(%r15) +2: la %r11,STACK_FRAME_OVERHEAD(%r15) .endm .macro ADD64 high,low,timer al \high,\timer - al \low,\timer+4 + al \low,4+\timer brc 12,.+8 ahi \high,1 .endm .macro SUB64 high,low,timer sl \high,\timer - sl \low,\timer+4 + sl \low,4+\timer brc 3,.+8 ahi \high,-1 .endm @@ -145,22 +147,19 @@ STACK_SIZE = 1 << STACK_SHIFT * gpr2 = prev */ ENTRY(__switch_to) + stm %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task + st %r15,__THREAD_ksp(%r2) # store kernel stack of prev l %r4,__THREAD_info(%r2) # get thread_info of prev l %r5,__THREAD_info(%r3) # get thread_info of next - tm __TI_flags+3(%r4),_TIF_MCCK_PENDING # machine check pending? - jz 0f - ni __TI_flags+3(%r4),255-_TIF_MCCK_PENDING # clear flag in prev - oi __TI_flags+3(%r5),_TIF_MCCK_PENDING # set it in next -0: stm %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task - st %r15,__THREAD_ksp(%r2) # store kernel stack of prev - l %r15,__THREAD_ksp(%r3) # load kernel stack of next - lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 
4 - lm %r6,%r15,__SF_GPRS(%r15) # load gprs of next task + lr %r15,%r5 + ahi %r15,STACK_INIT # end of kernel stack of next st %r3,__LC_CURRENT # store task struct of next - mvc __LC_CURRENT_PID(4,%r0),__TASK_pid(%r3) # store pid of next st %r5,__LC_THREAD_INFO # store thread info of next - ahi %r5,STACK_SIZE # end of kernel stack of next - st %r5,__LC_KERNEL_STACK # store end of kernel stack + st %r15,__LC_KERNEL_STACK # store end of kernel stack + lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4 + mvc __LC_CURRENT_PID(4,%r0),__TASK_pid(%r3) # store pid of next + l %r15,__THREAD_ksp(%r3) # load kernel stack of next + lm %r6,%r15,__SF_GPRS(%r15) # load gprs of next task br %r14 __critical_start: @@ -175,9 +174,9 @@ sysc_stm: stm %r8,%r15,__LC_SAVE_AREA_SYNC l %r12,__LC_THREAD_INFO l %r13,__LC_SVC_NEW_PSW+4 + lhi %r14,_PIF_SYSCALL sysc_per: l %r15,__LC_KERNEL_STACK - ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs sysc_vtime: UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER @@ -185,8 +184,9 @@ sysc_vtime: mvc __PT_R8(32,%r11),__LC_SAVE_AREA_SYNC mvc __PT_PSW(8,%r11),__LC_SVC_OLD_PSW mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC + st %r14,__PT_FLAGS(%r11) sysc_do_svc: - oi __TI_flags+3(%r12),_TIF_SYSCALL + l %r10,__TI_sysc_table(%r12) # 31 bit system call table lh %r8,__PT_INT_CODE+2(%r11) sla %r8,2 # shift and test for svc0 jnz sysc_nr_ok @@ -197,12 +197,11 @@ sysc_do_svc: lr %r8,%r1 sla %r8,2 sysc_nr_ok: - l %r10,BASED(.Lsys_call_table) # 31 bit system call table xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) st %r2,__PT_ORIG_GPR2(%r11) st %r7,STACK_FRAME_OVERHEAD(%r15) l %r9,0(%r8,%r10) # get system call addr. - tm __TI_flags+2(%r12),_TIF_TRACE >> 8 + tm __TI_flags+3(%r12),_TIF_TRACE jnz sysc_tracesys basr %r14,%r9 # call sys_xxxx st %r2,__PT_R2(%r11) # store return value @@ -212,9 +211,12 @@ sysc_return: sysc_tif: tm __PT_PSW+1(%r11),0x01 # returning to user ? jno sysc_restore - tm __TI_flags+3(%r12),_TIF_WORK_SVC - jnz sysc_work # check for work - ni __TI_flags+3(%r12),255-_TIF_SYSCALL + tm __PT_FLAGS+3(%r11),_PIF_WORK + jnz sysc_work + tm __TI_flags+3(%r12),_TIF_WORK + jnz sysc_work # check for thread work + tm __LC_CPU_FLAGS+3,_CIF_WORK + jnz sysc_work sysc_restore: mvc __LC_RETURN_PSW(8),__PT_PSW(%r11) stpt __LC_EXIT_TIMER @@ -226,16 +228,18 @@ sysc_done: # One of the work bits is on. Find out which one. 
# sysc_work: - tm __TI_flags+3(%r12),_TIF_MCCK_PENDING + tm __LC_CPU_FLAGS+3,_CIF_MCCK_PENDING jo sysc_mcck_pending tm __TI_flags+3(%r12),_TIF_NEED_RESCHED jo sysc_reschedule + tm __PT_FLAGS+3(%r11),_PIF_PER_TRAP + jo sysc_singlestep tm __TI_flags+3(%r12),_TIF_SIGPENDING jo sysc_sigpending tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME jo sysc_notify_resume - tm __TI_flags+3(%r12),_TIF_PER_TRAP - jo sysc_singlestep + tm __LC_CPU_FLAGS+3,_CIF_ASCE + jo sysc_uaccess j sysc_return # beware of critical section cleanup # @@ -247,7 +251,7 @@ sysc_reschedule: br %r1 # call schedule # -# _TIF_MCCK_PENDING is set, call handler +# _CIF_MCCK_PENDING is set, call handler # sysc_mcck_pending: l %r1,BASED(.Lhandle_mcck) @@ -255,16 +259,24 @@ sysc_mcck_pending: br %r1 # TIF bit will be cleared by handler # +# _CIF_ASCE is set, load user space asce +# +sysc_uaccess: + ni __LC_CPU_FLAGS+3,255-_CIF_ASCE + lctl %c1,%c1,__LC_USER_ASCE # load primary asce + j sysc_return + +# # _TIF_SIGPENDING is set, call do_signal # sysc_sigpending: - ni __TI_flags+3(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP lr %r2,%r11 # pass pointer to pt_regs l %r1,BASED(.Ldo_signal) basr %r14,%r1 # call do_signal - tm __TI_flags+3(%r12),_TIF_SYSCALL + tm __PT_FLAGS+3(%r11),_PIF_SYSCALL jno sysc_return lm %r2,%r7,__PT_R2(%r11) # load svc arguments + l %r10,__TI_sysc_table(%r12) # 31 bit system call table xr %r8,%r8 # svc 0 returns -ENOSYS clc __PT_INT_CODE+2(2,%r11),BASED(.Lnr_syscalls+2) jnl sysc_nr_ok # invalid svc number -> do svc 0 @@ -282,10 +294,10 @@ sysc_notify_resume: br %r1 # call do_notify_resume # -# _TIF_PER_TRAP is set, call do_per_trap +# _PIF_PER_TRAP is set, call do_per_trap # sysc_singlestep: - ni __TI_flags+3(%r12),255-(_TIF_SYSCALL | _TIF_PER_TRAP) + ni __PT_FLAGS+3(%r11),255-_PIF_PER_TRAP lr %r2,%r11 # pass pointer to pt_regs l %r1,BASED(.Ldo_per_trap) la %r14,BASED(sysc_return) @@ -315,7 +327,7 @@ sysc_tracego: basr %r14,%r9 # call sys_xxx st %r2,__PT_R2(%r11) # store return value sysc_tracenogo: - tm __TI_flags+2(%r12),_TIF_TRACE >> 8 + tm __TI_flags+3(%r12),_TIF_TRACE jz sysc_return l %r1,BASED(.Ltrace_exit) lr %r2,%r11 # pass pointer to pt_regs @@ -329,48 +341,19 @@ ENTRY(ret_from_fork) la %r11,STACK_FRAME_OVERHEAD(%r15) l %r12,__LC_THREAD_INFO l %r13,__LC_SVC_NEW_PSW+4 - tm __PT_PSW+1(%r11),0x01 # forking a kernel thread ? - jo 0f - st %r15,__PT_R15(%r11) # store stack pointer for new kthread -0: l %r1,BASED(.Lschedule_tail) + l %r1,BASED(.Lschedule_tail) basr %r14,%r1 # call schedule_tail TRACE_IRQS_ON ssm __LC_SVC_NEW_PSW # reenable interrupts + tm __PT_PSW+1(%r11),0x01 # forking a kernel thread ? + jne sysc_tracenogo + # it's a kernel thread + lm %r9,%r10,__PT_R9(%r11) # load gprs +ENTRY(kernel_thread_starter) + la %r2,0(%r10) + basr %r14,%r9 j sysc_tracenogo -# -# kernel_execve function needs to deal with pt_regs that is not -# at the usual place -# -ENTRY(kernel_execve) - stm %r12,%r15,48(%r15) - lr %r14,%r15 - l %r13,__LC_SVC_NEW_PSW+4 - ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) - st %r14,__SF_BACKCHAIN(%r15) - la %r12,STACK_FRAME_OVERHEAD(%r15) - xc 0(__PT_SIZE,%r12),0(%r12) - l %r1,BASED(.Ldo_execve) - lr %r5,%r12 - basr %r14,%r1 # call do_execve - ltr %r2,%r2 - je 0f - ahi %r15,(STACK_FRAME_OVERHEAD + __PT_SIZE) - lm %r12,%r15,48(%r15) - br %r14 - # execve succeeded. -0: ssm __LC_PGM_NEW_PSW # disable I/O and ext. 
interrupts
-	l	%r15,__LC_KERNEL_STACK	# load ksp
-	ahi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
-	la	%r11,STACK_FRAME_OVERHEAD(%r15)
-	mvc	0(__PT_SIZE,%r11),0(%r12)	# copy pt_regs
-	l	%r12,__LC_THREAD_INFO
-	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
-	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
-	l	%r1,BASED(.Lexecve_tail)
-	basr	%r14,%r1		# call execve_tail
-	j	sysc_return
-
 /*
  * Program check handler routine
  */
@@ -388,25 +371,26 @@ ENTRY(pgm_check_handler)
 	tm	__LC_PGM_ILC+3,0x80	# check for per exception
 	jnz	pgm_svcper		# -> single stepped svc
 0:	CHECK_STACK	STACK_SIZE,__LC_SAVE_AREA_SYNC
+	ahi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
 	j	2f
 1:	UPDATE_VTIME	%r14,%r15,__LC_SYNC_ENTER_TIMER
 	l	%r15,__LC_KERNEL_STACK
-2:	ahi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
-	la	%r11,STACK_FRAME_OVERHEAD(%r15)
+2:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
 	stm	%r0,%r7,__PT_R0(%r11)
 	mvc	__PT_R8(32,%r11),__LC_SAVE_AREA_SYNC
 	stm	%r8,%r9,__PT_PSW(%r11)
 	mvc	__PT_INT_CODE(4,%r11),__LC_PGM_ILC
 	mvc	__PT_INT_PARM_LONG(4,%r11),__LC_TRANS_EXC_CODE
+	xc	__PT_FLAGS(4,%r11),__PT_FLAGS(%r11)
 	tm	__LC_PGM_ILC+3,0x80	# check for per exception
 	jz	0f
 	l	%r1,__TI_task(%r12)
 	tmh	%r8,0x0001		# kernel per event ?
 	jz	pgm_kprobe
-	oi	__TI_flags+3(%r12),_TIF_PER_TRAP
+	oi	__PT_FLAGS+3(%r11),_PIF_PER_TRAP
 	mvc	__THREAD_per_address(4,%r1),__LC_PER_ADDRESS
-	mvc	__THREAD_per_cause(2,%r1),__LC_PER_CAUSE
-	mvc	__THREAD_per_paid(1,%r1),__LC_PER_PAID
+	mvc	__THREAD_per_cause(2,%r1),__LC_PER_CODE
+	mvc	__THREAD_per_paid(1,%r1),__LC_PER_ACCESS_ID
 0:	REENABLE_IRQS
 	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
 	l	%r1,BASED(.Ljump_table)
@@ -434,9 +418,9 @@ pgm_kprobe:
 # single stepped system call
 #
 pgm_svcper:
-	oi	__TI_flags+3(%r12),_TIF_PER_TRAP
 	mvc	__LC_RETURN_PSW(4),__LC_SVC_NEW_PSW
 	mvc	__LC_RETURN_PSW+4(4),BASED(.Lsysc_per)
+	lhi	%r14,_PIF_SYSCALL | _PIF_PER_TRAP
 	lpsw	__LC_RETURN_PSW		# branch to sysc_per and enable irqs
 
 /*
@@ -458,20 +442,35 @@ io_skip:
 	stm	%r0,%r7,__PT_R0(%r11)
 	mvc	__PT_R8(32,%r11),__LC_SAVE_AREA_ASYNC
 	stm	%r8,%r9,__PT_PSW(%r11)
+	mvc	__PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
+	xc	__PT_FLAGS(4,%r11),__PT_FLAGS(%r11)
 	TRACE_IRQS_OFF
 	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
+io_loop:
 	l	%r1,BASED(.Ldo_IRQ)
 	lr	%r2,%r11		# pass pointer to pt_regs
+	lhi	%r3,IO_INTERRUPT
+	tm	__PT_INT_CODE+8(%r11),0x80	# adapter interrupt ?
+	jz	io_call
+	lhi	%r3,THIN_INTERRUPT
+io_call:
 	basr	%r14,%r1		# call do_IRQ
+	tm	__LC_MACHINE_FLAGS+2,0x10	# MACHINE_FLAG_LPAR
+	jz	io_return
+	tpi	0
+	jz	io_return
+	mvc	__PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
+	j	io_loop
 io_return:
 	LOCKDEP_SYS_EXIT
 	TRACE_IRQS_ON
 io_tif:
-	tm	__TI_flags+3(%r12),_TIF_WORK_INT
+	tm	__TI_flags+3(%r12),_TIF_WORK
 	jnz	io_work			# there is work to do (signals etc.)
+	tm	__LC_CPU_FLAGS+3,_CIF_WORK
+	jnz	io_work
 io_restore:
 	mvc	__LC_RETURN_PSW(8),__PT_PSW(%r11)
-	ni	__LC_RETURN_PSW+1,0xfd	# clean wait state bit
 	stpt	__LC_EXIT_TIMER
 	lm	%r0,%r15,__PT_R0(%r11)
 	lpsw	__LC_RETURN_PSW
@@ -479,7 +478,7 @@ io_done:
 
 #
 # There is work to do, find out in which context we have been interrupted:
-# 1) if we return to user space we can do all _TIF_WORK_INT work
+# 1) if we return to user space we can do all _TIF_WORK work
 # 2) if we return to kernel code and preemptive scheduling is enabled check
 #    the preemption counter and if it is zero call preempt_schedule_irq
 # Before any work can be done, a switch to the kernel stack is required.
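
The insn_length() helper that this patch moves out of dis.c derives an instruction's size from the top two bits of the first opcode byte (00 -> 2 bytes, 01 or 10 -> 4 bytes, 11 -> 6 bytes). A small stand-alone sketch of that arithmetic; the opcode values in main() are chosen only for illustration:

#include <stdio.h>

/* Same expression as the kernel helper: the two high bits of the first
 * opcode byte select the instruction length. */
static int insn_length(unsigned char code)
{
	return ((((int) code + 64) >> 7) + 1) << 1;
}

int main(void)
{
	printf("%d %d %d %d\n",
	       insn_length(0x07),	/* 00xxxxxx -> 2 bytes (e.g. bcr) */
	       insn_length(0x47),	/* 01xxxxxx -> 4 bytes (e.g. bc) */
	       insn_length(0xb2),	/* 10xxxxxx -> 4 bytes (b2xx) */
	       insn_length(0xc0));	/* 11xxxxxx -> 6 bytes (c0xx) */
	return 0;	/* prints: 2 4 4 6 */
}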
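
The three-stacks comment added in dumpstack.c above describes how __show_trace() follows the backchain: word 0 of each frame points to the caller's frame further up the (downward-growing) stack, and gprs[8] holds the saved r14, i.e. the return address. A minimal user-space model of that walk, with fabricated frames and addresses (walk_backchain() is our name, not a kernel symbol):

#include <stdio.h>

struct stack_frame {
	unsigned long back_chain;	/* offset 0: caller's frame, or 0 */
	unsigned long empty1[5];
	unsigned long gprs[10];		/* saved r6..r15; gprs[8] is r14 */
};

/* Walk a backchain inside [low, high), printing each saved return
 * address, in the spirit of the inner loop of __show_trace(). */
static void walk_backchain(unsigned long sp, unsigned long low,
			   unsigned long high)
{
	struct stack_frame *sf;

	while (sp >= low && sp <= high - sizeof(*sf)) {
		sf = (struct stack_frame *) sp;
		printf(" [<%lx>]\n", sf->gprs[8]);
		/* chain must point upward; zero or bogus values stop us */
		if (sf->back_chain <= sp)
			break;
		sp = sf->back_chain;
	}
}

int main(void)
{
	unsigned long stack[512] = { 0 };
	struct stack_frame *callee = (struct stack_frame *) &stack[64];
	struct stack_frame *caller = (struct stack_frame *) &stack[128];

	callee->back_chain = (unsigned long) caller;
	callee->gprs[8] = 0x1000;	/* fabricated return addresses */
	caller->gprs[8] = 0x2000;
	caller->back_chain = 0;		/* end of chain */
	walk_backchain((unsigned long) callee, (unsigned long) stack,
		       (unsigned long) (stack + 512));
	return 0;
}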
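
mask_bits() in dumpstack.c pulls one field out of the PSW mask by dividing the masked value by the lowest set bit of the mask, which acts as a right shift without an explicit shift count. A sketch assuming an LP64 build; the PSW_MASK_KEY value is quoted from the 64-bit asm definitions and the sample PSW mask is invented:

#include <stdio.h>

/* (~bits + 1) & bits isolates the lowest set bit of the mask, so the
 * division shifts the masked field down to bit 0. */
static unsigned long mask_bits(unsigned long psw_mask, unsigned long bits)
{
	return (psw_mask & bits) / ((~bits + 1) & bits);
}

int main(void)
{
	const unsigned long PSW_MASK_KEY = 0x00F0000000000000UL;
	unsigned long psw_mask = 0x0745000180000000UL;	/* made-up mask */

	printf("Key:%lx\n", mask_bits(psw_mask, PSW_MASK_KEY));	/* Key:4 */
	return 0;
}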
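
reset_tod_clock() in early.c seeds a stopped TOD clock with TOD_UNIX_EPOCH, the TOD value of 1970-01-01: the s390 TOD clock counts from 1900-01-01, with bit 51 equal to one microsecond. A hedged sketch of the conversion this implies; the epoch constant matches the kernel's definition, while tod_to_unix_seconds() is our helper name:

#include <stdint.h>
#include <stdio.h>

#define TOD_UNIX_EPOCH 0x7d91048bca000000ULL	/* TOD value of 1970-01-01 */

/* Shift bit 51 down to microseconds, then scale to seconds. */
static uint64_t tod_to_unix_seconds(uint64_t tod)
{
	return ((tod - TOD_UNIX_EPOCH) >> 12) / 1000000ULL;
}

int main(void)
{
	/* one day past the epoch: 86400 s in microseconds, at bit 51 */
	uint64_t tod = TOD_UNIX_EPOCH + ((86400ULL * 1000000) << 12);

	printf("%llu\n", (unsigned long long) tod_to_unix_seconds(tod));
	return 0;	/* prints: 86400 */
}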
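
setup_facility_list() above now fills the whole facility list with one stfle() call, and detect_machine_facilities() probes individual bits with test_facility(). Facility bits are numbered from the leftmost bit of the stored doublewords, so bit N sits in byte N / 8 under mask 0x80 >> (N % 8). A user-space model of that lookup; the fac_list contents below are made up for the demonstration:

#include <stdint.h>
#include <stdio.h>

/* MSB-first bit numbering, as used for the stfle facility list. */
static int test_facility_bit(const uint8_t *fac_list, unsigned int nr)
{
	return (fac_list[nr / 8] & (0x80 >> (nr % 8))) != 0;
}

int main(void)
{
	uint8_t fac_list[16] = { 0 };

	fac_list[1] |= 0x80;	/* pretend facility 8 (EDAT1) is installed */
	printf("facility 8: %d\n", test_facility_bit(fac_list, 8));	/* 1 */
	printf("facility 3: %d\n", test_facility_bit(fac_list, 3));	/* 0 */
	return 0;
}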
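
The ADD64/SUB64 macros near the top of this 31-bit entry.S (used by cleanup_idle further below to account idle time) synthesize 64-bit arithmetic from two 32-bit halves plus an explicit carry, which is what the "al ... brc 12,.+8 ... ahi \high,1" sequence does. The same idea in C, as a minimal sketch:

#include <stdint.h>
#include <stdio.h>

/* Add a 64-bit value to a high/low pair of 32-bit words, propagating a
 * carry out of the low half into the high half, like the ADD64 macro. */
static void add64(uint32_t *high, uint32_t *low, uint64_t val)
{
	uint32_t old_low = *low;

	*high += (uint32_t)(val >> 32);
	*low += (uint32_t)val;
	if (*low < old_low)	/* low half wrapped around: carry */
		(*high)++;
}

int main(void)
{
	uint32_t high = 0x00000001, low = 0xffffff00;

	add64(&high, &low, 0x200);	/* crosses the 32-bit boundary */
	printf("%08x%08x\n", high, low);	/* 0000000200000100 */
	return 0;
}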
@@ -515,7 +514,6 @@ io_work: # io_work_user: l %r1,__LC_KERNEL_STACK - ahi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE) mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) la %r11,STACK_FRAME_OVERHEAD(%r1) @@ -523,11 +521,9 @@ io_work_user: # # One of the work bits is on. Find out which one. -# Checked are: _TIF_SIGPENDING, _TIF_NOTIFY_RESUME, _TIF_NEED_RESCHED -# and _TIF_MCCK_PENDING # io_work_tif: - tm __TI_flags+3(%r12),_TIF_MCCK_PENDING + tm __LC_CPU_FLAGS+3(%r12),_CIF_MCCK_PENDING jo io_mcck_pending tm __TI_flags+3(%r12),_TIF_NEED_RESCHED jo io_reschedule @@ -535,10 +531,12 @@ io_work_tif: jo io_sigpending tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME jo io_notify_resume + tm __LC_CPU_FLAGS+3,_CIF_ASCE + jo io_uaccess j io_return # beware of critical section cleanup # -# _TIF_MCCK_PENDING is set, call handler +# _CIF_MCCK_PENDING is set, call handler # io_mcck_pending: # TRACE_IRQS_ON already done at io_return @@ -548,6 +546,14 @@ io_mcck_pending: j io_return # +# _CIF_ASCE is set, load user space asce +# +io_uaccess: + ni __LC_CPU_FLAGS+3,255-_CIF_ASCE + lctl %c1,%c1,__LC_USER_ASCE # load primary asce + j io_return + +# # _TIF_NEED_RESCHED is set, call schedule # io_reschedule: @@ -604,14 +610,32 @@ ext_skip: stm %r0,%r7,__PT_R0(%r11) mvc __PT_R8(32,%r11),__LC_SAVE_AREA_ASYNC stm %r8,%r9,__PT_PSW(%r11) + mvc __PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR + mvc __PT_INT_PARM(4,%r11),__LC_EXT_PARAMS + xc __PT_FLAGS(4,%r11),__PT_FLAGS(%r11) TRACE_IRQS_OFF + l %r1,BASED(.Ldo_IRQ) lr %r2,%r11 # pass pointer to pt_regs - l %r3,__LC_CPU_ADDRESS # get cpu address + interruption code - l %r4,__LC_EXT_PARAMS # get external parameters - l %r1,BASED(.Ldo_extint) - basr %r14,%r1 # call do_extint + lhi %r3,EXT_INTERRUPT + basr %r14,%r1 # call do_IRQ j io_return +/* + * Load idle PSW. The second "half" of this function is in cleanup_idle. + */ +ENTRY(psw_idle) + st %r3,__SF_EMPTY(%r15) + basr %r1,0 + la %r1,psw_idle_lpsw+4-.(%r1) + st %r1,__SF_EMPTY+4(%r15) + oi __SF_EMPTY+4(%r15),0x80 + stck __CLOCK_IDLE_ENTER(%r2) + stpt __TIMER_IDLE_ENTER(%r2) +psw_idle_lpsw: + lpsw __SF_EMPTY(%r15) + br %r14 +psw_idle_end: + __critical_end: /* @@ -650,8 +674,10 @@ ENTRY(mcck_int_handler) UPDATE_VTIME %r14,%r15,__LC_MCCK_ENTER_TIMER mcck_skip: SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+32,__LC_PANIC_STACK,PAGE_SHIFT - mvc __PT_R0(64,%r11),__LC_GPREGS_SAVE_AREA + stm %r0,%r7,__PT_R0(%r11) + mvc __PT_R8(32,%r11),__LC_GPREGS_SAVE_AREA+32 stm %r8,%r9,__PT_PSW(%r11) + xc __PT_FLAGS(4,%r11),__PT_FLAGS(%r11) xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) l %r1,BASED(.Ldo_machine_check) lr %r2,%r11 # pass pointer to pt_regs @@ -659,13 +685,12 @@ mcck_skip: tm __PT_PSW+1(%r11),0x01 # returning to user ? jno mcck_return l %r1,__LC_KERNEL_STACK # switch to kernel stack - ahi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE) mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) la %r11,STACK_FRAME_OVERHEAD(%r15) lr %r15,%r1 ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off - tm __TI_flags+3(%r12),_TIF_MCCK_PENDING + tm __LC_CPU_FLAGS+3,_CIF_MCCK_PENDING jno mcck_return TRACE_IRQS_OFF l %r1,BASED(.Lhandle_mcck) @@ -673,7 +698,6 @@ mcck_skip: TRACE_IRQS_ON mcck_return: mvc __LC_RETURN_MCCK_PSW(8),__PT_PSW(%r11) # move return PSW - ni __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ? 
jno 0f lm %r0,%r15,__PT_R0(%r11) @@ -688,80 +712,36 @@ mcck_panic: sra %r14,PAGE_SHIFT jz 0f l %r15,__LC_PANIC_STACK + j mcck_skip 0: ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) j mcck_skip -/* - * Restart interruption handler, kick starter for additional CPUs - */ -#ifdef CONFIG_SMP - __CPUINIT -ENTRY(restart_int_handler) - basr %r1,0 -restart_base: - spt restart_vtime-restart_base(%r1) - stck __LC_LAST_UPDATE_CLOCK - mvc __LC_LAST_UPDATE_TIMER(8),restart_vtime-restart_base(%r1) - mvc __LC_EXIT_TIMER(8),restart_vtime-restart_base(%r1) - l %r15,__LC_GPREGS_SAVE_AREA+60 # load ksp - lctl %c0,%c15,__LC_CREGS_SAVE_AREA # get new ctl regs - lam %a0,%a15,__LC_AREGS_SAVE_AREA - lm %r6,%r15,__SF_GPRS(%r15)# load registers from clone - l %r1,__LC_THREAD_INFO - mvc __LC_USER_TIMER(8),__TI_user_timer(%r1) - mvc __LC_SYSTEM_TIMER(8),__TI_system_timer(%r1) - xc __LC_STEAL_TIMER(8),__LC_STEAL_TIMER - ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off - basr %r14,0 - l %r14,restart_addr-.(%r14) - basr %r14,%r14 # call start_secondary -restart_addr: - .long start_secondary - .align 8 -restart_vtime: - .long 0x7fffffff,0xffffffff - .previous -#else -/* - * If we do not run with SMP enabled, let the new CPU crash ... - */ -ENTRY(restart_int_handler) - basr %r1,0 -restart_base: - lpsw restart_crash-restart_base(%r1) - .align 8 -restart_crash: - .long 0x000a0000,0x00000000 -restart_go: -#endif - # # PSW restart interrupt handler # -ENTRY(psw_restart_int_handler) +ENTRY(restart_int_handler) st %r15,__LC_SAVE_AREA_RESTART - basr %r15,0 -0: l %r15,.Lrestart_stack-0b(%r15) # load restart stack - l %r15,0(%r15) + l %r15,__LC_RESTART_STACK ahi %r15,-__PT_SIZE # create pt_regs on stack + xc 0(__PT_SIZE,%r15),0(%r15) stm %r0,%r14,__PT_R0(%r15) mvc __PT_R15(4,%r15),__LC_SAVE_AREA_RESTART mvc __PT_PSW(8,%r15),__LC_RST_OLD_PSW # store restart old psw - ahi %r15,-STACK_FRAME_OVERHEAD - xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) - basr %r14,0 -1: l %r14,.Ldo_restart-1b(%r14) - basr %r14,%r14 - basr %r14,0 # load disabled wait PSW if -2: lpsw restart_psw_crash-2b(%r14) # do_restart returns - .align 4 -.Ldo_restart: - .long do_restart -.Lrestart_stack: - .long restart_stack - .align 8 -restart_psw_crash: - .long 0x000a0000,0x00000000 + restart_psw_crash + ahi %r15,-STACK_FRAME_OVERHEAD # create stack frame on stack + xc 0(STACK_FRAME_OVERHEAD,%r15),0(%r15) + l %r1,__LC_RESTART_FN # load fn, parm & source cpu + l %r2,__LC_RESTART_DATA + l %r3,__LC_RESTART_SOURCE + ltr %r3,%r3 # test source cpu address + jm 1f # negative -> skip source stop +0: sigp %r4,%r3,SIGP_SENSE # sigp sense to source cpu + brc 10,0b # wait for status stored +1: basr %r14,%r1 # call function + stap __SF_EMPTY(%r15) # store cpu address + lh %r3,__SF_EMPTY(%r15) +2: sigp %r4,%r3,SIGP_STOP # sigp stop to current cpu + brc 2,2b +3: j 3b .section .kprobes.text, "ax" @@ -773,12 +753,10 @@ restart_psw_crash: */ stack_overflow: l %r15,__LC_PANIC_STACK # change to panic stack - ahi %r15,-__PT_SIZE # create pt_regs - stm %r0,%r7,__PT_R0(%r15) - stm %r8,%r9,__PT_PSW(%r15) + la %r11,STACK_FRAME_OVERHEAD(%r15) + stm %r0,%r7,__PT_R0(%r11) + stm %r8,%r9,__PT_PSW(%r11) mvc __PT_R8(32,%r11),0(%r14) - lr %r15,%r11 - ahi %r15,-STACK_FRAME_OVERHEAD l %r1,BASED(1f) xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) lr %r2,%r11 # pass pointer to pt_regs @@ -795,6 +773,8 @@ cleanup_table: .long io_tif + 0x80000000 .long io_restore + 0x80000000 .long io_done + 0x80000000 + .long psw_idle + 0x80000000 + .long psw_idle_end + 0x80000000 cleanup_critical: cl 
%r9,BASED(cleanup_table) # system_call @@ -813,6 +793,10 @@ cleanup_critical: jl cleanup_io_tif cl %r9,BASED(cleanup_table+28) # io_done jl cleanup_io_restore + cl %r9,BASED(cleanup_table+32) # psw_idle + jl 0f + cl %r9,BASED(cleanup_table+36) # psw_idle_end + jl cleanup_idle 0: br %r14 cleanup_system_call: @@ -852,15 +836,16 @@ cleanup_system_call: mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER # set up saved register 11 l %r15,__LC_KERNEL_STACK - ahi %r15,-__PT_SIZE - st %r15,12(%r11) # r11 pt_regs pointer + la %r9,STACK_FRAME_OVERHEAD(%r15) + st %r9,12(%r11) # r11 pt_regs pointer # fill pt_regs - mvc __PT_R8(32,%r15),__LC_SAVE_AREA_SYNC - stm %r0,%r7,__PT_R0(%r15) - mvc __PT_PSW(8,%r15),__LC_SVC_OLD_PSW - mvc __PT_INT_CODE(4,%r15),__LC_SVC_ILC + mvc __PT_R8(32,%r9),__LC_SAVE_AREA_SYNC + stm %r0,%r7,__PT_R0(%r9) + mvc __PT_PSW(8,%r9),__LC_SVC_OLD_PSW + mvc __PT_INT_CODE(4,%r9),__LC_SVC_ILC + xc __PT_FLAGS(4,%r9),__PT_FLAGS(%r9) + mvi __PT_FLAGS+3(%r9),_PIF_SYSCALL # setup saved register 15 - ahi %r15,-STACK_FRAME_OVERHEAD st %r15,28(%r11) # r15 stack pointer # set new psw address and exit l %r9,BASED(cleanup_table+4) # sysc_do_svc + 0x80000000 @@ -896,7 +881,6 @@ cleanup_io_restore: jhe 0f l %r9,12(%r11) # get saved r11 pointer to pt_regs mvc __LC_RETURN_PSW(8),__PT_PSW(%r9) - ni __LC_RETURN_PSW+1,0xfd # clear wait state bit mvc 0(32,%r11),__PT_R8(%r9) lm %r0,%r7,__PT_R0(%r9) 0: lm %r8,%r9,__LC_RETURN_PSW @@ -904,11 +888,47 @@ cleanup_io_restore: cleanup_io_restore_insn: .long io_done - 4 + 0x80000000 +cleanup_idle: + # copy interrupt clock & cpu timer + mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK + mvc __TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER + chi %r11,__LC_SAVE_AREA_ASYNC + je 0f + mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK + mvc __TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER +0: # check if stck has been executed + cl %r9,BASED(cleanup_idle_insn) + jhe 1f + mvc __CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2) + mvc __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r3) +1: # account system time going idle + lm %r9,%r10,__LC_STEAL_TIMER + ADD64 %r9,%r10,__CLOCK_IDLE_ENTER(%r2) + SUB64 %r9,%r10,__LC_LAST_UPDATE_CLOCK + stm %r9,%r10,__LC_STEAL_TIMER + mvc __LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2) + lm %r9,%r10,__LC_SYSTEM_TIMER + ADD64 %r9,%r10,__LC_LAST_UPDATE_TIMER + SUB64 %r9,%r10,__TIMER_IDLE_ENTER(%r2) + stm %r9,%r10,__LC_SYSTEM_TIMER + mvc __LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2) + # prepare return psw + n %r8,BASED(cleanup_idle_wait) # clear irq & wait state bits + l %r9,24(%r11) # return from psw_idle + br %r14 +cleanup_idle_insn: + .long psw_idle_lpsw + 0x80000000 +cleanup_idle_wait: + .long 0xfcfdffff + /* * Integer constants */ .align 4 -.Lnr_syscalls: .long NR_syscalls +.Lnr_syscalls: + .long NR_syscalls +.Lvtimer_max: + .quad 0x7fffffffffffffff /* * Symbol constants @@ -916,12 +936,9 @@ cleanup_io_restore_insn: .Ldo_machine_check: .long s390_do_machine_check .Lhandle_mcck: .long s390_handle_mcck .Ldo_IRQ: .long do_IRQ -.Ldo_extint: .long do_extint .Ldo_signal: .long do_signal .Ldo_notify_resume: .long do_notify_resume .Ldo_per_trap: .long do_per_trap -.Ldo_execve: .long do_execve -.Lexecve_tail: .long execve_tail .Ljump_table: .long pgm_check_table .Lschedule: .long schedule #ifdef CONFIG_PREEMPT @@ -930,7 +947,6 @@ cleanup_io_restore_insn: .Ltrace_enter: .long do_syscall_trace_enter .Ltrace_exit: .long do_syscall_trace_exit .Lschedule_tail: .long schedule_tail -.Lsys_call_table: .long sys_call_table .Lsysc_per: .long sysc_per + 0x80000000 #ifdef 
CONFIG_TRACE_IRQFLAGS .Lhardirqs_on: .long trace_hardirqs_on_caller diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h index bf538aaf407..6ac78192455 100644 --- a/arch/s390/kernel/entry.h +++ b/arch/s390/kernel/entry.h @@ -4,61 +4,70 @@ #include <linux/types.h> #include <linux/signal.h> #include <asm/ptrace.h> +#include <asm/cputime.h> - -extern void (*pgm_check_table[128])(struct pt_regs *); extern void *restart_stack; +extern unsigned long suspend_zero_pages; + +void system_call(void); +void pgm_check_handler(void); +void ext_int_handler(void); +void io_int_handler(void); +void mcck_int_handler(void); +void restart_int_handler(void); +void restart_call_handler(void); +void psw_idle(struct s390_idle_data *, unsigned long); asmlinkage long do_syscall_trace_enter(struct pt_regs *regs); asmlinkage void do_syscall_trace_exit(struct pt_regs *regs); void do_protection_exception(struct pt_regs *regs); void do_dat_exception(struct pt_regs *regs); -void do_asce_exception(struct pt_regs *regs); + +void addressing_exception(struct pt_regs *regs); +void data_exception(struct pt_regs *regs); +void default_trap_handler(struct pt_regs *regs); +void divide_exception(struct pt_regs *regs); +void execute_exception(struct pt_regs *regs); +void hfp_divide_exception(struct pt_regs *regs); +void hfp_overflow_exception(struct pt_regs *regs); +void hfp_significance_exception(struct pt_regs *regs); +void hfp_sqrt_exception(struct pt_regs *regs); +void hfp_underflow_exception(struct pt_regs *regs); +void illegal_op(struct pt_regs *regs); +void operand_exception(struct pt_regs *regs); +void overflow_exception(struct pt_regs *regs); +void privileged_op(struct pt_regs *regs); +void space_switch_exception(struct pt_regs *regs); +void special_op_exception(struct pt_regs *regs); +void specification_exception(struct pt_regs *regs); +void transaction_exception(struct pt_regs *regs); +void translation_exception(struct pt_regs *regs); void do_per_trap(struct pt_regs *regs); void syscall_trace(struct pt_regs *regs, int entryexit); void kernel_stack_overflow(struct pt_regs * regs); void do_signal(struct pt_regs *regs); -int handle_signal32(unsigned long sig, struct k_sigaction *ka, +void handle_signal32(unsigned long sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *oldset, struct pt_regs *regs); void do_notify_resume(struct pt_regs *regs); -void do_extint(struct pt_regs *regs, unsigned int, unsigned int, unsigned long); +void __init init_IRQ(void); +void do_IRQ(struct pt_regs *regs, int irq); void do_restart(void); -int __cpuinit start_secondary(void *cpuvoid); void __init startup_init(void); void die(struct pt_regs *regs, const char *str); - +int setup_profiling_timer(unsigned int multiplier); void __init time_init(void); +int pfn_is_nosave(unsigned long); +void s390_early_resume(void); +unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip); struct s390_mmap_arg_struct; struct fadvise64_64_args; struct old_sigaction; -long sys_mmap2(struct s390_mmap_arg_struct __user *arg); -long sys_s390_ipc(uint call, int first, unsigned long second, - unsigned long third, void __user *ptr); long sys_s390_personality(unsigned int personality); -long sys_s390_fadvise64(int fd, u32 offset_high, u32 offset_low, - size_t len, int advice); -long sys_s390_fadvise64_64(struct fadvise64_64_args __user *args); -long sys_s390_fallocate(int fd, int mode, loff_t offset, u32 len_high, - u32 len_low); -long sys_fork(void); -long sys_clone(unsigned long newsp, unsigned long clone_flags, - int __user 
*parent_tidptr, int __user *child_tidptr); -long sys_vfork(void); -void execve_tail(void); -long sys_execve(const char __user *name, const char __user *const __user *argv, - const char __user *const __user *envp); -long sys_sigsuspend(int history0, int history1, old_sigset_t mask); -long sys_sigaction(int sig, const struct old_sigaction __user *act, - struct old_sigaction __user *oact); -long sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss); -long sys_sigreturn(void); -long sys_rt_sigreturn(void); -long sys32_sigreturn(void); -long sys32_rt_sigreturn(void); +long sys_s390_runtime_instr(int command, int signum); #endif /* _ENTRY_H */ diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S index 412a7b8783d..f2e674c702e 100644 --- a/arch/s390/kernel/entry64.S +++ b/arch/s390/kernel/entry64.S @@ -1,8 +1,7 @@ /* - * arch/s390/kernel/entry64.S * S390 low-level entry points. * - * Copyright (C) IBM Corp. 1999,2010 + * Copyright IBM Corp. 1999, 2012 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), * Hartmut Penner (hp@de.ibm.com), * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), @@ -11,6 +10,7 @@ #include <linux/init.h> #include <linux/linkage.h> +#include <asm/processor.h> #include <asm/cache.h> #include <asm/errno.h> #include <asm/ptrace.h> @@ -18,6 +18,8 @@ #include <asm/asm-offsets.h> #include <asm/unistd.h> #include <asm/page.h> +#include <asm/sigp.h> +#include <asm/irq.h> __PT_R0 = __PT_GPRS __PT_R1 = __PT_GPRS + 8 @@ -38,14 +40,13 @@ __PT_R15 = __PT_GPRS + 120 STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER STACK_SIZE = 1 << STACK_SHIFT +STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE -_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ - _TIF_MCCK_PENDING | _TIF_PER_TRAP ) -_TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ - _TIF_MCCK_PENDING) -_TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \ - _TIF_SYSCALL_TRACEPOINT) -_TIF_EXIT_SIE = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING) +_TIF_WORK = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED) +_TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \ + _TIF_SYSCALL_TRACEPOINT) +_CIF_WORK = (_CIF_MCCK_PENDING | _CIF_ASCE) +_PIF_WORK = (_PIF_PER_TRAP) #define BASED(name) name-system_call(%r13) @@ -71,26 +72,35 @@ _TIF_EXIT_SIE = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING) #endif .endm - .macro SPP newpp -#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) - tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_SPP + .macro LPP newpp +#if IS_ENABLED(CONFIG_KVM) + tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_LPP jz .+8 .insn s,0xb2800000,\newpp #endif .endm - .macro HANDLE_SIE_INTERCEPT scratch -#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) - tm __TI_flags+6(%r12),_TIF_SIE>>8 - jz .+42 - tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_SPP - jz .+8 - .insn s,0xb2800000,BASED(.Lhost_id) # set host id + .macro HANDLE_SIE_INTERCEPT scratch,reason +#if IS_ENABLED(CONFIG_KVM) + tmhh %r8,0x0001 # interrupting from user ? + jnz .+62 lgr \scratch,%r9 - slg \scratch,BASED(.Lsie_loop) - clg \scratch,BASED(.Lsie_length) - jhe .+10 - lg %r9,BASED(.Lsie_loop) + slg \scratch,BASED(.Lsie_critical) + clg \scratch,BASED(.Lsie_critical_length) + .if \reason==1 + # Some program interrupts are suppressing (e.g. protection). + # We must also check the instruction after SIE in that case. 
+ # do_protection_exception will rewind to rewind_pad + jh .+42 + .else + jhe .+42 + .endif + lg %r14,__SF_EMPTY(%r15) # get control block pointer + LPP __SF_EMPTY+16(%r15) # set host id + ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE + lctlg %c1,%c1,__LC_USER_ASCE # load primary asce + larl %r9,sie_exit # skip forward to sie_exit + mvi __SF_EMPTY+31(%r15),\reason # set exit reason #endif .endm @@ -118,10 +128,10 @@ _TIF_EXIT_SIE = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING) srag %r14,%r14,\shift jnz 1f CHECK_STACK 1<<\shift,\savearea + aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) j 2f 1: lg %r15,\stack # load target stack -2: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) - la %r11,STACK_FRAME_OVERHEAD(%r15) +2: la %r11,STACK_FRAME_OVERHEAD(%r15) .endm .macro UPDATE_VTIME scratch,enter_timer @@ -148,6 +158,14 @@ _TIF_EXIT_SIE = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING) ssm __LC_RETURN_PSW .endm + .macro STCK savearea +#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES + .insn s,0xb27c0000,\savearea # store clock fast +#else + .insn s,0xb2050000,\savearea # store clock +#endif + .endm + .section .kprobes.text, "ax" /* @@ -158,22 +176,19 @@ _TIF_EXIT_SIE = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING) * gpr2 = prev */ ENTRY(__switch_to) + stmg %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task + stg %r15,__THREAD_ksp(%r2) # store kernel stack of prev lg %r4,__THREAD_info(%r2) # get thread_info of prev lg %r5,__THREAD_info(%r3) # get thread_info of next - tm __TI_flags+7(%r4),_TIF_MCCK_PENDING # machine check pending? - jz 0f - ni __TI_flags+7(%r4),255-_TIF_MCCK_PENDING # clear flag in prev - oi __TI_flags+7(%r5),_TIF_MCCK_PENDING # set it in next -0: stmg %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task - stg %r15,__THREAD_ksp(%r2) # store kernel stack of prev - lg %r15,__THREAD_ksp(%r3) # load kernel stack of next - lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4 - lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task + lgr %r15,%r5 + aghi %r15,STACK_INIT # end of kernel stack of next stg %r3,__LC_CURRENT # store task struct of next - mvc __LC_CURRENT_PID+4(4,%r0),__TASK_pid(%r3) # store pid of next stg %r5,__LC_THREAD_INFO # store thread info of next - aghi %r5,STACK_SIZE # end of kernel stack of next - stg %r5,__LC_KERNEL_STACK # store end of kernel stack + stg %r15,__LC_KERNEL_STACK # store end of kernel stack + lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 
4 + mvc __LC_CURRENT_PID+4(4,%r0),__TASK_pid(%r3) # store pid of next + lg %r15,__THREAD_ksp(%r3) # load kernel stack of next + lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task br %r14 __critical_start: @@ -188,10 +203,9 @@ sysc_stmg: stmg %r8,%r15,__LC_SAVE_AREA_SYNC lg %r10,__LC_LAST_BREAK lg %r12,__LC_THREAD_INFO - larl %r13,system_call + lghi %r14,_PIF_SYSCALL sysc_per: lg %r15,__LC_KERNEL_STACK - aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs sysc_vtime: UPDATE_VTIME %r13,__LC_SYNC_ENTER_TIMER @@ -200,8 +214,9 @@ sysc_vtime: mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC mvc __PT_PSW(16,%r11),__LC_SVC_OLD_PSW mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC + stg %r14,__PT_FLAGS(%r11) sysc_do_svc: - oi __TI_flags+7(%r12),_TIF_SYSCALL + lg %r10,__TI_sysc_table(%r12) # address of system call table llgh %r8,__PT_INT_CODE+2(%r11) slag %r8,%r8,2 # shift and test for svc 0 jnz sysc_nr_ok @@ -212,18 +227,11 @@ sysc_do_svc: sth %r1,__PT_INT_CODE+2(%r11) slag %r8,%r1,2 sysc_nr_ok: - larl %r10,sys_call_table # 64 bit system call table -#ifdef CONFIG_COMPAT - tm __TI_flags+5(%r12),(_TIF_31BIT>>16) - jno sysc_noemu - larl %r10,sys_call_table_emu # 31 bit system call table -sysc_noemu: -#endif xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) stg %r2,__PT_ORIG_GPR2(%r11) stg %r7,STACK_FRAME_OVERHEAD(%r15) lgf %r9,0(%r8,%r10) # get system call add. - tm __TI_flags+6(%r12),_TIF_TRACE >> 8 + tm __TI_flags+7(%r12),_TIF_TRACE jnz sysc_tracesys basr %r14,%r9 # call sys_xxxx stg %r2,__PT_R2(%r11) # store return value @@ -233,9 +241,12 @@ sysc_return: sysc_tif: tm __PT_PSW+1(%r11),0x01 # returning to user ? jno sysc_restore - tm __TI_flags+7(%r12),_TIF_WORK_SVC + tm __PT_FLAGS+7(%r11),_PIF_WORK + jnz sysc_work + tm __TI_flags+7(%r12),_TIF_WORK jnz sysc_work # check for work - ni __TI_flags+7(%r12),255-_TIF_SYSCALL + tm __LC_CPU_FLAGS+7,_CIF_WORK + jnz sysc_work sysc_restore: lg %r14,__LC_VDSO_PER_CPU lmg %r0,%r10,__PT_R0(%r11) @@ -250,16 +261,18 @@ sysc_done: # One of the work bits is on. Find out which one. 
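The dispatch order tested below rests on a flag split this patch introduces: the old single set of _TIF_* work bits is divided over three words. _TIF_* bits stay in thread_info, _CIF_* bits such as _CIF_MCCK_PENDING and _CIF_ASCE move to the per-cpu lowcore (__LC_CPU_FLAGS), and _PIF_* bits such as _PIF_SYSCALL and _PIF_PER_TRAP travel in the pt_regs of the current interruption (__PT_FLAGS). A minimal user-space model of the priority order checked by sysc_work follows; the bit values and the sysc_work_model() helper are invented for illustration and are not the kernel's encodings.

        #include <stdio.h>

        /* illustrative bit values only -- not the kernel's encodings */
        #define TIF_NEED_RESCHED   0x01
        #define TIF_SIGPENDING     0x02
        #define TIF_NOTIFY_RESUME  0x04
        #define CIF_MCCK_PENDING   0x01
        #define CIF_ASCE           0x02
        #define PIF_PER_TRAP       0x01

        /* mirrors the test order of sysc_work below */
        static const char *sysc_work_model(unsigned int tif, unsigned int cif,
                                           unsigned int pif)
        {
                if (cif & CIF_MCCK_PENDING)
                        return "s390_handle_mcck";
                if (tif & TIF_NEED_RESCHED)
                        return "schedule";
                if (pif & PIF_PER_TRAP)
                        return "do_per_trap";
                if (tif & TIF_SIGPENDING)
                        return "do_signal";
                if (tif & TIF_NOTIFY_RESUME)
                        return "do_notify_resume";
                if (cif & CIF_ASCE)
                        return "reload user ASCE";
                return "sysc_return";
        }

        int main(void)
        {
                /* a pending machine check wins over a pending signal */
                printf("%s\n", sysc_work_model(TIF_SIGPENDING,
                                               CIF_MCCK_PENDING, 0));
                return 0;
        }
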
# sysc_work: - tm __TI_flags+7(%r12),_TIF_MCCK_PENDING + tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING jo sysc_mcck_pending tm __TI_flags+7(%r12),_TIF_NEED_RESCHED jo sysc_reschedule + tm __PT_FLAGS+7(%r11),_PIF_PER_TRAP + jo sysc_singlestep tm __TI_flags+7(%r12),_TIF_SIGPENDING jo sysc_sigpending tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME jo sysc_notify_resume - tm __TI_flags+7(%r12),_TIF_PER_TRAP - jo sysc_singlestep + tm __LC_CPU_FLAGS+7,_CIF_ASCE + jo sysc_uaccess j sysc_return # beware of critical section cleanup # @@ -270,24 +283,32 @@ sysc_reschedule: jg schedule # -# _TIF_MCCK_PENDING is set, call handler +# _CIF_MCCK_PENDING is set, call handler # sysc_mcck_pending: larl %r14,sysc_return jg s390_handle_mcck # TIF bit will be cleared by handler # +# _CIF_ASCE is set, load user space asce +# +sysc_uaccess: + ni __LC_CPU_FLAGS+7,255-_CIF_ASCE + lctlg %c1,%c1,__LC_USER_ASCE # load primary asce + j sysc_return + +# # _TIF_SIGPENDING is set, call do_signal # sysc_sigpending: - ni __TI_flags+7(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP lgr %r2,%r11 # pass pointer to pt_regs brasl %r14,do_signal - tm __TI_flags+7(%r12),_TIF_SYSCALL + tm __PT_FLAGS+7(%r11),_PIF_SYSCALL jno sysc_return lmg %r2,%r7,__PT_R2(%r11) # load svc arguments + lg %r10,__TI_sysc_table(%r12) # address of system call table lghi %r8,0 # svc 0 returns -ENOSYS - lh %r1,__PT_INT_CODE+2(%r11) # load new svc number + llgh %r1,__PT_INT_CODE+2(%r11) # load new svc number cghi %r1,NR_syscalls jnl sysc_nr_ok # invalid svc number -> do svc 0 slag %r8,%r1,2 @@ -302,10 +323,10 @@ sysc_notify_resume: jg do_notify_resume # -# _TIF_PER_TRAP is set, call do_per_trap +# _PIF_PER_TRAP is set, call do_per_trap # sysc_singlestep: - ni __TI_flags+7(%r12),255-(_TIF_SYSCALL | _TIF_PER_TRAP) + ni __PT_FLAGS+7(%r11),255-_PIF_PER_TRAP lgr %r2,%r11 # pass pointer to pt_regs larl %r14,sysc_return jg do_per_trap @@ -332,7 +353,7 @@ sysc_tracego: basr %r14,%r9 # call sys_xxx stg %r2,__PT_R2(%r11) # store return value sysc_tracenogo: - tm __TI_flags+6(%r12),_TIF_TRACE >> 8 + tm __TI_flags+7(%r12),_TIF_TRACE jz sysc_return lgr %r2,%r11 # pass pointer to pt_regs larl %r14,sysc_return @@ -344,44 +365,18 @@ sysc_tracenogo: ENTRY(ret_from_fork) la %r11,STACK_FRAME_OVERHEAD(%r15) lg %r12,__LC_THREAD_INFO - tm __PT_PSW+1(%r11),0x01 # forking a kernel thread ? - jo 0f - stg %r15,__PT_R15(%r11) # store stack pointer for new kthread -0: brasl %r14,schedule_tail + brasl %r14,schedule_tail TRACE_IRQS_ON ssm __LC_SVC_NEW_PSW # reenable interrupts + tm __PT_PSW+1(%r11),0x01 # forking a kernel thread ? + jne sysc_tracenogo + # it's a kernel thread + lmg %r9,%r10,__PT_R9(%r11) # load gprs +ENTRY(kernel_thread_starter) + la %r2,0(%r10) + basr %r14,%r9 j sysc_tracenogo -# -# kernel_execve function needs to deal with pt_regs that is not -# at the usual place -# -ENTRY(kernel_execve) - stmg %r12,%r15,96(%r15) - lgr %r14,%r15 - aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) - stg %r14,__SF_BACKCHAIN(%r15) - la %r12,STACK_FRAME_OVERHEAD(%r15) - xc 0(__PT_SIZE,%r12),0(%r12) - lgr %r5,%r12 - brasl %r14,do_execve - ltgfr %r2,%r2 - je 0f - aghi %r15,(STACK_FRAME_OVERHEAD + __PT_SIZE) - lmg %r12,%r15,96(%r15) - br %r14 - # execve succeeded. -0: ssm __LC_PGM_NEW_PSW # disable I/O and ext. 
interrupts - lg %r15,__LC_KERNEL_STACK # load ksp - aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) - la %r11,STACK_FRAME_OVERHEAD(%r15) - mvc 0(__PT_SIZE,%r11),0(%r12) # copy pt_regs - lg %r12,__LC_THREAD_INFO - xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) - ssm __LC_SVC_NEW_PSW # reenable interrupts - brasl %r14,execve_tail - j sysc_return - /* * Program check handler routine */ @@ -393,7 +388,7 @@ ENTRY(pgm_check_handler) lg %r12,__LC_THREAD_INFO larl %r13,system_call lmg %r8,%r9,__LC_PGM_OLD_PSW - HANDLE_SIE_INTERCEPT %r14 + HANDLE_SIE_INTERCEPT %r14,1 tmhh %r8,0x0001 # test problem state bit jnz 1f # -> fault in user space tmhh %r8,0x4000 # PER bit set in old PSW ? @@ -401,35 +396,40 @@ ENTRY(pgm_check_handler) tm __LC_PGM_ILC+3,0x80 # check for per exception jnz pgm_svcper # -> single stepped svc 0: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC + aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) j 2f 1: UPDATE_VTIME %r14,__LC_SYNC_ENTER_TIMER LAST_BREAK %r14 lg %r15,__LC_KERNEL_STACK -2: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) - la %r11,STACK_FRAME_OVERHEAD(%r15) + lg %r14,__TI_task(%r12) + lghi %r13,__LC_PGM_TDB + tm __LC_PGM_ILC+2,0x02 # check for transaction abort + jz 2f + mvc __THREAD_trap_tdb(256,%r14),0(%r13) +2: la %r11,STACK_FRAME_OVERHEAD(%r15) stmg %r0,%r7,__PT_R0(%r11) mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC stmg %r8,%r9,__PT_PSW(%r11) mvc __PT_INT_CODE(4,%r11),__LC_PGM_ILC mvc __PT_INT_PARM_LONG(8,%r11),__LC_TRANS_EXC_CODE + xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) stg %r10,__PT_ARGS(%r11) tm __LC_PGM_ILC+3,0x80 # check for per exception jz 0f - lg %r1,__TI_task(%r12) tmhh %r8,0x0001 # kernel per event ? jz pgm_kprobe - oi __TI_flags+7(%r12),_TIF_PER_TRAP - mvc __THREAD_per_address(8,%r1),__LC_PER_ADDRESS - mvc __THREAD_per_cause(2,%r1),__LC_PER_CAUSE - mvc __THREAD_per_paid(1,%r1),__LC_PER_PAID + oi __PT_FLAGS+7(%r11),_PIF_PER_TRAP + mvc __THREAD_per_address(8,%r14),__LC_PER_ADDRESS + mvc __THREAD_per_cause(2,%r14),__LC_PER_CODE + mvc __THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID 0: REENABLE_IRQS xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) larl %r1,pgm_check_table llgh %r10,__PT_INT_CODE+2(%r11) nill %r10,0x007f - sll %r10,3 + sll %r10,2 je sysc_return - lg %r1,0(%r10,%r1) # load address of handler routine + lgf %r1,0(%r10,%r1) # load address of handler routine lgr %r2,%r11 # pass pointer to pt_regs basr %r14,%r1 # branch to interrupt-handler j sysc_return @@ -448,24 +448,24 @@ pgm_kprobe: # single stepped system call # pgm_svcper: - oi __TI_flags+7(%r12),_TIF_PER_TRAP mvc __LC_RETURN_PSW(8),__LC_SVC_NEW_PSW larl %r14,sysc_per stg %r14,__LC_RETURN_PSW+8 + lghi %r14,_PIF_SYSCALL | _PIF_PER_TRAP lpswe __LC_RETURN_PSW # branch to sysc_per and enable irqs /* * IO interrupt handler routine */ ENTRY(io_int_handler) - stck __LC_INT_CLOCK + STCK __LC_INT_CLOCK stpt __LC_ASYNC_ENTER_TIMER stmg %r8,%r15,__LC_SAVE_AREA_ASYNC lg %r10,__LC_LAST_BREAK lg %r12,__LC_THREAD_INFO larl %r13,system_call lmg %r8,%r9,__LC_IO_OLD_PSW - HANDLE_SIE_INTERCEPT %r14 + HANDLE_SIE_INTERCEPT %r14,2 SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT tmhh %r8,0x0001 # interrupting from user? 
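A side note on the program-check dispatch rewritten above: pgm_check_table is now indexed with "sll %r10,2" instead of "sll %r10,3" and read with lgf, i.e. the table shrank to 4-byte entries that are sign-extended to a 64-bit handler address on load. A rough C model of that lookup; pgm_lookup() and the zeroed table are stand-ins, not kernel code.

        #include <stdint.h>

        struct pt_regs;
        typedef void (*pgm_handler_t)(struct pt_regs *);

        /* stand-in for pgm_check_table: 128 entries of 4 bytes each,
         * as "nill %r10,0x007f" + "sll %r10,2" imply */
        static int32_t pgm_check_table_model[128];

        static pgm_handler_t pgm_lookup(unsigned int int_code)
        {
                unsigned int idx = int_code & 0x007f;  /* nill %r10,0x007f */

                /* lgf loads the 32-bit entry and sign-extends it; the
                 * cast through intptr_t models that sign extension */
                return (pgm_handler_t)(intptr_t)pgm_check_table_model[idx];
        }
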
jz io_skip @@ -475,21 +475,36 @@ io_skip: stmg %r0,%r7,__PT_R0(%r11) mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC stmg %r8,%r9,__PT_PSW(%r11) + mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID + xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) TRACE_IRQS_OFF xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) +io_loop: lgr %r2,%r11 # pass pointer to pt_regs + lghi %r3,IO_INTERRUPT + tm __PT_INT_CODE+8(%r11),0x80 # adapter interrupt ? + jz io_call + lghi %r3,THIN_INTERRUPT +io_call: brasl %r14,do_IRQ + tm __LC_MACHINE_FLAGS+6,0x10 # MACHINE_FLAG_LPAR + jz io_return + tpi 0 + jz io_return + mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID + j io_loop io_return: LOCKDEP_SYS_EXIT TRACE_IRQS_ON io_tif: - tm __TI_flags+7(%r12),_TIF_WORK_INT + tm __TI_flags+7(%r12),_TIF_WORK jnz io_work # there is work to do (signals etc.) + tm __LC_CPU_FLAGS+7,_CIF_WORK + jnz io_work io_restore: lg %r14,__LC_VDSO_PER_CPU lmg %r0,%r10,__PT_R0(%r11) mvc __LC_RETURN_PSW(16),__PT_PSW(%r11) - ni __LC_RETURN_PSW+1,0xfd # clear wait state bit stpt __LC_EXIT_TIMER mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER lmg %r11,%r15,__PT_R11(%r11) @@ -498,7 +513,7 @@ io_done: # # There is work todo, find out in which context we have been interrupted: -# 1) if we return to user space we can do all _TIF_WORK_INT work +# 1) if we return to user space we can do all _TIF_WORK work # 2) if we return to kernel code and kvm is enabled check if we need to # modify the psw to leave SIE # 3) if we return to kernel code and preemptive scheduling is enabled check @@ -535,7 +550,6 @@ io_work: # io_work_user: lg %r1,__LC_KERNEL_STACK - aghi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE) mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) la %r11,STACK_FRAME_OVERHEAD(%r1) @@ -543,11 +557,9 @@ io_work_user: # # One of the work bits is on. Find out which one. -# Checked are: _TIF_SIGPENDING, _TIF_NOTIFY_RESUME, _TIF_NEED_RESCHED -# and _TIF_MCCK_PENDING # io_work_tif: - tm __TI_flags+7(%r12),_TIF_MCCK_PENDING + tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING jo io_mcck_pending tm __TI_flags+7(%r12),_TIF_NEED_RESCHED jo io_reschedule @@ -555,10 +567,12 @@ io_work_tif: jo io_sigpending tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME jo io_notify_resume + tm __LC_CPU_FLAGS+7,_CIF_ASCE + jo io_uaccess j io_return # beware of critical section cleanup # -# _TIF_MCCK_PENDING is set, call handler +# _CIF_MCCK_PENDING is set, call handler # io_mcck_pending: # TRACE_IRQS_ON already done at io_return @@ -567,6 +581,14 @@ io_mcck_pending: j io_return # +# _CIF_ASCE is set, load user space asce +# +io_uaccess: + ni __LC_CPU_FLAGS+7,255-_CIF_ASCE + lctlg %c1,%c1,__LC_USER_ASCE # load primary asce + j io_return + +# # _TIF_NEED_RESCHED is set, call schedule # io_reschedule: @@ -605,14 +627,14 @@ io_notify_resume: * External interrupt handler routine */ ENTRY(ext_int_handler) - stck __LC_INT_CLOCK + STCK __LC_INT_CLOCK stpt __LC_ASYNC_ENTER_TIMER stmg %r8,%r15,__LC_SAVE_AREA_ASYNC lg %r10,__LC_LAST_BREAK lg %r12,__LC_THREAD_INFO larl %r13,system_call lmg %r8,%r9,__LC_EXT_OLD_PSW - HANDLE_SIE_INTERCEPT %r14 + HANDLE_SIE_INTERCEPT %r14,3 SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT tmhh %r8,0x0001 # interrupting from user ? 
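The io_loop added above trades a PSW round trip for a tpi poll: once do_IRQ returns, the handler checks MACHINE_FLAG_LPAR and, under LPAR, uses "tpi 0" to see whether another I/O interruption is already pending; if so it refreshes the interruption code and branches back to io_loop instead of leaving through io_restore only to take a fresh interrupt immediately. A self-contained sketch of that control flow; running_on_lpar(), tpi(), do_irq() and the pending counter are invented stubs.

        #include <stdbool.h>
        #include <stdio.h>

        enum irq_kind { IO_INTERRUPT, THIN_INTERRUPT };

        /* invented, deterministic stand-ins for hardware state: pretend
         * two more interruptions are already pending when the first one
         * is handled */
        static int pending = 3;
        static bool running_on_lpar(void) { return true; }
        static bool adapter_interrupt(void) { return false; }
        static bool tpi(void) { return --pending > 0; } /* models "tpi 0" */

        static void do_irq(enum irq_kind kind)
        {
                printf("do_IRQ(%s)\n",
                       kind == THIN_INTERRUPT ? "THIN" : "IO");
        }

        int main(void)
        {
                for (;;) {
                        /* bit 0x80 of the stored interruption code
                         * selects the adapter (thin) interrupt path */
                        do_irq(adapter_interrupt() ? THIN_INTERRUPT
                                                   : IO_INTERRUPT);
                        if (!running_on_lpar())
                                break;          /* j io_return */
                        if (!tpi())
                                break;          /* nothing pending */
                        /* j io_loop: handle it without another PSW swap */
                }
                return 0;
        }
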
jz ext_skip @@ -622,22 +644,39 @@ ext_skip: stmg %r0,%r7,__PT_R0(%r11) mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC stmg %r8,%r9,__PT_PSW(%r11) + lghi %r1,__LC_EXT_PARAMS2 + mvc __PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR + mvc __PT_INT_PARM(4,%r11),__LC_EXT_PARAMS + mvc __PT_INT_PARM_LONG(8,%r11),0(%r1) + xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) TRACE_IRQS_OFF - lghi %r1,4096 + xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) lgr %r2,%r11 # pass pointer to pt_regs - llgf %r3,__LC_CPU_ADDRESS # get cpu address + interruption code - llgf %r4,__LC_EXT_PARAMS # get external parameter - lg %r5,__LC_EXT_PARAMS2-4096(%r1) # get 64 bit external parameter - brasl %r14,do_extint + lghi %r3,EXT_INTERRUPT + brasl %r14,do_IRQ j io_return +/* + * Load idle PSW. The second "half" of this function is in cleanup_idle. + */ +ENTRY(psw_idle) + stg %r3,__SF_EMPTY(%r15) + larl %r1,psw_idle_lpsw+4 + stg %r1,__SF_EMPTY+8(%r15) + STCK __CLOCK_IDLE_ENTER(%r2) + stpt __TIMER_IDLE_ENTER(%r2) +psw_idle_lpsw: + lpswe __SF_EMPTY(%r15) + br %r14 +psw_idle_end: + __critical_end: /* * Machine check handler routines */ ENTRY(mcck_int_handler) - stck __LC_MCCK_CLOCK + STCK __LC_MCCK_CLOCK la %r1,4095 # revalidate r1 spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # revalidate cpu timer lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs @@ -645,7 +684,7 @@ ENTRY(mcck_int_handler) lg %r12,__LC_THREAD_INFO larl %r13,system_call lmg %r8,%r9,__LC_MCK_OLD_PSW - HANDLE_SIE_INTERCEPT %r14 + HANDLE_SIE_INTERCEPT %r14,4 tm __LC_MCCK_CODE,0x80 # system damage? jo mcck_panic # yes -> rest of mcck code invalid lghi %r14,__LC_CPU_TIMER_SAVE_AREA @@ -672,22 +711,23 @@ ENTRY(mcck_int_handler) UPDATE_VTIME %r14,__LC_MCCK_ENTER_TIMER LAST_BREAK %r14 mcck_skip: - lghi %r14,__LC_GPREGS_SAVE_AREA - mvc __PT_R0(128,%r11),0(%r14) + lghi %r14,__LC_GPREGS_SAVE_AREA+64 + stmg %r0,%r7,__PT_R0(%r11) + mvc __PT_R8(64,%r11),0(%r14) stmg %r8,%r9,__PT_PSW(%r11) + xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) lgr %r2,%r11 # pass pointer to pt_regs brasl %r14,s390_do_machine_check tm __PT_PSW+1(%r11),0x01 # returning to user ? jno mcck_return lg %r1,__LC_KERNEL_STACK # switch to kernel stack - aghi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE) mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) la %r11,STACK_FRAME_OVERHEAD(%r1) lgr %r15,%r1 ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off - tm __TI_flags+7(%r12),_TIF_MCCK_PENDING + tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING jno mcck_return TRACE_IRQS_OFF brasl %r14,s390_handle_mcck @@ -696,7 +736,6 @@ mcck_return: lg %r14,__LC_VDSO_PER_CPU lmg %r0,%r10,__PT_R0(%r11) mvc __LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW - ni __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ? 
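psw_idle above timestamps the idle entry (STCK/stpt) and then loads an enabled-wait PSW; an interrupt can arrive at any instruction in that window, which is why psw_idle and psw_idle_end were added to the cleanup table. The cleanup_idle routine (31-bit version earlier in this patch, 64-bit version further down) repairs the bookkeeping. A C sketch of that repair, assuming a structure shaped like the __CLOCK_IDLE_ENTER/_EXIT and __TIMER_IDLE_ENTER/_EXIT fields; cleanup_idle_model() is illustrative only.

        #include <stdint.h>

        /* assumed shape, mirroring the enter/exit offsets used above */
        struct idle_data_model {
                uint64_t clock_enter, clock_exit;
                uint64_t timer_enter, timer_exit;
        };

        /* int_clock/enter_timer come from the interrupting event's
         * lowcore fields (__LC_INT_CLOCK or __LC_MCCK_CLOCK and
         * friends); stck_done says whether the interrupted PSW already
         * lies past psw_idle's stck/stpt pair */
        static void cleanup_idle_model(struct idle_data_model *d,
                                       uint64_t int_clock,
                                       uint64_t enter_timer, int stck_done)
        {
                d->clock_exit = int_clock;  /* interrupt time = idle exit */
                d->timer_exit = enter_timer;
                if (!stck_done) {
                        /* interrupted before the timestamps were taken:
                         * force enter == exit so zero idle time gets
                         * accounted */
                        d->clock_enter = d->clock_exit;
                        d->timer_enter = d->timer_exit;
                }
                /* steal and system time are then updated from these
                 * values, and the return PSW has its irq-enable and
                 * wait bits cleared */
        }
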
jno 0f stpt __LC_EXIT_TIMER @@ -713,68 +752,32 @@ mcck_panic: 0: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) j mcck_skip -/* - * Restart interruption handler, kick starter for additional CPUs - */ -#ifdef CONFIG_SMP - __CPUINIT -ENTRY(restart_int_handler) - basr %r1,0 -restart_base: - spt restart_vtime-restart_base(%r1) - stck __LC_LAST_UPDATE_CLOCK - mvc __LC_LAST_UPDATE_TIMER(8),restart_vtime-restart_base(%r1) - mvc __LC_EXIT_TIMER(8),restart_vtime-restart_base(%r1) - lghi %r10,__LC_GPREGS_SAVE_AREA - lg %r15,120(%r10) # load ksp - lghi %r10,__LC_CREGS_SAVE_AREA - lctlg %c0,%c15,0(%r10) # get new ctl regs - lghi %r10,__LC_AREGS_SAVE_AREA - lam %a0,%a15,0(%r10) - lmg %r6,%r15,__SF_GPRS(%r15)# load registers from clone - lg %r1,__LC_THREAD_INFO - mvc __LC_USER_TIMER(8),__TI_user_timer(%r1) - mvc __LC_SYSTEM_TIMER(8),__TI_system_timer(%r1) - xc __LC_STEAL_TIMER(8),__LC_STEAL_TIMER - ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off - brasl %r14,start_secondary - .align 8 -restart_vtime: - .long 0x7fffffff,0xffffffff - .previous -#else -/* - * If we do not run with SMP enabled, let the new CPU crash ... - */ -ENTRY(restart_int_handler) - basr %r1,0 -restart_base: - lpswe restart_crash-restart_base(%r1) - .align 8 -restart_crash: - .long 0x000a0000,0x00000000,0x00000000,0x00000000 -restart_go: -#endif - # # PSW restart interrupt handler # -ENTRY(psw_restart_int_handler) +ENTRY(restart_int_handler) stg %r15,__LC_SAVE_AREA_RESTART - larl %r15,restart_stack # load restart stack - lg %r15,0(%r15) + lg %r15,__LC_RESTART_STACK aghi %r15,-__PT_SIZE # create pt_regs on stack + xc 0(__PT_SIZE,%r15),0(%r15) stmg %r0,%r14,__PT_R0(%r15) mvc __PT_R15(8,%r15),__LC_SAVE_AREA_RESTART mvc __PT_PSW(16,%r15),__LC_RST_OLD_PSW # store restart old psw - aghi %r15,-STACK_FRAME_OVERHEAD - xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) - brasl %r14,do_restart - larl %r14,restart_psw_crash # load disabled wait PSW if - lpswe 0(%r14) # do_restart returns - .align 8 -restart_psw_crash: - .quad 0x0002000080000000,0x0000000000000000 + restart_psw_crash + aghi %r15,-STACK_FRAME_OVERHEAD # create stack frame on stack + xc 0(STACK_FRAME_OVERHEAD,%r15),0(%r15) + lg %r1,__LC_RESTART_FN # load fn, parm & source cpu + lg %r2,__LC_RESTART_DATA + lg %r3,__LC_RESTART_SOURCE + ltgr %r3,%r3 # test source cpu address + jm 1f # negative -> skip source stop +0: sigp %r4,%r3,SIGP_SENSE # sigp sense to source cpu + brc 10,0b # wait for status stored +1: basr %r14,%r1 # call function + stap __SF_EMPTY(%r15) # store cpu address + llgh %r3,__SF_EMPTY(%r15) +2: sigp %r4,%r3,SIGP_STOP # sigp stop to current cpu + brc 2,2b +3: j 3b .section .kprobes.text, "ax" @@ -785,14 +788,12 @@ restart_psw_crash: * Setup a pt_regs so that show_trace can provide a good call trace. 
*/ stack_overflow: - lg %r11,__LC_PANIC_STACK # change to panic stack - aghi %r11,-__PT_SIZE # create pt_regs + lg %r15,__LC_PANIC_STACK # change to panic stack + la %r11,STACK_FRAME_OVERHEAD(%r15) stmg %r0,%r7,__PT_R0(%r11) stmg %r8,%r9,__PT_PSW(%r11) mvc __PT_R8(64,%r11),0(%r14) stg %r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2 - lgr %r15,%r11 - aghi %r15,-STACK_FRAME_OVERHEAD xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) lgr %r2,%r11 # pass pointer to pt_regs jg kernel_stack_overflow @@ -808,6 +809,8 @@ cleanup_table: .quad io_tif .quad io_restore .quad io_done + .quad psw_idle + .quad psw_idle_end cleanup_critical: clg %r9,BASED(cleanup_table) # system_call @@ -826,6 +829,10 @@ cleanup_critical: jl cleanup_io_tif clg %r9,BASED(cleanup_table+56) # io_done jl cleanup_io_restore + clg %r9,BASED(cleanup_table+64) # psw_idle + jl 0f + clg %r9,BASED(cleanup_table+72) # psw_idle_end + jl cleanup_idle 0: br %r14 @@ -870,15 +877,16 @@ cleanup_system_call: mvc __TI_last_break(8,%r12),16(%r11) 0: # set up saved register r11 lg %r15,__LC_KERNEL_STACK - aghi %r15,-__PT_SIZE - stg %r15,24(%r11) # r11 pt_regs pointer + la %r9,STACK_FRAME_OVERHEAD(%r15) + stg %r9,24(%r11) # r11 pt_regs pointer # fill pt_regs - mvc __PT_R8(64,%r15),__LC_SAVE_AREA_SYNC - stmg %r0,%r7,__PT_R0(%r15) - mvc __PT_PSW(16,%r15),__LC_SVC_OLD_PSW - mvc __PT_INT_CODE(4,%r15),__LC_SVC_ILC + mvc __PT_R8(64,%r9),__LC_SAVE_AREA_SYNC + stmg %r0,%r7,__PT_R0(%r9) + mvc __PT_PSW(16,%r9),__LC_SVC_OLD_PSW + mvc __PT_INT_CODE(4,%r9),__LC_SVC_ILC + xc __PT_FLAGS(8,%r9),__PT_FLAGS(%r9) + mvi __PT_FLAGS+7(%r9),_PIF_SYSCALL # setup saved register r15 - aghi %r15,-STACK_FRAME_OVERHEAD stg %r15,56(%r11) # r15 stack pointer # set new psw address and exit larl %r9,sysc_do_svc @@ -915,7 +923,6 @@ cleanup_io_restore: je 0f lg %r9,24(%r11) # get saved r11 pointer to pt_regs mvc __LC_RETURN_PSW(16),__PT_PSW(%r9) - ni __LC_RETURN_PSW+1,0xfd # clear wait state bit mvc 0(64,%r11),__PT_R8(%r9) lmg %r0,%r7,__PT_R0(%r9) 0: lmg %r8,%r9,__LC_RETURN_PSW @@ -923,6 +930,37 @@ cleanup_io_restore: cleanup_io_restore_insn: .quad io_done - 4 +cleanup_idle: + # copy interrupt clock & cpu timer + mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK + mvc __TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER + cghi %r11,__LC_SAVE_AREA_ASYNC + je 0f + mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK + mvc __TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER +0: # check if stck & stpt have been executed + clg %r9,BASED(cleanup_idle_insn) + jhe 1f + mvc __CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2) + mvc __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2) +1: # account system time going idle + lg %r9,__LC_STEAL_TIMER + alg %r9,__CLOCK_IDLE_ENTER(%r2) + slg %r9,__LC_LAST_UPDATE_CLOCK + stg %r9,__LC_STEAL_TIMER + mvc __LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2) + lg %r9,__LC_SYSTEM_TIMER + alg %r9,__LC_LAST_UPDATE_TIMER + slg %r9,__TIMER_IDLE_ENTER(%r2) + stg %r9,__LC_SYSTEM_TIMER + mvc __LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2) + # prepare return psw + nihh %r8,0xfcfd # clear irq & wait state bits + lg %r9,48(%r11) # return from psw_idle + br %r14 +cleanup_idle_insn: + .quad psw_idle_lpsw + /* * Integer constants */ @@ -933,7 +971,7 @@ cleanup_io_restore_insn: .quad __critical_end - __critical_start -#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) +#if IS_ENABLED(CONFIG_KVM) /* * sie64a calling convention: * %r2 pointer to sie control block @@ -943,54 +981,50 @@ ENTRY(sie64a) stmg %r6,%r14,__SF_GPRS(%r15) # save kernel registers stg %r2,__SF_EMPTY(%r15) # save control 
block pointer stg %r3,__SF_EMPTY+8(%r15) # save guest register save area - xc __SF_EMPTY+16(8,%r15),__SF_EMPTY+16(%r15) # host id == 0 + xc __SF_EMPTY+16(16,%r15),__SF_EMPTY+16(%r15) # host id & reason lmg %r0,%r13,0(%r3) # load guest gprs 0-13 - lg %r14,__LC_THREAD_INFO # pointer thread_info struct - oi __TI_flags+6(%r14),_TIF_SIE>>8 -sie_loop: - lg %r14,__LC_THREAD_INFO # pointer thread_info struct - tm __TI_flags+7(%r14),_TIF_EXIT_SIE - jnz sie_exit lg %r14,__LC_GMAP # get gmap pointer ltgr %r14,%r14 jz sie_gmap lctlg %c1,%c1,__GMAP_ASCE(%r14) # load primary asce sie_gmap: lg %r14,__SF_EMPTY(%r15) # get control block pointer - SPP __SF_EMPTY(%r15) # set guest id + oi __SIE_PROG0C+3(%r14),1 # we are going into SIE now + tm __SIE_PROG20+3(%r14),1 # last exit... + jnz sie_done + LPP __SF_EMPTY(%r15) # set guest id sie 0(%r14) sie_done: - SPP __SF_EMPTY+16(%r15) # set host id - lg %r14,__LC_THREAD_INFO # pointer thread_info struct -sie_exit: + LPP __SF_EMPTY+16(%r15) # set host id + ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE lctlg %c1,%c1,__LC_USER_ASCE # load primary asce - ni __TI_flags+6(%r14),255-(_TIF_SIE>>8) +# some program checks are suppressing. C code (e.g. do_protection_exception) +# will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other +# instructions between sie64a and sie_done should not cause program +# interrupts. So lets use a nop (47 00 00 00) as a landing pad. +# See also HANDLE_SIE_INTERCEPT +rewind_pad: + nop 0 + .globl sie_exit +sie_exit: lg %r14,__SF_EMPTY+8(%r15) # load guest register save area stmg %r0,%r13,0(%r14) # save guest gprs 0-13 lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers - lghi %r2,0 + lg %r2,__SF_EMPTY+24(%r15) # return exit reason code br %r14 sie_fault: - lctlg %c1,%c1,__LC_USER_ASCE # load primary asce - lg %r14,__LC_THREAD_INFO # pointer thread_info struct - ni __TI_flags+6(%r14),255-(_TIF_SIE>>8) - lg %r14,__SF_EMPTY+8(%r15) # load guest register save area - stmg %r0,%r13,0(%r14) # save guest gprs 0-13 - lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers - lghi %r2,-EFAULT - br %r14 + lghi %r14,-EFAULT + stg %r14,__SF_EMPTY+24(%r15) # set exit reason code + j sie_exit .align 8 -.Lsie_loop: - .quad sie_loop -.Lsie_length: - .quad sie_done - sie_loop -.Lhost_id: - .quad 0 - - .section __ex_table,"a" - .quad sie_loop,sie_fault - .previous +.Lsie_critical: + .quad sie_gmap +.Lsie_critical_length: + .quad sie_done - sie_gmap + + EX_TABLE(rewind_pad,sie_fault) + EX_TABLE(sie_exit,sie_fault) #endif .section .rodata, "a" @@ -1003,6 +1037,7 @@ sys_call_table: #ifdef CONFIG_COMPAT #define SYSCALL(esa,esame,emu) .long emu + .globl sys_call_table_emu sys_call_table_emu: #include "syscalls.S" #undef SYSCALL diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c index 78bdf0e5dff..54d6493c4a5 100644 --- a/arch/s390/kernel/ftrace.c +++ b/arch/s390/kernel/ftrace.c @@ -15,12 +15,7 @@ #include <linux/kprobes.h> #include <trace/syscall.h> #include <asm/asm-offsets.h> - -#ifdef CONFIG_64BIT -#define MCOUNT_OFFSET_RET 12 -#else -#define MCOUNT_OFFSET_RET 22 -#endif +#include "entry.h" #ifdef CONFIG_DYNAMIC_FTRACE @@ -135,9 +130,8 @@ int ftrace_update_ftrace_func(ftrace_func_t func) return 0; } -int __init ftrace_dyn_arch_init(void *data) +int __init ftrace_dyn_arch_init(void) { - *(unsigned long *) data = 0; return 0; } @@ -155,14 +149,14 @@ unsigned long __kprobes prepare_ftrace_return(unsigned long parent, if (unlikely(atomic_read(¤t->tracing_graph_pause))) goto out; - if (ftrace_push_return_trace(parent, ip, 
&trace.depth, 0) == -EBUSY) - goto out; - trace.func = (ip & PSW_ADDR_INSN) - MCOUNT_OFFSET_RET; + ip = (ip & PSW_ADDR_INSN) - MCOUNT_INSN_SIZE; + trace.func = ip; + trace.depth = current->curr_ret_stack + 1; /* Only trace if the calling function expects to. */ - if (!ftrace_graph_entry(&trace)) { - current->curr_ret_stack--; + if (!ftrace_graph_entry(&trace)) + goto out; + if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY) goto out; - } parent = (unsigned long) return_to_handler; out: return parent; @@ -182,7 +176,7 @@ int ftrace_enable_ftrace_graph_caller(void) offset = ((void *) prepare_ftrace_return - (void *) ftrace_graph_caller) / 2; - return probe_kernel_write(ftrace_graph_caller + 2, + return probe_kernel_write((void *) ftrace_graph_caller + 2, &offset, sizeof(offset)); } @@ -190,7 +184,7 @@ int ftrace_disable_ftrace_graph_caller(void) { static unsigned short offset = 0x0002; - return probe_kernel_write(ftrace_graph_caller + 2, + return probe_kernel_write((void *) ftrace_graph_caller + 2, &offset, sizeof(offset)); } diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S index c27a0727f93..e88d35d7495 100644 --- a/arch/s390/kernel/head.S +++ b/arch/s390/kernel/head.S @@ -1,5 +1,5 @@ /* - * Copyright IBM Corp. 1999,2010 + * Copyright IBM Corp. 1999, 2010 * * Author(s): Hartmut Penner <hp@de.ibm.com> * Martin Schwidefsky <schwidefsky@de.ibm.com> @@ -34,125 +34,7 @@ #endif __HEAD -#ifndef CONFIG_IPL - .org 0 - .long 0x00080000,0x80000000+startup # Just a restart PSW -#else -#ifdef CONFIG_IPL_TAPE -#define IPL_BS 1024 - .org 0 - .long 0x00080000,0x80000000+iplstart # The first 24 bytes are loaded - .long 0x27000000,0x60000001 # by ipl to addresses 0-23. - .long 0x02000000,0x20000000+IPL_BS # (a PSW and two CCWs). - .long 0x00000000,0x00000000 # external old psw - .long 0x00000000,0x00000000 # svc old psw - .long 0x00000000,0x00000000 # program check old psw - .long 0x00000000,0x00000000 # machine check old psw - .long 0x00000000,0x00000000 # io old psw - .long 0x00000000,0x00000000 - .long 0x00000000,0x00000000 - .long 0x00000000,0x00000000 - .long 0x000a0000,0x00000058 # external new psw - .long 0x000a0000,0x00000060 # svc new psw - .long 0x000a0000,0x00000068 # program check new psw - .long 0x000a0000,0x00000070 # machine check new psw - .long 0x00080000,0x80000000+.Lioint # io new psw - - .org 0x100 -# -# subroutine for loading from tape -# Parameters: -# R1 = device number -# R2 = load address -.Lloader: - st %r14,.Lldret - la %r3,.Lorbread # r3 = address of orb - la %r5,.Lirb # r5 = address of irb - st %r2,.Lccwread+4 # initialize CCW data addresses - lctl %c6,%c6,.Lcr6 - slr %r2,%r2 -.Lldlp: - la %r6,3 # 3 retries -.Lssch: - ssch 0(%r3) # load chunk of IPL_BS bytes - bnz .Llderr -.Lw4end: - bas %r14,.Lwait4io - tm 8(%r5),0x82 # do we have a problem ? - bnz .Lrecov - slr %r7,%r7 - icm %r7,3,10(%r5) # get residual count - lcr %r7,%r7 - la %r7,IPL_BS(%r7) # IPL_BS-residual=#bytes read - ar %r2,%r7 # add to total size - tm 8(%r5),0x01 # found a tape mark ? - bnz .Ldone - l %r0,.Lccwread+4 # update CCW data addresses - ar %r0,%r7 - st %r0,.Lccwread+4 - b .Lldlp -.Ldone: - l %r14,.Lldret - br %r14 # r2 contains the total size -.Lrecov: - bas %r14,.Lsense # do the sensing - bct %r6,.Lssch # dec. retry count & branch - b .Llderr -# -# Sense subroutine -# -.Lsense: - st %r14,.Lsnsret - la %r7,.Lorbsense - ssch 0(%r7) # start sense command - bnz .Llderr - bas %r14,.Lwait4io - l %r14,.Lsnsret - tm 8(%r5),0x82 # do we have a problem ? 
- bnz .Llderr - br %r14 -# -# Wait for interrupt subroutine -# -.Lwait4io: - lpsw .Lwaitpsw -.Lioint: - c %r1,0xb8 # compare subchannel number - bne .Lwait4io - tsch 0(%r5) - slr %r0,%r0 - tm 8(%r5),0x82 # do we have a problem ? - bnz .Lwtexit - tm 8(%r5),0x04 # got device end ? - bz .Lwait4io -.Lwtexit: - br %r14 -.Llderr: - lpsw .Lcrash - - .align 8 -.Lorbread: - .long 0x00000000,0x0080ff00,.Lccwread - .align 8 -.Lorbsense: - .long 0x00000000,0x0080ff00,.Lccwsense - .align 8 -.Lccwread: - .long 0x02200000+IPL_BS,0x00000000 -.Lccwsense: - .long 0x04200001,0x00000000 -.Lwaitpsw: - .long 0x020a0000,0x80000000+.Lioint - -.Lirb: .long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 -.Lcr6: .long 0xff000000 - .align 8 -.Lcrash:.long 0x000a0000,0x00000000 -.Lldret:.long 0 -.Lsnsret: .long 0 -#endif /* CONFIG_IPL_TAPE */ -#ifdef CONFIG_IPL_VM #define IPL_BS 0x730 .org 0 .long 0x00080000,0x80000000+iplstart # The first 24 bytes are loaded @@ -170,7 +52,7 @@ __HEAD .long 0x02000370,0x60000050 # the channel program the PSW .long 0x020003c0,0x60000050 # at location 0 is loaded. .long 0x02000410,0x60000050 # Initial processing starts - .long 0x02000460,0x60000050 # at 0xf0 = iplstart. + .long 0x02000460,0x60000050 # at 0x200 = iplstart. .long 0x020004b0,0x60000050 .long 0x02000500,0x60000050 .long 0x02000550,0x60000050 @@ -180,11 +62,54 @@ __HEAD .long 0x02000690,0x60000050 .long 0x020006e0,0x20000050 - .org 0xf0 + .org 0x200 +# +# subroutine to set architecture mode +# +.Lsetmode: +#ifdef CONFIG_64BIT + mvi __LC_AR_MODE_ID,1 # set esame flag + slr %r0,%r0 # set cpuid to zero + lhi %r1,2 # mode 2 = esame (dump) + sigp %r1,%r0,0x12 # switch to esame mode + bras %r13,0f + .fill 16,4,0x0 +0: lmh %r0,%r15,0(%r13) # clear high-order half of gprs + sam31 # switch to 31 bit addressing mode +#else + mvi __LC_AR_MODE_ID,0 # set ESA flag (mode 0) +#endif + br %r14 + +# +# subroutine to wait for end I/O +# +.Lirqwait: +#ifdef CONFIG_64BIT + mvc 0x1f0(16),.Lnewpsw # set up IO interrupt psw + lpsw .Lwaitpsw +.Lioint: + br %r14 + .align 8 +.Lnewpsw: + .quad 0x0000000080000000,.Lioint +#else + mvc 0x78(8),.Lnewpsw # set up IO interrupt psw + lpsw .Lwaitpsw +.Lioint: + br %r14 + .align 8 +.Lnewpsw: + .long 0x00080000,0x80000000+.Lioint +#endif +.Lwaitpsw: + .long 0x020a0000,0x80000000+.Lioint + # # subroutine for loading cards from the reader # .Lloader: + la %r4,0(%r14) la %r3,.Lorb # r2 = address of orb into r2 la %r5,.Lirb # r4 = address of irb la %r6,.Lccws @@ -201,9 +126,7 @@ __HEAD ssch 0(%r3) # load chunk of 1600 bytes bnz .Llderr .Lwait4irq: - mvc 0x78(8),.Lnewpsw # set up IO interrupt psw - lpsw .Lwaitpsw -.Lioint: + bas %r14,.Lirqwait c %r1,0xb8 # compare subchannel number bne .Lwait4irq tsch 0(%r5) @@ -222,7 +145,7 @@ __HEAD sr %r0,%r3 # #ccws*80-residual=#bytes read ar %r2,%r0 - br %r14 # r2 contains the total size + br %r4 # r2 contains the total size .Lcont: ahi %r2,0x640 # add 0x640 to total size @@ -246,19 +169,15 @@ __HEAD .Lloadp:.long 0,0 .align 8 .Lcrash:.long 0x000a0000,0x00000000 -.Lnewpsw: - .long 0x00080000,0x80000000+.Lioint -.Lwaitpsw: - .long 0x020a0000,0x80000000+.Lioint .align 8 .Lccws: .rept 19 .long 0x02600050,0x00000000 .endr .long 0x02200050,0x00000000 -#endif /* CONFIG_IPL_VM */ iplstart: + bas %r14,.Lsetmode # Immediately switch to 64 bit mode lh %r1,0xb8 # test if subchannel number bct %r1,.Lnoload # is valid l %r1,0xb8 # load ipl subchannel number @@ -325,12 +244,11 @@ iplstart: clc 0(3,%r2),.L_eof bz .Lagain2 -#ifdef CONFIG_IPL_VM # # reset files in VM reader # - stidp __LC_SAVE_AREA_SYNC # 
store cpuid - tm __LC_SAVE_AREA_SYNC,0xff# running VM ? + stidp .Lcpuid # store cpuid + tm .Lcpuid,0xff # running VM ? bno .Lnoreset la %r2,.Lreset lhi %r3,26 @@ -342,24 +260,14 @@ iplstart: tm 31(%r5),0xff # bits is set in the schib bz .Lnoreset .Lwaitforirq: - mvc 0x78(8),.Lrdrnewpsw # set up IO interrupt psw -.Lwaitrdrirq: - lpsw .Lrdrwaitpsw -.Lrdrint: + bas %r14,.Lirqwait # wait for IO interrupt c %r1,0xb8 # compare subchannel number - bne .Lwaitrdrirq + bne .Lwaitforirq la %r5,.Lirb tsch 0(%r5) .Lnoreset: b .Lnoload - .align 8 -.Lrdrnewpsw: - .long 0x00080000,0x80000000+.Lrdrint -.Lrdrwaitpsw: - .long 0x020a0000,0x80000000+.Lrdrint -#endif - # # everything loaded, go for it # @@ -375,8 +283,8 @@ iplstart: .byte 0xc8,0xd6,0xd3,0xc4 # "change rdr all keep nohold" .L_eof: .long 0xc5d6c600 /* C'EOF' */ .L_hdr: .long 0xc8c4d900 /* C'HDR' */ - -#endif /* CONFIG_IPL */ + .align 8 +.Lcpuid:.fill 8,1,0 # # SALIPL loader support. Based on a patch by Rob van der Heij. @@ -386,6 +294,7 @@ iplstart: .org 0x800 ENTRY(start) stm %r0,%r15,0x07b0 # store registers + bas %r14,.Lsetmode # Immediately switch to 64 bit mode basr %r12,%r0 .base: l %r11,.parm @@ -466,36 +375,53 @@ ENTRY(startup) ENTRY(startup_kdump) j .Lep_startup_kdump .Lep_startup_normal: +#ifdef CONFIG_64BIT + mvi __LC_AR_MODE_ID,1 # set esame flag + slr %r0,%r0 # set cpuid to zero + lhi %r1,2 # mode 2 = esame (dump) + sigp %r1,%r0,0x12 # switch to esame mode + bras %r13,0f + .fill 16,4,0x0 +0: lmh %r0,%r15,0(%r13) # clear high-order half of gprs + sam31 # switch to 31 bit addressing mode +#else + mvi __LC_AR_MODE_ID,0 # set ESA flag (mode 0) +#endif basr %r13,0 # get base .LPG0: xc 0x200(256),0x200 # partially clear lowcore xc 0x300(256),0x300 xc 0xe00(256),0xe00 stck __LC_LAST_UPDATE_CLOCK - spt 5f-.LPG0(%r13) - mvc __LC_LAST_UPDATE_TIMER(8),5f-.LPG0(%r13) + spt 6f-.LPG0(%r13) + mvc __LC_LAST_UPDATE_TIMER(8),6f-.LPG0(%r13) + xc __LC_STFL_FAC_LIST(8),__LC_STFL_FAC_LIST #ifndef CONFIG_MARCH_G5 # check capabilities against MARCH_{G5,Z900,Z990,Z9_109,Z10} - xc __LC_STFL_FAC_LIST(8),__LC_STFL_FAC_LIST .insn s,0xb2b10000,__LC_STFL_FAC_LIST # store facility list tm __LC_STFL_FAC_LIST,0x01 # stfle available ? jz 0f - la %r0,0 + la %r0,1 .insn s,0xb2b00000,__LC_STFL_FAC_LIST # store facility list extended -0: l %r0,__LC_STFL_FAC_LIST - n %r0,2f+8-.LPG0(%r13) - cl %r0,2f+8-.LPG0(%r13) - jne 1f - l %r0,__LC_STFL_FAC_LIST+4 - n %r0,2f+12-.LPG0(%r13) - cl %r0,2f+12-.LPG0(%r13) - je 3f -1: l %r15,.Lstack-.LPG0(%r13) + # verify if all required facilities are supported by the machine +0: la %r1,__LC_STFL_FAC_LIST + la %r2,3f+8-.LPG0(%r13) + l %r3,0(%r2) +1: l %r0,0(%r1) + n %r0,4(%r2) + cl %r0,4(%r2) + jne 2f + la %r1,4(%r1) + la %r2,4(%r2) + ahi %r3,-1 + jnz 1b + j 4f +2: l %r15,.Lstack-.LPG0(%r13) ahi %r15,-96 la %r2,.Lals_string-.LPG0(%r13) l %r3,.Lsclp_print-.LPG0(%r13) basr %r14,%r3 - lpsw 2f-.LPG0(%r13) # machine type not good enough, crash + lpsw 3f-.LPG0(%r13) # machine type not good enough, crash .Lals_string: .asciz "The Linux kernel requires more recent processor hardware" .Lsclp_print: @@ -503,54 +429,58 @@ ENTRY(startup_kdump) .Lstack: .long 0x8000 + (1<<(PAGE_SHIFT+THREAD_ORDER)) .align 16 -2: .long 0x000a0000,0x8badcccc +3: .long 0x000a0000,0x8badcccc + +# List of facilities that are required. If not all facilities are present +# the kernel will crash. Format is number of facility words with bits set, +# followed by the facility words. 
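The startup facility check rewritten above is now table driven: each required-facility constant starts with a word count N followed by N mask words, and the loop ANDs every stored facility word with its mask and compares for equality, falling into the "requires more recent processor hardware" disabled-wait path on the first mismatch. The equivalent check in C (a model; the real words sit in the lowcore at __LC_STFL_FAC_LIST):

        #include <stdint.h>

        /* mirrors the loop above: "l %r0,0(%r1); n %r0,4(%r2);
         * cl %r0,4(%r2); jne 2f" repeated over nwords facility words */
        static int facilities_ok(const uint32_t *stfl_list,
                                 const uint32_t *required, uint32_t nwords)
        {
                uint32_t i;

                for (i = 0; i < nwords; i++)
                        if ((stfl_list[i] & required[i]) != required[i])
                                return 0;  /* sclp message + disabled wait */
                return 1;                  /* continue with startup */
        }

The per-CONFIG_MARCH_* tables that follow supply exactly this count and these mask words.
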
+ #if defined(CONFIG_64BIT) -#if defined(CONFIG_MARCH_Z196) - .long 0xc100efe3, 0xf46c0000 +#if defined(CONFIG_MARCH_ZEC12) + .long 3, 0xc100eff2, 0xf46ce800, 0x00400000 +#elif defined(CONFIG_MARCH_Z196) + .long 2, 0xc100eff2, 0xf46c0000 #elif defined(CONFIG_MARCH_Z10) - .long 0xc100efe3, 0xf0680000 + .long 2, 0xc100eff2, 0xf0680000 #elif defined(CONFIG_MARCH_Z9_109) - .long 0xc100efc3, 0x00000000 + .long 1, 0xc100efc2 #elif defined(CONFIG_MARCH_Z990) - .long 0xc0002000, 0x00000000 + .long 1, 0xc0002000 #elif defined(CONFIG_MARCH_Z900) - .long 0xc0000000, 0x00000000 + .long 1, 0xc0000000 #endif #else -#if defined(CONFIG_MARCH_Z196) - .long 0x8100c880, 0x00000000 +#if defined(CONFIG_MARCH_ZEC12) + .long 1, 0x8100c880 +#elif defined(CONFIG_MARCH_Z196) + .long 1, 0x8100c880 #elif defined(CONFIG_MARCH_Z10) - .long 0x8100c880, 0x00000000 + .long 1, 0x8100c880 #elif defined(CONFIG_MARCH_Z9_109) - .long 0x8100c880, 0x00000000 + .long 1, 0x8100c880 #elif defined(CONFIG_MARCH_Z990) - .long 0x80002000, 0x00000000 + .long 1, 0x80002000 #elif defined(CONFIG_MARCH_Z900) - .long 0x80000000, 0x00000000 + .long 1, 0x80000000 #endif #endif -3: +4: #endif #ifdef CONFIG_64BIT - mvi __LC_AR_MODE_ID,1 # set esame flag - slr %r0,%r0 # set cpuid to zero - lhi %r1,2 # mode 2 = esame (dump) - sigp %r1,%r0,0x12 # switch to esame mode + /* Continue with 64bit startup code in head64.S */ sam64 # switch to 64 bit mode - larl %r13,4f - lmh %r0,%r15,0(%r13) # clear high-order half jg startup_continue -4: .fill 16,4,0x0 #else - mvi __LC_AR_MODE_ID,0 # set ESA flag (mode 0) - l %r13,4f-.LPG0(%r13) + /* Continue with 31bit startup code in head31.S */ + l %r13,5f-.LPG0(%r13) b 0(%r13) .align 8 -4: .long startup_continue +5: .long startup_continue #endif + .align 8 -5: .long 0x7fffffff,0xffffffff +6: .long 0x7fffffff,0xffffffff #include "head_kdump.S" diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S index d3f1ab7d90a..6dbe80983a2 100644 --- a/arch/s390/kernel/head31.S +++ b/arch/s390/kernel/head31.S @@ -1,7 +1,5 @@ /* - * arch/s390/kernel/head31.S - * - * Copyright (C) IBM Corp. 2005,2010 + * Copyright IBM Corp. 2005, 2010 * * Author(s): Hartmut Penner <hp@de.ibm.com> * Martin Schwidefsky <schwidefsky@de.ibm.com> @@ -61,7 +59,6 @@ ENTRY(startup_continue) .long 0 # cr13: home space segment table .long 0xc0000000 # cr14: machine check handling off .long 0 # cr15: linkage stack operations -.Lmchunk:.long memory_chunk .Lbss_bgn: .long __bss_start .Lbss_end: .long _end .Lparmaddr: .long PARMAREA @@ -80,10 +77,7 @@ ENTRY(startup_continue) ENTRY(_ehead) -#ifdef CONFIG_SHARED_KERNEL .org 0x100000 - 0x11000 # head.o ends at 0x11000 -#endif - # # startup-code, running in absolute addressing mode # diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S index 99348c0eaa4..d7c00507568 100644 --- a/arch/s390/kernel/head64.S +++ b/arch/s390/kernel/head64.S @@ -1,7 +1,5 @@ /* - * arch/s390/kernel/head64.S - * - * Copyright (C) IBM Corp. 1999,2010 + * Copyright IBM Corp. 
1999, 2010 * * Author(s): Hartmut Penner <hp@de.ibm.com> * Martin Schwidefsky <schwidefsky@de.ibm.com> @@ -61,7 +59,7 @@ ENTRY(startup_continue) .quad 0 # cr12: tracing off .quad 0 # cr13: home space segment table .quad 0xc0000000 # cr14: machine check handling off - .quad 0 # cr15: linkage stack operations + .quad .Llinkage_stack # cr15: linkage stack operations .Lpcmsk:.quad 0x0000000180000000 .L4malign:.quad 0xffffffffffc00000 .Lscan2g:.quad 0x80000000 + 0x20000 - 8 # 2GB + 128K - 8 @@ -69,19 +67,19 @@ ENTRY(startup_continue) .Lparmaddr: .quad PARMAREA .align 64 -.Lduct: .long 0,0,0,0,.Lduald,0,0,0 +.Lduct: .long 0,.Laste,.Laste,0,.Lduald,0,0,0 .long 0,0,0,0,0,0,0,0 +.Laste: .quad 0,0xffffffffffffffff,0,0,0,0,0,0 .align 128 .Lduald:.rept 8 .long 0x80000000,0,0,0 # invalid access-list entries .endr +.Llinkage_stack: + .long 0,0,0x89000000,0,0,0,0x8a000000,0 ENTRY(_ehead) -#ifdef CONFIG_SHARED_KERNEL .org 0x100000 - 0x11000 # head.o ends at 0x11000 -#endif - # # startup-code, running in absolute addressing mode # diff --git a/arch/s390/kernel/head_kdump.S b/arch/s390/kernel/head_kdump.S index e1ac3893e97..085a95eb315 100644 --- a/arch/s390/kernel/head_kdump.S +++ b/arch/s390/kernel/head_kdump.S @@ -5,6 +5,8 @@ * Author(s): Michael Holzheu <holzheu@linux.vnet.ibm.com> */ +#include <asm/sigp.h> + #define DATAMOVER_ADDR 0x4000 #define COPY_PAGE_ADDR 0x6000 @@ -19,7 +21,7 @@ .align 2 .Lep_startup_kdump: lhi %r1,2 # mode 2 = esame (dump) - sigp %r1,%r0,0x12 # Switch to esame mode + sigp %r1,%r0,SIGP_SET_ARCHITECTURE # Switch to esame mode sam64 # Switch to 64 bit addressing basr %r13,0 .Lbase: @@ -83,23 +85,10 @@ .align 2 startup_kdump_relocated: basr %r13,0 -0: - mvc 0(8,%r0),.Lrestart_psw-0b(%r13) # Setup restart PSW - mvc 464(16,%r0),.Lpgm_psw-0b(%r13) # Setup pgm check PSW - lhi %r1,1 # Start new kernel - diag %r1,%r1,0x308 # with diag 308 - -.Lno_diag308: # No diag 308 - sam31 # Switch to 31 bit addr mode - sr %r1,%r1 # Erase register r1 - sr %r2,%r2 # Erase register r2 - sigp %r1,%r2,0x12 # Switch to 31 bit arch mode - lpsw 0 # Start new kernel... +0: lpswe .Lrestart_psw-0b(%r13) # Start new kernel... .align 8 .Lrestart_psw: - .long 0x00080000,0x80000000 + startup -.Lpgm_psw: - .quad 0x0000000180000000,0x0000000000000000 + .Lno_diag308 + .quad 0x0000000080000000,0x0000000000000000 + startup #else .align 2 .Lep_startup_kdump: diff --git a/arch/s390/kernel/init_task.c b/arch/s390/kernel/init_task.c deleted file mode 100644 index 4d1c9fb0b54..00000000000 --- a/arch/s390/kernel/init_task.c +++ /dev/null @@ -1,38 +0,0 @@ -/* - * arch/s390/kernel/init_task.c - * - * S390 version - * - * Derived from "arch/i386/kernel/init_task.c" - */ - -#include <linux/mm.h> -#include <linux/fs.h> -#include <linux/module.h> -#include <linux/sched.h> -#include <linux/init_task.h> -#include <linux/mqueue.h> - -#include <asm/uaccess.h> -#include <asm/pgtable.h> - -static struct signal_struct init_signals = INIT_SIGNALS(init_signals); -static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); -/* - * Initial thread structure. - * - * We need to make sure that this is THREAD_SIZE aligned due to the - * way process stacks are handled. This is done by having a special - * "init_task" linker map entry.. - */ -union thread_union init_thread_union __init_task_data = - { INIT_THREAD_INFO(init_task) }; - -/* - * Initial task structure. 
- * - * All other task structs will be allocated on slabs in fork.c - */ -struct task_struct init_task = INIT_TASK(init_task); - -EXPORT_SYMBOL(init_task); diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index affa8e68124..633ca750453 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c @@ -1,8 +1,7 @@ /* - * arch/s390/kernel/ipl.c * ipl/reipl/dump support for Linux on s390. * - * Copyright IBM Corp. 2005,2007 + * Copyright IBM Corp. 2005, 2012 * Author(s): Michael Holzheu <holzheu@de.ibm.com> * Heiko Carstens <heiko.carstens@de.ibm.com> * Volker Sameske <sameske@de.ibm.com> @@ -17,6 +16,7 @@ #include <linux/fs.h> #include <linux/gfp.h> #include <linux/crash_dump.h> +#include <linux/debug_locks.h> #include <asm/ipl.h> #include <asm/smp.h> #include <asm/setup.h> @@ -25,8 +25,9 @@ #include <asm/ebcdic.h> #include <asm/reset.h> #include <asm/sclp.h> -#include <asm/sigp.h> #include <asm/checksum.h> +#include <asm/debug.h> +#include <asm/os_info.h> #include "entry.h" #define IPL_PARM_BLOCK_VERSION 0 @@ -571,7 +572,7 @@ static void __ipl_run(void *unused) static void ipl_run(struct shutdown_trigger *trigger) { - smp_switch_to_ipl_cpu(__ipl_run, NULL); + smp_call_ipl_cpu(__ipl_run, NULL); } static int __init ipl_init(void) @@ -753,9 +754,9 @@ static struct bin_attribute sys_reipl_fcp_scp_data_attr = { .write = reipl_fcp_scpdata_write, }; -DEFINE_IPL_ATTR_RW(reipl_fcp, wwpn, "0x%016llx\n", "%016llx\n", +DEFINE_IPL_ATTR_RW(reipl_fcp, wwpn, "0x%016llx\n", "%llx\n", reipl_block_fcp->ipl_info.fcp.wwpn); -DEFINE_IPL_ATTR_RW(reipl_fcp, lun, "0x%016llx\n", "%016llx\n", +DEFINE_IPL_ATTR_RW(reipl_fcp, lun, "0x%016llx\n", "%llx\n", reipl_block_fcp->ipl_info.fcp.lun); DEFINE_IPL_ATTR_RW(reipl_fcp, bootprog, "%lld\n", "%lld\n", reipl_block_fcp->ipl_info.fcp.bootprog); @@ -950,6 +951,13 @@ static struct attribute_group reipl_nss_attr_group = { .attrs = reipl_nss_attrs, }; +static void set_reipl_block_actual(struct ipl_parameter_block *reipl_block) +{ + reipl_block_actual = reipl_block; + os_info_entry_add(OS_INFO_REIPL_BLOCK, reipl_block_actual, + reipl_block->hdr.len); +} + /* reipl type */ static int reipl_set_type(enum ipl_type type) @@ -965,7 +973,7 @@ static int reipl_set_type(enum ipl_type type) reipl_method = REIPL_METHOD_CCW_VM; else reipl_method = REIPL_METHOD_CCW_CIO; - reipl_block_actual = reipl_block_ccw; + set_reipl_block_actual(reipl_block_ccw); break; case IPL_TYPE_FCP: if (diag308_set_works) @@ -974,7 +982,7 @@ static int reipl_set_type(enum ipl_type type) reipl_method = REIPL_METHOD_FCP_RO_VM; else reipl_method = REIPL_METHOD_FCP_RO_DIAG; - reipl_block_actual = reipl_block_fcp; + set_reipl_block_actual(reipl_block_fcp); break; case IPL_TYPE_FCP_DUMP: reipl_method = REIPL_METHOD_FCP_DUMP; @@ -984,7 +992,7 @@ static int reipl_set_type(enum ipl_type type) reipl_method = REIPL_METHOD_NSS_DIAG; else reipl_method = REIPL_METHOD_NSS; - reipl_block_actual = reipl_block_nss; + set_reipl_block_actual(reipl_block_nss); break; case IPL_TYPE_UNKNOWN: reipl_method = REIPL_METHOD_DEFAULT; @@ -1101,7 +1109,7 @@ static void __reipl_run(void *unused) static void reipl_run(struct shutdown_trigger *trigger) { - smp_switch_to_ipl_cpu(__reipl_run, NULL); + smp_call_ipl_cpu(__reipl_run, NULL); } static void reipl_block_ccw_init(struct ipl_parameter_block *ipb) @@ -1256,6 +1264,29 @@ static int __init reipl_fcp_init(void) return 0; } +static int __init reipl_type_init(void) +{ + enum ipl_type reipl_type = ipl_info.type; + struct ipl_parameter_block *reipl_block; + unsigned long size; + + 
reipl_block = os_info_old_entry(OS_INFO_REIPL_BLOCK, &size); + if (!reipl_block) + goto out; + /* + * If we have an OS info reipl block, this will be used + */ + if (reipl_block->hdr.pbt == DIAG308_IPL_TYPE_FCP) { + memcpy(reipl_block_fcp, reipl_block, size); + reipl_type = IPL_TYPE_FCP; + } else if (reipl_block->hdr.pbt == DIAG308_IPL_TYPE_CCW) { + memcpy(reipl_block_ccw, reipl_block, size); + reipl_type = IPL_TYPE_CCW; + } +out: + return reipl_set_type(reipl_type); +} + static int __init reipl_init(void) { int rc; @@ -1277,10 +1308,7 @@ static int __init reipl_init(void) rc = reipl_nss_init(); if (rc) return rc; - rc = reipl_set_type(ipl_info.type); - if (rc) - return rc; - return 0; + return reipl_type_init(); } static struct shutdown_action __refdata reipl_action = { @@ -1295,9 +1323,9 @@ static struct shutdown_action __refdata reipl_action = { /* FCP dump device attributes */ -DEFINE_IPL_ATTR_RW(dump_fcp, wwpn, "0x%016llx\n", "%016llx\n", +DEFINE_IPL_ATTR_RW(dump_fcp, wwpn, "0x%016llx\n", "%llx\n", dump_block_fcp->ipl_info.fcp.wwpn); -DEFINE_IPL_ATTR_RW(dump_fcp, lun, "0x%016llx\n", "%016llx\n", +DEFINE_IPL_ATTR_RW(dump_fcp, lun, "0x%016llx\n", "%llx\n", dump_block_fcp->ipl_info.fcp.lun); DEFINE_IPL_ATTR_RW(dump_fcp, bootprog, "%lld\n", "%lld\n", dump_block_fcp->ipl_info.fcp.bootprog); @@ -1386,6 +1414,16 @@ static struct kobj_attribute dump_type_attr = static struct kset *dump_kset; +static void diag308_dump(void *dump_block) +{ + diag308(DIAG308_SET, dump_block); + while (1) { + if (diag308(DIAG308_DUMP, NULL) != 0x302) + break; + udelay_simple(USEC_PER_SEC); + } +} + static void __dump_run(void *unused) { struct ccw_dev_id devid; @@ -1404,12 +1442,10 @@ static void __dump_run(void *unused) __cpcmd(buf, NULL, 0, NULL); break; case DUMP_METHOD_CCW_DIAG: - diag308(DIAG308_SET, dump_block_ccw); - diag308(DIAG308_DUMP, NULL); + diag308_dump(dump_block_ccw); break; case DUMP_METHOD_FCP_DIAG: - diag308(DIAG308_SET, dump_block_fcp); - diag308(DIAG308_DUMP, NULL); + diag308_dump(dump_block_fcp); break; default: break; @@ -1421,7 +1457,7 @@ static void dump_run(struct shutdown_trigger *trigger) if (dump_method == DUMP_METHOD_NONE) return; smp_send_stop(); - smp_switch_to_ipl_cpu(__dump_run, NULL); + smp_call_ipl_cpu(__dump_run, NULL); } static int __init dump_ccw_init(void) @@ -1499,30 +1535,12 @@ static struct shutdown_action __refdata dump_action = { static void dump_reipl_run(struct shutdown_trigger *trigger) { - preempt_disable(); - /* - * Bypass dynamic address translation (DAT) when storing IPL parameter - * information block address and checksum into the prefix area - * (corresponding to absolute addresses 0-8191). - * When enhanced DAT applies and the STE format control in one, - * the absolute address is formed without prefixing. In this case a - * normal store (stg/st) into the prefix area would no more match to - * absolute addresses 0-8191. 
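The comment being deleted here is still the best explanation of the hazard: with prefixing active, a normal store to real addresses 0..8191 lands in the prefix area rather than at absolute 0..8191, so the IPL information block has to be stored with sturg/stura, or, after this patch, via the mem_assign_absolute() helper used in the replacement lines below. A small model of the real-to-absolute mapping the comment describes; real_to_absolute() is illustrative, not a kernel interface.

        #include <stdint.h>

        #define PREFIX_SIZE 8192UL  /* the 0..8191 area from the comment */

        /* model of z/Architecture prefixing: the CPU's real pages
         * 0..8191 are redirected to the absolute prefix area and vice
         * versa; everything else maps 1:1.  A plain store to "address 0"
         * therefore does not hit absolute 0 -- hence the special store
         * instructions hidden behind mem_assign_absolute() */
        static uint64_t real_to_absolute(uint64_t real, uint64_t prefix)
        {
                if (real < PREFIX_SIZE)
                        return real + prefix;
                if (real >= prefix && real < prefix + PREFIX_SIZE)
                        return real - prefix;
                return real;
        }
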
- */ -#ifdef CONFIG_64BIT - asm volatile("sturg %0,%1" - :: "a" ((unsigned long) reipl_block_actual), - "a" (&lowcore_ptr[smp_processor_id()]->ipib)); -#else - asm volatile("stura %0,%1" - :: "a" ((unsigned long) reipl_block_actual), - "a" (&lowcore_ptr[smp_processor_id()]->ipib)); -#endif - asm volatile("stura %0,%1" - :: "a" (csum_partial(reipl_block_actual, - reipl_block_actual->hdr.len, 0)), - "a" (&lowcore_ptr[smp_processor_id()]->ipib_checksum)); - preempt_enable(); + unsigned long ipib = (unsigned long) reipl_block_actual; + unsigned int csum; + + csum = csum_partial(reipl_block_actual, reipl_block_actual->hdr.len, 0); + mem_assign_absolute(S390_lowcore.ipib, ipib); + mem_assign_absolute(S390_lowcore.ipib_checksum, csum); dump_run(trigger); } @@ -1573,7 +1591,7 @@ static struct kset *vmcmd_kset; static void vmcmd_run(struct shutdown_trigger *trigger) { - char *cmd, *next_cmd; + char *cmd; if (strcmp(trigger->name, ON_REIPL_STR) == 0) cmd = vmcmd_on_reboot; @@ -1590,15 +1608,7 @@ static void vmcmd_run(struct shutdown_trigger *trigger) if (strlen(cmd) == 0) return; - do { - next_cmd = strchr(cmd, '\n'); - if (next_cmd) { - next_cmd[0] = 0; - next_cmd += 1; - } - __cpcmd(cmd, NULL, 0, NULL); - cmd = next_cmd; - } while (cmd != NULL); + __cpcmd(cmd, NULL, 0, NULL); } static int vmcmd_init(void) @@ -1623,9 +1633,7 @@ static void stop_run(struct shutdown_trigger *trigger) if (strcmp(trigger->name, ON_PANIC_STR) == 0 || strcmp(trigger->name, ON_RESTART_STR) == 0) disabled_wait((unsigned long) __builtin_return_address(0)); - while (sigp(smp_processor_id(), sigp_stop) == sigp_busy) - cpu_relax(); - for (;;); + smp_stop_cpu(); } static struct shutdown_action stop_action = {SHUTDOWN_ACTION_STOP_STR, @@ -1713,6 +1721,7 @@ static struct kobj_attribute on_panic_attr = static void do_panic(void) { + lgr_info_log(); on_panic_trigger.action->fn(&on_panic_trigger); stop_run(&on_panic_trigger); } @@ -1738,9 +1747,9 @@ static ssize_t on_restart_store(struct kobject *kobj, static struct kobj_attribute on_restart_attr = __ATTR(on_restart, 0644, on_restart_show, on_restart_store); -void do_restart(void) +static void __do_restart(void *ignore) { - smp_restart_with_online_cpu(); + __arch_local_irq_stosm(0x04); /* enable DAT */ smp_send_stop(); #ifdef CONFIG_CRASH_DUMP crash_kexec(NULL); @@ -1749,6 +1758,14 @@ void do_restart(void) stop_run(&on_restart_trigger); } +void do_restart(void) +{ + tracing_off(); + debug_locks_off(); + lgr_info_log(); + smp_call_online_cpu(__do_restart, NULL); +} + /* on halt */ static struct shutdown_trigger on_halt_trigger = {ON_HALT_STR, &stop_action}; @@ -2034,12 +2051,12 @@ void s390_reset_system(void (*func)(void *), void *data) __ctl_clear_bit(0,28); /* Set new machine check handler */ - S390_lowcore.mcck_new_psw.mask = psw_kernel_bits | PSW_MASK_DAT; + S390_lowcore.mcck_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT; S390_lowcore.mcck_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) s390_base_mcck_handler; /* Set new program check handler */ - S390_lowcore.program_new_psw.mask = psw_kernel_bits | PSW_MASK_DAT; + S390_lowcore.program_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT; S390_lowcore.program_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler; diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c index b9a7fdd9c81..99b0b09646c 100644 --- a/arch/s390/kernel/irq.c +++ b/arch/s390/kernel/irq.c @@ -1,5 +1,5 @@ /* - * Copyright IBM Corp. 2004,2011 + * Copyright IBM Corp. 
2004, 2011 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>, * Holger Smolinski <Holger.Smolinski@de.ibm.com>, * Thomas Spatzier <tspat@de.ibm.com>, @@ -18,161 +18,195 @@ #include <linux/errno.h> #include <linux/slab.h> #include <linux/cpu.h> +#include <linux/irq.h> #include <asm/irq_regs.h> #include <asm/cputime.h> #include <asm/lowcore.h> #include <asm/irq.h> +#include <asm/hw_irq.h> #include "entry.h" +DEFINE_PER_CPU_SHARED_ALIGNED(struct irq_stat, irq_stat); +EXPORT_PER_CPU_SYMBOL_GPL(irq_stat); + struct irq_class { char *name; char *desc; }; -static const struct irq_class intrclass_names[] = { - {.name = "EXT" }, - {.name = "I/O" }, - {.name = "CLK", .desc = "[EXT] Clock Comparator" }, - {.name = "EXC", .desc = "[EXT] External Call" }, - {.name = "EMS", .desc = "[EXT] Emergency Signal" }, - {.name = "TMR", .desc = "[EXT] CPU Timer" }, - {.name = "TAL", .desc = "[EXT] Timing Alert" }, - {.name = "PFL", .desc = "[EXT] Pseudo Page Fault" }, - {.name = "DSD", .desc = "[EXT] DASD Diag" }, - {.name = "VRT", .desc = "[EXT] Virtio" }, - {.name = "SCP", .desc = "[EXT] Service Call" }, - {.name = "IUC", .desc = "[EXT] IUCV" }, - {.name = "CPM", .desc = "[EXT] CPU Measurement" }, - {.name = "CIO", .desc = "[I/O] Common I/O Layer Interrupt" }, - {.name = "QAI", .desc = "[I/O] QDIO Adapter Interrupt" }, - {.name = "DAS", .desc = "[I/O] DASD" }, - {.name = "C15", .desc = "[I/O] 3215" }, - {.name = "C70", .desc = "[I/O] 3270" }, - {.name = "TAP", .desc = "[I/O] Tape" }, - {.name = "VMR", .desc = "[I/O] Unit Record Devices" }, - {.name = "LCS", .desc = "[I/O] LCS" }, - {.name = "CLW", .desc = "[I/O] CLAW" }, - {.name = "CTC", .desc = "[I/O] CTC" }, - {.name = "APB", .desc = "[I/O] AP Bus" }, - {.name = "CSC", .desc = "[I/O] CHSC Subchannel" }, - {.name = "NMI", .desc = "[NMI] Machine Check" }, +/* + * The list of "main" irq classes on s390. This is the list of interrupts + * that appear both in /proc/stat ("intr" line) and /proc/interrupts. + * Historically only external and I/O interrupts have been part of /proc/stat. + * We can't add the split external and I/O sub classes since the first field + * in the "intr" line in /proc/stat is supposed to be the sum of all other + * fields. + * Since the external and I/O interrupt fields are already sums we would end + * up with having a sum which accounts each interrupt twice. + */ +static const struct irq_class irqclass_main_desc[NR_IRQS_BASE] = { + [EXT_INTERRUPT] = {.name = "EXT"}, + [IO_INTERRUPT] = {.name = "I/O"}, + [THIN_INTERRUPT] = {.name = "AIO"}, }; /* + * The list of split external and I/O interrupts that appear only in + * /proc/interrupts. + * In addition this list contains non external / I/O events like NMIs. 
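The per-cpu irq_stat array exported at the top of the rewritten irq.c is the backing store for the split counters; later hunks in this series bump it through inc_irq_stat (see nmi.c and perf_cpum_cf.c below). A sketch of that helper, assuming it is nothing more than a per-cpu increment:

#define inc_irq_stat(irqno)	(__get_cpu_var(irq_stat).irqs[irqno]++)

/* e.g. in a DASD interrupt path: */
inc_irq_stat(IRQIO_DAS);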
+ */ +static const struct irq_class irqclass_sub_desc[NR_ARCH_IRQS] = { + [IRQEXT_CLK] = {.name = "CLK", .desc = "[EXT] Clock Comparator"}, + [IRQEXT_EXC] = {.name = "EXC", .desc = "[EXT] External Call"}, + [IRQEXT_EMS] = {.name = "EMS", .desc = "[EXT] Emergency Signal"}, + [IRQEXT_TMR] = {.name = "TMR", .desc = "[EXT] CPU Timer"}, + [IRQEXT_TLA] = {.name = "TAL", .desc = "[EXT] Timing Alert"}, + [IRQEXT_PFL] = {.name = "PFL", .desc = "[EXT] Pseudo Page Fault"}, + [IRQEXT_DSD] = {.name = "DSD", .desc = "[EXT] DASD Diag"}, + [IRQEXT_VRT] = {.name = "VRT", .desc = "[EXT] Virtio"}, + [IRQEXT_SCP] = {.name = "SCP", .desc = "[EXT] Service Call"}, + [IRQEXT_IUC] = {.name = "IUC", .desc = "[EXT] IUCV"}, + [IRQEXT_CMS] = {.name = "CMS", .desc = "[EXT] CPU-Measurement: Sampling"}, + [IRQEXT_CMC] = {.name = "CMC", .desc = "[EXT] CPU-Measurement: Counter"}, + [IRQEXT_CMR] = {.name = "CMR", .desc = "[EXT] CPU-Measurement: RI"}, + [IRQIO_CIO] = {.name = "CIO", .desc = "[I/O] Common I/O Layer Interrupt"}, + [IRQIO_QAI] = {.name = "QAI", .desc = "[I/O] QDIO Adapter Interrupt"}, + [IRQIO_DAS] = {.name = "DAS", .desc = "[I/O] DASD"}, + [IRQIO_C15] = {.name = "C15", .desc = "[I/O] 3215"}, + [IRQIO_C70] = {.name = "C70", .desc = "[I/O] 3270"}, + [IRQIO_TAP] = {.name = "TAP", .desc = "[I/O] Tape"}, + [IRQIO_VMR] = {.name = "VMR", .desc = "[I/O] Unit Record Devices"}, + [IRQIO_LCS] = {.name = "LCS", .desc = "[I/O] LCS"}, + [IRQIO_CLW] = {.name = "CLW", .desc = "[I/O] CLAW"}, + [IRQIO_CTC] = {.name = "CTC", .desc = "[I/O] CTC"}, + [IRQIO_APB] = {.name = "APB", .desc = "[I/O] AP Bus"}, + [IRQIO_ADM] = {.name = "ADM", .desc = "[I/O] EADM Subchannel"}, + [IRQIO_CSC] = {.name = "CSC", .desc = "[I/O] CHSC Subchannel"}, + [IRQIO_PCI] = {.name = "PCI", .desc = "[I/O] PCI Interrupt" }, + [IRQIO_MSI] = {.name = "MSI", .desc = "[I/O] MSI Interrupt" }, + [IRQIO_VIR] = {.name = "VIR", .desc = "[I/O] Virtual I/O Devices"}, + [IRQIO_VAI] = {.name = "VAI", .desc = "[I/O] Virtual I/O Devices AI"}, + [NMI_NMI] = {.name = "NMI", .desc = "[NMI] Machine Check"}, + [CPU_RST] = {.name = "RST", .desc = "[CPU] CPU Restart"}, +}; + +void __init init_IRQ(void) +{ + init_cio_interrupts(); + init_airq_interrupts(); + init_ext_interrupts(); +} + +void do_IRQ(struct pt_regs *regs, int irq) +{ + struct pt_regs *old_regs; + + old_regs = set_irq_regs(regs); + irq_enter(); + if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator) + /* Serve timer interrupts first. */ + clock_comparator_work(); + generic_handle_irq(irq); + irq_exit(); + set_irq_regs(old_regs); +} + +/* * show_interrupts is needed by /proc/interrupts. 
*/ int show_interrupts(struct seq_file *p, void *v) { - int i = *(loff_t *) v, j; + int irq = *(loff_t *) v; + int cpu; get_online_cpus(); - if (i == 0) { + if (irq == 0) { seq_puts(p, " "); - for_each_online_cpu(j) - seq_printf(p, "CPU%d ",j); + for_each_online_cpu(cpu) + seq_printf(p, "CPU%d ", cpu); seq_putc(p, '\n'); + goto out; } - - if (i < NR_IRQS) { - seq_printf(p, "%s: ", intrclass_names[i].name); -#ifndef CONFIG_SMP - seq_printf(p, "%10u ", kstat_irqs(i)); -#else - for_each_online_cpu(j) - seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); -#endif - if (intrclass_names[i].desc) - seq_printf(p, " %s", intrclass_names[i].desc); - seq_putc(p, '\n'); - } + if (irq < NR_IRQS) { + if (irq >= NR_IRQS_BASE) + goto out; + seq_printf(p, "%s: ", irqclass_main_desc[irq].name); + for_each_online_cpu(cpu) + seq_printf(p, "%10u ", kstat_irqs_cpu(irq, cpu)); + seq_putc(p, '\n'); + goto out; + } + for (irq = 0; irq < NR_ARCH_IRQS; irq++) { + seq_printf(p, "%s: ", irqclass_sub_desc[irq].name); + for_each_online_cpu(cpu) + seq_printf(p, "%10u ", + per_cpu(irq_stat, cpu).irqs[irq]); + if (irqclass_sub_desc[irq].desc) + seq_printf(p, " %s", irqclass_sub_desc[irq].desc); + seq_putc(p, '\n'); + } +out: put_online_cpus(); - return 0; + return 0; +} + +unsigned int arch_dynirq_lower_bound(unsigned int from) +{ + return from < THIN_INTERRUPT ? THIN_INTERRUPT : from; } /* * Switch to the asynchronous interrupt stack for softirq execution. */ -asmlinkage void do_softirq(void) +void do_softirq_own_stack(void) { - unsigned long flags, old, new; - - if (in_interrupt()) - return; - - local_irq_save(flags); - - if (local_softirq_pending()) { - /* Get current stack pointer. */ - asm volatile("la %0,0(15)" : "=a" (old)); - /* Check against async. stack address range. */ - new = S390_lowcore.async_stack; - if (((new - old) >> (PAGE_SHIFT + THREAD_ORDER)) != 0) { - /* Need to switch to the async. stack. */ - new -= STACK_FRAME_OVERHEAD; - ((struct stack_frame *) new)->back_chain = old; - - asm volatile(" la 15,0(%0)\n" - " basr 14,%2\n" - " la 15,0(%1)\n" - : : "a" (new), "a" (old), - "a" (__do_softirq) - : "0", "1", "2", "3", "4", "5", "14", - "cc", "memory" ); - } else - /* We are already on the async stack. */ - __do_softirq(); + unsigned long old, new; + + /* Get current stack pointer. */ + asm volatile("la %0,0(15)" : "=a" (old)); + /* Check against async. stack address range. */ + new = S390_lowcore.async_stack; + if (((new - old) >> (PAGE_SHIFT + THREAD_ORDER)) != 0) { + /* Need to switch to the async. stack. */ + new -= STACK_FRAME_OVERHEAD; + ((struct stack_frame *) new)->back_chain = old; + asm volatile(" la 15,0(%0)\n" + " basr 14,%2\n" + " la 15,0(%1)\n" + : : "a" (new), "a" (old), + "a" (__do_softirq) + : "0", "1", "2", "3", "4", "5", "14", + "cc", "memory" ); + } else { + /* We are already on the async stack. */ + __do_softirq(); } - - local_irq_restore(flags); } -#ifdef CONFIG_PROC_FS -void init_irq_proc(void) -{ - struct proc_dir_entry *root_irq_dir; - - root_irq_dir = proc_mkdir("irq", NULL); - create_prof_cpu_mask(root_irq_dir); -} -#endif - /* * ext_int_hash[index] is the list head for all external interrupts that hash * to this index. 
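The range check in do_softirq_own_stack above compares the distance between the current stack pointer and the async stack top against the stack size: shifting by PAGE_SHIFT + THREAD_ORDER yields zero exactly when the pointer lies within the (PAGE_SIZE << THREAD_ORDER) bytes below the top. A worked sketch with a hypothetical helper name:

static inline int on_async_stack(unsigned long sp)
{
	unsigned long top = S390_lowcore.async_stack;

	/* sp inside (top - stack size, top] makes top - sp smaller
	 * than the stack size, so the shift produces 0 */
	return ((top - sp) >> (PAGE_SHIFT + THREAD_ORDER)) == 0;
}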
*/ -static struct list_head ext_int_hash[256]; +static struct hlist_head ext_int_hash[32] ____cacheline_aligned; struct ext_int_info { ext_int_handler_t handler; - u16 code; - struct list_head entry; + struct hlist_node entry; struct rcu_head rcu; + u16 code; }; /* ext_int_hash_lock protects the handler lists for external interrupts */ -DEFINE_SPINLOCK(ext_int_hash_lock); - -static void __init init_external_interrupts(void) -{ - int idx; - - for (idx = 0; idx < ARRAY_SIZE(ext_int_hash); idx++) - INIT_LIST_HEAD(&ext_int_hash[idx]); -} +static DEFINE_SPINLOCK(ext_int_hash_lock); static inline int ext_hash(u16 code) { - return (code + (code >> 9)) & 0xff; -} - -static void ext_int_hash_update(struct rcu_head *head) -{ - struct ext_int_info *p = container_of(head, struct ext_int_info, rcu); + BUILD_BUG_ON(!is_power_of_2(ARRAY_SIZE(ext_int_hash))); - kfree(p); + return (code + (code >> 9)) & (ARRAY_SIZE(ext_int_hash) - 1); } -int register_external_interrupt(u16 code, ext_int_handler_t handler) +int register_external_irq(u16 code, ext_int_handler_t handler) { struct ext_int_info *p; unsigned long flags; @@ -186,83 +220,88 @@ int register_external_interrupt(u16 code, ext_int_handler_t handler) index = ext_hash(code); spin_lock_irqsave(&ext_int_hash_lock, flags); - list_add_rcu(&p->entry, &ext_int_hash[index]); + hlist_add_head_rcu(&p->entry, &ext_int_hash[index]); spin_unlock_irqrestore(&ext_int_hash_lock, flags); return 0; } -EXPORT_SYMBOL(register_external_interrupt); +EXPORT_SYMBOL(register_external_irq); -int unregister_external_interrupt(u16 code, ext_int_handler_t handler) +int unregister_external_irq(u16 code, ext_int_handler_t handler) { struct ext_int_info *p; unsigned long flags; int index = ext_hash(code); spin_lock_irqsave(&ext_int_hash_lock, flags); - list_for_each_entry_rcu(p, &ext_int_hash[index], entry) + hlist_for_each_entry_rcu(p, &ext_int_hash[index], entry) { if (p->code == code && p->handler == handler) { - list_del_rcu(&p->entry); - call_rcu(&p->rcu, ext_int_hash_update); + hlist_del_rcu(&p->entry); + kfree_rcu(p, rcu); } + } spin_unlock_irqrestore(&ext_int_hash_lock, flags); return 0; } -EXPORT_SYMBOL(unregister_external_interrupt); +EXPORT_SYMBOL(unregister_external_irq); -void __irq_entry do_extint(struct pt_regs *regs, unsigned int ext_int_code, - unsigned int param32, unsigned long param64) +static irqreturn_t do_ext_interrupt(int irq, void *dummy) { - struct pt_regs *old_regs; - unsigned short code; + struct pt_regs *regs = get_irq_regs(); + struct ext_code ext_code; struct ext_int_info *p; int index; - code = (unsigned short) ext_int_code; - old_regs = set_irq_regs(regs); - s390_idle_check(regs, S390_lowcore.int_clock, - S390_lowcore.async_enter_timer); - irq_enter(); - if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator) - /* Serve timer interrupts first. 
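Apart from the rename, register_external_irq and unregister_external_irq keep their contract, and the subclass refcounting that replaces the old service-signal-only helpers appears at the end of this file. A usage sketch with a hypothetical external-interruption code 0x1234; the IRQ_SUBCLASS_* enum value is the one this series is assumed to introduce in asm/irq.h:

static void my_ext_handler(struct ext_code ext_code,
			   unsigned int param32, unsigned long param64)
{
	/* called from do_ext_interrupt() under rcu_read_lock() */
}

static int my_driver_init(void)
{
	int rc;

	rc = register_external_irq(0x1234, my_ext_handler); /* hypothetical code */
	if (rc)
		return rc;
	/* what used to be service_subclass_irq_register(): */
	irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL); /* CR0 bit 9 */
	return 0;
}

static void my_driver_exit(void)
{
	irq_subclass_unregister(IRQ_SUBCLASS_SERVICE_SIGNAL);
	unregister_external_irq(0x1234, my_ext_handler);
}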
*/ - clock_comparator_work(); - kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++; - if (code != 0x1004) + ext_code = *(struct ext_code *) ®s->int_code; + if (ext_code.code != EXT_IRQ_CLK_COMP) __get_cpu_var(s390_idle).nohz_delay = 1; - index = ext_hash(code); + index = ext_hash(ext_code.code); rcu_read_lock(); - list_for_each_entry_rcu(p, &ext_int_hash[index], entry) - if (likely(p->code == code)) - p->handler(ext_int_code, param32, param64); + hlist_for_each_entry_rcu(p, &ext_int_hash[index], entry) { + if (unlikely(p->code != ext_code.code)) + continue; + p->handler(ext_code, regs->int_parm, regs->int_parm_long); + } rcu_read_unlock(); - irq_exit(); - set_irq_regs(old_regs); + return IRQ_HANDLED; } -void __init init_IRQ(void) +static struct irqaction external_interrupt = { + .name = "EXT", + .handler = do_ext_interrupt, +}; + +void __init init_ext_interrupts(void) { - init_external_interrupts(); + int idx; + + for (idx = 0; idx < ARRAY_SIZE(ext_int_hash); idx++) + INIT_HLIST_HEAD(&ext_int_hash[idx]); + + irq_set_chip_and_handler(EXT_INTERRUPT, + &dummy_irq_chip, handle_percpu_irq); + setup_irq(EXT_INTERRUPT, &external_interrupt); } -static DEFINE_SPINLOCK(sc_irq_lock); -static int sc_irq_refcount; +static DEFINE_SPINLOCK(irq_subclass_lock); +static unsigned char irq_subclass_refcount[64]; -void service_subclass_irq_register(void) +void irq_subclass_register(enum irq_subclass subclass) { - spin_lock(&sc_irq_lock); - if (!sc_irq_refcount) - ctl_set_bit(0, 9); - sc_irq_refcount++; - spin_unlock(&sc_irq_lock); + spin_lock(&irq_subclass_lock); + if (!irq_subclass_refcount[subclass]) + ctl_set_bit(0, subclass); + irq_subclass_refcount[subclass]++; + spin_unlock(&irq_subclass_lock); } -EXPORT_SYMBOL(service_subclass_irq_register); +EXPORT_SYMBOL(irq_subclass_register); -void service_subclass_irq_unregister(void) +void irq_subclass_unregister(enum irq_subclass subclass) { - spin_lock(&sc_irq_lock); - sc_irq_refcount--; - if (!sc_irq_refcount) - ctl_clear_bit(0, 9); - spin_unlock(&sc_irq_lock); + spin_lock(&irq_subclass_lock); + irq_subclass_refcount[subclass]--; + if (!irq_subclass_refcount[subclass]) + ctl_clear_bit(0, subclass); + spin_unlock(&irq_subclass_lock); } -EXPORT_SYMBOL(service_subclass_irq_unregister); +EXPORT_SYMBOL(irq_subclass_unregister); diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c index 64b761aef00..bc71a7b95af 100644 --- a/arch/s390/kernel/kprobes.c +++ b/arch/s390/kernel/kprobes.c @@ -15,7 +15,7 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * - * Copyright (C) IBM Corporation, 2002, 2006 + * Copyright IBM Corp. 2002, 2006 * * s390 port, used ppc64 as template. 
Mike Grundy <grundym@us.ibm.com> */ @@ -26,19 +26,42 @@ #include <linux/stop_machine.h> #include <linux/kdebug.h> #include <linux/uaccess.h> -#include <asm/cacheflush.h> -#include <asm/sections.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/hardirq.h> +#include <asm/cacheflush.h> +#include <asm/sections.h> +#include <asm/dis.h> DEFINE_PER_CPU(struct kprobe *, current_kprobe); DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); struct kretprobe_blackpoint kretprobe_blacklist[] = { }; +DEFINE_INSN_CACHE_OPS(dmainsn); + +static void *alloc_dmainsn_page(void) +{ + return (void *)__get_free_page(GFP_KERNEL | GFP_DMA); +} + +static void free_dmainsn_page(void *page) +{ + free_page((unsigned long)page); +} + +struct kprobe_insn_cache kprobe_dmainsn_slots = { + .mutex = __MUTEX_INITIALIZER(kprobe_dmainsn_slots.mutex), + .alloc = alloc_dmainsn_page, + .free = free_dmainsn_page, + .pages = LIST_HEAD_INIT(kprobe_dmainsn_slots.pages), + .insn_size = MAX_INSN_SIZE, +}; + static int __kprobes is_prohibited_opcode(kprobe_opcode_t *insn) { + if (!is_known_insn((unsigned char *)insn)) + return -EINVAL; switch (insn[0] >> 8) { case 0x0c: /* bassm */ case 0x0b: /* bsm */ @@ -47,6 +70,11 @@ static int __kprobes is_prohibited_opcode(kprobe_opcode_t *insn) case 0xac: /* stnsm */ case 0xad: /* stosm */ return -EINVAL; + case 0xc6: + switch (insn[0] & 0x0f) { + case 0x00: /* exrl */ + return -EINVAL; + } } switch (insn[0]) { case 0x0101: /* pr */ @@ -100,35 +128,160 @@ static int __kprobes get_fixup_type(kprobe_opcode_t *insn) fixup |= FIXUP_RETURN_REGISTER; break; case 0xc0: - if ((insn[0] & 0x0f) == 0x00 || /* larl */ - (insn[0] & 0x0f) == 0x05) /* brasl */ - fixup |= FIXUP_RETURN_REGISTER; + if ((insn[0] & 0x0f) == 0x05) /* brasl */ + fixup |= FIXUP_RETURN_REGISTER; break; case 0xeb: - if ((insn[2] & 0xff) == 0x44 || /* bxhg */ - (insn[2] & 0xff) == 0x45) /* bxleg */ + switch (insn[2] & 0xff) { + case 0x44: /* bxhg */ + case 0x45: /* bxleg */ fixup = FIXUP_BRANCH_NOT_TAKEN; + break; + } break; case 0xe3: /* bctg */ if ((insn[2] & 0xff) == 0x46) fixup = FIXUP_BRANCH_NOT_TAKEN; break; + case 0xec: + switch (insn[2] & 0xff) { + case 0xe5: /* clgrb */ + case 0xe6: /* cgrb */ + case 0xf6: /* crb */ + case 0xf7: /* clrb */ + case 0xfc: /* cgib */ + case 0xfd: /* cglib */ + case 0xfe: /* cib */ + case 0xff: /* clib */ + fixup = FIXUP_BRANCH_NOT_TAKEN; + break; + } + break; } return fixup; } +static int __kprobes is_insn_relative_long(kprobe_opcode_t *insn) +{ + /* Check if we have a RIL-b or RIL-c format instruction which + * we need to modify in order to avoid instruction emulation. 
*/ + switch (insn[0] >> 8) { + case 0xc0: + if ((insn[0] & 0x0f) == 0x00) /* larl */ + return true; + break; + case 0xc4: + switch (insn[0] & 0x0f) { + case 0x02: /* llhrl */ + case 0x04: /* lghrl */ + case 0x05: /* lhrl */ + case 0x06: /* llghrl */ + case 0x07: /* sthrl */ + case 0x08: /* lgrl */ + case 0x0b: /* stgrl */ + case 0x0c: /* lgfrl */ + case 0x0d: /* lrl */ + case 0x0e: /* llgfrl */ + case 0x0f: /* strl */ + return true; + } + break; + case 0xc6: + switch (insn[0] & 0x0f) { + case 0x02: /* pfdrl */ + case 0x04: /* cghrl */ + case 0x05: /* chrl */ + case 0x06: /* clghrl */ + case 0x07: /* clhrl */ + case 0x08: /* cgrl */ + case 0x0a: /* clgrl */ + case 0x0c: /* cgfrl */ + case 0x0d: /* crl */ + case 0x0e: /* clgfrl */ + case 0x0f: /* clrl */ + return true; + } + break; + } + return false; +} + +static void __kprobes copy_instruction(struct kprobe *p) +{ + s64 disp, new_disp; + u64 addr, new_addr; + + memcpy(p->ainsn.insn, p->addr, insn_length(p->opcode >> 8)); + if (!is_insn_relative_long(p->ainsn.insn)) + return; + /* + * For pc-relative instructions in RIL-b or RIL-c format patch the + * RI2 displacement field. We have already made sure that the insn + * slot for the patched instruction is within the same 2GB area + * as the original instruction (either kernel image or module area). + * Therefore the new displacement will always fit. + */ + disp = *(s32 *)&p->ainsn.insn[1]; + addr = (u64)(unsigned long)p->addr; + new_addr = (u64)(unsigned long)p->ainsn.insn; + new_disp = ((addr + (disp * 2)) - new_addr) / 2; + *(s32 *)&p->ainsn.insn[1] = new_disp; +} + +static inline int is_kernel_addr(void *addr) +{ + return addr < (void *)_end; +} + +static inline int is_module_addr(void *addr) +{ +#ifdef CONFIG_64BIT + BUILD_BUG_ON(MODULES_LEN > (1UL << 31)); + if (addr < (void *)MODULES_VADDR) + return 0; + if (addr > (void *)MODULES_END) + return 0; +#endif + return 1; +} + +static int __kprobes s390_get_insn_slot(struct kprobe *p) +{ + /* + * Get an insn slot that is within the same 2GB area like the original + * instruction. That way instructions with a 32bit signed displacement + * field can be patched and executed within the insn slot. + */ + p->ainsn.insn = NULL; + if (is_kernel_addr(p->addr)) + p->ainsn.insn = get_dmainsn_slot(); + else if (is_module_addr(p->addr)) + p->ainsn.insn = get_insn_slot(); + return p->ainsn.insn ? 
0 : -ENOMEM; +} + +static void __kprobes s390_free_insn_slot(struct kprobe *p) +{ + if (!p->ainsn.insn) + return; + if (is_kernel_addr(p->addr)) + free_dmainsn_slot(p->ainsn.insn, 0); + else + free_insn_slot(p->ainsn.insn, 0); + p->ainsn.insn = NULL; +} + int __kprobes arch_prepare_kprobe(struct kprobe *p) { if ((unsigned long) p->addr & 0x01) return -EINVAL; - /* Make sure the probe isn't going on a difficult instruction */ if (is_prohibited_opcode(p->addr)) return -EINVAL; - + if (s390_get_insn_slot(p)) + return -ENOMEM; p->opcode = *p->addr; - memcpy(p->ainsn.insn, p->addr, ((p->opcode >> 14) + 3) & -2); - + copy_instruction(p); return 0; } @@ -169,6 +322,7 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p) void __kprobes arch_remove_kprobe(struct kprobe *p) { + s390_free_insn_slot(p); } static void __kprobes enable_singlestep(struct kprobe_ctlblk *kcb, @@ -354,7 +508,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p, { struct kretprobe_instance *ri; struct hlist_head *head, empty_rp; - struct hlist_node *node, *tmp; + struct hlist_node *tmp; unsigned long flags, orig_ret_address; unsigned long trampoline_address; kprobe_opcode_t *correct_ret_addr; @@ -379,7 +533,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p, orig_ret_address = 0; correct_ret_addr = NULL; trampoline_address = (unsigned long) &kretprobe_trampoline; - hlist_for_each_entry_safe(ri, node, tmp, head, hlist) { + hlist_for_each_entry_safe(ri, tmp, head, hlist) { if (ri->task != current) /* another task is sharing our hash bucket */ continue; @@ -398,7 +552,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p, kretprobe_assert(ri, orig_ret_address, trampoline_address); correct_ret_addr = ri->ret_addr; - hlist_for_each_entry_safe(ri, node, tmp, head, hlist) { + hlist_for_each_entry_safe(ri, tmp, head, hlist) { if (ri->task != current) /* another task is sharing our hash bucket */ continue; @@ -427,7 +581,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p, kretprobe_hash_unlock(current, &flags); preempt_enable_no_resched(); - hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) { + hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { hlist_del(&ri->hlist); kfree(ri); } @@ -457,7 +611,7 @@ static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs) ip += (unsigned long) p->addr - (unsigned long) p->ainsn.insn; if (fixup & FIXUP_BRANCH_NOT_TAKEN) { - int ilen = ((p->ainsn.insn[0] >> 14) + 3) & -2; + int ilen = insn_length(p->ainsn.insn[0] >> 8); if (ip - (unsigned long) p->ainsn.insn == ilen) ip = (unsigned long) p->addr + ilen; } @@ -526,7 +680,7 @@ static int __kprobes kprobe_trap_handler(struct pt_regs *regs, int trapnr) case KPROBE_HIT_SSDONE: /* * We increment the nmissed count for accounting, - * we can also use npre/npostfault count for accouting + * we can also use npre/npostfault count for accounting * these specific fault cases. */ kprobes_inc_nmissed_count(p); @@ -547,7 +701,7 @@ static int __kprobes kprobe_trap_handler(struct pt_regs *regs, int trapnr) */ entry = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN); if (entry) { - regs->psw.addr = entry->fixup | PSW_ADDR_AMODE; + regs->psw.addr = extable_fixup(entry) | PSW_ADDR_AMODE; return 1; } diff --git a/arch/s390/kernel/lgr.c b/arch/s390/kernel/lgr.c new file mode 100644 index 00000000000..6ea6d69339b --- /dev/null +++ b/arch/s390/kernel/lgr.c @@ -0,0 +1,186 @@ +/* + * Linux Guest Relocation (LGR) detection + * + * Copyright IBM Corp. 
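Back in kprobes.c, the displacement rewrite in copy_instruction is easiest to follow with concrete numbers; a hypothetical worked example (all addresses invented):

/* original larl at addr = 0x100000, RI2 disp = 0x200
 *   -> target = addr + disp * 2 = 0x100400
 * out-of-line copy at new_addr = 0xfe000 (same 2GB area)
 *   -> new_disp = ((0x100000 + 0x200 * 2) - 0xfe000) / 2 = 0x1200
 * single-stepping the copy still references 0x100400, so the
 * instruction needs no emulation. */

arch_prepare_kprobe and resume_execution now also use insn_length instead of the old "((opcode >> 14) + 3) & -2" arithmetic; a sketch of that helper, assuming the asm/dis.h version derives the length from the two leftmost opcode bits:

static inline int insn_length(unsigned char code)
{
	/* ilc from the two leftmost opcode bits:
	 * 0x00-0x3f -> 2, 0x40-0xbf -> 4, 0xc0-0xff -> 6 bytes */
	return ((((int) code + 64) >> 7) + 1) << 1;
}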
2012
+ * Author(s): Michael Holzheu <holzheu@linux.vnet.ibm.com>
+ */
+
+#include <linux/module.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <asm/facility.h>
+#include <asm/sysinfo.h>
+#include <asm/ebcdic.h>
+#include <asm/debug.h>
+#include <asm/ipl.h>
+
+#define LGR_TIMER_INTERVAL_SECS (30 * 60)
+#define VM_LEVEL_MAX 2 /* Maximum is 8, but we only record two levels */
+
+/*
+ * LGR info: Contains stfle and stsi data
+ */
+struct lgr_info {
+	/* Bit field with facility information: 4 DWORDs are stored */
+	u64 stfle_fac_list[4];
+	/* Level of system (1 = CEC, 2 = LPAR, 3 = z/VM) */
+	u32 level;
+	/* Level 1: CEC info (stsi 1.1.1) */
+	char manufacturer[16];
+	char type[4];
+	char sequence[16];
+	char plant[4];
+	char model[16];
+	/* Level 2: LPAR info (stsi 2.2.2) */
+	u16 lpar_number;
+	char name[8];
+	/* Level 3: VM info (stsi 3.2.2) */
+	u8 vm_count;
+	struct {
+		char name[8];
+		char cpi[16];
+	} vm[VM_LEVEL_MAX];
+} __packed __aligned(8);
+
+/*
+ * LGR globals
+ */
+static char lgr_page[PAGE_SIZE] __aligned(PAGE_SIZE);
+static struct lgr_info lgr_info_last;
+static struct lgr_info lgr_info_cur;
+static struct debug_info *lgr_dbf;
+
+/*
+ * Copy buffer and then convert it to ASCII
+ */
+static void cpascii(char *dst, char *src, int size)
+{
+	memcpy(dst, src, size);
+	EBCASC(dst, size);
+}
+
+/*
+ * Fill LGR info with 1.1.1 stsi data
+ */
+static void lgr_stsi_1_1_1(struct lgr_info *lgr_info)
+{
+	struct sysinfo_1_1_1 *si = (void *) lgr_page;
+
+	if (stsi(si, 1, 1, 1))
+		return;
+	cpascii(lgr_info->manufacturer, si->manufacturer,
+		sizeof(si->manufacturer));
+	cpascii(lgr_info->type, si->type, sizeof(si->type));
+	cpascii(lgr_info->model, si->model, sizeof(si->model));
+	cpascii(lgr_info->sequence, si->sequence, sizeof(si->sequence));
+	cpascii(lgr_info->plant, si->plant, sizeof(si->plant));
+}
+
+/*
+ * Fill LGR info with 2.2.2 stsi data
+ */
+static void lgr_stsi_2_2_2(struct lgr_info *lgr_info)
+{
+	struct sysinfo_2_2_2 *si = (void *) lgr_page;
+
+	if (stsi(si, 2, 2, 2))
+		return;
+	cpascii(lgr_info->name, si->name, sizeof(si->name));
+	memcpy(&lgr_info->lpar_number, &si->lpar_number,
+	       sizeof(lgr_info->lpar_number));
+}
+
+/*
+ * Fill LGR info with 3.2.2 stsi data
+ */
+static void lgr_stsi_3_2_2(struct lgr_info *lgr_info)
+{
+	struct sysinfo_3_2_2 *si = (void *) lgr_page;
+	int i;
+
+	if (stsi(si, 3, 2, 2))
+		return;
+	for (i = 0; i < min_t(u8, si->count, VM_LEVEL_MAX); i++) {
+		cpascii(lgr_info->vm[i].name, si->vm[i].name,
+			sizeof(si->vm[i].name));
+		cpascii(lgr_info->vm[i].cpi, si->vm[i].cpi,
+			sizeof(si->vm[i].cpi));
+	}
+	lgr_info->vm_count = si->count;
+}
+
+/*
+ * Fill LGR info with current data
+ */
+static void lgr_info_get(struct lgr_info *lgr_info)
+{
+	int level;
+
+	memset(lgr_info, 0, sizeof(*lgr_info));
+	stfle(lgr_info->stfle_fac_list, ARRAY_SIZE(lgr_info->stfle_fac_list));
+	level = stsi(NULL, 0, 0, 0);
+	lgr_info->level = level;
+	if (level >= 1)
+		lgr_stsi_1_1_1(lgr_info);
+	if (level >= 2)
+		lgr_stsi_2_2_2(lgr_info);
+	if (level >= 3)
+		lgr_stsi_3_2_2(lgr_info);
+}
+
+/*
+ * Check if LGR info has changed and if yes log new LGR info to s390dbf
+ */
+void lgr_info_log(void)
+{
+	static DEFINE_SPINLOCK(lgr_info_lock);
+	unsigned long flags;
+
+	if (!spin_trylock_irqsave(&lgr_info_lock, flags))
+		return;
+	lgr_info_get(&lgr_info_cur);
+	if (memcmp(&lgr_info_last, &lgr_info_cur, sizeof(lgr_info_cur)) != 0) {
+		debug_event(lgr_dbf, 1, &lgr_info_cur, sizeof(lgr_info_cur));
+		lgr_info_last = lgr_info_cur;
+	}
+	spin_unlock_irqrestore(&lgr_info_lock,
flags); +} +EXPORT_SYMBOL_GPL(lgr_info_log); + +static void lgr_timer_set(void); + +/* + * LGR timer callback + */ +static void lgr_timer_fn(unsigned long ignored) +{ + lgr_info_log(); + lgr_timer_set(); +} + +static struct timer_list lgr_timer = + TIMER_DEFERRED_INITIALIZER(lgr_timer_fn, 0, 0); + +/* + * Setup next LGR timer + */ +static void lgr_timer_set(void) +{ + mod_timer(&lgr_timer, jiffies + LGR_TIMER_INTERVAL_SECS * HZ); +} + +/* + * Initialize LGR: Add s390dbf, write initial lgr_info and setup timer + */ +static int __init lgr_init(void) +{ + lgr_dbf = debug_register("lgr", 1, 1, sizeof(struct lgr_info)); + if (!lgr_dbf) + return -ENOMEM; + debug_register_view(lgr_dbf, &debug_hex_ascii_view); + lgr_info_get(&lgr_info_last); + debug_event(lgr_dbf, 1, &lgr_info_last, sizeof(lgr_info_last)); + lgr_timer_set(); + return 0; +} +module_init(lgr_init); diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c index 47b168fb29c..719e27b2cf2 100644 --- a/arch/s390/kernel/machine_kexec.c +++ b/arch/s390/kernel/machine_kexec.c @@ -1,7 +1,5 @@ /* - * arch/s390/kernel/machine_kexec.c - * - * Copyright IBM Corp. 2005,2011 + * Copyright IBM Corp. 2005, 2011 * * Author(s): Rolf Adelsberger, * Heiko Carstens <heiko.carstens@de.ibm.com> @@ -14,16 +12,19 @@ #include <linux/delay.h> #include <linux/reboot.h> #include <linux/ftrace.h> +#include <linux/debug_locks.h> +#include <linux/suspend.h> #include <asm/cio.h> #include <asm/setup.h> #include <asm/pgtable.h> #include <asm/pgalloc.h> -#include <asm/system.h> #include <asm/smp.h> #include <asm/reset.h> #include <asm/ipl.h> #include <asm/diag.h> +#include <asm/elf.h> #include <asm/asm-offsets.h> +#include <asm/os_info.h> typedef void (*relocate_kernel_t)(kimage_entry_t *, unsigned long); @@ -32,8 +33,6 @@ extern const unsigned long long relocate_kernel_len; #ifdef CONFIG_CRASH_DUMP -void *fill_cpu_elf_notes(void *ptr, struct save_area *sa); - /* * Create ELF notes for one CPU */ @@ -49,55 +48,55 @@ static void add_elf_notes(int cpu) } /* - * Store status of next available physical CPU + * Initialize CPU ELF notes */ -static int store_status_next(int start_cpu, int this_cpu) +static void setup_regs(void) { - struct save_area *sa = (void *) 4608 + store_prefix(); - int cpu, rc; + unsigned long sa = S390_lowcore.prefixreg_save_area + SAVE_AREA_BASE; + int cpu, this_cpu; - for (cpu = start_cpu; cpu < 65536; cpu++) { + this_cpu = smp_find_processor_id(stap()); + add_elf_notes(this_cpu); + for_each_online_cpu(cpu) { if (cpu == this_cpu) continue; - do { - rc = raw_sigp(cpu, sigp_stop_and_store_status); - } while (rc == sigp_busy); - if (rc != sigp_order_code_accepted) + if (smp_store_status(cpu)) continue; - if (sa->pref_reg) - return cpu; + add_elf_notes(cpu); } - return -1; + /* Copy dump CPU store status info to absolute zero */ + memcpy((void *) SAVE_AREA_BASE, (void *) sa, sizeof(struct save_area)); } /* - * Initialize CPU ELF notes + * PM notifier callback for kdump */ -void setup_regs(void) +static int machine_kdump_pm_cb(struct notifier_block *nb, unsigned long action, + void *ptr) { - unsigned long sa = S390_lowcore.prefixreg_save_area + SAVE_AREA_BASE; - int cpu, this_cpu, phys_cpu = 0, first = 1; - - this_cpu = stap(); - - if (!S390_lowcore.prefixreg_save_area) - first = 0; - for_each_online_cpu(cpu) { - if (first) { - add_elf_notes(cpu); - first = 0; - continue; - } - phys_cpu = store_status_next(phys_cpu, this_cpu); - if (phys_cpu == -1) - break; - add_elf_notes(cpu); - phys_cpu++; + switch (action) { + case 
PM_SUSPEND_PREPARE: + case PM_HIBERNATION_PREPARE: + if (crashk_res.start) + crash_map_reserved_pages(); + break; + case PM_POST_SUSPEND: + case PM_POST_HIBERNATION: + if (crashk_res.start) + crash_unmap_reserved_pages(); + break; + default: + return NOTIFY_DONE; } - /* Copy dump CPU store status info to absolute zero */ - memcpy((void *) SAVE_AREA_BASE, (void *) sa, sizeof(struct save_area)); + return NOTIFY_OK; } +static int __init machine_kdump_pm_init(void) +{ + pm_notifier(machine_kdump_pm_cb, 0); + return 0; +} +arch_initcall(machine_kdump_pm_init); #endif /* @@ -108,8 +107,8 @@ static void __do_machine_kdump(void *image) #ifdef CONFIG_CRASH_DUMP int (*start_kdump)(int) = (void *)((struct kimage *) image)->start; - __load_psw_mask(PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA); setup_regs(); + __load_psw_mask(PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA); start_kdump(1); #endif } @@ -143,8 +142,13 @@ static void crash_map_pages(int enable) size % KEXEC_CRASH_MEM_ALIGN); if (enable) vmem_add_mapping(crashk_res.start, size); - else + else { vmem_remove_mapping(crashk_res.start, size); + if (size) + os_info_crashkernel_add(crashk_res.start, size); + else + os_info_crashkernel_add(0, 0); + } } /* @@ -184,7 +188,7 @@ int machine_kexec_prepare(struct kimage *image) /* Can't replace kernel image since it is read-only. */ if (ipl_flags & IPL_NSS_VALID) - return -ENOSYS; + return -EOPNOTSUPP; if (image->type == KEXEC_TYPE_CRASH) return machine_kexec_prepare_kdump(); @@ -216,6 +220,10 @@ void machine_shutdown(void) { } +void machine_crash_shutdown(struct pt_regs *regs) +{ +} + /* * Do normal kexec */ @@ -237,11 +245,16 @@ static void __machine_kexec(void *data) { struct kimage *image = data; + __arch_local_irq_stosm(0x04); /* enable DAT */ pfault_fini(); - if (image->type == KEXEC_TYPE_CRASH) + tracing_off(); + debug_locks_off(); + if (image->type == KEXEC_TYPE_CRASH) { + lgr_info_log(); s390_reset_system(__do_machine_kdump, data); - else + } else { s390_reset_system(__do_machine_kexec, data); + } disabled_wait((unsigned long) __builtin_return_address(0)); } @@ -255,5 +268,5 @@ void machine_kexec(struct kimage *image) return; tracer_disable(); smp_send_stop(); - smp_switch_to_ipl_cpu(__machine_kexec, image); + smp_call_ipl_cpu(__machine_kexec, image); } diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S index 7e2c38ba137..08dcf21cb8d 100644 --- a/arch/s390/kernel/mcount.S +++ b/arch/s390/kernel/mcount.S @@ -1,5 +1,5 @@ /* - * Copyright IBM Corp. 2008,2009 + * Copyright IBM Corp. 2008, 2009 * * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>, * @@ -7,6 +7,7 @@ #include <linux/linkage.h> #include <asm/asm-offsets.h> +#include <asm/ftrace.h> .section .kprobes.text, "ax" @@ -33,6 +34,7 @@ ENTRY(ftrace_caller) la %r2,0(%r14) st %r0,__SF_BACKCHAIN(%r15) la %r3,0(%r3) + ahi %r2,-MCOUNT_INSN_SIZE l %r14,0b-0b(%r1) l %r14,0(%r14) basr %r14,%r14 diff --git a/arch/s390/kernel/mcount64.S b/arch/s390/kernel/mcount64.S index f70cadec68f..1c52eae3396 100644 --- a/arch/s390/kernel/mcount64.S +++ b/arch/s390/kernel/mcount64.S @@ -1,5 +1,5 @@ /* - * Copyright IBM Corp. 2008,2009 + * Copyright IBM Corp. 
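In the mcount stubs, the added ahi (and the matching aghi in the 64-bit variant below) subtracts MCOUNT_INSN_SIZE from %r2, so the ip handed to the tracer names the mcount call site itself rather than the instruction after it. On the C side, a tracer of this era would see (hedged sketch, assuming the two-argument ftrace_func_t signature of this kernel generation):

static void my_tracer(unsigned long ip, unsigned long parent_ip)
{
	/* ip: address of the ftrace/mcount call site in the traced
	 * function, already reduced by MCOUNT_INSN_SIZE by the stub;
	 * parent_ip: return address into the caller */
}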
2008, 2009 * * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>, * @@ -7,6 +7,7 @@ #include <linux/linkage.h> #include <asm/asm-offsets.h> +#include <asm/ftrace.h> .section .kprobes.text, "ax" @@ -29,6 +30,7 @@ ENTRY(ftrace_caller) stg %r1,__SF_BACKCHAIN(%r15) lgr %r2,%r14 lg %r3,168(%r15) + aghi %r2,-MCOUNT_INSN_SIZE larl %r14,ftrace_trace_function lg %r14,0(%r14) basr %r14,%r14 diff --git a/arch/s390/kernel/mem_detect.c b/arch/s390/kernel/mem_detect.c deleted file mode 100644 index 22d502e885e..00000000000 --- a/arch/s390/kernel/mem_detect.c +++ /dev/null @@ -1,145 +0,0 @@ -/* - * Copyright IBM Corp. 2008, 2009 - * - * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> - */ - -#include <linux/kernel.h> -#include <linux/module.h> -#include <asm/ipl.h> -#include <asm/sclp.h> -#include <asm/setup.h> - -#define ADDR2G (1ULL << 31) - -static void find_memory_chunks(struct mem_chunk chunk[]) -{ - unsigned long long memsize, rnmax, rzm; - unsigned long addr = 0, size; - int i = 0, type; - - rzm = sclp_get_rzm(); - rnmax = sclp_get_rnmax(); - memsize = rzm * rnmax; - if (!rzm) - rzm = 1ULL << 17; - if (sizeof(long) == 4) { - rzm = min(ADDR2G, rzm); - memsize = memsize ? min(ADDR2G, memsize) : ADDR2G; - } - do { - size = 0; - type = tprot(addr); - do { - size += rzm; - if (memsize && addr + size >= memsize) - break; - } while (type == tprot(addr + size)); - if (type == CHUNK_READ_WRITE || type == CHUNK_READ_ONLY) { - chunk[i].addr = addr; - chunk[i].size = size; - chunk[i].type = type; - i++; - } - addr += size; - } while (addr < memsize && i < MEMORY_CHUNKS); -} - -void detect_memory_layout(struct mem_chunk chunk[]) -{ - unsigned long flags, cr0; - - memset(chunk, 0, MEMORY_CHUNKS * sizeof(struct mem_chunk)); - /* Disable IRQs, DAT and low address protection so tprot does the - * right thing and we don't get scheduled away with low address - * protection disabled. - */ - flags = __arch_local_irq_stnsm(0xf8); - __ctl_store(cr0, 0, 0); - __ctl_clear_bit(0, 28); - find_memory_chunks(chunk); - __ctl_load(cr0, 0, 0); - arch_local_irq_restore(flags); -} -EXPORT_SYMBOL(detect_memory_layout); - -/* - * Move memory chunks array from index "from" to index "to" - */ -static void mem_chunk_move(struct mem_chunk chunk[], int to, int from) -{ - int cnt = MEMORY_CHUNKS - to; - - memmove(&chunk[to], &chunk[from], cnt * sizeof(struct mem_chunk)); -} - -/* - * Initialize memory chunk - */ -static void mem_chunk_init(struct mem_chunk *chunk, unsigned long addr, - unsigned long size, int type) -{ - chunk->type = type; - chunk->addr = addr; - chunk->size = size; -} - -/* - * Create memory hole with given address, size, and type - */ -void create_mem_hole(struct mem_chunk chunk[], unsigned long addr, - unsigned long size, int type) -{ - unsigned long lh_start, lh_end, lh_size, ch_start, ch_end, ch_size; - int i, ch_type; - - for (i = 0; i < MEMORY_CHUNKS; i++) { - if (chunk[i].size == 0) - continue; - - /* Define chunk properties */ - ch_start = chunk[i].addr; - ch_size = chunk[i].size; - ch_end = ch_start + ch_size - 1; - ch_type = chunk[i].type; - - /* Is memory chunk hit by memory hole? 
*/ - if (addr + size <= ch_start) - continue; /* No: memory hole in front of chunk */ - if (addr > ch_end) - continue; /* No: memory hole after chunk */ - - /* Yes: Define local hole properties */ - lh_start = max(addr, chunk[i].addr); - lh_end = min(addr + size - 1, ch_end); - lh_size = lh_end - lh_start + 1; - - if (lh_start == ch_start && lh_end == ch_end) { - /* Hole covers complete memory chunk */ - mem_chunk_init(&chunk[i], lh_start, lh_size, type); - } else if (lh_end == ch_end) { - /* Hole starts in memory chunk and convers chunk end */ - mem_chunk_move(chunk, i + 1, i); - mem_chunk_init(&chunk[i], ch_start, ch_size - lh_size, - ch_type); - mem_chunk_init(&chunk[i + 1], lh_start, lh_size, type); - i += 1; - } else if (lh_start == ch_start) { - /* Hole ends in memory chunk */ - mem_chunk_move(chunk, i + 1, i); - mem_chunk_init(&chunk[i], lh_start, lh_size, type); - mem_chunk_init(&chunk[i + 1], lh_end + 1, - ch_size - lh_size, ch_type); - break; - } else { - /* Hole splits memory chunk */ - mem_chunk_move(chunk, i + 2, i); - mem_chunk_init(&chunk[i], ch_start, - lh_start - ch_start, ch_type); - mem_chunk_init(&chunk[i + 1], lh_start, lh_size, type); - mem_chunk_init(&chunk[i + 2], lh_end + 1, - ch_end - lh_end, ch_type); - break; - } - } -} diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c index dfcb3436bad..b89b59158b9 100644 --- a/arch/s390/kernel/module.c +++ b/arch/s390/kernel/module.c @@ -1,9 +1,8 @@ /* - * arch/s390/kernel/module.c - Kernel module help for s390. + * Kernel module help for s390. * * S390 version - * Copyright (C) 2002, 2003 IBM Deutschland Entwicklung GmbH, - * IBM Corporation + * Copyright IBM Corp. 2002, 2003 * Author(s): Arnd Bergmann (arndb@de.ibm.com) * Martin Schwidefsky (schwidefsky@de.ibm.com) * @@ -45,6 +44,17 @@ #define PLT_ENTRY_SIZE 20 #endif /* CONFIG_64BIT */ +#ifdef CONFIG_64BIT +void *module_alloc(unsigned long size) +{ + if (PAGE_ALIGN(size) > MODULES_LEN) + return NULL; + return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END, + GFP_KERNEL, PAGE_KERNEL, NUMA_NO_NODE, + __builtin_return_address(0)); +} +#endif + /* Free memory returned from module_alloc */ void module_free(struct module *mod, void *module_region) { @@ -55,8 +65,7 @@ void module_free(struct module *mod, void *module_region) vfree(module_region); } -static void -check_rela(Elf_Rela *rela, struct module *me) +static void check_rela(Elf_Rela *rela, struct module *me) { struct mod_arch_syminfo *info; @@ -105,9 +114,8 @@ check_rela(Elf_Rela *rela, struct module *me) * Account for GOT and PLT relocations. We can't add sections for * got and plt but we can increase the core module size. 
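The 64-bit module_alloc above confines modules to a dedicated range between MODULES_VADDR and MODULES_END; a hedged note on why the size cap matters for the relocation handling that follows (the BUILD_BUG_ON mirrors the one added to kprobes.c earlier in this patch; the wrapper function is invented for illustration):

static inline void module_area_sanity_check(void)
{
	/* MODULES_END - MODULES_VADDR is at most 1UL << 31, so any
	 * pc-relative, halfword-scaled 32-bit displacement between two
	 * module-area addresses (R_390_PC32DBL, R_390_GOTENT) stays in
	 * range; apply_rela_bits() below still verifies each one. */
	BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
}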
*/ -int -module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, - char *secstrings, struct module *me) +int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, + char *secstrings, struct module *me) { Elf_Shdr *symtab; Elf_Sym *symbols; @@ -169,13 +177,52 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, return 0; } -static int -apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, - struct module *me) +static int apply_rela_bits(Elf_Addr loc, Elf_Addr val, + int sign, int bits, int shift) +{ + unsigned long umax; + long min, max; + + if (val & ((1UL << shift) - 1)) + return -ENOEXEC; + if (sign) { + val = (Elf_Addr)(((long) val) >> shift); + min = -(1L << (bits - 1)); + max = (1L << (bits - 1)) - 1; + if ((long) val < min || (long) val > max) + return -ENOEXEC; + } else { + val >>= shift; + umax = ((1UL << (bits - 1)) << 1) - 1; + if ((unsigned long) val > umax) + return -ENOEXEC; + } + + if (bits == 8) + *(unsigned char *) loc = val; + else if (bits == 12) + *(unsigned short *) loc = (val & 0xfff) | + (*(unsigned short *) loc & 0xf000); + else if (bits == 16) + *(unsigned short *) loc = val; + else if (bits == 20) + *(unsigned int *) loc = (val & 0xfff) << 16 | + (val & 0xff000) >> 4 | + (*(unsigned int *) loc & 0xf00000ff); + else if (bits == 32) + *(unsigned int *) loc = val; + else if (bits == 64) + *(unsigned long *) loc = val; + return 0; +} + +static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, + const char *strtab, struct module *me) { struct mod_arch_syminfo *info; Elf_Addr loc, val; int r_type, r_sym; + int rc = -ENOEXEC; /* This is where to make the change */ loc = base + rela->r_offset; @@ -187,6 +234,9 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, val = symtab[r_sym].st_value; switch (r_type) { + case R_390_NONE: /* No relocation. */ + rc = 0; + break; case R_390_8: /* Direct 8 bit. */ case R_390_12: /* Direct 12 bit. */ case R_390_16: /* Direct 16 bit. */ @@ -195,20 +245,17 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, case R_390_64: /* Direct 64 bit. */ val += rela->r_addend; if (r_type == R_390_8) - *(unsigned char *) loc = val; + rc = apply_rela_bits(loc, val, 0, 8, 0); else if (r_type == R_390_12) - *(unsigned short *) loc = (val & 0xfff) | - (*(unsigned short *) loc & 0xf000); + rc = apply_rela_bits(loc, val, 0, 12, 0); else if (r_type == R_390_16) - *(unsigned short *) loc = val; + rc = apply_rela_bits(loc, val, 0, 16, 0); else if (r_type == R_390_20) - *(unsigned int *) loc = - (*(unsigned int *) loc & 0xf00000ff) | - (val & 0xfff) << 16 | (val & 0xff000) >> 4; + rc = apply_rela_bits(loc, val, 1, 20, 0); else if (r_type == R_390_32) - *(unsigned int *) loc = val; + rc = apply_rela_bits(loc, val, 0, 32, 0); else if (r_type == R_390_64) - *(unsigned long *) loc = val; + rc = apply_rela_bits(loc, val, 0, 64, 0); break; case R_390_PC16: /* PC relative 16 bit. */ case R_390_PC16DBL: /* PC relative 16 bit shifted by 1. */ @@ -217,15 +264,15 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, case R_390_PC64: /* PC relative 64 bit. 
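apply_rela_bits centralizes the alignment and range checks the old open-coded stores silently skipped. A worked example for R_390_PC16DBL, i.e. apply_rela_bits(loc, val, 1, 16, 1), with invented values:

/* val = sym + addend - loc must have its low bit clear (shift = 1);
 * after val >>= 1 the result must fit signed 16 bits:
 *
 *   val = 0xfffe  -> 0x7fff: in range, stored as one halfword
 *   val = 0x10000 -> 0x8000: out of range, -ENOEXEC is returned
 *                    where "*(unsigned short *) loc = val >> 1"
 *                    used to truncate silently */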
*/ val += rela->r_addend - loc; if (r_type == R_390_PC16) - *(unsigned short *) loc = val; + rc = apply_rela_bits(loc, val, 1, 16, 0); else if (r_type == R_390_PC16DBL) - *(unsigned short *) loc = val >> 1; + rc = apply_rela_bits(loc, val, 1, 16, 1); else if (r_type == R_390_PC32DBL) - *(unsigned int *) loc = val >> 1; + rc = apply_rela_bits(loc, val, 1, 32, 1); else if (r_type == R_390_PC32) - *(unsigned int *) loc = val; + rc = apply_rela_bits(loc, val, 1, 32, 0); else if (r_type == R_390_PC64) - *(unsigned long *) loc = val; + rc = apply_rela_bits(loc, val, 1, 64, 0); break; case R_390_GOT12: /* 12 bit GOT offset. */ case R_390_GOT16: /* 16 bit GOT offset. */ @@ -250,26 +297,24 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, val = info->got_offset + rela->r_addend; if (r_type == R_390_GOT12 || r_type == R_390_GOTPLT12) - *(unsigned short *) loc = (val & 0xfff) | - (*(unsigned short *) loc & 0xf000); + rc = apply_rela_bits(loc, val, 0, 12, 0); else if (r_type == R_390_GOT16 || r_type == R_390_GOTPLT16) - *(unsigned short *) loc = val; + rc = apply_rela_bits(loc, val, 0, 16, 0); else if (r_type == R_390_GOT20 || r_type == R_390_GOTPLT20) - *(unsigned int *) loc = - (*(unsigned int *) loc & 0xf00000ff) | - (val & 0xfff) << 16 | (val & 0xff000) >> 4; + rc = apply_rela_bits(loc, val, 1, 20, 0); else if (r_type == R_390_GOT32 || r_type == R_390_GOTPLT32) - *(unsigned int *) loc = val; - else if (r_type == R_390_GOTENT || - r_type == R_390_GOTPLTENT) - *(unsigned int *) loc = - (val + (Elf_Addr) me->module_core - loc) >> 1; + rc = apply_rela_bits(loc, val, 0, 32, 0); else if (r_type == R_390_GOT64 || r_type == R_390_GOTPLT64) - *(unsigned long *) loc = val; + rc = apply_rela_bits(loc, val, 0, 64, 0); + else if (r_type == R_390_GOTENT || + r_type == R_390_GOTPLTENT) { + val += (Elf_Addr) me->module_core - loc; + rc = apply_rela_bits(loc, val, 1, 32, 1); + } break; case R_390_PLT16DBL: /* 16 bit PC rel. PLT shifted by 1. */ case R_390_PLT32DBL: /* 32 bit PC rel. PLT shifted by 1. */ @@ -311,17 +356,17 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, val += rela->r_addend - loc; } if (r_type == R_390_PLT16DBL) - *(unsigned short *) loc = val >> 1; + rc = apply_rela_bits(loc, val, 1, 16, 1); else if (r_type == R_390_PLTOFF16) - *(unsigned short *) loc = val; + rc = apply_rela_bits(loc, val, 0, 16, 0); else if (r_type == R_390_PLT32DBL) - *(unsigned int *) loc = val >> 1; + rc = apply_rela_bits(loc, val, 1, 32, 1); else if (r_type == R_390_PLT32 || r_type == R_390_PLTOFF32) - *(unsigned int *) loc = val; + rc = apply_rela_bits(loc, val, 0, 32, 0); else if (r_type == R_390_PLT64 || r_type == R_390_PLTOFF64) - *(unsigned long *) loc = val; + rc = apply_rela_bits(loc, val, 0, 64, 0); break; case R_390_GOTOFF16: /* 16 bit offset to GOT. */ case R_390_GOTOFF32: /* 32 bit offset to GOT. */ @@ -329,20 +374,20 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, val = val + rela->r_addend - ((Elf_Addr) me->module_core + me->arch.got_offset); if (r_type == R_390_GOTOFF16) - *(unsigned short *) loc = val; + rc = apply_rela_bits(loc, val, 0, 16, 0); else if (r_type == R_390_GOTOFF32) - *(unsigned int *) loc = val; + rc = apply_rela_bits(loc, val, 0, 32, 0); else if (r_type == R_390_GOTOFF64) - *(unsigned long *) loc = val; + rc = apply_rela_bits(loc, val, 0, 64, 0); break; case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */ case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. 
*/ val = (Elf_Addr) me->module_core + me->arch.got_offset + rela->r_addend - loc; if (r_type == R_390_GOTPC) - *(unsigned int *) loc = val; + rc = apply_rela_bits(loc, val, 1, 32, 0); else if (r_type == R_390_GOTPCDBL) - *(unsigned int *) loc = val >> 1; + rc = apply_rela_bits(loc, val, 1, 32, 1); break; case R_390_COPY: case R_390_GLOB_DAT: /* Create GOT entry. */ @@ -350,19 +395,25 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, case R_390_RELATIVE: /* Adjust by program base. */ /* Only needed if we want to support loading of modules linked with -shared. */ - break; + return -ENOEXEC; default: - printk(KERN_ERR "module %s: Unknown relocation: %u\n", + printk(KERN_ERR "module %s: unknown relocation: %u\n", me->name, r_type); return -ENOEXEC; } + if (rc) { + printk(KERN_ERR "module %s: relocation error for symbol %s " + "(r_type %i, value 0x%lx)\n", + me->name, strtab + symtab[r_sym].st_name, + r_type, (unsigned long) val); + return rc; + } return 0; } -int -apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, - unsigned int symindex, unsigned int relsec, - struct module *me) +int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, + unsigned int symindex, unsigned int relsec, + struct module *me) { Elf_Addr base; Elf_Sym *symtab; @@ -378,7 +429,7 @@ apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, n = sechdrs[relsec].sh_size / sizeof(Elf_Rela); for (i = 0; i < n; i++, rela++) { - rc = apply_rela(rela, base, symtab, me); + rc = apply_rela(rela, base, symtab, strtab, me); if (rc) return rc; } diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c index 0fd2e863e11..210e1285f75 100644 --- a/arch/s390/kernel/nmi.c +++ b/arch/s390/kernel/nmi.c @@ -1,7 +1,7 @@ /* * Machine check handler * - * Copyright IBM Corp. 2000,2009 + * Copyright IBM Corp. 2000, 2009 * Author(s): Ingo Adlung <adlung@de.ibm.com>, * Martin Schwidefsky <schwidefsky@de.ibm.com>, * Cornelia Huck <cornelia.huck@de.ibm.com>, @@ -55,7 +55,7 @@ void s390_handle_mcck(void) local_mcck_disable(); mcck = __get_cpu_var(cpu_mcck); memset(&__get_cpu_var(cpu_mcck), 0, sizeof(struct mcck_struct)); - clear_thread_flag(TIF_MCCK_PENDING); + clear_cpu_flag(CIF_MCCK_PENDING); local_mcck_enable(); local_irq_restore(flags); @@ -214,10 +214,7 @@ static int notrace s390_revalidate_registers(struct mci *mci) : "0", "cc"); #endif /* Revalidate clock comparator register */ - if (S390_lowcore.clock_comparator == -1) - set_clock_comparator(S390_lowcore.mcck_clock); - else - set_clock_comparator(S390_lowcore.clock_comparator); + set_clock_comparator(S390_lowcore.clock_comparator); /* Check if old PSW is valid */ if (!mci->wp) /* @@ -254,9 +251,7 @@ void notrace s390_do_machine_check(struct pt_regs *regs) int umode; nmi_enter(); - s390_idle_check(regs, S390_lowcore.mcck_clock, - S390_lowcore.mcck_enter_timer); - kstat_cpu(smp_processor_id()).irqs[NMI_NMI]++; + inc_irq_stat(NMI_NMI); mci = (struct mci *) &S390_lowcore.mcck_interruption_code; mcck = &__get_cpu_var(cpu_mcck); umode = user_mode(regs); @@ -295,7 +290,7 @@ void notrace s390_do_machine_check(struct pt_regs *regs) * retry this instruction. 
 */
 		spin_lock(&ipd_lock);
-		tmp = get_clock();
+		tmp = get_tod_clock();
 		if (((tmp - last_ipd) >> 12) < MAX_IPD_TIME)
 			ipd_count++;
 		else
@@ -318,7 +313,7 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
 			 */
 			mcck->kill_task = 1;
 			mcck->mcck_code = *(unsigned long long *) mci;
-			set_thread_flag(TIF_MCCK_PENDING);
+			set_cpu_flag(CIF_MCCK_PENDING);
 		} else {
 			/*
 			 * Couldn't restore all register contents while in
@@ -357,12 +352,12 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
 	if (mci->cp) {
 		/* Channel report word pending */
 		mcck->channel_report = 1;
-		set_thread_flag(TIF_MCCK_PENDING);
+		set_cpu_flag(CIF_MCCK_PENDING);
 	}
 	if (mci->w) {
 		/* Warning pending */
 		mcck->warning = 1;
-		set_thread_flag(TIF_MCCK_PENDING);
+		set_cpu_flag(CIF_MCCK_PENDING);
 	}
 	nmi_exit();
 }
diff --git a/arch/s390/kernel/os_info.c b/arch/s390/kernel/os_info.c
new file mode 100644
index 00000000000..d112fc66f99
--- /dev/null
+++ b/arch/s390/kernel/os_info.c
@@ -0,0 +1,168 @@
+/*
+ * OS info memory interface
+ *
+ * Copyright IBM Corp. 2012
+ * Author(s): Michael Holzheu <holzheu@linux.vnet.ibm.com>
+ */
+
+#define KMSG_COMPONENT "os_info"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/crash_dump.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <asm/checksum.h>
+#include <asm/lowcore.h>
+#include <asm/os_info.h>
+
+/*
+ * OS info structure has to be page aligned
+ */
+static struct os_info os_info __page_aligned_data;
+
+/*
+ * Compute checksum over OS info structure
+ */
+u32 os_info_csum(struct os_info *os_info)
+{
+	int size = sizeof(*os_info) - offsetof(struct os_info, version_major);
+	return csum_partial(&os_info->version_major, size, 0);
+}
+
+/*
+ * Add crashkernel info to OS info and update checksum
+ */
+void os_info_crashkernel_add(unsigned long base, unsigned long size)
+{
+	os_info.crashkernel_addr = (u64)(unsigned long)base;
+	os_info.crashkernel_size = (u64)(unsigned long)size;
+	os_info.csum = os_info_csum(&os_info);
+}
+
+/*
+ * Add OS info entry and update checksum
+ */
+void os_info_entry_add(int nr, void *ptr, u64 size)
+{
+	os_info.entry[nr].addr = (u64)(unsigned long)ptr;
+	os_info.entry[nr].size = size;
+	os_info.entry[nr].csum = csum_partial(ptr, size, 0);
+	os_info.csum = os_info_csum(&os_info);
+}
+
+/*
+ * Initialize OS info structure and set lowcore pointer
+ */
+void __init os_info_init(void)
+{
+	void *ptr = &os_info;
+
+	os_info.version_major = OS_INFO_VERSION_MAJOR;
+	os_info.version_minor = OS_INFO_VERSION_MINOR;
+	os_info.magic = OS_INFO_MAGIC;
+	os_info.csum = os_info_csum(&os_info);
+	mem_assign_absolute(S390_lowcore.os_info, (unsigned long) ptr);
+}
+
+#ifdef CONFIG_CRASH_DUMP
+
+static struct os_info *os_info_old;
+
+/*
+ * Allocate and copy OS info entry from oldmem
+ */
+static void os_info_old_alloc(int nr, int align)
+{
+	unsigned long addr, size = 0;
+	char *buf, *buf_align, *msg;
+	u32 csum;
+
+	addr = os_info_old->entry[nr].addr;
+	if (!addr) {
+		msg = "not available";
+		goto fail;
+	}
+	size = os_info_old->entry[nr].size;
+	buf = kmalloc(size + align - 1, GFP_KERNEL);
+	if (!buf) {
+		msg = "alloc failed";
+		goto fail;
+	}
+	buf_align = PTR_ALIGN(buf, align);
+	if (copy_from_oldmem(buf_align, (void *) addr, size)) {
+		msg = "copy failed";
+		goto fail_free;
+	}
+	csum = csum_partial(buf_align, size, 0);
+	if (csum != os_info_old->entry[nr].csum) {
+		msg = "checksum failed";
+		goto fail_free;
+	}
+	os_info_old->entry[nr].addr = (u64)(unsigned long)buf_align;
+	msg = "copied";
+	goto out;
+fail_free:
+	kfree(buf);
+fail:
+	os_info_old->entry[nr].addr = 0;
+out:
+	pr_info("entry %i: %s (addr=0x%lx size=%lu)\n",
+		nr, msg, addr, size);
+}
+
+/*
+ * Initialize os info and os info entries from oldmem
+ */
+static void os_info_old_init(void)
+{
+	static int os_info_init;
+	unsigned long addr;
+
+	if (os_info_init)
+		return;
+	if (!OLDMEM_BASE)
+		goto fail;
+	if (copy_from_oldmem(&addr, &S390_lowcore.os_info, sizeof(addr)))
+		goto fail;
+	if (addr == 0 || addr % PAGE_SIZE)
+		goto fail;
+	os_info_old = kzalloc(sizeof(*os_info_old), GFP_KERNEL);
+	if (!os_info_old)
+		goto fail;
+	if (copy_from_oldmem(os_info_old, (void *) addr, sizeof(*os_info_old)))
+		goto fail_free;
+	if (os_info_old->magic != OS_INFO_MAGIC)
+		goto fail_free;
+	if (os_info_old->csum != os_info_csum(os_info_old))
+		goto fail_free;
+	if (os_info_old->version_major > OS_INFO_VERSION_MAJOR)
+		goto fail_free;
+	os_info_old_alloc(OS_INFO_VMCOREINFO, 1);
+	os_info_old_alloc(OS_INFO_REIPL_BLOCK, 1);
+	pr_info("crashkernel: addr=0x%lx size=%lu\n",
+		(unsigned long) os_info_old->crashkernel_addr,
+		(unsigned long) os_info_old->crashkernel_size);
+	os_info_init = 1;
+	return;
+fail_free:
+	kfree(os_info_old);
+fail:
+	os_info_init = 1;
+	os_info_old = NULL;
+}
+
+/*
+ * Return pointer to os info entry and its size
+ */
+void *os_info_old_entry(int nr, unsigned long *size)
+{
+	os_info_old_init();
+
+	if (!os_info_old)
+		return NULL;
+	if (!os_info_old->entry[nr].addr)
+		return NULL;
+	*size = (unsigned long) os_info_old->entry[nr].size;
+	return (void *)(unsigned long)os_info_old->entry[nr].addr;
}
+#endif
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
new file mode 100644
index 00000000000..ea75d011a6f
--- /dev/null
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -0,0 +1,696 @@
+/*
+ * Performance event support for s390x - CPU-measurement Counter Facility
+ *
+ * Copyright IBM Corp. 2012
+ * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ */
+#define KMSG_COMPONENT "cpum_cf"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/kernel_stat.h>
+#include <linux/perf_event.h>
+#include <linux/percpu.h>
+#include <linux/notifier.h>
+#include <linux/init.h>
+#include <linux/export.h>
+#include <asm/ctl_reg.h>
+#include <asm/irq.h>
+#include <asm/cpu_mf.h>
+
+/* The CPU-measurement counter facility supports these CPU counter sets:
+ * Basic counter set: 0-31
+ * Problem-state counter set: 32-63
+ * Crypto-activity counter set: 64-127
+ * Extended counter set: 128-159
+ */
+enum cpumf_ctr_set {
+ /* CPU counter sets */
+ CPUMF_CTR_SET_BASIC = 0,
+ CPUMF_CTR_SET_USER = 1,
+ CPUMF_CTR_SET_CRYPTO = 2,
+ CPUMF_CTR_SET_EXT = 3,
+
+ /* Maximum number of counter sets */
+ CPUMF_CTR_SET_MAX,
+};
+
+#define CPUMF_LCCTL_ENABLE_SHIFT 16
+#define CPUMF_LCCTL_ACTCTL_SHIFT 0
+static const u64 cpumf_state_ctl[CPUMF_CTR_SET_MAX] = {
+ [CPUMF_CTR_SET_BASIC] = 0x02,
+ [CPUMF_CTR_SET_USER] = 0x04,
+ [CPUMF_CTR_SET_CRYPTO] = 0x08,
+ [CPUMF_CTR_SET_EXT] = 0x01,
+};
+
+static void ctr_set_enable(u64 *state, int ctr_set)
+{
+ *state |= cpumf_state_ctl[ctr_set] << CPUMF_LCCTL_ENABLE_SHIFT;
+}
+static void ctr_set_disable(u64 *state, int ctr_set)
+{
+ *state &= ~(cpumf_state_ctl[ctr_set] << CPUMF_LCCTL_ENABLE_SHIFT);
+}
+static void ctr_set_start(u64 *state, int ctr_set)
+{
+ *state |= cpumf_state_ctl[ctr_set] << CPUMF_LCCTL_ACTCTL_SHIFT;
+}
+static void ctr_set_stop(u64 *state, int ctr_set)
+{
+ *state &= ~(cpumf_state_ctl[ctr_set] << CPUMF_LCCTL_ACTCTL_SHIFT);
+}
+
+/* Local CPUMF event structure */
+struct cpu_hw_events {
+ struct cpumf_ctr_info info;
+ atomic_t ctr_set[CPUMF_CTR_SET_MAX];
+ u64 state, tx_state;
+ unsigned int flags;
+};
+static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
+ .ctr_set = {
+ [CPUMF_CTR_SET_BASIC] = ATOMIC_INIT(0),
+ [CPUMF_CTR_SET_USER] = ATOMIC_INIT(0),
+ [CPUMF_CTR_SET_CRYPTO] = ATOMIC_INIT(0),
+ [CPUMF_CTR_SET_EXT] = ATOMIC_INIT(0),
+ },
+ .state = 0,
+ .flags = 0,
+};
+
+static int get_counter_set(u64 event)
+{
+ int set = -1;
+
+ if (event < 32)
+ set = CPUMF_CTR_SET_BASIC;
+ else if (event < 64)
+ set = CPUMF_CTR_SET_USER;
+ else if (event < 128)
+ set = CPUMF_CTR_SET_CRYPTO;
+ else if (event < 256)
+ set = CPUMF_CTR_SET_EXT;
+
+ return set;
+}
+
+static int validate_event(const struct hw_perf_event *hwc)
+{
+ switch (hwc->config_base) {
+ case CPUMF_CTR_SET_BASIC:
+ case CPUMF_CTR_SET_USER:
+ case CPUMF_CTR_SET_CRYPTO:
+ case CPUMF_CTR_SET_EXT:
+ /* check for reserved counters */
+ if ((hwc->config >= 6 && hwc->config <= 31) ||
+ (hwc->config >= 38 && hwc->config <= 63) ||
+ (hwc->config >= 80 && hwc->config <= 127))
+ return -EOPNOTSUPP;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int validate_ctr_version(const struct hw_perf_event *hwc)
+{
+ struct cpu_hw_events *cpuhw;
+ int err = 0;
+
+ cpuhw = &get_cpu_var(cpu_hw_events);
+
+ /* check required version for counter sets */
+ switch (hwc->config_base) {
+ case CPUMF_CTR_SET_BASIC:
+ case CPUMF_CTR_SET_USER:
+ if (cpuhw->info.cfvn < 1)
+ err = -EOPNOTSUPP;
+ break;
+ case CPUMF_CTR_SET_CRYPTO:
+ case CPUMF_CTR_SET_EXT:
+ if (cpuhw->info.csvn < 1)
+ err = -EOPNOTSUPP;
+ if ((cpuhw->info.csvn == 1 && hwc->config > 159) ||
+ (cpuhw->info.csvn == 2 && hwc->config > 175) ||
+ (cpuhw->info.csvn > 2 && hwc->config > 255))
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ put_cpu_var(cpu_hw_events);
+ return err;
+}
+
+static int validate_ctr_auth(const struct hw_perf_event *hwc)
+{
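+ /* Example: an event in the problem-state counter set maps to
+ * cpumf_state_ctl[CPUMF_CTR_SET_USER] == 0x04; the event is permitted
+ * only if this bit is also set in the authorization mask
+ * (cpuhw->info.auth_ctl) that qctri() reported for this CPU.
+ */
+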
struct cpu_hw_events *cpuhw; + u64 ctrs_state; + int err = 0; + + cpuhw = &get_cpu_var(cpu_hw_events); + + /* check authorization for cpu counter sets */ + ctrs_state = cpumf_state_ctl[hwc->config_base]; + if (!(ctrs_state & cpuhw->info.auth_ctl)) + err = -EPERM; + + put_cpu_var(cpu_hw_events); + return err; +} + +/* + * Change the CPUMF state to active. + * Enable and activate the CPU-counter sets according + * to the per-cpu control state. + */ +static void cpumf_pmu_enable(struct pmu *pmu) +{ + struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); + int err; + + if (cpuhw->flags & PMU_F_ENABLED) + return; + + err = lcctl(cpuhw->state); + if (err) { + pr_err("Enabling the performance measuring unit " + "failed with rc=%x\n", err); + return; + } + + cpuhw->flags |= PMU_F_ENABLED; +} + +/* + * Change the CPUMF state to inactive. + * Disable and enable (inactive) the CPU-counter sets according + * to the per-cpu control state. + */ +static void cpumf_pmu_disable(struct pmu *pmu) +{ + struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); + int err; + u64 inactive; + + if (!(cpuhw->flags & PMU_F_ENABLED)) + return; + + inactive = cpuhw->state & ~((1 << CPUMF_LCCTL_ENABLE_SHIFT) - 1); + err = lcctl(inactive); + if (err) { + pr_err("Disabling the performance measuring unit " + "failed with rc=%x\n", err); + return; + } + + cpuhw->flags &= ~PMU_F_ENABLED; +} + + +/* Number of perf events counting hardware events */ +static atomic_t num_events = ATOMIC_INIT(0); +/* Used to avoid races in calling reserve/release_cpumf_hardware */ +static DEFINE_MUTEX(pmc_reserve_mutex); + +/* CPU-measurement alerts for the counter facility */ +static void cpumf_measurement_alert(struct ext_code ext_code, + unsigned int alert, unsigned long unused) +{ + struct cpu_hw_events *cpuhw; + + if (!(alert & CPU_MF_INT_CF_MASK)) + return; + + inc_irq_stat(IRQEXT_CMC); + cpuhw = &__get_cpu_var(cpu_hw_events); + + /* Measurement alerts are shared and might happen when the PMU + * is not reserved. Ignore these alerts in this case. 
*/ + if (!(cpuhw->flags & PMU_F_RESERVED)) + return; + + /* counter authorization change alert */ + if (alert & CPU_MF_INT_CF_CACA) + qctri(&cpuhw->info); + + /* loss of counter data alert */ + if (alert & CPU_MF_INT_CF_LCDA) + pr_err("CPU[%i] Counter data was lost\n", smp_processor_id()); +} + +#define PMC_INIT 0 +#define PMC_RELEASE 1 +static void setup_pmc_cpu(void *flags) +{ + struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); + + switch (*((int *) flags)) { + case PMC_INIT: + memset(&cpuhw->info, 0, sizeof(cpuhw->info)); + qctri(&cpuhw->info); + cpuhw->flags |= PMU_F_RESERVED; + break; + + case PMC_RELEASE: + cpuhw->flags &= ~PMU_F_RESERVED; + break; + } + + /* Disable CPU counter sets */ + lcctl(0); +} + +/* Initialize the CPU-measurement facility */ +static int reserve_pmc_hardware(void) +{ + int flags = PMC_INIT; + + on_each_cpu(setup_pmc_cpu, &flags, 1); + irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT); + + return 0; +} + +/* Release the CPU-measurement facility */ +static void release_pmc_hardware(void) +{ + int flags = PMC_RELEASE; + + on_each_cpu(setup_pmc_cpu, &flags, 1); + irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT); +} + +/* Release the PMU if event is the last perf event */ +static void hw_perf_event_destroy(struct perf_event *event) +{ + if (!atomic_add_unless(&num_events, -1, 1)) { + mutex_lock(&pmc_reserve_mutex); + if (atomic_dec_return(&num_events) == 0) + release_pmc_hardware(); + mutex_unlock(&pmc_reserve_mutex); + } +} + +/* CPUMF <-> perf event mappings for kernel+userspace (basic set) */ +static const int cpumf_generic_events_basic[] = { + [PERF_COUNT_HW_CPU_CYCLES] = 0, + [PERF_COUNT_HW_INSTRUCTIONS] = 1, + [PERF_COUNT_HW_CACHE_REFERENCES] = -1, + [PERF_COUNT_HW_CACHE_MISSES] = -1, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = -1, + [PERF_COUNT_HW_BRANCH_MISSES] = -1, + [PERF_COUNT_HW_BUS_CYCLES] = -1, +}; +/* CPUMF <-> perf event mappings for userspace (problem-state set) */ +static const int cpumf_generic_events_user[] = { + [PERF_COUNT_HW_CPU_CYCLES] = 32, + [PERF_COUNT_HW_INSTRUCTIONS] = 33, + [PERF_COUNT_HW_CACHE_REFERENCES] = -1, + [PERF_COUNT_HW_CACHE_MISSES] = -1, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = -1, + [PERF_COUNT_HW_BRANCH_MISSES] = -1, + [PERF_COUNT_HW_BUS_CYCLES] = -1, +}; + +static int __hw_perf_event_init(struct perf_event *event) +{ + struct perf_event_attr *attr = &event->attr; + struct hw_perf_event *hwc = &event->hw; + int err; + u64 ev; + + switch (attr->type) { + case PERF_TYPE_RAW: + /* Raw events are used to access counters directly, + * hence do not permit excludes */ + if (attr->exclude_kernel || attr->exclude_user || + attr->exclude_hv) + return -EOPNOTSUPP; + ev = attr->config; + break; + + case PERF_TYPE_HARDWARE: + ev = attr->config; + /* Count user space (problem-state) only */ + if (!attr->exclude_user && attr->exclude_kernel) { + if (ev >= ARRAY_SIZE(cpumf_generic_events_user)) + return -EOPNOTSUPP; + ev = cpumf_generic_events_user[ev]; + + /* No support for kernel space counters only */ + } else if (!attr->exclude_kernel && attr->exclude_user) { + return -EOPNOTSUPP; + + /* Count user and kernel space */ + } else { + if (ev >= ARRAY_SIZE(cpumf_generic_events_basic)) + return -EOPNOTSUPP; + ev = cpumf_generic_events_basic[ev]; + } + break; + + default: + return -ENOENT; + } + + if (ev == -1) + return -ENOENT; + + if (ev >= PERF_CPUM_CF_MAX_CTR) + return -EINVAL; + + /* Use the hardware perf event structure to store the counter number + * in 'config' member and the counter set to which the counter belongs 
+ * in the 'config_base'. The counter set (config_base) is then used
+ * to enable/disable the counters.
+ */
+ hwc->config = ev;
+ hwc->config_base = get_counter_set(ev);
+
+ /* Validate the counter that is assigned to this event.
+ * Because the counter facility can use numerous counters at the
+ * same time without constraints, it is not necessary to explicitly
+ * validate event groups (event->group_leader != event).
+ */
+ err = validate_event(hwc);
+ if (err)
+ return err;
+
+ /* Initialize for using the CPU-measurement counter facility */
+ if (!atomic_inc_not_zero(&num_events)) {
+ mutex_lock(&pmc_reserve_mutex);
+ if (atomic_read(&num_events) == 0 && reserve_pmc_hardware())
+ err = -EBUSY;
+ else
+ atomic_inc(&num_events);
+ mutex_unlock(&pmc_reserve_mutex);
+ }
+ event->destroy = hw_perf_event_destroy;
+
+ /* Finally, validate version and authorization of the counter set */
+ err = validate_ctr_auth(hwc);
+ if (!err)
+ err = validate_ctr_version(hwc);
+
+ return err;
+}
+
+static int cpumf_pmu_event_init(struct perf_event *event)
+{
+ int err;
+
+ switch (event->attr.type) {
+ case PERF_TYPE_HARDWARE:
+ case PERF_TYPE_HW_CACHE:
+ case PERF_TYPE_RAW:
+ /* The CPU measurement counter facility does not have overflow
+ * interrupts to do sampling. Sampling must be provided by
+ * external means, for example, by timers.
+ */
+ if (is_sampling_event(event))
+ return -ENOENT;
+ err = __hw_perf_event_init(event);
+ break;
+ default:
+ return -ENOENT;
+ }
+
+ if (unlikely(err) && event->destroy)
+ event->destroy(event);
+
+ return err;
+}
+
+static int hw_perf_event_reset(struct perf_event *event)
+{
+ u64 prev, new;
+ int err;
+
+ do {
+ prev = local64_read(&event->hw.prev_count);
+ err = ecctr(event->hw.config, &new);
+ if (err) {
+ if (err != 3)
+ break;
+ /* The counter is not (yet) available. This
+ * might happen if the counter set to which
+ * this counter belongs is in the disabled
+ * state.
+ */
+ new = 0;
+ }
+ } while (local64_cmpxchg(&event->hw.prev_count, prev, new) != prev);
+
+ return err;
+}
+
+static int hw_perf_event_update(struct perf_event *event)
+{
+ u64 prev, new, delta;
+ int err;
+
+ do {
+ prev = local64_read(&event->hw.prev_count);
+ err = ecctr(event->hw.config, &new);
+ if (err)
+ goto out;
+ } while (local64_cmpxchg(&event->hw.prev_count, prev, new) != prev);
+
+ delta = (prev <= new) ? new - prev
+ : (-1ULL - prev) + new + 1; /* overflow */
+ local64_add(delta, &event->count);
+out:
+ return err;
+}
+
+static void cpumf_pmu_read(struct perf_event *event)
+{
+ if (event->hw.state & PERF_HES_STOPPED)
+ return;
+
+ hw_perf_event_update(event);
+}
+
+static void cpumf_pmu_start(struct perf_event *event, int flags)
+{
+ struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
+ return;
+
+ if (WARN_ON_ONCE(hwc->config == -1))
+ return;
+
+ if (flags & PERF_EF_RELOAD)
+ WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
+
+ hwc->state = 0;
+
+ /* (Re-)enable and activate the counter set */
+ ctr_set_enable(&cpuhw->state, hwc->config_base);
+ ctr_set_start(&cpuhw->state, hwc->config_base);
+
+ /* The counter set to which this counter belongs can be already active.
+ * Because all counters in a set are active, the event->hw.prev_count
+ * needs to be synchronized. At this point, the counter set can be in
+ * the inactive or disabled state.
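+ * Example: if two events use the basic counter set, the second
+ * cpumf_pmu_start() finds the set already started; resetting
+ * event->hw.prev_count to the current counter value here keeps the
+ * delta computation in hw_perf_event_update() correct.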
+ */
+ hw_perf_event_reset(event);
+
+ /* increment refcount for this counter set */
+ atomic_inc(&cpuhw->ctr_set[hwc->config_base]);
+}
+
+static void cpumf_pmu_stop(struct perf_event *event, int flags)
+{
+ struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (!(hwc->state & PERF_HES_STOPPED)) {
+ /* Decrement reference count for this counter set and if this
+ * is the last used counter in the set, clear activation
+ * control and set the counter set state to inactive.
+ */
+ if (!atomic_dec_return(&cpuhw->ctr_set[hwc->config_base]))
+ ctr_set_stop(&cpuhw->state, hwc->config_base);
+ event->hw.state |= PERF_HES_STOPPED;
+ }
+
+ if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
+ hw_perf_event_update(event);
+ event->hw.state |= PERF_HES_UPTODATE;
+ }
+}
+
+static int cpumf_pmu_add(struct perf_event *event, int flags)
+{
+ struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+
+ /* Check authorization for the counter set to which this
+ * counter belongs.
+ * For group events transaction, the authorization check is
+ * done in cpumf_pmu_commit_txn().
+ */
+ if (!(cpuhw->flags & PERF_EVENT_TXN))
+ if (validate_ctr_auth(&event->hw))
+ return -EPERM;
+
+ ctr_set_enable(&cpuhw->state, event->hw.config_base);
+ event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
+
+ if (flags & PERF_EF_START)
+ cpumf_pmu_start(event, PERF_EF_RELOAD);
+
+ perf_event_update_userpage(event);
+
+ return 0;
+}
+
+static void cpumf_pmu_del(struct perf_event *event, int flags)
+{
+ struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+
+ cpumf_pmu_stop(event, PERF_EF_UPDATE);
+
+ /* Check if any counter in the counter set is still used. If not used,
+ * change the counter set to the disabled state. This also clears the
+ * content of all counters in the set.
+ *
+ * When a new perf event has been added but not yet started, this can
+ * clear enable control and reset all counters in a set. Therefore,
+ * cpumf_pmu_start() always has to reenable a counter set.
+ */
+ if (!atomic_read(&cpuhw->ctr_set[event->hw.config_base]))
+ ctr_set_disable(&cpuhw->state, event->hw.config_base);
+
+ perf_event_update_userpage(event);
+}
+
+/*
+ * Start group events scheduling transaction.
+ * Set flags to perform a single test at commit time.
+ */
+static void cpumf_pmu_start_txn(struct pmu *pmu)
+{
+ struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+
+ perf_pmu_disable(pmu);
+ cpuhw->flags |= PERF_EVENT_TXN;
+ cpuhw->tx_state = cpuhw->state;
+}
+
+/*
+ * Stop and cancel a group events scheduling transaction.
+ * Assumes cpumf_pmu_del() is called for each successful
+ * cpumf_pmu_add() during the transaction.
+ */
+static void cpumf_pmu_cancel_txn(struct pmu *pmu)
+{
+ struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+
+ WARN_ON(cpuhw->tx_state != cpuhw->state);
+
+ cpuhw->flags &= ~PERF_EVENT_TXN;
+ perf_pmu_enable(pmu);
+}
+
+/*
+ * Commit the group events scheduling transaction. On success, the
+ * transaction is closed. On error, the transaction is kept open
+ * until cpumf_pmu_cancel_txn() is called.
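+ *
+ * For example, a transaction that added events for the basic (0x02) and
+ * crypto (0x08) counter sets commits only if both bits are set in the
+ * authorization mask (info.auth_ctl); otherwise -EPERM is returned and
+ * the transaction stays open.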
+ */ +static int cpumf_pmu_commit_txn(struct pmu *pmu) +{ + struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); + u64 state; + + /* check if the updated state can be scheduled */ + state = cpuhw->state & ~((1 << CPUMF_LCCTL_ENABLE_SHIFT) - 1); + state >>= CPUMF_LCCTL_ENABLE_SHIFT; + if ((state & cpuhw->info.auth_ctl) != state) + return -EPERM; + + cpuhw->flags &= ~PERF_EVENT_TXN; + perf_pmu_enable(pmu); + return 0; +} + +/* Performance monitoring unit for s390x */ +static struct pmu cpumf_pmu = { + .pmu_enable = cpumf_pmu_enable, + .pmu_disable = cpumf_pmu_disable, + .event_init = cpumf_pmu_event_init, + .add = cpumf_pmu_add, + .del = cpumf_pmu_del, + .start = cpumf_pmu_start, + .stop = cpumf_pmu_stop, + .read = cpumf_pmu_read, + .start_txn = cpumf_pmu_start_txn, + .commit_txn = cpumf_pmu_commit_txn, + .cancel_txn = cpumf_pmu_cancel_txn, +}; + +static int cpumf_pmu_notifier(struct notifier_block *self, unsigned long action, + void *hcpu) +{ + unsigned int cpu = (long) hcpu; + int flags; + + switch (action & ~CPU_TASKS_FROZEN) { + case CPU_ONLINE: + flags = PMC_INIT; + smp_call_function_single(cpu, setup_pmc_cpu, &flags, 1); + break; + case CPU_DOWN_PREPARE: + flags = PMC_RELEASE; + smp_call_function_single(cpu, setup_pmc_cpu, &flags, 1); + break; + default: + break; + } + + return NOTIFY_OK; +} + +static int __init cpumf_pmu_init(void) +{ + int rc; + + if (!cpum_cf_avail()) + return -ENODEV; + + /* clear bit 15 of cr0 to unauthorize problem-state to + * extract measurement counters */ + ctl_clear_bit(0, 48); + + /* register handler for measurement-alert interruptions */ + rc = register_external_irq(EXT_IRQ_MEASURE_ALERT, + cpumf_measurement_alert); + if (rc) { + pr_err("Registering for CPU-measurement alerts " + "failed with rc=%i\n", rc); + goto out; + } + + cpumf_pmu.attr_groups = cpumf_cf_event_group(); + rc = perf_pmu_register(&cpumf_pmu, "cpum_cf", PERF_TYPE_RAW); + if (rc) { + pr_err("Registering the cpum_cf PMU failed with rc=%i\n", rc); + unregister_external_irq(EXT_IRQ_MEASURE_ALERT, + cpumf_measurement_alert); + goto out; + } + perf_cpu_notifier(cpumf_pmu_notifier); +out: + return rc; +} +early_initcall(cpumf_pmu_init); diff --git a/arch/s390/kernel/perf_cpum_cf_events.c b/arch/s390/kernel/perf_cpum_cf_events.c new file mode 100644 index 00000000000..4554a4bae39 --- /dev/null +++ b/arch/s390/kernel/perf_cpum_cf_events.c @@ -0,0 +1,322 @@ +/* + * Perf PMU sysfs events attributes for available CPU-measurement counters + * + */ + +#include <linux/slab.h> +#include <linux/perf_event.h> + + +/* BEGIN: CPUM_CF COUNTER DEFINITIONS =================================== */ + +CPUMF_EVENT_ATTR(cf, CPU_CYCLES, 0x0000); +CPUMF_EVENT_ATTR(cf, INSTRUCTIONS, 0x0001); +CPUMF_EVENT_ATTR(cf, L1I_DIR_WRITES, 0x0002); +CPUMF_EVENT_ATTR(cf, L1I_PENALTY_CYCLES, 0x0003); +CPUMF_EVENT_ATTR(cf, PROBLEM_STATE_CPU_CYCLES, 0x0020); +CPUMF_EVENT_ATTR(cf, PROBLEM_STATE_INSTRUCTIONS, 0x0021); +CPUMF_EVENT_ATTR(cf, PROBLEM_STATE_L1I_DIR_WRITES, 0x0022); +CPUMF_EVENT_ATTR(cf, PROBLEM_STATE_L1I_PENALTY_CYCLES, 0x0023); +CPUMF_EVENT_ATTR(cf, PROBLEM_STATE_L1D_DIR_WRITES, 0x0024); +CPUMF_EVENT_ATTR(cf, PROBLEM_STATE_L1D_PENALTY_CYCLES, 0x0025); +CPUMF_EVENT_ATTR(cf, L1D_DIR_WRITES, 0x0004); +CPUMF_EVENT_ATTR(cf, L1D_PENALTY_CYCLES, 0x0005); +CPUMF_EVENT_ATTR(cf, PRNG_FUNCTIONS, 0x0040); +CPUMF_EVENT_ATTR(cf, PRNG_CYCLES, 0x0041); +CPUMF_EVENT_ATTR(cf, PRNG_BLOCKED_FUNCTIONS, 0x0042); +CPUMF_EVENT_ATTR(cf, PRNG_BLOCKED_CYCLES, 0x0043); +CPUMF_EVENT_ATTR(cf, SHA_FUNCTIONS, 0x0044); +CPUMF_EVENT_ATTR(cf, 
SHA_CYCLES, 0x0045); +CPUMF_EVENT_ATTR(cf, SHA_BLOCKED_FUNCTIONS, 0x0046); +CPUMF_EVENT_ATTR(cf, SHA_BLOCKED_CYCLES, 0x0047); +CPUMF_EVENT_ATTR(cf, DEA_FUNCTIONS, 0x0048); +CPUMF_EVENT_ATTR(cf, DEA_CYCLES, 0x0049); +CPUMF_EVENT_ATTR(cf, DEA_BLOCKED_FUNCTIONS, 0x004a); +CPUMF_EVENT_ATTR(cf, DEA_BLOCKED_CYCLES, 0x004b); +CPUMF_EVENT_ATTR(cf, AES_FUNCTIONS, 0x004c); +CPUMF_EVENT_ATTR(cf, AES_CYCLES, 0x004d); +CPUMF_EVENT_ATTR(cf, AES_BLOCKED_FUNCTIONS, 0x004e); +CPUMF_EVENT_ATTR(cf, AES_BLOCKED_CYCLES, 0x004f); +CPUMF_EVENT_ATTR(cf_z10, L1I_L2_SOURCED_WRITES, 0x0080); +CPUMF_EVENT_ATTR(cf_z10, L1D_L2_SOURCED_WRITES, 0x0081); +CPUMF_EVENT_ATTR(cf_z10, L1I_L3_LOCAL_WRITES, 0x0082); +CPUMF_EVENT_ATTR(cf_z10, L1D_L3_LOCAL_WRITES, 0x0083); +CPUMF_EVENT_ATTR(cf_z10, L1I_L3_REMOTE_WRITES, 0x0084); +CPUMF_EVENT_ATTR(cf_z10, L1D_L3_REMOTE_WRITES, 0x0085); +CPUMF_EVENT_ATTR(cf_z10, L1D_LMEM_SOURCED_WRITES, 0x0086); +CPUMF_EVENT_ATTR(cf_z10, L1I_LMEM_SOURCED_WRITES, 0x0087); +CPUMF_EVENT_ATTR(cf_z10, L1D_RO_EXCL_WRITES, 0x0088); +CPUMF_EVENT_ATTR(cf_z10, L1I_CACHELINE_INVALIDATES, 0x0089); +CPUMF_EVENT_ATTR(cf_z10, ITLB1_WRITES, 0x008a); +CPUMF_EVENT_ATTR(cf_z10, DTLB1_WRITES, 0x008b); +CPUMF_EVENT_ATTR(cf_z10, TLB2_PTE_WRITES, 0x008c); +CPUMF_EVENT_ATTR(cf_z10, TLB2_CRSTE_WRITES, 0x008d); +CPUMF_EVENT_ATTR(cf_z10, TLB2_CRSTE_HPAGE_WRITES, 0x008e); +CPUMF_EVENT_ATTR(cf_z10, ITLB1_MISSES, 0x0091); +CPUMF_EVENT_ATTR(cf_z10, DTLB1_MISSES, 0x0092); +CPUMF_EVENT_ATTR(cf_z10, L2C_STORES_SENT, 0x0093); +CPUMF_EVENT_ATTR(cf_z196, L1D_L2_SOURCED_WRITES, 0x0080); +CPUMF_EVENT_ATTR(cf_z196, L1I_L2_SOURCED_WRITES, 0x0081); +CPUMF_EVENT_ATTR(cf_z196, DTLB1_MISSES, 0x0082); +CPUMF_EVENT_ATTR(cf_z196, ITLB1_MISSES, 0x0083); +CPUMF_EVENT_ATTR(cf_z196, L2C_STORES_SENT, 0x0085); +CPUMF_EVENT_ATTR(cf_z196, L1D_OFFBOOK_L3_SOURCED_WRITES, 0x0086); +CPUMF_EVENT_ATTR(cf_z196, L1D_ONBOOK_L4_SOURCED_WRITES, 0x0087); +CPUMF_EVENT_ATTR(cf_z196, L1I_ONBOOK_L4_SOURCED_WRITES, 0x0088); +CPUMF_EVENT_ATTR(cf_z196, L1D_RO_EXCL_WRITES, 0x0089); +CPUMF_EVENT_ATTR(cf_z196, L1D_OFFBOOK_L4_SOURCED_WRITES, 0x008a); +CPUMF_EVENT_ATTR(cf_z196, L1I_OFFBOOK_L4_SOURCED_WRITES, 0x008b); +CPUMF_EVENT_ATTR(cf_z196, DTLB1_HPAGE_WRITES, 0x008c); +CPUMF_EVENT_ATTR(cf_z196, L1D_LMEM_SOURCED_WRITES, 0x008d); +CPUMF_EVENT_ATTR(cf_z196, L1I_LMEM_SOURCED_WRITES, 0x008e); +CPUMF_EVENT_ATTR(cf_z196, L1I_OFFBOOK_L3_SOURCED_WRITES, 0x008f); +CPUMF_EVENT_ATTR(cf_z196, DTLB1_WRITES, 0x0090); +CPUMF_EVENT_ATTR(cf_z196, ITLB1_WRITES, 0x0091); +CPUMF_EVENT_ATTR(cf_z196, TLB2_PTE_WRITES, 0x0092); +CPUMF_EVENT_ATTR(cf_z196, TLB2_CRSTE_HPAGE_WRITES, 0x0093); +CPUMF_EVENT_ATTR(cf_z196, TLB2_CRSTE_WRITES, 0x0094); +CPUMF_EVENT_ATTR(cf_z196, L1D_ONCHIP_L3_SOURCED_WRITES, 0x0096); +CPUMF_EVENT_ATTR(cf_z196, L1D_OFFCHIP_L3_SOURCED_WRITES, 0x0098); +CPUMF_EVENT_ATTR(cf_z196, L1I_ONCHIP_L3_SOURCED_WRITES, 0x0099); +CPUMF_EVENT_ATTR(cf_z196, L1I_OFFCHIP_L3_SOURCED_WRITES, 0x009b); +CPUMF_EVENT_ATTR(cf_zec12, DTLB1_MISSES, 0x0080); +CPUMF_EVENT_ATTR(cf_zec12, ITLB1_MISSES, 0x0081); +CPUMF_EVENT_ATTR(cf_zec12, L1D_L2I_SOURCED_WRITES, 0x0082); +CPUMF_EVENT_ATTR(cf_zec12, L1I_L2I_SOURCED_WRITES, 0x0083); +CPUMF_EVENT_ATTR(cf_zec12, L1D_L2D_SOURCED_WRITES, 0x0084); +CPUMF_EVENT_ATTR(cf_zec12, DTLB1_WRITES, 0x0085); +CPUMF_EVENT_ATTR(cf_zec12, L1D_LMEM_SOURCED_WRITES, 0x0087); +CPUMF_EVENT_ATTR(cf_zec12, L1I_LMEM_SOURCED_WRITES, 0x0089); +CPUMF_EVENT_ATTR(cf_zec12, L1D_RO_EXCL_WRITES, 0x008a); +CPUMF_EVENT_ATTR(cf_zec12, DTLB1_HPAGE_WRITES, 0x008b); +CPUMF_EVENT_ATTR(cf_zec12, 
ITLB1_WRITES, 0x008c); +CPUMF_EVENT_ATTR(cf_zec12, TLB2_PTE_WRITES, 0x008d); +CPUMF_EVENT_ATTR(cf_zec12, TLB2_CRSTE_HPAGE_WRITES, 0x008e); +CPUMF_EVENT_ATTR(cf_zec12, TLB2_CRSTE_WRITES, 0x008f); +CPUMF_EVENT_ATTR(cf_zec12, L1D_ONCHIP_L3_SOURCED_WRITES, 0x0090); +CPUMF_EVENT_ATTR(cf_zec12, L1D_OFFCHIP_L3_SOURCED_WRITES, 0x0091); +CPUMF_EVENT_ATTR(cf_zec12, L1D_OFFBOOK_L3_SOURCED_WRITES, 0x0092); +CPUMF_EVENT_ATTR(cf_zec12, L1D_ONBOOK_L4_SOURCED_WRITES, 0x0093); +CPUMF_EVENT_ATTR(cf_zec12, L1D_OFFBOOK_L4_SOURCED_WRITES, 0x0094); +CPUMF_EVENT_ATTR(cf_zec12, TX_NC_TEND, 0x0095); +CPUMF_EVENT_ATTR(cf_zec12, L1D_ONCHIP_L3_SOURCED_WRITES_IV, 0x0096); +CPUMF_EVENT_ATTR(cf_zec12, L1D_OFFCHIP_L3_SOURCED_WRITES_IV, 0x0097); +CPUMF_EVENT_ATTR(cf_zec12, L1D_OFFBOOK_L3_SOURCED_WRITES_IV, 0x0098); +CPUMF_EVENT_ATTR(cf_zec12, L1I_ONCHIP_L3_SOURCED_WRITES, 0x0099); +CPUMF_EVENT_ATTR(cf_zec12, L1I_OFFCHIP_L3_SOURCED_WRITES, 0x009a); +CPUMF_EVENT_ATTR(cf_zec12, L1I_OFFBOOK_L3_SOURCED_WRITES, 0x009b); +CPUMF_EVENT_ATTR(cf_zec12, L1I_ONBOOK_L4_SOURCED_WRITES, 0x009c); +CPUMF_EVENT_ATTR(cf_zec12, L1I_OFFBOOK_L4_SOURCED_WRITES, 0x009d); +CPUMF_EVENT_ATTR(cf_zec12, TX_C_TEND, 0x009e); +CPUMF_EVENT_ATTR(cf_zec12, L1I_ONCHIP_L3_SOURCED_WRITES_IV, 0x009f); +CPUMF_EVENT_ATTR(cf_zec12, L1I_OFFCHIP_L3_SOURCED_WRITES_IV, 0x00a0); +CPUMF_EVENT_ATTR(cf_zec12, L1I_OFFBOOK_L3_SOURCED_WRITES_IV, 0x00a1); +CPUMF_EVENT_ATTR(cf_zec12, TX_NC_TABORT, 0x00b1); +CPUMF_EVENT_ATTR(cf_zec12, TX_C_TABORT_NO_SPECIAL, 0x00b2); +CPUMF_EVENT_ATTR(cf_zec12, TX_C_TABORT_SPECIAL, 0x00b3); + +static struct attribute *cpumcf_pmu_event_attr[] = { + CPUMF_EVENT_PTR(cf, CPU_CYCLES), + CPUMF_EVENT_PTR(cf, INSTRUCTIONS), + CPUMF_EVENT_PTR(cf, L1I_DIR_WRITES), + CPUMF_EVENT_PTR(cf, L1I_PENALTY_CYCLES), + CPUMF_EVENT_PTR(cf, PROBLEM_STATE_CPU_CYCLES), + CPUMF_EVENT_PTR(cf, PROBLEM_STATE_INSTRUCTIONS), + CPUMF_EVENT_PTR(cf, PROBLEM_STATE_L1I_DIR_WRITES), + CPUMF_EVENT_PTR(cf, PROBLEM_STATE_L1I_PENALTY_CYCLES), + CPUMF_EVENT_PTR(cf, PROBLEM_STATE_L1D_DIR_WRITES), + CPUMF_EVENT_PTR(cf, PROBLEM_STATE_L1D_PENALTY_CYCLES), + CPUMF_EVENT_PTR(cf, L1D_DIR_WRITES), + CPUMF_EVENT_PTR(cf, L1D_PENALTY_CYCLES), + CPUMF_EVENT_PTR(cf, PRNG_FUNCTIONS), + CPUMF_EVENT_PTR(cf, PRNG_CYCLES), + CPUMF_EVENT_PTR(cf, PRNG_BLOCKED_FUNCTIONS), + CPUMF_EVENT_PTR(cf, PRNG_BLOCKED_CYCLES), + CPUMF_EVENT_PTR(cf, SHA_FUNCTIONS), + CPUMF_EVENT_PTR(cf, SHA_CYCLES), + CPUMF_EVENT_PTR(cf, SHA_BLOCKED_FUNCTIONS), + CPUMF_EVENT_PTR(cf, SHA_BLOCKED_CYCLES), + CPUMF_EVENT_PTR(cf, DEA_FUNCTIONS), + CPUMF_EVENT_PTR(cf, DEA_CYCLES), + CPUMF_EVENT_PTR(cf, DEA_BLOCKED_FUNCTIONS), + CPUMF_EVENT_PTR(cf, DEA_BLOCKED_CYCLES), + CPUMF_EVENT_PTR(cf, AES_FUNCTIONS), + CPUMF_EVENT_PTR(cf, AES_CYCLES), + CPUMF_EVENT_PTR(cf, AES_BLOCKED_FUNCTIONS), + CPUMF_EVENT_PTR(cf, AES_BLOCKED_CYCLES), + NULL, +}; + +static struct attribute *cpumcf_z10_pmu_event_attr[] __initdata = { + CPUMF_EVENT_PTR(cf_z10, L1I_L2_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_z10, L1D_L2_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_z10, L1I_L3_LOCAL_WRITES), + CPUMF_EVENT_PTR(cf_z10, L1D_L3_LOCAL_WRITES), + CPUMF_EVENT_PTR(cf_z10, L1I_L3_REMOTE_WRITES), + CPUMF_EVENT_PTR(cf_z10, L1D_L3_REMOTE_WRITES), + CPUMF_EVENT_PTR(cf_z10, L1D_LMEM_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_z10, L1I_LMEM_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_z10, L1D_RO_EXCL_WRITES), + CPUMF_EVENT_PTR(cf_z10, L1I_CACHELINE_INVALIDATES), + CPUMF_EVENT_PTR(cf_z10, ITLB1_WRITES), + CPUMF_EVENT_PTR(cf_z10, DTLB1_WRITES), + CPUMF_EVENT_PTR(cf_z10, TLB2_PTE_WRITES), + 
CPUMF_EVENT_PTR(cf_z10, TLB2_CRSTE_WRITES), + CPUMF_EVENT_PTR(cf_z10, TLB2_CRSTE_HPAGE_WRITES), + CPUMF_EVENT_PTR(cf_z10, ITLB1_MISSES), + CPUMF_EVENT_PTR(cf_z10, DTLB1_MISSES), + CPUMF_EVENT_PTR(cf_z10, L2C_STORES_SENT), + NULL, +}; + +static struct attribute *cpumcf_z196_pmu_event_attr[] __initdata = { + CPUMF_EVENT_PTR(cf_z196, L1D_L2_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_z196, L1I_L2_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_z196, DTLB1_MISSES), + CPUMF_EVENT_PTR(cf_z196, ITLB1_MISSES), + CPUMF_EVENT_PTR(cf_z196, L2C_STORES_SENT), + CPUMF_EVENT_PTR(cf_z196, L1D_OFFBOOK_L3_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_z196, L1D_ONBOOK_L4_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_z196, L1I_ONBOOK_L4_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_z196, L1D_RO_EXCL_WRITES), + CPUMF_EVENT_PTR(cf_z196, L1D_OFFBOOK_L4_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_z196, L1I_OFFBOOK_L4_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_z196, DTLB1_HPAGE_WRITES), + CPUMF_EVENT_PTR(cf_z196, L1D_LMEM_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_z196, L1I_LMEM_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_z196, L1I_OFFBOOK_L3_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_z196, DTLB1_WRITES), + CPUMF_EVENT_PTR(cf_z196, ITLB1_WRITES), + CPUMF_EVENT_PTR(cf_z196, TLB2_PTE_WRITES), + CPUMF_EVENT_PTR(cf_z196, TLB2_CRSTE_HPAGE_WRITES), + CPUMF_EVENT_PTR(cf_z196, TLB2_CRSTE_WRITES), + CPUMF_EVENT_PTR(cf_z196, L1D_ONCHIP_L3_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_z196, L1D_OFFCHIP_L3_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_z196, L1I_ONCHIP_L3_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_z196, L1I_OFFCHIP_L3_SOURCED_WRITES), + NULL, +}; + +static struct attribute *cpumcf_zec12_pmu_event_attr[] __initdata = { + CPUMF_EVENT_PTR(cf_zec12, DTLB1_MISSES), + CPUMF_EVENT_PTR(cf_zec12, ITLB1_MISSES), + CPUMF_EVENT_PTR(cf_zec12, L1D_L2I_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_zec12, L1I_L2I_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_zec12, L1D_L2D_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_zec12, DTLB1_WRITES), + CPUMF_EVENT_PTR(cf_zec12, L1D_LMEM_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_zec12, L1I_LMEM_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_zec12, L1D_RO_EXCL_WRITES), + CPUMF_EVENT_PTR(cf_zec12, DTLB1_HPAGE_WRITES), + CPUMF_EVENT_PTR(cf_zec12, ITLB1_WRITES), + CPUMF_EVENT_PTR(cf_zec12, TLB2_PTE_WRITES), + CPUMF_EVENT_PTR(cf_zec12, TLB2_CRSTE_HPAGE_WRITES), + CPUMF_EVENT_PTR(cf_zec12, TLB2_CRSTE_WRITES), + CPUMF_EVENT_PTR(cf_zec12, L1D_ONCHIP_L3_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_zec12, L1D_OFFCHIP_L3_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_zec12, L1D_OFFBOOK_L3_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_zec12, L1D_ONBOOK_L4_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_zec12, L1D_OFFBOOK_L4_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_zec12, TX_NC_TEND), + CPUMF_EVENT_PTR(cf_zec12, L1D_ONCHIP_L3_SOURCED_WRITES_IV), + CPUMF_EVENT_PTR(cf_zec12, L1D_OFFCHIP_L3_SOURCED_WRITES_IV), + CPUMF_EVENT_PTR(cf_zec12, L1D_OFFBOOK_L3_SOURCED_WRITES_IV), + CPUMF_EVENT_PTR(cf_zec12, L1I_ONCHIP_L3_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_zec12, L1I_OFFCHIP_L3_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_zec12, L1I_OFFBOOK_L3_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_zec12, L1I_ONBOOK_L4_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_zec12, L1I_OFFBOOK_L4_SOURCED_WRITES), + CPUMF_EVENT_PTR(cf_zec12, TX_C_TEND), + CPUMF_EVENT_PTR(cf_zec12, L1I_ONCHIP_L3_SOURCED_WRITES_IV), + CPUMF_EVENT_PTR(cf_zec12, L1I_OFFCHIP_L3_SOURCED_WRITES_IV), + CPUMF_EVENT_PTR(cf_zec12, L1I_OFFBOOK_L3_SOURCED_WRITES_IV), + CPUMF_EVENT_PTR(cf_zec12, TX_NC_TABORT), + CPUMF_EVENT_PTR(cf_zec12, TX_C_TABORT_NO_SPECIAL), + CPUMF_EVENT_PTR(cf_zec12, TX_C_TABORT_SPECIAL), + NULL, +}; + +/* END: CPUM_CF 
COUNTER DEFINITIONS ===================================== */
+
+static struct attribute_group cpumsf_pmu_events_group = {
+ .name = "events",
+ .attrs = cpumcf_pmu_event_attr,
+};
+
+PMU_FORMAT_ATTR(event, "config:0-63");
+
+static struct attribute *cpumsf_pmu_format_attr[] = {
+ &format_attr_event.attr,
+ NULL,
+};
+
+static struct attribute_group cpumsf_pmu_format_group = {
+ .name = "format",
+ .attrs = cpumsf_pmu_format_attr,
+};
+
+static const struct attribute_group *cpumsf_pmu_attr_groups[] = {
+ &cpumsf_pmu_events_group,
+ &cpumsf_pmu_format_group,
+ NULL,
+};
+
+
+static __init struct attribute **merge_attr(struct attribute **a,
+ struct attribute **b)
+{
+ struct attribute **new;
+ int j, i;
+
+ for (j = 0; a[j]; j++)
+ ;
+ for (i = 0; b[i]; i++)
+ j++;
+ j++;
+
+ new = kmalloc(sizeof(struct attribute *) * j, GFP_KERNEL);
+ if (!new)
+ return NULL;
+ j = 0;
+ for (i = 0; a[i]; i++)
+ new[j++] = a[i];
+ for (i = 0; b[i]; i++)
+ new[j++] = b[i];
+ new[j] = NULL;
+
+ return new;
+}
+
+__init const struct attribute_group **cpumf_cf_event_group(void)
+{
+ struct attribute **combined, **model;
+ struct cpuid cpu_id;
+
+ get_cpu_id(&cpu_id);
+ switch (cpu_id.machine) {
+ case 0x2097:
+ case 0x2098:
+ model = cpumcf_z10_pmu_event_attr;
+ break;
+ case 0x2817:
+ case 0x2818:
+ model = cpumcf_z196_pmu_event_attr;
+ break;
+ case 0x2827:
+ case 0x2828:
+ model = cpumcf_zec12_pmu_event_attr;
+ break;
+ default:
+ model = NULL;
+ break;
+ }
+
+ if (!model)
+ goto out;
+
+ combined = merge_attr(cpumcf_pmu_event_attr, model);
+ if (combined)
+ cpumsf_pmu_events_group.attrs = combined;
+out:
+ return cpumsf_pmu_attr_groups;
+}
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
new file mode 100644
index 00000000000..ea0c7b2ef03
--- /dev/null
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -0,0 +1,1643 @@
+/*
+ * Performance event support for the System z CPU-measurement Sampling Facility
+ *
+ * Copyright IBM Corp. 2013
+ * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ */
+#define KMSG_COMPONENT "cpum_sf"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/kernel_stat.h>
+#include <linux/perf_event.h>
+#include <linux/percpu.h>
+#include <linux/notifier.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/moduleparam.h>
+#include <asm/cpu_mf.h>
+#include <asm/irq.h>
+#include <asm/debug.h>
+#include <asm/timex.h>
+
+/* Minimum number of sample-data-block-tables:
+ * At least one table is required for the sampling buffer structure.
+ * A single table contains up to 511 pointers to sample-data-blocks.
+ */
+#define CPUM_SF_MIN_SDBT 1
+
+/* Number of sample-data-blocks per sample-data-block-table (SDBT):
+ * A table contains SDB pointers (8 bytes) and one table-link entry
+ * that points to the origin of the next SDBT.
+ */
+#define CPUM_SF_SDB_PER_TABLE ((PAGE_SIZE - 8) / 8)
+
+/* Maximum page offset for an SDBT table-link entry:
+ * If this page offset is reached, a table-link entry to the next SDBT
+ * must be added.
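+ *
+ * With 4KB pages this yields 511 eight-byte SDB pointers plus one
+ * table-link entry per SDBT, so the table-link offset within a table
+ * is 511 * 8 = 4088 bytes.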
+ */ +#define CPUM_SF_SDBT_TL_OFFSET (CPUM_SF_SDB_PER_TABLE * 8) +static inline int require_table_link(const void *sdbt) +{ + return ((unsigned long) sdbt & ~PAGE_MASK) == CPUM_SF_SDBT_TL_OFFSET; +} + +/* Minimum and maximum sampling buffer sizes: + * + * This number represents the maximum size of the sampling buffer taking + * the number of sample-data-block-tables into account. Note that these + * numbers apply to the basic-sampling function only. + * The maximum number of SDBs is increased by CPUM_SF_SDB_DIAG_FACTOR if + * the diagnostic-sampling function is active. + * + * Sampling buffer size Buffer characteristics + * --------------------------------------------------- + * 64KB == 16 pages (4KB per page) + * 1 page for SDB-tables + * 15 pages for SDBs + * + * 32MB == 8192 pages (4KB per page) + * 16 pages for SDB-tables + * 8176 pages for SDBs + */ +static unsigned long __read_mostly CPUM_SF_MIN_SDB = 15; +static unsigned long __read_mostly CPUM_SF_MAX_SDB = 8176; +static unsigned long __read_mostly CPUM_SF_SDB_DIAG_FACTOR = 1; + +struct sf_buffer { + unsigned long *sdbt; /* Sample-data-block-table origin */ + /* buffer characteristics (required for buffer increments) */ + unsigned long num_sdb; /* Number of sample-data-blocks */ + unsigned long num_sdbt; /* Number of sample-data-block-tables */ + unsigned long *tail; /* last sample-data-block-table */ +}; + +struct cpu_hw_sf { + /* CPU-measurement sampling information block */ + struct hws_qsi_info_block qsi; + /* CPU-measurement sampling control block */ + struct hws_lsctl_request_block lsctl; + struct sf_buffer sfb; /* Sampling buffer */ + unsigned int flags; /* Status flags */ + struct perf_event *event; /* Scheduled perf event */ +}; +static DEFINE_PER_CPU(struct cpu_hw_sf, cpu_hw_sf); + +/* Debug feature */ +static debug_info_t *sfdbg; + +/* + * sf_disable() - Switch off sampling facility + */ +static int sf_disable(void) +{ + struct hws_lsctl_request_block sreq; + + memset(&sreq, 0, sizeof(sreq)); + return lsctl(&sreq); +} + +/* + * sf_buffer_available() - Check for an allocated sampling buffer + */ +static int sf_buffer_available(struct cpu_hw_sf *cpuhw) +{ + return !!cpuhw->sfb.sdbt; +} + +/* + * deallocate sampling facility buffer + */ +static void free_sampling_buffer(struct sf_buffer *sfb) +{ + unsigned long *sdbt, *curr; + + if (!sfb->sdbt) + return; + + sdbt = sfb->sdbt; + curr = sdbt; + + /* Free the SDBT after all SDBs are processed... */ + while (1) { + if (!*curr || !sdbt) + break; + + /* Process table-link entries */ + if (is_link_entry(curr)) { + curr = get_next_sdbt(curr); + if (sdbt) + free_page((unsigned long) sdbt); + + /* If the origin is reached, sampling buffer is freed */ + if (curr == sfb->sdbt) + break; + else + sdbt = curr; + } else { + /* Process SDB pointer */ + if (*curr) { + free_page(*curr); + curr++; + } + } + } + + debug_sprintf_event(sfdbg, 5, + "free_sampling_buffer: freed sdbt=%p\n", sfb->sdbt); + memset(sfb, 0, sizeof(*sfb)); +} + +static int alloc_sample_data_block(unsigned long *sdbt, gfp_t gfp_flags) +{ + unsigned long sdb, *trailer; + + /* Allocate and initialize sample-data-block */ + sdb = get_zeroed_page(gfp_flags); + if (!sdb) + return -ENOMEM; + trailer = trailer_entry_ptr(sdb); + *trailer = SDB_TE_ALERT_REQ_MASK; + + /* Link SDB into the sample-data-block-table */ + *sdbt = sdb; + + return 0; +} + +/* + * realloc_sampling_buffer() - extend sampler memory + * + * Allocates new sample-data-blocks and adds them to the specified sampling + * buffer memory. 
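+ * The gfp_flags argument is passed through to the page allocator, so
+ * the function works both at setup time (GFP_KERNEL) and from atomic
+ * context when an existing buffer is extended (GFP_ATOMIC).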
+ *
+ * Important: This modifies the sampling buffer and must be called when the
+ * sampling facility is disabled.
+ *
+ * Returns zero on success, non-zero otherwise.
+ */
+static int realloc_sampling_buffer(struct sf_buffer *sfb,
+ unsigned long num_sdb, gfp_t gfp_flags)
+{
+ int i, rc;
+ unsigned long *new, *tail;
+
+ if (!sfb->sdbt || !sfb->tail)
+ return -EINVAL;
+
+ if (!is_link_entry(sfb->tail))
+ return -EINVAL;
+
+ /* Append to the existing sampling buffer, overwriting the table-link
+ * entry.
+ * The tail variable always points to the "tail" (last and table-link)
+ * entry in an SDB-table.
+ */
+ tail = sfb->tail;
+
+ /* Do a sanity check whether the table-link entry points to
+ * the sampling buffer origin.
+ */
+ if (sfb->sdbt != get_next_sdbt(tail)) {
+ debug_sprintf_event(sfdbg, 3, "realloc_sampling_buffer: "
+ "sampling buffer is not linked: origin=%p"
+ "tail=%p\n",
+ (void *) sfb->sdbt, (void *) tail);
+ return -EINVAL;
+ }
+
+ /* Allocate remaining SDBs */
+ rc = 0;
+ for (i = 0; i < num_sdb; i++) {
+ /* Allocate a new SDB-table if it is full. */
+ if (require_table_link(tail)) {
+ new = (unsigned long *) get_zeroed_page(gfp_flags);
+ if (!new) {
+ rc = -ENOMEM;
+ break;
+ }
+ sfb->num_sdbt++;
+ /* Link current page to tail of chain */
+ *tail = (unsigned long)(void *) new + 1;
+ tail = new;
+ }
+
+ /* Allocate a new sample-data-block.
+ * If there is not enough memory, stop the realloc process
+ * and simply use what was allocated. If this is a temporary
+ * issue, a new realloc call (if required) might succeed.
+ */
+ rc = alloc_sample_data_block(tail, gfp_flags);
+ if (rc)
+ break;
+ sfb->num_sdb++;
+ tail++;
+ }
+
+ /* Link sampling buffer to its origin */
+ *tail = (unsigned long) sfb->sdbt + 1;
+ sfb->tail = tail;
+
+ debug_sprintf_event(sfdbg, 4, "realloc_sampling_buffer: new buffer"
+ " settings: sdbt=%lu sdb=%lu\n",
+ sfb->num_sdbt, sfb->num_sdb);
+ return rc;
+}
+
+/*
+ * allocate_sampling_buffer() - allocate sampler memory
+ *
+ * Allocates and initializes a sampling buffer structure using the
+ * specified number of sample-data-blocks (SDB). For each allocation,
+ * a 4K page is used. The number of sample-data-block-tables (SDBT)
+ * is calculated from the number of SDBs.
+ * Also set the ALERT_REQ mask in each SDB's trailer.
+ *
+ * Returns zero on success, non-zero otherwise.
+ */
+static int alloc_sampling_buffer(struct sf_buffer *sfb, unsigned long num_sdb)
+{
+ int rc;
+
+ if (sfb->sdbt)
+ return -EINVAL;
+
+ /* Allocate the sample-data-block-table origin */
+ sfb->sdbt = (unsigned long *) get_zeroed_page(GFP_KERNEL);
+ if (!sfb->sdbt)
+ return -ENOMEM;
+ sfb->num_sdb = 0;
+ sfb->num_sdbt = 1;
+
+ /* Link the table origin to point to itself to prepare for
+ * realloc_sampling_buffer() invocation.
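+ * The "+ 1" below tags the entry as a table-link entry; SDB-table
+ * pages are page aligned, so the low-order bit is otherwise zero and
+ * is_link_entry() can use it to tell links apart from SDB pointers.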
+ */
+ sfb->tail = sfb->sdbt;
+ *sfb->tail = (unsigned long)(void *) sfb->sdbt + 1;
+
+ /* Allocate requested number of sample-data-blocks */
+ rc = realloc_sampling_buffer(sfb, num_sdb, GFP_KERNEL);
+ if (rc) {
+ free_sampling_buffer(sfb);
+ debug_sprintf_event(sfdbg, 4, "alloc_sampling_buffer: "
+ "realloc_sampling_buffer failed with rc=%i\n", rc);
+ } else
+ debug_sprintf_event(sfdbg, 4,
+ "alloc_sampling_buffer: tear=%p dear=%p\n",
+ sfb->sdbt, (void *) *sfb->sdbt);
+ return rc;
+}
+
+static void sfb_set_limits(unsigned long min, unsigned long max)
+{
+ struct hws_qsi_info_block si;
+
+ CPUM_SF_MIN_SDB = min;
+ CPUM_SF_MAX_SDB = max;
+
+ memset(&si, 0, sizeof(si));
+ if (!qsi(&si))
+ CPUM_SF_SDB_DIAG_FACTOR = DIV_ROUND_UP(si.dsdes, si.bsdes);
+}
+
+static unsigned long sfb_max_limit(struct hw_perf_event *hwc)
+{
+ return SAMPL_DIAG_MODE(hwc) ? CPUM_SF_MAX_SDB * CPUM_SF_SDB_DIAG_FACTOR
+ : CPUM_SF_MAX_SDB;
+}
+
+static unsigned long sfb_pending_allocs(struct sf_buffer *sfb,
+ struct hw_perf_event *hwc)
+{
+ if (!sfb->sdbt)
+ return SFB_ALLOC_REG(hwc);
+ if (SFB_ALLOC_REG(hwc) > sfb->num_sdb)
+ return SFB_ALLOC_REG(hwc) - sfb->num_sdb;
+ return 0;
+}
+
+static int sfb_has_pending_allocs(struct sf_buffer *sfb,
+ struct hw_perf_event *hwc)
+{
+ return sfb_pending_allocs(sfb, hwc) > 0;
+}
+
+static void sfb_account_allocs(unsigned long num, struct hw_perf_event *hwc)
+{
+ /* Limit the number of SDBs to not exceed the maximum */
+ num = min_t(unsigned long, num, sfb_max_limit(hwc) - SFB_ALLOC_REG(hwc));
+ if (num)
+ SFB_ALLOC_REG(hwc) += num;
+}
+
+static void sfb_init_allocs(unsigned long num, struct hw_perf_event *hwc)
+{
+ SFB_ALLOC_REG(hwc) = 0;
+ sfb_account_allocs(num, hwc);
+}
+
+static size_t event_sample_size(struct hw_perf_event *hwc)
+{
+ struct sf_raw_sample *sfr = (struct sf_raw_sample *) RAWSAMPLE_REG(hwc);
+ size_t sample_size;
+
+ /* The sample size depends on the sampling function: the basic-sampling
+ * function is always enabled; the diagnostic-sampling function is
+ * optional.
+ */
+ sample_size = sfr->bsdes;
+ if (SAMPL_DIAG_MODE(hwc))
+ sample_size += sfr->dsdes;
+
+ return sample_size;
+}
+
+static void deallocate_buffers(struct cpu_hw_sf *cpuhw)
+{
+ if (cpuhw->sfb.sdbt)
+ free_sampling_buffer(&cpuhw->sfb);
+}
+
+static int allocate_buffers(struct cpu_hw_sf *cpuhw, struct hw_perf_event *hwc)
+{
+ unsigned long n_sdb, freq, factor;
+ size_t sfr_size, sample_size;
+ struct sf_raw_sample *sfr;
+
+ /* Allocate raw sample buffer
+ *
+ * The raw sample buffer is used to temporarily store sampling data
+ * entries for perf raw sample processing. The buffer size mainly
+ * depends on the size of diagnostic-sampling data entries which is
+ * machine-specific. The exact size calculation includes:
+ * 1. The first 4 bytes of diagnostic-sampling data entries are
+ * already reflected in the sf_raw_sample structure. Subtract
+ * these bytes.
+ * 2. The perf raw sample data must be 8-byte aligned (u64) and
+ * perf's internal data size must be considered too. So add
+ * an additional u32 for correct alignment and subtract before
+ * allocating the buffer.
+ * 3. Store the raw sample buffer pointer in the perf event
+ * hardware structure.
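+ *
+ * In other words, sfr_size is chosen so that sfr_size + sizeof(u32)
+ * is a multiple of 8: once perf prepends its u32 size field, the
+ * raw-sample payload ends up on the required 8-byte boundary.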
+ */
+ sfr_size = ALIGN((sizeof(*sfr) - sizeof(sfr->diag) + cpuhw->qsi.dsdes) +
+ sizeof(u32), sizeof(u64));
+ sfr_size -= sizeof(u32);
+ sfr = kzalloc(sfr_size, GFP_KERNEL);
+ if (!sfr)
+ return -ENOMEM;
+ sfr->size = sfr_size;
+ sfr->bsdes = cpuhw->qsi.bsdes;
+ sfr->dsdes = cpuhw->qsi.dsdes;
+ RAWSAMPLE_REG(hwc) = (unsigned long) sfr;
+
+ /* Calculate sampling buffers using 4K pages
+ *
+ * 1. Determine the sample data size which depends on the used
+ * sampling functions, for example, basic-sampling or
+ * basic-sampling with diagnostic-sampling.
+ *
+ * 2. Use the sampling frequency as input. The sampling buffer is
+ * designed for almost one second. This can be adjusted through
+ * the "factor" variable.
+ * In any case, alloc_sampling_buffer() sets the Alert Request
+ * Control indicator to trigger a measurement-alert to harvest
+ * sample-data-blocks (sdb).
+ *
+ * 3. Compute the number of sample-data-blocks and ensure a minimum
+ * of CPUM_SF_MIN_SDB. Also ensure the upper limit does not
+ * exceed a "calculated" maximum. The symbolic maximum is
+ * designed for basic-sampling only and needs to be increased if
+ * diagnostic-sampling is active.
+ * See also the remarks for these symbolic constants.
+ *
+ * 4. Compute the number of sample-data-block-tables (SDBT) and
+ * ensure a minimum of CPUM_SF_MIN_SDBT (one table can manage up
+ * to 511 SDBs).
+ */
+ sample_size = event_sample_size(hwc);
+ freq = sample_rate_to_freq(&cpuhw->qsi, SAMPL_RATE(hwc));
+ factor = 1;
+ n_sdb = DIV_ROUND_UP(freq, factor * ((PAGE_SIZE-64) / sample_size));
+ if (n_sdb < CPUM_SF_MIN_SDB)
+ n_sdb = CPUM_SF_MIN_SDB;
+
+ /* If there is already a sampling buffer allocated, it is very likely
+ * that the sampling facility is enabled too. If the event to be
+ * initialized requires a greater sampling buffer, the allocation must
+ * be postponed. Changing the sampling buffer requires the sampling
+ * facility to be in the disabled state. So, account the number of
+ * required SDBs and let cpumsf_pmu_enable() resize the buffer just
+ * before the event is started.
+ */
+ sfb_init_allocs(n_sdb, hwc);
+ if (sf_buffer_available(cpuhw))
+ return 0;
+
+ debug_sprintf_event(sfdbg, 3,
+ "allocate_buffers: rate=%lu f=%lu sdb=%lu/%lu"
+ " sample_size=%lu cpuhw=%p\n",
+ SAMPL_RATE(hwc), freq, n_sdb, sfb_max_limit(hwc),
+ sample_size, cpuhw);
+
+ return alloc_sampling_buffer(&cpuhw->sfb,
+ sfb_pending_allocs(&cpuhw->sfb, hwc));
+}
+
+static unsigned long min_percent(unsigned int percent, unsigned long base,
+ unsigned long min)
+{
+ return min_t(unsigned long, min, DIV_ROUND_UP(percent * base, 100));
+}
+
+static unsigned long compute_sfb_extent(unsigned long ratio, unsigned long base)
+{
+ /* Use a percentage-based approach to extend the sampling facility
+ * buffer. Accept up to 5% sample data loss.
+ * Vary the extents between 1% and 5% of the current number of
+ * sample-data-blocks.
+ */
+ if (ratio <= 5)
+ return 0;
+ if (ratio <= 25)
+ return min_percent(1, base, 1);
+ if (ratio <= 50)
+ return min_percent(1, base, 1);
+ if (ratio <= 75)
+ return min_percent(2, base, 2);
+ if (ratio <= 100)
+ return min_percent(3, base, 3);
+ if (ratio <= 250)
+ return min_percent(4, base, 4);
+
+ return min_percent(5, base, 8);
+}
+
+static void sfb_account_overflows(struct cpu_hw_sf *cpuhw,
+ struct hw_perf_event *hwc)
+{
+ unsigned long ratio, num;
+
+ if (!OVERFLOW_REG(hwc))
+ return;
+
+ /* The sample_overflow contains the average number of sample-data
+ * entries that were lost because sample-data-blocks were full.
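+ *
+ * For example, with OVERFLOW_REG(hwc) == 2, 100 allocated SDBs, and a
+ * rate of 20000 samples per second, the computation below yields
+ * DIV_ROUND_UP(100 * 2 * 100, 20000) == 1 percent, which
+ * compute_sfb_extent() treats as acceptable loss (no buffer growth).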
+ *
+ * Calculate the total number of sample data entries that have been
+ * discarded. Then calculate the ratio of lost samples to total samples
+ * per second in percent.
+ */
+ ratio = DIV_ROUND_UP(100 * OVERFLOW_REG(hwc) * cpuhw->sfb.num_sdb,
+ sample_rate_to_freq(&cpuhw->qsi, SAMPL_RATE(hwc)));
+
+ /* Compute number of sample-data-blocks */
+ num = compute_sfb_extent(ratio, cpuhw->sfb.num_sdb);
+ if (num)
+ sfb_account_allocs(num, hwc);
+
+ debug_sprintf_event(sfdbg, 5, "sfb: overflow: overflow=%llu ratio=%lu"
+ " num=%lu\n", OVERFLOW_REG(hwc), ratio, num);
+ OVERFLOW_REG(hwc) = 0;
+}
+
+/* extend_sampling_buffer() - Extend sampling buffer
+ * @sfb: Sampling buffer structure (for local CPU)
+ * @hwc: Perf event hardware structure
+ *
+ * Use this function to extend the sampling buffer based on the overflow counter
+ * and postponed allocation extents stored in the specified perf event hardware
+ * structure.
+ *
+ * Important: This function disables the sampling facility in order to safely
+ * change the sampling buffer structure. Do not call this function
+ * when the PMU is active.
+ */
+static void extend_sampling_buffer(struct sf_buffer *sfb,
+ struct hw_perf_event *hwc)
+{
+ unsigned long num, num_old;
+ int rc;
+
+ num = sfb_pending_allocs(sfb, hwc);
+ if (!num)
+ return;
+ num_old = sfb->num_sdb;
+
+ /* Disable the sampling facility to reset any states and also
+ * clear pending measurement alerts.
+ */
+ sf_disable();
+
+ /* Extend the sampling buffer.
+ * This memory allocation typically happens in an atomic context when
+ * called by perf. Because this is a reallocation, it is fine if the
+ * new SDB-request cannot be satisfied immediately.
+ */
+ rc = realloc_sampling_buffer(sfb, num, GFP_ATOMIC);
+ if (rc)
+ debug_sprintf_event(sfdbg, 5, "sfb: extend: realloc "
+ "failed with rc=%i\n", rc);
+
+ if (sfb_has_pending_allocs(sfb, hwc))
+ debug_sprintf_event(sfdbg, 5, "sfb: extend: "
+ "req=%lu alloc=%lu remaining=%lu\n",
+ num, sfb->num_sdb - num_old,
+ sfb_pending_allocs(sfb, hwc));
+}
+
+
+/* Number of perf events counting hardware events */
+static atomic_t num_events;
+/* Used to avoid races in calling reserve/release_cpumf_hardware */
+static DEFINE_MUTEX(pmc_reserve_mutex);
+
+#define PMC_INIT 0
+#define PMC_RELEASE 1
+#define PMC_FAILURE 2
+static void setup_pmc_cpu(void *flags)
+{
+ int err;
+ struct cpu_hw_sf *cpusf = &__get_cpu_var(cpu_hw_sf);
+
+ err = 0;
+ switch (*((int *) flags)) {
+ case PMC_INIT:
+ memset(cpusf, 0, sizeof(*cpusf));
+ err = qsi(&cpusf->qsi);
+ if (err)
+ break;
+ cpusf->flags |= PMU_F_RESERVED;
+ err = sf_disable();
+ if (err)
+ pr_err("Switching off the sampling facility failed "
+ "with rc=%i\n", err);
+ debug_sprintf_event(sfdbg, 5,
+ "setup_pmc_cpu: initialized: cpuhw=%p\n", cpusf);
+ break;
+ case PMC_RELEASE:
+ cpusf->flags &= ~PMU_F_RESERVED;
+ err = sf_disable();
+ if (err) {
+ pr_err("Switching off the sampling facility failed "
+ "with rc=%i\n", err);
+ } else
+ deallocate_buffers(cpusf);
+ debug_sprintf_event(sfdbg, 5,
+ "setup_pmc_cpu: released: cpuhw=%p\n", cpusf);
+ break;
+ }
+ if (err)
+ *((int *) flags) |= PMC_FAILURE;
+}
+
+static void release_pmc_hardware(void)
+{
+ int flags = PMC_RELEASE;
+
+ irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT);
+ on_each_cpu(setup_pmc_cpu, &flags, 1);
+ perf_release_sampling();
+}
+
+static int reserve_pmc_hardware(void)
+{
+ int flags = PMC_INIT;
+ int err;
+
+ err = perf_reserve_sampling();
+ if (err)
+ return err;
+ on_each_cpu(setup_pmc_cpu, &flags, 1);
+ if (flags & PMC_FAILURE) {
+ release_pmc_hardware();
+ return -ENODEV;
+ }
+ irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT);
+
+ return 0;
+}
+
+static void hw_perf_event_destroy(struct perf_event *event)
+{
+ /* Free raw sample buffer */
+ if (RAWSAMPLE_REG(&event->hw))
+ kfree((void *) RAWSAMPLE_REG(&event->hw));
+
+ /* Release PMC if this is the last perf event */
+ if (!atomic_add_unless(&num_events, -1, 1)) {
+ mutex_lock(&pmc_reserve_mutex);
+ if (atomic_dec_return(&num_events) == 0)
+ release_pmc_hardware();
+ mutex_unlock(&pmc_reserve_mutex);
+ }
+}
+
+static void hw_init_period(struct hw_perf_event *hwc, u64 period)
+{
+ hwc->sample_period = period;
+ hwc->last_period = hwc->sample_period;
+ local64_set(&hwc->period_left, hwc->sample_period);
+}
+
+static void hw_reset_registers(struct hw_perf_event *hwc,
+ unsigned long *sdbt_origin)
+{
+ struct sf_raw_sample *sfr;
+
+ /* (Re)set to first sample-data-block-table */
+ TEAR_REG(hwc) = (unsigned long) sdbt_origin;
+
+ /* (Re)set raw sampling buffer register */
+ sfr = (struct sf_raw_sample *) RAWSAMPLE_REG(hwc);
+ memset(&sfr->basic, 0, sizeof(sfr->basic));
+ memset(&sfr->diag, 0, sfr->dsdes);
+}
+
+static unsigned long hw_limit_rate(const struct hws_qsi_info_block *si,
+ unsigned long rate)
+{
+ return clamp_t(unsigned long, rate,
+ si->min_sampl_rate, si->max_sampl_rate);
+}
+
+static int __hw_perf_event_init(struct perf_event *event)
+{
+ struct cpu_hw_sf *cpuhw;
+ struct hws_qsi_info_block si;
+ struct perf_event_attr *attr = &event->attr;
+ struct hw_perf_event *hwc = &event->hw;
+ unsigned long rate;
+ int cpu, err;
+
+ /* Reserve CPU-measurement sampling facility */
+ err = 0;
+ if (!atomic_inc_not_zero(&num_events)) {
+ mutex_lock(&pmc_reserve_mutex);
+ if (atomic_read(&num_events) == 0 && reserve_pmc_hardware())
+ err = -EBUSY;
+ else
+ atomic_inc(&num_events);
+ mutex_unlock(&pmc_reserve_mutex);
+ }
+ event->destroy = hw_perf_event_destroy;
+
+ if (err)
+ goto out;
+
+ /* Access per-CPU sampling information (query sampling info) */
+ /*
+ * The event->cpu value can be -1 to count on every CPU, for example,
+ * when attaching to a task. If this is specified, use the query
+ * sampling info from the current CPU, otherwise use event->cpu to
+ * retrieve the per-CPU information.
+ * Later, cpuhw indicates whether to allocate sampling buffers for a
+ * particular CPU (cpuhw!=NULL) or each online CPU (cpuhw==NULL).
+ */
+ memset(&si, 0, sizeof(si));
+ cpuhw = NULL;
+ if (event->cpu == -1)
+ qsi(&si);
+ else {
+ /* Event is pinned to a particular CPU, retrieve the per-CPU
+ * sampling structure for accessing the CPU-specific QSI.
+ */
+ cpuhw = &per_cpu(cpu_hw_sf, event->cpu);
+ si = cpuhw->qsi;
+ }
+
+ /* Check sampling facility authorization and, if not authorized,
+ * fall back to other PMUs. It is safe to check any CPU because
+ * the authorization is identical for all configured CPUs.
+ */
+ if (!si.as) {
+ err = -ENOENT;
+ goto out;
+ }
+
+ /* Always enable basic sampling */
+ SAMPL_FLAGS(hwc) = PERF_CPUM_SF_BASIC_MODE;
+
+ /* Check if diagnostic sampling is requested. Deny if the required
+ * sampling authorization is missing.
+ */
+ if (attr->config == PERF_EVENT_CPUM_SF_DIAG) {
+ if (!si.ad) {
+ err = -EPERM;
+ goto out;
+ }
+ SAMPL_FLAGS(hwc) |= PERF_CPUM_SF_DIAG_MODE;
+ }
+
+ /* Check and set other sampling flags */
+ if (attr->config1 & PERF_CPUM_SF_FULL_BLOCKS)
+ SAMPL_FLAGS(hwc) |= PERF_CPUM_SF_FULL_BLOCKS;
+
+ /* The sampling information (si) describes the
+ * min/max sampling intervals and the CPU speed. So calculate the
+ * correct sampling interval and avoid the whole period-adjust
+ * feedback loop.
+ */
+ rate = 0;
+ if (attr->freq) {
+ rate = freq_to_sample_rate(&si, attr->sample_freq);
+ rate = hw_limit_rate(&si, rate);
+ attr->freq = 0;
+ attr->sample_period = rate;
+ } else {
+ /* The min/max sampling rates specify the valid range
+ * of sample periods. If the specified sample period is
+ * out of range, limit the period to the range boundary.
+ */
+ rate = hw_limit_rate(&si, hwc->sample_period);
+
+ /* The perf core maintains a maximum sample rate that is
+ * configurable through the sysctl interface. Ensure the
+ * sampling rate does not exceed this value. This also helps
+ * to avoid throttling when pushing samples with
+ * perf_event_overflow().
+ */
+ if (sample_rate_to_freq(&si, rate) >
+ sysctl_perf_event_sample_rate) {
+ err = -EINVAL;
+ debug_sprintf_event(sfdbg, 1, "Sampling rate exceeds maximum perf sample rate\n");
+ goto out;
+ }
+ }
+ SAMPL_RATE(hwc) = rate;
+ hw_init_period(hwc, SAMPL_RATE(hwc));
+
+ /* Initialize sample data overflow accounting */
+ hwc->extra_reg.reg = REG_OVERFLOW;
+ OVERFLOW_REG(hwc) = 0;
+
+ /* Allocate the per-CPU sampling buffer using the CPU information
+ * from the event. If the event is not pinned to a particular
+ * CPU (event->cpu == -1; or cpuhw == NULL), allocate sampling
+ * buffers for each online CPU.
+ */
+ if (cpuhw)
+ /* Event is pinned to a particular CPU */
+ err = allocate_buffers(cpuhw, hwc);
+ else {
+ /* Event is not pinned, allocate sampling buffer on
+ * each online CPU
+ */
+ for_each_online_cpu(cpu) {
+ cpuhw = &per_cpu(cpu_hw_sf, cpu);
+ err = allocate_buffers(cpuhw, hwc);
+ if (err)
+ break;
+ }
+ }
+out:
+ return err;
+}
+
+static int cpumsf_pmu_event_init(struct perf_event *event)
+{
+ int err;
+
+ /* No support for taken branch sampling */
+ if (has_branch_stack(event))
+ return -EOPNOTSUPP;
+
+ switch (event->attr.type) {
+ case PERF_TYPE_RAW:
+ if ((event->attr.config != PERF_EVENT_CPUM_SF) &&
+ (event->attr.config != PERF_EVENT_CPUM_SF_DIAG))
+ return -ENOENT;
+ break;
+ case PERF_TYPE_HARDWARE:
+ /* Support sampling of CPU cycles in addition to the
+ * counter facility. However, the counter facility
+ * is more precise and, hence, this PMU is restricted
+ * to sampling events only.
+ */
+ if (event->attr.config != PERF_COUNT_HW_CPU_CYCLES)
+ return -ENOENT;
+ if (!is_sampling_event(event))
+ return -ENOENT;
+ break;
+ default:
+ return -ENOENT;
+ }
+
+ /* Check online status of the CPU to which the event is pinned */
+ if (event->cpu >= nr_cpumask_bits ||
+ (event->cpu >= 0 && !cpu_online(event->cpu)))
+ return -ENODEV;
+
+ /* Force reset of idle/hv excludes regardless of what the
+ * user requested.
+ */
+ if (event->attr.exclude_hv)
+ event->attr.exclude_hv = 0;
+ if (event->attr.exclude_idle)
+ event->attr.exclude_idle = 0;
+
+ err = __hw_perf_event_init(event);
+ if (unlikely(err))
+ if (event->destroy)
+ event->destroy(event);
+ return err;
+}
+
+static void cpumsf_pmu_enable(struct pmu *pmu)
+{
+ struct cpu_hw_sf *cpuhw = &__get_cpu_var(cpu_hw_sf);
+ struct hw_perf_event *hwc;
+ int err;
+
+ if (cpuhw->flags & PMU_F_ENABLED)
+ return;
+
+ if (cpuhw->flags & PMU_F_ERR_MASK)
+ return;
+
+ /* Check whether to extend the sampling buffer.
+ *
+ * Two conditions trigger an increase of the sampling buffer for a
+ * perf event:
+ * 1. Postponed buffer allocations from the event initialization.
+ * 2. Sampling overflows that contribute to pending allocations.
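+ *
+ * Both cases are accounted in SFB_ALLOC_REG of the event hardware, so
+ * the single sfb_has_pending_allocs() check below covers postponed
+ * allocations as well as overflow-triggered extents.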
+ *
+ * Note that the extend_sampling_buffer() function disables the sampling
+ * facility, but it can be fully re-enabled using sampling controls that
+ * have been saved in cpumsf_pmu_disable().
+ */
+ if (cpuhw->event) {
+ hwc = &cpuhw->event->hw;
+ /* Account number of overflow-designated buffer extents */
+ sfb_account_overflows(cpuhw, hwc);
+ if (sfb_has_pending_allocs(&cpuhw->sfb, hwc))
+ extend_sampling_buffer(&cpuhw->sfb, hwc);
+ }
+
+ /* (Re)enable the PMU and sampling facility */
+ cpuhw->flags |= PMU_F_ENABLED;
+ barrier();
+
+ err = lsctl(&cpuhw->lsctl);
+ if (err) {
+ cpuhw->flags &= ~PMU_F_ENABLED;
+ pr_err("Loading sampling controls failed: op=%i err=%i\n",
+ 1, err);
+ return;
+ }
+
+ debug_sprintf_event(sfdbg, 6, "pmu_enable: es=%i cs=%i ed=%i cd=%i "
+ "tear=%p dear=%p\n", cpuhw->lsctl.es, cpuhw->lsctl.cs,
+ cpuhw->lsctl.ed, cpuhw->lsctl.cd,
+ (void *) cpuhw->lsctl.tear, (void *) cpuhw->lsctl.dear);
+}
+
+static void cpumsf_pmu_disable(struct pmu *pmu)
+{
+ struct cpu_hw_sf *cpuhw = &__get_cpu_var(cpu_hw_sf);
+ struct hws_lsctl_request_block inactive;
+ struct hws_qsi_info_block si;
+ int err;
+
+ if (!(cpuhw->flags & PMU_F_ENABLED))
+ return;
+
+ if (cpuhw->flags & PMU_F_ERR_MASK)
+ return;
+
+ /* Switch off sampling activation control */
+ inactive = cpuhw->lsctl;
+ inactive.cs = 0;
+ inactive.cd = 0;
+
+ err = lsctl(&inactive);
+ if (err) {
+ pr_err("Loading sampling controls failed: op=%i err=%i\n",
+ 2, err);
+ return;
+ }
+
+ /* Save state of TEAR and DEAR register contents */
+ if (!qsi(&si)) {
+ /* TEAR/DEAR values are valid only if the sampling facility is
+ * enabled. Note that cpumsf_pmu_disable() might be called even
+ * for a disabled sampling facility because cpumsf_pmu_enable()
+ * controls the enable/disable state.
+ */
+ if (si.es) {
+ cpuhw->lsctl.tear = si.tear;
+ cpuhw->lsctl.dear = si.dear;
+ }
+ } else
+ debug_sprintf_event(sfdbg, 3, "cpumsf_pmu_disable: "
+ "qsi() failed with err=%i\n", err);
+
+ cpuhw->flags &= ~PMU_F_ENABLED;
+}
+
+/* perf_exclude_event() - Filter event
+ * @event: The perf event
+ * @regs: pt_regs structure
+ * @sde_regs: Sample-data-entry (sde) regs structure
+ *
+ * Filter perf events according to their exclude specification.
+ *
+ * Return non-zero if the event shall be excluded.
+ */
+static int perf_exclude_event(struct perf_event *event, struct pt_regs *regs,
+ struct perf_sf_sde_regs *sde_regs)
+{
+ if (event->attr.exclude_user && user_mode(regs))
+ return 1;
+ if (event->attr.exclude_kernel && !user_mode(regs))
+ return 1;
+ if (event->attr.exclude_guest && sde_regs->in_guest)
+ return 1;
+ if (event->attr.exclude_host && !sde_regs->in_guest)
+ return 1;
+ return 0;
+}
+
+/* perf_push_sample() - Push samples to perf
+ * @event: The perf event
+ * @sfr: Raw hardware sample data
+ *
+ * Use the hardware sample data to create a perf event sample. The sample
+ * is then pushed to the event subsystem and the function checks for
+ * possible event overflows. If an event overflow occurs, the PMU is
+ * stopped.
+ *
+ * Return non-zero if an event overflow occurred.
+ */ +static int perf_push_sample(struct perf_event *event, struct sf_raw_sample *sfr) +{ + int overflow; + struct pt_regs regs; + struct perf_sf_sde_regs *sde_regs; + struct perf_sample_data data; + struct perf_raw_record raw; + + /* Set up the perf sample */ + perf_sample_data_init(&data, 0, event->hw.last_period); + raw.size = sfr->size; + raw.data = sfr; + data.raw = &raw; + + /* Set up pt_regs to look like a CPU-measurement external interrupt + * using the Program Request Alert code. The regs.int_parm_long + * field, which is otherwise unused, contains additional + * sample-data-entry related indicators. + */ + memset(&regs, 0, sizeof(regs)); + regs.int_code = 0x1407; + regs.int_parm = CPU_MF_INT_SF_PRA; + sde_regs = (struct perf_sf_sde_regs *) &regs.int_parm_long; + + regs.psw.addr = sfr->basic.ia; + if (sfr->basic.T) + regs.psw.mask |= PSW_MASK_DAT; + if (sfr->basic.W) + regs.psw.mask |= PSW_MASK_WAIT; + if (sfr->basic.P) + regs.psw.mask |= PSW_MASK_PSTATE; + switch (sfr->basic.AS) { + case 0x0: + regs.psw.mask |= PSW_ASC_PRIMARY; + break; + case 0x1: + regs.psw.mask |= PSW_ASC_ACCREG; + break; + case 0x2: + regs.psw.mask |= PSW_ASC_SECONDARY; + break; + case 0x3: + regs.psw.mask |= PSW_ASC_HOME; + break; + } + + /* The host-program-parameter (hpp) contains the sie control + * block that is set by sie64a() in entry64.S. Check if hpp + * refers to a valid control block and set sde_regs flags + * accordingly. This would allow hpp values to be used for other + * purposes too. + * For now, simply use a non-zero value as guest indicator. + */ + if (sfr->basic.hpp) + sde_regs->in_guest = 1; + + overflow = 0; + if (perf_exclude_event(event, &regs, sde_regs)) + goto out; + if (perf_event_overflow(event, &data, &regs)) { + overflow = 1; + event->pmu->stop(event, 0); + } + perf_event_update_userpage(event); +out: + return overflow; +} + +static void perf_event_count_update(struct perf_event *event, u64 count) +{ + local64_add(count, &event->count); +} + +static int sample_format_is_valid(struct hws_combined_entry *sample, + unsigned int flags) +{ + if (likely(flags & PERF_CPUM_SF_BASIC_MODE)) + /* Only basic-sampling data entries with data-entry-format + * version of 0x0001 can be processed. + */ + if (sample->basic.def != 0x0001) + return 0; + if (flags & PERF_CPUM_SF_DIAG_MODE) + /* The data-entry-format number of diagnostic-sampling data + * entries can vary. Because diagnostic data is just passed + * through, do only a sanity check on the DEF. + */ + if (sample->diag.def < 0x8001) + return 0; + return 1; +} + +static int sample_is_consistent(struct hws_combined_entry *sample, + unsigned long flags) +{ + /* This check applies to the basic-sampling data entry of a potentially + * combined-sampling data entry. Invalid entries cannot be processed + * by the PMU and, thus, do not deliver an associated + * diagnostic-sampling data entry. + */ + if (unlikely(!(flags & PERF_CPUM_SF_BASIC_MODE))) + return 0; + /* + * Samples are skipped if they are invalid or if their instruction + * address is not predictable, i.e., the wait-state bit is + * set.
+ */ + if (sample->basic.I || sample->basic.W) + return 0; + return 1; +} + +static void reset_sample_slot(struct hws_combined_entry *sample, + unsigned long flags) +{ + if (likely(flags & PERF_CPUM_SF_BASIC_MODE)) + sample->basic.def = 0; + if (flags & PERF_CPUM_SF_DIAG_MODE) + sample->diag.def = 0; +} + +static void sfr_store_sample(struct sf_raw_sample *sfr, + struct hws_combined_entry *sample) +{ + if (likely(sfr->format & PERF_CPUM_SF_BASIC_MODE)) + sfr->basic = sample->basic; + if (sfr->format & PERF_CPUM_SF_DIAG_MODE) + memcpy(&sfr->diag, &sample->diag, sfr->dsdes); +} + +static void debug_sample_entry(struct hws_combined_entry *sample, + struct hws_trailer_entry *te, + unsigned long flags) +{ + debug_sprintf_event(sfdbg, 4, "hw_collect_samples: Found unknown " + "sampling data entry: te->f=%i basic.def=%04x (%p)" + " diag.def=%04x (%p)\n", te->f, + sample->basic.def, &sample->basic, + (flags & PERF_CPUM_SF_DIAG_MODE) + ? sample->diag.def : 0xFFFF, + (flags & PERF_CPUM_SF_DIAG_MODE) + ? &sample->diag : NULL); +} + +/* hw_collect_samples() - Walk through a sample-data-block and collect samples + * @event: The perf event + * @sdbt: Sample-data-block table + * @overflow: Event overflow counter + * + * Walks through a sample-data-block and collects sampling data entries that are + * then pushed to the perf event subsystem. Depending on the sampling function, + * there can be either basic-sampling or combined-sampling data entries. A + * combined-sampling data entry consists of a basic- and a diagnostic-sampling + * data entry. The sampling function is determined by the flags in the perf + * event hardware structure. The function always works with a combined-sampling + * data entry but ignores the diagnostic portion if it is not available. + * + * Note that the implementation focuses on basic-sampling data entries and, if + * such an entry is not valid, the entire combined-sampling data entry is + * ignored. + * + * The overflow variable counts the number of samples that have been discarded + * due to a perf event overflow. + */ +static void hw_collect_samples(struct perf_event *event, unsigned long *sdbt, + unsigned long long *overflow) +{ + unsigned long flags = SAMPL_FLAGS(&event->hw); + struct hws_combined_entry *sample; + struct hws_trailer_entry *te; + struct sf_raw_sample *sfr; + size_t sample_size; + + /* Prepare and initialize raw sample data */ + sfr = (struct sf_raw_sample *) RAWSAMPLE_REG(&event->hw); + sfr->format = flags & PERF_CPUM_SF_MODE_MASK; + + sample_size = event_sample_size(&event->hw); + te = (struct hws_trailer_entry *) trailer_entry_ptr(*sdbt); + sample = (struct hws_combined_entry *) *sdbt; + while ((unsigned long *) sample < (unsigned long *) te) { + /* Check for an empty sample */ + if (!sample->basic.def) + break; + + /* Update perf event period */ + perf_event_count_update(event, SAMPL_RATE(&event->hw)); + + /* Check sampling data entry */ + if (sample_format_is_valid(sample, flags)) { + /* If an event overflow occurred, the PMU is stopped to + * throttle event delivery. Remaining sample data is + * discarded. + */ + if (!*overflow) { + if (sample_is_consistent(sample, flags)) { + /* Deliver sample data to perf */ + sfr_store_sample(sfr, sample); + *overflow = perf_push_sample(event, sfr); + } + } else + /* Count discarded samples */ + *overflow += 1; + } else { + debug_sample_entry(sample, te, flags); + /* Sample slot is not yet written or other record.
+ * + * This condition can occur if the buffer was reused + * from a combined basic- and diagnostic-sampling. + * If only basic-sampling is then active, entries are + * written into the larger diagnostic entries. + * This is typically the case for sample-data-blocks + * that are not full. Stop processing if the first + * invalid format was detected. + */ + if (!te->f) + break; + } + + /* Reset sample slot and advance to next sample */ + reset_sample_slot(sample, flags); + sample += sample_size; + } +} + +/* hw_perf_event_update() - Process sampling buffer + * @event: The perf event + * @flush_all: Flag to also flush partially filled sample-data-blocks + * + * Processes the sampling buffer and creates perf event samples. + * The sampling buffer position is retrieved and saved in the TEAR_REG + * register of the specified perf event. + * + * Only full sample-data-blocks are processed. Specify the flush_all flag + * to also walk through partially filled sample-data-blocks. It is ignored + * if PERF_CPUM_SF_FULL_BLOCKS is set. The PERF_CPUM_SF_FULL_BLOCKS flag + * enforces the processing of full sample-data-blocks only (trailer entries + * with the block-full-indicator bit set). + */ +static void hw_perf_event_update(struct perf_event *event, int flush_all) +{ + struct hw_perf_event *hwc = &event->hw; + struct hws_trailer_entry *te; + unsigned long *sdbt; + unsigned long long event_overflow, sampl_overflow, num_sdb, te_flags; + int done; + + if (flush_all && SDB_FULL_BLOCKS(hwc)) + flush_all = 0; + + sdbt = (unsigned long *) TEAR_REG(hwc); + done = event_overflow = sampl_overflow = num_sdb = 0; + while (!done) { + /* Get the trailer entry of the sample-data-block */ + te = (struct hws_trailer_entry *) trailer_entry_ptr(*sdbt); + + /* Leave loop if no more work to do (block full indicator) */ + if (!te->f) { + done = 1; + if (!flush_all) + break; + } + + /* Check the sample overflow count */ + if (te->overflow) + /* Account sample overflows and, if a particular limit + * is reached, extend the sampling buffer. + * For details, see sfb_account_overflows(). + */ + sampl_overflow += te->overflow; + + /* Timestamps are valid for full sample-data-blocks only */ + debug_sprintf_event(sfdbg, 6, "hw_perf_event_update: sdbt=%p " + "overflow=%llu timestamp=0x%llx\n", + sdbt, te->overflow, + (te->f) ? trailer_timestamp(te) : 0ULL); + + /* Collect all samples from a single sample-data-block and + * flag if a (perf) event overflow happened. If so, the PMU + * is stopped and remaining samples will be discarded. + */ + hw_collect_samples(event, sdbt, &event_overflow); + num_sdb++; + + /* Reset trailer (using compare-double-and-swap) */ + do { + te_flags = te->flags & ~SDB_TE_BUFFER_FULL_MASK; + te_flags |= SDB_TE_ALERT_REQ_MASK; + } while (!cmpxchg_double(&te->flags, &te->overflow, + te->flags, te->overflow, + te_flags, 0ULL)); + + /* Advance to next sample-data-block */ + sdbt++; + if (is_link_entry(sdbt)) + sdbt = get_next_sdbt(sdbt); + + /* Update event hardware registers */ + TEAR_REG(hwc) = (unsigned long) sdbt; + + /* Stop processing sample-data if all samples of the current + * sample-data-block were flushed even if it was not full. + */ + if (flush_all && done) + break; + + /* If an event overflow happened, discard samples by + * processing any remaining sample-data-blocks.
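 + * Setting flush_all then causes the loop to walk and reset the + * remaining (even partially filled) blocks so that the buffer is + * left in a clean state.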
+ */ + if (event_overflow) + flush_all = 1; + } + + /* Account sample overflows in the event hardware structure */ + if (sampl_overflow) + OVERFLOW_REG(hwc) = DIV_ROUND_UP(OVERFLOW_REG(hwc) + + sampl_overflow, 1 + num_sdb); + if (sampl_overflow || event_overflow) + debug_sprintf_event(sfdbg, 4, "hw_perf_event_update: " + "overflow stats: sample=%llu event=%llu\n", + sampl_overflow, event_overflow); +} + +static void cpumsf_pmu_read(struct perf_event *event) +{ + /* Nothing to do ... updates are interrupt-driven */ +} + +/* Activate sampling control. + * Next call of pmu_enable() starts sampling. + */ +static void cpumsf_pmu_start(struct perf_event *event, int flags) +{ + struct cpu_hw_sf *cpuhw = &__get_cpu_var(cpu_hw_sf); + + if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) + return; + + if (flags & PERF_EF_RELOAD) + WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); + + perf_pmu_disable(event->pmu); + event->hw.state = 0; + cpuhw->lsctl.cs = 1; + if (SAMPL_DIAG_MODE(&event->hw)) + cpuhw->lsctl.cd = 1; + perf_pmu_enable(event->pmu); +} + +/* Deactivate sampling control. + * Next call of pmu_enable() stops sampling. + */ +static void cpumsf_pmu_stop(struct perf_event *event, int flags) +{ + struct cpu_hw_sf *cpuhw = &__get_cpu_var(cpu_hw_sf); + + if (event->hw.state & PERF_HES_STOPPED) + return; + + perf_pmu_disable(event->pmu); + cpuhw->lsctl.cs = 0; + cpuhw->lsctl.cd = 0; + event->hw.state |= PERF_HES_STOPPED; + + if ((flags & PERF_EF_UPDATE) && !(event->hw.state & PERF_HES_UPTODATE)) { + hw_perf_event_update(event, 1); + event->hw.state |= PERF_HES_UPTODATE; + } + perf_pmu_enable(event->pmu); +} + +static int cpumsf_pmu_add(struct perf_event *event, int flags) +{ + struct cpu_hw_sf *cpuhw = &__get_cpu_var(cpu_hw_sf); + int err; + + if (cpuhw->flags & PMU_F_IN_USE) + return -EAGAIN; + + if (!cpuhw->sfb.sdbt) + return -EINVAL; + + err = 0; + perf_pmu_disable(event->pmu); + + event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED; + + /* Set up sampling controls. Always program the sampling register + * using the SDB-table start. Reset TEAR_REG event hardware register + * that is used by hw_perf_event_update() to store the sampling buffer + * position after samples have been flushed. + */ + cpuhw->lsctl.s = 0; + cpuhw->lsctl.h = 1; + cpuhw->lsctl.tear = (unsigned long) cpuhw->sfb.sdbt; + cpuhw->lsctl.dear = *(unsigned long *) cpuhw->sfb.sdbt; + cpuhw->lsctl.interval = SAMPL_RATE(&event->hw); + hw_reset_registers(&event->hw, cpuhw->sfb.sdbt); + + /* Ensure sampling functions are in the disabled state. If disabled, + * switch on sampling enable control. 
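 + * Finding es or ed already set would indicate a conflicting sampling + * event; only one sampling event per CPU is supported.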
*/ + if (WARN_ON_ONCE(cpuhw->lsctl.es == 1 || cpuhw->lsctl.ed == 1)) { + err = -EAGAIN; + goto out; + } + cpuhw->lsctl.es = 1; + if (SAMPL_DIAG_MODE(&event->hw)) + cpuhw->lsctl.ed = 1; + + /* Set in_use flag and store event */ + event->hw.idx = 0; /* only one sampling event per CPU supported */ + cpuhw->event = event; + cpuhw->flags |= PMU_F_IN_USE; + + if (flags & PERF_EF_START) + cpumsf_pmu_start(event, PERF_EF_RELOAD); +out: + perf_event_update_userpage(event); + perf_pmu_enable(event->pmu); + return err; +} + +static void cpumsf_pmu_del(struct perf_event *event, int flags) +{ + struct cpu_hw_sf *cpuhw = &__get_cpu_var(cpu_hw_sf); + + perf_pmu_disable(event->pmu); + cpumsf_pmu_stop(event, PERF_EF_UPDATE); + + cpuhw->lsctl.es = 0; + cpuhw->lsctl.ed = 0; + cpuhw->flags &= ~PMU_F_IN_USE; + cpuhw->event = NULL; + + perf_event_update_userpage(event); + perf_pmu_enable(event->pmu); +} + +static int cpumsf_pmu_event_idx(struct perf_event *event) +{ + return event->hw.idx; +} + +CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC, PERF_EVENT_CPUM_SF); +CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC_DIAG, PERF_EVENT_CPUM_SF_DIAG); + +static struct attribute *cpumsf_pmu_events_attr[] = { + CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC), + CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC_DIAG), + NULL, +}; + +PMU_FORMAT_ATTR(event, "config:0-63"); + +static struct attribute *cpumsf_pmu_format_attr[] = { + &format_attr_event.attr, + NULL, +}; + +static struct attribute_group cpumsf_pmu_events_group = { + .name = "events", + .attrs = cpumsf_pmu_events_attr, +}; +static struct attribute_group cpumsf_pmu_format_group = { + .name = "format", + .attrs = cpumsf_pmu_format_attr, +}; +static const struct attribute_group *cpumsf_pmu_attr_groups[] = { + &cpumsf_pmu_events_group, + &cpumsf_pmu_format_group, + NULL, +}; + +static struct pmu cpumf_sampling = { + .pmu_enable = cpumsf_pmu_enable, + .pmu_disable = cpumsf_pmu_disable, + + .event_init = cpumsf_pmu_event_init, + .add = cpumsf_pmu_add, + .del = cpumsf_pmu_del, + + .start = cpumsf_pmu_start, + .stop = cpumsf_pmu_stop, + .read = cpumsf_pmu_read, + + .event_idx = cpumsf_pmu_event_idx, + .attr_groups = cpumsf_pmu_attr_groups, +}; + +static void cpumf_measurement_alert(struct ext_code ext_code, + unsigned int alert, unsigned long unused) +{ + struct cpu_hw_sf *cpuhw; + + if (!(alert & CPU_MF_INT_SF_MASK)) + return; + inc_irq_stat(IRQEXT_CMS); + cpuhw = &__get_cpu_var(cpu_hw_sf); + + /* Measurement alerts are shared and might happen when the PMU + * is not reserved. Ignore these alerts in this case. */ + if (!(cpuhw->flags & PMU_F_RESERVED)) + return; + + /* The processing below must take care of multiple alert events that + * might be indicated concurrently. 
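 + * For example, a program request alert can be indicated together with + * a sampling authorization change; therefore, each alert bit is tested + * separately below.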
*/ + + /* Program alert request */ + if (alert & CPU_MF_INT_SF_PRA) { + if (cpuhw->flags & PMU_F_IN_USE) + hw_perf_event_update(cpuhw->event, 0); + else + WARN_ON_ONCE(!(cpuhw->flags & PMU_F_IN_USE)); + } + + /* Report measurement alerts only for non-PRA codes */ + if (alert != CPU_MF_INT_SF_PRA) + debug_sprintf_event(sfdbg, 6, "measurement alert: 0x%x\n", alert); + + /* Sampling authorization change request */ + if (alert & CPU_MF_INT_SF_SACA) + qsi(&cpuhw->qsi); + + /* Loss of sample data due to high-priority machine activities */ + if (alert & CPU_MF_INT_SF_LSDA) { + pr_err("Sample data was lost\n"); + cpuhw->flags |= PMU_F_ERR_LSDA; + sf_disable(); + } + + /* Invalid sampling buffer entry */ + if (alert & (CPU_MF_INT_SF_IAE|CPU_MF_INT_SF_ISE)) { + pr_err("A sampling buffer entry is incorrect (alert=0x%x)\n", + alert); + cpuhw->flags |= PMU_F_ERR_IBE; + sf_disable(); + } +} + +static int cpumf_pmu_notifier(struct notifier_block *self, + unsigned long action, void *hcpu) +{ + unsigned int cpu = (long) hcpu; + int flags; + + /* Ignore the notification if no events are scheduled on the PMU. + * This might be racy... + */ + if (!atomic_read(&num_events)) + return NOTIFY_OK; + + switch (action & ~CPU_TASKS_FROZEN) { + case CPU_ONLINE: + case CPU_ONLINE_FROZEN: + flags = PMC_INIT; + smp_call_function_single(cpu, setup_pmc_cpu, &flags, 1); + break; + case CPU_DOWN_PREPARE: + flags = PMC_RELEASE; + smp_call_function_single(cpu, setup_pmc_cpu, &flags, 1); + break; + default: + break; + } + + return NOTIFY_OK; +} + +static int param_get_sfb_size(char *buffer, const struct kernel_param *kp) +{ + if (!cpum_sf_avail()) + return -ENODEV; + return sprintf(buffer, "%lu,%lu", CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB); +} + +static int param_set_sfb_size(const char *val, const struct kernel_param *kp) +{ + int rc; + unsigned long min, max; + + if (!cpum_sf_avail()) + return -ENODEV; + if (!val || !strlen(val)) + return -EINVAL; + + /* Valid parameter values: "min,max" or "max" */ + min = CPUM_SF_MIN_SDB; + max = CPUM_SF_MAX_SDB; + if (strchr(val, ',')) + rc = (sscanf(val, "%lu,%lu", &min, &max) == 2) ? 
0 : -EINVAL; + else + rc = kstrtoul(val, 10, &max); + + if (min < 2 || min >= max || max > get_num_physpages()) + rc = -EINVAL; + if (rc) + return rc; + + sfb_set_limits(min, max); + pr_info("The sampling buffer limits have changed to: " + "min=%lu max=%lu (diag=x%lu)\n", + CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB, CPUM_SF_SDB_DIAG_FACTOR); + return 0; +} + +#define param_check_sfb_size(name, p) __param_check(name, p, void) +static struct kernel_param_ops param_ops_sfb_size = { + .set = param_set_sfb_size, + .get = param_get_sfb_size, +}; + +#define RS_INIT_FAILURE_QSI 0x0001 +#define RS_INIT_FAILURE_BSDES 0x0002 +#define RS_INIT_FAILURE_ALRT 0x0003 +#define RS_INIT_FAILURE_PERF 0x0004 +static void __init pr_cpumsf_err(unsigned int reason) +{ + pr_err("Sampling facility support for perf is not available: " + "reason=%04x\n", reason); +} + +static int __init init_cpum_sampling_pmu(void) +{ + struct hws_qsi_info_block si; + int err; + + if (!cpum_sf_avail()) + return -ENODEV; + + memset(&si, 0, sizeof(si)); + if (qsi(&si)) { + pr_cpumsf_err(RS_INIT_FAILURE_QSI); + return -ENODEV; + } + + if (si.bsdes != sizeof(struct hws_basic_entry)) { + pr_cpumsf_err(RS_INIT_FAILURE_BSDES); + return -EINVAL; + } + + if (si.ad) + sfb_set_limits(CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB); + + sfdbg = debug_register(KMSG_COMPONENT, 2, 1, 80); + if (!sfdbg) + pr_err("Registering for s390dbf failed\n"); + debug_register_view(sfdbg, &debug_sprintf_view); + + err = register_external_irq(EXT_IRQ_MEASURE_ALERT, + cpumf_measurement_alert); + if (err) { + pr_cpumsf_err(RS_INIT_FAILURE_ALRT); + goto out; + } + + err = perf_pmu_register(&cpumf_sampling, "cpum_sf", PERF_TYPE_RAW); + if (err) { + pr_cpumsf_err(RS_INIT_FAILURE_PERF); + unregister_external_irq(EXT_IRQ_MEASURE_ALERT, + cpumf_measurement_alert); + goto out; + } + perf_cpu_notifier(cpumf_pmu_notifier); +out: + return err; +} +arch_initcall(init_cpum_sampling_pmu); +core_param(cpum_sfb_size, CPUM_SF_MAX_SDB, sfb_size, 0640); diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c new file mode 100644 index 00000000000..61595c1f0a0 --- /dev/null +++ b/arch/s390/kernel/perf_event.c @@ -0,0 +1,324 @@ +/* + * Performance event support for s390x + * + * Copyright IBM Corp. 2012, 2013 + * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License (version 2 only) + * as published by the Free Software Foundation. 
+ */ +#define KMSG_COMPONENT "perf" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include <linux/kernel.h> +#include <linux/perf_event.h> +#include <linux/kvm_host.h> +#include <linux/percpu.h> +#include <linux/export.h> +#include <linux/seq_file.h> +#include <linux/spinlock.h> +#include <linux/sysfs.h> +#include <asm/irq.h> +#include <asm/cpu_mf.h> +#include <asm/lowcore.h> +#include <asm/processor.h> +#include <asm/sysinfo.h> + +const char *perf_pmu_name(void) +{ + if (cpum_cf_avail() || cpum_sf_avail()) + return "CPU-Measurement Facilities (CPU-MF)"; + return "pmu"; +} +EXPORT_SYMBOL(perf_pmu_name); + +int perf_num_counters(void) +{ + int num = 0; + + if (cpum_cf_avail()) + num += PERF_CPUM_CF_MAX_CTR; + if (cpum_sf_avail()) + num += PERF_CPUM_SF_MAX_CTR; + + return num; +} +EXPORT_SYMBOL(perf_num_counters); + +static struct kvm_s390_sie_block *sie_block(struct pt_regs *regs) +{ + struct stack_frame *stack = (struct stack_frame *) regs->gprs[15]; + + if (!stack) + return NULL; + + return (struct kvm_s390_sie_block *) stack->empty1[0]; +} + +static bool is_in_guest(struct pt_regs *regs) +{ + if (user_mode(regs)) + return false; +#if IS_ENABLED(CONFIG_KVM) + return instruction_pointer(regs) == (unsigned long) &sie_exit; +#else + return false; +#endif +} + +static unsigned long guest_is_user_mode(struct pt_regs *regs) +{ + return sie_block(regs)->gpsw.mask & PSW_MASK_PSTATE; +} + +static unsigned long instruction_pointer_guest(struct pt_regs *regs) +{ + return sie_block(regs)->gpsw.addr & PSW_ADDR_INSN; +} + +unsigned long perf_instruction_pointer(struct pt_regs *regs) +{ + return is_in_guest(regs) ? instruction_pointer_guest(regs) + : instruction_pointer(regs); +} + +static unsigned long perf_misc_guest_flags(struct pt_regs *regs) +{ + return guest_is_user_mode(regs) ? PERF_RECORD_MISC_GUEST_USER + : PERF_RECORD_MISC_GUEST_KERNEL; +} + +static unsigned long perf_misc_flags_sf(struct pt_regs *regs) +{ + struct perf_sf_sde_regs *sde_regs; + unsigned long flags; + + sde_regs = (struct perf_sf_sde_regs *) &regs->int_parm_long; + if (sde_regs->in_guest) + flags = user_mode(regs) ? PERF_RECORD_MISC_GUEST_USER + : PERF_RECORD_MISC_GUEST_KERNEL; + else + flags = user_mode(regs) ? PERF_RECORD_MISC_USER + : PERF_RECORD_MISC_KERNEL; + return flags; +} + +unsigned long perf_misc_flags(struct pt_regs *regs) +{ + /* Check if the cpum_sf PMU has created the pt_regs structure. + * In this case, perf misc flags can be easily extracted. Otherwise, + * do regular checks on the pt_regs content. + */ + if (regs->int_code == 0x1407 && regs->int_parm == CPU_MF_INT_SF_PRA) + if (!regs->gprs[15]) + return perf_misc_flags_sf(regs); + + if (is_in_guest(regs)) + return perf_misc_guest_flags(regs); + + return user_mode(regs) ?
PERF_RECORD_MISC_USER + : PERF_RECORD_MISC_KERNEL; +} + +static void print_debug_cf(void) +{ + struct cpumf_ctr_info cf_info; + int cpu = smp_processor_id(); + + memset(&cf_info, 0, sizeof(cf_info)); + if (!qctri(&cf_info)) + pr_info("CPU[%i] CPUM_CF: ver=%u.%u A=%04x E=%04x C=%04x\n", + cpu, cf_info.cfvn, cf_info.csvn, + cf_info.auth_ctl, cf_info.enable_ctl, cf_info.act_ctl); +} + +static void print_debug_sf(void) +{ + struct hws_qsi_info_block si; + int cpu = smp_processor_id(); + + memset(&si, 0, sizeof(si)); + if (qsi(&si)) + return; + + pr_info("CPU[%i] CPUM_SF: basic=%i diag=%i min=%lu max=%lu cpu_speed=%u\n", + cpu, si.as, si.ad, si.min_sampl_rate, si.max_sampl_rate, + si.cpu_speed); + + if (si.as) + pr_info("CPU[%i] CPUM_SF: Basic-sampling: a=%i e=%i c=%i" + " bsdes=%i tear=%016lx dear=%016lx\n", cpu, + si.as, si.es, si.cs, si.bsdes, si.tear, si.dear); + if (si.ad) + pr_info("CPU[%i] CPUM_SF: Diagnostic-sampling: a=%i e=%i c=%i" + " dsdes=%i tear=%016lx dear=%016lx\n", cpu, + si.ad, si.ed, si.cd, si.dsdes, si.tear, si.dear); +} + +void perf_event_print_debug(void) +{ + unsigned long flags; + + local_irq_save(flags); + if (cpum_cf_avail()) + print_debug_cf(); + if (cpum_sf_avail()) + print_debug_sf(); + local_irq_restore(flags); +} + +/* Service level infrastructure */ +static void sl_print_counter(struct seq_file *m) +{ + struct cpumf_ctr_info ci; + + memset(&ci, 0, sizeof(ci)); + if (qctri(&ci)) + return; + + seq_printf(m, "CPU-MF: Counter facility: version=%u.%u " + "authorization=%04x\n", ci.cfvn, ci.csvn, ci.auth_ctl); +} + +static void sl_print_sampling(struct seq_file *m) +{ + struct hws_qsi_info_block si; + + memset(&si, 0, sizeof(si)); + if (qsi(&si)) + return; + + if (!si.as && !si.ad) + return; + + seq_printf(m, "CPU-MF: Sampling facility: min_rate=%lu max_rate=%lu" + " cpu_speed=%u\n", si.min_sampl_rate, si.max_sampl_rate, + si.cpu_speed); + if (si.as) + seq_printf(m, "CPU-MF: Sampling facility: mode=basic" + " sample_size=%u\n", si.bsdes); + if (si.ad) + seq_printf(m, "CPU-MF: Sampling facility: mode=diagnostic" + " sample_size=%u\n", si.dsdes); +} + +static void service_level_perf_print(struct seq_file *m, + struct service_level *sl) +{ + if (cpum_cf_avail()) + sl_print_counter(m); + if (cpum_sf_avail()) + sl_print_sampling(m); +} + +static struct service_level service_level_perf = { + .seq_print = service_level_perf_print, +}; + +static int __init service_level_perf_register(void) +{ + return register_service_level(&service_level_perf); +} +arch_initcall(service_level_perf_register); + +/* See also arch/s390/kernel/traps.c */ +static unsigned long __store_trace(struct perf_callchain_entry *entry, + unsigned long sp, + unsigned long low, unsigned long high) +{ + struct stack_frame *sf; + struct pt_regs *regs; + + while (1) { + sp = sp & PSW_ADDR_INSN; + if (sp < low || sp > high - sizeof(*sf)) + return sp; + sf = (struct stack_frame *) sp; + perf_callchain_store(entry, sf->gprs[8] & PSW_ADDR_INSN); + /* Follow the backchain. */ + while (1) { + low = sp; + sp = sf->back_chain & PSW_ADDR_INSN; + if (!sp) + break; + if (sp <= low || sp > high - sizeof(*sf)) + return sp; + sf = (struct stack_frame *) sp; + perf_callchain_store(entry, + sf->gprs[8] & PSW_ADDR_INSN); + } + /* Zero backchain detected, check for interrupt frame. 
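 + * A zero backchain can mean that the pt_regs area of an interrupt + * follows; its gprs[15] then provides the stack pointer of the + * interrupted context so that the walk can continue on that stack.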
*/ + sp = (unsigned long) (sf + 1); + if (sp <= low || sp > high - sizeof(*regs)) + return sp; + regs = (struct pt_regs *) sp; + perf_callchain_store(entry, sf->gprs[8] & PSW_ADDR_INSN); + low = sp; + sp = regs->gprs[15]; + } +} + +void perf_callchain_kernel(struct perf_callchain_entry *entry, + struct pt_regs *regs) +{ + unsigned long head; + struct stack_frame *head_sf; + + if (user_mode(regs)) + return; + + head = regs->gprs[15]; + head_sf = (struct stack_frame *) head; + + if (!head_sf || !head_sf->back_chain) + return; + + head = head_sf->back_chain; + head = __store_trace(entry, head, S390_lowcore.async_stack - ASYNC_SIZE, + S390_lowcore.async_stack); + + __store_trace(entry, head, S390_lowcore.thread_info, + S390_lowcore.thread_info + THREAD_SIZE); +} + +/* Perf definitions for PMU event attributes in sysfs */ +ssize_t cpumf_events_sysfs_show(struct device *dev, + struct device_attribute *attr, char *page) +{ + struct perf_pmu_events_attr *pmu_attr; + + pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr); + return sprintf(page, "event=0x%04llx,name=%s\n", + pmu_attr->id, attr->attr.name); +} + +/* Reserve/release functions for sharing perf hardware */ +static DEFINE_SPINLOCK(perf_hw_owner_lock); +static void *perf_sampling_owner; + +int perf_reserve_sampling(void) +{ + int err; + + err = 0; + spin_lock(&perf_hw_owner_lock); + if (perf_sampling_owner) { + pr_warn("The sampling facility is already reserved by %p\n", + perf_sampling_owner); + err = -EBUSY; + } else + perf_sampling_owner = __builtin_return_address(0); + spin_unlock(&perf_hw_owner_lock); + return err; +} +EXPORT_SYMBOL(perf_reserve_sampling); + +void perf_release_sampling(void) +{ + spin_lock(&perf_hw_owner_lock); + WARN_ON(!perf_sampling_owner); + perf_sampling_owner = NULL; + spin_unlock(&perf_hw_owner_lock); +} +EXPORT_SYMBOL(perf_release_sampling); diff --git a/arch/s390/kernel/pgm_check.S b/arch/s390/kernel/pgm_check.S new file mode 100644 index 00000000000..813ec726087 --- /dev/null +++ b/arch/s390/kernel/pgm_check.S @@ -0,0 +1,152 @@ +/* + * Program check table. + * + * Copyright IBM Corp. 2012 + */ + +#include <linux/linkage.h> + +#ifdef CONFIG_32BIT +#define PGM_CHECK_64BIT(handler) .long default_trap_handler +#else +#define PGM_CHECK_64BIT(handler) .long handler +#endif + +#define PGM_CHECK(handler) .long handler +#define PGM_CHECK_DEFAULT PGM_CHECK(default_trap_handler) + +/* + * The program check table contains exactly 128 (0x00-0x7f) entries. Each + * line defines the 31- and/or 64-bit function to be called for the + * corresponding program check interruption code.
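 + * For example, interruption code 0x04 (protection exception) dispatches + * to do_protection_exception; codes without a dedicated handler fall + * back to default_trap_handler.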
+ */ +.section .rodata, "a" +ENTRY(pgm_check_table) +PGM_CHECK_DEFAULT /* 00 */ +PGM_CHECK(illegal_op) /* 01 */ +PGM_CHECK(privileged_op) /* 02 */ +PGM_CHECK(execute_exception) /* 03 */ +PGM_CHECK(do_protection_exception) /* 04 */ +PGM_CHECK(addressing_exception) /* 05 */ +PGM_CHECK(specification_exception) /* 06 */ +PGM_CHECK(data_exception) /* 07 */ +PGM_CHECK(overflow_exception) /* 08 */ +PGM_CHECK(divide_exception) /* 09 */ +PGM_CHECK(overflow_exception) /* 0a */ +PGM_CHECK(divide_exception) /* 0b */ +PGM_CHECK(hfp_overflow_exception) /* 0c */ +PGM_CHECK(hfp_underflow_exception) /* 0d */ +PGM_CHECK(hfp_significance_exception) /* 0e */ +PGM_CHECK(hfp_divide_exception) /* 0f */ +PGM_CHECK(do_dat_exception) /* 10 */ +PGM_CHECK(do_dat_exception) /* 11 */ +PGM_CHECK(translation_exception) /* 12 */ +PGM_CHECK(special_op_exception) /* 13 */ +PGM_CHECK_DEFAULT /* 14 */ +PGM_CHECK(operand_exception) /* 15 */ +PGM_CHECK_DEFAULT /* 16 */ +PGM_CHECK_DEFAULT /* 17 */ +PGM_CHECK_64BIT(transaction_exception) /* 18 */ +PGM_CHECK_DEFAULT /* 19 */ +PGM_CHECK_DEFAULT /* 1a */ +PGM_CHECK_DEFAULT /* 1b */ +PGM_CHECK(space_switch_exception) /* 1c */ +PGM_CHECK(hfp_sqrt_exception) /* 1d */ +PGM_CHECK_DEFAULT /* 1e */ +PGM_CHECK_DEFAULT /* 1f */ +PGM_CHECK_DEFAULT /* 20 */ +PGM_CHECK_DEFAULT /* 21 */ +PGM_CHECK_DEFAULT /* 22 */ +PGM_CHECK_DEFAULT /* 23 */ +PGM_CHECK_DEFAULT /* 24 */ +PGM_CHECK_DEFAULT /* 25 */ +PGM_CHECK_DEFAULT /* 26 */ +PGM_CHECK_DEFAULT /* 27 */ +PGM_CHECK_DEFAULT /* 28 */ +PGM_CHECK_DEFAULT /* 29 */ +PGM_CHECK_DEFAULT /* 2a */ +PGM_CHECK_DEFAULT /* 2b */ +PGM_CHECK_DEFAULT /* 2c */ +PGM_CHECK_DEFAULT /* 2d */ +PGM_CHECK_DEFAULT /* 2e */ +PGM_CHECK_DEFAULT /* 2f */ +PGM_CHECK_DEFAULT /* 30 */ +PGM_CHECK_DEFAULT /* 31 */ +PGM_CHECK_DEFAULT /* 32 */ +PGM_CHECK_DEFAULT /* 33 */ +PGM_CHECK_DEFAULT /* 34 */ +PGM_CHECK_DEFAULT /* 35 */ +PGM_CHECK_DEFAULT /* 36 */ +PGM_CHECK_DEFAULT /* 37 */ +PGM_CHECK_64BIT(do_dat_exception) /* 38 */ +PGM_CHECK_64BIT(do_dat_exception) /* 39 */ +PGM_CHECK_64BIT(do_dat_exception) /* 3a */ +PGM_CHECK_64BIT(do_dat_exception) /* 3b */ +PGM_CHECK_DEFAULT /* 3c */ +PGM_CHECK_DEFAULT /* 3d */ +PGM_CHECK_DEFAULT /* 3e */ +PGM_CHECK_DEFAULT /* 3f */ +PGM_CHECK_DEFAULT /* 40 */ +PGM_CHECK_DEFAULT /* 41 */ +PGM_CHECK_DEFAULT /* 42 */ +PGM_CHECK_DEFAULT /* 43 */ +PGM_CHECK_DEFAULT /* 44 */ +PGM_CHECK_DEFAULT /* 45 */ +PGM_CHECK_DEFAULT /* 46 */ +PGM_CHECK_DEFAULT /* 47 */ +PGM_CHECK_DEFAULT /* 48 */ +PGM_CHECK_DEFAULT /* 49 */ +PGM_CHECK_DEFAULT /* 4a */ +PGM_CHECK_DEFAULT /* 4b */ +PGM_CHECK_DEFAULT /* 4c */ +PGM_CHECK_DEFAULT /* 4d */ +PGM_CHECK_DEFAULT /* 4e */ +PGM_CHECK_DEFAULT /* 4f */ +PGM_CHECK_DEFAULT /* 50 */ +PGM_CHECK_DEFAULT /* 51 */ +PGM_CHECK_DEFAULT /* 52 */ +PGM_CHECK_DEFAULT /* 53 */ +PGM_CHECK_DEFAULT /* 54 */ +PGM_CHECK_DEFAULT /* 55 */ +PGM_CHECK_DEFAULT /* 56 */ +PGM_CHECK_DEFAULT /* 57 */ +PGM_CHECK_DEFAULT /* 58 */ +PGM_CHECK_DEFAULT /* 59 */ +PGM_CHECK_DEFAULT /* 5a */ +PGM_CHECK_DEFAULT /* 5b */ +PGM_CHECK_DEFAULT /* 5c */ +PGM_CHECK_DEFAULT /* 5d */ +PGM_CHECK_DEFAULT /* 5e */ +PGM_CHECK_DEFAULT /* 5f */ +PGM_CHECK_DEFAULT /* 60 */ +PGM_CHECK_DEFAULT /* 61 */ +PGM_CHECK_DEFAULT /* 62 */ +PGM_CHECK_DEFAULT /* 63 */ +PGM_CHECK_DEFAULT /* 64 */ +PGM_CHECK_DEFAULT /* 65 */ +PGM_CHECK_DEFAULT /* 66 */ +PGM_CHECK_DEFAULT /* 67 */ +PGM_CHECK_DEFAULT /* 68 */ +PGM_CHECK_DEFAULT /* 69 */ +PGM_CHECK_DEFAULT /* 6a */ +PGM_CHECK_DEFAULT /* 6b */ +PGM_CHECK_DEFAULT /* 6c */ +PGM_CHECK_DEFAULT /* 6d */ +PGM_CHECK_DEFAULT /* 6e */ +PGM_CHECK_DEFAULT /* 6f */ 
+PGM_CHECK_DEFAULT /* 70 */ +PGM_CHECK_DEFAULT /* 71 */ +PGM_CHECK_DEFAULT /* 72 */ +PGM_CHECK_DEFAULT /* 73 */ +PGM_CHECK_DEFAULT /* 74 */ +PGM_CHECK_DEFAULT /* 75 */ +PGM_CHECK_DEFAULT /* 76 */ +PGM_CHECK_DEFAULT /* 77 */ +PGM_CHECK_DEFAULT /* 78 */ +PGM_CHECK_DEFAULT /* 79 */ +PGM_CHECK_DEFAULT /* 7a */ +PGM_CHECK_DEFAULT /* 7b */ +PGM_CHECK_DEFAULT /* 7c */ +PGM_CHECK_DEFAULT /* 7d */ +PGM_CHECK_DEFAULT /* 7e */ +PGM_CHECK_DEFAULT /* 7f */ diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index 3201ae44799..93b9ca42e5c 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c @@ -1,7 +1,7 @@ /* * This file handles the architecture dependent parts of process handling. * - * Copyright IBM Corp. 1999,2009 + * Copyright IBM Corp. 1999, 2009 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>, * Hartmut Penner <hp@de.ibm.com>, * Denis Joseph Barrow, @@ -23,14 +23,15 @@ #include <linux/kprobes.h> #include <linux/random.h> #include <linux/module.h> -#include <asm/system.h> #include <asm/io.h> #include <asm/processor.h> +#include <asm/vtimer.h> +#include <asm/exec.h> #include <asm/irq.h> -#include <asm/timer.h> #include <asm/nmi.h> -#include <asm/compat.h> #include <asm/smp.h> +#include <asm/switch_to.h> +#include <asm/runtime_instr.h> #include "entry.h" asmlinkage void ret_from_fork(void) asm ("ret_from_fork"); @@ -60,85 +61,38 @@ unsigned long thread_saved_pc(struct task_struct *tsk) return sf->gprs[8]; } -/* - * The idle loop on a S390... - */ -static void default_idle(void) +void arch_cpu_idle(void) { - if (cpu_is_offline(smp_processor_id())) - cpu_die(); - local_irq_disable(); - if (need_resched()) { - local_irq_enable(); - return; - } local_mcck_disable(); - if (test_thread_flag(TIF_MCCK_PENDING)) { + if (test_cpu_flag(CIF_MCCK_PENDING)) { local_mcck_enable(); local_irq_enable(); - s390_handle_mcck(); return; } - trace_hardirqs_on(); - /* Don't trace preempt off for idle. */ - stop_critical_timings(); - /* Stop virtual timer and halt the cpu. */ + /* Halt the cpu and keep track of cpu time accounting. */ vtime_stop_cpu(); - /* Reenable preemption tracer. */ - start_critical_timings(); + local_irq_enable(); } -void cpu_idle(void) +void arch_cpu_idle_exit(void) { - for (;;) { - tick_nohz_idle_enter(); - rcu_idle_enter(); - while (!need_resched()) - default_idle(); - rcu_idle_exit(); - tick_nohz_idle_exit(); - preempt_enable_no_resched(); - schedule(); - preempt_disable(); - } + if (test_cpu_flag(CIF_MCCK_PENDING)) + s390_handle_mcck(); } -extern void __kprobes kernel_thread_starter(void); - -asm( - ".section .kprobes.text, \"ax\"\n" - ".global kernel_thread_starter\n" - "kernel_thread_starter:\n" - " la 2,0(10)\n" - " basr 14,9\n" - " la 2,0\n" - " br 11\n" - ".previous\n"); - -int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags) +void arch_cpu_idle_dead(void) { - struct pt_regs regs; - - memset(®s, 0, sizeof(regs)); - regs.psw.mask = psw_kernel_bits | - PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK; - regs.psw.addr = (unsigned long) kernel_thread_starter | PSW_ADDR_AMODE; - regs.gprs[9] = (unsigned long) fn; - regs.gprs[10] = (unsigned long) arg; - regs.gprs[11] = (unsigned long) do_exit; - regs.orig_gpr2 = -1; - - /* Ok, create the new process.. */ - return do_fork(flags | CLONE_VM | CLONE_UNTRACED, - 0, ®s, 0, NULL, NULL); + cpu_die(); } -EXPORT_SYMBOL(kernel_thread); + +extern void __kprobes kernel_thread_starter(void); /* * Free current thread data structures etc.. 
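 * On s390 this also releases the runtime instrumentation control block, * see exit_thread_runtime_instr().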
*/ void exit_thread(void) { + exit_thread_runtime_instr(); } void flush_thread(void) @@ -150,8 +104,7 @@ void release_thread(struct task_struct *dead_task) } int copy_thread(unsigned long clone_flags, unsigned long new_stackp, - unsigned long unused, - struct task_struct *p, struct pt_regs *regs) + unsigned long arg, struct task_struct *p) { struct thread_info *ti; struct fake_frame @@ -162,93 +115,82 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp, frame = container_of(task_pt_regs(p), struct fake_frame, childregs); p->thread.ksp = (unsigned long) frame; - /* Store access registers to kernel stack of new process. */ - frame->childregs = *regs; - frame->childregs.gprs[2] = 0; /* child returns 0 on fork. */ - frame->childregs.gprs[15] = new_stackp; - frame->sf.back_chain = 0; + /* Save access registers to new thread structure. */ + save_access_regs(&p->thread.acrs[0]); + /* start new process with ar4 pointing to the correct address space */ + p->thread.mm_segment = get_fs(); + /* Don't copy debug registers */ + memset(&p->thread.per_user, 0, sizeof(p->thread.per_user)); + memset(&p->thread.per_event, 0, sizeof(p->thread.per_event)); + clear_tsk_thread_flag(p, TIF_SINGLE_STEP); + /* Initialize per thread user and system timer values */ + ti = task_thread_info(p); + ti->user_timer = 0; + ti->system_timer = 0; + frame->sf.back_chain = 0; /* new return point is ret_from_fork */ frame->sf.gprs[8] = (unsigned long) ret_from_fork; - /* fake return stack for resume(), don't go back to schedule */ frame->sf.gprs[9] = (unsigned long) frame; - /* Save access registers to new thread structure. */ - save_access_regs(&p->thread.acrs[0]); + /* Store access registers to kernel stack of new process. */ + if (unlikely(p->flags & PF_KTHREAD)) { + /* kernel thread */ + memset(&frame->childregs, 0, sizeof(struct pt_regs)); + frame->childregs.psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT | + PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK; + frame->childregs.psw.addr = PSW_ADDR_AMODE | + (unsigned long) kernel_thread_starter; + frame->childregs.gprs[9] = new_stackp; /* function */ + frame->childregs.gprs[10] = arg; + frame->childregs.gprs[11] = (unsigned long) do_exit; + frame->childregs.orig_gpr2 = -1; + + return 0; + } + frame->childregs = *current_pt_regs(); + frame->childregs.gprs[2] = 0; /* child returns 0 on fork. */ + frame->childregs.flags = 0; + if (new_stackp) + frame->childregs.gprs[15] = new_stackp; + + /* Don't copy runtime instrumentation info */ + p->thread.ri_cb = NULL; + p->thread.ri_signum = 0; + frame->childregs.psw.mask &= ~PSW_MASK_RI; #ifndef CONFIG_64BIT /* * save fprs to current->thread.fp_regs to merge them with * the emulated registers and then copy the result to the child. */ - save_fp_regs(¤t->thread.fp_regs); + save_fp_ctl(¤t->thread.fp_regs.fpc); + save_fp_regs(current->thread.fp_regs.fprs); memcpy(&p->thread.fp_regs, ¤t->thread.fp_regs, sizeof(s390_fp_regs)); /* Set a new TLS ? */ if (clone_flags & CLONE_SETTLS) - p->thread.acrs[0] = regs->gprs[6]; + p->thread.acrs[0] = frame->childregs.gprs[6]; #else /* CONFIG_64BIT */ /* Save the fpu registers to new thread structure. */ - save_fp_regs(&p->thread.fp_regs); + save_fp_ctl(&p->thread.fp_regs.fpc); + save_fp_regs(p->thread.fp_regs.fprs); + p->thread.fp_regs.pad = 0; /* Set a new TLS ? 
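 * (CLONE_SETTLS passes the new TLS value in general register 6)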
*/ if (clone_flags & CLONE_SETTLS) { + unsigned long tls = frame->childregs.gprs[6]; if (is_compat_task()) { - p->thread.acrs[0] = (unsigned int) regs->gprs[6]; + p->thread.acrs[0] = (unsigned int)tls; } else { - p->thread.acrs[0] = (unsigned int)(regs->gprs[6] >> 32); - p->thread.acrs[1] = (unsigned int) regs->gprs[6]; + p->thread.acrs[0] = (unsigned int)(tls >> 32); + p->thread.acrs[1] = (unsigned int)tls; } } #endif /* CONFIG_64BIT */ - /* start new process with ar4 pointing to the correct address space */ - p->thread.mm_segment = get_fs(); - /* Don't copy debug registers */ - memset(&p->thread.per_user, 0, sizeof(p->thread.per_user)); - memset(&p->thread.per_event, 0, sizeof(p->thread.per_event)); - clear_tsk_thread_flag(p, TIF_SINGLE_STEP); - clear_tsk_thread_flag(p, TIF_PER_TRAP); - /* Initialize per thread user and system timer values */ - ti = task_thread_info(p); - ti->user_timer = 0; - ti->system_timer = 0; return 0; } -SYSCALL_DEFINE0(fork) -{ - struct pt_regs *regs = task_pt_regs(current); - return do_fork(SIGCHLD, regs->gprs[15], regs, 0, NULL, NULL); -} - -SYSCALL_DEFINE4(clone, unsigned long, newsp, unsigned long, clone_flags, - int __user *, parent_tidptr, int __user *, child_tidptr) -{ - struct pt_regs *regs = task_pt_regs(current); - - if (!newsp) - newsp = regs->gprs[15]; - return do_fork(clone_flags, newsp, regs, 0, - parent_tidptr, child_tidptr); -} - -/* - * This is trivial, and on the face of it looks like it - * could equally well be done in user mode. - * - * Not so, for quite unobvious reasons - register pressure. - * In user mode vfork() cannot have a stack frame, and if - * done by calling the "clone()" system call directly, you - * do not have enough call-clobbered registers to hold all - * the information you need. - */ -SYSCALL_DEFINE0(vfork) -{ - struct pt_regs *regs = task_pt_regs(current); - return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, - regs->gprs[15], regs, 0, NULL, NULL); -} - asmlinkage void execve_tail(void) { current->thread.fp_regs.fpc = 0; @@ -257,31 +199,6 @@ asmlinkage void execve_tail(void) } /* - * sys_execve() executes a new program. - */ -SYSCALL_DEFINE3(execve, const char __user *, name, - const char __user *const __user *, argv, - const char __user *const __user *, envp) -{ - struct pt_regs *regs = task_pt_regs(current); - char *filename; - long rc; - - filename = getname(name); - rc = PTR_ERR(filename); - if (IS_ERR(filename)) - return rc; - rc = do_execve(filename, argv, envp, regs); - if (rc) - goto out; - execve_tail(); - rc = regs->gprs[2]; -out: - putname(filename); - return rc; -} - -/* * fill in the FPU structure for a core dump. */ int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs) @@ -291,10 +208,12 @@ int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs) * save fprs to current->thread.fp_regs to merge them with * the emulated registers and then copy the result to the dump. */ - save_fp_regs(¤t->thread.fp_regs); + save_fp_ctl(¤t->thread.fp_regs.fpc); + save_fp_regs(current->thread.fp_regs.fprs); memcpy(fpregs, ¤t->thread.fp_regs, sizeof(s390_fp_regs)); #else /* CONFIG_64BIT */ - save_fp_regs(fpregs); + save_fp_ctl(&fpregs->fpc); + save_fp_regs(fpregs->fprs); #endif /* CONFIG_64BIT */ return 1; } @@ -342,20 +261,18 @@ static inline unsigned long brk_rnd(void) unsigned long arch_randomize_brk(struct mm_struct *mm) { - unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd()); + unsigned long ret; - if (ret < mm->brk) - return mm->brk; - return ret; + ret = PAGE_ALIGN(mm->brk + brk_rnd()); + return (ret > mm->brk) ? 
ret : mm->brk; } unsigned long randomize_et_dyn(unsigned long base) { - unsigned long ret = PAGE_ALIGN(base + brk_rnd()); + unsigned long ret; if (!(current->flags & PF_RANDOMIZE)) return base; - if (ret < base) - return base; - return ret; + ret = PAGE_ALIGN(base + brk_rnd()); + return (ret > base) ? ret : base; } diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c index 6e0073e43f5..24612029f45 100644 --- a/arch/s390/kernel/processor.c +++ b/arch/s390/kernel/processor.c @@ -1,6 +1,4 @@ /* - * arch/s390/kernel/processor.c - * * Copyright IBM Corp. 2008 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) */ @@ -23,15 +21,17 @@ static DEFINE_PER_CPU(struct cpuid, cpu_id); /* * cpu_init - initializes state that is per-CPU. */ -void __cpuinit cpu_init(void) +void cpu_init(void) { - struct cpuid *id = &per_cpu(cpu_id, smp_processor_id()); + struct s390_idle_data *idle = &__get_cpu_var(s390_idle); + struct cpuid *id = &__get_cpu_var(cpu_id); get_cpu_id(id); atomic_inc(&init_mm.mm_count); current->active_mm = &init_mm; BUG_ON(current->mm); enter_lazy_tlb(&init_mm, current); + memset(idle, 0, sizeof(*idle)); } /* @@ -39,9 +39,9 @@ void __cpuinit cpu_init(void) */ static int show_cpuinfo(struct seq_file *m, void *v) { - static const char *hwcap_str[10] = { + static const char *hwcap_str[] = { "esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp", - "edat", "etf3eh", "highgprs" + "edat", "etf3eh", "highgprs", "te" }; unsigned long n = (unsigned long) v - 1; int i; @@ -54,10 +54,11 @@ static int show_cpuinfo(struct seq_file *m, void *v) num_online_cpus(), loops_per_jiffy/(500000/HZ), (loops_per_jiffy/(5000/HZ))%100); seq_puts(m, "features\t: "); - for (i = 0; i < 10; i++) + for (i = 0; i < ARRAY_SIZE(hwcap_str); i++) if (hwcap_str[i] && (elf_hwcap & (1UL << i))) seq_printf(m, "%s ", hwcap_str[i]); seq_puts(m, "\n"); + show_cacheinfo(m); } get_online_cpus(); if (cpu_online(n)) { diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index 9d82ed4bcb2..5dc7ad9e2fb 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c @@ -1,7 +1,7 @@ /* * Ptrace user space interface. * - * Copyright IBM Corp. 1999,2010 + * Copyright IBM Corp. 1999, 2010 * Author(s): Denis Joseph Barrow * Martin Schwidefsky (schwidefsky@de.ibm.com) */ @@ -20,15 +20,15 @@ #include <linux/regset.h> #include <linux/tracehook.h> #include <linux/seccomp.h> +#include <linux/compat.h> #include <trace/syscall.h> -#include <asm/compat.h> #include <asm/segment.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/pgalloc.h> -#include <asm/system.h> #include <asm/uaccess.h> #include <asm/unistd.h> +#include <asm/switch_to.h> #include "entry.h" #ifdef CONFIG_COMPAT @@ -42,16 +42,42 @@ enum s390_regset { REGSET_GENERAL, REGSET_FP, REGSET_LAST_BREAK, + REGSET_TDB, REGSET_SYSTEM_CALL, REGSET_GENERAL_EXTENDED, }; -void update_per_regs(struct task_struct *task) +void update_cr_regs(struct task_struct *task) { struct pt_regs *regs = task_pt_regs(task); struct thread_struct *thread = &task->thread; struct per_regs old, new; +#ifdef CONFIG_64BIT + /* Take care of the enable/disable of transactional execution. */ + if (MACHINE_HAS_TE) { + unsigned long cr, cr_new; + + __ctl_store(cr, 0, 0); + /* Set or clear transaction execution TXC bit 8. */ + cr_new = cr | (1UL << 55); + if (task->thread.per_flags & PER_FLAG_NO_TE) + cr_new &= ~(1UL << 55); + if (cr_new != cr) + __ctl_load(cr_new, 0, 0); + /* Set or clear transaction execution TDC bits 62 and 63. 
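 + * The TDC value is derived from the PER_FLAG_TE_ABORT_RAND and + * PER_FLAG_TE_ABORT_RAND_TEND per-task flags, see the + * PTRACE_TE_ABORT_RAND request below.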
*/ + __ctl_store(cr, 2, 2); + cr_new = cr & ~3UL; + if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) { + if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND_TEND) + cr_new |= 1UL; + else + cr_new |= 2UL; + } + if (cr_new != cr) + __ctl_load(cr_new, 2, 2); + } +#endif /* Copy user specified PER registers */ new.control = thread->per_user.control; new.start = thread->per_user.start; @@ -59,7 +85,14 @@ void update_per_regs(struct task_struct *task) /* merge TIF_SINGLE_STEP into user specified PER registers. */ if (test_tsk_thread_flag(task, TIF_SINGLE_STEP)) { - new.control |= PER_EVENT_IFETCH; + if (test_tsk_thread_flag(task, TIF_BLOCK_STEP)) + new.control |= PER_EVENT_BRANCH; + else + new.control |= PER_EVENT_IFETCH; +#ifdef CONFIG_64BIT + new.control |= PER_CONTROL_SUSPENSION; + new.control |= PER_EVENT_TRANSACTION_END; +#endif new.start = 0; new.end = PSW_ADDR_INSN; } @@ -77,16 +110,20 @@ void update_per_regs(struct task_struct *task) void user_enable_single_step(struct task_struct *task) { + clear_tsk_thread_flag(task, TIF_BLOCK_STEP); set_tsk_thread_flag(task, TIF_SINGLE_STEP); - if (task == current) - update_per_regs(task); } void user_disable_single_step(struct task_struct *task) { + clear_tsk_thread_flag(task, TIF_BLOCK_STEP); clear_tsk_thread_flag(task, TIF_SINGLE_STEP); - if (task == current) - update_per_regs(task); +} + +void user_enable_block_step(struct task_struct *task) +{ + set_tsk_thread_flag(task, TIF_SINGLE_STEP); + set_tsk_thread_flag(task, TIF_BLOCK_STEP); } /* @@ -99,7 +136,8 @@ void ptrace_disable(struct task_struct *task) memset(&task->thread.per_user, 0, sizeof(task->thread.per_user)); memset(&task->thread.per_event, 0, sizeof(task->thread.per_event)); clear_tsk_thread_flag(task, TIF_SINGLE_STEP); - clear_tsk_thread_flag(task, TIF_PER_TRAP); + clear_pt_regs_flag(task_pt_regs(task), PIF_PER_TRAP); + task->thread.per_flags = 0; } #ifndef CONFIG_64BIT @@ -168,9 +206,11 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr) * psw and gprs are stored on the stack */ tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr); - if (addr == (addr_t) &dummy->regs.psw.mask) + if (addr == (addr_t) &dummy->regs.psw.mask) { /* Return a clean psw mask. */ - tmp = psw_user_bits | (tmp & PSW_MASK_USER); + tmp &= PSW_MASK_USER | PSW_MASK_RI; + tmp |= PSW_USER_BITS; + } } else if (addr < (addr_t) &dummy->regs.orig_gpr2) { /* @@ -209,8 +249,7 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr) offset = addr - (addr_t) &dummy->regs.fp_regs; tmp = *(addr_t *)((addr_t) &child->thread.fp_regs + offset); if (addr == (addr_t) &dummy->regs.fp_regs.fpc) - tmp &= (unsigned long) FPC_VALID_MASK - << (BITS_PER_LONG - 32); + tmp <<= BITS_PER_LONG - 32; } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) { /* @@ -291,11 +330,20 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data) /* * psw and gprs are stored on the stack */ - if (addr == (addr_t) &dummy->regs.psw.mask && - ((data & ~PSW_MASK_USER) != psw_user_bits || - ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA)))) - /* Invalid psw mask. */ - return -EINVAL; + if (addr == (addr_t) &dummy->regs.psw.mask) { + unsigned long mask = PSW_MASK_USER; + + mask |= is_ri_task(child) ? PSW_MASK_RI : 0; + if ((data ^ PSW_USER_BITS) & ~mask) + /* Invalid psw mask. 
*/ + return -EINVAL; + if ((data & PSW_MASK_ASC) == PSW_ASC_HOME) + /* Invalid address-space-control bits */ + return -EINVAL; + if ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA)) + /* Invalid addressing mode bits */ + return -EINVAL; + } *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data; } else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) { @@ -333,10 +381,10 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data) /* * floating point regs. are stored in the thread structure */ - if (addr == (addr_t) &dummy->regs.fp_regs.fpc && - (data & ~((unsigned long) FPC_VALID_MASK - << (BITS_PER_LONG - 32))) != 0) - return -EINVAL; + if (addr == (addr_t) &dummy->regs.fp_regs.fpc) + if ((unsigned int) data != 0 || + test_fp_ctl(data >> (BITS_PER_LONG - 32))) + return -EINVAL; offset = addr - (addr_t) &dummy->regs.fp_regs; *(addr_t *)((addr_t) &child->thread.fp_regs + offset) = data; @@ -416,6 +464,36 @@ long arch_ptrace(struct task_struct *child, long request, put_user(task_thread_info(child)->last_break, (unsigned long __user *) data); return 0; + case PTRACE_ENABLE_TE: + if (!MACHINE_HAS_TE) + return -EIO; + child->thread.per_flags &= ~PER_FLAG_NO_TE; + return 0; + case PTRACE_DISABLE_TE: + if (!MACHINE_HAS_TE) + return -EIO; + child->thread.per_flags |= PER_FLAG_NO_TE; + child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND; + return 0; + case PTRACE_TE_ABORT_RAND: + if (!MACHINE_HAS_TE || (child->thread.per_flags & PER_FLAG_NO_TE)) + return -EIO; + switch (data) { + case 0UL: + child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND; + break; + case 1UL: + child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND; + child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND_TEND; + break; + case 2UL: + child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND; + child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND_TEND; + break; + default: + return -EINVAL; + } + return 0; default: /* Removing high order bit from addr (only for 31 bit). */ addr &= PSW_ADDR_INSN; @@ -497,7 +575,8 @@ static u32 __peek_user_compat(struct task_struct *child, addr_t addr) if (addr == (addr_t) &dummy32->regs.psw.mask) { /* Fake a 31 bit psw mask. */ tmp = (__u32)(regs->psw.mask >> 32); - tmp = psw32_user_bits | (tmp & PSW32_MASK_USER); + tmp &= PSW32_MASK_USER | PSW32_MASK_RI; + tmp |= PSW32_USER_BITS; } else if (addr == (addr_t) &dummy32->regs.psw.addr) { /* Fake a 31 bit psw address. */ tmp = (__u32) regs->psw.addr | @@ -594,13 +673,19 @@ static int __poke_user_compat(struct task_struct *child, * psw, gprs, acrs and orig_gpr2 are stored on the stack */ if (addr == (addr_t) &dummy32->regs.psw.mask) { + __u32 mask = PSW32_MASK_USER; + + mask |= is_ri_task(child) ? PSW32_MASK_RI : 0; /* Build a 64 bit psw mask from 31 bit mask. */ - if ((tmp & ~PSW32_MASK_USER) != psw32_user_bits) + if ((tmp ^ PSW32_USER_BITS) & ~mask) /* Invalid psw mask. */ return -EINVAL; + if ((data & PSW32_MASK_ASC) == PSW32_ASC_HOME) + /* Invalid address-space-control bits */ + return -EINVAL; regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) | (regs->psw.mask & PSW_MASK_BA) | - (__u64)(tmp & PSW32_MASK_USER) << 32; + (__u64)(tmp & mask) << 32; } else if (addr == (addr_t) &dummy32->regs.psw.addr) { /* Build a 64 bit psw address from 31 bit address. */ regs->psw.addr = (__u64) tmp & PSW32_ADDR_INSN; @@ -636,8 +721,7 @@ static int __poke_user_compat(struct task_struct *child, * floating point regs. 
are stored in the thread structure */ if (addr == (addr_t) &dummy32->regs.fp_regs.fpc && - (tmp & ~FPC_VALID_MASK) != 0) - /* Invalid floating point control. */ + test_fp_ctl(tmp)) return -EINVAL; offset = addr - (addr_t) &dummy32->regs.fp_regs; *(__u32 *)((addr_t) &child->thread.fp_regs + offset) = tmp; @@ -719,7 +803,11 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs) long ret = 0; /* Do the secure computing check first. */ - secure_computing(regs->gprs[2]); + if (secure_computing(regs->gprs[2])) { + /* seccomp failures shouldn't expose any additional code. */ + ret = -1; + goto out; + } /* * The sysc_tracesys code in entry.S stored the system @@ -733,7 +821,7 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs) * debugger stored an invalid system call number. Skip * the system call and the system call restart handling. */ - clear_thread_flag(TIF_SYSCALL); + clear_pt_regs_flag(regs, PIF_SYSCALL); ret = -1; } @@ -745,6 +833,7 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs) regs->gprs[2], regs->orig_gpr2, regs->gprs[3], regs->gprs[4], regs->gprs[5]); +out: return ret ?: regs->gprs[2]; } @@ -830,8 +919,10 @@ static int s390_fpregs_get(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf) { - if (target == current) - save_fp_regs(&target->thread.fp_regs); + if (target == current) { + save_fp_ctl(&target->thread.fp_regs.fpc); + save_fp_regs(target->thread.fp_regs.fprs); + } return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &target->thread.fp_regs, 0, -1); @@ -844,19 +935,21 @@ static int s390_fpregs_set(struct task_struct *target, { int rc = 0; - if (target == current) - save_fp_regs(&target->thread.fp_regs); + if (target == current) { + save_fp_ctl(&target->thread.fp_regs.fpc); + save_fp_regs(target->thread.fp_regs.fprs); + } /* If setting FPC, must validate it first. 
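 * test_fp_ctl() returns non-zero for an invalid floating-point control * word, in which case -EINVAL is returned to the caller.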
*/ if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) { - u32 fpc[2] = { target->thread.fp_regs.fpc, 0 }; - rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpc, + u32 ufpc[2] = { target->thread.fp_regs.fpc, 0 }; + rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ufpc, 0, offsetof(s390_fp_regs, fprs)); if (rc) return rc; - if ((fpc[0] & ~FPC_VALID_MASK) != 0 || fpc[1] != 0) + if (ufpc[1] != 0 || test_fp_ctl(ufpc[0])) return -EINVAL; - target->thread.fp_regs.fpc = fpc[0]; + target->thread.fp_regs.fpc = ufpc[0]; } if (rc == 0 && count > 0) @@ -864,8 +957,10 @@ static int s390_fpregs_set(struct task_struct *target, target->thread.fp_regs.fprs, offsetof(s390_fp_regs, fprs), -1); - if (rc == 0 && target == current) - restore_fp_regs(&target->thread.fp_regs); + if (rc == 0 && target == current) { + restore_fp_ctl(&target->thread.fp_regs.fpc); + restore_fp_regs(target->thread.fp_regs.fprs); + } return rc; } @@ -898,6 +993,28 @@ static int s390_last_break_set(struct task_struct *target, return 0; } +static int s390_tdb_get(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +{ + struct pt_regs *regs = task_pt_regs(target); + unsigned char *data; + + if (!(regs->int_code & 0x200)) + return -ENODATA; + data = target->thread.trap_tdb; + return user_regset_copyout(&pos, &count, &kbuf, &ubuf, data, 0, 256); +} + +static int s390_tdb_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + return 0; +} + #endif static int s390_system_call_get(struct task_struct *target, @@ -946,6 +1063,14 @@ static const struct user_regset s390_regsets[] = { .get = s390_last_break_get, .set = s390_last_break_set, }, + [REGSET_TDB] = { + .core_note_type = NT_S390_TDB, + .n = 1, + .size = 256, + .align = 1, + .get = s390_tdb_get, + .set = s390_tdb_set, + }, #endif [REGSET_SYSTEM_CALL] = { .core_note_type = NT_S390_SYSTEM_CALL, @@ -1143,6 +1268,14 @@ static const struct user_regset s390_compat_regsets[] = { .get = s390_compat_last_break_get, .set = s390_compat_last_break_set, }, + [REGSET_TDB] = { + .core_note_type = NT_S390_TDB, + .n = 1, + .size = 256, + .align = 1, + .get = s390_tdb_get, + .set = s390_tdb_set, + }, [REGSET_SYSTEM_CALL] = { .core_note_type = NT_S390_SYSTEM_CALL, .n = 1, @@ -1196,7 +1329,7 @@ int regs_query_register_offset(const char *name) if (!name || *name != 'r') return -EINVAL; - if (strict_strtoul(name + 1, 10, &offset)) + if (kstrtoul(name + 1, 10, &offset)) return -EINVAL; if (offset >= NUM_GPRS) return -EINVAL; diff --git a/arch/s390/kernel/reipl.S b/arch/s390/kernel/reipl.S index ad67c214be0..dd8016b0477 100644 --- a/arch/s390/kernel/reipl.S +++ b/arch/s390/kernel/reipl.S @@ -1,13 +1,12 @@ /* - * arch/s390/kernel/reipl.S - * * S390 version - * Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Copyright IBM Corp. 
2000 * Author(s): Holger Smolinski (Holger.Smolinski@de.ibm.com) */ #include <linux/linkage.h> #include <asm/asm-offsets.h> +#include <asm/sigp.h> # # store_status: Empty implementation until kdump is supported on 31 bit @@ -60,7 +59,7 @@ ENTRY(do_reipl_asm) bas %r14,.Ldisab-.Lpg0(%r13) .L003: st %r1,__LC_SUBCHANNEL_ID lpsw 0 - sigp 0,0,0(6) + sigp 0,0,SIGP_RESTART .Ldisab: st %r14,.Ldispsw+4-.Lpg0(%r13) lpsw .Ldispsw-.Lpg0(%r13) .align 8 diff --git a/arch/s390/kernel/reipl64.S b/arch/s390/kernel/reipl64.S index 36b32658fb2..dc3b1273c4d 100644 --- a/arch/s390/kernel/reipl64.S +++ b/arch/s390/kernel/reipl64.S @@ -1,11 +1,12 @@ /* - * Copyright IBM Corp 2000,2011 + * Copyright IBM Corp 2000, 2011 * Author(s): Holger Smolinski <Holger.Smolinski@de.ibm.com>, * Denis Joseph Barrow, */ #include <linux/linkage.h> #include <asm/asm-offsets.h> +#include <asm/sigp.h> # # store_status @@ -106,7 +107,7 @@ ENTRY(do_reipl_asm) .L003: st %r1,__LC_SUBCHANNEL_ID lhi %r1,0 # mode 0 = esa slr %r0,%r0 # set cpuid to zero - sigp %r1,%r0,0x12 # switch to esa mode + sigp %r1,%r0,SIGP_SET_ARCHITECTURE # switch to esa mode lpsw 0 .Ldisab: sll %r14,1 srl %r14,1 # need to kill hi bit to avoid specification exceptions. diff --git a/arch/s390/kernel/relocate_kernel.S b/arch/s390/kernel/relocate_kernel.S index c91d70aede9..f4e6f20e117 100644 --- a/arch/s390/kernel/relocate_kernel.S +++ b/arch/s390/kernel/relocate_kernel.S @@ -1,7 +1,5 @@ /* - * arch/s390/kernel/relocate_kernel.S - * - * (C) Copyright IBM Corp. 2005 + * Copyright IBM Corp. 2005 * * Author(s): Rolf Adelsberger, * Heiko Carstens <heiko.carstens@de.ibm.com> @@ -9,6 +7,7 @@ */ #include <linux/linkage.h> +#include <asm/sigp.h> /* * moves the new kernel to its destination... @@ -93,7 +92,7 @@ ENTRY(relocate_kernel) .no_diag308: sr %r1,%r1 # clear %r1 sr %r2,%r2 # clear %r2 - sigp %r1,%r2,0x12 # set cpuid to zero + sigp %r1,%r2,SIGP_SET_ARCHITECTURE # set cpuid to zero lpsw 0 # hopefully start new kernel... .align 8 diff --git a/arch/s390/kernel/relocate_kernel64.S b/arch/s390/kernel/relocate_kernel64.S index 7c3ce589a7f..cfac28330b0 100644 --- a/arch/s390/kernel/relocate_kernel64.S +++ b/arch/s390/kernel/relocate_kernel64.S @@ -1,7 +1,5 @@ /* - * arch/s390/kernel/relocate_kernel64.S - * - * (C) Copyright IBM Corp. 2005 + * Copyright IBM Corp. 2005 * * Author(s): Rolf Adelsberger, * Heiko Carstens <heiko.carstens@de.ibm.com> @@ -9,6 +7,7 @@ */ #include <linux/linkage.h> +#include <asm/sigp.h> /* * moves the new kernel to its destination... @@ -45,7 +44,7 @@ ENTRY(relocate_kernel) diag %r0,%r0,0x308 .back: lhi %r1,1 # mode 1 = esame - sigp %r1,%r0,0x12 # switch to esame mode + sigp %r1,%r0,SIGP_SET_ARCHITECTURE # switch to esame mode sam64 # switch to 64 bit addressing mode basr %r13,0 .back_base: @@ -96,7 +95,7 @@ ENTRY(relocate_kernel) sam31 # 31 bit mode sr %r1,%r1 # erase register r1 sr %r2,%r2 # erase register r2 - sigp %r1,%r2,0x12 # set cpuid to zero + sigp %r1,%r2,SIGP_SET_ARCHITECTURE # set cpuid to zero lpsw 0 # hopefully start new kernel... .align 8 diff --git a/arch/s390/kernel/runtime_instr.c b/arch/s390/kernel/runtime_instr.c new file mode 100644 index 00000000000..26b4ae96fdd --- /dev/null +++ b/arch/s390/kernel/runtime_instr.c @@ -0,0 +1,149 @@ +/* + * Copyright IBM Corp. 
2012 + * Author(s): Jan Glauber <jang@linux.vnet.ibm.com> + */ + +#include <linux/kernel.h> +#include <linux/syscalls.h> +#include <linux/signal.h> +#include <linux/mm.h> +#include <linux/slab.h> +#include <linux/init.h> +#include <linux/errno.h> +#include <linux/kernel_stat.h> +#include <asm/runtime_instr.h> +#include <asm/cpu_mf.h> +#include <asm/irq.h> + +/* empty control block to disable RI by loading it */ +struct runtime_instr_cb runtime_instr_empty_cb; + +static int runtime_instr_avail(void) +{ + return test_facility(64); +} + +static void disable_runtime_instr(void) +{ + struct pt_regs *regs = task_pt_regs(current); + + load_runtime_instr_cb(&runtime_instr_empty_cb); + + /* + * Make sure the RI bit is deleted from the PSW. If the user did not + * switch off RI before the system call the process will get a + * specification exception otherwise. + */ + regs->psw.mask &= ~PSW_MASK_RI; +} + +static void init_runtime_instr_cb(struct runtime_instr_cb *cb) +{ + cb->buf_limit = 0xfff; + cb->int_requested = 1; + cb->pstate = 1; + cb->pstate_set_buf = 1; + cb->pstate_sample = 1; + cb->pstate_collect = 1; + cb->key = PAGE_DEFAULT_KEY; + cb->valid = 1; +} + +void exit_thread_runtime_instr(void) +{ + struct task_struct *task = current; + + if (!task->thread.ri_cb) + return; + disable_runtime_instr(); + kfree(task->thread.ri_cb); + task->thread.ri_signum = 0; + task->thread.ri_cb = NULL; +} + +static void runtime_instr_int_handler(struct ext_code ext_code, + unsigned int param32, unsigned long param64) +{ + struct siginfo info; + + if (!(param32 & CPU_MF_INT_RI_MASK)) + return; + + inc_irq_stat(IRQEXT_CMR); + + if (!current->thread.ri_cb) + return; + if (current->thread.ri_signum < SIGRTMIN || + current->thread.ri_signum > SIGRTMAX) { + WARN_ON_ONCE(1); + return; + } + + memset(&info, 0, sizeof(info)); + info.si_signo = current->thread.ri_signum; + info.si_code = SI_QUEUE; + if (param32 & CPU_MF_INT_RI_BUF_FULL) + info.si_int = ENOBUFS; + else if (param32 & CPU_MF_INT_RI_HALTED) + info.si_int = ECANCELED; + else + return; /* unknown reason */ + + send_sig_info(current->thread.ri_signum, &info, current); +} + +SYSCALL_DEFINE2(s390_runtime_instr, int, command, int, signum) +{ + struct runtime_instr_cb *cb; + + if (!runtime_instr_avail()) + return -EOPNOTSUPP; + + if (command == S390_RUNTIME_INSTR_STOP) { + preempt_disable(); + exit_thread_runtime_instr(); + preempt_enable(); + return 0; + } + + if (command != S390_RUNTIME_INSTR_START || + (signum < SIGRTMIN || signum > SIGRTMAX)) + return -EINVAL; + + if (!current->thread.ri_cb) { + cb = kzalloc(sizeof(*cb), GFP_KERNEL); + if (!cb) + return -ENOMEM; + } else { + cb = current->thread.ri_cb; + memset(cb, 0, sizeof(*cb)); + } + + init_runtime_instr_cb(cb); + current->thread.ri_signum = signum; + + /* now load the control block to make it available */ + preempt_disable(); + current->thread.ri_cb = cb; + load_runtime_instr_cb(cb); + preempt_enable(); + return 0; +} + +static int __init runtime_instr_init(void) +{ + int rc; + + if (!runtime_instr_avail()) + return 0; + + irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT); + rc = register_external_irq(EXT_IRQ_MEASURE_ALERT, + runtime_instr_int_handler); + if (rc) + irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT); + else + pr_info("Runtime instrumentation facility initialized\n"); + return rc; +} +device_initcall(runtime_instr_init); diff --git a/arch/s390/kernel/s390_ksyms.c b/arch/s390/kernel/s390_ksyms.c index 57b536649b0..9f60467938d 100644 --- a/arch/s390/kernel/s390_ksyms.c +++ 
b/arch/s390/kernel/s390_ksyms.c @@ -5,6 +5,9 @@ #ifdef CONFIG_FUNCTION_TRACER EXPORT_SYMBOL(_mcount); #endif -#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) +#if IS_ENABLED(CONFIG_KVM) EXPORT_SYMBOL(sie64a); +EXPORT_SYMBOL(sie_exit); #endif +EXPORT_SYMBOL(memcpy); +EXPORT_SYMBOL(memset); diff --git a/arch/s390/kernel/sclp.S b/arch/s390/kernel/sclp.S index 95792d846bb..a41f2c99dcc 100644 --- a/arch/s390/kernel/sclp.S +++ b/arch/s390/kernel/sclp.S @@ -1,7 +1,7 @@ /* * Mini SCLP driver. * - * Copyright IBM Corp. 2004,2009 + * Copyright IBM Corp. 2004, 2009 * * Author(s): Peter Oberparleiter <Peter.Oberparleiter@de.ibm.com>, * Heiko Carstens <heiko.carstens@de.ibm.com>, @@ -9,6 +9,7 @@ */ #include <linux/linkage.h> +#include <asm/irq.h> LC_EXT_NEW_PSW = 0x58 # addr of ext int handler LC_EXT_NEW_PSW_64 = 0x1b0 # addr of ext int handler 64 bit @@ -44,6 +45,12 @@ _sclp_wait_int: #endif mvc .LoldpswS1-.LbaseS1(16,%r13),0(%r8) mvc 0(16,%r8),0(%r9) +#ifdef CONFIG_64BIT + epsw %r6,%r7 # set current addressing mode + nill %r6,0x1 # in new psw (31 or 64 bit mode) + nilh %r7,0x8000 + stm %r6,%r7,0(%r8) +#endif lhi %r6,0x0200 # cr mask for ext int (cr0.54) ltr %r2,%r2 jz .LsetctS1 @@ -67,9 +74,9 @@ _sclp_wait_int: lpsw .LwaitpswS1-.LbaseS1(%r13) # wait until interrupt .LwaitS1: lh %r7,LC_EXT_INT_CODE - chi %r7,0x1004 # timeout? + chi %r7,EXT_IRQ_CLK_COMP # timeout? je .LtimeoutS1 - chi %r7,0x2401 # service int? + chi %r7,EXT_IRQ_SERVICE_SIG # service int? jne .LloopS1 sr %r2,%r2 l %r3,LC_EXT_INT_PARAM @@ -87,7 +94,7 @@ _sclp_wait_int: .long 0x00080000, 0x80000000+.LwaitS1 # PSW to handle ext int #ifdef CONFIG_64BIT .LextpswS1_64: - .quad 0x0000000180000000, .LwaitS1 # PSW to handle ext int, 64 bit + .quad 0, .LwaitS1 # PSW to handle ext int, 64 bit #endif .LwaitpswS1: .long 0x010a0000, 0x00000000+.LloopS1 # PSW to wait for ext int @@ -219,7 +226,7 @@ _sclp_print: ahi %r2,1 ltr %r0,%r0 # end of string? jz .LfinalizemtoS4 - chi %r0,0x15 # end of line (NL)? + chi %r0,0x0a # end of line (NL)? jz .LfinalizemtoS4 stc %r0,0(%r6,%r7) # copy to mto la %r11,0(%r6,%r7) diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 354de0763ef..1e2264b46e4 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -1,8 +1,6 @@ /* - * arch/s390/kernel/setup.c - * * S390 version - * Copyright (C) IBM Corp. 1999,2010 + * Copyright IBM Corp. 1999, 2012 * Author(s): Hartmut Penner (hp@de.ibm.com), * Martin Schwidefsky (schwidefsky@de.ibm.com) * @@ -18,7 +16,7 @@ #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/errno.h> -#include <linux/module.h> +#include <linux/export.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/memblock.h> @@ -46,10 +44,10 @@ #include <linux/kexec.h> #include <linux/crash_dump.h> #include <linux/memory.h> +#include <linux/compat.h> #include <asm/ipl.h> -#include <asm/uaccess.h> -#include <asm/system.h> +#include <asm/facility.h> #include <asm/smp.h> #include <asm/mmu_context.h> #include <asm/cpcmd.h> @@ -59,21 +57,11 @@ #include <asm/ptrace.h> #include <asm/sections.h> #include <asm/ebcdic.h> -#include <asm/compat.h> #include <asm/kvm_virtio.h> #include <asm/diag.h> - -long psw_kernel_bits = PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_ASC_PRIMARY | - PSW_MASK_EA | PSW_MASK_BA; -long psw_user_bits = PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | - PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_MCHECK | - PSW_MASK_PSTATE | PSW_ASC_HOME; - -/* - * User copy operations. 
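
For context on the new runtime_instr.c added above: user space arms the facility through the s390_runtime_instr syscall and is told about buffer-full or halted events via the real-time signal it registered, with si_int set to ENOBUFS or ECANCELED. A hedged sketch of a caller follows; the syscall number and command values are assumptions for illustration (the real ones live in asm/unistd.h and asm/runtime_instr.h):

    #include <signal.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    #define NR_S390_RUNTIME_INSTR   342     /* assumed; see asm/unistd.h */
    #define RI_START                1       /* assumed S390_RUNTIME_INSTR_START */
    #define RI_STOP                 2       /* assumed S390_RUNTIME_INSTR_STOP */

    static void ri_event(int sig, siginfo_t *info, void *ctx)
    {
            /* info->si_int is ENOBUFS (buffer full) or ECANCELED (halted) */
    }

    int main(void)
    {
            struct sigaction sa = {
                    .sa_sigaction   = ri_event,
                    .sa_flags       = SA_SIGINFO,
            };

            sigaction(SIGRTMIN, &sa, NULL);
            if (syscall(NR_S390_RUNTIME_INSTR, RI_START, SIGRTMIN) < 0)
                    perror("s390_runtime_instr");
            /* ... run the instrumented workload here ... */
            syscall(NR_S390_RUNTIME_INSTR, RI_STOP, 0);
            return 0;
    }
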
- */ -struct uaccess_ops uaccess; -EXPORT_SYMBOL(uaccess); +#include <asm/os_info.h> +#include <asm/sclp.h> +#include "entry.h" /* * Machine setup.. @@ -90,10 +78,9 @@ EXPORT_SYMBOL(console_irq); unsigned long elf_hwcap = 0; char elf_platform[ELF_PLATFORM_SIZE]; -struct mem_chunk __initdata memory_chunk[MEMORY_CHUNKS]; - int __initdata memory_end_set; unsigned long __initdata memory_end; +unsigned long __initdata max_physmem_end; unsigned long VMALLOC_START; EXPORT_SYMBOL(VMALLOC_START); @@ -104,6 +91,11 @@ EXPORT_SYMBOL(VMALLOC_END); struct page *vmemmap; EXPORT_SYMBOL(vmemmap); +#ifdef CONFIG_64BIT +unsigned long MODULES_VADDR; +unsigned long MODULES_END; +#endif + /* An array with a pointer to the lowcore of every CPU. */ struct _lowcore *lowcore_ptr[NR_CPUS]; EXPORT_SYMBOL(lowcore_ptr); @@ -136,9 +128,14 @@ __setup("condev=", condev_setup); static void __init set_preferred_console(void) { - if (MACHINE_IS_KVM) - add_preferred_console("hvc", 0, NULL); - else if (CONSOLE_IS_3215 || CONSOLE_IS_SCLP) + if (MACHINE_IS_KVM) { + if (sclp_has_vt220()) + add_preferred_console("ttyS", 1, NULL); + else if (sclp_has_linemode()) + add_preferred_console("ttyS", 0, NULL); + else + add_preferred_console("hvc", 0, NULL); + } else if (CONSOLE_IS_3215 || CONSOLE_IS_SCLP) add_preferred_console("ttyS", 0, NULL); else if (CONSOLE_IS_3270) add_preferred_console("tty3270", 0, NULL); @@ -214,27 +211,19 @@ static void __init conmode_default(void) } } -#ifdef CONFIG_ZFCPDUMP -static void __init setup_zfcpdump(unsigned int console_devno) +#ifdef CONFIG_CRASH_DUMP +static void __init setup_zfcpdump(void) { - static char str[41]; - if (ipl_info.type != IPL_TYPE_FCP_DUMP) return; if (OLDMEM_BASE) return; - if (console_devno != -1) - sprintf(str, " cio_ignore=all,!0.0.%04x,!0.0.%04x", - ipl_info.data.fcp.dev_id.devno, console_devno); - else - sprintf(str, " cio_ignore=all,!0.0.%04x", - ipl_info.data.fcp.dev_id.devno); - strcat(boot_command_line, str); + strcat(boot_command_line, " cio_ignore=all,!ipldev,!condev"); console_loglevel = 2; } #else -static inline void setup_zfcpdump(unsigned int console_devno) {} -#endif /* CONFIG_ZFCPDUMP */ +static inline void setup_zfcpdump(void) {} +#endif /* CONFIG_CRASH_DUMP */ /* * Reboot, halt and power_off stubs. They just call _machine_restart, @@ -278,10 +267,12 @@ void machine_power_off(void) * Dummy power off function. */ void (*pm_power_off)(void) = machine_power_off; +EXPORT_SYMBOL_GPL(pm_power_off); static int __init early_parse_mem(char *p) { memory_end = memparse(p, &p); + memory_end &= PAGE_MASK; memory_end_set = 1; return 0; } @@ -296,63 +287,9 @@ static int __init parse_vmalloc(char *arg) } early_param("vmalloc", parse_vmalloc); -unsigned int user_mode = HOME_SPACE_MODE; -EXPORT_SYMBOL_GPL(user_mode); - -static int set_amode_primary(void) -{ - psw_kernel_bits = (psw_kernel_bits & ~PSW_MASK_ASC) | PSW_ASC_HOME; - psw_user_bits = (psw_user_bits & ~PSW_MASK_ASC) | PSW_ASC_PRIMARY; -#ifdef CONFIG_COMPAT - psw32_user_bits = - (psw32_user_bits & ~PSW32_MASK_ASC) | PSW32_ASC_PRIMARY; -#endif - - if (MACHINE_HAS_MVCOS) { - memcpy(&uaccess, &uaccess_mvcos_switch, sizeof(uaccess)); - return 1; - } else { - memcpy(&uaccess, &uaccess_pt, sizeof(uaccess)); - return 0; - } -} - -/* - * Switch kernel/user addressing modes? 
- */ -static int __init early_parse_switch_amode(char *p) -{ - user_mode = PRIMARY_SPACE_MODE; - return 0; -} -early_param("switch_amode", early_parse_switch_amode); - -static int __init early_parse_user_mode(char *p) -{ - if (p && strcmp(p, "primary") == 0) - user_mode = PRIMARY_SPACE_MODE; - else if (!p || strcmp(p, "home") == 0) - user_mode = HOME_SPACE_MODE; - else - return 1; - return 0; -} -early_param("user_mode", early_parse_user_mode); - -static void setup_addressing_mode(void) -{ - if (user_mode == PRIMARY_SPACE_MODE) { - if (set_amode_primary()) - pr_info("Address spaces switched, " - "mvcos available\n"); - else - pr_info("Address spaces switched, " - "mvcos not available\n"); - } -} +void *restart_stack __attribute__((__section__(".data"))); -static void __init -setup_lowcore(void) +static void __init setup_lowcore(void) { struct _lowcore *lc; @@ -361,32 +298,35 @@ setup_lowcore(void) */ BUILD_BUG_ON(sizeof(struct _lowcore) != LC_PAGES * 4096); lc = __alloc_bootmem_low(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0); - lc->restart_psw.mask = psw_kernel_bits; + lc->restart_psw.mask = PSW_KERNEL_BITS; lc->restart_psw.addr = - PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler; - lc->external_new_psw.mask = psw_kernel_bits | + PSW_ADDR_AMODE | (unsigned long) restart_int_handler; + lc->external_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT | PSW_MASK_MCHECK; lc->external_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) ext_int_handler; - lc->svc_new_psw.mask = psw_kernel_bits | + lc->svc_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK; lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call; - lc->program_new_psw.mask = psw_kernel_bits | + lc->program_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT | PSW_MASK_MCHECK; lc->program_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) pgm_check_handler; - lc->mcck_new_psw.mask = psw_kernel_bits; + lc->mcck_new_psw.mask = PSW_KERNEL_BITS; lc->mcck_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) mcck_int_handler; - lc->io_new_psw.mask = psw_kernel_bits | + lc->io_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT | PSW_MASK_MCHECK; lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler; lc->clock_comparator = -1ULL; - lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE; + lc->kernel_stack = ((unsigned long) &init_thread_union) + + THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs); lc->async_stack = (unsigned long) - __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0) + ASYNC_SIZE; + __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0) + + ASYNC_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs); lc->panic_stack = (unsigned long) - __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0) + PAGE_SIZE; + __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0) + + PAGE_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs); lc->current_task = (unsigned long) init_thread_union.thread_info.task; lc->thread_info = (unsigned long) &init_thread_union; lc->machine_flags = S390_lowcore.machine_flags; @@ -412,6 +352,31 @@ setup_lowcore(void) lc->last_update_timer = S390_lowcore.last_update_timer; lc->last_update_clock = S390_lowcore.last_update_clock; lc->ftrace_func = S390_lowcore.ftrace_func; + + restart_stack = __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0); + restart_stack += ASYNC_SIZE; + + /* + * Set up PSW restart to call ipl.c:do_restart(). Copy the relevant + * restart data to the absolute zero lowcore. This is necessary if + * PSW restart is done on an offline CPU that has lowcore zero. 
+ */ + lc->restart_stack = (unsigned long) restart_stack; + lc->restart_fn = (unsigned long) do_restart; + lc->restart_data = 0; + lc->restart_source = -1UL; + + /* Setup absolute zero lowcore */ + mem_assign_absolute(S390_lowcore.restart_stack, lc->restart_stack); + mem_assign_absolute(S390_lowcore.restart_fn, lc->restart_fn); + mem_assign_absolute(S390_lowcore.restart_data, lc->restart_data); + mem_assign_absolute(S390_lowcore.restart_source, lc->restart_source); + mem_assign_absolute(S390_lowcore.restart_psw, lc->restart_psw); + +#ifdef CONFIG_SMP + lc->spinlock_lockval = arch_spin_lockval(0); +#endif + set_prefix((u32)(unsigned long) lc); lowcore_ptr[0] = lc; } @@ -440,7 +405,8 @@ static struct resource __initdata *standard_resources[] = { static void __init setup_resources(void) { struct resource *res, *std_res, *sub_res; - int i, j; + struct memblock_region *reg; + int j; code_resource.start = (unsigned long) &_text; code_resource.end = (unsigned long) &_etext - 1; @@ -449,28 +415,13 @@ static void __init setup_resources(void) bss_resource.start = (unsigned long) &__bss_start; bss_resource.end = (unsigned long) &__bss_stop - 1; - for (i = 0; i < MEMORY_CHUNKS; i++) { - if (!memory_chunk[i].size) - continue; - if (memory_chunk[i].type == CHUNK_OLDMEM || - memory_chunk[i].type == CHUNK_CRASHK) - continue; + for_each_memblock(memory, reg) { res = alloc_bootmem_low(sizeof(*res)); res->flags = IORESOURCE_BUSY | IORESOURCE_MEM; - switch (memory_chunk[i].type) { - case CHUNK_READ_WRITE: - case CHUNK_CRASHK: - res->name = "System RAM"; - break; - case CHUNK_READ_ONLY: - res->name = "System ROM"; - res->flags |= IORESOURCE_READONLY; - break; - default: - res->name = "reserved"; - } - res->start = memory_chunk[i].addr; - res->end = res->start + memory_chunk[i].size - 1; + + res->name = "System RAM"; + res->start = reg->base; + res->end = reg->base + reg->size - 1; request_resource(&iomem_resource, res); for (j = 0; j < ARRAY_SIZE(standard_resources); j++) { @@ -491,212 +442,56 @@ static void __init setup_resources(void) } } -unsigned long real_memory_size; -EXPORT_SYMBOL_GPL(real_memory_size); - static void __init setup_memory_end(void) { unsigned long vmax, vmalloc_size, tmp; - int i; - - -#ifdef CONFIG_ZFCPDUMP - if (ipl_info.type == IPL_TYPE_FCP_DUMP && !OLDMEM_BASE) { - memory_end = ZFCPDUMP_HSA_SIZE; - memory_end_set = 1; - } -#endif - real_memory_size = 0; - memory_end &= PAGE_MASK; - - /* - * Make sure all chunks are MAX_ORDER aligned so we don't need the - * extra checks that HOLES_IN_ZONE would require. - */ - for (i = 0; i < MEMORY_CHUNKS; i++) { - unsigned long start, end; - struct mem_chunk *chunk; - unsigned long align; - - chunk = &memory_chunk[i]; - align = 1UL << (MAX_ORDER + PAGE_SHIFT - 1); - start = (chunk->addr + align - 1) & ~(align - 1); - end = (chunk->addr + chunk->size) & ~(align - 1); - if (start >= end) - memset(chunk, 0, sizeof(*chunk)); - else { - chunk->addr = start; - chunk->size = end - start; - } - real_memory_size = max(real_memory_size, - chunk->addr + chunk->size); - } /* Choose kernel address space layout: 2, 3, or 4 levels. 
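
The setup_memory_end() hunk below picks between 3- and 4-level kernel page tables from the size of the identity map, the struct page array, and the vmalloc area. Worked numbers for a 64 GiB machine, assuming 4 KiB pages and a 64-byte struct page (editor's illustration):

    static int pick_page_table_levels(void)
    {
            unsigned long mem     = 64UL << 30;     /* assume 64 GiB of RAM */
            unsigned long vmalloc = 128UL << 30;    /* default vmalloc area */
            unsigned long pages   = mem >> 12;      /* 16M 4 KiB pages */
            unsigned long tmp     = pages * (64 + 4096) + vmalloc;

            /* ~65 GiB of map+memmap plus 128 GiB of vmalloc is ~193 GiB,
             * well under 1UL << 42 (4 TiB), so three levels suffice here;
             * larger configurations fall through to four levels. */
            return (tmp <= (1UL << 42)) ? 3 : 4;
    }
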
*/ #ifdef CONFIG_64BIT - vmalloc_size = VMALLOC_END ?: 128UL << 30; - tmp = (memory_end ?: real_memory_size) / PAGE_SIZE; + vmalloc_size = VMALLOC_END ?: (128UL << 30) - MODULES_LEN; + tmp = (memory_end ?: max_physmem_end) / PAGE_SIZE; tmp = tmp * (sizeof(struct page) + PAGE_SIZE) + vmalloc_size; if (tmp <= (1UL << 42)) vmax = 1UL << 42; /* 3-level kernel page table */ else vmax = 1UL << 53; /* 4-level kernel page table */ + /* module area is at the end of the kernel address space. */ + MODULES_END = vmax; + MODULES_VADDR = MODULES_END - MODULES_LEN; + VMALLOC_END = MODULES_VADDR; #else vmalloc_size = VMALLOC_END ?: 96UL << 20; vmax = 1UL << 31; /* 2-level kernel page table */ -#endif /* vmalloc area is at the end of the kernel address space. */ VMALLOC_END = vmax; +#endif VMALLOC_START = vmax - vmalloc_size; /* Split remaining virtual space between 1:1 mapping & vmemmap array */ tmp = VMALLOC_START / (PAGE_SIZE + sizeof(struct page)); + /* vmemmap contains a multiple of PAGES_PER_SECTION struct pages */ + tmp = SECTION_ALIGN_UP(tmp); tmp = VMALLOC_START - tmp * sizeof(struct page); tmp &= ~((vmax >> 11) - 1); /* align to page table level */ tmp = min(tmp, 1UL << MAX_PHYSMEM_BITS); vmemmap = (struct page *) tmp; /* Take care that memory_end is set and <= vmemmap */ - memory_end = min(memory_end ?: real_memory_size, tmp); + memory_end = min(memory_end ?: max_physmem_end, tmp); + max_pfn = max_low_pfn = PFN_DOWN(memory_end); + memblock_remove(memory_end, ULONG_MAX); - /* Fixup memory chunk array to fit into 0..memory_end */ - for (i = 0; i < MEMORY_CHUNKS; i++) { - struct mem_chunk *chunk = &memory_chunk[i]; - - if (chunk->addr >= memory_end) { - memset(chunk, 0, sizeof(*chunk)); - continue; - } - if (chunk->addr + chunk->size > memory_end) - chunk->size = memory_end - chunk->addr; - } -} - -void *restart_stack __attribute__((__section__(".data"))); - -/* - * Setup new PSW and allocate stack for PSW restart interrupt - */ -static void __init setup_restart_psw(void) -{ - psw_t psw; - - restart_stack = __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0); - restart_stack += ASYNC_SIZE; - - /* - * Setup restart PSW for absolute zero lowcore. 
This is necesary - * if PSW restart is done on an offline CPU that has lowcore zero - */ - psw.mask = PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA | PSW_MASK_BA; - psw.addr = PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler; - copy_to_absolute_zero(&S390_lowcore.restart_psw, &psw, sizeof(psw)); + pr_notice("Max memory size: %luMB\n", memory_end >> 20); } static void __init setup_vmcoreinfo(void) { -#ifdef CONFIG_KEXEC - unsigned long ptr = paddr_vmcoreinfo_note(); - - copy_to_absolute_zero(&S390_lowcore.vmcore_info, &ptr, sizeof(ptr)); -#endif + mem_assign_absolute(S390_lowcore.vmcore_info, paddr_vmcoreinfo_note()); } #ifdef CONFIG_CRASH_DUMP /* - * Find suitable location for crashkernel memory - */ -static unsigned long __init find_crash_base(unsigned long crash_size, - char **msg) -{ - unsigned long crash_base; - struct mem_chunk *chunk; - int i; - - if (memory_chunk[0].size < crash_size) { - *msg = "first memory chunk must be at least crashkernel size"; - return 0; - } - if (OLDMEM_BASE && crash_size == OLDMEM_SIZE) - return OLDMEM_BASE; - - for (i = MEMORY_CHUNKS - 1; i >= 0; i--) { - chunk = &memory_chunk[i]; - if (chunk->size == 0) - continue; - if (chunk->type != CHUNK_READ_WRITE) - continue; - if (chunk->size < crash_size) - continue; - crash_base = (chunk->addr + chunk->size) - crash_size; - if (crash_base < crash_size) - continue; - if (crash_base < ZFCPDUMP_HSA_SIZE_MAX) - continue; - if (crash_base < (unsigned long) INITRD_START + INITRD_SIZE) - continue; - return crash_base; - } - *msg = "no suitable area found"; - return 0; -} - -/* - * Check if crash_base and crash_size is valid - */ -static int __init verify_crash_base(unsigned long crash_base, - unsigned long crash_size, - char **msg) -{ - struct mem_chunk *chunk; - int i; - - /* - * Because we do the swap to zero, we must have at least 'crash_size' - * bytes free space before crash_base - */ - if (crash_size > crash_base) { - *msg = "crashkernel offset must be greater than size"; - return -EINVAL; - } - - /* First memory chunk must be at least crash_size */ - if (memory_chunk[0].size < crash_size) { - *msg = "first memory chunk must be at least crashkernel size"; - return -EINVAL; - } - /* Check if we fit into the respective memory chunk */ - for (i = 0; i < MEMORY_CHUNKS; i++) { - chunk = &memory_chunk[i]; - if (chunk->size == 0) - continue; - if (crash_base < chunk->addr) - continue; - if (crash_base >= chunk->addr + chunk->size) - continue; - /* we have found the memory chunk */ - if (crash_base + crash_size > chunk->addr + chunk->size) { - *msg = "selected memory chunk is too small for " - "crashkernel memory"; - return -EINVAL; - } - return 0; - } - *msg = "invalid memory range specified"; - return -EINVAL; -} - -/* - * Reserve kdump memory by creating a memory hole in the mem_chunk array - */ -static void __init reserve_kdump_bootmem(unsigned long addr, unsigned long size, - int type) -{ - create_mem_hole(memory_chunk, addr, size, type); -} - -/* * When kdump is enabled, we have to ensure that no memory from * the area [0 - crashkernel memory size] and * [crashk_res.start - crashk_res.end] is set offline. 
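
The next hunk shows only the tail of kdump_mem_nb; the callback it points to is elided from this diff. A sketch of what such a notifier plausibly does (not the patch's exact body) is to veto offlining of the two protected ranges named in the comment above:

    static int kdump_mem_notifier(struct notifier_block *nb,
                                  unsigned long action, void *data)
    {
            struct memory_notify *arg = data;

            if (action != MEM_GOING_OFFLINE)
                    return NOTIFY_OK;
            /* Veto offlining of the low memory that kdump copies out ... */
            if (arg->start_pfn < PFN_DOWN(resource_size(&crashk_res)))
                    return NOTIFY_BAD;
            /* ... and of the crashkernel reservation itself. */
            if (arg->start_pfn <= PFN_DOWN(crashk_res.end) &&
                arg->start_pfn + arg->nr_pages > PFN_DOWN(crashk_res.start))
                    return NOTIFY_BAD;
            return NOTIFY_OK;
    }
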
@@ -722,21 +517,44 @@ static struct notifier_block kdump_mem_nb = { #endif /* + * Make sure that the area behind memory_end is protected + */ +static void reserve_memory_end(void) +{ +#ifdef CONFIG_CRASH_DUMP + if (ipl_info.type == IPL_TYPE_FCP_DUMP && + !OLDMEM_BASE && sclp_get_hsa_size()) { + memory_end = sclp_get_hsa_size(); + memory_end &= PAGE_MASK; + memory_end_set = 1; + } +#endif + if (!memory_end_set) + return; + memblock_reserve(memory_end, ULONG_MAX); +} + +/* * Make sure that oldmem, where the dump is stored, is protected */ static void reserve_oldmem(void) { #ifdef CONFIG_CRASH_DUMP - if (!OLDMEM_BASE) - return; + if (OLDMEM_BASE) + /* Forget all memory above the running kdump system */ + memblock_reserve(OLDMEM_SIZE, (phys_addr_t)ULONG_MAX); +#endif +} - reserve_kdump_bootmem(OLDMEM_BASE, OLDMEM_SIZE, CHUNK_OLDMEM); - reserve_kdump_bootmem(OLDMEM_SIZE, memory_end - OLDMEM_SIZE, - CHUNK_OLDMEM); - if (OLDMEM_BASE + OLDMEM_SIZE == real_memory_size) - saved_max_pfn = PFN_DOWN(OLDMEM_BASE) - 1; - else - saved_max_pfn = PFN_DOWN(real_memory_size) - 1; +/* + * Make sure that oldmem, where the dump is stored, is protected + */ +static void remove_oldmem(void) +{ +#ifdef CONFIG_CRASH_DUMP + if (OLDMEM_BASE) + /* Forget all memory above the running kdump system */ + memblock_remove(OLDMEM_SIZE, (phys_addr_t)ULONG_MAX); #endif } @@ -747,170 +565,132 @@ static void __init reserve_crashkernel(void) { #ifdef CONFIG_CRASH_DUMP unsigned long long crash_base, crash_size; - char *msg; + phys_addr_t low, high; int rc; rc = parse_crashkernel(boot_command_line, memory_end, &crash_size, &crash_base); - if (rc || crash_size == 0) - return; + crash_base = ALIGN(crash_base, KEXEC_CRASH_MEM_ALIGN); crash_size = ALIGN(crash_size, KEXEC_CRASH_MEM_ALIGN); - if (register_memory_notifier(&kdump_mem_nb)) + if (rc || crash_size == 0) return; - if (!crash_base) - crash_base = find_crash_base(crash_size, &msg); - if (!crash_base) { - pr_info("crashkernel reservation failed: %s\n", msg); - unregister_memory_notifier(&kdump_mem_nb); + + if (memblock.memory.regions[0].size < crash_size) { + pr_info("crashkernel reservation failed: %s\n", + "first memory chunk must be at least crashkernel size"); return; } - if (verify_crash_base(crash_base, crash_size, &msg)) { - pr_info("crashkernel reservation failed: %s\n", msg); - unregister_memory_notifier(&kdump_mem_nb); + + low = crash_base ?: OLDMEM_BASE; + high = low + crash_size; + if (low >= OLDMEM_BASE && high <= OLDMEM_BASE + OLDMEM_SIZE) { + /* The crashkernel fits into OLDMEM, reuse OLDMEM */ + crash_base = low; + } else { + /* Find suitable area in free memory */ + low = max_t(unsigned long, crash_size, sclp_get_hsa_size()); + high = crash_base ? 
crash_base + crash_size : ULONG_MAX; + + if (crash_base && crash_base < low) { + pr_info("crashkernel reservation failed: %s\n", + "crash_base too low"); + return; + } + low = crash_base ?: low; + crash_base = memblock_find_in_range(low, high, crash_size, + KEXEC_CRASH_MEM_ALIGN); + } + + if (!crash_base) { + pr_info("crashkernel reservation failed: %s\n", + "no suitable area found"); return; } + + if (register_memory_notifier(&kdump_mem_nb)) + return; + if (!OLDMEM_BASE && MACHINE_IS_VM) diag10_range(PFN_DOWN(crash_base), PFN_DOWN(crash_size)); crashk_res.start = crash_base; crashk_res.end = crash_base + crash_size - 1; insert_resource(&iomem_resource, &crashk_res); - reserve_kdump_bootmem(crash_base, crash_size, CHUNK_CRASHK); + memblock_remove(crash_base, crash_size); pr_info("Reserving %lluMB of memory at %lluMB " "for crashkernel (System RAM: %luMB)\n", - crash_size >> 20, crash_base >> 20, memory_end >> 20); + crash_size >> 20, crash_base >> 20, + (unsigned long)memblock.memory.total_size >> 20); + os_info_crashkernel_add(crash_base, crash_size); #endif } -static void __init -setup_memory(void) +/* + * Reserve the initrd from being used by memblock + */ +static void __init reserve_initrd(void) { - unsigned long bootmap_size; - unsigned long start_pfn, end_pfn; - int i; +#ifdef CONFIG_BLK_DEV_INITRD + initrd_start = INITRD_START; + initrd_end = initrd_start + INITRD_SIZE; + memblock_reserve(INITRD_START, INITRD_SIZE); +#endif +} - /* - * partially used pages are not usable - thus - * we are rounding upwards: - */ +/* + * Check for initrd being in usable memory + */ +static void __init check_initrd(void) +{ +#ifdef CONFIG_BLK_DEV_INITRD + if (INITRD_START && INITRD_SIZE && + !memblock_is_region_memory(INITRD_START, INITRD_SIZE)) { + pr_err("initrd does not fit memory.\n"); + memblock_free(INITRD_START, INITRD_SIZE); + initrd_start = initrd_end = 0; + } +#endif +} + +/* + * Reserve all kernel text + */ +static void __init reserve_kernel(void) +{ + unsigned long start_pfn; start_pfn = PFN_UP(__pa(&_end)); - end_pfn = max_pfn = PFN_DOWN(memory_end); -#ifdef CONFIG_BLK_DEV_INITRD /* - * Move the initrd in case the bitmap of the bootmem allocater - * would overwrite it. + * Reserve memory used for lowcore/command line/kernel image. 
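
The reservation helpers in this region all funnel into memblock. The basic idiom, shown stand-alone with illustrative values: record no-go areas early, then let later searches avoid them.

    static void __init reserve_sketch(void)
    {
            phys_addr_t base;

            /* Record a no-go area; this may even precede memblock_add(). */
            memblock_reserve(0, 1UL << 20);

            /* Then ask for 128 MiB below 4 GiB avoiding all reservations. */
            base = memblock_find_in_range(0, 1UL << 32,
                                          128UL << 20, 1UL << 20);
            if (base)
                    memblock_reserve(base, 128UL << 20);
    }
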
*/ + memblock_reserve(0, (unsigned long)_ehead); + memblock_reserve((unsigned long)_stext, PFN_PHYS(start_pfn) + - (unsigned long)_stext); +} - if (INITRD_START && INITRD_SIZE) { - unsigned long bmap_size; - unsigned long start; - - bmap_size = bootmem_bootmap_pages(end_pfn - start_pfn + 1); - bmap_size = PFN_PHYS(bmap_size); - - if (PFN_PHYS(start_pfn) + bmap_size > INITRD_START) { - start = PFN_PHYS(start_pfn) + bmap_size + PAGE_SIZE; - +static void __init reserve_elfcorehdr(void) +{ #ifdef CONFIG_CRASH_DUMP - if (OLDMEM_BASE) { - /* Move initrd behind kdump oldmem */ - if (start + INITRD_SIZE > OLDMEM_BASE && - start < OLDMEM_BASE + OLDMEM_SIZE) - start = OLDMEM_BASE + OLDMEM_SIZE; - } -#endif - if (start + INITRD_SIZE > memory_end) { - pr_err("initrd extends beyond end of " - "memory (0x%08lx > 0x%08lx) " - "disabling initrd\n", - start + INITRD_SIZE, memory_end); - INITRD_START = INITRD_SIZE = 0; - } else { - pr_info("Moving initrd (0x%08lx -> " - "0x%08lx, size: %ld)\n", - INITRD_START, start, INITRD_SIZE); - memmove((void *) start, (void *) INITRD_START, - INITRD_SIZE); - INITRD_START = start; - } - } - } + if (is_kdump_kernel()) + memblock_reserve(elfcorehdr_addr - OLDMEM_BASE, + PAGE_ALIGN(elfcorehdr_size)); #endif +} - /* - * Initialize the boot-time allocator - */ - bootmap_size = init_bootmem(start_pfn, end_pfn); +static void __init setup_memory(void) +{ + struct memblock_region *reg; /* - * Register RAM areas with the bootmem allocator. + * Init storage key for present memory */ - - for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) { - unsigned long start_chunk, end_chunk, pfn; - - if (memory_chunk[i].type != CHUNK_READ_WRITE && - memory_chunk[i].type != CHUNK_CRASHK) - continue; - start_chunk = PFN_DOWN(memory_chunk[i].addr); - end_chunk = start_chunk + PFN_DOWN(memory_chunk[i].size); - end_chunk = min(end_chunk, end_pfn); - if (start_chunk >= end_chunk) - continue; - memblock_add_node(PFN_PHYS(start_chunk), - PFN_PHYS(end_chunk - start_chunk), 0); - pfn = max(start_chunk, start_pfn); - for (; pfn < end_chunk; pfn++) - page_set_storage_key(PFN_PHYS(pfn), - PAGE_DEFAULT_KEY, 0); + for_each_memblock(memory, reg) { + storage_key_init_range(reg->base, reg->base + reg->size); } - psw_set_key(PAGE_DEFAULT_KEY); - free_bootmem_with_active_regions(0, max_pfn); - - /* - * Reserve memory used for lowcore/command line/kernel image. - */ - reserve_bootmem(0, (unsigned long)_ehead, BOOTMEM_DEFAULT); - reserve_bootmem((unsigned long)_stext, - PFN_PHYS(start_pfn) - (unsigned long)_stext, - BOOTMEM_DEFAULT); - /* - * Reserve the bootmem bitmap itself as well. We do this in two - * steps (first step was init_bootmem()) because this catches - * the (very unlikely) case of us accidentally initializing the - * bootmem allocator with an invalid RAM area. 
- */ - reserve_bootmem(start_pfn << PAGE_SHIFT, bootmap_size, - BOOTMEM_DEFAULT); - -#ifdef CONFIG_CRASH_DUMP - if (crashk_res.start) - reserve_bootmem(crashk_res.start, - crashk_res.end - crashk_res.start + 1, - BOOTMEM_DEFAULT); - if (is_kdump_kernel()) - reserve_bootmem(elfcorehdr_addr - OLDMEM_BASE, - PAGE_ALIGN(elfcorehdr_size), BOOTMEM_DEFAULT); -#endif -#ifdef CONFIG_BLK_DEV_INITRD - if (INITRD_START && INITRD_SIZE) { - if (INITRD_START + INITRD_SIZE <= memory_end) { - reserve_bootmem(INITRD_START, INITRD_SIZE, - BOOTMEM_DEFAULT); - initrd_start = INITRD_START; - initrd_end = initrd_start + INITRD_SIZE; - } else { - pr_err("initrd extends beyond end of " - "memory (0x%08lx > 0x%08lx) " - "disabling initrd\n", - initrd_start + INITRD_SIZE, memory_end); - initrd_start = initrd_end = 0; - } - } -#endif + /* Only cosmetics */ + memblock_enforce_memory_limit(memblock_end_of_DRAM()); } /* @@ -969,12 +749,20 @@ static void __init setup_hwcaps(void) if (MACHINE_HAS_HPAGE) elf_hwcap |= HWCAP_S390_HPAGE; +#if defined(CONFIG_64BIT) /* * 64-bit register support for 31-bit processes * HWCAP_S390_HIGH_GPRS is bit 9. */ elf_hwcap |= HWCAP_S390_HIGH_GPRS; + /* + * Transactional execution support HWCAP_S390_TE is bit 10. + */ + if (test_facility(50) && test_facility(73)) + elf_hwcap |= HWCAP_S390_TE; +#endif + get_cpu_id(&cpu_id); switch (cpu_id.machine) { case 0x9672: @@ -1006,6 +794,10 @@ static void __init setup_hwcaps(void) case 0x2818: strcpy(elf_platform, "z196"); break; + case 0x2827: + case 0x2828: + strcpy(elf_platform, "zEC12"); + break; } } @@ -1014,8 +806,7 @@ static void __init setup_hwcaps(void) * was printed. */ -void __init -setup_arch(char **cmdline_p) +void __init setup_arch(char **cmdline_p) { /* * print what head.S has found out about the machine @@ -1048,29 +839,47 @@ setup_arch(char **cmdline_p) ROOT_DEV = Root_RAM0; + /* Is init_mm really needed? */ init_mm.start_code = PAGE_OFFSET; init_mm.end_code = (unsigned long) &_etext; init_mm.end_data = (unsigned long) &_edata; init_mm.brk = (unsigned long) &_end; - if (MACHINE_HAS_MVCOS) - memcpy(&uaccess, &uaccess_mvcos, sizeof(uaccess)); - else - memcpy(&uaccess, &uaccess_std, sizeof(uaccess)); - parse_early_param(); - + os_info_init(); setup_ipl(); - setup_memory_end(); - setup_addressing_mode(); + + /* Do some memory reservations *before* memory is added to memblock */ + reserve_memory_end(); reserve_oldmem(); - reserve_crashkernel(); + reserve_kernel(); + reserve_initrd(); + reserve_elfcorehdr(); + memblock_allow_resize(); + + /* Get information about *all* installed memory */ + detect_memory_memblock(); + + remove_oldmem(); + + /* + * Make sure all chunks are MAX_ORDER aligned so we don't need the + * extra checks that HOLES_IN_ZONE would require. + * + * Is this still required? 
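
On the alignment passed to memblock_trim_memory() just below: with the default MAX_ORDER of 11 and 4 KiB pages it works out to 4 MiB, so partial buddy blocks at region edges are discarded.

    /* 1UL << (MAX_ORDER - 1 + PAGE_SHIFT) with MAX_ORDER = 11 and
     * PAGE_SHIFT = 12: */
    unsigned long align = 1UL << (11 - 1 + 12);     /* = 4 MiB */

    /* Regions are clipped to whole 4 MiB blocks, e.g. a region covering
     * [3 MiB, 10 MiB) is trimmed to [4 MiB, 8 MiB). */
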
+ */ + memblock_trim_memory(1UL << (MAX_ORDER - 1 + PAGE_SHIFT)); + + setup_memory_end(); setup_memory(); + + check_initrd(); + reserve_crashkernel(); + setup_resources(); setup_vmcoreinfo(); - setup_restart_psw(); setup_lowcore(); - + smp_fill_possible_mask(); cpu_init(); s390_init_cpu_topology(); @@ -1089,5 +898,37 @@ setup_arch(char **cmdline_p) set_preferred_console(); /* Setup zfcpdump support */ - setup_zfcpdump(console_devno); + setup_zfcpdump(); } + +#ifdef CONFIG_32BIT +static int no_removal_warning __initdata; + +static int __init parse_no_removal_warning(char *str) +{ + no_removal_warning = 1; + return 0; +} +__setup("no_removal_warning", parse_no_removal_warning); + +static int __init removal_warning(void) +{ + if (no_removal_warning) + return 0; + printk(KERN_ALERT "\n\n"); + printk(KERN_CONT "Warning - you are using a 31 bit kernel!\n\n"); + printk(KERN_CONT "We plan to remove 31 bit kernel support from the kernel sources in March 2015.\n"); + printk(KERN_CONT "Currently we assume that nobody is using the 31 bit kernel on old 31 bit\n"); + printk(KERN_CONT "hardware anymore. If you think that the code should not be removed and also\n"); + printk(KERN_CONT "future versions of the Linux kernel should be able to run in 31 bit mode\n"); + printk(KERN_CONT "please let us know. Please write to:\n"); + printk(KERN_CONT "linux390@de.ibm.com (mail address) and/or\n"); + printk(KERN_CONT "linux-s390@vger.kernel.org (mailing list).\n\n"); + printk(KERN_CONT "Thank you!\n\n"); + printk(KERN_CONT "If this kernel runs on a 64 bit machine you may consider using a 64 bit kernel.\n"); + printk(KERN_CONT "This message can be disabled with the \"no_removal_warning\" kernel parameter.\n"); + schedule_timeout_uninterruptible(300 * HZ); + return 0; +} +early_initcall(removal_warning); +#endif diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c index a8ba840294f..42b49f9e19b 100644 --- a/arch/s390/kernel/signal.c +++ b/arch/s390/kernel/signal.c @@ -1,7 +1,5 @@ /* - * arch/s390/kernel/signal.c - * - * Copyright (C) IBM Corp. 1999,2006 + * Copyright IBM Corp. 1999, 2006 * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com) * * Based on Intel version @@ -30,12 +28,9 @@ #include <asm/ucontext.h> #include <asm/uaccess.h> #include <asm/lowcore.h> -#include <asm/compat.h> +#include <asm/switch_to.h> #include "entry.h" -#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) - - typedef struct { __u8 callee_used_stack[__SIGNAL_FRAMESIZE]; @@ -53,61 +48,6 @@ typedef struct struct ucontext uc; } rt_sigframe; -/* - * Atomically swap in the new signal mask, and wait for a signal. - */ -SYSCALL_DEFINE3(sigsuspend, int, history0, int, history1, old_sigset_t, mask) -{ - sigset_t blocked; - - current->saved_sigmask = current->blocked; - mask &= _BLOCKABLE; - siginitset(&blocked, mask); - set_current_blocked(&blocked); - set_current_state(TASK_INTERRUPTIBLE); - schedule(); - set_restore_sigmask(); - return -ERESTARTNOHAND; -} - -SYSCALL_DEFINE3(sigaction, int, sig, const struct old_sigaction __user *, act, - struct old_sigaction __user *, oact) -{ - struct k_sigaction new_ka, old_ka; - int ret; - - if (act) { - old_sigset_t mask; - if (!access_ok(VERIFY_READ, act, sizeof(*act)) || - __get_user(new_ka.sa.sa_handler, &act->sa_handler) || - __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) || - __get_user(new_ka.sa.sa_flags, &act->sa_flags) || - __get_user(mask, &act->sa_mask)) - return -EFAULT; - siginitset(&new_ka.sa.sa_mask, mask); - } - - ret = do_sigaction(sig, act ? 
&new_ka : NULL, oact ? &old_ka : NULL); - - if (!ret && oact) { - if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) || - __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || - __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) || - __put_user(old_ka.sa.sa_flags, &oact->sa_flags) || - __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask)) - return -EFAULT; - } - - return ret; -} - -SYSCALL_DEFINE2(sigaltstack, const stack_t __user *, uss, - stack_t __user *, uoss) -{ - struct pt_regs *regs = task_pt_regs(current); - return do_sigaltstack(uss, uoss, regs->gprs[15]); -} - /* Returns non-zero on fault. */ static int save_sigregs(struct pt_regs *regs, _sigregs __user *sregs) { @@ -117,52 +57,63 @@ static int save_sigregs(struct pt_regs *regs, _sigregs __user *sregs) /* Copy a 'clean' PSW mask to the user to avoid leaking information about whether PER is currently on. */ - user_sregs.regs.psw.mask = psw_user_bits | - (regs->psw.mask & PSW_MASK_USER); + user_sregs.regs.psw.mask = PSW_USER_BITS | + (regs->psw.mask & (PSW_MASK_USER | PSW_MASK_RI)); user_sregs.regs.psw.addr = regs->psw.addr; memcpy(&user_sregs.regs.gprs, &regs->gprs, sizeof(sregs->regs.gprs)); memcpy(&user_sregs.regs.acrs, current->thread.acrs, - sizeof(sregs->regs.acrs)); + sizeof(user_sregs.regs.acrs)); /* * We have to store the fp registers to current->thread.fp_regs * to merge them with the emulated registers. */ - save_fp_regs(&current->thread.fp_regs); + save_fp_ctl(&current->thread.fp_regs.fpc); + save_fp_regs(current->thread.fp_regs.fprs); memcpy(&user_sregs.fpregs, &current->thread.fp_regs, - sizeof(s390_fp_regs)); - return __copy_to_user(sregs, &user_sregs, sizeof(_sigregs)); + sizeof(user_sregs.fpregs)); + if (__copy_to_user(sregs, &user_sregs, sizeof(_sigregs))) + return -EFAULT; + return 0; } -/* Returns positive number on error */ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs) { - int err; _sigregs user_sregs; /* Alwys make any pending restarted system call return -EINTR */ current_thread_info()->restart_block.fn = do_no_restart_syscall; - err = __copy_from_user(&user_sregs, sregs, sizeof(_sigregs)); - if (err) - return err; - /* Use regs->psw.mask instead of psw_user_bits to preserve PER bit. */ - regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) | - (user_sregs.regs.psw.mask & PSW_MASK_USER); + if (__copy_from_user(&user_sregs, sregs, sizeof(user_sregs))) + return -EFAULT; + + if (!is_ri_task(current) && (user_sregs.regs.psw.mask & PSW_MASK_RI)) + return -EINVAL; + + /* Loading the floating-point-control word can fail. Do that first. */ + if (restore_fp_ctl(&user_sregs.fpregs.fpc)) + return -EINVAL; + + /* Use regs->psw.mask instead of PSW_USER_BITS to preserve PER bit. */ + regs->psw.mask = (regs->psw.mask & ~(PSW_MASK_USER | PSW_MASK_RI)) | + (user_sregs.regs.psw.mask & (PSW_MASK_USER | PSW_MASK_RI)); + /* Check for invalid user address space control.
*/ if ((regs->psw.mask & PSW_MASK_ASC) == PSW_ASC_HOME) + regs->psw.mask = PSW_ASC_PRIMARY | + (regs->psw.mask & ~PSW_MASK_ASC); /* Check for invalid amode */ if (regs->psw.mask & PSW_MASK_EA) regs->psw.mask |= PSW_MASK_BA; regs->psw.addr = user_sregs.regs.psw.addr; memcpy(&regs->gprs, &user_sregs.regs.gprs, sizeof(sregs->regs.gprs)); memcpy(&current->thread.acrs, &user_sregs.regs.acrs, - sizeof(sregs->regs.acrs)); + sizeof(current->thread.acrs)); restore_access_regs(current->thread.acrs); memcpy(&current->thread.fp_regs, &user_sregs.fpregs, - sizeof(s390_fp_regs)); - current->thread.fp_regs.fpc &= FPC_VALID_MASK; + sizeof(current->thread.fp_regs)); - restore_fp_regs(&current->thread.fp_regs); + restore_fp_regs(current->thread.fp_regs.fprs); - clear_thread_flag(TIF_SYSCALL); /* No longer in a system call */ + clear_pt_regs_flag(regs, PIF_SYSCALL); /* No longer in a system call */ return 0; } @@ -172,11 +123,8 @@ SYSCALL_DEFINE0(sigreturn) sigframe __user *frame = (sigframe __user *)regs->gprs[15]; sigset_t set; - if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) - goto badframe; if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE)) goto badframe; - sigdelsetmask(&set, ~_BLOCKABLE); set_current_blocked(&set); if (restore_sigregs(regs, &frame->sregs)) goto badframe; @@ -192,16 +140,12 @@ SYSCALL_DEFINE0(rt_sigreturn) rt_sigframe __user *frame = (rt_sigframe __user *)regs->gprs[15]; sigset_t set; - if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) - goto badframe; if (__copy_from_user(&set.sig, &frame->uc.uc_sigmask, sizeof(set))) goto badframe; - sigdelsetmask(&set, ~_BLOCKABLE); set_current_blocked(&set); if (restore_sigregs(regs, &frame->uc.uc_mcontext)) goto badframe; - if (do_sigaltstack(&frame->uc.uc_stack, NULL, - regs->gprs[15]) == -EFAULT) + if (restore_altstack(&frame->uc.uc_stack)) goto badframe; return regs->gprs[2]; badframe: @@ -235,13 +179,6 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size) sp = current->sas_ss_sp + current->sas_ss_size; } - /* This is the legacy signal stack switching. */ - else if (!user_mode(regs) && - !(ka->sa.sa_flags & SA_RESTORER) && - ka->sa.sa_restorer) { - sp = (unsigned long) ka->sa.sa_restorer; - } - return (void __user *)((sp - frame_size) & -8ul); } @@ -261,8 +198,6 @@ static int setup_frame(int sig, struct k_sigaction *ka, sigframe __user *frame; frame = get_sigframe(ka, regs, sizeof(sigframe)); - if (!access_ok(VERIFY_WRITE, frame, sizeof(sigframe))) - goto give_sigsegv; if (frame == (void __user *) -1UL) goto give_sigsegv; @@ -294,7 +229,10 @@ static int setup_frame(int sig, struct k_sigaction *ka, /* Set up registers for signal handler */ regs->gprs[15] = (unsigned long) frame; - regs->psw.mask |= PSW_MASK_EA | PSW_MASK_BA; /* 64 bit amode */ + /* Force default amode and default user address space control. */ + regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA | + (PSW_USER_BITS & PSW_MASK_ASC) | + (regs->psw.mask & ~PSW_MASK_ASC); regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE; regs->gprs[2] = map_signal(sig); @@ -327,8 +265,6 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, rt_sigframe __user *frame; frame = get_sigframe(ka, regs, sizeof(rt_sigframe)); - if (!access_ok(VERIFY_WRITE, frame, sizeof(rt_sigframe))) - goto give_sigsegv; if (frame == (void __user *) -1UL) goto give_sigsegv; @@ -339,10 +275,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, /* Create the ucontext.
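
These hunks switch the frame setup to __save_altstack()/restore_altstack(), the generic carriers of sigaltstack() state. The matching user-space side is plain POSIX and independent of this patch; for reference:

    #include <signal.h>
    #include <stdlib.h>

    static void on_segv(int sig, siginfo_t *info, void *ctx)
    {
            /* runs on the alternate stack installed below */
    }

    static int use_altstack(void)
    {
            stack_t ss = {
                    .ss_sp          = malloc(SIGSTKSZ),
                    .ss_size        = SIGSTKSZ,
            };
            struct sigaction sa = {
                    .sa_sigaction   = on_segv,
                    .sa_flags       = SA_SIGINFO | SA_ONSTACK,
            };

            if (!ss.ss_sp || sigaltstack(&ss, NULL))
                    return -1;
            return sigaction(SIGSEGV, &sa, NULL);
    }
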
*/ err |= __put_user(0, &frame->uc.uc_flags); err |= __put_user(NULL, &frame->uc.uc_link); - err |= __put_user((void __user *)current->sas_ss_sp, &frame->uc.uc_stack.ss_sp); - err |= __put_user(sas_ss_flags(regs->gprs[15]), - &frame->uc.uc_stack.ss_flags); - err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); + err |= __save_altstack(&frame->uc.uc_stack, regs->gprs[15]); err |= save_sigregs(regs, &frame->uc.uc_mcontext); err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); if (err) @@ -367,7 +300,10 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, /* Set up registers for signal handler */ regs->gprs[15] = (unsigned long) frame; - regs->psw.mask |= PSW_MASK_EA | PSW_MASK_BA; /* 64 bit amode */ + /* Force default amode and default user address space control. */ + regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA | + (PSW_USER_BITS & PSW_MASK_ASC) | + (regs->psw.mask & ~PSW_MASK_ASC); regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE; regs->gprs[2] = map_signal(sig); @@ -381,11 +317,10 @@ give_sigsegv: return -EFAULT; } -static int handle_signal(unsigned long sig, struct k_sigaction *ka, +static void handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *oldset, struct pt_regs *regs) { - sigset_t blocked; int ret; /* Set up the stack frame */ @@ -394,12 +329,9 @@ static int handle_signal(unsigned long sig, struct k_sigaction *ka, else ret = setup_frame(sig, ka, oldset, regs); if (ret) - return ret; - sigorsets(&blocked, &current->blocked, &ka->sa.sa_mask); - if (!(ka->sa.sa_flags & SA_NODEFER)) - sigaddset(&blocked, sig); - set_current_blocked(&blocked); - return 0; + return; + signal_delivered(sig, info, ka, regs, + test_thread_flag(TIF_SINGLE_STEP)); } /* @@ -416,21 +348,7 @@ void do_signal(struct pt_regs *regs) siginfo_t info; int signr; struct k_sigaction ka; - sigset_t *oldset; - - /* - * We want the common case to go fast, which - * is why we may in certain cases get here from - * kernel mode. Just return without doing anything - * if so. - */ - if (!user_mode(regs)) - return; - - if (test_thread_flag(TIF_RESTORE_SIGMASK)) - oldset = &current->saved_sigmask; - else - oldset = &current->blocked; + sigset_t *oldset = sigmask_to_save(); /* * Get signal to deliver. When running under ptrace, at this point * call information. */ current_thread_info()->system_call = - test_thread_flag(TIF_SYSCALL) ? regs->int_code : 0; + test_pt_regs_flag(regs, PIF_SYSCALL) ? regs->int_code : 0; signr = get_signal_to_deliver(&info, &ka, regs, NULL); if (signr > 0) { @@ -466,31 +384,17 @@ void do_signal(struct pt_regs *regs) } } /* No longer in a system call */ - clear_thread_flag(TIF_SYSCALL); - - if ((is_compat_task() ? - handle_signal32(signr, &ka, &info, oldset, regs) : - handle_signal(signr, &ka, &info, oldset, regs)) == 0) { - /* - * A signal was successfully delivered; the saved - * sigmask will have been stored in the signal frame, - * and will be restored by sigreturn, so we can simply - * clear the TIF_RESTORE_SIGMASK flag. - */ - if (test_thread_flag(TIF_RESTORE_SIGMASK)) - clear_thread_flag(TIF_RESTORE_SIGMASK); - - /* - * Let tracing know that we've done the handler setup.
- */ - tracehook_signal_handler(signr, &info, &ka, regs, - test_thread_flag(TIF_SINGLE_STEP)); - } + clear_pt_regs_flag(regs, PIF_SYSCALL); + + if (is_compat_task()) + handle_signal32(signr, &ka, &info, oldset, regs); + else + handle_signal(signr, &ka, &info, oldset, regs); return; } /* No handlers present - check for system call restart */ - clear_thread_flag(TIF_SYSCALL); + clear_pt_regs_flag(regs, PIF_SYSCALL); if (current_thread_info()->system_call) { regs->int_code = current_thread_info()->system_call; switch (regs->gprs[2]) { @@ -503,7 +407,9 @@ void do_signal(struct pt_regs *regs) case -ERESTARTNOINTR: /* Restart system call with magic TIF bit. */ regs->gprs[2] = regs->orig_gpr2; - set_thread_flag(TIF_SYSCALL); + set_pt_regs_flag(regs, PIF_SYSCALL); + if (test_thread_flag(TIF_SINGLE_STEP)) + clear_pt_regs_flag(regs, PIF_PER_TRAP); break; } } @@ -511,16 +417,11 @@ void do_signal(struct pt_regs *regs) /* * If there's no signal to deliver, we just put the saved sigmask back. */ - if (test_thread_flag(TIF_RESTORE_SIGMASK)) { - clear_thread_flag(TIF_RESTORE_SIGMASK); - sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL); - } + restore_saved_sigmask(); } void do_notify_resume(struct pt_regs *regs) { clear_thread_flag(TIF_NOTIFY_RESUME); tracehook_notify_resume(regs); - if (current->replacement_session_keyring) - key_replace_session_keyring(); } diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 2398ce6b15a..243c7e51260 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -1,23 +1,18 @@ /* - * arch/s390/kernel/smp.c + * SMP related functions * - * Copyright IBM Corp. 1999, 2009 - * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), - * Martin Schwidefsky (schwidefsky@de.ibm.com) - * Heiko Carstens (heiko.carstens@de.ibm.com) + * Copyright IBM Corp. 1999, 2012 + * Author(s): Denis Joseph Barrow, + * Martin Schwidefsky <schwidefsky@de.ibm.com>, + * Heiko Carstens <heiko.carstens@de.ibm.com>, * * based on other smp stuff by * (c) 1995 Alan Cox, CymruNET Ltd <alan@cymru.net> * (c) 1998 Ingo Molnar * - * We work with logical cpu numbering everywhere we can. The only - * functions using the real cpu address (got from STAP) are the sigp - * functions. For all other functions we use the identity mapping. - * That means that cpu_number_map[i] == i for every cpu. cpu_number_map is - * used e.g. to find the idle task belonging to a logical cpu. Every array - * in the kernel is sorted by the logical cpu number and not by the physical - * one which is causing all the confusion with __cpu_logical_map and - * cpu_number_map in other architectures. + * The code outside of smp.c uses logical cpu numbers, only smp.c does + * the translation of logical to physical cpu ids. All new code that + * operates on physical cpu numbers needs to go into smp.c.
*/ #define KMSG_COMPONENT "cpu" @@ -31,176 +26,380 @@ #include <linux/spinlock.h> #include <linux/kernel_stat.h> #include <linux/delay.h> -#include <linux/cache.h> #include <linux/interrupt.h> #include <linux/irqflags.h> #include <linux/cpu.h> -#include <linux/timex.h> -#include <linux/bootmem.h> #include <linux/slab.h> #include <linux/crash_dump.h> #include <asm/asm-offsets.h> +#include <asm/switch_to.h> +#include <asm/facility.h> #include <asm/ipl.h> #include <asm/setup.h> -#include <asm/sigp.h> -#include <asm/pgalloc.h> #include <asm/irq.h> -#include <asm/cpcmd.h> #include <asm/tlbflush.h> -#include <asm/timer.h> +#include <asm/vtimer.h> #include <asm/lowcore.h> #include <asm/sclp.h> -#include <asm/cputime.h> #include <asm/vdso.h> -#include <asm/cpu.h> +#include <asm/debug.h> +#include <asm/os_info.h> +#include <asm/sigp.h> #include "entry.h" -/* logical cpu to cpu address */ -unsigned short __cpu_logical_map[NR_CPUS]; - -static struct task_struct *current_set[NR_CPUS]; - -static u8 smp_cpu_type; -static int smp_use_sigp_detection; +enum { + ec_schedule = 0, + ec_call_function_single, + ec_stop_cpu, +}; -enum s390_cpu_state { +enum { CPU_STATE_STANDBY, CPU_STATE_CONFIGURED, }; +struct pcpu { + struct cpu *cpu; + struct _lowcore *lowcore; /* lowcore page(s) for the cpu */ + unsigned long async_stack; /* async stack for the cpu */ + unsigned long panic_stack; /* panic stack for the cpu */ + unsigned long ec_mask; /* bit mask for ec_xxx functions */ + int state; /* physical cpu state */ + int polarization; /* physical polarization */ + u16 address; /* physical cpu address */ +}; + +static u8 boot_cpu_type; +static u16 boot_cpu_address; +static struct pcpu pcpu_devices[NR_CPUS]; + +/* + * The smp_cpu_state_mutex must be held when changing the state or polarization + * member of a pcpu data structure within the pcpu_devices arreay. + */ DEFINE_MUTEX(smp_cpu_state_mutex); -static int smp_cpu_state[NR_CPUS]; -static DEFINE_PER_CPU(struct cpu, cpu_devices); +/* + * Signal processor helper functions. + */ +static inline int __pcpu_sigp_relax(u16 addr, u8 order, u32 parm, u32 *status) +{ + int cc; -static void smp_ext_bitcall(int, int); + while (1) { + cc = __pcpu_sigp(addr, order, parm, NULL); + if (cc != SIGP_CC_BUSY) + return cc; + cpu_relax(); + } +} -static int raw_cpu_stopped(int cpu) +static int pcpu_sigp_retry(struct pcpu *pcpu, u8 order, u32 parm) { - u32 status; + int cc, retry; - switch (raw_sigp_ps(&status, 0, cpu, sigp_sense)) { - case sigp_status_stored: - /* Check for stopped and check stop state */ - if (status & 0x50) - return 1; - break; - default: - break; + for (retry = 0; ; retry++) { + cc = __pcpu_sigp(pcpu->address, order, parm, NULL); + if (cc != SIGP_CC_BUSY) + break; + if (retry >= 3) + udelay(10); } - return 0; + return cc; } -static inline int cpu_stopped(int cpu) +static inline int pcpu_stopped(struct pcpu *pcpu) { - return raw_cpu_stopped(cpu_logical_map(cpu)); + u32 uninitialized_var(status); + + if (__pcpu_sigp(pcpu->address, SIGP_SENSE, + 0, &status) != SIGP_CC_STATUS_STORED) + return 0; + return !!(status & (SIGP_STATUS_CHECK_STOP|SIGP_STATUS_STOPPED)); +} + +static inline int pcpu_running(struct pcpu *pcpu) +{ + if (__pcpu_sigp(pcpu->address, SIGP_SENSE_RUNNING, + 0, NULL) != SIGP_CC_STATUS_STORED) + return 1; + /* Status stored condition code is equivalent to cpu not running. */ + return 0; } /* - * Ensure that PSW restart is done on an online CPU + * Find struct pcpu by cpu address. 
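
The sigp helpers above all sit on a single privileged instruction. Roughly, the primitive declared in asm/sigp.h looks like the following sketch (condition codes: 0 order accepted, 1 status stored, 2 busy, 3 not operational):

    static inline int __pcpu_sigp(u16 addr, u8 order, u32 parm, u32 *status)
    {
            register unsigned long reg1 asm ("1") = parm;
            int cc;

            asm volatile(
                    "       sigp    %1,%2,0(%3)\n"  /* signal processor "addr" */
                    "       ipm     %0\n"           /* insert condition code ... */
                    "       srl     %0,28\n"        /* ... and shift it down */
                    : "=d" (cc), "+d" (reg1) : "d" (addr), "a" (order) : "cc");
            if (status && cc == 1)
                    *status = reg1;         /* cc 1: status was stored */
            return cc;
    }
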
*/ -void smp_restart_with_online_cpu(void) +static struct pcpu *pcpu_find_address(const struct cpumask *mask, int address) { int cpu; - for_each_online_cpu(cpu) { - if (stap() == __cpu_logical_map[cpu]) { - /* We are online: Enable DAT again and return */ - __load_psw_mask(psw_kernel_bits | PSW_MASK_DAT); - return; - } + for_each_cpu(cpu, mask) + if (pcpu_devices[cpu].address == address) + return pcpu_devices + cpu; + return NULL; +} + +static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit) +{ + int order; + + if (test_and_set_bit(ec_bit, &pcpu->ec_mask)) + return; + order = pcpu_running(pcpu) ? SIGP_EXTERNAL_CALL : SIGP_EMERGENCY_SIGNAL; + pcpu_sigp_retry(pcpu, order, 0); +} + +static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu) +{ + struct _lowcore *lc; + + if (pcpu != &pcpu_devices[0]) { + pcpu->lowcore = (struct _lowcore *) + __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER); + pcpu->async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER); + pcpu->panic_stack = __get_free_page(GFP_KERNEL); + if (!pcpu->lowcore || !pcpu->panic_stack || !pcpu->async_stack) + goto out; } - /* We are not online: Do PSW restart on an online CPU */ - while (sigp(cpu, sigp_restart) == sigp_busy) - cpu_relax(); - /* And stop ourself */ - while (raw_sigp(stap(), sigp_stop) == sigp_busy) - cpu_relax(); - for (;;); + lc = pcpu->lowcore; + memcpy(lc, &S390_lowcore, 512); + memset((char *) lc + 512, 0, sizeof(*lc) - 512); + lc->async_stack = pcpu->async_stack + ASYNC_SIZE + - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs); + lc->panic_stack = pcpu->panic_stack + PAGE_SIZE + - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs); + lc->cpu_nr = cpu; + lc->spinlock_lockval = arch_spin_lockval(cpu); +#ifndef CONFIG_64BIT + if (MACHINE_HAS_IEEE) { + lc->extended_save_area_addr = get_zeroed_page(GFP_KERNEL); + if (!lc->extended_save_area_addr) + goto out; + } +#else + if (vdso_alloc_per_cpu(lc)) + goto out; +#endif + lowcore_ptr[cpu] = lc; + pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc); + return 0; +out: + if (pcpu != &pcpu_devices[0]) { + free_page(pcpu->panic_stack); + free_pages(pcpu->async_stack, ASYNC_ORDER); + free_pages((unsigned long) pcpu->lowcore, LC_ORDER); + } + return -ENOMEM; } -void smp_switch_to_ipl_cpu(void (*func)(void *), void *data) +#ifdef CONFIG_HOTPLUG_CPU + +static void pcpu_free_lowcore(struct pcpu *pcpu) { - struct _lowcore *lc, *current_lc; - struct stack_frame *sf; - struct pt_regs *regs; - unsigned long sp; - - if (smp_processor_id() == 0) - func(data); - __load_psw_mask(PSW_DEFAULT_KEY | PSW_MASK_BASE | - PSW_MASK_EA | PSW_MASK_BA); - /* Disable lowcore protection */ - __ctl_clear_bit(0, 28); - current_lc = lowcore_ptr[smp_processor_id()]; - lc = lowcore_ptr[0]; - if (!lc) - lc = current_lc; - lc->restart_psw.mask = - PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA | PSW_MASK_BA; - lc->restart_psw.addr = PSW_ADDR_AMODE | (unsigned long) smp_restart_cpu; - if (!cpu_online(0)) - smp_switch_to_cpu(func, data, 0, stap(), __cpu_logical_map[0]); - while (sigp(0, sigp_stop_and_store_status) == sigp_busy) - cpu_relax(); - sp = lc->panic_stack; - sp -= sizeof(struct pt_regs); - regs = (struct pt_regs *) sp; - memcpy(®s->gprs, ¤t_lc->gpregs_save_area, sizeof(regs->gprs)); - regs->psw = current_lc->psw_save_area; - sp -= STACK_FRAME_OVERHEAD; - sf = (struct stack_frame *) sp; - sf->back_chain = 0; - smp_switch_to_cpu(func, data, sp, stap(), __cpu_logical_map[0]); + pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0); + lowcore_ptr[pcpu - pcpu_devices] = NULL; +#ifndef CONFIG_64BIT + if 
(MACHINE_HAS_IEEE) { + struct _lowcore *lc = pcpu->lowcore; + + free_page((unsigned long) lc->extended_save_area_addr); + lc->extended_save_area_addr = 0; + } +#else + vdso_free_per_cpu(pcpu->lowcore); +#endif + if (pcpu != &pcpu_devices[0]) { + free_page(pcpu->panic_stack); + free_pages(pcpu->async_stack, ASYNC_ORDER); + free_pages((unsigned long) pcpu->lowcore, LC_ORDER); + } +} + +#endif /* CONFIG_HOTPLUG_CPU */ + +static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu) +{ + struct _lowcore *lc = pcpu->lowcore; + + if (MACHINE_HAS_TLB_LC) + cpumask_set_cpu(cpu, &init_mm.context.cpu_attach_mask); + cpumask_set_cpu(cpu, mm_cpumask(&init_mm)); + atomic_inc(&init_mm.context.attach_count); + lc->cpu_nr = cpu; + lc->spinlock_lockval = arch_spin_lockval(cpu); + lc->percpu_offset = __per_cpu_offset[cpu]; + lc->kernel_asce = S390_lowcore.kernel_asce; + lc->machine_flags = S390_lowcore.machine_flags; + lc->ftrace_func = S390_lowcore.ftrace_func; + lc->user_timer = lc->system_timer = lc->steal_timer = 0; + __ctl_store(lc->cregs_save_area, 0, 15); + save_access_regs((unsigned int *) lc->access_regs_save_area); + memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list, + MAX_FACILITY_BIT/8); +} + +static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk) +{ + struct _lowcore *lc = pcpu->lowcore; + struct thread_info *ti = task_thread_info(tsk); + + lc->kernel_stack = (unsigned long) task_stack_page(tsk) + + THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs); + lc->thread_info = (unsigned long) task_thread_info(tsk); + lc->current_task = (unsigned long) tsk; + lc->user_timer = ti->user_timer; + lc->system_timer = ti->system_timer; + lc->steal_timer = 0; +} + +static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data) +{ + struct _lowcore *lc = pcpu->lowcore; + + lc->restart_stack = lc->kernel_stack; + lc->restart_fn = (unsigned long) func; + lc->restart_data = (unsigned long) data; + lc->restart_source = -1UL; + pcpu_sigp_retry(pcpu, SIGP_RESTART, 0); +} + +/* + * Call function via PSW restart on pcpu and stop the current cpu. + */ +static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *), + void *data, unsigned long stack) +{ + struct _lowcore *lc = lowcore_ptr[pcpu - pcpu_devices]; + unsigned long source_cpu = stap(); + + __load_psw_mask(PSW_KERNEL_BITS); + if (pcpu->address == source_cpu) + func(data); /* should not return */ + /* Stop target cpu (if func returns this stops the current cpu). */ + pcpu_sigp_retry(pcpu, SIGP_STOP, 0); + /* Restart func on the target cpu and stop the current cpu. */ + mem_assign_absolute(lc->restart_stack, stack); + mem_assign_absolute(lc->restart_fn, (unsigned long) func); + mem_assign_absolute(lc->restart_data, (unsigned long) data); + mem_assign_absolute(lc->restart_source, source_cpu); + asm volatile( + "0: sigp 0,%0,%2 # sigp restart to target cpu\n" + " brc 2,0b # busy, try again\n" + "1: sigp 0,%1,%3 # sigp stop to current cpu\n" + " brc 2,1b # busy, try again\n" + : : "d" (pcpu->address), "d" (source_cpu), + "K" (SIGP_RESTART), "K" (SIGP_STOP) + : "0", "1", "cc"); + for (;;) ; +} + +/* + * Call function on an online CPU. + */ +void smp_call_online_cpu(void (*func)(void *), void *data) +{ + struct pcpu *pcpu; + + /* Use the current cpu if it is online. */ + pcpu = pcpu_find_address(cpu_online_mask, stap()); + if (!pcpu) + /* Use the first online cpu. 
*/ + pcpu = pcpu_devices + cpumask_first(cpu_online_mask); + pcpu_delegate(pcpu, func, data, (unsigned long) restart_stack); +} + +/* + * Call function on the ipl CPU. + */ +void smp_call_ipl_cpu(void (*func)(void *), void *data) +{ + pcpu_delegate(&pcpu_devices[0], func, data, + pcpu_devices->panic_stack + PAGE_SIZE); +} + +int smp_find_processor_id(u16 address) +{ + int cpu; + + for_each_present_cpu(cpu) + if (pcpu_devices[cpu].address == address) + return cpu; + return -1; +} + +int smp_vcpu_scheduled(int cpu) +{ + return pcpu_running(pcpu_devices + cpu); +} + +void smp_yield(void) +{ + if (MACHINE_HAS_DIAG44) + asm volatile("diag 0,0,0x44"); +} + +void smp_yield_cpu(int cpu) +{ + if (MACHINE_HAS_DIAG9C) + asm volatile("diag %0,0,0x9c" + : : "d" (pcpu_devices[cpu].address)); + else if (MACHINE_HAS_DIAG44) + asm volatile("diag 0,0,0x44"); } -static void smp_stop_cpu(void) +/* + * Send cpus emergency shutdown signal. This gives the cpus the + * opportunity to complete outstanding interrupts. + */ +static void smp_emergency_stop(cpumask_t *cpumask) { - while (sigp(smp_processor_id(), sigp_stop) == sigp_busy) + u64 end; + int cpu; + + end = get_tod_clock() + (1000000UL << 12); + for_each_cpu(cpu, cpumask) { + struct pcpu *pcpu = pcpu_devices + cpu; + set_bit(ec_stop_cpu, &pcpu->ec_mask); + while (__pcpu_sigp(pcpu->address, SIGP_EMERGENCY_SIGNAL, + 0, NULL) == SIGP_CC_BUSY && + get_tod_clock() < end) + cpu_relax(); + } + while (get_tod_clock() < end) { + for_each_cpu(cpu, cpumask) + if (pcpu_stopped(pcpu_devices + cpu)) + cpumask_clear_cpu(cpu, cpumask); + if (cpumask_empty(cpumask)) + break; cpu_relax(); + } } +/* + * Stop all cpus but the current one. + */ void smp_send_stop(void) { cpumask_t cpumask; int cpu; - u64 end; /* Disable all interrupts/machine checks */ - __load_psw_mask(psw_kernel_bits | PSW_MASK_DAT); + __load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT); trace_hardirqs_off(); + debug_set_critical(); cpumask_copy(&cpumask, cpu_online_mask); cpumask_clear_cpu(smp_processor_id(), &cpumask); - if (oops_in_progress) { - /* - * Give the other cpus the opportunity to complete - * outstanding interrupts before stopping them. - */ - end = get_clock() + (1000000UL << 12); - for_each_cpu(cpu, &cpumask) { - set_bit(ec_stop_cpu, (unsigned long *) - &lowcore_ptr[cpu]->ext_call_fast); - while (sigp(cpu, sigp_emergency_signal) == sigp_busy && - get_clock() < end) - cpu_relax(); - } - while (get_clock() < end) { - for_each_cpu(cpu, &cpumask) - if (cpu_stopped(cpu)) - cpumask_clear_cpu(cpu, &cpumask); - if (cpumask_empty(&cpumask)) - break; - cpu_relax(); - } - } + if (oops_in_progress) + smp_emergency_stop(&cpumask); /* stop all processors */ for_each_cpu(cpu, &cpumask) { - while (sigp(cpu, sigp_stop) == sigp_busy) - cpu_relax(); - while (!cpu_stopped(cpu)) + struct pcpu *pcpu = pcpu_devices + cpu; + pcpu_sigp_retry(pcpu, SIGP_STOP, 0); + while (!pcpu_stopped(pcpu)) cpu_relax(); } } @@ -209,54 +408,25 @@ void smp_send_stop(void) * This is the main routine where commands issued by other * cpus are handled. 
*/ - -static void do_ext_call_interrupt(unsigned int ext_int_code, - unsigned int param32, unsigned long param64) +static void smp_handle_ext_call(void) { unsigned long bits; - if ((ext_int_code & 0xffff) == 0x1202) - kstat_cpu(smp_processor_id()).irqs[EXTINT_EXC]++; - else - kstat_cpu(smp_processor_id()).irqs[EXTINT_EMS]++; - /* - * handle bit signal external calls - */ - bits = xchg(&S390_lowcore.ext_call_fast, 0); - + /* handle bit signal external calls */ + bits = xchg(&pcpu_devices[smp_processor_id()].ec_mask, 0); if (test_bit(ec_stop_cpu, &bits)) smp_stop_cpu(); - if (test_bit(ec_schedule, &bits)) scheduler_ipi(); - - if (test_bit(ec_call_function, &bits)) - generic_smp_call_function_interrupt(); - if (test_bit(ec_call_function_single, &bits)) generic_smp_call_function_single_interrupt(); - } -/* - * Send an external call sigp to another cpu and return without waiting - * for its completion. - */ -static void smp_ext_bitcall(int cpu, int sig) +static void do_ext_call_interrupt(struct ext_code ext_code, + unsigned int param32, unsigned long param64) { - int order; - - /* - * Set signaling bit in lowcore of target cpu and kick it - */ - set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast); - while (1) { - order = smp_vcpu_scheduled(cpu) ? - sigp_external_call : sigp_emergency_signal; - if (sigp(cpu, order) != sigp_busy) - break; - udelay(10); - } + inc_irq_stat(ext_code.code == 0x1202 ? IRQEXT_EXC : IRQEXT_EMS); + smp_handle_ext_call(); } void arch_send_call_function_ipi_mask(const struct cpumask *mask) @@ -264,12 +434,12 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask) int cpu; for_each_cpu(cpu, mask) - smp_ext_bitcall(cpu, ec_call_function); + pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single); } void arch_send_call_function_single_ipi(int cpu) { - smp_ext_bitcall(cpu, ec_call_function_single); + pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single); } #ifndef CONFIG_64BIT @@ -295,15 +465,16 @@ EXPORT_SYMBOL(smp_ptlb_all); */ void smp_send_reschedule(int cpu) { - smp_ext_bitcall(cpu, ec_schedule); + pcpu_ec_call(pcpu_devices + cpu, ec_schedule); } /* * parameter area for the set/clear control bit callbacks */ struct ec_creg_mask_parms { - unsigned long orvals[16]; - unsigned long andvals[16]; + unsigned long orval; + unsigned long andval; + int cr; }; /* @@ -313,11 +484,9 @@ static void smp_ctl_bit_callback(void *info) { struct ec_creg_mask_parms *pp = info; unsigned long cregs[16]; - int i; __ctl_store(cregs, 0, 15); - for (i = 0; i <= 15; i++) - cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i]; + cregs[pp->cr] = (cregs[pp->cr] & pp->andval) | pp->orval; __ctl_load(cregs, 0, 15); } @@ -326,11 +495,8 @@ static void smp_ctl_bit_callback(void *info) */ void smp_ctl_set_bit(int cr, int bit) { - struct ec_creg_mask_parms parms; + struct ec_creg_mask_parms parms = { 1UL << bit, -1UL, cr }; - memset(&parms.orvals, 0, sizeof(parms.orvals)); - memset(&parms.andvals, 0xff, sizeof(parms.andvals)); - parms.orvals[cr] = 1UL << bit; on_each_cpu(smp_ctl_bit_callback, &parms, 1); } EXPORT_SYMBOL(smp_ctl_set_bit); @@ -340,505 +506,291 @@ EXPORT_SYMBOL(smp_ctl_set_bit); */ void smp_ctl_clear_bit(int cr, int bit) { - struct ec_creg_mask_parms parms; + struct ec_creg_mask_parms parms = { 0, ~(1UL << bit), cr }; - memset(&parms.orvals, 0, sizeof(parms.orvals)); - memset(&parms.andvals, 0xff, sizeof(parms.andvals)); - parms.andvals[cr] = ~(1UL << bit); on_each_cpu(smp_ctl_bit_callback, &parms, 1); } EXPORT_SYMBOL(smp_ctl_clear_bit); -#if 
defined(CONFIG_ZFCPDUMP) || defined(CONFIG_CRASH_DUMP) +#ifdef CONFIG_CRASH_DUMP -static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) +static void __init smp_get_save_area(int cpu, u16 address) { - if (ipl_info.type != IPL_TYPE_FCP_DUMP && !OLDMEM_BASE) - return; + void *lc = pcpu_devices[0].lowcore; + struct save_area *save_area; + if (is_kdump_kernel()) return; - if (cpu >= NR_CPUS) { - pr_warning("CPU %i exceeds the maximum %i and is excluded from " - "the dump\n", cpu, NR_CPUS - 1); + if (!OLDMEM_BASE && (address == boot_cpu_address || + ipl_info.type != IPL_TYPE_FCP_DUMP)) + return; + save_area = dump_save_area_create(cpu); + if (!save_area) + panic("could not allocate memory for save area\n"); + if (address == boot_cpu_address) { + /* Copy the registers of the boot cpu. */ + copy_oldmem_page(1, (void *) save_area, sizeof(*save_area), + SAVE_AREA_BASE - PAGE_SIZE, 0); return; } - zfcpdump_save_areas[cpu] = kmalloc(sizeof(struct save_area), GFP_KERNEL); - while (raw_sigp(phy_cpu, sigp_stop_and_store_status) == sigp_busy) - cpu_relax(); - memcpy_real(zfcpdump_save_areas[cpu], - (void *)(unsigned long) store_prefix() + SAVE_AREA_BASE, - sizeof(struct save_area)); + /* Get the registers of a non-boot cpu. */ + __pcpu_sigp_relax(address, SIGP_STOP_AND_STORE_STATUS, 0, NULL); + memcpy_real(save_area, lc + SAVE_AREA_BASE, sizeof(*save_area)); } -struct save_area *zfcpdump_save_areas[NR_CPUS + 1]; -EXPORT_SYMBOL_GPL(zfcpdump_save_areas); +int smp_store_status(int cpu) +{ + struct pcpu *pcpu; -#else + pcpu = pcpu_devices + cpu; + if (__pcpu_sigp_relax(pcpu->address, SIGP_STOP_AND_STORE_STATUS, + 0, NULL) != SIGP_CC_ORDER_CODE_ACCEPTED) + return -EIO; + return 0; +} -static inline void smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) { } +#else /* CONFIG_CRASH_DUMP */ -#endif /* CONFIG_ZFCPDUMP */ +static inline void smp_get_save_area(int cpu, u16 address) { } -static int cpu_known(int cpu_id) -{ - int cpu; +#endif /* CONFIG_CRASH_DUMP */ - for_each_present_cpu(cpu) { - if (__cpu_logical_map[cpu] == cpu_id) - return 1; - } - return 0; +void smp_cpu_set_polarization(int cpu, int val) +{ + pcpu_devices[cpu].polarization = val; } -static int smp_rescan_cpus_sigp(cpumask_t avail) +int smp_cpu_get_polarization(int cpu) { - int cpu_id, logical_cpu; - - logical_cpu = cpumask_first(&avail); - if (logical_cpu >= nr_cpu_ids) - return 0; - for (cpu_id = 0; cpu_id <= MAX_CPU_ADDRESS; cpu_id++) { - if (cpu_known(cpu_id)) - continue; - __cpu_logical_map[logical_cpu] = cpu_id; - cpu_set_polarization(logical_cpu, POLARIZATION_UNKNOWN); - if (!cpu_stopped(logical_cpu)) - continue; - set_cpu_present(logical_cpu, true); - smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED; - logical_cpu = cpumask_next(logical_cpu, &avail); - if (logical_cpu >= nr_cpu_ids) - break; - } - return 0; + return pcpu_devices[cpu].polarization; } -static int smp_rescan_cpus_sclp(cpumask_t avail) +static struct sclp_cpu_info *smp_get_cpu_info(void) { + static int use_sigp_detection; struct sclp_cpu_info *info; - int cpu_id, logical_cpu, cpu; - int rc; - - logical_cpu = cpumask_first(&avail); - if (logical_cpu >= nr_cpu_ids) - return 0; - info = kmalloc(sizeof(*info), GFP_KERNEL); - if (!info) - return -ENOMEM; - rc = sclp_get_cpu_info(info); - if (rc) - goto out; - for (cpu = 0; cpu < info->combined; cpu++) { - if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type) - continue; - cpu_id = info->cpu[cpu].address; - if (cpu_known(cpu_id)) - continue; - __cpu_logical_map[logical_cpu] = cpu_id; - 
cpu_set_polarization(logical_cpu, POLARIZATION_UNKNOWN); - set_cpu_present(logical_cpu, true); - if (cpu >= info->configured) - smp_cpu_state[logical_cpu] = CPU_STATE_STANDBY; - else - smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED; - logical_cpu = cpumask_next(logical_cpu, &avail); - if (logical_cpu >= nr_cpu_ids) - break; + int address; + + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (info && (use_sigp_detection || sclp_get_cpu_info(info))) { + use_sigp_detection = 1; + for (address = 0; address <= MAX_CPU_ADDRESS; address++) { + if (__pcpu_sigp_relax(address, SIGP_SENSE, 0, NULL) == + SIGP_CC_NOT_OPERATIONAL) + continue; + info->cpu[info->configured].address = address; + info->configured++; + } + info->combined = info->configured; } -out: - kfree(info); - return rc; + return info; } -static int __smp_rescan_cpus(void) +static int smp_add_present_cpu(int cpu); + +static int __smp_rescan_cpus(struct sclp_cpu_info *info, int sysfs_add) { + struct pcpu *pcpu; cpumask_t avail; + int cpu, nr, i; + nr = 0; cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask); - if (smp_use_sigp_detection) - return smp_rescan_cpus_sigp(avail); - else - return smp_rescan_cpus_sclp(avail); + cpu = cpumask_first(&avail); + for (i = 0; (i < info->combined) && (cpu < nr_cpu_ids); i++) { + if (info->has_cpu_type && info->cpu[i].type != boot_cpu_type) + continue; + if (pcpu_find_address(cpu_present_mask, info->cpu[i].address)) + continue; + pcpu = pcpu_devices + cpu; + pcpu->address = info->cpu[i].address; + pcpu->state = (i >= info->configured) ? + CPU_STATE_STANDBY : CPU_STATE_CONFIGURED; + smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); + set_cpu_present(cpu, true); + if (sysfs_add && smp_add_present_cpu(cpu) != 0) + set_cpu_present(cpu, false); + else + nr++; + cpu = cpumask_next(cpu, &avail); + } + return nr; } static void __init smp_detect_cpus(void) { unsigned int cpu, c_cpus, s_cpus; struct sclp_cpu_info *info; - u16 boot_cpu_addr, cpu_addr; - c_cpus = 1; - s_cpus = 0; - boot_cpu_addr = __cpu_logical_map[0]; - info = kmalloc(sizeof(*info), GFP_KERNEL); + info = smp_get_cpu_info(); if (!info) panic("smp_detect_cpus failed to allocate memory\n"); -#ifdef CONFIG_CRASH_DUMP - if (OLDMEM_BASE && !is_kdump_kernel()) { - struct save_area *save_area; - - save_area = kmalloc(sizeof(*save_area), GFP_KERNEL); - if (!save_area) - panic("could not allocate memory for save area\n"); - copy_oldmem_page(1, (void *) save_area, sizeof(*save_area), - 0x200, 0); - zfcpdump_save_areas[0] = save_area; - } -#endif - /* Use sigp detection algorithm if sclp doesn't work. */ - if (sclp_get_cpu_info(info)) { - smp_use_sigp_detection = 1; - for (cpu = 0; cpu <= MAX_CPU_ADDRESS; cpu++) { - if (cpu == boot_cpu_addr) - continue; - if (!raw_cpu_stopped(cpu)) - continue; - smp_get_save_area(c_cpus, cpu); - c_cpus++; - } - goto out; - } - if (info->has_cpu_type) { for (cpu = 0; cpu < info->combined; cpu++) { - if (info->cpu[cpu].address == boot_cpu_addr) { - smp_cpu_type = info->cpu[cpu].type; - break; - } + if (info->cpu[cpu].address != boot_cpu_address) + continue; + /* The boot cpu dictates the cpu type. 
*/ + boot_cpu_type = info->cpu[cpu].type; + break; } } - + c_cpus = s_cpus = 0; for (cpu = 0; cpu < info->combined; cpu++) { - if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type) + if (info->has_cpu_type && info->cpu[cpu].type != boot_cpu_type) continue; - cpu_addr = info->cpu[cpu].address; - if (cpu_addr == boot_cpu_addr) - continue; - if (!raw_cpu_stopped(cpu_addr)) { + if (cpu < info->configured) { + smp_get_save_area(c_cpus, info->cpu[cpu].address); + c_cpus++; + } else s_cpus++; - continue; - } - smp_get_save_area(c_cpus, cpu_addr); - c_cpus++; } -out: - kfree(info); pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus); get_online_cpus(); - __smp_rescan_cpus(); + __smp_rescan_cpus(info, 0); put_online_cpus(); + kfree(info); } /* * Activate a secondary processor. */ -int __cpuinit start_secondary(void *cpuvoid) +static void smp_start_secondary(void *cpuvoid) { + S390_lowcore.last_update_clock = get_tod_clock(); + S390_lowcore.restart_stack = (unsigned long) restart_stack; + S390_lowcore.restart_fn = (unsigned long) do_restart; + S390_lowcore.restart_data = 0; + S390_lowcore.restart_source = -1UL; + restore_access_regs(S390_lowcore.access_regs_save_area); + __ctl_load(S390_lowcore.cregs_save_area, 0, 15); + __load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT); cpu_init(); preempt_disable(); init_cpu_timer(); init_cpu_vtimer(); pfault_init(); - notify_cpu_starting(smp_processor_id()); - ipi_call_lock(); set_cpu_online(smp_processor_id(), true); - ipi_call_unlock(); - __ctl_clear_bit(0, 28); /* Disable lowcore protection */ - S390_lowcore.restart_psw.mask = - PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA | PSW_MASK_BA; - S390_lowcore.restart_psw.addr = - PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler; - __ctl_set_bit(0, 28); /* Enable lowcore protection */ - /* - * Wait until the cpu which brought this one up marked it - * active before enabling interrupts. 
- */ - while (!cpumask_test_cpu(smp_processor_id(), cpu_active_mask)) - cpu_relax(); + inc_irq_stat(CPU_RST); local_irq_enable(); - /* cpu_idle will call schedule for us */ - cpu_idle(); - return 0; -} - -struct create_idle { - struct work_struct work; - struct task_struct *idle; - struct completion done; - int cpu; -}; - -static void __cpuinit smp_fork_idle(struct work_struct *work) -{ - struct create_idle *c_idle; - - c_idle = container_of(work, struct create_idle, work); - c_idle->idle = fork_idle(c_idle->cpu); - complete(&c_idle->done); -} - -static int __cpuinit smp_alloc_lowcore(int cpu) -{ - unsigned long async_stack, panic_stack; - struct _lowcore *lowcore; - - lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER); - if (!lowcore) - return -ENOMEM; - async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER); - panic_stack = __get_free_page(GFP_KERNEL); - if (!panic_stack || !async_stack) - goto out; - memcpy(lowcore, &S390_lowcore, 512); - memset((char *)lowcore + 512, 0, sizeof(*lowcore) - 512); - lowcore->async_stack = async_stack + ASYNC_SIZE; - lowcore->panic_stack = panic_stack + PAGE_SIZE; - lowcore->restart_psw.mask = - PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA | PSW_MASK_BA; - lowcore->restart_psw.addr = - PSW_ADDR_AMODE | (unsigned long) restart_int_handler; - if (user_mode != HOME_SPACE_MODE) - lowcore->restart_psw.mask |= PSW_ASC_HOME; -#ifndef CONFIG_64BIT - if (MACHINE_HAS_IEEE) { - unsigned long save_area; - - save_area = get_zeroed_page(GFP_KERNEL); - if (!save_area) - goto out; - lowcore->extended_save_area_addr = (u32) save_area; - } -#else - if (vdso_alloc_per_cpu(cpu, lowcore)) - goto out; -#endif - lowcore_ptr[cpu] = lowcore; - return 0; - -out: - free_page(panic_stack); - free_pages(async_stack, ASYNC_ORDER); - free_pages((unsigned long) lowcore, LC_ORDER); - return -ENOMEM; -} - -static void smp_free_lowcore(int cpu) -{ - struct _lowcore *lowcore; - - lowcore = lowcore_ptr[cpu]; -#ifndef CONFIG_64BIT - if (MACHINE_HAS_IEEE) - free_page((unsigned long) lowcore->extended_save_area_addr); -#else - vdso_free_per_cpu(cpu, lowcore); -#endif - free_page(lowcore->panic_stack - PAGE_SIZE); - free_pages(lowcore->async_stack - ASYNC_SIZE, ASYNC_ORDER); - free_pages((unsigned long) lowcore, LC_ORDER); - lowcore_ptr[cpu] = NULL; + cpu_startup_entry(CPUHP_ONLINE); } /* Upping and downing of CPUs */ -int __cpuinit __cpu_up(unsigned int cpu) +int __cpu_up(unsigned int cpu, struct task_struct *tidle) { - struct _lowcore *cpu_lowcore; - struct create_idle c_idle; - struct task_struct *idle; - struct stack_frame *sf; - u32 lowcore; - int ccode; - - if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED) - return -EIO; - idle = current_set[cpu]; - if (!idle) { - c_idle.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done); - INIT_WORK_ONSTACK(&c_idle.work, smp_fork_idle); - c_idle.cpu = cpu; - schedule_work(&c_idle.work); - wait_for_completion(&c_idle.done); - if (IS_ERR(c_idle.idle)) - return PTR_ERR(c_idle.idle); - idle = c_idle.idle; - current_set[cpu] = c_idle.idle; - } - init_idle(idle, cpu); - if (smp_alloc_lowcore(cpu)) - return -ENOMEM; - do { - ccode = sigp(cpu, sigp_initial_cpu_reset); - if (ccode == sigp_busy) - udelay(10); - if (ccode == sigp_not_operational) - goto err_out; - } while (ccode == sigp_busy); - - lowcore = (u32)(unsigned long)lowcore_ptr[cpu]; - while (sigp_p(lowcore, cpu, sigp_set_prefix) == sigp_busy) - udelay(10); - - cpu_lowcore = lowcore_ptr[cpu]; - cpu_lowcore->kernel_stack = (unsigned long) - task_stack_page(idle) + THREAD_SIZE; - 
cpu_lowcore->thread_info = (unsigned long) task_thread_info(idle); - sf = (struct stack_frame *) (cpu_lowcore->kernel_stack - - sizeof(struct pt_regs) - - sizeof(struct stack_frame)); - memset(sf, 0, sizeof(struct stack_frame)); - sf->gprs[9] = (unsigned long) sf; - cpu_lowcore->gpregs_save_area[15] = (unsigned long) sf; - __ctl_store(cpu_lowcore->cregs_save_area, 0, 15); - atomic_inc(&init_mm.context.attach_count); - asm volatile( - " stam 0,15,0(%0)" - : : "a" (&cpu_lowcore->access_regs_save_area) : "memory"); - cpu_lowcore->percpu_offset = __per_cpu_offset[cpu]; - cpu_lowcore->current_task = (unsigned long) idle; - cpu_lowcore->cpu_nr = cpu; - cpu_lowcore->kernel_asce = S390_lowcore.kernel_asce; - cpu_lowcore->machine_flags = S390_lowcore.machine_flags; - cpu_lowcore->ftrace_func = S390_lowcore.ftrace_func; - memcpy(cpu_lowcore->stfle_fac_list, S390_lowcore.stfle_fac_list, - MAX_FACILITY_BIT/8); - eieio(); + struct pcpu *pcpu; + int rc; - while (sigp(cpu, sigp_restart) == sigp_busy) - udelay(10); + pcpu = pcpu_devices + cpu; + if (pcpu->state != CPU_STATE_CONFIGURED) + return -EIO; + if (pcpu_sigp_retry(pcpu, SIGP_INITIAL_CPU_RESET, 0) != + SIGP_CC_ORDER_CODE_ACCEPTED) + return -EIO; + rc = pcpu_alloc_lowcore(pcpu, cpu); + if (rc) + return rc; + pcpu_prepare_secondary(pcpu, cpu); + pcpu_attach_task(pcpu, tidle); + pcpu_start_fn(pcpu, smp_start_secondary, NULL); while (!cpu_online(cpu)) cpu_relax(); return 0; - -err_out: - smp_free_lowcore(cpu); - return -EIO; } -static int __init setup_possible_cpus(char *s) -{ - int pcpus, cpu; +static unsigned int setup_possible_cpus __initdata; - pcpus = simple_strtoul(s, NULL, 0); - init_cpu_possible(cpumask_of(0)); - for (cpu = 1; cpu < pcpus && cpu < nr_cpu_ids; cpu++) - set_cpu_possible(cpu, true); +static int __init _setup_possible_cpus(char *s) +{ + get_option(&s, &setup_possible_cpus); return 0; } -early_param("possible_cpus", setup_possible_cpus); +early_param("possible_cpus", _setup_possible_cpus); #ifdef CONFIG_HOTPLUG_CPU int __cpu_disable(void) { - struct ec_creg_mask_parms cr_parms; - int cpu = smp_processor_id(); - - set_cpu_online(cpu, false); + unsigned long cregs[16]; - /* Disable pfault pseudo page faults on this cpu. */ + /* Handle possible pending IPIs */ + smp_handle_ext_call(); + set_cpu_online(smp_processor_id(), false); + /* Disable pseudo page faults on this cpu. */ pfault_fini(); - - memset(&cr_parms.orvals, 0, sizeof(cr_parms.orvals)); - memset(&cr_parms.andvals, 0xff, sizeof(cr_parms.andvals)); - - /* disable all external interrupts */ - cr_parms.orvals[0] = 0; - cr_parms.andvals[0] = ~(1 << 15 | 1 << 14 | 1 << 13 | 1 << 11 | - 1 << 10 | 1 << 9 | 1 << 6 | 1 << 5 | - 1 << 4); - /* disable all I/O interrupts */ - cr_parms.orvals[6] = 0; - cr_parms.andvals[6] = ~(1 << 31 | 1 << 30 | 1 << 29 | 1 << 28 | - 1 << 27 | 1 << 26 | 1 << 25 | 1 << 24); - /* disable most machine checks */ - cr_parms.orvals[14] = 0; - cr_parms.andvals[14] = ~(1 << 28 | 1 << 27 | 1 << 26 | - 1 << 25 | 1 << 24); - - smp_ctl_bit_callback(&cr_parms); - + /* Disable interrupt sources via control register. 
*/ + __ctl_store(cregs, 0, 15); + cregs[0] &= ~0x0000ee70UL; /* disable all external interrupts */ + cregs[6] &= ~0xff000000UL; /* disable all I/O interrupts */ + cregs[14] &= ~0x1f000000UL; /* disable most machine checks */ + __ctl_load(cregs, 0, 15); return 0; } void __cpu_die(unsigned int cpu) { + struct pcpu *pcpu; + /* Wait until target cpu is down */ - while (!cpu_stopped(cpu)) + pcpu = pcpu_devices + cpu; + while (!pcpu_stopped(pcpu)) cpu_relax(); - while (sigp_p(0, cpu, sigp_set_prefix) == sigp_busy) - udelay(10); - smp_free_lowcore(cpu); + pcpu_free_lowcore(pcpu); atomic_dec(&init_mm.context.attach_count); + cpumask_clear_cpu(cpu, mm_cpumask(&init_mm)); + if (MACHINE_HAS_TLB_LC) + cpumask_clear_cpu(cpu, &init_mm.context.cpu_attach_mask); } void __noreturn cpu_die(void) { idle_task_exit(); - while (sigp(smp_processor_id(), sigp_stop) == sigp_busy) - cpu_relax(); - for (;;); + pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0); + for (;;) ; } #endif /* CONFIG_HOTPLUG_CPU */ -void __init smp_prepare_cpus(unsigned int max_cpus) +void __init smp_fill_possible_mask(void) { -#ifndef CONFIG_64BIT - unsigned long save_area = 0; -#endif - unsigned long async_stack, panic_stack; - struct _lowcore *lowcore; + unsigned int possible, sclp, cpu; - smp_detect_cpus(); + sclp = sclp_get_max_cpu() ?: nr_cpu_ids; + possible = setup_possible_cpus ?: nr_cpu_ids; + possible = min(possible, sclp); + for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++) + set_cpu_possible(cpu, true); +} +void __init smp_prepare_cpus(unsigned int max_cpus) +{ /* request the 0x1201 emergency signal external interrupt */ - if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0) + if (register_external_irq(EXT_IRQ_EMERGENCY_SIG, do_ext_call_interrupt)) panic("Couldn't request external interrupt 0x1201"); /* request the 0x1202 external call external interrupt */ - if (register_external_interrupt(0x1202, do_ext_call_interrupt) != 0) + if (register_external_irq(EXT_IRQ_EXTERNAL_CALL, do_ext_call_interrupt)) panic("Couldn't request external interrupt 0x1202"); - - /* Reallocate current lowcore, but keep its contents. 
*/ - lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER); - panic_stack = __get_free_page(GFP_KERNEL); - async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER); - BUG_ON(!lowcore || !panic_stack || !async_stack); -#ifndef CONFIG_64BIT - if (MACHINE_HAS_IEEE) - save_area = get_zeroed_page(GFP_KERNEL); -#endif - local_irq_disable(); - local_mcck_disable(); - lowcore_ptr[smp_processor_id()] = lowcore; - *lowcore = S390_lowcore; - lowcore->panic_stack = panic_stack + PAGE_SIZE; - lowcore->async_stack = async_stack + ASYNC_SIZE; -#ifndef CONFIG_64BIT - if (MACHINE_HAS_IEEE) - lowcore->extended_save_area_addr = (u32) save_area; -#endif - set_prefix((u32)(unsigned long) lowcore); - local_mcck_enable(); - local_irq_enable(); -#ifdef CONFIG_64BIT - if (vdso_alloc_per_cpu(smp_processor_id(), &S390_lowcore)) - BUG(); -#endif + smp_detect_cpus(); } void __init smp_prepare_boot_cpu(void) { - BUG_ON(smp_processor_id() != 0); - - current_thread_info()->cpu = 0; + struct pcpu *pcpu = pcpu_devices; + + boot_cpu_address = stap(); + pcpu->state = CPU_STATE_CONFIGURED; + pcpu->address = boot_cpu_address; + pcpu->lowcore = (struct _lowcore *)(unsigned long) store_prefix(); + pcpu->async_stack = S390_lowcore.async_stack - ASYNC_SIZE + + STACK_FRAME_OVERHEAD + sizeof(struct pt_regs); + pcpu->panic_stack = S390_lowcore.panic_stack - PAGE_SIZE + + STACK_FRAME_OVERHEAD + sizeof(struct pt_regs); + S390_lowcore.percpu_offset = __per_cpu_offset[0]; + smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN); set_cpu_present(0, true); set_cpu_online(0, true); - S390_lowcore.percpu_offset = __per_cpu_offset[0]; - current_set[0] = current; - smp_cpu_state[0] = CPU_STATE_CONFIGURED; - cpu_set_polarization(0, POLARIZATION_UNKNOWN); } void __init smp_cpus_done(unsigned int max_cpus) @@ -848,7 +800,7 @@ void __init smp_cpus_done(unsigned int max_cpus) void __init smp_setup_processor_id(void) { S390_lowcore.cpu_nr = 0; - __cpu_logical_map[0] = stap(); + S390_lowcore.spinlock_lockval = arch_spin_lockval(0); } /* @@ -864,56 +816,57 @@ int setup_profiling_timer(unsigned int multiplier) #ifdef CONFIG_HOTPLUG_CPU static ssize_t cpu_configure_show(struct device *dev, - struct device_attribute *attr, char *buf) + struct device_attribute *attr, char *buf) { ssize_t count; mutex_lock(&smp_cpu_state_mutex); - count = sprintf(buf, "%d\n", smp_cpu_state[dev->id]); + count = sprintf(buf, "%d\n", pcpu_devices[dev->id].state); mutex_unlock(&smp_cpu_state_mutex); return count; } static ssize_t cpu_configure_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) + struct device_attribute *attr, + const char *buf, size_t count) { - int cpu = dev->id; - int val, rc; + struct pcpu *pcpu; + int cpu, val, rc; char delim; if (sscanf(buf, "%d %c", &val, &delim) != 1) return -EINVAL; if (val != 0 && val != 1) return -EINVAL; - get_online_cpus(); mutex_lock(&smp_cpu_state_mutex); rc = -EBUSY; /* disallow configuration changes of online cpus and cpu 0 */ + cpu = dev->id; if (cpu_online(cpu) || cpu == 0) goto out; + pcpu = pcpu_devices + cpu; rc = 0; switch (val) { case 0: - if (smp_cpu_state[cpu] == CPU_STATE_CONFIGURED) { - rc = sclp_cpu_deconfigure(__cpu_logical_map[cpu]); - if (!rc) { - smp_cpu_state[cpu] = CPU_STATE_STANDBY; - cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); - topology_expect_change(); - } - } + if (pcpu->state != CPU_STATE_CONFIGURED) + break; + rc = sclp_cpu_deconfigure(pcpu->address); + if (rc) + break; + pcpu->state = CPU_STATE_STANDBY; + smp_cpu_set_polarization(cpu, 
POLARIZATION_UNKNOWN); + topology_expect_change(); break; case 1: - if (smp_cpu_state[cpu] == CPU_STATE_STANDBY) { - rc = sclp_cpu_configure(__cpu_logical_map[cpu]); - if (!rc) { - smp_cpu_state[cpu] = CPU_STATE_CONFIGURED; - cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); - topology_expect_change(); - } - } + if (pcpu->state != CPU_STATE_STANDBY) + break; + rc = sclp_cpu_configure(pcpu->address); + if (rc) + break; + pcpu->state = CPU_STATE_CONFIGURED; + smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); + topology_expect_change(); break; default: break; @@ -929,7 +882,7 @@ static DEVICE_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store); static ssize_t show_cpu_address(struct device *dev, struct device_attribute *attr, char *buf) { - return sprintf(buf, "%d\n", __cpu_logical_map[dev->id]); + return sprintf(buf, "%d\n", pcpu_devices[dev->id].address); } static DEVICE_ATTR(address, 0444, show_cpu_address, NULL); @@ -945,38 +898,19 @@ static struct attribute_group cpu_common_attr_group = { .attrs = cpu_common_attrs, }; -static ssize_t show_capability(struct device *dev, - struct device_attribute *attr, char *buf) -{ - unsigned int capability; - int rc; - - rc = get_cpu_capability(&capability); - if (rc) - return rc; - return sprintf(buf, "%u\n", capability); -} -static DEVICE_ATTR(capability, 0444, show_capability, NULL); - static ssize_t show_idle_count(struct device *dev, struct device_attribute *attr, char *buf) { - struct s390_idle_data *idle; + struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id); unsigned long long idle_count; unsigned int sequence; - idle = &per_cpu(s390_idle, dev->id); -repeat: - sequence = idle->sequence; - smp_rmb(); - if (sequence & 1) - goto repeat; - idle_count = idle->idle_count; - if (idle->idle_enter) - idle_count++; - smp_rmb(); - if (idle->sequence != sequence) - goto repeat; + do { + sequence = ACCESS_ONCE(idle->sequence); + idle_count = ACCESS_ONCE(idle->idle_count); + if (ACCESS_ONCE(idle->clock_idle_enter)) + idle_count++; + } while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence)); return sprintf(buf, "%llu\n", idle_count); } static DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL); @@ -984,30 +918,23 @@ static DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL); static ssize_t show_idle_time(struct device *dev, struct device_attribute *attr, char *buf) { - struct s390_idle_data *idle; - unsigned long long now, idle_time, idle_enter; + struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id); + unsigned long long now, idle_time, idle_enter, idle_exit; unsigned int sequence; - idle = &per_cpu(s390_idle, dev->id); - now = get_clock(); -repeat: - sequence = idle->sequence; - smp_rmb(); - if (sequence & 1) - goto repeat; - idle_time = idle->idle_time; - idle_enter = idle->idle_enter; - if (idle_enter != 0ULL && idle_enter < now) - idle_time += now - idle_enter; - smp_rmb(); - if (idle->sequence != sequence) - goto repeat; + do { + now = get_tod_clock(); + sequence = ACCESS_ONCE(idle->sequence); + idle_time = ACCESS_ONCE(idle->idle_time); + idle_enter = ACCESS_ONCE(idle->clock_idle_enter); + idle_exit = ACCESS_ONCE(idle->clock_idle_exit); + } while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence)); + idle_time += idle_enter ? ((idle_exit ? 
: now) - idle_enter) : 0; return sprintf(buf, "%llu\n", idle_time >> 12); } static DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL); static struct attribute *cpu_online_attrs[] = { - &dev_attr_capability.attr, &dev_attr_idle_count.attr, &dev_attr_idle_time_us.attr, NULL, @@ -1017,40 +944,36 @@ static struct attribute_group cpu_online_attr_group = { .attrs = cpu_online_attrs, }; -static int __cpuinit smp_cpu_notify(struct notifier_block *self, - unsigned long action, void *hcpu) +static int smp_cpu_notify(struct notifier_block *self, unsigned long action, + void *hcpu) { unsigned int cpu = (unsigned int)(long)hcpu; - struct cpu *c = &per_cpu(cpu_devices, cpu); + struct cpu *c = pcpu_devices[cpu].cpu; struct device *s = &c->dev; - struct s390_idle_data *idle; int err = 0; - switch (action) { + switch (action & ~CPU_TASKS_FROZEN) { case CPU_ONLINE: - case CPU_ONLINE_FROZEN: - idle = &per_cpu(s390_idle, cpu); - memset(idle, 0, sizeof(struct s390_idle_data)); err = sysfs_create_group(&s->kobj, &cpu_online_attr_group); break; case CPU_DEAD: - case CPU_DEAD_FROZEN: sysfs_remove_group(&s->kobj, &cpu_online_attr_group); break; } return notifier_from_errno(err); } -static struct notifier_block __cpuinitdata smp_cpu_nb = { - .notifier_call = smp_cpu_notify, -}; - -static int __devinit smp_add_present_cpu(int cpu) +static int smp_add_present_cpu(int cpu) { - struct cpu *c = &per_cpu(cpu_devices, cpu); - struct device *s = &c->dev; + struct device *s; + struct cpu *c; int rc; + c = kzalloc(sizeof(*c), GFP_KERNEL); + if (!c) + return -ENOMEM; + pcpu_devices[cpu].cpu = c; + s = &c->dev; c->hotpluggable = 1; rc = register_cpu(c, cpu); if (rc) @@ -1085,29 +1008,21 @@ out: int __ref smp_rescan_cpus(void) { - cpumask_t newcpus; - int cpu; - int rc; + struct sclp_cpu_info *info; + int nr; + info = smp_get_cpu_info(); + if (!info) + return -ENOMEM; get_online_cpus(); mutex_lock(&smp_cpu_state_mutex); - cpumask_copy(&newcpus, cpu_present_mask); - rc = __smp_rescan_cpus(); - if (rc) - goto out; - cpumask_andnot(&newcpus, cpu_present_mask, &newcpus); - for_each_cpu(cpu, &newcpus) { - rc = smp_add_present_cpu(cpu); - if (rc) - set_cpu_present(cpu, false); - } - rc = 0; -out: + nr = __smp_rescan_cpus(info, 1); mutex_unlock(&smp_cpu_state_mutex); put_online_cpus(); - if (!cpumask_empty(&newcpus)) + kfree(info); + if (nr) topology_schedule_update(); - return rc; + return 0; } static ssize_t __ref rescan_store(struct device *dev, @@ -1125,19 +1040,24 @@ static DEVICE_ATTR(rescan, 0200, NULL, rescan_store); static int __init s390_smp_init(void) { - int cpu, rc; + int cpu, rc = 0; - register_cpu_notifier(&smp_cpu_nb); #ifdef CONFIG_HOTPLUG_CPU rc = device_create_file(cpu_subsys.dev_root, &dev_attr_rescan); if (rc) return rc; #endif + cpu_notifier_register_begin(); for_each_present_cpu(cpu) { rc = smp_add_present_cpu(cpu); if (rc) - return rc; + goto out; } - return 0; + + __hotcpu_notifier(smp_cpu_notify, 0); + +out: + cpu_notifier_register_done(); + return rc; } subsys_initcall(s390_smp_init); diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c index 8841919ef7e..1785cd82253 100644 --- a/arch/s390/kernel/stacktrace.c +++ b/arch/s390/kernel/stacktrace.c @@ -1,9 +1,7 @@ /* - * arch/s390/kernel/stacktrace.c - * * Stack trace management functions * - * Copyright (C) IBM Corp. 2006 + * Copyright IBM Corp. 
2006 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> */ diff --git a/arch/s390/kernel/suspend.c b/arch/s390/kernel/suspend.c index 47df775c844..a7a7537ce1e 100644 --- a/arch/s390/kernel/suspend.c +++ b/arch/s390/kernel/suspend.c @@ -9,7 +9,11 @@ #include <linux/pfn.h> #include <linux/suspend.h> #include <linux/mm.h> -#include <asm/system.h> +#include <asm/ctl_reg.h> +#include <asm/ipl.h> +#include <asm/cio.h> +#include <asm/pci.h> +#include "entry.h" /* * References to section boundaries @@ -41,6 +45,7 @@ struct page_key_data { static struct page_key_data *page_key_data; static struct page_key_data *page_key_rp, *page_key_wp; static unsigned long page_key_rx, page_key_wx; +unsigned long suspend_zero_pages; /* * For each page in the hibernation image one additional byte is @@ -149,6 +154,36 @@ int pfn_is_nosave(unsigned long pfn) return 0; } +/* + * PM notifier callback for suspend + */ +static int suspend_pm_cb(struct notifier_block *nb, unsigned long action, + void *ptr) +{ + switch (action) { + case PM_SUSPEND_PREPARE: + case PM_HIBERNATION_PREPARE: + suspend_zero_pages = __get_free_pages(GFP_KERNEL, LC_ORDER); + if (!suspend_zero_pages) + return NOTIFY_BAD; + break; + case PM_POST_SUSPEND: + case PM_POST_HIBERNATION: + free_pages(suspend_zero_pages, LC_ORDER); + break; + default: + return NOTIFY_DONE; + } + return NOTIFY_OK; +} + +static int __init suspend_pm_init(void) +{ + pm_notifier(suspend_pm_cb, 0); + return 0; +} +arch_initcall(suspend_pm_init); + void save_processor_state(void) { /* swsusp_arch_suspend() actually saves all cpu register contents. @@ -180,3 +215,11 @@ void restore_processor_state(void) __ctl_set_bit(0,28); local_mcck_enable(); } + +/* Called at the end of swsusp_arch_resume */ +void s390_early_resume(void) +{ + lgr_info_log(); + channel_subsystem_reinit(); + zpci_rescan(); +} diff --git a/arch/s390/kernel/switch_cpu.S b/arch/s390/kernel/switch_cpu.S deleted file mode 100644 index bfe070bc765..00000000000 --- a/arch/s390/kernel/switch_cpu.S +++ /dev/null @@ -1,58 +0,0 @@ -/* - * 31-bit switch cpu code - * - * Copyright IBM Corp. 2009 - * - */ - -#include <linux/linkage.h> -#include <asm/asm-offsets.h> -#include <asm/ptrace.h> - -# smp_switch_to_cpu switches to destination cpu and executes the passed function -# Parameter: %r2 - function to call -# %r3 - function parameter -# %r4 - stack pointer -# %r5 - current cpu -# %r6 - destination cpu - - .section .text -ENTRY(smp_switch_to_cpu) - stm %r6,%r15,__SF_GPRS(%r15) - lr %r1,%r15 - ahi %r15,-STACK_FRAME_OVERHEAD - st %r1,__SF_BACKCHAIN(%r15) - basr %r13,0 -0: la %r1,.gprregs_addr-0b(%r13) - l %r1,0(%r1) - stm %r0,%r15,0(%r1) -1: sigp %r0,%r6,__SIGP_RESTART /* start destination CPU */ - brc 2,1b /* busy, try again */ -2: sigp %r0,%r5,__SIGP_STOP /* stop current CPU */ - brc 2,2b /* busy, try again */ -3: j 3b - -ENTRY(smp_restart_cpu) - basr %r13,0 -0: la %r1,.gprregs_addr-0b(%r13) - l %r1,0(%r1) - lm %r0,%r15,0(%r1) -1: sigp %r0,%r5,__SIGP_SENSE /* Wait for calling CPU */ - brc 10,1b /* busy, accepted (status 0), running */ - tmll %r0,0x40 /* Test if calling CPU is stopped */ - jz 1b - ltr %r4,%r4 /* New stack ?
*/ - jz 1f - lr %r15,%r4 -1: lr %r14,%r2 /* r14: Function to call */ - lr %r2,%r3 /* r2 : Parameter for function*/ - basr %r14,%r14 /* Call function */ - -.gprregs_addr: - .long .gprregs - - .section .data,"aw",@progbits -.gprregs: - .rept 16 - .long 0 - .endr diff --git a/arch/s390/kernel/switch_cpu64.S b/arch/s390/kernel/switch_cpu64.S deleted file mode 100644 index fcc42d799e4..00000000000 --- a/arch/s390/kernel/switch_cpu64.S +++ /dev/null @@ -1,51 +0,0 @@ -/* - * 64-bit switch cpu code - * - * Copyright IBM Corp. 2009 - * - */ - -#include <linux/linkage.h> -#include <asm/asm-offsets.h> -#include <asm/ptrace.h> - -# smp_switch_to_cpu switches to destination cpu and executes the passed function -# Parameter: %r2 - function to call -# %r3 - function parameter -# %r4 - stack pointer -# %r5 - current cpu -# %r6 - destination cpu - - .section .text -ENTRY(smp_switch_to_cpu) - stmg %r6,%r15,__SF_GPRS(%r15) - lgr %r1,%r15 - aghi %r15,-STACK_FRAME_OVERHEAD - stg %r1,__SF_BACKCHAIN(%r15) - larl %r1,.gprregs - stmg %r0,%r15,0(%r1) -1: sigp %r0,%r6,__SIGP_RESTART /* start destination CPU */ - brc 2,1b /* busy, try again */ -2: sigp %r0,%r5,__SIGP_STOP /* stop current CPU */ - brc 2,2b /* busy, try again */ -3: j 3b - -ENTRY(smp_restart_cpu) - larl %r1,.gprregs - lmg %r0,%r15,0(%r1) -1: sigp %r0,%r5,__SIGP_SENSE /* Wait for calling CPU */ - brc 10,1b /* busy, accepted (status 0), running */ - tmll %r0,0x40 /* Test if calling CPU is stopped */ - jz 1b - ltgr %r4,%r4 /* New stack ? */ - jz 1f - lgr %r15,%r4 -1: lgr %r14,%r2 /* r14: Function to call */ - lgr %r2,%r3 /* r2 : Parameter for function*/ - basr %r14,%r14 /* Call function */ - - .section .data,"aw",@progbits -.gprregs: - .rept 16 - .quad 0 - .endr diff --git a/arch/s390/kernel/swsusp_asm64.S b/arch/s390/kernel/swsusp_asm64.S index acb78cdee89..6b09fdffbd2 100644 --- a/arch/s390/kernel/swsusp_asm64.S +++ b/arch/s390/kernel/swsusp_asm64.S @@ -12,6 +12,7 @@ #include <asm/ptrace.h> #include <asm/thread_info.h> #include <asm/asm-offsets.h> +#include <asm/sigp.h> /* * Save register context in absolute 0 lowcore and call swsusp_save() to @@ -35,14 +36,14 @@ ENTRY(swsusp_arch_suspend) /* Store prefix register on stack */ stpx __SF_EMPTY(%r15) - /* Save prefix register contents for lowcore */ - llgf %r4,__SF_EMPTY(%r15) + /* Save prefix register contents for lowcore copy */ + llgf %r10,__SF_EMPTY(%r15) /* Get pointer to save area */ lghi %r1,0x1000 /* Save CPU address */ - stap __LC_CPU_ADDRESS(%r0) + stap __LC_EXT_CPU_ADDR(%r0) /* Store registers */ mvc 0x318(4,%r1),__SF_EMPTY(%r15) /* move prefix to lowcore */ @@ -90,7 +91,18 @@ ENTRY(swsusp_arch_suspend) xc __SF_EMPTY(4,%r15),__SF_EMPTY(%r15) spx __SF_EMPTY(%r15) + /* Save absolute zero pages */ + larl %r2,suspend_zero_pages + lg %r2,0(%r2) + lghi %r4,0 + lghi %r3,2*PAGE_SIZE + lghi %r5,2*PAGE_SIZE +1: mvcle %r2,%r4,0 + jo 1b + + /* Copy lowcore to absolute zero lowcore */ lghi %r2,0 + lgr %r4,%r10 lghi %r3,2*PAGE_SIZE lghi %r5,2*PAGE_SIZE 1: mvcle %r2,%r4,0 @@ -163,7 +175,7 @@ ENTRY(swsusp_arch_resume) diag %r0,%r0,0x308 restart_entry: lhi %r1,1 - sigp %r1,%r0,0x12 + sigp %r1,%r0,SIGP_SET_ARCHITECTURE sam64 larl %r1,.Lnew_pgm_check_psw lpswe 0(%r1) @@ -173,15 +185,15 @@ pgm_check_entry: larl %r1,.Lresume_cpu /* Resume CPU address: r2 */ stap 0(%r1) llgh %r2,0(%r1) - llgh %r1,__LC_CPU_ADDRESS(%r0) /* Suspend CPU address: r1 */ + llgh %r1,__LC_EXT_CPU_ADDR(%r0) /* Suspend CPU address: r1 */ cgr %r1,%r2 je restore_registers /* r1 = r2 -> nothing to do */ larl %r4,.Lrestart_suspend_psw /* Set new
restart PSW */ mvc __LC_RST_NEW_PSW(16,%r0),0(%r4) 3: - sigp %r9,%r1,__SIGP_INITIAL_CPU_RESET - brc 8,4f /* accepted */ - brc 2,3b /* busy, try again */ + sigp %r9,%r1,SIGP_INITIAL_CPU_RESET /* sigp initial cpu reset */ + brc 8,4f /* accepted */ + brc 2,3b /* busy, try again */ /* Suspend CPU not available -> panic */ larl %r15,init_thread_union @@ -190,16 +202,16 @@ pgm_check_entry: larl %r3,_sclp_print_early lghi %r1,0 sam31 - sigp %r1,%r0,0x12 + sigp %r1,%r0,SIGP_SET_ARCHITECTURE basr %r14,%r3 larl %r3,.Ldisabled_wait_31 lpsw 0(%r3) 4: /* Switch to suspend CPU */ - sigp %r9,%r1,__SIGP_RESTART /* start suspend CPU */ + sigp %r9,%r1,SIGP_RESTART /* sigp restart to suspend CPU */ brc 2,4b /* busy, try again */ 5: - sigp %r9,%r2,__SIGP_STOP /* stop resume (current) CPU */ + sigp %r9,%r2,SIGP_STOP /* sigp stop to current resume CPU */ brc 2,5b /* busy, try again */ 6: j 6b @@ -207,7 +219,7 @@ restart_suspend: larl %r1,.Lresume_cpu llgh %r2,0(%r1) 7: - sigp %r9,%r2,__SIGP_SENSE /* Wait for resume CPU */ + sigp %r9,%r2,SIGP_SENSE /* sigp sense, wait for resume CPU */ brc 8,7b /* accepted, status 0, still running */ brc 2,7b /* busy, try again */ tmll %r9,0x40 /* Test if resume CPU is stopped */ @@ -247,8 +259,20 @@ restore_registers: /* Load old stack */ lg %r15,0x2f8(%r13) + /* Save prefix register */ + mvc __SF_EMPTY(4,%r15),0x318(%r13) + + /* Restore absolute zero pages */ + lghi %r2,0 + larl %r4,suspend_zero_pages + lg %r4,0(%r4) + lghi %r3,2*PAGE_SIZE + lghi %r5,2*PAGE_SIZE +1: mvcle %r2,%r4,0 + jo 1b + /* Restore prefix register */ - spx 0x318(%r13) + spx __SF_EMPTY(%r15) /* Activate DAT */ stosm __SF_EMPTY(%r15),0x04 @@ -257,8 +281,8 @@ restore_registers: lghi %r2,0 brasl %r14,arch_set_page_states - /* Reinitialize the channel subsystem */ - brasl %r14,channel_subsystem_reinit + /* Call arch specific early resume code */ + brasl %r14,s390_early_resume /* Return 0 */ lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15) diff --git a/arch/s390/kernel/sys_s390.c b/arch/s390/kernel/sys_s390.c index 78ea1948ff5..23eb222c165 100644 --- a/arch/s390/kernel/sys_s390.c +++ b/arch/s390/kernel/sys_s390.c @@ -1,8 +1,6 @@ /* - * arch/s390/kernel/sys_s390.c - * * S390 version - * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Copyright IBM Corp. 
1999, 2000 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), * Thomas Spatzier (tspat@de.ibm.com) * @@ -83,11 +81,12 @@ SYSCALL_DEFINE1(s390_personality, unsigned int, personality) { unsigned int ret; - if (current->personality == PER_LINUX32 && personality == PER_LINUX) - personality = PER_LINUX32; + if (personality(current->personality) == PER_LINUX32 && + personality(personality) == PER_LINUX) + personality |= PER_LINUX32; ret = sys_personality(personality); - if (ret == PER_LINUX32) - ret = PER_LINUX; + if (personality(ret) == PER_LINUX32) + ret &= ~PER_LINUX32; return ret; } @@ -133,19 +132,9 @@ SYSCALL_DEFINE1(s390_fadvise64_64, struct fadvise64_64_args __user *, args) * to * %r2: fd, %r3: mode, %r4/%r5: offset, 96(%r15)-103(%r15): len */ -SYSCALL_DEFINE(s390_fallocate)(int fd, int mode, loff_t offset, - u32 len_high, u32 len_low) +SYSCALL_DEFINE5(s390_fallocate, int, fd, int, mode, loff_t, offset, + u32, len_high, u32, len_low) { return sys_fallocate(fd, mode, offset, ((u64)len_high << 32) | len_low); } -#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS -asmlinkage long SyS_s390_fallocate(long fd, long mode, loff_t offset, - long len_high, long len_low) -{ - return SYSC_s390_fallocate((int) fd, (int) mode, offset, - (u32) len_high, (u32) len_low); -} -SYSCALL_ALIAS(sys_s390_fallocate, SyS_s390_fallocate); -#endif - #endif diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S index bcab2f04ba5..fe5cdf29a00 100644 --- a/arch/s390/kernel/syscalls.S +++ b/arch/s390/kernel/syscalls.S @@ -9,344 +9,350 @@ #define NI_SYSCALL SYSCALL(sys_ni_syscall,sys_ni_syscall,sys_ni_syscall) NI_SYSCALL /* 0 */ -SYSCALL(sys_exit,sys_exit,sys32_exit_wrapper) +SYSCALL(sys_exit,sys_exit,compat_sys_exit) SYSCALL(sys_fork,sys_fork,sys_fork) -SYSCALL(sys_read,sys_read,sys32_read_wrapper) -SYSCALL(sys_write,sys_write,sys32_write_wrapper) -SYSCALL(sys_open,sys_open,sys32_open_wrapper) /* 5 */ -SYSCALL(sys_close,sys_close,sys32_close_wrapper) +SYSCALL(sys_read,sys_read,compat_sys_s390_read) +SYSCALL(sys_write,sys_write,compat_sys_s390_write) +SYSCALL(sys_open,sys_open,compat_sys_open) /* 5 */ +SYSCALL(sys_close,sys_close,compat_sys_close) SYSCALL(sys_restart_syscall,sys_restart_syscall,sys_restart_syscall) -SYSCALL(sys_creat,sys_creat,sys32_creat_wrapper) -SYSCALL(sys_link,sys_link,sys32_link_wrapper) -SYSCALL(sys_unlink,sys_unlink,sys32_unlink_wrapper) /* 10 */ -SYSCALL(sys_execve,sys_execve,sys32_execve_wrapper) -SYSCALL(sys_chdir,sys_chdir,sys32_chdir_wrapper) -SYSCALL(sys_time,sys_ni_syscall,sys32_time_wrapper) /* old time syscall */ -SYSCALL(sys_mknod,sys_mknod,sys32_mknod_wrapper) -SYSCALL(sys_chmod,sys_chmod,sys32_chmod_wrapper) /* 15 */ -SYSCALL(sys_lchown16,sys_ni_syscall,sys32_lchown16_wrapper) /* old lchown16 syscall*/ +SYSCALL(sys_creat,sys_creat,compat_sys_creat) +SYSCALL(sys_link,sys_link,compat_sys_link) +SYSCALL(sys_unlink,sys_unlink,compat_sys_unlink) /* 10 */ +SYSCALL(sys_execve,sys_execve,compat_sys_execve) +SYSCALL(sys_chdir,sys_chdir,compat_sys_chdir) +SYSCALL(sys_time,sys_ni_syscall,compat_sys_time) /* old time syscall */ +SYSCALL(sys_mknod,sys_mknod,compat_sys_mknod) +SYSCALL(sys_chmod,sys_chmod,compat_sys_chmod) /* 15 */ +SYSCALL(sys_lchown16,sys_ni_syscall,compat_sys_s390_lchown16) /* old lchown16 syscall*/ NI_SYSCALL /* old break syscall holder */ NI_SYSCALL /* old stat syscall holder */ -SYSCALL(sys_lseek,sys_lseek,sys32_lseek_wrapper) +SYSCALL(sys_lseek,sys_lseek,compat_sys_lseek) SYSCALL(sys_getpid,sys_getpid,sys_getpid) /* 20 */ 
-SYSCALL(sys_mount,sys_mount,sys32_mount_wrapper) -SYSCALL(sys_oldumount,sys_oldumount,sys32_oldumount_wrapper) -SYSCALL(sys_setuid16,sys_ni_syscall,sys32_setuid16_wrapper) /* old setuid16 syscall*/ -SYSCALL(sys_getuid16,sys_ni_syscall,sys32_getuid16) /* old getuid16 syscall*/ -SYSCALL(sys_stime,sys_ni_syscall,sys32_stime_wrapper) /* 25 old stime syscall */ -SYSCALL(sys_ptrace,sys_ptrace,sys32_ptrace_wrapper) -SYSCALL(sys_alarm,sys_alarm,sys32_alarm_wrapper) +SYSCALL(sys_mount,sys_mount,compat_sys_mount) +SYSCALL(sys_oldumount,sys_oldumount,compat_sys_oldumount) +SYSCALL(sys_setuid16,sys_ni_syscall,compat_sys_s390_setuid16) /* old setuid16 syscall*/ +SYSCALL(sys_getuid16,sys_ni_syscall,compat_sys_s390_getuid16) /* old getuid16 syscall*/ +SYSCALL(sys_stime,sys_ni_syscall,compat_sys_stime) /* 25 old stime syscall */ +SYSCALL(sys_ptrace,sys_ptrace,compat_sys_ptrace) +SYSCALL(sys_alarm,sys_alarm,compat_sys_alarm) NI_SYSCALL /* old fstat syscall */ SYSCALL(sys_pause,sys_pause,sys_pause) -SYSCALL(sys_utime,sys_utime,compat_sys_utime_wrapper) /* 30 */ +SYSCALL(sys_utime,sys_utime,compat_sys_utime) /* 30 */ NI_SYSCALL /* old stty syscall */ NI_SYSCALL /* old gtty syscall */ -SYSCALL(sys_access,sys_access,sys32_access_wrapper) -SYSCALL(sys_nice,sys_nice,sys32_nice_wrapper) +SYSCALL(sys_access,sys_access,compat_sys_access) +SYSCALL(sys_nice,sys_nice,compat_sys_nice) NI_SYSCALL /* 35 old ftime syscall */ SYSCALL(sys_sync,sys_sync,sys_sync) -SYSCALL(sys_kill,sys_kill,sys32_kill_wrapper) -SYSCALL(sys_rename,sys_rename,sys32_rename_wrapper) -SYSCALL(sys_mkdir,sys_mkdir,sys32_mkdir_wrapper) -SYSCALL(sys_rmdir,sys_rmdir,sys32_rmdir_wrapper) /* 40 */ -SYSCALL(sys_dup,sys_dup,sys32_dup_wrapper) -SYSCALL(sys_pipe,sys_pipe,sys32_pipe_wrapper) -SYSCALL(sys_times,sys_times,compat_sys_times_wrapper) +SYSCALL(sys_kill,sys_kill,compat_sys_kill) +SYSCALL(sys_rename,sys_rename,compat_sys_rename) +SYSCALL(sys_mkdir,sys_mkdir,compat_sys_mkdir) +SYSCALL(sys_rmdir,sys_rmdir,compat_sys_rmdir) /* 40 */ +SYSCALL(sys_dup,sys_dup,compat_sys_dup) +SYSCALL(sys_pipe,sys_pipe,compat_sys_pipe) +SYSCALL(sys_times,sys_times,compat_sys_times) NI_SYSCALL /* old prof syscall */ -SYSCALL(sys_brk,sys_brk,sys32_brk_wrapper) /* 45 */ -SYSCALL(sys_setgid16,sys_ni_syscall,sys32_setgid16_wrapper) /* old setgid16 syscall*/ -SYSCALL(sys_getgid16,sys_ni_syscall,sys32_getgid16) /* old getgid16 syscall*/ -SYSCALL(sys_signal,sys_signal,sys32_signal_wrapper) -SYSCALL(sys_geteuid16,sys_ni_syscall,sys32_geteuid16) /* old geteuid16 syscall */ -SYSCALL(sys_getegid16,sys_ni_syscall,sys32_getegid16) /* 50 old getegid16 syscall */ -SYSCALL(sys_acct,sys_acct,sys32_acct_wrapper) -SYSCALL(sys_umount,sys_umount,sys32_umount_wrapper) +SYSCALL(sys_brk,sys_brk,compat_sys_brk) /* 45 */ +SYSCALL(sys_setgid16,sys_ni_syscall,compat_sys_s390_setgid16) /* old setgid16 syscall*/ +SYSCALL(sys_getgid16,sys_ni_syscall,compat_sys_s390_getgid16) /* old getgid16 syscall*/ +SYSCALL(sys_signal,sys_signal,compat_sys_signal) +SYSCALL(sys_geteuid16,sys_ni_syscall,compat_sys_s390_geteuid16) /* old geteuid16 syscall */ +SYSCALL(sys_getegid16,sys_ni_syscall,compat_sys_s390_getegid16) /* 50 old getegid16 syscall */ +SYSCALL(sys_acct,sys_acct,compat_sys_acct) +SYSCALL(sys_umount,sys_umount,compat_sys_umount) NI_SYSCALL /* old lock syscall */ -SYSCALL(sys_ioctl,sys_ioctl,compat_sys_ioctl_wrapper) -SYSCALL(sys_fcntl,sys_fcntl,compat_sys_fcntl_wrapper) /* 55 */ +SYSCALL(sys_ioctl,sys_ioctl,compat_sys_ioctl) +SYSCALL(sys_fcntl,sys_fcntl,compat_sys_fcntl) /* 55 */ NI_SYSCALL /* intel mpx 
syscall */ -SYSCALL(sys_setpgid,sys_setpgid,sys32_setpgid_wrapper) +SYSCALL(sys_setpgid,sys_setpgid,compat_sys_setpgid) NI_SYSCALL /* old ulimit syscall */ NI_SYSCALL /* old uname syscall */ -SYSCALL(sys_umask,sys_umask,sys32_umask_wrapper) /* 60 */ -SYSCALL(sys_chroot,sys_chroot,sys32_chroot_wrapper) -SYSCALL(sys_ustat,sys_ustat,sys32_ustat_wrapper) -SYSCALL(sys_dup2,sys_dup2,sys32_dup2_wrapper) +SYSCALL(sys_umask,sys_umask,compat_sys_umask) /* 60 */ +SYSCALL(sys_chroot,sys_chroot,compat_sys_chroot) +SYSCALL(sys_ustat,sys_ustat,compat_sys_ustat) +SYSCALL(sys_dup2,sys_dup2,compat_sys_dup2) SYSCALL(sys_getppid,sys_getppid,sys_getppid) SYSCALL(sys_getpgrp,sys_getpgrp,sys_getpgrp) /* 65 */ SYSCALL(sys_setsid,sys_setsid,sys_setsid) -SYSCALL(sys_sigaction,sys_sigaction,sys32_sigaction_wrapper) +SYSCALL(sys_sigaction,sys_sigaction,compat_sys_sigaction) NI_SYSCALL /* old sgetmask syscall*/ NI_SYSCALL /* old ssetmask syscall*/ -SYSCALL(sys_setreuid16,sys_ni_syscall,sys32_setreuid16_wrapper) /* old setreuid16 syscall */ -SYSCALL(sys_setregid16,sys_ni_syscall,sys32_setregid16_wrapper) /* old setregid16 syscall */ -SYSCALL(sys_sigsuspend,sys_sigsuspend,sys_sigsuspend_wrapper) -SYSCALL(sys_sigpending,sys_sigpending,compat_sys_sigpending_wrapper) -SYSCALL(sys_sethostname,sys_sethostname,sys32_sethostname_wrapper) -SYSCALL(sys_setrlimit,sys_setrlimit,compat_sys_setrlimit_wrapper) /* 75 */ -SYSCALL(sys_old_getrlimit,sys_getrlimit,compat_sys_old_getrlimit_wrapper) -SYSCALL(sys_getrusage,sys_getrusage,compat_sys_getrusage_wrapper) -SYSCALL(sys_gettimeofday,sys_gettimeofday,compat_sys_gettimeofday_wrapper) -SYSCALL(sys_settimeofday,sys_settimeofday,compat_sys_settimeofday_wrapper) -SYSCALL(sys_getgroups16,sys_ni_syscall,sys32_getgroups16_wrapper) /* 80 old getgroups16 syscall */ -SYSCALL(sys_setgroups16,sys_ni_syscall,sys32_setgroups16_wrapper) /* old setgroups16 syscall */ +SYSCALL(sys_setreuid16,sys_ni_syscall,compat_sys_s390_setreuid16) /* old setreuid16 syscall */ +SYSCALL(sys_setregid16,sys_ni_syscall,compat_sys_s390_setregid16) /* old setregid16 syscall */ +SYSCALL(sys_sigsuspend,sys_sigsuspend,compat_sys_sigsuspend) +SYSCALL(sys_sigpending,sys_sigpending,compat_sys_sigpending) +SYSCALL(sys_sethostname,sys_sethostname,compat_sys_sethostname) +SYSCALL(sys_setrlimit,sys_setrlimit,compat_sys_setrlimit) /* 75 */ +SYSCALL(sys_old_getrlimit,sys_getrlimit,compat_sys_old_getrlimit) +SYSCALL(sys_getrusage,sys_getrusage,compat_sys_getrusage) +SYSCALL(sys_gettimeofday,sys_gettimeofday,compat_sys_gettimeofday) +SYSCALL(sys_settimeofday,sys_settimeofday,compat_sys_settimeofday) +SYSCALL(sys_getgroups16,sys_ni_syscall,compat_sys_s390_getgroups16) /* 80 old getgroups16 syscall */ +SYSCALL(sys_setgroups16,sys_ni_syscall,compat_sys_s390_setgroups16) /* old setgroups16 syscall */ NI_SYSCALL /* old select syscall */ -SYSCALL(sys_symlink,sys_symlink,sys32_symlink_wrapper) +SYSCALL(sys_symlink,sys_symlink,compat_sys_symlink) NI_SYSCALL /* old lstat syscall */ -SYSCALL(sys_readlink,sys_readlink,sys32_readlink_wrapper) /* 85 */ -SYSCALL(sys_uselib,sys_uselib,sys32_uselib_wrapper) -SYSCALL(sys_swapon,sys_swapon,sys32_swapon_wrapper) -SYSCALL(sys_reboot,sys_reboot,sys32_reboot_wrapper) -SYSCALL(sys_ni_syscall,sys_ni_syscall,old32_readdir_wrapper) /* old readdir syscall */ -SYSCALL(sys_old_mmap,sys_old_mmap,old32_mmap_wrapper) /* 90 */ -SYSCALL(sys_munmap,sys_munmap,sys32_munmap_wrapper) -SYSCALL(sys_truncate,sys_truncate,sys32_truncate_wrapper) -SYSCALL(sys_ftruncate,sys_ftruncate,sys32_ftruncate_wrapper) 
-SYSCALL(sys_fchmod,sys_fchmod,sys32_fchmod_wrapper)
-SYSCALL(sys_fchown16,sys_ni_syscall,sys32_fchown16_wrapper) /* 95 old fchown16 syscall*/
-SYSCALL(sys_getpriority,sys_getpriority,sys32_getpriority_wrapper)
-SYSCALL(sys_setpriority,sys_setpriority,sys32_setpriority_wrapper)
+SYSCALL(sys_readlink,sys_readlink,compat_sys_readlink) /* 85 */
+SYSCALL(sys_uselib,sys_uselib,compat_sys_uselib)
+SYSCALL(sys_swapon,sys_swapon,compat_sys_swapon)
+SYSCALL(sys_reboot,sys_reboot,compat_sys_reboot)
+SYSCALL(sys_ni_syscall,sys_ni_syscall,compat_sys_old_readdir) /* old readdir syscall */
+SYSCALL(sys_old_mmap,sys_old_mmap,compat_sys_s390_old_mmap) /* 90 */
+SYSCALL(sys_munmap,sys_munmap,compat_sys_munmap)
+SYSCALL(sys_truncate,sys_truncate,compat_sys_truncate)
+SYSCALL(sys_ftruncate,sys_ftruncate,compat_sys_ftruncate)
+SYSCALL(sys_fchmod,sys_fchmod,compat_sys_fchmod)
+SYSCALL(sys_fchown16,sys_ni_syscall,compat_sys_s390_fchown16) /* 95 old fchown16 syscall*/
+SYSCALL(sys_getpriority,sys_getpriority,compat_sys_getpriority)
+SYSCALL(sys_setpriority,sys_setpriority,compat_sys_setpriority)
NI_SYSCALL /* old profil syscall */
-SYSCALL(sys_statfs,sys_statfs,compat_sys_statfs_wrapper)
-SYSCALL(sys_fstatfs,sys_fstatfs,compat_sys_fstatfs_wrapper) /* 100 */
+SYSCALL(sys_statfs,sys_statfs,compat_sys_statfs)
+SYSCALL(sys_fstatfs,sys_fstatfs,compat_sys_fstatfs) /* 100 */
NI_SYSCALL /* ioperm for i386 */
-SYSCALL(sys_socketcall,sys_socketcall,compat_sys_socketcall_wrapper)
-SYSCALL(sys_syslog,sys_syslog,sys32_syslog_wrapper)
-SYSCALL(sys_setitimer,sys_setitimer,compat_sys_setitimer_wrapper)
-SYSCALL(sys_getitimer,sys_getitimer,compat_sys_getitimer_wrapper) /* 105 */
-SYSCALL(sys_newstat,sys_newstat,compat_sys_newstat_wrapper)
-SYSCALL(sys_newlstat,sys_newlstat,compat_sys_newlstat_wrapper)
-SYSCALL(sys_newfstat,sys_newfstat,compat_sys_newfstat_wrapper)
+SYSCALL(sys_socketcall,sys_socketcall,compat_sys_socketcall)
+SYSCALL(sys_syslog,sys_syslog,compat_sys_syslog)
+SYSCALL(sys_setitimer,sys_setitimer,compat_sys_setitimer)
+SYSCALL(sys_getitimer,sys_getitimer,compat_sys_getitimer) /* 105 */
+SYSCALL(sys_newstat,sys_newstat,compat_sys_newstat)
+SYSCALL(sys_newlstat,sys_newlstat,compat_sys_newlstat)
+SYSCALL(sys_newfstat,sys_newfstat,compat_sys_newfstat)
NI_SYSCALL /* old uname syscall */
-SYSCALL(sys_lookup_dcookie,sys_lookup_dcookie,sys32_lookup_dcookie_wrapper) /* 110 */
+SYSCALL(sys_lookup_dcookie,sys_lookup_dcookie,compat_sys_lookup_dcookie) /* 110 */
SYSCALL(sys_vhangup,sys_vhangup,sys_vhangup)
NI_SYSCALL /* old "idle" system call */
NI_SYSCALL /* vm86old for i386 */
-SYSCALL(sys_wait4,sys_wait4,compat_sys_wait4_wrapper)
-SYSCALL(sys_swapoff,sys_swapoff,sys32_swapoff_wrapper) /* 115 */
-SYSCALL(sys_sysinfo,sys_sysinfo,compat_sys_sysinfo_wrapper)
-SYSCALL(sys_s390_ipc,sys_s390_ipc,sys32_ipc_wrapper)
-SYSCALL(sys_fsync,sys_fsync,sys32_fsync_wrapper)
-SYSCALL(sys_sigreturn,sys_sigreturn,sys32_sigreturn)
-SYSCALL(sys_clone,sys_clone,sys_clone_wrapper) /* 120 */
-SYSCALL(sys_setdomainname,sys_setdomainname,sys32_setdomainname_wrapper)
-SYSCALL(sys_newuname,sys_newuname,sys32_newuname_wrapper)
+SYSCALL(sys_wait4,sys_wait4,compat_sys_wait4)
+SYSCALL(sys_swapoff,sys_swapoff,compat_sys_swapoff) /* 115 */
+SYSCALL(sys_sysinfo,sys_sysinfo,compat_sys_sysinfo)
+SYSCALL(sys_s390_ipc,sys_s390_ipc,compat_sys_s390_ipc)
+SYSCALL(sys_fsync,sys_fsync,compat_sys_fsync)
+SYSCALL(sys_sigreturn,sys_sigreturn,compat_sys_sigreturn)
+SYSCALL(sys_clone,sys_clone,compat_sys_clone) /* 120 */
+SYSCALL(sys_setdomainname,sys_setdomainname,compat_sys_setdomainname)
+SYSCALL(sys_newuname,sys_newuname,compat_sys_newuname)
NI_SYSCALL /* modify_ldt for i386 */
-SYSCALL(sys_adjtimex,sys_adjtimex,compat_sys_adjtimex_wrapper)
-SYSCALL(sys_mprotect,sys_mprotect,sys32_mprotect_wrapper) /* 125 */
-SYSCALL(sys_sigprocmask,sys_sigprocmask,compat_sys_sigprocmask_wrapper)
+SYSCALL(sys_adjtimex,sys_adjtimex,compat_sys_adjtimex)
+SYSCALL(sys_mprotect,sys_mprotect,compat_sys_mprotect) /* 125 */
+SYSCALL(sys_sigprocmask,sys_sigprocmask,compat_sys_sigprocmask)
NI_SYSCALL /* old "create module" */
-SYSCALL(sys_init_module,sys_init_module,sys_init_module_wrapper)
-SYSCALL(sys_delete_module,sys_delete_module,sys_delete_module_wrapper)
+SYSCALL(sys_init_module,sys_init_module,compat_sys_init_module)
+SYSCALL(sys_delete_module,sys_delete_module,compat_sys_delete_module)
NI_SYSCALL /* 130: old get_kernel_syms */
-SYSCALL(sys_quotactl,sys_quotactl,sys32_quotactl_wrapper)
-SYSCALL(sys_getpgid,sys_getpgid,sys32_getpgid_wrapper)
-SYSCALL(sys_fchdir,sys_fchdir,sys32_fchdir_wrapper)
-SYSCALL(sys_bdflush,sys_bdflush,sys32_bdflush_wrapper)
-SYSCALL(sys_sysfs,sys_sysfs,sys32_sysfs_wrapper) /* 135 */
-SYSCALL(sys_personality,sys_s390_personality,sys32_personality_wrapper)
+SYSCALL(sys_quotactl,sys_quotactl,compat_sys_quotactl)
+SYSCALL(sys_getpgid,sys_getpgid,compat_sys_getpgid)
+SYSCALL(sys_fchdir,sys_fchdir,compat_sys_fchdir)
+SYSCALL(sys_bdflush,sys_bdflush,compat_sys_bdflush)
+SYSCALL(sys_sysfs,sys_sysfs,compat_sys_sysfs) /* 135 */
+SYSCALL(sys_personality,sys_s390_personality,compat_sys_s390_personality)
NI_SYSCALL /* for afs_syscall */
-SYSCALL(sys_setfsuid16,sys_ni_syscall,sys32_setfsuid16_wrapper) /* old setfsuid16 syscall */
-SYSCALL(sys_setfsgid16,sys_ni_syscall,sys32_setfsgid16_wrapper) /* old setfsgid16 syscall */
-SYSCALL(sys_llseek,sys_llseek,sys32_llseek_wrapper) /* 140 */
-SYSCALL(sys_getdents,sys_getdents,sys32_getdents_wrapper)
-SYSCALL(sys_select,sys_select,compat_sys_select_wrapper)
-SYSCALL(sys_flock,sys_flock,sys32_flock_wrapper)
-SYSCALL(sys_msync,sys_msync,sys32_msync_wrapper)
-SYSCALL(sys_readv,sys_readv,compat_sys_readv_wrapper) /* 145 */
-SYSCALL(sys_writev,sys_writev,compat_sys_writev_wrapper)
-SYSCALL(sys_getsid,sys_getsid,sys32_getsid_wrapper)
-SYSCALL(sys_fdatasync,sys_fdatasync,sys32_fdatasync_wrapper)
-SYSCALL(sys_sysctl,sys_sysctl,sys32_sysctl_wrapper)
-SYSCALL(sys_mlock,sys_mlock,sys32_mlock_wrapper) /* 150 */
-SYSCALL(sys_munlock,sys_munlock,sys32_munlock_wrapper)
-SYSCALL(sys_mlockall,sys_mlockall,sys32_mlockall_wrapper)
+SYSCALL(sys_setfsuid16,sys_ni_syscall,compat_sys_s390_setfsuid16) /* old setfsuid16 syscall */
+SYSCALL(sys_setfsgid16,sys_ni_syscall,compat_sys_s390_setfsgid16) /* old setfsgid16 syscall */
+SYSCALL(sys_llseek,sys_llseek,compat_sys_llseek) /* 140 */
+SYSCALL(sys_getdents,sys_getdents,compat_sys_getdents)
+SYSCALL(sys_select,sys_select,compat_sys_select)
+SYSCALL(sys_flock,sys_flock,compat_sys_flock)
+SYSCALL(sys_msync,sys_msync,compat_sys_msync)
+SYSCALL(sys_readv,sys_readv,compat_sys_readv) /* 145 */
+SYSCALL(sys_writev,sys_writev,compat_sys_writev)
+SYSCALL(sys_getsid,sys_getsid,compat_sys_getsid)
+SYSCALL(sys_fdatasync,sys_fdatasync,compat_sys_fdatasync)
+SYSCALL(sys_sysctl,sys_sysctl,compat_sys_sysctl)
+SYSCALL(sys_mlock,sys_mlock,compat_sys_mlock) /* 150 */
+SYSCALL(sys_munlock,sys_munlock,compat_sys_munlock)
+SYSCALL(sys_mlockall,sys_mlockall,compat_sys_mlockall)
SYSCALL(sys_munlockall,sys_munlockall,sys_munlockall)
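
Each SYSCALL() line above carries three entry points for one system call number: the 31-bit native handler, the 64-bit handler, and the entry used when a 31-bit compat binary runs on a 64-bit kernel; NI_SYSCALL marks unimplemented slots. A minimal sketch of how such a three-column table macro can be expanded twice, once for declarations and once for the table itself -- the header name example_syscalls.h and the table name are illustrative assumptions, not the kernel's actual definitions:

/* Hypothetical X-macro expansion of a SYSCALL(native31,native64,compat) list. */
#define NI_SYSCALL SYSCALL(sys_ni_syscall, sys_ni_syscall, sys_ni_syscall)

#define SYSCALL(esa, esame, emu) long esame(void);	/* declare 64-bit handlers */
#include "example_syscalls.h"
#undef SYSCALL

#define SYSCALL(esa, esame, emu) (long) esame,		/* emit one table slot */
static const long example_sys_call_table[] = {
#include "example_syscalls.h"
};
#undef SYSCALL
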
-SYSCALL(sys_sched_setparam,sys_sched_setparam,sys32_sched_setparam_wrapper)
-SYSCALL(sys_sched_getparam,sys_sched_getparam,sys32_sched_getparam_wrapper) /* 155 */
-SYSCALL(sys_sched_setscheduler,sys_sched_setscheduler,sys32_sched_setscheduler_wrapper)
-SYSCALL(sys_sched_getscheduler,sys_sched_getscheduler,sys32_sched_getscheduler_wrapper)
+SYSCALL(sys_sched_setparam,sys_sched_setparam,compat_sys_sched_setparam)
+SYSCALL(sys_sched_getparam,sys_sched_getparam,compat_sys_sched_getparam) /* 155 */
+SYSCALL(sys_sched_setscheduler,sys_sched_setscheduler,compat_sys_sched_setscheduler)
+SYSCALL(sys_sched_getscheduler,sys_sched_getscheduler,compat_sys_sched_getscheduler)
SYSCALL(sys_sched_yield,sys_sched_yield,sys_sched_yield)
-SYSCALL(sys_sched_get_priority_max,sys_sched_get_priority_max,sys32_sched_get_priority_max_wrapper)
-SYSCALL(sys_sched_get_priority_min,sys_sched_get_priority_min,sys32_sched_get_priority_min_wrapper) /* 160 */
-SYSCALL(sys_sched_rr_get_interval,sys_sched_rr_get_interval,sys32_sched_rr_get_interval_wrapper)
-SYSCALL(sys_nanosleep,sys_nanosleep,compat_sys_nanosleep_wrapper)
-SYSCALL(sys_mremap,sys_mremap,sys32_mremap_wrapper)
-SYSCALL(sys_setresuid16,sys_ni_syscall,sys32_setresuid16_wrapper) /* old setresuid16 syscall */
-SYSCALL(sys_getresuid16,sys_ni_syscall,sys32_getresuid16_wrapper) /* 165 old getresuid16 syscall */
+SYSCALL(sys_sched_get_priority_max,sys_sched_get_priority_max,compat_sys_sched_get_priority_max)
+SYSCALL(sys_sched_get_priority_min,sys_sched_get_priority_min,compat_sys_sched_get_priority_min) /* 160 */
+SYSCALL(sys_sched_rr_get_interval,sys_sched_rr_get_interval,compat_sys_sched_rr_get_interval)
+SYSCALL(sys_nanosleep,sys_nanosleep,compat_sys_nanosleep)
+SYSCALL(sys_mremap,sys_mremap,compat_sys_mremap)
+SYSCALL(sys_setresuid16,sys_ni_syscall,compat_sys_s390_setresuid16) /* old setresuid16 syscall */
+SYSCALL(sys_getresuid16,sys_ni_syscall,compat_sys_s390_getresuid16) /* 165 old getresuid16 syscall */
NI_SYSCALL /* for vm86 */
NI_SYSCALL /* old sys_query_module */
-SYSCALL(sys_poll,sys_poll,sys32_poll_wrapper)
+SYSCALL(sys_poll,sys_poll,compat_sys_poll)
NI_SYSCALL /* old nfsservctl */
-SYSCALL(sys_setresgid16,sys_ni_syscall,sys32_setresgid16_wrapper) /* 170 old setresgid16 syscall */
-SYSCALL(sys_getresgid16,sys_ni_syscall,sys32_getresgid16_wrapper) /* old getresgid16 syscall */
-SYSCALL(sys_prctl,sys_prctl,sys32_prctl_wrapper)
-SYSCALL(sys_rt_sigreturn,sys_rt_sigreturn,sys32_rt_sigreturn)
-SYSCALL(sys_rt_sigaction,sys_rt_sigaction,sys32_rt_sigaction_wrapper)
-SYSCALL(sys_rt_sigprocmask,sys_rt_sigprocmask,sys32_rt_sigprocmask_wrapper) /* 175 */
-SYSCALL(sys_rt_sigpending,sys_rt_sigpending,sys32_rt_sigpending_wrapper)
-SYSCALL(sys_rt_sigtimedwait,sys_rt_sigtimedwait,compat_sys_rt_sigtimedwait_wrapper)
-SYSCALL(sys_rt_sigqueueinfo,sys_rt_sigqueueinfo,sys32_rt_sigqueueinfo_wrapper)
-SYSCALL(sys_rt_sigsuspend,sys_rt_sigsuspend,compat_sys_rt_sigsuspend_wrapper)
-SYSCALL(sys_pread64,sys_pread64,sys32_pread64_wrapper) /* 180 */
-SYSCALL(sys_pwrite64,sys_pwrite64,sys32_pwrite64_wrapper)
-SYSCALL(sys_chown16,sys_ni_syscall,sys32_chown16_wrapper) /* old chown16 syscall */
-SYSCALL(sys_getcwd,sys_getcwd,sys32_getcwd_wrapper)
-SYSCALL(sys_capget,sys_capget,sys32_capget_wrapper)
-SYSCALL(sys_capset,sys_capset,sys32_capset_wrapper) /* 185 */
-SYSCALL(sys_sigaltstack,sys_sigaltstack,sys32_sigaltstack_wrapper)
-SYSCALL(sys_sendfile,sys_sendfile64,sys32_sendfile_wrapper)
+SYSCALL(sys_setresgid16,sys_ni_syscall,compat_sys_s390_setresgid16) /* 170 old setresgid16 syscall */
+SYSCALL(sys_getresgid16,sys_ni_syscall,compat_sys_s390_getresgid16) /* old getresgid16 syscall */
+SYSCALL(sys_prctl,sys_prctl,compat_sys_prctl)
+SYSCALL(sys_rt_sigreturn,sys_rt_sigreturn,compat_sys_rt_sigreturn)
+SYSCALL(sys_rt_sigaction,sys_rt_sigaction,compat_sys_rt_sigaction)
+SYSCALL(sys_rt_sigprocmask,sys_rt_sigprocmask,compat_sys_rt_sigprocmask) /* 175 */
+SYSCALL(sys_rt_sigpending,sys_rt_sigpending,compat_sys_rt_sigpending)
+SYSCALL(sys_rt_sigtimedwait,sys_rt_sigtimedwait,compat_sys_rt_sigtimedwait)
+SYSCALL(sys_rt_sigqueueinfo,sys_rt_sigqueueinfo,compat_sys_rt_sigqueueinfo)
+SYSCALL(sys_rt_sigsuspend,sys_rt_sigsuspend,compat_sys_rt_sigsuspend)
+SYSCALL(sys_pread64,sys_pread64,compat_sys_s390_pread64) /* 180 */
+SYSCALL(sys_pwrite64,sys_pwrite64,compat_sys_s390_pwrite64)
+SYSCALL(sys_chown16,sys_ni_syscall,compat_sys_s390_chown16) /* old chown16 syscall */
+SYSCALL(sys_getcwd,sys_getcwd,compat_sys_getcwd)
+SYSCALL(sys_capget,sys_capget,compat_sys_capget)
+SYSCALL(sys_capset,sys_capset,compat_sys_capset) /* 185 */
+SYSCALL(sys_sigaltstack,sys_sigaltstack,compat_sys_sigaltstack)
+SYSCALL(sys_sendfile,sys_sendfile64,compat_sys_sendfile)
NI_SYSCALL /* streams1 */
NI_SYSCALL /* streams2 */
SYSCALL(sys_vfork,sys_vfork,sys_vfork) /* 190 */
-SYSCALL(sys_getrlimit,sys_getrlimit,compat_sys_getrlimit_wrapper)
-SYSCALL(sys_mmap2,sys_mmap2,sys32_mmap2_wrapper)
-SYSCALL(sys_truncate64,sys_ni_syscall,sys32_truncate64_wrapper)
-SYSCALL(sys_ftruncate64,sys_ni_syscall,sys32_ftruncate64_wrapper)
-SYSCALL(sys_stat64,sys_ni_syscall,sys32_stat64_wrapper) /* 195 */
-SYSCALL(sys_lstat64,sys_ni_syscall,sys32_lstat64_wrapper)
-SYSCALL(sys_fstat64,sys_ni_syscall,sys32_fstat64_wrapper)
-SYSCALL(sys_lchown,sys_lchown,sys32_lchown_wrapper)
+SYSCALL(sys_getrlimit,sys_getrlimit,compat_sys_getrlimit)
+SYSCALL(sys_mmap2,sys_mmap2,compat_sys_s390_mmap2)
+SYSCALL(sys_truncate64,sys_ni_syscall,compat_sys_s390_truncate64)
+SYSCALL(sys_ftruncate64,sys_ni_syscall,compat_sys_s390_ftruncate64)
+SYSCALL(sys_stat64,sys_ni_syscall,compat_sys_s390_stat64) /* 195 */
+SYSCALL(sys_lstat64,sys_ni_syscall,compat_sys_s390_lstat64)
+SYSCALL(sys_fstat64,sys_ni_syscall,compat_sys_s390_fstat64)
+SYSCALL(sys_lchown,sys_lchown,compat_sys_lchown)
SYSCALL(sys_getuid,sys_getuid,sys_getuid)
SYSCALL(sys_getgid,sys_getgid,sys_getgid) /* 200 */
SYSCALL(sys_geteuid,sys_geteuid,sys_geteuid)
SYSCALL(sys_getegid,sys_getegid,sys_getegid)
-SYSCALL(sys_setreuid,sys_setreuid,sys32_setreuid_wrapper)
-SYSCALL(sys_setregid,sys_setregid,sys32_setregid_wrapper)
-SYSCALL(sys_getgroups,sys_getgroups,sys32_getgroups_wrapper) /* 205 */
-SYSCALL(sys_setgroups,sys_setgroups,sys32_setgroups_wrapper)
-SYSCALL(sys_fchown,sys_fchown,sys32_fchown_wrapper)
-SYSCALL(sys_setresuid,sys_setresuid,sys32_setresuid_wrapper)
-SYSCALL(sys_getresuid,sys_getresuid,sys32_getresuid_wrapper)
-SYSCALL(sys_setresgid,sys_setresgid,sys32_setresgid_wrapper) /* 210 */
-SYSCALL(sys_getresgid,sys_getresgid,sys32_getresgid_wrapper)
-SYSCALL(sys_chown,sys_chown,sys32_chown_wrapper)
-SYSCALL(sys_setuid,sys_setuid,sys32_setuid_wrapper)
-SYSCALL(sys_setgid,sys_setgid,sys32_setgid_wrapper)
-SYSCALL(sys_setfsuid,sys_setfsuid,sys32_setfsuid_wrapper) /* 215 */
-SYSCALL(sys_setfsgid,sys_setfsgid,sys32_setfsgid_wrapper)
-SYSCALL(sys_pivot_root,sys_pivot_root,sys32_pivot_root_wrapper)
-SYSCALL(sys_mincore,sys_mincore,sys32_mincore_wrapper)
-SYSCALL(sys_madvise,sys_madvise,sys32_madvise_wrapper)
-SYSCALL(sys_getdents64,sys_getdents64,sys32_getdents64_wrapper) /* 220 */
-SYSCALL(sys_fcntl64,sys_ni_syscall,compat_sys_fcntl64_wrapper)
-SYSCALL(sys_readahead,sys_readahead,sys32_readahead_wrapper)
-SYSCALL(sys_sendfile64,sys_ni_syscall,sys32_sendfile64_wrapper)
-SYSCALL(sys_setxattr,sys_setxattr,sys32_setxattr_wrapper)
-SYSCALL(sys_lsetxattr,sys_lsetxattr,sys32_lsetxattr_wrapper) /* 225 */
-SYSCALL(sys_fsetxattr,sys_fsetxattr,sys32_fsetxattr_wrapper)
-SYSCALL(sys_getxattr,sys_getxattr,sys32_getxattr_wrapper)
-SYSCALL(sys_lgetxattr,sys_lgetxattr,sys32_lgetxattr_wrapper)
-SYSCALL(sys_fgetxattr,sys_fgetxattr,sys32_fgetxattr_wrapper)
-SYSCALL(sys_listxattr,sys_listxattr,sys32_listxattr_wrapper) /* 230 */
-SYSCALL(sys_llistxattr,sys_llistxattr,sys32_llistxattr_wrapper)
-SYSCALL(sys_flistxattr,sys_flistxattr,sys32_flistxattr_wrapper)
-SYSCALL(sys_removexattr,sys_removexattr,sys32_removexattr_wrapper)
-SYSCALL(sys_lremovexattr,sys_lremovexattr,sys32_lremovexattr_wrapper)
-SYSCALL(sys_fremovexattr,sys_fremovexattr,sys32_fremovexattr_wrapper) /* 235 */
+SYSCALL(sys_setreuid,sys_setreuid,compat_sys_setreuid)
+SYSCALL(sys_setregid,sys_setregid,compat_sys_setregid)
+SYSCALL(sys_getgroups,sys_getgroups,compat_sys_getgroups) /* 205 */
+SYSCALL(sys_setgroups,sys_setgroups,compat_sys_setgroups)
+SYSCALL(sys_fchown,sys_fchown,compat_sys_fchown)
+SYSCALL(sys_setresuid,sys_setresuid,compat_sys_setresuid)
+SYSCALL(sys_getresuid,sys_getresuid,compat_sys_getresuid)
+SYSCALL(sys_setresgid,sys_setresgid,compat_sys_setresgid) /* 210 */
+SYSCALL(sys_getresgid,sys_getresgid,compat_sys_getresgid)
+SYSCALL(sys_chown,sys_chown,compat_sys_chown)
+SYSCALL(sys_setuid,sys_setuid,compat_sys_setuid)
+SYSCALL(sys_setgid,sys_setgid,compat_sys_setgid)
+SYSCALL(sys_setfsuid,sys_setfsuid,compat_sys_setfsuid) /* 215 */
+SYSCALL(sys_setfsgid,sys_setfsgid,compat_sys_setfsgid)
+SYSCALL(sys_pivot_root,sys_pivot_root,compat_sys_pivot_root)
+SYSCALL(sys_mincore,sys_mincore,compat_sys_mincore)
+SYSCALL(sys_madvise,sys_madvise,compat_sys_madvise)
+SYSCALL(sys_getdents64,sys_getdents64,compat_sys_getdents64) /* 220 */
+SYSCALL(sys_fcntl64,sys_ni_syscall,compat_sys_fcntl64)
+SYSCALL(sys_readahead,sys_readahead,compat_sys_s390_readahead)
+SYSCALL(sys_sendfile64,sys_ni_syscall,compat_sys_sendfile64)
+SYSCALL(sys_setxattr,sys_setxattr,compat_sys_setxattr)
+SYSCALL(sys_lsetxattr,sys_lsetxattr,compat_sys_lsetxattr) /* 225 */
+SYSCALL(sys_fsetxattr,sys_fsetxattr,compat_sys_fsetxattr)
+SYSCALL(sys_getxattr,sys_getxattr,compat_sys_getxattr)
+SYSCALL(sys_lgetxattr,sys_lgetxattr,compat_sys_lgetxattr)
+SYSCALL(sys_fgetxattr,sys_fgetxattr,compat_sys_fgetxattr)
+SYSCALL(sys_listxattr,sys_listxattr,compat_sys_listxattr) /* 230 */
+SYSCALL(sys_llistxattr,sys_llistxattr,compat_sys_llistxattr)
+SYSCALL(sys_flistxattr,sys_flistxattr,compat_sys_flistxattr)
+SYSCALL(sys_removexattr,sys_removexattr,compat_sys_removexattr)
+SYSCALL(sys_lremovexattr,sys_lremovexattr,compat_sys_lremovexattr)
+SYSCALL(sys_fremovexattr,sys_fremovexattr,compat_sys_fremovexattr) /* 235 */
SYSCALL(sys_gettid,sys_gettid,sys_gettid)
-SYSCALL(sys_tkill,sys_tkill,sys_tkill_wrapper)
-SYSCALL(sys_futex,sys_futex,compat_sys_futex_wrapper)
-SYSCALL(sys_sched_setaffinity,sys_sched_setaffinity,sys32_sched_setaffinity_wrapper)
-SYSCALL(sys_sched_getaffinity,sys_sched_getaffinity,sys32_sched_getaffinity_wrapper) /* 240 */
-SYSCALL(sys_tgkill,sys_tgkill,sys_tgkill_wrapper)
+SYSCALL(sys_tkill,sys_tkill,compat_sys_tkill)
+SYSCALL(sys_futex,sys_futex,compat_sys_futex)
+SYSCALL(sys_sched_setaffinity,sys_sched_setaffinity,compat_sys_sched_setaffinity)
+SYSCALL(sys_sched_getaffinity,sys_sched_getaffinity,compat_sys_sched_getaffinity) /* 240 */
+SYSCALL(sys_tgkill,sys_tgkill,compat_sys_tgkill)
NI_SYSCALL /* reserved for TUX */
-SYSCALL(sys_io_setup,sys_io_setup,sys32_io_setup_wrapper)
-SYSCALL(sys_io_destroy,sys_io_destroy,sys32_io_destroy_wrapper)
-SYSCALL(sys_io_getevents,sys_io_getevents,sys32_io_getevents_wrapper) /* 245 */
-SYSCALL(sys_io_submit,sys_io_submit,sys32_io_submit_wrapper)
-SYSCALL(sys_io_cancel,sys_io_cancel,sys32_io_cancel_wrapper)
-SYSCALL(sys_exit_group,sys_exit_group,sys32_exit_group_wrapper)
-SYSCALL(sys_epoll_create,sys_epoll_create,sys_epoll_create_wrapper)
-SYSCALL(sys_epoll_ctl,sys_epoll_ctl,sys_epoll_ctl_wrapper) /* 250 */
-SYSCALL(sys_epoll_wait,sys_epoll_wait,sys_epoll_wait_wrapper)
-SYSCALL(sys_set_tid_address,sys_set_tid_address,sys32_set_tid_address_wrapper)
-SYSCALL(sys_s390_fadvise64,sys_fadvise64_64,sys32_fadvise64_wrapper)
-SYSCALL(sys_timer_create,sys_timer_create,sys32_timer_create_wrapper)
-SYSCALL(sys_timer_settime,sys_timer_settime,sys32_timer_settime_wrapper) /* 255 */
-SYSCALL(sys_timer_gettime,sys_timer_gettime,sys32_timer_gettime_wrapper)
-SYSCALL(sys_timer_getoverrun,sys_timer_getoverrun,sys32_timer_getoverrun_wrapper)
-SYSCALL(sys_timer_delete,sys_timer_delete,sys32_timer_delete_wrapper)
-SYSCALL(sys_clock_settime,sys_clock_settime,sys32_clock_settime_wrapper)
-SYSCALL(sys_clock_gettime,sys_clock_gettime,sys32_clock_gettime_wrapper) /* 260 */
-SYSCALL(sys_clock_getres,sys_clock_getres,sys32_clock_getres_wrapper)
-SYSCALL(sys_clock_nanosleep,sys_clock_nanosleep,sys32_clock_nanosleep_wrapper)
+SYSCALL(sys_io_setup,sys_io_setup,compat_sys_io_setup)
+SYSCALL(sys_io_destroy,sys_io_destroy,compat_sys_io_destroy)
+SYSCALL(sys_io_getevents,sys_io_getevents,compat_sys_io_getevents) /* 245 */
+SYSCALL(sys_io_submit,sys_io_submit,compat_sys_io_submit)
+SYSCALL(sys_io_cancel,sys_io_cancel,compat_sys_io_cancel)
+SYSCALL(sys_exit_group,sys_exit_group,compat_sys_exit_group)
+SYSCALL(sys_epoll_create,sys_epoll_create,compat_sys_epoll_create)
+SYSCALL(sys_epoll_ctl,sys_epoll_ctl,compat_sys_epoll_ctl) /* 250 */
+SYSCALL(sys_epoll_wait,sys_epoll_wait,compat_sys_epoll_wait)
+SYSCALL(sys_set_tid_address,sys_set_tid_address,compat_sys_set_tid_address)
+SYSCALL(sys_s390_fadvise64,sys_fadvise64_64,compat_sys_s390_fadvise64)
+SYSCALL(sys_timer_create,sys_timer_create,compat_sys_timer_create)
+SYSCALL(sys_timer_settime,sys_timer_settime,compat_sys_timer_settime) /* 255 */
+SYSCALL(sys_timer_gettime,sys_timer_gettime,compat_sys_timer_gettime)
+SYSCALL(sys_timer_getoverrun,sys_timer_getoverrun,compat_sys_timer_getoverrun)
+SYSCALL(sys_timer_delete,sys_timer_delete,compat_sys_timer_delete)
+SYSCALL(sys_clock_settime,sys_clock_settime,compat_sys_clock_settime)
+SYSCALL(sys_clock_gettime,sys_clock_gettime,compat_sys_clock_gettime) /* 260 */
+SYSCALL(sys_clock_getres,sys_clock_getres,compat_sys_clock_getres)
+SYSCALL(sys_clock_nanosleep,sys_clock_nanosleep,compat_sys_clock_nanosleep)
NI_SYSCALL /* reserved for vserver */
-SYSCALL(sys_s390_fadvise64_64,sys_ni_syscall,sys32_fadvise64_64_wrapper)
-SYSCALL(sys_statfs64,sys_statfs64,compat_sys_statfs64_wrapper)
-SYSCALL(sys_fstatfs64,sys_fstatfs64,compat_sys_fstatfs64_wrapper)
-SYSCALL(sys_remap_file_pages,sys_remap_file_pages,sys32_remap_file_pages_wrapper)
+SYSCALL(sys_s390_fadvise64_64,sys_ni_syscall,compat_sys_s390_fadvise64_64)
+SYSCALL(sys_statfs64,sys_statfs64,compat_sys_statfs64)
+SYSCALL(sys_fstatfs64,sys_fstatfs64,compat_sys_fstatfs64)
+SYSCALL(sys_remap_file_pages,sys_remap_file_pages,compat_sys_remap_file_pages)
NI_SYSCALL /* 268 sys_mbind */
NI_SYSCALL /* 269 sys_get_mempolicy */
NI_SYSCALL /* 270 sys_set_mempolicy */
-SYSCALL(sys_mq_open,sys_mq_open,compat_sys_mq_open_wrapper)
-SYSCALL(sys_mq_unlink,sys_mq_unlink,sys32_mq_unlink_wrapper)
-SYSCALL(sys_mq_timedsend,sys_mq_timedsend,compat_sys_mq_timedsend_wrapper)
-SYSCALL(sys_mq_timedreceive,sys_mq_timedreceive,compat_sys_mq_timedreceive_wrapper)
-SYSCALL(sys_mq_notify,sys_mq_notify,compat_sys_mq_notify_wrapper) /* 275 */
-SYSCALL(sys_mq_getsetattr,sys_mq_getsetattr,compat_sys_mq_getsetattr_wrapper)
-SYSCALL(sys_kexec_load,sys_kexec_load,compat_sys_kexec_load_wrapper)
-SYSCALL(sys_add_key,sys_add_key,compat_sys_add_key_wrapper)
-SYSCALL(sys_request_key,sys_request_key,compat_sys_request_key_wrapper)
-SYSCALL(sys_keyctl,sys_keyctl,compat_sys_keyctl_wrapper) /* 280 */
-SYSCALL(sys_waitid,sys_waitid,compat_sys_waitid_wrapper)
-SYSCALL(sys_ioprio_set,sys_ioprio_set,sys_ioprio_set_wrapper)
-SYSCALL(sys_ioprio_get,sys_ioprio_get,sys_ioprio_get_wrapper)
+SYSCALL(sys_mq_open,sys_mq_open,compat_sys_mq_open)
+SYSCALL(sys_mq_unlink,sys_mq_unlink,compat_sys_mq_unlink)
+SYSCALL(sys_mq_timedsend,sys_mq_timedsend,compat_sys_mq_timedsend)
+SYSCALL(sys_mq_timedreceive,sys_mq_timedreceive,compat_sys_mq_timedreceive)
+SYSCALL(sys_mq_notify,sys_mq_notify,compat_sys_mq_notify) /* 275 */
+SYSCALL(sys_mq_getsetattr,sys_mq_getsetattr,compat_sys_mq_getsetattr)
+SYSCALL(sys_kexec_load,sys_kexec_load,compat_sys_kexec_load)
+SYSCALL(sys_add_key,sys_add_key,compat_sys_add_key)
+SYSCALL(sys_request_key,sys_request_key,compat_sys_request_key)
+SYSCALL(sys_keyctl,sys_keyctl,compat_sys_keyctl) /* 280 */
+SYSCALL(sys_waitid,sys_waitid,compat_sys_waitid)
+SYSCALL(sys_ioprio_set,sys_ioprio_set,compat_sys_ioprio_set)
+SYSCALL(sys_ioprio_get,sys_ioprio_get,compat_sys_ioprio_get)
SYSCALL(sys_inotify_init,sys_inotify_init,sys_inotify_init)
-SYSCALL(sys_inotify_add_watch,sys_inotify_add_watch,sys_inotify_add_watch_wrapper) /* 285 */
-SYSCALL(sys_inotify_rm_watch,sys_inotify_rm_watch,sys_inotify_rm_watch_wrapper)
+SYSCALL(sys_inotify_add_watch,sys_inotify_add_watch,compat_sys_inotify_add_watch) /* 285 */
+SYSCALL(sys_inotify_rm_watch,sys_inotify_rm_watch,compat_sys_inotify_rm_watch)
NI_SYSCALL /* 287 sys_migrate_pages */
-SYSCALL(sys_openat,sys_openat,compat_sys_openat_wrapper)
-SYSCALL(sys_mkdirat,sys_mkdirat,sys_mkdirat_wrapper)
-SYSCALL(sys_mknodat,sys_mknodat,sys_mknodat_wrapper) /* 290 */
-SYSCALL(sys_fchownat,sys_fchownat,sys_fchownat_wrapper)
-SYSCALL(sys_futimesat,sys_futimesat,compat_sys_futimesat_wrapper)
-SYSCALL(sys_fstatat64,sys_newfstatat,sys32_fstatat64_wrapper)
-SYSCALL(sys_unlinkat,sys_unlinkat,sys_unlinkat_wrapper)
-SYSCALL(sys_renameat,sys_renameat,sys_renameat_wrapper) /* 295 */
-SYSCALL(sys_linkat,sys_linkat,sys_linkat_wrapper)
-SYSCALL(sys_symlinkat,sys_symlinkat,sys_symlinkat_wrapper)
-SYSCALL(sys_readlinkat,sys_readlinkat,sys_readlinkat_wrapper)
-SYSCALL(sys_fchmodat,sys_fchmodat,sys_fchmodat_wrapper)
-SYSCALL(sys_faccessat,sys_faccessat,sys_faccessat_wrapper) /* 300 */
-SYSCALL(sys_pselect6,sys_pselect6,compat_sys_pselect6_wrapper)
-SYSCALL(sys_ppoll,sys_ppoll,compat_sys_ppoll_wrapper)
-SYSCALL(sys_unshare,sys_unshare,sys_unshare_wrapper)
-SYSCALL(sys_set_robust_list,sys_set_robust_list,compat_sys_set_robust_list_wrapper)
-SYSCALL(sys_get_robust_list,sys_get_robust_list,compat_sys_get_robust_list_wrapper)
-SYSCALL(sys_splice,sys_splice,sys_splice_wrapper)
-SYSCALL(sys_sync_file_range,sys_sync_file_range,sys_sync_file_range_wrapper)
-SYSCALL(sys_tee,sys_tee,sys_tee_wrapper)
-SYSCALL(sys_vmsplice,sys_vmsplice,compat_sys_vmsplice_wrapper)
+SYSCALL(sys_openat,sys_openat,compat_sys_openat)
+SYSCALL(sys_mkdirat,sys_mkdirat,compat_sys_mkdirat)
+SYSCALL(sys_mknodat,sys_mknodat,compat_sys_mknodat) /* 290 */
+SYSCALL(sys_fchownat,sys_fchownat,compat_sys_fchownat)
+SYSCALL(sys_futimesat,sys_futimesat,compat_sys_futimesat)
+SYSCALL(sys_fstatat64,sys_newfstatat,compat_sys_s390_fstatat64)
+SYSCALL(sys_unlinkat,sys_unlinkat,compat_sys_unlinkat)
+SYSCALL(sys_renameat,sys_renameat,compat_sys_renameat) /* 295 */
+SYSCALL(sys_linkat,sys_linkat,compat_sys_linkat)
+SYSCALL(sys_symlinkat,sys_symlinkat,compat_sys_symlinkat)
+SYSCALL(sys_readlinkat,sys_readlinkat,compat_sys_readlinkat)
+SYSCALL(sys_fchmodat,sys_fchmodat,compat_sys_fchmodat)
+SYSCALL(sys_faccessat,sys_faccessat,compat_sys_faccessat) /* 300 */
+SYSCALL(sys_pselect6,sys_pselect6,compat_sys_pselect6)
+SYSCALL(sys_ppoll,sys_ppoll,compat_sys_ppoll)
+SYSCALL(sys_unshare,sys_unshare,compat_sys_unshare)
+SYSCALL(sys_set_robust_list,sys_set_robust_list,compat_sys_set_robust_list)
+SYSCALL(sys_get_robust_list,sys_get_robust_list,compat_sys_get_robust_list)
+SYSCALL(sys_splice,sys_splice,compat_sys_splice)
+SYSCALL(sys_sync_file_range,sys_sync_file_range,compat_sys_s390_sync_file_range)
+SYSCALL(sys_tee,sys_tee,compat_sys_tee)
+SYSCALL(sys_vmsplice,sys_vmsplice,compat_sys_vmsplice)
NI_SYSCALL /* 310 sys_move_pages */
-SYSCALL(sys_getcpu,sys_getcpu,sys_getcpu_wrapper)
-SYSCALL(sys_epoll_pwait,sys_epoll_pwait,compat_sys_epoll_pwait_wrapper)
-SYSCALL(sys_utimes,sys_utimes,compat_sys_utimes_wrapper)
-SYSCALL(sys_s390_fallocate,sys_fallocate,sys_fallocate_wrapper)
-SYSCALL(sys_utimensat,sys_utimensat,compat_sys_utimensat_wrapper) /* 315 */
-SYSCALL(sys_signalfd,sys_signalfd,compat_sys_signalfd_wrapper)
+SYSCALL(sys_getcpu,sys_getcpu,compat_sys_getcpu)
+SYSCALL(sys_epoll_pwait,sys_epoll_pwait,compat_sys_epoll_pwait)
+SYSCALL(sys_utimes,sys_utimes,compat_sys_utimes)
+SYSCALL(sys_s390_fallocate,sys_fallocate,compat_sys_s390_fallocate)
+SYSCALL(sys_utimensat,sys_utimensat,compat_sys_utimensat) /* 315 */
+SYSCALL(sys_signalfd,sys_signalfd,compat_sys_signalfd)
NI_SYSCALL /* 317 old sys_timer_fd */
-SYSCALL(sys_eventfd,sys_eventfd,sys_eventfd_wrapper)
-SYSCALL(sys_timerfd_create,sys_timerfd_create,sys_timerfd_create_wrapper)
-SYSCALL(sys_timerfd_settime,sys_timerfd_settime,compat_sys_timerfd_settime_wrapper) /* 320 */
-SYSCALL(sys_timerfd_gettime,sys_timerfd_gettime,compat_sys_timerfd_gettime_wrapper)
-SYSCALL(sys_signalfd4,sys_signalfd4,compat_sys_signalfd4_wrapper)
-SYSCALL(sys_eventfd2,sys_eventfd2,sys_eventfd2_wrapper)
-SYSCALL(sys_inotify_init1,sys_inotify_init1,sys_inotify_init1_wrapper)
-SYSCALL(sys_pipe2,sys_pipe2,sys_pipe2_wrapper) /* 325 */
-SYSCALL(sys_dup3,sys_dup3,sys_dup3_wrapper)
-SYSCALL(sys_epoll_create1,sys_epoll_create1,sys_epoll_create1_wrapper)
-SYSCALL(sys_preadv,sys_preadv,compat_sys_preadv_wrapper)
-SYSCALL(sys_pwritev,sys_pwritev,compat_sys_pwritev_wrapper)
-SYSCALL(sys_rt_tgsigqueueinfo,sys_rt_tgsigqueueinfo,compat_sys_rt_tgsigqueueinfo_wrapper) /* 330 */
-SYSCALL(sys_perf_event_open,sys_perf_event_open,sys_perf_event_open_wrapper)
-SYSCALL(sys_fanotify_init,sys_fanotify_init,sys_fanotify_init_wrapper)
-SYSCALL(sys_fanotify_mark,sys_fanotify_mark,sys_fanotify_mark_wrapper)
-SYSCALL(sys_prlimit64,sys_prlimit64,sys_prlimit64_wrapper)
-SYSCALL(sys_name_to_handle_at,sys_name_to_handle_at,sys_name_to_handle_at_wrapper) /* 335 */
-SYSCALL(sys_open_by_handle_at,sys_open_by_handle_at,compat_sys_open_by_handle_at_wrapper)
-SYSCALL(sys_clock_adjtime,sys_clock_adjtime,compat_sys_clock_adjtime_wrapper)
-SYSCALL(sys_syncfs,sys_syncfs,sys_syncfs_wrapper)
-SYSCALL(sys_setns,sys_setns,sys_setns_wrapper)
-SYSCALL(sys_process_vm_readv,sys_process_vm_readv,compat_sys_process_vm_readv_wrapper) /* 340 */
-SYSCALL(sys_process_vm_writev,sys_process_vm_writev,compat_sys_process_vm_writev_wrapper)
+SYSCALL(sys_eventfd,sys_eventfd,compat_sys_eventfd)
+SYSCALL(sys_timerfd_create,sys_timerfd_create,compat_sys_timerfd_create)
+SYSCALL(sys_timerfd_settime,sys_timerfd_settime,compat_sys_timerfd_settime) /* 320 */
+SYSCALL(sys_timerfd_gettime,sys_timerfd_gettime,compat_sys_timerfd_gettime)
+SYSCALL(sys_signalfd4,sys_signalfd4,compat_sys_signalfd4)
+SYSCALL(sys_eventfd2,sys_eventfd2,compat_sys_eventfd2)
+SYSCALL(sys_inotify_init1,sys_inotify_init1,compat_sys_inotify_init1)
+SYSCALL(sys_pipe2,sys_pipe2,compat_sys_pipe2) /* 325 */
+SYSCALL(sys_dup3,sys_dup3,compat_sys_dup3)
+SYSCALL(sys_epoll_create1,sys_epoll_create1,compat_sys_epoll_create1)
+SYSCALL(sys_preadv,sys_preadv,compat_sys_preadv)
+SYSCALL(sys_pwritev,sys_pwritev,compat_sys_pwritev)
+SYSCALL(sys_rt_tgsigqueueinfo,sys_rt_tgsigqueueinfo,compat_sys_rt_tgsigqueueinfo) /* 330 */
+SYSCALL(sys_perf_event_open,sys_perf_event_open,compat_sys_perf_event_open)
+SYSCALL(sys_fanotify_init,sys_fanotify_init,compat_sys_fanotify_init)
+SYSCALL(sys_fanotify_mark,sys_fanotify_mark,compat_sys_fanotify_mark)
+SYSCALL(sys_prlimit64,sys_prlimit64,compat_sys_prlimit64)
+SYSCALL(sys_name_to_handle_at,sys_name_to_handle_at,compat_sys_name_to_handle_at) /* 335 */
+SYSCALL(sys_open_by_handle_at,sys_open_by_handle_at,compat_sys_open_by_handle_at)
+SYSCALL(sys_clock_adjtime,sys_clock_adjtime,compat_sys_clock_adjtime)
+SYSCALL(sys_syncfs,sys_syncfs,compat_sys_syncfs)
+SYSCALL(sys_setns,sys_setns,compat_sys_setns)
+SYSCALL(sys_process_vm_readv,sys_process_vm_readv,compat_sys_process_vm_readv) /* 340 */
+SYSCALL(sys_process_vm_writev,sys_process_vm_writev,compat_sys_process_vm_writev)
+SYSCALL(sys_ni_syscall,sys_s390_runtime_instr,compat_sys_s390_runtime_instr)
+SYSCALL(sys_kcmp,sys_kcmp,compat_sys_kcmp)
+SYSCALL(sys_finit_module,sys_finit_module,compat_sys_finit_module)
+SYSCALL(sys_sched_setattr,sys_sched_setattr,compat_sys_sched_setattr) /* 345 */
+SYSCALL(sys_sched_getattr,sys_sched_getattr,compat_sys_sched_getattr)
+SYSCALL(sys_renameat2,sys_renameat2,compat_sys_renameat2)
diff --git a/arch/s390/kernel/sysinfo.c b/arch/s390/kernel/sysinfo.c
index 2a94b774695..811f542b8ed 100644
--- a/arch/s390/kernel/sysinfo.c
+++ b/arch/s390/kernel/sysinfo.c
@@ -22,17 +22,41 @@
#include <math-emu/soft-fp.h>
#include <math-emu/single.h>
-static inline int stsi_0(void)
+int topology_max_mnest;
+
+/*
+ * stsi - store system information
+ *
+ * Returns the current configuration level if function code 0 was specified.
+ * Otherwise returns 0 on success or a negative value on error.
+ */
+int stsi(void *sysinfo, int fc, int sel1, int sel2)
{
-	int rc = stsi(NULL, 0, 0, 0);
-	return rc == -ENOSYS ? rc : (((unsigned int) rc) >> 28);
+	register int r0 asm("0") = (fc << 28) | sel1;
+	register int r1 asm("1") = sel2;
+	int rc = 0;
+
+	asm volatile(
+		" stsi 0(%3)\n"
+		"0: jz 2f\n"
+		"1: lhi %1,%4\n"
+		"2:\n"
+		EX_TABLE(0b, 1b)
+		: "+d" (r0), "+d" (rc)
+		: "d" (r1), "a" (sysinfo), "K" (-EOPNOTSUPP)
+		: "cc", "memory");
+	if (rc)
+		return rc;
+	return fc ? 0 : ((unsigned int) r0) >> 28;
}
+EXPORT_SYMBOL(stsi);
-static int stsi_1_1_1(struct sysinfo_1_1_1 *info, char *page, int len)
+static void stsi_1_1_1(struct seq_file *m, struct sysinfo_1_1_1 *info)
{
-	if (stsi(info, 1, 1, 1) == -ENOSYS)
-		return len;
+	int i;
+	if (stsi(info, 1, 1, 1))
+		return;
	EBCASC(info->manufacturer, sizeof(info->manufacturer));
	EBCASC(info->type, sizeof(info->type));
	EBCASC(info->model, sizeof(info->model));
@@ -41,242 +65,197 @@ static int stsi_1_1_1(struct sysinfo_1_1_1 *info, char *page, int len)
	EBCASC(info->model_capacity, sizeof(info->model_capacity));
	EBCASC(info->model_perm_cap, sizeof(info->model_perm_cap));
	EBCASC(info->model_temp_cap, sizeof(info->model_temp_cap));
-	len += sprintf(page + len, "Manufacturer: %-16.16s\n",
-		info->manufacturer);
-	len += sprintf(page + len, "Type: %-4.4s\n",
-		info->type);
+	seq_printf(m, "Manufacturer: %-16.16s\n", info->manufacturer);
+	seq_printf(m, "Type: %-4.4s\n", info->type);
+	/*
+	 * Sigh: the model field has been renamed with System z9
+	 * to model_capacity and a new model field has been added
+	 * after the plant field. To avoid confusing older programs
+	 * the "Model:" prints "model_capacity model" or just
+	 * "model_capacity" if the model string is empty .
+	 */
+	seq_printf(m, "Model: %-16.16s", info->model_capacity);
	if (info->model[0] != '\0')
-		/*
-		 * Sigh: the model field has been renamed with System z9
-		 * to model_capacity and a new model field has been added
-		 * after the plant field. To avoid confusing older programs
-		 * the "Model:" prints "model_capacity model" or just
-		 * "model_capacity" if the model string is empty .
-		 */
-		len += sprintf(page + len,
-			"Model: %-16.16s %-16.16s\n",
-			info->model_capacity, info->model);
-	else
-		len += sprintf(page + len, "Model: %-16.16s\n",
-			info->model_capacity);
-	len += sprintf(page + len, "Sequence Code: %-16.16s\n",
-		info->sequence);
-	len += sprintf(page + len, "Plant: %-4.4s\n",
-		info->plant);
-	len += sprintf(page + len, "Model Capacity: %-16.16s %08u\n",
-		info->model_capacity, *(u32 *) info->model_cap_rating);
-	if (info->model_perm_cap[0] != '\0')
-		len += sprintf(page + len,
-			"Model Perm. Capacity: %-16.16s %08u\n",
-			info->model_perm_cap,
-			*(u32 *) info->model_perm_cap_rating);
-	if (info->model_temp_cap[0] != '\0')
-		len += sprintf(page + len,
-			"Model Temp. Capacity: %-16.16s %08u\n",
-			info->model_temp_cap,
-			*(u32 *) info->model_temp_cap_rating);
+		seq_printf(m, " %-16.16s", info->model);
+	seq_putc(m, '\n');
+	seq_printf(m, "Sequence Code: %-16.16s\n", info->sequence);
+	seq_printf(m, "Plant: %-4.4s\n", info->plant);
+	seq_printf(m, "Model Capacity: %-16.16s %08u\n",
+		   info->model_capacity, info->model_cap_rating);
+	if (info->model_perm_cap_rating)
+		seq_printf(m, "Model Perm. Capacity: %-16.16s %08u\n",
+			   info->model_perm_cap,
+			   info->model_perm_cap_rating);
+	if (info->model_temp_cap_rating)
+		seq_printf(m, "Model Temp. Capacity: %-16.16s %08u\n",
+			   info->model_temp_cap,
+			   info->model_temp_cap_rating);
+	if (info->ncr)
+		seq_printf(m, "Nominal Cap. Rating: %08u\n", info->ncr);
+	if (info->npr)
+		seq_printf(m, "Nominal Perm. Rating: %08u\n", info->npr);
+	if (info->ntr)
+		seq_printf(m, "Nominal Temp. Rating: %08u\n", info->ntr);
	if (info->cai) {
-		len += sprintf(page + len,
-			"Capacity Adj. Ind.: %d\n",
-			info->cai);
-		len += sprintf(page + len, "Capacity Ch. Reason: %d\n",
-			info->ccr);
+		seq_printf(m, "Capacity Adj. Ind.: %d\n", info->cai);
+		seq_printf(m, "Capacity Ch. Reason: %d\n", info->ccr);
+		seq_printf(m, "Capacity Transient: %d\n", info->t);
+	}
+	if (info->p) {
+		for (i = 1; i <= ARRAY_SIZE(info->typepct); i++) {
+			seq_printf(m, "Type %d Percentage: %d\n",
+				   i, info->typepct[i - 1]);
+		}
	}
-	return len;
}
-static int stsi_15_1_x(struct sysinfo_15_1_x *info, char *page, int len)
+static void stsi_15_1_x(struct seq_file *m, struct sysinfo_15_1_x *info)
{
	static int max_mnest;
	int i, rc;
-	len += sprintf(page + len, "\n");
+	seq_putc(m, '\n');
	if (!MACHINE_HAS_TOPOLOGY)
-		return len;
-	if (max_mnest) {
-		stsi(info, 15, 1, max_mnest);
-	} else {
-		for (max_mnest = 6; max_mnest > 1; max_mnest--) {
-			rc = stsi(info, 15, 1, max_mnest);
-			if (rc != -ENOSYS)
-				break;
-		}
-	}
-	len += sprintf(page + len, "CPU Topology HW: ");
+		return;
+	if (stsi(info, 15, 1, topology_max_mnest))
+		return;
+	seq_printf(m, "CPU Topology HW: ");
	for (i = 0; i < TOPOLOGY_NR_MAG; i++)
-		len += sprintf(page + len, " %d", info->mag[i]);
-	len += sprintf(page + len, "\n");
+		seq_printf(m, " %d", info->mag[i]);
+	seq_putc(m, '\n');
#ifdef CONFIG_SCHED_MC
	store_topology(info);
-	len += sprintf(page + len, "CPU Topology SW: ");
+	seq_printf(m, "CPU Topology SW: ");
	for (i = 0; i < TOPOLOGY_NR_MAG; i++)
-		len += sprintf(page + len, " %d", info->mag[i]);
-	len += sprintf(page + len, "\n");
+		seq_printf(m, " %d", info->mag[i]);
+	seq_putc(m, '\n');
#endif
-	return len;
}
-static int stsi_1_2_2(struct sysinfo_1_2_2 *info, char *page, int len)
+static void stsi_1_2_2(struct seq_file *m, struct sysinfo_1_2_2 *info)
{
	struct sysinfo_1_2_2_extension *ext;
	int i;
-	if (stsi(info, 1, 2, 2) == -ENOSYS)
-		return len;
+	if (stsi(info, 1, 2, 2))
+		return;
	ext = (struct sysinfo_1_2_2_extension *)
		((unsigned long) info + info->acc_offset);
-
-	len += sprintf(page + len, "CPUs Total: %d\n",
-		info->cpus_total);
-	len += sprintf(page + len, "CPUs Configured: %d\n",
-		info->cpus_configured);
-	len += sprintf(page + len, "CPUs Standby: %d\n",
-		info->cpus_standby);
-	len += sprintf(page + len, "CPUs Reserved: %d\n",
-		info->cpus_reserved);
-
-	if (info->format == 1) {
-		/*
-		 * Sigh 2. According to the specification the alternate
-		 * capability field is a 32 bit floating point number
-		 * if the higher order 8 bits are not zero. Printing
-		 * a floating point number in the kernel is a no-no,
-		 * always print the number as 32 bit unsigned integer.
-		 * The user-space needs to know about the strange
-		 * encoding of the alternate cpu capability.
-		 */
-		len += sprintf(page + len, "Capability: %u %u\n",
-			info->capability, ext->alt_capability);
-		for (i = 2; i <= info->cpus_total; i++)
-			len += sprintf(page + len,
-				"Adjustment %02d-way: %u %u\n",
-				i, info->adjustment[i-2],
-				ext->alt_adjustment[i-2]);
-
-	} else {
-		len += sprintf(page + len, "Capability: %u\n",
-			info->capability);
-		for (i = 2; i <= info->cpus_total; i++)
-			len += sprintf(page + len,
-				"Adjustment %02d-way: %u\n",
-				i, info->adjustment[i-2]);
+	seq_printf(m, "CPUs Total: %d\n", info->cpus_total);
+	seq_printf(m, "CPUs Configured: %d\n", info->cpus_configured);
+	seq_printf(m, "CPUs Standby: %d\n", info->cpus_standby);
+	seq_printf(m, "CPUs Reserved: %d\n", info->cpus_reserved);
+	/*
+	 * Sigh 2. According to the specification the alternate
+	 * capability field is a 32 bit floating point number
+	 * if the higher order 8 bits are not zero. Printing
+	 * a floating point number in the kernel is a no-no,
+	 * always print the number as 32 bit unsigned integer.
+	 * The user-space needs to know about the strange
+	 * encoding of the alternate cpu capability.
+	 */
+	seq_printf(m, "Capability: %u", info->capability);
+	if (info->format == 1)
+		seq_printf(m, " %u", ext->alt_capability);
+	seq_putc(m, '\n');
+	if (info->nominal_cap)
+		seq_printf(m, "Nominal Capability: %d\n", info->nominal_cap);
+	if (info->secondary_cap)
+		seq_printf(m, "Secondary Capability: %d\n", info->secondary_cap);
+	for (i = 2; i <= info->cpus_total; i++) {
+		seq_printf(m, "Adjustment %02d-way: %u",
+			   i, info->adjustment[i-2]);
+		if (info->format == 1)
+			seq_printf(m, " %u", ext->alt_adjustment[i-2]);
+		seq_putc(m, '\n');
	}
-
-	if (info->secondary_capability != 0)
-		len += sprintf(page + len, "Secondary Capability: %d\n",
-			info->secondary_capability);
-	return len;
}
-static int stsi_2_2_2(struct sysinfo_2_2_2 *info, char *page, int len)
+static void stsi_2_2_2(struct seq_file *m, struct sysinfo_2_2_2 *info)
{
-	if (stsi(info, 2, 2, 2) == -ENOSYS)
-		return len;
-
+	if (stsi(info, 2, 2, 2))
+		return;
	EBCASC(info->name, sizeof(info->name));
-
-	len += sprintf(page + len, "\n");
-	len += sprintf(page + len, "LPAR Number: %d\n",
-		info->lpar_number);
-
-	len += sprintf(page + len, "LPAR Characteristics: ");
+	seq_putc(m, '\n');
+	seq_printf(m, "LPAR Number: %d\n", info->lpar_number);
+	seq_printf(m, "LPAR Characteristics: ");
	if (info->characteristics & LPAR_CHAR_DEDICATED)
-		len += sprintf(page + len, "Dedicated ");
+		seq_printf(m, "Dedicated ");
	if (info->characteristics & LPAR_CHAR_SHARED)
-		len += sprintf(page + len, "Shared ");
+		seq_printf(m, "Shared ");
	if (info->characteristics & LPAR_CHAR_LIMITED)
-		len += sprintf(page + len, "Limited ");
-	len += sprintf(page + len, "\n");
-
-	len += sprintf(page + len, "LPAR Name: %-8.8s\n",
-		info->name);
-
-	len += sprintf(page + len, "LPAR Adjustment: %d\n",
-		info->caf);
-
-	len += sprintf(page + len, "LPAR CPUs Total: %d\n",
-		info->cpus_total);
-	len += sprintf(page + len, "LPAR CPUs Configured: %d\n",
-		info->cpus_configured);
-	len += sprintf(page + len, "LPAR CPUs Standby: %d\n",
-		info->cpus_standby);
-	len += sprintf(page + len, "LPAR CPUs Reserved: %d\n",
-		info->cpus_reserved);
-	len += sprintf(page + len, "LPAR CPUs Dedicated: %d\n",
-		info->cpus_dedicated);
-	len += sprintf(page + len, "LPAR CPUs Shared: %d\n",
-		info->cpus_shared);
-	return len;
+		seq_printf(m, "Limited ");
+	seq_putc(m, '\n');
+	seq_printf(m, "LPAR Name: %-8.8s\n", info->name);
+	seq_printf(m, "LPAR Adjustment: %d\n", info->caf);
+	seq_printf(m, "LPAR CPUs Total: %d\n", info->cpus_total);
+	seq_printf(m, "LPAR CPUs Configured: %d\n", info->cpus_configured);
+	seq_printf(m, "LPAR CPUs Standby: %d\n", info->cpus_standby);
+	seq_printf(m, "LPAR CPUs Reserved: %d\n", info->cpus_reserved);
+	seq_printf(m, "LPAR CPUs Dedicated: %d\n", info->cpus_dedicated);
+	seq_printf(m, "LPAR CPUs Shared: %d\n", info->cpus_shared);
}
-static int stsi_3_2_2(struct sysinfo_3_2_2 *info, char *page, int len)
+static void stsi_3_2_2(struct seq_file *m, struct sysinfo_3_2_2 *info)
{
	int i;
-	if (stsi(info, 3, 2, 2) == -ENOSYS)
-		return len;
+	if (stsi(info, 3, 2, 2))
+		return;
	for (i = 0; i < info->count; i++) {
		EBCASC(info->vm[i].name, sizeof(info->vm[i].name));
		EBCASC(info->vm[i].cpi, sizeof(info->vm[i].cpi));
-		len += sprintf(page + len, "\n");
-		len += sprintf(page + len, "VM%02d Name: %-8.8s\n",
-			i, info->vm[i].name);
-		len += sprintf(page + len, "VM%02d Control Program: %-16.16s\n",
-			i, info->vm[i].cpi);
-		len += sprintf(page + len, "VM%02d Adjustment: %d\n",
-			i, info->vm[i].caf);
-		len += sprintf(page + len, "VM%02d CPUs Total: %d\n",
-			i, info->vm[i].cpus_total);
-		len += sprintf(page + len, "VM%02d CPUs Configured: %d\n",
-			i, info->vm[i].cpus_configured);
-		len += sprintf(page + len, "VM%02d CPUs Standby: %d\n",
-			i, info->vm[i].cpus_standby);
-		len += sprintf(page + len, "VM%02d CPUs Reserved: %d\n",
-			i, info->vm[i].cpus_reserved);
+		seq_putc(m, '\n');
+		seq_printf(m, "VM%02d Name: %-8.8s\n", i, info->vm[i].name);
+		seq_printf(m, "VM%02d Control Program: %-16.16s\n", i, info->vm[i].cpi);
+		seq_printf(m, "VM%02d Adjustment: %d\n", i, info->vm[i].caf);
+		seq_printf(m, "VM%02d CPUs Total: %d\n", i, info->vm[i].cpus_total);
+		seq_printf(m, "VM%02d CPUs Configured: %d\n", i, info->vm[i].cpus_configured);
+		seq_printf(m, "VM%02d CPUs Standby: %d\n", i, info->vm[i].cpus_standby);
+		seq_printf(m, "VM%02d CPUs Reserved: %d\n", i, info->vm[i].cpus_reserved);
	}
-	return len;
}
-static int proc_read_sysinfo(char *page, char **start,
-			     off_t off, int count,
-			     int *eof, void *data)
+static int sysinfo_show(struct seq_file *m, void *v)
{
-	unsigned long info = get_zeroed_page(GFP_KERNEL);
-	int level, len;
+	void *info = (void *)get_zeroed_page(GFP_KERNEL);
+	int level;
	if (!info)
		return 0;
-
-	len = 0;
-	level = stsi_0();
+	level = stsi(NULL, 0, 0, 0);
	if (level >= 1)
-		len = stsi_1_1_1((struct sysinfo_1_1_1 *) info, page, len);
-
+		stsi_1_1_1(m, info);
	if (level >= 1)
-		len = stsi_15_1_x((struct sysinfo_15_1_x *) info, page, len);
-
+		stsi_15_1_x(m, info);
	if (level >= 1)
-		len = stsi_1_2_2((struct sysinfo_1_2_2 *) info, page, len);
-
+		stsi_1_2_2(m, info);
	if (level >= 2)
-		len = stsi_2_2_2((struct sysinfo_2_2_2 *) info, page, len);
-
+		stsi_2_2_2(m, info);
	if (level >= 3)
-		len = stsi_3_2_2((struct sysinfo_3_2_2 *) info, page, len);
+		stsi_3_2_2(m, info);
+	free_page((unsigned long)info);
+	return 0;
+}
-	free_page(info);
-	return len;
+static int sysinfo_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, sysinfo_show, NULL);
}
-static __init int create_proc_sysinfo(void)
+static const struct file_operations sysinfo_fops = {
+	.open = sysinfo_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static int __init sysinfo_create_proc(void)
{
-	create_proc_read_entry("sysinfo", 0444, NULL,
-			       proc_read_sysinfo, NULL);
+	proc_create("sysinfo", 0444, NULL, &sysinfo_fops);
	return 0;
}
-device_initcall(create_proc_sysinfo);
+device_initcall(sysinfo_create_proc);
/*
 * Service levels interface.
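
The reworked stsi() above replaces the old -ENOSYS convention: called with function code 0 it returns the machine's current configuration level (bits 0-3 of register 0), and with a non-zero function code it returns 0 on success or -EOPNOTSUPP when the function code/selector combination is not provided. A hedged usage sketch modeled on sysinfo_show() above; the function name is illustrative, and as in the callers in this file the buffer must be a zeroed page:

/* Sketch of the stsi() calling convention introduced above. */
static int example_print_manufacturer(struct seq_file *m)
{
	struct sysinfo_1_1_1 *info;
	int level;

	info = (void *) get_zeroed_page(GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	level = stsi(NULL, 0, 0, 0);	/* fc == 0: configuration level */
	if (level >= 1 && stsi(info, 1, 1, 1) == 0) {
		EBCASC(info->manufacturer, sizeof(info->manufacturer));
		seq_printf(m, "Manufacturer: %-16.16s\n", info->manufacturer);
	}
	free_page((unsigned long) info);
	return 0;
}
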
@@ -393,27 +372,6 @@ static __init int create_proc_service_level(void)
subsys_initcall(create_proc_service_level);
/*
- * Bogomips calculation based on cpu capability.
- */
-int get_cpu_capability(unsigned int *capability)
-{
-	struct sysinfo_1_2_2 *info;
-	int rc;
-
-	info = (void *) get_zeroed_page(GFP_KERNEL);
-	if (!info)
-		return -ENOMEM;
-	rc = stsi(info, 1, 2, 2);
-	if (rc == -ENOSYS)
-		goto out;
-	rc = 0;
-	*capability = info->capability;
-out:
-	free_page((unsigned long) info);
-	return rc;
-}
-
-/*
 * CPU capability might have changed. Therefore recalculate loops_per_jiffy.
 */
void s390_adjust_jiffies(void)
@@ -428,7 +386,7 @@ void s390_adjust_jiffies(void)
	if (!info)
		return;
-	if (stsi(info, 1, 2, 2) != -ENOSYS) {
+	if (stsi(info, 1, 2, 2) == 0) {
		/*
		 * Major sigh. The cpu capability encoding is "special".
		 * If the first 9 bits of info->capability are 0 then it
@@ -460,7 +418,7 @@ void s390_adjust_jiffies(void)
/*
 * calibrate the delay loop
 */
-void __cpuinit calibrate_delay(void)
+void calibrate_delay(void)
{
	s390_adjust_jiffies();
	/* Print the good old Bogomips line .. */
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index fa02f443f5f..0931b110c82 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -1,5 +1,4 @@
/*
- *  arch/s390/kernel/time.c
 *  Time of day based timer functions.
 *
 *  S390 version
@@ -35,7 +34,7 @@
#include <linux/profile.h>
#include <linux/timex.h>
#include <linux/notifier.h>
-#include <linux/clocksource.h>
+#include <linux/timekeeper_internal.h>
#include <linux/clockchips.h>
#include <linux/gfp.h>
#include <linux/kprobes.h>
@@ -45,7 +44,7 @@
#include <asm/vdso.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
-#include <asm/timer.h>
+#include <asm/vtimer.h>
#include <asm/etr.h>
#include <asm/cio.h>
#include "entry.h"
@@ -64,7 +63,7 @@ static DEFINE_PER_CPU(struct clock_event_device, comparators);
 */
unsigned long long notrace __kprobes sched_clock(void)
{
-	return (get_clock_monotonic() * 125) >> 9;
+	return tod_to_ns(get_tod_clock_monotonic());
}
/*
@@ -93,7 +92,6 @@ void clock_comparator_work(void)
	struct clock_event_device *cd;
	S390_lowcore.clock_comparator = -1ULL;
-	set_clock_comparator(S390_lowcore.clock_comparator);
	cd = &__get_cpu_var(comparators);
	cd->event_handler(cd);
}
@@ -110,14 +108,10 @@ static void fixup_clock_comparator(unsigned long long delta)
	set_clock_comparator(S390_lowcore.clock_comparator);
}
-static int s390_next_ktime(ktime_t expires,
+static int s390_next_event(unsigned long delta,
			   struct clock_event_device *evt)
{
-	u64 nsecs;
-
-	nsecs = ktime_to_ns(ktime_sub(expires, ktime_get_monotonic_offset()));
-	do_div(nsecs, 125);
-	S390_lowcore.clock_comparator = TOD_UNIX_EPOCH + (nsecs << 9);
+	S390_lowcore.clock_comparator = get_tod_clock() + delta;
	set_clock_comparator(S390_lowcore.clock_comparator);
	return 0;
}
@@ -142,15 +136,14 @@ void init_cpu_timer(void)
	cpu = smp_processor_id();
	cd = &per_cpu(comparators, cpu);
	cd->name = "comparator";
-	cd->features = CLOCK_EVT_FEAT_ONESHOT |
-		       CLOCK_EVT_FEAT_KTIME;
+	cd->features = CLOCK_EVT_FEAT_ONESHOT;
	cd->mult = 16777;
	cd->shift = 12;
	cd->min_delta_ns = 1;
	cd->max_delta_ns = LONG_MAX;
	cd->rating = 400;
	cd->cpumask = cpumask_of(cpu);
-	cd->set_next_ktime = s390_next_ktime;
+	cd->set_next_event = s390_next_event;
	cd->set_mode = s390_set_mode;
	clockevents_register_device(cd);
@@ -162,11 +155,11 @@ void init_cpu_timer(void)
	__ctl_set_bit(0, 4);
}
-static void clock_comparator_interrupt(unsigned int ext_int_code,
+static void clock_comparator_interrupt(struct ext_code ext_code,
				       unsigned int param32,
				       unsigned long param64)
{
-	kstat_cpu(smp_processor_id()).irqs[EXTINT_CLK]++;
+	inc_irq_stat(IRQEXT_CLK);
	if (S390_lowcore.clock_comparator == -1ULL)
		set_clock_comparator(S390_lowcore.clock_comparator);
}
@@ -174,10 +167,10 @@ static void clock_comparator_interrupt(unsigned int ext_int_code,
static void etr_timing_alert(struct etr_irq_parm *);
static void stp_timing_alert(struct stp_irq_parm *);
-static void timing_alert_interrupt(unsigned int ext_int_code,
+static void timing_alert_interrupt(struct ext_code ext_code,
				   unsigned int param32, unsigned long param64)
{
-	kstat_cpu(smp_processor_id()).irqs[EXTINT_TLA]++;
+	inc_irq_stat(IRQEXT_TLA);
	if (param32 & 0x00c40000)
		etr_timing_alert((struct etr_irq_parm *) &param32);
	if (param32 & 0x00038000)
@@ -189,7 +182,7 @@ static void stp_reset(void);
void read_persistent_clock(struct timespec *ts)
{
-	tod_to_timeval(get_clock() - TOD_UNIX_EPOCH, ts);
+	tod_to_timeval(get_tod_clock() - TOD_UNIX_EPOCH, ts);
}
void read_boot_clock(struct timespec *ts)
@@ -199,7 +192,7 @@ void read_boot_clock(struct timespec *ts)
static cycle_t read_tod_clock(struct clocksource *cs)
{
-	return get_clock();
+	return get_tod_clock();
}
static struct clocksource clocksource_tod = {
@@ -217,21 +210,30 @@ struct clocksource * __init clocksource_default_clock(void)
	return &clocksource_tod;
}
-void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
-			struct clocksource *clock, u32 mult)
+void update_vsyscall(struct timekeeper *tk)
{
-	if (clock != &clocksource_tod)
+	u64 nsecps;
+
+	if (tk->clock != &clocksource_tod)
		return;
	/* Make userspace gettimeofday spin until we're done. */
	++vdso_data->tb_update_count;
	smp_wmb();
-	vdso_data->xtime_tod_stamp = clock->cycle_last;
-	vdso_data->xtime_clock_sec = wall_time->tv_sec;
-	vdso_data->xtime_clock_nsec = wall_time->tv_nsec;
-	vdso_data->wtom_clock_sec = wtm->tv_sec;
-	vdso_data->wtom_clock_nsec = wtm->tv_nsec;
-	vdso_data->ntp_mult = mult;
+	vdso_data->xtime_tod_stamp = tk->clock->cycle_last;
+	vdso_data->xtime_clock_sec = tk->xtime_sec;
+	vdso_data->xtime_clock_nsec = tk->xtime_nsec;
+	vdso_data->wtom_clock_sec =
+		tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
+	vdso_data->wtom_clock_nsec = tk->xtime_nsec +
+		+ ((u64) tk->wall_to_monotonic.tv_nsec << tk->shift);
+	nsecps = (u64) NSEC_PER_SEC << tk->shift;
+	while (vdso_data->wtom_clock_nsec >= nsecps) {
+		vdso_data->wtom_clock_nsec -= nsecps;
+		vdso_data->wtom_clock_sec++;
+	}
+	vdso_data->tk_mult = tk->mult;
+	vdso_data->tk_shift = tk->shift;
	smp_wmb();
	++vdso_data->tb_update_count;
}
@@ -260,11 +262,11 @@ void __init time_init(void)
	stp_reset();
	/* request the clock comparator external interrupt */
-	if (register_external_interrupt(0x1004, clock_comparator_interrupt))
-		panic("Couldn't request external interrupt 0x1004");
+	if (register_external_irq(EXT_IRQ_CLK_COMP, clock_comparator_interrupt))
+		panic("Couldn't request external interrupt 0x1004");
	/* request the timing alert external interrupt */
-	if (register_external_interrupt(0x1406, timing_alert_interrupt))
+	if (register_external_irq(EXT_IRQ_TIMING_ALERT, timing_alert_interrupt))
		panic("Couldn't request external interrupt 0x1406");
	if (clocksource_register(&clocksource_tod) != 0)
@@ -327,7 +329,7 @@ static unsigned long clock_sync_flags;
 * The synchronous get_clock function. It will write the current clock
 * value to the clock pointer and return 0 if the clock is in sync with
 * the external time source. If the clock mode is local it will return
- * -ENOSYS and -EAGAIN if the clock is not in sync with the external
+ * -EOPNOTSUPP and -EAGAIN if the clock is not in sync with the external
 * reference.
 */
int get_sync_clock(unsigned long long *clock)
@@ -337,7 +339,7 @@ int get_sync_clock(unsigned long long *clock)
	sw_ptr = &get_cpu_var(clock_sync_word);
	sw0 = atomic_read(sw_ptr);
-	*clock = get_clock();
+	*clock = get_tod_clock();
	sw1 = atomic_read(sw_ptr);
	put_cpu_var(clock_sync_word);
	if (sw0 == sw1 && (sw0 & 0x80000000U))
@@ -345,7 +347,7 @@ int get_sync_clock(unsigned long long *clock)
		return 0;
	if (!test_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags) &&
	    !test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
-		return -ENOSYS;
+		return -EOPNOTSUPP;
	if (!test_bit(CLOCK_SYNC_ETR, &clock_sync_flags) &&
	    !test_bit(CLOCK_SYNC_STP, &clock_sync_flags))
		return -EACCES;
@@ -481,7 +483,7 @@ static void etr_reset(void)
		.p0 = 0, .p1 = 0, ._pad1 = 0, .ea = 0,
		.es = 0, .sl = 0 };
	if (etr_setr(&etr_eacr) == 0) {
-		etr_tolec = get_clock();
+		etr_tolec = get_tod_clock();
		set_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags);
		if (etr_port0_online && etr_port1_online)
			set_bit(CLOCK_SYNC_ETR, &clock_sync_flags);
@@ -763,8 +765,8 @@ static int etr_sync_clock(void *data)
	__ctl_set_bit(14, 21);
	__ctl_set_bit(0, 29);
	clock = ((unsigned long long) (aib->edf2.etv + 1)) << 32;
-	old_clock = get_clock();
-	if (set_clock(clock) == 0) {
+	old_clock = get_tod_clock();
+	if (set_tod_clock(clock) == 0) {
		__udelay(1);	/* Wait for the clock to start. */
		__ctl_clear_bit(0, 29);
		__ctl_clear_bit(14, 21);
@@ -840,7 +842,7 @@ static struct etr_eacr etr_handle_events(struct etr_eacr eacr)
	 * assume that this can have caused an stepping
	 * port switch.
	 */
-	etr_tolec = get_clock();
+	etr_tolec = get_tod_clock();
	eacr.p0 = etr_port0_online;
	if (!eacr.p0)
		eacr.e0 = 0;
@@ -853,7 +855,7 @@ static struct etr_eacr etr_handle_events(struct etr_eacr eacr)
	 * assume that this can have caused an stepping
	 * port switch.
	 */
-	etr_tolec = get_clock();
+	etr_tolec = get_tod_clock();
	eacr.p1 = etr_port1_online;
	if (!eacr.p1)
		eacr.e1 = 0;
@@ -969,7 +971,7 @@ static void etr_update_eacr(struct etr_eacr eacr)
	etr_eacr = eacr;
	etr_setr(&etr_eacr);
	if (dp_changed)
-		etr_tolec = get_clock();
+		etr_tolec = get_tod_clock();
}
/*
@@ -1007,7 +1009,7 @@ static void etr_work_fn(struct work_struct *work)
	/* Store aib to get the current ETR status word. */
	BUG_ON(etr_stetr(&aib) != 0);
	etr_port0.esw = etr_port1.esw = aib.esw; /* Copy status word. */
-	now = get_clock();
+	now = get_tod_clock();
	/*
	 * Update the port information if the last stepping port change
@@ -1532,10 +1534,10 @@ static int stp_sync_clock(void *data)
	if (stp_info.todoff[0] || stp_info.todoff[1] ||
	    stp_info.todoff[2] || stp_info.todoff[3] ||
	    stp_info.tmd != 2) {
-		old_clock = get_clock();
+		old_clock = get_tod_clock();
		rc = chsc_sstpc(stp_page, STP_OP_SYNC, 0);
		if (rc == 0) {
-			delta = adjust_time(old_clock, get_clock(), 0);
+			delta = adjust_time(old_clock, get_tod_clock(), 0);
			fixup_clock_comparator(delta);
			rc = chsc_sstpi(stp_page, &stp_info,
					sizeof(struct stp_sstpi));
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 7370a41948c..355a16c5570 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -1,5 +1,5 @@
/*
- * Copyright IBM Corp. 2007,2011
+ * Copyright IBM Corp. 2007, 2011
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */
@@ -10,6 +10,7 @@
#include <linux/bootmem.h>
#include <linux/cpuset.h>
#include <linux/device.h>
+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
@@ -17,6 +18,7 @@
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/mm.h>
+#include <asm/sysinfo.h>
#define PTF_HORIZONTAL (0UL)
#define PTF_VERTICAL (1UL)
@@ -28,83 +30,70 @@ struct mask_info {
	cpumask_t mask;
};
-static int topology_enabled = 1;
+static void set_topology_timer(void);
static void topology_work_fn(struct work_struct *work);
static struct sysinfo_15_1_x *tl_info;
-static void set_topology_timer(void);
-static DECLARE_WORK(topology_work, topology_work_fn);
-/* topology_lock protects the core linked list */
-static DEFINE_SPINLOCK(topology_lock);
-static struct mask_info core_info;
-cpumask_t cpu_core_map[NR_CPUS];
-unsigned char cpu_core_id[NR_CPUS];
+static int topology_enabled = 1;
+static DECLARE_WORK(topology_work, topology_work_fn);
+/* topology_lock protects the socket and book linked lists */
+static DEFINE_SPINLOCK(topology_lock);
+static struct mask_info socket_info;
static struct mask_info book_info;
-cpumask_t cpu_book_map[NR_CPUS];
-unsigned char cpu_book_id[NR_CPUS];
-/* smp_cpu_state_mutex must be held when accessing this array */
-int cpu_polarization[NR_CPUS];
+struct cpu_topology_s390 cpu_topology[NR_CPUS];
+EXPORT_SYMBOL_GPL(cpu_topology);
static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
{
	cpumask_t mask;
-	cpumask_clear(&mask);
-	if (!topology_enabled || !MACHINE_HAS_TOPOLOGY) {
-		cpumask_copy(&mask, cpumask_of(cpu));
+	cpumask_copy(&mask, cpumask_of(cpu));
+	if (!topology_enabled || !MACHINE_HAS_TOPOLOGY)
		return mask;
+	for (; info; info = info->next) {
+		if (cpumask_test_cpu(cpu, &info->mask))
+			return info->mask;
	}
-	while (info) {
-		if (cpumask_test_cpu(cpu, &info->mask)) {
-			mask = info->mask;
-			break;
-		}
-		info = info->next;
-	}
-	if (cpumask_empty(&mask))
-		cpumask_copy(&mask, cpumask_of(cpu));
	return mask;
}
static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu,
					  struct mask_info *book,
-					  struct mask_info *core,
-					  int one_core_per_cpu)
+					  struct mask_info *socket,
+					  int one_socket_per_cpu)
{
	unsigned int cpu;
-	for (cpu = find_first_bit(&tl_cpu->mask[0], TOPOLOGY_CPU_BITS);
-	     cpu < TOPOLOGY_CPU_BITS;
-	     cpu = find_next_bit(&tl_cpu->mask[0], TOPOLOGY_CPU_BITS, cpu + 1))
-	{
-		unsigned int rcpu, lcpu;
+	for_each_set_bit(cpu, &tl_cpu->mask[0], TOPOLOGY_CPU_BITS) {
+		unsigned int rcpu;
+		int lcpu;
		rcpu = TOPOLOGY_CPU_BITS - 1 - cpu + tl_cpu->origin;
-		for_each_present_cpu(lcpu) {
-			if (cpu_logical_map(lcpu) != rcpu)
-				continue;
-			cpumask_set_cpu(lcpu, &book->mask);
-			cpu_book_id[lcpu] = book->id;
-			cpumask_set_cpu(lcpu, &core->mask);
-			if (one_core_per_cpu) {
-				cpu_core_id[lcpu] = rcpu;
-				core = core->next;
-			} else {
-				cpu_core_id[lcpu] = core->id;
-			}
-			cpu_set_polarization(lcpu, tl_cpu->pp);
+		lcpu = smp_find_processor_id(rcpu);
+		if (lcpu < 0)
+			continue;
+		cpumask_set_cpu(lcpu, &book->mask);
+		cpu_topology[lcpu].book_id = book->id;
+		cpumask_set_cpu(lcpu, &socket->mask);
+		cpu_topology[lcpu].core_id = rcpu;
+		if (one_socket_per_cpu) {
+			cpu_topology[lcpu].socket_id = rcpu;
+			socket = socket->next;
+		} else {
+			cpu_topology[lcpu].socket_id = socket->id;
		}
+		smp_cpu_set_polarization(lcpu, tl_cpu->pp);
	}
-	return core;
+	return socket;
}
static void clear_masks(void)
{
	struct mask_info *info;
-	info = &core_info;
+	info = &socket_info;
while (info) { cpumask_clear(&info->mask); info = info->next; @@ -123,9 +112,9 @@ static union topology_entry *next_tle(union topology_entry *tle) return (union topology_entry *)((struct topology_container *)tle + 1); } -static void __tl_to_cores_generic(struct sysinfo_15_1_x *info) +static void __tl_to_masks_generic(struct sysinfo_15_1_x *info) { - struct mask_info *core = &core_info; + struct mask_info *socket = &socket_info; struct mask_info *book = &book_info; union topology_entry *tle, *end; @@ -138,11 +127,11 @@ static void __tl_to_cores_generic(struct sysinfo_15_1_x *info) book->id = tle->container.id; break; case 1: - core = core->next; - core->id = tle->container.id; + socket = socket->next; + socket->id = tle->container.id; break; case 0: - add_cpus_to_mask(&tle->cpu, book, core, 0); + add_cpus_to_mask(&tle->cpu, book, socket, 0); break; default: clear_masks(); @@ -152,9 +141,9 @@ static void __tl_to_cores_generic(struct sysinfo_15_1_x *info) } } -static void __tl_to_cores_z10(struct sysinfo_15_1_x *info) +static void __tl_to_masks_z10(struct sysinfo_15_1_x *info) { - struct mask_info *core = &core_info; + struct mask_info *socket = &socket_info; struct mask_info *book = &book_info; union topology_entry *tle, *end; @@ -167,7 +156,7 @@ static void __tl_to_cores_z10(struct sysinfo_15_1_x *info) book->id = tle->container.id; break; case 0: - core = add_cpus_to_mask(&tle->cpu, book, core, 1); + socket = add_cpus_to_mask(&tle->cpu, book, socket, 1); break; default: clear_masks(); @@ -177,20 +166,20 @@ static void __tl_to_cores_z10(struct sysinfo_15_1_x *info) } } -static void tl_to_cores(struct sysinfo_15_1_x *info) +static void tl_to_masks(struct sysinfo_15_1_x *info) { struct cpuid cpu_id; - get_cpu_id(&cpu_id); spin_lock_irq(&topology_lock); + get_cpu_id(&cpu_id); clear_masks(); switch (cpu_id.machine) { case 0x2097: case 0x2098: - __tl_to_cores_z10(info); + __tl_to_masks_z10(info); break; default: - __tl_to_cores_generic(info); + __tl_to_masks_generic(info); } spin_unlock_irq(&topology_lock); } @@ -201,7 +190,7 @@ static void topology_update_polarization_simple(void) mutex_lock(&smp_cpu_state_mutex); for_each_possible_cpu(cpu) - cpu_set_polarization(cpu, POLARIZATION_HRZ); + smp_cpu_set_polarization(cpu, POLARIZATION_HRZ); mutex_unlock(&smp_cpu_state_mutex); } @@ -231,31 +220,34 @@ int topology_set_cpu_management(int fc) if (rc) return -EBUSY; for_each_possible_cpu(cpu) - cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); + smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); return rc; } -static void update_cpu_core_map(void) +static void update_cpu_masks(void) { unsigned long flags; int cpu; spin_lock_irqsave(&topology_lock, flags); for_each_possible_cpu(cpu) { - cpu_core_map[cpu] = cpu_group_map(&core_info, cpu); - cpu_book_map[cpu] = cpu_group_map(&book_info, cpu); + cpu_topology[cpu].core_mask = cpu_group_map(&socket_info, cpu); + cpu_topology[cpu].book_mask = cpu_group_map(&book_info, cpu); + if (!MACHINE_HAS_TOPOLOGY) { + cpu_topology[cpu].core_id = cpu; + cpu_topology[cpu].socket_id = cpu; + cpu_topology[cpu].book_id = cpu; + } } spin_unlock_irqrestore(&topology_lock, flags); } void store_topology(struct sysinfo_15_1_x *info) { - int rc; - - rc = stsi(info, 15, 1, 3); - if (rc != -ENOSYS) - return; - stsi(info, 15, 1, 2); + if (topology_max_mnest >= 3) + stsi(info, 15, 1, 3); + else + stsi(info, 15, 1, 2); } int arch_update_cpu_topology(void) @@ -265,13 +257,13 @@ int arch_update_cpu_topology(void) int cpu; if (!MACHINE_HAS_TOPOLOGY) { - update_cpu_core_map(); + 
update_cpu_masks(); topology_update_polarization_simple(); return 0; } store_topology(info); - tl_to_cores(info); - update_cpu_core_map(); + tl_to_masks(info); + update_cpu_masks(); for_each_online_cpu(cpu) { dev = get_cpu_device(cpu); kobject_uevent(&dev->kobj, KOBJ_CHANGE); @@ -341,7 +333,9 @@ static void __init alloc_masks(struct sysinfo_15_1_x *info, nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i]; nr_masks = max(nr_masks, 1); for (i = 0; i < nr_masks; i++) { - mask->next = alloc_bootmem(sizeof(struct mask_info)); + mask->next = alloc_bootmem_align( + roundup_pow_of_two(sizeof(struct mask_info)), + roundup_pow_of_two(sizeof(struct mask_info))); mask = mask->next; } } @@ -360,7 +354,7 @@ void __init s390_init_cpu_topology(void) for (i = 0; i < TOPOLOGY_NR_MAG; i++) printk(KERN_CONT " %d", info->mag[i]); printk(KERN_CONT " / %d\n", info->mnest); - alloc_masks(info, &core_info, 1); + alloc_masks(info, &socket_info, 1); alloc_masks(info, &book_info, 2); } @@ -415,7 +409,7 @@ static ssize_t cpu_polarization_show(struct device *dev, ssize_t count; mutex_lock(&smp_cpu_state_mutex); - switch (cpu_read_polarization(cpu)) { + switch (smp_cpu_get_polarization(cpu)) { case POLARIZATION_HRZ: count = sprintf(buf, "horizontal\n"); break; @@ -451,6 +445,23 @@ int topology_cpu_init(struct cpu *cpu) return sysfs_create_group(&cpu->dev.kobj, &topology_cpu_attr_group); } +const struct cpumask *cpu_coregroup_mask(int cpu) +{ + return &cpu_topology[cpu].core_mask; +} + +static const struct cpumask *cpu_book_mask(int cpu) +{ + return &cpu_topology[cpu].book_mask; +} + +static struct sched_domain_topology_level s390_topology[] = { + { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) }, + { cpu_book_mask, SD_INIT_NAME(BOOK) }, + { cpu_cpu_mask, SD_INIT_NAME(DIE) }, + { NULL, }, +}; + static int __init topology_init(void) { if (!MACHINE_HAS_TOPOLOGY) { @@ -459,7 +470,9 @@ static int __init topology_init(void) } set_topology_timer(); out: - update_cpu_core_map(); + + set_sched_topology(s390_topology); + return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching); } device_initcall(topology_init); diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c index 5ce3750b181..c5762324d9e 100644 --- a/arch/s390/kernel/traps.c +++ b/arch/s390/kernel/traps.c @@ -1,8 +1,6 @@ /* - * arch/s390/kernel/traps.c - * * S390 version - * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Copyright IBM Corp. 1999, 2000 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), * @@ -14,257 +12,31 @@ * 'Traps.c' handles hardware traps and faults after we have saved some * state in 'asm.s'. 
*/ -#include <linux/sched.h> -#include <linux/kernel.h> -#include <linux/string.h> -#include <linux/errno.h> +#include <linux/kprobes.h> +#include <linux/kdebug.h> +#include <linux/module.h> #include <linux/ptrace.h> -#include <linux/timer.h> +#include <linux/sched.h> #include <linux/mm.h> -#include <linux/smp.h> -#include <linux/init.h> -#include <linux/interrupt.h> -#include <linux/seq_file.h> -#include <linux/delay.h> -#include <linux/module.h> -#include <linux/kdebug.h> -#include <linux/kallsyms.h> -#include <linux/reboot.h> -#include <linux/kprobes.h> -#include <linux/bug.h> -#include <linux/utsname.h> -#include <asm/system.h> -#include <asm/uaccess.h> -#include <asm/io.h> -#include <linux/atomic.h> -#include <asm/mathemu.h> -#include <asm/cpcmd.h> -#include <asm/lowcore.h> -#include <asm/debug.h> #include "entry.h" -void (*pgm_check_table[128])(struct pt_regs *regs); - int show_unhandled_signals = 1; -#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; }) - -#ifndef CONFIG_64BIT -#define LONG "%08lx " -#define FOURLONG "%08lx %08lx %08lx %08lx\n" -static int kstack_depth_to_print = 12; -#else /* CONFIG_64BIT */ -#define LONG "%016lx " -#define FOURLONG "%016lx %016lx %016lx %016lx\n" -static int kstack_depth_to_print = 20; -#endif /* CONFIG_64BIT */ - -/* - * For show_trace we have tree different stack to consider: - * - the panic stack which is used if the kernel stack has overflown - * - the asynchronous interrupt stack (cpu related) - * - the synchronous kernel stack (process related) - * The stack trace can start at any of the three stack and can potentially - * touch all of them. The order is: panic stack, async stack, sync stack. - */ -static unsigned long -__show_trace(unsigned long sp, unsigned long low, unsigned long high) -{ - struct stack_frame *sf; - struct pt_regs *regs; - - while (1) { - sp = sp & PSW_ADDR_INSN; - if (sp < low || sp > high - sizeof(*sf)) - return sp; - sf = (struct stack_frame *) sp; - printk("([<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN); - print_symbol("%s)\n", sf->gprs[8] & PSW_ADDR_INSN); - /* Follow the backchain. */ - while (1) { - low = sp; - sp = sf->back_chain & PSW_ADDR_INSN; - if (!sp) - break; - if (sp <= low || sp > high - sizeof(*sf)) - return sp; - sf = (struct stack_frame *) sp; - printk(" [<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN); - print_symbol("%s\n", sf->gprs[8] & PSW_ADDR_INSN); - } - /* Zero backchain detected, check for interrupt frame. */ - sp = (unsigned long) (sf + 1); - if (sp <= low || sp > high - sizeof(*regs)) - return sp; - regs = (struct pt_regs *) sp; - printk(" [<%016lx>] ", regs->psw.addr & PSW_ADDR_INSN); - print_symbol("%s\n", regs->psw.addr & PSW_ADDR_INSN); - low = sp; - sp = regs->gprs[15]; - } -} - -static void show_trace(struct task_struct *task, unsigned long *stack) -{ - register unsigned long __r15 asm ("15"); - unsigned long sp; - - sp = (unsigned long) stack; - if (!sp) - sp = task ? 
task->thread.ksp : __r15; - printk("Call Trace:\n"); -#ifdef CONFIG_CHECK_STACK - sp = __show_trace(sp, S390_lowcore.panic_stack - 4096, - S390_lowcore.panic_stack); -#endif - sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE, - S390_lowcore.async_stack); - if (task) - __show_trace(sp, (unsigned long) task_stack_page(task), - (unsigned long) task_stack_page(task) + THREAD_SIZE); - else - __show_trace(sp, S390_lowcore.thread_info, - S390_lowcore.thread_info + THREAD_SIZE); - if (!task) - task = current; - debug_show_held_locks(task); -} - -void show_stack(struct task_struct *task, unsigned long *sp) -{ - register unsigned long * __r15 asm ("15"); - unsigned long *stack; - int i; - - if (!sp) - stack = task ? (unsigned long *) task->thread.ksp : __r15; - else - stack = sp; - - for (i = 0; i < kstack_depth_to_print; i++) { - if (((addr_t) stack & (THREAD_SIZE-1)) == 0) - break; - if (i && ((i * sizeof (long) % 32) == 0)) - printk("\n "); - printk(LONG, *stack++); - } - printk("\n"); - show_trace(task, sp); -} - -static void show_last_breaking_event(struct pt_regs *regs) +static inline void __user *get_trap_ip(struct pt_regs *regs) { #ifdef CONFIG_64BIT - printk("Last Breaking-Event-Address:\n"); - printk(" [<%016lx>] ", regs->args[0] & PSW_ADDR_INSN); - print_symbol("%s\n", regs->args[0] & PSW_ADDR_INSN); -#endif -} + unsigned long address; -/* - * The architecture-independent dump_stack generator - */ -void dump_stack(void) -{ - printk("CPU: %d %s %s %.*s\n", - task_thread_info(current)->cpu, print_tainted(), - init_utsname()->release, - (int)strcspn(init_utsname()->version, " "), - init_utsname()->version); - printk("Process %s (pid: %d, task: %p, ksp: %p)\n", - current->comm, current->pid, current, - (void *) current->thread.ksp); - show_stack(NULL, NULL); -} -EXPORT_SYMBOL(dump_stack); - -static inline int mask_bits(struct pt_regs *regs, unsigned long bits) -{ - return (regs->psw.mask & bits) / ((~bits + 1) & bits); -} - -void show_registers(struct pt_regs *regs) -{ - char *mode; - - mode = (regs->psw.mask & PSW_MASK_PSTATE) ? 
"User" : "Krnl"; - printk("%s PSW : %p %p", - mode, (void *) regs->psw.mask, - (void *) regs->psw.addr); - print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN); - printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x " - "P:%x AS:%x CC:%x PM:%x", mask_bits(regs, PSW_MASK_PER), - mask_bits(regs, PSW_MASK_DAT), mask_bits(regs, PSW_MASK_IO), - mask_bits(regs, PSW_MASK_EXT), mask_bits(regs, PSW_MASK_KEY), - mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT), - mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC), - mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM)); -#ifdef CONFIG_64BIT - printk(" EA:%x", mask_bits(regs, PSW_MASK_EA | PSW_MASK_BA)); -#endif - printk("\n%s GPRS: " FOURLONG, mode, - regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]); - printk(" " FOURLONG, - regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]); - printk(" " FOURLONG, - regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]); - printk(" " FOURLONG, - regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]); - - show_code(regs); -} - -void show_regs(struct pt_regs *regs) -{ - print_modules(); - printk("CPU: %d %s %s %.*s\n", - task_thread_info(current)->cpu, print_tainted(), - init_utsname()->release, - (int)strcspn(init_utsname()->version, " "), - init_utsname()->version); - printk("Process %s (pid: %d, task: %p, ksp: %p)\n", - current->comm, current->pid, current, - (void *) current->thread.ksp); - show_registers(regs); - /* Show stack backtrace if pt_regs is from kernel mode */ - if (!(regs->psw.mask & PSW_MASK_PSTATE)) - show_trace(NULL, (unsigned long *) regs->gprs[15]); - show_last_breaking_event(regs); -} - -static DEFINE_SPINLOCK(die_lock); - -void die(struct pt_regs *regs, const char *str) -{ - static int die_counter; - - oops_enter(); - debug_stop_all(); - console_verbose(); - spin_lock_irq(&die_lock); - bust_spinlocks(1); - printk("%s: %04x [#%d] ", str, regs->int_code & 0xffff, ++die_counter); -#ifdef CONFIG_PREEMPT - printk("PREEMPT "); -#endif -#ifdef CONFIG_SMP - printk("SMP "); -#endif -#ifdef CONFIG_DEBUG_PAGEALLOC - printk("DEBUG_PAGEALLOC"); + if (regs->int_code & 0x200) + address = *(unsigned long *)(current->thread.trap_tdb + 24); + else + address = regs->psw.addr; + return (void __user *) + ((address - (regs->int_code >> 16)) & PSW_ADDR_INSN); +#else + return (void __user *) + ((regs->psw.addr - (regs->int_code >> 16)) & PSW_ADDR_INSN); #endif - printk("\n"); - notify_die(DIE_OOPS, str, regs, 0, regs->int_code & 0xffff, SIGSEGV); - show_regs(regs); - bust_spinlocks(0); - add_taint(TAINT_DIE); - spin_unlock_irq(&die_lock); - if (in_interrupt()) - panic("Fatal exception in interrupt"); - if (panic_on_oops) - panic("Fatal exception: panic_on_oops"); - oops_exit(); - do_exit(SIGSEGV); } static inline void report_user_fault(struct pt_regs *regs, int signr) @@ -286,12 +58,6 @@ int is_valid_bugaddr(unsigned long addr) return 1; } -static inline void __user *get_psw_address(struct pt_regs *regs) -{ - return (void __user *) - ((regs->psw.addr - (regs->int_code >> 16)) & PSW_ADDR_INSN); -} - static void __kprobes do_trap(struct pt_regs *regs, int si_signo, int si_code, char *str) { @@ -301,18 +67,18 @@ static void __kprobes do_trap(struct pt_regs *regs, regs->int_code, si_signo) == NOTIFY_STOP) return; - if (regs->psw.mask & PSW_MASK_PSTATE) { + if (user_mode(regs)) { info.si_signo = si_signo; info.si_errno = 0; info.si_code = si_code; - info.si_addr = get_psw_address(regs); + info.si_addr = get_trap_ip(regs); force_sig_info(si_signo, &info, 
current); report_user_fault(regs, si_signo); } else { const struct exception_table_entry *fixup; fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN); if (fixup) - regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE; + regs->psw.addr = extable_fixup(fixup) | PSW_ADDR_AMODE; else { enum bug_trap_type btt; @@ -340,9 +106,9 @@ void __kprobes do_per_trap(struct pt_regs *regs) force_sig_info(SIGTRAP, &info, current); } -static void default_trap_handler(struct pt_regs *regs) +void default_trap_handler(struct pt_regs *regs) { - if (regs->psw.mask & PSW_MASK_PSTATE) { + if (user_mode(regs)) { report_user_fault(regs, SIGSEGV); do_exit(SIGSEGV); } else @@ -350,9 +116,9 @@ static void default_trap_handler(struct pt_regs *regs) } #define DO_ERROR_INFO(name, signr, sicode, str) \ -static void name(struct pt_regs *regs) \ -{ \ - do_trap(regs, signr, sicode, str); \ +void name(struct pt_regs *regs) \ +{ \ + do_trap(regs, signr, sicode, str); \ } DO_ERROR_INFO(addressing_exception, SIGILL, ILL_ILLADR, @@ -382,6 +148,11 @@ DO_ERROR_INFO(special_op_exception, SIGILL, ILL_ILLOPN, DO_ERROR_INFO(translation_exception, SIGILL, ILL_ILLOPN, "translation exception") +#ifdef CONFIG_64BIT +DO_ERROR_INFO(transaction_exception, SIGILL, ILL_ILLOPN, + "transaction constraint exception") +#endif + static inline void do_fp_trap(struct pt_regs *regs, int fpc) { int si_code = 0; @@ -402,16 +173,16 @@ static inline void do_fp_trap(struct pt_regs *regs, int fpc) do_trap(regs, SIGFPE, si_code, "floating point exception"); } -static void __kprobes illegal_op(struct pt_regs *regs) +void __kprobes illegal_op(struct pt_regs *regs) { siginfo_t info; __u8 opcode[6]; __u16 __user *location; int signal = 0; - location = get_psw_address(regs); + location = get_trap_ip(regs); - if (regs->psw.mask & PSW_MASK_PSTATE) { + if (user_mode(regs)) { if (get_user(*((__u16 *) opcode), (__u16 __user *) location)) return; if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) { @@ -477,9 +248,9 @@ void specification_exception(struct pt_regs *regs) __u16 __user *location = NULL; int signal = 0; - location = (__u16 __user *) get_psw_address(regs); + location = (__u16 __user *) get_trap_ip(regs); - if (regs->psw.mask & PSW_MASK_PSTATE) { + if (user_mode(regs)) { get_user(*((__u16 *) opcode), location); switch (opcode[0]) { case 0x28: /* LDR Rx,Ry */ @@ -521,18 +292,18 @@ DO_ERROR_INFO(specification_exception, SIGILL, ILL_ILLOPN, "specification exception"); #endif -static void data_exception(struct pt_regs *regs) +void data_exception(struct pt_regs *regs) { __u16 __user *location; int signal = 0; - location = get_psw_address(regs); + location = get_trap_ip(regs); if (MACHINE_HAS_IEEE) asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc)); #ifdef CONFIG_MATHEMU - else if (regs->psw.mask & PSW_MASK_PSTATE) { + else if (user_mode(regs)) { __u8 opcode[6]; get_user(*((__u16 *) opcode), location); switch (opcode[0]) { @@ -596,10 +367,10 @@ static void data_exception(struct pt_regs *regs) do_trap(regs, signal, ILL_ILLOPN, "data exception"); } -static void space_switch_exception(struct pt_regs *regs) +void space_switch_exception(struct pt_regs *regs) { /* Set user psw back to home space mode. */ - if (regs->psw.mask & PSW_MASK_PSTATE) + if (user_mode(regs)) regs->psw.mask |= PSW_ASC_HOME; /* Send SIGILL. 
*/ do_trap(regs, SIGILL, ILL_PRVOPC, "space switch event"); @@ -614,42 +385,7 @@ void __kprobes kernel_stack_overflow(struct pt_regs * regs) panic("Corrupt kernel stack, can't continue."); } -/* init is done in lowcore.S and head.S */ - void __init trap_init(void) { - int i; - - for (i = 0; i < 128; i++) - pgm_check_table[i] = &default_trap_handler; - pgm_check_table[1] = &illegal_op; - pgm_check_table[2] = &privileged_op; - pgm_check_table[3] = &execute_exception; - pgm_check_table[4] = &do_protection_exception; - pgm_check_table[5] = &addressing_exception; - pgm_check_table[6] = &specification_exception; - pgm_check_table[7] = &data_exception; - pgm_check_table[8] = &overflow_exception; - pgm_check_table[9] = ÷_exception; - pgm_check_table[0x0A] = &overflow_exception; - pgm_check_table[0x0B] = ÷_exception; - pgm_check_table[0x0C] = &hfp_overflow_exception; - pgm_check_table[0x0D] = &hfp_underflow_exception; - pgm_check_table[0x0E] = &hfp_significance_exception; - pgm_check_table[0x0F] = &hfp_divide_exception; - pgm_check_table[0x10] = &do_dat_exception; - pgm_check_table[0x11] = &do_dat_exception; - pgm_check_table[0x12] = &translation_exception; - pgm_check_table[0x13] = &special_op_exception; -#ifdef CONFIG_64BIT - pgm_check_table[0x38] = &do_asce_exception; - pgm_check_table[0x39] = &do_dat_exception; - pgm_check_table[0x3A] = &do_dat_exception; - pgm_check_table[0x3B] = &do_dat_exception; -#endif /* CONFIG_64BIT */ - pgm_check_table[0x15] = &operand_exception; - pgm_check_table[0x1C] = &space_switch_exception; - pgm_check_table[0x1D] = &hfp_sqrt_exception; - /* Enable machine checks early. */ local_mcck_enable(); } diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c index d73630b4fe1..61364909678 100644 --- a/arch/s390/kernel/vdso.c +++ b/arch/s390/kernel/vdso.c @@ -25,12 +25,12 @@ #include <linux/compat.h> #include <asm/asm-offsets.h> #include <asm/pgtable.h> -#include <asm/system.h> #include <asm/processor.h> #include <asm/mmu.h> #include <asm/mmu_context.h> #include <asm/sections.h> #include <asm/vdso.h> +#include <asm/facility.h> #if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT) extern char vdso32_start, vdso32_end; @@ -63,7 +63,7 @@ static int __init vdso_setup(char *s) else if (strncmp(s, "off", 4) == 0) vdso_enabled = 0; else { - rc = strict_strtoul(s, 0, &val); + rc = kstrtoul(s, 0, &val); vdso_enabled = rc ? 0 : !!val; } return !rc; @@ -84,23 +84,16 @@ struct vdso_data *vdso_data = &vdso_data_store.data; */ static void vdso_init_data(struct vdso_data *vd) { - vd->ectg_available = user_mode != HOME_SPACE_MODE && test_facility(31); + vd->ectg_available = test_facility(31); } #ifdef CONFIG_64BIT /* - * Setup per cpu vdso data page. - */ -static void vdso_init_per_cpu_data(int cpu, struct vdso_per_cpu_data *vpcd) -{ -} - -/* * Allocate/free per cpu vdso data. 
*/ #define SEGMENT_ORDER 2 -int vdso_alloc_per_cpu(int cpu, struct _lowcore *lowcore) +int vdso_alloc_per_cpu(struct _lowcore *lowcore) { unsigned long segment_table, page_table, page_frame; u32 *psal, *aste; @@ -108,7 +101,7 @@ int vdso_alloc_per_cpu(int cpu, struct _lowcore *lowcore) lowcore->vdso_per_cpu_data = __LC_PASTE; - if (user_mode == HOME_SPACE_MODE || !vdso_enabled) + if (!vdso_enabled) return 0; segment_table = __get_free_pages(GFP_KERNEL, SEGMENT_ORDER); @@ -119,11 +112,11 @@ int vdso_alloc_per_cpu(int cpu, struct _lowcore *lowcore) clear_table((unsigned long *) segment_table, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE << SEGMENT_ORDER); - clear_table((unsigned long *) page_table, _PAGE_TYPE_EMPTY, + clear_table((unsigned long *) page_table, _PAGE_INVALID, 256*sizeof(unsigned long)); *(unsigned long *) segment_table = _SEGMENT_ENTRY + page_table; - *(unsigned long *) page_table = _PAGE_RO + page_frame; + *(unsigned long *) page_table = _PAGE_PROTECT + page_frame; psal = (u32 *) (page_table + 256*sizeof(unsigned long)); aste = psal + 32; @@ -132,14 +125,13 @@ int vdso_alloc_per_cpu(int cpu, struct _lowcore *lowcore) psal[i] = 0x80000000; lowcore->paste[4] = (u32)(addr_t) psal; - psal[0] = 0x20000000; + psal[0] = 0x02000000; psal[2] = (u32)(addr_t) aste; *(unsigned long *) (aste + 2) = segment_table + _ASCE_TABLE_LENGTH + _ASCE_USER_BITS + _ASCE_TYPE_SEGMENT; aste[4] = (u32)(addr_t) psal; lowcore->vdso_per_cpu_data = page_frame; - vdso_init_per_cpu_data(cpu, (struct vdso_per_cpu_data *) page_frame); return 0; out: @@ -149,12 +141,12 @@ out: return -ENOMEM; } -void vdso_free_per_cpu(int cpu, struct _lowcore *lowcore) +void vdso_free_per_cpu(struct _lowcore *lowcore) { unsigned long segment_table, page_table, page_frame; u32 *psal, *aste; - if (user_mode == HOME_SPACE_MODE || !vdso_enabled) + if (!vdso_enabled) return; psal = (u32 *)(addr_t) lowcore->paste[4]; @@ -168,19 +160,15 @@ void vdso_free_per_cpu(int cpu, struct _lowcore *lowcore) free_pages(segment_table, SEGMENT_ORDER); } -static void __vdso_init_cr5(void *dummy) +static void vdso_init_cr5(void) { unsigned long cr5; + if (!vdso_enabled) + return; cr5 = offsetof(struct _lowcore, paste); __ctl_load(cr5, 5, 5); } - -static void vdso_init_cr5(void) -{ - if (user_mode != HOME_SPACE_MODE && vdso_enabled) - on_each_cpu(__vdso_init_cr5, NULL, 1); -} #endif /* CONFIG_64BIT */ /* @@ -253,17 +241,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) * on the "data" page of the vDSO or you'll stop getting kernel * updates and your nice userland gettimeofday will be totally dead. * It's fine to use that for setting breakpoints in the vDSO code - * pages though - * - * Make sure the vDSO gets into every core dump. - * Dumping its contents makes post-mortem fully interpretable later - * without matching up the same kernel and hardware config to see - * what PC values meant. + * pages though. 
*/ rc = install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT, VM_READ|VM_EXEC| - VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC| - VM_ALWAYSDUMP, + VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, vdso_pagelist); if (rc) current->mm->context.vdso_base = 0; @@ -322,10 +304,8 @@ static int __init vdso_init(void) } vdso64_pagelist[vdso64_pages - 1] = virt_to_page(vdso_data); vdso64_pagelist[vdso64_pages] = NULL; -#ifndef CONFIG_SMP - if (vdso_alloc_per_cpu(0, &S390_lowcore)) + if (vdso_alloc_per_cpu(&S390_lowcore)) BUG(); -#endif vdso_init_cr5(); #endif /* CONFIG_64BIT */ @@ -335,7 +315,7 @@ static int __init vdso_init(void) return 0; } -arch_initcall(vdso_init); +early_initcall(vdso_init); int in_gate_area_no_mm(unsigned long addr) { diff --git a/arch/s390/kernel/vdso32/.gitignore b/arch/s390/kernel/vdso32/.gitignore new file mode 100644 index 00000000000..e45fba9d0ce --- /dev/null +++ b/arch/s390/kernel/vdso32/.gitignore @@ -0,0 +1 @@ +vdso32.lds diff --git a/arch/s390/kernel/vdso32/clock_gettime.S b/arch/s390/kernel/vdso32/clock_gettime.S index b2224e0b974..65fc3979c2f 100644 --- a/arch/s390/kernel/vdso32/clock_gettime.S +++ b/arch/s390/kernel/vdso32/clock_gettime.S @@ -38,25 +38,21 @@ __kernel_clock_gettime: sl %r1,__VDSO_XTIME_STAMP+4(%r5) brc 3,2f ahi %r0,-1 -2: ms %r0,__VDSO_NTP_MULT(%r5) /* cyc2ns(clock,cycle_delta) */ +2: ms %r0,__VDSO_TK_MULT(%r5) /* * tk->mult */ lr %r2,%r0 - l %r0,__VDSO_NTP_MULT(%r5) + l %r0,__VDSO_TK_MULT(%r5) ltr %r1,%r1 mr %r0,%r0 jnm 3f - a %r0,__VDSO_NTP_MULT(%r5) + a %r0,__VDSO_TK_MULT(%r5) 3: alr %r0,%r2 - srdl %r0,12 - al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */ - al %r1,__VDSO_XTIME_NSEC+4(%r5) - brc 12,4f - ahi %r0,1 -4: l %r2,__VDSO_XTIME_SEC+4(%r5) - al %r0,__VDSO_WTOM_NSEC(%r5) /* + wall_to_monotonic */ + al %r0,__VDSO_WTOM_NSEC(%r5) al %r1,__VDSO_WTOM_NSEC+4(%r5) brc 12,5f ahi %r0,1 -5: al %r2,__VDSO_WTOM_SEC+4(%r5) +5: l %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */ + srdl %r0,0(%r2) /* >> tk->shift */ + l %r2,__VDSO_WTOM_SEC+4(%r5) cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */ jne 1b basr %r5,0 @@ -86,20 +82,21 @@ __kernel_clock_gettime: sl %r1,__VDSO_XTIME_STAMP+4(%r5) brc 3,12f ahi %r0,-1 -12: ms %r0,__VDSO_NTP_MULT(%r5) /* cyc2ns(clock,cycle_delta) */ +12: ms %r0,__VDSO_TK_MULT(%r5) /* * tk->mult */ lr %r2,%r0 - l %r0,__VDSO_NTP_MULT(%r5) + l %r0,__VDSO_TK_MULT(%r5) ltr %r1,%r1 mr %r0,%r0 jnm 13f - a %r0,__VDSO_NTP_MULT(%r5) + a %r0,__VDSO_TK_MULT(%r5) 13: alr %r0,%r2 - srdl %r0,12 - al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */ + al %r0,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */ al %r1,__VDSO_XTIME_NSEC+4(%r5) brc 12,14f ahi %r0,1 -14: l %r2,__VDSO_XTIME_SEC+4(%r5) +14: l %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */ + srdl %r0,0(%r2) /* >> tk->shift */ + l %r2,__VDSO_XTIME_SEC+4(%r5) cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */ jne 11b basr %r5,0 diff --git a/arch/s390/kernel/vdso32/gettimeofday.S b/arch/s390/kernel/vdso32/gettimeofday.S index 2d3633175e3..fd621a950f7 100644 --- a/arch/s390/kernel/vdso32/gettimeofday.S +++ b/arch/s390/kernel/vdso32/gettimeofday.S @@ -35,15 +35,14 @@ __kernel_gettimeofday: sl %r1,__VDSO_XTIME_STAMP+4(%r5) brc 3,3f ahi %r0,-1 -3: ms %r0,__VDSO_NTP_MULT(%r5) /* cyc2ns(clock,cycle_delta) */ +3: ms %r0,__VDSO_TK_MULT(%r5) /* * tk->mult */ st %r0,24(%r15) - l %r0,__VDSO_NTP_MULT(%r5) + l %r0,__VDSO_TK_MULT(%r5) ltr %r1,%r1 mr %r0,%r0 jnm 4f - a %r0,__VDSO_NTP_MULT(%r5) + a %r0,__VDSO_TK_MULT(%r5) 4: al %r0,24(%r15) - srdl %r0,12 al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */ al 
%r1,__VDSO_XTIME_NSEC+4(%r5) brc 12,5f @@ -51,6 +50,8 @@ __kernel_gettimeofday: 5: mvc 24(4,%r15),__VDSO_XTIME_SEC+4(%r5) cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */ jne 1b + l %r4,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */ + srdl %r0,0(%r4) /* >> tk->shift */ l %r4,24(%r15) /* get tv_sec from stack */ basr %r5,0 6: ltr %r0,%r0 diff --git a/arch/s390/kernel/vdso64/.gitignore b/arch/s390/kernel/vdso64/.gitignore new file mode 100644 index 00000000000..3fd18cf9fec --- /dev/null +++ b/arch/s390/kernel/vdso64/.gitignore @@ -0,0 +1 @@ +vdso64.lds diff --git a/arch/s390/kernel/vdso64/clock_getres.S b/arch/s390/kernel/vdso64/clock_getres.S index 176e1f75f9a..34deba7c7ed 100644 --- a/arch/s390/kernel/vdso64/clock_getres.S +++ b/arch/s390/kernel/vdso64/clock_getres.S @@ -23,7 +23,9 @@ __kernel_clock_getres: je 0f cghi %r2,__CLOCK_MONOTONIC je 0f - cghi %r2,-2 /* CLOCK_THREAD_CPUTIME_ID for this thread */ + cghi %r2,__CLOCK_THREAD_CPUTIME_ID + je 0f + cghi %r2,-2 /* Per-thread CPUCLOCK with PID=0, VIRT=1 */ jne 2f larl %r5,_vdso_data icm %r0,15,__LC_ECTG_OK(%r5) diff --git a/arch/s390/kernel/vdso64/clock_gettime.S b/arch/s390/kernel/vdso64/clock_gettime.S index d46c95ed5f1..91940ed33a4 100644 --- a/arch/s390/kernel/vdso64/clock_gettime.S +++ b/arch/s390/kernel/vdso64/clock_gettime.S @@ -22,7 +22,9 @@ __kernel_clock_gettime: larl %r5,_vdso_data cghi %r2,__CLOCK_REALTIME je 4f - cghi %r2,-2 /* CLOCK_THREAD_CPUTIME_ID for this thread */ + cghi %r2,__CLOCK_THREAD_CPUTIME_ID + je 9f + cghi %r2,-2 /* Per-thread CPUCLOCK with PID=0, VIRT=1 */ je 9f cghi %r2,__CLOCK_MONOTONIC jne 12f @@ -34,14 +36,13 @@ __kernel_clock_gettime: tmll %r4,0x0001 /* pending update ? loop */ jnz 0b stck 48(%r15) /* Store TOD clock */ + lgf %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */ + lg %r0,__VDSO_WTOM_SEC(%r5) lg %r1,48(%r15) sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ - msgf %r1,__VDSO_NTP_MULT(%r5) /* * NTP adjustment */ - srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */ - alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime */ - lg %r0,__VDSO_XTIME_SEC(%r5) - alg %r1,__VDSO_WTOM_NSEC(%r5) /* + wall_to_monotonic */ - alg %r0,__VDSO_WTOM_SEC(%r5) + msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */ + alg %r1,__VDSO_WTOM_NSEC(%r5) + srlg %r1,%r1,0(%r2) /* >> tk->shift */ clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */ jne 0b larl %r5,13f @@ -62,12 +63,13 @@ __kernel_clock_gettime: tmll %r4,0x0001 /* pending update ? 
loop */ jnz 5b stck 48(%r15) /* Store TOD clock */ + lgf %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */ lg %r1,48(%r15) sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ - msgf %r1,__VDSO_NTP_MULT(%r5) /* * NTP adjustment */ - srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */ - alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime */ - lg %r0,__VDSO_XTIME_SEC(%r5) + msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */ + alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */ + srlg %r1,%r1,0(%r2) /* >> tk->shift */ + lg %r0,__VDSO_XTIME_SEC(%r5) /* tk->xtime_sec */ clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */ jne 5b larl %r5,13f diff --git a/arch/s390/kernel/vdso64/gettimeofday.S b/arch/s390/kernel/vdso64/gettimeofday.S index 36ee674722e..d0860d1d0cc 100644 --- a/arch/s390/kernel/vdso64/gettimeofday.S +++ b/arch/s390/kernel/vdso64/gettimeofday.S @@ -31,12 +31,13 @@ __kernel_gettimeofday: stck 48(%r15) /* Store TOD clock */ lg %r1,48(%r15) sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ - msgf %r1,__VDSO_NTP_MULT(%r5) /* * NTP adjustment */ - srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */ - alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime.tv_nsec */ - lg %r0,__VDSO_XTIME_SEC(%r5) /* xtime.tv_sec */ + msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */ + alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */ + lg %r0,__VDSO_XTIME_SEC(%r5) /* tk->xtime_sec */ clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */ jne 0b + lgf %r5,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */ + srlg %r1,%r1,0(%r5) /* >> tk->shift */ larl %r5,5f 2: clg %r1,0(%r5) jl 3f diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S index 21109c63eb1..35b13ed0af5 100644 --- a/arch/s390/kernel/vmlinux.lds.S +++ b/arch/s390/kernel/vmlinux.lds.S @@ -8,7 +8,7 @@ #ifndef CONFIG_64BIT OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390") -OUTPUT_ARCH(s390) +OUTPUT_ARCH(s390:31-bit) ENTRY(startup) jiffies = jiffies_64 + 4; #else @@ -45,7 +45,7 @@ SECTIONS .dummy : { *(.dummy) } :data - RODATA + RO_DATA_SECTION(PAGE_SIZE) #ifdef CONFIG_SHARED_KERNEL . = ALIGN(0x100000); /* VM shared segments are 1MB aligned */ @@ -75,6 +75,10 @@ SECTIONS EXIT_TEXT } + .exit.data : { + EXIT_DATA + } + /* early.c uses stsi, which requires page aligned data. */ . = ALIGN(PAGE_SIZE); INIT_DATA_SECTION(0x100) diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c index bb48977f546..8c34363d6f1 100644 --- a/arch/s390/kernel/vtime.c +++ b/arch/s390/kernel/vtime.c @@ -1,70 +1,83 @@ /* - * arch/s390/kernel/vtime.c * Virtual cpu timer based timer functions. * - * S390 version - * Copyright (C) 2004 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Copyright IBM Corp. 
2004, 2012 * Author(s): Jan Glauber <jan.glauber@de.ibm.com> */ -#include <linux/module.h> +#include <linux/kernel_stat.h> +#include <linux/notifier.h> +#include <linux/kprobes.h> +#include <linux/export.h> #include <linux/kernel.h> -#include <linux/time.h> -#include <linux/delay.h> -#include <linux/init.h> -#include <linux/smp.h> -#include <linux/types.h> #include <linux/timex.h> -#include <linux/notifier.h> -#include <linux/kernel_stat.h> -#include <linux/rcupdate.h> -#include <linux/posix-timers.h> +#include <linux/types.h> +#include <linux/time.h> #include <linux/cpu.h> -#include <linux/kprobes.h> +#include <linux/smp.h> -#include <asm/timer.h> #include <asm/irq_regs.h> #include <asm/cputime.h> +#include <asm/vtimer.h> +#include <asm/vtime.h> #include <asm/irq.h> +#include "entry.h" -static DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer); +static void virt_timer_expire(void); DEFINE_PER_CPU(struct s390_idle_data, s390_idle); -static inline __u64 get_vtimer(void) +static LIST_HEAD(virt_timer_list); +static DEFINE_SPINLOCK(virt_timer_lock); +static atomic64_t virt_timer_current; +static atomic64_t virt_timer_elapsed; + +static inline u64 get_vtimer(void) { - __u64 timer; + u64 timer; - asm volatile("STPT %0" : "=m" (timer)); + asm volatile("stpt %0" : "=m" (timer)); return timer; } -static inline void set_vtimer(__u64 expires) +static inline void set_vtimer(u64 expires) { - __u64 timer; + u64 timer; - asm volatile (" STPT %0\n" /* Store current cpu timer value */ - " SPT %1" /* Set new value immediately afterwards */ - : "=m" (timer) : "m" (expires) ); + asm volatile( + " stpt %0\n" /* Store current cpu timer value */ + " spt %1" /* Set new value imm. afterwards */ + : "=m" (timer) : "m" (expires)); S390_lowcore.system_timer += S390_lowcore.last_update_timer - timer; S390_lowcore.last_update_timer = expires; } +static inline int virt_timer_forward(u64 elapsed) +{ + BUG_ON(!irqs_disabled()); + + if (list_empty(&virt_timer_list)) + return 0; + elapsed = atomic64_add_return(elapsed, &virt_timer_elapsed); + return elapsed >= atomic64_read(&virt_timer_current); +} + /* * Update process times based on virtual cpu times stored by entry.S * to the lowcore fields user_timer, system_timer & steal_clock. 
*/ -static void do_account_vtime(struct task_struct *tsk, int hardirq_offset) +static int do_account_vtime(struct task_struct *tsk, int hardirq_offset) { struct thread_info *ti = task_thread_info(tsk); - __u64 timer, clock, user, system, steal; + u64 timer, clock, user, system, steal; timer = S390_lowcore.last_update_timer; clock = S390_lowcore.last_update_clock; - asm volatile (" STPT %0\n" /* Store current cpu timer value */ - " STCK %1" /* Store current tod clock value */ - : "=m" (S390_lowcore.last_update_timer), - "=m" (S390_lowcore.last_update_clock) ); + asm volatile( + " stpt %0\n" /* Store current cpu timer value */ + " stck %1" /* Store current tod clock value */ + : "=m" (S390_lowcore.last_update_timer), + "=m" (S390_lowcore.last_update_clock)); S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer; S390_lowcore.steal_timer += S390_lowcore.last_update_clock - clock; @@ -83,9 +96,11 @@ static void do_account_vtime(struct task_struct *tsk, int hardirq_offset) S390_lowcore.steal_timer = 0; account_steal_time(steal); } + + return virt_timer_forward(user + system); } -void account_vtime(struct task_struct *prev, struct task_struct *next) +void vtime_task_switch(struct task_struct *prev) { struct thread_info *ti; @@ -93,24 +108,32 @@ void account_vtime(struct task_struct *prev, struct task_struct *next) ti = task_thread_info(prev); ti->user_timer = S390_lowcore.user_timer; ti->system_timer = S390_lowcore.system_timer; - ti = task_thread_info(next); + ti = task_thread_info(current); S390_lowcore.user_timer = ti->user_timer; S390_lowcore.system_timer = ti->system_timer; } -void account_process_tick(struct task_struct *tsk, int user_tick) +/* + * In s390, accounting pending user time also implies + * accounting system time in order to correctly compute + * the stolen time accounting. + */ +void vtime_account_user(struct task_struct *tsk) { - do_account_vtime(tsk, HARDIRQ_OFFSET); + if (do_account_vtime(tsk, HARDIRQ_OFFSET)) + virt_timer_expire(); } /* * Update process times based on virtual cpu times stored by entry.S * to the lowcore fields user_timer, system_timer & steal_clock. */ -void account_system_vtime(struct task_struct *tsk) +void vtime_account_irq_enter(struct task_struct *tsk) { struct thread_info *ti = task_thread_info(tsk); - __u64 timer, system; + u64 timer, system; + + WARN_ON_ONCE(!irqs_disabled()); timer = S390_lowcore.last_update_timer; S390_lowcore.last_update_timer = get_vtimer(); @@ -120,156 +143,56 @@ void account_system_vtime(struct task_struct *tsk) S390_lowcore.steal_timer -= system; ti->system_timer = S390_lowcore.system_timer; account_system_time(tsk, 0, system, system); + + virt_timer_forward(system); } -EXPORT_SYMBOL_GPL(account_system_vtime); +EXPORT_SYMBOL_GPL(vtime_account_irq_enter); -void __kprobes vtime_start_cpu(__u64 int_clock, __u64 enter_timer) +void vtime_account_system(struct task_struct *tsk) +__attribute__((alias("vtime_account_irq_enter"))); +EXPORT_SYMBOL_GPL(vtime_account_system); + +void __kprobes vtime_stop_cpu(void) { struct s390_idle_data *idle = &__get_cpu_var(s390_idle); - struct vtimer_queue *vq = &__get_cpu_var(virt_cpu_timer); - __u64 idle_time, expires; + unsigned long long idle_time; + unsigned long psw_mask; - if (idle->idle_enter == 0ULL) - return; + trace_hardirqs_on(); - /* Account time spent with enabled wait psw loaded as idle time. 
*/ - idle_time = int_clock - idle->idle_enter; - account_idle_time(idle_time); - S390_lowcore.steal_timer += - idle->idle_enter - S390_lowcore.last_update_clock; - S390_lowcore.last_update_clock = int_clock; - - /* Account system time spent going idle. */ - S390_lowcore.system_timer += S390_lowcore.last_update_timer - vq->idle; - S390_lowcore.last_update_timer = enter_timer; - - /* Restart vtime CPU timer */ - if (vq->do_spt) { - /* Program old expire value but first save progress. */ - expires = vq->idle - enter_timer; - expires += get_vtimer(); - set_vtimer(expires); - } else { - /* Don't account the CPU timer delta while the cpu was idle. */ - vq->elapsed -= vq->idle - enter_timer; - } + /* Wait for external, I/O or machine check interrupt. */ + psw_mask = PSW_KERNEL_BITS | PSW_MASK_WAIT | PSW_MASK_DAT | + PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK; + idle->nohz_delay = 0; + + /* Call the assembler magic in entry.S */ + psw_idle(idle, psw_mask); + /* Account time spent with enabled wait psw loaded as idle time. */ idle->sequence++; smp_wmb(); + idle_time = idle->clock_idle_exit - idle->clock_idle_enter; + idle->clock_idle_enter = idle->clock_idle_exit = 0ULL; idle->idle_time += idle_time; - idle->idle_enter = 0ULL; idle->idle_count++; + account_idle_time(idle_time); smp_wmb(); idle->sequence++; } -void __kprobes vtime_stop_cpu(void) -{ - struct s390_idle_data *idle = &__get_cpu_var(s390_idle); - struct vtimer_queue *vq = &__get_cpu_var(virt_cpu_timer); - psw_t psw; - - /* Wait for external, I/O or machine check interrupt. */ - psw.mask = psw_kernel_bits | PSW_MASK_WAIT | - PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK; - - idle->nohz_delay = 0; - - /* Check if the CPU timer needs to be reprogrammed. */ - if (vq->do_spt) { - __u64 vmax = VTIMER_MAX_SLICE; - /* - * The inline assembly is equivalent to - * vq->idle = get_cpu_timer(); - * set_cpu_timer(VTIMER_MAX_SLICE); - * idle->idle_enter = get_clock(); - * __load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT | - * PSW_MASK_DAT | PSW_MASK_IO | - * PSW_MASK_EXT | PSW_MASK_MCHECK); - * The difference is that the inline assembly makes sure that - * the last three instruction are stpt, stck and lpsw in that - * order. This is done to increase the precision. - */ - asm volatile( -#ifndef CONFIG_64BIT - " basr 1,0\n" - "0: ahi 1,1f-0b\n" - " st 1,4(%2)\n" -#else /* CONFIG_64BIT */ - " larl 1,1f\n" - " stg 1,8(%2)\n" -#endif /* CONFIG_64BIT */ - " stpt 0(%4)\n" - " spt 0(%5)\n" - " stck 0(%3)\n" -#ifndef CONFIG_64BIT - " lpsw 0(%2)\n" -#else /* CONFIG_64BIT */ - " lpswe 0(%2)\n" -#endif /* CONFIG_64BIT */ - "1:" - : "=m" (idle->idle_enter), "=m" (vq->idle) - : "a" (&psw), "a" (&idle->idle_enter), - "a" (&vq->idle), "a" (&vmax), "m" (vmax), "m" (psw) - : "memory", "cc", "1"); - } else { - /* - * The inline assembly is equivalent to - * vq->idle = get_cpu_timer(); - * idle->idle_enter = get_clock(); - * __load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT | - * PSW_MASK_DAT | PSW_MASK_IO | - * PSW_MASK_EXT | PSW_MASK_MCHECK); - * The difference is that the inline assembly makes sure that - * the last three instruction are stpt, stck and lpsw in that - * order. This is done to increase the precision. 
- */ - asm volatile( -#ifndef CONFIG_64BIT - " basr 1,0\n" - "0: ahi 1,1f-0b\n" - " st 1,4(%2)\n" -#else /* CONFIG_64BIT */ - " larl 1,1f\n" - " stg 1,8(%2)\n" -#endif /* CONFIG_64BIT */ - " stpt 0(%4)\n" - " stck 0(%3)\n" -#ifndef CONFIG_64BIT - " lpsw 0(%2)\n" -#else /* CONFIG_64BIT */ - " lpswe 0(%2)\n" -#endif /* CONFIG_64BIT */ - "1:" - : "=m" (idle->idle_enter), "=m" (vq->idle) - : "a" (&psw), "a" (&idle->idle_enter), - "a" (&vq->idle), "m" (psw) - : "memory", "cc", "1"); - } -} - cputime64_t s390_get_idle_time(int cpu) { - struct s390_idle_data *idle; - unsigned long long now, idle_time, idle_enter; + struct s390_idle_data *idle = &per_cpu(s390_idle, cpu); + unsigned long long now, idle_enter, idle_exit; unsigned int sequence; - idle = &per_cpu(s390_idle, cpu); - - now = get_clock(); -repeat: - sequence = idle->sequence; - smp_rmb(); - if (sequence & 1) - goto repeat; - idle_time = 0; - idle_enter = idle->idle_enter; - if (idle_enter != 0ULL && idle_enter < now) - idle_time = now - idle_enter; - smp_rmb(); - if (idle->sequence != sequence) - goto repeat; - return idle_time; + do { + now = get_tod_clock(); + sequence = ACCESS_ONCE(idle->sequence); + idle_enter = ACCESS_ONCE(idle->clock_idle_enter); + idle_exit = ACCESS_ONCE(idle->clock_idle_exit); + } while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence)); + return idle_enter ? ((idle_exit ?: now) - idle_enter) : 0; } /* @@ -278,11 +201,11 @@ repeat: */ static void list_add_sorted(struct vtimer_list *timer, struct list_head *head) { - struct vtimer_list *event; + struct vtimer_list *tmp; - list_for_each_entry(event, head, entry) { - if (event->expires > timer->expires) { - list_add_tail(&timer->entry, &event->entry); + list_for_each_entry(tmp, head, entry) { + if (tmp->expires > timer->expires) { + list_add_tail(&timer->entry, &tmp->entry); return; } } @@ -290,84 +213,45 @@ static void list_add_sorted(struct vtimer_list *timer, struct list_head *head) } /* - * Do the callback functions of expired vtimer events. - * Called from within the interrupt handler. + * Handler for expired virtual CPU timer. */ -static void do_callbacks(struct list_head *cb_list) +static void virt_timer_expire(void) { - struct vtimer_queue *vq; - struct vtimer_list *event, *tmp; - - if (list_empty(cb_list)) - return; - - vq = &__get_cpu_var(virt_cpu_timer); - - list_for_each_entry_safe(event, tmp, cb_list, entry) { - list_del_init(&event->entry); - (event->function)(event->data); - if (event->interval) { - /* Recharge interval timer */ - event->expires = event->interval + vq->elapsed; - spin_lock(&vq->lock); - list_add_sorted(event, &vq->list); - spin_unlock(&vq->lock); - } - } -} - -/* - * Handler for the virtual CPU timer. 
- */ -static void do_cpu_timer_interrupt(unsigned int ext_int_code, - unsigned int param32, unsigned long param64) -{ - struct vtimer_queue *vq; - struct vtimer_list *event, *tmp; - struct list_head cb_list; /* the callback queue */ - __u64 elapsed, next; - - kstat_cpu(smp_processor_id()).irqs[EXTINT_TMR]++; - INIT_LIST_HEAD(&cb_list); - vq = &__get_cpu_var(virt_cpu_timer); - - /* walk timer list, fire all expired events */ - spin_lock(&vq->lock); - - elapsed = vq->elapsed + (vq->timer - S390_lowcore.async_enter_timer); - BUG_ON((s64) elapsed < 0); - vq->elapsed = 0; - list_for_each_entry_safe(event, tmp, &vq->list, entry) { - if (event->expires < elapsed) + struct vtimer_list *timer, *tmp; + unsigned long elapsed; + LIST_HEAD(cb_list); + + /* walk timer list, fire all expired timers */ + spin_lock(&virt_timer_lock); + elapsed = atomic64_read(&virt_timer_elapsed); + list_for_each_entry_safe(timer, tmp, &virt_timer_list, entry) { + if (timer->expires < elapsed) /* move expired timer to the callback queue */ - list_move_tail(&event->entry, &cb_list); + list_move_tail(&timer->entry, &cb_list); else - event->expires -= elapsed; + timer->expires -= elapsed; + } + if (!list_empty(&virt_timer_list)) { + timer = list_first_entry(&virt_timer_list, + struct vtimer_list, entry); + atomic64_set(&virt_timer_current, timer->expires); + } + atomic64_sub(elapsed, &virt_timer_elapsed); + spin_unlock(&virt_timer_lock); + + /* Do callbacks and recharge periodic timers */ + list_for_each_entry_safe(timer, tmp, &cb_list, entry) { + list_del_init(&timer->entry); + timer->function(timer->data); + if (timer->interval) { + /* Recharge interval timer */ + timer->expires = timer->interval + + atomic64_read(&virt_timer_elapsed); + spin_lock(&virt_timer_lock); + list_add_sorted(timer, &virt_timer_list); + spin_unlock(&virt_timer_lock); + } } - spin_unlock(&vq->lock); - - vq->do_spt = list_empty(&cb_list); - do_callbacks(&cb_list); - - /* next event is first in list */ - next = VTIMER_MAX_SLICE; - spin_lock(&vq->lock); - if (!list_empty(&vq->list)) { - event = list_first_entry(&vq->list, struct vtimer_list, entry); - next = event->expires; - } else - vq->do_spt = 0; - spin_unlock(&vq->lock); - /* - * To improve precision add the time spent by the - * interrupt handler to the elapsed time. - * Note: CPU timer counts down and we got an interrupt, - * the current content is negative - */ - elapsed = S390_lowcore.async_enter_timer - get_vtimer(); - set_vtimer(next - elapsed); - vq->timer = next - elapsed; - vq->elapsed = elapsed; } void init_virt_timer(struct vtimer_list *timer) @@ -379,179 +263,108 @@ EXPORT_SYMBOL(init_virt_timer); static inline int vtimer_pending(struct vtimer_list *timer) { - return (!list_empty(&timer->entry)); + return !list_empty(&timer->entry); } -/* - * this function should only run on the specified CPU - */ static void internal_add_vtimer(struct vtimer_list *timer) { - struct vtimer_queue *vq; - unsigned long flags; - __u64 left, expires; - - vq = &per_cpu(virt_cpu_timer, timer->cpu); - spin_lock_irqsave(&vq->lock, flags); - - BUG_ON(timer->cpu != smp_processor_id()); - - if (list_empty(&vq->list)) { - /* First timer on this cpu, just program it. */ - list_add(&timer->entry, &vq->list); - set_vtimer(timer->expires); - vq->timer = timer->expires; - vq->elapsed = 0; + if (list_empty(&virt_timer_list)) { + /* First timer, just program it. 
*/ + atomic64_set(&virt_timer_current, timer->expires); + atomic64_set(&virt_timer_elapsed, 0); + list_add(&timer->entry, &virt_timer_list); } else { - /* Check progress of old timers. */ - expires = timer->expires; - left = get_vtimer(); - if (likely((s64) expires < (s64) left)) { + /* Update timer against current base. */ + timer->expires += atomic64_read(&virt_timer_elapsed); + if (likely((s64) timer->expires < + (s64) atomic64_read(&virt_timer_current))) /* The new timer expires before the current timer. */ - set_vtimer(expires); - vq->elapsed += vq->timer - left; - vq->timer = expires; - } else { - vq->elapsed += vq->timer - left; - vq->timer = left; - } - /* Insert new timer into per cpu list. */ - timer->expires += vq->elapsed; - list_add_sorted(timer, &vq->list); + atomic64_set(&virt_timer_current, timer->expires); + /* Insert new timer into the list. */ + list_add_sorted(timer, &virt_timer_list); } - - spin_unlock_irqrestore(&vq->lock, flags); - /* release CPU acquired in prepare_vtimer or mod_virt_timer() */ - put_cpu(); } -static inline void prepare_vtimer(struct vtimer_list *timer) +static void __add_vtimer(struct vtimer_list *timer, int periodic) { - BUG_ON(!timer->function); - BUG_ON(!timer->expires || timer->expires > VTIMER_MAX_SLICE); - BUG_ON(vtimer_pending(timer)); - timer->cpu = get_cpu(); + unsigned long flags; + + timer->interval = periodic ? timer->expires : 0; + spin_lock_irqsave(&virt_timer_lock, flags); + internal_add_vtimer(timer); + spin_unlock_irqrestore(&virt_timer_lock, flags); } /* * add_virt_timer - add an oneshot virtual CPU timer */ -void add_virt_timer(void *new) +void add_virt_timer(struct vtimer_list *timer) { - struct vtimer_list *timer; - - timer = (struct vtimer_list *)new; - prepare_vtimer(timer); - timer->interval = 0; - internal_add_vtimer(timer); + __add_vtimer(timer, 0); } EXPORT_SYMBOL(add_virt_timer); /* * add_virt_timer_int - add an interval virtual CPU timer */ -void add_virt_timer_periodic(void *new) +void add_virt_timer_periodic(struct vtimer_list *timer) { - struct vtimer_list *timer; - - timer = (struct vtimer_list *)new; - prepare_vtimer(timer); - timer->interval = timer->expires; - internal_add_vtimer(timer); + __add_vtimer(timer, 1); } EXPORT_SYMBOL(add_virt_timer_periodic); -static int __mod_vtimer(struct vtimer_list *timer, __u64 expires, int periodic) +static int __mod_vtimer(struct vtimer_list *timer, u64 expires, int periodic) { - struct vtimer_queue *vq; unsigned long flags; - int cpu; + int rc; BUG_ON(!timer->function); - BUG_ON(!expires || expires > VTIMER_MAX_SLICE); if (timer->expires == expires && vtimer_pending(timer)) return 1; - - cpu = get_cpu(); - vq = &per_cpu(virt_cpu_timer, cpu); - - /* disable interrupts before test if timer is pending */ - spin_lock_irqsave(&vq->lock, flags); - - /* if timer isn't pending add it on the current CPU */ - if (!vtimer_pending(timer)) { - spin_unlock_irqrestore(&vq->lock, flags); - - if (periodic) - timer->interval = expires; - else - timer->interval = 0; - timer->expires = expires; - timer->cpu = cpu; - internal_add_vtimer(timer); - return 0; - } - - /* check if we run on the right CPU */ - BUG_ON(timer->cpu != cpu); - - list_del_init(&timer->entry); + spin_lock_irqsave(&virt_timer_lock, flags); + rc = vtimer_pending(timer); + if (rc) + list_del_init(&timer->entry); + timer->interval = periodic ? 
expires : 0; timer->expires = expires; - if (periodic) - timer->interval = expires; - - /* the timer can't expire anymore so we can release the lock */ - spin_unlock_irqrestore(&vq->lock, flags); internal_add_vtimer(timer); - return 1; + spin_unlock_irqrestore(&virt_timer_lock, flags); + return rc; } /* - * If we change a pending timer the function must be called on the CPU - * where the timer is running on. - * * returns whether it has modified a pending timer (1) or not (0) */ -int mod_virt_timer(struct vtimer_list *timer, __u64 expires) +int mod_virt_timer(struct vtimer_list *timer, u64 expires) { return __mod_vtimer(timer, expires, 0); } EXPORT_SYMBOL(mod_virt_timer); /* - * If we change a pending timer the function must be called on the CPU - * where the timer is running on. - * * returns whether it has modified a pending timer (1) or not (0) */ -int mod_virt_timer_periodic(struct vtimer_list *timer, __u64 expires) +int mod_virt_timer_periodic(struct vtimer_list *timer, u64 expires) { return __mod_vtimer(timer, expires, 1); } EXPORT_SYMBOL(mod_virt_timer_periodic); /* - * delete a virtual timer + * Delete a virtual timer. * * returns whether the deleted timer was pending (1) or not (0) */ int del_virt_timer(struct vtimer_list *timer) { unsigned long flags; - struct vtimer_queue *vq; - /* check if timer is pending */ if (!vtimer_pending(timer)) return 0; - - vq = &per_cpu(virt_cpu_timer, timer->cpu); - spin_lock_irqsave(&vq->lock, flags); - - /* we don't interrupt a running timer, just let it expire! */ + spin_lock_irqsave(&virt_timer_lock, flags); list_del_init(&timer->entry); - - spin_unlock_irqrestore(&vq->lock, flags); + spin_unlock_irqrestore(&virt_timer_lock, flags); return 1; } EXPORT_SYMBOL(del_virt_timer); @@ -561,27 +374,19 @@ EXPORT_SYMBOL(del_virt_timer); */ void init_cpu_vtimer(void) { - struct vtimer_queue *vq; - - /* initialize per cpu vtimer structure */ - vq = &__get_cpu_var(virt_cpu_timer); - INIT_LIST_HEAD(&vq->list); - spin_lock_init(&vq->lock); - - /* enable cpu timer interrupts */ - __ctl_set_bit(0,10); + /* set initial cpu timer */ + set_vtimer(VTIMER_MAX_SLICE); } -static int __cpuinit s390_nohz_notify(struct notifier_block *self, - unsigned long action, void *hcpu) +static int s390_nohz_notify(struct notifier_block *self, unsigned long action, + void *hcpu) { struct s390_idle_data *idle; long cpu = (long) hcpu; idle = &per_cpu(s390_idle, cpu); - switch (action) { + switch (action & ~CPU_TASKS_FROZEN) { case CPU_DYING: - case CPU_DYING_FROZEN: idle->nohz_delay = 0; default: break; @@ -591,12 +396,7 @@ static int __cpuinit s390_nohz_notify(struct notifier_block *self, void __init vtime_init(void) { - /* request the cpu timer external interrupt */ - if (register_external_interrupt(0x1005, do_cpu_timer_interrupt)) - panic("Couldn't request external interrupt 0x1005"); - /* Enable cpu timer interrupts on the boot cpu. */ init_cpu_vtimer(); cpu_notifier(s390_nohz_notify, 0); } - |
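
The get_sync_clock() hunk earlier in this series relies on a lockless protocol around the per-cpu clock_sync_word: the high bit signals that the TOD clock is in sync, and any resync changes the word, so a reader accepts a clock value only if the word was identical before and after the read. A minimal userspace model of that pattern, with illustrative names (read_tod_clock() stands in for get_tod_clock()):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    extern uint64_t read_tod_clock(void);      /* stand-in for get_tod_clock() */
    static _Atomic uint32_t clock_sync_word;   /* models the per-cpu sync word */

    /*
     * Accept the clock value only if the sync word did not change across
     * the read and its "in sync" bit (0x80000000) was set the whole time.
     */
    static bool read_synced_clock(uint64_t *clock)
    {
            uint32_t sw0 = atomic_load(&clock_sync_word);

            *clock = read_tod_clock();
            return atomic_load(&clock_sync_word) == sw0 && (sw0 & 0x80000000U);
    }

When this fast path fails, the kernel reports -EOPNOTSUPP, -EACCES or -EAGAIN depending on whether ETR/STP is absent, present but not enabled, or merely not yet synchronized.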
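
In the traps.c rework, get_trap_ip() replaces get_psw_address(): the program-check PSW points past the faulting instruction, so the handler subtracts the instruction length that the hardware leaves in the upper half of regs->int_code; for transaction aborts (bit 0x200 of int_code) the address is taken from the transaction diagnostic block instead. A sketch of the address arithmetic under those assumptions:

    #include <stdint.h>

    /*
     * The PSW address saved on a program check points after the faulting
     * instruction; the upper halfword of the interruption code carries
     * its length in bytes (s390 instructions are 2, 4 or 6 bytes long).
     */
    static uint64_t trap_instruction_address(uint64_t psw_addr, uint32_t int_code)
    {
            uint32_t ilen = int_code >> 16;

            return psw_addr - ilen;
    }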
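
The vdso32/vdso64 assembly changes all implement one formula: the fixed ">> 12" NTP scaling is replaced by the timekeeper's own mult/shift pair, i.e. ns = ((TOD - cycle_last) * tk->mult + tk->xtime_nsec) >> tk->shift. A C rendering of the 64-bit CLOCK_REALTIME path, using a struct whose fields mirror the __VDSO_* offsets (the names here are illustrative, not the kernel's):

    #include <stdint.h>
    #include <time.h>

    #define NSEC_PER_SEC 1000000000ULL

    struct vdso_time {                 /* fields behind the __VDSO_* offsets */
            uint64_t xtime_stamp;      /* TOD value at the last update (cycle_last) */
            uint64_t xtime_sec;        /* tk->xtime_sec */
            uint64_t xtime_nsec;       /* tk->xtime_nsec, pre-scaled by tk->mult */
            uint32_t tk_mult;          /* tk->mult */
            uint32_t tk_shift;         /* tk->shift */
    };

    static void tod_to_realtime(const struct vdso_time *vt, uint64_t tod,
                                struct timespec *ts)
    {
            uint64_t nsec;

            nsec = (tod - vt->xtime_stamp) * vt->tk_mult + vt->xtime_nsec;
            nsec >>= vt->tk_shift;     /* >> tk->shift, as in the assembly */
            ts->tv_sec = vt->xtime_sec + nsec / NSEC_PER_SEC;
            ts->tv_nsec = nsec % NSEC_PER_SEC;
    }

The real code additionally brackets the computation with the __VDSO_UPD_COUNT check and retries while an update is pending, and it reduces the nanosecond value by compare-and-subtract rather than a division.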
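
virt_timer_forward() is the new hot-path check in vtime.c: the accounting paths add the consumed CPU time to virt_timer_elapsed, and only once the sum passes virt_timer_current (the head timer's relative expiry) does vtime_account_user() walk the list via virt_timer_expire(). A reduced model of that bookkeeping, using C11 atomics in place of atomic64_t (an empty timer list short-circuits in the real code):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    static _Atomic uint64_t timer_current;  /* head timer, relative expiry */
    static _Atomic uint64_t timer_elapsed;  /* CPU time consumed so far */

    /* Called with interrupts disabled from the accounting paths. */
    static bool timer_forward(uint64_t delta)
    {
            /* atomic_fetch_add returns the old value, so add delta back,
             * mirroring atomic64_add_return() in the kernel. */
            return atomic_fetch_add(&timer_elapsed, delta) + delta
                   >= atomic_load(&timer_current);
    }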
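
s390_get_idle_time() now reads the idle window with the usual even/odd sequence trick: vtime_stop_cpu() makes idle->sequence odd before updating clock_idle_enter/exit and even again afterwards (with smp_wmb() in between), so a reader retries while the counter is odd or has moved. A compact model, assuming C11 atomics in place of the kernel's ACCESS_ONCE and barriers:

    #include <stdatomic.h>
    #include <stdint.h>

    struct idle_data {
            _Atomic unsigned int sequence;  /* odd while an update is in flight */
            uint64_t clock_idle_enter;      /* TOD at idle entry, 0 when not idle */
            uint64_t clock_idle_exit;       /* TOD at idle exit, 0 while still idle */
    };

    static uint64_t idle_window(const struct idle_data *idle, uint64_t now)
    {
            unsigned int seq;
            uint64_t enter, exit;

            do {
                    seq = atomic_load(&idle->sequence);
                    enter = idle->clock_idle_enter;
                    exit = idle->clock_idle_exit;
            } while ((seq & 1) || atomic_load(&idle->sequence) != seq);

            /* Still idle: the exit stamp is 0 and "now" closes the window. */
            return enter ? ((exit ? exit : now) - enter) : 0;
    }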
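
With the per-cpu vtimer queues gone, all virtual timers live on one globally sorted list whose expiry values are relative to virt_timer_elapsed, and add_virt_timer()/add_virt_timer_periodic() now take a typed struct vtimer_list * instead of void *. A hypothetical caller, under the assumption that struct vtimer_list keeps its function/data/expires members and that expires counts CPU-timer units (same format as the TOD clock, where bit 51 ticks every microsecond):

    #include <asm/vtimer.h>

    static struct vtimer_list sample_timer;

    /* Runs from the CPU-timer interrupt once enough CPU time was consumed. */
    static void sample_timer_fn(unsigned long data)
    {
            /* ... */
    }

    static void start_sample_timer(void)
    {
            init_virt_timer(&sample_timer);
            sample_timer.function = sample_timer_fn;
            sample_timer.data = 0;
            sample_timer.expires = 1ULL << 32; /* roughly one second of CPU time */
            add_virt_timer(&sample_timer);
    }

Because there is no per-cpu queue left to match, mod_virt_timer() may now be called from any CPU; it returns 1 when it modified a pending timer and 0 when it merely armed a new one.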
