Diffstat (limited to 'arch/arm64')
47 files changed, 1076 insertions, 589 deletions
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index f9ccff91591..fd70a68387e 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -1,15 +1,18 @@ config ARM64 def_bool y select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE + select ARCH_WANT_OPTIONAL_GPIOLIB select ARCH_WANT_COMPAT_IPC_PARSE_VERSION + select ARCH_WANT_FRAME_POINTERS + select ARM_AMBA + select ARM_ARCH_TIMER + select CLONE_BACKWARDS select COMMON_CLK select GENERIC_CLOCKEVENTS select GENERIC_HARDIRQS_NO_DEPRECATED select GENERIC_IOMAP select GENERIC_IRQ_PROBE select GENERIC_IRQ_SHOW - select GENERIC_KERNEL_EXECVE - select GENERIC_KERNEL_THREAD select GENERIC_SMP_IDLE_THREAD select GENERIC_TIME_VSYSCALL select HARDIRQS_SW_RESEND @@ -21,7 +24,6 @@ config ARM64 select HAVE_GENERIC_DMA_COHERENT select HAVE_GENERIC_HARDIRQS select HAVE_HW_BREAKPOINT if PERF_EVENTS - select HAVE_IRQ_WORK select HAVE_MEMBLOCK select HAVE_PERF_EVENTS select IRQ_DOMAIN @@ -33,7 +35,6 @@ config ARM64 select RTC_LIB select SPARSE_IRQ select SYSCTL_EXCEPTION_TRACE - select CLONE_BACKWARDS help ARM 64-bit (AArch64) Linux support. @@ -92,6 +93,9 @@ config SWIOTLB config IOMMU_HELPER def_bool SWIOTLB +config GENERIC_GPIO + bool + source "init/Kconfig" source "kernel/Kconfig.freezer" @@ -202,6 +206,8 @@ config COMPAT depends on !ARM64_64K_PAGES select COMPAT_BINFMT_ELF select HAVE_UID16 + select OLD_SIGSUSPEND3 + select COMPAT_OLD_SIGACTION help This option enables support for a 32-bit EL0 running under a 64-bit kernel at EL1. AArch32-specific components such as system calls, diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug index d7553f2bda6..51493430f14 100644 --- a/arch/arm64/Kconfig.debug +++ b/arch/arm64/Kconfig.debug @@ -24,4 +24,21 @@ config DEBUG_STACK_USAGE Enables the display of the minimum amount of free stack which each task has ever had available in the sysrq-T output. +config EARLY_PRINTK + bool "Early printk support" + default y + help + Say Y here if you want to have an early console using the + earlyprintk=<name>[,<addr>][,<options>] kernel parameter. It + is assumed that the early console device has been initialised + by the boot loader prior to starting the Linux kernel. + +config PID_IN_CONTEXTIDR + bool "Write the current PID to the CONTEXTIDR register" + help + Enabling this option causes the kernel to write the current PID to + the CONTEXTIDR register, at the expense of some additional + instructions during context switch. Say Y here only if you are + planning to use hardware trace tools with this kernel. 
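
A usage note on CONFIG_EARLY_PRINTK above: the console is selected at boot through the documented earlyprintk=<name>[,<addr>][,<options>] parameter. A minimal example, assuming a PL011 UART whose physical base is board-specific (the address below is purely illustrative):

	earlyprintk=pl011,0x9000000

At this point in the series only the "pl011" name is matched; the address, when given, must be the physical base at which the boot loader left the UART initialised.
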
+ endmenu diff --git a/arch/arm64/boot/dts/Makefile b/arch/arm64/boot/dts/Makefile index 801e2d7fcbc..32ac0aef006 100644 --- a/arch/arm64/boot/dts/Makefile +++ b/arch/arm64/boot/dts/Makefile @@ -1,4 +1,5 @@ targets += dtbs +targets += $(dtb-y) dtbs: $(addprefix $(obj)/, $(dtb-y)) diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild index 14a9d5a2b85..e5fe4f99fe1 100644 --- a/arch/arm64/include/asm/Kbuild +++ b/arch/arm64/include/asm/Kbuild @@ -19,6 +19,7 @@ generic-y += ipcbuf.h generic-y += irq_regs.h generic-y += kdebug.h generic-y += kmap_types.h +generic-y += kvm_para.h generic-y += local.h generic-y += local64.h generic-y += mman.h @@ -48,3 +49,4 @@ generic-y += trace_clock.h generic-y += types.h generic-y += unaligned.h generic-y += user.h +generic-y += xor.h diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h new file mode 100644 index 00000000000..91e2a6a6fcd --- /dev/null +++ b/arch/arm64/include/asm/arch_timer.h @@ -0,0 +1,133 @@ +/* + * arch/arm64/include/asm/arch_timer.h + * + * Copyright (C) 2012 ARM Ltd. + * Author: Marc Zyngier <marc.zyngier@arm.com> + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#ifndef __ASM_ARCH_TIMER_H +#define __ASM_ARCH_TIMER_H + +#include <asm/barrier.h> + +#include <linux/init.h> +#include <linux/types.h> + +#include <clocksource/arm_arch_timer.h> + +static inline void arch_timer_reg_write(int access, int reg, u32 val) +{ + if (access == ARCH_TIMER_PHYS_ACCESS) { + switch (reg) { + case ARCH_TIMER_REG_CTRL: + asm volatile("msr cntp_ctl_el0, %0" : : "r" (val)); + break; + case ARCH_TIMER_REG_TVAL: + asm volatile("msr cntp_tval_el0, %0" : : "r" (val)); + break; + default: + BUILD_BUG(); + } + } else if (access == ARCH_TIMER_VIRT_ACCESS) { + switch (reg) { + case ARCH_TIMER_REG_CTRL: + asm volatile("msr cntv_ctl_el0, %0" : : "r" (val)); + break; + case ARCH_TIMER_REG_TVAL: + asm volatile("msr cntv_tval_el0, %0" : : "r" (val)); + break; + default: + BUILD_BUG(); + } + } else { + BUILD_BUG(); + } + + isb(); +} + +static inline u32 arch_timer_reg_read(int access, int reg) +{ + u32 val; + + if (access == ARCH_TIMER_PHYS_ACCESS) { + switch (reg) { + case ARCH_TIMER_REG_CTRL: + asm volatile("mrs %0, cntp_ctl_el0" : "=r" (val)); + break; + case ARCH_TIMER_REG_TVAL: + asm volatile("mrs %0, cntp_tval_el0" : "=r" (val)); + break; + default: + BUILD_BUG(); + } + } else if (access == ARCH_TIMER_VIRT_ACCESS) { + switch (reg) { + case ARCH_TIMER_REG_CTRL: + asm volatile("mrs %0, cntv_ctl_el0" : "=r" (val)); + break; + case ARCH_TIMER_REG_TVAL: + asm volatile("mrs %0, cntv_tval_el0" : "=r" (val)); + break; + default: + BUILD_BUG(); + } + } else { + BUILD_BUG(); + } + + return val; +} + +static inline u32 arch_timer_get_cntfrq(void) +{ + u32 val; + asm volatile("mrs %0, cntfrq_el0" : "=r" (val)); + return val; +} + +static inline void __cpuinit arch_counter_set_user_access(void) +{ + u32 cntkctl; + + /* Disable user access to the timers and the physical counter. 
*/ + asm volatile("mrs %0, cntkctl_el1" : "=r" (cntkctl)); + cntkctl &= ~((3 << 8) | (1 << 0)); + + /* Enable user access to the virtual counter and frequency. */ + cntkctl |= (1 << 1); + asm volatile("msr cntkctl_el1, %0" : : "r" (cntkctl)); +} + +static inline u64 arch_counter_get_cntpct(void) +{ + u64 cval; + + isb(); + asm volatile("mrs %0, cntpct_el0" : "=r" (cval)); + + return cval; +} + +static inline u64 arch_counter_get_cntvct(void) +{ + u64 cval; + + isb(); + asm volatile("mrs %0, cntvct_el0" : "=r" (cval)); + + return cval; +} + +#endif diff --git a/arch/arm64/include/asm/arm_generic.h b/arch/arm64/include/asm/arm_generic.h deleted file mode 100644 index df2aeb82f74..00000000000 --- a/arch/arm64/include/asm/arm_generic.h +++ /dev/null @@ -1,100 +0,0 @@ -/* - * arch/arm64/include/asm/arm_generic.h - * - * Copyright (C) 2012 ARM Ltd. - * Author: Marc Zyngier <marc.zyngier@arm.com> - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. - */ -#ifndef __ASM_ARM_GENERIC_H -#define __ASM_ARM_GENERIC_H - -#include <linux/clocksource.h> - -#define ARCH_TIMER_CTRL_ENABLE (1 << 0) -#define ARCH_TIMER_CTRL_IMASK (1 << 1) -#define ARCH_TIMER_CTRL_ISTATUS (1 << 2) - -#define ARCH_TIMER_REG_CTRL 0 -#define ARCH_TIMER_REG_FREQ 1 -#define ARCH_TIMER_REG_TVAL 2 - -static inline void arch_timer_reg_write(int reg, u32 val) -{ - switch (reg) { - case ARCH_TIMER_REG_CTRL: - asm volatile("msr cntp_ctl_el0, %0" : : "r" (val)); - break; - case ARCH_TIMER_REG_TVAL: - asm volatile("msr cntp_tval_el0, %0" : : "r" (val)); - break; - default: - BUILD_BUG(); - } - - isb(); -} - -static inline u32 arch_timer_reg_read(int reg) -{ - u32 val; - - switch (reg) { - case ARCH_TIMER_REG_CTRL: - asm volatile("mrs %0, cntp_ctl_el0" : "=r" (val)); - break; - case ARCH_TIMER_REG_FREQ: - asm volatile("mrs %0, cntfrq_el0" : "=r" (val)); - break; - case ARCH_TIMER_REG_TVAL: - asm volatile("mrs %0, cntp_tval_el0" : "=r" (val)); - break; - default: - BUILD_BUG(); - } - - return val; -} - -static inline void __cpuinit arch_counter_enable_user_access(void) -{ - u32 cntkctl; - - /* Disable user access to the timers and the physical counter. */ - asm volatile("mrs %0, cntkctl_el1" : "=r" (cntkctl)); - cntkctl &= ~((3 << 8) | (1 << 0)); - - /* Enable user access to the virtual counter and frequency. 
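
Because arch_counter_set_user_access() above leaves the virtual counter and its frequency readable from EL0, user space can take raw timestamps directly. A minimal user-space sketch, assuming a GCC/Clang toolchain targeting AArch64 (not part of the patch; the multiplication can overflow for very long intervals):

	#include <stdint.h>
	#include <stdio.h>

	static inline uint64_t read_cntvct(void)
	{
		uint64_t c;
		/* isb keeps the counter read from being hoisted */
		asm volatile("isb\n\tmrs %0, cntvct_el0" : "=r" (c) : : "memory");
		return c;
	}

	static inline uint64_t read_cntfrq(void)
	{
		uint64_t f;
		asm volatile("mrs %0, cntfrq_el0" : "=r" (f));
		return f;
	}

	int main(void)
	{
		uint64_t t0 = read_cntvct();
		/* ... measured work ... */
		uint64_t t1 = read_cntvct();
		printf("elapsed: %llu ns\n", (unsigned long long)
		       ((t1 - t0) * 1000000000ULL / read_cntfrq()));
		return 0;
	}
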
*/ - cntkctl |= (1 << 1); - asm volatile("msr cntkctl_el1, %0" : : "r" (cntkctl)); -} - -static inline cycle_t arch_counter_get_cntpct(void) -{ - cycle_t cval; - - asm volatile("mrs %0, cntpct_el0" : "=r" (cval)); - - return cval; -} - -static inline cycle_t arch_counter_get_cntvct(void) -{ - cycle_t cval; - - asm volatile("mrs %0, cntvct_el0" : "=r" (cval)); - - return cval; -} - -#endif diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h index 407717ba060..83636446857 100644 --- a/arch/arm64/include/asm/atomic.h +++ b/arch/arm64/include/asm/atomic.h @@ -49,12 +49,12 @@ static inline void atomic_add(int i, atomic_t *v) int result; asm volatile("// atomic_add\n" -"1: ldxr %w0, [%3]\n" -" add %w0, %w0, %w4\n" -" stxr %w1, %w0, [%3]\n" +"1: ldxr %w0, %2\n" +" add %w0, %w0, %w3\n" +" stxr %w1, %w0, %2\n" " cbnz %w1, 1b" - : "=&r" (result), "=&r" (tmp), "+o" (v->counter) - : "r" (&v->counter), "Ir" (i) + : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) + : "Ir" (i) : "cc"); } @@ -64,13 +64,13 @@ static inline int atomic_add_return(int i, atomic_t *v) int result; asm volatile("// atomic_add_return\n" -"1: ldaxr %w0, [%3]\n" -" add %w0, %w0, %w4\n" -" stlxr %w1, %w0, [%3]\n" +"1: ldaxr %w0, %2\n" +" add %w0, %w0, %w3\n" +" stlxr %w1, %w0, %2\n" " cbnz %w1, 1b" - : "=&r" (result), "=&r" (tmp), "+o" (v->counter) - : "r" (&v->counter), "Ir" (i) - : "cc"); + : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) + : "Ir" (i) + : "cc", "memory"); return result; } @@ -81,12 +81,12 @@ static inline void atomic_sub(int i, atomic_t *v) int result; asm volatile("// atomic_sub\n" -"1: ldxr %w0, [%3]\n" -" sub %w0, %w0, %w4\n" -" stxr %w1, %w0, [%3]\n" +"1: ldxr %w0, %2\n" +" sub %w0, %w0, %w3\n" +" stxr %w1, %w0, %2\n" " cbnz %w1, 1b" - : "=&r" (result), "=&r" (tmp), "+o" (v->counter) - : "r" (&v->counter), "Ir" (i) + : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) + : "Ir" (i) : "cc"); } @@ -96,13 +96,13 @@ static inline int atomic_sub_return(int i, atomic_t *v) int result; asm volatile("// atomic_sub_return\n" -"1: ldaxr %w0, [%3]\n" -" sub %w0, %w0, %w4\n" -" stlxr %w1, %w0, [%3]\n" +"1: ldaxr %w0, %2\n" +" sub %w0, %w0, %w3\n" +" stlxr %w1, %w0, %2\n" " cbnz %w1, 1b" - : "=&r" (result), "=&r" (tmp), "+o" (v->counter) - : "r" (&v->counter), "Ir" (i) - : "cc"); + : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) + : "Ir" (i) + : "cc", "memory"); return result; } @@ -113,15 +113,15 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new) int oldval; asm volatile("// atomic_cmpxchg\n" -"1: ldaxr %w1, [%3]\n" -" cmp %w1, %w4\n" +"1: ldaxr %w1, %2\n" +" cmp %w1, %w3\n" " b.ne 2f\n" -" stlxr %w0, %w5, [%3]\n" +" stlxr %w0, %w4, %2\n" " cbnz %w0, 1b\n" "2:" - : "=&r" (tmp), "=&r" (oldval), "+o" (ptr->counter) - : "r" (&ptr->counter), "Ir" (old), "r" (new) - : "cc"); + : "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter) + : "Ir" (old), "r" (new) + : "cc", "memory"); return oldval; } @@ -131,12 +131,12 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr) unsigned long tmp, tmp2; asm volatile("// atomic_clear_mask\n" -"1: ldxr %0, [%3]\n" -" bic %0, %0, %4\n" -" stxr %w1, %0, [%3]\n" +"1: ldxr %0, %2\n" +" bic %0, %0, %3\n" +" stxr %w1, %0, %2\n" " cbnz %w1, 1b" - : "=&r" (tmp), "=&r" (tmp2), "+o" (*addr) - : "r" (addr), "Ir" (mask) + : "=&r" (tmp), "=&r" (tmp2), "+Q" (*addr) + : "Ir" (mask) : "cc"); } @@ -182,12 +182,12 @@ static inline void atomic64_add(u64 i, atomic64_t *v) unsigned long tmp; asm volatile("// atomic64_add\n" -"1: ldxr %0, [%3]\n" -" 
add %0, %0, %4\n" -" stxr %w1, %0, [%3]\n" +"1: ldxr %0, %2\n" +" add %0, %0, %3\n" +" stxr %w1, %0, %2\n" " cbnz %w1, 1b" - : "=&r" (result), "=&r" (tmp), "+o" (v->counter) - : "r" (&v->counter), "Ir" (i) + : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) + : "Ir" (i) : "cc"); } @@ -197,13 +197,13 @@ static inline long atomic64_add_return(long i, atomic64_t *v) unsigned long tmp; asm volatile("// atomic64_add_return\n" -"1: ldaxr %0, [%3]\n" -" add %0, %0, %4\n" -" stlxr %w1, %0, [%3]\n" +"1: ldaxr %0, %2\n" +" add %0, %0, %3\n" +" stlxr %w1, %0, %2\n" " cbnz %w1, 1b" - : "=&r" (result), "=&r" (tmp), "+o" (v->counter) - : "r" (&v->counter), "Ir" (i) - : "cc"); + : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) + : "Ir" (i) + : "cc", "memory"); return result; } @@ -214,12 +214,12 @@ static inline void atomic64_sub(u64 i, atomic64_t *v) unsigned long tmp; asm volatile("// atomic64_sub\n" -"1: ldxr %0, [%3]\n" -" sub %0, %0, %4\n" -" stxr %w1, %0, [%3]\n" +"1: ldxr %0, %2\n" +" sub %0, %0, %3\n" +" stxr %w1, %0, %2\n" " cbnz %w1, 1b" - : "=&r" (result), "=&r" (tmp), "+o" (v->counter) - : "r" (&v->counter), "Ir" (i) + : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) + : "Ir" (i) : "cc"); } @@ -229,13 +229,13 @@ static inline long atomic64_sub_return(long i, atomic64_t *v) unsigned long tmp; asm volatile("// atomic64_sub_return\n" -"1: ldaxr %0, [%3]\n" -" sub %0, %0, %4\n" -" stlxr %w1, %0, [%3]\n" +"1: ldaxr %0, %2\n" +" sub %0, %0, %3\n" +" stlxr %w1, %0, %2\n" " cbnz %w1, 1b" - : "=&r" (result), "=&r" (tmp), "+o" (v->counter) - : "r" (&v->counter), "Ir" (i) - : "cc"); + : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) + : "Ir" (i) + : "cc", "memory"); return result; } @@ -246,15 +246,15 @@ static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new) unsigned long res; asm volatile("// atomic64_cmpxchg\n" -"1: ldaxr %1, [%3]\n" -" cmp %1, %4\n" +"1: ldaxr %1, %2\n" +" cmp %1, %3\n" " b.ne 2f\n" -" stlxr %w0, %5, [%3]\n" +" stlxr %w0, %4, %2\n" " cbnz %w0, 1b\n" "2:" - : "=&r" (res), "=&r" (oldval), "+o" (ptr->counter) - : "r" (&ptr->counter), "Ir" (old), "r" (new) - : "cc"); + : "=&r" (res), "=&r" (oldval), "+Q" (ptr->counter) + : "Ir" (old), "r" (new) + : "cc", "memory"); return oldval; } @@ -267,15 +267,15 @@ static inline long atomic64_dec_if_positive(atomic64_t *v) unsigned long tmp; asm volatile("// atomic64_dec_if_positive\n" -"1: ldaxr %0, [%3]\n" +"1: ldaxr %0, %2\n" " subs %0, %0, #1\n" " b.mi 2f\n" -" stlxr %w1, %0, [%3]\n" +" stlxr %w1, %0, %2\n" " cbnz %w1, 1b\n" "2:" - : "=&r" (result), "=&r" (tmp), "+o" (v->counter) - : "r" (&v->counter) - : "cc"); + : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) + : + : "cc", "memory"); return result; } diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h index e0e65b069d9..968b5cbfc26 100644 --- a/arch/arm64/include/asm/cmpxchg.h +++ b/arch/arm64/include/asm/cmpxchg.h @@ -29,39 +29,39 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size switch (size) { case 1: asm volatile("// __xchg1\n" - "1: ldaxrb %w0, [%3]\n" - " stlxrb %w1, %w2, [%3]\n" + "1: ldaxrb %w0, %2\n" + " stlxrb %w1, %w3, %2\n" " cbnz %w1, 1b\n" - : "=&r" (ret), "=&r" (tmp) - : "r" (x), "r" (ptr) - : "memory", "cc"); + : "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr) + : "r" (x) + : "cc", "memory"); break; case 2: asm volatile("// __xchg2\n" - "1: ldaxrh %w0, [%3]\n" - " stlxrh %w1, %w2, [%3]\n" + "1: ldaxrh %w0, %2\n" + " stlxrh %w1, %w3, %2\n" " cbnz %w1, 1b\n" - : "=&r" (ret), "=&r" (tmp) - : "r" (x), "r" 
(ptr) - : "memory", "cc"); + : "=&r" (ret), "=&r" (tmp), "+Q" (*(u16 *)ptr) + : "r" (x) + : "cc", "memory"); break; case 4: asm volatile("// __xchg4\n" - "1: ldaxr %w0, [%3]\n" - " stlxr %w1, %w2, [%3]\n" + "1: ldaxr %w0, %2\n" + " stlxr %w1, %w3, %2\n" " cbnz %w1, 1b\n" - : "=&r" (ret), "=&r" (tmp) - : "r" (x), "r" (ptr) - : "memory", "cc"); + : "=&r" (ret), "=&r" (tmp), "+Q" (*(u32 *)ptr) + : "r" (x) + : "cc", "memory"); break; case 8: asm volatile("// __xchg8\n" - "1: ldaxr %0, [%3]\n" - " stlxr %w1, %2, [%3]\n" + "1: ldaxr %0, %2\n" + " stlxr %w1, %3, %2\n" " cbnz %w1, 1b\n" - : "=&r" (ret), "=&r" (tmp) - : "r" (x), "r" (ptr) - : "memory", "cc"); + : "=&r" (ret), "=&r" (tmp), "+Q" (*(u64 *)ptr) + : "r" (x) + : "cc", "memory"); break; default: BUILD_BUG(); @@ -82,14 +82,14 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, case 1: do { asm volatile("// __cmpxchg1\n" - " ldxrb %w1, [%2]\n" + " ldxrb %w1, %2\n" " mov %w0, #0\n" " cmp %w1, %w3\n" " b.ne 1f\n" - " stxrb %w0, %w4, [%2]\n" + " stxrb %w0, %w4, %2\n" "1:\n" - : "=&r" (res), "=&r" (oldval) - : "r" (ptr), "Ir" (old), "r" (new) + : "=&r" (res), "=&r" (oldval), "+Q" (*(u8 *)ptr) + : "Ir" (old), "r" (new) : "cc"); } while (res); break; @@ -97,29 +97,29 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, case 2: do { asm volatile("// __cmpxchg2\n" - " ldxrh %w1, [%2]\n" + " ldxrh %w1, %2\n" " mov %w0, #0\n" " cmp %w1, %w3\n" " b.ne 1f\n" - " stxrh %w0, %w4, [%2]\n" + " stxrh %w0, %w4, %2\n" "1:\n" - : "=&r" (res), "=&r" (oldval) - : "r" (ptr), "Ir" (old), "r" (new) - : "memory", "cc"); + : "=&r" (res), "=&r" (oldval), "+Q" (*(u16 *)ptr) + : "Ir" (old), "r" (new) + : "cc"); } while (res); break; case 4: do { asm volatile("// __cmpxchg4\n" - " ldxr %w1, [%2]\n" + " ldxr %w1, %2\n" " mov %w0, #0\n" " cmp %w1, %w3\n" " b.ne 1f\n" - " stxr %w0, %w4, [%2]\n" + " stxr %w0, %w4, %2\n" "1:\n" - : "=&r" (res), "=&r" (oldval) - : "r" (ptr), "Ir" (old), "r" (new) + : "=&r" (res), "=&r" (oldval), "+Q" (*(u32 *)ptr) + : "Ir" (old), "r" (new) : "cc"); } while (res); break; @@ -127,14 +127,14 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, case 8: do { asm volatile("// __cmpxchg8\n" - " ldxr %1, [%2]\n" + " ldxr %1, %2\n" " mov %w0, #0\n" " cmp %1, %3\n" " b.ne 1f\n" - " stxr %w0, %4, [%2]\n" + " stxr %w0, %4, %2\n" "1:\n" - : "=&r" (res), "=&r" (oldval) - : "r" (ptr), "Ir" (old), "r" (new) + : "=&r" (res), "=&r" (oldval), "+Q" (*(u64 *)ptr) + : "Ir" (old), "r" (new) : "cc"); } while (res); break; diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h index 37e610dc084..618b450e5a1 100644 --- a/arch/arm64/include/asm/compat.h +++ b/arch/arm64/include/asm/compat.h @@ -23,6 +23,7 @@ */ #include <linux/types.h> #include <linux/sched.h> +#include <linux/ptrace.h> #define COMPAT_USER_HZ 100 #define COMPAT_UTS_MACHINE "armv8l\0\0" @@ -209,10 +210,11 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr) return (u32)(unsigned long)uptr; } +#define compat_user_stack_pointer() (current_pt_regs()->compat_sp) + static inline void __user *arch_compat_alloc_user_space(long len) { - struct pt_regs *regs = task_pt_regs(current); - return (void __user *)regs->compat_sp - len; + return (void __user *)compat_user_stack_pointer() - len; } struct compat_ipc64_perm { diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h index 538f4b44db5..99477689419 100644 --- a/arch/arm64/include/asm/dma-mapping.h +++ 
b/arch/arm64/include/asm/dma-mapping.h @@ -50,6 +50,7 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr) static inline int dma_mapping_error(struct device *dev, dma_addr_t dev_addr) { struct dma_map_ops *ops = get_dma_ops(dev); + debug_dma_mapping_error(dev, dev_addr); return ops->mapping_error(dev, dev_addr); } diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h index 07fea290d7c..fe32c0e4ac0 100644 --- a/arch/arm64/include/asm/elf.h +++ b/arch/arm64/include/asm/elf.h @@ -26,7 +26,10 @@ typedef unsigned long elf_greg_t; -#define ELF_NGREG (sizeof (struct pt_regs) / sizeof(elf_greg_t)) +#define ELF_NGREG (sizeof(struct user_pt_regs) / sizeof(elf_greg_t)) +#define ELF_CORE_COPY_REGS(dest, regs) \ + *(struct user_pt_regs *)&(dest) = (regs)->user_regs; + typedef elf_greg_t elf_gregset_t[ELF_NGREG]; typedef struct user_fpsimd_state elf_fpregset_t; diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h index 3468ae8439f..c582fa31636 100644 --- a/arch/arm64/include/asm/futex.h +++ b/arch/arm64/include/asm/futex.h @@ -39,7 +39,7 @@ " .popsection\n" \ : "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp) \ : "r" (oparg), "Ir" (-EFAULT) \ - : "cc") + : "cc", "memory") static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h index d2f05a60827..57f12c991de 100644 --- a/arch/arm64/include/asm/io.h +++ b/arch/arm64/include/asm/io.h @@ -230,6 +230,9 @@ extern void __iounmap(volatile void __iomem *addr); #define ioremap_wc(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL_NC)) #define iounmap __iounmap +#define PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF) +#define PROT_SECT_DEVICE_nGnRE (PROT_SECT_DEFAULT | PTE_PXN | PTE_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE)) + #define ARCH_HAS_IOREMAP_WC #include <asm-generic/iomap.h> diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index 1cac16a001c..381f556b664 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -43,6 +43,7 @@ #define PAGE_OFFSET UL(0xffffffc000000000) #define MODULES_END (PAGE_OFFSET) #define MODULES_VADDR (MODULES_END - SZ_64M) +#define EARLYCON_IOBASE (MODULES_VADDR - SZ_4M) #define VA_BITS (39) #define TASK_SIZE_64 (UL(1) << VA_BITS) diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h index d4f7fd5b9e3..2494fc01896 100644 --- a/arch/arm64/include/asm/mmu.h +++ b/arch/arm64/include/asm/mmu.h @@ -26,5 +26,6 @@ typedef struct { extern void paging_init(void); extern void setup_mm_for_reboot(void); +extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt); #endif diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h index f68465dee02..e2bc385adb6 100644 --- a/arch/arm64/include/asm/mmu_context.h +++ b/arch/arm64/include/asm/mmu_context.h @@ -35,6 +35,21 @@ extern unsigned int cpu_last_asid; void __init_new_context(struct task_struct *tsk, struct mm_struct *mm); void __new_context(struct mm_struct *mm); +#ifdef CONFIG_PID_IN_CONTEXTIDR +static inline void contextidr_thread_switch(struct task_struct *next) +{ + asm( + " msr contextidr_el1, %0\n" + " isb" + : + : "r" (task_pid_nr(next))); +} +#else +static inline void contextidr_thread_switch(struct task_struct *next) +{ +} +#endif + /* * Set TTBR0 to empty_zero_page. No translations will be possible via TTBR0. 
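
Stepping back to the atomic.h and cmpxchg.h hunks above: the recurring change is from passing the address as a separate register operand ("r" (&v->counter), addressed as [%3]) to making the memory itself an in/out operand ("+Q" (v->counter)). The compiler then knows exactly which object the exclusives read and write, and the barrier-forming variants (*_return, cmpxchg) regain their ordering via an explicit "memory" clobber. A stand-alone sketch of the same idiom, assuming GCC or Clang on AArch64:

	static inline void sketch_atomic_add(int i, int *counter)
	{
		int result, tmp;

		asm volatile(
		"1:	ldxr	%w0, %2\n"	/* load-exclusive */
		"	add	%w0, %w0, %w3\n"
		"	stxr	%w1, %w0, %2\n"	/* store-exclusive, %w1 = status */
		"	cbnz	%w1, 1b"	/* retry if we lost the reservation */
		: "=&r" (result), "=&r" (tmp), "+Q" (*counter)
		: "Ir" (i)
		: "cc");
	}
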
*/ diff --git a/arch/arm64/include/asm/perf_event.h b/arch/arm64/include/asm/perf_event.h index a6fffd511c5..d26d1d53c0d 100644 --- a/arch/arm64/include/asm/perf_event.h +++ b/arch/arm64/include/asm/perf_event.h @@ -17,6 +17,11 @@ #ifndef __ASM_PERF_EVENT_H #define __ASM_PERF_EVENT_H -/* It's quiet around here... */ +#ifdef CONFIG_HW_PERF_EVENTS +struct pt_regs; +extern unsigned long perf_instruction_pointer(struct pt_regs *regs); +extern unsigned long perf_misc_flags(struct pt_regs *regs); +#define perf_misc_flags(regs) perf_misc_flags(regs) +#endif #endif diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 64b13394950..e333a243bfc 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -24,7 +24,8 @@ /* * Software defined PTE bits definition. */ -#define PTE_VALID (_AT(pteval_t, 1) << 0) /* pte_present() check */ +#define PTE_VALID (_AT(pteval_t, 1) << 0) +#define PTE_PROT_NONE (_AT(pteval_t, 1) << 1) /* only when !PTE_VALID */ #define PTE_FILE (_AT(pteval_t, 1) << 2) /* only when !pte_present() */ #define PTE_DIRTY (_AT(pteval_t, 1) << 55) #define PTE_SPECIAL (_AT(pteval_t, 1) << 56) @@ -60,9 +61,12 @@ extern void __pgd_error(const char *file, int line, unsigned long val); extern pgprot_t pgprot_default; -#define _MOD_PROT(p, b) __pgprot(pgprot_val(p) | (b)) +#define __pgprot_modify(prot,mask,bits) \ + __pgprot((pgprot_val(prot) & ~(mask)) | (bits)) + +#define _MOD_PROT(p, b) __pgprot_modify(p, 0, b) -#define PAGE_NONE _MOD_PROT(pgprot_default, PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY) +#define PAGE_NONE __pgprot_modify(pgprot_default, PTE_TYPE_MASK, PTE_PROT_NONE) #define PAGE_SHARED _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN) #define PAGE_SHARED_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN) #define PAGE_COPY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY) @@ -72,7 +76,7 @@ extern pgprot_t pgprot_default; #define PAGE_KERNEL _MOD_PROT(pgprot_default, PTE_PXN | PTE_UXN | PTE_DIRTY) #define PAGE_KERNEL_EXEC _MOD_PROT(pgprot_default, PTE_UXN | PTE_DIRTY) -#define __PAGE_NONE __pgprot(_PAGE_DEFAULT | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY) +#define __PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE) #define __PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN) #define __PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN) #define __PAGE_COPY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY) @@ -125,16 +129,15 @@ extern struct page *empty_zero_page; /* * The following only work if pte_present(). Undefined behaviour otherwise. 
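
One remark on the PTE_PROT_NONE rework above: a PROT_NONE page must fault on every access (hardware-valid bit clear) while still counting as pte_present() to the kernel, which is why software bit 1 joins PTE_VALID in the pte_present() test and why pte_modify() now takes both bits from the new protection. Such ptes are what user space creates with, for example:

	#include <sys/mman.h>

	/* guard-page sketch: any access now faults, yet the kernel still
	 * sees the pte as present (PTE_PROT_NONE set, PTE_VALID clear) */
	static void make_guard_page(void *page)
	{
		mprotect(page, 4096, PROT_NONE);
	}
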
*/ -#define pte_present(pte) (pte_val(pte) & PTE_VALID) +#define pte_present(pte) (pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)) #define pte_dirty(pte) (pte_val(pte) & PTE_DIRTY) #define pte_young(pte) (pte_val(pte) & PTE_AF) #define pte_special(pte) (pte_val(pte) & PTE_SPECIAL) #define pte_write(pte) (!(pte_val(pte) & PTE_RDONLY)) #define pte_exec(pte) (!(pte_val(pte) & PTE_UXN)) -#define pte_present_exec_user(pte) \ - ((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == \ - (PTE_VALID | PTE_USER)) +#define pte_valid_user(pte) \ + ((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER)) #define PTE_BIT_FUNC(fn,op) \ static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; } @@ -157,10 +160,13 @@ extern void __sync_icache_dcache(pte_t pteval, unsigned long addr); static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte) { - if (pte_present_exec_user(pte)) - __sync_icache_dcache(pte, addr); - if (!pte_dirty(pte)) - pte = pte_wrprotect(pte); + if (pte_valid_user(pte)) { + if (pte_exec(pte)) + __sync_icache_dcache(pte, addr); + if (!pte_dirty(pte)) + pte = pte_wrprotect(pte); + } + set_pte(ptep, pte); } @@ -170,9 +176,6 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, #define pte_huge(pte) ((pte_val(pte) & PTE_TYPE_MASK) == PTE_TYPE_HUGEPAGE) #define pte_mkhuge(pte) (__pte((pte_val(pte) & ~PTE_TYPE_MASK) | PTE_TYPE_HUGEPAGE)) -#define __pgprot_modify(prot,mask,bits) \ - __pgprot((pgprot_val(prot) & ~(mask)) | (bits)) - #define __HAVE_ARCH_PTE_SPECIAL /* @@ -264,7 +267,8 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr) static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) { - const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY; + const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY | + PTE_PROT_NONE | PTE_VALID; pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask); return pte; } diff --git a/arch/arm64/include/asm/psci.h b/arch/arm64/include/asm/psci.h new file mode 100644 index 00000000000..0604237ecd9 --- /dev/null +++ b/arch/arm64/include/asm/psci.h @@ -0,0 +1,38 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * Copyright (C) 2013 ARM Limited + */ + +#ifndef __ASM_PSCI_H +#define __ASM_PSCI_H + +#define PSCI_POWER_STATE_TYPE_STANDBY 0 +#define PSCI_POWER_STATE_TYPE_POWER_DOWN 1 + +struct psci_power_state { + u16 id; + u8 type; + u8 affinity_level; +}; + +struct psci_operations { + int (*cpu_suspend)(struct psci_power_state state, + unsigned long entry_point); + int (*cpu_off)(struct psci_power_state state); + int (*cpu_on)(unsigned long cpuid, unsigned long entry_point); + int (*migrate)(unsigned long cpuid); +}; + +extern struct psci_operations psci_ops; + +int psci_init(void); + +#endif /* __ASM_PSCI_H */ diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h index 4ce845f8ee1..41a71ee4c3d 100644 --- a/arch/arm64/include/asm/ptrace.h +++ b/arch/arm64/include/asm/ptrace.h @@ -42,6 +42,16 @@ #define COMPAT_PSR_MODE_UND 0x0000001b #define COMPAT_PSR_MODE_SYS 0x0000001f #define COMPAT_PSR_T_BIT 0x00000020 +#define COMPAT_PSR_F_BIT 0x00000040 +#define COMPAT_PSR_I_BIT 0x00000080 +#define COMPAT_PSR_A_BIT 0x00000100 +#define COMPAT_PSR_E_BIT 0x00000200 +#define COMPAT_PSR_J_BIT 0x01000000 +#define COMPAT_PSR_Q_BIT 0x08000000 +#define COMPAT_PSR_V_BIT 0x10000000 +#define COMPAT_PSR_C_BIT 0x20000000 +#define COMPAT_PSR_Z_BIT 0x40000000 +#define COMPAT_PSR_N_BIT 0x80000000 #define COMPAT_PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */ /* * These are 'magic' values for PTRACE_PEEKUSR that return info about where a diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h index 7e34295f78e..4b8023c5d14 100644 --- a/arch/arm64/include/asm/smp.h +++ b/arch/arm64/include/asm/smp.h @@ -66,4 +66,15 @@ extern volatile unsigned long secondary_holding_pen_release; extern void arch_send_call_function_single_ipi(int cpu); extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); +struct device_node; + +struct smp_enable_ops { + const char *name; + int (*init_cpu)(struct device_node *, int); + int (*prepare_cpu)(int); +}; + +extern const struct smp_enable_ops smp_spin_table_ops; +extern const struct smp_enable_ops smp_psci_ops; + #endif /* ifndef __ASM_SMP_H */ diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h index 41112fe2f8b..7065e920149 100644 --- a/arch/arm64/include/asm/spinlock.h +++ b/arch/arm64/include/asm/spinlock.h @@ -45,13 +45,13 @@ static inline void arch_spin_lock(arch_spinlock_t *lock) asm volatile( " sevl\n" "1: wfe\n" - "2: ldaxr %w0, [%1]\n" + "2: ldaxr %w0, %1\n" " cbnz %w0, 1b\n" - " stxr %w0, %w2, [%1]\n" + " stxr %w0, %w2, %1\n" " cbnz %w0, 2b\n" - : "=&r" (tmp) - : "r" (&lock->lock), "r" (1) - : "memory"); + : "=&r" (tmp), "+Q" (lock->lock) + : "r" (1) + : "cc", "memory"); } static inline int arch_spin_trylock(arch_spinlock_t *lock) @@ -59,13 +59,13 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock) unsigned int tmp; asm volatile( - " ldaxr %w0, [%1]\n" + " ldaxr %w0, %1\n" " cbnz %w0, 1f\n" - " stxr %w0, %w2, [%1]\n" + " stxr %w0, %w2, %1\n" "1:\n" - : "=&r" (tmp) - : "r" (&lock->lock), "r" (1) - : "memory"); + : "=&r" (tmp), "+Q" (lock->lock) + : "r" (1) + : "cc", "memory"); return !tmp; } @@ -73,8 +73,8 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock) static inline void arch_spin_unlock(arch_spinlock_t *lock) { asm volatile( - " stlr %w1, [%0]\n" - : : "r" (&lock->lock), "r" (0) : "memory"); + " stlr %w1, %0\n" + : "=Q" (lock->lock) : "r" (0) : "memory"); } /* @@ -94,13 +94,13 @@ static inline void arch_write_lock(arch_rwlock_t *rw) asm volatile( " sevl\n" "1: 
wfe\n" - "2: ldaxr %w0, [%1]\n" + "2: ldaxr %w0, %1\n" " cbnz %w0, 1b\n" - " stxr %w0, %w2, [%1]\n" + " stxr %w0, %w2, %1\n" " cbnz %w0, 2b\n" - : "=&r" (tmp) - : "r" (&rw->lock), "r" (0x80000000) - : "memory"); + : "=&r" (tmp), "+Q" (rw->lock) + : "r" (0x80000000) + : "cc", "memory"); } static inline int arch_write_trylock(arch_rwlock_t *rw) @@ -108,13 +108,13 @@ static inline int arch_write_trylock(arch_rwlock_t *rw) unsigned int tmp; asm volatile( - " ldaxr %w0, [%1]\n" + " ldaxr %w0, %1\n" " cbnz %w0, 1f\n" - " stxr %w0, %w2, [%1]\n" + " stxr %w0, %w2, %1\n" "1:\n" - : "=&r" (tmp) - : "r" (&rw->lock), "r" (0x80000000) - : "memory"); + : "=&r" (tmp), "+Q" (rw->lock) + : "r" (0x80000000) + : "cc", "memory"); return !tmp; } @@ -122,8 +122,8 @@ static inline int arch_write_trylock(arch_rwlock_t *rw) static inline void arch_write_unlock(arch_rwlock_t *rw) { asm volatile( - " stlr %w1, [%0]\n" - : : "r" (&rw->lock), "r" (0) : "memory"); + " stlr %w1, %0\n" + : "=Q" (rw->lock) : "r" (0) : "memory"); } /* write_can_lock - would write_trylock() succeed? */ @@ -148,14 +148,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw) asm volatile( " sevl\n" "1: wfe\n" - "2: ldaxr %w0, [%2]\n" + "2: ldaxr %w0, %2\n" " add %w0, %w0, #1\n" " tbnz %w0, #31, 1b\n" - " stxr %w1, %w0, [%2]\n" + " stxr %w1, %w0, %2\n" " cbnz %w1, 2b\n" - : "=&r" (tmp), "=&r" (tmp2) - : "r" (&rw->lock) - : "memory"); + : "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock) + : + : "cc", "memory"); } static inline void arch_read_unlock(arch_rwlock_t *rw) @@ -163,13 +163,13 @@ static inline void arch_read_unlock(arch_rwlock_t *rw) unsigned int tmp, tmp2; asm volatile( - "1: ldxr %w0, [%2]\n" + "1: ldxr %w0, %2\n" " sub %w0, %w0, #1\n" - " stlxr %w1, %w0, [%2]\n" + " stlxr %w1, %w0, %2\n" " cbnz %w1, 1b\n" - : "=&r" (tmp), "=&r" (tmp2) - : "r" (&rw->lock) - : "memory"); + : "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock) + : + : "cc", "memory"); } static inline int arch_read_trylock(arch_rwlock_t *rw) @@ -177,14 +177,14 @@ static inline int arch_read_trylock(arch_rwlock_t *rw) unsigned int tmp, tmp2 = 1; asm volatile( - " ldaxr %w0, [%2]\n" + " ldaxr %w0, %2\n" " add %w0, %w0, #1\n" " tbnz %w0, #31, 1f\n" - " stxr %w1, %w0, [%2]\n" + " stxr %w1, %w0, %2\n" "1:\n" - : "=&r" (tmp), "+r" (tmp2) - : "r" (&rw->lock) - : "memory"); + : "=&r" (tmp), "+r" (tmp2), "+Q" (rw->lock) + : + : "cc", "memory"); return !tmp2; } diff --git a/arch/arm64/include/asm/syscalls.h b/arch/arm64/include/asm/syscalls.h index 20d63b29066..48fe7c600e9 100644 --- a/arch/arm64/include/asm/syscalls.h +++ b/arch/arm64/include/asm/syscalls.h @@ -24,8 +24,6 @@ * System call wrappers implemented in kernel/entry.S. 
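
The spinlock and rwlock rewrites above follow the same discipline as the atomics: ldaxr acquires, stxr/stlxr publishes, and sevl/wfe keep a contended CPU in a low-power wait until the unlocking store-release generates a wake-up event. A cut-down user-space rendering of arch_spin_lock()/arch_spin_unlock(), assuming an AArch64 toolchain (wfe behaviour at EL0 is implementation-dependent, so treat this strictly as a sketch):

	static inline void sketch_spin_lock(unsigned int *lock)
	{
		unsigned int tmp;

		asm volatile(
		"	sevl\n"			/* arm the first wfe */
		"1:	wfe\n"
		"2:	ldaxr	%w0, %1\n"
		"	cbnz	%w0, 1b\n"	/* held: wait for an event */
		"	stxr	%w0, %w2, %1\n"
		"	cbnz	%w0, 2b\n"	/* lost the exclusive: retry */
		: "=&r" (tmp), "+Q" (*lock)
		: "r" (1)
		: "cc", "memory");
	}

	static inline void sketch_spin_unlock(unsigned int *lock)
	{
		asm volatile("stlr %w1, %0"	/* store-release; wakes waiters */
		: "=Q" (*lock) : "r" (0) : "memory");
	}
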
*/ asmlinkage long sys_rt_sigreturn_wrapper(void); -asmlinkage long sys_sigaltstack_wrapper(const stack_t __user *uss, - stack_t __user *uoss); #include <asm-generic/syscalls.h> diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h index d69aeea6da1..82ce217e94c 100644 --- a/arch/arm64/include/asm/unistd.h +++ b/arch/arm64/include/asm/unistd.h @@ -22,11 +22,9 @@ #define __ARCH_WANT_SYS_NICE #define __ARCH_WANT_SYS_SIGPENDING #define __ARCH_WANT_SYS_SIGPROCMASK -#define __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND #define __ARCH_WANT_COMPAT_SYS_SENDFILE #define __ARCH_WANT_SYS_FORK #define __ARCH_WANT_SYS_VFORK #endif -#define __ARCH_WANT_SYS_EXECVE #define __ARCH_WANT_SYS_CLONE #include <uapi/asm/unistd.h> diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h index 58432625fdb..12f22492df4 100644 --- a/arch/arm64/include/asm/unistd32.h +++ b/arch/arm64/include/asm/unistd32.h @@ -40,7 +40,7 @@ __SYSCALL(15, sys_chmod) __SYSCALL(16, sys_lchown16) __SYSCALL(17, sys_ni_syscall) /* 17 was sys_break */ __SYSCALL(18, sys_ni_syscall) /* 18 was sys_stat */ -__SYSCALL(19, compat_sys_lseek_wrapper) +__SYSCALL(19, compat_sys_lseek) __SYSCALL(20, sys_getpid) __SYSCALL(21, compat_sys_mount) __SYSCALL(22, sys_ni_syscall) /* 22 was sys_umount */ @@ -93,7 +93,7 @@ __SYSCALL(68, sys_ni_syscall) /* 68 was sys_sgetmask */ __SYSCALL(69, sys_ni_syscall) /* 69 was sys_ssetmask */ __SYSCALL(70, sys_setreuid16) __SYSCALL(71, sys_setregid16) -__SYSCALL(72, compat_sys_sigsuspend) +__SYSCALL(72, sys_sigsuspend) __SYSCALL(73, compat_sys_sigpending) __SYSCALL(74, sys_sethostname) __SYSCALL(75, compat_sys_setrlimit) @@ -113,8 +113,8 @@ __SYSCALL(88, sys_reboot) __SYSCALL(89, sys_ni_syscall) /* 89 was sys_readdir */ __SYSCALL(90, sys_ni_syscall) /* 90 was sys_mmap */ __SYSCALL(91, sys_munmap) -__SYSCALL(92, sys_truncate) -__SYSCALL(93, sys_ftruncate) +__SYSCALL(92, compat_sys_truncate) +__SYSCALL(93, compat_sys_ftruncate) __SYSCALL(94, sys_fchmod) __SYSCALL(95, sys_fchown16) __SYSCALL(96, sys_getpriority) @@ -207,7 +207,7 @@ __SYSCALL(182, sys_chown16) __SYSCALL(183, sys_getcwd) __SYSCALL(184, sys_capget) __SYSCALL(185, sys_capset) -__SYSCALL(186, compat_sys_sigaltstack_wrapper) +__SYSCALL(186, compat_sys_sigaltstack) __SYSCALL(187, compat_sys_sendfile) __SYSCALL(188, sys_ni_syscall) /* 188 reserved */ __SYSCALL(189, sys_ni_syscall) /* 189 reserved */ @@ -395,8 +395,13 @@ __SYSCALL(370, sys_name_to_handle_at) __SYSCALL(371, compat_sys_open_by_handle_at) __SYSCALL(372, compat_sys_clock_adjtime) __SYSCALL(373, sys_syncfs) +__SYSCALL(374, compat_sys_sendmmsg) +__SYSCALL(375, sys_setns) +__SYSCALL(376, compat_sys_process_vm_readv) +__SYSCALL(377, compat_sys_process_vm_writev) +__SYSCALL(378, sys_ni_syscall) /* 378 for kcmp */ -#define __NR_compat_syscalls 374 +#define __NR_compat_syscalls 379 /* * Compat syscall numbers used by the AArch64 kernel. 
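
Two details in the unistd32.h hunk are easy to miss. First, truncate/ftruncate move to compat wrappers because a 32-bit task passes a 32-bit length that must be sign-extended rather than zero-extended on the way to the 64-bit syscall; roughly (a sketch, not the kernel's exact wrapper):

	static long compat_length(unsigned int len32)
	{
		/* (u32)-1 from a 32-bit caller must arrive as (long)-1 */
		return (long)(int)len32;
	}

Second, __NR_compat_syscalls must sit one past the highest allocated slot: the table now ends at 378 (reserved for kcmp), hence 379.
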
diff --git a/arch/arm64/include/uapi/asm/Kbuild b/arch/arm64/include/uapi/asm/Kbuild index ca5b65f75c7..e4b78bdca19 100644 --- a/arch/arm64/include/uapi/asm/Kbuild +++ b/arch/arm64/include/uapi/asm/Kbuild @@ -1,11 +1,14 @@ # UAPI Header export list include include/uapi/asm-generic/Kbuild.asm +generic-y += kvm_para.h + header-y += auxvec.h header-y += bitsperlong.h header-y += byteorder.h header-y += fcntl.h header-y += hwcap.h +header-y += kvm_para.h header-y += param.h header-y += ptrace.h header-y += setup.h diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index 74239c31e25..7b4b564961d 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -9,14 +9,15 @@ AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET) arm64-obj-y := cputable.o debug-monitors.o entry.o irq.o fpsimd.o \ entry-fpsimd.o process.o ptrace.o setup.o signal.o \ sys.o stacktrace.o time.o traps.o io.o vdso.o \ - hyp-stub.o + hyp-stub.o psci.o arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \ sys_compat.o arm64-obj-$(CONFIG_MODULES) += arm64ksyms.o module.o -arm64-obj-$(CONFIG_SMP) += smp.o +arm64-obj-$(CONFIG_SMP) += smp.o smp_spin_table.o smp_psci.o arm64-obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT)+= hw_breakpoint.o +arm64-obj-$(CONFIG_EARLY_PRINTK) += early_printk.o obj-y += $(arm64-obj-y) vdso/ obj-m += $(arm64-obj-m) diff --git a/arch/arm64/kernel/early_printk.c b/arch/arm64/kernel/early_printk.c new file mode 100644 index 00000000000..7e320a2edb9 --- /dev/null +++ b/arch/arm64/kernel/early_printk.c @@ -0,0 +1,118 @@ +/* + * Earlyprintk support. + * + * Copyright (C) 2012 ARM Ltd. + * Author: Catalin Marinas <catalin.marinas@arm.com> + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#include <linux/kernel.h> +#include <linux/console.h> +#include <linux/init.h> +#include <linux/string.h> +#include <linux/mm.h> +#include <linux/io.h> + +#include <linux/amba/serial.h> + +static void __iomem *early_base; +static void (*printch)(char ch); + +/* + * PL011 single character TX. + */ +static void pl011_printch(char ch) +{ + while (readl_relaxed(early_base + UART01x_FR) & UART01x_FR_TXFF) + ; + writeb_relaxed(ch, early_base + UART01x_DR); + while (readl_relaxed(early_base + UART01x_FR) & UART01x_FR_BUSY) + ; +} + +struct earlycon_match { + const char *name; + void (*printch)(char ch); +}; + +static const struct earlycon_match earlycon_match[] __initconst = { + { .name = "pl011", .printch = pl011_printch, }, + {} +}; + +static void early_write(struct console *con, const char *s, unsigned n) +{ + while (n-- > 0) { + if (*s == '\n') + printch('\r'); + printch(*s); + s++; + } +} + +static struct console early_console = { + .name = "earlycon", + .write = early_write, + .flags = CON_PRINTBUFFER | CON_BOOT, + .index = -1, +}; + +/* + * Parse earlyprintk=... parameter in the format: + * + * <name>[,<addr>][,<options>] + * + * and register the early console. 
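
The earlycon_match[] table above keeps the early console pluggable: a new UART needs only a polled single-character TX routine and a table entry. A hypothetical sketch (the "myuart" name and MYUART_* register definitions are illustrative, not from the patch):

	/* hypothetical: poll a TX-ready flag, then write the data register */
	static void myuart_printch(char ch)
	{
		while (!(readl_relaxed(early_base + MYUART_STATUS) & MYUART_TX_READY))
			;
		writeb_relaxed(ch, early_base + MYUART_DATA);
	}

	/* and in earlycon_match[]:
	 *	{ .name = "myuart", .printch = myuart_printch, },
	 */
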
It is assumed that the UART has been + * initialised by the bootloader already. + */ +static int __init setup_early_printk(char *buf) +{ + const struct earlycon_match *match = earlycon_match; + phys_addr_t paddr = 0; + + if (!buf) { + pr_warning("No earlyprintk arguments passed.\n"); + return 0; + } + + while (match->name) { + size_t len = strlen(match->name); + if (!strncmp(buf, match->name, len)) { + buf += len; + break; + } + match++; + } + if (!match->name) { + pr_warning("Unknown earlyprintk arguments: %s\n", buf); + return 0; + } + + /* I/O address */ + if (!strncmp(buf, ",0x", 3)) { + char *e; + paddr = simple_strtoul(buf + 1, &e, 16); + buf = e; + } + /* no options parsing yet */ + + if (paddr) + early_base = early_io_map(paddr, EARLYCON_IOBASE); + + printch = match->printch; + register_console(&early_console); + + return 0; +} + +early_param("earlyprintk", setup_early_printk); diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 9c94f404ded..514d6098dbe 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -677,10 +677,5 @@ ENTRY(sys_rt_sigreturn_wrapper) b sys_rt_sigreturn ENDPROC(sys_rt_sigreturn_wrapper) -ENTRY(sys_sigaltstack_wrapper) - ldr x2, [sp, #S_SP] - b sys_sigaltstack -ENDPROC(sys_sigaltstack_wrapper) - ENTRY(handle_arch_irq) .quad 0 diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 368ad1f7c36..0a0a4975682 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -82,10 +82,8 @@ #ifdef CONFIG_ARM64_64K_PAGES #define MM_MMUFLAGS PTE_ATTRINDX(MT_NORMAL) | PTE_FLAGS -#define IO_MMUFLAGS PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_XN | PTE_FLAGS #else #define MM_MMUFLAGS PMD_ATTRINDX(MT_NORMAL) | PMD_FLAGS -#define IO_MMUFLAGS PMD_ATTRINDX(MT_DEVICE_nGnRE) | PMD_SECT_XN | PMD_FLAGS #endif /* @@ -368,6 +366,7 @@ ENDPROC(__calc_phys_offset) * - identity mapping to enable the MMU (low address, TTBR0) * - first few MB of the kernel linear mapping to jump to once the MMU has * been enabled, including the FDT blob (TTBR1) + * - UART mapping if CONFIG_EARLY_PRINTK is enabled (TTBR1) */ __create_page_tables: pgtbl x25, x26, x24 // idmap_pg_dir and swapper_pg_dir addresses @@ -420,6 +419,15 @@ __create_page_tables: sub x6, x6, #1 // inclusive range create_block_map x0, x7, x3, x5, x6 1: +#ifdef CONFIG_EARLY_PRINTK + /* + * Create the pgd entry for the UART mapping. The full mapping is done + * later based earlyprintk kernel parameter. 
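
The fixed virtual address used for that early UART mapping follows directly from the constants added to memory.h earlier in this diff; working it through:

	PAGE_OFFSET     = 0xffffffc000000000
	MODULES_VADDR   = PAGE_OFFSET - SZ_64M  = 0xffffffbffc000000
	EARLYCON_IOBASE = MODULES_VADDR - SZ_4M = 0xffffffbffbc00000

head.S only installs the pgd entry covering EARLYCON_IOBASE; early_io_map() fills in the actual UART pages once setup_early_printk() has parsed the physical base from the command line.
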
+ */ + ldr x5, =EARLYCON_IOBASE // UART virtual address + add x0, x26, #2 * PAGE_SIZE // section table address + create_pgd_entry x26, x0, x5, x6, x7 +#endif ret ENDPROC(__create_page_tables) .ltorg diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index c76c7241125..1e49e5eb81e 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c @@ -1221,7 +1221,7 @@ static struct of_device_id armpmu_of_device_ids[] = { {}, }; -static int __devinit armpmu_device_probe(struct platform_device *pdev) +static int armpmu_device_probe(struct platform_device *pdev) { if (!cpu_pmu) return -ENODEV; @@ -1331,6 +1331,11 @@ void perf_callchain_user(struct perf_callchain_entry *entry, { struct frame_tail __user *tail; + if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { + /* We don't support guest os callchain now */ + return; + } + tail = (struct frame_tail __user *)regs->regs[29]; while (entry->nr < PERF_MAX_STACK_DEPTH && @@ -1355,8 +1360,40 @@ void perf_callchain_kernel(struct perf_callchain_entry *entry, { struct stackframe frame; + if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { + /* We don't support guest os callchain now */ + return; + } + frame.fp = regs->regs[29]; frame.sp = regs->sp; frame.pc = regs->pc; walk_stackframe(&frame, callchain_trace, entry); } + +unsigned long perf_instruction_pointer(struct pt_regs *regs) +{ + if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) + return perf_guest_cbs->get_guest_ip(); + + return instruction_pointer(regs); +} + +unsigned long perf_misc_flags(struct pt_regs *regs) +{ + int misc = 0; + + if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { + if (perf_guest_cbs->is_user_mode()) + misc |= PERF_RECORD_MISC_GUEST_USER; + else + misc |= PERF_RECORD_MISC_GUEST_KERNEL; + } else { + if (user_mode(regs)) + misc |= PERF_RECORD_MISC_USER; + else + misc |= PERF_RECORD_MISC_KERNEL; + } + + return misc; +} diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index cb0956bc96e..0337cdb0667 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -45,9 +45,10 @@ #include <asm/compat.h> #include <asm/cacheflush.h> +#include <asm/fpsimd.h> +#include <asm/mmu_context.h> #include <asm/processor.h> #include <asm/stacktrace.h> -#include <asm/fpsimd.h> static void setup_restart(void) { @@ -97,14 +98,9 @@ static void default_idle(void) local_irq_enable(); } -void (*pm_idle)(void) = default_idle; -EXPORT_SYMBOL_GPL(pm_idle); - /* - * The idle thread, has rather strange semantics for calling pm_idle, - * but this is what x86 does and we need to do the same, so that - * things like cpuidle get called in the same way. The only difference - * is that we always respect 'hlt_counter' to prevent low power idle. + * The idle thread. + * We always respect 'hlt_counter' to prevent low power idle. */ void cpu_idle(void) { @@ -122,10 +118,10 @@ void cpu_idle(void) local_irq_disable(); if (!need_resched()) { stop_critical_timings(); - pm_idle(); + default_idle(); start_critical_timings(); /* - * pm_idle functions should always return + * default_idle functions should always return * with IRQs enabled. 
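
perf_misc_flags() and perf_instruction_pointer() above only deviate from the defaults once a hypervisor has registered guest-state callbacks; nothing in this series registers them on arm64 yet. The hook-up would mirror the existing x86 KVM usage, along these lines (the my_* helpers are hypothetical):

	static struct perf_guest_info_callbacks guest_cbs = {
		.is_in_guest	= my_is_in_guest,
		.is_user_mode	= my_is_guest_user_mode,
		.get_guest_ip	= my_get_guest_ip,
	};

	/* during initialisation: */
	perf_register_guest_info_callbacks(&guest_cbs);
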
*/ WARN_ON(irqs_disabled()); @@ -319,6 +315,7 @@ struct task_struct *__switch_to(struct task_struct *prev, /* the actual thread switch */ last = cpu_switch_to(prev, next); + contextidr_thread_switch(next); return last; } diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c new file mode 100644 index 00000000000..14f73c445ff --- /dev/null +++ b/arch/arm64/kernel/psci.c @@ -0,0 +1,211 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Copyright (C) 2013 ARM Limited + * + * Author: Will Deacon <will.deacon@arm.com> + */ + +#define pr_fmt(fmt) "psci: " fmt + +#include <linux/init.h> +#include <linux/of.h> + +#include <asm/compiler.h> +#include <asm/errno.h> +#include <asm/psci.h> + +struct psci_operations psci_ops; + +static int (*invoke_psci_fn)(u64, u64, u64, u64); + +enum psci_function { + PSCI_FN_CPU_SUSPEND, + PSCI_FN_CPU_ON, + PSCI_FN_CPU_OFF, + PSCI_FN_MIGRATE, + PSCI_FN_MAX, +}; + +static u32 psci_function_id[PSCI_FN_MAX]; + +#define PSCI_RET_SUCCESS 0 +#define PSCI_RET_EOPNOTSUPP -1 +#define PSCI_RET_EINVAL -2 +#define PSCI_RET_EPERM -3 + +static int psci_to_linux_errno(int errno) +{ + switch (errno) { + case PSCI_RET_SUCCESS: + return 0; + case PSCI_RET_EOPNOTSUPP: + return -EOPNOTSUPP; + case PSCI_RET_EINVAL: + return -EINVAL; + case PSCI_RET_EPERM: + return -EPERM; + }; + + return -EINVAL; +} + +#define PSCI_POWER_STATE_ID_MASK 0xffff +#define PSCI_POWER_STATE_ID_SHIFT 0 +#define PSCI_POWER_STATE_TYPE_MASK 0x1 +#define PSCI_POWER_STATE_TYPE_SHIFT 16 +#define PSCI_POWER_STATE_AFFL_MASK 0x3 +#define PSCI_POWER_STATE_AFFL_SHIFT 24 + +static u32 psci_power_state_pack(struct psci_power_state state) +{ + return ((state.id & PSCI_POWER_STATE_ID_MASK) + << PSCI_POWER_STATE_ID_SHIFT) | + ((state.type & PSCI_POWER_STATE_TYPE_MASK) + << PSCI_POWER_STATE_TYPE_SHIFT) | + ((state.affinity_level & PSCI_POWER_STATE_AFFL_MASK) + << PSCI_POWER_STATE_AFFL_SHIFT); +} + +/* + * The following two functions are invoked via the invoke_psci_fn pointer + * and will not be inlined, allowing us to piggyback on the AAPCS. 
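
Everything in psci.c funnels through the invoke_psci_fn pointer: psci_init() below selects __invoke_psci_fn_hvc or __invoke_psci_fn_smc from the device-tree "method" property and fills in one psci_ops hook per advertised function ID. From the caller's side it reduces to a sketch like this (the MPIDR value and entry point are placeholders):

	if (psci_ops.cpu_on) {
		int err = psci_ops.cpu_on(target_mpidr,		/* placeholder CPU id */
					  entry_point_phys);	/* placeholder entry */
		if (err)
			pr_err("failed to boot secondary (%d)\n", err);
	}
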
+ */ +static noinline int __invoke_psci_fn_hvc(u64 function_id, u64 arg0, u64 arg1, + u64 arg2) +{ + asm volatile( + __asmeq("%0", "x0") + __asmeq("%1", "x1") + __asmeq("%2", "x2") + __asmeq("%3", "x3") + "hvc #0\n" + : "+r" (function_id) + : "r" (arg0), "r" (arg1), "r" (arg2)); + + return function_id; +} + +static noinline int __invoke_psci_fn_smc(u64 function_id, u64 arg0, u64 arg1, + u64 arg2) +{ + asm volatile( + __asmeq("%0", "x0") + __asmeq("%1", "x1") + __asmeq("%2", "x2") + __asmeq("%3", "x3") + "smc #0\n" + : "+r" (function_id) + : "r" (arg0), "r" (arg1), "r" (arg2)); + + return function_id; +} + +static int psci_cpu_suspend(struct psci_power_state state, + unsigned long entry_point) +{ + int err; + u32 fn, power_state; + + fn = psci_function_id[PSCI_FN_CPU_SUSPEND]; + power_state = psci_power_state_pack(state); + err = invoke_psci_fn(fn, power_state, entry_point, 0); + return psci_to_linux_errno(err); +} + +static int psci_cpu_off(struct psci_power_state state) +{ + int err; + u32 fn, power_state; + + fn = psci_function_id[PSCI_FN_CPU_OFF]; + power_state = psci_power_state_pack(state); + err = invoke_psci_fn(fn, power_state, 0, 0); + return psci_to_linux_errno(err); +} + +static int psci_cpu_on(unsigned long cpuid, unsigned long entry_point) +{ + int err; + u32 fn; + + fn = psci_function_id[PSCI_FN_CPU_ON]; + err = invoke_psci_fn(fn, cpuid, entry_point, 0); + return psci_to_linux_errno(err); +} + +static int psci_migrate(unsigned long cpuid) +{ + int err; + u32 fn; + + fn = psci_function_id[PSCI_FN_MIGRATE]; + err = invoke_psci_fn(fn, cpuid, 0, 0); + return psci_to_linux_errno(err); +} + +static const struct of_device_id psci_of_match[] __initconst = { + { .compatible = "arm,psci", }, + {}, +}; + +int __init psci_init(void) +{ + struct device_node *np; + const char *method; + u32 id; + int err = 0; + + np = of_find_matching_node(NULL, psci_of_match); + if (!np) + return -ENODEV; + + pr_info("probing function IDs from device-tree\n"); + + if (of_property_read_string(np, "method", &method)) { + pr_warning("missing \"method\" property\n"); + err = -ENXIO; + goto out_put_node; + } + + if (!strcmp("hvc", method)) { + invoke_psci_fn = __invoke_psci_fn_hvc; + } else if (!strcmp("smc", method)) { + invoke_psci_fn = __invoke_psci_fn_smc; + } else { + pr_warning("invalid \"method\" property: %s\n", method); + err = -EINVAL; + goto out_put_node; + } + + if (!of_property_read_u32(np, "cpu_suspend", &id)) { + psci_function_id[PSCI_FN_CPU_SUSPEND] = id; + psci_ops.cpu_suspend = psci_cpu_suspend; + } + + if (!of_property_read_u32(np, "cpu_off", &id)) { + psci_function_id[PSCI_FN_CPU_OFF] = id; + psci_ops.cpu_off = psci_cpu_off; + } + + if (!of_property_read_u32(np, "cpu_on", &id)) { + psci_function_id[PSCI_FN_CPU_ON] = id; + psci_ops.cpu_on = psci_cpu_on; + } + + if (!of_property_read_u32(np, "migrate", &id)) { + psci_function_id[PSCI_FN_MIGRATE] = id; + psci_ops.migrate = psci_migrate; + } + +out_put_node: + of_node_put(np); + return err; +} diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index 7665a9bfdb1..113db863f83 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -39,6 +39,7 @@ #include <linux/proc_fs.h> #include <linux/memblock.h> #include <linux/of_fdt.h> +#include <linux/of_platform.h> #include <asm/cputype.h> #include <asm/elf.h> @@ -49,6 +50,7 @@ #include <asm/tlbflush.h> #include <asm/traps.h> #include <asm/memblock.h> +#include <asm/psci.h> unsigned int processor_id; EXPORT_SYMBOL(processor_id); @@ -260,6 +262,8 @@ void __init 
setup_arch(char **cmdline_p) unflatten_device_tree(); + psci_init(); + #ifdef CONFIG_SMP smp_init_cpus(); #endif @@ -289,6 +293,13 @@ static int __init topology_init(void) } subsys_initcall(topology_init); +static int __init arm64_device_probe(void) +{ + of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); + return 0; +} +device_initcall(arm64_device_probe); + static const char *hwcap_str[] = { "fp", "asimd", diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c index abd756315cb..890a591f75d 100644 --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c @@ -149,8 +149,7 @@ asmlinkage long sys_rt_sigreturn(struct pt_regs *regs) if (restore_sigframe(regs, frame)) goto badframe; - if (do_sigaltstack(&frame->uc.uc_stack, - NULL, regs->sp) == -EFAULT) + if (restore_altstack(&frame->uc.uc_stack)) goto badframe; return regs->regs[0]; @@ -164,12 +163,6 @@ badframe: return 0; } -asmlinkage long sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, - unsigned long sp) -{ - return do_sigaltstack(uss, uoss, sp); -} - static int setup_sigframe(struct rt_sigframe __user *sf, struct pt_regs *regs, sigset_t *set) { @@ -250,7 +243,6 @@ static int setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set, struct pt_regs *regs) { struct rt_sigframe __user *frame; - stack_t stack; int err = 0; frame = get_sigframe(ka, regs); @@ -260,12 +252,7 @@ static int setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info, __put_user_error(0, &frame->uc.uc_flags, err); __put_user_error(NULL, &frame->uc.uc_link, err); - memset(&stack, 0, sizeof(stack)); - stack.ss_sp = (void __user *)current->sas_ss_sp; - stack.ss_flags = sas_ss_flags(regs->sp); - stack.ss_size = current->sas_ss_size; - err |= __copy_to_user(&frame->uc.uc_stack, &stack, sizeof(stack)); - + err |= __save_altstack(&frame->uc.uc_stack, regs->sp); err |= setup_sigframe(frame, regs, set); if (err == 0) { setup_return(regs, ka, frame, usig); diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c index a4db3d22aac..7f4f3673f2b 100644 --- a/arch/arm64/kernel/signal32.c +++ b/arch/arm64/kernel/signal32.c @@ -28,26 +28,6 @@ #include <asm/uaccess.h> #include <asm/unistd32.h> -struct compat_sigaction { - compat_uptr_t sa_handler; - compat_ulong_t sa_flags; - compat_uptr_t sa_restorer; - compat_sigset_t sa_mask; -}; - -struct compat_old_sigaction { - compat_uptr_t sa_handler; - compat_old_sigset_t sa_mask; - compat_ulong_t sa_flags; - compat_uptr_t sa_restorer; -}; - -typedef struct compat_sigaltstack { - compat_uptr_t ss_sp; - int ss_flags; - compat_size_t ss_size; -} compat_stack_t; - struct compat_sigcontext { /* We always set these two fields to 0 */ compat_ulong_t trap_no; @@ -76,7 +56,7 @@ struct compat_sigcontext { struct compat_ucontext { compat_ulong_t uc_flags; - struct compat_ucontext *uc_link; + compat_uptr_t uc_link; compat_stack_t uc_stack; struct compat_sigcontext uc_mcontext; compat_sigset_t uc_sigmask; @@ -339,127 +319,6 @@ static int compat_restore_vfp_context(struct compat_vfp_sigframe __user *frame) return err ? -EFAULT : 0; } -/* - * atomically swap in the new signal mask, and wait for a signal. 
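
The signal.c conversion above is behaviour-preserving: __save_altstack()/restore_altstack() are the generic equivalents of the open-coded block being deleted. Roughly, the save side amounts to the same stores the old code made by hand:

	/* __save_altstack(&frame->uc.uc_stack, regs->sp), approximately: */
	stack_t stack = {
		.ss_sp    = (void __user *)current->sas_ss_sp,
		.ss_flags = sas_ss_flags(regs->sp),
		.ss_size  = current->sas_ss_size,
	};
	err |= __copy_to_user(&frame->uc.uc_stack, &stack, sizeof(stack));

(The generic helper uses per-field put_user() rather than one bulk copy, but the stored values are identical.)
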
- */ -asmlinkage int compat_sys_sigsuspend(int restart, compat_ulong_t oldmask, - compat_old_sigset_t mask) -{ - sigset_t blocked; - - siginitset(¤t->blocked, mask); - return sigsuspend(&blocked); -} - -asmlinkage int compat_sys_sigaction(int sig, - const struct compat_old_sigaction __user *act, - struct compat_old_sigaction __user *oact) -{ - struct k_sigaction new_ka, old_ka; - int ret; - compat_old_sigset_t mask; - compat_uptr_t handler, restorer; - - if (act) { - if (!access_ok(VERIFY_READ, act, sizeof(*act)) || - __get_user(handler, &act->sa_handler) || - __get_user(restorer, &act->sa_restorer) || - __get_user(new_ka.sa.sa_flags, &act->sa_flags) || - __get_user(mask, &act->sa_mask)) - return -EFAULT; - - new_ka.sa.sa_handler = compat_ptr(handler); - new_ka.sa.sa_restorer = compat_ptr(restorer); - siginitset(&new_ka.sa.sa_mask, mask); - } - - ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); - - if (!ret && oact) { - if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) || - __put_user(ptr_to_compat(old_ka.sa.sa_handler), - &oact->sa_handler) || - __put_user(ptr_to_compat(old_ka.sa.sa_restorer), - &oact->sa_restorer) || - __put_user(old_ka.sa.sa_flags, &oact->sa_flags) || - __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask)) - return -EFAULT; - } - - return ret; -} - -asmlinkage int compat_sys_rt_sigaction(int sig, - const struct compat_sigaction __user *act, - struct compat_sigaction __user *oact, - compat_size_t sigsetsize) -{ - struct k_sigaction new_ka, old_ka; - int ret; - - /* XXX: Don't preclude handling different sized sigset_t's. */ - if (sigsetsize != sizeof(compat_sigset_t)) - return -EINVAL; - - if (act) { - compat_uptr_t handler, restorer; - - ret = get_user(handler, &act->sa_handler); - new_ka.sa.sa_handler = compat_ptr(handler); - ret |= get_user(restorer, &act->sa_restorer); - new_ka.sa.sa_restorer = compat_ptr(restorer); - ret |= get_sigset_t(&new_ka.sa.sa_mask, &act->sa_mask); - ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags); - if (ret) - return -EFAULT; - } - - ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); - if (!ret && oact) { - ret = put_user(ptr_to_compat(old_ka.sa.sa_handler), &oact->sa_handler); - ret |= put_sigset_t(&oact->sa_mask, &old_ka.sa.sa_mask); - ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags); - } - return ret; -} - -int compat_do_sigaltstack(compat_uptr_t compat_uss, compat_uptr_t compat_uoss, - compat_ulong_t sp) -{ - compat_stack_t __user *newstack = compat_ptr(compat_uss); - compat_stack_t __user *oldstack = compat_ptr(compat_uoss); - compat_uptr_t ss_sp; - int ret; - mm_segment_t old_fs; - stack_t uss, uoss; - - /* Marshall the compat new stack into a stack_t */ - if (newstack) { - if (get_user(ss_sp, &newstack->ss_sp) || - __get_user(uss.ss_flags, &newstack->ss_flags) || - __get_user(uss.ss_size, &newstack->ss_size)) - return -EFAULT; - uss.ss_sp = compat_ptr(ss_sp); - } - - old_fs = get_fs(); - set_fs(KERNEL_DS); - /* The __user pointer casts are valid because of the set_fs() */ - ret = do_sigaltstack( - newstack ? (stack_t __user *) &uss : NULL, - oldstack ? (stack_t __user *) &uoss : NULL, - (unsigned long)sp); - set_fs(old_fs); - - /* Convert the old stack_t into a compat stack. 
-	if (!ret && oldstack &&
-	    (put_user(ptr_to_compat(uoss.ss_sp), &oldstack->ss_sp) ||
-	     __put_user(uoss.ss_flags, &oldstack->ss_flags) ||
-	     __put_user(uoss.ss_size, &oldstack->ss_size)))
-		return -EFAULT;
-	return ret;
-}
-
 static int compat_restore_sigframe(struct pt_regs *regs,
 				   struct compat_sigframe __user *sf)
 {
@@ -562,9 +421,7 @@ asmlinkage int compat_sys_rt_sigreturn(struct pt_regs *regs)
 	if (compat_restore_sigframe(regs, &frame->sig))
 		goto badframe;
 
-	if (compat_do_sigaltstack(ptr_to_compat(&frame->sig.uc.uc_stack),
-				  ptr_to_compat((void __user *)NULL),
-				  regs->compat_sp) == -EFAULT)
+	if (compat_restore_altstack(&frame->sig.uc.uc_stack))
 		goto badframe;
 
 	return regs->regs[0];
@@ -703,13 +560,9 @@ int compat_setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info,
 	err |= copy_siginfo_to_user32(&frame->info, info);
 
 	__put_user_error(0, &frame->sig.uc.uc_flags, err);
-	__put_user_error(NULL, &frame->sig.uc.uc_link, err);
+	__put_user_error(0, &frame->sig.uc.uc_link, err);
 
-	memset(&stack, 0, sizeof(stack));
-	stack.ss_sp = (compat_uptr_t)current->sas_ss_sp;
-	stack.ss_flags = sas_ss_flags(regs->compat_sp);
-	stack.ss_size = current->sas_ss_size;
-	err |= __copy_to_user(&frame->sig.uc.uc_stack, &stack, sizeof(stack));
+	err |= __compat_save_altstack(&frame->sig.uc.uc_stack, regs->compat_sp);
 
 	err |= compat_setup_sigframe(&frame->sig, regs, set);
 
@@ -742,75 +595,6 @@ int compat_setup_frame(int usig, struct k_sigaction *ka, sigset_t *set,
 	return err;
 }
 
-/*
- * RT signals don't have generic compat wrappers.
- * See arch/powerpc/kernel/signal_32.c
- */
-asmlinkage int compat_sys_rt_sigprocmask(int how, compat_sigset_t __user *set,
-					 compat_sigset_t __user *oset,
-					 compat_size_t sigsetsize)
-{
-	sigset_t s;
-	sigset_t __user *up;
-	int ret;
-	mm_segment_t old_fs = get_fs();
-
-	if (set) {
-		if (get_sigset_t(&s, set))
-			return -EFAULT;
-	}
-
-	set_fs(KERNEL_DS);
-	/* This is valid because of the set_fs() */
-	up = (sigset_t __user *) &s;
-	ret = sys_rt_sigprocmask(how, set ? up : NULL, oset ? up : NULL,
-				 sigsetsize);
-	set_fs(old_fs);
-	if (ret)
-		return ret;
-	if (oset) {
-		if (put_sigset_t(oset, &s))
-			return -EFAULT;
-	}
-	return 0;
-}
-
-asmlinkage int compat_sys_rt_sigpending(compat_sigset_t __user *set,
-					compat_size_t sigsetsize)
-{
-	sigset_t s;
-	int ret;
-	mm_segment_t old_fs = get_fs();
-
-	set_fs(KERNEL_DS);
-	/* The __user pointer cast is valid because of the set_fs() */
-	ret = sys_rt_sigpending((sigset_t __user *) &s, sigsetsize);
-	set_fs(old_fs);
-	if (!ret) {
-		if (put_sigset_t(set, &s))
-			return -EFAULT;
-	}
-	return ret;
-}
-
-asmlinkage int compat_sys_rt_sigqueueinfo(int pid, int sig,
-					  compat_siginfo_t __user *uinfo)
-{
-	siginfo_t info;
-	int ret;
-	mm_segment_t old_fs = get_fs();
-
-	ret = copy_siginfo_from_user32(&info, uinfo);
-	if (unlikely(ret))
-		return ret;
-
-	set_fs (KERNEL_DS);
-	/* The __user pointer cast is valid because of the set_fs() */
-	ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *) &info);
-	set_fs (old_fs);
-	return ret;
-}
-
 void compat_setup_restart_syscall(struct pt_regs *regs)
 {
 	regs->regs[7] = __NR_compat_restart_syscall;
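Note: restore_altstack(), __save_altstack() and their compat variants used in the two files above are generic helpers from the core signal code; they replace the hand-marshalled do_sigaltstack() paths removed here, but the userspace contract is unchanged: uc_stack in the ucontext still describes the stack configured with sigaltstack(2) and selected per-handler with SA_ONSTACK. A minimal userspace sketch of that contract (illustrative only, not part of the patch):

	#include <signal.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	static void handler(int sig)
	{
		int marker;
		/* The address of a local shows which stack the handler runs on;
		 * printf is not async-signal-safe but is fine for a demo. */
		printf("signal %d handled near %p\n", sig, (void *)&marker);
	}

	int main(void)
	{
		stack_t ss;
		struct sigaction sa;

		/* Configure an alternate signal stack; the kernel records it in
		 * uc_stack on delivery and restores it on sigreturn. */
		ss.ss_sp = malloc(SIGSTKSZ);
		ss.ss_size = SIGSTKSZ;
		ss.ss_flags = 0;
		if (sigaltstack(&ss, NULL))
			return 1;

		memset(&sa, 0, sizeof(sa));
		sa.sa_handler = handler;
		sa.sa_flags = SA_ONSTACK;	/* deliver on the alternate stack */
		sigaction(SIGUSR1, &sa, NULL);

		raise(SIGUSR1);
		return 0;
	}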
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 538300f2273..bdd34597254 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -233,7 +233,28 @@ void __init smp_prepare_boot_cpu(void)
 }
 
 static void (*smp_cross_call)(const struct cpumask *, unsigned int);
-static phys_addr_t cpu_release_addr[NR_CPUS];
+
+static const struct smp_enable_ops *enable_ops[] __initconst = {
+	&smp_spin_table_ops,
+	&smp_psci_ops,
+	NULL,
+};
+
+static const struct smp_enable_ops *smp_enable_ops[NR_CPUS];
+
+static const struct smp_enable_ops * __init smp_get_enable_ops(const char *name)
+{
+	const struct smp_enable_ops **ops = enable_ops;
+
+	while (*ops) {
+		if (!strcmp(name, (*ops)->name))
+			return *ops;
+
+		ops++;
+	}
+
+	return NULL;
+}
 
 /*
  * Enumerate the possible CPU set from the device tree.
@@ -252,22 +273,22 @@ void __init smp_init_cpus(void)
 		 * We currently support only the "spin-table" enable-method.
 		 */
 		enable_method = of_get_property(dn, "enable-method", NULL);
-		if (!enable_method || strcmp(enable_method, "spin-table")) {
-			pr_err("CPU %d: missing or invalid enable-method property: %s\n",
-			       cpu, enable_method);
+		if (!enable_method) {
+			pr_err("CPU %d: missing enable-method property\n", cpu);
 			goto next;
 		}
 
-		/*
-		 * Determine the address from which the CPU is polling.
-		 */
-		if (of_property_read_u64(dn, "cpu-release-addr",
-					 &cpu_release_addr[cpu])) {
-			pr_err("CPU %d: missing or invalid cpu-release-addr property\n",
-			       cpu);
+		smp_enable_ops[cpu] = smp_get_enable_ops(enable_method);
+
+		if (!smp_enable_ops[cpu]) {
+			pr_err("CPU %d: invalid enable-method property: %s\n",
+			       cpu, enable_method);
 			goto next;
 		}
 
+		if (smp_enable_ops[cpu]->init_cpu(dn, cpu))
+			goto next;
+
 		set_cpu_possible(cpu, true);
 next:
 		cpu++;
@@ -281,8 +302,7 @@ next:
 
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
-	int cpu;
-	void **release_addr;
+	int cpu, err;
 	unsigned int ncores = num_possible_cpus();
 
 	/*
@@ -291,30 +311,35 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	if (max_cpus > ncores)
 		max_cpus = ncores;
 
+	/* Don't bother if we're effectively UP */
+	if (max_cpus <= 1)
+		return;
+
 	/*
 	 * Initialise the present map (which describes the set of CPUs
 	 * actually populated at the present time) and release the
 	 * secondaries from the bootloader.
+	 *
+	 * Make sure we online at most (max_cpus - 1) additional CPUs.
 	 */
+	max_cpus--;
 	for_each_possible_cpu(cpu) {
 		if (max_cpus == 0)
 			break;
 
-		if (!cpu_release_addr[cpu])
+		if (cpu == smp_processor_id())
+			continue;
+
+		if (!smp_enable_ops[cpu])
 			continue;
 
-		release_addr = __va(cpu_release_addr[cpu]);
-		release_addr[0] = (void *)__pa(secondary_holding_pen);
-		__flush_dcache_area(release_addr, sizeof(release_addr[0]));
+		err = smp_enable_ops[cpu]->prepare_cpu(cpu);
+		if (err)
+			continue;
 
 		set_cpu_present(cpu, true);
 		max_cpus--;
 	}
-
-	/*
-	 * Send an event to wake up the secondaries.
-	 */
-	sev();
 }
diff --git a/arch/arm64/kernel/smp_psci.c b/arch/arm64/kernel/smp_psci.c
new file mode 100644
index 00000000000..112091684c2
--- /dev/null
+++ b/arch/arm64/kernel/smp_psci.c
@@ -0,0 +1,52 @@
+/*
+ * PSCI SMP initialisation
+ *
+ * Copyright (C) 2013 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/smp.h>
+
+#include <asm/psci.h>
+
+static int __init smp_psci_init_cpu(struct device_node *dn, int cpu)
+{
+	return 0;
+}
+
+static int __init smp_psci_prepare_cpu(int cpu)
+{
+	int err;
+
+	if (!psci_ops.cpu_on) {
+		pr_err("psci: no cpu_on method, not booting CPU%d\n", cpu);
+		return -ENODEV;
+	}
+
+	err = psci_ops.cpu_on(cpu, __pa(secondary_holding_pen));
+	if (err) {
+		pr_err("psci: failed to boot CPU%d (%d)\n", cpu, err);
+		return err;
+	}
+
+	return 0;
+}
+
+const struct smp_enable_ops smp_psci_ops __initconst = {
+	.name		= "psci",
+	.init_cpu	= smp_psci_init_cpu,
+	.prepare_cpu	= smp_psci_prepare_cpu,
+};
diff --git a/arch/arm64/kernel/smp_spin_table.c b/arch/arm64/kernel/smp_spin_table.c
new file mode 100644
index 00000000000..7c35fa682f7
--- /dev/null
+++ b/arch/arm64/kernel/smp_spin_table.c
@@ -0,0 +1,66 @@
+/*
+ * Spin Table SMP initialisation
+ *
+ * Copyright (C) 2013 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/smp.h>
+
+#include <asm/cacheflush.h>
+
+static phys_addr_t cpu_release_addr[NR_CPUS];
+
+static int __init smp_spin_table_init_cpu(struct device_node *dn, int cpu)
+{
+	/*
+	 * Determine the address from which the CPU is polling.
+	 */
+	if (of_property_read_u64(dn, "cpu-release-addr",
+				 &cpu_release_addr[cpu])) {
+		pr_err("CPU %d: missing or invalid cpu-release-addr property\n",
+		       cpu);
+
+		return -1;
+	}
+
+	return 0;
+}
+
+static int __init smp_spin_table_prepare_cpu(int cpu)
+{
+	void **release_addr;
+
+	if (!cpu_release_addr[cpu])
+		return -ENODEV;
+
+	release_addr = __va(cpu_release_addr[cpu]);
+	release_addr[0] = (void *)__pa(secondary_holding_pen);
+	__flush_dcache_area(release_addr, sizeof(release_addr[0]));
+
+	/*
+	 * Send an event to wake up the secondary CPU.
+	 */
+	sev();
+
+	return 0;
+}
+
+const struct smp_enable_ops smp_spin_table_ops __initconst = {
+	.name		= "spin-table",
+	.init_cpu	= smp_spin_table_init_cpu,
+	.prepare_cpu	= smp_spin_table_prepare_cpu,
+};
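Note: the three files above implement one pattern: a NULL-terminated table of named ops, matched once per CPU against the device tree's "enable-method" string, then invoked per boot stage. A self-contained sketch of the same dispatch shape (names are hypothetical, not kernel API):

	#include <stdio.h>
	#include <string.h>

	/* Same shape as smp_enable_ops: a name to match the device tree
	 * string, plus per-stage callbacks. */
	struct enable_ops {
		const char *name;
		int (*prepare)(int cpu);
	};

	static int spin_table_prepare(int cpu)
	{
		printf("cpu%d: write pen address, then sev\n", cpu);
		return 0;
	}

	static int psci_prepare(int cpu)
	{
		printf("cpu%d: call PSCI CPU_ON\n", cpu);
		return 0;
	}

	static const struct enable_ops spin_table_ops = { "spin-table", spin_table_prepare };
	static const struct enable_ops psci_ops = { "psci", psci_prepare };

	/* NULL-terminated table, walked exactly like smp_get_enable_ops(). */
	static const struct enable_ops *enable_ops[] = {
		&spin_table_ops,
		&psci_ops,
		NULL,
	};

	static const struct enable_ops *get_enable_ops(const char *name)
	{
		const struct enable_ops **ops = enable_ops;

		while (*ops) {
			if (!strcmp(name, (*ops)->name))
				return *ops;
			ops++;
		}
		return NULL;
	}

	int main(void)
	{
		const struct enable_ops *ops = get_enable_ops("psci");

		return ops ? ops->prepare(1) : -1;
	}

Keeping the table NULL-terminated means a new enable method is added by appending one entry, which is exactly how smp_psci_ops slots in beside the spin-table.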
diff --git a/arch/arm64/kernel/sys.c b/arch/arm64/kernel/sys.c
index 8292a9b090f..3fa98ff14f0 100644
--- a/arch/arm64/kernel/sys.c
+++ b/arch/arm64/kernel/sys.c
@@ -40,7 +40,6 @@ asmlinkage long sys_mmap(unsigned long addr, unsigned long len,
  * Wrappers to pass the pt_regs argument.
  */
 #define sys_rt_sigreturn	sys_rt_sigreturn_wrapper
-#define sys_sigaltstack		sys_sigaltstack_wrapper
 
 #include <asm/syscalls.h>
 
diff --git a/arch/arm64/kernel/sys32.S b/arch/arm64/kernel/sys32.S
index 7ef59e9245e..9416d045a68 100644
--- a/arch/arm64/kernel/sys32.S
+++ b/arch/arm64/kernel/sys32.S
@@ -39,11 +39,6 @@ compat_sys_rt_sigreturn_wrapper:
 	b	compat_sys_rt_sigreturn
 ENDPROC(compat_sys_rt_sigreturn_wrapper)
 
-compat_sys_sigaltstack_wrapper:
-	ldr	x2, [sp, #S_COMPAT_SP]
-	b	compat_do_sigaltstack
-ENDPROC(compat_sys_sigaltstack_wrapper)
-
 compat_sys_statfs64_wrapper:
 	mov	w3, #84
 	cmp	w1, #88
@@ -63,11 +58,6 @@ ENDPROC(compat_sys_fstatfs64_wrapper)
  * in registers or that take 32-bit parameters which require sign
  * extension.
  */
-compat_sys_lseek_wrapper:
-	sxtw	x1, w1
-	b	sys_lseek
-ENDPROC(compat_sys_lseek_wrapper)
-
 compat_sys_pread64_wrapper:
 	orr	x3, x4, x5, lsl #32
 	b	sys_pread64
diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c
index f7b05edf8ce..26e9c4eeaba 100644
--- a/arch/arm64/kernel/sys_compat.c
+++ b/arch/arm64/kernel/sys_compat.c
@@ -28,21 +28,6 @@
 #include <asm/cacheflush.h>
 #include <asm/unistd32.h>
 
-asmlinkage int compat_sys_sched_rr_get_interval(compat_pid_t pid,
-						struct compat_timespec __user *interval)
-{
-	struct timespec t;
-	int ret;
-	mm_segment_t old_fs = get_fs();
-
-	set_fs(KERNEL_DS);
-	ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
-	set_fs(old_fs);
-	if (put_compat_timespec(&t, interval))
-		return -EFAULT;
-	return ret;
-}
-
 static inline void
 do_compat_cache_op(unsigned long start, unsigned long end, int flags)
 {
diff --git a/arch/arm64/kernel/time.c b/arch/arm64/kernel/time.c
index 3b4b7258f49..b0ef18d14c3 100644
--- a/arch/arm64/kernel/time.c
+++ b/arch/arm64/kernel/time.c
@@ -31,8 +31,9 @@
 #include <linux/syscore_ops.h>
 #include <linux/timer.h>
 #include <linux/irq.h>
+#include <linux/delay.h>
 
-#include <clocksource/arm_generic.h>
+#include <clocksource/arm_arch_timer.h>
 
 #include <asm/thread_info.h>
 #include <asm/stacktrace.h>
@@ -59,7 +60,31 @@ unsigned long profile_pc(struct pt_regs *regs)
 EXPORT_SYMBOL(profile_pc);
 #endif
 
+static u64 sched_clock_mult __read_mostly;
+
+unsigned long long notrace sched_clock(void)
+{
+	return arch_timer_read_counter() * sched_clock_mult;
+}
+
+int read_current_timer(unsigned long *timer_value)
+{
+	*timer_value = arch_timer_read_counter();
+	return 0;
+}
+
 void __init time_init(void)
 {
-	arm_generic_timer_init();
+	u32 arch_timer_rate;
+
+	if (arch_timer_init())
+		panic("Unable to initialise architected timer.\n");
+
+	arch_timer_rate = arch_timer_get_rate();
+
+	/* Cache the sched_clock multiplier to save a divide in the hot path. */
+	sched_clock_mult = NSEC_PER_SEC / arch_timer_rate;
+
+	/* Calibrate the delay loop directly */
+	lpj_fine = arch_timer_rate / HZ;
 }
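Note: sched_clock_mult is computed with an integer division, so the cached multiplier is exact only when the timer frequency divides NSEC_PER_SEC evenly; at other rates sched_clock() systematically under-reports elapsed time. A stand-alone demonstration of the truncation (the rates below are invented for illustration):

	#include <stdio.h>

	#define NSEC_PER_SEC 1000000000ULL

	int main(void)
	{
		/* Hypothetical counter frequencies in Hz. */
		unsigned long long rates[] = { 100000000ULL, 24000000ULL, 19200000ULL };

		for (int i = 0; i < 3; i++) {
			unsigned long long mult = NSEC_PER_SEC / rates[i];
			/* ns actually reported per real second of ticks */
			unsigned long long ns = mult * rates[i];
			printf("%llu Hz: mult=%llu, drift=%llu ns/s\n",
			       rates[i], mult, NSEC_PER_SEC - ns);
		}
		return 0;
	}

At 100 MHz the multiplier is an exact 10 ns/tick; at 24 MHz it truncates to 41 ns/tick and loses 16 ms per second, which is why later approaches scale with a shift/mult pair instead of a plain divide.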
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 3883f842434..b3c5f628bdb 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -242,7 +242,7 @@ void die(const char *str, struct pt_regs *regs, int err)
 		crash_kexec(regs);
 
 	bust_spinlocks(0);
-	add_taint(TAINT_DIE);
+	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
 	raw_spin_unlock_irq(&die_lock);
 	oops_exit();
 
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index c958cb84d75..6a389dc1bd4 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -252,10 +252,6 @@ void update_vsyscall(struct timekeeper *tk)
 
 void update_vsyscall_tz(void)
 {
-	++vdso_data->tb_seq_count;
-	smp_wmb();
 	vdso_data->tz_minuteswest	= sys_tz.tz_minuteswest;
 	vdso_data->tz_dsttime		= sys_tz.tz_dsttime;
-	smp_wmb();
-	++vdso_data->tb_seq_count;
 }
diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S
index 8bf658d974f..f0a6d10b521 100644
--- a/arch/arm64/kernel/vdso/gettimeofday.S
+++ b/arch/arm64/kernel/vdso/gettimeofday.S
@@ -73,8 +73,6 @@ ENTRY(__kernel_gettimeofday)
 	/* If tz is NULL, return 0. */
 	cbz	x1, 3f
 	ldp	w4, w5, [vdso_data, #VDSO_TZ_MINWEST]
-	seqcnt_read w9
-	seqcnt_check w9, 1b
 	stp	w4, w5, [x1, #TZ_MINWEST]
3:
 	mov	x0, xzr
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index a6885d896ab..224b44ab534 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -25,6 +25,7 @@
 #include <linux/nodemask.h>
 #include <linux/memblock.h>
 #include <linux/fs.h>
+#include <linux/io.h>
 
 #include <asm/cputype.h>
 #include <asm/sections.h>
@@ -251,6 +252,47 @@ static void __init create_mapping(phys_addr_t phys, unsigned long virt,
 	} while (pgd++, addr = next, addr != end);
 }
 
+#ifdef CONFIG_EARLY_PRINTK
+/*
+ * Create an early I/O mapping using the pgd/pmd entries already populated
+ * in head.S as this function is called too early to allocate any memory. The
+ * mapping size is 2MB with 4KB pages or 64KB with 64KB pages.
+ */
+void __iomem * __init early_io_map(phys_addr_t phys, unsigned long virt)
+{
+	unsigned long size, mask;
+	bool page64k = IS_ENABLED(CONFIG_ARM64_64K_PAGES);
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *pte;
+
+	/*
+	 * No early pte entries with !ARM64_64K_PAGES configuration, so using
+	 * sections (pmd).
+	 */
+	size = page64k ? PAGE_SIZE : SECTION_SIZE;
+	mask = ~(size - 1);
+
+	pgd = pgd_offset_k(virt);
+	pud = pud_offset(pgd, virt);
+	if (pud_none(*pud))
+		return NULL;
+	pmd = pmd_offset(pud, virt);
+
+	if (page64k) {
+		if (pmd_none(*pmd))
+			return NULL;
+		pte = pte_offset_kernel(pmd, virt);
+		set_pte(pte, __pte((phys & mask) | PROT_DEVICE_nGnRE));
+	} else {
+		set_pmd(pmd, __pmd((phys & mask) | PROT_SECT_DEVICE_nGnRE));
+	}
+
+	return (void __iomem *)((virt & mask) + (phys & ~mask));
+}
+#endif
+
 static void __init map_mem(void)
 {
 	struct memblock_region *reg;
@@ -392,4 +434,7 @@ int __meminit vmemmap_populate(struct page *start_page,
 	return 0;
 }
 #endif	/* CONFIG_ARM64_64K_PAGES */
+void vmemmap_free(struct page *memmap, unsigned long nr_pages)
+{
+}
 #endif	/* CONFIG_SPARSEMEM_VMEMMAP */
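Note: the pointer returned by early_io_map() is assembled by masking: (virt & mask) is the base of the pre-populated 2MB section (or 64KB page) and (phys & ~mask) re-applies the offset of the target device within it. A quick userspace check of that arithmetic (all addresses below are invented):

	#include <stdint.h>
	#include <stdio.h>

	#define SECTION_SIZE (2ULL * 1024 * 1024)	/* 2MB section, 4KB page config */

	int main(void)
	{
		uint64_t mask = ~(SECTION_SIZE - 1);
		uint64_t phys = 0x9000123ULL;		/* invented UART address */
		uint64_t virt = 0xffffffbffbc00000ULL;	/* invented early window */

		/* Base of the section mapping plus the offset of phys within its
		 * section -- the same expression early_io_map() returns. */
		uint64_t io = (virt & mask) + (phys & ~mask);

		printf("phys 0x%llx -> io 0x%llx\n",
		       (unsigned long long)phys, (unsigned long long)io);
		return 0;
	}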