Diffstat (limited to 'arch/arm64/include')
58 files changed, 914 insertions, 417 deletions
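A recurring change in this diff is the rework of the arm64 atomics and barriers: acquire/release exclusives (ldaxr/stlxr) become relaxed exclusives plus an explicit full barrier, stale "cc" clobbers are dropped, and dmb()/dsb() gain an explicit domain argument. As a readability aid only (not part of the patch), the fully-ordered pattern from the atomic.h hunk below looks like this:

	static inline int atomic_add_return(int i, atomic_t *v)
	{
		unsigned long tmp;
		int result;

		asm volatile("// atomic_add_return\n"
	"1:	ldxr	%w0, %2\n"		// relaxed load-exclusive
	"	add	%w0, %w0, %w3\n"
	"	stlxr	%w1, %w0, %2\n"		// store-release; retry if the exclusive failed
	"	cbnz	%w1, 1b"
		: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
		: "Ir" (i)
		: "memory");			// no "cc" clobber: nothing here modifies the flags

		smp_mb();			// upgrade the release ordering to a full barrier
		return result;
	}

The same ldxr ... stlxr + smp_mb() shape recurs in the atomic64_*, cmpxchg, xchg, futex and rwlock hunks that follow.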
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild index 71c53ecfcc3..0b3fcf86e6b 100644 --- a/arch/arm64/include/asm/Kbuild +++ b/arch/arm64/include/asm/Kbuild @@ -10,8 +10,10 @@ generic-y += delay.h generic-y += div64.h generic-y += dma.h generic-y += emergency-restart.h +generic-y += early_ioremap.h generic-y += errno.h generic-y += ftrace.h +generic-y += hash.h generic-y += hw_irq.h generic-y += ioctl.h generic-y += ioctls.h @@ -22,24 +24,27 @@ generic-y += kmap_types.h generic-y += kvm_para.h generic-y += local.h generic-y += local64.h +generic-y += mcs_spinlock.h generic-y += mman.h generic-y += msgbuf.h generic-y += mutex.h generic-y += pci.h generic-y += poll.h -generic-y += posix_types.h +generic-y += preempt.h generic-y += resource.h +generic-y += rwsem.h generic-y += scatterlist.h generic-y += sections.h generic-y += segment.h generic-y += sembuf.h generic-y += serial.h generic-y += shmbuf.h +generic-y += simd.h generic-y += sizes.h generic-y += socket.h generic-y += sockios.h -generic-y += switch_to.h generic-y += swab.h +generic-y += switch_to.h generic-y += termbits.h generic-y += termios.h generic-y += topology.h @@ -49,5 +54,3 @@ generic-y += unaligned.h generic-y += user.h generic-y += vga.h generic-y += xor.h -generic-y += preempt.h -generic-y += hash.h diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index fd3e3924041..5901480bfdc 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -21,6 +21,7 @@ #endif #include <asm/ptrace.h> +#include <asm/thread_info.h> /* * Stack pushing/popping (register pairs only). Equivalent to store decrement @@ -68,23 +69,31 @@ msr daifclr, #8 .endm - .macro disable_step, tmp + .macro disable_step_tsk, flgs, tmp + tbz \flgs, #TIF_SINGLESTEP, 9990f mrs \tmp, mdscr_el1 bic \tmp, \tmp, #1 msr mdscr_el1, \tmp + isb // Synchronise with enable_dbg +9990: .endm - .macro enable_step, tmp + .macro enable_step_tsk, flgs, tmp + tbz \flgs, #TIF_SINGLESTEP, 9990f + disable_dbg mrs \tmp, mdscr_el1 orr \tmp, \tmp, #1 msr mdscr_el1, \tmp +9990: .endm - .macro enable_dbg_if_not_stepping, tmp - mrs \tmp, mdscr_el1 - tbnz \tmp, #0, 9990f - enable_dbg -9990: +/* + * Enable both debug exceptions and interrupts. This is likely to be + * faster than two daifclr operations, since writes to this register + * are self-synchronising. 
+ */ + .macro enable_dbg_and_irq + msr daifclr, #(8 | 2) .endm /* diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h index 01de5aaa3ed..65f1569ac96 100644 --- a/arch/arm64/include/asm/atomic.h +++ b/arch/arm64/include/asm/atomic.h @@ -54,8 +54,7 @@ static inline void atomic_add(int i, atomic_t *v) " stxr %w1, %w0, %2\n" " cbnz %w1, 1b" : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) - : "Ir" (i) - : "cc"); + : "Ir" (i)); } static inline int atomic_add_return(int i, atomic_t *v) @@ -64,14 +63,15 @@ static inline int atomic_add_return(int i, atomic_t *v) int result; asm volatile("// atomic_add_return\n" -"1: ldaxr %w0, %2\n" +"1: ldxr %w0, %2\n" " add %w0, %w0, %w3\n" " stlxr %w1, %w0, %2\n" " cbnz %w1, 1b" : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) : "Ir" (i) - : "cc", "memory"); + : "memory"); + smp_mb(); return result; } @@ -86,8 +86,7 @@ static inline void atomic_sub(int i, atomic_t *v) " stxr %w1, %w0, %2\n" " cbnz %w1, 1b" : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) - : "Ir" (i) - : "cc"); + : "Ir" (i)); } static inline int atomic_sub_return(int i, atomic_t *v) @@ -96,14 +95,15 @@ static inline int atomic_sub_return(int i, atomic_t *v) int result; asm volatile("// atomic_sub_return\n" -"1: ldaxr %w0, %2\n" +"1: ldxr %w0, %2\n" " sub %w0, %w0, %w3\n" " stlxr %w1, %w0, %2\n" " cbnz %w1, 1b" : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) : "Ir" (i) - : "cc", "memory"); + : "memory"); + smp_mb(); return result; } @@ -112,17 +112,20 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new) unsigned long tmp; int oldval; + smp_mb(); + asm volatile("// atomic_cmpxchg\n" -"1: ldaxr %w1, %2\n" +"1: ldxr %w1, %2\n" " cmp %w1, %w3\n" " b.ne 2f\n" -" stlxr %w0, %w4, %2\n" +" stxr %w0, %w4, %2\n" " cbnz %w0, 1b\n" "2:" : "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter) : "Ir" (old), "r" (new) - : "cc", "memory"); + : "cc"); + smp_mb(); return oldval; } @@ -149,17 +152,12 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u) #define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0) -#define smp_mb__before_atomic_dec() smp_mb() -#define smp_mb__after_atomic_dec() smp_mb() -#define smp_mb__before_atomic_inc() smp_mb() -#define smp_mb__after_atomic_inc() smp_mb() - /* * 64-bit atomic operations. 
*/ #define ATOMIC64_INIT(i) { (i) } -#define atomic64_read(v) (*(volatile long long *)&(v)->counter) +#define atomic64_read(v) (*(volatile long *)&(v)->counter) #define atomic64_set(v,i) (((v)->counter) = (i)) static inline void atomic64_add(u64 i, atomic64_t *v) @@ -173,8 +171,7 @@ static inline void atomic64_add(u64 i, atomic64_t *v) " stxr %w1, %0, %2\n" " cbnz %w1, 1b" : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) - : "Ir" (i) - : "cc"); + : "Ir" (i)); } static inline long atomic64_add_return(long i, atomic64_t *v) @@ -183,14 +180,15 @@ static inline long atomic64_add_return(long i, atomic64_t *v) unsigned long tmp; asm volatile("// atomic64_add_return\n" -"1: ldaxr %0, %2\n" +"1: ldxr %0, %2\n" " add %0, %0, %3\n" " stlxr %w1, %0, %2\n" " cbnz %w1, 1b" : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) : "Ir" (i) - : "cc", "memory"); + : "memory"); + smp_mb(); return result; } @@ -205,8 +203,7 @@ static inline void atomic64_sub(u64 i, atomic64_t *v) " stxr %w1, %0, %2\n" " cbnz %w1, 1b" : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) - : "Ir" (i) - : "cc"); + : "Ir" (i)); } static inline long atomic64_sub_return(long i, atomic64_t *v) @@ -215,14 +212,15 @@ static inline long atomic64_sub_return(long i, atomic64_t *v) unsigned long tmp; asm volatile("// atomic64_sub_return\n" -"1: ldaxr %0, %2\n" +"1: ldxr %0, %2\n" " sub %0, %0, %3\n" " stlxr %w1, %0, %2\n" " cbnz %w1, 1b" : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) : "Ir" (i) - : "cc", "memory"); + : "memory"); + smp_mb(); return result; } @@ -231,17 +229,20 @@ static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new) long oldval; unsigned long res; + smp_mb(); + asm volatile("// atomic64_cmpxchg\n" -"1: ldaxr %1, %2\n" +"1: ldxr %1, %2\n" " cmp %1, %3\n" " b.ne 2f\n" -" stlxr %w0, %4, %2\n" +" stxr %w0, %4, %2\n" " cbnz %w0, 1b\n" "2:" : "=&r" (res), "=&r" (oldval), "+Q" (ptr->counter) : "Ir" (old), "r" (new) - : "cc", "memory"); + : "cc"); + smp_mb(); return oldval; } @@ -253,11 +254,12 @@ static inline long atomic64_dec_if_positive(atomic64_t *v) unsigned long tmp; asm volatile("// atomic64_dec_if_positive\n" -"1: ldaxr %0, %2\n" +"1: ldxr %0, %2\n" " subs %0, %0, #1\n" " b.mi 2f\n" " stlxr %w1, %0, %2\n" " cbnz %w1, 1b\n" +" dmb ish\n" "2:" : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) : diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h index 78e20ba8806..6389d60574d 100644 --- a/arch/arm64/include/asm/barrier.h +++ b/arch/arm64/include/asm/barrier.h @@ -25,11 +25,12 @@ #define wfi() asm volatile("wfi" : : : "memory") #define isb() asm volatile("isb" : : : "memory") -#define dsb() asm volatile("dsb sy" : : : "memory") +#define dmb(opt) asm volatile("dmb " #opt : : : "memory") +#define dsb(opt) asm volatile("dsb " #opt : : : "memory") -#define mb() dsb() -#define rmb() asm volatile("dsb ld" : : : "memory") -#define wmb() asm volatile("dsb st" : : : "memory") +#define mb() dsb(sy) +#define rmb() dsb(ld) +#define wmb() dsb(st) #ifndef CONFIG_SMP #define smp_mb() barrier() @@ -39,7 +40,7 @@ #define smp_store_release(p, v) \ do { \ compiletime_assert_atomic_type(*p); \ - smp_mb(); \ + barrier(); \ ACCESS_ONCE(*p) = (v); \ } while (0) @@ -47,15 +48,15 @@ do { \ ({ \ typeof(*p) ___p1 = ACCESS_ONCE(*p); \ compiletime_assert_atomic_type(*p); \ - smp_mb(); \ + barrier(); \ ___p1; \ }) #else -#define smp_mb() asm volatile("dmb ish" : : : "memory") -#define smp_rmb() asm volatile("dmb ishld" : : : "memory") -#define smp_wmb() asm volatile("dmb ishst" : : : "memory") +#define smp_mb() 
dmb(ish) +#define smp_rmb() dmb(ishld) +#define smp_wmb() dmb(ishst) #define smp_store_release(p, v) \ do { \ @@ -97,6 +98,9 @@ do { \ #define set_mb(var, value) do { var = value; smp_mb(); } while (0) #define nop() asm volatile("nop"); +#define smp_mb__before_atomic() smp_mb() +#define smp_mb__after_atomic() smp_mb() + #endif /* __ASSEMBLY__ */ #endif /* __ASM_BARRIER_H */ diff --git a/arch/arm64/include/asm/bitops.h b/arch/arm64/include/asm/bitops.h index aa5b59d6ba4..9c19594ce7c 100644 --- a/arch/arm64/include/asm/bitops.h +++ b/arch/arm64/include/asm/bitops.h @@ -17,17 +17,8 @@ #define __ASM_BITOPS_H #include <linux/compiler.h> - #include <asm/barrier.h> -/* - * clear_bit may not imply a memory barrier - */ -#ifndef smp_mb__before_clear_bit -#define smp_mb__before_clear_bit() smp_mb() -#define smp_mb__after_clear_bit() smp_mb() -#endif - #ifndef _LINUX_BITOPS_H #error only <linux/bitops.h> can be included directly #endif diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h index 390308a67f0..88cc05b5f3a 100644 --- a/arch/arm64/include/asm/cache.h +++ b/arch/arm64/include/asm/cache.h @@ -16,6 +16,8 @@ #ifndef __ASM_CACHE_H #define __ASM_CACHE_H +#include <asm/cachetype.h> + #define L1_CACHE_SHIFT 6 #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) @@ -27,6 +29,15 @@ * the CPU. */ #define ARCH_DMA_MINALIGN L1_CACHE_BYTES -#define ARCH_SLAB_MINALIGN 8 + +#ifndef __ASSEMBLY__ + +static inline int cache_line_size(void) +{ + u32 cwg = cache_type_cwg(); + return cwg ? 4 << cwg : L1_CACHE_BYTES; +} + +#endif /* __ASSEMBLY__ */ #endif diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h index fea9ee32720..a5176cf32da 100644 --- a/arch/arm64/include/asm/cacheflush.h +++ b/arch/arm64/include/asm/cacheflush.h @@ -85,6 +85,13 @@ static inline void flush_cache_page(struct vm_area_struct *vma, } /* + * Cache maintenance functions used by the DMA API. No to be used directly. + */ +extern void __dma_map_area(const void *, size_t, int); +extern void __dma_unmap_area(const void *, size_t, int); +extern void __dma_flush_range(const void *, const void *); + +/* * Copy user data from/to a page which is mapped into a different * processes address space. Really, we want to allow our "user * space" model to handle this. @@ -116,6 +123,7 @@ extern void flush_dcache_page(struct page *); static inline void __flush_icache_all(void) { asm("ic ialluis"); + dsb(ish); } #define flush_dcache_mmap_lock(mapping) \ @@ -142,7 +150,7 @@ static inline void flush_cache_vmap(unsigned long start, unsigned long end) * set_pte_at() called from vmap_pte_range() does not * have a DSB after cleaning the cache line. 
*/ - dsb(); + dsb(ish); } static inline void flush_cache_vunmap(unsigned long start, unsigned long end) diff --git a/arch/arm64/include/asm/cachetype.h b/arch/arm64/include/asm/cachetype.h index 85f5f511352..4b23e758d5e 100644 --- a/arch/arm64/include/asm/cachetype.h +++ b/arch/arm64/include/asm/cachetype.h @@ -20,12 +20,16 @@ #define CTR_L1IP_SHIFT 14 #define CTR_L1IP_MASK 3 +#define CTR_CWG_SHIFT 24 +#define CTR_CWG_MASK 15 #define ICACHE_POLICY_RESERVED 0 #define ICACHE_POLICY_AIVIVT 1 #define ICACHE_POLICY_VIPT 2 #define ICACHE_POLICY_PIPT 3 +#ifndef __ASSEMBLY__ + static inline u32 icache_policy(void) { return (read_cpuid_cachetype() >> CTR_L1IP_SHIFT) & CTR_L1IP_MASK; @@ -45,4 +49,11 @@ static inline int icache_is_aivivt(void) return icache_policy() == ICACHE_POLICY_AIVIVT; } +static inline u32 cache_type_cwg(void) +{ + return (read_cpuid_cachetype() >> CTR_CWG_SHIFT) & CTR_CWG_MASK; +} + +#endif /* __ASSEMBLY__ */ + #endif /* __ASM_CACHETYPE_H */ diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h index 56166d7f4a2..ddb9d783055 100644 --- a/arch/arm64/include/asm/cmpxchg.h +++ b/arch/arm64/include/asm/cmpxchg.h @@ -29,49 +29,55 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size switch (size) { case 1: asm volatile("// __xchg1\n" - "1: ldaxrb %w0, %2\n" + "1: ldxrb %w0, %2\n" " stlxrb %w1, %w3, %2\n" " cbnz %w1, 1b\n" : "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr) : "r" (x) - : "cc", "memory"); + : "memory"); break; case 2: asm volatile("// __xchg2\n" - "1: ldaxrh %w0, %2\n" + "1: ldxrh %w0, %2\n" " stlxrh %w1, %w3, %2\n" " cbnz %w1, 1b\n" : "=&r" (ret), "=&r" (tmp), "+Q" (*(u16 *)ptr) : "r" (x) - : "cc", "memory"); + : "memory"); break; case 4: asm volatile("// __xchg4\n" - "1: ldaxr %w0, %2\n" + "1: ldxr %w0, %2\n" " stlxr %w1, %w3, %2\n" " cbnz %w1, 1b\n" : "=&r" (ret), "=&r" (tmp), "+Q" (*(u32 *)ptr) : "r" (x) - : "cc", "memory"); + : "memory"); break; case 8: asm volatile("// __xchg8\n" - "1: ldaxr %0, %2\n" + "1: ldxr %0, %2\n" " stlxr %w1, %3, %2\n" " cbnz %w1, 1b\n" : "=&r" (ret), "=&r" (tmp), "+Q" (*(u64 *)ptr) : "r" (x) - : "cc", "memory"); + : "memory"); break; default: BUILD_BUG(); } + smp_mb(); return ret; } #define xchg(ptr,x) \ - ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) +({ \ + __typeof__(*(ptr)) __ret; \ + __ret = (__typeof__(*(ptr))) \ + __xchg((unsigned long)(x), (ptr), sizeof(*(ptr))); \ + __ret; \ +}) static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h index fda2704b3f9..253e33bc94f 100644 --- a/arch/arm64/include/asm/compat.h +++ b/arch/arm64/include/asm/compat.h @@ -228,7 +228,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr) return (u32)(unsigned long)uptr; } -#define compat_user_stack_pointer() (current_pt_regs()->compat_sp) +#define compat_user_stack_pointer() (user_stack_pointer(current_pt_regs())) static inline void __user *arch_compat_alloc_user_space(long len) { @@ -305,11 +305,6 @@ static inline int is_compat_thread(struct thread_info *thread) #else /* !CONFIG_COMPAT */ -static inline int is_compat_task(void) -{ - return 0; -} - static inline int is_compat_thread(struct thread_info *thread) { return 0; diff --git a/arch/arm64/include/asm/cpu_ops.h b/arch/arm64/include/asm/cpu_ops.h index 15241307650..d7b4b38a8e8 100644 --- a/arch/arm64/include/asm/cpu_ops.h +++ b/arch/arm64/include/asm/cpu_ops.h @@ -39,6 +39,7 @@ struct 
device_node; * from the cpu to be killed. * @cpu_die: Makes a cpu leave the kernel. Must not fail. Called from the * cpu being killed. + * @cpu_kill: Ensures a cpu has left the kernel. Called from another cpu. * @cpu_suspend: Suspends a cpu and saves the required context. May fail owing * to wrong parameters or error conditions. Called from the * CPU being suspended. Must be called with IRQs disabled. @@ -52,6 +53,7 @@ struct cpu_operations { #ifdef CONFIG_HOTPLUG_CPU int (*cpu_disable)(unsigned int cpu); void (*cpu_die)(unsigned int cpu); + int (*cpu_kill)(unsigned int cpu); #endif #ifdef CONFIG_ARM64_CPU_SUSPEND int (*cpu_suspend)(unsigned long); diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h new file mode 100644 index 00000000000..cd4ac051648 --- /dev/null +++ b/arch/arm64/include/asm/cpufeature.h @@ -0,0 +1,29 @@ +/* + * Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __ASM_CPUFEATURE_H +#define __ASM_CPUFEATURE_H + +#include <asm/hwcap.h> + +/* + * In the arm64 world (as in the ARM world), elf_hwcap is used both internally + * in the kernel and for user space to keep track of which optional features + * are supported by the current system. So let's map feature 'x' to HWCAP_x. + * Note that HWCAP_x constants are bit fields so we need to take the log. + */ + +#define MAX_CPU_FEATURES (8 * sizeof(elf_hwcap)) +#define cpu_feature(x) ilog2(HWCAP_ ## x) + +static inline bool cpu_have_feature(unsigned int num) +{ + return elf_hwcap & (1UL << num); +} + +#endif diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index c404fb0df3a..27f54a7cc81 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h @@ -41,6 +41,7 @@ #define ARM_CPU_PART_AEM_V8 0xD0F0 #define ARM_CPU_PART_FOUNDATION 0xD000 +#define ARM_CPU_PART_CORTEX_A53 0xD030 #define ARM_CPU_PART_CORTEX_A57 0xD070 #define APM_CPU_PART_POTENZA 0x0000 diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h index 62314791570..6e9b5b36921 100644 --- a/arch/arm64/include/asm/debug-monitors.h +++ b/arch/arm64/include/asm/debug-monitors.h @@ -26,6 +26,53 @@ #define DBG_ESR_EVT_HWWP 0x2 #define DBG_ESR_EVT_BRK 0x6 +/* + * Break point instruction encoding + */ +#define BREAK_INSTR_SIZE 4 + +/* + * ESR values expected for dynamic and compile time BRK instruction + */ +#define DBG_ESR_VAL_BRK(x) (0xf2000000 | ((x) & 0xfffff)) + +/* + * #imm16 values used for BRK instruction generation + * Allowed values for kgbd are 0x400 - 0x7ff + * 0x400: for dynamic BRK instruction + * 0x401: for compile time BRK instruction + */ +#define KGDB_DYN_DGB_BRK_IMM 0x400 +#define KDBG_COMPILED_DBG_BRK_IMM 0x401 + +/* + * BRK instruction encoding + * The #imm16 value should be placed at bits[20:5] within BRK ins + */ +#define AARCH64_BREAK_MON 0xd4200000 + +/* + * Extract byte from BRK instruction + */ +#define KGDB_DYN_DGB_BRK_INS_BYTE(x) \ + ((((AARCH64_BREAK_MON) & 0xffe0001f) >> (x * 8)) & 0xff) + +/* + * Extract byte from BRK #imm16 + */ +#define KGBD_DYN_DGB_BRK_IMM_BYTE(x) \ + (((((KGDB_DYN_DGB_BRK_IMM) & 0xffff) << 5) >> (x * 8)) & 0xff) + +#define KGDB_DYN_DGB_BRK_BYTE(x) \ + (KGDB_DYN_DGB_BRK_INS_BYTE(x) | KGBD_DYN_DGB_BRK_IMM_BYTE(x)) + +#define KGDB_DYN_BRK_INS_BYTE0 KGDB_DYN_DGB_BRK_BYTE(0) +#define 
KGDB_DYN_BRK_INS_BYTE1 KGDB_DYN_DGB_BRK_BYTE(1) +#define KGDB_DYN_BRK_INS_BYTE2 KGDB_DYN_DGB_BRK_BYTE(2) +#define KGDB_DYN_BRK_INS_BYTE3 KGDB_DYN_DGB_BRK_BYTE(3) + +#define CACHE_FLUSH_IS_SAFE 1 + enum debug_el { DBG_ACTIVE_EL0 = 0, DBG_ACTIVE_EL1, @@ -43,23 +90,6 @@ enum debug_el { #ifndef __ASSEMBLY__ struct task_struct; -#define local_dbg_save(flags) \ - do { \ - typecheck(unsigned long, flags); \ - asm volatile( \ - "mrs %0, daif // local_dbg_save\n" \ - "msr daifset, #8" \ - : "=r" (flags) : : "memory"); \ - } while (0) - -#define local_dbg_restore(flags) \ - do { \ - typecheck(unsigned long, flags); \ - asm volatile( \ - "msr daif, %0 // local_dbg_restore\n" \ - : : "r" (flags) : "memory"); \ - } while (0) - #define DBG_ARCH_ID_RESERVED 0 /* In case of ptrace ABI updates. */ #define DBG_HOOK_HANDLED 0 diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h index fd0c0c0e447..dc82e52acdb 100644 --- a/arch/arm64/include/asm/dma-mapping.h +++ b/arch/arm64/include/asm/dma-mapping.h @@ -26,10 +26,10 @@ #include <xen/xen.h> #include <asm/xen/hypervisor.h> -#define ARCH_HAS_DMA_GET_REQUIRED_MASK - #define DMA_ERROR_CODE (~(dma_addr_t)0) extern struct dma_map_ops *dma_ops; +extern struct dma_map_ops coherent_swiotlb_dma_ops; +extern struct dma_map_ops noncoherent_swiotlb_dma_ops; static inline struct dma_map_ops *__generic_dma_ops(struct device *dev) { @@ -47,6 +47,11 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) return __generic_dma_ops(dev); } +static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops) +{ + dev->archdata.dma_ops = ops; +} + #include <asm-generic/dma-mapping-common.h> static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h new file mode 100644 index 00000000000..5a46c4e7f53 --- /dev/null +++ b/arch/arm64/include/asm/efi.h @@ -0,0 +1,14 @@ +#ifndef _ASM_EFI_H +#define _ASM_EFI_H + +#include <asm/io.h> + +#ifdef CONFIG_EFI +extern void efi_init(void); +extern void efi_idmap_init(void); +#else +#define efi_init() +#define efi_idmap_init() +#endif + +#endif /* _ASM_EFI_H */ diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h index 78834123a32..72674f4c387 100644 --- a/arch/arm64/include/asm/esr.h +++ b/arch/arm64/include/asm/esr.h @@ -18,9 +18,11 @@ #ifndef __ASM_ESR_H #define __ASM_ESR_H -#define ESR_EL1_EC_SHIFT (26) -#define ESR_EL1_IL (1U << 25) +#define ESR_EL1_WRITE (1 << 6) +#define ESR_EL1_CM (1 << 8) +#define ESR_EL1_IL (1 << 25) +#define ESR_EL1_EC_SHIFT (26) #define ESR_EL1_EC_UNKNOWN (0x00) #define ESR_EL1_EC_WFI (0x01) #define ESR_EL1_EC_CP15_32 (0x03) @@ -42,7 +44,7 @@ #define ESR_EL1_EC_SP_ALIGN (0x26) #define ESR_EL1_EC_FP_EXC32 (0x28) #define ESR_EL1_EC_FP_EXC64 (0x2C) -#define ESR_EL1_EC_SERRROR (0x2F) +#define ESR_EL1_EC_SERROR (0x2F) #define ESR_EL1_EC_BREAKPT_EL0 (0x30) #define ESR_EL1_EC_BREAKPT_EL1 (0x31) #define ESR_EL1_EC_SOFTSTP_EL0 (0x32) diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h new file mode 100644 index 00000000000..5f7bfe6df72 --- /dev/null +++ b/arch/arm64/include/asm/fixmap.h @@ -0,0 +1,67 @@ +/* + * fixmap.h: compile-time virtual memory allocation + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ * + * Copyright (C) 1998 Ingo Molnar + * Copyright (C) 2013 Mark Salter <msalter@redhat.com> + * + * Adapted from arch/x86_64 version. + * + */ + +#ifndef _ASM_ARM64_FIXMAP_H +#define _ASM_ARM64_FIXMAP_H + +#ifndef __ASSEMBLY__ +#include <linux/kernel.h> +#include <asm/page.h> + +/* + * Here we define all the compile-time 'special' virtual + * addresses. The point is to have a constant address at + * compile time, but to set the physical address only + * in the boot process. + * + * These 'compile-time allocated' memory buffers are + * page-sized. Use set_fixmap(idx,phys) to associate + * physical memory with fixmap indices. + * + */ +enum fixed_addresses { + FIX_EARLYCON_MEM_BASE, + __end_of_permanent_fixed_addresses, + + /* + * Temporary boot-time mappings, used by early_ioremap(), + * before ioremap() is functional. + */ +#ifdef CONFIG_ARM64_64K_PAGES +#define NR_FIX_BTMAPS 4 +#else +#define NR_FIX_BTMAPS 64 +#endif +#define FIX_BTMAPS_SLOTS 7 +#define TOTAL_FIX_BTMAPS (NR_FIX_BTMAPS * FIX_BTMAPS_SLOTS) + + FIX_BTMAP_END = __end_of_permanent_fixed_addresses, + FIX_BTMAP_BEGIN = FIX_BTMAP_END + TOTAL_FIX_BTMAPS - 1, + __end_of_fixed_addresses +}; + +#define FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT) +#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) + +#define FIXMAP_PAGE_IO __pgprot(PROT_DEVICE_nGnRE) + +extern void __early_set_fixmap(enum fixed_addresses idx, + phys_addr_t phys, pgprot_t flags); + +#define __set_fixmap __early_set_fixmap + +#include <asm-generic/fixmap.h> + +#endif /* !__ASSEMBLY__ */ +#endif /* _ASM_ARM64_FIXMAP_H */ diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h index c43b4ac1300..50f559f574f 100644 --- a/arch/arm64/include/asm/fpsimd.h +++ b/arch/arm64/include/asm/fpsimd.h @@ -37,8 +37,21 @@ struct fpsimd_state { u32 fpcr; }; }; + /* the id of the last cpu to have restored this state */ + unsigned int cpu; }; +/* + * Struct for stacking the bottom 'n' FP/SIMD registers. 
+ */ +struct fpsimd_partial_state { + u32 fpsr; + u32 fpcr; + u32 num_regs; + __uint128_t vregs[32]; +}; + + #if defined(__KERNEL__) && defined(CONFIG_COMPAT) /* Masks for extracting the FPSR and FPCR from the FPSCR */ #define VFP_FPSCR_STAT_MASK 0xf800009f @@ -58,6 +71,16 @@ extern void fpsimd_load_state(struct fpsimd_state *state); extern void fpsimd_thread_switch(struct task_struct *next); extern void fpsimd_flush_thread(void); +extern void fpsimd_preserve_current_state(void); +extern void fpsimd_restore_current_state(void); +extern void fpsimd_update_current_state(struct fpsimd_state *state); + +extern void fpsimd_flush_task_state(struct task_struct *target); + +extern void fpsimd_save_partial_state(struct fpsimd_partial_state *state, + u32 num_regs); +extern void fpsimd_load_partial_state(struct fpsimd_partial_state *state); + #endif #endif diff --git a/arch/arm64/include/asm/fpsimdmacros.h b/arch/arm64/include/asm/fpsimdmacros.h index bbec599c96b..768414d55e6 100644 --- a/arch/arm64/include/asm/fpsimdmacros.h +++ b/arch/arm64/include/asm/fpsimdmacros.h @@ -62,3 +62,38 @@ ldr w\tmpnr, [\state, #16 * 2 + 4] msr fpcr, x\tmpnr .endm + +.altmacro +.macro fpsimd_save_partial state, numnr, tmpnr1, tmpnr2 + mrs x\tmpnr1, fpsr + str w\numnr, [\state, #8] + mrs x\tmpnr2, fpcr + stp w\tmpnr1, w\tmpnr2, [\state] + adr x\tmpnr1, 0f + add \state, \state, x\numnr, lsl #4 + sub x\tmpnr1, x\tmpnr1, x\numnr, lsl #1 + br x\tmpnr1 + .irp qa, 30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 0 + .irp qb, %(qa + 1) + stp q\qa, q\qb, [\state, # -16 * \qa - 16] + .endr + .endr +0: +.endm + +.macro fpsimd_restore_partial state, tmpnr1, tmpnr2 + ldp w\tmpnr1, w\tmpnr2, [\state] + msr fpsr, x\tmpnr1 + msr fpcr, x\tmpnr2 + adr x\tmpnr1, 0f + ldr w\tmpnr2, [\state, #8] + add \state, \state, x\tmpnr2, lsl #4 + sub x\tmpnr1, x\tmpnr1, x\tmpnr2, lsl #1 + br x\tmpnr1 + .irp qa, 30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 0 + .irp qb, %(qa + 1) + ldp q\qa, q\qb, [\state, # -16 * \qa - 16] + .endr + .endr +0: +.endm diff --git a/arch/arm64/include/asm/ftrace.h b/arch/arm64/include/asm/ftrace.h new file mode 100644 index 00000000000..c5534facf94 --- /dev/null +++ b/arch/arm64/include/asm/ftrace.h @@ -0,0 +1,59 @@ +/* + * arch/arm64/include/asm/ftrace.h + * + * Copyright (C) 2013 Linaro Limited + * Author: AKASHI Takahiro <takahiro.akashi@linaro.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef __ASM_FTRACE_H +#define __ASM_FTRACE_H + +#include <asm/insn.h> + +#define MCOUNT_ADDR ((unsigned long)_mcount) +#define MCOUNT_INSN_SIZE AARCH64_INSN_SIZE + +#ifndef __ASSEMBLY__ +#include <linux/compat.h> + +extern void _mcount(unsigned long); +extern void *return_address(unsigned int); + +struct dyn_arch_ftrace { + /* No extra data needed for arm64 */ +}; + +extern unsigned long ftrace_graph_call; + +static inline unsigned long ftrace_call_adjust(unsigned long addr) +{ + /* + * addr is the address of the mcount call instruction. + * recordmcount does the necessary offset calculation. + */ + return addr; +} + +#define ftrace_return_address(n) return_address(n) + +/* + * Because AArch32 mode does not share the same syscall table with AArch64, + * tracing compat syscalls may result in reporting bogus syscalls or even + * hang-up, so just do not trace them. 
+ * See kernel/trace/trace_syscalls.c + * + * x86 code says: + * If the user realy wants these, then they should use the + * raw syscall tracepoints with filtering. + */ +#define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS +static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs) +{ + return is_compat_task(); +} +#endif /* ifndef __ASSEMBLY__ */ + +#endif /* __ASM_FTRACE_H */ diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h index 78cc3aba5d6..5f750dc96e0 100644 --- a/arch/arm64/include/asm/futex.h +++ b/arch/arm64/include/asm/futex.h @@ -24,10 +24,11 @@ #define __futex_atomic_op(insn, ret, oldval, uaddr, tmp, oparg) \ asm volatile( \ -"1: ldaxr %w1, %2\n" \ +"1: ldxr %w1, %2\n" \ insn "\n" \ "2: stlxr %w3, %w0, %2\n" \ " cbnz %w3, 1b\n" \ +" dmb ish\n" \ "3:\n" \ " .pushsection .fixup,\"ax\"\n" \ " .align 2\n" \ @@ -40,7 +41,7 @@ " .popsection\n" \ : "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp) \ : "r" (oparg), "Ir" (-EFAULT) \ - : "cc", "memory") + : "memory") static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) @@ -111,11 +112,12 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, return -EFAULT; asm volatile("// futex_atomic_cmpxchg_inatomic\n" -"1: ldaxr %w1, %2\n" +"1: ldxr %w1, %2\n" " sub %w3, %w1, %w4\n" " cbnz %w3, 3f\n" "2: stlxr %w3, %w5, %2\n" " cbnz %w3, 1b\n" +" dmb ish\n" "3:\n" " .pushsection .fixup,\"ax\"\n" "4: mov %w0, %w6\n" @@ -127,7 +129,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, " .popsection\n" : "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp) : "r" (oldval), "r" (newval), "Ir" (-EFAULT) - : "cc", "memory"); + : "memory"); *uval = val; return ret; diff --git a/arch/arm64/include/asm/hardirq.h b/arch/arm64/include/asm/hardirq.h index ae4801d7751..0be67821f9c 100644 --- a/arch/arm64/include/asm/hardirq.h +++ b/arch/arm64/include/asm/hardirq.h @@ -20,7 +20,7 @@ #include <linux/threads.h> #include <asm/irq.h> -#define NR_IPI 5 +#define NR_IPI 6 typedef struct { unsigned int __softirq_pending; diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h index 6cddbb0c9f5..024c46183c3 100644 --- a/arch/arm64/include/asm/hwcap.h +++ b/arch/arm64/include/asm/hwcap.h @@ -32,6 +32,12 @@ #define COMPAT_HWCAP_IDIV (COMPAT_HWCAP_IDIVA|COMPAT_HWCAP_IDIVT) #define COMPAT_HWCAP_EVTSTRM (1 << 21) +#define COMPAT_HWCAP2_AES (1 << 0) +#define COMPAT_HWCAP2_PMULL (1 << 1) +#define COMPAT_HWCAP2_SHA1 (1 << 2) +#define COMPAT_HWCAP2_SHA2 (1 << 3) +#define COMPAT_HWCAP2_CRC32 (1 << 4) + #ifndef __ASSEMBLY__ /* * This yields a mask that user programs can use to figure out what @@ -41,7 +47,8 @@ #ifdef CONFIG_COMPAT #define COMPAT_ELF_HWCAP (compat_elf_hwcap) -extern unsigned int compat_elf_hwcap; +#define COMPAT_ELF_HWCAP2 (compat_elf_hwcap2) +extern unsigned int compat_elf_hwcap, compat_elf_hwcap2; #endif extern unsigned long elf_hwcap; diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h index c44ad39ed31..dc1f73b13e7 100644 --- a/arch/arm64/include/asm/insn.h +++ b/arch/arm64/include/asm/insn.h @@ -21,6 +21,7 @@ /* A64 instructions are always 32 bits. 
*/ #define AARCH64_INSN_SIZE 4 +#ifndef __ASSEMBLY__ /* * ARM Architecture Reference Manual for ARMv8 Profile-A, Issue A.a * Section C3.1 "A64 instruction index by encoding": @@ -104,5 +105,6 @@ bool aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn); int aarch64_insn_patch_text_nosync(void *addr, u32 insn); int aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt); int aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt); +#endif /* __ASSEMBLY__ */ #endif /* __ASM_INSN_H */ diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h index 4cc813eddac..e0ecdcf6632 100644 --- a/arch/arm64/include/asm/io.h +++ b/arch/arm64/include/asm/io.h @@ -27,6 +27,7 @@ #include <asm/byteorder.h> #include <asm/barrier.h> #include <asm/pgtable.h> +#include <asm/early_ioremap.h> #include <xen/xen.h> @@ -121,7 +122,7 @@ static inline u64 __raw_readq(const volatile void __iomem *addr) * I/O port access primitives. */ #define IO_SPACE_LIMIT 0xffff -#define PCI_IOBASE ((void __iomem *)(MODULES_VADDR - SZ_2M)) +#define PCI_IOBASE ((void __iomem *)(MODULES_VADDR - SZ_32M)) static inline u8 inb(unsigned long addr) { @@ -229,19 +230,11 @@ extern void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot extern void __iounmap(volatile void __iomem *addr); extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size); -#define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_DIRTY) -#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE)) -#define PROT_NORMAL_NC (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL_NC)) -#define PROT_NORMAL (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL)) - #define ioremap(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE)) #define ioremap_nocache(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE)) #define ioremap_wc(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL_NC)) #define iounmap __iounmap -#define PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF) -#define PROT_SECT_DEVICE_nGnRE (PROT_SECT_DEFAULT | PTE_PXN | PTE_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE)) - #define ARCH_HAS_IOREMAP_WC #include <asm-generic/iomap.h> diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h index b2fcfbc51ec..11cc941bd10 100644 --- a/arch/arm64/include/asm/irqflags.h +++ b/arch/arm64/include/asm/irqflags.h @@ -90,5 +90,28 @@ static inline int arch_irqs_disabled_flags(unsigned long flags) return flags & PSR_I_BIT; } +/* + * save and restore debug state + */ +#define local_dbg_save(flags) \ + do { \ + typecheck(unsigned long, flags); \ + asm volatile( \ + "mrs %0, daif // local_dbg_save\n" \ + "msr daifset, #8" \ + : "=r" (flags) : : "memory"); \ + } while (0) + +#define local_dbg_restore(flags) \ + do { \ + typecheck(unsigned long, flags); \ + asm volatile( \ + "msr daif, %0 // local_dbg_restore\n" \ + : : "r" (flags) : "memory"); \ + } while (0) + +#define local_dbg_enable() asm("msr daifclr, #8" : : : "memory") +#define local_dbg_disable() asm("msr daifset, #8" : : : "memory") + #endif #endif diff --git a/arch/arm64/include/asm/kgdb.h b/arch/arm64/include/asm/kgdb.h new file mode 100644 index 00000000000..3c8aafc1082 --- /dev/null +++ b/arch/arm64/include/asm/kgdb.h @@ -0,0 +1,84 @@ +/* + * AArch64 KGDB support + * + * Based on arch/arm/include/kgdb.h + * + * Copyright (C) 2013 Cavium Inc. 
+ * Author: Vijaya Kumar K <vijaya.kumar@caviumnetworks.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef __ARM_KGDB_H +#define __ARM_KGDB_H + +#include <linux/ptrace.h> +#include <asm/debug-monitors.h> + +#ifndef __ASSEMBLY__ + +static inline void arch_kgdb_breakpoint(void) +{ + asm ("brk %0" : : "I" (KDBG_COMPILED_DBG_BRK_IMM)); +} + +extern void kgdb_handle_bus_error(void); +extern int kgdb_fault_expected; + +#endif /* !__ASSEMBLY__ */ + +/* + * gdb is expecting the following registers layout. + * + * General purpose regs: + * r0-r30: 64 bit + * sp,pc : 64 bit + * pstate : 64 bit + * Total: 34 + * FPU regs: + * f0-f31: 128 bit + * Total: 32 + * Extra regs + * fpsr & fpcr: 32 bit + * Total: 2 + * + */ + +#define _GP_REGS 34 +#define _FP_REGS 32 +#define _EXTRA_REGS 2 +/* + * general purpose registers size in bytes. + * pstate is only 4 bytes. subtract 4 bytes + */ +#define GP_REG_BYTES (_GP_REGS * 8) +#define DBG_MAX_REG_NUM (_GP_REGS + _FP_REGS + _EXTRA_REGS) + +/* + * Size of I/O buffer for gdb packet. + * considering to hold all register contents, size is set + */ + +#define BUFMAX 2048 + +/* + * Number of bytes required for gdb_regs buffer. + * _GP_REGS: 8 bytes, _FP_REGS: 16 bytes and _EXTRA_REGS: 4 bytes each + * GDB fails to connect for size beyond this with error + * "'g' packet reply is too long" + */ + +#define NUMREGBYTES ((_GP_REGS * 8) + (_FP_REGS * 16) + \ + (_EXTRA_REGS * 4)) + +#endif /* __ASM_KGDB_H */ diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index c98ef4771c7..3d6903006a8 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -62,6 +62,7 @@ * RW: 64bit by default, can be overriden for 32bit VMs * TAC: Trap ACTLR * TSC: Trap SMC + * TVM: Trap VM ops (until M+C set in SCTLR_EL1) * TSW: Trap cache operations by set/way * TWE: Trap WFE * TWI: Trap WFI @@ -74,7 +75,7 @@ * SWIO: Turn set/way invalidates into set/way clean+invalidate */ #define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \ - HCR_BSU_IS | HCR_FB | HCR_TAC | \ + HCR_TVM | HCR_BSU_IS | HCR_FB | HCR_TAC | \ HCR_AMO | HCR_IMO | HCR_FMO | \ HCR_SWIO | HCR_TIDCP | HCR_RW) #define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF) @@ -106,7 +107,6 @@ /* VTCR_EL2 Registers bits */ #define VTCR_EL2_PS_MASK (7 << 16) -#define VTCR_EL2_PS_40B (2 << 16) #define VTCR_EL2_TG0_MASK (1 << 14) #define VTCR_EL2_TG0_4K (0 << 14) #define VTCR_EL2_TG0_64K (1 << 14) @@ -129,10 +129,9 @@ * 64kB pages (TG0 = 1) * 2 level page tables (SL = 1) */ -#define VTCR_EL2_FLAGS (VTCR_EL2_PS_40B | VTCR_EL2_TG0_64K | \ - VTCR_EL2_SH0_INNER | VTCR_EL2_ORGN0_WBWA | \ - VTCR_EL2_IRGN0_WBWA | VTCR_EL2_SL0_LVL1 | \ - VTCR_EL2_T0SZ_40B) +#define VTCR_EL2_FLAGS (VTCR_EL2_TG0_64K | VTCR_EL2_SH0_INNER | \ + VTCR_EL2_ORGN0_WBWA | VTCR_EL2_IRGN0_WBWA | \ + VTCR_EL2_SL0_LVL1 | VTCR_EL2_T0SZ_40B) #define VTTBR_X (38 - VTCR_EL2_T0SZ_40B) #else /* @@ -142,10 +141,9 @@ * 4kB pages (TG0 = 0) * 3 level page tables (SL = 
1) */ -#define VTCR_EL2_FLAGS (VTCR_EL2_PS_40B | VTCR_EL2_TG0_4K | \ - VTCR_EL2_SH0_INNER | VTCR_EL2_ORGN0_WBWA | \ - VTCR_EL2_IRGN0_WBWA | VTCR_EL2_SL0_LVL1 | \ - VTCR_EL2_T0SZ_40B) +#define VTCR_EL2_FLAGS (VTCR_EL2_TG0_4K | VTCR_EL2_SH0_INNER | \ + VTCR_EL2_ORGN0_WBWA | VTCR_EL2_IRGN0_WBWA | \ + VTCR_EL2_SL0_LVL1 | VTCR_EL2_T0SZ_40B) #define VTTBR_X (37 - VTCR_EL2_T0SZ_40B) #endif @@ -231,7 +229,7 @@ #define ESR_EL2_EC_SP_ALIGN (0x26) #define ESR_EL2_EC_FP_EXC32 (0x28) #define ESR_EL2_EC_FP_EXC64 (0x2C) -#define ESR_EL2_EC_SERRROR (0x2F) +#define ESR_EL2_EC_SERROR (0x2F) #define ESR_EL2_EC_BREAKPT (0x30) #define ESR_EL2_EC_BREAKPT_HYP (0x31) #define ESR_EL2_EC_SOFTSTP (0x32) diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index b25763bc0ec..9fcd54b1e16 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -79,7 +79,8 @@ #define c13_TID_URW (TPIDR_EL0 * 2) /* Thread ID, User R/W */ #define c13_TID_URO (TPIDRRO_EL0 * 2)/* Thread ID, User R/O */ #define c13_TID_PRIV (TPIDR_EL1 * 2) /* Thread ID, Privileged */ -#define c10_AMAIR (AMAIR_EL1 * 2) /* Aux Memory Attr Indirection Reg */ +#define c10_AMAIR0 (AMAIR_EL1 * 2) /* Aux Memory Attr Indirection Reg */ +#define c10_AMAIR1 (c10_AMAIR0 + 1)/* Aux Memory Attr Indirection Reg */ #define c14_CNTKCTL (CNTKCTL_EL1 * 2) /* Timer Control Register (PL1) */ #define NR_CP15_REGS (NR_SYS_REGS * 2) diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 0a1d6975156..92242ce0630 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -39,7 +39,7 @@ #include <kvm/arm_vgic.h> #include <kvm/arm_arch_timer.h> -#define KVM_VCPU_MAX_FEATURES 2 +#define KVM_VCPU_MAX_FEATURES 3 struct kvm_vcpu; int kvm_target_cpu(void); diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index 7f1f9408ff6..7d29847a893 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -106,7 +106,6 @@ static inline bool kvm_is_write_fault(unsigned long esr) return true; } -static inline void kvm_clean_dcache_area(void *addr, size_t size) {} static inline void kvm_clean_pgd(pgd_t *pgd) {} static inline void kvm_clean_pmd_entry(pmd_t *pmd) {} static inline void kvm_clean_pte(pte_t *pte) {} @@ -122,11 +121,25 @@ static inline void kvm_set_s2pmd_writable(pmd_t *pmd) pmd_val(*pmd) |= PMD_S2_RDWR; } +#define kvm_pgd_addr_end(addr, end) pgd_addr_end(addr, end) +#define kvm_pud_addr_end(addr, end) pud_addr_end(addr, end) +#define kvm_pmd_addr_end(addr, end) pmd_addr_end(addr, end) + struct kvm; -static inline void coherent_icache_guest_page(struct kvm *kvm, hva_t hva, - unsigned long size) +#define kvm_flush_dcache_to_poc(a,l) __flush_dcache_area((a), (l)) + +static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu) { + return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101; +} + +static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva, + unsigned long size) +{ + if (!vcpu_has_cache_enabled(vcpu)) + kvm_flush_dcache_to_poc((void *)hva, size); + if (!icache_is_aliasing()) { /* PIPT */ flush_icache_range(hva, hva + size); } else if (!icache_is_aivivt()) { /* non ASID-tagged VIVT */ @@ -135,8 +148,9 @@ static inline void coherent_icache_guest_page(struct kvm *kvm, hva_t hva, } } -#define kvm_flush_dcache_to_poc(a,l) __flush_dcache_area((a), (l)) #define kvm_virt_to_phys(x) __virt_to_phys((unsigned long)(x)) +void stage2_flush_vm(struct kvm *kvm); + #endif /* __ASSEMBLY__ */ #endif 
/* __ARM64_KVM_MMU_H__ */ diff --git a/arch/arm64/include/asm/kvm_psci.h b/arch/arm64/include/asm/kvm_psci.h index e301a481635..bc39e557c56 100644 --- a/arch/arm64/include/asm/kvm_psci.h +++ b/arch/arm64/include/asm/kvm_psci.h @@ -18,6 +18,10 @@ #ifndef __ARM64_KVM_PSCI_H__ #define __ARM64_KVM_PSCI_H__ -bool kvm_psci_call(struct kvm_vcpu *vcpu); +#define KVM_ARM_PSCI_0_1 1 +#define KVM_ARM_PSCI_0_2 2 + +int kvm_psci_version(struct kvm_vcpu *vcpu); +int kvm_psci_call(struct kvm_vcpu *vcpu); #endif /* __ARM64_KVM_PSCI_H__ */ diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index 9dc5dc39fde..902eb708804 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -49,13 +49,15 @@ #define PAGE_OFFSET (UL(0xffffffffffffffff) << (VA_BITS - 1)) #define MODULES_END (PAGE_OFFSET) #define MODULES_VADDR (MODULES_END - SZ_64M) -#define EARLYCON_IOBASE (MODULES_VADDR - SZ_4M) +#define FIXADDR_TOP (MODULES_VADDR - SZ_2M - PAGE_SIZE) #define TASK_SIZE_64 (UL(1) << VA_BITS) #ifdef CONFIG_COMPAT #define TASK_SIZE_32 UL(0x100000000) #define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \ TASK_SIZE_32 : TASK_SIZE_64) +#define TASK_SIZE_OF(tsk) (test_tsk_thread_flag(tsk, TIF_32BIT) ? \ + TASK_SIZE_32 : TASK_SIZE_64) #else #define TASK_SIZE TASK_SIZE_64 #endif /* CONFIG_COMPAT */ @@ -138,6 +140,7 @@ static inline void *phys_to_virt(phys_addr_t x) #define __pa(x) __virt_to_phys((unsigned long)(x)) #define __va(x) ((void *)__phys_to_virt((phys_addr_t)(x))) #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) +#define virt_to_pfn(x) __phys_to_pfn(__virt_to_phys(x)) /* * virt_to_page(k) convert a _valid_ virtual address to struct page * diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h index 2494fc01896..c2f006c48bd 100644 --- a/arch/arm64/include/asm/mmu.h +++ b/arch/arm64/include/asm/mmu.h @@ -22,10 +22,16 @@ typedef struct { void *vdso; } mm_context_t; +#define INIT_MM_CONTEXT(name) \ + .context.id_lock = __RAW_SPIN_LOCK_UNLOCKED(name.context.id_lock), + #define ASID(mm) ((mm)->context.id & 0xffff) extern void paging_init(void); extern void setup_mm_for_reboot(void); extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt); +extern void init_mem_pgprot(void); +/* create an identity mapping for memory (or io if map_io is true) */ +extern void create_id_mapping(phys_addr_t addr, phys_addr_t size, int map_io); #endif diff --git a/arch/arm64/include/asm/neon.h b/arch/arm64/include/asm/neon.h index b0cc58a9778..13ce4cc18e2 100644 --- a/arch/arm64/include/asm/neon.h +++ b/arch/arm64/include/asm/neon.h @@ -8,7 +8,11 @@ * published by the Free Software Foundation. 
*/ +#include <linux/types.h> + #define cpu_has_neon() (1) -void kernel_neon_begin(void); +#define kernel_neon_begin() kernel_neon_begin_partial(32) + +void kernel_neon_begin_partial(u32 num_regs); void kernel_neon_end(void); diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h index 13fb0b3efc5..453a179469a 100644 --- a/arch/arm64/include/asm/percpu.h +++ b/arch/arm64/include/asm/percpu.h @@ -16,6 +16,8 @@ #ifndef __ASM_PERCPU_H #define __ASM_PERCPU_H +#ifdef CONFIG_SMP + static inline void set_my_cpu_offset(unsigned long off) { asm volatile("msr tpidr_el1, %0" :: "r" (off) : "memory"); @@ -36,6 +38,12 @@ static inline unsigned long __my_cpu_offset(void) } #define __my_cpu_offset __my_cpu_offset() +#else /* !CONFIG_SMP */ + +#define set_my_cpu_offset(x) do { } while (0) + +#endif /* CONFIG_SMP */ + #include <asm-generic/percpu.h> #endif /* __ASM_PERCPU_H */ diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h index b1d2e26c3c8..955e8c5f0af 100644 --- a/arch/arm64/include/asm/pgtable-hwdef.h +++ b/arch/arm64/include/asm/pgtable-hwdef.h @@ -29,6 +29,8 @@ */ #define PUD_TABLE_BIT (_AT(pgdval_t, 1) << 1) +#define PUD_TYPE_MASK (_AT(pgdval_t, 3) << 0) +#define PUD_TYPE_SECT (_AT(pgdval_t, 1) << 0) /* * Level 2 descriptor (PMD). @@ -100,9 +102,9 @@ #define PTE_HYP PTE_USER /* - * 40-bit physical address supported. + * Highest possible physical address supported. */ -#define PHYS_MASK_SHIFT (40) +#define PHYS_MASK_SHIFT (48) #define PHYS_MASK ((UL(1) << PHYS_MASK_SHIFT) - 1) /* @@ -120,9 +122,12 @@ #define TCR_ORGN_WBnWA ((UL(3) << 10) | (UL(3) << 26)) #define TCR_ORGN_MASK ((UL(3) << 10) | (UL(3) << 26)) #define TCR_SHARED ((UL(3) << 12) | (UL(3) << 28)) +#define TCR_TG0_4K (UL(0) << 14) #define TCR_TG0_64K (UL(1) << 14) -#define TCR_TG1_64K (UL(1) << 30) -#define TCR_IPS_40BIT (UL(2) << 32) +#define TCR_TG0_16K (UL(2) << 14) +#define TCR_TG1_16K (UL(1) << 30) +#define TCR_TG1_4K (UL(2) << 30) +#define TCR_TG1_64K (UL(3) << 30) #define TCR_ASID16 (UL(1) << 36) #define TCR_TBI0 (UL(1) << 37) diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index b524dcd1724..e0ccceb317d 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -52,66 +52,59 @@ extern void __pgd_error(const char *file, int line, unsigned long val); #endif #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd_val(pgd)) -/* - * The pgprot_* and protection_map entries will be fixed up at runtime to - * include the cachable and bufferable bits based on memory policy, as well as - * any architecture dependent bits like global/ASID and SMP shared mapping - * bits. 
- */ -#define _PAGE_DEFAULT PTE_TYPE_PAGE | PTE_AF +#ifdef CONFIG_SMP +#define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED) +#define PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S) +#else +#define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF) +#define PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF) +#endif -extern pgprot_t pgprot_default; +#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE)) +#define PROT_NORMAL_NC (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL_NC)) +#define PROT_NORMAL (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL)) -#define __pgprot_modify(prot,mask,bits) \ - __pgprot((pgprot_val(prot) & ~(mask)) | (bits)) +#define PROT_SECT_DEVICE_nGnRE (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE)) +#define PROT_SECT_NORMAL (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL)) +#define PROT_SECT_NORMAL_EXEC (PROT_SECT_DEFAULT | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL)) -#define _MOD_PROT(p, b) __pgprot_modify(p, 0, b) +#define _PAGE_DEFAULT (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL)) -#define PAGE_NONE __pgprot_modify(pgprot_default, PTE_TYPE_MASK, PTE_PROT_NONE | PTE_PXN | PTE_UXN) -#define PAGE_SHARED _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE) -#define PAGE_SHARED_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE) -#define PAGE_COPY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN) -#define PAGE_COPY_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN) -#define PAGE_READONLY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN) -#define PAGE_READONLY_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN) -#define PAGE_KERNEL _MOD_PROT(pgprot_default, PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE) -#define PAGE_KERNEL_EXEC _MOD_PROT(pgprot_default, PTE_UXN | PTE_DIRTY | PTE_WRITE) +#define PAGE_KERNEL __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE) +#define PAGE_KERNEL_EXEC __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE) -#define PAGE_HYP _MOD_PROT(pgprot_default, PTE_HYP) +#define PAGE_HYP __pgprot(_PAGE_DEFAULT | PTE_HYP) #define PAGE_HYP_DEVICE __pgprot(PROT_DEVICE_nGnRE | PTE_HYP) -#define PAGE_S2 __pgprot_modify(pgprot_default, PTE_S2_MEMATTR_MASK, PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY) +#define PAGE_S2 __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY) #define PAGE_S2_DEVICE __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDWR | PTE_UXN) -#define __PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE | PTE_PXN | PTE_UXN) -#define __PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE) -#define __PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE) -#define __PAGE_COPY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN) -#define __PAGE_COPY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN) -#define __PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN) -#define __PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN) - -#endif /* __ASSEMBLY__ */ - -#define __P000 __PAGE_NONE -#define __P001 __PAGE_READONLY -#define __P010 __PAGE_COPY -#define __P011 __PAGE_COPY -#define __P100 __PAGE_READONLY_EXEC -#define __P101 __PAGE_READONLY_EXEC -#define __P110 __PAGE_COPY_EXEC -#define __P111 __PAGE_COPY_EXEC - -#define __S000 __PAGE_NONE -#define __S001 
__PAGE_READONLY -#define __S010 __PAGE_SHARED -#define __S011 __PAGE_SHARED -#define __S100 __PAGE_READONLY_EXEC -#define __S101 __PAGE_READONLY_EXEC -#define __S110 __PAGE_SHARED_EXEC -#define __S111 __PAGE_SHARED_EXEC +#define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE | PTE_PXN | PTE_UXN) +#define PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE) +#define PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE) +#define PAGE_COPY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN) +#define PAGE_COPY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN) +#define PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN) +#define PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN) + +#define __P000 PAGE_NONE +#define __P001 PAGE_READONLY +#define __P010 PAGE_COPY +#define __P011 PAGE_COPY +#define __P100 PAGE_READONLY_EXEC +#define __P101 PAGE_READONLY_EXEC +#define __P110 PAGE_COPY_EXEC +#define __P111 PAGE_COPY_EXEC + +#define __S000 PAGE_NONE +#define __S001 PAGE_READONLY +#define __S010 PAGE_SHARED +#define __S011 PAGE_SHARED +#define __S100 PAGE_READONLY_EXEC +#define __S101 PAGE_READONLY_EXEC +#define __S110 PAGE_SHARED_EXEC +#define __S111 PAGE_SHARED_EXEC -#ifndef __ASSEMBLY__ /* * ZERO_PAGE is a global shared page that is always zero: used * for zero-mapped memory areas etc.. @@ -136,11 +129,11 @@ extern struct page *empty_zero_page; /* * The following only work if pte_present(). Undefined behaviour otherwise. */ -#define pte_present(pte) (pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)) -#define pte_dirty(pte) (pte_val(pte) & PTE_DIRTY) -#define pte_young(pte) (pte_val(pte) & PTE_AF) -#define pte_special(pte) (pte_val(pte) & PTE_SPECIAL) -#define pte_write(pte) (pte_val(pte) & PTE_WRITE) +#define pte_present(pte) (!!(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE))) +#define pte_dirty(pte) (!!(pte_val(pte) & PTE_DIRTY)) +#define pte_young(pte) (!!(pte_val(pte) & PTE_AF)) +#define pte_special(pte) (!!(pte_val(pte) & PTE_SPECIAL)) +#define pte_write(pte) (!!(pte_val(pte) & PTE_WRITE)) #define pte_exec(pte) (!(pte_val(pte) & PTE_UXN)) #define pte_valid_user(pte) \ @@ -199,7 +192,7 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte) { if (pte_valid_user(pte)) { - if (pte_exec(pte)) + if (!pte_special(pte) && pte_exec(pte)) __sync_icache_dcache(pte, addr); if (pte_dirty(pte) && pte_write(pte)) pte_val(pte) &= ~PTE_RDONLY; @@ -227,36 +220,36 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, #define __HAVE_ARCH_PTE_SPECIAL -/* - * Software PMD bits for THP - */ +static inline pte_t pmd_pte(pmd_t pmd) +{ + return __pte(pmd_val(pmd)); +} -#define PMD_SECT_DIRTY (_AT(pmdval_t, 1) << 55) -#define PMD_SECT_SPLITTING (_AT(pmdval_t, 1) << 57) +static inline pmd_t pte_pmd(pte_t pte) +{ + return __pmd(pte_val(pte)); +} /* * THP definitions. 
*/ -#define pmd_young(pmd) (pmd_val(pmd) & PMD_SECT_AF) - -#define __HAVE_ARCH_PMD_WRITE -#define pmd_write(pmd) (!(pmd_val(pmd) & PMD_SECT_RDONLY)) #ifdef CONFIG_TRANSPARENT_HUGEPAGE #define pmd_trans_huge(pmd) (pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT)) -#define pmd_trans_splitting(pmd) (pmd_val(pmd) & PMD_SECT_SPLITTING) +#define pmd_trans_splitting(pmd) pte_special(pmd_pte(pmd)) #endif -#define PMD_BIT_FUNC(fn,op) \ -static inline pmd_t pmd_##fn(pmd_t pmd) { pmd_val(pmd) op; return pmd; } +#define pmd_young(pmd) pte_young(pmd_pte(pmd)) +#define pmd_wrprotect(pmd) pte_pmd(pte_wrprotect(pmd_pte(pmd))) +#define pmd_mksplitting(pmd) pte_pmd(pte_mkspecial(pmd_pte(pmd))) +#define pmd_mkold(pmd) pte_pmd(pte_mkold(pmd_pte(pmd))) +#define pmd_mkwrite(pmd) pte_pmd(pte_mkwrite(pmd_pte(pmd))) +#define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd))) +#define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd))) +#define pmd_mknotpresent(pmd) (__pmd(pmd_val(pmd) & ~PMD_TYPE_MASK)) -PMD_BIT_FUNC(wrprotect, |= PMD_SECT_RDONLY); -PMD_BIT_FUNC(mkold, &= ~PMD_SECT_AF); -PMD_BIT_FUNC(mksplitting, |= PMD_SECT_SPLITTING); -PMD_BIT_FUNC(mkwrite, &= ~PMD_SECT_RDONLY); -PMD_BIT_FUNC(mkdirty, |= PMD_SECT_DIRTY); -PMD_BIT_FUNC(mkyoung, |= PMD_SECT_AF); -PMD_BIT_FUNC(mknotpresent, &= ~PMD_TYPE_MASK); +#define __HAVE_ARCH_PMD_WRITE +#define pmd_write(pmd) pte_write(pmd_pte(pmd)) #define pmd_mkhuge(pmd) (__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT)) @@ -265,32 +258,25 @@ PMD_BIT_FUNC(mknotpresent, &= ~PMD_TYPE_MASK); #define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot) #define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK)) +#define pud_pfn(pud) (((pud_val(pud) & PUD_MASK) & PHYS_MASK) >> PAGE_SHIFT) -static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) -{ - const pmdval_t mask = PMD_SECT_USER | PMD_SECT_PXN | PMD_SECT_UXN | - PMD_SECT_RDONLY | PMD_SECT_PROT_NONE | - PMD_SECT_VALID; - pmd_val(pmd) = (pmd_val(pmd) & ~mask) | (pgprot_val(newprot) & mask); - return pmd; -} - -#define set_pmd_at(mm, addr, pmdp, pmd) set_pmd(pmdp, pmd) +#define set_pmd_at(mm, addr, pmdp, pmd) set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd)) static inline int has_transparent_hugepage(void) { return 1; } +#define __pgprot_modify(prot,mask,bits) \ + __pgprot((pgprot_val(prot) & ~(mask)) | (bits)) + /* * Mark the prot value as uncacheable and unbufferable. 
*/ #define pgprot_noncached(prot) \ - __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE)) + __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN) #define pgprot_writecombine(prot) \ - __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC)) -#define pgprot_dmacoherent(prot) \ - __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC)) + __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN) #define __HAVE_PHYS_MEM_ACCESS_PROT struct file; extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, @@ -306,11 +292,17 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, #define pmd_sect(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \ PMD_TYPE_SECT) +#ifdef CONFIG_ARM64_64K_PAGES +#define pud_sect(pud) (0) +#else +#define pud_sect(pud) ((pud_val(pud) & PUD_TYPE_MASK) == \ + PUD_TYPE_SECT) +#endif static inline void set_pmd(pmd_t *pmdp, pmd_t pmd) { *pmdp = pmd; - dsb(); + dsb(ishst); } static inline void pmd_clear(pmd_t *pmdp) @@ -340,7 +332,7 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd) static inline void set_pud(pud_t *pudp, pud_t pud) { *pudp = pud; - dsb(); + dsb(ishst); } static inline void pud_clear(pud_t *pudp) @@ -383,6 +375,11 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) return pte; } +static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) +{ + return pte_pmd(pte_modify(pmd_pte(pmd), newprot)); +} + extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; extern pgd_t idmap_pg_dir[PTRS_PER_PGD]; @@ -412,7 +409,7 @@ extern pgd_t idmap_pg_dir[PTRS_PER_PGD]; /* * Ensure that there are not more swap files than can be encoded in the kernel - * the PTEs. + * PTEs. */ #define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS) diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index 45b20cd6cbc..34de2a8f7d9 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -79,6 +79,7 @@ struct thread_struct { unsigned long tp_value; struct fpsimd_state fpsimd_state; unsigned long fault_address; /* fault info */ + unsigned long fault_code; /* ESR_EL1 value */ struct debug_info debug; /* debugging */ }; diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h index 0e7fa496373..501000fadb6 100644 --- a/arch/arm64/include/asm/ptrace.h +++ b/arch/arm64/include/asm/ptrace.h @@ -21,6 +21,10 @@ #include <uapi/asm/ptrace.h> +/* Current Exception Level values, as contained in CurrentEL */ +#define CurrentEL_EL1 (1 << 2) +#define CurrentEL_EL2 (2 << 2) + /* AArch32-specific ptrace requests */ #define COMPAT_PTRACE_GETREGS 12 #define COMPAT_PTRACE_SETREGS 13 @@ -68,6 +72,7 @@ /* Architecturally defined mapping between AArch32 and AArch64 registers */ #define compat_usr(x) regs[(x)] +#define compat_fp regs[11] #define compat_sp regs[13] #define compat_lr regs[14] #define compat_sp_hyp regs[15] @@ -132,7 +137,12 @@ struct pt_regs { (!((regs)->pstate & PSR_F_BIT)) #define user_stack_pointer(regs) \ - ((regs)->sp) + (!compat_user_mode(regs)) ? ((regs)->sp) : ((regs)->compat_sp) + +static inline unsigned long regs_return_value(struct pt_regs *regs) +{ + return regs->regs[0]; +} /* * Are the current registers suitable for user mode? 
(used to maintain @@ -164,7 +174,7 @@ static inline int valid_user_regs(struct user_pt_regs *regs) return 0; } -#define instruction_pointer(regs) (regs)->pc +#define instruction_pointer(regs) ((unsigned long)(regs)->pc) #ifdef CONFIG_SMP extern unsigned long profile_pc(struct pt_regs *regs); diff --git a/arch/arm64/include/asm/sigcontext.h b/arch/arm64/include/asm/sigcontext.h deleted file mode 100644 index dca1094acc7..00000000000 --- a/arch/arm64/include/asm/sigcontext.h +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright (C) 2012 ARM Ltd. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. - */ -#ifndef __ASM_SIGCONTEXT_H -#define __ASM_SIGCONTEXT_H - -#include <uapi/asm/sigcontext.h> - -/* - * Auxiliary context saved in the sigcontext.__reserved array. Not exported to - * user space as it will change with the addition of new context. User space - * should check the magic/size information. - */ -struct aux_context { - struct fpsimd_context fpsimd; - /* additional context to be added before "end" */ - struct _aarch64_ctx end; -}; -#endif diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h index 3d5cf064d7a..c45b7b1b719 100644 --- a/arch/arm64/include/asm/spinlock.h +++ b/arch/arm64/include/asm/spinlock.h @@ -132,7 +132,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw) " cbnz %w0, 2b\n" : "=&r" (tmp), "+Q" (rw->lock) : "r" (0x80000000) - : "cc", "memory"); + : "memory"); } static inline int arch_write_trylock(arch_rwlock_t *rw) @@ -146,7 +146,7 @@ static inline int arch_write_trylock(arch_rwlock_t *rw) "1:\n" : "=&r" (tmp), "+Q" (rw->lock) : "r" (0x80000000) - : "cc", "memory"); + : "memory"); return !tmp; } @@ -187,7 +187,7 @@ static inline void arch_read_lock(arch_rwlock_t *rw) " cbnz %w1, 2b\n" : "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock) : - : "cc", "memory"); + : "memory"); } static inline void arch_read_unlock(arch_rwlock_t *rw) @@ -201,7 +201,7 @@ static inline void arch_read_unlock(arch_rwlock_t *rw) " cbnz %w1, 1b\n" : "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock) : - : "cc", "memory"); + : "memory"); } static inline int arch_read_trylock(arch_rwlock_t *rw) @@ -216,7 +216,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw) "1:\n" : "=&r" (tmp), "+r" (tmp2), "+Q" (rw->lock) : - : "cc", "memory"); + : "memory"); return !tmp2; } diff --git a/arch/arm64/include/asm/string.h b/arch/arm64/include/asm/string.h index 3ee8b303d9a..64d2d4884a9 100644 --- a/arch/arm64/include/asm/string.h +++ b/arch/arm64/include/asm/string.h @@ -22,6 +22,18 @@ extern char *strrchr(const char *, int c); #define __HAVE_ARCH_STRCHR extern char *strchr(const char *, int c); +#define __HAVE_ARCH_STRCMP +extern int strcmp(const char *, const char *); + +#define __HAVE_ARCH_STRNCMP +extern int strncmp(const char *, const char *, __kernel_size_t); + +#define __HAVE_ARCH_STRLEN +extern __kernel_size_t strlen(const char *); + +#define __HAVE_ARCH_STRNLEN +extern __kernel_size_t strnlen(const char *, __kernel_size_t); + #define 
__HAVE_ARCH_MEMCPY extern void *memcpy(void *, const void *, __kernel_size_t); @@ -34,4 +46,7 @@ extern void *memchr(const void *, int, __kernel_size_t); #define __HAVE_ARCH_MEMSET extern void *memset(void *, int, __kernel_size_t); +#define __HAVE_ARCH_MEMCMP +extern int memcmp(const void *, const void *, size_t); + #endif diff --git a/arch/arm64/include/asm/syscall.h b/arch/arm64/include/asm/syscall.h index 70ba9d4ee97..383771eb0b8 100644 --- a/arch/arm64/include/asm/syscall.h +++ b/arch/arm64/include/asm/syscall.h @@ -18,6 +18,7 @@ #include <linux/err.h> +extern const void *sys_call_table[]; static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs) diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h index 720e70b66ff..e40b6d06d51 100644 --- a/arch/arm64/include/asm/thread_info.h +++ b/arch/arm64/include/asm/thread_info.h @@ -91,17 +91,22 @@ static inline struct thread_info *current_thread_info(void) /* * thread information flags: * TIF_SYSCALL_TRACE - syscall trace active + * TIF_SYSCALL_TRACEPOINT - syscall tracepoint for ftrace + * TIF_SYSCALL_AUDIT - syscall auditing + * TIF_SECOMP - syscall secure computing * TIF_SIGPENDING - signal pending * TIF_NEED_RESCHED - rescheduling necessary * TIF_NOTIFY_RESUME - callback before returning to user * TIF_USEDFPU - FPU was used by this task this quantum (SMP) - * TIF_POLLING_NRFLAG - true if poll_idle() is polling TIF_NEED_RESCHED */ #define TIF_SIGPENDING 0 #define TIF_NEED_RESCHED 1 #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */ +#define TIF_FOREIGN_FPSTATE 3 /* CPU's FP state is not current's */ #define TIF_SYSCALL_TRACE 8 -#define TIF_POLLING_NRFLAG 16 +#define TIF_SYSCALL_AUDIT 9 +#define TIF_SYSCALL_TRACEPOINT 10 +#define TIF_SECCOMP 11 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */ #define TIF_FREEZE 19 #define TIF_RESTORE_SIGMASK 20 @@ -112,10 +117,18 @@ static inline struct thread_info *current_thread_info(void) #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) +#define _TIF_FOREIGN_FPSTATE (1 << TIF_FOREIGN_FPSTATE) +#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) +#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) +#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) +#define _TIF_SECCOMP (1 << TIF_SECCOMP) #define _TIF_32BIT (1 << TIF_32BIT) #define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \ - _TIF_NOTIFY_RESUME) + _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE) + +#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ + _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP) #endif /* __KERNEL__ */ #endif /* __ASM_THREAD_INFO_H */ diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h index 717031a762c..80e2c08900d 100644 --- a/arch/arm64/include/asm/tlb.h +++ b/arch/arm64/include/asm/tlb.h @@ -19,115 +19,45 @@ #ifndef __ASM_TLB_H #define __ASM_TLB_H -#include <linux/pagemap.h> -#include <linux/swap.h> +#define __tlb_remove_pmd_tlb_entry __tlb_remove_pmd_tlb_entry -#include <asm/pgalloc.h> -#include <asm/tlbflush.h> - -#define MMU_GATHER_BUNDLE 8 - -/* - * TLB handling. This allows us to remove pages from the page - * tables, and efficiently handle the TLB issues. 
- */ -struct mmu_gather { - struct mm_struct *mm; - unsigned int fullmm; - struct vm_area_struct *vma; - unsigned long start, end; - unsigned long range_start; - unsigned long range_end; - unsigned int nr; - unsigned int max; - struct page **pages; - struct page *local[MMU_GATHER_BUNDLE]; -}; +#include <asm-generic/tlb.h> /* - * This is unnecessarily complex. There's three ways the TLB shootdown - * code is used: + * There's three ways the TLB shootdown code is used: * 1. Unmapping a range of vmas. See zap_page_range(), unmap_region(). * tlb->fullmm = 0, and tlb_start_vma/tlb_end_vma will be called. - * tlb->vma will be non-NULL. * 2. Unmapping all vmas. See exit_mmap(). * tlb->fullmm = 1, and tlb_start_vma/tlb_end_vma will be called. - * tlb->vma will be non-NULL. Additionally, page tables will be freed. + * Page tables will be freed. * 3. Unmapping argument pages. See shift_arg_pages(). * tlb->fullmm = 0, but tlb_start_vma/tlb_end_vma will not be called. - * tlb->vma will be NULL. */ static inline void tlb_flush(struct mmu_gather *tlb) { - if (tlb->fullmm || !tlb->vma) + if (tlb->fullmm) { flush_tlb_mm(tlb->mm); - else if (tlb->range_end > 0) { - flush_tlb_range(tlb->vma, tlb->range_start, tlb->range_end); - tlb->range_start = TASK_SIZE; - tlb->range_end = 0; + } else if (tlb->end > 0) { + struct vm_area_struct vma = { .vm_mm = tlb->mm, }; + flush_tlb_range(&vma, tlb->start, tlb->end); + tlb->start = TASK_SIZE; + tlb->end = 0; } } static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr) { if (!tlb->fullmm) { - if (addr < tlb->range_start) - tlb->range_start = addr; - if (addr + PAGE_SIZE > tlb->range_end) - tlb->range_end = addr + PAGE_SIZE; + tlb->start = min(tlb->start, addr); + tlb->end = max(tlb->end, addr + PAGE_SIZE); } } -static inline void __tlb_alloc_page(struct mmu_gather *tlb) -{ - unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0); - - if (addr) { - tlb->pages = (void *)addr; - tlb->max = PAGE_SIZE / sizeof(struct page *); - } -} - -static inline void tlb_flush_mmu(struct mmu_gather *tlb) -{ - tlb_flush(tlb); - free_pages_and_swap_cache(tlb->pages, tlb->nr); - tlb->nr = 0; - if (tlb->pages == tlb->local) - __tlb_alloc_page(tlb); -} - -static inline void -tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end) -{ - tlb->mm = mm; - tlb->fullmm = !(start | (end+1)); - tlb->start = start; - tlb->end = end; - tlb->vma = NULL; - tlb->max = ARRAY_SIZE(tlb->local); - tlb->pages = tlb->local; - tlb->nr = 0; - __tlb_alloc_page(tlb); -} - -static inline void -tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end) -{ - tlb_flush_mmu(tlb); - - /* keep the page table cache within bounds */ - check_pgt_cache(); - - if (tlb->pages != tlb->local) - free_pages((unsigned long)tlb->pages, 0); -} - /* * Memorize the range for the TLB flush. */ -static inline void -tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr) +static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, + unsigned long addr) { tlb_add_flush(tlb, addr); } @@ -137,38 +67,24 @@ tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr) * case where we're doing a full MM flush. When we're doing a munmap, * the vmas are adjusted to only cover the region to be torn down. 
*/ -static inline void -tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) +static inline void tlb_start_vma(struct mmu_gather *tlb, + struct vm_area_struct *vma) { if (!tlb->fullmm) { - tlb->vma = vma; - tlb->range_start = TASK_SIZE; - tlb->range_end = 0; + tlb->start = TASK_SIZE; + tlb->end = 0; } } -static inline void -tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) +static inline void tlb_end_vma(struct mmu_gather *tlb, + struct vm_area_struct *vma) { if (!tlb->fullmm) tlb_flush(tlb); } -static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page) -{ - tlb->pages[tlb->nr++] = page; - VM_BUG_ON(tlb->nr > tlb->max); - return tlb->max - tlb->nr; -} - -static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) -{ - if (!__tlb_remove_page(tlb, page)) - tlb_flush_mmu(tlb); -} - static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, - unsigned long addr) + unsigned long addr) { pgtable_page_dtor(pte); tlb_add_flush(tlb, addr); @@ -184,16 +100,10 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, } #endif -#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr) -#define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr) -#define pud_free_tlb(tlb, pudp, addr) pud_free((tlb)->mm, pudp) - -#define tlb_migrate_finish(mm) do { } while (0) - -static inline void -tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr) +static inline void __tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp, + unsigned long address) { - tlb_add_flush(tlb, addr); + tlb_add_flush(tlb, address); } #endif diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h index 8b482035cfc..b9349c4513e 100644 --- a/arch/arm64/include/asm/tlbflush.h +++ b/arch/arm64/include/asm/tlbflush.h @@ -72,9 +72,9 @@ extern struct cpu_tlb_fns cpu_tlb; */ static inline void flush_tlb_all(void) { - dsb(); + dsb(ishst); asm("tlbi vmalle1is"); - dsb(); + dsb(ish); isb(); } @@ -82,9 +82,9 @@ static inline void flush_tlb_mm(struct mm_struct *mm) { unsigned long asid = (unsigned long)ASID(mm) << 48; - dsb(); + dsb(ishst); asm("tlbi aside1is, %0" : : "r" (asid)); - dsb(); + dsb(ish); } static inline void flush_tlb_page(struct vm_area_struct *vma, @@ -93,16 +93,36 @@ static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr = uaddr >> 12 | ((unsigned long)ASID(vma->vm_mm) << 48); - dsb(); + dsb(ishst); asm("tlbi vae1is, %0" : : "r" (addr)); - dsb(); + dsb(ish); } -/* - * Convert calls to our calling convention. - */ -#define flush_tlb_range(vma,start,end) __cpu_flush_user_tlb_range(start,end,vma) -#define flush_tlb_kernel_range(s,e) __cpu_flush_kern_tlb_range(s,e) +static inline void flush_tlb_range(struct vm_area_struct *vma, + unsigned long start, unsigned long end) +{ + unsigned long asid = (unsigned long)ASID(vma->vm_mm) << 48; + unsigned long addr; + start = asid | (start >> 12); + end = asid | (end >> 12); + + dsb(ishst); + for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) + asm("tlbi vae1is, %0" : : "r"(addr)); + dsb(ish); +} + +static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end) +{ + unsigned long addr; + start >>= 12; + end >>= 12; + + dsb(ishst); + for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) + asm("tlbi vaae1is, %0" : : "r"(addr)); + dsb(ish); +} /* * On AArch64, the cache coherency is handled via the set_pte_at() function. 
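Note on the open-coded flush_tlb_range() and flush_tlb_kernel_range() added above: the operand handed to "tlbi vae1is" / "tlbi vaae1is" packs the ASID into bits [63:48] and the virtual address shifted right by 12 into the low bits, which is why start and end are pre-shifted before the loop. A minimal standalone sketch of that encoding (the helper name and user-space form are illustrative only, not part of the patch):

	#include <stdint.h>

	/* Illustrative model of the TLBI-by-VA operand built above:
	 * bits [63:48] carry the ASID, the low bits carry VA >> 12. */
	static inline uint64_t tlbi_va_operand(uint64_t asid, uint64_t vaddr)
	{
		return (asid << 48) | (vaddr >> 12);
	}

With this encoding the loop stride of 1 << (PAGE_SHIFT - 12) advances the operand by exactly one page: 1 for 4K pages, 16 for 64K pages.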
@@ -114,7 +134,7 @@ static inline void update_mmu_cache(struct vm_area_struct *vma, * set_pte() does not have a DSB, so make sure that the page table * write is visible. */ - dsb(); + dsb(ishst); } #define update_mmu_cache_pmd(vma, address, pmd) do { } while (0) diff --git a/arch/arm64/include/asm/topology.h b/arch/arm64/include/asm/topology.h new file mode 100644 index 00000000000..7ebcd31ce51 --- /dev/null +++ b/arch/arm64/include/asm/topology.h @@ -0,0 +1,36 @@ +#ifndef __ASM_TOPOLOGY_H +#define __ASM_TOPOLOGY_H + +#ifdef CONFIG_SMP + +#include <linux/cpumask.h> + +struct cpu_topology { + int thread_id; + int core_id; + int cluster_id; + cpumask_t thread_sibling; + cpumask_t core_sibling; +}; + +extern struct cpu_topology cpu_topology[NR_CPUS]; + +#define topology_physical_package_id(cpu) (cpu_topology[cpu].cluster_id) +#define topology_core_id(cpu) (cpu_topology[cpu].core_id) +#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_sibling) +#define topology_thread_cpumask(cpu) (&cpu_topology[cpu].thread_sibling) + +void init_cpu_topology(void); +void store_cpu_topology(unsigned int cpuid); +const struct cpumask *cpu_coregroup_mask(int cpu); + +#else + +static inline void init_cpu_topology(void) { } +static inline void store_cpu_topology(unsigned int cpuid) { } + +#endif + +#include <asm-generic/topology.h> + +#endif /* _ASM_ARM_TOPOLOGY_H */ diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index 6c0f684aca8..3bf8f4e99a5 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -83,7 +83,7 @@ static inline void set_fs(mm_segment_t fs) * Returns 1 if the range is valid, 0 otherwise. * * This is equivalent to the following test: - * (u65)addr + (u65)size < (u65)current->addr_limit + * (u65)addr + (u65)size <= current->addr_limit * * This needs 65-bit arithmetic. */ @@ -91,7 +91,7 @@ static inline void set_fs(mm_segment_t fs) ({ \ unsigned long flag, roksum; \ __chk_user_ptr(addr); \ - asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, cc" \ + asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, ls" \ : "=&r" (flag), "=&r" (roksum) \ : "1" (addr), "Ir" (size), \ "r" (current_thread_info()->addr_limit) \ diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h index 82ce217e94c..e5f47df00c2 100644 --- a/arch/arm64/include/asm/unistd.h +++ b/arch/arm64/include/asm/unistd.h @@ -14,6 +14,7 @@ * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #ifdef CONFIG_COMPAT +#define __ARCH_WANT_COMPAT_SYS_GETDENTS64 #define __ARCH_WANT_COMPAT_STAT64 #define __ARCH_WANT_SYS_GETHOSTNAME #define __ARCH_WANT_SYS_PAUSE @@ -28,3 +29,5 @@ #endif #define __ARCH_WANT_SYS_CLONE #include <uapi/asm/unistd.h> + +#define NR_syscalls (__NR_syscalls) diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h index 58125bf008d..c8d8fc17bd5 100644 --- a/arch/arm64/include/asm/unistd32.h +++ b/arch/arm64/include/asm/unistd32.h @@ -399,9 +399,13 @@ __SYSCALL(374, compat_sys_sendmmsg) __SYSCALL(375, sys_setns) __SYSCALL(376, compat_sys_process_vm_readv) __SYSCALL(377, compat_sys_process_vm_writev) -__SYSCALL(378, sys_ni_syscall) /* 378 for kcmp */ +__SYSCALL(378, sys_kcmp) +__SYSCALL(379, sys_finit_module) +__SYSCALL(380, sys_sched_setattr) +__SYSCALL(381, sys_sched_getattr) +__SYSCALL(382, sys_renameat2) -#define __NR_compat_syscalls 379 +#define __NR_compat_syscalls 383 /* * Compat syscall numbers used by the AArch64 kernel. 
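The __range_ok() change in asm/uaccess.h above switches the final cset condition from cc to ls, so the generated test matches the updated comment: (u65)addr + (u65)size <= current->addr_limit rather than a strict less-than. A plain-C model of the same 65-bit check (a sketch only; the kernel keeps the inline asm shown in the diff, and the helper name here is hypothetical):

	/* 65-bit range check: a carry out of the 64-bit add shows up as
	 * sum < addr, which must fail regardless of the limit. */
	static inline int range_ok(unsigned long addr, unsigned long size,
				   unsigned long limit)
	{
		unsigned long sum = addr + size;

		return sum >= addr && sum <= limit;
	}

In the asm, the wrap-around case is handled by the ccmp immediate #2 (C set, Z clear), which makes the final ls condition evaluate false whenever the adds produced a carry.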
diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h index 130e2be952c..215ad4649dd 100644 --- a/arch/arm64/include/asm/virt.h +++ b/arch/arm64/include/asm/virt.h @@ -22,7 +22,6 @@ #define BOOT_CPU_MODE_EL2 (0xe12) #ifndef __ASSEMBLY__ -#include <asm/cacheflush.h> /* * __boot_cpu_mode records what mode CPUs were booted in. @@ -38,20 +37,9 @@ extern u32 __boot_cpu_mode[2]; void __hyp_set_vectors(phys_addr_t phys_vector_base); phys_addr_t __hyp_get_vectors(void); -static inline void sync_boot_mode(void) -{ - /* - * As secondaries write to __boot_cpu_mode with caches disabled, we - * must flush the corresponding cache entries to ensure the visibility - * of their writes. - */ - __flush_dcache_area(__boot_cpu_mode, sizeof(__boot_cpu_mode)); -} - /* Reports the availability of HYP mode */ static inline bool is_hyp_mode_available(void) { - sync_boot_mode(); return (__boot_cpu_mode[0] == BOOT_CPU_MODE_EL2 && __boot_cpu_mode[1] == BOOT_CPU_MODE_EL2); } @@ -59,7 +47,6 @@ static inline bool is_hyp_mode_available(void) /* Check if the bootloader has booted CPUs in different modes */ static inline bool is_hyp_mode_mismatched(void) { - sync_boot_mode(); return __boot_cpu_mode[0] != __boot_cpu_mode[1]; } diff --git a/arch/arm64/include/uapi/asm/Kbuild b/arch/arm64/include/uapi/asm/Kbuild index e4b78bdca19..942376d37d2 100644 --- a/arch/arm64/include/uapi/asm/Kbuild +++ b/arch/arm64/include/uapi/asm/Kbuild @@ -9,6 +9,7 @@ header-y += byteorder.h header-y += fcntl.h header-y += hwcap.h header-y += kvm_para.h +header-y += perf_regs.h header-y += param.h header-y += ptrace.h header-y += setup.h diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h index 495ab6f84a6..e633ff8cdec 100644 --- a/arch/arm64/include/uapi/asm/kvm.h +++ b/arch/arm64/include/uapi/asm/kvm.h @@ -31,6 +31,7 @@ #define KVM_NR_SPSR 5 #ifndef __ASSEMBLY__ +#include <linux/psci.h> #include <asm/types.h> #include <asm/ptrace.h> @@ -56,8 +57,9 @@ struct kvm_regs { #define KVM_ARM_TARGET_FOUNDATION_V8 1 #define KVM_ARM_TARGET_CORTEX_A57 2 #define KVM_ARM_TARGET_XGENE_POTENZA 3 +#define KVM_ARM_TARGET_CORTEX_A53 4 -#define KVM_ARM_NUM_TARGETS 4 +#define KVM_ARM_NUM_TARGETS 5 /* KVM_ARM_SET_DEVICE_ADDR ioctl id encoding */ #define KVM_ARM_DEVICE_TYPE_SHIFT 0 @@ -77,6 +79,7 @@ struct kvm_regs { #define KVM_ARM_VCPU_POWER_OFF 0 /* CPU is started in OFF state */ #define KVM_ARM_VCPU_EL1_32BIT 1 /* CPU running a 32bit VM */ +#define KVM_ARM_VCPU_PSCI_0_2 2 /* CPU uses PSCI v0.2 */ struct kvm_vcpu_init { __u32 target; @@ -148,6 +151,15 @@ struct kvm_arch_memory_slot { #define KVM_REG_ARM_TIMER_CNT ARM64_SYS_REG(3, 3, 14, 3, 2) #define KVM_REG_ARM_TIMER_CVAL ARM64_SYS_REG(3, 3, 14, 0, 2) +/* Device Control API: ARM VGIC */ +#define KVM_DEV_ARM_VGIC_GRP_ADDR 0 +#define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1 +#define KVM_DEV_ARM_VGIC_GRP_CPU_REGS 2 +#define KVM_DEV_ARM_VGIC_CPUID_SHIFT 32 +#define KVM_DEV_ARM_VGIC_CPUID_MASK (0xffULL << KVM_DEV_ARM_VGIC_CPUID_SHIFT) +#define KVM_DEV_ARM_VGIC_OFFSET_SHIFT 0 +#define KVM_DEV_ARM_VGIC_OFFSET_MASK (0xffffffffULL << KVM_DEV_ARM_VGIC_OFFSET_SHIFT) + /* KVM_IRQ_LINE irq field index values */ #define KVM_ARM_IRQ_TYPE_SHIFT 24 #define KVM_ARM_IRQ_TYPE_MASK 0xff @@ -177,10 +189,10 @@ struct kvm_arch_memory_slot { #define KVM_PSCI_FN_CPU_ON KVM_PSCI_FN(2) #define KVM_PSCI_FN_MIGRATE KVM_PSCI_FN(3) -#define KVM_PSCI_RET_SUCCESS 0 -#define KVM_PSCI_RET_NI ((unsigned long)-1) -#define KVM_PSCI_RET_INVAL ((unsigned long)-2) -#define KVM_PSCI_RET_DENIED ((unsigned long)-3) 
+#define KVM_PSCI_RET_SUCCESS PSCI_RET_SUCCESS +#define KVM_PSCI_RET_NI PSCI_RET_NOT_SUPPORTED +#define KVM_PSCI_RET_INVAL PSCI_RET_INVALID_PARAMS +#define KVM_PSCI_RET_DENIED PSCI_RET_DENIED #endif diff --git a/arch/arm64/include/uapi/asm/perf_regs.h b/arch/arm64/include/uapi/asm/perf_regs.h new file mode 100644 index 00000000000..172b8317ee4 --- /dev/null +++ b/arch/arm64/include/uapi/asm/perf_regs.h @@ -0,0 +1,40 @@ +#ifndef _ASM_ARM64_PERF_REGS_H +#define _ASM_ARM64_PERF_REGS_H + +enum perf_event_arm_regs { + PERF_REG_ARM64_X0, + PERF_REG_ARM64_X1, + PERF_REG_ARM64_X2, + PERF_REG_ARM64_X3, + PERF_REG_ARM64_X4, + PERF_REG_ARM64_X5, + PERF_REG_ARM64_X6, + PERF_REG_ARM64_X7, + PERF_REG_ARM64_X8, + PERF_REG_ARM64_X9, + PERF_REG_ARM64_X10, + PERF_REG_ARM64_X11, + PERF_REG_ARM64_X12, + PERF_REG_ARM64_X13, + PERF_REG_ARM64_X14, + PERF_REG_ARM64_X15, + PERF_REG_ARM64_X16, + PERF_REG_ARM64_X17, + PERF_REG_ARM64_X18, + PERF_REG_ARM64_X19, + PERF_REG_ARM64_X20, + PERF_REG_ARM64_X21, + PERF_REG_ARM64_X22, + PERF_REG_ARM64_X23, + PERF_REG_ARM64_X24, + PERF_REG_ARM64_X25, + PERF_REG_ARM64_X26, + PERF_REG_ARM64_X27, + PERF_REG_ARM64_X28, + PERF_REG_ARM64_X29, + PERF_REG_ARM64_LR, + PERF_REG_ARM64_SP, + PERF_REG_ARM64_PC, + PERF_REG_ARM64_MAX, +}; +#endif /* _ASM_ARM64_PERF_REGS_H */ diff --git a/arch/arm64/include/uapi/asm/posix_types.h b/arch/arm64/include/uapi/asm/posix_types.h new file mode 100644 index 00000000000..7985ff60ca3 --- /dev/null +++ b/arch/arm64/include/uapi/asm/posix_types.h @@ -0,0 +1,10 @@ +#ifndef __ASM_POSIX_TYPES_H +#define __ASM_POSIX_TYPES_H + +typedef unsigned short __kernel_old_uid_t; +typedef unsigned short __kernel_old_gid_t; +#define __kernel_old_uid_t __kernel_old_uid_t + +#include <asm-generic/posix_types.h> + +#endif /* __ASM_POSIX_TYPES_H */ diff --git a/arch/arm64/include/uapi/asm/sigcontext.h b/arch/arm64/include/uapi/asm/sigcontext.h index 690ad51cc90..ee469be1ae1 100644 --- a/arch/arm64/include/uapi/asm/sigcontext.h +++ b/arch/arm64/include/uapi/asm/sigcontext.h @@ -53,5 +53,12 @@ struct fpsimd_context { __uint128_t vregs[32]; }; +/* ESR_EL1 context */ +#define ESR_MAGIC 0x45535201 + +struct esr_context { + struct _aarch64_ctx head; + __u64 esr; +}; #endif /* _UAPI__ASM_SIGCONTEXT_H */
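The new esr_context record in uapi/asm/sigcontext.h is appended to the signal frame's __reserved[] area as another _aarch64_ctx block, so user space locates it by walking the magic/size headers rather than at a fixed offset. A hedged sketch of such a walk (the helper and the loop are illustrative; only ESR_MAGIC and struct esr_context come from this patch):

	#include <stddef.h>
	#include <asm/sigcontext.h>

	/* Walk sigcontext.__reserved[] looking for the ESR record. The
	 * sequence is terminated by a header with magic == 0 and size == 0. */
	static struct esr_context *find_esr_context(struct sigcontext *sc)
	{
		struct _aarch64_ctx *head = (struct _aarch64_ctx *)sc->__reserved;

		while (head->magic != 0 && head->size != 0) {
			if (head->magic == ESR_MAGIC)
				return (struct esr_context *)head;
			head = (struct _aarch64_ctx *)((char *)head + head->size);
		}
		return NULL;
	}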

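Referring back to the new asm/topology.h above: the topology_* macros resolve to fields of the per-CPU cpu_topology[] array filled in by store_cpu_topology(). A small kernel-style snippet (illustrative only, not from the patch) showing how a consumer might read those fields:

	#include <linux/kernel.h>
	#include <linux/cpumask.h>
	#include <asm/topology.h>

	/* Illustrative: report the cluster/core a CPU belongs to and how
	 * many CPUs share its core_sibling mask. */
	static void report_cpu_topology(unsigned int cpu)
	{
		pr_info("CPU%u: cluster %d, core %d, %u core siblings\n",
			cpu,
			topology_physical_package_id(cpu),
			topology_core_id(cpu),
			cpumask_weight(topology_core_cpumask(cpu)));
	}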