Diffstat (limited to 'arch/arm/include')
24 files changed, 645 insertions, 74 deletions
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index c79f61faa3a..da1c77d3932 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -243,6 +243,29 @@ typedef struct {
 
 #define ATOMIC64_INIT(i) { (i) }
 
+#ifdef CONFIG_ARM_LPAE
+static inline u64 atomic64_read(const atomic64_t *v)
+{
+	u64 result;
+
+	__asm__ __volatile__("@ atomic64_read\n"
+"	ldrd	%0, %H0, [%1]"
+	: "=&r" (result)
+	: "r" (&v->counter), "Qo" (v->counter)
+	);
+
+	return result;
+}
+
+static inline void atomic64_set(atomic64_t *v, u64 i)
+{
+	__asm__ __volatile__("@ atomic64_set\n"
+"	strd	%2, %H2, [%1]"
+	: "=Qo" (v->counter)
+	: "r" (&v->counter), "r" (i)
+	);
+}
+#else
 static inline u64 atomic64_read(const atomic64_t *v)
 {
 	u64 result;
@@ -269,6 +292,7 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
 	: "r" (&v->counter), "r" (i)
 	: "cc");
 }
+#endif
 
 static inline void atomic64_add(u64 i, atomic64_t *v)
 {
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index e1489c54cd1..bff71388e72 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -363,4 +363,79 @@ static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
 	flush_cache_all();
 }
 
+/*
+ * Memory synchronization helpers for mixed cached vs non cached accesses.
+ *
+ * Some synchronization algorithms have to set states in memory with the
+ * cache enabled or disabled depending on the code path. It is crucial
+ * to always ensure proper cache maintenance to update main memory right
+ * away in that case.
+ *
+ * Any cached write must be followed by a cache clean operation.
+ * Any cached read must be preceded by a cache invalidate operation.
+ * Yet, in the read case, a cache flush i.e. atomic clean+invalidate
+ * operation is needed to avoid discarding possible concurrent writes to the
+ * accessed memory.
+ *
+ * Also, in order to prevent a cached writer from interfering with an
+ * adjacent non-cached writer, each state variable must be located to
+ * a separate cache line.
+ */
+
+/*
+ * This needs to be >= the max cache writeback size of all
+ * supported platforms included in the current kernel configuration.
+ * This is used to align state variables to their own cache lines.
+ */
+#define __CACHE_WRITEBACK_ORDER 6  /* guessed from existing platforms */
+#define __CACHE_WRITEBACK_GRANULE (1 << __CACHE_WRITEBACK_ORDER)
+
+/*
+ * There is no __cpuc_clean_dcache_area but we use it anyway for
+ * code intent clarity, and alias it to __cpuc_flush_dcache_area.
+ */
+#define __cpuc_clean_dcache_area __cpuc_flush_dcache_area
+
+/*
+ * Ensure preceding writes to *p by this CPU are visible to
+ * subsequent reads by other CPUs:
+ */
+static inline void __sync_cache_range_w(volatile void *p, size_t size)
+{
+	char *_p = (char *)p;
+
+	__cpuc_clean_dcache_area(_p, size);
+	outer_clean_range(__pa(_p), __pa(_p + size));
+}
+
+/*
+ * Ensure preceding writes to *p by other CPUs are visible to
+ * subsequent reads by this CPU. We must be careful not to
+ * discard data simultaneously written by another CPU, hence the
+ * usage of flush rather than invalidate operations.
+ */
+static inline void __sync_cache_range_r(volatile void *p, size_t size)
+{
+	char *_p = (char *)p;
+
+#ifdef CONFIG_OUTER_CACHE
+	if (outer_cache.flush_range) {
+		/*
+		 * Ensure dirty data migrated from other CPUs into our cache
+		 * are cleaned out safely before the outer cache is cleaned:
+		 */
+		__cpuc_clean_dcache_area(_p, size);
+
+		/* Clean and invalidate stale data for *p from outer ... */
+		outer_flush_range(__pa(_p), __pa(_p + size));
+	}
+#endif
+
+	/* ... and inner cache: */
+	__cpuc_flush_dcache_area(_p, size);
+}
+
+#define sync_cache_w(ptr) __sync_cache_range_w(ptr, sizeof *(ptr))
+#define sync_cache_r(ptr) __sync_cache_range_r(ptr, sizeof *(ptr))
+
 #endif
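The intended pattern for these helpers is that each shared state variable gets its own writeback granule, the writer publishes with sync_cache_w(), and the reader fetches with sync_cache_r(). A minimal usage sketch (illustrative only, not part of the patch; the variable and function names are hypothetical):

    /* one state variable per cache line, as the comment above requires */
    static int cpu_state __aligned(__CACHE_WRITEBACK_GRANULE);

    static void publish_state(int new_state)
    {
        cpu_state = new_state;      /* cached write ...           */
        sync_cache_w(&cpu_state);   /* ... cleaned to main memory */
    }

    static int read_remote_state(void)
    {
        /* flush (clean+invalidate), not a plain invalidate, so a
           concurrent write by another CPU is not discarded */
        sync_cache_r(&cpu_state);
        return cpu_state;
    }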
diff --git a/arch/arm/include/asm/cp15.h b/arch/arm/include/asm/cp15.h
index 5ef4d8015a6..1f3262e99d8 100644
--- a/arch/arm/include/asm/cp15.h
+++ b/arch/arm/include/asm/cp15.h
@@ -42,6 +42,8 @@
 #define vectors_high()	(0)
 #endif
 
+#ifdef CONFIG_CPU_CP15
+
 extern unsigned long cr_no_alignment;	/* defined in entry-armv.S */
 extern unsigned long cr_alignment;	/* defined in entry-armv.S */
 
@@ -82,6 +84,18 @@ static inline void set_copro_access(unsigned int val)
 	isb();
 }
 
-#endif
+#else /* ifdef CONFIG_CPU_CP15 */
+
+/*
+ * cr_alignment and cr_no_alignment are tightly coupled to cp15 (at least in the
+ * minds of the developers). Yielding 0 for machines without a cp15 (and making
+ * it read-only) is fine for most cases and saves quite some #ifdeffery.
+ */
+#define cr_no_alignment	UL(0)
+#define cr_alignment	UL(0)
+
+#endif /* ifdef CONFIG_CPU_CP15 / else */
+
+#endif /* ifndef __ASSEMBLY__ */
 
 #endif
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index ad41ec2471e..7652712d1d1 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -38,6 +38,24 @@
 #define MPIDR_AFFINITY_LEVEL(mpidr, level) \
 	((mpidr >> (MPIDR_LEVEL_BITS * level)) & MPIDR_LEVEL_MASK)
 
+#define ARM_CPU_IMP_ARM			0x41
+#define ARM_CPU_IMP_INTEL		0x69
+
+#define ARM_CPU_PART_ARM1136		0xB360
+#define ARM_CPU_PART_ARM1156		0xB560
+#define ARM_CPU_PART_ARM1176		0xB760
+#define ARM_CPU_PART_ARM11MPCORE	0xB020
+#define ARM_CPU_PART_CORTEX_A8		0xC080
+#define ARM_CPU_PART_CORTEX_A9		0xC090
+#define ARM_CPU_PART_CORTEX_A5		0xC050
+#define ARM_CPU_PART_CORTEX_A15		0xC0F0
+#define ARM_CPU_PART_CORTEX_A7		0xC070
+
+#define ARM_CPU_XSCALE_ARCH_MASK	0xe000
+#define ARM_CPU_XSCALE_ARCH_V1		0x2000
+#define ARM_CPU_XSCALE_ARCH_V2		0x4000
+#define ARM_CPU_XSCALE_ARCH_V3		0x6000
+
 extern unsigned int processor_id;
 
 #ifdef CONFIG_CPU_CP15
@@ -50,6 +68,7 @@ extern unsigned int processor_id;
 		    : "cc");						\
 		__val;							\
 	})
+
 #define read_cpuid_ext(ext_reg)						\
 	({								\
 		unsigned int __val;					\
@@ -59,29 +78,24 @@ extern unsigned int processor_id;
 		    : "cc");						\
 		__val;							\
 	})
-#else
-#define read_cpuid(reg) (processor_id)
-#define read_cpuid_ext(reg) 0
-#endif
 
-#define ARM_CPU_IMP_ARM 0x41
-#define ARM_CPU_IMP_INTEL 0x69
+#else /* ifdef CONFIG_CPU_CP15 */
 
-#define ARM_CPU_PART_ARM1136 0xB360
-#define ARM_CPU_PART_ARM1156 0xB560
-#define ARM_CPU_PART_ARM1176 0xB760
-#define ARM_CPU_PART_ARM11MPCORE 0xB020
-#define ARM_CPU_PART_CORTEX_A8 0xC080
-#define ARM_CPU_PART_CORTEX_A9 0xC090
-#define ARM_CPU_PART_CORTEX_A5 0xC050
-#define ARM_CPU_PART_CORTEX_A15 0xC0F0
-#define ARM_CPU_PART_CORTEX_A7 0xC070
+/*
+ * read_cpuid and read_cpuid_ext should only ever be called on machines that
+ * have cp15 so warn on other usages.
+ */
+#define read_cpuid(reg)							\
+	({								\
+		WARN_ON_ONCE(1);					\
+		0;							\
+	})
 
-#define ARM_CPU_XSCALE_ARCH_MASK 0xe000
-#define ARM_CPU_XSCALE_ARCH_V1 0x2000
-#define ARM_CPU_XSCALE_ARCH_V2 0x4000
-#define ARM_CPU_XSCALE_ARCH_V3 0x6000
+#define read_cpuid_ext(reg) read_cpuid(reg)
+
+#endif /* ifdef CONFIG_CPU_CP15 / else */
 
+#ifdef CONFIG_CPU_CP15
 /*
  * The CPU ID never changes at run time, so we might as well tell the
  * compiler that it's constant.  Use this function to read the CPU ID
@@ -92,6 +106,15 @@ static inline unsigned int __attribute_const__ read_cpuid_id(void)
 	return read_cpuid(CPUID_ID);
 }
 
+#else /* ifdef CONFIG_CPU_CP15 */
+
+static inline unsigned int __attribute_const__ read_cpuid_id(void)
+{
+	return processor_id;
+}
+
+#endif /* ifdef CONFIG_CPU_CP15 / else */
+
 static inline unsigned int __attribute_const__ read_cpuid_implementor(void)
 {
 	return (read_cpuid_id() & 0xFF000000) >> 24;
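The new MIDR constants are meant to be matched against the fields of read_cpuid_id(): implementor in bits [31:24], part number in bits [15:4]. A sketch (the 0xff000000 and 0xfff0 masks are stated here as an assumption, mirroring read_cpuid_implementor() above):

    static bool cpu_is_cortex_a15(void)
    {
        unsigned int midr = read_cpuid_id();

        return ((midr & 0xff000000) >> 24) == ARM_CPU_IMP_ARM &&
               (midr & 0xfff0) == ARM_CPU_PART_CORTEX_A15;
    }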
diff --git a/arch/arm/include/asm/delay.h b/arch/arm/include/asm/delay.h
index 720799fd3a8..dff714d886d 100644
--- a/arch/arm/include/asm/delay.h
+++ b/arch/arm/include/asm/delay.h
@@ -24,7 +24,7 @@ extern struct arm_delay_ops {
 	void (*delay)(unsigned long);
 	void (*const_udelay)(unsigned long);
 	void (*udelay)(unsigned long);
-	bool const_clock;
+	unsigned long ticks_per_jiffy;
 } arm_delay_ops;
 
 #define __delay(n)		arm_delay_ops.delay(n)
diff --git a/arch/arm/include/asm/glue-cache.h b/arch/arm/include/asm/glue-cache.h
index cca9f15704e..ea289e1435e 100644
--- a/arch/arm/include/asm/glue-cache.h
+++ b/arch/arm/include/asm/glue-cache.h
@@ -19,14 +19,6 @@
 #undef _CACHE
 #undef MULTI_CACHE
 
-#if defined(CONFIG_CPU_CACHE_V3)
-# ifdef _CACHE
-#  define MULTI_CACHE 1
-# else
-#  define _CACHE v3
-# endif
-#endif
-
 #if defined(CONFIG_CPU_CACHE_V4)
 # ifdef _CACHE
 #  define MULTI_CACHE 1
diff --git a/arch/arm/include/asm/glue-df.h b/arch/arm/include/asm/glue-df.h
index 8cacbcda76d..b6e9f2c108b 100644
--- a/arch/arm/include/asm/glue-df.h
+++ b/arch/arm/include/asm/glue-df.h
@@ -18,12 +18,12 @@
 *	================
 *
 *	We have the following to choose from:
-*	  arm6		- ARM6 style
 *	  arm7		- ARM7 style
 *	  v4_early	- ARMv4 without Thumb early abort handler
 *	  v4t_late	- ARMv4 with Thumb late abort handler
 *	  v4t_early	- ARMv4 with Thumb early abort handler
-*	  v5tej_early	- ARMv5 with Thumb and Java early abort handler
+*	  v5t_early	- ARMv5 with Thumb early abort handler
+*	  v5tj_early	- ARMv5 with Thumb and Java early abort handler
 *	  xscale	- ARMv5 with Thumb with Xscale extensions
 *	  v6_early	- ARMv6 generic early abort handler
 *	  v7_early	- ARMv7 generic early abort handler
@@ -39,19 +39,19 @@
 # endif
 #endif
 
-#ifdef CONFIG_CPU_ABRT_LV4T
+#ifdef CONFIG_CPU_ABRT_EV4
 # ifdef CPU_DABORT_HANDLER
 #  define MULTI_DABORT 1
 # else
-#  define CPU_DABORT_HANDLER v4t_late_abort
+#  define CPU_DABORT_HANDLER v4_early_abort
 # endif
 #endif
 
-#ifdef CONFIG_CPU_ABRT_EV4
+#ifdef CONFIG_CPU_ABRT_LV4T
 # ifdef CPU_DABORT_HANDLER
 #  define MULTI_DABORT 1
 # else
-#  define CPU_DABORT_HANDLER v4_early_abort
+#  define CPU_DABORT_HANDLER v4t_late_abort
 # endif
 #endif
 
@@ -63,19 +63,19 @@
 # endif
 #endif
 
-#ifdef CONFIG_CPU_ABRT_EV5TJ
+#ifdef CONFIG_CPU_ABRT_EV5T
 # ifdef CPU_DABORT_HANDLER
 #  define MULTI_DABORT 1
 # else
-#  define CPU_DABORT_HANDLER v5tj_early_abort
+#  define CPU_DABORT_HANDLER v5t_early_abort
 # endif
 #endif
 
-#ifdef CONFIG_CPU_ABRT_EV5T
+#ifdef CONFIG_CPU_ABRT_EV5TJ
 # ifdef CPU_DABORT_HANDLER
 #  define MULTI_DABORT 1
 # else
-#  define CPU_DABORT_HANDLER v5t_early_abort
+#  define CPU_DABORT_HANDLER v5tj_early_abort
 # endif
 #endif
diff --git a/arch/arm/include/asm/hardware/iop3xx.h b/arch/arm/include/asm/hardware/iop3xx.h
index 02fe2fbe247..ed94b1a366a 100644
--- a/arch/arm/include/asm/hardware/iop3xx.h
+++ b/arch/arm/include/asm/hardware/iop3xx.h
@@ -37,7 +37,7 @@ extern int iop3xx_get_init_atu(void);
  * IOP3XX processor registers
  */
 #define IOP3XX_PERIPHERAL_PHYS_BASE	0xffffe000
-#define IOP3XX_PERIPHERAL_VIRT_BASE	0xfeffe000
+#define IOP3XX_PERIPHERAL_VIRT_BASE	0xfedfe000
 #define IOP3XX_PERIPHERAL_SIZE		0x00002000
 #define IOP3XX_PERIPHERAL_UPPER_PA (IOP3XX_PERIPHERAL_PHYS_BASE +\
 					IOP3XX_PERIPHERAL_SIZE - 1)
diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h
index 8c5e828f484..91b99abe7a9 100644
--- a/arch/arm/include/asm/highmem.h
+++ b/arch/arm/include/asm/highmem.h
@@ -41,6 +41,13 @@ extern void kunmap_high(struct page *page);
 #endif
 #endif
 
+/*
+ * Needed to be able to broadcast the TLB invalidation for kmap.
+ */
+#ifdef CONFIG_ARM_ERRATA_798181
+#undef ARCH_NEEDS_KMAP_HIGH_GET
+#endif
+
 #ifdef ARCH_NEEDS_KMAP_HIGH_GET
 extern void *kmap_high_get(struct page *page);
 #else
diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
index 7c3d813e15d..124623e5ef1 100644
--- a/arch/arm/include/asm/kvm_arm.h
+++ b/arch/arm/include/asm/kvm_arm.h
@@ -211,4 +211,8 @@
 
 #define HSR_HVC_IMM_MASK	((1UL << 16) - 1)
 
+#define HSR_DABT_S1PTW		(1U << 7)
+#define HSR_DABT_CM		(1U << 8)
+#define HSR_DABT_EA		(1U << 9)
+
 #endif /* __ARM_KVM_ARM_H__ */
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
index e4956f4e23e..18d50322a9e 100644
--- a/arch/arm/include/asm/kvm_asm.h
+++ b/arch/arm/include/asm/kvm_asm.h
@@ -75,7 +75,7 @@ extern char __kvm_hyp_code_end[];
 extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
 
 extern void __kvm_flush_vm_context(void);
-extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
+extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
 
 extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index fd611996bfb..82b4babead2 100644
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -22,11 +22,12 @@
 #include <linux/kvm_host.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_mmio.h>
+#include <asm/kvm_arm.h>
 
-u32 *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
-u32 *vcpu_spsr(struct kvm_vcpu *vcpu);
+unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
+unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu);
 
-int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run);
+bool kvm_condition_valid(struct kvm_vcpu *vcpu);
 void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr);
 void kvm_inject_undefined(struct kvm_vcpu *vcpu);
 void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
@@ -37,14 +38,14 @@ static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu)
 	return 1;
 }
 
-static inline u32 *vcpu_pc(struct kvm_vcpu *vcpu)
+static inline unsigned long *vcpu_pc(struct kvm_vcpu *vcpu)
 {
-	return (u32 *)&vcpu->arch.regs.usr_regs.ARM_pc;
+	return &vcpu->arch.regs.usr_regs.ARM_pc;
 }
 
-static inline u32 *vcpu_cpsr(struct kvm_vcpu *vcpu)
+static inline unsigned long *vcpu_cpsr(struct kvm_vcpu *vcpu)
 {
-	return (u32 *)&vcpu->arch.regs.usr_regs.ARM_cpsr;
+	return &vcpu->arch.regs.usr_regs.ARM_cpsr;
 }
 
 static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
@@ -69,4 +70,96 @@ static inline bool kvm_vcpu_reg_is_pc(struct kvm_vcpu *vcpu, int reg)
 	return reg == 15;
 }
 
+static inline u32 kvm_vcpu_get_hsr(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.fault.hsr;
+}
+
+static inline unsigned long kvm_vcpu_get_hfar(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.fault.hxfar;
+}
+
+static inline phys_addr_t kvm_vcpu_get_fault_ipa(struct kvm_vcpu *vcpu)
+{
+	return ((phys_addr_t)vcpu->arch.fault.hpfar & HPFAR_MASK) << 8;
+}
+
+static inline unsigned long kvm_vcpu_get_hyp_pc(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.fault.hyp_pc;
+}
+
+static inline bool kvm_vcpu_dabt_isvalid(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_get_hsr(vcpu) & HSR_ISV;
+}
+
+static inline bool kvm_vcpu_dabt_iswrite(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_get_hsr(vcpu) & HSR_WNR;
+}
+
+static inline bool kvm_vcpu_dabt_issext(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_get_hsr(vcpu) & HSR_SSE;
+}
+
+static inline int kvm_vcpu_dabt_get_rd(struct kvm_vcpu *vcpu)
+{
+	return (kvm_vcpu_get_hsr(vcpu) & HSR_SRT_MASK) >> HSR_SRT_SHIFT;
+}
+
+static inline bool kvm_vcpu_dabt_isextabt(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_EA;
+}
+
+static inline bool kvm_vcpu_dabt_iss1tw(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_S1PTW;
+}
+
+/* Get Access Size from a data abort */
+static inline int kvm_vcpu_dabt_get_as(struct kvm_vcpu *vcpu)
+{
+	switch ((kvm_vcpu_get_hsr(vcpu) >> 22) & 0x3) {
+	case 0:
+		return 1;
+	case 1:
+		return 2;
+	case 2:
+		return 4;
+	default:
+		kvm_err("Hardware is weird: SAS 0b11 is reserved\n");
+		return -EFAULT;
+	}
+}
+
+/* This one is not specific to Data Abort */
+static inline bool kvm_vcpu_trap_il_is32bit(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_get_hsr(vcpu) & HSR_IL;
+}
+
+static inline u8 kvm_vcpu_trap_get_class(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_get_hsr(vcpu) >> HSR_EC_SHIFT;
+}
+
+static inline bool kvm_vcpu_trap_is_iabt(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_trap_get_class(vcpu) == HSR_EC_IABT;
+}
+
+static inline u8 kvm_vcpu_trap_get_fault(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_get_hsr(vcpu) & HSR_FSC_TYPE;
+}
+
+static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_get_hsr(vcpu) & HSR_HVC_IMM_MASK;
+}
+
 #endif /* __ARM_KVM_EMULATE_H__ */
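Taken together, these accessors let an exit handler decode an MMIO data abort without touching raw HSR bits. A sketch of how a consumer might combine them (illustrative only; composing the full fault address from the HPFAR-derived IPA plus the page offset of HxFAR is an assumption that follows the accessor definitions above):

    static void sketch_decode_dabt(struct kvm_vcpu *vcpu)
    {
        if (kvm_vcpu_dabt_isvalid(vcpu)) {
            int len = kvm_vcpu_dabt_get_as(vcpu);    /* 1, 2 or 4 bytes */
            int rt = kvm_vcpu_dabt_get_rd(vcpu);     /* register operand */
            bool is_write = kvm_vcpu_dabt_iswrite(vcpu);
            phys_addr_t ipa = kvm_vcpu_get_fault_ipa(vcpu) |
                              (kvm_vcpu_get_hfar(vcpu) & ~PAGE_MASK);

            pr_debug("dabt: %s, %d bytes, r%d, ipa %pa\n",
                     is_write ? "write" : "read", len, rt, &ipa);
        }
    }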
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index d1736a53b12..0c4e643d939 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -80,6 +80,15 @@ struct kvm_mmu_memory_cache {
 	void *objects[KVM_NR_MEM_OBJS];
 };
 
+struct kvm_vcpu_fault_info {
+	u32 hsr;		/* Hyp Syndrome Register */
+	u32 hxfar;		/* Hyp Data/Inst. Fault Address Register */
+	u32 hpfar;		/* Hyp IPA Fault Address Register */
+	u32 hyp_pc;		/* PC when exception was taken from Hyp mode */
+};
+
+typedef struct vfp_hard_struct kvm_kernel_vfp_t;
+
 struct kvm_vcpu_arch {
 	struct kvm_regs regs;
 
@@ -93,13 +102,11 @@ struct kvm_vcpu_arch {
 	u32 midr;
 
 	/* Exception Information */
-	u32 hsr;		/* Hyp Syndrome Register */
-	u32 hxfar;		/* Hyp Data/Inst Fault Address Register */
-	u32 hpfar;		/* Hyp IPA Fault Address Register */
+	struct kvm_vcpu_fault_info fault;
 
 	/* Floating point registers (VFP and Advanced SIMD/NEON) */
-	struct vfp_hard_struct vfp_guest;
-	struct vfp_hard_struct *vfp_host;
+	kvm_kernel_vfp_t vfp_guest;
+	kvm_kernel_vfp_t *vfp_host;
 
 	/* VGIC state */
 	struct vgic_cpu vgic_cpu;
@@ -122,9 +129,6 @@ struct kvm_vcpu_arch {
 	/* Interrupt related fields */
 	u32 irq_lines;		/* IRQ and FIQ levels */
 
-	/* Hyp exception information */
-	u32 hyp_pc;		/* PC when exception was taken from Hyp mode */
-
 	/* Cache some mmu pages needed inside spinlock regions */
 	struct kvm_mmu_memory_cache mmu_page_cache;
 
@@ -181,4 +185,26 @@ struct kvm_one_reg;
 int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
 int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
 
+int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
+		int exception_index);
+
+static inline void __cpu_init_hyp_mode(unsigned long long pgd_ptr,
+				       unsigned long hyp_stack_ptr,
+				       unsigned long vector_ptr)
+{
+	unsigned long pgd_low, pgd_high;
+
+	pgd_low = (pgd_ptr & ((1ULL << 32) - 1));
+	pgd_high = (pgd_ptr >> 32ULL);
+
+	/*
+	 * Call initialization code, and switch to the full blown
+	 * HYP code. The init code doesn't need to preserve these registers as
+	 * r1-r3 and r12 are already callee save according to the AAPCS.
+	 * Note that we slightly misuse the prototype by casing the pgd_low to
+	 * a void *.
+	 */
+	kvm_call_hyp((void *)pgd_low, pgd_high, hyp_stack_ptr, vector_ptr);
+}
+
 #endif /* __ARM_KVM_HOST_H__ */
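The low/high split above exists because the HYP page table pointer is a 64-bit value handed through a 32-bit calling convention. A standalone demonstration of the arithmetic (plain user-space C, hypothetical pgd address, not kernel code):

    #include <stdio.h>

    int main(void)
    {
        unsigned long long pgd_ptr = 0x123456000ULL;  /* hypothetical */
        unsigned long pgd_low = pgd_ptr & ((1ULL << 32) - 1);
        unsigned long pgd_high = pgd_ptr >> 32;

        /* prints low=0x23456000 high=0x1 */
        printf("low=%#lx high=%#lx\n", pgd_low, pgd_high);
        return 0;
    }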
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 421a20b3487..970f3b5fa10 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -19,6 +19,18 @@
 #ifndef __ARM_KVM_MMU_H__
 #define __ARM_KVM_MMU_H__
 
+#include <asm/cacheflush.h>
+#include <asm/pgalloc.h>
+#include <asm/idmap.h>
+
+/*
+ * We directly use the kernel VA for the HYP, as we can directly share
+ * the mapping (HTTBR "covers" TTBR1).
+ */
+#define HYP_PAGE_OFFSET_MASK	(~0UL)
+#define HYP_PAGE_OFFSET		PAGE_OFFSET
+#define KERN_TO_HYP(kva)	(kva)
+
 int create_hyp_mappings(void *from, void *to);
 int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
 void free_hyp_pmds(void);
@@ -36,6 +48,16 @@ phys_addr_t kvm_mmu_get_httbr(void);
 int kvm_mmu_init(void);
 void kvm_clear_hyp_idmap(void);
 
+static inline void kvm_set_pte(pte_t *pte, pte_t new_pte)
+{
+	pte_val(*pte) = new_pte;
+	/*
+	 * flush_pmd_entry just takes a void pointer and cleans the necessary
+	 * cache entries, so we can reuse the function for ptes.
+	 */
+	flush_pmd_entry(pte);
+}
+
 static inline bool kvm_is_write_fault(unsigned long hsr)
 {
 	unsigned long hsr_ec = hsr >> HSR_EC_SHIFT;
@@ -47,4 +69,49 @@ static inline bool kvm_is_write_fault(unsigned long hsr)
 		return true;
 }
 
+static inline void kvm_clean_pgd(pgd_t *pgd)
+{
+	clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t));
+}
+
+static inline void kvm_clean_pmd_entry(pmd_t *pmd)
+{
+	clean_pmd_entry(pmd);
+}
+
+static inline void kvm_clean_pte(pte_t *pte)
+{
+	clean_pte_table(pte);
+}
+
+static inline void kvm_set_s2pte_writable(pte_t *pte)
+{
+	pte_val(*pte) |= L_PTE_S2_RDWR;
+}
+
+struct kvm;
+
+static inline void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn)
+{
+	/*
+	 * If we are going to insert an instruction page and the icache is
+	 * either VIPT or PIPT, there is a potential problem where the host
+	 * (or another VM) may have used the same page as this guest, and we
+	 * read incorrect data from the icache.  If we're using a PIPT cache,
+	 * we can invalidate just that page, but if we are using a VIPT cache
+	 * we need to invalidate the entire icache - damn shame - as written
+	 * in the ARM ARM (DDI 0406C.b - Page B3-1393).
+	 *
+	 * VIVT caches are tagged using both the ASID and the VMID and doesn't
+	 * need any kind of flushing (DDI 0406C.b - Page B3-1392).
+	 */
+	if (icache_is_pipt()) {
+		unsigned long hva = gfn_to_hva(kvm, gfn);
+		__cpuc_coherent_user_range(hva, hva + PAGE_SIZE);
+	} else if (!icache_is_vivt_asid_tagged()) {
+		/* any kind of VIPT cache */
+		__flush_icache_all();
+	}
+}
+
 #endif /* __ARM_KVM_MMU_H__ */
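A sketch of how a stage-2 fault handler might combine these primitives when installing a guest mapping (illustrative only, not from the patch; pfn_pte() and the PAGE_S2 protection value are assumptions):

    static void sketch_map_s2_page(pte_t *ptep, unsigned long pfn, bool writable)
    {
        pte_t new_pte = pfn_pte(pfn, PAGE_S2);  /* read-only by default */

        if (writable)
            kvm_set_s2pte_writable(&new_pte);

        /* kvm_set_pte() also cleans the entry to the PoC for the walker */
        kvm_set_pte(ptep, new_pte);
    }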
diff --git a/arch/arm/include/asm/kvm_vgic.h b/arch/arm/include/asm/kvm_vgic.h
index ab97207d9cd..343744e4809 100644
--- a/arch/arm/include/asm/kvm_vgic.h
+++ b/arch/arm/include/asm/kvm_vgic.h
@@ -21,7 +21,6 @@
 
 #include <linux/kernel.h>
 #include <linux/kvm.h>
-#include <linux/kvm_host.h>
 #include <linux/irqreturn.h>
 #include <linux/spinlock.h>
 #include <linux/types.h>
diff --git a/arch/arm/include/asm/mach/pci.h b/arch/arm/include/asm/mach/pci.h
index 5cf2e979b4b..7d2c3c84380 100644
--- a/arch/arm/include/asm/mach/pci.h
+++ b/arch/arm/include/asm/mach/pci.h
@@ -30,6 +30,11 @@ struct hw_pci {
 	void		(*postinit)(void);
 	u8		(*swizzle)(struct pci_dev *dev, u8 *pin);
 	int		(*map_irq)(const struct pci_dev *dev, u8 slot, u8 pin);
+	resource_size_t (*align_resource)(struct pci_dev *dev,
+					  const struct resource *res,
+					  resource_size_t start,
+					  resource_size_t size,
+					  resource_size_t align);
 };
 
 /*
@@ -51,6 +56,12 @@ struct pci_sys_data {
 	u8		(*swizzle)(struct pci_dev *, u8 *);
 					/* IRQ mapping			*/
 	int		(*map_irq)(const struct pci_dev *, u8, u8);
+					/* Resource alignement requirements */
+	resource_size_t (*align_resource)(struct pci_dev *dev,
+					  const struct resource *res,
+					  resource_size_t start,
+					  resource_size_t size,
+					  resource_size_t align);
 	void		*private_data;	/* platform controller private data	*/
 };
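A platform would hook the new callback from its hw_pci description. A hypothetical sketch (the names and the 1 MiB policy are invented for illustration):

    static resource_size_t foo_align_resource(struct pci_dev *dev,
                                              const struct resource *res,
                                              resource_size_t start,
                                              resource_size_t size,
                                              resource_size_t align)
    {
        /* force a larger alignment than the resource itself asks for */
        return ALIGN(start, SZ_1M);
    }

    static struct hw_pci foo_pci __initdata = {
        .nr_controllers = 1,
        .align_resource = foo_align_resource,
        /* ... ops, setup, map_irq, etc. as usual ... */
    };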
diff --git a/arch/arm/include/asm/mcpm.h b/arch/arm/include/asm/mcpm.h
new file mode 100644
index 00000000000..0f7b7620e9a
--- /dev/null
+++ b/arch/arm/include/asm/mcpm.h
@@ -0,0 +1,209 @@
+/*
+ * arch/arm/include/asm/mcpm.h
+ *
+ * Created by:  Nicolas Pitre, April 2012
+ * Copyright:   (C) 2012-2013  Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef MCPM_H
+#define MCPM_H
+
+/*
+ * Maximum number of possible clusters / CPUs per cluster.
+ *
+ * This should be sufficient for quite a while, while keeping the
+ * (assembly) code simpler.  When this starts to grow then we'll have
+ * to consider dynamic allocation.
+ */
+#define MAX_CPUS_PER_CLUSTER	4
+#define MAX_NR_CLUSTERS		2
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+#include <asm/cacheflush.h>
+
+/*
+ * Platform specific code should use this symbol to set up secondary
+ * entry location for processors to use when released from reset.
+ */
+extern void mcpm_entry_point(void);
+
+/*
+ * This is used to indicate where the given CPU from given cluster should
+ * branch once it is ready to re-enter the kernel using ptr, or NULL if it
+ * should be gated.  A gated CPU is held in a WFE loop until its vector
+ * becomes non NULL.
+ */
+void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr);
+
+/*
+ * CPU/cluster power operations API for higher subsystems to use.
+ */
+
+/**
+ * mcpm_cpu_power_up - make given CPU in given cluster runable
+ *
+ * @cpu: CPU number within given cluster
+ * @cluster: cluster number for the CPU
+ *
+ * The identified CPU is brought out of reset.  If the cluster was powered
+ * down then it is brought up as well, taking care not to let the other CPUs
+ * in the cluster run, and ensuring appropriate cluster setup.
+ *
+ * Caller must ensure the appropriate entry vector is initialized with
+ * mcpm_set_entry_vector() prior to calling this.
+ *
+ * This must be called in a sleepable context.  However, the implementation
+ * is strongly encouraged to return early and let the operation happen
+ * asynchronously, especially when significant delays are expected.
+ *
+ * If the operation cannot be performed then an error code is returned.
+ */
+int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster);
+
+/**
+ * mcpm_cpu_power_down - power the calling CPU down
+ *
+ * The calling CPU is powered down.
+ *
+ * If this CPU is found to be the "last man standing" in the cluster
+ * then the cluster is prepared for power-down too.
+ *
+ * This must be called with interrupts disabled.
+ *
+ * This does not return.  Re-entry in the kernel is expected via
+ * mcpm_entry_point.
+ */
+void mcpm_cpu_power_down(void);
+
+/**
+ * mcpm_cpu_suspend - bring the calling CPU in a suspended state
+ *
+ * @expected_residency: duration in microseconds the CPU is expected
+ *			to remain suspended, or 0 if unknown/infinity.
+ *
+ * The calling CPU is suspended.  The expected residency argument is used
+ * as a hint by the platform specific backend to implement the appropriate
+ * sleep state level according to the knowledge it has on wake-up latency
+ * for the given hardware.
+ *
+ * If this CPU is found to be the "last man standing" in the cluster
+ * then the cluster may be prepared for power-down too, if the expected
+ * residency makes it worthwhile.
+ *
+ * This must be called with interrupts disabled.
+ *
+ * This does not return.  Re-entry in the kernel is expected via
+ * mcpm_entry_point.
+ */
+void mcpm_cpu_suspend(u64 expected_residency);
+
+/**
+ * mcpm_cpu_powered_up - housekeeping work after a CPU has been powered up
+ *
+ * This lets the platform specific backend code perform needed housekeeping
+ * work.  This must be called by the newly activated CPU as soon as it is
+ * fully operational in kernel space, before it enables interrupts.
+ *
+ * If the operation cannot be performed then an error code is returned.
+ */
+int mcpm_cpu_powered_up(void);
+
+/*
+ * Platform specific methods used in the implementation of the above API.
+ */
+struct mcpm_platform_ops {
+	int (*power_up)(unsigned int cpu, unsigned int cluster);
+	void (*power_down)(void);
+	void (*suspend)(u64);
+	void (*powered_up)(void);
+};
+
+/**
+ * mcpm_platform_register - register platform specific power methods
+ *
+ * @ops: mcpm_platform_ops structure to register
+ *
+ * An error is returned if the registration has been done previously.
+ */
+int __init mcpm_platform_register(const struct mcpm_platform_ops *ops);
+
+/* Synchronisation structures for coordinating safe cluster setup/teardown: */
+
+/*
+ * When modifying this structure, make sure you update the MCPM_SYNC_ defines
+ * to match.
+ */
+struct mcpm_sync_struct {
+	/* individual CPU states */
+	struct {
+		s8 cpu __aligned(__CACHE_WRITEBACK_GRANULE);
+	} cpus[MAX_CPUS_PER_CLUSTER];
+
+	/* cluster state */
+	s8 cluster __aligned(__CACHE_WRITEBACK_GRANULE);
+
+	/* inbound-side state */
+	s8 inbound __aligned(__CACHE_WRITEBACK_GRANULE);
+};
+
+struct sync_struct {
+	struct mcpm_sync_struct clusters[MAX_NR_CLUSTERS];
+};
+
+extern unsigned long sync_phys;	/* physical address of *mcpm_sync */
+
+void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster);
+void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster);
+void __mcpm_outbound_leave_critical(unsigned int cluster, int state);
+bool __mcpm_outbound_enter_critical(unsigned int this_cpu, unsigned int cluster);
+int __mcpm_cluster_state(unsigned int cluster);
+
+int __init mcpm_sync_init(
+	void (*power_up_setup)(unsigned int affinity_level));
+
+void __init mcpm_smp_set_ops(void);
+
+#else
+
+/*
+ * asm-offsets.h causes trouble when included in .c files, and cacheflush.h
+ * cannot be included in asm files.  Let's work around the conflict like this.
+ */
+#include <asm/asm-offsets.h>
+#define __CACHE_WRITEBACK_GRANULE CACHE_WRITEBACK_GRANULE
+
+#endif /* ! __ASSEMBLY__ */
+
+/* Definitions for mcpm_sync_struct */
+#define CPU_DOWN		0x11
+#define CPU_COMING_UP		0x12
+#define CPU_UP			0x13
+#define CPU_GOING_DOWN		0x14
+
+#define CLUSTER_DOWN		0x21
+#define CLUSTER_UP		0x22
+#define CLUSTER_GOING_DOWN	0x23
+
+#define INBOUND_NOT_COMING_UP	0x31
+#define INBOUND_COMING_UP	0x32
+
+/*
+ * Offsets for the mcpm_sync_struct members, for use in asm.
+ * We don't want to make them global to the kernel via asm-offsets.c.
+ */
+#define MCPM_SYNC_CLUSTER_CPUS	0
+#define MCPM_SYNC_CPU_SIZE	__CACHE_WRITEBACK_GRANULE
+#define MCPM_SYNC_CLUSTER_CLUSTER \
+	(MCPM_SYNC_CLUSTER_CPUS + MCPM_SYNC_CPU_SIZE * MAX_CPUS_PER_CLUSTER)
+#define MCPM_SYNC_CLUSTER_INBOUND \
+	(MCPM_SYNC_CLUSTER_CLUSTER + __CACHE_WRITEBACK_GRANULE)
+#define MCPM_SYNC_CLUSTER_SIZE \
+	(MCPM_SYNC_CLUSTER_INBOUND + __CACHE_WRITEBACK_GRANULE)
+
+#endif
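For scale, with the 64-byte __CACHE_WRITEBACK_GRANULE from cacheflush.h above and MAX_CPUS_PER_CLUSTER = 4, the offsets at the end of this file work out to MCPM_SYNC_CLUSTER_CLUSTER = 0 + 64*4 = 256, MCPM_SYNC_CLUSTER_INBOUND = 256 + 64 = 320 and MCPM_SYNC_CLUSTER_SIZE = 320 + 64 = 384. As for how a platform consumes the API, a typical smp_operations boot hook looks roughly like this (a sketch; secondary_startup is the usual head.S entry point, everything else is hypothetical glue and error handling is elided):

    static int foo_boot_secondary(unsigned int cpu, struct task_struct *idle)
    {
        unsigned int mpidr = cpu_logical_map(cpu);
        unsigned int pcpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
        unsigned int pcluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

        /* tell MCPM where the CPU should re-enter the kernel ... */
        mcpm_set_entry_vector(pcpu, pcluster, secondary_startup);

        /* ... then release it from reset */
        return mcpm_cpu_power_up(pcpu, pcluster);
    }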
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index 863a6611323..a7b85e0d0cc 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -27,6 +27,8 @@ void __check_vmalloc_seq(struct mm_struct *mm);
 void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
 #define init_new_context(tsk,mm)	({ atomic64_set(&mm->context.id, 0); 0; })
 
+DECLARE_PER_CPU(atomic64_t, active_asids);
+
 #else	/* !CONFIG_CPU_HAS_ASID */
 
 #ifdef CONFIG_MMU
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index 6ef8afd1b64..86b8fe398b9 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -111,7 +111,7 @@
 #define L_PTE_S2_MT_WRITETHROUGH	(_AT(pteval_t, 0xa) << 2) /* MemAttr[3:0] */
 #define L_PTE_S2_MT_WRITEBACK		(_AT(pteval_t, 0xf) << 2) /* MemAttr[3:0] */
 #define L_PTE_S2_RDONLY			(_AT(pteval_t, 1) << 6)   /* HAP[1] */
-#define L_PTE_S2_RDWR			(_AT(pteval_t, 2) << 6)   /* HAP[2:1] */
+#define L_PTE_S2_RDWR			(_AT(pteval_t, 3) << 6)   /* HAP[2:1] */
 
 /*
  * Hyp-mode PL2 PTE definitions for LPAE.
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 80d6fc4dbe4..9bcd262a900 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -61,6 +61,15 @@ extern void __pgd_error(const char *file, int line, pgd_t);
 #define FIRST_USER_ADDRESS	PAGE_SIZE
 
 /*
+ * Use TASK_SIZE as the ceiling argument for free_pgtables() and
+ * free_pgd_range() to avoid freeing the modules pmd when LPAE is enabled (pmd
+ * page shared between user and kernel).
+ */
+#ifdef CONFIG_ARM_LPAE
+#define USER_PGTABLES_CEILING	TASK_SIZE
+#endif
+
+/*
 * The pgprot_* and protection_map entries will be fixed up in runtime
 * to include the cachable and bufferable bits based on memory policy,
 * as well as any architecture dependent bits like global/ASID and SMP
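The L_PTE_S2_RDWR change above is a straight bug fix in the stage-2 access permission field HAP[2:1] at PTE bits [7:6]: the old value 2 gives (2 << 6) = 0x80, i.e. HAP[2:1] = 0b10, which encodes write-only and makes guest reads of a "writable" page fault, while the new value 3 gives (3 << 6) = 0xc0, i.e. HAP[2:1] = 0b11, read/write as the macro name promises.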
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index cddda1f41f0..1995d1a8406 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -152,6 +152,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
 #define TIF_SYSCALL_AUDIT	9
 #define TIF_SYSCALL_TRACEPOINT	10
 #define TIF_SECCOMP		11	/* seccomp syscall filtering active */
+#define TIF_NOHZ		12	/* in adaptive nohz mode */
 #define TIF_USING_IWMMXT	17
 #define TIF_MEMDIE		18	/* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK	20
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index 4db8c8820f0..a3625d141c1 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -14,7 +14,6 @@
 
 #include <asm/glue.h>
 
-#define TLB_V3_PAGE	(1 << 0)
 #define TLB_V4_U_PAGE	(1 << 1)
 #define TLB_V4_D_PAGE	(1 << 2)
 #define TLB_V4_I_PAGE	(1 << 3)
@@ -22,7 +21,6 @@
 #define TLB_V6_D_PAGE	(1 << 5)
 #define TLB_V6_I_PAGE	(1 << 6)
 
-#define TLB_V3_FULL	(1 << 8)
 #define TLB_V4_U_FULL	(1 << 9)
 #define TLB_V4_D_FULL	(1 << 10)
 #define TLB_V4_I_FULL	(1 << 11)
@@ -52,7 +50,6 @@
 *	=============
 *
 *	We have the following to choose from:
-*	  v3    - ARMv3
 *	  v4    - ARMv4 without write buffer
 *	  v4wb  - ARMv4 with write buffer without I TLB flush entry instruction
 *	  v4wbi - ARMv4 with write buffer with I TLB flush entry instruction
@@ -169,7 +166,7 @@
 # define v6wbi_always_flags	(-1UL)
 #endif
 
-#define v7wbi_tlb_flags_smp	(TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
+#define v7wbi_tlb_flags_smp	(TLB_WB | TLB_BARRIER | \
				 TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | \
				 TLB_V7_UIS_ASID | TLB_V7_UIS_BP)
 #define v7wbi_tlb_flags_up	(TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
@@ -330,7 +327,6 @@ static inline void local_flush_tlb_all(void)
	if (tlb_flag(TLB_WB))
		dsb();

-	tlb_op(TLB_V3_FULL, "c6, c0, 0", zero);
	tlb_op(TLB_V4_U_FULL | TLB_V6_U_FULL, "c8, c7, 0", zero);
	tlb_op(TLB_V4_D_FULL | TLB_V6_D_FULL, "c8, c6, 0", zero);
	tlb_op(TLB_V4_I_FULL | TLB_V6_I_FULL, "c8, c5, 0", zero);
@@ -351,9 +347,8 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)
	if (tlb_flag(TLB_WB))
		dsb();

-	if (possible_tlb_flags & (TLB_V3_FULL|TLB_V4_U_FULL|TLB_V4_D_FULL|TLB_V4_I_FULL)) {
+	if (possible_tlb_flags & (TLB_V4_U_FULL|TLB_V4_D_FULL|TLB_V4_I_FULL)) {
		if (cpumask_test_cpu(get_cpu(), mm_cpumask(mm))) {
-			tlb_op(TLB_V3_FULL, "c6, c0, 0", zero);
			tlb_op(TLB_V4_U_FULL, "c8, c7, 0", zero);
			tlb_op(TLB_V4_D_FULL, "c8, c6, 0", zero);
			tlb_op(TLB_V4_I_FULL, "c8, c5, 0", zero);
@@ -385,9 +380,8 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
	if (tlb_flag(TLB_WB))
		dsb();

-	if (possible_tlb_flags & (TLB_V3_PAGE|TLB_V4_U_PAGE|TLB_V4_D_PAGE|TLB_V4_I_PAGE|TLB_V4_I_FULL) &&
+	if (possible_tlb_flags & (TLB_V4_U_PAGE|TLB_V4_D_PAGE|TLB_V4_I_PAGE|TLB_V4_I_FULL) &&
	    cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
-		tlb_op(TLB_V3_PAGE, "c6, c0, 0", uaddr);
		tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", uaddr);
		tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", uaddr);
		tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", uaddr);
@@ -418,7 +412,6 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
	if (tlb_flag(TLB_WB))
		dsb();

-	tlb_op(TLB_V3_PAGE, "c6, c0, 0", kaddr);
	tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", kaddr);
	tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", kaddr);
	tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", kaddr);
@@ -450,6 +443,21 @@ static inline void local_flush_bp_all(void)
		isb();
 }

+#ifdef CONFIG_ARM_ERRATA_798181
+static inline void dummy_flush_tlb_a15_erratum(void)
+{
+	/*
+	 * Dummy TLBIMVAIS. Using the unmapped address 0 and ASID 0.
+	 */
+	asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0));
+	dsb();
+}
+#else
+static inline void dummy_flush_tlb_a15_erratum(void)
+{
+}
+#endif
+
 /*
  * flush_pmd_entry
  *
diff --git a/arch/arm/include/debug/uncompress.h b/arch/arm/include/debug/uncompress.h
new file mode 100644
index 00000000000..0e2949b0fae
--- /dev/null
+++ b/arch/arm/include/debug/uncompress.h
@@ -0,0 +1,7 @@
+#ifdef CONFIG_DEBUG_UNCOMPRESS
+extern void putc(int c);
+#else
+static inline void putc(int c) {}
+#endif
+static inline void flush(void) {}
+static inline void arch_decomp_setup(void) {}
diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h
index 023bfeb367b..c1ee007523d 100644
--- a/arch/arm/include/uapi/asm/kvm.h
+++ b/arch/arm/include/uapi/asm/kvm.h
@@ -53,12 +53,12 @@
 #define KVM_ARM_FIQ_spsr	fiq_regs[7]
 
 struct kvm_regs {
-	struct pt_regs usr_regs;/* R0_usr - R14_usr, PC, CPSR */
-	__u32 svc_regs[3];	/* SP_svc, LR_svc, SPSR_svc */
-	__u32 abt_regs[3];	/* SP_abt, LR_abt, SPSR_abt */
-	__u32 und_regs[3];	/* SP_und, LR_und, SPSR_und */
-	__u32 irq_regs[3];	/* SP_irq, LR_irq, SPSR_irq */
-	__u32 fiq_regs[8];	/* R8_fiq - R14_fiq, SPSR_fiq */
+	struct pt_regs usr_regs;	/* R0_usr - R14_usr, PC, CPSR */
+	unsigned long svc_regs[3];	/* SP_svc, LR_svc, SPSR_svc */
+	unsigned long abt_regs[3];	/* SP_abt, LR_abt, SPSR_abt */
+	unsigned long und_regs[3];	/* SP_und, LR_und, SPSR_und */
+	unsigned long irq_regs[3];	/* SP_irq, LR_irq, SPSR_irq */
+	unsigned long fiq_regs[8];	/* R8_fiq - R14_fiq, SPSR_fiq */
 };
 
 /* Supported Processor Types */