Diffstat (limited to 'arch/arm/kernel/smp_tlb.c')
-rw-r--r--   arch/arm/kernel/smp_tlb.c | 65
1 file changed, 33 insertions(+), 32 deletions(-)
diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c
index 9a52a07aa40..95d063620b7 100644
--- a/arch/arm/kernel/smp_tlb.c
+++ b/arch/arm/kernel/smp_tlb.c
@@ -71,19 +71,36 @@ static inline void ipi_flush_bp_all(void *ignored)
 }
 
 #ifdef CONFIG_ARM_ERRATA_798181
-static int erratum_a15_798181(void)
+bool (*erratum_a15_798181_handler)(void);
+
+static bool erratum_a15_798181_partial(void)
 {
-	unsigned int midr = read_cpuid_id();
+	asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0));
+	dsb(ish);
+	return false;
+}
 
-	/* Cortex-A15 r0p0..r3p2 affected */
-	if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2)
-		return 0;
-	return 1;
+static bool erratum_a15_798181_broadcast(void)
+{
+	asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0));
+	dsb(ish);
+	return true;
 }
-#else
-static int erratum_a15_798181(void)
+
+void erratum_a15_798181_init(void)
 {
-	return 0;
+	unsigned int midr = read_cpuid_id();
+	unsigned int revidr = read_cpuid(CPUID_REVIDR);
+
+	/* Cortex-A15 r0p0..r3p2 w/o ECO fix affected */
+	if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2 ||
+	    (revidr & 0x210) == 0x210) {
+		return;
+	}
+	if (revidr & 0x10)
+		erratum_a15_798181_handler = erratum_a15_798181_partial;
+	else
+		erratum_a15_798181_handler = erratum_a15_798181_broadcast;
 }
 #endif
 
@@ -97,35 +114,19 @@ static void broadcast_tlb_a15_erratum(void)
 	if (!erratum_a15_798181())
 		return;
 
-	dummy_flush_tlb_a15_erratum();
 	smp_call_function(ipi_flush_tlb_a15_erratum, NULL, 1);
 }
 
 static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm)
 {
-	int cpu, this_cpu;
+	int this_cpu;
 	cpumask_t mask = { CPU_BITS_NONE };
 
 	if (!erratum_a15_798181())
 		return;
 
-	dummy_flush_tlb_a15_erratum();
 	this_cpu = get_cpu();
-	for_each_online_cpu(cpu) {
-		if (cpu == this_cpu)
-			continue;
-		/*
-		 * We only need to send an IPI if the other CPUs are running
-		 * the same ASID as the one being invalidated. There is no
-		 * need for locking around the active_asids check since the
-		 * switch_mm() function has at least one dmb() (as required by
-		 * this workaround) in case a context switch happens on
-		 * another CPU after the condition below.
-		 */
-		if (atomic64_read(&mm->context.id) ==
-		    atomic64_read(&per_cpu(active_asids, cpu)))
-			cpumask_set_cpu(cpu, &mask);
-	}
+	a15_erratum_get_cpumask(this_cpu, mm, &mask);
 	smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1);
 	put_cpu();
 }
@@ -135,7 +136,7 @@ void flush_tlb_all(void)
 	if (tlb_ops_need_broadcast())
 		on_each_cpu(ipi_flush_tlb_all, NULL, 1);
 	else
-		local_flush_tlb_all();
+		__flush_tlb_all();
 	broadcast_tlb_a15_erratum();
 }
 
@@ -144,7 +145,7 @@ void flush_tlb_mm(struct mm_struct *mm)
 	if (tlb_ops_need_broadcast())
 		on_each_cpu_mask(mm_cpumask(mm), ipi_flush_tlb_mm, mm, 1);
 	else
-		local_flush_tlb_mm(mm);
+		__flush_tlb_mm(mm);
 	broadcast_tlb_mm_a15_erratum(mm);
 }
 
@@ -157,7 +158,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
 		on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page,
 					&ta, 1);
 	} else
-		local_flush_tlb_page(vma, uaddr);
+		__flush_tlb_page(vma, uaddr);
 	broadcast_tlb_mm_a15_erratum(vma->vm_mm);
 }
 
@@ -168,7 +169,7 @@ void flush_tlb_kernel_page(unsigned long kaddr)
 		ta.ta_start = kaddr;
 		on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
 	} else
-		local_flush_tlb_kernel_page(kaddr);
+		__flush_tlb_kernel_page(kaddr);
 	broadcast_tlb_a15_erratum();
 }
 
@@ -204,5 +205,5 @@ void flush_bp_all(void)
 	if (tlb_ops_need_broadcast())
 		on_each_cpu(ipi_flush_bp_all, NULL, 1);
 	else
-		local_flush_bp_all();
+		__flush_bp_all();
 }
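
Note on the erratum dispatch: the static erratum_a15_798181() check is replaced by a function pointer, erratum_a15_798181_handler, installed once by erratum_a15_798181_init() based on MIDR and REVIDR. Both handler variants issue the dummy inner-shareable TLB invalidate themselves (which is why the explicit dummy_flush_tlb_a15_erratum() calls disappear from the broadcast paths); the partial variant, selected when REVIDR bit 4 indicates the ECO fix, returns false so no IPI broadcast follows, while the broadcast variant returns true. The wrapper that the call sites above still invoke is outside this file-limited diff; a minimal sketch of how such a wrapper could consult the pointer, given as an assumption rather than the actual header change, is:

	/* Sketch only: a plausible header-side wrapper around the new handler
	 * pointer.  The real declaration lives outside arch/arm/kernel/smp_tlb.c
	 * and is not shown in this diff. */
	extern bool (*erratum_a15_798181_handler)(void);

	static inline bool erratum_a15_798181(void)
	{
		/* No handler installed means the CPU is unaffected (or the
		 * config option is off), so the workaround is a no-op. */
		if (IS_ENABLED(CONFIG_ARM_ERRATA_798181) &&
		    erratum_a15_798181_handler)
			return erratum_a15_798181_handler();
		return false;
	}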

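Similarly, the open-coded scan over online CPUs that compared each CPU's active ASID against the mm being invalidated is collapsed into a single call to a15_erratum_get_cpumask(). Assuming that helper simply carries the removed loop over to wherever the active_asids bookkeeping lives, a rough sketch of its expected behaviour (placement and locking details are assumptions, not taken from this diff) is:

	/* Sketch only: presumed shape of a15_erratum_get_cpumask(), mirroring
	 * the loop removed from broadcast_tlb_mm_a15_erratum() above. */
	void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
				     cpumask_t *mask)
	{
		int cpu;
		u64 context_id = atomic64_read(&mm->context.id);

		for_each_online_cpu(cpu) {
			if (cpu == this_cpu)
				continue;
			/* Only IPI CPUs currently running the ASID being
			 * invalidated; switch_mm() provides the barrier needed
			 * if a context switch races with this check. */
			if (context_id ==
			    atomic64_read(&per_cpu(active_asids, cpu)))
				cpumask_set_cpu(cpu, mask);
		}
	}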