Diffstat (limited to 'arch/arm/mm/context.c')
-rw-r--r--   arch/arm/mm/context.c | 305
1 file changed, 204 insertions(+), 101 deletions(-)
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index b0ee9ba3cfa..6eb97b3a748 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -2,6 +2,9 @@
  * linux/arch/arm/mm/context.c
  *
  * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
+ * Copyright (C) 2012 ARM Limited
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -14,144 +17,244 @@
 #include <linux/percpu.h>
 
 #include <asm/mmu_context.h>
+#include <asm/smp_plat.h>
+#include <asm/thread_notify.h>
 #include <asm/tlbflush.h>
-
-static DEFINE_SPINLOCK(cpu_asid_lock);
-unsigned int cpu_last_asid = ASID_FIRST_VERSION;
-#ifdef CONFIG_SMP
-DEFINE_PER_CPU(struct mm_struct *, current_mm);
-#endif
+#include <asm/proc-fns.h>
 
 /*
- * We fork()ed a process, and we need a new context for the child
- * to run in. We reserve version 0 for initial tasks so we will
- * always allocate an ASID. The ASID 0 is reserved for the TTBR
- * register changing sequence.
+ * On ARMv6, we have the following structure in the Context ID:
+ *
+ * 31                        7           0
+ * +-------------------------+-----------+
+ * |       process ID        |   ASID    |
+ * +-------------------------+-----------+
+ * |              context ID             |
+ * +-------------------------------------+
+ *
+ * The ASID is used to tag entries in the CPU caches and TLBs.
+ * The context ID is used by debuggers and trace logic, and
+ * should be unique within all running processes.
+ *
+ * In big endian operation, the two 32 bit words are swapped if accessed
+ * by non-64-bit operations.
  */
-void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
-{
-        mm->context.id = 0;
-        spin_lock_init(&mm->context.id_lock);
-}
+#define ASID_FIRST_VERSION      (1ULL << ASID_BITS)
+#define NUM_USER_ASIDS          ASID_FIRST_VERSION
 
-static void flush_context(void)
-{
-        /* set the reserved ASID before flushing the TLB */
-        asm("mcr p15, 0, %0, c13, c0, 1\n" : : "r" (0));
-        isb();
-        local_flush_tlb_all();
-        if (icache_is_vivt_asid_tagged()) {
-                __flush_icache_all();
-                dsb();
-        }
-}
+static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
+static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
+static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
 
-#ifdef CONFIG_SMP
+static DEFINE_PER_CPU(atomic64_t, active_asids);
+static DEFINE_PER_CPU(u64, reserved_asids);
+static cpumask_t tlb_flush_pending;
 
-static void set_mm_context(struct mm_struct *mm, unsigned int asid)
+#ifdef CONFIG_ARM_ERRATA_798181
+void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
+                             cpumask_t *mask)
 {
+        int cpu;
         unsigned long flags;
+        u64 context_id, asid;
 
-        /*
-         * Locking needed for multi-threaded applications where the
-         * same mm->context.id could be set from different CPUs during
-         * the broadcast. This function is also called via IPI so the
-         * mm->context.id_lock has to be IRQ-safe.
-         */
-        spin_lock_irqsave(&mm->context.id_lock, flags);
-        if (likely((mm->context.id ^ cpu_last_asid) >> ASID_BITS)) {
+        raw_spin_lock_irqsave(&cpu_asid_lock, flags);
+        context_id = mm->context.id.counter;
+        for_each_online_cpu(cpu) {
+                if (cpu == this_cpu)
+                        continue;
                 /*
-                 * Old version of ASID found. Set the new one and
-                 * reset mm_cpumask(mm).
+                 * We only need to send an IPI if the other CPUs are
+                 * running the same ASID as the one being invalidated.
                  */
-                mm->context.id = asid;
-                cpumask_clear(mm_cpumask(mm));
+                asid = per_cpu(active_asids, cpu).counter;
+                if (asid == 0)
+                        asid = per_cpu(reserved_asids, cpu);
+                if (context_id == asid)
+                        cpumask_set_cpu(cpu, mask);
         }
-        spin_unlock_irqrestore(&mm->context.id_lock, flags);
-
-        /*
-         * Set the mm_cpumask(mm) bit for the current CPU.
-         */
-        cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
+        raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
 }
+#endif
 
+#ifdef CONFIG_ARM_LPAE
 /*
- * Reset the ASID on the current CPU. This function call is broadcast
- * from the CPU handling the ASID rollover and holding cpu_asid_lock.
+ * With LPAE, the ASID and page tables are updated atomicly, so there is
+ * no need for a reserved set of tables (the active ASID tracking prevents
+ * any issues across a rollover).
  */
-static void reset_context(void *info)
+#define cpu_set_reserved_ttbr0()
+#else
+static void cpu_set_reserved_ttbr0(void)
 {
-        unsigned int asid;
-        unsigned int cpu = smp_processor_id();
-        struct mm_struct *mm = per_cpu(current_mm, cpu);
-
+        u32 ttb;
         /*
-         * Check if a current_mm was set on this CPU as it might still
-         * be in the early booting stages and using the reserved ASID.
+         * Copy TTBR1 into TTBR0.
+         * This points at swapper_pg_dir, which contains only global
+         * entries so any speculative walks are perfectly safe.
          */
-        if (!mm)
-                return;
+        asm volatile(
+        "       mrc     p15, 0, %0, c2, c0, 1           @ read TTBR1\n"
+        "       mcr     p15, 0, %0, c2, c0, 0           @ set TTBR0\n"
+        : "=r" (ttb));
+        isb();
+}
+#endif
 
-        smp_rmb();
-        asid = cpu_last_asid + cpu + 1;
+#ifdef CONFIG_PID_IN_CONTEXTIDR
+static int contextidr_notifier(struct notifier_block *unused, unsigned long cmd,
+                               void *t)
+{
+        u32 contextidr;
+        pid_t pid;
+        struct thread_info *thread = t;
 
-        flush_context();
-        set_mm_context(mm, asid);
+        if (cmd != THREAD_NOTIFY_SWITCH)
+                return NOTIFY_DONE;
 
-        /* set the new ASID */
-        asm("mcr p15, 0, %0, c13, c0, 1\n" : : "r" (mm->context.id));
+        pid = task_pid_nr(thread->task) << ASID_BITS;
+        asm volatile(
+        "       mrc     p15, 0, %0, c13, c0, 1\n"
+        "       and     %0, %0, %2\n"
+        "       orr     %0, %0, %1\n"
+        "       mcr     p15, 0, %0, c13, c0, 1\n"
+        : "=r" (contextidr), "+r" (pid)
+        : "I" (~ASID_MASK));
         isb();
+
+        return NOTIFY_OK;
 }
 
-#else
+static struct notifier_block contextidr_notifier_block = {
+        .notifier_call = contextidr_notifier,
+};
 
-static inline void set_mm_context(struct mm_struct *mm, unsigned int asid)
+static int __init contextidr_notifier_init(void)
 {
-        mm->context.id = asid;
-        cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id()));
+        return thread_register_notifier(&contextidr_notifier_block);
 }
-
+arch_initcall(contextidr_notifier_init);
 #endif
 
-void __new_context(struct mm_struct *mm)
+static void flush_context(unsigned int cpu)
 {
-        unsigned int asid;
+        int i;
+        u64 asid;
 
-        spin_lock(&cpu_asid_lock);
-#ifdef CONFIG_SMP
-        /*
-         * Check the ASID again, in case the change was broadcast from
-         * another CPU before we acquired the lock.
-         */
-        if (unlikely(((mm->context.id ^ cpu_last_asid) >> ASID_BITS) == 0)) {
-                cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
-                spin_unlock(&cpu_asid_lock);
-                return;
+        /* Update the list of reserved ASIDs and the ASID bitmap. */
+        bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
+        for_each_possible_cpu(i) {
+                if (i == cpu) {
+                        asid = 0;
+                } else {
+                        asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
+                        /*
+                         * If this CPU has already been through a
+                         * rollover, but hasn't run another task in
+                         * the meantime, we must preserve its reserved
+                         * ASID, as this is the only trace we have of
+                         * the process it is still running.
+                         */
+                        if (asid == 0)
+                                asid = per_cpu(reserved_asids, i);
+                        __set_bit(asid & ~ASID_MASK, asid_map);
+                }
+                per_cpu(reserved_asids, i) = asid;
         }
-#endif
-        /*
-         * At this point, it is guaranteed that the current mm (with
-         * an old ASID) isn't active on any other CPU since the ASIDs
-         * are changed simultaneously via IPI.
-         */
-        asid = ++cpu_last_asid;
-        if (asid == 0)
-                asid = cpu_last_asid = ASID_FIRST_VERSION;
+
+        /* Queue a TLB invalidate and flush the I-cache if necessary. */
+        cpumask_setall(&tlb_flush_pending);
+
+        if (icache_is_vivt_asid_tagged())
+                __flush_icache_all();
+}
+
+static int is_reserved_asid(u64 asid)
+{
+        int cpu;
+        for_each_possible_cpu(cpu)
+                if (per_cpu(reserved_asids, cpu) == asid)
+                        return 1;
+        return 0;
+}
+
+static u64 new_context(struct mm_struct *mm, unsigned int cpu)
+{
+        static u32 cur_idx = 1;
+        u64 asid = atomic64_read(&mm->context.id);
+        u64 generation = atomic64_read(&asid_generation);
+
+        if (asid != 0 && is_reserved_asid(asid)) {
+                /*
+                 * Our current ASID was active during a rollover, we can
+                 * continue to use it and this was just a false alarm.
+                 */
+                asid = generation | (asid & ~ASID_MASK);
+        } else {
+                /*
+                 * Allocate a free ASID. If we can't find one, take a
+                 * note of the currently active ASIDs and mark the TLBs
+                 * as requiring flushes. We always count from ASID #1,
+                 * as we reserve ASID #0 to switch via TTBR0 and to
+                 * avoid speculative page table walks from hitting in
+                 * any partial walk caches, which could be populated
+                 * from overlapping level-1 descriptors used to map both
+                 * the module area and the userspace stack.
+                 */
+                asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
+                if (asid == NUM_USER_ASIDS) {
+                        generation = atomic64_add_return(ASID_FIRST_VERSION,
+                                                         &asid_generation);
+                        flush_context(cpu);
+                        asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
+                }
+                __set_bit(asid, asid_map);
+                cur_idx = asid;
+                asid |= generation;
+                cpumask_clear(mm_cpumask(mm));
+        }
+
+        return asid;
+}
+
+void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
+{
+        unsigned long flags;
+        unsigned int cpu = smp_processor_id();
+        u64 asid;
+
+        if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
+                __check_vmalloc_seq(mm);
 
         /*
-         * If we've used up all our ASIDs, we need
-         * to start a new version and flush the TLB.
+         * We cannot update the pgd and the ASID atomicly with classic
+         * MMU, so switch exclusively to global mappings to avoid
+         * speculative page table walking with the wrong TTBR.
          */
-        if (unlikely((asid & ~ASID_MASK) == 0)) {
-                asid = cpu_last_asid + smp_processor_id() + 1;
-                flush_context();
-#ifdef CONFIG_SMP
-                smp_wmb();
-                smp_call_function(reset_context, NULL, 1);
-#endif
-                cpu_last_asid += NR_CPUS;
+        cpu_set_reserved_ttbr0();
+
+        asid = atomic64_read(&mm->context.id);
+        if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
+            && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
+                goto switch_mm_fastpath;
+
+        raw_spin_lock_irqsave(&cpu_asid_lock, flags);
+        /* Check that our ASID belongs to the current generation. */
+        asid = atomic64_read(&mm->context.id);
+        if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
+                asid = new_context(mm, cpu);
+                atomic64_set(&mm->context.id, asid);
+        }
+
+        if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
+                local_flush_bp_all();
+                local_flush_tlb_all();
         }
-        set_mm_context(mm, asid);
-        spin_unlock(&cpu_asid_lock);
+        atomic64_set(&per_cpu(active_asids, cpu), asid);
+        cpumask_set_cpu(cpu, mm_cpumask(mm));
+        raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
+
+switch_mm_fastpath:
+        cpu_switch_mm(mm->pgd, mm);
 }
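The patch above replaces the IPI-broadcast ASID rollover with a generation counter kept in the upper bits of mm->context.id plus a bitmap of hardware ASIDs. Purely as an illustration of that scheme, here is a minimal single-threaded user-space sketch; the names (asid_alloc, ASID_BITS_MODEL, NUM_ASIDS_MODEL, HW_ASID_MASK) are invented for the example and are not the kernel's API, and the per-CPU active/reserved tracking, locking and TLB/branch-predictor flushing done by the real code are deliberately left out.

/*
 * Hypothetical stand-alone model of the generation + bitmap allocator.
 * Not kernel code: per-CPU state, locking and TLB maintenance are omitted.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ASID_BITS_MODEL  8                        /* assume 256 hardware ASIDs */
#define NUM_ASIDS_MODEL  (1u << ASID_BITS_MODEL)
#define HW_ASID_MASK     ((uint64_t)NUM_ASIDS_MODEL - 1)
#define GENERATION_STEP  ((uint64_t)NUM_ASIDS_MODEL)

static uint64_t generation = GENERATION_STEP;     /* generation 0 means "never allocated" */
static unsigned char asid_in_use[NUM_ASIDS_MODEL];

/* Return a context ID: generation in the high bits, hardware ASID in the low bits. */
static uint64_t asid_alloc(uint64_t current_id)
{
        /* Fast path: the ID already belongs to the current generation, keep it. */
        if (current_id != 0 && !((current_id ^ generation) >> ASID_BITS_MODEL))
                return current_id;

        /* Slow path: grab a free hardware ASID, skipping ASID 0 (reserved). */
        for (unsigned int i = 1; i < NUM_ASIDS_MODEL; i++) {
                if (!asid_in_use[i]) {
                        asid_in_use[i] = 1;
                        return generation | i;
                }
        }

        /*
         * Rollover: bump the generation and recycle every hardware ASID.
         * The kernel flushes the TLBs before a recycled ASID is used again.
         */
        generation += GENERATION_STEP;
        memset(asid_in_use, 0, sizeof(asid_in_use));
        asid_in_use[1] = 1;
        return generation | 1;
}

int main(void)
{
        uint64_t a = asid_alloc(0);
        uint64_t b = asid_alloc(0);

        printf("A: generation %llu, asid %llu\n",
               (unsigned long long)(a >> ASID_BITS_MODEL),
               (unsigned long long)(a & HW_ASID_MASK));
        printf("B: generation %llu, asid %llu\n",
               (unsigned long long)(b >> ASID_BITS_MODEL),
               (unsigned long long)(b & HW_ASID_MASK));
        printf("A re-checked: %s\n", asid_alloc(a) == a ? "kept" : "reallocated");
        return 0;
}

The fast path in check_and_switch_context() relies on the same split: (asid ^ asid_generation) >> ASID_BITS discards the low ASID bits, so it is zero exactly when the generation bits match, and a task that already holds a current-generation ASID can switch without taking cpu_asid_lock.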
