Diffstat (limited to 'arch/tile/kernel/tlb.c')
-rw-r--r--   arch/tile/kernel/tlb.c   31
1 file changed, 19 insertions, 12 deletions
diff --git a/arch/tile/kernel/tlb.c b/arch/tile/kernel/tlb.c
index 2dffc1044d8..f23b5351567 100644
--- a/arch/tile/kernel/tlb.c
+++ b/arch/tile/kernel/tlb.c
@@ -15,6 +15,7 @@
 
 #include <linux/cpumask.h>
 #include <linux/module.h>
+#include <linux/hugetlb.h>
 #include <asm/tlbflush.h>
 #include <asm/homecache.h>
 #include <hv/hypervisor.h>
@@ -34,13 +35,13 @@ void flush_tlb_mm(struct mm_struct *mm)
 {
         HV_Remote_ASID asids[NR_CPUS];
         int i = 0, cpu;
-        for_each_cpu(cpu, &mm->cpu_vm_mask) {
+        for_each_cpu(cpu, mm_cpumask(mm)) {
                 HV_Remote_ASID *asid = &asids[i++];
                 asid->y = cpu / smp_topology.width;
                 asid->x = cpu % smp_topology.width;
                 asid->asid = per_cpu(current_asid, cpu);
         }
-        flush_remote(0, HV_FLUSH_EVICT_L1I, &mm->cpu_vm_mask,
+        flush_remote(0, HV_FLUSH_EVICT_L1I, mm_cpumask(mm),
                      0, 0, 0, NULL, asids, i);
 }
 
@@ -49,29 +50,29 @@ void flush_tlb_current_task(void)
         flush_tlb_mm(current->mm);
 }
 
-void flush_tlb_page_mm(const struct vm_area_struct *vma, struct mm_struct *mm,
+void flush_tlb_page_mm(struct vm_area_struct *vma, struct mm_struct *mm,
                        unsigned long va)
 {
-        unsigned long size = hv_page_size(vma);
+        unsigned long size = vma_kernel_pagesize(vma);
         int cache = (vma->vm_flags & VM_EXEC) ? HV_FLUSH_EVICT_L1I : 0;
-        flush_remote(0, cache, &mm->cpu_vm_mask,
-                     va, size, size, &mm->cpu_vm_mask, NULL, 0);
+        flush_remote(0, cache, mm_cpumask(mm),
+                     va, size, size, mm_cpumask(mm), NULL, 0);
 }
 
-void flush_tlb_page(const struct vm_area_struct *vma, unsigned long va)
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
 {
         flush_tlb_page_mm(vma, vma->vm_mm, va);
 }
 EXPORT_SYMBOL(flush_tlb_page);
 
-void flush_tlb_range(const struct vm_area_struct *vma,
+void flush_tlb_range(struct vm_area_struct *vma,
                      unsigned long start, unsigned long end)
 {
-        unsigned long size = hv_page_size(vma);
+        unsigned long size = vma_kernel_pagesize(vma);
         struct mm_struct *mm = vma->vm_mm;
         int cache = (vma->vm_flags & VM_EXEC) ? HV_FLUSH_EVICT_L1I : 0;
-        flush_remote(0, cache, &mm->cpu_vm_mask, start, end - start, size,
-                     &mm->cpu_vm_mask, NULL, 0);
+        flush_remote(0, cache, mm_cpumask(mm), start, end - start, size,
+                     mm_cpumask(mm), NULL, 0);
 }
 
 void flush_tlb_all(void)
@@ -90,8 +91,14 @@ void flush_tlb_all(void)
         }
 }
 
+/*
+ * Callers need to flush the L1I themselves if necessary, e.g. for
+ * kernel module unload. Otherwise we assume callers are not using
+ * executable pgprot_t's. Using EVICT_L1I means that dataplane cpus
+ * will get an unnecessary interrupt otherwise.
+ */
 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
-        flush_remote(0, HV_FLUSH_EVICT_L1I, cpu_online_mask,
+        flush_remote(0, 0, NULL,
                      start, end - start, PAGE_SIZE, cpu_online_mask, NULL, 0);
 }
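
Note on the new <linux/hugetlb.h> include: it is pulled in for vma_kernel_pagesize(), which replaces the tile-private hv_page_size() helper when computing the page size passed to flush_remote(). The sketch below is a simplified illustration of what the generic helper returns, not the verbatim mm/hugetlb.c source; the function name is made up for this example.

        /*
         * Simplified sketch of vma_kernel_pagesize() behavior: the base
         * page size for ordinary VMAs, the hstate's huge page size for
         * hugetlb VMAs. Illustrative only.
         */
        #include <linux/mm.h>
        #include <linux/hugetlb.h>

        static unsigned long vma_pagesize_sketch(struct vm_area_struct *vma)
        {
                if (!is_vm_hugetlb_page(vma))
                        return PAGE_SIZE;                       /* normal mapping */
                return huge_page_size(hstate_vma(vma));         /* hugetlb mapping */
        }

With this, flush_tlb_page() and flush_tlb_range() hand flush_remote() the correct page size for huge-page VMAs as well as ordinary ones.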

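The flush_tlb_kernel_range() change shifts responsibility for instruction-cache coherence to the caller, as the new comment states. A hypothetical caller that unmaps executable kernel text (for example on module unload) might pair the TLB shootdown with an explicit L1I eviction; the helper name below is invented for illustration, and the flush_remote() call just reuses the argument pattern visible elsewhere in this file.

        #include <linux/cpumask.h>
        #include <asm/tlbflush.h>
        #include <asm/homecache.h>
        #include <hv/hypervisor.h>

        /* Hypothetical caller-side helper, not part of this patch. */
        static void unmap_exec_range_sketch(unsigned long start, unsigned long end)
        {
                /* TLB shootdown only; no icache eviction, so dataplane cpus
                 * are not interrupted on behalf of non-executable mappings. */
                flush_tlb_kernel_range(start, end);

                /* Caller-side L1I eviction on all online cpus, mirroring the
                 * cache-only flush_remote() usage in flush_tlb_mm(). */
                flush_remote(0, HV_FLUSH_EVICT_L1I, cpu_online_mask,
                             0, 0, 0, NULL, NULL, 0);
        }

Keeping the L1I eviction out of the common path is exactly what avoids the unnecessary interrupt on dataplane cpus that the patch's comment calls out.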