Diffstat (limited to 'arch/xtensa/mm/tlb.c')
-rw-r--r--  arch/xtensa/mm/tlb.c  52
1 file changed, 37 insertions, 15 deletions
diff --git a/arch/xtensa/mm/tlb.c b/arch/xtensa/mm/tlb.c
index ca9d2366bf1..5ece856c572 100644
--- a/arch/xtensa/mm/tlb.c
+++ b/arch/xtensa/mm/tlb.c
@@ -48,7 +48,7 @@ static inline void __flush_dtlb_all (void)
 }
 
 
-void flush_tlb_all (void)
+void local_flush_tlb_all(void)
 {
 	__flush_itlb_all();
 	__flush_dtlb_all();
@@ -60,19 +60,23 @@ void flush_tlb_all (void)
  * a new context will be assigned to it.
  */
 
-void flush_tlb_mm(struct mm_struct *mm)
+void local_flush_tlb_mm(struct mm_struct *mm)
 {
+	int cpu = smp_processor_id();
+
 	if (mm == current->active_mm) {
 		unsigned long flags;
 		local_irq_save(flags);
-		__get_new_mmu_context(mm);
-		__load_mmu_context(mm);
+		mm->context.asid[cpu] = NO_CONTEXT;
+		activate_context(mm, cpu);
 		local_irq_restore(flags);
+	} else {
+		mm->context.asid[cpu] = NO_CONTEXT;
+		mm->context.cpu = -1;
 	}
-	else
-		mm->context = 0;
 }
 
+
 #define _ITLB_ENTRIES (ITLB_ARF_WAYS << XCHAL_ITLB_ARF_ENTRIES_LOG2)
 #define _DTLB_ENTRIES (DTLB_ARF_WAYS << XCHAL_DTLB_ARF_ENTRIES_LOG2)
 #if _ITLB_ENTRIES > _DTLB_ENTRIES
@@ -81,24 +85,26 @@ void flush_tlb_mm(struct mm_struct *mm)
 # define _TLB_ENTRIES _DTLB_ENTRIES
 #endif
 
-void flush_tlb_range (struct vm_area_struct *vma,
-		      unsigned long start, unsigned long end)
+void local_flush_tlb_range(struct vm_area_struct *vma,
+		unsigned long start, unsigned long end)
 {
+	int cpu = smp_processor_id();
 	struct mm_struct *mm = vma->vm_mm;
 	unsigned long flags;
 
-	if (mm->context == NO_CONTEXT)
+	if (mm->context.asid[cpu] == NO_CONTEXT)
 		return;
 
 #if 0
 	printk("[tlbrange<%02lx,%08lx,%08lx>]\n",
-			(unsigned long)mm->context, start, end);
+			(unsigned long)mm->context.asid[cpu], start, end);
 #endif
 	local_irq_save(flags);
 
 	if (end-start + (PAGE_SIZE-1) <= _TLB_ENTRIES << PAGE_SHIFT) {
 		int oldpid = get_rasid_register();
-		set_rasid_register (ASID_INSERT(mm->context));
+
+		set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));
 		start &= PAGE_MASK;
 		if (vma->vm_flags & VM_EXEC)
 			while(start < end) {
@@ -114,24 +120,25 @@ void flush_tlb_range (struct vm_area_struct *vma,
 
 		set_rasid_register(oldpid);
 	} else {
-		flush_tlb_mm(mm);
+		local_flush_tlb_mm(mm);
 	}
 	local_irq_restore(flags);
 }
 
-void flush_tlb_page (struct vm_area_struct *vma, unsigned long page)
+void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 {
+	int cpu = smp_processor_id();
 	struct mm_struct* mm = vma->vm_mm;
 	unsigned long flags;
 	int oldpid;
 
-	if(mm->context == NO_CONTEXT)
+	if (mm->context.asid[cpu] == NO_CONTEXT)
 		return;
 
 	local_irq_save(flags);
 
 	oldpid = get_rasid_register();
-	set_rasid_register(ASID_INSERT(mm->context));
+	set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));
 
 	if (vma->vm_flags & VM_EXEC)
 		invalidate_itlb_mapping(page);
@@ -142,6 +149,21 @@ void flush_tlb_page (struct vm_area_struct *vma, unsigned long page)
 	local_irq_restore(flags);
 }
 
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+	if (end > start && start >= TASK_SIZE && end <= PAGE_OFFSET &&
+	    end - start < _TLB_ENTRIES << PAGE_SHIFT) {
+		start &= PAGE_MASK;
+		while (start < end) {
+			invalidate_itlb_mapping(start);
+			invalidate_dtlb_mapping(start);
+			start += PAGE_SIZE;
+		}
+	} else {
+		local_flush_tlb_all();
+	}
+}
+
 #ifdef CONFIG_DEBUG_TLB_SANITY
 
 static unsigned get_pte_for_vaddr(unsigned vaddr)
