Diffstat (limited to 'arch/cris/arch-v32/mm/tlb.c')
-rw-r--r--  arch/cris/arch-v32/mm/tlb.c | 67
1 file changed, 33 insertions(+), 34 deletions(-)
diff --git a/arch/cris/arch-v32/mm/tlb.c b/arch/cris/arch-v32/mm/tlb.c
index 8233406798d..c030d020660 100644
--- a/arch/cris/arch-v32/mm/tlb.c
+++ b/arch/cris/arch-v32/mm/tlb.c
@@ -9,12 +9,12 @@
#include <asm/tlb.h>
#include <asm/mmu_context.h>
-#include <asm/arch/hwregs/asm/mmu_defs_asm.h>
-#include <asm/arch/hwregs/supp_reg.h>
+#include <arch/hwregs/asm/mmu_defs_asm.h>
+#include <arch/hwregs/supp_reg.h>
#define UPDATE_TLB_SEL_IDX(val) \
-do { \
- unsigned long tlb_sel; \
+do { \
+ unsigned long tlb_sel; \
\
tlb_sel = REG_FIELD(mmu, rw_mm_tlb_sel, idx, val); \
SUPP_REG_WR(RW_MM_TLB_SEL, tlb_sel); \
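Note: UPDATE_TLB_SEL_IDX() is the "select" half of the select-then-write sequence the flush routines below perform on each TLB entry. A minimal sketch of that sequence follows; it is not this file's exact loop, NUM_TLB_ENTRIES and INVALID_PAGEID are assumed from the surrounding CRIS headers, and the MMU bank is assumed to be already chosen via SUPP_BANK_SEL():

	int idx;

	for (idx = 0; idx < NUM_TLB_ENTRIES; idx++) {
		/* Point the TLB-select register at entry idx. */
		UPDATE_TLB_SEL_IDX(idx);

		/* Overwrite the selected entry with an invalid PID.
		 * "idx & 0xf" varies the vpn so consecutive writes don't
		 * all land in the same 4-way group (see the comment in
		 * the next hunk). */
		SUPP_REG_WR(RW_MM_TLB_HI,
			    REG_FIELD(mmu, rw_mm_tlb_hi, pid, INVALID_PAGEID) |
			    REG_FIELD(mmu, rw_mm_tlb_hi, vpn, idx & 0xf));
		SUPP_REG_WR(RW_MM_TLB_LO, 0);
	}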
@@ -54,8 +54,7 @@ __flush_tlb_all(void)
* Mask with 0xf so similar TLB entries aren't written in the same 4-way
* entry group.
*/
- local_save_flags(flags);
- local_irq_disable();
+ local_irq_save(flags);
for (mmu = 1; mmu <= 2; mmu++) {
SUPP_BANK_SEL(mmu); /* Select the MMU */
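This hunk and the two below make the same substitution, so one note covers all three: local_irq_save() is the standard combined form of the two-call sequence it replaces, saving the current interrupt state into flags and disabling interrupts in a single primitive. Sketched side by side:

	unsigned long flags;

	/* Old form: save state, then disable, as two separate calls. */
	local_save_flags(flags);
	local_irq_disable();

	/* New form: one primitive, identical effect. */
	local_irq_save(flags);

	/* Both forms pair with the same unlock, already present at the
	 * end of each flush routine: */
	local_irq_restore(flags);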
@@ -92,8 +91,7 @@ __flush_tlb_mm(struct mm_struct *mm)
return;
/* Mark the TLB entries that match the page_id as invalid. */
- local_save_flags(flags);
- local_irq_disable();
+ local_irq_save(flags);
for (mmu = 1; mmu <= 2; mmu++) {
SUPP_BANK_SEL(mmu);
@@ -140,8 +138,7 @@ __flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
* Invalidate those TLB entries that match both the mm context and the
* requested virtual address.
*/
- local_save_flags(flags);
- local_irq_disable();
+ local_irq_save(flags);
for (mmu = 1; mmu <= 2; mmu++) {
SUPP_BANK_SEL(mmu);
@@ -175,34 +172,36 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
return 0;
}
+static DEFINE_SPINLOCK(mmu_context_lock);
+
/* Called in schedule() just before actually doing the switch_to. */
void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
- int cpu = smp_processor_id();
-
- /* Make sure there is a MMU context. */
- spin_lock(&next->page_table_lock);
- get_mmu_context(next);
- cpu_set(cpu, next->cpu_vm_mask);
- spin_unlock(&next->page_table_lock);
-
- /*
- * Remember the pgd for the fault handlers. Keep a seperate copy of it
- * because current and active_mm might be invalid at points where
- * there's still a need to derefer the pgd.
- */
- per_cpu(current_pgd, cpu) = next->pgd;
-
- /* Switch context in the MMU. */
- if (tsk && tsk->thread_info)
- {
- SPEC_REG_WR(SPEC_REG_PID, next->context.page_id | tsk->thread_info->tls);
- }
- else
- {
- SPEC_REG_WR(SPEC_REG_PID, next->context.page_id);
- }
+ if (prev != next) {
+ int cpu = smp_processor_id();
+
+ /* Make sure there is a MMU context. */
+ spin_lock(&mmu_context_lock);
+ get_mmu_context(next);
+ cpumask_set_cpu(cpu, mm_cpumask(next));
+ spin_unlock(&mmu_context_lock);
+
+ /*
+ * Remember the pgd for the fault handlers. Keep a separate
+ * copy of it because current and active_mm might be invalid
+	 * at points where there's still a need to dereference the pgd.
+ */
+ per_cpu(current_pgd, cpu) = next->pgd;
+
+ /* Switch context in the MMU. */
+ if (tsk && task_thread_info(tsk)) {
+ SPEC_REG_WR(SPEC_REG_PID, next->context.page_id |
+ task_thread_info(tsk)->tls);
+ } else {
+ SPEC_REG_WR(SPEC_REG_PID, next->context.page_id);
+ }
+ }
}
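Two things in the rewritten switch_mm() are worth spelling out. First, the prev != next guard skips the context grab and PID write entirely when the scheduler switches between threads sharing one mm, and context allocation is now serialized by a dedicated mmu_context_lock instead of overloading next->page_table_lock. Second, the per-CPU current_pgd copy is what the fault handlers read instead of current/active_mm. A hedged sketch of that consumer side, not part of this patch; pgd_index() and pgd_present() are generic kernel page-table helpers, and no_context is a hypothetical bail-out label:

	int cpu = smp_processor_id();
	pgd_t *pgd;

	/* Resolve the faulting address from the per-CPU pgd that
	 * switch_mm() keeps up to date; current and active_mm may be
	 * unreliable at this point. */
	pgd = per_cpu(current_pgd, cpu) + pgd_index(address);
	if (!pgd_present(*pgd))
		goto no_context;	/* hypothetical label */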