Diffstat (limited to 'arch/mips/mm/c-r4k.c')
 -rw-r--r--  arch/mips/mm/c-r4k.c | 721
 1 file changed, 572 insertions(+), 149 deletions(-)
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 971f6c047b8..f2e8302fa70 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -3,16 +3,21 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
+ * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
* Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org)
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
*/
+#include <linux/cpu_pm.h>
+#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
+#include <linux/preempt.h>
#include <linux/sched.h>
+#include <linux/smp.h>
#include <linux/mm.h>
+#include <linux/module.h>
#include <linux/bitops.h>
#include <asm/bcache.h>
@@ -21,16 +26,17 @@
#include <asm/cacheops.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
+#include <asm/cpu-type.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/r4kcache.h>
#include <asm/sections.h>
-#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/war.h>
#include <asm/cacheflush.h> /* for run_uncached() */
-
+#include <asm/traps.h>
+#include <asm/dma-coherence.h>
/*
* Special Variant of smp_call_function for use by cache functions:
@@ -39,19 +45,25 @@
* o collapses to normal function call on UP kernels
* o collapses to normal function call on systems with a single shared
* primary cache.
+ * o doesn't disable interrupts on the local CPU
*/
-static inline void r4k_on_each_cpu(void (*func) (void *info), void *info,
- int retry, int wait)
+static inline void r4k_on_each_cpu(void (*func) (void *info), void *info)
{
preempt_disable();
-#if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
- smp_call_function(func, info, retry, wait);
+#ifndef CONFIG_MIPS_MT_SMP
+ smp_call_function(func, info, 1);
#endif
func(info);
preempt_enable();
}
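/*
 * Usage sketch (illustration, not part of this patch): with the 'retry'
 * and 'wait' parameters gone from smp_call_function(), callers of
 * r4k_on_each_cpu() now pass only the function and its argument, e.g.:
 *
 *      r4k_on_each_cpu(local_r4k___flush_cache_all, NULL);
 *
 * The helper runs func directly on the local CPU and broadcasts an IPI to
 * the others, except under CONFIG_MIPS_MT_SMP where the virtual CPUs
 * share a single primary cache and the local call is sufficient.
 */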
+#if defined(CONFIG_MIPS_CMP) || defined(CONFIG_MIPS_CPS)
+#define cpu_has_safe_index_cacheops 0
+#else
+#define cpu_has_safe_index_cacheops 1
+#endif
+
/*
* Must die.
*/
@@ -92,21 +104,66 @@ static inline void r4k_blast_dcache_page_dc32(unsigned long addr)
blast_dcache32_page(addr);
}
-static void __init r4k_blast_dcache_page_setup(void)
+static inline void r4k_blast_dcache_page_dc64(unsigned long addr)
+{
+ blast_dcache64_page(addr);
+}
+
+static inline void r4k_blast_dcache_page_dc128(unsigned long addr)
+{
+ blast_dcache128_page(addr);
+}
+
+static void r4k_blast_dcache_page_setup(void)
{
unsigned long dc_lsize = cpu_dcache_line_size();
- if (dc_lsize == 0)
+ switch (dc_lsize) {
+ case 0:
r4k_blast_dcache_page = (void *)cache_noop;
- else if (dc_lsize == 16)
+ break;
+ case 16:
r4k_blast_dcache_page = blast_dcache16_page;
- else if (dc_lsize == 32)
+ break;
+ case 32:
r4k_blast_dcache_page = r4k_blast_dcache_page_dc32;
+ break;
+ case 64:
+ r4k_blast_dcache_page = r4k_blast_dcache_page_dc64;
+ break;
+ case 128:
+ r4k_blast_dcache_page = r4k_blast_dcache_page_dc128;
+ break;
+ default:
+ break;
+ }
+}
+
+#ifndef CONFIG_EVA
+#define r4k_blast_dcache_user_page r4k_blast_dcache_page
+#else
+
+static void (*r4k_blast_dcache_user_page)(unsigned long addr);
+
+static void r4k_blast_dcache_user_page_setup(void)
+{
+ unsigned long dc_lsize = cpu_dcache_line_size();
+
+ if (dc_lsize == 0)
+ r4k_blast_dcache_user_page = (void *)cache_noop;
+ else if (dc_lsize == 16)
+ r4k_blast_dcache_user_page = blast_dcache16_user_page;
+ else if (dc_lsize == 32)
+ r4k_blast_dcache_user_page = blast_dcache32_user_page;
+ else if (dc_lsize == 64)
+ r4k_blast_dcache_user_page = blast_dcache64_user_page;
}
+#endif
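/*
 * Note (assumption, not part of this patch): under CONFIG_EVA, user
 * mappings are not necessarily reachable through the kernel's own cache
 * ops, so the *_user_page variants are presumed to use the EVA user-mode
 * cache operations, while the plain variants continue to handle addresses
 * obtained via kmap_coherent()/kmap_atomic().
 */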
+
static void (* r4k_blast_dcache_page_indexed)(unsigned long addr);
-static void __init r4k_blast_dcache_page_indexed_setup(void)
+static void r4k_blast_dcache_page_indexed_setup(void)
{
unsigned long dc_lsize = cpu_dcache_line_size();
@@ -116,11 +173,16 @@ static void __init r4k_blast_dcache_page_indexed_setup(void)
r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed;
else if (dc_lsize == 32)
r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed;
+ else if (dc_lsize == 64)
+ r4k_blast_dcache_page_indexed = blast_dcache64_page_indexed;
+ else if (dc_lsize == 128)
+ r4k_blast_dcache_page_indexed = blast_dcache128_page_indexed;
}
-static void (* r4k_blast_dcache)(void);
+void (* r4k_blast_dcache)(void);
+EXPORT_SYMBOL(r4k_blast_dcache);
-static void __init r4k_blast_dcache_setup(void)
+static void r4k_blast_dcache_setup(void)
{
unsigned long dc_lsize = cpu_dcache_line_size();
@@ -130,6 +192,10 @@ static void __init r4k_blast_dcache_setup(void)
r4k_blast_dcache = blast_dcache16;
else if (dc_lsize == 32)
r4k_blast_dcache = blast_dcache32;
+ else if (dc_lsize == 64)
+ r4k_blast_dcache = blast_dcache64;
+ else if (dc_lsize == 128)
+ r4k_blast_dcache = blast_dcache128;
}
/* force code alignment (used for TX49XX_ICACHE_INDEX_INV_WAR) */
@@ -140,7 +206,7 @@ static void __init r4k_blast_dcache_setup(void)
"1:\n\t" \
)
#define CACHE32_UNROLL32_ALIGN JUMP_TO_ALIGN(10) /* 32 * 32 = 1024 */
-#define CACHE32_UNROLL32_ALIGN2 JUMP_TO_ALIGN(11)
+#define CACHE32_UNROLL32_ALIGN2 JUMP_TO_ALIGN(11)
static inline void blast_r4600_v1_icache32(void)
{
@@ -157,7 +223,7 @@ static inline void tx49_blast_icache32(void)
unsigned long end = start + current_cpu_data.icache.waysize;
unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
unsigned long ws_end = current_cpu_data.icache.ways <<
- current_cpu_data.icache.waybit;
+ current_cpu_data.icache.waybit;
unsigned long ws, addr;
CACHE32_UNROLL32_ALIGN2;
@@ -188,7 +254,7 @@ static inline void tx49_blast_icache32_page_indexed(unsigned long page)
unsigned long end = start + PAGE_SIZE;
unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
unsigned long ws_end = current_cpu_data.icache.ways <<
- current_cpu_data.icache.waybit;
+ current_cpu_data.icache.waybit;
unsigned long ws, addr;
CACHE32_UNROLL32_ALIGN2;
@@ -205,7 +271,7 @@ static inline void tx49_blast_icache32_page_indexed(unsigned long page)
static void (* r4k_blast_icache_page)(unsigned long addr);
-static void __init r4k_blast_icache_page_setup(void)
+static void r4k_blast_icache_page_setup(void)
{
unsigned long ic_lsize = cpu_icache_line_size();
@@ -213,16 +279,41 @@ static void __init r4k_blast_icache_page_setup(void)
r4k_blast_icache_page = (void *)cache_noop;
else if (ic_lsize == 16)
r4k_blast_icache_page = blast_icache16_page;
+ else if (ic_lsize == 32 && current_cpu_type() == CPU_LOONGSON2)
+ r4k_blast_icache_page = loongson2_blast_icache32_page;
else if (ic_lsize == 32)
r4k_blast_icache_page = blast_icache32_page;
else if (ic_lsize == 64)
r4k_blast_icache_page = blast_icache64_page;
+ else if (ic_lsize == 128)
+ r4k_blast_icache_page = blast_icache128_page;
+}
+
+#ifndef CONFIG_EVA
+#define r4k_blast_icache_user_page r4k_blast_icache_page
+#else
+
+static void (*r4k_blast_icache_user_page)(unsigned long addr);
+
+static void r4k_blast_icache_user_page_setup(void)
+{
+ unsigned long ic_lsize = cpu_icache_line_size();
+
+ if (ic_lsize == 0)
+ r4k_blast_icache_user_page = (void *)cache_noop;
+ else if (ic_lsize == 16)
+ r4k_blast_icache_user_page = blast_icache16_user_page;
+ else if (ic_lsize == 32)
+ r4k_blast_icache_user_page = blast_icache32_user_page;
+ else if (ic_lsize == 64)
+ r4k_blast_icache_user_page = blast_icache64_user_page;
}
+#endif
static void (* r4k_blast_icache_page_indexed)(unsigned long addr);
-static void __init r4k_blast_icache_page_indexed_setup(void)
+static void r4k_blast_icache_page_indexed_setup(void)
{
unsigned long ic_lsize = cpu_icache_line_size();
@@ -237,6 +328,9 @@ static void __init r4k_blast_icache_page_indexed_setup(void)
else if (TX49XX_ICACHE_INDEX_INV_WAR)
r4k_blast_icache_page_indexed =
tx49_blast_icache32_page_indexed;
+ else if (current_cpu_type() == CPU_LOONGSON2)
+ r4k_blast_icache_page_indexed =
+ loongson2_blast_icache32_page_indexed;
else
r4k_blast_icache_page_indexed =
blast_icache32_page_indexed;
@@ -244,9 +338,10 @@ static void __init r4k_blast_icache_page_indexed_setup(void)
r4k_blast_icache_page_indexed = blast_icache64_page_indexed;
}
-static void (* r4k_blast_icache)(void);
+void (* r4k_blast_icache)(void);
+EXPORT_SYMBOL(r4k_blast_icache);
-static void __init r4k_blast_icache_setup(void)
+static void r4k_blast_icache_setup(void)
{
unsigned long ic_lsize = cpu_icache_line_size();
@@ -259,15 +354,19 @@ static void __init r4k_blast_icache_setup(void)
r4k_blast_icache = blast_r4600_v1_icache32;
else if (TX49XX_ICACHE_INDEX_INV_WAR)
r4k_blast_icache = tx49_blast_icache32;
+ else if (current_cpu_type() == CPU_LOONGSON2)
+ r4k_blast_icache = loongson2_blast_icache32;
else
r4k_blast_icache = blast_icache32;
} else if (ic_lsize == 64)
r4k_blast_icache = blast_icache64;
+ else if (ic_lsize == 128)
+ r4k_blast_icache = blast_icache128;
}
static void (* r4k_blast_scache_page)(unsigned long addr);
-static void __init r4k_blast_scache_page_setup(void)
+static void r4k_blast_scache_page_setup(void)
{
unsigned long sc_lsize = cpu_scache_line_size();
@@ -285,7 +384,7 @@ static void __init r4k_blast_scache_page_setup(void)
static void (* r4k_blast_scache_page_indexed)(unsigned long addr);
-static void __init r4k_blast_scache_page_indexed_setup(void)
+static void r4k_blast_scache_page_indexed_setup(void)
{
unsigned long sc_lsize = cpu_scache_line_size();
@@ -303,7 +402,7 @@ static void __init r4k_blast_scache_page_indexed_setup(void)
static void (* r4k_blast_scache)(void);
-static void __init r4k_blast_scache_setup(void)
+static void r4k_blast_scache_setup(void)
{
unsigned long sc_lsize = cpu_scache_line_size();
@@ -321,14 +420,9 @@ static void __init r4k_blast_scache_setup(void)
static inline void local_r4k___flush_cache_all(void * args)
{
-#if defined(CONFIG_CPU_LOONGSON2)
- r4k_blast_scache();
- return;
-#endif
- r4k_blast_dcache();
- r4k_blast_icache();
-
switch (current_cpu_type()) {
+ case CPU_LOONGSON2:
+ case CPU_LOONGSON3:
case CPU_R4000SC:
case CPU_R4000MC:
case CPU_R4400SC:
@@ -336,39 +430,78 @@ static inline void local_r4k___flush_cache_all(void * args)
case CPU_R10000:
case CPU_R12000:
case CPU_R14000:
+ /*
+ * These caches are inclusive caches, that is, if something
+ * is not cached in the S-cache, we know it also won't be
+ * in one of the primary caches.
+ */
r4k_blast_scache();
+ break;
+
+ default:
+ r4k_blast_dcache();
+ r4k_blast_icache();
+ break;
}
}
static void r4k___flush_cache_all(void)
{
- r4k_on_each_cpu(local_r4k___flush_cache_all, NULL, 1, 1);
+ r4k_on_each_cpu(local_r4k___flush_cache_all, NULL);
+}
+
+static inline int has_valid_asid(const struct mm_struct *mm)
+{
+#ifdef CONFIG_MIPS_MT_SMP
+ int i;
+
+ for_each_online_cpu(i)
+ if (cpu_context(i, mm))
+ return 1;
+
+ return 0;
+#else
+ return cpu_context(smp_processor_id(), mm);
+#endif
+}
+
+static void r4k__flush_cache_vmap(void)
+{
+ r4k_blast_dcache();
+}
+
+static void r4k__flush_cache_vunmap(void)
+{
+ r4k_blast_dcache();
}
static inline void local_r4k_flush_cache_range(void * args)
{
struct vm_area_struct *vma = args;
+ int exec = vma->vm_flags & VM_EXEC;
- if (!(cpu_context(smp_processor_id(), vma->vm_mm)))
+ if (!(has_valid_asid(vma->vm_mm)))
return;
r4k_blast_dcache();
+ if (exec)
+ r4k_blast_icache();
}
static void r4k_flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
- if (!cpu_has_dc_aliases)
- return;
+ int exec = vma->vm_flags & VM_EXEC;
- r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1, 1);
+ if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc))
+ r4k_on_each_cpu(local_r4k_flush_cache_range, vma);
}
static inline void local_r4k_flush_cache_mm(void * args)
{
struct mm_struct *mm = args;
- if (!cpu_context(smp_processor_id(), mm))
+ if (!has_valid_asid(mm))
return;
/*
@@ -393,7 +526,7 @@ static void r4k_flush_cache_mm(struct mm_struct *mm)
if (!cpu_has_dc_aliases)
return;
- r4k_on_each_cpu(local_r4k_flush_cache_mm, mm, 1, 1);
+ r4k_on_each_cpu(local_r4k_flush_cache_mm, mm);
}
struct flush_cache_page_args {
@@ -410,6 +543,7 @@ static inline void local_r4k_flush_cache_page(void *args)
struct page *page = pfn_to_page(fcp_args->pfn);
int exec = vma->vm_flags & VM_EXEC;
struct mm_struct *mm = vma->vm_mm;
+ int map_coherent = 0;
pgd_t *pgdp;
pud_t *pudp;
pmd_t *pmdp;
@@ -420,7 +554,7 @@ static inline void local_r4k_flush_cache_page(void *args)
* If ownes no valid ASID yet, cannot possibly have gotten
* this page into the cache.
*/
- if (cpu_context(smp_processor_id(), mm) == 0)
+ if (!has_valid_asid(mm))
return;
addr &= PAGE_MASK;
@@ -433,7 +567,7 @@ static inline void local_r4k_flush_cache_page(void *args)
* If the page isn't marked valid, the page cannot possibly be
* in the cache.
*/
- if (!(pte_val(*ptep) & _PAGE_PRESENT))
+ if (!(pte_present(*ptep)))
return;
if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID))
@@ -443,15 +577,18 @@ static inline void local_r4k_flush_cache_page(void *args)
* Use kmap_coherent or kmap_atomic to do flushes for
* another ASID than the current one.
*/
- if (cpu_has_dc_aliases)
+ map_coherent = (cpu_has_dc_aliases &&
+ page_mapped(page) && !Page_dcache_dirty(page));
+ if (map_coherent)
vaddr = kmap_coherent(page, addr);
else
- vaddr = kmap_atomic(page, KM_USER0);
+ vaddr = kmap_atomic(page);
addr = (unsigned long)vaddr;
}
if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
- r4k_blast_dcache_page(addr);
+ vaddr ? r4k_blast_dcache_page(addr) :
+ r4k_blast_dcache_user_page(addr);
if (exec && !cpu_icache_snoops_remote_store)
r4k_blast_scache_page(addr);
}
@@ -462,14 +599,15 @@ static inline void local_r4k_flush_cache_page(void *args)
if (cpu_context(cpu, mm) != 0)
drop_mmu_context(mm, cpu);
} else
- r4k_blast_icache_page(addr);
+ vaddr ? r4k_blast_icache_page(addr) :
+ r4k_blast_icache_user_page(addr);
}
if (vaddr) {
- if (cpu_has_dc_aliases)
+ if (map_coherent)
kunmap_coherent();
else
- kunmap_atomic(vaddr, KM_USER0);
+ kunmap_atomic(vaddr);
}
}
@@ -482,7 +620,7 @@ static void r4k_flush_cache_page(struct vm_area_struct *vma,
args.addr = addr;
args.pfn = pfn;
- r4k_on_each_cpu(local_r4k_flush_cache_page, &args, 1, 1);
+ r4k_on_each_cpu(local_r4k_flush_cache_page, &args);
}
static inline void local_r4k_flush_data_cache_page(void * addr)
@@ -492,7 +630,10 @@ static inline void local_r4k_flush_data_cache_page(void * addr)
static void r4k_flush_data_cache_page(unsigned long addr)
{
- r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr, 1, 1);
+ if (in_atomic())
+ local_r4k_flush_data_cache_page((void *)addr);
+ else
+ r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr);
}
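/*
 * Note (illustration, not part of this patch): smp_call_function() must
 * not be used from atomic context, as it may wait for other CPUs and
 * deadlock. Hence the in_atomic() check above falls back to flushing only
 * the local data cache instead of broadcasting to all CPUs.
 */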
struct flush_icache_range_args {
@@ -500,12 +641,8 @@ struct flush_icache_range_args {
unsigned long end;
};
-static inline void local_r4k_flush_icache_range(void *args)
+static inline void local_r4k_flush_icache_range(unsigned long start, unsigned long end)
{
- struct flush_icache_range_args *fir_args = args;
- unsigned long start = fir_args->start;
- unsigned long end = fir_args->end;
-
if (!cpu_has_ic_fills_f_dc) {
if (end - start >= dcache_size) {
r4k_blast_dcache();
@@ -513,19 +650,41 @@ static inline void local_r4k_flush_icache_range(void *args)
R4600_HIT_CACHEOP_WAR_IMPL;
protected_blast_dcache_range(start, end);
}
-
- if (!cpu_icache_snoops_remote_store && scache_size) {
- if (end - start > scache_size)
- r4k_blast_scache();
- else
- protected_blast_scache_range(start, end);
- }
}
if (end - start > icache_size)
r4k_blast_icache();
- else
- protected_blast_icache_range(start, end);
+ else {
+ switch (boot_cpu_type()) {
+ case CPU_LOONGSON2:
+ protected_loongson2_blast_icache_range(start, end);
+ break;
+
+ default:
+ protected_blast_icache_range(start, end);
+ break;
+ }
+ }
+#ifdef CONFIG_EVA
+ /*
+	 * Due to all possible segment mappings, there might be cache aliases
+	 * caused by the bootloader being in non-EVA mode and the CPU switching
+	 * to EVA during early kernel init. It's best to flush the scache
+	 * to avoid secondary cores fetching stale data, which could lead to
+	 * kernel crashes.
+ */
+ bc_wback_inv(start, (end - start));
+ __sync();
+#endif
+}
+
+static inline void local_r4k_flush_icache_range_ipi(void *args)
+{
+ struct flush_icache_range_args *fir_args = args;
+ unsigned long start = fir_args->start;
+ unsigned long end = fir_args->end;
+
+ local_r4k_flush_icache_range(start, end);
}
static void r4k_flush_icache_range(unsigned long start, unsigned long end)
@@ -535,22 +694,25 @@ static void r4k_flush_icache_range(unsigned long start, unsigned long end)
args.start = start;
args.end = end;
- r4k_on_each_cpu(local_r4k_flush_icache_range, &args, 1, 1);
+ r4k_on_each_cpu(local_r4k_flush_icache_range_ipi, &args);
instruction_hazard();
}
-#ifdef CONFIG_DMA_NONCOHERENT
+#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)
static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
/* Catch bad driver code */
BUG_ON(size == 0);
+ preempt_disable();
if (cpu_has_inclusive_pcaches) {
if (size >= scache_size)
r4k_blast_scache();
else
blast_scache_range(addr, addr + size);
+ preempt_enable();
+ __sync();
return;
}
@@ -559,14 +721,16 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
* subset property so we have to flush the primary caches
* explicitly
*/
- if (size >= dcache_size) {
+ if (cpu_has_safe_index_cacheops && size >= dcache_size) {
r4k_blast_dcache();
} else {
R4600_HIT_CACHEOP_WAR_IMPL;
blast_dcache_range(addr, addr + size);
}
+ preempt_enable();
bc_wback_inv(addr, size);
+ __sync();
}
static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
@@ -574,24 +738,38 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
/* Catch bad driver code */
BUG_ON(size == 0);
+ preempt_disable();
if (cpu_has_inclusive_pcaches) {
if (size >= scache_size)
r4k_blast_scache();
- else
- blast_scache_range(addr, addr + size);
+ else {
+ /*
+ * There is no clearly documented alignment requirement
+ * for the cache instruction on MIPS processors and
+ * some processors, among them the RM5200 and RM7000
+ * QED processors will throw an address error for cache
+ * hit ops with insufficient alignment. Solved by
+ * aligning the address to cache line size.
+ */
+ blast_inv_scache_range(addr, addr + size);
+ }
+ preempt_enable();
+ __sync();
return;
}
- if (size >= dcache_size) {
+ if (cpu_has_safe_index_cacheops && size >= dcache_size) {
r4k_blast_dcache();
} else {
R4600_HIT_CACHEOP_WAR_IMPL;
- blast_dcache_range(addr, addr + size);
+ blast_inv_dcache_range(addr, addr + size);
}
+ preempt_enable();
bc_inv(addr, size);
+ __sync();
}
-#endif /* CONFIG_DMA_NONCOHERENT */
+#endif /* CONFIG_DMA_NONCOHERENT || CONFIG_DMA_MAYBE_COHERENT */
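/*
 * Sketch (assumption, not part of this patch): the blast_inv_*_range()
 * helpers used above are presumed to round the start address down and the
 * end address up to the cache line size before issuing hit-type cache
 * ops, which avoids the RM5200/RM7000 address errors described in the
 * comment in r4k_dma_cache_inv(). Roughly:
 *
 *      lsize = cpu_dcache_line_size();   /* or cpu_scache_line_size() */
 *      start &= ~(lsize - 1);
 *      end = (end + lsize - 1) & ~(lsize - 1);
 */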
/*
* While we're protected against bad userland addresses we don't care
@@ -636,7 +814,7 @@ static void local_r4k_flush_cache_sigtramp(void * arg)
static void r4k_flush_cache_sigtramp(unsigned long addr)
{
- r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1, 1);
+ r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr);
}
static void r4k_flush_icache_all(void)
@@ -645,6 +823,39 @@ static void r4k_flush_icache_all(void)
r4k_blast_icache();
}
+struct flush_kernel_vmap_range_args {
+ unsigned long vaddr;
+ int size;
+};
+
+static inline void local_r4k_flush_kernel_vmap_range(void *args)
+{
+ struct flush_kernel_vmap_range_args *vmra = args;
+ unsigned long vaddr = vmra->vaddr;
+ int size = vmra->size;
+
+ /*
+ * Aliases only affect the primary caches so don't bother with
+ * S-caches or T-caches.
+ */
+ if (cpu_has_safe_index_cacheops && size >= dcache_size)
+ r4k_blast_dcache();
+ else {
+ R4600_HIT_CACHEOP_WAR_IMPL;
+ blast_dcache_range(vaddr, vaddr + size);
+ }
+}
+
+static void r4k_flush_kernel_vmap_range(unsigned long vaddr, int size)
+{
+ struct flush_kernel_vmap_range_args args;
+
+ args.vaddr = (unsigned long) vaddr;
+ args.size = size;
+
+ r4k_on_each_cpu(local_r4k_flush_kernel_vmap_range, &args);
+}
+
static inline void rm7k_erratum31(void)
{
const unsigned long ic_lsize = 32;
@@ -677,11 +888,40 @@ static inline void rm7k_erratum31(void)
}
}
-static char *way_string[] __initdata = { NULL, "direct mapped", "2-way",
+static inline void alias_74k_erratum(struct cpuinfo_mips *c)
+{
+ unsigned int imp = c->processor_id & PRID_IMP_MASK;
+ unsigned int rev = c->processor_id & PRID_REV_MASK;
+
+ /*
+ * Early versions of the 74K do not update the cache tags on a
+ * vtag miss/ptag hit which can occur in the case of KSEG0/KUSEG
+ * aliases. In this case it is better to treat the cache as always
+ * having aliases.
+ */
+ switch (imp) {
+ case PRID_IMP_74K:
+ if (rev <= PRID_REV_ENCODE_332(2, 4, 0))
+ c->dcache.flags |= MIPS_CACHE_VTAG;
+ if (rev == PRID_REV_ENCODE_332(2, 4, 0))
+ write_c0_config6(read_c0_config6() | MIPS_CONF6_SYND);
+ break;
+ case PRID_IMP_1074K:
+ if (rev <= PRID_REV_ENCODE_332(1, 1, 0)) {
+ c->dcache.flags |= MIPS_CACHE_VTAG;
+ write_c0_config6(read_c0_config6() | MIPS_CONF6_SYND);
+ }
+ break;
+ default:
+ BUG();
+ }
+}
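/*
 * Note (assumption, not part of this patch): PRID_REV_ENCODE_332() is
 * taken to pack a three-part core revision into the 8-bit PRID revision
 * field as 3+3+2 bits, roughly ((ver) << 5 | (rev) << 2 | (patch)), so a
 * check such as "rev <= PRID_REV_ENCODE_332(2, 4, 0)" compares revisions
 * numerically against 74K r2.4.0.
 */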
+
+static char *way_string[] = { NULL, "direct mapped", "2-way",
"3-way", "4-way", "5-way", "6-way", "7-way", "8-way"
};
-static void __init probe_pcache(void)
+static void probe_pcache(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
unsigned int config = read_c0_config();
@@ -689,7 +929,7 @@ static void __init probe_pcache(void)
unsigned long config1;
unsigned int lsize;
- switch (c->cputype) {
+ switch (current_cpu_type()) {
case CPU_R4600: /* QED style two way caches? */
case CPU_R4700:
case CPU_R5000:
@@ -719,7 +959,7 @@ static void __init probe_pcache(void)
c->dcache.ways = 2;
c->dcache.waybit = 0;
- c->options |= MIPS_CPU_CACHE_CDEX_P;
+ c->options |= MIPS_CPU_CACHE_CDEX_P | MIPS_CPU_PREFETCH;
break;
case CPU_TX49XX:
@@ -747,7 +987,7 @@ static void __init probe_pcache(void)
icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
c->icache.ways = 1;
- c->icache.waybit = 0; /* doesn't matter */
+ c->icache.waybit = 0; /* doesn't matter */
dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
@@ -806,7 +1046,7 @@ static void __init probe_pcache(void)
icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
c->icache.ways = 1;
- c->icache.waybit = 0; /* doesn't matter */
+ c->icache.waybit = 0; /* doesn't matter */
dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
@@ -819,7 +1059,6 @@ static void __init probe_pcache(void)
case CPU_RM7000:
rm7k_erratum31();
- case CPU_RM9000:
icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
c->icache.ways = 4;
@@ -830,9 +1069,7 @@ static void __init probe_pcache(void)
c->dcache.ways = 4;
c->dcache.waybit = __ffs(dcache_size / c->dcache.ways);
-#if !defined(CONFIG_SMP) || !defined(RM9000_CDEX_SMP_WAR)
c->options |= MIPS_CPU_CACHE_CDEX_P;
-#endif
c->options |= MIPS_CPU_PREFETCH;
break;
@@ -854,6 +1091,48 @@ static void __init probe_pcache(void)
c->dcache.waybit = 0;
break;
+ case CPU_LOONGSON3:
+ config1 = read_c0_config1();
+ lsize = (config1 >> 19) & 7;
+ if (lsize)
+ c->icache.linesz = 2 << lsize;
+ else
+ c->icache.linesz = 0;
+ c->icache.sets = 64 << ((config1 >> 22) & 7);
+ c->icache.ways = 1 + ((config1 >> 16) & 7);
+ icache_size = c->icache.sets *
+ c->icache.ways *
+ c->icache.linesz;
+ c->icache.waybit = 0;
+
+ lsize = (config1 >> 10) & 7;
+ if (lsize)
+ c->dcache.linesz = 2 << lsize;
+ else
+ c->dcache.linesz = 0;
+ c->dcache.sets = 64 << ((config1 >> 13) & 7);
+ c->dcache.ways = 1 + ((config1 >> 7) & 7);
+ dcache_size = c->dcache.sets *
+ c->dcache.ways *
+ c->dcache.linesz;
+ c->dcache.waybit = 0;
+ break;
+
+ case CPU_CAVIUM_OCTEON3:
+ /* For now lie about the number of ways. */
+ c->icache.linesz = 128;
+ c->icache.sets = 16;
+ c->icache.ways = 8;
+ c->icache.flags |= MIPS_CACHE_VTAG;
+ icache_size = c->icache.sets * c->icache.ways * c->icache.linesz;
+
+ c->dcache.linesz = 128;
+ c->dcache.ways = 8;
+ c->dcache.sets = 8;
+ dcache_size = c->dcache.sets * c->dcache.ways * c->dcache.linesz;
+ c->options |= MIPS_CPU_PREFETCH;
+ break;
+
default:
if (!(config & MIPS_CONF_M))
panic("Don't know how to probe P-caches on this cpu.");
@@ -864,16 +1143,20 @@ static void __init probe_pcache(void)
*/
config1 = read_c0_config1();
- if ((lsize = ((config1 >> 19) & 7)))
- c->icache.linesz = 2 << lsize;
- else
- c->icache.linesz = lsize;
- c->icache.sets = 64 << ((config1 >> 22) & 7);
+ lsize = (config1 >> 19) & 7;
+
+ /* IL == 7 is reserved */
+ if (lsize == 7)
+ panic("Invalid icache line size");
+
+ c->icache.linesz = lsize ? 2 << lsize : 0;
+
+ c->icache.sets = 32 << (((config1 >> 22) + 1) & 7);
c->icache.ways = 1 + ((config1 >> 16) & 7);
icache_size = c->icache.sets *
- c->icache.ways *
- c->icache.linesz;
+ c->icache.ways *
+ c->icache.linesz;
c->icache.waybit = __ffs(icache_size/c->icache.ways);
if (config & 0x8) /* VI bit */
@@ -884,16 +1167,20 @@ static void __init probe_pcache(void)
*/
c->dcache.flags = 0;
- if ((lsize = ((config1 >> 10) & 7)))
- c->dcache.linesz = 2 << lsize;
- else
- c->dcache.linesz= lsize;
- c->dcache.sets = 64 << ((config1 >> 13) & 7);
+ lsize = (config1 >> 10) & 7;
+
+ /* DL == 7 is reserved */
+ if (lsize == 7)
+ panic("Invalid dcache line size");
+
+ c->dcache.linesz = lsize ? 2 << lsize : 0;
+
+ c->dcache.sets = 32 << (((config1 >> 13) + 1) & 7);
c->dcache.ways = 1 + ((config1 >> 7) & 7);
dcache_size = c->dcache.sets *
- c->dcache.ways *
- c->dcache.linesz;
+ c->dcache.ways *
+ c->dcache.linesz;
c->dcache.waybit = __ffs(dcache_size/c->dcache.ways);
c->options |= MIPS_CPU_PREFETCH;
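/*
 * Worked example (illustration, not part of this patch): the Config1
 * IS/DS "sets per way" fields encode 0..6 as 64 << n sets, and the code
 * above maps the remaining value 7 to 32 sets. The expression
 * "32 << ((n + 1) & 7)" covers both:
 *
 *      n = 0  ->  32 << 1 = 64
 *      n = 6  ->  32 << 7 = 4096
 *      n = 7  ->  32 << 0 = 32
 */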
@@ -902,13 +1189,14 @@ static void __init probe_pcache(void)
/*
* Processor configuration sanity check for the R4000SC erratum
- * #5. With page sizes larger than 32kB there is no possibility
+ * #5. With page sizes larger than 32kB there is no possibility
* to get a VCE exception anymore so we don't care about this
* misconfiguration. The case is rather theoretical anyway;
* presumably no vendor is shipping his hardware in the "bad"
* configuration.
*/
- if ((prid & 0xff00) == PRID_IMP_R4000 && (prid & 0xff) < 0x40 &&
+ if ((prid & PRID_IMP_MASK) == PRID_IMP_R4000 &&
+ (prid & PRID_REV_MASK) < PRID_REV_R4400 &&
!(config & CONF_SC) && c->icache.linesz != 16 &&
PAGE_SIZE <= 0x8000)
panic("Improper R4000SC processor configuration detected");
@@ -928,11 +1216,12 @@ static void __init probe_pcache(void)
* normally they'd suffer from aliases but magic in the hardware deals
* with that for us so we don't need to take care ourselves.
*/
- switch (c->cputype) {
+ switch (current_cpu_type()) {
case CPU_20KC:
case CPU_25KF:
case CPU_SB1:
case CPU_SB1A:
+ case CPU_XLR:
c->dcache.flags |= MIPS_CACHE_PINDEX;
break;
@@ -941,12 +1230,27 @@ static void __init probe_pcache(void)
case CPU_R14000:
break;
+ case CPU_M14KC:
+ case CPU_M14KEC:
case CPU_24K:
case CPU_34K:
case CPU_74K:
- if ((read_c0_config7() & (1 << 16))) {
- /* effectively physically indexed dcache,
- thus no virtual aliases. */
+ case CPU_1004K:
+ case CPU_1074K:
+ case CPU_INTERAPTIV:
+ case CPU_P5600:
+ case CPU_PROAPTIV:
+ case CPU_M5150:
+ if ((c->cputype == CPU_74K) || (c->cputype == CPU_1074K))
+ alias_74k_erratum(c);
+ if (!(read_c0_config7() & MIPS_CONF7_IAR) &&
+ (c->icache.waysize > PAGE_SIZE))
+ c->icache.flags |= MIPS_CACHE_ALIASES;
+ if (read_c0_config7() & MIPS_CONF7_AR) {
+ /*
+ * Effectively physically indexed dcache,
+ * thus no virtual aliases.
+ */
c->dcache.flags |= MIPS_CACHE_PINDEX;
break;
}
@@ -955,7 +1259,7 @@ static void __init probe_pcache(void)
c->dcache.flags |= MIPS_CACHE_ALIASES;
}
- switch (c->cputype) {
+ switch (current_cpu_type()) {
case CPU_20KC:
/*
* Some older 20Kc chips doesn't have the 'VI' bit in
@@ -964,30 +1268,29 @@ static void __init probe_pcache(void)
c->icache.flags |= MIPS_CACHE_VTAG;
break;
- case CPU_AU1000:
- case CPU_AU1500:
- case CPU_AU1100:
- case CPU_AU1550:
- case CPU_AU1200:
+ case CPU_ALCHEMY:
c->icache.flags |= MIPS_CACHE_IC_F_DC;
break;
- }
-#ifdef CONFIG_CPU_LOONGSON2
- /*
- * LOONGSON2 has 4 way icache, but when using indexed cache op,
- * one op will act on all 4 ways
- */
- c->icache.ways = 1;
-#endif
+ case CPU_LOONGSON2:
+ /*
+ * LOONGSON2 has 4 way icache, but when using indexed cache op,
+ * one op will act on all 4 ways
+ */
+ c->icache.ways = 1;
+ }
printk("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
icache_size >> 10,
- cpu_has_vtag_icache ? "virtually tagged" : "physically tagged",
+ c->icache.flags & MIPS_CACHE_VTAG ? "VIVT" : "VIPT",
way_string[c->icache.ways], c->icache.linesz);
- printk("Primary data cache %ldkB, %s, linesize %d bytes.\n",
- dcache_size >> 10, way_string[c->dcache.ways], c->dcache.linesz);
+ printk("Primary data cache %ldkB, %s, %s, %s, linesize %d bytes\n",
+ dcache_size >> 10, way_string[c->dcache.ways],
+ (c->dcache.flags & MIPS_CACHE_PINDEX) ? "PIPT" : "VIPT",
+ (c->dcache.flags & MIPS_CACHE_ALIASES) ?
+ "cache aliases" : "no aliases",
+ c->dcache.linesz);
}
/*
@@ -996,12 +1299,11 @@ static void __init probe_pcache(void)
* executes in KSEG1 space or else you will crash and burn badly. You have
* been warned.
*/
-static int __init probe_scache(void)
+static int probe_scache(void)
{
unsigned long flags, addr, begin, end, pow2;
unsigned int config = read_c0_config();
struct cpuinfo_mips *c = &current_cpu_data;
- int tmp;
if (config & CONF_SC)
return 0;
@@ -1034,7 +1336,6 @@ static int __init probe_scache(void)
/* Now search for the wrap around point. */
pow2 = (128 * 1024);
- tmp = 0;
for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) {
cache_op(Index_Load_Tag_SD, addr);
__asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... */
@@ -1053,7 +1354,6 @@ static int __init probe_scache(void)
return 1;
}
-#if defined(CONFIG_CPU_LOONGSON2)
static void __init loongson2_sc_init(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
@@ -1069,13 +1369,39 @@ static void __init loongson2_sc_init(void)
c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}
-#endif
+
+static void __init loongson3_sc_init(void)
+{
+ struct cpuinfo_mips *c = &current_cpu_data;
+ unsigned int config2, lsize;
+
+ config2 = read_c0_config2();
+ lsize = (config2 >> 4) & 15;
+ if (lsize)
+ c->scache.linesz = 2 << lsize;
+ else
+ c->scache.linesz = 0;
+ c->scache.sets = 64 << ((config2 >> 8) & 15);
+ c->scache.ways = 1 + (config2 & 15);
+
+ scache_size = c->scache.sets *
+ c->scache.ways *
+ c->scache.linesz;
+	/* Loongson-3 has 4 cores, 1MB scache for each; the scaches are shared */
+ scache_size *= 4;
+ c->scache.waybit = 0;
+ pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
+ scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);
+ if (scache_size)
+ c->options |= MIPS_CPU_INCLUSIVE_CACHES;
+ return;
+}
extern int r5k_sc_init(void);
extern int rm7k_sc_init(void);
extern int mips_sc_init(void);
-static void __init setup_scache(void)
+static void setup_scache(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
unsigned int config = read_c0_config();
@@ -1084,9 +1410,9 @@ static void __init setup_scache(void)
/*
* Do the probing thing on R4000SC and R4400SC processors. Other
* processors don't have a S-cache that would be relevant to the
- * Linux memory managment.
+ * Linux memory management.
*/
- switch (c->cputype) {
+ switch (current_cpu_type()) {
case CPU_R4000SC:
case CPU_R4000MC:
case CPU_R4400SC:
@@ -1111,26 +1437,30 @@ static void __init setup_scache(void)
#ifdef CONFIG_R5000_CPU_SCACHE
r5k_sc_init();
#endif
- return;
+ return;
case CPU_RM7000:
- case CPU_RM9000:
#ifdef CONFIG_RM7000_CPU_SCACHE
rm7k_sc_init();
#endif
return;
-#if defined(CONFIG_CPU_LOONGSON2)
case CPU_LOONGSON2:
loongson2_sc_init();
return;
-#endif
+
+ case CPU_LOONGSON3:
+ loongson3_sc_init();
+ return;
+
+ case CPU_CAVIUM_OCTEON3:
+ case CPU_XLP:
+ /* don't need to worry about L2, fully coherent */
+ return;
default:
- if (c->isa_level == MIPS_CPU_ISA_M32R1 ||
- c->isa_level == MIPS_CPU_ISA_M32R2 ||
- c->isa_level == MIPS_CPU_ISA_M64R1 ||
- c->isa_level == MIPS_CPU_ISA_M64R2) {
+ if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
+ MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) {
#ifdef CONFIG_MIPS_CPU_SCACHE
if (mips_sc_init ()) {
scache_size = c->scache.ways * c->scache.sets * c->scache.linesz;
@@ -1176,7 +1506,7 @@ void au1x00_fixup_config_od(void)
/*
* Au1100 errata actually keeps silence about this bit, so we set it
* just in case for those revisions that require it to be set according
- * to arch/mips/au1000/common/cputable.c
+ * to the (now gone) cpu table.
*/
case 0x02030200: /* Au1100 AB */
case 0x02030201: /* Au1100 BA */
@@ -1186,9 +1516,47 @@ void au1x00_fixup_config_od(void)
}
}
-static void __init coherency_setup(void)
+/* CP0 hazard avoidance. */
+#define NXP_BARRIER() \
+ __asm__ __volatile__( \
+ ".set noreorder\n\t" \
+ "nop; nop; nop; nop; nop; nop;\n\t" \
+ ".set reorder\n\t")
+
+static void nxp_pr4450_fixup_config(void)
+{
+ unsigned long config0;
+
+ config0 = read_c0_config();
+
+ /* clear all three cache coherency fields */
+ config0 &= ~(0x7 | (7 << 25) | (7 << 28));
+ config0 |= (((_page_cachable_default >> _CACHE_SHIFT) << 0) |
+ ((_page_cachable_default >> _CACHE_SHIFT) << 25) |
+ ((_page_cachable_default >> _CACHE_SHIFT) << 28));
+ write_c0_config(config0);
+ NXP_BARRIER();
+}
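/*
 * Note (assumption, not part of this patch): the cleared bits 2:0, 27:25
 * and 30:28 of CP0 Config are taken to be the K0, KU and K23 cacheability
 * fields on the PR4450, so the fixup programs the same default cache
 * attribute into all three before the CP0 hazard barrier.
 */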
+
+static int cca = -1;
+
+static int __init cca_setup(char *str)
+{
+ get_option(&str, &cca);
+
+ return 0;
+}
+
+early_param("cca", cca_setup);
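/*
 * Usage note (illustration): booting with e.g. "cca=3" on the kernel
 * command line overrides the cacheability attribute otherwise read back
 * from c0_config in coherency_setup() below; values outside 0..7 are
 * ignored and the existing CONF_CM field is kept.
 */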
+
+static void coherency_setup(void)
{
- change_c0_config(CONF_CM_CMASK, CONF_CM_DEFAULT);
+ if (cca < 0 || cca > 7)
+ cca = read_c0_config() & CONF_CM_CMASK;
+ _page_cachable_default = cca << _CACHE_SHIFT;
+
+ pr_debug("Using cache attribute %d\n", cca);
+ change_c0_config(CONF_CM_CMASK, cca);
/*
* c0_status.cu=0 specifies that updates by the sc instruction use
@@ -1208,25 +1576,25 @@ static void __init coherency_setup(void)
break;
/*
* We need to catch the early Alchemy SOCs with
- * the write-only co_config.od bit and set it back to one...
+ * the write-only co_config.od bit and set it back to one on:
+ * Au1000 rev DA, HA, HB; Au1100 AB, BA, BC, Au1500 AB
*/
- case CPU_AU1000: /* rev. DA, HA, HB */
- case CPU_AU1100: /* rev. AB, BA, BC ?? */
- case CPU_AU1500: /* rev. AB */
+ case CPU_ALCHEMY:
au1x00_fixup_config_od();
break;
+
+ case PRID_IMP_PR4450:
+ nxp_pr4450_fixup_config();
+ break;
}
}
-void __init r4k_cache_init(void)
+static void r4k_cache_error_setup(void)
{
- extern void build_clear_page(void);
- extern void build_copy_page(void);
extern char __weak except_vec2_generic;
extern char __weak except_vec2_sb1;
- struct cpuinfo_mips *c = &current_cpu_data;
- switch (c->cputype) {
+ switch (current_cpu_type()) {
case CPU_SB1:
case CPU_SB1A:
set_uncached_handler(0x100, &except_vec2_sb1, 0x80);
@@ -1236,6 +1604,13 @@ void __init r4k_cache_init(void)
set_uncached_handler(0x100, &except_vec2_generic, 0x80);
break;
}
+}
+
+void r4k_cache_init(void)
+{
+ extern void build_clear_page(void);
+ extern void build_copy_page(void);
+ struct cpuinfo_mips *c = &current_cpu_data;
probe_pcache();
setup_scache();
@@ -1249,6 +1624,10 @@ void __init r4k_cache_init(void)
r4k_blast_scache_page_setup();
r4k_blast_scache_page_indexed_setup();
r4k_blast_scache_setup();
+#ifdef CONFIG_EVA
+ r4k_blast_dcache_user_page_setup();
+ r4k_blast_icache_user_page_setup();
+#endif
/*
* Some MIPS32 and MIPS64 processors have physically indexed caches.
@@ -1261,26 +1640,70 @@ void __init r4k_cache_init(void)
PAGE_SIZE - 1);
else
shm_align_mask = PAGE_SIZE-1;
+
+ __flush_cache_vmap = r4k__flush_cache_vmap;
+ __flush_cache_vunmap = r4k__flush_cache_vunmap;
+
flush_cache_all = cache_noop;
__flush_cache_all = r4k___flush_cache_all;
flush_cache_mm = r4k_flush_cache_mm;
flush_cache_page = r4k_flush_cache_page;
flush_cache_range = r4k_flush_cache_range;
+ __flush_kernel_vmap_range = r4k_flush_kernel_vmap_range;
+
flush_cache_sigtramp = r4k_flush_cache_sigtramp;
flush_icache_all = r4k_flush_icache_all;
local_flush_data_cache_page = local_r4k_flush_data_cache_page;
flush_data_cache_page = r4k_flush_data_cache_page;
flush_icache_range = r4k_flush_icache_range;
+ local_flush_icache_range = local_r4k_flush_icache_range;
-#ifdef CONFIG_DMA_NONCOHERENT
- _dma_cache_wback_inv = r4k_dma_cache_wback_inv;
- _dma_cache_wback = r4k_dma_cache_wback_inv;
- _dma_cache_inv = r4k_dma_cache_inv;
+#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)
+ if (coherentio) {
+ _dma_cache_wback_inv = (void *)cache_noop;
+ _dma_cache_wback = (void *)cache_noop;
+ _dma_cache_inv = (void *)cache_noop;
+ } else {
+ _dma_cache_wback_inv = r4k_dma_cache_wback_inv;
+ _dma_cache_wback = r4k_dma_cache_wback_inv;
+ _dma_cache_inv = r4k_dma_cache_inv;
+ }
#endif
build_clear_page();
build_copy_page();
+
+ /*
+ * We want to run CMP kernels on core with and without coherent
+ * caches. Therefore, do not use CONFIG_MIPS_CMP to decide whether
+ * or not to flush caches.
+ */
local_r4k___flush_cache_all(NULL);
+
coherency_setup();
+ board_cache_error_setup = r4k_cache_error_setup;
+}
+
+static int r4k_cache_pm_notifier(struct notifier_block *self, unsigned long cmd,
+ void *v)
+{
+ switch (cmd) {
+ case CPU_PM_ENTER_FAILED:
+ case CPU_PM_EXIT:
+ coherency_setup();
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block r4k_cache_pm_notifier_block = {
+ .notifier_call = r4k_cache_pm_notifier,
+};
+
+int __init r4k_cache_init_pm(void)
+{
+ return cpu_pm_register_notifier(&r4k_cache_pm_notifier_block);
}
+arch_initcall(r4k_cache_init_pm);