Diffstat (limited to 'arch/mips/netlogic/common/reset.S')
-rw-r--r--  arch/mips/netlogic/common/reset.S  |  98
1 file changed, 74 insertions(+), 24 deletions(-)
diff --git a/arch/mips/netlogic/common/reset.S b/arch/mips/netlogic/common/reset.S
index adb18288a6c..701c4bcb9e4 100644
--- a/arch/mips/netlogic/common/reset.S
+++ b/arch/mips/netlogic/common/reset.S
@@ -32,10 +32,11 @@
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#include <linux/init.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
+#include <asm/cpu.h>
+#include <asm/cacheops.h>
#include <asm/regdef.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
@@ -50,8 +51,8 @@
#include <asm/netlogic/xlp-hal/cpucontrol.h>
#define CP0_EBASE $15
-#define SYS_CPU_COHERENT_BASE(node) CKSEG1ADDR(XLP_DEFAULT_IO_BASE) + \
- XLP_IO_SYS_OFFSET(node) + XLP_IO_PCI_HDRSZ + \
+#define SYS_CPU_COHERENT_BASE CKSEG1ADDR(XLP_DEFAULT_IO_BASE) + \
+ XLP_IO_SYS_OFFSET(0) + XLP_IO_PCI_HDRSZ + \
SYS_CPU_NONCOHERENT_MODE * 4
/* Enable XLP features and workarounds in the LSU */
@@ -74,35 +75,67 @@
.endm
/*
- * Low level flush for L1D cache on XLP, the normal cache ops does
- * not do the complete and correct cache flush.
+ * Allow access to physical mem >64G by enabling ELPA in PAGEGRAIN
+ * register. This is needed before going to C code since the SP can
+ * be in this region. Called from all HW threads.
+ */
+.macro xlp_early_mmu_init
+ mfc0 t0, CP0_PAGEMASK, 1
+ li t1, (1 << 29) /* ELPA bit */
+ or t0, t1
+ mtc0 t0, CP0_PAGEMASK, 1
+.endm
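
In C terms, the macro above is a read-modify-write of CP0 PAGEGRAIN (register 5, select 1, reached here as CP0_PAGEMASK with select 1) to set the ELPA bit. A minimal sketch, assuming the kernel's pagegrain accessors from <asm/mipsregs.h>; the function name is illustrative, not from the patch:

	#include <asm/mipsregs.h>

	/* Set ELPA (bit 29) in PAGEGRAIN so loads and stores can reach
	 * physical memory above 64GB before C code runs.
	 */
	static inline void xlp_enable_elpa(void)
	{
		write_c0_pagegrain(read_c0_pagegrain() | (1 << 29));
	}
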
+
+/*
+ * L1D cache has to be flushed before enabling threads in XLP.
+ * On XLP8xx/XLP3xx, we do a low level flush using processor control
+ * registers. On XLPII CPUs, usual cache instructions work.
*/
.macro xlp_flush_l1_dcache
+ mfc0 t0, CP0_EBASE, 0
+ andi t0, t0, PRID_IMP_MASK
+ slt t1, t0, 0x1200
+ beqz t1, 15f
+ nop
+
+ /* XLP8xx low level cache flush */
li t0, LSU_DEBUG_DATA0
li t1, LSU_DEBUG_ADDR
li t2, 0 /* index */
li t3, 0x1000 /* loop count */
-1:
+11:
sll v0, t2, 5
mtcr zero, t0
ori v1, v0, 0x3 /* way0 | write_enable | write_active */
mtcr v1, t1
-2:
+12:
mfcr v1, t1
andi v1, 0x1 /* wait for write_active == 0 */
- bnez v1, 2b
+ bnez v1, 12b
nop
mtcr zero, t0
ori v1, v0, 0x7 /* way1 | write_enable | write_active */
mtcr v1, t1
-3:
+13:
mfcr v1, t1
andi v1, 0x1 /* wait for write_active == 0 */
- bnez v1, 3b
+ bnez v1, 13b
nop
addi t2, 1
- bne t3, t2, 1b
+ bne t3, t2, 11b
+ nop
+ b 17f
+ nop
+
+ /* XLPII CPUs: invalidate all 64K of L1 D-cache */
+15:
+ li t0, 0x80000000
+ li t1, 0x80010000
+16: cache Index_Writeback_Inv_D, 0(t0)
+ addiu t0, t0, 32
+ bne t0, t1, 16b
nop
+17:
.endm
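
The XLPII branch of the macro is the standard MIPS index-type cache op stepped over every 32-byte line of the 64KB L1 D-cache (both constants come straight from the assembly). A rough C equivalent, assuming the kernel's cache_op() helper from <asm/r4kcache.h>; generic code would size the loop from CP0 Config1 (or call blast_dcache32()) rather than hard-coding it:

	#include <asm/cacheops.h>
	#include <asm/r4kcache.h>

	/* Walk index addresses 0x80000000..0x8000ffff in 32-byte steps,
	 * issuing Index_Writeback_Inv_D on each D-cache line.
	 */
	static inline void xlpii_flush_l1_dcache(void)
	{
		unsigned long addr;

		for (addr = 0x80000000UL; addr < 0x80010000UL; addr += 32)
			cache_op(Index_Writeback_Inv_D, addr);
	}
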
/*
@@ -138,6 +171,17 @@ FEXPORT(nlm_reset_entry)
nop
1: /* Entry point on core wakeup */
+ mfc0 t0, CP0_EBASE, 0 /* processor ID */
+ andi t0, PRID_IMP_MASK
+ li t1, 0x1500 /* XLP 9xx */
+ beq t0, t1, 2f /* no need to set coherent mode */
+ nop
+
+ li t1, 0x1300 /* XLP 5xx */
+ beq t0, t1, 2f /* no need to set coherent mode */
+ nop
+
+ /* set bit in SYS coherent register for the core */
mfc0 t0, CP0_EBASE, 1
mfc0 t1, CP0_EBASE, 1
srl t1, 5
@@ -149,7 +193,7 @@ FEXPORT(nlm_reset_entry)
li t1, 0x1
sll t0, t1, t0
nor t0, t0, zero /* t0 <- ~(1 << core) */
- li t2, SYS_CPU_COHERENT_BASE(0)
+ li t2, SYS_CPU_COHERENT_BASE
add t2, t2, t3 /* t2 <- SYS offset for node */
lw t1, 0(t2)
and t1, t1, t0
@@ -159,17 +203,20 @@ FEXPORT(nlm_reset_entry)
lw t1, 0(t2)
sync
+2:
/* Configure LSU on Non-0 Cores. */
xlp_config_lsu
/* FALL THROUGH */
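
What the preceding block does, seen from C: clear the waking core's bit in the node's SYS_CPU_NONCOHERENT_MODE word and read it back so the write has taken effect before the sync. A sketch under that reading; the function and parameter names are illustrative, not from the patch:

	#include <linux/io.h>

	/* Clear the core's bit so it wakes up cache-coherent. XLP9xx
	 * (0x1500) and XLP5xx (0x1300) skip this, per the PRID checks
	 * above. sysbase stands for SYS_CPU_COHERENT_BASE plus the
	 * node offset computed in t3.
	 */
	static void nlm_core_set_coherent(void __iomem *sysbase, int core)
	{
		u32 mode = readl(sysbase);

		writel(mode & ~(1u << core), sysbase);
		readl(sysbase);	/* read back; the asm follows with sync */
	}
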
/*
- * Wake up sibling threads from the initial thread in
- * a core.
+ * Wake up sibling threads from the initial thread in a core.
*/
EXPORT(nlm_boot_siblings)
/* core L1D flush before enable threads */
xlp_flush_l1_dcache
+ /* save ra and sp; used again later (boot cpu only) */
+ dmtc0 ra, $22, 6
+ dmtc0 sp, $22, 7
/* Enable hw threads by writing to MAP_THREADMODE of the core */
li t0, CKSEG1ADDR(RESET_DATA_PHYS)
lw t1, BOOT_THREAD_MODE(t0) /* t1 <- thread mode */
@@ -181,8 +228,10 @@ EXPORT(nlm_boot_siblings)
/*
* The new hardware thread starts at the next instruction
* For all the cases other than core 0 thread 0, we will
- * jump to the secondary wait function.
- */
+ * jump to the secondary wait function.
+ *
+ * NOTE: All GPR contents are lost after the mtcr above!
+ */
mfc0 v0, CP0_EBASE, 1
andi v0, 0x3ff /* v0 <- node/core */
@@ -196,7 +245,9 @@ EXPORT(nlm_boot_siblings)
#endif
mtc0 t1, CP0_STATUS
- /* mark CPU ready, careful here, previous mtcr trashed registers */
+ xlp_early_mmu_init
+
+ /* mark CPU ready */
li t3, CKSEG1ADDR(RESET_DATA_PHYS)
ADDIU t1, t3, BOOT_CPU_READY
sll v1, v0, 2
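
The ready flag is a per-CPU word in an array at RESET_DATA_PHYS + BOOT_CPU_READY; the sll by 2 scales the CPU id in v0 to a 4-byte slot. A hedged C sketch, assuming those constants resolve as in the netlogic headers and that the hunk's elided tail stores a 1 into the slot:

	#include <linux/types.h>
	#include <asm/addrspace.h>		/* CKSEG1ADDR */
	#include <asm/netlogic/common.h>	/* RESET_DATA_PHYS, BOOT_CPU_READY */

	/* Each HW thread marks its own slot so that boot code can tell
	 * which CPUs came up.
	 */
	static void nlm_mark_cpu_ready(int hwtid)
	{
		volatile u32 *ready =
			(volatile u32 *)CKSEG1ADDR(RESET_DATA_PHYS + BOOT_CPU_READY);

		ready[hwtid] = 1;
	}
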
@@ -209,14 +260,12 @@ EXPORT(nlm_boot_siblings)
nop
/*
- * For the boot CPU, we have to restore registers and
- * return
+ * For the boot CPU, we have to restore ra and sp and return; the
+ * rest of the registers will be restored by the caller.
*/
-4: dmfc0 t0, $4, 2 /* restore SP from UserLocal */
- li t1, 0xfadebeef
- dmtc0 t1, $4, 2 /* restore SP from UserLocal */
- PTR_SUBU sp, t0, PT_SIZE
- RESTORE_ALL
+4:
+ dmfc0 ra, $22, 6
+ dmfc0 sp, $22, 7
jr ra
nop
EXPORT(nlm_reset_entry_end)
@@ -224,6 +273,7 @@ EXPORT(nlm_reset_entry_end)
LEAF(nlm_init_boot_cpu)
#ifdef CONFIG_CPU_XLP
xlp_config_lsu
+ xlp_early_mmu_init
#endif
jr ra
nop