author     Tony Luck <tony.luck@intel.com>    2005-10-20 10:41:44 -0700
committer  Tony Luck <tony.luck@intel.com>    2005-10-20 10:41:44 -0700
commit     9cec58dc138d6fcad9f447a19c8ff69f6540e667 (patch)
tree       4fe1cca94fdba8b705c87615bee06d3346f687ce /arch/sparc64/kernel
parent     17e5ad6c0ce5a970e2830d0de8bdd60a2f077d38 (diff)
parent     ac9b9c667c2e1194e22ebe0a441ae1c37aaa9b90 (diff)
Update from upstream with manual merge of Yasunori Goto's changes to swiotlb.c made in commit 281dd25cdc0d6903929b79183816d151ea626341, since this file has been moved from arch/ia64/lib/swiotlb.c to lib/swiotlb.c.

Signed-off-by: Tony Luck <tony.luck@intel.com>
Diffstat (limited to 'arch/sparc64/kernel')
-rw-r--r--  arch/sparc64/kernel/cpu.c           |    4
-rw-r--r--  arch/sparc64/kernel/devices.c       |   22
-rw-r--r--  arch/sparc64/kernel/dtlb_backend.S  |   13
-rw-r--r--  arch/sparc64/kernel/dtlb_base.S     |   18
-rw-r--r--  arch/sparc64/kernel/dtlb_prot.S     |   12
-rw-r--r--  arch/sparc64/kernel/entry.S         |  263
-rw-r--r--  arch/sparc64/kernel/etrap.S         |   51
-rw-r--r--  arch/sparc64/kernel/head.S          |  748
-rw-r--r--  arch/sparc64/kernel/irq.c           |    1
-rw-r--r--  arch/sparc64/kernel/itlb_base.S     |   26
-rw-r--r--  arch/sparc64/kernel/ktlb.S          |  194
-rw-r--r--  arch/sparc64/kernel/pci_iommu.c     |  363
-rw-r--r--  arch/sparc64/kernel/pci_psycho.c    |   44
-rw-r--r--  arch/sparc64/kernel/pci_sabre.c     |   39
-rw-r--r--  arch/sparc64/kernel/pci_schizo.c    |   59
-rw-r--r--  arch/sparc64/kernel/power.c         |   64
-rw-r--r--  arch/sparc64/kernel/ptrace.c        |   21
-rw-r--r--  arch/sparc64/kernel/rtrap.S         |   30
-rw-r--r--  arch/sparc64/kernel/setup.c         |   58
-rw-r--r--  arch/sparc64/kernel/smp.c           |   28
-rw-r--r--  arch/sparc64/kernel/sparc64_ksyms.c |    3
-rw-r--r--  arch/sparc64/kernel/sys32.S         |  170
-rw-r--r--  arch/sparc64/kernel/trampoline.S    |   31
-rw-r--r--  arch/sparc64/kernel/traps.c         |  100
-rw-r--r--  arch/sparc64/kernel/una_asm.S       |   67
-rw-r--r--  arch/sparc64/kernel/unaligned.c     |  101
-rw-r--r--  arch/sparc64/kernel/us3_cpufreq.c   |    5
-rw-r--r--  arch/sparc64/kernel/vmlinux.lds.S   |    3
-rw-r--r--  arch/sparc64/kernel/winfixup.S      |   33
29 files changed, 1114 insertions(+), 1457 deletions(-)
diff --git a/arch/sparc64/kernel/cpu.c b/arch/sparc64/kernel/cpu.c
index 48756958116..77ef5df4e5a 100644
--- a/arch/sparc64/kernel/cpu.c
+++ b/arch/sparc64/kernel/cpu.c
@@ -39,6 +39,8 @@ struct cpu_fp_info linux_sparc_fpu[] = {
{ 0x3e, 0x15, 0, "UltraSparc III+ integrated FPU"},
{ 0x3e, 0x16, 0, "UltraSparc IIIi integrated FPU"},
{ 0x3e, 0x18, 0, "UltraSparc IV integrated FPU"},
+ { 0x3e, 0x19, 0, "UltraSparc IV+ integrated FPU"},
+ { 0x3e, 0x22, 0, "UltraSparc IIIi+ integrated FPU"},
};
#define NSPARCFPU (sizeof(linux_sparc_fpu)/sizeof(struct cpu_fp_info))
@@ -53,6 +55,8 @@ struct cpu_iu_info linux_sparc_chips[] = {
{ 0x3e, 0x15, "TI UltraSparc III+ (Cheetah+)"},
{ 0x3e, 0x16, "TI UltraSparc IIIi (Jalapeno)"},
{ 0x3e, 0x18, "TI UltraSparc IV (Jaguar)"},
+ { 0x3e, 0x19, "TI UltraSparc IV+ (Panther)"},
+ { 0x3e, 0x22, "TI UltraSparc IIIi+ (Serrano)"},
};
#define NSPARCCHIPS (sizeof(linux_sparc_chips)/sizeof(struct cpu_iu_info))
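The NSPARCFPU and NSPARCCHIPS macros above use the standard sizeof-division idiom to derive a table's entry count from the table itself, so the new Panther and Serrano rows are picked up without touching any hand-maintained constant. A minimal sketch of the generic form of the idiom (the ARRAY_SIZE spelling below is the usual one, not part of this patch):

    /* Element count of a statically sized array; the count tracks
     * the initializer list automatically.
     */
    #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))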
diff --git a/arch/sparc64/kernel/devices.c b/arch/sparc64/kernel/devices.c
index d710274e516..df9a1ca8fd7 100644
--- a/arch/sparc64/kernel/devices.c
+++ b/arch/sparc64/kernel/devices.c
@@ -135,6 +135,28 @@ void __init device_scan(void)
cpu_data(0).clock_tick = prom_getintdefault(cpu_node,
"clock-frequency",
0);
+ cpu_data(0).dcache_size = prom_getintdefault(cpu_node,
+ "dcache-size",
+ 16 * 1024);
+ cpu_data(0).dcache_line_size =
+ prom_getintdefault(cpu_node, "dcache-line-size", 32);
+ cpu_data(0).icache_size = prom_getintdefault(cpu_node,
+ "icache-size",
+ 16 * 1024);
+ cpu_data(0).icache_line_size =
+ prom_getintdefault(cpu_node, "icache-line-size", 32);
+ cpu_data(0).ecache_size = prom_getintdefault(cpu_node,
+ "ecache-size",
+ 4 * 1024 * 1024);
+ cpu_data(0).ecache_line_size =
+ prom_getintdefault(cpu_node, "ecache-line-size", 64);
+ printk("CPU[0]: Caches "
+ "D[sz(%d):line_sz(%d)] "
+ "I[sz(%d):line_sz(%d)] "
+ "E[sz(%d):line_sz(%d)]\n",
+ cpu_data(0).dcache_size, cpu_data(0).dcache_line_size,
+ cpu_data(0).icache_size, cpu_data(0).icache_line_size,
+ cpu_data(0).ecache_size, cpu_data(0).ecache_line_size);
}
#endif
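The devices.c hunk above fills in the boot cpu's cache geometry from OpenBoot properties, falling back to conservative defaults (16K L1 caches, 32-byte lines, 4MB E-cache with 64-byte lines) when a property is absent. A minimal sketch of that property-with-default pattern, assuming only that prom_getintdefault(node, prop, def) behaves as its use above suggests; the struct and helper names are illustrative:

    /* Returns integer property "prop" of "node", or "def" if absent. */
    extern int prom_getintdefault(int node, const char *prop, int def);

    struct cache_geom { int size, line; };      /* illustrative */

    static void read_cache_geom(int node, struct cache_geom *g,
                                const char *sz_prop, const char *ln_prop,
                                int def_size, int def_line)
    {
            g->size = prom_getintdefault(node, sz_prop, def_size);
            g->line = prom_getintdefault(node, ln_prop, def_line);
    }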
diff --git a/arch/sparc64/kernel/dtlb_backend.S b/arch/sparc64/kernel/dtlb_backend.S
index 538522848ad..acc889a7f9c 100644
--- a/arch/sparc64/kernel/dtlb_backend.S
+++ b/arch/sparc64/kernel/dtlb_backend.S
@@ -9,17 +9,7 @@
#include <asm/pgtable.h>
#include <asm/mmu.h>
-#if PAGE_SHIFT == 13
-#define SZ_BITS _PAGE_SZ8K
-#elif PAGE_SHIFT == 16
-#define SZ_BITS _PAGE_SZ64K
-#elif PAGE_SHIFT == 19
-#define SZ_BITS _PAGE_SZ512K
-#elif PAGE_SHIFT == 22
-#define SZ_BITS _PAGE_SZ4MB
-#endif
-
-#define VALID_SZ_BITS (_PAGE_VALID | SZ_BITS)
+#define VALID_SZ_BITS (_PAGE_VALID | _PAGE_SZBITS)
#define VPTE_BITS (_PAGE_CP | _PAGE_CV | _PAGE_P )
#define VPTE_SHIFT (PAGE_SHIFT - 3)
@@ -163,7 +153,6 @@ sparc64_vpte_continue:
stxa %g4, [%g1 + %g1] ASI_DMMU ! Restore previous TAG_ACCESS
retry ! Load PTE once again
-#undef SZ_BITS
#undef VALID_SZ_BITS
#undef VPTE_SHIFT
#undef VPTE_BITS
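The dtlb_backend.S hunk replaces a per-file #if chain with the shared _PAGE_SZBITS definition. Judging from the removed lines, the centralized constant presumably encodes the same PAGE_SHIFT-to-size-bits mapping; a sketch of its assumed shape (the real definition lives in asm/pgtable.h):

    /* Assumed shape of _PAGE_SZBITS, reconstructed from the removed
     * #if chain above.
     */
    #if PAGE_SHIFT == 13
    #define _PAGE_SZBITS _PAGE_SZ8K
    #elif PAGE_SHIFT == 16
    #define _PAGE_SZBITS _PAGE_SZ64K
    #elif PAGE_SHIFT == 19
    #define _PAGE_SZBITS _PAGE_SZ512K
    #elif PAGE_SHIFT == 22
    #define _PAGE_SZBITS _PAGE_SZ4MB
    #endif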
diff --git a/arch/sparc64/kernel/dtlb_base.S b/arch/sparc64/kernel/dtlb_base.S
index ded2fed23fc..6528786840c 100644
--- a/arch/sparc64/kernel/dtlb_base.S
+++ b/arch/sparc64/kernel/dtlb_base.S
@@ -53,39 +53,36 @@
* be guaranteed to be 0 ... mmu_context.h does guarantee this
* by only using 10 bits in the hwcontext value.
*/
-#define CREATE_VPTE_OFFSET1(r1, r2)
+#define CREATE_VPTE_OFFSET1(r1, r2) nop
#define CREATE_VPTE_OFFSET2(r1, r2) \
srax r1, 10, r2
-#define CREATE_VPTE_NOP nop
#else
#define CREATE_VPTE_OFFSET1(r1, r2) \
srax r1, PAGE_SHIFT, r2
#define CREATE_VPTE_OFFSET2(r1, r2) \
sllx r2, 3, r2
-#define CREATE_VPTE_NOP
#endif
/* DTLB ** ICACHE line 1: Quick user TLB misses */
+ mov TLB_SFSR, %g1
ldxa [%g1 + %g1] ASI_DMMU, %g4 ! Get TAG_ACCESS
andcc %g4, TAG_CONTEXT_BITS, %g0 ! From Nucleus?
from_tl1_trap:
rdpr %tl, %g5 ! For TL==3 test
CREATE_VPTE_OFFSET1(%g4, %g6) ! Create VPTE offset
- be,pn %xcc, 3f ! Yep, special processing
+ be,pn %xcc, kvmap ! Yep, special processing
CREATE_VPTE_OFFSET2(%g4, %g6) ! Create VPTE offset
cmp %g5, 4 ! Last trap level?
- be,pn %xcc, longpath ! Yep, cannot risk VPTE miss
- nop ! delay slot
/* DTLB ** ICACHE line 2: User finish + quick kernel TLB misses */
+ be,pn %xcc, longpath ! Yep, cannot risk VPTE miss
+ nop ! delay slot
ldxa [%g3 + %g6] ASI_S, %g5 ! Load VPTE
1: brgez,pn %g5, longpath ! Invalid, branch out
nop ! Delay-slot
9: stxa %g5, [%g0] ASI_DTLB_DATA_IN ! Reload TLB
retry ! Trap return
-3: brlz,pt %g4, 9b ! Kernel virtual map?
- xor %g2, %g4, %g5 ! Finish bit twiddles
- ba,a,pt %xcc, kvmap ! Yep, go check for obp/vmalloc
+ nop
/* DTLB ** ICACHE line 3: winfixups+real_faults */
longpath:
@@ -106,8 +103,7 @@ longpath:
nop
nop
nop
- CREATE_VPTE_NOP
+ nop
#undef CREATE_VPTE_OFFSET1
#undef CREATE_VPTE_OFFSET2
-#undef CREATE_VPTE_NOP
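The CREATE_VPTE_OFFSET macros compute the byte offset of the faulting address's PTE within the virtual page table: one 8-byte PTE per page, so offset = (vaddr >> PAGE_SHIFT) * 8. With 8K pages the two steps fuse into a single arithmetic right shift by 10 (13 - 3), which is why that variant needs only one real instruction plus a nop. A hedged C restatement (illustrative only):

    /* C restatement of CREATE_VPTE_OFFSET1/2.  The shift is
     * arithmetic, so kernel (negative) addresses keep their sign
     * and yield a signed offset from the VPTE base.
     */
    static inline long vpte_offset(long vaddr)
    {
    #if PAGE_SHIFT == 13
            return vaddr >> 10;        /* (vaddr >> 13) << 3, fused */
    #else
            return (vaddr >> PAGE_SHIFT) << 3;
    #endif
    }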
diff --git a/arch/sparc64/kernel/dtlb_prot.S b/arch/sparc64/kernel/dtlb_prot.S
index d848bb7374b..e0a92016260 100644
--- a/arch/sparc64/kernel/dtlb_prot.S
+++ b/arch/sparc64/kernel/dtlb_prot.S
@@ -14,14 +14,14 @@
*/
/* PROT ** ICACHE line 1: User DTLB protection trap */
- stxa %g0, [%g1] ASI_DMMU ! Clear SFSR FaultValid bit
- membar #Sync ! Synchronize ASI stores
- rdpr %pstate, %g5 ! Move into alternate globals
+ mov TLB_SFSR, %g1
+ stxa %g0, [%g1] ASI_DMMU ! Clear FaultValid bit
+ membar #Sync ! Synchronize stores
+ rdpr %pstate, %g5 ! Move into alt-globals
wrpr %g5, PSTATE_AG|PSTATE_MG, %pstate
- rdpr %tl, %g1 ! Need to do a winfixup?
+ rdpr %tl, %g1 ! Need a winfixup?
cmp %g1, 1 ! Trap level >1?
- mov TLB_TAG_ACCESS, %g4 ! Prepare reload of vaddr
- nop
+ mov TLB_TAG_ACCESS, %g4 ! For reload of vaddr
/* PROT ** ICACHE line 2: More real fault processing */
bgu,pn %xcc, winfix_trampoline ! Yes, perform winfixup
diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S
index 3e0badb820c..11a848402fb 100644
--- a/arch/sparc64/kernel/entry.S
+++ b/arch/sparc64/kernel/entry.S
@@ -30,164 +30,10 @@
.text
.align 32
- .globl sparc64_vpte_patchme1
- .globl sparc64_vpte_patchme2
-/*
- * On a second level vpte miss, check whether the original fault is to the OBP
- * range (note that this is only possible for instruction miss, data misses to
- * obp range do not use vpte). If so, go back directly to the faulting address.
- * This is because we want to read the tpc, otherwise we have no way of knowing
- * the 8k aligned faulting address if we are using >8k kernel pagesize. This
- * also ensures no vpte range addresses are dropped into tlb while obp is
- * executing (see inherit_locked_prom_mappings() rant).
- */
-sparc64_vpte_nucleus:
- /* Load 0xf0000000, which is LOW_OBP_ADDRESS. */
- mov 0xf, %g5
- sllx %g5, 28, %g5
-
- /* Is addr >= LOW_OBP_ADDRESS? */
- cmp %g4, %g5
- blu,pn %xcc, sparc64_vpte_patchme1
- mov 0x1, %g5
-
- /* Load 0x100000000, which is HI_OBP_ADDRESS. */
- sllx %g5, 32, %g5
-
- /* Is addr < HI_OBP_ADDRESS? */
- cmp %g4, %g5
- blu,pn %xcc, obp_iaddr_patch
- nop
-
- /* These two instructions are patched by paging_init(). */
-sparc64_vpte_patchme1:
- sethi %hi(0), %g5
-sparc64_vpte_patchme2:
- or %g5, %lo(0), %g5
-
- /* With kernel PGD in %g5, branch back into dtlb_backend. */
- ba,pt %xcc, sparc64_kpte_continue
- andn %g1, 0x3, %g1 /* Finish PMD offset adjustment. */
-
-vpte_noent:
- /* Restore previous TAG_ACCESS, %g5 is zero, and we will
- * skip over the trap instruction so that the top level
- * TLB miss handler will think this %g5 value is just an
- * invalid PTE, thus branching to full fault processing.
- */
- mov TLB_SFSR, %g1
- stxa %g4, [%g1 + %g1] ASI_DMMU
- done
-
- .globl obp_iaddr_patch
-obp_iaddr_patch:
- /* These two instructions patched by inherit_prom_mappings(). */
- sethi %hi(0), %g5
- or %g5, %lo(0), %g5
-
- /* Behave as if we are at TL0. */
- wrpr %g0, 1, %tl
- rdpr %tpc, %g4 /* Find original faulting iaddr */
- srlx %g4, 13, %g4 /* Throw out context bits */
- sllx %g4, 13, %g4 /* g4 has vpn + ctx0 now */
-
- /* Restore previous TAG_ACCESS. */
- mov TLB_SFSR, %g1
- stxa %g4, [%g1 + %g1] ASI_IMMU
-
- /* Get PMD offset. */
- srlx %g4, 23, %g6
- and %g6, 0x7ff, %g6
- sllx %g6, 2, %g6
-
- /* Load PMD, is it valid? */
- lduwa [%g5 + %g6] ASI_PHYS_USE_EC, %g5
- brz,pn %g5, longpath
- sllx %g5, 11, %g5
-
- /* Get PTE offset. */
- srlx %g4, 13, %g6
- and %g6, 0x3ff, %g6
- sllx %g6, 3, %g6
-
- /* Load PTE. */
- ldxa [%g5 + %g6] ASI_PHYS_USE_EC, %g5
- brgez,pn %g5, longpath
- nop
-
- /* TLB load and return from trap. */
- stxa %g5, [%g0] ASI_ITLB_DATA_IN
- retry
-
- .globl obp_daddr_patch
-obp_daddr_patch:
- /* These two instructions patched by inherit_prom_mappings(). */
- sethi %hi(0), %g5
- or %g5, %lo(0), %g5
-
- /* Get PMD offset. */
- srlx %g4, 23, %g6
- and %g6, 0x7ff, %g6
- sllx %g6, 2, %g6
-
- /* Load PMD, is it valid? */
- lduwa [%g5 + %g6] ASI_PHYS_USE_EC, %g5
- brz,pn %g5, longpath
- sllx %g5, 11, %g5
-
- /* Get PTE offset. */
- srlx %g4, 13, %g6
- and %g6, 0x3ff, %g6
- sllx %g6, 3, %g6
-
- /* Load PTE. */
- ldxa [%g5 + %g6] ASI_PHYS_USE_EC, %g5
- brgez,pn %g5, longpath
- nop
-
- /* TLB load and return from trap. */
- stxa %g5, [%g0] ASI_DTLB_DATA_IN
- retry
-
-/*
- * On a first level data miss, check whether this is to the OBP range (note
- * that such accesses can be made by prom, as well as by kernel using
- * prom_getproperty on "address"), and if so, do not use vpte access ...
- * rather, use information saved during inherit_prom_mappings() using 8k
- * pagesize.
- */
-kvmap:
- /* Load 0xf0000000, which is LOW_OBP_ADDRESS. */
- mov 0xf, %g5
- sllx %g5, 28, %g5
-
- /* Is addr >= LOW_OBP_ADDRESS? */
- cmp %g4, %g5
- blu,pn %xcc, vmalloc_addr
- mov 0x1, %g5
-
- /* Load 0x100000000, which is HI_OBP_ADDRESS. */
- sllx %g5, 32, %g5
-
- /* Is addr < HI_OBP_ADDRESS? */
- cmp %g4, %g5
- blu,pn %xcc, obp_daddr_patch
- nop
-
-vmalloc_addr:
- /* If we get here, a vmalloc addr accessed, load kernel VPTE. */
- ldxa [%g3 + %g6] ASI_N, %g5
- brgez,pn %g5, longpath
- nop
-
- /* PTE is valid, load into TLB and return from trap. */
- stxa %g5, [%g0] ASI_DTLB_DATA_IN ! Reload TLB
- retry
-
/* This is trivial with the new code... */
.globl do_fpdis
do_fpdis:
- sethi %hi(TSTATE_PEF), %g4 ! IEU0
+ sethi %hi(TSTATE_PEF), %g4
rdpr %tstate, %g5
andcc %g5, %g4, %g0
be,pt %xcc, 1f
@@ -204,18 +50,18 @@ do_fpdis:
add %g0, %g0, %g0
ba,a,pt %xcc, rtrap_clr_l6
-1: ldub [%g6 + TI_FPSAVED], %g5 ! Load Group
- wr %g0, FPRS_FEF, %fprs ! LSU Group+4bubbles
- andcc %g5, FPRS_FEF, %g0 ! IEU1 Group
- be,a,pt %icc, 1f ! CTI
- clr %g7 ! IEU0
- ldx [%g6 + TI_GSR], %g7 ! Load Group
-1: andcc %g5, FPRS_DL, %g0 ! IEU1
- bne,pn %icc, 2f ! CTI
- fzero %f0 ! FPA
- andcc %g5, FPRS_DU, %g0 ! IEU1 Group
- bne,pn %icc, 1f ! CTI
- fzero %f2 ! FPA
+1: ldub [%g6 + TI_FPSAVED], %g5
+ wr %g0, FPRS_FEF, %fprs
+ andcc %g5, FPRS_FEF, %g0
+ be,a,pt %icc, 1f
+ clr %g7
+ ldx [%g6 + TI_GSR], %g7
+1: andcc %g5, FPRS_DL, %g0
+ bne,pn %icc, 2f
+ fzero %f0
+ andcc %g5, FPRS_DU, %g0
+ bne,pn %icc, 1f
+ fzero %f2
faddd %f0, %f2, %f4
fmuld %f0, %f2, %f6
faddd %f0, %f2, %f8
@@ -251,15 +97,17 @@ do_fpdis:
faddd %f0, %f2, %f4
fmuld %f0, %f2, %f6
ldxa [%g3] ASI_DMMU, %g5
-cplus_fptrap_insn_1:
- sethi %hi(0), %g2
+ sethi %hi(sparc64_kern_sec_context), %g2
+ ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2
stxa %g2, [%g3] ASI_DMMU
membar #Sync
add %g6, TI_FPREGS + 0xc0, %g2
faddd %f0, %f2, %f8
fmuld %f0, %f2, %f10
- ldda [%g1] ASI_BLK_S, %f32 ! grrr, where is ASI_BLK_NUCLEUS 8-(
+ membar #Sync
+ ldda [%g1] ASI_BLK_S, %f32
ldda [%g2] ASI_BLK_S, %f48
+ membar #Sync
faddd %f0, %f2, %f12
fmuld %f0, %f2, %f14
faddd %f0, %f2, %f16
@@ -270,7 +118,6 @@ cplus_fptrap_insn_1:
fmuld %f0, %f2, %f26
faddd %f0, %f2, %f28
fmuld %f0, %f2, %f30
- membar #Sync
b,pt %xcc, fpdis_exit
nop
2: andcc %g5, FPRS_DU, %g0
@@ -280,15 +127,17 @@ cplus_fptrap_insn_1:
fzero %f34
ldxa [%g3] ASI_DMMU, %g5
add %g6, TI_FPREGS, %g1
-cplus_fptrap_insn_2:
- sethi %hi(0), %g2
+ sethi %hi(sparc64_kern_sec_context), %g2
+ ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2
stxa %g2, [%g3] ASI_DMMU
membar #Sync
add %g6, TI_FPREGS + 0x40, %g2
faddd %f32, %f34, %f36
fmuld %f32, %f34, %f38
- ldda [%g1] ASI_BLK_S, %f0 ! grrr, where is ASI_BLK_NUCLEUS 8-(
+ membar #Sync
+ ldda [%g1] ASI_BLK_S, %f0
ldda [%g2] ASI_BLK_S, %f16
+ membar #Sync
faddd %f32, %f34, %f40
fmuld %f32, %f34, %f42
faddd %f32, %f34, %f44
@@ -301,18 +150,18 @@ cplus_fptrap_insn_2:
fmuld %f32, %f34, %f58
faddd %f32, %f34, %f60
fmuld %f32, %f34, %f62
- membar #Sync
ba,pt %xcc, fpdis_exit
nop
3: mov SECONDARY_CONTEXT, %g3
add %g6, TI_FPREGS, %g1
ldxa [%g3] ASI_DMMU, %g5
-cplus_fptrap_insn_3:
- sethi %hi(0), %g2
+ sethi %hi(sparc64_kern_sec_context), %g2
+ ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2
stxa %g2, [%g3] ASI_DMMU
membar #Sync
mov 0x40, %g2
- ldda [%g1] ASI_BLK_S, %f0 ! grrr, where is ASI_BLK_NUCLEUS 8-(
+ membar #Sync
+ ldda [%g1] ASI_BLK_S, %f0
ldda [%g1 + %g2] ASI_BLK_S, %f16
add %g1, 0x80, %g1
ldda [%g1] ASI_BLK_S, %f32
@@ -473,8 +322,8 @@ do_fptrap_after_fsr:
stx %g3, [%g6 + TI_GSR]
mov SECONDARY_CONTEXT, %g3
ldxa [%g3] ASI_DMMU, %g5
-cplus_fptrap_insn_4:
- sethi %hi(0), %g2
+ sethi %hi(sparc64_kern_sec_context), %g2
+ ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2
stxa %g2, [%g3] ASI_DMMU
membar #Sync
add %g6, TI_FPREGS, %g2
@@ -495,45 +344,17 @@ cplus_fptrap_insn_4:
ba,pt %xcc, etrap
wr %g0, 0, %fprs
-cplus_fptrap_1:
- sethi %hi(CTX_CHEETAH_PLUS_CTX0), %g2
-
- .globl cheetah_plus_patch_fpdis
-cheetah_plus_patch_fpdis:
- /* We configure the dTLB512_0 for 4MB pages and the
- * dTLB512_1 for 8K pages when in context zero.
- */
- sethi %hi(cplus_fptrap_1), %o0
- lduw [%o0 + %lo(cplus_fptrap_1)], %o1
-
- set cplus_fptrap_insn_1, %o2
- stw %o1, [%o2]
- flush %o2
- set cplus_fptrap_insn_2, %o2
- stw %o1, [%o2]
- flush %o2
- set cplus_fptrap_insn_3, %o2
- stw %o1, [%o2]
- flush %o2
- set cplus_fptrap_insn_4, %o2
- stw %o1, [%o2]
- flush %o2
-
- retl
- nop
-
/* The registers for cross calls will be:
*
* DATA 0: [low 32-bits] Address of function to call, jmp to this
* [high 32-bits] MMU Context Argument 0, place in %g5
- * DATA 1: Address Argument 1, place in %g6
+ * DATA 1: Address Argument 1, place in %g1
* DATA 2: Address Argument 2, place in %g7
*
* With this method we can do most of the cross-call tlb/cache
* flushing very quickly.
*
- * Current CPU's IRQ worklist table is locked into %g1,
- * don't touch.
+ * Current CPU's IRQ worklist table is locked into %g6, don't touch.
*/
.text
.align 32
@@ -1007,13 +828,14 @@ cheetah_plus_dcpe_trap_vector:
nop
do_cheetah_plus_data_parity:
- ba,pt %xcc, etrap
+ rdpr %pil, %g2
+ wrpr %g0, 15, %pil
+ ba,pt %xcc, etrap_irq
rd %pc, %g7
mov 0x0, %o0
call cheetah_plus_parity_error
add %sp, PTREGS_OFF, %o1
- ba,pt %xcc, rtrap
- clr %l6
+ ba,a,pt %xcc, rtrap_irq
cheetah_plus_dcpe_trap_vector_tl1:
membar #Sync
@@ -1037,13 +859,14 @@ cheetah_plus_icpe_trap_vector:
nop
do_cheetah_plus_insn_parity:
- ba,pt %xcc, etrap
+ rdpr %pil, %g2
+ wrpr %g0, 15, %pil
+ ba,pt %xcc, etrap_irq
rd %pc, %g7
mov 0x1, %o0
call cheetah_plus_parity_error
add %sp, PTREGS_OFF, %o1
- ba,pt %xcc, rtrap
- clr %l6
+ ba,a,pt %xcc, rtrap_irq
cheetah_plus_icpe_trap_vector_tl1:
membar #Sync
@@ -1076,6 +899,10 @@ do_dcpe_tl1:
nop
wrpr %g1, %tl ! Restore original trap level
do_dcpe_tl1_nonfatal: /* Ok we may use interrupt globals safely. */
+ sethi %hi(dcache_parity_tl1_occurred), %g2
+ lduw [%g2 + %lo(dcache_parity_tl1_occurred)], %g1
+ add %g1, 1, %g1
+ stw %g1, [%g2 + %lo(dcache_parity_tl1_occurred)]
/* Reset D-cache parity */
sethi %hi(1 << 16), %g1 ! D-cache size
mov (1 << 5), %g2 ! D-cache line size
@@ -1122,6 +949,10 @@ do_icpe_tl1:
nop
wrpr %g1, %tl ! Restore original trap level
do_icpe_tl1_nonfatal: /* Ok we may use interrupt globals safely. */
+ sethi %hi(icache_parity_tl1_occurred), %g2
+ lduw [%g2 + %lo(icache_parity_tl1_occurred)], %g1
+ add %g1, 1, %g1
+ stw %g1, [%g2 + %lo(icache_parity_tl1_occurred)]
/* Flush I-cache */
sethi %hi(1 << 15), %g1 ! I-cache size
mov (1 << 5), %g2 ! I-cache line size
diff --git a/arch/sparc64/kernel/etrap.S b/arch/sparc64/kernel/etrap.S
index 50d2af1d98a..0d8eba21111 100644
--- a/arch/sparc64/kernel/etrap.S
+++ b/arch/sparc64/kernel/etrap.S
@@ -68,12 +68,8 @@ etrap_irq:
wrpr %g3, 0, %otherwin
wrpr %g2, 0, %wstate
-cplus_etrap_insn_1:
- sethi %hi(0), %g3
- sllx %g3, 32, %g3
-cplus_etrap_insn_2:
- sethi %hi(0), %g2
- or %g3, %g2, %g3
+ sethi %hi(sparc64_kern_pri_context), %g2
+ ldx [%g2 + %lo(sparc64_kern_pri_context)], %g3
stxa %g3, [%l4] ASI_DMMU
flush %l6
wr %g0, ASI_AIUS, %asi
@@ -215,12 +211,8 @@ scetrap: rdpr %pil, %g2
mov PRIMARY_CONTEXT, %l4
wrpr %g3, 0, %otherwin
wrpr %g2, 0, %wstate
-cplus_etrap_insn_3:
- sethi %hi(0), %g3
- sllx %g3, 32, %g3
-cplus_etrap_insn_4:
- sethi %hi(0), %g2
- or %g3, %g2, %g3
+ sethi %hi(sparc64_kern_pri_context), %g2
+ ldx [%g2 + %lo(sparc64_kern_pri_context)], %g3
stxa %g3, [%l4] ASI_DMMU
flush %l6
@@ -264,38 +256,3 @@ cplus_etrap_insn_4:
#undef TASK_REGOFF
#undef ETRAP_PSTATE1
-
-cplus_einsn_1:
- sethi %uhi(CTX_CHEETAH_PLUS_NUC), %g3
-cplus_einsn_2:
- sethi %hi(CTX_CHEETAH_PLUS_CTX0), %g2
-
- .globl cheetah_plus_patch_etrap
-cheetah_plus_patch_etrap:
- /* We configure the dTLB512_0 for 4MB pages and the
- * dTLB512_1 for 8K pages when in context zero.
- */
- sethi %hi(cplus_einsn_1), %o0
- sethi %hi(cplus_etrap_insn_1), %o2
- lduw [%o0 + %lo(cplus_einsn_1)], %o1
- or %o2, %lo(cplus_etrap_insn_1), %o2
- stw %o1, [%o2]
- flush %o2
- sethi %hi(cplus_etrap_insn_3), %o2
- or %o2, %lo(cplus_etrap_insn_3), %o2
- stw %o1, [%o2]
- flush %o2
-
- sethi %hi(cplus_einsn_2), %o0
- sethi %hi(cplus_etrap_insn_2), %o2
- lduw [%o0 + %lo(cplus_einsn_2)], %o1
- or %o2, %lo(cplus_etrap_insn_2), %o2
- stw %o1, [%o2]
- flush %o2
- sethi %hi(cplus_etrap_insn_4), %o2
- or %o2, %lo(cplus_etrap_insn_4), %o2
- stw %o1, [%o2]
- flush %o2
-
- retl
- nop
diff --git a/arch/sparc64/kernel/head.S b/arch/sparc64/kernel/head.S
index 1fa06c4e3bd..b49dcd4504b 100644
--- a/arch/sparc64/kernel/head.S
+++ b/arch/sparc64/kernel/head.S
@@ -28,19 +28,14 @@
#include <asm/mmu.h>
/* This section from _start to sparc64_boot_end should fit into
- * 0x0000.0000.0040.4000 to 0x0000.0000.0040.8000 and will be sharing space
- * with bootup_user_stack, which is from 0x0000.0000.0040.4000 to
- * 0x0000.0000.0040.6000 and empty_bad_page, which is from
- * 0x0000.0000.0040.6000 to 0x0000.0000.0040.8000.
+ * 0x0000000000404000 to 0x0000000000408000.
*/
-
.text
.globl start, _start, stext, _stext
_start:
start:
_stext:
stext:
-bootup_user_stack:
! 0x0000000000404000
b sparc64_boot
flushw /* Flush register file. */
@@ -80,15 +75,169 @@ sparc_ramdisk_image64:
.xword 0
.word _end
- /* We must be careful, 32-bit OpenBOOT will get confused if it
- * tries to save away a register window to a 64-bit kernel
- * stack address. Flush all windows, disable interrupts,
- * remap if necessary, jump onto kernel trap table, then kernel
- * stack, or else we die.
+ /* PROM cif handler code address is in %o4. */
+sparc64_boot:
+1: rd %pc, %g7
+ set 1b, %g1
+ cmp %g1, %g7
+ be,pn %xcc, sparc64_boot_after_remap
+ mov %o4, %l7
+
+ /* We need to remap the kernel. Use position independent
+ * code to remap us to KERNBASE.
*
- * PROM entry point is on %o4
+ * SILO can invoke us with 32-bit address masking enabled,
+ * so make sure that's clear.
*/
-sparc64_boot:
+ rdpr %pstate, %g1
+ andn %g1, PSTATE_AM, %g1
+ wrpr %g1, 0x0, %pstate
+ ba,a,pt %xcc, 1f
+
+ .globl prom_finddev_name, prom_chosen_path
+ .globl prom_getprop_name, prom_mmu_name
+ .globl prom_callmethod_name, prom_translate_name
+ .globl prom_map_name, prom_unmap_name, prom_mmu_ihandle_cache
+ .globl prom_boot_mapped_pc, prom_boot_mapping_mode
+ .globl prom_boot_mapping_phys_high, prom_boot_mapping_phys_low
+prom_finddev_name:
+ .asciz "finddevice"
+prom_chosen_path:
+ .asciz "/chosen"
+prom_getprop_name:
+ .asciz "getprop"
+prom_mmu_name:
+ .asciz "mmu"
+prom_callmethod_name:
+ .asciz "call-method"
+prom_translate_name:
+ .asciz "translate"
+prom_map_name:
+ .asciz "map"
+prom_unmap_name:
+ .asciz "unmap"
+ .align 4
+prom_mmu_ihandle_cache:
+ .word 0
+prom_boot_mapped_pc:
+ .word 0
+prom_boot_mapping_mode:
+ .word 0
+ .align 8
+prom_boot_mapping_phys_high:
+ .xword 0
+prom_boot_mapping_phys_low:
+ .xword 0
+1:
+ rd %pc, %l0
+ mov (1b - prom_finddev_name), %l1
+ mov (1b - prom_chosen_path), %l2
+ mov (1b - prom_boot_mapped_pc), %l3
+ sub %l0, %l1, %l1
+ sub %l0, %l2, %l2
+ sub %l0, %l3, %l3
+ stw %l0, [%l3]
+ sub %sp, (192 + 128), %sp
+
+ /* chosen_node = prom_finddevice("/chosen") */
+ stx %l1, [%sp + 2047 + 128 + 0x00] ! service, "finddevice"
+ mov 1, %l3
+ stx %l3, [%sp + 2047 + 128 + 0x08] ! num_args, 1
+ stx %l3, [%sp + 2047 + 128 + 0x10] ! num_rets, 1
+ stx %l2, [%sp + 2047 + 128 + 0x18] ! arg1, "/chosen"
+ stx %g0, [%sp + 2047 + 128 + 0x20] ! ret1
+ call %l7
+ add %sp, (2047 + 128), %o0 ! argument array
+
+ ldx [%sp + 2047 + 128 + 0x20], %l4 ! chosen device node
+
+ mov (1b - prom_getprop_name), %l1
+ mov (1b - prom_mmu_name), %l2
+ mov (1b - prom_mmu_ihandle_cache), %l5
+ sub %l0, %l1, %l1
+ sub %l0, %l2, %l2
+ sub %l0, %l5, %l5
+
+ /* prom_mmu_ihandle_cache = prom_getint(chosen_node, "mmu") */
+ stx %l1, [%sp + 2047 + 128 + 0x00] ! service, "getprop"
+ mov 4, %l3
+ stx %l3, [%sp + 2047 + 128 + 0x08] ! num_args, 4
+ mov 1, %l3
+ stx %l3, [%sp + 2047 + 128 + 0x10] ! num_rets, 1
+ stx %l4, [%sp + 2047 + 128 + 0x18] ! arg1, chosen_node
+ stx %l2, [%sp + 2047 + 128 + 0x20] ! arg2, "mmu"
+ stx %l5, [%sp + 2047 + 128 + 0x28] ! arg3, &prom_mmu_ihandle_cache
+ mov 4, %l3
+ stx %l3, [%sp + 2047 + 128 + 0x30] ! arg4, sizeof(arg3)
+ stx %g0, [%sp + 2047 + 128 + 0x38] ! ret1
+ call %l7
+ add %sp, (2047 + 128), %o0 ! argument array
+
+ mov (1b - prom_callmethod_name), %l1
+ mov (1b - prom_translate_name), %l2
+ sub %l0, %l1, %l1
+ sub %l0, %l2, %l2
+ lduw [%l5], %l5 ! prom_mmu_ihandle_cache
+
+ stx %l1, [%sp + 2047 + 128 + 0x00] ! service, "call-method"
+ mov 3, %l3
+ stx %l3, [%sp + 2047 + 128 + 0x08] ! num_args, 3
+ mov 5, %l3
+ stx %l3, [%sp + 2047 + 128 + 0x10] ! num_rets, 5
+ stx %l2, [%sp + 2047 + 128 + 0x18] ! arg1: "translate"
+ stx %l5, [%sp + 2047 + 128 + 0x20] ! arg2: prom_mmu_ihandle_cache
+ /* PAGE align */
+ srlx %l0, 13, %l3
+ sllx %l3, 13, %l3
+ stx %l3, [%sp + 2047 + 128 + 0x28] ! arg3: vaddr, our PC
+ stx %g0, [%sp + 2047 + 128 + 0x30] ! res1
+ stx %g0, [%sp + 2047 + 128 + 0x38] ! res2
+ stx %g0, [%sp + 2047 + 128 + 0x40] ! res3
+ stx %g0, [%sp + 2047 + 128 + 0x48] ! res4
+ stx %g0, [%sp + 2047 + 128 + 0x50] ! res5
+ call %l7
+ add %sp, (2047 + 128), %o0 ! argument array
+
+ ldx [%sp + 2047 + 128 + 0x40], %l1 ! translation mode
+ mov (1b - prom_boot_mapping_mode), %l4
+ sub %l0, %l4, %l4
+ stw %l1, [%l4]
+ mov (1b - prom_boot_mapping_phys_high), %l4
+ sub %l0, %l4, %l4
+ ldx [%sp + 2047 + 128 + 0x48], %l2 ! physaddr high
+ stx %l2, [%l4 + 0x0]
+ ldx [%sp + 2047 + 128 + 0x50], %l3 ! physaddr low
+ /* 4MB align */
+ srlx %l3, 22, %l3
+ sllx %l3, 22, %l3
+ stx %l3, [%l4 + 0x8]
+
+ /* Leave service as-is, "call-method" */
+ mov 7, %l3
+ stx %l3, [%sp + 2047 + 128 + 0x08] ! num_args, 7
+ mov 1, %l3
+ stx %l3, [%sp + 2047 + 128 + 0x10] ! num_rets, 1
+ mov (1b - prom_map_name), %l3
+ sub %l0, %l3, %l3
+ stx %l3, [%sp + 2047 + 128 + 0x18] ! arg1: "map"
+ /* Leave arg2 as-is, prom_mmu_ihandle_cache */
+ mov -1, %l3
+ stx %l3, [%sp + 2047 + 128 + 0x28] ! arg3: mode (-1 default)
+ sethi %hi(8 * 1024 * 1024), %l3
+ stx %l3, [%sp + 2047 + 128 + 0x30] ! arg4: size (8MB)
+ sethi %hi(KERNBASE), %l3
+ stx %l3, [%sp + 2047 + 128 + 0x38] ! arg5: vaddr (KERNBASE)
+ stx %g0, [%sp + 2047 + 128 + 0x40] ! arg6: empty
+ mov (1b - prom_boot_mapping_phys_low), %l3
+ sub %l0, %l3, %l3
+ ldx [%l3], %l3
+ stx %l3, [%sp + 2047 + 128 + 0x48] ! arg7: phys addr
+ call %l7
+ add %sp, (2047 + 128), %o0 ! argument array
+
+ add %sp, (192 + 128), %sp
+
+sparc64_boot_after_remap:
BRANCH_IF_CHEETAH_BASE(g1,g7,cheetah_boot)
BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g7,cheetah_plus_boot)
ba,pt %xcc, spitfire_boot
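The stack stores in the new sparc64_boot code follow the OpenFirmware client-interface convention: every service call passes a single array of 64-bit cells holding the service name, the argument count, the return count, then the arguments and return slots, with the CIF entry point held in %l7. A rough C equivalent of the prom_finddevice("/chosen") call assembled at offsets 0x00-0x20 above; the struct and function names are illustrative, and the cell layout is read off the commented offsets:

    /* One CIF call: "finddevice" with 1 arg, 1 return (sketch). */
    struct cif_finddevice_args {
            const char *service;    /* +0x00: "finddevice"          */
            long num_args;          /* +0x08: 1                     */
            long num_rets;          /* +0x10: 1                     */
            const char *arg1;       /* +0x18: "/chosen"             */
            long ret1;              /* +0x20: filled in by the PROM */
    };

    static long prom_find_chosen(long (*cif)(void *))
    {
            struct cif_finddevice_args a = {
                    .service  = "finddevice",
                    .num_args = 1,
                    .num_rets = 1,
                    .arg1     = "/chosen",
            };
            cif(&a);        /* "call %l7" with the array in %o0 */
            return a.ret1;  /* the /chosen node handle */
    }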
@@ -125,185 +274,7 @@ cheetah_generic_boot:
stxa %g0, [%g3] ASI_IMMU
membar #Sync
- wrpr %g0, (PSTATE_PRIV|PSTATE_PEF|PSTATE_IE), %pstate
- wr %g0, 0, %fprs
-
- /* Just like for Spitfire, we probe itlb-2 for a mapping which
- * matches our current %pc. We take the physical address in
- * that mapping and use it to make our own.
- */
-
- /* %g5 holds the tlb data */
- sethi %uhi(_PAGE_VALID | _PAGE_SZ4MB), %g5
- sllx %g5, 32, %g5
- or %g5, (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W | _PAGE_G), %g5
-
- /* Put PADDR tlb data mask into %g3. */
- sethi %uhi(_PAGE_PADDR), %g3
- or %g3, %ulo(_PAGE_PADDR), %g3
- sllx %g3, 32, %g3
- sethi %hi(_PAGE_PADDR), %g7
- or %g7, %lo(_PAGE_PADDR), %g7
- or %g3, %g7, %g3
-
- set 2 << 16, %l0 /* TLB entry walker. */
- set 0x1fff, %l2 /* Page mask. */
- rd %pc, %l3
- andn %l3, %l2, %g2 /* vaddr comparator */
-
-1: ldxa [%l0] ASI_ITLB_TAG_READ, %g1
- membar #Sync
- andn %g1, %l2, %g1
- cmp %g1, %g2
- be,pn %xcc, cheetah_got_tlbentry
- nop
- and %l0, (127 << 3), %g1
- cmp %g1, (127 << 3)
- blu,pt %xcc, 1b
- add %l0, (1 << 3), %l0
-
- /* Search the small TLB. OBP never maps us like that but
- * newer SILO can.
- */
- clr %l0
-
-1: ldxa [%l0] ASI_ITLB_TAG_READ, %g1
- membar #Sync
- andn %g1, %l2, %g1
- cmp %g1, %g2
- be,pn %xcc, cheetah_got_tlbentry
- nop
- cmp %l0, (15 << 3)
- blu,pt %xcc, 1b
- add %l0, (1 << 3), %l0
-
- /* BUG() if we get here... */
- ta 0x5
-
-cheetah_got_tlbentry:
- ldxa [%l0] ASI_ITLB_DATA_ACCESS, %g0
- ldxa [%l0] ASI_ITLB_DATA_ACCESS, %g1
- membar #Sync
- and %g1, %g3, %g1
- set 0x5fff, %l0
- andn %g1, %l0, %g1
- or %g5, %g1, %g5
-
- /* Clear out any KERNBASE area entries. */
- set 2 << 16, %l0
- sethi %hi(KERNBASE), %g3
- sethi %hi(KERNBASE<<1), %g7
- mov TLB_TAG_ACCESS, %l7
-
- /* First, check ITLB */
-1: ldxa [%l0] ASI_ITLB_TAG_READ, %g1
- membar #Sync
- andn %g1, %l2, %g1
- cmp %g1, %g3
- blu,pn %xcc, 2f
- cmp %g1, %g7
- bgeu,pn %xcc, 2f
- nop
- stxa %g0, [%l7] ASI_IMMU
- membar #Sync
- stxa %g0, [%l0] ASI_ITLB_DATA_ACCESS
- membar #Sync
-
-2: and %l0, (127 << 3), %g1
- cmp %g1, (127 << 3)
- blu,pt %xcc, 1b
- add %l0, (1 << 3), %l0
-
- /* Next, check DTLB */
- set 2 << 16, %l0
-1: ldxa [%l0] ASI_DTLB_TAG_READ, %g1
- membar #Sync
- andn %g1, %l2, %g1
- cmp %g1, %g3
- blu,pn %xcc, 2f
- cmp %g1, %g7
- bgeu,pn %xcc, 2f
- nop
- stxa %g0, [%l7] ASI_DMMU
- membar #Sync
- stxa %g0, [%l0] ASI_DTLB_DATA_ACCESS
- membar #Sync
-
-2: and %l0, (511 << 3), %g1
- cmp %g1, (511 << 3)
- blu,pt %xcc, 1b
- add %l0, (1 << 3), %l0
-
- /* On Cheetah+, have to check second DTLB. */
- BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,l0,2f)
- ba,pt %xcc, 9f
- nop
-
-2: set 3 << 16, %l0
-1: ldxa [%l0] ASI_DTLB_TAG_READ, %g1
- membar #Sync
- andn %g1, %l2, %g1
- cmp %g1, %g3
- blu,pn %xcc, 2f
- cmp %g1, %g7
- bgeu,pn %xcc, 2f
- nop
- stxa %g0, [%l7] ASI_DMMU
- membar #Sync
- stxa %g0, [%l0] ASI_DTLB_DATA_ACCESS
- membar #Sync
-
-2: and %l0, (511 << 3), %g1
- cmp %g1, (511 << 3)
- blu,pt %xcc, 1b
- add %l0, (1 << 3), %l0
-
-9:
-
- /* Now lock the TTE we created into ITLB-0 and DTLB-0,
- * entry 15 (and maybe 14 too).
- */
- sethi %hi(KERNBASE), %g3
- set (0 << 16) | (15 << 3), %g7
- stxa %g3, [%l7] ASI_DMMU
- membar #Sync
- stxa %g5, [%g7] ASI_DTLB_DATA_ACCESS
- membar #Sync
- stxa %g3, [%l7] ASI_IMMU
- membar #Sync
- stxa %g5, [%g7] ASI_ITLB_DATA_ACCESS
- membar #Sync
- flush %g3
- membar #Sync
- sethi %hi(_end), %g3 /* Check for bigkernel case */
- or %g3, %lo(_end), %g3
- srl %g3, 23, %g3 /* Check if _end > 8M */
- brz,pt %g3, 1f
- sethi %hi(KERNBASE), %g3 /* Restore for fixup code below */
- sethi %hi(0x400000), %g3
- or %g3, %lo(0x400000), %g3
- add %g5, %g3, %g5 /* New tte data */
- andn %g5, (_PAGE_G), %g5
- sethi %hi(KERNBASE+0x400000), %g3
- or %g3, %lo(KERNBASE+0x400000), %g3
- set (0 << 16) | (14 << 3), %g7
- stxa %g3, [%l7] ASI_DMMU
- membar #Sync
- stxa %g5, [%g7] ASI_DTLB_DATA_ACCESS
- membar #Sync
- stxa %g3, [%l7] ASI_IMMU
- membar #Sync
- stxa %g5, [%g7] ASI_ITLB_DATA_ACCESS
- membar #Sync
- flush %g3
- membar #Sync
- sethi %hi(KERNBASE), %g3 /* Restore for fixup code below */
- ba,pt %xcc, 1f
- nop
-
-1: set sun4u_init, %g2
- jmpl %g2 + %g0, %g0
- nop
+ ba,a,pt %xcc, jump_to_sun4u_init
spitfire_boot:
/* Typically PROM has already enabled both MMU's and both on-chip
@@ -313,6 +284,7 @@ spitfire_boot:
stxa %g1, [%g0] ASI_LSU_CONTROL
membar #Sync
+jump_to_sun4u_init:
/*
* Make sure we are in privileged mode, have address masking,
* using the ordinary globals and have enabled floating
@@ -324,151 +296,6 @@ spitfire_boot:
wrpr %g0, (PSTATE_PRIV|PSTATE_PEF|PSTATE_IE), %pstate
wr %g0, 0, %fprs
-spitfire_create_mappings:
- /* %g5 holds the tlb data */
- sethi %uhi(_PAGE_VALID | _PAGE_SZ4MB), %g5
- sllx %g5, 32, %g5
- or %g5, (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W | _PAGE_G), %g5
-
- /* Base of physical memory cannot reliably be assumed to be
- * at 0x0! Figure out where it happens to be. -DaveM
- */
-
- /* Put PADDR tlb data mask into %g3. */
- sethi %uhi(_PAGE_PADDR_SF), %g3
- or %g3, %ulo(_PAGE_PADDR_SF), %g3
- sllx %g3, 32, %g3
- sethi %hi(_PAGE_PADDR_SF), %g7
- or %g7, %lo(_PAGE_PADDR_SF), %g7
- or %g3, %g7, %g3
-
- /* Walk through entire ITLB, looking for entry which maps
- * our %pc currently, stick PADDR from there into %g5 tlb data.
- */
- clr %l0 /* TLB entry walker. */
- set 0x1fff, %l2 /* Page mask. */
- rd %pc, %l3
- andn %l3, %l2, %g2 /* vaddr comparator */
-1:
- /* Yes, the nops seem to be necessary for now, don't ask me why. -DaveM */
- ldxa [%l0] ASI_ITLB_TAG_READ, %g1
- nop
- nop
- nop
- andn %g1, %l2, %g1 /* Get vaddr */
- cmp %g1, %g2
- be,a,pn %xcc, spitfire_got_tlbentry
- ldxa [%l0] ASI_ITLB_DATA_ACCESS, %g1
- cmp %l0, (63 << 3)
- blu,pt %xcc, 1b
- add %l0, (1 << 3), %l0
-
- /* BUG() if we get here... */
- ta 0x5
-
-spitfire_got_tlbentry:
- /* Nops here again, perhaps Cheetah/Blackbird are better behaved... */
- nop
- nop
- nop
- and %g1, %g3, %g1 /* Mask to just get paddr bits. */
- set 0x5fff, %l3 /* Mask offset to get phys base. */
- andn %g1, %l3, %g1
-
- /* NOTE: We hold on to %g1 paddr base as we need it below to lock
- * NOTE: the PROM cif code into the TLB.
- */
-
- or %g5, %g1, %g5 /* Or it into TAG being built. */
-
- clr %l0 /* TLB entry walker. */
- sethi %hi(KERNBASE), %g3 /* 4M lower limit */
- sethi %hi(KERNBASE<<1), %g7 /* 8M upper limit */
- mov TLB_TAG_ACCESS, %l7
-1:
- /* Yes, the nops seem to be necessary for now, don't ask me why. -DaveM */
- ldxa [%l0] ASI_ITLB_TAG_READ, %g1
- nop
- nop
- nop
- andn %g1, %l2, %g1 /* Get vaddr */
- cmp %g1, %g3
- blu,pn %xcc, 2f
- cmp %g1, %g7
- bgeu,pn %xcc, 2f
- nop
- stxa %g0, [%l7] ASI_IMMU
- stxa %g0, [%l0] ASI_ITLB_DATA_ACCESS
- membar #Sync
-2:
- cmp %l0, (63 << 3)
- blu,pt %xcc, 1b
- add %l0, (1 << 3), %l0
-
- nop; nop; nop
-
- clr %l0 /* TLB entry walker. */
-1:
- /* Yes, the nops seem to be necessary for now, don't ask me why. -DaveM */
- ldxa [%l0] ASI_DTLB_TAG_READ, %g1
- nop
- nop
- nop
- andn %g1, %l2, %g1 /* Get vaddr */
- cmp %g1, %g3
- blu,pn %xcc, 2f
- cmp %g1, %g7
- bgeu,pn %xcc, 2f
- nop
- stxa %g0, [%l7] ASI_DMMU
- stxa %g0, [%l0] ASI_DTLB_DATA_ACCESS
- membar #Sync
-2:
- cmp %l0, (63 << 3)
- blu,pt %xcc, 1b
- add %l0, (1 << 3), %l0
-
- nop; nop; nop
-
-
- /* PROM never puts any TLB entries into the MMU with the lock bit
- * set. So we gladly use tlb entry 63 for KERNBASE. And maybe 62 too.
- */
-
- sethi %hi(KERNBASE), %g3
- mov (63 << 3), %g7
- stxa %g3, [%l7] ASI_DMMU /* KERNBASE into TLB TAG */
- stxa %g5, [%g7] ASI_DTLB_DATA_ACCESS /* TTE into TLB DATA */
- membar #Sync
- stxa %g3, [%l7] ASI_IMMU /* KERNBASE into TLB TAG */
- stxa %g5, [%g7] ASI_ITLB_DATA_ACCESS /* TTE into TLB DATA */
- membar #Sync
- flush %g3
- membar #Sync
- sethi %hi(_end), %g3 /* Check for bigkernel case */
- or %g3, %lo(_end), %g3
- srl %g3, 23, %g3 /* Check if _end > 8M */
- brz,pt %g3, 2f
- sethi %hi(KERNBASE), %g3 /* Restore for fixup code below */
- sethi %hi(0x400000), %g3
- or %g3, %lo(0x400000), %g3
- add %g5, %g3, %g5 /* New tte data */
- andn %g5, (_PAGE_G), %g5
- sethi %hi(KERNBASE+0x400000), %g3
- or %g3, %lo(KERNBASE+0x400000), %g3
- mov (62 << 3), %g7
- stxa %g3, [%l7] ASI_DMMU
- stxa %g5, [%g7] ASI_DTLB_DATA_ACCESS
- membar #Sync
- stxa %g3, [%l7] ASI_IMMU
- stxa %g5, [%g7] ASI_ITLB_DATA_ACCESS
- membar #Sync
- flush %g3
- membar #Sync
- sethi %hi(KERNBASE), %g3 /* Restore for fixup code below */
-2: ba,pt %xcc, 1f
- nop
-1:
set sun4u_init, %g2
jmpl %g2 + %g0, %g0
nop
@@ -483,38 +310,12 @@ sun4u_init:
stxa %g0, [%g7] ASI_DMMU
membar #Sync
- /* We are now safely (we hope) in Nucleus context (0), rewrite
- * the KERNBASE TTE's so they no longer have the global bit set.
- * Don't forget to setup TAG_ACCESS first 8-)
- */
- mov TLB_TAG_ACCESS, %g2
- stxa %g3, [%g2] ASI_IMMU
- stxa %g3, [%g2] ASI_DMMU
- membar #Sync
-
BRANCH_IF_ANY_CHEETAH(g1,g7,cheetah_tlb_fixup)
ba,pt %xcc, spitfire_tlb_fixup
nop
cheetah_tlb_fixup:
- set (0 << 16) | (15 << 3), %g7
- ldxa [%g7] ASI_ITLB_DATA_ACCESS, %g0
- ldxa [%g7] ASI_ITLB_DATA_ACCESS, %g1
- andn %g1, (_PAGE_G), %g1
- stxa %g1, [%g7] ASI_ITLB_DATA_ACCESS
- membar #Sync
-
- ldxa [%g7] ASI_DTLB_DATA_ACCESS, %g0
- ldxa [%g7] ASI_DTLB_DATA_ACCESS, %g1
- andn %g1, (_PAGE_G), %g1
- stxa %g1, [%g7] ASI_DTLB_DATA_ACCESS
- membar #Sync
-
- /* Kill instruction prefetch queues. */
- flush %g3
- membar #Sync
-
mov 2, %g2 /* Set TLB type to cheetah+. */
BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g7,1f)
@@ -523,23 +324,7 @@ cheetah_tlb_fixup:
1: sethi %hi(tlb_type), %g1
stw %g2, [%g1 + %lo(tlb_type)]
- BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g7,1f)
- ba,pt %xcc, 2f
- nop
-
-1: /* Patch context register writes to support nucleus page
- * size correctly.
- */
- call cheetah_plus_patch_etrap
- nop
- call cheetah_plus_patch_rtrap
- nop
- call cheetah_plus_patch_fpdis
- nop
- call cheetah_plus_patch_winfixup
- nop
-
-2: /* Patch copy/page operations to cheetah optimized versions. */
+ /* Patch copy/page operations to cheetah optimized versions. */
call cheetah_patch_copyops
nop
call cheetah_patch_copy_page
@@ -551,21 +336,6 @@ cheetah_tlb_fixup:
nop
spitfire_tlb_fixup:
- mov (63 << 3), %g7
- ldxa [%g7] ASI_ITLB_DATA_ACCESS, %g1
- andn %g1, (_PAGE_G), %g1
- stxa %g1, [%g7] ASI_ITLB_DATA_ACCESS
- membar #Sync
-
- ldxa [%g7] ASI_DTLB_DATA_ACCESS, %g1
- andn %g1, (_PAGE_G), %g1
- stxa %g1, [%g7] ASI_DTLB_DATA_ACCESS
- membar #Sync
-
- /* Kill instruction prefetch queues. */
- flush %g3
- membar #Sync
-
/* Set TLB type to spitfire. */
mov 0, %g2
sethi %hi(tlb_type), %g1
@@ -578,24 +348,6 @@ tlb_fixup_done:
mov %sp, %l6
mov %o4, %l7
-#if 0 /* We don't do it like this anymore, but for historical hack value
- * I leave this snippet here to show how crazy we can be sometimes. 8-)
- */
-
- /* Setup "Linux Current Register", thanks Sun 8-) */
- wr %g0, 0x1, %pcr
-
- /* Blackbird errata workaround. See commentary in
- * smp.c:smp_percpu_timer_interrupt() for more
- * information.
- */
- ba,pt %xcc, 99f
- nop
- .align 64
-99: wr %g6, %g0, %pic
- rd %pic, %g0
-#endif
-
wr %g0, ASI_P, %asi
mov 1, %g1
sllx %g1, THREAD_SHIFT, %g1
@@ -629,32 +381,78 @@ tlb_fixup_done:
nop
/* Not reached... */
-/* IMPORTANT NOTE: Whenever making changes here, check
- * trampoline.S as well. -jj */
- .globl setup_tba
-setup_tba: /* i0 = is_starfire */
- save %sp, -160, %sp
+ /* This is meant to allow the sharing of this code between
+ * boot processor invocation (via setup_tba() below) and
+ * secondary processor startup (via trampoline.S). The
+ * former does use this code, the latter does not yet due
+ * to some complexities. That should be fixed up at some
+ * point.
+ *
+ * There used to be enormous complexity wrt. transferring
+ * over from the firmware's trap table to the Linux kernel's.
+ * For example, there was a chicken & egg problem wrt. building
+ * the OBP page tables, yet needing to be on the Linux kernel
+ * trap table (to translate PAGE_OFFSET addresses) in order to
+ * do that.
+ *
+ * We now handle OBP tlb misses differently, via linear lookups
+ * into the prom_trans[] array. So that specific problem no
+ * longer exists. Yet, unfortunately there are still some issues
+ * preventing trampoline.S from using this code... ho hum.
+ */
+ .globl setup_trap_table
+setup_trap_table:
+ save %sp, -192, %sp
- rdpr %tba, %g7
- sethi %hi(prom_tba), %o1
- or %o1, %lo(prom_tba), %o1
- stx %g7, [%o1]
+ /* Force interrupts to be disabled. */
+ rdpr %pstate, %o1
+ andn %o1, PSTATE_IE, %o1
+ wrpr %o1, 0x0, %pstate
+ wrpr %g0, 15, %pil
+
+ /* Make the firmware call to jump over to the Linux trap table. */
+ call prom_set_trap_table
+ sethi %hi(sparc64_ttable_tl0), %o0
+
+ /* Start using proper page size encodings in ctx register. */
+ sethi %hi(sparc64_kern_pri_context), %g3
+ ldx [%g3 + %lo(sparc64_kern_pri_context)], %g2
+ mov PRIMARY_CONTEXT, %g1
+ stxa %g2, [%g1] ASI_DMMU
+ membar #Sync
+
+ /* The Linux trap handlers expect various trap global registers
+ * to be setup with some fixed values. So here we set these
+ * up very carefully. These globals are:
+ *
+ * Alternate Globals (PSTATE_AG):
+ *
+ * %g6 --> current_thread_info()
+ *
+ * MMU Globals (PSTATE_MG):
+ *
+ * %g1 --> TLB_SFSR
+ * %g2 --> ((_PAGE_VALID | _PAGE_SZ4MB |
+ * _PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W)
+ * ^ 0xfffff80000000000)
+ * (this %g2 value is used for computing the PAGE_OFFSET kernel
+ * TLB entries quickly, the virtual address of the fault XOR'd
+ * with this %g2 value is the PTE to load into the TLB)
+ * %g3 --> VPTE_BASE_CHEETAH or VPTE_BASE_SPITFIRE
+ *
+ * Interrupt Globals (PSTATE_IG, setup by init_irqwork_curcpu()):
+ *
+ * %g6 --> __irq_work[smp_processor_id()]
+ */
- /* Setup "Linux" globals 8-) */
rdpr %pstate, %o1
mov %g6, %o2
- wrpr %o1, (PSTATE_AG|PSTATE_IE), %pstate
- sethi %hi(sparc64_ttable_tl0), %g1
- wrpr %g1, %tba
+ wrpr %o1, PSTATE_AG, %pstate
mov %o2, %g6
- /* Set up MMU globals */
- wrpr %o1, (PSTATE_MG|PSTATE_IE), %pstate
-
- /* Set fixed globals used by dTLB miss handler. */
#define KERN_HIGHBITS ((_PAGE_VALID|_PAGE_SZ4MB)^0xfffff80000000000)
#define KERN_LOWBITS (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W)
-
+ wrpr %o1, PSTATE_MG, %pstate
mov TSB_REG, %g1
stxa %g0, [%g1] ASI_DMMU
membar #Sync
@@ -666,17 +464,17 @@ setup_tba: /* i0 = is_starfire */
sllx %g2, 32, %g2
or %g2, KERN_LOWBITS, %g2
- BRANCH_IF_ANY_CHEETAH(g3,g7,cheetah_vpte_base)
- ba,pt %xcc, spitfire_vpte_base
+ BRANCH_IF_ANY_CHEETAH(g3,g7,8f)
+ ba,pt %xcc, 9f
nop
-cheetah_vpte_base:
+8:
sethi %uhi(VPTE_BASE_CHEETAH), %g3
or %g3, %ulo(VPTE_BASE_CHEETAH), %g3
ba,pt %xcc, 2f
sllx %g3, 32, %g3
-spitfire_vpte_base:
+9:
sethi %uhi(VPTE_BASE_SPITFIRE), %g3
or %g3, %ulo(VPTE_BASE_SPITFIRE), %g3
sllx %g3, 32, %g3
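The %g2 value documented in the setup_trap_table comment enables a one-instruction TLB fill for the linear mapping: for a fault address in the PAGE_OFFSET region, XOR with %g2 cancels the 0xfffff80000000000 offset bits (leaving the physical address) while setting the PTE flag bits, which are zero in the virtual address. A hedged worked example built from the KERN_HIGHBITS/KERN_LOWBITS constants defined in the hunk (the _PAGE_* values come from asm/pgtable.h; function name illustrative):

    /* g2 is precomputed once at trap-table setup time. */
    unsigned long g2 = ((_PAGE_VALID | _PAGE_SZ4MB) ^ 0xfffff80000000000UL)
                     | (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W);

    static unsigned long linear_map_pte(unsigned long vaddr)
    {
            /* The PAGE_OFFSET bits of vaddr cancel against the same
             * bits folded into g2, and the flag bits pass through
             * set: one XOR produces a complete PTE.
             */
            return vaddr ^ g2;
    }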
@@ -702,48 +500,55 @@ spitfire_vpte_base:
sllx %o2, 32, %o2
wr %o2, %asr25
- /* Ok, we're done setting up all the state our trap mechanims needs,
- * now get back into normal globals and let the PROM know what is up.
- */
2:
wrpr %g0, %g0, %wstate
- wrpr %o1, PSTATE_IE, %pstate
+ wrpr %o1, 0x0, %pstate
call init_irqwork_curcpu
nop
- call prom_set_trap_table
- sethi %hi(sparc64_ttable_tl0), %o0
-
- BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g2,g3,1f)
- ba,pt %xcc, 2f
- nop
-
-1: /* Start using proper page size encodings in ctx register. */
- sethi %uhi(CTX_CHEETAH_PLUS_NUC), %g3
- mov PRIMARY_CONTEXT, %g1
- sllx %g3, 32, %g3
- sethi %hi(CTX_CHEETAH_PLUS_CTX0), %g2
- or %g3, %g2, %g3
- stxa %g3, [%g1] ASI_DMMU
- membar #Sync
-
-2:
+ /* Now we can turn interrupts back on. */
rdpr %pstate, %o1
or %o1, PSTATE_IE, %o1
wrpr %o1, 0, %pstate
+ wrpr %g0, 0x0, %pil
+
+ ret
+ restore
+
+ .globl setup_tba
+setup_tba: /* i0 = is_starfire */
+ save %sp, -192, %sp
+
+ /* The boot processor is the only cpu which invokes this
+ * routine, the other cpus set things up via trampoline.S.
+ * So save the OBP trap table address here.
+ */
+ rdpr %tba, %g7
+ sethi %hi(prom_tba), %o1
+ or %o1, %lo(prom_tba), %o1
+ stx %g7, [%o1]
+
+ call setup_trap_table
+ nop
ret
restore
+sparc64_boot_end:
+
+#include "systbls.S"
+#include "ktlb.S"
+#include "etrap.S"
+#include "rtrap.S"
+#include "winfixup.S"
+#include "entry.S"
/*
- * The following skips make sure the trap table in ttable.S is aligned
+ * The following skip makes sure the trap table in ttable.S is aligned
* on a 32K boundary as required by the v9 specs for TBA register.
*/
-sparc64_boot_end:
- .skip 0x2000 + _start - sparc64_boot_end
-bootup_user_stack_end:
- .skip 0x2000
+1:
+ .skip 0x4000 + _start - 1b
#ifdef CONFIG_SBUS
/* This is just a hack to fool make depend config.h discovering
@@ -755,20 +560,6 @@ bootup_user_stack_end:
! 0x0000000000408000
#include "ttable.S"
-#include "systbls.S"
-
- .align 1024
- .globl swapper_pg_dir
-swapper_pg_dir:
- .word 0
-
-#include "etrap.S"
-#include "rtrap.S"
-#include "winfixup.S"
-#include "entry.S"
-
- /* This is just anal retentiveness on my part... */
- .align 16384
.data
.align 8
@@ -776,8 +567,11 @@ swapper_pg_dir:
prom_tba: .xword 0
tlb_type: .word 0 /* Must NOT end up in BSS */
.section ".fixup",#alloc,#execinstr
- .globl __ret_efault
+
+ .globl __ret_efault, __retl_efault
__ret_efault:
ret
restore %g0, -EFAULT, %o0
-
+__retl_efault:
+ retl
+ mov -EFAULT, %o0
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index c9b69167632..233526ba3ab 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -27,6 +27,7 @@
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/irq.h>
+#include <asm/io.h>
#include <asm/sbus.h>
#include <asm/iommu.h>
#include <asm/upa.h>
diff --git a/arch/sparc64/kernel/itlb_base.S b/arch/sparc64/kernel/itlb_base.S
index b5e32dfa4fb..4951ff8f687 100644
--- a/arch/sparc64/kernel/itlb_base.S
+++ b/arch/sparc64/kernel/itlb_base.S
@@ -15,14 +15,12 @@
*/
#define CREATE_VPTE_OFFSET1(r1, r2) \
srax r1, 10, r2
-#define CREATE_VPTE_OFFSET2(r1, r2)
-#define CREATE_VPTE_NOP nop
+#define CREATE_VPTE_OFFSET2(r1, r2) nop
#else /* PAGE_SHIFT */
#define CREATE_VPTE_OFFSET1(r1, r2) \
srax r1, PAGE_SHIFT, r2
#define CREATE_VPTE_OFFSET2(r1, r2) \
sllx r2, 3, r2
-#define CREATE_VPTE_NOP
#endif /* PAGE_SHIFT */
@@ -36,6 +34,7 @@
*/
/* ITLB ** ICACHE line 1: Quick user TLB misses */
+ mov TLB_SFSR, %g1
ldxa [%g1 + %g1] ASI_IMMU, %g4 ! Get TAG_ACCESS
CREATE_VPTE_OFFSET1(%g4, %g6) ! Create VPTE offset
CREATE_VPTE_OFFSET2(%g4, %g6) ! Create VPTE offset
@@ -43,41 +42,38 @@
1: brgez,pn %g5, 3f ! Not valid, branch out
sethi %hi(_PAGE_EXEC), %g4 ! Delay-slot
andcc %g5, %g4, %g0 ! Executable?
+
+/* ITLB ** ICACHE line 2: Real faults */
be,pn %xcc, 3f ! Nope, branch.
nop ! Delay-slot
2: stxa %g5, [%g0] ASI_ITLB_DATA_IN ! Load PTE into TLB
retry ! Trap return
-3: rdpr %pstate, %g4 ! Move into alternate globals
-
-/* ITLB ** ICACHE line 2: Real faults */
+3: rdpr %pstate, %g4 ! Move into alt-globals
wrpr %g4, PSTATE_AG|PSTATE_MG, %pstate
rdpr %tpc, %g5 ! And load faulting VA
mov FAULT_CODE_ITLB, %g4 ! It was read from ITLB
-sparc64_realfault_common: ! Called by TL0 dtlb_miss too
+
+/* ITLB ** ICACHE line 3: Finish faults */
+sparc64_realfault_common: ! Called by dtlb_miss
stb %g4, [%g6 + TI_FAULT_CODE]
stx %g5, [%g6 + TI_FAULT_ADDR]
ba,pt %xcc, etrap ! Save state
1: rd %pc, %g7 ! ...
- nop
-
-/* ITLB ** ICACHE line 3: Finish faults + window fixups */
call do_sparc64_fault ! Call fault handler
add %sp, PTREGS_OFF, %o0! Compute pt_regs arg
ba,pt %xcc, rtrap_clr_l6 ! Restore cpu state
nop
+
+/* ITLB ** ICACHE line 4: Window fixups */
winfix_trampoline:
rdpr %tpc, %g3 ! Prepare winfixup TNPC
- or %g3, 0x7c, %g3 ! Compute offset to branch
+ or %g3, 0x7c, %g3 ! Compute branch offset
wrpr %g3, %tnpc ! Write it into TNPC
done ! Do it to it
-
-/* ITLB ** ICACHE line 4: Unused... */
nop
nop
nop
nop
- CREATE_VPTE_NOP
#undef CREATE_VPTE_OFFSET1
#undef CREATE_VPTE_OFFSET2
-#undef CREATE_VPTE_NOP
diff --git a/arch/sparc64/kernel/ktlb.S b/arch/sparc64/kernel/ktlb.S
new file mode 100644
index 00000000000..d9244d3c9f7
--- /dev/null
+++ b/arch/sparc64/kernel/ktlb.S
@@ -0,0 +1,194 @@
+/* arch/sparc64/kernel/ktlb.S: Kernel mapping TLB miss handling.
+ *
+ * Copyright (C) 1995, 1997, 2005 David S. Miller <davem@davemloft.net>
+ * Copyright (C) 1996 Eddie C. Dost (ecd@brainaid.de)
+ * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
+ * Copyright (C) 1996,98,99 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+*/
+
+#include <linux/config.h>
+#include <asm/head.h>
+#include <asm/asi.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+
+ .text
+ .align 32
+
+/*
+ * On a second level vpte miss, check whether the original fault is to the OBP
+ * range (note that this is only possible for instruction miss, data misses to
+ * obp range do not use vpte). If so, go back directly to the faulting address.
+ * This is because we want to read the tpc, otherwise we have no way of knowing
+ * the 8k aligned faulting address if we are using >8k kernel pagesize. This
+ * also ensures no vpte range addresses are dropped into tlb while obp is
+ * executing (see inherit_locked_prom_mappings() rant).
+ */
+sparc64_vpte_nucleus:
+ /* Note that kvmap below has verified that the address is
+ * in the range MODULES_VADDR --> VMALLOC_END already. So
+ * here we need only check if it is an OBP address or not.
+ */
+ sethi %hi(LOW_OBP_ADDRESS), %g5
+ cmp %g4, %g5
+ blu,pn %xcc, kern_vpte
+ mov 0x1, %g5
+ sllx %g5, 32, %g5
+ cmp %g4, %g5
+ blu,pn %xcc, vpte_insn_obp
+ nop
+
+ /* These two instructions are patched by paging_init(). */
+kern_vpte:
+ sethi %hi(swapper_pgd_zero), %g5
+ lduw [%g5 + %lo(swapper_pgd_zero)], %g5
+
+ /* With kernel PGD in %g5, branch back into dtlb_backend. */
+ ba,pt %xcc, sparc64_kpte_continue
+ andn %g1, 0x3, %g1 /* Finish PMD offset adjustment. */
+
+vpte_noent:
+ /* Restore previous TAG_ACCESS, %g5 is zero, and we will
+ * skip over the trap instruction so that the top level
+ * TLB miss handler will think this %g5 value is just an
+ * invalid PTE, thus branching to full fault processing.
+ */
+ mov TLB_SFSR, %g1
+ stxa %g4, [%g1 + %g1] ASI_DMMU
+ done
+
+vpte_insn_obp:
+ /* Behave as if we are at TL0. */
+ wrpr %g0, 1, %tl
+ rdpr %tpc, %g4 /* Find original faulting iaddr */
+ srlx %g4, 13, %g4 /* Throw out context bits */
+ sllx %g4, 13, %g4 /* g4 has vpn + ctx0 now */
+
+ /* Restore previous TAG_ACCESS. */
+ mov TLB_SFSR, %g1
+ stxa %g4, [%g1 + %g1] ASI_IMMU
+
+ sethi %hi(prom_trans), %g5
+ or %g5, %lo(prom_trans), %g5
+
+1: ldx [%g5 + 0x00], %g6 ! base
+ brz,a,pn %g6, longpath ! no more entries, fail
+ mov TLB_SFSR, %g1 ! and restore %g1
+ ldx [%g5 + 0x08], %g1 ! len
+ add %g6, %g1, %g1 ! end
+ cmp %g6, %g4
+ bgu,pt %xcc, 2f
+ cmp %g4, %g1
+ bgeu,pt %xcc, 2f
+ ldx [%g5 + 0x10], %g1 ! PTE
+
+ /* TLB load, restore %g1, and return from trap. */
+ sub %g4, %g6, %g6
+ add %g1, %g6, %g5
+ mov TLB_SFSR, %g1
+ stxa %g5, [%g0] ASI_ITLB_DATA_IN
+ retry
+
+2: ba,pt %xcc, 1b
+ add %g5, (3 * 8), %g5 ! next entry
+
+kvmap_do_obp:
+ sethi %hi(prom_trans), %g5
+ or %g5, %lo(prom_trans), %g5
+ srlx %g4, 13, %g4
+ sllx %g4, 13, %g4
+
+1: ldx [%g5 + 0x00], %g6 ! base
+ brz,a,pn %g6, longpath ! no more entries, fail
+ mov TLB_SFSR, %g1 ! and restore %g1
+ ldx [%g5 + 0x08], %g1 ! len
+ add %g6, %g1, %g1 ! end
+ cmp %g6, %g4
+ bgu,pt %xcc, 2f
+ cmp %g4, %g1
+ bgeu,pt %xcc, 2f
+ ldx [%g5 + 0x10], %g1 ! PTE
+
+ /* TLB load, restore %g1, and return from trap. */
+ sub %g4, %g6, %g6
+ add %g1, %g6, %g5
+ mov TLB_SFSR, %g1
+ stxa %g5, [%g0] ASI_DTLB_DATA_IN
+ retry
+
+2: ba,pt %xcc, 1b
+ add %g5, (3 * 8), %g5 ! next entry
+
+/*
+ * On a first level data miss, check whether this is to the OBP range (note
+ * that such accesses can be made by prom, as well as by kernel using
+ * prom_getproperty on "address"), and if so, do not use vpte access ...
+ * rather, use information saved during inherit_prom_mappings() using 8k
+ * pagesize.
+ */
+ .align 32
+kvmap:
+ brgez,pn %g4, kvmap_nonlinear
+ nop
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+ .globl kvmap_linear_patch
+kvmap_linear_patch:
+#endif
+ ba,pt %xcc, kvmap_load
+ xor %g2, %g4, %g5
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+ sethi %hi(swapper_pg_dir), %g5
+ or %g5, %lo(swapper_pg_dir), %g5
+ sllx %g4, 64 - (PGDIR_SHIFT + PGDIR_BITS), %g6
+ srlx %g6, 64 - PAGE_SHIFT, %g6
+ andn %g6, 0x3, %g6
+ lduw [%g5 + %g6], %g5
+ brz,pn %g5, longpath
+ sllx %g4, 64 - (PMD_SHIFT + PMD_BITS), %g6
+ srlx %g6, 64 - PAGE_SHIFT, %g6
+ sllx %g5, 11, %g5
+ andn %g6, 0x3, %g6
+ lduwa [%g5 + %g6] ASI_PHYS_USE_EC, %g5
+ brz,pn %g5, longpath
+ sllx %g4, 64 - PMD_SHIFT, %g6
+ srlx %g6, 64 - PAGE_SHIFT, %g6
+ sllx %g5, 11, %g5
+ andn %g6, 0x7, %g6
+ ldxa [%g5 + %g6] ASI_PHYS_USE_EC, %g5
+ brz,pn %g5, longpath
+ nop
+ ba,a,pt %xcc, kvmap_load
+#endif
+
+kvmap_nonlinear:
+ sethi %hi(MODULES_VADDR), %g5
+ cmp %g4, %g5
+ blu,pn %xcc, longpath
+ mov (VMALLOC_END >> 24), %g5
+ sllx %g5, 24, %g5
+ cmp %g4, %g5
+ bgeu,pn %xcc, longpath
+ nop
+
+kvmap_check_obp:
+ sethi %hi(LOW_OBP_ADDRESS), %g5
+ cmp %g4, %g5
+ blu,pn %xcc, kvmap_vmalloc_addr
+ mov 0x1, %g5
+ sllx %g5, 32, %g5
+ cmp %g4, %g5
+ blu,pn %xcc, kvmap_do_obp
+ nop
+
+kvmap_vmalloc_addr:
+ /* If we get here, a vmalloc addr was accessed, load kernel VPTE. */
+ ldxa [%g3 + %g6] ASI_N, %g5
+ brgez,pn %g5, longpath
+ nop
+
+kvmap_load:
+ /* PTE is valid, load into TLB and return from trap. */
+ stxa %g5, [%g0] ASI_DTLB_DATA_IN ! Reload TLB
+ retry
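Both vpte_insn_obp and kvmap_do_obp above perform the same linear walk over prom_trans[]: entries are three 8-byte words (base, length, PTE of the first page, per the 0x00/0x08/0x10 loads), the table is terminated by a zero base, and a hit returns the entry's PTE advanced by the fault's offset into the range. A C restatement, with illustrative names:

    /* C restatement of the prom_trans[] walk in the assembly above. */
    struct prom_trans_ent {
            unsigned long base;     /* +0x00: first mapped vaddr     */
            unsigned long size;     /* +0x08: length of the range    */
            unsigned long pte;      /* +0x10: PTE for the first page */
    };

    static unsigned long obp_lookup(const struct prom_trans_ent *t,
                                    unsigned long vaddr)
    {
            for (; t->base != 0; t++) {
                    if (vaddr >= t->base && vaddr < t->base + t->size)
                            return t->pte + (vaddr - t->base);
            }
            return 0;       /* miss: the handler falls into longpath */
    }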
diff --git a/arch/sparc64/kernel/pci_iommu.c b/arch/sparc64/kernel/pci_iommu.c
index 425c60cfea1..a11910be101 100644
--- a/arch/sparc64/kernel/pci_iommu.c
+++ b/arch/sparc64/kernel/pci_iommu.c
@@ -49,12 +49,6 @@ static void __iommu_flushall(struct pci_iommu *iommu)
/* Ensure completion of previous PIO writes. */
(void) pci_iommu_read(iommu->write_complete_reg);
-
- /* Now update everyone's flush point. */
- for (entry = 0; entry < PBM_NCLUSTERS; entry++) {
- iommu->alloc_info[entry].flush =
- iommu->alloc_info[entry].next;
- }
}
#define IOPTE_CONSISTENT(CTX) \
@@ -80,120 +74,117 @@ static void inline iopte_make_dummy(struct pci_iommu *iommu, iopte_t *iopte)
iopte_val(*iopte) = val;
}
-void pci_iommu_table_init(struct pci_iommu *iommu, int tsbsize)
+/* Based largely upon the ppc64 iommu allocator. */
+static long pci_arena_alloc(struct pci_iommu *iommu, unsigned long npages)
{
- int i;
-
- tsbsize /= sizeof(iopte_t);
-
- for (i = 0; i < tsbsize; i++)
- iopte_make_dummy(iommu, &iommu->page_table[i]);
-}
-
-static iopte_t *alloc_streaming_cluster(struct pci_iommu *iommu, unsigned long npages)
-{
- iopte_t *iopte, *limit, *first;
- unsigned long cnum, ent, flush_point;
-
- cnum = 0;
- while ((1UL << cnum) < npages)
- cnum++;
- iopte = (iommu->page_table +
- (cnum << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));
-
- if (cnum == 0)
- limit = (iommu->page_table +
- iommu->lowest_consistent_map);
- else
- limit = (iopte +
- (1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));
-
- iopte += ((ent = iommu->alloc_info[cnum].next) << cnum);
- flush_point = iommu->alloc_info[cnum].flush;
-
- first = iopte;
- for (;;) {
- if (IOPTE_IS_DUMMY(iommu, iopte)) {
- if ((iopte + (1 << cnum)) >= limit)
- ent = 0;
- else
- ent = ent + 1;
- iommu->alloc_info[cnum].next = ent;
- if (ent == flush_point)
- __iommu_flushall(iommu);
- break;
+ struct pci_iommu_arena *arena = &iommu->arena;
+ unsigned long n, i, start, end, limit;
+ int pass;
+
+ limit = arena->limit;
+ start = arena->hint;
+ pass = 0;
+
+again:
+ n = find_next_zero_bit(arena->map, limit, start);
+ end = n + npages;
+ if (unlikely(end >= limit)) {
+ if (likely(pass < 1)) {
+ limit = start;
+ start = 0;
+ __iommu_flushall(iommu);
+ pass++;
+ goto again;
+ } else {
+ /* Scanned the whole thing, give up. */
+ return -1;
}
- iopte += (1 << cnum);
- ent++;
- if (iopte >= limit) {
- iopte = (iommu->page_table +
- (cnum <<
- (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));
- ent = 0;
+ }
+
+ for (i = n; i < end; i++) {
+ if (test_bit(i, arena->map)) {
+ start = i + 1;
+ goto again;
}
- if (ent == flush_point)
- __iommu_flushall(iommu);
- if (iopte == first)
- goto bad;
}
- /* I've got your streaming cluster right here buddy boy... */
- return iopte;
+ for (i = n; i < end; i++)
+ __set_bit(i, arena->map);
-bad:
- printk(KERN_EMERG "pci_iommu: alloc_streaming_cluster of npages(%ld) failed!\n",
- npages);
- return NULL;
+ arena->hint = end;
+
+ return n;
}
-static void free_streaming_cluster(struct pci_iommu *iommu, dma_addr_t base,
- unsigned long npages, unsigned long ctx)
+static void pci_arena_free(struct pci_iommu_arena *arena, unsigned long base, unsigned long npages)
{
- unsigned long cnum, ent;
+ unsigned long i;
- cnum = 0;
- while ((1UL << cnum) < npages)
- cnum++;
+ for (i = base; i < (base + npages); i++)
+ __clear_bit(i, arena->map);
+}
- ent = (base << (32 - IO_PAGE_SHIFT + PBM_LOGCLUSTERS - iommu->page_table_sz_bits))
- >> (32 + PBM_LOGCLUSTERS + cnum - iommu->page_table_sz_bits);
+void pci_iommu_table_init(struct pci_iommu *iommu, int tsbsize, u32 dma_offset, u32 dma_addr_mask)
+{
+ unsigned long i, tsbbase, order, sz, num_tsb_entries;
+
+ num_tsb_entries = tsbsize / sizeof(iopte_t);
+
+ /* Setup initial software IOMMU state. */
+ spin_lock_init(&iommu->lock);
+ iommu->ctx_lowest_free = 1;
+ iommu->page_table_map_base = dma_offset;
+ iommu->dma_addr_mask = dma_addr_mask;
+
+ /* Allocate and initialize the free area map. */
+ sz = num_tsb_entries / 8;
+ sz = (sz + 7UL) & ~7UL;
+ iommu->arena.map = kmalloc(sz, GFP_KERNEL);
+ if (!iommu->arena.map) {
+ prom_printf("PCI_IOMMU: Error, kmalloc(arena.map) failed.\n");
+ prom_halt();
+ }
+ memset(iommu->arena.map, 0, sz);
+ iommu->arena.limit = num_tsb_entries;
- /* If the global flush might not have caught this entry,
- * adjust the flush point such that we will flush before
- * ever trying to reuse it.
+ /* Allocate and initialize the dummy page which we
+ * set inactive IO PTEs to point to.
*/
-#define between(X,Y,Z) (((Z) - (Y)) >= ((X) - (Y)))
- if (between(ent, iommu->alloc_info[cnum].next, iommu->alloc_info[cnum].flush))
- iommu->alloc_info[cnum].flush = ent;
-#undef between
+ iommu->dummy_page = __get_free_pages(GFP_KERNEL, 0);
+ if (!iommu->dummy_page) {
+ prom_printf("PCI_IOMMU: Error, gfp(dummy_page) failed.\n");
+ prom_halt();
+ }
+ memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
+ iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);
+
+ /* Now allocate and setup the IOMMU page table itself. */
+ order = get_order(tsbsize);
+ tsbbase = __get_free_pages(GFP_KERNEL, order);
+ if (!tsbbase) {
+ prom_printf("PCI_IOMMU: Error, gfp(tsb) failed.\n");
+ prom_halt();
+ }
+ iommu->page_table = (iopte_t *)tsbbase;
+
+ for (i = 0; i < num_tsb_entries; i++)
+ iopte_make_dummy(iommu, &iommu->page_table[i]);
}
-/* We allocate consistent mappings from the end of cluster zero. */
-static iopte_t *alloc_consistent_cluster(struct pci_iommu *iommu, unsigned long npages)
+static inline iopte_t *alloc_npages(struct pci_iommu *iommu, unsigned long npages)
{
- iopte_t *iopte;
+ long entry;
- iopte = iommu->page_table + (1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS));
- while (iopte > iommu->page_table) {
- iopte--;
- if (IOPTE_IS_DUMMY(iommu, iopte)) {
- unsigned long tmp = npages;
+ entry = pci_arena_alloc(iommu, npages);
+ if (unlikely(entry < 0))
+ return NULL;
- while (--tmp) {
- iopte--;
- if (!IOPTE_IS_DUMMY(iommu, iopte))
- break;
- }
- if (tmp == 0) {
- u32 entry = (iopte - iommu->page_table);
+ return iommu->page_table + entry;
+}
- if (entry < iommu->lowest_consistent_map)
- iommu->lowest_consistent_map = entry;
- return iopte;
- }
- }
- }
- return NULL;
+static inline void free_npages(struct pci_iommu *iommu, dma_addr_t base, unsigned long npages)
+{
+ pci_arena_free(&iommu->arena, base >> IO_PAGE_SHIFT, npages);
}
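
The arena index and the DVMA address are linked by a plain shift, which is why free_npages() can take the bus address directly. A sketch of the relation, assuming 8K IOMMU pages (IO_PAGE_SHIFT == 13, matching the 8K page size the deleted comments mention):

#define IO_PAGE_SHIFT	13	/* assumed: 8K IOMMU pages */

static unsigned long entry_to_dvma(unsigned long map_base, long entry)
{
	return map_base + ((unsigned long) entry << IO_PAGE_SHIFT);
}

static long dvma_to_entry(unsigned long map_base, unsigned long dvma)
{
	return (long) ((dvma - map_base) >> IO_PAGE_SHIFT);
}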
static int iommu_alloc_ctx(struct pci_iommu *iommu)
@@ -233,7 +224,7 @@ void *pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_ad
struct pcidev_cookie *pcp;
struct pci_iommu *iommu;
iopte_t *iopte;
- unsigned long flags, order, first_page, ctx;
+ unsigned long flags, order, first_page;
void *ret;
int npages;
@@ -251,9 +242,10 @@ void *pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_ad
iommu = pcp->pbm->iommu;
spin_lock_irqsave(&iommu->lock, flags);
- iopte = alloc_consistent_cluster(iommu, size >> IO_PAGE_SHIFT);
- if (iopte == NULL) {
- spin_unlock_irqrestore(&iommu->lock, flags);
+ iopte = alloc_npages(iommu, size >> IO_PAGE_SHIFT);
+ spin_unlock_irqrestore(&iommu->lock, flags);
+
+ if (unlikely(iopte == NULL)) {
free_pages(first_page, order);
return NULL;
}
@@ -262,31 +254,15 @@ void *pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_ad
((iopte - iommu->page_table) << IO_PAGE_SHIFT));
ret = (void *) first_page;
npages = size >> IO_PAGE_SHIFT;
- ctx = 0;
- if (iommu->iommu_ctxflush)
- ctx = iommu_alloc_ctx(iommu);
first_page = __pa(first_page);
while (npages--) {
- iopte_val(*iopte) = (IOPTE_CONSISTENT(ctx) |
+ iopte_val(*iopte) = (IOPTE_CONSISTENT(0UL) |
IOPTE_WRITE |
(first_page & IOPTE_PAGE));
iopte++;
first_page += IO_PAGE_SIZE;
}
- {
- int i;
- u32 daddr = *dma_addrp;
-
- npages = size >> IO_PAGE_SHIFT;
- for (i = 0; i < npages; i++) {
- pci_iommu_write(iommu->iommu_flush, daddr);
- daddr += IO_PAGE_SIZE;
- }
- }
-
- spin_unlock_irqrestore(&iommu->lock, flags);
-
return ret;
}
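
Note how the lock scope shrank here: iommu->lock now guards only the bitmap search, and the IOPTE stores happen after the unlock. That is safe because a successful alloc_npages() gives the caller exclusive ownership of those slots until free_npages() clears the bits again. In outline (fill_ptes is an illustrative stand-in for the store loop above):

	spin_lock_irqsave(&iommu->lock, flags);
	iopte = alloc_npages(iommu, size >> IO_PAGE_SHIFT);	/* reserve */
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(iopte == NULL))
		return NULL;
	fill_ptes(iopte, npages);	/* lock-free: the slots are ours */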
@@ -296,7 +272,7 @@ void pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_
struct pcidev_cookie *pcp;
struct pci_iommu *iommu;
iopte_t *iopte;
- unsigned long flags, order, npages, i, ctx;
+ unsigned long flags, order, npages;
npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
pcp = pdev->sysdata;
@@ -306,46 +282,7 @@ void pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_
spin_lock_irqsave(&iommu->lock, flags);
- if ((iopte - iommu->page_table) ==
- iommu->lowest_consistent_map) {
- iopte_t *walk = iopte + npages;
- iopte_t *limit;
-
- limit = (iommu->page_table +
- (1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));
- while (walk < limit) {
- if (!IOPTE_IS_DUMMY(iommu, walk))
- break;
- walk++;
- }
- iommu->lowest_consistent_map =
- (walk - iommu->page_table);
- }
-
- /* Data for consistent mappings cannot enter the streaming
- * buffers, so we only need to update the TSB. We flush
- * the IOMMU here as well to prevent conflicts with the
- * streaming mapping deferred tlb flush scheme.
- */
-
- ctx = 0;
- if (iommu->iommu_ctxflush)
- ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
-
- for (i = 0; i < npages; i++, iopte++)
- iopte_make_dummy(iommu, iopte);
-
- if (iommu->iommu_ctxflush) {
- pci_iommu_write(iommu->iommu_ctxflush, ctx);
- } else {
- for (i = 0; i < npages; i++) {
- u32 daddr = dvma + (i << IO_PAGE_SHIFT);
-
- pci_iommu_write(iommu->iommu_flush, daddr);
- }
- }
-
- iommu_free_ctx(iommu, ctx);
+ free_npages(iommu, dvma, npages);
spin_unlock_irqrestore(&iommu->lock, flags);
@@ -372,25 +309,27 @@ dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direct
iommu = pcp->pbm->iommu;
strbuf = &pcp->pbm->stc;
- if (direction == PCI_DMA_NONE)
- BUG();
+ if (unlikely(direction == PCI_DMA_NONE))
+ goto bad_no_ctx;
oaddr = (unsigned long)ptr;
npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
npages >>= IO_PAGE_SHIFT;
spin_lock_irqsave(&iommu->lock, flags);
+ base = alloc_npages(iommu, npages);
+ ctx = 0;
+ if (iommu->iommu_ctxflush)
+ ctx = iommu_alloc_ctx(iommu);
+ spin_unlock_irqrestore(&iommu->lock, flags);
- base = alloc_streaming_cluster(iommu, npages);
- if (base == NULL)
+ if (unlikely(!base))
goto bad;
+
bus_addr = (iommu->page_table_map_base +
((base - iommu->page_table) << IO_PAGE_SHIFT));
ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
base_paddr = __pa(oaddr & IO_PAGE_MASK);
- ctx = 0;
- if (iommu->iommu_ctxflush)
- ctx = iommu_alloc_ctx(iommu);
if (strbuf->strbuf_enabled)
iopte_protection = IOPTE_STREAMING(ctx);
else
@@ -401,12 +340,13 @@ dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direct
for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
iopte_val(*base) = iopte_protection | base_paddr;
- spin_unlock_irqrestore(&iommu->lock, flags);
-
return ret;
bad:
- spin_unlock_irqrestore(&iommu->lock, flags);
+ iommu_free_ctx(iommu, ctx);
+bad_no_ctx:
+ if (printk_ratelimit())
+ WARN_ON(1);
return PCI_DMA_ERROR_CODE;
}
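
pci_map_single() now fails softly with PCI_DMA_ERROR_CODE plus a rate-limited WARN_ON instead of BUG()ing, so callers must check the returned handle — exactly what the new test in pci_map_sg() below does. Caller-side outline (buf/len are illustrative):

	dma_addr_t dma;

	dma = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
	if (unlikely(dma == PCI_DMA_ERROR_CODE))
		return -ENOMEM;		/* arena full or bad direction */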
@@ -481,10 +421,13 @@ void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int
struct pci_iommu *iommu;
struct pci_strbuf *strbuf;
iopte_t *base;
- unsigned long flags, npages, ctx;
+ unsigned long flags, npages, ctx, i;
- if (direction == PCI_DMA_NONE)
- BUG();
+ if (unlikely(direction == PCI_DMA_NONE)) {
+ if (printk_ratelimit())
+ WARN_ON(1);
+ return;
+ }
pcp = pdev->sysdata;
iommu = pcp->pbm->iommu;
@@ -510,13 +453,14 @@ void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int
/* Step 1: Kick data out of streaming buffers if necessary. */
if (strbuf->strbuf_enabled)
- pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
+ pci_strbuf_flush(strbuf, iommu, bus_addr, ctx,
+ npages, direction);
- /* Step 2: Clear out first TSB entry. */
- iopte_make_dummy(iommu, base);
+ /* Step 2: Clear out TSB entries. */
+ for (i = 0; i < npages; i++)
+ iopte_make_dummy(iommu, base + i);
- free_streaming_cluster(iommu, bus_addr - iommu->page_table_map_base,
- npages, ctx);
+ free_npages(iommu, bus_addr - iommu->page_table_map_base, npages);
iommu_free_ctx(iommu, ctx);
@@ -621,6 +565,8 @@ int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int
pci_map_single(pdev,
(page_address(sglist->page) + sglist->offset),
sglist->length, direction);
+ if (unlikely(sglist->dma_address == PCI_DMA_ERROR_CODE))
+ return 0;
sglist->dma_length = sglist->length;
return 1;
}
@@ -629,21 +575,29 @@ int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int
iommu = pcp->pbm->iommu;
strbuf = &pcp->pbm->stc;
- if (direction == PCI_DMA_NONE)
- BUG();
+ if (unlikely(direction == PCI_DMA_NONE))
+ goto bad_no_ctx;
/* Step 1: Prepare scatter list. */
npages = prepare_sg(sglist, nelems);
- /* Step 2: Allocate a cluster. */
+ /* Step 2: Allocate a cluster and context, if necessary. */
spin_lock_irqsave(&iommu->lock, flags);
- base = alloc_streaming_cluster(iommu, npages);
+ base = alloc_npages(iommu, npages);
+ ctx = 0;
+ if (iommu->iommu_ctxflush)
+ ctx = iommu_alloc_ctx(iommu);
+
+ spin_unlock_irqrestore(&iommu->lock, flags);
+
if (base == NULL)
goto bad;
- dma_base = iommu->page_table_map_base + ((base - iommu->page_table) << IO_PAGE_SHIFT);
+
+ dma_base = iommu->page_table_map_base +
+ ((base - iommu->page_table) << IO_PAGE_SHIFT);
/* Step 3: Normalize DMA addresses. */
used = nelems;
@@ -656,30 +610,28 @@ int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int
}
used = nelems - used;
- /* Step 4: Choose a context if necessary. */
- ctx = 0;
- if (iommu->iommu_ctxflush)
- ctx = iommu_alloc_ctx(iommu);
-
- /* Step 5: Create the mappings. */
+ /* Step 4: Create the mappings. */
if (strbuf->strbuf_enabled)
iopte_protection = IOPTE_STREAMING(ctx);
else
iopte_protection = IOPTE_CONSISTENT(ctx);
if (direction != PCI_DMA_TODEVICE)
iopte_protection |= IOPTE_WRITE;
- fill_sg (base, sglist, used, nelems, iopte_protection);
+
+ fill_sg(base, sglist, used, nelems, iopte_protection);
+
#ifdef VERIFY_SG
verify_sglist(sglist, nelems, base, npages);
#endif
- spin_unlock_irqrestore(&iommu->lock, flags);
-
return used;
bad:
- spin_unlock_irqrestore(&iommu->lock, flags);
- return PCI_DMA_ERROR_CODE;
+ iommu_free_ctx(iommu, ctx);
+bad_no_ctx:
+ if (printk_ratelimit())
+ WARN_ON(1);
+ return 0;
}
/* Unmap a set of streaming mode DMA translations. */
@@ -692,8 +644,10 @@ void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems,
unsigned long flags, ctx, i, npages;
u32 bus_addr;
- if (direction == PCI_DMA_NONE)
- BUG();
+ if (unlikely(direction == PCI_DMA_NONE)) {
+ if (printk_ratelimit())
+ WARN_ON(1);
+ }
pcp = pdev->sysdata;
iommu = pcp->pbm->iommu;
@@ -705,7 +659,8 @@ void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems,
if (sglist[i].dma_length == 0)
break;
i--;
- npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) - bus_addr) >> IO_PAGE_SHIFT;
+ npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) -
+ bus_addr) >> IO_PAGE_SHIFT;
base = iommu->page_table +
((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
@@ -726,11 +681,11 @@ void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems,
if (strbuf->strbuf_enabled)
pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
- /* Step 2: Clear out first TSB entry. */
- iopte_make_dummy(iommu, base);
+ /* Step 2: Clear out the TSB entries. */
+ for (i = 0; i < npages; i++)
+ iopte_make_dummy(iommu, base + i);
- free_streaming_cluster(iommu, bus_addr - iommu->page_table_map_base,
- npages, ctx);
+ free_npages(iommu, bus_addr - iommu->page_table_map_base, npages);
iommu_free_ctx(iommu, ctx);
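
Both unmap paths now share the same four-step teardown, and both gained the loop that dummies out every entry (the old code cleared only the first TSB entry of a cluster). In outline:

	/* 1. Drain the streaming buffer so DMA in flight completes. */
	if (strbuf->strbuf_enabled)
		pci_strbuf_flush(strbuf, iommu, bus_addr, ctx,
				 npages, direction);

	/* 2. Point every IOPTE of the range at the dummy page. */
	for (i = 0; i < npages; i++)
		iopte_make_dummy(iommu, base + i);

	/* 3. Return the slots to the arena bitmap. */
	free_npages(iommu, bus_addr - iommu->page_table_map_base, npages);

	/* 4. Release the flush context, if one was in use. */
	iommu_free_ctx(iommu, ctx);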
diff --git a/arch/sparc64/kernel/pci_psycho.c b/arch/sparc64/kernel/pci_psycho.c
index 6ed1ef25e0a..c03ed5f49d3 100644
--- a/arch/sparc64/kernel/pci_psycho.c
+++ b/arch/sparc64/kernel/pci_psycho.c
@@ -1207,13 +1207,9 @@ static void psycho_scan_bus(struct pci_controller_info *p)
static void psycho_iommu_init(struct pci_controller_info *p)
{
struct pci_iommu *iommu = p->pbm_A.iommu;
- unsigned long tsbbase, i;
+ unsigned long i;
u64 control;
- /* Setup initial software IOMMU state. */
- spin_lock_init(&iommu->lock);
- iommu->ctx_lowest_free = 1;
-
/* Register addresses. */
iommu->iommu_control = p->pbm_A.controller_regs + PSYCHO_IOMMU_CONTROL;
iommu->iommu_tsbbase = p->pbm_A.controller_regs + PSYCHO_IOMMU_TSBBASE;
@@ -1240,40 +1236,10 @@ static void psycho_iommu_init(struct pci_controller_info *p)
/* Leave diag mode enabled for full-flushing done
* in pci_iommu.c
*/
+ pci_iommu_table_init(iommu, IO_TSB_SIZE, 0xc0000000, 0xffffffff);
- iommu->dummy_page = __get_free_pages(GFP_KERNEL, 0);
- if (!iommu->dummy_page) {
- prom_printf("PSYCHO_IOMMU: Error, gfp(dummy_page) failed.\n");
- prom_halt();
- }
- memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
- iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);
-
- /* Using assumed page size 8K with 128K entries we need 1MB iommu page
- * table (128K ioptes * 8 bytes per iopte). This is
- * page order 7 on UltraSparc.
- */
- tsbbase = __get_free_pages(GFP_KERNEL, get_order(IO_TSB_SIZE));
- if (!tsbbase) {
- prom_printf("PSYCHO_IOMMU: Error, gfp(tsb) failed.\n");
- prom_halt();
- }
- iommu->page_table = (iopte_t *)tsbbase;
- iommu->page_table_sz_bits = 17;
- iommu->page_table_map_base = 0xc0000000;
- iommu->dma_addr_mask = 0xffffffff;
- pci_iommu_table_init(iommu, IO_TSB_SIZE);
-
- /* We start with no consistent mappings. */
- iommu->lowest_consistent_map =
- 1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS);
-
- for (i = 0; i < PBM_NCLUSTERS; i++) {
- iommu->alloc_info[i].flush = 0;
- iommu->alloc_info[i].next = 0;
- }
-
- psycho_write(p->pbm_A.controller_regs + PSYCHO_IOMMU_TSBBASE, __pa(tsbbase));
+ psycho_write(p->pbm_A.controller_regs + PSYCHO_IOMMU_TSBBASE,
+ __pa(iommu->page_table));
control = psycho_read(p->pbm_A.controller_regs + PSYCHO_IOMMU_CONTROL);
control &= ~(PSYCHO_IOMMU_CTRL_TSBSZ | PSYCHO_IOMMU_CTRL_TBWSZ);
@@ -1281,7 +1247,7 @@ static void psycho_iommu_init(struct pci_controller_info *p)
psycho_write(p->pbm_A.controller_regs + PSYCHO_IOMMU_CONTROL, control);
/* If necessary, hook us up for starfire IRQ translations. */
- if(this_is_starfire)
+ if (this_is_starfire)
p->starfire_cookie = starfire_hookup(p->pbm_A.portid);
else
p->starfire_cookie = NULL;
diff --git a/arch/sparc64/kernel/pci_sabre.c b/arch/sparc64/kernel/pci_sabre.c
index 0ee6bd5b9ac..da8e1364194 100644
--- a/arch/sparc64/kernel/pci_sabre.c
+++ b/arch/sparc64/kernel/pci_sabre.c
@@ -1267,13 +1267,9 @@ static void sabre_iommu_init(struct pci_controller_info *p,
u32 dma_mask)
{
struct pci_iommu *iommu = p->pbm_A.iommu;
- unsigned long tsbbase, i, order;
+ unsigned long i;
u64 control;
- /* Setup initial software IOMMU state. */
- spin_lock_init(&iommu->lock);
- iommu->ctx_lowest_free = 1;
-
/* Register addresses. */
iommu->iommu_control = p->pbm_A.controller_regs + SABRE_IOMMU_CONTROL;
iommu->iommu_tsbbase = p->pbm_A.controller_regs + SABRE_IOMMU_TSBBASE;
@@ -1295,26 +1291,10 @@ static void sabre_iommu_init(struct pci_controller_info *p,
/* Leave diag mode enabled for full-flushing done
* in pci_iommu.c
*/
+ pci_iommu_table_init(iommu, tsbsize * 1024 * 8, dvma_offset, dma_mask);
- iommu->dummy_page = __get_free_pages(GFP_KERNEL, 0);
- if (!iommu->dummy_page) {
- prom_printf("PSYCHO_IOMMU: Error, gfp(dummy_page) failed.\n");
- prom_halt();
- }
- memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
- iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);
-
- tsbbase = __get_free_pages(GFP_KERNEL, order = get_order(tsbsize * 1024 * 8));
- if (!tsbbase) {
- prom_printf("SABRE_IOMMU: Error, gfp(tsb) failed.\n");
- prom_halt();
- }
- iommu->page_table = (iopte_t *)tsbbase;
- iommu->page_table_map_base = dvma_offset;
- iommu->dma_addr_mask = dma_mask;
- pci_iommu_table_init(iommu, PAGE_SIZE << order);
-
- sabre_write(p->pbm_A.controller_regs + SABRE_IOMMU_TSBBASE, __pa(tsbbase));
+ sabre_write(p->pbm_A.controller_regs + SABRE_IOMMU_TSBBASE,
+ __pa(iommu->page_table));
control = sabre_read(p->pbm_A.controller_regs + SABRE_IOMMU_CONTROL);
control &= ~(SABRE_IOMMUCTRL_TSBSZ | SABRE_IOMMUCTRL_TBWSZ);
@@ -1322,11 +1302,9 @@ static void sabre_iommu_init(struct pci_controller_info *p,
switch(tsbsize) {
case 64:
control |= SABRE_IOMMU_TSBSZ_64K;
- iommu->page_table_sz_bits = 16;
break;
case 128:
control |= SABRE_IOMMU_TSBSZ_128K;
- iommu->page_table_sz_bits = 17;
break;
default:
prom_printf("iommu_init: Illegal TSB size %d\n", tsbsize);
@@ -1334,15 +1312,6 @@ static void sabre_iommu_init(struct pci_controller_info *p,
break;
}
sabre_write(p->pbm_A.controller_regs + SABRE_IOMMU_CONTROL, control);
-
- /* We start with no consistent mappings. */
- iommu->lowest_consistent_map =
- 1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS);
-
- for (i = 0; i < PBM_NCLUSTERS; i++) {
- iommu->alloc_info[i].flush = 0;
- iommu->alloc_info[i].next = 0;
- }
}
static void pbm_register_toplevel_resources(struct pci_controller_info *p,
diff --git a/arch/sparc64/kernel/pci_schizo.c b/arch/sparc64/kernel/pci_schizo.c
index 331382e1a75..d8c4e0919b4 100644
--- a/arch/sparc64/kernel/pci_schizo.c
+++ b/arch/sparc64/kernel/pci_schizo.c
@@ -330,7 +330,7 @@ static int schizo_ino_to_pil(struct pci_dev *pdev, unsigned int ino)
static void tomatillo_wsync_handler(struct ino_bucket *bucket, void *_arg1, void *_arg2)
{
unsigned long sync_reg = (unsigned long) _arg2;
- u64 mask = 1 << (__irq_ino(__irq(bucket)) & IMAP_INO);
+ u64 mask = 1UL << (__irq_ino(__irq(bucket)) & IMAP_INO);
u64 val;
int limit;
@@ -1765,7 +1765,7 @@ static void schizo_pbm_strbuf_init(struct pci_pbm_info *pbm)
static void schizo_pbm_iommu_init(struct pci_pbm_info *pbm)
{
struct pci_iommu *iommu = pbm->iommu;
- unsigned long tsbbase, i, tagbase, database, order;
+ unsigned long i, tagbase, database;
u32 vdma[2], dma_mask;
u64 control;
int err, tsbsize;
@@ -1800,10 +1800,6 @@ static void schizo_pbm_iommu_init(struct pci_pbm_info *pbm)
prom_halt();
};
- /* Setup initial software IOMMU state. */
- spin_lock_init(&iommu->lock);
- iommu->ctx_lowest_free = 1;
-
/* Register addresses, SCHIZO has iommu ctx flushing. */
iommu->iommu_control = pbm->pbm_regs + SCHIZO_IOMMU_CONTROL;
iommu->iommu_tsbbase = pbm->pbm_regs + SCHIZO_IOMMU_TSBBASE;
@@ -1832,56 +1828,9 @@ static void schizo_pbm_iommu_init(struct pci_pbm_info *pbm)
/* Leave diag mode enabled for full-flushing done
* in pci_iommu.c
*/
+ pci_iommu_table_init(iommu, tsbsize * 8 * 1024, vdma[0], dma_mask);
- iommu->dummy_page = __get_free_pages(GFP_KERNEL, 0);
- if (!iommu->dummy_page) {
- prom_printf("PSYCHO_IOMMU: Error, gfp(dummy_page) failed.\n");
- prom_halt();
- }
- memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
- iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);
-
- /* Using assumed page size 8K with 128K entries we need 1MB iommu page
- * table (128K ioptes * 8 bytes per iopte). This is
- * page order 7 on UltraSparc.
- */
- order = get_order(tsbsize * 8 * 1024);
- tsbbase = __get_free_pages(GFP_KERNEL, order);
- if (!tsbbase) {
- prom_printf("%s: Error, gfp(tsb) failed.\n", pbm->name);
- prom_halt();
- }
-
- iommu->page_table = (iopte_t *)tsbbase;
- iommu->page_table_map_base = vdma[0];
- iommu->dma_addr_mask = dma_mask;
- pci_iommu_table_init(iommu, PAGE_SIZE << order);
-
- switch (tsbsize) {
- case 64:
- iommu->page_table_sz_bits = 16;
- break;
-
- case 128:
- iommu->page_table_sz_bits = 17;
- break;
-
- default:
- prom_printf("iommu_init: Illegal TSB size %d\n", tsbsize);
- prom_halt();
- break;
- };
-
- /* We start with no consistent mappings. */
- iommu->lowest_consistent_map =
- 1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS);
-
- for (i = 0; i < PBM_NCLUSTERS; i++) {
- iommu->alloc_info[i].flush = 0;
- iommu->alloc_info[i].next = 0;
- }
-
- schizo_write(iommu->iommu_tsbbase, __pa(tsbbase));
+ schizo_write(iommu->iommu_tsbbase, __pa(iommu->page_table));
control = schizo_read(iommu->iommu_control);
control &= ~(SCHIZO_IOMMU_CTRL_TSBSZ | SCHIZO_IOMMU_CTRL_TBWSZ);
diff --git a/arch/sparc64/kernel/power.c b/arch/sparc64/kernel/power.c
index 946cee0257e..9e8362ea310 100644
--- a/arch/sparc64/kernel/power.c
+++ b/arch/sparc64/kernel/power.c
@@ -17,6 +17,7 @@
#include <asm/system.h>
#include <asm/ebus.h>
+#include <asm/isa.h>
#include <asm/auxio.h>
#include <linux/unistd.h>
@@ -100,46 +101,83 @@ again:
return 0;
}
-static int __init has_button_interrupt(struct linux_ebus_device *edev)
+static int __init has_button_interrupt(unsigned int irq, int prom_node)
{
- if (edev->irqs[0] == PCI_IRQ_NONE)
+ if (irq == PCI_IRQ_NONE)
return 0;
- if (!prom_node_has_property(edev->prom_node, "button"))
+ if (!prom_node_has_property(prom_node, "button"))
return 0;
return 1;
}
-void __init power_init(void)
+static int __init power_probe_ebus(struct resource **resp, unsigned int *irq_p, int *prom_node_p)
{
struct linux_ebus *ebus;
struct linux_ebus_device *edev;
+
+ for_each_ebus(ebus) {
+ for_each_ebusdev(edev, ebus) {
+ if (!strcmp(edev->prom_name, "power")) {
+ *resp = &edev->resource[0];
+ *irq_p = edev->irqs[0];
+ *prom_node_p = edev->prom_node;
+ return 0;
+ }
+ }
+ }
+ return -ENODEV;
+}
+
+static int __init power_probe_isa(struct resource **resp, unsigned int *irq_p, int *prom_node_p)
+{
+ struct sparc_isa_bridge *isa_bus;
+ struct sparc_isa_device *isa_dev;
+
+ for_each_isa(isa_bus) {
+ for_each_isadev(isa_dev, isa_bus) {
+ if (!strcmp(isa_dev->prom_name, "power")) {
+ *resp = &isa_dev->resource;
+ *irq_p = isa_dev->irq;
+ *prom_node_p = isa_dev->prom_node;
+ return 0;
+ }
+ }
+ }
+ return -ENODEV;
+}
+
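The EBUS/ISA split is a probe cascade: each helper either fills the three out-parameters and returns 0, or returns -ENODEV so power_init() can try the next bus type. A hypothetical third bus would follow the same contract:

/* Illustrative only -- not part of this patch. */
static int __init power_probe_otherbus(struct resource **resp,
				       unsigned int *irq_p, int *prom_node_p)
{
	/* walk the bus, match prom_name == "power", fill the out-params */
	return -ENODEV;
}

power_init() would then simply gain one more "if (!power_probe_otherbus(&res, &irq, &prom_node)) goto found;" line.
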
+void __init power_init(void)
+{
+ struct resource *res = NULL;
+ unsigned int irq;
+ int prom_node;
static int invoked;
if (invoked)
return;
invoked = 1;
- for_each_ebus(ebus) {
- for_each_ebusdev(edev, ebus) {
- if (!strcmp(edev->prom_name, "power"))
- goto found;
- }
- }
+ if (!power_probe_ebus(&res, &irq, &prom_node))
+ goto found;
+
+ if (!power_probe_isa(&res, &irq, &prom_node))
+ goto found;
+
return;
found:
- power_reg = ioremap(edev->resource[0].start, 0x4);
+ power_reg = ioremap(res->start, 0x4);
printk("power: Control reg at %p ... ", power_reg);
poweroff_method = machine_halt; /* able to use the standard halt */
- if (has_button_interrupt(edev)) {
+ if (has_button_interrupt(irq, prom_node)) {
if (kernel_thread(powerd, NULL, CLONE_FS) < 0) {
printk("Failed to start power daemon.\n");
return;
}
printk("powerd running.\n");
- if (request_irq(edev->irqs[0],
+ if (request_irq(irq,
power_handler, SA_SHIRQ, "power", NULL) < 0)
printk("power: Error, cannot register IRQ handler.\n");
} else {
diff --git a/arch/sparc64/kernel/ptrace.c b/arch/sparc64/kernel/ptrace.c
index 23ad839d113..774ecbb8a03 100644
--- a/arch/sparc64/kernel/ptrace.c
+++ b/arch/sparc64/kernel/ptrace.c
@@ -30,6 +30,8 @@
#include <asm/psrcompat.h>
#include <asm/visasm.h>
#include <asm/spitfire.h>
+#include <asm/page.h>
+#include <asm/cpudata.h>
/* Returning from ptrace is a bit tricky because the syscall return
* low level code assumes any value returned which is negative and
@@ -128,20 +130,24 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
* is mapped to in the user's address space, we can skip the
* D-cache flush.
*/
- if ((uaddr ^ kaddr) & (1UL << 13)) {
+ if ((uaddr ^ (unsigned long) kaddr) & (1UL << 13)) {
unsigned long start = __pa(kaddr);
unsigned long end = start + len;
+ unsigned long dcache_line_size;
+
+ dcache_line_size = local_cpu_data().dcache_line_size;
if (tlb_type == spitfire) {
- for (; start < end; start += 32)
- spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
+ for (; start < end; start += dcache_line_size)
+ spitfire_put_dcache_tag(start & 0x3fe0, 0x0);
} else {
- for (; start < end; start += 32)
+ start &= ~(dcache_line_size - 1);
+ for (; start < end; start += dcache_line_size)
__asm__ __volatile__(
"stxa %%g0, [%0] %1\n\t"
"membar #Sync"
: /* no outputs */
- : "r" (va),
+ : "r" (start),
"i" (ASI_DCACHE_INVALIDATE));
}
}
@@ -149,8 +155,11 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
if (write && tlb_type == spitfire) {
unsigned long start = (unsigned long) kaddr;
unsigned long end = start + len;
+ unsigned long icache_line_size;
+
+ icache_line_size = local_cpu_data().icache_line_size;
- for (; start < end; start += 32)
+ for (; start < end; start += icache_line_size)
flushi(start);
}
}
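
The flush loops stop hard-coding 32-byte lines and step by the per-cpu line sizes probed from OpenBoot (see the smp.c hunk further down). The general shape, with the start aligned down so no partial line is skipped (flush_one_dcache_line is an illustrative helper):

	unsigned long line = local_cpu_data().dcache_line_size;
	unsigned long p = start & ~(line - 1);	/* align to a line boundary */

	for (; p < end; p += line)
		flush_one_dcache_line(p);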
diff --git a/arch/sparc64/kernel/rtrap.S b/arch/sparc64/kernel/rtrap.S
index fafd227735f..090dcca00d2 100644
--- a/arch/sparc64/kernel/rtrap.S
+++ b/arch/sparc64/kernel/rtrap.S
@@ -256,9 +256,8 @@ rt_continue: ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1
brnz,pn %l3, kern_rtt
mov PRIMARY_CONTEXT, %l7
ldxa [%l7 + %l7] ASI_DMMU, %l0
-cplus_rtrap_insn_1:
- sethi %hi(0), %l1
- sllx %l1, 32, %l1
+ sethi %hi(sparc64_kern_pri_nuc_bits), %l1
+ ldx [%l1 + %lo(sparc64_kern_pri_nuc_bits)], %l1
or %l0, %l1, %l0
stxa %l0, [%l7] ASI_DMMU
flush %g6
@@ -313,53 +312,36 @@ kern_fpucheck: ldub [%g6 + TI_FPDEPTH], %l5
wr %g1, FPRS_FEF, %fprs
ldx [%o1 + %o5], %g1
add %g6, TI_XFSR, %o1
- membar #StoreLoad | #LoadLoad
sll %o0, 8, %o2
add %g6, TI_FPREGS, %o3
brz,pn %l6, 1f
add %g6, TI_FPREGS+0x40, %o4
+ membar #Sync
ldda [%o3 + %o2] ASI_BLK_P, %f0
ldda [%o4 + %o2] ASI_BLK_P, %f16
+ membar #Sync
1: andcc %l2, FPRS_DU, %g0
be,pn %icc, 1f
wr %g1, 0, %gsr
add %o2, 0x80, %o2
+ membar #Sync
ldda [%o3 + %o2] ASI_BLK_P, %f32
ldda [%o4 + %o2] ASI_BLK_P, %f48
-
1: membar #Sync
ldx [%o1 + %o5], %fsr
2: stb %l5, [%g6 + TI_FPDEPTH]
ba,pt %xcc, rt_continue
nop
5: wr %g0, FPRS_FEF, %fprs
- membar #StoreLoad | #LoadLoad
sll %o0, 8, %o2
add %g6, TI_FPREGS+0x80, %o3
add %g6, TI_FPREGS+0xc0, %o4
+ membar #Sync
ldda [%o3 + %o2] ASI_BLK_P, %f32
ldda [%o4 + %o2] ASI_BLK_P, %f48
membar #Sync
wr %g0, FPRS_DU, %fprs
ba,pt %xcc, rt_continue
stb %l5, [%g6 + TI_FPDEPTH]
-
-cplus_rinsn_1:
- sethi %uhi(CTX_CHEETAH_PLUS_NUC), %l1
-
- .globl cheetah_plus_patch_rtrap
-cheetah_plus_patch_rtrap:
- /* We configure the dTLB512_0 for 4MB pages and the
- * dTLB512_1 for 8K pages when in context zero.
- */
- sethi %hi(cplus_rinsn_1), %o0
- sethi %hi(cplus_rtrap_insn_1), %o2
- lduw [%o0 + %lo(cplus_rinsn_1)], %o1
- or %o2, %lo(cplus_rtrap_insn_1), %o2
- stw %o1, [%o2]
- flush %o2
-
- retl
- nop
diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c
index ddbed3341a2..c1f34237cdf 100644
--- a/arch/sparc64/kernel/setup.c
+++ b/arch/sparc64/kernel/setup.c
@@ -187,17 +187,13 @@ int prom_callback(long *args)
}
if ((va >= KERNBASE) && (va < (KERNBASE + (4 * 1024 * 1024)))) {
- unsigned long kernel_pctx = 0;
-
- if (tlb_type == cheetah_plus)
- kernel_pctx |= (CTX_CHEETAH_PLUS_NUC |
- CTX_CHEETAH_PLUS_CTX0);
+ extern unsigned long sparc64_kern_pri_context;
/* Spitfire Errata #32 workaround */
__asm__ __volatile__("stxa %0, [%1] %2\n\t"
"flush %%g6"
: /* No outputs */
- : "r" (kernel_pctx),
+ : "r" (sparc64_kern_pri_context),
"r" (PRIMARY_CONTEXT),
"i" (ASI_DMMU));
@@ -464,8 +460,6 @@ static void __init boot_flags_init(char *commands)
}
}
-extern int prom_probe_memory(void);
-extern unsigned long start, end;
extern void panic_setup(char *, int *);
extern unsigned short root_flags;
@@ -492,13 +486,8 @@ void register_prom_callbacks(void)
"' linux-.soft2 to .soft2");
}
-extern void paging_init(void);
-
void __init setup_arch(char **cmdline_p)
{
- unsigned long highest_paddr;
- int i;
-
/* Initialize PROM console and command line. */
*cmdline_p = prom_getbootargs();
strcpy(saved_command_line, *cmdline_p);
@@ -517,40 +506,6 @@ void __init setup_arch(char **cmdline_p)
boot_flags_init(*cmdline_p);
idprom_init();
- (void) prom_probe_memory();
-
- /* In paging_init() we tip off this value to see if we need
- * to change init_mm.pgd to point to the real alias mapping.
- */
- phys_base = 0xffffffffffffffffUL;
- highest_paddr = 0UL;
- for (i = 0; sp_banks[i].num_bytes != 0; i++) {
- unsigned long top;
-
- if (sp_banks[i].base_addr < phys_base)
- phys_base = sp_banks[i].base_addr;
- top = sp_banks[i].base_addr +
- sp_banks[i].num_bytes;
- if (highest_paddr < top)
- highest_paddr = top;
- }
- pfn_base = phys_base >> PAGE_SHIFT;
-
- switch (tlb_type) {
- default:
- case spitfire:
- kern_base = spitfire_get_itlb_data(sparc64_highest_locked_tlbent());
- kern_base &= _PAGE_PADDR_SF;
- break;
-
- case cheetah:
- case cheetah_plus:
- kern_base = cheetah_get_litlb_data(sparc64_highest_locked_tlbent());
- kern_base &= _PAGE_PADDR;
- break;
- };
-
- kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
if (!root_flags)
root_mountflags &= ~MS_RDONLY;
@@ -625,6 +580,9 @@ extern void smp_info(struct seq_file *);
extern void smp_bogo(struct seq_file *);
extern void mmu_info(struct seq_file *);
+unsigned int dcache_parity_tl1_occurred;
+unsigned int icache_parity_tl1_occurred;
+
static int show_cpuinfo(struct seq_file *m, void *__unused)
{
seq_printf(m,
@@ -635,6 +593,8 @@ static int show_cpuinfo(struct seq_file *m, void *__unused)
"type\t\t: sun4u\n"
"ncpus probed\t: %ld\n"
"ncpus active\t: %ld\n"
+ "D$ parity tl1\t: %u\n"
+ "I$ parity tl1\t: %u\n"
#ifndef CONFIG_SMP
"Cpu0Bogo\t: %lu.%02lu\n"
"Cpu0ClkTck\t: %016lx\n"
@@ -647,7 +607,9 @@ static int show_cpuinfo(struct seq_file *m, void *__unused)
(prom_prev >> 8) & 0xff,
prom_prev & 0xff,
(long)num_possible_cpus(),
- (long)num_online_cpus()
+ (long)num_online_cpus(),
+ dcache_parity_tl1_occurred,
+ icache_parity_tl1_occurred
#ifndef CONFIG_SMP
, cpu_data(0).udelay_val/(500000/HZ),
(cpu_data(0).udelay_val/(5000/HZ)) % 100,
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index b4fc6a5462b..b137fd63f5e 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -93,6 +93,27 @@ void __init smp_store_cpu_info(int id)
cpu_data(id).pte_cache[1] = NULL;
cpu_data(id).pgd_cache = NULL;
cpu_data(id).idle_volume = 1;
+
+ cpu_data(id).dcache_size = prom_getintdefault(cpu_node, "dcache-size",
+ 16 * 1024);
+ cpu_data(id).dcache_line_size =
+ prom_getintdefault(cpu_node, "dcache-line-size", 32);
+ cpu_data(id).icache_size = prom_getintdefault(cpu_node, "icache-size",
+ 16 * 1024);
+ cpu_data(id).icache_line_size =
+ prom_getintdefault(cpu_node, "icache-line-size", 32);
+ cpu_data(id).ecache_size = prom_getintdefault(cpu_node, "ecache-size",
+ 4 * 1024 * 1024);
+ cpu_data(id).ecache_line_size =
+ prom_getintdefault(cpu_node, "ecache-line-size", 64);
+ printk("CPU[%d]: Caches "
+ "D[sz(%d):line_sz(%d)] "
+ "I[sz(%d):line_sz(%d)] "
+ "E[sz(%d):line_sz(%d)]\n",
+ id,
+ cpu_data(id).dcache_size, cpu_data(id).dcache_line_size,
+ cpu_data(id).icache_size, cpu_data(id).icache_line_size,
+ cpu_data(id).ecache_size, cpu_data(id).ecache_line_size);
}
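
prom_getintdefault() returns the node's integer property when present and the supplied default otherwise, so machines whose OBP omits a cache property still get conservative values (16K/32-byte L1 caches, 4M/64-byte E-cache). The idiom in isolation:

	/* Falls back to a 32-byte line if the cpu node lacks the property. */
	static int probe_dcache_line(int cpu_node)
	{
		return prom_getintdefault(cpu_node, "dcache-line-size", 32);
	}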
static void smp_setup_percpu_timer(void);
@@ -980,13 +1001,6 @@ void smp_penguin_jailcell(int irq, struct pt_regs *regs)
preempt_enable();
}
-extern unsigned long xcall_promstop;
-
-void smp_promstop_others(void)
-{
- smp_cross_call(&xcall_promstop, 0, 0, 0);
-}
-
#define prof_multiplier(__cpu) cpu_data(__cpu).multiplier
#define prof_counter(__cpu) cpu_data(__cpu).counter
diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c
index cbb5e59824e..fb7a5370dbf 100644
--- a/arch/sparc64/kernel/sparc64_ksyms.c
+++ b/arch/sparc64/kernel/sparc64_ksyms.c
@@ -163,9 +163,6 @@ EXPORT_SYMBOL(atomic64_add);
EXPORT_SYMBOL(atomic64_add_ret);
EXPORT_SYMBOL(atomic64_sub);
EXPORT_SYMBOL(atomic64_sub_ret);
-#ifdef CONFIG_SMP
-EXPORT_SYMBOL(_atomic_dec_and_lock);
-#endif
/* Atomic bit operations. */
EXPORT_SYMBOL(test_and_set_bit);
diff --git a/arch/sparc64/kernel/sys32.S b/arch/sparc64/kernel/sys32.S
index 5f9e4fae612..9cd272ac3ac 100644
--- a/arch/sparc64/kernel/sys32.S
+++ b/arch/sparc64/kernel/sys32.S
@@ -157,173 +157,199 @@ sys32_socketcall: /* %o0=call, %o1=args */
or %g2, %lo(__socketcall_table_begin), %g2
jmpl %g2 + %o0, %g0
nop
+do_einval:
+ retl
+ mov -EINVAL, %o0
- /* Each entry is exactly 32 bytes. */
.align 32
__socketcall_table_begin:
+
+ /* Each entry is exactly 32 bytes. */
do_sys_socket: /* sys_socket(int, int, int) */
- ldswa [%o1 + 0x0] %asi, %o0
+1: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_socket), %g1
- ldswa [%o1 + 0x8] %asi, %o2
+2: ldswa [%o1 + 0x8] %asi, %o2
jmpl %g1 + %lo(sys_socket), %g0
- ldswa [%o1 + 0x4] %asi, %o1
+3: ldswa [%o1 + 0x4] %asi, %o1
nop
nop
nop
do_sys_bind: /* sys_bind(int fd, struct sockaddr *, int) */
- ldswa [%o1 + 0x0] %asi, %o0
+4: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_bind), %g1
- ldswa [%o1 + 0x8] %asi, %o2
+5: ldswa [%o1 + 0x8] %asi, %o2
jmpl %g1 + %lo(sys_bind), %g0
- lduwa [%o1 + 0x4] %asi, %o1
+6: lduwa [%o1 + 0x4] %asi, %o1
nop
nop
nop
do_sys_connect: /* sys_connect(int, struct sockaddr *, int) */
- ldswa [%o1 + 0x0] %asi, %o0
+7: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_connect), %g1
- ldswa [%o1 + 0x8] %asi, %o2
+8: ldswa [%o1 + 0x8] %asi, %o2
jmpl %g1 + %lo(sys_connect), %g0
- lduwa [%o1 + 0x4] %asi, %o1
+9: lduwa [%o1 + 0x4] %asi, %o1
nop
nop
nop
do_sys_listen: /* sys_listen(int, int) */
- ldswa [%o1 + 0x0] %asi, %o0
+10: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_listen), %g1
jmpl %g1 + %lo(sys_listen), %g0
- ldswa [%o1 + 0x4] %asi, %o1
+11: ldswa [%o1 + 0x4] %asi, %o1
nop
nop
nop
nop
do_sys_accept: /* sys_accept(int, struct sockaddr *, int *) */
- ldswa [%o1 + 0x0] %asi, %o0
+12: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_accept), %g1
- lduwa [%o1 + 0x8] %asi, %o2
+13: lduwa [%o1 + 0x8] %asi, %o2
jmpl %g1 + %lo(sys_accept), %g0
- lduwa [%o1 + 0x4] %asi, %o1
+14: lduwa [%o1 + 0x4] %asi, %o1
nop
nop
nop
do_sys_getsockname: /* sys_getsockname(int, struct sockaddr *, int *) */
- ldswa [%o1 + 0x0] %asi, %o0
+15: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_getsockname), %g1
- lduwa [%o1 + 0x8] %asi, %o2
+16: lduwa [%o1 + 0x8] %asi, %o2
jmpl %g1 + %lo(sys_getsockname), %g0
- lduwa [%o1 + 0x4] %asi, %o1
+17: lduwa [%o1 + 0x4] %asi, %o1
nop
nop
nop
do_sys_getpeername: /* sys_getpeername(int, struct sockaddr *, int *) */
- ldswa [%o1 + 0x0] %asi, %o0
+18: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_getpeername), %g1
- lduwa [%o1 + 0x8] %asi, %o2
+19: lduwa [%o1 + 0x8] %asi, %o2
jmpl %g1 + %lo(sys_getpeername), %g0
- lduwa [%o1 + 0x4] %asi, %o1
+20: lduwa [%o1 + 0x4] %asi, %o1
nop
nop
nop
do_sys_socketpair: /* sys_socketpair(int, int, int, int *) */
- ldswa [%o1 + 0x0] %asi, %o0
+21: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_socketpair), %g1
- ldswa [%o1 + 0x8] %asi, %o2
- lduwa [%o1 + 0xc] %asi, %o3
+22: ldswa [%o1 + 0x8] %asi, %o2
+23: lduwa [%o1 + 0xc] %asi, %o3
jmpl %g1 + %lo(sys_socketpair), %g0
- ldswa [%o1 + 0x4] %asi, %o1
+24: ldswa [%o1 + 0x4] %asi, %o1
nop
nop
do_sys_send: /* sys_send(int, void *, size_t, unsigned int) */
- ldswa [%o1 + 0x0] %asi, %o0
+25: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_send), %g1
- lduwa [%o1 + 0x8] %asi, %o2
- lduwa [%o1 + 0xc] %asi, %o3
+26: lduwa [%o1 + 0x8] %asi, %o2
+27: lduwa [%o1 + 0xc] %asi, %o3
jmpl %g1 + %lo(sys_send), %g0
- lduwa [%o1 + 0x4] %asi, %o1
+28: lduwa [%o1 + 0x4] %asi, %o1
nop
nop
do_sys_recv: /* sys_recv(int, void *, size_t, unsigned int) */
- ldswa [%o1 + 0x0] %asi, %o0
+29: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_recv), %g1
- lduwa [%o1 + 0x8] %asi, %o2
- lduwa [%o1 + 0xc] %asi, %o3
+30: lduwa [%o1 + 0x8] %asi, %o2
+31: lduwa [%o1 + 0xc] %asi, %o3
jmpl %g1 + %lo(sys_recv), %g0
- lduwa [%o1 + 0x4] %asi, %o1
+32: lduwa [%o1 + 0x4] %asi, %o1
nop
nop
do_sys_sendto: /* sys_sendto(int, u32, compat_size_t, unsigned int, u32, int) */
- ldswa [%o1 + 0x0] %asi, %o0
+33: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_sendto), %g1
- lduwa [%o1 + 0x8] %asi, %o2
- lduwa [%o1 + 0xc] %asi, %o3
- lduwa [%o1 + 0x10] %asi, %o4
- ldswa [%o1 + 0x14] %asi, %o5
+34: lduwa [%o1 + 0x8] %asi, %o2
+35: lduwa [%o1 + 0xc] %asi, %o3
+36: lduwa [%o1 + 0x10] %asi, %o4
+37: ldswa [%o1 + 0x14] %asi, %o5
jmpl %g1 + %lo(sys_sendto), %g0
- lduwa [%o1 + 0x4] %asi, %o1
+38: lduwa [%o1 + 0x4] %asi, %o1
do_sys_recvfrom: /* sys_recvfrom(int, u32, compat_size_t, unsigned int, u32, u32) */
- ldswa [%o1 + 0x0] %asi, %o0
+39: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_recvfrom), %g1
- lduwa [%o1 + 0x8] %asi, %o2
- lduwa [%o1 + 0xc] %asi, %o3
- lduwa [%o1 + 0x10] %asi, %o4
- lduwa [%o1 + 0x14] %asi, %o5
+40: lduwa [%o1 + 0x8] %asi, %o2
+41: lduwa [%o1 + 0xc] %asi, %o3
+42: lduwa [%o1 + 0x10] %asi, %o4
+43: lduwa [%o1 + 0x14] %asi, %o5
jmpl %g1 + %lo(sys_recvfrom), %g0
- lduwa [%o1 + 0x4] %asi, %o1
+44: lduwa [%o1 + 0x4] %asi, %o1
do_sys_shutdown: /* sys_shutdown(int, int) */
- ldswa [%o1 + 0x0] %asi, %o0
+45: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_shutdown), %g1
jmpl %g1 + %lo(sys_shutdown), %g0
- ldswa [%o1 + 0x4] %asi, %o1
+46: ldswa [%o1 + 0x4] %asi, %o1
nop
nop
nop
nop
do_sys_setsockopt: /* compat_sys_setsockopt(int, int, int, char *, int) */
- ldswa [%o1 + 0x0] %asi, %o0
+47: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(compat_sys_setsockopt), %g1
- ldswa [%o1 + 0x8] %asi, %o2
- lduwa [%o1 + 0xc] %asi, %o3
- ldswa [%o1 + 0x10] %asi, %o4
+48: ldswa [%o1 + 0x8] %asi, %o2
+49: lduwa [%o1 + 0xc] %asi, %o3
+50: ldswa [%o1 + 0x10] %asi, %o4
jmpl %g1 + %lo(compat_sys_setsockopt), %g0
- ldswa [%o1 + 0x4] %asi, %o1
+51: ldswa [%o1 + 0x4] %asi, %o1
nop
do_sys_getsockopt: /* compat_sys_getsockopt(int, int, int, u32, u32) */
- ldswa [%o1 + 0x0] %asi, %o0
+52: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(compat_sys_getsockopt), %g1
- ldswa [%o1 + 0x8] %asi, %o2
- lduwa [%o1 + 0xc] %asi, %o3
- lduwa [%o1 + 0x10] %asi, %o4
+53: ldswa [%o1 + 0x8] %asi, %o2
+54: lduwa [%o1 + 0xc] %asi, %o3
+55: lduwa [%o1 + 0x10] %asi, %o4
jmpl %g1 + %lo(compat_sys_getsockopt), %g0
- ldswa [%o1 + 0x4] %asi, %o1
+56: ldswa [%o1 + 0x4] %asi, %o1
nop
do_sys_sendmsg: /* compat_sys_sendmsg(int, struct compat_msghdr *, unsigned int) */
- ldswa [%o1 + 0x0] %asi, %o0
+57: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(compat_sys_sendmsg), %g1
- lduwa [%o1 + 0x8] %asi, %o2
+58: lduwa [%o1 + 0x8] %asi, %o2
jmpl %g1 + %lo(compat_sys_sendmsg), %g0
- lduwa [%o1 + 0x4] %asi, %o1
+59: lduwa [%o1 + 0x4] %asi, %o1
nop
nop
nop
do_sys_recvmsg: /* compat_sys_recvmsg(int, struct compat_msghdr *, unsigned int) */
- ldswa [%o1 + 0x0] %asi, %o0
+60: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(compat_sys_recvmsg), %g1
- lduwa [%o1 + 0x8] %asi, %o2
+61: lduwa [%o1 + 0x8] %asi, %o2
jmpl %g1 + %lo(compat_sys_recvmsg), %g0
- lduwa [%o1 + 0x4] %asi, %o1
+62: lduwa [%o1 + 0x4] %asi, %o1
nop
nop
nop
-__socketcall_table_end:
-
-do_einval:
- retl
- mov -EINVAL, %o0
-do_efault:
- retl
- mov -EFAULT, %o0
.section __ex_table
.align 4
- .word __socketcall_table_begin, 0, __socketcall_table_end, do_efault
+ .word 1b, __retl_efault, 2b, __retl_efault
+ .word 3b, __retl_efault, 4b, __retl_efault
+ .word 5b, __retl_efault, 6b, __retl_efault
+ .word 7b, __retl_efault, 8b, __retl_efault
+ .word 9b, __retl_efault, 10b, __retl_efault
+ .word 11b, __retl_efault, 12b, __retl_efault
+ .word 13b, __retl_efault, 14b, __retl_efault
+ .word 15b, __retl_efault, 16b, __retl_efault
+ .word 17b, __retl_efault, 18b, __retl_efault
+ .word 19b, __retl_efault, 20b, __retl_efault
+ .word 21b, __retl_efault, 22b, __retl_efault
+ .word 23b, __retl_efault, 24b, __retl_efault
+ .word 25b, __retl_efault, 26b, __retl_efault
+ .word 27b, __retl_efault, 28b, __retl_efault
+ .word 29b, __retl_efault, 30b, __retl_efault
+ .word 31b, __retl_efault, 32b, __retl_efault
+ .word 33b, __retl_efault, 34b, __retl_efault
+ .word 35b, __retl_efault, 36b, __retl_efault
+ .word 37b, __retl_efault, 38b, __retl_efault
+ .word 39b, __retl_efault, 40b, __retl_efault
+ .word 41b, __retl_efault, 42b, __retl_efault
+ .word 43b, __retl_efault, 44b, __retl_efault
+ .word 45b, __retl_efault, 46b, __retl_efault
+ .word 47b, __retl_efault, 48b, __retl_efault
+ .word 49b, __retl_efault, 50b, __retl_efault
+ .word 51b, __retl_efault, 52b, __retl_efault
+ .word 53b, __retl_efault, 54b, __retl_efault
+ .word 55b, __retl_efault, 56b, __retl_efault
+ .word 57b, __retl_efault, 58b, __retl_efault
+ .word 59b, __retl_efault, 60b, __retl_efault
+ .word 61b, __retl_efault, 62b, __retl_efault
.previous
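
Mechanically, each numbered local label above marks one user-space access, and each __ex_table pair maps that single instruction to the __retl_efault stub, replacing the old begin/end range entry that funnelled everything to do_efault. The C side that consumes these entries is the same lookup the traps.c hunks below switch to:

	const struct exception_table_entry *entry;

	entry = search_exception_tables(regs->tpc);
	if (entry) {
		regs->tpc = entry->fixup;	/* e.g. __retl_efault */
		regs->tnpc = regs->tpc + 4;
		return;
	}
	/* no entry: a genuine, unrecoverable kernel fault */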
diff --git a/arch/sparc64/kernel/trampoline.S b/arch/sparc64/kernel/trampoline.S
index 3a145fc39cf..9478551cb02 100644
--- a/arch/sparc64/kernel/trampoline.S
+++ b/arch/sparc64/kernel/trampoline.S
@@ -119,8 +119,8 @@ startup_continue:
sethi %hi(itlb_load), %g2
or %g2, %lo(itlb_load), %g2
stx %g2, [%sp + 2047 + 128 + 0x18]
- sethi %hi(mmu_ihandle_cache), %g2
- lduw [%g2 + %lo(mmu_ihandle_cache)], %g2
+ sethi %hi(prom_mmu_ihandle_cache), %g2
+ lduw [%g2 + %lo(prom_mmu_ihandle_cache)], %g2
stx %g2, [%sp + 2047 + 128 + 0x20]
sethi %hi(KERNBASE), %g2
stx %g2, [%sp + 2047 + 128 + 0x28]
@@ -156,8 +156,8 @@ startup_continue:
sethi %hi(itlb_load), %g2
or %g2, %lo(itlb_load), %g2
stx %g2, [%sp + 2047 + 128 + 0x18]
- sethi %hi(mmu_ihandle_cache), %g2
- lduw [%g2 + %lo(mmu_ihandle_cache)], %g2
+ sethi %hi(prom_mmu_ihandle_cache), %g2
+ lduw [%g2 + %lo(prom_mmu_ihandle_cache)], %g2
stx %g2, [%sp + 2047 + 128 + 0x20]
sethi %hi(KERNBASE + 0x400000), %g2
stx %g2, [%sp + 2047 + 128 + 0x28]
@@ -190,8 +190,8 @@ do_dtlb:
sethi %hi(dtlb_load), %g2
or %g2, %lo(dtlb_load), %g2
stx %g2, [%sp + 2047 + 128 + 0x18]
- sethi %hi(mmu_ihandle_cache), %g2
- lduw [%g2 + %lo(mmu_ihandle_cache)], %g2
+ sethi %hi(prom_mmu_ihandle_cache), %g2
+ lduw [%g2 + %lo(prom_mmu_ihandle_cache)], %g2
stx %g2, [%sp + 2047 + 128 + 0x20]
sethi %hi(KERNBASE), %g2
stx %g2, [%sp + 2047 + 128 + 0x28]
@@ -228,8 +228,8 @@ do_dtlb:
sethi %hi(dtlb_load), %g2
or %g2, %lo(dtlb_load), %g2
stx %g2, [%sp + 2047 + 128 + 0x18]
- sethi %hi(mmu_ihandle_cache), %g2
- lduw [%g2 + %lo(mmu_ihandle_cache)], %g2
+ sethi %hi(prom_mmu_ihandle_cache), %g2
+ lduw [%g2 + %lo(prom_mmu_ihandle_cache)], %g2
stx %g2, [%sp + 2047 + 128 + 0x20]
sethi %hi(KERNBASE + 0x400000), %g2
stx %g2, [%sp + 2047 + 128 + 0x28]
@@ -336,20 +336,13 @@ do_unlock:
call init_irqwork_curcpu
nop
- BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g2,g3,1f)
- ba,pt %xcc, 2f
- nop
-
-1: /* Start using proper page size encodings in ctx register. */
- sethi %uhi(CTX_CHEETAH_PLUS_NUC), %g3
+ /* Start using proper page size encodings in ctx register. */
+ sethi %hi(sparc64_kern_pri_context), %g3
+ ldx [%g3 + %lo(sparc64_kern_pri_context)], %g2
mov PRIMARY_CONTEXT, %g1
- sllx %g3, 32, %g3
- sethi %hi(CTX_CHEETAH_PLUS_CTX0), %g2
- or %g3, %g2, %g3
- stxa %g3, [%g1] ASI_DMMU
+ stxa %g2, [%g1] ASI_DMMU
membar #Sync
-2:
rdpr %pstate, %o1
or %o1, PSTATE_IE, %o1
wrpr %o1, 0, %pstate
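
This is the same cleanup applied to rtrap.S above and winfixup.S below: instead of boot-time self-patching of sethi/or sequences (the deleted cheetah_plus_patch_* routines), the PRIMARY_CONTEXT value is loaded from variables. Their definitions live outside this diff, presumably in the sparc64 mm setup code; the presumed shape is simply:

/* Assumed declarations -- not part of this diff. */
unsigned long sparc64_kern_pri_context;	 /* full kernel PRIMARY_CONTEXT  */
unsigned long sparc64_kern_pri_nuc_bits; /* nucleus page-size bits alone */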
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
index b280b2ef674..5570e7bb22b 100644
--- a/arch/sparc64/kernel/traps.c
+++ b/arch/sparc64/kernel/traps.c
@@ -189,19 +189,18 @@ void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, un
if (regs->tstate & TSTATE_PRIV) {
/* Test if this comes from uaccess places. */
- unsigned long fixup;
- unsigned long g2 = regs->u_regs[UREG_G2];
+ const struct exception_table_entry *entry;
- if ((fixup = search_extables_range(regs->tpc, &g2))) {
- /* Ouch, somebody is trying ugly VM hole tricks on us... */
+ entry = search_exception_tables(regs->tpc);
+ if (entry) {
+ /* Ouch, somebody is trying VM hole tricks on us... */
#ifdef DEBUG_EXCEPTIONS
printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc);
- printk("EX_TABLE: insn<%016lx> fixup<%016lx> "
- "g2<%016lx>\n", regs->tpc, fixup, g2);
+ printk("EX_TABLE: insn<%016lx> fixup<%016lx>\n",
+ regs->tpc, entry->fixup);
#endif
- regs->tpc = fixup;
+ regs->tpc = entry->fixup;
regs->tnpc = regs->tpc + 4;
- regs->u_regs[UREG_G2] = g2;
return;
}
/* Shit... */
@@ -758,26 +757,12 @@ void __init cheetah_ecache_flush_init(void)
ecache_flush_size = (2 * largest_size);
ecache_flush_linesize = smallest_linesize;
- /* Discover a physically contiguous chunk of physical
- * memory in 'sp_banks' of size ecache_flush_size calculated
- * above. Store the physical base of this area at
- * ecache_flush_physbase.
- */
- for (node = 0; ; node++) {
- if (sp_banks[node].num_bytes == 0)
- break;
- if (sp_banks[node].num_bytes >= ecache_flush_size) {
- ecache_flush_physbase = sp_banks[node].base_addr;
- break;
- }
- }
+ ecache_flush_physbase = find_ecache_flush_span(ecache_flush_size);
- /* Note: Zero would be a valid value of ecache_flush_physbase so
- * don't use that as the success test. :-)
- */
- if (sp_banks[node].num_bytes == 0) {
+ if (ecache_flush_physbase == ~0UL) {
prom_printf("cheetah_ecache_flush_init: Cannot find %d byte "
- "contiguous physical memory.\n", ecache_flush_size);
+ "contiguous physical memory.\n",
+ ecache_flush_size);
prom_halt();
}
@@ -869,14 +854,19 @@ static void cheetah_flush_ecache_line(unsigned long physaddr)
*/
static void __cheetah_flush_icache(void)
{
- unsigned long i;
+ unsigned int icache_size, icache_line_size;
+ unsigned long addr;
+
+ icache_size = local_cpu_data().icache_size;
+ icache_line_size = local_cpu_data().icache_line_size;
/* Clear the valid bits in all the tags. */
- for (i = 0; i < (1 << 15); i += (1 << 5)) {
+ for (addr = 0; addr < icache_size; addr += icache_line_size) {
__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
"membar #Sync"
: /* no outputs */
- : "r" (i | (2 << 3)), "i" (ASI_IC_TAG));
+ : "r" (addr | (2 << 3)),
+ "i" (ASI_IC_TAG));
}
}
@@ -904,13 +894,17 @@ static void cheetah_flush_icache(void)
static void cheetah_flush_dcache(void)
{
- unsigned long i;
+ unsigned int dcache_size, dcache_line_size;
+ unsigned long addr;
- for (i = 0; i < (1 << 16); i += (1 << 5)) {
+ dcache_size = local_cpu_data().dcache_size;
+ dcache_line_size = local_cpu_data().dcache_line_size;
+
+ for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
"membar #Sync"
: /* no outputs */
- : "r" (i), "i" (ASI_DCACHE_TAG));
+ : "r" (addr), "i" (ASI_DCACHE_TAG));
}
}
@@ -921,24 +915,29 @@ static void cheetah_flush_dcache(void)
*/
static void cheetah_plus_zap_dcache_parity(void)
{
- unsigned long i;
+ unsigned int dcache_size, dcache_line_size;
+ unsigned long addr;
+
+ dcache_size = local_cpu_data().dcache_size;
+ dcache_line_size = local_cpu_data().dcache_line_size;
- for (i = 0; i < (1 << 16); i += (1 << 5)) {
- unsigned long tag = (i >> 14);
- unsigned long j;
+ for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
+ unsigned long tag = (addr >> 14);
+ unsigned long line;
__asm__ __volatile__("membar #Sync\n\t"
"stxa %0, [%1] %2\n\t"
"membar #Sync"
: /* no outputs */
- : "r" (tag), "r" (i),
+ : "r" (tag), "r" (addr),
"i" (ASI_DCACHE_UTAG));
- for (j = i; j < i + (1 << 5); j += (1 << 3))
+ for (line = addr; line < addr + dcache_line_size; line += 8)
__asm__ __volatile__("membar #Sync\n\t"
"stxa %%g0, [%0] %1\n\t"
"membar #Sync"
: /* no outputs */
- : "r" (j), "i" (ASI_DCACHE_DATA));
+ : "r" (line),
+ "i" (ASI_DCACHE_DATA));
}
}
@@ -1332,16 +1331,12 @@ static int cheetah_fix_ce(unsigned long physaddr)
/* Return non-zero if PADDR is a valid physical memory address. */
static int cheetah_check_main_memory(unsigned long paddr)
{
- int i;
+ unsigned long vaddr = PAGE_OFFSET + paddr;
- for (i = 0; ; i++) {
- if (sp_banks[i].num_bytes == 0)
- break;
- if (paddr >= sp_banks[i].base_addr &&
- paddr < (sp_banks[i].base_addr + sp_banks[i].num_bytes))
- return 1;
- }
- return 0;
+ if (vaddr > (unsigned long) high_memory)
+ return 0;
+
+ return kern_addr_valid(vaddr);
}
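
The rewritten check leans on the linear kernel mapping: a physical address is valid RAM exactly when its PAGE_OFFSET alias sits below high_memory (the top of the directly mapped region) and kern_addr_valid() confirms a backing page, which is what lets the sp_banks[] scan go away. The predicate spelled out on its own:

	static int paddr_is_ram(unsigned long paddr)
	{
		unsigned long vaddr = PAGE_OFFSET + paddr;

		if (vaddr > (unsigned long) high_memory)
			return 0;
		return kern_addr_valid(vaddr);
	}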
void cheetah_cee_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
@@ -1596,10 +1591,10 @@ void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned
/* OK, usermode access. */
recoverable = 1;
} else {
- unsigned long g2 = regs->u_regs[UREG_G2];
- unsigned long fixup = search_extables_range(regs->tpc, &g2);
+ const struct exception_table_entry *entry;
- if (fixup != 0UL) {
+ entry = search_exception_tables(regs->tpc);
+ if (entry) {
/* OK, kernel access to userspace. */
recoverable = 1;
@@ -1618,9 +1613,8 @@ void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned
* recoverable condition.
*/
if (recoverable) {
- regs->tpc = fixup;
+ regs->tpc = entry->fixup;
regs->tnpc = regs->tpc + 4;
- regs->u_regs[UREG_G2] = g2;
}
}
}
diff --git a/arch/sparc64/kernel/una_asm.S b/arch/sparc64/kernel/una_asm.S
index cbb40585253..1f5b5b708ce 100644
--- a/arch/sparc64/kernel/una_asm.S
+++ b/arch/sparc64/kernel/una_asm.S
@@ -6,18 +6,11 @@
.text
-kernel_unaligned_trap_fault:
- call kernel_mna_trap_fault
- nop
- retl
- nop
- .size kern_unaligned_trap_fault, .-kern_unaligned_trap_fault
-
.globl __do_int_store
__do_int_store:
rd %asi, %o4
wr %o3, 0, %asi
- ldx [%o2], %g3
+ mov %o2, %g3
cmp %o1, 2
be,pn %icc, 2f
cmp %o1, 4
@@ -51,24 +44,24 @@ __do_int_store:
0:
wr %o4, 0x0, %asi
retl
- nop
+ mov 0, %o0
.size __do_int_store, .-__do_int_store
.section __ex_table
- .word 4b, kernel_unaligned_trap_fault
- .word 5b, kernel_unaligned_trap_fault
- .word 6b, kernel_unaligned_trap_fault
- .word 7b, kernel_unaligned_trap_fault
- .word 8b, kernel_unaligned_trap_fault
- .word 9b, kernel_unaligned_trap_fault
- .word 10b, kernel_unaligned_trap_fault
- .word 11b, kernel_unaligned_trap_fault
- .word 12b, kernel_unaligned_trap_fault
- .word 13b, kernel_unaligned_trap_fault
- .word 14b, kernel_unaligned_trap_fault
- .word 15b, kernel_unaligned_trap_fault
- .word 16b, kernel_unaligned_trap_fault
- .word 17b, kernel_unaligned_trap_fault
+ .word 4b, __retl_efault
+ .word 5b, __retl_efault
+ .word 6b, __retl_efault
+ .word 7b, __retl_efault
+ .word 8b, __retl_efault
+ .word 9b, __retl_efault
+ .word 10b, __retl_efault
+ .word 11b, __retl_efault
+ .word 12b, __retl_efault
+ .word 13b, __retl_efault
+ .word 14b, __retl_efault
+ .word 15b, __retl_efault
+ .word 16b, __retl_efault
+ .word 17b, __retl_efault
.previous
.globl do_int_load
@@ -133,21 +126,21 @@ do_int_load:
0:
wr %o5, 0x0, %asi
retl
- nop
+ mov 0, %o0
.size __do_int_load, .-__do_int_load
.section __ex_table
- .word 4b, kernel_unaligned_trap_fault
- .word 5b, kernel_unaligned_trap_fault
- .word 6b, kernel_unaligned_trap_fault
- .word 7b, kernel_unaligned_trap_fault
- .word 8b, kernel_unaligned_trap_fault
- .word 9b, kernel_unaligned_trap_fault
- .word 10b, kernel_unaligned_trap_fault
- .word 11b, kernel_unaligned_trap_fault
- .word 12b, kernel_unaligned_trap_fault
- .word 13b, kernel_unaligned_trap_fault
- .word 14b, kernel_unaligned_trap_fault
- .word 15b, kernel_unaligned_trap_fault
- .word 16b, kernel_unaligned_trap_fault
+ .word 4b, __retl_efault
+ .word 5b, __retl_efault
+ .word 6b, __retl_efault
+ .word 7b, __retl_efault
+ .word 8b, __retl_efault
+ .word 9b, __retl_efault
+ .word 10b, __retl_efault
+ .word 11b, __retl_efault
+ .word 12b, __retl_efault
+ .word 13b, __retl_efault
+ .word 14b, __retl_efault
+ .word 15b, __retl_efault
+ .word 16b, __retl_efault
.previous
diff --git a/arch/sparc64/kernel/unaligned.c b/arch/sparc64/kernel/unaligned.c
index da9739f0d43..70faf630603 100644
--- a/arch/sparc64/kernel/unaligned.c
+++ b/arch/sparc64/kernel/unaligned.c
@@ -180,17 +180,18 @@ static void __attribute_used__ unaligned_panic(char *str, struct pt_regs *regs)
die_if_kernel(str, regs);
}
-extern void do_int_load(unsigned long *dest_reg, int size,
- unsigned long *saddr, int is_signed, int asi);
+extern int do_int_load(unsigned long *dest_reg, int size,
+ unsigned long *saddr, int is_signed, int asi);
-extern void __do_int_store(unsigned long *dst_addr, int size,
- unsigned long *src_val, int asi);
+extern int __do_int_store(unsigned long *dst_addr, int size,
+ unsigned long src_val, int asi);
-static inline void do_int_store(int reg_num, int size, unsigned long *dst_addr,
- struct pt_regs *regs, int asi)
+static inline int do_int_store(int reg_num, int size, unsigned long *dst_addr,
+ struct pt_regs *regs, int asi, int orig_asi)
{
unsigned long zero = 0;
- unsigned long *src_val = &zero;
+ unsigned long *src_val_p = &zero;
+ unsigned long src_val;
if (size == 16) {
size = 8;
@@ -198,9 +199,27 @@ static inline void do_int_store(int reg_num, int size, unsigned long *dst_addr,
(unsigned)fetch_reg(reg_num, regs) : 0)) << 32) |
(unsigned)fetch_reg(reg_num + 1, regs);
} else if (reg_num) {
- src_val = fetch_reg_addr(reg_num, regs);
+ src_val_p = fetch_reg_addr(reg_num, regs);
}
- __do_int_store(dst_addr, size, src_val, asi);
+ src_val = *src_val_p;
+ if (unlikely(asi != orig_asi)) {
+ switch (size) {
+ case 2:
+ src_val = swab16(src_val);
+ break;
+ case 4:
+ src_val = swab32(src_val);
+ break;
+ case 8:
+ src_val = swab64(src_val);
+ break;
+ case 16:
+ default:
+ BUG();
+ break;
+ };
+ }
+ return __do_int_store(dst_addr, size, src_val, asi);
}
static inline void advance(struct pt_regs *regs)
@@ -223,14 +242,14 @@ static inline int ok_for_kernel(unsigned int insn)
return !floating_point_load_or_store_p(insn);
}
-void kernel_mna_trap_fault(void)
+static void kernel_mna_trap_fault(void)
{
struct pt_regs *regs = current_thread_info()->kern_una_regs;
unsigned int insn = current_thread_info()->kern_una_insn;
- unsigned long g2 = regs->u_regs[UREG_G2];
- unsigned long fixup = search_extables_range(regs->tpc, &g2);
+ const struct exception_table_entry *entry;
- if (!fixup) {
+ entry = search_exception_tables(regs->tpc);
+ if (!entry) {
unsigned long address;
address = compute_effective_address(regs, insn,
@@ -251,9 +270,8 @@ void kernel_mna_trap_fault(void)
die_if_kernel("Oops", regs);
/* Not reached */
}
- regs->tpc = fixup;
+ regs->tpc = entry->fixup;
regs->tnpc = regs->tpc + 4;
- regs->u_regs [UREG_G2] = g2;
regs->tstate &= ~TSTATE_ASI;
regs->tstate |= (ASI_AIUS << 24UL);
@@ -275,7 +293,8 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn, u
kernel_mna_trap_fault();
} else {
- unsigned long addr;
+ unsigned long addr, *reg_addr;
+ int orig_asi, asi, err;
addr = compute_effective_address(regs, insn,
((insn >> 25) & 0x1f));
@@ -285,25 +304,59 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn, u
regs->tpc, dirstrings[dir], addr, size,
regs->u_regs[UREG_RETPC]);
#endif
+ orig_asi = asi = decode_asi(insn, regs);
+ switch (asi) {
+ case ASI_NL:
+ case ASI_AIUPL:
+ case ASI_AIUSL:
+ case ASI_PL:
+ case ASI_SL:
+ case ASI_PNFL:
+ case ASI_SNFL:
+ asi &= ~0x08;
+ break;
+ };
switch (dir) {
case load:
- do_int_load(fetch_reg_addr(((insn>>25)&0x1f), regs),
- size, (unsigned long *) addr,
- decode_signedness(insn),
- decode_asi(insn, regs));
+ reg_addr = fetch_reg_addr(((insn>>25)&0x1f), regs);
+ err = do_int_load(reg_addr, size,
+ (unsigned long *) addr,
+ decode_signedness(insn), asi);
+ if (likely(!err) && unlikely(asi != orig_asi)) {
+ unsigned long val_in = *reg_addr;
+ switch (size) {
+ case 2:
+ val_in = swab16(val_in);
+ break;
+ case 4:
+ val_in = swab32(val_in);
+ break;
+ case 8:
+ val_in = swab64(val_in);
+ break;
+ case 16:
+ default:
+ BUG();
+ break;
+ };
+ *reg_addr = val_in;
+ }
break;
case store:
- do_int_store(((insn>>25)&0x1f), size,
- (unsigned long *) addr, regs,
- decode_asi(insn, regs));
+ err = do_int_store(((insn>>25)&0x1f), size,
+ (unsigned long *) addr, regs,
+ asi, orig_asi);
break;
default:
panic("Impossible kernel unaligned trap.");
/* Not reached... */
}
- advance(regs);
+ if (unlikely(err))
+ kernel_mna_trap_fault();
+ else
+ advance(regs);
}
}
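
The ASI handling deserves a note: on UltraSPARC the little-endian ASIs differ from their big-endian counterparts only in bit 3 (e.g. ASI_P is 0x80, ASI_PL is 0x88), so the handler masks 0x08 off, performs a normal big-endian access, and byte-swaps the value whenever asi != orig_asi. The swab step in miniature:

/* Model of the endian fix-up above; sizes match the switch statements
 * (stores of size 16 are reduced to 8 bytes earlier in do_int_store()). */
static unsigned long endian_fixup(unsigned long val, int size)
{
	switch (size) {
	case 2:	return swab16(val);
	case 4:	return swab32(val);
	case 8:	return swab64(val);
	}
	BUG();	/* 16-byte accesses never reach the swab path */
	return 0;
}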
diff --git a/arch/sparc64/kernel/us3_cpufreq.c b/arch/sparc64/kernel/us3_cpufreq.c
index 9080e7cd4bb..0340041f614 100644
--- a/arch/sparc64/kernel/us3_cpufreq.c
+++ b/arch/sparc64/kernel/us3_cpufreq.c
@@ -208,7 +208,10 @@ static int __init us3_freq_init(void)
impl = ((ver >> 32) & 0xffff);
if (manuf == CHEETAH_MANUF &&
- (impl == CHEETAH_IMPL || impl == CHEETAH_PLUS_IMPL)) {
+ (impl == CHEETAH_IMPL ||
+ impl == CHEETAH_PLUS_IMPL ||
+ impl == JAGUAR_IMPL ||
+ impl == PANTHER_IMPL)) {
struct cpufreq_driver *driver;
ret = -ENOMEM;
diff --git a/arch/sparc64/kernel/vmlinux.lds.S b/arch/sparc64/kernel/vmlinux.lds.S
index f47d0be3937..2af0cf0a864 100644
--- a/arch/sparc64/kernel/vmlinux.lds.S
+++ b/arch/sparc64/kernel/vmlinux.lds.S
@@ -9,8 +9,7 @@ ENTRY(_start)
jiffies = jiffies_64;
SECTIONS
{
- swapper_pmd_dir = 0x0000000000402000;
- empty_pg_dir = 0x0000000000403000;
+ swapper_low_pmd_dir = 0x0000000000402000;
. = 0x4000;
.text 0x0000000000404000 :
{
diff --git a/arch/sparc64/kernel/winfixup.S b/arch/sparc64/kernel/winfixup.S
index 99c809a1e5a..39160926267 100644
--- a/arch/sparc64/kernel/winfixup.S
+++ b/arch/sparc64/kernel/winfixup.S
@@ -16,23 +16,14 @@
.text
set_pcontext:
-cplus_winfixup_insn_1:
- sethi %hi(0), %l1
+ sethi %hi(sparc64_kern_pri_context), %l1
+ ldx [%l1 + %lo(sparc64_kern_pri_context)], %l1
mov PRIMARY_CONTEXT, %g1
- sllx %l1, 32, %l1
-cplus_winfixup_insn_2:
- sethi %hi(0), %g2
- or %l1, %g2, %l1
stxa %l1, [%g1] ASI_DMMU
flush %g6
retl
nop
-cplus_wfinsn_1:
- sethi %uhi(CTX_CHEETAH_PLUS_NUC), %l1
-cplus_wfinsn_2:
- sethi %hi(CTX_CHEETAH_PLUS_CTX0), %g2
-
.align 32
/* Here are the rules, pay attention.
@@ -395,23 +386,3 @@ window_dax_from_user_common:
add %sp, PTREGS_OFF, %o0
ba,pt %xcc, rtrap
clr %l6
-
-
- .globl cheetah_plus_patch_winfixup
-cheetah_plus_patch_winfixup:
- sethi %hi(cplus_wfinsn_1), %o0
- sethi %hi(cplus_winfixup_insn_1), %o2
- lduw [%o0 + %lo(cplus_wfinsn_1)], %o1
- or %o2, %lo(cplus_winfixup_insn_1), %o2
- stw %o1, [%o2]
- flush %o2
-
- sethi %hi(cplus_wfinsn_2), %o0
- sethi %hi(cplus_winfixup_insn_2), %o2
- lduw [%o0 + %lo(cplus_wfinsn_2)], %o1
- or %o2, %lo(cplus_winfixup_insn_2), %o2
- stw %o1, [%o2]
- flush %o2
-
- retl
- nop