Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/Makefile              |   1
-rw-r--r--  arch/powerpc/kernel/btext.c               |   2
-rw-r--r--  arch/powerpc/kernel/cputable.c            |  86
-rw-r--r--  arch/powerpc/kernel/head_64.S             |  48
-rw-r--r--  arch/powerpc/kernel/iommu.c               |  77
-rw-r--r--  arch/powerpc/kernel/misc_32.S             |  74
-rw-r--r--  arch/powerpc/kernel/misc_64.S             | 124
-rw-r--r--  arch/powerpc/kernel/module_32.c           |  39
-rw-r--r--  arch/powerpc/kernel/module_64.c           |  49
-rw-r--r--  arch/powerpc/kernel/perfmon_fsl_booke.c   | 221
-rw-r--r--  arch/powerpc/kernel/pmc.c                 |   2
-rw-r--r--  arch/powerpc/kernel/prom.c                |   8
-rw-r--r--  arch/powerpc/kernel/rtas_flash.c          |  47
-rw-r--r--  arch/powerpc/kernel/setup_32.c            |   8
-rw-r--r--  arch/powerpc/kernel/setup_64.c            |  11
-rw-r--r--  arch/powerpc/kernel/time.c                | 105
-rw-r--r--  arch/powerpc/kernel/traps.c               |  18
-rw-r--r--  arch/powerpc/kernel/vdso.c                |  43
-rw-r--r--  arch/powerpc/kernel/vdso32/vdso32.lds.S   |  12
-rw-r--r--  arch/powerpc/kernel/vdso64/gettimeofday.S |   6
-rw-r--r--  arch/powerpc/kernel/vdso64/vdso64.lds.S   |  10
-rw-r--r--  arch/powerpc/kernel/vio.c                 |   4
-rw-r--r--  arch/powerpc/kernel/vmlinux.lds.S         |   8
23 files changed, 375 insertions, 628 deletions
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 8b133afbdc2..7af23c43fd4 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -38,7 +38,6 @@ obj-$(CONFIG_6xx) += idle_6xx.o l2cr_6xx.o cpu_setup_6xx.o
obj-$(CONFIG_TAU) += tau_6xx.o
obj32-$(CONFIG_SOFTWARE_SUSPEND) += swsusp_32.o
obj32-$(CONFIG_MODULES) += module_32.o
-obj-$(CONFIG_E500) += perfmon_fsl_booke.o
ifeq ($(CONFIG_PPC_MERGE),y)
diff --git a/arch/powerpc/kernel/btext.c b/arch/powerpc/kernel/btext.c
index 995fcef156f..93f21aaf7c8 100644
--- a/arch/powerpc/kernel/btext.c
+++ b/arch/powerpc/kernel/btext.c
@@ -182,7 +182,7 @@ int btext_initialize(struct device_node *np)
prop = get_property(np, "linux,bootx-linebytes", NULL);
if (prop == NULL)
prop = get_property(np, "linebytes", NULL);
- if (prop)
+ if (prop && *prop != 0xffffffffu)
pitch = *prop;
if (pitch == 1)
pitch = 0x1000;
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 95382f99440..bfd499ee375 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -18,6 +18,7 @@
#include <asm/oprofile_impl.h>
#include <asm/cputable.h>
+#include <asm/prom.h> /* for PTRRELOC on ARCH=ppc */
struct cpu_spec* cur_cpu_spec = NULL;
EXPORT_SYMBOL(cur_cpu_spec);
@@ -73,7 +74,7 @@ extern void __restore_cpu_ppc970(void);
#define PPC_FEATURE_SPE_COMP 0
#endif
-struct cpu_spec cpu_specs[] = {
+static struct cpu_spec cpu_specs[] = {
#ifdef CONFIG_PPC64
{ /* Power3 */
.pvr_mask = 0xffff0000,
@@ -227,6 +228,21 @@ struct cpu_spec cpu_specs[] = {
.oprofile_type = PPC_OPROFILE_POWER4,
.platform = "ppc970",
},
+ { /* PPC970GX */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x00450000,
+ .cpu_name = "PPC970GX",
+ .cpu_features = CPU_FTRS_PPC970,
+ .cpu_user_features = COMMON_USER_POWER4 |
+ PPC_FEATURE_HAS_ALTIVEC_COMP,
+ .icache_bsize = 128,
+ .dcache_bsize = 128,
+ .num_pmcs = 8,
+ .cpu_setup = __setup_cpu_ppc970,
+ .oprofile_cpu_type = "ppc64/970",
+ .oprofile_type = PPC_OPROFILE_POWER4,
+ .platform = "ppc970",
+ },
{ /* Power5 GR */
.pvr_mask = 0xffff0000,
.pvr_value = 0x003a0000,
@@ -1152,3 +1168,71 @@ struct cpu_spec cpu_specs[] = {
#endif /* !CLASSIC_PPC */
#endif /* CONFIG_PPC32 */
};
+
+struct cpu_spec *identify_cpu(unsigned long offset)
+{
+ struct cpu_spec *s = cpu_specs;
+ struct cpu_spec **cur = &cur_cpu_spec;
+ unsigned int pvr = mfspr(SPRN_PVR);
+ int i;
+
+ s = PTRRELOC(s);
+ cur = PTRRELOC(cur);
+
+ if (*cur != NULL)
+ return PTRRELOC(*cur);
+
+ for (i = 0; i < ARRAY_SIZE(cpu_specs); i++,s++)
+ if ((pvr & s->pvr_mask) == s->pvr_value) {
+ *cur = cpu_specs + i;
+#ifdef CONFIG_PPC64
+ /* ppc64 expects identify_cpu to also call setup_cpu
+ * for that processor. I will consolidate that at a
+ * later time, for now, just use our friend #ifdef.
+ * we also don't need to PTRRELOC the function pointer
+ * on ppc64 as we are running at 0 in real mode.
+ */
+ if (s->cpu_setup) {
+ s->cpu_setup(offset, s);
+ }
+#endif /* CONFIG_PPC64 */
+ return s;
+ }
+ BUG();
+ return NULL;
+}
+
+void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end)
+{
+ struct fixup_entry {
+ unsigned long mask;
+ unsigned long value;
+ long start_off;
+ long end_off;
+ } *fcur, *fend;
+
+ fcur = fixup_start;
+ fend = fixup_end;
+
+ for (; fcur < fend; fcur++) {
+ unsigned int *pstart, *pend, *p;
+
+ if ((value & fcur->mask) == fcur->value)
+ continue;
+
+ /* These PTRRELOCs will disappear once the new scheme for
+ * modules and vdso is implemented
+ */
+ pstart = ((unsigned int *)fcur) + (fcur->start_off / 4);
+ pend = ((unsigned int *)fcur) + (fcur->end_off / 4);
+
+ for (p = pstart; p < pend; p++) {
+ *p = 0x60000000u;
+ asm volatile ("dcbst 0, %0" : : "r" (p));
+ }
+ asm volatile ("sync" : : : "memory");
+ for (p = pstart; p < pend; p++)
+ asm volatile ("icbi 0,%0" : : "r" (p));
+ asm volatile ("sync; isync" : : : "memory");
+ }
+}
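
Note: the C identify_cpu()/do_feature_fixups() pair added above replaces the assembler versions deleted from misc_32.S and misc_64.S further down. As a rough sketch of how the kernel image's own fixup section can now be processed at boot (the real call sites are in the setup_32.c/setup_64.c changes listed in the diffstat but not shown here), assuming the usual linker-provided section bounds and ignoring PTRRELOC handling; the helper name is made up:

	extern unsigned int __start___ftr_fixup[], __stop___ftr_fixup[];

	static void apply_cpu_feature_fixups(unsigned long offset)
	{
		struct cpu_spec *spec = identify_cpu(offset);

		/* NOP out code guarded by CPU features this CPU lacks */
		do_feature_fixups(spec->cpu_features,
				  __start___ftr_fixup, __stop___ftr_fixup);
	}
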
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 645c7f10fb2..e720729f3e5 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -487,7 +487,7 @@ BEGIN_FTR_SECTION
rlwimi r13,r12,16,0x20
mfcr r12
cmpwi r13,0x2c
- beq .do_stab_bolted_pSeries
+ beq do_stab_bolted_pSeries
mtcrf 0x80,r12
mfspr r12,SPRN_SPRG2
END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
@@ -600,7 +600,7 @@ system_call_pSeries:
STD_EXCEPTION_PSERIES(., performance_monitor)
.align 7
-_GLOBAL(do_stab_bolted_pSeries)
+do_stab_bolted_pSeries:
mtcrf 0x80,r12
mfspr r12,SPRN_SPRG2
EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)
@@ -1046,7 +1046,7 @@ slb_miss_fault:
li r5,0
std r4,_DAR(r1)
std r5,_DSISR(r1)
- b .handle_page_fault
+ b handle_page_fault
unrecov_user_slb:
EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
@@ -1174,12 +1174,13 @@ program_check_common:
.globl fp_unavailable_common
fp_unavailable_common:
EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
- bne .load_up_fpu /* if from user, just load it up */
+ bne 1f /* if from user, just load it up */
bl .save_nvgprs
addi r3,r1,STACK_FRAME_OVERHEAD
ENABLE_INTS
bl .kernel_fp_unavailable_exception
BUG_OPCODE
+1: b .load_up_fpu
.align 7
.globl altivec_unavailable_common
@@ -1279,10 +1280,10 @@ _GLOBAL(do_hash_page)
std r4,_DSISR(r1)
andis. r0,r4,0xa450 /* weird error? */
- bne- .handle_page_fault /* if not, try to insert a HPTE */
+ bne- handle_page_fault /* if not, try to insert a HPTE */
BEGIN_FTR_SECTION
andis. r0,r4,0x0020 /* Is it a segment table fault? */
- bne- .do_ste_alloc /* If so handle it */
+ bne- do_ste_alloc /* If so handle it */
END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
/*
@@ -1324,7 +1325,7 @@ BEGIN_FW_FTR_SECTION
* because ret_from_except_lite will check for and handle pending
* interrupts if necessary.
*/
- beq .ret_from_except_lite
+ beq 13f
/* For a hash failure, we don't bother re-enabling interrupts */
ble- 12f
@@ -1346,14 +1347,14 @@ BEGIN_FW_FTR_SECTION
END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
/* Here we have a page fault that hash_page can't handle. */
-_GLOBAL(handle_page_fault)
+handle_page_fault:
ENABLE_INTS
11: ld r4,_DAR(r1)
ld r5,_DSISR(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
bl .do_page_fault
cmpdi r3,0
- beq+ .ret_from_except_lite
+ beq+ 13f
bl .save_nvgprs
mr r5,r3
addi r3,r1,STACK_FRAME_OVERHEAD
@@ -1370,12 +1371,14 @@ _GLOBAL(handle_page_fault)
bl .low_hash_fault
b .ret_from_except
+13: b .ret_from_except_lite
+
/* here we have a segment miss */
-_GLOBAL(do_ste_alloc)
+do_ste_alloc:
bl .ste_allocate /* try to insert stab entry */
cmpdi r3,0
- beq+ fast_exception_return
- b .handle_page_fault
+ bne- handle_page_fault
+ b fast_exception_return
/*
* r13 points to the PACA, r9 contains the saved CR,
@@ -1580,11 +1583,6 @@ _STATIC(__start_initialization_iSeries)
li r0,0
stdu r0,-STACK_FRAME_OVERHEAD(r1)
- LOAD_REG_IMMEDIATE(r3,cpu_specs)
- LOAD_REG_IMMEDIATE(r4,cur_cpu_spec)
- li r5,0
- bl .identify_cpu
-
LOAD_REG_IMMEDIATE(r2,__toc_start)
addi r2,r2,0x4000
addi r2,r2,0x4000
@@ -1646,6 +1644,8 @@ _GLOBAL(__start_initialization_multiplatform)
cmpwi r0,0x3c /* 970FX */
beq 1f
cmpwi r0,0x44 /* 970MP */
+ beq 1f
+ cmpwi r0,0x45 /* 970GX */
bne 2f
1: bl .__cpu_preinit_ppc970
2:
@@ -1964,13 +1964,6 @@ _STATIC(start_here_multiplatform)
addi r2,r2,0x4000
add r2,r2,r26
- LOAD_REG_IMMEDIATE(r3, cpu_specs)
- add r3,r3,r26
- LOAD_REG_IMMEDIATE(r4,cur_cpu_spec)
- add r4,r4,r26
- mr r5,r26
- bl .identify_cpu
-
/* Do very early kernel initializations, including initial hash table,
* stab and slb setup before we turn on relocation. */
@@ -2000,13 +1993,6 @@ _STATIC(start_here_common)
li r0,0
stdu r0,-STACK_FRAME_OVERHEAD(r1)
- /* Apply the CPUs-specific fixups (nop out sections not relevant
- * to this CPU
- */
- li r3,0
- bl .do_cpu_ftr_fixups
- bl .do_fw_ftr_fixups
-
/* ptr to current */
LOAD_REG_IMMEDIATE(r4, init_task)
std r4,PACACURRENT(r13)
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index f88a2a675d9..ba6b7256084 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -47,6 +47,17 @@ static int novmerge = 0;
static int novmerge = 1;
#endif
+static inline unsigned long iommu_num_pages(unsigned long vaddr,
+ unsigned long slen)
+{
+ unsigned long npages;
+
+ npages = IOMMU_PAGE_ALIGN(vaddr + slen) - (vaddr & IOMMU_PAGE_MASK);
+ npages >>= IOMMU_PAGE_SHIFT;
+
+ return npages;
+}
+
static int __init setup_iommu(char *str)
{
if (!strcmp(str, "novmerge"))
@@ -178,10 +189,10 @@ static dma_addr_t iommu_alloc(struct iommu_table *tbl, void *page,
}
entry += tbl->it_offset; /* Offset into real TCE table */
- ret = entry << PAGE_SHIFT; /* Set the return dma address */
+ ret = entry << IOMMU_PAGE_SHIFT; /* Set the return dma address */
/* Put the TCEs in the HW table */
- ppc_md.tce_build(tbl, entry, npages, (unsigned long)page & PAGE_MASK,
+ ppc_md.tce_build(tbl, entry, npages, (unsigned long)page & IOMMU_PAGE_MASK,
direction);
@@ -203,7 +214,7 @@ static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
unsigned long entry, free_entry;
unsigned long i;
- entry = dma_addr >> PAGE_SHIFT;
+ entry = dma_addr >> IOMMU_PAGE_SHIFT;
free_entry = entry - tbl->it_offset;
if (((free_entry + npages) > tbl->it_size) ||
@@ -270,7 +281,7 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
/* Init first segment length for backout at failure */
outs->dma_length = 0;
- DBG("mapping %d elements:\n", nelems);
+ DBG("sg mapping %d elements:\n", nelems);
spin_lock_irqsave(&(tbl->it_lock), flags);
@@ -285,9 +296,8 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
}
/* Allocate iommu entries for that segment */
vaddr = (unsigned long)page_address(s->page) + s->offset;
- npages = PAGE_ALIGN(vaddr + slen) - (vaddr & PAGE_MASK);
- npages >>= PAGE_SHIFT;
- entry = iommu_range_alloc(tbl, npages, &handle, mask >> PAGE_SHIFT, 0);
+ npages = iommu_num_pages(vaddr, slen);
+ entry = iommu_range_alloc(tbl, npages, &handle, mask >> IOMMU_PAGE_SHIFT, 0);
DBG(" - vaddr: %lx, size: %lx\n", vaddr, slen);
@@ -301,14 +311,14 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
/* Convert entry to a dma_addr_t */
entry += tbl->it_offset;
- dma_addr = entry << PAGE_SHIFT;
- dma_addr |= s->offset;
+ dma_addr = entry << IOMMU_PAGE_SHIFT;
+ dma_addr |= (s->offset & ~IOMMU_PAGE_MASK);
- DBG(" - %lx pages, entry: %lx, dma_addr: %lx\n",
+ DBG(" - %lu pages, entry: %lx, dma_addr: %lx\n",
npages, entry, dma_addr);
/* Insert into HW table */
- ppc_md.tce_build(tbl, entry, npages, vaddr & PAGE_MASK, direction);
+ ppc_md.tce_build(tbl, entry, npages, vaddr & IOMMU_PAGE_MASK, direction);
/* If we are in an open segment, try merging */
if (segstart != s) {
@@ -323,7 +333,7 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
DBG(" can't merge, new segment.\n");
} else {
outs->dma_length += s->length;
- DBG(" merged, new len: %lx\n", outs->dma_length);
+ DBG(" merged, new len: %ux\n", outs->dma_length);
}
}
@@ -367,9 +377,8 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
if (s->dma_length != 0) {
unsigned long vaddr, npages;
- vaddr = s->dma_address & PAGE_MASK;
- npages = (PAGE_ALIGN(s->dma_address + s->dma_length) - vaddr)
- >> PAGE_SHIFT;
+ vaddr = s->dma_address & IOMMU_PAGE_MASK;
+ npages = iommu_num_pages(s->dma_address, s->dma_length);
__iommu_free(tbl, vaddr, npages);
s->dma_address = DMA_ERROR_CODE;
s->dma_length = 0;
@@ -398,8 +407,7 @@ void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
if (sglist->dma_length == 0)
break;
- npages = (PAGE_ALIGN(dma_handle + sglist->dma_length)
- - (dma_handle & PAGE_MASK)) >> PAGE_SHIFT;
+ npages = iommu_num_pages(dma_handle,sglist->dma_length);
__iommu_free(tbl, dma_handle, npages);
sglist++;
}
@@ -532,12 +540,11 @@ dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
BUG_ON(direction == DMA_NONE);
uaddr = (unsigned long)vaddr;
- npages = PAGE_ALIGN(uaddr + size) - (uaddr & PAGE_MASK);
- npages >>= PAGE_SHIFT;
+ npages = iommu_num_pages(uaddr, size);
if (tbl) {
dma_handle = iommu_alloc(tbl, vaddr, npages, direction,
- mask >> PAGE_SHIFT, 0);
+ mask >> IOMMU_PAGE_SHIFT, 0);
if (dma_handle == DMA_ERROR_CODE) {
if (printk_ratelimit()) {
printk(KERN_INFO "iommu_alloc failed, "
@@ -545,7 +552,7 @@ dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
tbl, vaddr, npages);
}
} else
- dma_handle |= (uaddr & ~PAGE_MASK);
+ dma_handle |= (uaddr & ~IOMMU_PAGE_MASK);
}
return dma_handle;
@@ -554,11 +561,14 @@ dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
size_t size, enum dma_data_direction direction)
{
+ unsigned int npages;
+
BUG_ON(direction == DMA_NONE);
- if (tbl)
- iommu_free(tbl, dma_handle, (PAGE_ALIGN(dma_handle + size) -
- (dma_handle & PAGE_MASK)) >> PAGE_SHIFT);
+ if (tbl) {
+ npages = iommu_num_pages(dma_handle, size);
+ iommu_free(tbl, dma_handle, npages);
+ }
}
/* Allocates a contiguous real buffer and creates mappings over it.
@@ -570,11 +580,11 @@ void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
{
void *ret = NULL;
dma_addr_t mapping;
- unsigned int npages, order;
+ unsigned int order;
+ unsigned int nio_pages, io_order;
struct page *page;
size = PAGE_ALIGN(size);
- npages = size >> PAGE_SHIFT;
order = get_order(size);
/*
@@ -598,8 +608,10 @@ void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
memset(ret, 0, size);
/* Set up tces to cover the allocated range */
- mapping = iommu_alloc(tbl, ret, npages, DMA_BIDIRECTIONAL,
- mask >> PAGE_SHIFT, order);
+ nio_pages = size >> IOMMU_PAGE_SHIFT;
+ io_order = get_iommu_order(size);
+ mapping = iommu_alloc(tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
+ mask >> IOMMU_PAGE_SHIFT, io_order);
if (mapping == DMA_ERROR_CODE) {
free_pages((unsigned long)ret, order);
return NULL;
@@ -611,12 +623,13 @@ void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
void iommu_free_coherent(struct iommu_table *tbl, size_t size,
void *vaddr, dma_addr_t dma_handle)
{
- unsigned int npages;
-
if (tbl) {
+ unsigned int nio_pages;
+
+ size = PAGE_ALIGN(size);
+ nio_pages = size >> IOMMU_PAGE_SHIFT;
+ iommu_free(tbl, dma_handle, nio_pages);
size = PAGE_ALIGN(size);
- npages = size >> PAGE_SHIFT;
- iommu_free(tbl, dma_handle, npages);
free_pages((unsigned long)vaddr, get_order(size));
}
}
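
Note: the point of switching from PAGE_* to IOMMU_PAGE_* throughout this file is that the TCE table is managed in fixed-size IOMMU pages, independent of the kernel's PAGE_SIZE. A self-contained sketch of the arithmetic the new iommu_num_pages() helper performs, using stand-in macros (the real constants come from the powerpc iommu headers; a 4K IOMMU page is assumed here):

	#include <stdio.h>

	#define IOMMU_PAGE_SHIFT	12	/* assumed 4K IOMMU page */
	#define IOMMU_PAGE_SIZE		(1UL << IOMMU_PAGE_SHIFT)
	#define IOMMU_PAGE_MASK		(~(IOMMU_PAGE_SIZE - 1))
	#define IOMMU_PAGE_ALIGN(a)	(((a) + IOMMU_PAGE_SIZE - 1) & IOMMU_PAGE_MASK)

	static unsigned long iommu_num_pages(unsigned long vaddr, unsigned long slen)
	{
		return (IOMMU_PAGE_ALIGN(vaddr + slen) -
			(vaddr & IOMMU_PAGE_MASK)) >> IOMMU_PAGE_SHIFT;
	}

	int main(void)
	{
		/* 0x300 bytes at 0x10000f00 straddle one 4K boundary, so two
		 * TCE entries are needed even on a 64K-page kernel. */
		printf("%lu\n", iommu_num_pages(0x10000f00UL, 0x300UL)); /* prints 2 */
		return 0;
	}
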
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 88fd73fdf04..412bea3cf81 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -102,80 +102,6 @@ _GLOBAL(reloc_got2)
blr
/*
- * identify_cpu,
- * called with r3 = data offset and r4 = CPU number
- * doesn't change r3
- */
-_GLOBAL(identify_cpu)
- addis r8,r3,cpu_specs@ha
- addi r8,r8,cpu_specs@l
- mfpvr r7
-1:
- lwz r5,CPU_SPEC_PVR_MASK(r8)
- and r5,r5,r7
- lwz r6,CPU_SPEC_PVR_VALUE(r8)
- cmplw 0,r6,r5
- beq 1f
- addi r8,r8,CPU_SPEC_ENTRY_SIZE
- b 1b
-1:
- addis r6,r3,cur_cpu_spec@ha
- addi r6,r6,cur_cpu_spec@l
- sub r8,r8,r3
- stw r8,0(r6)
- blr
-
-/*
- * do_cpu_ftr_fixups - goes through the list of CPU feature fixups
- * and writes nop's over sections of code that don't apply for this cpu.
- * r3 = data offset (not changed)
- */
-_GLOBAL(do_cpu_ftr_fixups)
- /* Get CPU 0 features */
- addis r6,r3,cur_cpu_spec@ha
- addi r6,r6,cur_cpu_spec@l
- lwz r4,0(r6)
- add r4,r4,r3
- lwz r4,CPU_SPEC_FEATURES(r4)
-
- /* Get the fixup table */
- addis r6,r3,__start___ftr_fixup@ha
- addi r6,r6,__start___ftr_fixup@l
- addis r7,r3,__stop___ftr_fixup@ha
- addi r7,r7,__stop___ftr_fixup@l
-
- /* Do the fixup */
-1: cmplw 0,r6,r7
- bgelr
- addi r6,r6,16
- lwz r8,-16(r6) /* mask */
- and r8,r8,r4
- lwz r9,-12(r6) /* value */
- cmplw 0,r8,r9
- beq 1b
- lwz r8,-8(r6) /* section begin */
- lwz r9,-4(r6) /* section end */
- subf. r9,r8,r9
- beq 1b
- /* write nops over the section of code */
- /* todo: if large section, add a branch at the start of it */
- srwi r9,r9,2
- mtctr r9
- add r8,r8,r3
- lis r0,0x60000000@h /* nop */
-3: stw r0,0(r8)
- andi. r10,r4,CPU_FTR_SPLIT_ID_CACHE@l
- beq 2f
- dcbst 0,r8 /* suboptimal, but simpler */
- sync
- icbi 0,r8
-2: addi r8,r8,4
- bdnz 3b
- sync /* additional sync needed on g4 */
- isync
- b 1b
-
-/*
* call_setup_cpu - call the setup_cpu function for this cpu
* r3 = data offset, r24 = cpu number
*
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index c70e20708a1..21fd2c662a9 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -246,130 +246,6 @@ _GLOBAL(__flush_dcache_icache)
isync
blr
-/*
- * identify_cpu and calls setup_cpu
- * In: r3 = base of the cpu_specs array
- * r4 = address of cur_cpu_spec
- * r5 = relocation offset
- */
-_GLOBAL(identify_cpu)
- mfpvr r7
-1:
- lwz r8,CPU_SPEC_PVR_MASK(r3)
- and r8,r8,r7
- lwz r9,CPU_SPEC_PVR_VALUE(r3)
- cmplw 0,r9,r8
- beq 1f
- addi r3,r3,CPU_SPEC_ENTRY_SIZE
- b 1b
-1:
- sub r0,r3,r5
- std r0,0(r4)
- ld r4,CPU_SPEC_SETUP(r3)
- cmpdi 0,r4,0
- add r4,r4,r5
- beqlr
- ld r4,0(r4)
- add r4,r4,r5
- mtctr r4
- /* Calling convention for cpu setup is r3=offset, r4=cur_cpu_spec */
- mr r4,r3
- mr r3,r5
- bctr
-
-/*
- * do_cpu_ftr_fixups - goes through the list of CPU feature fixups
- * and writes nop's over sections of code that don't apply for this cpu.
- * r3 = data offset (not changed)
- */
-_GLOBAL(do_cpu_ftr_fixups)
- /* Get CPU 0 features */
- LOAD_REG_IMMEDIATE(r6,cur_cpu_spec)
- sub r6,r6,r3
- ld r4,0(r6)
- sub r4,r4,r3
- ld r4,CPU_SPEC_FEATURES(r4)
- /* Get the fixup table */
- LOAD_REG_IMMEDIATE(r6,__start___ftr_fixup)
- sub r6,r6,r3
- LOAD_REG_IMMEDIATE(r7,__stop___ftr_fixup)
- sub r7,r7,r3
- /* Do the fixup */
-1: cmpld r6,r7
- bgelr
- addi r6,r6,32
- ld r8,-32(r6) /* mask */
- and r8,r8,r4
- ld r9,-24(r6) /* value */
- cmpld r8,r9
- beq 1b
- ld r8,-16(r6) /* section begin */
- ld r9,-8(r6) /* section end */
- subf. r9,r8,r9
- beq 1b
- /* write nops over the section of code */
- /* todo: if large section, add a branch at the start of it */
- srwi r9,r9,2
- mtctr r9
- sub r8,r8,r3
- lis r0,0x60000000@h /* nop */
-3: stw r0,0(r8)
- andi. r10,r4,CPU_FTR_SPLIT_ID_CACHE@l
- beq 2f
- dcbst 0,r8 /* suboptimal, but simpler */
- sync
- icbi 0,r8
-2: addi r8,r8,4
- bdnz 3b
- sync /* additional sync needed on g4 */
- isync
- b 1b
-
-/*
- * do_fw_ftr_fixups - goes through the list of firmware feature fixups
- * and writes nop's over sections of code that don't apply for this firmware.
- * r3 = data offset (not changed)
- */
-_GLOBAL(do_fw_ftr_fixups)
- /* Get firmware features */
- LOAD_REG_IMMEDIATE(r6,powerpc_firmware_features)
- sub r6,r6,r3
- ld r4,0(r6)
- /* Get the fixup table */
- LOAD_REG_IMMEDIATE(r6,__start___fw_ftr_fixup)
- sub r6,r6,r3
- LOAD_REG_IMMEDIATE(r7,__stop___fw_ftr_fixup)
- sub r7,r7,r3
- /* Do the fixup */
-1: cmpld r6,r7
- bgelr
- addi r6,r6,32
- ld r8,-32(r6) /* mask */
- and r8,r8,r4
- ld r9,-24(r6) /* value */
- cmpld r8,r9
- beq 1b
- ld r8,-16(r6) /* section begin */
- ld r9,-8(r6) /* section end */
- subf. r9,r8,r9
- beq 1b
- /* write nops over the section of code */
- /* todo: if large section, add a branch at the start of it */
- srwi r9,r9,2
- mtctr r9
- sub r8,r8,r3
- lis r0,0x60000000@h /* nop */
-3: stw r0,0(r8)
-BEGIN_FTR_SECTION
- dcbst 0,r8 /* suboptimal, but simpler */
- sync
- icbi 0,r8
-END_FTR_SECTION_IFSET(CPU_FTR_SPLIT_ID_CACHE)
- addi r8,r8,4
- bdnz 3b
- sync /* additional sync needed on g4 */
- isync
- b 1b
#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE)
/*
diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
index 92f4e5f64f0..e2c3c6a85f3 100644
--- a/arch/powerpc/kernel/module_32.c
+++ b/arch/powerpc/kernel/module_32.c
@@ -24,6 +24,8 @@
#include <linux/kernel.h>
#include <linux/cache.h>
+#include "setup.h"
+
#if 0
#define DEBUGP printk
#else
@@ -269,33 +271,50 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
return 0;
}
+static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
+ const Elf_Shdr *sechdrs,
+ const char *name)
+{
+ char *secstrings;
+ unsigned int i;
+
+ secstrings = (char *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+ for (i = 1; i < hdr->e_shnum; i++)
+ if (strcmp(secstrings+sechdrs[i].sh_name, name) == 0)
+ return &sechdrs[i];
+ return NULL;
+}
+
int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
struct module *me)
{
- char *secstrings;
- unsigned int i;
+ const Elf_Shdr *sect;
me->arch.bug_table = NULL;
me->arch.num_bugs = 0;
/* Find the __bug_table section, if present */
- secstrings = (char *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
- for (i = 1; i < hdr->e_shnum; i++) {
- if (strcmp(secstrings+sechdrs[i].sh_name, "__bug_table"))
- continue;
- me->arch.bug_table = (void *) sechdrs[i].sh_addr;
- me->arch.num_bugs = sechdrs[i].sh_size / sizeof(struct bug_entry);
- break;
+ sect = find_section(hdr, sechdrs, "__bug_table");
+ if (sect != NULL) {
+ me->arch.bug_table = (void *) sect->sh_addr;
+ me->arch.num_bugs = sect->sh_size / sizeof(struct bug_entry);
}
- /*
+ /*
* Strictly speaking this should have a spinlock to protect against
* traversals, but since we only traverse on BUG()s, a spinlock
* could potentially lead to deadlock and thus be counter-productive.
*/
list_add(&me->arch.bug_list, &module_bug_list);
+ /* Apply feature fixups */
+ sect = find_section(hdr, sechdrs, "__ftr_fixup");
+ if (sect != NULL)
+ do_feature_fixups(cur_cpu_spec->cpu_features,
+ (void *)sect->sh_addr,
+ (void *)sect->sh_addr + sect->sh_size);
+
return 0;
}
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index ba34001fca8..8dd1f0aae5d 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -22,6 +22,9 @@
#include <linux/vmalloc.h>
#include <asm/module.h>
#include <asm/uaccess.h>
+#include <asm/firmware.h>
+
+#include "setup.h"
/* FIXME: We don't do .init separately. To do this, we'd need to have
a separate r2 value in the init and core section, and stub between
@@ -400,6 +403,11 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
| (value & 0x03fffffc);
break;
+ case R_PPC64_REL64:
+ /* 64 bits relative (used by features fixups) */
+ *location = value - (unsigned long)location;
+ break;
+
default:
printk("%s: Unknown ADD relocation: %lu\n",
me->name,
@@ -413,23 +421,33 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
LIST_HEAD(module_bug_list);
-int module_finalize(const Elf_Ehdr *hdr,
- const Elf_Shdr *sechdrs, struct module *me)
+static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
+ const Elf_Shdr *sechdrs,
+ const char *name)
{
char *secstrings;
unsigned int i;
+ secstrings = (char *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+ for (i = 1; i < hdr->e_shnum; i++)
+ if (strcmp(secstrings+sechdrs[i].sh_name, name) == 0)
+ return &sechdrs[i];
+ return NULL;
+}
+
+int module_finalize(const Elf_Ehdr *hdr,
+ const Elf_Shdr *sechdrs, struct module *me)
+{
+ const Elf_Shdr *sect;
+
me->arch.bug_table = NULL;
me->arch.num_bugs = 0;
/* Find the __bug_table section, if present */
- secstrings = (char *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
- for (i = 1; i < hdr->e_shnum; i++) {
- if (strcmp(secstrings+sechdrs[i].sh_name, "__bug_table"))
- continue;
- me->arch.bug_table = (void *) sechdrs[i].sh_addr;
- me->arch.num_bugs = sechdrs[i].sh_size / sizeof(struct bug_entry);
- break;
+ sect = find_section(hdr, sechdrs, "__bug_table");
+ if (sect != NULL) {
+ me->arch.bug_table = (void *) sect->sh_addr;
+ me->arch.num_bugs = sect->sh_size / sizeof(struct bug_entry);
}
/*
@@ -439,6 +457,19 @@ int module_finalize(const Elf_Ehdr *hdr,
*/
list_add(&me->arch.bug_list, &module_bug_list);
+ /* Apply feature fixups */
+ sect = find_section(hdr, sechdrs, "__ftr_fixup");
+ if (sect != NULL)
+ do_feature_fixups(cur_cpu_spec->cpu_features,
+ (void *)sect->sh_addr,
+ (void *)sect->sh_addr + sect->sh_size);
+
+ sect = find_section(hdr, sechdrs, "__fw_ftr_fixup");
+ if (sect != NULL)
+ do_feature_fixups(powerpc_firmware_features,
+ (void *)sect->sh_addr,
+ (void *)sect->sh_addr + sect->sh_size);
+
return 0;
}
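
Note: the new R_PPC64_REL64 case and the module fixup passes above go together. A module's __ftr_fixup/__fw_ftr_fixup entries record their code ranges as relative offsets (see start_off/end_off in the cputable.c hunk earlier), and since a module only gets its final addresses at load time, those fields must be filled in by a relative relocation rather than an absolute one. A rough illustration of the relative-offset round trip, with made-up addresses and glossing over the exact assembler-generated entry layout:

	#include <stdio.h>

	/* mirrors the new case in apply_relocate_add() above */
	static void reloc_rel64(unsigned long *location, unsigned long value)
	{
		*location = value - (unsigned long)location;
	}

	int main(void)
	{
		unsigned long code_start = 0xd000000000100000UL; /* made-up .text address */
		unsigned long slot;	/* stands in for a fixup-table offset field */

		reloc_rel64(&slot, code_start);

		/* Adding the field's own address back recovers the target, which
		 * is the self-relative scheme do_feature_fixups() relies on. */
		printf("%d\n", ((unsigned long)&slot + slot) == code_start); /* prints 1 */
		return 0;
	}
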
diff --git a/arch/powerpc/kernel/perfmon_fsl_booke.c b/arch/powerpc/kernel/perfmon_fsl_booke.c
deleted file mode 100644
index e0dcf2b41fb..00000000000
--- a/arch/powerpc/kernel/perfmon_fsl_booke.c
+++ /dev/null
@@ -1,221 +0,0 @@
-/* arch/powerpc/kernel/perfmon_fsl_booke.c
- * Freescale Book-E Performance Monitor code
- *
- * Author: Andy Fleming
- * Copyright (c) 2004 Freescale Semiconductor, Inc
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/stddef.h>
-#include <linux/unistd.h>
-#include <linux/ptrace.h>
-#include <linux/slab.h>
-#include <linux/user.h>
-#include <linux/a.out.h>
-#include <linux/interrupt.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/prctl.h>
-
-#include <asm/pgtable.h>
-#include <asm/uaccess.h>
-#include <asm/system.h>
-#include <asm/io.h>
-#include <asm/reg.h>
-#include <asm/xmon.h>
-#include <asm/pmc.h>
-
-static inline u32 get_pmlca(int ctr);
-static inline void set_pmlca(int ctr, u32 pmlca);
-
-static inline u32 get_pmlca(int ctr)
-{
- u32 pmlca;
-
- switch (ctr) {
- case 0:
- pmlca = mfpmr(PMRN_PMLCA0);
- break;
- case 1:
- pmlca = mfpmr(PMRN_PMLCA1);
- break;
- case 2:
- pmlca = mfpmr(PMRN_PMLCA2);
- break;
- case 3:
- pmlca = mfpmr(PMRN_PMLCA3);
- break;
- default:
- panic("Bad ctr number\n");
- }
-
- return pmlca;
-}
-
-static inline void set_pmlca(int ctr, u32 pmlca)
-{
- switch (ctr) {
- case 0:
- mtpmr(PMRN_PMLCA0, pmlca);
- break;
- case 1:
- mtpmr(PMRN_PMLCA1, pmlca);
- break;
- case 2:
- mtpmr(PMRN_PMLCA2, pmlca);
- break;
- case 3:
- mtpmr(PMRN_PMLCA3, pmlca);
- break;
- default:
- panic("Bad ctr number\n");
- }
-}
-
-void init_pmc_stop(int ctr)
-{
- u32 pmlca = (PMLCA_FC | PMLCA_FCS | PMLCA_FCU |
- PMLCA_FCM1 | PMLCA_FCM0);
- u32 pmlcb = 0;
-
- switch (ctr) {
- case 0:
- mtpmr(PMRN_PMLCA0, pmlca);
- mtpmr(PMRN_PMLCB0, pmlcb);