Diffstat (limited to 'arch/powerpc/kernel/vdso64')
-rw-r--r--  arch/powerpc/kernel/vdso64/.gitignore       |   1
-rw-r--r--  arch/powerpc/kernel/vdso64/Makefile         |  29
-rw-r--r--  arch/powerpc/kernel/vdso64/cacheflush.S     |  41
-rw-r--r--  arch/powerpc/kernel/vdso64/getcpu.S         |  45
-rw-r--r--  arch/powerpc/kernel/vdso64/gettimeofday.S   | 184
-rw-r--r--  arch/powerpc/kernel/vdso64/sigtramp.S       |  27
-rw-r--r--  arch/powerpc/kernel/vdso64/vdso64.lds.S     | 227
-rw-r--r--  arch/powerpc/kernel/vdso64/vdso64_wrapper.S |   6
8 files changed, 344 insertions(+), 216 deletions(-)
diff --git a/arch/powerpc/kernel/vdso64/.gitignore b/arch/powerpc/kernel/vdso64/.gitignore
index 3fd18cf9fec..77a0b423642 100644
--- a/arch/powerpc/kernel/vdso64/.gitignore
+++ b/arch/powerpc/kernel/vdso64/.gitignore
@@ -1 +1,2 @@
vdso64.lds
+vdso64.so.dbg
diff --git a/arch/powerpc/kernel/vdso64/Makefile b/arch/powerpc/kernel/vdso64/Makefile
index 43af9b2a6f3..effca9404b1 100644
--- a/arch/powerpc/kernel/vdso64/Makefile
+++ b/arch/powerpc/kernel/vdso64/Makefile
@@ -1,16 +1,18 @@
# List of files in the vdso, has to be asm only for now
-obj-vdso64 = sigtramp.o gettimeofday.o datapage.o cacheflush.o note.o
+obj-vdso64 = sigtramp.o gettimeofday.o datapage.o cacheflush.o note.o getcpu.o
# Build rules
-targets := $(obj-vdso64) vdso64.so
+targets := $(obj-vdso64) vdso64.so vdso64.so.dbg
obj-vdso64 := $(addprefix $(obj)/, $(obj-vdso64))
-EXTRA_CFLAGS := -shared -s -fno-common -fno-builtin
-EXTRA_CFLAGS += -nostdlib -Wl,-soname=linux-vdso64.so.1 \
- $(call ld-option, -Wl$(comma)--hash-style=sysv)
-EXTRA_AFLAGS := -D__VDSO64__ -s
+GCOV_PROFILE := n
+
+ccflags-y := -shared -fno-common -fno-builtin
+ccflags-y += -nostdlib -Wl,-soname=linux-vdso64.so.1 \
+ $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
+asflags-y := -D__VDSO64__ -s
obj-y += vdso64_wrapper.o
extra-y += vdso64.lds
@@ -20,9 +22,14 @@ CPPFLAGS_vdso64.lds += -P -C -U$(ARCH)
$(obj)/vdso64_wrapper.o : $(obj)/vdso64.so
# link rule for the .so file, .lds has to be first
-$(obj)/vdso64.so: $(src)/vdso64.lds $(obj-vdso64)
+$(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64)
$(call if_changed,vdso64ld)
+# strip rule for the .so file
+$(obj)/%.so: OBJCOPYFLAGS := -S
+$(obj)/%.so: $(obj)/%.so.dbg FORCE
+ $(call if_changed,objcopy)
+
# assembly rules for the .S files
$(obj-vdso64): %.o: %.S
$(call if_changed_dep,vdso64as)
@@ -33,4 +40,12 @@ quiet_cmd_vdso64ld = VDSO64L $@
quiet_cmd_vdso64as = VDSO64A $@
cmd_vdso64as = $(CC) $(a_flags) -c -o $@ $<
+# install commands for the unstripped file
+quiet_cmd_vdso_install = INSTALL $@
+ cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@
+
+vdso64.so: $(obj)/vdso64.so.dbg
+ @mkdir -p $(MODLIB)/vdso
+ $(call cmd,vdso_install)
+vdso_install: vdso64.so
diff --git a/arch/powerpc/kernel/vdso64/cacheflush.S b/arch/powerpc/kernel/vdso64/cacheflush.S
index 66a36d3cc6a..69c5af2b3c9 100644
--- a/arch/powerpc/kernel/vdso64/cacheflush.S
+++ b/arch/powerpc/kernel/vdso64/cacheflush.S
@@ -23,29 +23,46 @@
*
* Flushes the data cache & invalidate the instruction cache for the
* provided range [start, end[
- *
- * Note: all CPUs supported by this kernel have a 128 bytes cache
- * line size so we don't have to peek that info from the datapage
*/
V_FUNCTION_BEGIN(__kernel_sync_dicache)
.cfi_startproc
- li r5,127
- andc r6,r3,r5 /* round low to line bdy */
+ mflr r12
+ .cfi_register lr,r12
+ mr r11,r3
+ bl V_LOCAL_FUNC(__get_datapage)
+ mtlr r12
+ mr r10,r3
+
+ lwz r7,CFG_DCACHE_BLOCKSZ(r10)
+ addi r5,r7,-1
+ andc r6,r11,r5 /* round low to line bdy */
subf r8,r6,r4 /* compute length */
add r8,r8,r5 /* ensure we get enough */
- srwi. r8,r8,7 /* compute line count */
+ lwz r9,CFG_DCACHE_LOGBLOCKSZ(r10)
+ srw. r8,r8,r9 /* compute line count */
crclr cr0*4+so
beqlr /* nothing to do? */
mtctr r8
- mr r3,r6
-1: dcbst 0,r3
- addi r3,r3,128
+1: dcbst 0,r6
+ add r6,r6,r7
bdnz 1b
sync
+
+/* Now invalidate the instruction cache */
+
+ lwz r7,CFG_ICACHE_BLOCKSZ(r10)
+ addi r5,r7,-1
+ andc r6,r11,r5 /* round low to line bdy */
+ subf r8,r6,r4 /* compute length */
+ add r8,r8,r5
+ lwz r9,CFG_ICACHE_LOGBLOCKSZ(r10)
+ srw. r8,r8,r9 /* compute line count */
+ crclr cr0*4+so
+ beqlr /* nothing to do? */
mtctr r8
-1: icbi 0,r6
- addi r6,r6,128
- bdnz 1b
+2: icbi 0,r6
+ add r6,r6,r7
+ bdnz 2b
isync
li r3,0
blr
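
The rewritten __kernel_sync_dicache above takes the D- and I-cache block sizes from the vDSO datapage (CFG_DCACHE_BLOCKSZ, CFG_ICACHE_BLOCKSZ) instead of assuming 128-byte lines. A minimal C sketch of the equivalent logic, assuming the block sizes have already been read from the datapage; the function and parameter names are illustrative, not part of the patch:

#include <stdint.h>

/* dblock/iblock stand in for the CFG_DCACHE_BLOCKSZ and CFG_ICACHE_BLOCKSZ
 * values the assembly loads from the datapage (powerpc only). */
static void sync_dicache_sketch(uintptr_t start, uintptr_t end,
                                uintptr_t dblock, uintptr_t iblock)
{
	uintptr_t p;

	/* flush the data cache over the range rounded down to a block boundary */
	for (p = start & ~(dblock - 1); p < end; p += dblock)
		__asm__ volatile("dcbst 0,%0" : : "r"(p) : "memory");
	__asm__ volatile("sync" : : : "memory");

	/* then invalidate the instruction cache over the same range */
	for (p = start & ~(iblock - 1); p < end; p += iblock)
		__asm__ volatile("icbi 0,%0" : : "r"(p) : "memory");
	__asm__ volatile("isync" : : : "memory");
}
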
diff --git a/arch/powerpc/kernel/vdso64/getcpu.S b/arch/powerpc/kernel/vdso64/getcpu.S
new file mode 100644
index 00000000000..23eb9a9441b
--- /dev/null
+++ b/arch/powerpc/kernel/vdso64/getcpu.S
@@ -0,0 +1,45 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2012
+ *
+ * Author: Anton Blanchard <anton@au.ibm.com>
+ */
+#include <asm/ppc_asm.h>
+#include <asm/vdso.h>
+
+ .text
+/*
+ * Exact prototype of getcpu
+ *
+ * int __kernel_getcpu(unsigned *cpu, unsigned *node);
+ *
+ */
+V_FUNCTION_BEGIN(__kernel_getcpu)
+ .cfi_startproc
+ mfspr r5,SPRN_SPRG_VDSO_READ
+ cmpdi cr0,r3,0
+ cmpdi cr1,r4,0
+ clrlwi r6,r5,16
+ rlwinm r7,r5,16,31-15,31-0
+ beq cr0,1f
+ stw r6,0(r3)
+1: beq cr1,2f
+ stw r7,0(r4)
+2: crclr cr0*4+so
+ li r3,0 /* always success */
+ blr
+ .cfi_endproc
+V_FUNCTION_END(__kernel_getcpu)
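
As the new __kernel_getcpu shows, the kernel pre-loads the SPRG read by the vDSO with the CPU number in the low 16 bits and the NUMA node in the next 16 bits. A C sketch of the unpacking, where 'sprg' stands in for the value of mfspr(SPRN_SPRG_VDSO_READ) and the helper name is illustrative:

#include <stdint.h>

static int getcpu_sketch(uint32_t sprg, unsigned int *cpu, unsigned int *node)
{
	if (cpu)
		*cpu = sprg & 0xffff;          /* clrlwi r6,r5,16 */
	if (node)
		*node = (sprg >> 16) & 0xffff; /* rlwinm r7,r5,16,16,31 */
	return 0;                              /* always succeeds */
}
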
diff --git a/arch/powerpc/kernel/vdso64/gettimeofday.S b/arch/powerpc/kernel/vdso64/gettimeofday.S
index 56e76ff5498..a76b4af37ef 100644
--- a/arch/powerpc/kernel/vdso64/gettimeofday.S
+++ b/arch/powerpc/kernel/vdso64/gettimeofday.S
@@ -1,5 +1,4 @@
-
- /*
+/*
* Userland implementation of gettimeofday() for 64 bits processes in a
* ppc64 kernel for use in the vDSO
*
@@ -32,19 +31,14 @@ V_FUNCTION_BEGIN(__kernel_gettimeofday)
mr r11,r3 /* r11 holds tv */
mr r10,r4 /* r10 holds tz */
bl V_LOCAL_FUNC(__get_datapage) /* get data page */
- bl V_LOCAL_FUNC(__do_get_xsec) /* get xsec from tb & kernel */
- lis r7,15 /* r7 = 1000000 = USEC_PER_SEC */
- ori r7,r7,16960
- rldicl r5,r4,44,20 /* r5 = sec = xsec / XSEC_PER_SEC */
- rldicr r6,r5,20,43 /* r6 = sec * XSEC_PER_SEC */
- std r5,TVAL64_TV_SEC(r11) /* store sec in tv */
- subf r0,r6,r4 /* r0 = xsec = (xsec - r6) */
- mulld r0,r0,r7 /* usec = (xsec * USEC_PER_SEC) /
- * XSEC_PER_SEC
- */
- rldicl r0,r0,44,20
- cmpldi cr0,r10,0 /* check if tz is NULL */
- std r0,TVAL64_TV_USEC(r11) /* store usec in tv */
+ cmpldi r11,0 /* check if tv is NULL */
+ beq 2f
+ lis r7,1000000@ha /* load up USEC_PER_SEC */
+ addi r7,r7,1000000@l
+ bl V_LOCAL_FUNC(__do_get_tspec) /* get sec/us from tb & kernel */
+ std r4,TVAL64_TV_SEC(r11) /* store sec in tv */
+ std r5,TVAL64_TV_USEC(r11) /* store usec in tv */
+2: cmpldi r10,0 /* check if tz is NULL */
beq 1f
lwz r4,CFG_TZ_MINUTEWEST(r3)/* fill tz */
lwz r5,CFG_TZ_DSTTIME(r3)
@@ -74,90 +68,51 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
mflr r12 /* r12 saves lr */
.cfi_register lr,r12
- mr r10,r3 /* r10 saves id */
mr r11,r4 /* r11 saves tp */
bl V_LOCAL_FUNC(__get_datapage) /* get data page */
- beq cr1,50f /* if monotonic -> jump there */
-
- /*
- * CLOCK_REALTIME
- */
-
- bl V_LOCAL_FUNC(__do_get_xsec) /* get xsec from tb & kernel */
-
- lis r7,15 /* r7 = 1000000 = USEC_PER_SEC */
- ori r7,r7,16960
- rldicl r5,r4,44,20 /* r5 = sec = xsec / XSEC_PER_SEC */
- rldicr r6,r5,20,43 /* r6 = sec * XSEC_PER_SEC */
- std r5,TSPC64_TV_SEC(r11) /* store sec in tv */
- subf r0,r6,r4 /* r0 = xsec = (xsec - r6) */
- mulld r0,r0,r7 /* usec = (xsec * USEC_PER_SEC) /
- * XSEC_PER_SEC
- */
- rldicl r0,r0,44,20
- mulli r0,r0,1000 /* nsec = usec * 1000 */
- std r0,TSPC64_TV_NSEC(r11) /* store nsec in tp */
-
- mtlr r12
- crclr cr0*4+so
- li r3,0
- blr
+ lis r7,NSEC_PER_SEC@h /* want nanoseconds */
+ ori r7,r7,NSEC_PER_SEC@l
+50: bl V_LOCAL_FUNC(__do_get_tspec) /* get time from tb & kernel */
+ bne cr1,80f /* if not monotonic, all done */
/*
* CLOCK_MONOTONIC
*/
-50: bl V_LOCAL_FUNC(__do_get_xsec) /* get xsec from tb & kernel */
-
- lis r7,15 /* r7 = 1000000 = USEC_PER_SEC */
- ori r7,r7,16960
- rldicl r5,r4,44,20 /* r5 = sec = xsec / XSEC_PER_SEC */
- rldicr r6,r5,20,43 /* r6 = sec * XSEC_PER_SEC */
- subf r0,r6,r4 /* r0 = xsec = (xsec - r6) */
- mulld r0,r0,r7 /* usec = (xsec * USEC_PER_SEC) /
- * XSEC_PER_SEC
- */
- rldicl r6,r0,44,20
- mulli r6,r6,1000 /* nsec = usec * 1000 */
-
/* now we must fixup using wall to monotonic. We need to snapshot
* that value and do the counter trick again. Fortunately, we still
- * have the counter value in r8 that was returned by __do_get_xsec.
- * At this point, r5,r6 contain our sec/nsec values.
- * can be used
+ * have the counter value in r8 that was returned by __do_get_tspec.
+ * At this point, r4,r5 contain our sec/nsec values.
*/
- lwa r4,WTOM_CLOCK_SEC(r3)
- lwa r7,WTOM_CLOCK_NSEC(r3)
+ lwa r6,WTOM_CLOCK_SEC(r3)
+ lwa r9,WTOM_CLOCK_NSEC(r3)
- /* We now have our result in r4,r7. We create a fake dependency
+ /* We now have our result in r6,r9. We create a fake dependency
* on that result and re-check the counter
*/
- or r9,r4,r7
- xor r0,r9,r9
+ or r0,r6,r9
+ xor r0,r0,r0
add r3,r3,r0
ld r0,CFG_TB_UPDATE_COUNT(r3)
cmpld cr0,r0,r8 /* check if updated */
bne- 50b
- /* Calculate and store result. Note that this mimmics the C code,
- * which may cause funny results if nsec goes negative... is that
- * possible at all ?
+ /* Add wall->monotonic offset and check for overflow or underflow.
*/
- add r4,r4,r5
- add r7,r7,r6
- lis r9,NSEC_PER_SEC@h
- ori r9,r9,NSEC_PER_SEC@l
- cmpl cr0,r7,r9
- cmpli cr1,r7,0
+ add r4,r4,r6
+ add r5,r5,r9
+ cmpd cr0,r5,r7
+ cmpdi cr1,r5,0
blt 1f
- subf r7,r9,r7
+ subf r5,r7,r5
addi r4,r4,1
-1: bge cr1,1f
+1: bge cr1,80f
addi r4,r4,-1
- add r7,r7,r9
-1: std r4,TSPC64_TV_SEC(r11)
- std r7,TSPC64_TV_NSEC(r11)
+ add r5,r5,r7
+
+80: std r4,TSPC64_TV_SEC(r11)
+ std r5,TSPC64_TV_NSEC(r11)
mtlr r12
crclr cr0*4+so
@@ -167,10 +122,6 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
/*
* syscall fallback
*/
-98:
- mtlr r12
- mr r3,r10
- mr r4,r11
99:
li r0,__NR_clock_gettime
sc
@@ -213,14 +164,44 @@ V_FUNCTION_BEGIN(__kernel_clock_getres)
.cfi_endproc
V_FUNCTION_END(__kernel_clock_getres)
+/*
+ * Exact prototype of time()
+ *
+ * time_t time(time *t);
+ *
+ */
+V_FUNCTION_BEGIN(__kernel_time)
+ .cfi_startproc
+ mflr r12
+ .cfi_register lr,r12
+
+ mr r11,r3 /* r11 holds t */
+ bl V_LOCAL_FUNC(__get_datapage)
+
+ ld r4,STAMP_XTIME+TSPC64_TV_SEC(r3)
+
+ cmpldi r11,0 /* check if t is NULL */
+ beq 2f
+ std r4,0(r11) /* store result at *t */
+2: mtlr r12
+ crclr cr0*4+so
+ mr r3,r4
+ blr
+ .cfi_endproc
+V_FUNCTION_END(__kernel_time)
+
/*
- * This is the core of gettimeofday(), it returns the xsec
- * value in r4 and expects the datapage ptr (non clobbered)
- * in r3. clobbers r0,r4,r5,r6,r7,r8
- * When returning, r8 contains the counter value that can be reused
+ * This is the core of clock_gettime() and gettimeofday(),
+ * it returns the current time in r4 (seconds) and r5.
+ * On entry, r7 gives the resolution of r5, either USEC_PER_SEC
+ * or NSEC_PER_SEC, giving r5 in microseconds or nanoseconds.
+ * It expects the datapage ptr in r3 and doesn't clobber it.
+ * It clobbers r0, r6 and r9.
+ * On return, r8 contains the counter value that can be reused.
+ * This clobbers cr0 but not any other cr field.
*/
-V_FUNCTION_BEGIN(__do_get_xsec)
+V_FUNCTION_BEGIN(__do_get_tspec)
.cfi_startproc
/* check for update count & load values */
1: ld r8,CFG_TB_UPDATE_COUNT(r3)
@@ -229,24 +210,35 @@ V_FUNCTION_BEGIN(__do_get_xsec)
xor r0,r8,r8 /* create dependency */
add r3,r3,r0
- /* Get TB & offset it */
- mftb r7
+ /* Get TB & offset it. We use the MFTB macro which will generate
+ * workaround code for Cell.
+ */
+ MFTB(r6)
ld r9,CFG_TB_ORIG_STAMP(r3)
- subf r7,r9,r7
+ subf r6,r9,r6
/* Scale result */
ld r5,CFG_TB_TO_XS(r3)
- mulhdu r7,r7,r5
+ sldi r6,r6,12 /* compute time since stamp_xtime */
+ mulhdu r6,r6,r5 /* in units of 2^-32 seconds */
/* Add stamp since epoch */
- ld r6,CFG_STAMP_XSEC(r3)
- add r4,r6,r7
-
- xor r0,r4,r4
+ ld r4,STAMP_XTIME+TSPC64_TV_SEC(r3)
+ lwz r5,STAMP_SEC_FRAC(r3)
+ or r0,r4,r5
+ or r0,r0,r6
+ xor r0,r0,r0
add r3,r3,r0
ld r0,CFG_TB_UPDATE_COUNT(r3)
- cmpld cr0,r0,r8 /* check if updated */
- bne- 1b
+ cmpld r0,r8 /* check if updated */
+ bne- 1b /* reload if so */
+
+ /* convert to seconds & nanoseconds and add to stamp */
+ add r6,r6,r5 /* add on fractional seconds of xtime */
+ mulhwu r5,r6,r7 /* compute micro or nanoseconds and */
+ srdi r6,r6,32 /* seconds since stamp_xtime */
+ clrldi r5,r5,32
+ add r4,r4,r6
blr
.cfi_endproc
-V_FUNCTION_END(__do_get_xsec)
+V_FUNCTION_END(__do_get_tspec)
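
The core change here replaces __do_get_xsec with __do_get_tspec, which works in 2^-32-second fixed point and scales the fractional part by whatever resolution the caller loads into r7 (USEC_PER_SEC or NSEC_PER_SEC). A rough C rendering of that math, under the assumption that the datapage fields have already been read into the named parameters; the helper name is illustrative and the 128-bit multiply stands in for mulhdu:

#include <stdint.h>

static void get_tspec_sketch(uint64_t tb, uint64_t tb_orig_stamp,
                             uint64_t tb_to_xs, uint64_t stamp_xtime_sec,
                             uint32_t stamp_sec_frac, uint32_t scale,
                             uint64_t *sec, uint64_t *frac_out)
{
	/* time since stamp_xtime in units of 2^-32 seconds: mulhdu(delta << 12, tb_to_xs) */
	uint64_t delta = (tb - tb_orig_stamp) << 12;
	uint64_t since = (uint64_t)(((unsigned __int128)delta * tb_to_xs) >> 64);

	since += stamp_sec_frac;                /* add xtime's fractional second */
	*sec = stamp_xtime_sec + (since >> 32); /* whole seconds since the epoch */
	/* low 32 bits scaled to usec or nsec: mulhwu(frac, scale) */
	*frac_out = ((uint64_t)(uint32_t)since * scale) >> 32;
}
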
diff --git a/arch/powerpc/kernel/vdso64/sigtramp.S b/arch/powerpc/kernel/vdso64/sigtramp.S
index 17a83fa6dc5..542c6f422e4 100644
--- a/arch/powerpc/kernel/vdso64/sigtramp.S
+++ b/arch/powerpc/kernel/vdso64/sigtramp.S
@@ -20,7 +20,7 @@
/* The nop here is a hack. The dwarf2 unwind routines subtract 1 from
the return address to get an address in the middle of the presumed
- call instruction. Since we don't have a call here, we artifically
+ call instruction. Since we don't have a call here, we artificially
extend the range covered by the unwind info by padding before the
real start. */
nop
@@ -134,13 +134,23 @@ V_FUNCTION_END(__kernel_sigtramp_rt64)
9:
/* This is where the pt_regs pointer can be found on the stack. */
-#define PTREGS 128+168+56
+#define PTREGS 128+168+56
/* Size of regs. */
-#define RSIZE 8
+#define RSIZE 8
+
+/* Size of CR reg in DWARF unwind info. */
+#define CRSIZE 4
+
+/* Offset of CR reg within a full word. */
+#ifdef __LITTLE_ENDIAN__
+#define CROFF 0
+#else
+#define CROFF (RSIZE - CRSIZE)
+#endif
/* This is the offset of the VMX reg pointer. */
-#define VREGS 48*RSIZE+33*8
+#define VREGS 48*RSIZE+33*8
/* Describe where general purpose regs are saved. */
#define EH_FRAME_GEN \
@@ -178,7 +188,14 @@ V_FUNCTION_END(__kernel_sigtramp_rt64)
rsave (31, 31*RSIZE); \
rsave (67, 32*RSIZE); /* ap, used as temp for nip */ \
rsave (65, 36*RSIZE); /* lr */ \
- rsave (70, 38*RSIZE) /* cr */
+ rsave (68, 38*RSIZE + CROFF); /* cr fields */ \
+ rsave (69, 38*RSIZE + CROFF); \
+ rsave (70, 38*RSIZE + CROFF); \
+ rsave (71, 38*RSIZE + CROFF); \
+ rsave (72, 38*RSIZE + CROFF); \
+ rsave (73, 38*RSIZE + CROFF); \
+ rsave (74, 38*RSIZE + CROFF); \
+ rsave (75, 38*RSIZE + CROFF)
/* Describe where the FP regs are saved. */
#define EH_FRAME_FP \
diff --git a/arch/powerpc/kernel/vdso64/vdso64.lds.S b/arch/powerpc/kernel/vdso64/vdso64.lds.S
index 4a2b6dc0960..64fb183a47c 100644
--- a/arch/powerpc/kernel/vdso64/vdso64.lds.S
+++ b/arch/powerpc/kernel/vdso64/vdso64.lds.S
@@ -4,96 +4,130 @@
*/
#include <asm/vdso.h>
+#ifdef __LITTLE_ENDIAN__
+OUTPUT_FORMAT("elf64-powerpcle", "elf64-powerpcle", "elf64-powerpcle")
+#else
OUTPUT_FORMAT("elf64-powerpc", "elf64-powerpc", "elf64-powerpc")
+#endif
OUTPUT_ARCH(powerpc:common64)
ENTRY(_start)
SECTIONS
{
- . = VDSO64_LBASE + SIZEOF_HEADERS;
- .hash : { *(.hash) } :text
- .gnu.hash : { *(.gnu.hash) }
- .dynsym : { *(.dynsym) }
- .dynstr : { *(.dynstr) }
- .gnu.version : { *(.gnu.version) }
- .gnu.version_d : { *(.gnu.version_d) }
- .gnu.version_r : { *(.gnu.version_r) }
-
- .note : { *(.note.*) } :text :note
-
- . = ALIGN (16);
- .text :
- {
- *(.text .stub .text.* .gnu.linkonce.t.*)
- *(.sfpr .glink)
- } :text
- PROVIDE (__etext = .);
- PROVIDE (_etext = .);
- PROVIDE (etext = .);
-
- /* Other stuff is appended to the text segment: */
- .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
- .rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr
- .eh_frame : { KEEP (*(.eh_frame)) } :text
- .gcc_except_table : { *(.gcc_except_table) }
-
- .opd ALIGN(8) : { KEEP (*(.opd)) }
- .got ALIGN(8) : { *(.got .toc) }
- .rela.dyn ALIGN(8) : { *(.rela.dyn) }
-
- .dynamic : { *(.dynamic) } :text :dynamic
-
- _end = .;
- PROVIDE (end = .);
-
- /* Stabs debugging sections are here too
- */
- .stab 0 : { *(.stab) }
- .stabstr 0 : { *(.stabstr) }
- .stab.excl 0 : { *(.stab.excl) }
- .stab.exclstr 0 : { *(.stab.exclstr) }
- .stab.index 0 : { *(.stab.index) }
- .stab.indexstr 0 : { *(.stab.indexstr) }
- .comment 0 : { *(.comment) }
- /* DWARF debug sectio/ns.
- Symbols in the DWARF debugging sections are relative to the beginning
- of the section so we begin them at 0. */
- /* DWARF 1 */
- .debug 0 : { *(.debug) }
- .line 0 : { *(.line) }
- /* GNU DWARF 1 extensions */
- .debug_srcinfo 0 : { *(.debug_srcinfo) }
- .debug_sfnames 0 : { *(.debug_sfnames) }
- /* DWARF 1.1 and DWARF 2 */
- .debug_aranges 0 : { *(.debug_aranges) }
- .debug_pubnames 0 : { *(.debug_pubnames) }
- /* DWARF 2 */
- .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
- .debug_abbrev 0 : { *(.debug_abbrev) }
- .debug_line 0 : { *(.debug_line) }
- .debug_frame 0 : { *(.debug_frame) }
- .debug_str 0 : { *(.debug_str) }
- .debug_loc 0 : { *(.debug_loc) }
- .debug_macinfo 0 : { *(.debug_macinfo) }
- /* SGI/MIPS DWARF 2 extensions */
- .debug_weaknames 0 : { *(.debug_weaknames) }
- .debug_funcnames 0 : { *(.debug_funcnames) }
- .debug_typenames 0 : { *(.debug_typenames) }
- .debug_varnames 0 : { *(.debug_varnames) }
-
- /DISCARD/ : { *(.note.GNU-stack) }
- /DISCARD/ : { *(.branch_lt) }
- /DISCARD/ : { *(.data .data.* .gnu.linkonce.d.*) }
- /DISCARD/ : { *(.bss .sbss .dynbss .dynsbss) }
+ . = VDSO64_LBASE + SIZEOF_HEADERS;
+
+ .hash : { *(.hash) } :text
+ .gnu.hash : { *(.gnu.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+
+ .note : { *(.note.*) } :text :note
+
+ . = ALIGN(16);
+ .text : {
+ *(.text .stub .text.* .gnu.linkonce.t.* __ftr_alt_*)
+ *(.sfpr .glink)
+ } :text
+ PROVIDE(__etext = .);
+ PROVIDE(_etext = .);
+ PROVIDE(etext = .);
+
+ . = ALIGN(8);
+ __ftr_fixup : { *(__ftr_fixup) }
+
+ . = ALIGN(8);
+ __mmu_ftr_fixup : { *(__mmu_ftr_fixup) }
+
+ . = ALIGN(8);
+ __lwsync_fixup : { *(__lwsync_fixup) }
+
+ . = ALIGN(8);
+ __fw_ftr_fixup : { *(__fw_ftr_fixup) }
+
+ /*
+ * Other stuff is appended to the text segment:
+ */
+ .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
+ .rodata1 : { *(.rodata1) }
+
+ .dynamic : { *(.dynamic) } :text :dynamic
+
+ .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr
+ .eh_frame : { KEEP (*(.eh_frame)) } :text
+ .gcc_except_table : { *(.gcc_except_table) }
+ .rela.dyn ALIGN(8) : { *(.rela.dyn) }
+
+ .opd ALIGN(8) : { KEEP (*(.opd)) }
+ .got ALIGN(8) : { *(.got .toc) }
+
+ _end = .;
+ PROVIDE(end = .);
+
+ /*
+ * Stabs debugging sections are here too.
+ */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+
+ /*
+ * DWARF debug sections.
+ * Symbols in the DWARF debugging sections are relative to the beginning
+ * of the section so we begin them at 0.
+ */
+ /* DWARF 1 */
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ /* GNU DWARF 1 extensions */
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ /* DWARF 1.1 and DWARF 2 */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ /* DWARF 2 */
+ .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ /* SGI/MIPS DWARF 2 extensions */
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+
+ /DISCARD/ : {
+ *(.note.GNU-stack)
+ *(.branch_lt)
+ *(.data .data.* .gnu.linkonce.d.* .sdata*)
+ *(.bss .sbss .dynbss .dynsbss)
+ }
}
+/*
+ * Very old versions of ld do not recognize this name token; use the constant.
+ */
+#define PT_GNU_EH_FRAME 0x6474e550
+
+/*
+ * We must supply the ELF program headers explicitly to get just one
+ * PT_LOAD segment, and set the flags explicitly to make segments read-only.
+ */
PHDRS
{
- text PT_LOAD FILEHDR PHDRS FLAGS(5); /* PF_R|PF_X */
- note PT_NOTE FLAGS(4); /* PF_R */
- dynamic PT_DYNAMIC FLAGS(4); /* PF_R */
- eh_frame_hdr 0x6474e550; /* PT_GNU_EH_FRAME, but ld doesn't match the name */
+ text PT_LOAD FILEHDR PHDRS FLAGS(5); /* PF_R|PF_X */
+ dynamic PT_DYNAMIC FLAGS(4); /* PF_R */
+ note PT_NOTE FLAGS(4); /* PF_R */
+ eh_frame_hdr PT_GNU_EH_FRAME;
}
/*
@@ -101,17 +135,24 @@ PHDRS
*/
VERSION
{
- VDSO_VERSION_STRING {
- global:
- __kernel_datapage_offset; /* Has to be there for the kernel to find */
- __kernel_get_syscall_map;
- __kernel_gettimeofday;
- __kernel_clock_gettime;
- __kernel_clock_getres;
- __kernel_get_tbfreq;
- __kernel_sync_dicache;
- __kernel_sync_dicache_p5;
- __kernel_sigtramp_rt64;
- local: *;
- };
+ VDSO_VERSION_STRING {
+ global:
+ /*
+ * Has to be there for the kernel to find
+ */
+ __kernel_datapage_offset;
+
+ __kernel_get_syscall_map;
+ __kernel_gettimeofday;
+ __kernel_clock_gettime;
+ __kernel_clock_getres;
+ __kernel_get_tbfreq;
+ __kernel_sync_dicache;
+ __kernel_sync_dicache_p5;
+ __kernel_sigtramp_rt64;
+ __kernel_getcpu;
+ __kernel_time;
+
+ local: *;
+ };
}
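
The VERSION block now also exports __kernel_getcpu and __kernel_time. These are the entry points the C library typically binds the corresponding calls to once it has located the vDSO, so a small test program like the one below usually exercises all of them without entering the kernel; this is an illustrative usage example, not part of the patch:

#define _GNU_SOURCE
#include <stdio.h>
#include <sched.h>
#include <time.h>
#include <sys/time.h>

int main(void)
{
	struct timespec ts;
	struct timeval tv;

	clock_gettime(CLOCK_MONOTONIC, &ts);   /* __kernel_clock_gettime */
	gettimeofday(&tv, NULL);               /* __kernel_gettimeofday  */
	printf("mono %lld.%09ld  wall %lld.%06ld  cpu %d  time %lld\n",
	       (long long)ts.tv_sec, ts.tv_nsec,
	       (long long)tv.tv_sec, (long)tv.tv_usec,
	       sched_getcpu(),                 /* __kernel_getcpu */
	       (long long)time(NULL));         /* __kernel_time   */
	return 0;
}
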
diff --git a/arch/powerpc/kernel/vdso64/vdso64_wrapper.S b/arch/powerpc/kernel/vdso64/vdso64_wrapper.S
index 0529cb9e3b9..df60fca6a13 100644
--- a/arch/powerpc/kernel/vdso64/vdso64_wrapper.S
+++ b/arch/powerpc/kernel/vdso64/vdso64_wrapper.S
@@ -1,12 +1,12 @@
-#include <linux/init.h>
+#include <linux/linkage.h>
#include <asm/page.h>
- .section ".data.page_aligned"
+ __PAGE_ALIGNED_DATA
.globl vdso64_start, vdso64_end
.balign PAGE_SIZE
vdso64_start:
- .incbin "arch/powerpc/kernel/vdso64/vdso64.so"
+ .incbin "arch/powerpc/kernel/vdso64/vdso64.so.dbg"
.balign PAGE_SIZE
vdso64_end: