Diffstat (limited to 'arch/ia64/kernel')
-rw-r--r--  arch/ia64/kernel/Makefile.gate    |   2
-rw-r--r--  arch/ia64/kernel/entry.S          |   3
-rw-r--r--  arch/ia64/kernel/gate-data.S      |   2
-rw-r--r--  arch/ia64/kernel/gate.S           |   8
-rw-r--r--  arch/ia64/kernel/gate.lds.S       |  10
-rw-r--r--  arch/ia64/kernel/init_task.c      |   2
-rw-r--r--  arch/ia64/kernel/ivt.S            |   2
-rw-r--r--  arch/ia64/kernel/minstate.h       |   4
-rw-r--r--  arch/ia64/kernel/msi_ia64.c       |   2
-rw-r--r--  arch/ia64/kernel/paravirtentry.S  |   2
-rw-r--r--  arch/ia64/kernel/perfmon.c        |  37
-rw-r--r--  arch/ia64/kernel/process.c        |   2
-rw-r--r--  arch/ia64/kernel/setup.c          |   6
-rw-r--r--  arch/ia64/kernel/smpboot.c        |  19
-rw-r--r--  arch/ia64/kernel/time.c           |   7
-rw-r--r--  arch/ia64/kernel/vmlinux.lds.S    | 362
16 files changed, 239 insertions, 231 deletions
diff --git a/arch/ia64/kernel/Makefile.gate b/arch/ia64/kernel/Makefile.gate
index ab9b03a9adc..ceeffc50976 100644
--- a/arch/ia64/kernel/Makefile.gate
+++ b/arch/ia64/kernel/Makefile.gate
@@ -21,7 +21,7 @@ GATECFLAGS_gate-syms.o = -r
$(obj)/gate-syms.o: $(obj)/gate.lds $(obj)/gate.o FORCE
$(call if_changed,gate)
-# gate-data.o contains the gate DSO image as data in section .data.gate.
+# gate-data.o contains the gate DSO image as data in section .data..gate.
# We must build gate.so before we can assemble it.
# Note: kbuild does not track this dependency due to usage of .incbin
$(obj)/gate-data.o: $(obj)/gate.so
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 9a260b317d8..244704a174d 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1768,6 +1768,9 @@ sys_call_table:
data8 sys_pwritev // 1320
data8 sys_rt_tgsigqueueinfo
data8 sys_recvmmsg
+ data8 sys_fanotify_init
+ data8 sys_fanotify_mark
+ data8 sys_prlimit64 // 1325
.org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls
#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
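The .org directive above acts as a build-time size guard: adding syscall entries without bumping NR_syscalls makes the assembler fail instead of silently overrunning the table. A minimal C analogue of that guard, with illustrative names that are not kernel code:

    #include <stdio.h>

    #define NR_CALLS 3                       /* expected number of table entries */

    typedef long (*call_fn)(void);

    static long call_a(void) { return 0; }
    static long call_b(void) { return 1; }
    static long call_c(void) { return 2; }

    static call_fn call_table[] = { call_a, call_b, call_c };

    /* Compile-time guard: the array length must match NR_CALLS, just as
     * .org pins the end of the table to sys_call_table + 8*NR_syscalls. */
    typedef char call_table_size_guard[
        (sizeof(call_table) / sizeof(call_table[0]) == NR_CALLS) ? 1 : -1];

    int main(void)
    {
        for (int i = 0; i < NR_CALLS; i++)
            printf("call %d -> %ld\n", i, call_table[i]());
        return 0;
    }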
diff --git a/arch/ia64/kernel/gate-data.S b/arch/ia64/kernel/gate-data.S
index 258c0a3238f..b3ef1c72e13 100644
--- a/arch/ia64/kernel/gate-data.S
+++ b/arch/ia64/kernel/gate-data.S
@@ -1,3 +1,3 @@
- .section .data.gate, "aw"
+ .section .data..gate, "aw"
.incbin "arch/ia64/kernel/gate.so"
diff --git a/arch/ia64/kernel/gate.S b/arch/ia64/kernel/gate.S
index cf5e0a105e1..245d3e1ec7e 100644
--- a/arch/ia64/kernel/gate.S
+++ b/arch/ia64/kernel/gate.S
@@ -21,18 +21,18 @@
* to targets outside the shared object) and to avoid multi-phase kernel builds, we
* simply create minimalistic "patch lists" in special ELF sections.
*/
- .section ".data.patch.fsyscall_table", "a"
+ .section ".data..patch.fsyscall_table", "a"
.previous
#define LOAD_FSYSCALL_TABLE(reg) \
[1:] movl reg=0; \
- .xdata4 ".data.patch.fsyscall_table", 1b-.
+ .xdata4 ".data..patch.fsyscall_table", 1b-.
- .section ".data.patch.brl_fsys_bubble_down", "a"
+ .section ".data..patch.brl_fsys_bubble_down", "a"
.previous
#define BRL_COND_FSYS_BUBBLE_DOWN(pr) \
[1:](pr)brl.cond.sptk 0; \
;; \
- .xdata4 ".data.patch.brl_fsys_bubble_down", 1b-.
+ .xdata4 ".data..patch.brl_fsys_bubble_down", 1b-.
GLOBAL_ENTRY(__kernel_syscall_via_break)
.prologue
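The patch-list technique described in the comment above has each use site record its own location in a dedicated ELF section so a single fixup pass can rewrite all the placeholders later. A rough, self-contained C sketch of that idea, using illustrative names rather than the kernel's actual patching code:

    #include <stddef.h>
    #include <stdio.h>

    #define MAX_PATCH_SITES 16

    static int *patch_sites[MAX_PATCH_SITES];   /* stands in for the ELF patch section */
    static size_t nr_patch_sites;

    /* stand-in for '.xdata4 <section>, 1b-.': remember where the placeholder lives */
    static void record_patch_site(int *site)
    {
        patch_sites[nr_patch_sites++] = site;
    }

    /* stand-in for the boot-time patcher that fills in the real value */
    static void apply_patches(int real_value)
    {
        for (size_t i = 0; i < nr_patch_sites; i++)
            *patch_sites[i] = real_value;
    }

    int main(void)
    {
        static int fsyscall_table_addr = 0;      /* placeholder, like "movl reg=0" */

        record_patch_site(&fsyscall_table_addr); /* the use site registers itself */
        apply_patches(0x1234);                   /* the fixup pass patches it later */
        printf("%#x\n", fsyscall_table_addr);
        return 0;
    }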
diff --git a/arch/ia64/kernel/gate.lds.S b/arch/ia64/kernel/gate.lds.S
index 88c64ed47c3..d32b0855110 100644
--- a/arch/ia64/kernel/gate.lds.S
+++ b/arch/ia64/kernel/gate.lds.S
@@ -33,21 +33,21 @@ SECTIONS
*/
. = GATE_ADDR + 0x600;
- .data.patch : {
+ .data..patch : {
__paravirt_start_gate_mckinley_e9_patchlist = .;
- *(.data.patch.mckinley_e9)
+ *(.data..patch.mckinley_e9)
__paravirt_end_gate_mckinley_e9_patchlist = .;
__paravirt_start_gate_vtop_patchlist = .;
- *(.data.patch.vtop)
+ *(.data..patch.vtop)
__paravirt_end_gate_vtop_patchlist = .;
__paravirt_start_gate_fsyscall_patchlist = .;
- *(.data.patch.fsyscall_table)
+ *(.data..patch.fsyscall_table)
__paravirt_end_gate_fsyscall_patchlist = .;
__paravirt_start_gate_brl_fsys_bubble_down_patchlist = .;
- *(.data.patch.brl_fsys_bubble_down)
+ *(.data..patch.brl_fsys_bubble_down)
__paravirt_end_gate_brl_fsys_bubble_down_patchlist = .;
} :readable
diff --git a/arch/ia64/kernel/init_task.c b/arch/ia64/kernel/init_task.c
index e253ab8fcbc..f9efe9739d3 100644
--- a/arch/ia64/kernel/init_task.c
+++ b/arch/ia64/kernel/init_task.c
@@ -23,7 +23,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
* Initial task structure.
*
* We need to make sure that this is properly aligned due to the way process stacks are
- * handled. This is done by having a special ".data.init_task" section...
+ * handled. This is done by having a special ".data..init_task" section...
*/
#define init_thread_info init_task_mem.s.thread_info
diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S
index 179fd122e83..d93e396bf59 100644
--- a/arch/ia64/kernel/ivt.S
+++ b/arch/ia64/kernel/ivt.S
@@ -82,7 +82,7 @@
mov r19=n;; /* prepare to save predicates */ \
br.sptk.many dispatch_to_fault_handler
- .section .text.ivt,"ax"
+ .section .text..ivt,"ax"
.align 32768 // align on 32KB boundary
.global ia64_ivt
diff --git a/arch/ia64/kernel/minstate.h b/arch/ia64/kernel/minstate.h
index 292e214a3b8..d56753a1163 100644
--- a/arch/ia64/kernel/minstate.h
+++ b/arch/ia64/kernel/minstate.h
@@ -16,7 +16,7 @@
#define ACCOUNT_SYS_ENTER
#endif
-.section ".data.patch.rse", "a"
+.section ".data..patch.rse", "a"
.previous
/*
@@ -215,7 +215,7 @@
(pUStk) extr.u r17=r18,3,6; \
(pUStk) sub r16=r18,r22; \
[1:](pKStk) br.cond.sptk.many 1f; \
- .xdata4 ".data.patch.rse",1b-. \
+ .xdata4 ".data..patch.rse",1b-. \
;; \
cmp.ge p6,p7 = 33,r17; \
;; \
diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c
index 6c892285604..4a746ea838f 100644
--- a/arch/ia64/kernel/msi_ia64.c
+++ b/arch/ia64/kernel/msi_ia64.c
@@ -25,7 +25,7 @@ static int ia64_set_msi_irq_affinity(unsigned int irq,
if (irq_prepare_move(irq, cpu))
return -1;
- read_msi_msg(irq, &msg);
+ get_cached_msi_msg(irq, &msg);
addr = msg.address_lo;
addr &= MSI_ADDR_DEST_ID_MASK;
diff --git a/arch/ia64/kernel/paravirtentry.S b/arch/ia64/kernel/paravirtentry.S
index 6158560d7f1..92d880c4d3d 100644
--- a/arch/ia64/kernel/paravirtentry.S
+++ b/arch/ia64/kernel/paravirtentry.S
@@ -28,7 +28,7 @@
#include "entry.h"
#define DATA8(sym, init_value) \
- .pushsection .data.read_mostly ; \
+ .pushsection .data..read_mostly ; \
.align 8 ; \
.global sym ; \
sym: ; \
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index ab985f785c1..cce050e85c7 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -1696,8 +1696,8 @@ pfm_poll(struct file *filp, poll_table * wait)
return mask;
}
-static int
-pfm_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
+static long
+pfm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
DPRINT(("pfm_ioctl called\n"));
return -EINVAL;
@@ -2174,15 +2174,15 @@ pfm_no_open(struct inode *irrelevant, struct file *dontcare)
static const struct file_operations pfm_file_ops = {
- .llseek = no_llseek,
- .read = pfm_read,
- .write = pfm_write,
- .poll = pfm_poll,
- .ioctl = pfm_ioctl,
- .open = pfm_no_open, /* special open code to disallow open via /proc */
- .fasync = pfm_fasync,
- .release = pfm_close,
- .flush = pfm_flush
+ .llseek = no_llseek,
+ .read = pfm_read,
+ .write = pfm_write,
+ .poll = pfm_poll,
+ .unlocked_ioctl = pfm_ioctl,
+ .open = pfm_no_open, /* special open code to disallow open via /proc */
+ .fasync = pfm_fasync,
+ .release = pfm_close,
+ .flush = pfm_flush
};
static int
@@ -2191,8 +2191,15 @@ pfmfs_delete_dentry(struct dentry *dentry)
return 1;
}
+static char *pfmfs_dname(struct dentry *dentry, char *buffer, int buflen)
+{
+ return dynamic_dname(dentry, buffer, buflen, "pfm:[%lu]",
+ dentry->d_inode->i_ino);
+}
+
static const struct dentry_operations pfmfs_dentry_operations = {
.d_delete = pfmfs_delete_dentry,
+ .d_dname = pfmfs_dname,
};
@@ -2202,8 +2209,7 @@ pfm_alloc_file(pfm_context_t *ctx)
struct file *file;
struct inode *inode;
struct path path;
- char name[32];
- struct qstr this;
+ struct qstr this = { .name = "" };
/*
* allocate a new inode
@@ -2218,11 +2224,6 @@ pfm_alloc_file(pfm_context_t *ctx)
inode->i_uid = current_fsuid();
inode->i_gid = current_fsgid();
- sprintf(name, "[%lu]", inode->i_ino);
- this.name = name;
- this.len = strlen(name);
- this.hash = inode->i_ino;
-
/*
* allocate a new dcache entry
*/
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 53f1648c8b8..a879c03b7f1 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -633,7 +633,7 @@ dump_fpu (struct pt_regs *pt, elf_fpregset_t dst)
}
long
-sys_execve (char __user *filename, char __user * __user *argv, char __user * __user *envp,
+sys_execve (const char __user *filename, char __user * __user *argv, char __user * __user *envp,
struct pt_regs *regs)
{
char *fname;
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 41ae6a596b5..8fb958abf8d 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -98,12 +98,6 @@ static struct resource bss_resource = {
unsigned long ia64_max_cacheline_size;
-int dma_get_cache_alignment(void)
-{
- return ia64_max_cacheline_size;
-}
-EXPORT_SYMBOL(dma_get_cache_alignment);
-
unsigned long ia64_iobase; /* virtual address for I/O accesses */
EXPORT_SYMBOL(ia64_iobase);
struct io_space io_space[MAX_IO_SPACES];
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 6a1380e90f8..d003b502a43 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -390,13 +390,11 @@ smp_callin (void)
fix_b0_for_bsp();
-#ifdef CONFIG_NUMA
/*
* numa_node_id() works after this.
*/
set_numa_node(cpu_to_node_map[cpuid]);
set_numa_mem(local_memory_node(cpu_to_node_map[cpuid]));
-#endif
ipi_call_lock_irq();
spin_lock(&vector_lock);
@@ -510,21 +508,18 @@ do_boot_cpu (int sapicid, int cpu)
.done = COMPLETION_INITIALIZER(c_idle.done),
};
+ /*
+ * We can't use kernel_thread since we must avoid to
+ * reschedule the child.
+ */
c_idle.idle = get_idle_for_cpu(cpu);
if (c_idle.idle) {
init_idle(c_idle.idle, cpu);
goto do_rest;
}
- /*
- * We can't use kernel_thread since we must avoid to reschedule the child.
- */
- if (!keventd_up() || current_is_keventd())
- c_idle.work.func(&c_idle.work);
- else {
- schedule_work(&c_idle.work);
- wait_for_completion(&c_idle.done);
- }
+ schedule_work(&c_idle.work);
+ wait_for_completion(&c_idle.done);
if (IS_ERR(c_idle.idle))
panic("failed fork for CPU %d", cpu);
@@ -640,9 +635,7 @@ void __devinit smp_prepare_boot_cpu(void)
{
cpu_set(smp_processor_id(), cpu_online_map);
cpu_set(smp_processor_id(), cpu_callin_map);
-#ifdef CONFIG_NUMA
set_numa_node(cpu_to_node_map[smp_processor_id()]);
-#endif
per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
paravirt_post_smp_prepare_boot_cpu();
}
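The do_boot_cpu() change above drops the keventd special case and always queues the idle-thread setup as a work item, then blocks on its completion. A userspace sketch of that hand-off-and-wait pattern, built on pthreads with names that merely mirror the kernel APIs:

    #include <pthread.h>
    #include <stdio.h>

    struct completion {
        pthread_mutex_t lock;
        pthread_cond_t  cond;
        int             done;
    };

    struct work {
        struct completion done;
        int result;
    };

    static void complete(struct completion *c)
    {
        pthread_mutex_lock(&c->lock);
        c->done = 1;
        pthread_cond_signal(&c->cond);
        pthread_mutex_unlock(&c->lock);
    }

    static void wait_for_done(struct completion *c)
    {
        pthread_mutex_lock(&c->lock);
        while (!c->done)
            pthread_cond_wait(&c->cond, &c->lock);
        pthread_mutex_unlock(&c->lock);
    }

    static void *worker(void *arg)
    {
        struct work *w = arg;

        w->result = 42;              /* stands in for forking the idle thread */
        complete(&w->done);
        return NULL;
    }

    int main(void)
    {
        pthread_t tid;
        struct work w = {
            .done = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0 },
        };

        pthread_create(&tid, NULL, worker, &w);  /* like schedule_work() */
        wait_for_done(&w.done);                  /* like wait_for_completion() */
        printf("result = %d\n", w.result);
        pthread_join(tid, NULL);
        return 0;
    }

The kernel's schedule_work()/wait_for_completion() pair differs in the machinery, but the control flow is the same: the caller never runs the deferred function itself, it only waits for whoever does.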
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 653b3c46ea8..ed6f22eb5b1 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -471,7 +471,8 @@ void update_vsyscall_tz(void)
{
}
-void update_vsyscall(struct timespec *wall, struct clocksource *c, u32 mult)
+void update_vsyscall(struct timespec *wall, struct timespec *wtm,
+ struct clocksource *c, u32 mult)
{
unsigned long flags;
@@ -487,9 +488,9 @@ void update_vsyscall(struct timespec *wall, struct clocksource *c, u32 mult)
/* copy kernel time structures */
fsyscall_gtod_data.wall_time.tv_sec = wall->tv_sec;
fsyscall_gtod_data.wall_time.tv_nsec = wall->tv_nsec;
- fsyscall_gtod_data.monotonic_time.tv_sec = wall_to_monotonic.tv_sec
+ fsyscall_gtod_data.monotonic_time.tv_sec = wtm->tv_sec
+ wall->tv_sec;
- fsyscall_gtod_data.monotonic_time.tv_nsec = wall_to_monotonic.tv_nsec
+ fsyscall_gtod_data.monotonic_time.tv_nsec = wtm->tv_nsec
+ wall->tv_nsec;
/* normalize */
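The update_vsyscall() change above takes the wall-to-monotonic offset as the new wtm parameter instead of reading the wall_to_monotonic global, and derives the monotonic time as wall + wtm before the normalize step. A standalone sketch of that addition and normalization, with assumed types that are not the kernel's:

    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000L

    struct ts { long tv_sec; long tv_nsec; };

    /* monotonic = wall + wtm, with the nanosecond overflow carried into seconds */
    static struct ts ts_add_normalized(struct ts wall, struct ts wtm)
    {
        struct ts mono;

        mono.tv_sec  = wall.tv_sec + wtm.tv_sec;
        mono.tv_nsec = wall.tv_nsec + wtm.tv_nsec;
        while (mono.tv_nsec >= NSEC_PER_SEC) {   /* normalize */
            mono.tv_nsec -= NSEC_PER_SEC;
            mono.tv_sec++;
        }
        return mono;
    }

    int main(void)
    {
        struct ts wall = { 100, 900000000L };
        struct ts wtm  = {   5, 300000000L };
        struct ts mono = ts_add_normalized(wall, wtm);

        printf("%ld.%09ld\n", mono.tv_sec, mono.tv_nsec);  /* 106.200000000 */
        return 0;
    }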
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 1295ba327f6..5a4d044dcb1 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -6,204 +6,209 @@
#include <asm-generic/vmlinux.lds.h>
-#define IVT_TEXT \
- VMLINUX_SYMBOL(__start_ivt_text) = .; \
- *(.text.ivt) \
- VMLINUX_SYMBOL(__end_ivt_text) = .;
-
OUTPUT_FORMAT("elf64-ia64-little")
OUTPUT_ARCH(ia64)
ENTRY(phys_start)
jiffies = jiffies_64;
+
PHDRS {
- code PT_LOAD;
- percpu PT_LOAD;
- data PT_LOAD;
- note PT_NOTE;
- unwind 0x70000001; /* PT_IA_64_UNWIND, but ld doesn't match the name */
+ code PT_LOAD;
+ percpu PT_LOAD;
+ data PT_LOAD;
+ note PT_NOTE;
+ unwind 0x70000001; /* PT_IA_64_UNWIND, but ld doesn't match the name */
}
-SECTIONS
-{
- /* unwind exit sections must be discarded before the rest of the
- sections get included. */
- /DISCARD/ : {
- *(.IA_64.unwind.exit.text)
- *(.IA_64.unwind_info.exit.text)
- *(.comment)
- *(.note)
- }
-
- v = PAGE_OFFSET; /* this symbol is here to make debugging easier... */
- phys_start = _start - LOAD_OFFSET;
-
- code : { } :code
- . = KERNEL_START;
-
- _text = .;
- _stext = .;
-
- .text : AT(ADDR(.text) - LOAD_OFFSET)
- {
- IVT_TEXT
- TEXT_TEXT
- SCHED_TEXT
- LOCK_TEXT
- KPROBES_TEXT
- *(.gnu.linkonce.t*)
- }
- .text2 : AT(ADDR(.text2) - LOAD_OFFSET)
- { *(.text2) }
-#ifdef CONFIG_SMP
- .text.lock : AT(ADDR(.text.lock) - LOAD_OFFSET)
- { *(.text.lock) }
-#endif
- _etext = .;
- /* Read-only data */
+SECTIONS {
+ /*
+ * unwind exit sections must be discarded before
+ * the rest of the sections get included.
+ */
+ /DISCARD/ : {
+ *(.IA_64.unwind.exit.text)
+ *(.IA_64.unwind_info.exit.text)
+ *(.comment)
+ *(.note)
+ }
- NOTES :code :note /* put .notes in text and mark in PT_NOTE */
- code_continues : {} :code /* switch back to regular program... */
+ v = PAGE_OFFSET; /* this symbol is here to make debugging easier... */
+ phys_start = _start - LOAD_OFFSET;
+
+ code : {
+ } :code
+ . = KERNEL_START;
+
+ _text = .;
+ _stext = .;
+
+ .text : AT(ADDR(.text) - LOAD_OFFSET) {
+ __start_ivt_text = .;
+ *(.text..ivt)
+ __end_ivt_text = .;
+ TEXT_TEXT
+ SCHED_TEXT
+ LOCK_TEXT
+ KPROBES_TEXT
+ *(.gnu.linkonce.t*)
+ }
- EXCEPTION_TABLE(16)
+ .text2 : AT(ADDR(.text2) - LOAD_OFFSET) {
+ *(.text2)
+ }
- /* MCA table */
- . = ALIGN(16);
- __mca_table : AT(ADDR(__mca_table) - LOAD_OFFSET)
- {
- __start___mca_table = .;
- *(__mca_table)
- __stop___mca_table = .;
+#ifdef CONFIG_SMP
+ .text..lock : AT(ADDR(.text..lock) - LOAD_OFFSET) {
+ *(.text..lock)
+ }
+#endif
+ _etext = .;
+
+ /*
+ * Read-only data
+ */
+ NOTES :code :note /* put .notes in text and mark in PT_NOTE */
+ code_continues : {
+ } : code /* switch back to regular program... */
+
+ EXCEPTION_TABLE(16)
+
+ /* MCA table */
+ . = ALIGN(16);
+ __mca_table : AT(ADDR(__mca_table) - LOAD_OFFSET) {
+ __start___mca_table = .;
+ *(__mca_table)
+ __stop___mca_table = .;
}
- .data.patch.phys_stack_reg : AT(ADDR(.data.patch.phys_stack_reg) - LOAD_OFFSET)
- {
- __start___phys_stack_reg_patchlist = .;
- *(.data.patch.phys_stack_reg)
- __end___phys_stack_reg_patchlist = .;
+ .data..patch.phys_stack_reg : AT(ADDR(.data..patch.phys_stack_reg) - LOAD_OFFSET) {
+ __start___phys_stack_reg_patchlist = .;
+ *(.data..patch.phys_stack_reg)
+ __end___phys_stack_reg_patchlist = .;
}
- /* Global data */
- _data = .;
+ /*
+ * Global data
+ */
+ _data = .;
- /* Unwind info & table: */
- . = ALIGN(8);
- .IA_64.unwind_info : AT(ADDR(.IA_64.unwind_info) - LOAD_OFFSET)
- { *(.IA_64.unwind_info*) }
- .IA_64.unwind : AT(ADDR(.IA_64.unwind) - LOAD_OFFSET)
- {
- __start_unwind = .;
- *(.IA_64.unwind*)
- __end_unwind = .;
+ /* Unwind info & table: */
+ . = ALIGN(8);
+ .IA_64.unwind_info : AT(ADDR(.IA_64.unwind_info) - LOAD_OFFSET) {
+ *(.IA_64.unwind_info*)
+ }
+ .IA_64.unwind : AT(ADDR(.IA_64.unwind) - LOAD_OFFSET) {
+ __start_unwind = .;
+ *(.IA_64.unwind*)
+ __end_unwind = .;
} :code :unwind
- code_continues2 : {} : code
+ code_continues2 : {
+ } : code
- RODATA
+ RODATA
- .opd : AT(ADDR(.opd) - LOAD_OFFSET)
- { *(.opd) }
-
- /* Initialization code and data: */
+ .opd : AT(ADDR(.opd) - LOAD_OFFSET) {
+ *(.opd)
+ }
- . = ALIGN(PAGE_SIZE);
- __init_begin = .;
+ /*
+ * Initialization code and data:
+ */
+ . = ALIGN(PAGE_SIZE);
+ __init_begin = .;
- INIT_TEXT_SECTION(PAGE_SIZE)
- INIT_DATA_SECTION(16)
+ INIT_TEXT_SECTION(PAGE_SIZE)
+ INIT_DATA_SECTION(16)
- .data.patch.vtop : AT(ADDR(.data.patch.vtop) - LOAD_OFFSET)
- {
- __start___vtop_patchlist = .;
- *(.data.patch.vtop)
- __end___vtop_patchlist = .;
+ .data..patch.vtop : AT(ADDR(.data..patch.vtop) - LOAD_OFFSET) {
+ __start___vtop_patchlist = .;
+ *(.data..patch.vtop)
+ __end___vtop_patchlist = .;
}
- .data.patch.rse : AT(ADDR(.data.patch.rse) - LOAD_OFFSET)
- {
- __start___rse_patchlist = .;
- *(.data.patch.rse)
- __end___rse_patchlist = .;
+ .data..patch.rse : AT(ADDR(.data..patch.rse) - LOAD_OFFSET) {
+ __start___rse_patchlist = .;
+ *(.data..patch.rse)
+ __end___rse_patchlist = .;
}
- .data.patch.mckinley_e9 : AT(ADDR(.data.patch.mckinley_e9) - LOAD_OFFSET)
- {
- __start___mckinley_e9_bundles = .;
- *(.data.patch.mckinley_e9)
- __end___mckinley_e9_bundles = .;
+ .data..patch.mckinley_e9 : AT(ADDR(.data..patch.mckinley_e9) - LOAD_OFFSET) {
+ __start___mckinley_e9_bundles = .;
+ *(.data..patch.mckinley_e9)
+ __end___mckinley_e9_bundles = .;
}
#if defined(CONFIG_PARAVIRT)
- . = ALIGN(16);
- .paravirt_bundles : AT(ADDR(.paravirt_bundles) - LOAD_OFFSET)
- {
- __start_paravirt_bundles = .;
- *(.paravirt_bundles)
- __stop_paravirt_bundles = .;
- }
- . = ALIGN(16);
- .paravirt_insts : AT(ADDR(.paravirt_insts) - LOAD_OFFSET)
- {
- __start_paravirt_insts = .;
- *(.paravirt_insts)
- __stop_paravirt_insts = .;
- }
- . = ALIGN(16);
- .paravirt_branches : AT(ADDR(.paravirt_branches) - LOAD_OFFSET)
- {
- __start_paravirt_branches = .;
- *(.paravirt_branches)
- __stop_paravirt_branches = .;
+ . = ALIGN(16);
+ .paravirt_bundles : AT(ADDR(.paravirt_bundles) - LOAD_OFFSET) {
+ __start_paravirt_bundles = .;
+ *(.paravirt_bundles)
+ __stop_paravirt_bundles = .;
+ }
+ . = ALIGN(16);
+ .paravirt_insts : AT(ADDR(.paravirt_insts) - LOAD_OFFSET) {
+ __start_paravirt_insts = .;
+ *(.paravirt_insts)
+ __stop_paravirt_insts = .;
+ }
+ . = ALIGN(16);
+ .paravirt_branches : AT(ADDR(.paravirt_branches) - LOAD_OFFSET) {
+ __start_paravirt_branches = .;
+ *(.paravirt_branches)
+ __stop_paravirt_branches = .;
}
#endif
#if defined(CONFIG_IA64_GENERIC)
- /* Machine Vector */
- . = ALIGN(16);
- .machvec : AT(ADDR(.machvec) - LOAD_OFFSET)
- {
- machvec_start = .;
- *(.machvec)
- machvec_end = .;
+ /* Machine Vector */
+ . = ALIGN(16);
+ .machvec : AT(ADDR(.machvec) - LOAD_OFFSET) {
+ machvec_start = .;
+ *(.machvec)
+ machvec_end = .;
}
#endif
#ifdef CONFIG_SMP
- . = ALIGN(PERCPU_PAGE_SIZE);
- __cpu0_per_cpu = .;
- . = . + PERCPU_PAGE_SIZE; /* cpu0 per-cpu space */
+ . = ALIGN(PERCPU_PAGE_SIZE);
+ __cpu0_per_cpu = .;
+ . = . + PERCPU_PAGE_SIZE; /* cpu0 per-cpu space */
#endif
- . = ALIGN(PAGE_SIZE);
- __init_end = .;
+ . = ALIGN(PAGE_SIZE);
+ __init_end = .;
- .data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET)
- {
- PAGE_ALIGNED_DATA(PAGE_SIZE)
- . = ALIGN(PAGE_SIZE);
- __start_gate_section = .;
- *(.data.gate)
- __stop_gate_section = .;
+ .data..page_aligned : AT(ADDR(.data..page_aligned) - LOAD_OFFSET) {
+ PAGE_ALIGNED_DATA(PAGE_SIZE)
+ . = ALIGN(PAGE_SIZE);
+ __start_gate_section = .;
+ *(.data..gate)
+ __stop_gate_section = .;
#ifdef CONFIG_XEN
- . = ALIGN(PAGE_SIZE);
- __xen_start_gate_section = .;
- *(.data.gate.xen)
- __xen_stop_gate_section = .;
+ . = ALIGN(PAGE_SIZE);
+ __xen_start_gate_section = .;
+ *(.data..gate.xen)
+ __xen_stop_gate_section = .;
#endif
}
- . = ALIGN(PAGE_SIZE); /* make sure the gate page doesn't expose
- * kernel data
- */
-
- /* Per-cpu data: */
- . = ALIGN(PERCPU_PAGE_SIZE);
- PERCPU_VADDR(PERCPU_ADDR, :percpu)
- __phys_per_cpu_start = __per_cpu_load;
- . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits
- * into percpu page size
- */
-
- data : { } :data
- .data : AT(ADDR(.data) - LOAD_OFFSET)
- {
+ /*
+ * make sure the gate page doesn't expose
+ * kernel data
+ */
+ . = ALIGN(PAGE_SIZE);
+
+ /* Per-cpu data: */
+ . = ALIGN(PERCPU_PAGE_SIZE);
+ PERCPU_VADDR(PERCPU_ADDR, :percpu)
+ __phys_per_cpu_start = __per_cpu_load;
+ /*
+ * ensure percpu data fits
+ * into percpu page size
+ */
+ . = __phys_per_cpu_start + PERCPU_PAGE_SIZE;
+
+ data : {
+ } :data
+ .data : AT(ADDR(.data) - LOAD_OFFSET) {
INIT_TASK_DATA(PAGE_SIZE)
CACHELINE_ALIGNED_DATA(SMP_CACHE_BYTES)
READ_MOSTLY_DATA(SMP_CACHE_BYTES)
@@ -213,26 +218,37 @@ SECTIONS
CONSTRUCTORS
}
- . = ALIGN(16); /* gp must be 16-byte aligned for exc. table */
- .got : AT(ADDR(.got) - LOAD_OFFSET)
- { *(.got.plt) *(.got) }
- __gp = ADDR(.got) + 0x200000;
- /* We want the small data sections together, so single-instruction offsets
- can access them all, and initialized data all before uninitialized, so
- we can shorten the on-disk segment size. */
- .sdata : AT(ADDR(.sdata) - LOAD_OFFSET)
- { *(.sdata) *(.sdata1) *(.srdata) }
- _edata = .;
+ . = ALIGN(16); /* gp must be 16-byte aligned for exc. table */
+ .got : AT(ADDR(.got) - LOAD_OFFSET) {
+ *(.got.plt)
+ *(.got)
+ }
+ __gp = ADDR(.got) + 0x200000;
+
+ /*
+ * We want the small data sections together,
+ * so single-instruction offsets can access
+ * them all, and initialized data all before
+ * uninitialized, so we can shorten the
+ * on-disk segment size.
+ */
+ .sdata : AT(ADDR(.sdata) - LOAD_OFFSET) {
+ *(.sdata)
+ *(.sdata1)
+ *(.srdata)
+ }
+ _edata = .;
- BSS_SECTION(0, 0, 0)
+ BSS_SECTION(0, 0, 0)
- _end = .;
+ _end = .;
- code : { } :code
+ code : {
+ } :code
- STABS_DEBUG
- DWARF_DEBUG
+ STABS_DEBUG
+ DWARF_DEBUG
- /* Default discards */
- DISCARDS
+ /* Default discards */
+ DISCARDS
}
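Several output sections above bracket their input sections with __start_*/__stop_* (or machvec_start/machvec_end) symbols so C code can walk the collected entries. A small userspace sketch of the same idiom; with GNU ld, a section whose name is a valid C identifier gets __start_/__stop_ symbols automatically, and the section name my_table here is purely illustrative:

    #include <stdio.h>

    struct entry { const char *name; };

    /* drop each entry into the named section; the linker concatenates them */
    #define TABLE_ENTRY(n) \
        static const struct entry n __attribute__((used, section("my_table"))) = { #n }

    TABLE_ENTRY(first);
    TABLE_ENTRY(second);

    /* provided by GNU ld for orphan sections with C-identifier names */
    extern const struct entry __start_my_table[];
    extern const struct entry __stop_my_table[];

    int main(void)
    {
        for (const struct entry *e = __start_my_table; e < __stop_my_table; e++)
            printf("%s\n", e->name);
        return 0;
    }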