Diffstat (limited to 'arch/blackfin/kernel/vmlinux.lds.S')
-rw-r--r--  arch/blackfin/kernel/vmlinux.lds.S | 377
 1 file changed, 208 insertions(+), 169 deletions(-)
diff --git a/arch/blackfin/kernel/vmlinux.lds.S b/arch/blackfin/kernel/vmlinux.lds.S
index 6ae9ebbd8e5..c9eec84aa25 100644
--- a/arch/blackfin/kernel/vmlinux.lds.S
+++ b/arch/blackfin/kernel/vmlinux.lds.S
@@ -1,186 +1,209 @@
/*
- * File: arch/blackfin/kernel/vmlinux.lds.S
- * Based on: none - original work
- * Author:
+ * Copyright 2004-2009 Analog Devices Inc.
*
- * Created: Tue Sep 21 2004
- * Description: Master linker script for blackfin architecture
- *
- * Modified:
- * Copyright 2004-2006 Analog Devices Inc.
- *
- * Bugs: Enter bugs at http://blackfin.uclinux.org/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see the file COPYING, or write
- * to the Free Software Foundation, Inc.,
- * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * Licensed under the GPL-2 or later
*/
-#define VMLINUX_SYMBOL(_sym_) _##_sym_
-
#include <asm-generic/vmlinux.lds.h>
#include <asm/mem_map.h>
-
+#include <asm/page.h>
+#include <asm/thread_info.h>
OUTPUT_FORMAT("elf32-bfin")
ENTRY(__start)
_jiffies = _jiffies_64;
-MEMORY
-{
- ram : ORIGIN = CONFIG_BOOT_LOAD, LENGTH = (CONFIG_MEM_SIZE * 1024 * 1024) - (CONFIG_BOOT_LOAD)
- l1_data_a : ORIGIN = L1_DATA_A_START, LENGTH = L1_DATA_A_LENGTH
- l1_data_b : ORIGIN = L1_DATA_B_START, LENGTH = L1_DATA_B_LENGTH
- l1_code : ORIGIN = L1_CODE_START, LENGTH = L1_CODE_LENGTH
- l1_scratch : ORIGIN = L1_SCRATCH_START, LENGTH = L1_SCRATCH_LENGTH
-}
-
SECTIONS
{
+#ifdef CONFIG_RAMKERNEL
. = CONFIG_BOOT_LOAD;
+#else
+ . = CONFIG_ROM_BASE;
+#endif
+ /* None of the text, ro_data, or bss sections needs to be aligned,
+ * so pack them back to back.
+ */
.text :
{
- _text = .;
- __stext = .;
- *(.text)
+ __text = .;
+ _text = .;
+ __stext = .;
+ TEXT_TEXT
+#ifndef CONFIG_SCHEDULE_L1
SCHED_TEXT
- *(.text.lock)
- . = ALIGN(16);
- ___start___ex_table = .;
- *(__ex_table)
- ___stop___ex_table = .;
-
- *($code)
- *(.rodata)
- *(.rodata.*)
- *(__vermagic) /* Kernel version magic */
- *(.rodata1)
+#endif
+ LOCK_TEXT
+ IRQENTRY_TEXT
+ KPROBES_TEXT
+#ifdef CONFIG_ROMKERNEL
+ __sinittext = .;
+ INIT_TEXT
+ __einittext = .;
+ EXIT_TEXT
+#endif
+ *(.text.*)
*(.fixup)
- *(.spinlock.text)
- /* Kernel symbol table: Normal symbols */
- . = ALIGN(4);
- ___start___ksymtab = .;
- *(__ksymtab)
- ___stop___ksymtab = .;
-
- /* Kernel symbol table: GPL-only symbols */
- ___start___ksymtab_gpl = .;
- *(__ksymtab_gpl)
- ___stop___ksymtab_gpl = .;
-
- /* Kernel symbol table: Normal unused symbols */ \
- ___start___ksymtab_unused = .;
- *(__ksymtab_unused)
- ___stop___ksymtab_unused = .;
-
- /* Kernel symbol table: GPL-only unused symbols */
- ___start___ksymtab_unused_gpl = .;
- *(__ksymtab_unused_gpl)
- ___stop___ksymtab_unused_gpl = .;
-
-
- /* Kernel symbol table: GPL-future symbols */
- ___start___ksymtab_gpl_future = .;
- *(__ksymtab_gpl_future)
- ___stop___ksymtab_gpl_future = .;
-
- /* Kernel symbol table: Normal symbols */
- ___start___kcrctab = .;
- *(__kcrctab)
- ___stop___kcrctab = .;
-
- /* Kernel symbol table: GPL-only symbols */
- ___start___kcrctab_gpl = .;
- *(__kcrctab_gpl)
- ___stop___kcrctab_gpl = .;
-
- /* Kernel symbol table: GPL-future symbols */
- ___start___kcrctab_gpl_future = .;
- *(__kcrctab_gpl_future)
- ___stop___kcrctab_gpl_future = .;
-
- /* Kernel symbol table: strings */
- *(__ksymtab_strings)
-
- . = ALIGN(4);
+#if !L1_CODE_LENGTH
+ *(.l1.text)
+#endif
__etext = .;
- } > ram
+ }
+
+ EXCEPTION_TABLE(4)
+ NOTES
- .init :
+ /* Align in case the first read-only access is 32-bit */
+ RO_DATA(4)
+ __rodata_end = .;
+
+#ifdef CONFIG_ROMKERNEL
+ . = CONFIG_BOOT_LOAD;
+ .bss : AT(__rodata_end)
+#else
+ .bss :
+#endif
{
- . = ALIGN(4096);
- ___init_begin = .;
- __sinittext = .;
- *(.init.text)
- __einittext = .;
- *(.init.data)
- . = ALIGN(16);
- ___setup_start = .;
- *(.init.setup)
- ___setup_end = .;
- ___start___param = .;
- *(__param)
- ___stop___param = .;
- ___initcall_start = .;
- INITCALLS
- ___initcall_end = .;
- ___con_initcall_start = .;
- *(.con_initcall.init)
- ___con_initcall_end = .;
- ___security_initcall_start = .;
- *(.security_initcall.init)
- ___security_initcall_end = .;
. = ALIGN(4);
- ___initramfs_start = .;
- *(.init.ramfs)
- ___initramfs_end = .;
+ ___bss_start = .;
+ *(.bss .bss.*)
+ *(COMMON)
+#if !L1_DATA_A_LENGTH
+ *(.l1.bss)
+#endif
+#if !L1_DATA_B_LENGTH
+ *(.l1.bss.B)
+#endif
. = ALIGN(4);
- ___init_end = .;
- } > ram
+ ___bss_stop = .;
+ }
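
In the ROMKERNEL case above, .bss is linked at its RAM address but chained onto the ROM image via AT(__rodata_end); since .bss carries no load data, boot code only has to zero the region between ___bss_start and ___bss_stop. A minimal sketch of that step (the triple-underscore linker symbols appear with one underscore fewer in C, because the bfin toolchain prepends an underscore, just as `jiffies` becomes `_jiffies` above; the helper name is hypothetical):

	/* Hypothetical early-boot helper: clear .bss before C code runs.
	 * Linker symbols ___bss_start/___bss_stop appear in C as
	 * __bss_start/__bss_stop (bfin-elf prepends one underscore).
	 */
	extern char __bss_start[], __bss_stop[];

	static void early_zero_bss(void)
	{
		char *p;

		/* .bss occupies [__bss_start, __bss_stop) and has no load
		 * image, so zeroing it is all that is required. */
		for (p = __bss_start; p < __bss_stop; p++)
			*p = 0;
	}
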
+
+#if defined(CONFIG_ROMKERNEL)
+ .data : AT(LOADADDR(.bss) + SIZEOF(.bss))
+#else
+ .data :
+#endif
+ {
+ __sdata = .;
+ /* Placed first so the DATA_DATA glob below doesn't pull it in */
+ CACHELINE_ALIGNED_DATA(32)
+
+#if !L1_DATA_A_LENGTH
+ . = ALIGN(32);
+ *(.data_l1.cacheline_aligned)
+ *(.l1.data)
+#endif
+#if !L1_DATA_B_LENGTH
+ *(.l1.data.B)
+#endif
+#if !L2_LENGTH
+ . = ALIGN(32);
+ *(.data_l2.cacheline_aligned)
+ *(.l2.data)
+#endif
+
+ DATA_DATA
+ CONSTRUCTORS
+
+ INIT_TASK_DATA(THREAD_SIZE)
- __l1_lma_start = .;
+ __edata = .;
+ }
+ __data_lma = LOADADDR(.data);
+ __data_len = SIZEOF(.data);
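
With CONFIG_ROMKERNEL, .data now has distinct load and run addresses, so startup code must copy __data_len bytes from __data_lma to __sdata before any initialized data is touched. A sketch of that copy, under the same symbol-naming assumption as above (illustrative only, not the tree's actual startup code):

	extern char _sdata[];      /* __sdata: run-time (VMA) start of .data */
	extern char _data_lma[];   /* __data_lma: load address in flash/ROM */
	extern char _data_len[];   /* __data_len: absolute symbol, SIZEOF(.data) */

	static void early_copy_data(void)
	{
		unsigned long n = (unsigned long)_data_len;
		char *dst = _sdata;
		const char *src = _data_lma;

		/* The value of an absolute linker symbol is read by
		 * taking its address, hence the cast above. */
		while (n--)
			*dst++ = *src++;
	}
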
+
+ /* The init section should be last, so when we free it, it goes into
+ * the general memory pool and (hopefully) decreases fragmentation a
+ * tiny bit. The init section has a _requirement_ that it be
+ * PAGE_SIZE aligned.
+ */
+ . = ALIGN(PAGE_SIZE);
+ ___init_begin = .;
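
Everything placed between ___init_begin and the later ___init_end is returned to the page allocator once boot finishes; the PAGE_SIZE alignment on both markers is what lets that happen a whole page at a time. A minimal sketch in the style of the era's arch free_initmem() implementations (the page helpers are standard kernel API of the period; the function name is illustrative):

	#include <linux/mm.h>

	extern char __init_begin[], __init_end[]; /* ___init_begin/___init_end */

	static void sketch_free_initmem(void)
	{
		unsigned long addr;

		/* Both markers are PAGE_SIZE aligned, so only whole pages
		 * are handed back and none is left half-freed. */
		for (addr = (unsigned long)__init_begin;
		     addr < (unsigned long)__init_end; addr += PAGE_SIZE) {
			ClearPageReserved(virt_to_page(addr));
			init_page_count(virt_to_page(addr));
			free_page(addr);
		}
	}
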
+
+#ifdef CONFIG_RAMKERNEL
+ INIT_TEXT_SECTION(PAGE_SIZE)
+
+ /* We have to discard exit text and such at runtime, not link time, to
+ * handle embedded cross-section references (alt instructions, bug
+ * table, eh_frame, etc...). We need all of our .text up front and
+ * .data after it for PCREL call issues.
+ */
+ .exit.text :
+ {
+ EXIT_TEXT
+ }
- .text_l1 :
+ . = ALIGN(16);
+ INIT_DATA_SECTION(16)
+ PERCPU_SECTION(32)
+
+ .exit.data :
+ {
+ EXIT_DATA
+ }
+
+ .text_l1 L1_CODE_START : AT(LOADADDR(.exit.data) + SIZEOF(.exit.data))
+#else
+ .init.data : AT(__data_lma + __data_len + 32)
+ {
+ __sinitdata = .;
+ INIT_DATA
+ INIT_SETUP(16)
+ INIT_CALLS
+ CON_INITCALL
+ SECURITY_INITCALL
+ INIT_RAM_FS
+
+ . = ALIGN(PAGE_SIZE);
+ ___per_cpu_load = .;
+ PERCPU_INPUT(32)
+
+ EXIT_DATA
+ __einitdata = .;
+ }
+ __init_data_lma = LOADADDR(.init.data);
+ __init_data_len = SIZEOF(.init.data);
+ __init_data_end = .;
+
+ .text_l1 L1_CODE_START : AT(__init_data_lma + __init_data_len)
+#endif
{
. = ALIGN(4);
- __stext_l1 = .;
+ __stext_l1 = .;
+ *(.l1.text.head)
*(.l1.text)
-
+#ifdef CONFIG_SCHEDULE_L1
+ SCHED_TEXT
+#endif
. = ALIGN(4);
- __etext_l1 = .;
- } > l1_code AT > ram
+ __etext_l1 = .;
+ }
+ __text_l1_lma = LOADADDR(.text_l1);
+ __text_l1_len = SIZEOF(.text_l1);
+ ASSERT (__text_l1_len <= L1_CODE_LENGTH, "L1 text overflow!")
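
.text_l1 runs at L1_CODE_START but is loaded at an LMA in external memory, so setup code must move __text_l1_len bytes from __text_l1_lma into L1 instruction SRAM. On Blackfin, L1 instruction memory is not writable by ordinary core stores, so the copy has to go through DMA; a sketch assuming a DMA helper along the lines of the tree's early_dma_memcpy() (signature assumed):

	#include <stddef.h>

	extern char _stext_l1[];      /* __stext_l1: VMA inside L1 SRAM */
	extern char _text_l1_lma[];   /* __text_l1_lma: load address */
	extern char _text_l1_len[];   /* __text_l1_len: absolute size */

	/* Assumed DMA copy helper; L1 instruction SRAM can't be written
	 * with core stores, so a plain memcpy() would not work here. */
	void early_dma_memcpy(void *dst, const void *src, size_t n);

	static void relocate_l1_text(void)
	{
		early_dma_memcpy(_stext_l1, _text_l1_lma,
				 (unsigned long)_text_l1_len);
	}
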
- .data_l1 :
+ .data_l1 L1_DATA_A_START : AT(__text_l1_lma + __text_l1_len)
{
. = ALIGN(4);
- __sdata_l1 = .;
+ __sdata_l1 = .;
*(.l1.data)
- __edata_l1 = .;
-
- . = ALIGN(4);
- __sbss_l1 = .;
- *(.l1.bss)
+ __edata_l1 = .;
. = ALIGN(32);
*(.data_l1.cacheline_aligned)
. = ALIGN(4);
- __ebss_l1 = .;
- } > l1_data_a AT > ram
- .data_b_l1 :
+ __sbss_l1 = .;
+ *(.l1.bss)
+ . = ALIGN(4);
+ __ebss_l1 = .;
+ }
+ __data_l1_lma = LOADADDR(.data_l1);
+ __data_l1_len = SIZEOF(.data_l1);
+ ASSERT (__data_l1_len <= L1_DATA_A_LENGTH, "L1 data A overflow!")
+
+ .data_b_l1 L1_DATA_B_START : AT(__data_l1_lma + __data_l1_len)
{
. = ALIGN(4);
__sdata_b_l1 = .;
@@ -190,39 +213,55 @@ SECTIONS
. = ALIGN(4);
__sbss_b_l1 = .;
*(.l1.bss.B)
-
. = ALIGN(4);
__ebss_b_l1 = .;
- } > l1_data_b AT > ram
+ }
+ __data_b_l1_lma = LOADADDR(.data_b_l1);
+ __data_b_l1_len = SIZEOF(.data_b_l1);
+ ASSERT (__data_b_l1_len <= L1_DATA_B_LENGTH, "L1 data B overflow!")
- .data :
+ .text_data_l2 L2_START : AT(__data_b_l1_lma + __data_b_l1_len)
{
- __sdata = .;
- . = ALIGN(0x2000);
- *(.data.init_task)
- *(.data)
-
- . = ALIGN(32);
- *(.data.cacheline_aligned)
+ . = ALIGN(4);
+ __stext_l2 = .;
+ *(.l2.text)
+ . = ALIGN(4);
+ __etext_l2 = .;
- . = ALIGN(0x2000);
- __edata = .;
- } > ram
+ . = ALIGN(4);
+ __sdata_l2 = .;
+ *(.l2.data)
+ __edata_l2 = .;
- /DISCARD/ : { /* Exit code and data*/
- *(.exit.text)
- *(.exit.data)
- *(.exitcall.exit)
- } > ram
+ . = ALIGN(32);
+ *(.data_l2.cacheline_aligned)
- .bss :
- {
. = ALIGN(4);
- ___bss_start = .;
- *(.bss)
- *(COMMON)
+ __sbss_l2 = .;
+ *(.l2.bss)
. = ALIGN(4);
- ___bss_stop = .;
- __end = . ;
- } > ram
+ __ebss_l2 = .;
+ }
+ __l2_lma = LOADADDR(.text_data_l2);
+ __l2_len = SIZEOF(.text_data_l2);
+ ASSERT (__l2_len <= L2_LENGTH, "L2 overflow!")
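
The L1 data A, L1 data B, and L2 sections all follow the same pattern: a section placed at its on-chip VMA, an AT() chaining its LMA after the previous region, an __*_lma/__*_len symbol pair, and an overflow ASSERT. That regularity means the relocation step can be table-driven instead of three hand-written copies; a sketch under the same naming and helper assumptions as above:

	#include <stddef.h>

	extern char _sdata_l1[],   _data_l1_lma[],   _data_l1_len[];
	extern char _sdata_b_l1[], _data_b_l1_lma[], _data_b_l1_len[];
	extern char _stext_l2[],   _l2_lma[],        _l2_len[];

	void early_dma_memcpy(void *dst, const void *src, size_t n);

	/* One (vma, lma, len) triple per on-chip region, mirroring the
	 * symbols defined after each section above.  L1 data and L2 are
	 * core-writable, so a plain copy would also do; DMA is assumed
	 * here for uniformity with the instruction-SRAM case. */
	static const struct region {
		void *vma;
		const void *lma;
		const char *len; /* absolute linker symbol */
	} regions[] = {
		{ _sdata_l1,   _data_l1_lma,   _data_l1_len   },
		{ _sdata_b_l1, _data_b_l1_lma, _data_b_l1_len },
		{ _stext_l2,   _l2_lma,        _l2_len        },
	};

	static void relocate_onchip_data(void)
	{
		unsigned int i;

		for (i = 0; i < sizeof(regions) / sizeof(regions[0]); i++)
			early_dma_memcpy(regions[i].vma, regions[i].lma,
					 (unsigned long)regions[i].len);
	}
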
+
+ /* Force trailing alignment of our init section so that when we
+ * free our init memory, we don't leave behind a partial page.
+ */
+#ifdef CONFIG_RAMKERNEL
+ . = __l2_lma + __l2_len;
+#else
+ . = __init_data_end;
+#endif
+ . = ALIGN(PAGE_SIZE);
+ ___init_end = .;
+
+ __end = .;
+
+ STABS_DEBUG
+
+ DWARF_DEBUG
+
+ DISCARDS
}