The vmlinux.lds linker script
/*
* Automatically generated C config: don't edit
* Linux/arm 3.0.15 Kernel Configuration
*/
/* ld script to make ARM Linux kernel
* taken from the i386 version by Russell King
* Written by Martin Mares
*/
/*
* Helper macros to support writing architecture specific
* linker scripts.
*
* A minimal linker script has the following content:
* [This is a sample; architectures may have special requirements]
*
* OUTPUT_FORMAT(...)
* OUTPUT_ARCH(...)
* ENTRY(...)
* SECTIONS
* {
* . = START;
* __init_begin = .;
* HEAD_TEXT_SECTION
* INIT_TEXT_SECTION(PAGE_SIZE)
* INIT_DATA_SECTION(...)
* PERCPU_SECTION(CACHELINE_SIZE)
* __init_end = .;
*
* _stext = .;
* TEXT_SECTION = 0
* _etext = .;
*
* _sdata = .;
* RO_DATA_SECTION(PAGE_SIZE)
* RW_DATA_SECTION(...)
* _edata = .;
*
* EXCEPTION_TABLE(...)
* NOTES
*
* BSS_SECTION(0, 0, 0)
* _end = .;
*
* STABS_DEBUG
* DWARF_DEBUG
*
* DISCARDS // must be the last
* }
*
* [__init_begin, __init_end] is the init section that may be freed after init
* [_stext, _etext] is the text section
* [_sdata, _edata] is the data section
*
* Some of the included output sections have their own set of constants.
* Examples are: [__initramfs_start, __initramfs_end] for initramfs and
* [__nosave_begin, __nosave_end] for the nosave data
*/
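These boundary symbols are visible to C code as ordinary external symbols, so the kernel can reason about its own layout. A minimal sketch, assuming only the symbol names defined by the script below (report_layout() is a made-up helper):

#include <linux/kernel.h>

/* Defined by the linker script; only their addresses are meaningful. */
extern char __init_begin[], __init_end[];
extern char _stext[], _etext[];
extern char _sdata[], _edata[];

static void report_layout(void)
{
	/* The init range is what free_initmem() releases after boot. */
	printk(KERN_INFO "init: %p-%p (%lu KiB)\n",
	       __init_begin, __init_end,
	       (unsigned long)(__init_end - __init_begin) >> 10);
	printk(KERN_INFO "text: %p-%p  data: %p-%p\n",
	       _stext, _etext, _sdata, _edata);
}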
/* Align . to an 8 byte boundary, which equals the maximum function alignment. */
/*
* Align to a 32 byte boundary equal to the
* alignment gcc 4.5 uses for a struct
*/
/* The actual configuration determines whether the init/exit sections
* are handled as text/data or can be discarded (which
* often happens at runtime)
*/
/* .data section */
/*
* Data section helpers
*/
/*
* Read only Data
*/
/* RODATA & RO_DATA are provided for backward compatibility.
* All archs are supposed to use RO_DATA() */
/* .text section. Map to function alignment to avoid address changes
* during the second ld pass when generating System.map */
/* sched.text is aligned to function alignment to ensure we get the same
* address even on the second ld pass when generating System.map */
/* spinlock.text is aligned to function alignment to ensure we get the same
* address even on the second ld pass when generating System.map */
/* Section used for early init (in .S files) */
/*
* Exception table
*/
/*
* Init task
*/
/* init and exit section handling */
/*
* bss (Block Started by Symbol) - uninitialized data
* zeroed during startup
*/
/*
* DWARF debug sections.
* Symbols in the DWARF debugging sections are relative to
* the beginning of the section so we begin them at 0.
*/
/* Stabs debugging sections. */
/*
* Default discarded sections.
*
* Some archs want to discard exit text/data at runtime rather than
* link time due to cross-section references such as alt instructions,
* bug table, eh_frame, etc. DISCARDS must be the last of output
* section definitions so that such archs put those in earlier section
* definitions.
*/
/**
* PERCPU_INPUT - the percpu input sections
* @cacheline: cacheline size
*
* The core percpu section names and core symbols which do not rely
* directly upon load addresses.
*
* @cacheline is used to align subsections to avoid false cacheline
* sharing between subsections for different purposes.
*/
/**
* PERCPU_VADDR - define output section for percpu area
* @cacheline: cacheline size
* @vaddr: explicit base address (optional)
* @phdr: destination PHDR (optional)
*
* Macro which expands to output section for percpu area.
*
* @cacheline is used to align subsections to avoid false cacheline
* sharing between subsections for different purposes.
*
* If @vaddr is not blank, it specifies explicit base address and all
* percpu symbols will be offset from the given address. If blank,
* @vaddr always equals @laddr + LOAD_OFFSET.
*
* @phdr defines the output PHDR to use if not blank. Be warned that
* output PHDR is sticky. If @phdr is specified, the next output
* section in the linker script will go there too. @phdr should have
* a leading colon.
*
* Note that this macro defines __per_cpu_load as an absolute symbol.
* If there is no need to put the percpu section at a predetermined
* address, use PERCPU_SECTION.
*/
/**
* PERCPU_SECTION - define output section for percpu area, simple version
* @cacheline: cacheline size
*
* Aligns to PAGE_SIZE and outputs the output section for the percpu area.
* This macro doesn't manipulate @vaddr or @phdr, so __per_cpu_load and
* __per_cpu_start will be identical.
*
* This macro is equivalent to ALIGN(PAGE_SIZE); PERCPU_VADDR(@cacheline,,)
* except that __per_cpu_load is defined as a relative symbol against
* .data..percpu which is required for relocatable x86_32 configuration.
*/
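On the C side the .data..percpu input sections gathered here are populated by the per-CPU macros. A small sketch, assuming nothing beyond the standard percpu API (the counter name and helper are illustrative):

#include <linux/percpu.h>

/* Lands in .data..percpu and is replicated for each CPU at boot. */
static DEFINE_PER_CPU(unsigned long, demo_hit_count);

static void demo_count_hit(void)
{
	/* get_cpu_var() disables preemption and yields this CPU's copy. */
	get_cpu_var(demo_hit_count)++;
	put_cpu_var(demo_hit_count);
}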
/*
* Definition of the high level *_SECTION macros
* They will fit only a subset of the architectures
*/
/*
* Writeable data.
* All sections are combined in a single .data section.
* The sections following CONSTRUCTORS are arranged so their
* typical alignment matches.
* A cacheline is typically/always smaller than a PAGE_SIZE, so
* the sections that have this restriction (or a similar one)
* are located before the ones requiring PAGE_SIZE alignment.
* NOSAVE_DATA starts and ends with a PAGE_SIZE alignment which
* matches the requirement of PAGE_ALIGNED_DATA.
*
* use 0 as page_align if page_aligned data is not used */
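The cacheline-aligned and read-mostly data mentioned above reach the .data..cacheline_aligned and .data..read_mostly input sections through attribute macros. A sketch with made-up variable names:

#include <linux/cache.h>

/* Placed in .data..read_mostly: rarely written, read on hot paths. */
static int demo_tunable __read_mostly = 1;

/* Placed in .data..cacheline_aligned to avoid false sharing. */
static struct {
	long counters[4];
} demo_stats __cacheline_aligned;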
/*
* arch/arm/include/asm/thread_info.h
*
* Copyright (C) 2002 Russell King.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
/*
* Allow us to mark functions as 'deprecated' and have gcc emit a nice
* warning for each use, in hopes of speeding the function's removal.
* Usage is:
* int __deprecated foo(void)
*/
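With gcc this is just an attribute; roughly, with foo() as a placeholder:

#define __deprecated __attribute__((deprecated))

int __deprecated foo(void)
{
	return 0;
}

/* Every caller of foo() now triggers a "'foo' is deprecated" warning. */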
/*
* Allow us to avoid 'defined but not used' warnings on functions and data,
* as well as force them to be emitted to the assembly file.
*
* As of gcc 3.4, static functions that are not marked with attribute((used))
* may be elided from the assembly file. As of gcc 3.4, static data not so
* marked will not be elided, but this may change in a future gcc version.
*
* NOTE: Because distributions shipped with a backported unit-at-a-time
* compiler in gcc 3.3, we must define __used to be __attribute__((used))
* for gcc >=3.3 instead of 3.4.
*
* In prior versions of gcc, such functions and data would be emitted, but
* would be warned about except with attribute((unused)).
*
* Mark functions that are referenced only in inline assembly as __used so
* the code is emitted even though it appears to be unreferenced.
*/
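A sketch of the attribute and of the inline-assembly situation it exists for (helper_called_from_asm() is illustrative):

/* gcc >= 3.3: keep the symbol even if no C reference is visible. */
#define __used __attribute__((__used__))

static int __used helper_called_from_asm(int x)
{
	return x + 1;
}

/* The only reference lives in assembly, which gcc cannot see;
 * without __used the function could be elided entirely. */
asm(".pushsection .rodata\n"
    ".word helper_called_from_asm\n"
    ".popsection");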
/*
* Rather than using noinline to prevent stack consumption, use
* noinline_for_stack instead, for documentation reasons.
*/
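The macro itself is only an alias whose name records the intent, roughly:

/* Same effect as noinline; the name documents why inlining is unwanted. */
#define noinline_for_stack noinline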
/*
* From the GCC manual:
*
* Many functions do not examine any values except their arguments,
* and have no effects except the return value. Basically this is
* just a slightly stricter class than the `pure' attribute above, since
* the function is not allowed to read global memory.
*
* Note that a function that has pointer arguments and examines the
* data pointed to must _not_ be declared `const'. Likewise, a
* function that calls a non-`const' function usually must not be
* `const'. It does not make sense for a `const' function to return
* `void'.
*/
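In the kernel headers this becomes an attribute macro; a small sketch with an illustrative function:

#define __attribute_const__ __attribute__((__const__))

/* Depends only on its argument and touches no memory, so `const' is safe. */
static int __attribute_const__ cube(int x)
{
	return x * x * x;
}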
/*
* Tell gcc if a function is cold. The compiler will assume any path
* directly leading to the call is unlikely.
*/
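Sketch of the corresponding macro and a typical error-path use (the function is made up):

#define __cold __attribute__((__cold__))

/* Branches leading to callers of this function are treated as unlikely. */
static void __cold demo_report_fatal(const char *why)
{
	(void)why;	/* error handling would go here */
}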
/* Simple shorthand for a section definition */
/* Are two types/vars the same type (ignoring qualifiers)? */
/* Compile time object size, -1 for unknown */
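Both helpers map directly onto gcc builtins, approximately:

/* Same type, ignoring qualifiers? */
#define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))

/* Compile-time object size in bytes, (size_t)-1 when unknown. */
#define __compiletime_object_size(obj) __builtin_object_size(obj, 0)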
/*
* Prevent the compiler from merging or refetching accesses. The compiler
* is also forbidden from reordering successive instances of ACCESS_ONCE(),
* but only when the compiler is aware of some particular ordering. One way
* to make the compiler aware of ordering is to put the two invocations of
* ACCESS_ONCE() in different C statements.
*
* This macro does absolutely -nothing- to prevent the CPU from reordering,
* merging, or refetching absolutely anything at any time. Its main intended
* use is to mediate communication between process-level code and irq/NMI
* handlers, all running on the same CPU.
*/
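In kernels of this era ACCESS_ONCE() is a volatile cast; a sketch plus a typical polling loop (stop_requested is illustrative):

#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

extern int stop_requested;

void wait_for_stop(void)
{
	/* Forces a fresh load on every iteration instead of letting the
	 * compiler hoist the read out of the loop. */
	while (!ACCESS_ONCE(stop_requested))
		;
}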
/*
* arch/arm/include/asm/fpstate.h
*
* Copyright (C) 1995 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
/*
* We use bit 30 of the preempt_count to indicate that kernel
* preemption is occurring. See .
*/
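Bit 30 corresponds to the PREEMPT_ACTIVE constant defined next to this comment, roughly:

/* 1 << 30: set in preempt_count while kernel preemption is in progress. */
#define PREEMPT_ACTIVE 0x40000000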
/*
* thread information flags:
* TIF_SYSCALL_TRACE - syscall trace active
* TIF_SIGPENDING - signal pending
* TIF_NEED_RESCHED - rescheduling necessary
* TIF_NOTIFY_RESUME - callback before returning to user
* TIF_USEDFPU - FPU was used by this task this quantum (SMP)
* TIF_POLLING_NRFLAG - true if poll_idle() is polling TIF_NEED_RESCHED
*/
/*
* Change these and you break ASM code in entry-common.S
*/
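The flag list above is encoded as bit numbers plus derived masks, and the assembly in entry-common.S tests those masks directly, which is why renumbering breaks it. A sketch with illustrative values (the real numbers live in thread_info.h):

#define TIF_SIGPENDING    0
#define TIF_NEED_RESCHED  1
#define TIF_NOTIFY_RESUME 2

/* Masks derived from the bit numbers, shared between C and assembly. */
#define _TIF_SIGPENDING    (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED  (1 << TIF_NEED_RESCHED)
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
#define _TIF_WORK_MASK     (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_NOTIFY_RESUME)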
/*
* arch/arm/include/asm/memory.h
*
* Copyright (C) 2000-2002 Russell King
* modification for nommu, Hyok S. Choi, 2004
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Note: this file should not be included by non-asm/.h files
*/
/* const.h: Macros for dealing with constants. */
/* Some constant macros are used in both assembler and
* C code. Therefore we cannot annotate them always with
* 'UL' and other type specifiers unilaterally. We
* use the following macros to deal with this.
*
* Similarly, _AT() will cast an expression with a type in C, but
* leave it unchanged in asm.
*/
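The macros described here look roughly like this in include/linux/const.h:

#ifdef __ASSEMBLY__
#define _AC(X, Y)	X		/* assembler: drop the type suffix */
#define _AT(T, X)	X		/* assembler: drop the cast */
#else
#define __AC(X, Y)	(X##Y)
#define _AC(X, Y)	__AC(X, Y)	/* C: paste the suffix, e.g. 1UL */
#define _AT(T, X)	((T)(X))	/* C: apply the cast */
#endif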
/*
* asm-generic/int-ll64.h
*
* Integer declarations for architectures which use "long long"
* for 64-bit types.
*/
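The resulting kernel-internal types are plain typedefs, approximately:

typedef signed char        s8;
typedef unsigned char      u8;
typedef signed short       s16;
typedef unsigned short     u16;
typedef signed int         s32;
typedef unsigned int       u32;
typedef signed long long   s64;	/* "long long" even on 64-bit arches */
typedef unsigned long long u64;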
/*
* There seems to be no way of detecting this automatically from user
* space, so 64 bit architectures should override this in their
* bitsperlong.h. In particular, an architecture that supports
* both 32 and 64 bit user space must not rely on CONFIG_64BIT
* to decide it, but rather check a compiler provided macro.
*/
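A 64-bit architecture's override therefore keys off a compiler-provided macro rather than CONFIG_64BIT. An illustrative sketch only; the exact macro checked varies by architecture:

#if defined(__LP64__) && __LP64__
#define __BITS_PER_LONG 64
#else
#define __BITS_PER_LONG 32
#endif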
/*
* FIXME: The check currently breaks x86-64 build, so it's
* temporarily disabled. Please fix x86-64 and reenable
*/
/*
* These aren't exported outside the kernel to avoid name space clashes
*/
/* linux/arch/arm/mach-exynos/include/mach/memory.h
*
* Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
*
*
* EXYNOS4 - Memory definitions
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
/* Maximum of 256MiB in one bank */
/* Required by ION to allocate scatterlist(sglist) with nents > 256 */
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/* Size definitions
* Copyright (C) ARM Limited 1998. All rights reserved.
*/
/*
* linux/include/asm-generic/sizes.h
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
/*
* Allow for constants defined here to be used from assembly code
* by appending the UL suffix only when compiling actual C code.
*/
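Concretely this is the UL() wrapper built on the _AC() macro shown earlier, roughly:

/* 0x1000 stays bare in assembly, becomes 0x1000UL when compiled as C. */
#define UL(x) _AC(x, UL)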
/*
* PAGE_OFFSET - the virtual address of the start of the kernel image
* TASK_SIZE - the maximum size of a user space task.
* TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area
*/
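For this configuration PAGE_OFFSET is 0xC0000000, which is where the ". = 0xC0000000 + 0x00008000" line near the top of SECTIONS comes from: PAGE_OFFSET plus the usual ARM TEXT_OFFSET of 0x8000. A sketch of the kind of definitions behind it; the TASK_SIZE and TASK_UNMAPPED_BASE expressions are illustrative rather than copied from the 3.0 header:

/* Kernel image is mapped at PAGE_OFFSET, TEXT_OFFSET bytes into RAM. */
#define PAGE_OFFSET        UL(0xC0000000)
#define TEXT_OFFSET        UL(0x00008000)

/* Illustrative: user space ends below PAGE_OFFSET and mmap() allocations
 * start part-way up the task address space. */
#define TASK_SIZE          (PAGE_OFFSET - UL(0x01000000))
#define TASK_UNMAPPED_BASE (TASK_SIZE / 3)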
/*
* The maximum size of a 26-bit user space task.
*/
/*
* The module space lives between the addresses given by TASK_SIZE
* and PAGE_OFFSET - it must be within 32MB of the kernel text.
*/
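A sketch of how that constraint typically materializes; the exact expressions here are illustrative, not quoted from the 3.0 header:

/* Modules sit directly below the kernel so that ordinary ARM branches
 * (range +/- 32MB) can reach kernel text. */
#define MODULES_VADDR	(PAGE_OFFSET - 16 * 1024 * 1024)
#define MODULES_END	(PAGE_OFFSET)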
/*
* The highmem pkmap virtual space shares the end of the module area.
*/
/*
* The XIP kernel gets mapped at the bottom of the module vm area.
* Since we use sections to map it, this macro replaces the physical address
* with its virtual address while keeping offset from the base section.
*/
/*
* Allow 16MB-aligned ioremap pages
*/
/*
* Size of DMA-consistent memory region. Must be a multiple of 2MB,
* between 2MB and 14MB inclusive.
*/
/*
* We fix the TCM memories (max 32 KiB ITCM and DTCM respectively) at these
* locations
*/
/*
* Convert a physical address to a Page Frame Number and back
*/
/*
* Convert a page to/from a physical address
*/
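These conversions are simple shifts and compositions, roughly as in asm/memory.h:

/* Physical address <-> page frame number. */
#define __phys_to_pfn(paddr)	((unsigned long)((paddr) >> PAGE_SHIFT))
#define __pfn_to_phys(pfn)	((phys_addr_t)(pfn) << PAGE_SHIFT)

/* struct page <-> physical address, composed from the pfn helpers. */
#define page_to_phys(page)	(__pfn_to_phys(page_to_pfn(page)))
#define phys_to_page(phys)	(pfn_to_page(__phys_to_pfn(phys)))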
/*
* arch/arm/include/asm/page.h
*
* Copyright (C) 1995-2003 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
/* PAGE_SHIFT determines the page size */
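With 4 KiB pages PAGE_SHIFT is 12, which is also where every "(1 << 12)" alignment expression in the script below comes from; roughly:

#define PAGE_SHIFT	12
#define PAGE_SIZE	(_AC(1, UL) << PAGE_SHIFT)	/* 4096 bytes */
#define PAGE_MASK	(~(PAGE_SIZE - 1))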
OUTPUT_ARCH(arm)
ENTRY(stext)
jiffies = jiffies_64;
SECTIONS
{
. = 0xC0000000 + 0x00008000;
.init : { /* Init code and data */
_stext = .;
_sinittext = .;
*(.head.text)
*(.init.text) *(.meminit.text)
*(.exit.text) *(.memexit.text)
_einittext = .;
__arch_info_begin = .;
*(.arch.info.init)
__arch_info_end = .;
__tagtable_begin = .;
*(.taglist.init)
__tagtable_end = .;
__smpalt_begin = .;
*(.alt.smp.init)
__smpalt_end = .;
__pv_table_begin = .;
*(.pv_table)
__pv_table_end = .;
. = ALIGN(16); __setup_start = .; *(.init.setup) __setup_end = .;
__initcall_start = .;
*(.initcallearly.init) __early_initcall_end = .;
*(.initcall0.init) *(.initcall0s.init)
*(.initcall1.init) *(.initcall1s.init)
*(.initcall2.init) *(.initcall2s.init)
*(.initcall3.init) *(.initcall3s.init)
*(.initcall4.init) *(.initcall4s.init)
*(.initcall5.init) *(.initcall5s.init)
*(.initcallrootfs.init)
*(.initcall6.init) *(.initcall6s.init)
*(.initcall7.init) *(.initcall7s.init)
__initcall_end = .;
__con_initcall_start = .; *(.con_initcall.init) __con_initcall_end = .;
__security_initcall_start = .; *(.security_initcall.init) __security_initcall_end = .;
. = ALIGN(4); __initramfs_start = .; *(.init.ramfs) . = ALIGN(8); *(.init.ramfs.info)
__init_begin = _stext;
*(.init.data) *(.meminit.data) *(.init.rodata) *(.meminit.rodata) . = ALIGN(32); __dtb_start = .; *(.dtb.init.rodata) __dtb_end = .;
*(.exit.data) *(.memexit.data) *(.memexit.rodata)
}
. = ALIGN((1 << 12));
.data..percpu : AT(ADDR(.data..percpu) - 0) {
__per_cpu_load = .;
__per_cpu_start = .;
*(.data..percpu..first)
. = ALIGN((1 << 12)); *(.data..percpu..page_aligned)
. = ALIGN(32); *(.data..percpu..readmostly)
. = ALIGN(32); *(.data..percpu) *(.data..percpu..shared_aligned)
__per_cpu_end = .;
}
. = ALIGN((1 << 12));
__init_end = .;
/*
* unwind exit sections must be discarded before the rest of the
* unwind sections get included.
*/
/DISCARD/ : {
*(.ARM.exidx.exit.text)
*(.ARM.extab.exit.text)
}
.text : { /* Real text segment */
_text = .; /* Text and read-only data */
__exception_text_start = .;
*(.exception.text)
__exception_text_end = .;
. = ALIGN(8); *(.text.hot) *(.text) *(.ref.text) *(.devinit.text) *(.devexit.text) *(.cpuinit.text) *(.cpuexit.text) *(.text.unlikely)
. = ALIGN(8); __sched_text_start = .; *(.sched.text) __sched_text_end = .;
. = ALIGN(8); __lock_text_start = .; *(.spinlock.text) __lock_text_end = .;
. = ALIGN(8); __kprobes_text_start = .; *(.kprobes.text) __kprobes_text_end = .;
*(.fixup)
*(.gnu.warning)
*(.rodata)
*(.rodata.*)
*(.glue_7)
*(.glue_7t)
. = ALIGN(4);
*(.got) /* Global offset table */
__proc_info_begin = .; *(.proc.info.init) __proc_info_end = .;
}
. = ALIGN(((1 << 12)));
.rodata : AT(ADDR(.rodata) - 0) {
__start_rodata = .;
*(.rodata) *(.rodata.*)
*(__vermagic)
. = ALIGN(8); __start___tracepoints_ptrs = .; *(__tracepoints_ptrs) __stop___tracepoints_ptrs = .;
*(__markers_strings)
*(__tracepoints_strings)
}
.rodata1 : AT(ADDR(.rodata1) - 0) { *(.rodata1) }
.pci_fixup : AT(ADDR(.pci_fixup) - 0) {
__start_pci_fixups_early = .; *(.pci_fixup_early) __end_pci_fixups_early = .;
__start_pci_fixups_header = .; *(.pci_fixup_header) __end_pci_fixups_header = .;
__start_pci_fixups_final = .; *(.pci_fixup_final) __end_pci_fixups_final = .;
__start_pci_fixups_enable = .; *(.pci_fixup_enable) __end_pci_fixups_enable = .;
__start_pci_fixups_resume = .; *(.pci_fixup_resume) __end_pci_fixups_resume = .;
__start_pci_fixups_resume_early = .; *(.pci_fixup_resume_early) __end_pci_fixups_resume_early = .;
__start_pci_fixups_suspend = .; *(.pci_fixup_suspend) __end_pci_fixups_suspend = .;
}
.builtin_fw : AT(ADDR(.builtin_fw) - 0) { __start_builtin_fw = .; *(.builtin_fw) __end_builtin_fw = .; }
.rio_ops : AT(ADDR(.rio_ops) - 0) { __start_rio_switch_ops = .; *(.rio_switch_ops) __end_rio_switch_ops = .; }
__ksymtab : AT(ADDR(__ksymtab) - 0) { __start___ksymtab = .; *(SORT(___ksymtab+*)) __stop___ksymtab = .; }
__ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - 0) { __start___ksymtab_gpl = .; *(SORT(___ksymtab_gpl+*)) __stop___ksymtab_gpl = .; }
__ksymtab_unused : AT(ADDR(__ksymtab_unused) - 0) { __start___ksymtab_unused = .; *(SORT(___ksymtab_unused+*)) __stop___ksymtab_unused = .; }
__ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - 0) { __start___ksymtab_unused_gpl = .; *(SORT(___ksymtab_unused_gpl+*)) __stop___ksymtab_unused_gpl = .; }
__ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - 0) { __start___ksymtab_gpl_future = .; *(SORT(___ksymtab_gpl_future+*)) __stop___ksymtab_gpl_future = .; }
__kcrctab : AT(ADDR(__kcrctab) - 0) { __start___kcrctab = .; *(SORT(___kcrctab+*)) __stop___kcrctab = .; }
__kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - 0) { __start___kcrctab_gpl = .; *(SORT(___kcrctab_gpl+*)) __stop___kcrctab_gpl = .; }
__kcrctab_unused : AT(ADDR(__kcrctab_unused) - 0) { __start___kcrctab_unused = .; *(SORT(___kcrctab_unused+*)) __stop___kcrctab_unused = .; }
__kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - 0) { __start___kcrctab_unused_gpl = .; *(SORT(___kcrctab_unused_gpl+*)) __stop___kcrctab_unused_gpl = .; }
__kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - 0) { __start___kcrctab_gpl_future = .; *(SORT(___kcrctab_gpl_future+*)) __stop___kcrctab_gpl_future = .; }
__ksymtab_strings : AT(ADDR(__ksymtab_strings) - 0) { *(__ksymtab_strings) }
__init_rodata : AT(ADDR(__init_rodata) - 0) { *(.ref.rodata) *(.devinit.rodata) *(.devexit.rodata) *(.cpuinit.rodata) *(.cpuexit.rodata) }
__param : AT(ADDR(__param) - 0) { __start___param = .; *(__param) __stop___param = .; }
__modver : AT(ADDR(__modver) - 0) {
__start___modver = .; *(__modver) __stop___modver = .;
. = ALIGN(((1 << 12)));
__end_rodata = .;
}
. = ALIGN(((1 << 12)));
_etext = .; /* End of text and rodata section */
. = ALIGN(8192);
__data_loc = .;
.data : AT(__data_loc) {
_data = .; /* address in memory */
_sdata = .;
/*
* first, the init task union, aligned
* to an 8192 byte boundary.
*/
. = ALIGN(8192); *(.data..init_task)
. = ALIGN((1 << 12)); __nosave_begin = .; *(.data..nosave) . = ALIGN((1 << 12)); __nosave_end = .;
. = ALIGN(32); *(.data..cacheline_aligned)
. = ALIGN(32); *(.data..read_mostly) . = ALIGN(32);
/*
* The exception fixup table (might need resorting at runtime)
*/
. = ALIGN(32);
__start___ex_table = .;
*(__ex_table)
__stop___ex_table = .;
/*
* and the usual data section
*/
*(.data) *(.ref.data) *(.data..shared_aligned)
*(.devinit.data) *(.devexit.data) *(.cpuinit.data) *(.cpuexit.data)
. = ALIGN(32); *(__tracepoints)
. = ALIGN(8); __start___jump_table = .; *(__jump_table) __stop___jump_table = .;
. = ALIGN(8); __start___verbose = .; *(__verbose) __stop___verbose = .;
CONSTRUCTORS
_edata = .;
}
_edata_loc = __data_loc + SIZEOF(.data);
.notes : AT(ADDR(.notes) - 0) { __start_notes = .; *(.note.*) __stop_notes = .; }
. = ALIGN(0); __bss_start = .;
. = ALIGN(0); .sbss : AT(ADDR(.sbss) - 0) { *(.sbss) *(.scommon) }
. = ALIGN(0); .bss : AT(ADDR(.bss) - 0) { *(.bss..page_aligned) *(.dynbss) *(.bss) *(COMMON) }
. = ALIGN(0); __bss_stop = .;
_end = .;
.stab 0 : { *(.stab) }
.stabstr 0 : { *(.stabstr) }
.stab.excl 0 : { *(.stab.excl) }
.stab.exclstr 0 : { *(.stab.exclstr) }
.stab.index 0 : { *(.stab.index) }
.stab.indexstr 0 : { *(.stab.indexstr) }
.comment 0 : { *(.comment) }
.comment 0 : { *(.comment) }
/* Default discards */
/DISCARD/ : { *(.exit.text) *(.memexit.text) *(.exit.data) *(.memexit.data) *(.memexit.rodata) *(.exitcall.exit) *(.discard) *(.discard.*) }
}
/*
* These must never be empty
* If you have to comment these two assert statements out, your
* binutils is too old (for other reasons as well)
*/
ASSERT((__proc_info_end - __proc_info_begin), "missing CPU support")
ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined")