/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Copyright (c) 2004-2008 Texas Instruments
 *
 * (C) Copyright 2002
 * Gary Jennejohn, DENX Software Engineering, <garyj@denx.de>
 */

/*
 * U-Boot linker script for 32-bit ARM. This file is run through the C
 * preprocessor (note the #include/#ifdef usage) before being handed to ld,
 * so CONFIG_* symbols select which sections are emitted.
 */

#include <config.h>
#include <asm/psci.h>

OUTPUT_FORMAT("elf32-littlearm", "elf32-littlearm", "elf32-littlearm")
OUTPUT_ARCH(arm)
ENTRY(_start)
SECTIONS
{
#ifndef CONFIG_CMDLINE
	/* Without a command line, drop all linker-list command entries */
	/DISCARD/ : { *(.u_boot_list_2_cmd_*) }
#endif
#if defined(CONFIG_ARMV7_SECURE_BASE) && defined(CONFIG_ARMV7_NONSEC)
	/*
	 * If CONFIG_ARMV7_SECURE_BASE is true, secure code will not
	 * bundle with u-boot, and code offsets are fixed. Secure zone
	 * only needs to be copied from the loading address to
	 * CONFIG_ARMV7_SECURE_BASE, which is the linking and running
	 * address for secure code.
	 *
	 * If CONFIG_ARMV7_SECURE_BASE is undefined, the secure zone will
	 * be included in u-boot address space, and some absolute address
	 * were used in secure code. The absolute addresses of the secure
	 * code also needs to be relocated along with the accompanying u-boot
	 * code.
	 *
	 * So DISCARD is only for CONFIG_ARMV7_SECURE_BASE.
	 */
	/DISCARD/ : { *(.rel._secure*) }
#endif
	. = 0x00000000;

	. = ALIGN(4);
	.text :
	{
		*(.__image_copy_start)
		*(.vectors)
		CPUDIR/start.o (.text*)
	}

	/* This needs to come before *(.text*) */
	.__efi_runtime_start : {
		*(.__efi_runtime_start)
	}

	.efi_runtime : {
		*(.text.efi_runtime*)
		*(.rodata.efi_runtime*)
		*(.data.efi_runtime*)
	}

	.__efi_runtime_stop : {
		*(.__efi_runtime_stop)
	}

	.text_rest :
	{
		*(.text*)
	}

#ifdef CONFIG_ARMV7_NONSEC

	/* Align the secure section only if we're going to use it in situ */
	.__secure_start :
#ifndef CONFIG_ARMV7_SECURE_BASE
		ALIGN(CONSTANT(COMMONPAGESIZE))
#endif
	{
		KEEP(*(.__secure_start))
	}

#ifndef CONFIG_ARMV7_SECURE_BASE
/* Stack is in RAM: link the secure zone at its current (in-image) address */
#define CONFIG_ARMV7_SECURE_BASE
#define __ARMV7_PSCI_STACK_IN_RAM
#endif

	/*
	 * VMA is the secure base; LMA stays contiguous with the image so the
	 * secure payload can be copied out at boot.
	 */
	.secure_text CONFIG_ARMV7_SECURE_BASE :
		AT(ADDR(.__secure_start) + SIZEOF(.__secure_start))
	{
		*(._secure.text)
	}

	.secure_data : AT(LOADADDR(.secure_text) + SIZEOF(.secure_text))
	{
		*(._secure.data)
	}

#ifdef CONFIG_ARMV7_PSCI
	.secure_stack ALIGN(ADDR(.secure_data) + SIZEOF(.secure_data),
			    CONSTANT(COMMONPAGESIZE)) (NOLOAD) :
#ifdef __ARMV7_PSCI_STACK_IN_RAM
		AT(ADDR(.secure_stack))
#else
		AT(LOADADDR(.secure_data) + SIZEOF(.secure_data))
#endif
	{
		KEEP(*(.__secure_stack_start))

		/* Skip addresses for stack */
		. = . + CONFIG_ARMV7_PSCI_NR_CPUS * ARM_PSCI_STACK_SIZE;

		/* Align end of stack section to page boundary */
		. = ALIGN(CONSTANT(COMMONPAGESIZE));

		KEEP(*(.__secure_stack_end))

#ifdef CONFIG_ARMV7_SECURE_MAX_SIZE
		/*
		 * We are not checking (__secure_end - __secure_start) here,
		 * as these are the load addresses, and do not include the
		 * stack section. Instead, use the end of the stack section
		 * and the start of the text section.
		 */
		ASSERT((. - ADDR(.secure_text)) <= CONFIG_ARMV7_SECURE_MAX_SIZE,
		       "Error: secure section exceeds secure memory size");
#endif
	}

#ifndef __ARMV7_PSCI_STACK_IN_RAM
	/* Reset VMA but don't allocate space if we have secure SRAM */
	. = LOADADDR(.secure_stack);
#endif

#endif

	.__secure_end : AT(ADDR(.__secure_end)) {
		*(.__secure_end)
		LONG(0x1d1071c); /* Must output something to reset LMA */
	}
#endif

	. = ALIGN(4);
	.rodata : { *(SORT_BY_ALIGNMENT(SORT_BY_NAME(.rodata*))) }

	. = ALIGN(4);
	.data : {
		*(.data*)
	}

	. = ALIGN(4);

	. = .;

	. = ALIGN(4);
	.u_boot_list : {
		KEEP(*(SORT(.u_boot_list*)));
	}

	. = ALIGN(4);

	.efi_runtime_rel_start :
	{
		*(.__efi_runtime_rel_start)
	}

	.efi_runtime_rel : {
		*(.rel*.efi_runtime)
		*(.rel*.efi_runtime.*)
	}

	.efi_runtime_rel_stop :
	{
		*(.__efi_runtime_rel_stop)
	}

	. = ALIGN(4);

	.image_copy_end :
	{
		*(.__image_copy_end)
	}

	.rel_dyn_start :
	{
		*(.__rel_dyn_start)
	}

	.rel.dyn : {
		*(.rel*)
	}

	.rel_dyn_end :
	{
		*(.__rel_dyn_end)
	}

	.end :
	{
		*(.__end)
	}

	_image_binary_end = .;

	/*
	 * Deprecated: this MMU section is used by pxa at present but
	 * should not be used by new boards/CPUs.
	 */
	. = ALIGN(4096);
	.mmutable : {
		*(.mmutable)
	}

/*
 * Compiler-generated __bss_start and __bss_end, see arch/arm/lib/bss.c
 * __bss_base and __bss_limit are for linker only (overlay ordering)
 */

	.bss_start __rel_dyn_start (OVERLAY) : {
		KEEP(*(.__bss_start));
		__bss_base = .;
	}

	.bss __bss_base (OVERLAY) : {
		*(.bss*)
		. = ALIGN(4);
		__bss_limit = .;
	}

	.bss_end __bss_limit (OVERLAY) : {
		KEEP(*(.__bss_end));
	}

	/* Dynamic-linking leftovers placed past the binary image proper */
	.dynsym _image_binary_end : { *(.dynsym) }
	.dynbss : { *(.dynbss) }
	.dynstr : { *(.dynstr*) }
	.dynamic : { *(.dynamic*) }
	.plt : { *(.plt*) }
	.interp : { *(.interp*) }
	.gnu.hash : { *(.gnu.hash) }
	.gnu : { *(.gnu*) }
	.ARM.exidx : { *(.ARM.exidx*) }
	.gnu.linkonce.armexidx : { *(.gnu.linkonce.armexidx.*) }
}