blob: 14b7f61c1a4bb4c5b222f1be0b1252f1ab4bdd85 [file] [log] [blame]
/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * relocate - common relocation function for ARM U-Boot
 *
 * Copyright (c) 2013 Albert ARIBAUD <albert.u.boot@aribaud.net>
 */
7
Georges Savoundararadj3ff46cc2014-10-28 23:16:11 +01008#include <asm-offsets.h>
Vikas Manochad22336a2018-08-31 16:57:06 -07009#include <asm/assembler.h>
Georges Savoundararadj3ff46cc2014-10-28 23:16:11 +010010#include <config.h>
Simon Glassc70f74a2016-11-07 08:47:09 -070011#include <elf.h>
Albert ARIBAUD3da0e572013-05-19 01:48:15 +000012#include <linux/linkage.h>
rev13@wp.pl12d8a722015-03-01 12:44:39 +010013#ifdef CONFIG_CPU_V7M
14#include <asm/armv7m.h>
15#endif
Albert ARIBAUD3da0e572013-05-19 01:48:15 +000016
17/*
Albert ARIBAUDdb544b92014-11-13 17:59:15 +010018 * Default/weak exception vectors relocation routine
19 *
20 * This routine covers the standard ARM cases: normal (0x00000000),
21 * high (0xffff0000) and VBAR. SoCs which do not comply with any of
22 * the standard cases must provide their own, strong, version.
23 */
24
25 .section .text.relocate_vectors,"ax",%progbits
26 .weak relocate_vectors
27
28ENTRY(relocate_vectors)
29
rev13@wp.pl12d8a722015-03-01 12:44:39 +010030#ifdef CONFIG_CPU_V7M
31 /*
32 * On ARMv7-M we only have to write the new vector address
33 * to VTOR register.
34 */
35 ldr r0, [r9, #GD_RELOCADDR] /* r0 = gd->relocaddr */
36 ldr r1, =V7M_SCB_BASE
37 str r0, [r1, V7M_SCB_VTOR]
38#else
Albert ARIBAUDdb544b92014-11-13 17:59:15 +010039#ifdef CONFIG_HAS_VBAR
40 /*
41 * If the ARM processor has the security extensions,
42 * use VBAR to relocate the exception vectors.
43 */
44 ldr r0, [r9, #GD_RELOCADDR] /* r0 = gd->relocaddr */
45 mcr p15, 0, r0, c12, c0, 0 /* Set VBAR */
46#else
47 /*
48 * Copy the relocated exception vectors to the
49 * correct address
50 * CP15 c1 V bit gives us the location of the vectors:
51 * 0x00000000 or 0xFFFF0000.
52 */
53 ldr r0, [r9, #GD_RELOCADDR] /* r0 = gd->relocaddr */
54 mrc p15, 0, r2, c1, c0, 0 /* V bit (bit[13]) in CP15 c1 */
55 ands r2, r2, #(1 << 13)
56 ldreq r1, =0x00000000 /* If V=0 */
57 ldrne r1, =0xFFFF0000 /* If V=1 */
58 ldmia r0!, {r2-r8,r10}
59 stmia r1!, {r2-r8,r10}
60 ldmia r0!, {r2-r8,r10}
61 stmia r1!, {r2-r8,r10}
62#endif
rev13@wp.pl12d8a722015-03-01 12:44:39 +010063#endif
Albert ARIBAUDdb544b92014-11-13 17:59:15 +010064 bx lr
65
66ENDPROC(relocate_vectors)
67
68/*
Albert ARIBAUD3da0e572013-05-19 01:48:15 +000069 * void relocate_code(addr_moni)
70 *
71 * This function relocates the monitor code.
72 *
73 * NOTE:
74 * To prevent the code below from containing references with an R_ARM_ABS32
75 * relocation record type, we never refer to linker-defined symbols directly.
76 * Instead, we declare literals which contain their relative location with
77 * respect to relocate_code, and at run time, add relocate_code back to them.
78 */
79
80ENTRY(relocate_code)
Chia-Wei Wangcd82f192021-08-03 10:50:10 +080081 adr r3, relocate_code
82 ldr r1, _image_copy_start_ofs
83 add r1, r3 /* r1 <- Run &__image_copy_start */
84 subs r4, r0, r1 /* r4 <- Run to copy offset */
85 beq relocate_done /* skip relocation */
86 ldr r1, _image_copy_start_ofs
87 add r1, r3 /* r1 <- Run &__image_copy_start */
88 ldr r2, _image_copy_end_ofs
89 add r2, r3 /* r2 <- Run &__image_copy_end */
Albert ARIBAUD3da0e572013-05-19 01:48:15 +000090copy_loop:
Chia-Wei Wangcd82f192021-08-03 10:50:10 +080091 ldmia r1!, {r10-r11} /* copy from source address [r1] */
92 stmia r0!, {r10-r11} /* copy to target address [r0] */
93 cmp r1, r2 /* until source end address [r2] */
Albert ARIBAUD3da0e572013-05-19 01:48:15 +000094 blo copy_loop
95
96 /*
97 * fix .rel.dyn relocations
98 */
Chia-Wei Wangcd82f192021-08-03 10:50:10 +080099 ldr r1, _rel_dyn_start_ofs
100 add r2, r1, r3 /* r2 <- Run &__rel_dyn_start */
101 ldr r1, _rel_dyn_end_ofs
102 add r3, r1, r3 /* r3 <- Run &__rel_dyn_end */
Albert ARIBAUD3da0e572013-05-19 01:48:15 +0000103fixloop:
Albert ARIBAUDfbf87b12013-06-11 14:17:35 +0200104 ldmia r2!, {r0-r1} /* (r0,r1) <- (SRC location,fixup) */
105 and r1, r1, #0xff
Simon Glassc70f74a2016-11-07 08:47:09 -0700106 cmp r1, #R_ARM_RELATIVE
Albert ARIBAUDfbf87b12013-06-11 14:17:35 +0200107 bne fixnext
108
Albert ARIBAUD3da0e572013-05-19 01:48:15 +0000109 /* relative fix: increase location by offset */
Jeroen Hofsteea81872f2013-09-21 14:04:40 +0200110 add r0, r0, r4
Albert ARIBAUD3da0e572013-05-19 01:48:15 +0000111 ldr r1, [r0]
Jeroen Hofsteea81872f2013-09-21 14:04:40 +0200112 add r1, r1, r4
Albert ARIBAUD3da0e572013-05-19 01:48:15 +0000113 str r1, [r0]
Albert ARIBAUDfbf87b12013-06-11 14:17:35 +0200114fixnext:
Albert ARIBAUD3da0e572013-05-19 01:48:15 +0000115 cmp r2, r3
116 blo fixloop
117
118relocate_done:
119
Mike Dunn9dc8fef2013-06-21 09:12:28 -0700120#ifdef __XSCALE__
121 /*
122 * On xscale, icache must be invalidated and write buffers drained,
123 * even with cache disabled - 4.2.7 of xscale core developer's manual
124 */
125 mcr p15, 0, r0, c7, c7, 0 /* invalidate icache */
126 mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
127#endif
128
Albert ARIBAUD3da0e572013-05-19 01:48:15 +0000129 /* ARMv4- don't know bx lr but the assembler fails to see that */
130
131#ifdef __ARM_ARCH_4__
Albert ARIBAUD28970ef2014-11-13 17:59:14 +0100132 mov pc, lr
Albert ARIBAUD3da0e572013-05-19 01:48:15 +0000133#else
Albert ARIBAUD28970ef2014-11-13 17:59:14 +0100134 bx lr
Albert ARIBAUD3da0e572013-05-19 01:48:15 +0000135#endif
136
Albert ARIBAUD3da0e572013-05-19 01:48:15 +0000137ENDPROC(relocate_code)
Chia-Wei Wangcd82f192021-08-03 10:50:10 +0800138
139_image_copy_start_ofs:
140 .word __image_copy_start - relocate_code
141_image_copy_end_ofs:
142 .word __image_copy_end - relocate_code
143_rel_dyn_start_ofs:
144 .word __rel_dyn_start - relocate_code
145_rel_dyn_end_ofs:
146 .word __rel_dyn_end - relocate_code