/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020 Stefan Roese <sr@denx.de>
 */

#include <config.h>
#include <asm-offsets.h>
#include <asm/cacheops.h>
#include <asm/regdef.h>
#include <asm/mipsregs.h>
#include <asm/addrspace.h>
#include <asm/asm.h>
#include <mach/octeon-model.h>

#define COP0_CVMCTL_REG		$9,7	/* Cavium control */
#define COP0_CVMMEMCTL_REG	$11,7	/* Cavium memory control */
#define COP0_PROC_ID_REG	$15,0
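
/*
 * The Cavium-specific COP0 registers above are addressed as
 * (register, select) pairs; CVMCTL and CVMMEMCTL are 64 bits wide
 * and are accessed with dmfc0/dmtc0 below.
 */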

	.set noreorder

LEAF(lowlevel_init)

	/* Set LMEMSZ in CVMMEMCTL register */
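	/*
	 * The low 9 bits of CVMMEMCTL are the three CVMSEG enable bits
	 * plus the 6-bit LMEMSZ field (scratch size in 128-byte cache
	 * lines). The 0x104 written below sets LMEMSZ=4 with CVMSEG
	 * enabled for kernel/debug mode (field layout as described by
	 * the octeon_cvmemctl definition in the Linux Octeon code).
	 */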
	dmfc0	a0, COP0_CVMMEMCTL_REG
	dins	a0, zero, 0, 9
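	/*
	 * The Core-14752 WBTHRESH workaround below is only needed on
	 * CN63XX pass 1.x: skip to 2: when PRId is below pass 1.0 or
	 * at/above pass 2.0. The ori in the bgt delay slot executes on
	 * every path, so the scratch lines are always configured.
	 */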
	mfc0	a4, COP0_PROC_ID_REG
	li	a5, OCTEON_CN63XX_PASS1_0	/* Octeon cn63xx pass1 chip id */
	bgt	a5, a4, 2f
	 ori	a0, 0x104	/* set up 4 lines of scratch */
	ori	a6, a5, 8	/* Octeon cn63xx pass2 chip id */
	bge	a4, a6, 2f
	 nop
	li	a6, 4
	ins	a0, a6, 11, 4	/* Set WBTHRESH=4 as per Core-14752 errata */
2:
	dmtc0	a0, COP0_CVMMEMCTL_REG

	/* Set REPUN bit in CVMCTL register */
	dmfc0	a0, COP0_CVMCTL_REG
	ori	a0, 1 << 14	/* enable fixup of unaligned mem access */
	dmtc0	a0, COP0_CVMCTL_REG

	jr	ra
	 nop
	END(lowlevel_init)

LEAF(mips_mach_early_init)

	move	s0, ra

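	/*
	 * bal is PC-relative, so ra receives the address __dummy is
	 * actually executing from, regardless of the link address.
	 */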
	bal	__dummy
	 nop

__dummy:
	/* Get the actual address that we are running at */
	PTR_LA	a7, __dummy
	dsubu	t3, ra, a7	/* t3 now has the relocation offset */

	PTR_LA	t1, _start
	daddu	t0, t1, t3	/* t0 now has the actual address of _start */

	/* Calculate the end address of the copy loop */
	PTR_LA	t2, _end
	daddiu	t2, t2, 0x4000	/* Increase size to include the appended DTB */
	daddiu	t2, t2, 127
	ins	t2, zero, 0, 7	/* Round up to a full cache line for the copy loop */
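	/*
	 * 128 bytes is the Octeon L2 line size; since it is also a
	 * multiple of the 32 bytes copied per loop iteration, the bne
	 * termination check below matches exactly (assuming _start
	 * itself is at least 32-byte aligned).
	 */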

	/* Copy ourselves from flash to the L2 cache, 32 bytes at a time */
1:
	ld	a0, 0(t0)
	ld	a1, 8(t0)
	ld	a2, 16(t0)
	ld	a3, 24(t0)
	sd	a0, 0(t1)
	sd	a1, 8(t1)
	sd	a2, 16(t1)
	sd	a3, 24(t1)
	addiu	t0, 32
	addiu	t1, 32
	bne	t1, t2, 1b
	 nop

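	/* Make sure all the copied data has been written out before jumping */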
	sync

	/*
	 * Return to start.S, now running from TEXT_BASE, which points
	 * to the DRAM address space that is effectively backed by the
	 * L2 cache at this point. This speeds up the init process
	 * considerably, especially the DDR init code.
	 */
	dsubu	s0, s0, t3	/* Fix up the return address with the reloc offset */
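	/*
	 * jr.hb clears instruction hazards as well, so the fetch after
	 * the jump sees the freshly copied instructions.
	 */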
	jr.hb	s0		/* Jump back with hazard barrier */
	 nop

	END(mips_mach_early_init)

LEAF(nmi_bootvector)

	/*
	 * From the original Marvell bootvector setup
	 */
	mfc0	k0, CP0_STATUS
	/* Enable 64-bit addressing, set ERL (should already be set) */
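	/* 0x84 = KX (bit 7, 64-bit kernel addressing) | ERL (bit 2) */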
	ori	k0, 0x84
	mtc0	k0, CP0_STATUS
	/* Core-14345: clear L1 Dcache virtual tags if the core hit an NMI */
	cache	17, 0($0)

	/*
	 * Needed for Linux kernel booting, otherwise it hangs while
	 * zeroing all of CVMSEG
	 */
	dmfc0	a0, COP0_CVMMEMCTL_REG
	dins	a0, zero, 0, 9
	ori	a0, 0x104	/* set up 4 lines of scratch */
	dmtc0	a0, COP0_CVMMEMCTL_REG

	/*
	 * Load parameters and entry point
	 */
	PTR_LA	t9, nmi_handler_para
	sync
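	/*
	 * nmi_handler_para is presumably filled in at run time by the
	 * code arming this handler: one dword for the entry point,
	 * followed by four dwords that become arguments a0-a3.
	 */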

	ld	s0, 0x00(t9)
	ld	a0, 0x08(t9)
	ld	a1, 0x10(t9)
	ld	a2, 0x18(t9)
	ld	a3, 0x20(t9)

	/* Finally jump to the entry point (start kernel etc) */
	j	s0
	 nop

	END(nmi_bootvector)

	/*
	 * Reserve some space here for the NMI parameters
	 * (entry point and args)
	 */
	.globl nmi_handler_para
nmi_handler_para:
	.dword	0	// entry-point
	.dword	0	// arg0
	.dword	0	// arg1
	.dword	0	// arg2
	.dword	0	// arg3