/*
 * (C) Copyright 2004, Psyent Corporation <www.psyent.com>
 * Scott McNutt <smcnutt@psyent.com>
 *
 * See file CREDITS for list of people who contributed to this
 * project.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 * MA 02111-1307 USA
 */

#include <asm-offsets.h>
#include <config.h>
#include <version.h>

/*************************************************************************
 * RESTART
 ************************************************************************/

	.text
	.global _start

_start:
	wrctl	status, r0		/* Disable interrupts */
	/* ICACHE INIT -- only the icache line at the reset address
	 * is invalidated at reset, so this init code must stay within
	 * one cache line (8 words). If GERMS is used, we'll just be
	 * invalidating the cache a second time. If no icache is
	 * implemented, initi behaves as a nop.
	 */
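	/* r4 holds the line size, r5 the total icache size; initi
	 * walks r5 down one line per pass, so every line from the
	 * top of the cache down to (but not including) address 0 is
	 * invalidated -- the line at 0 was already handled by reset.
	 */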
	ori	r4, r0, %lo(CONFIG_SYS_ICACHELINE_SIZE)
	movhi	r5, %hi(CONFIG_SYS_ICACHE_SIZE)
	ori	r5, r5, %lo(CONFIG_SYS_ICACHE_SIZE)
0:	initi	r5
	sub	r5, r5, r4
	bgt	r5, r0, 0b
	br	_except_end	/* Skip the tramp */

	/* EXCEPTION TRAMPOLINE -- the following gets copied
	 * to the exception address (below), but is otherwise at the
	 * default exception vector offset (0x0020).
	 */
_except_start:
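	/* et (r24) is the exception temporary register in the Nios II
	 * ABI, so it is safe to clobber on the way into the handler.
	 */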
	movhi	et, %hi(_exception)
	ori	et, et, %lo(_exception)
	jmp	et
_except_end:

	/* INTERRUPTS -- for now, all interrupts masked and globally
	 * disabled.
	 */
	wrctl	ienable, r0		/* All disabled	*/

	/* DCACHE INIT -- if no dcache is implemented, initd behaves
	 * as a nop.
	 */
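	/* r4 = line size, r5 = total dcache size; r6 walks from
	 * address 0 up in line-size steps, initializing each line.
	 */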
	movhi	r4, %hi(CONFIG_SYS_DCACHELINE_SIZE)
	ori	r4, r4, %lo(CONFIG_SYS_DCACHELINE_SIZE)
	movhi	r5, %hi(CONFIG_SYS_DCACHE_SIZE)
	ori	r5, r5, %lo(CONFIG_SYS_DCACHE_SIZE)
	mov	r6, r0
1:	initd	0(r6)
	add	r6, r6, r4
	bltu	r6, r5, 1b

	/* RELOCATE CODE, DATA & COMMAND TABLE -- the following code
	 * assumes code, data and the command table are all
	 * contiguous. This lets us relocate everything as a single
	 * block. Make sure the linker script matches this ;-)
	 */
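	/* nextpc gives the runtime address of _cur; subtracting the
	 * link-time offset (_cur - _start) leaves the runtime address
	 * of _start in r4 (saved in r8). r5 is the link-time _start,
	 * r6 the end of the copy (_edata). If we are already running
	 * at the link address, the copy is skipped.
	 */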
	nextpc	r4
_cur:	movhi	r5, %hi(_cur - _start)
	ori	r5, r5, %lo(_cur - _start)
	sub	r4, r4, r5		/* r4 <- cur _start */
	mov	r8, r4
	movhi	r5, %hi(_start)
	ori	r5, r5, %lo(_start)	/* r5 <- linked _start */
	beq	r4, r5, 3f

	movhi	r6, %hi(_edata)
	ori	r6, r6, %lo(_edata)
2:	ldwio	r7, 0(r4)
	addi	r4, r4, 4
	stwio	r7, 0(r5)
	addi	r5, r5, 4
	bne	r5, r6, 2b
3:

	/* ZERO BSS/SBSS -- bss and sbss are assumed to be adjacent
	 * and between __bss_start and __bss_end__.
	 */
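	/* r5 walks from __bss_start to __bss_end__, clearing one
	 * word per pass.
	 */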
	movhi	r5, %hi(__bss_start)
	ori	r5, r5, %lo(__bss_start)
	movhi	r6, %hi(__bss_end__)
	ori	r6, r6, %lo(__bss_end__)
	beq	r5, r6, 5f

4:	stwio	r0, 0(r5)
	addi	r5, r5, 4
	bne	r5, r6, 4b
5:

	/* JUMP TO RELOC ADDR -- continue execution from the copy at
	 * the link address.
	 */
	movhi	r4, %hi(_reloc)
	ori	r4, r4, %lo(_reloc)
	jmp	r4
_reloc:

	/* COPY EXCEPTION TRAMPOLINE -- copy the tramp to the
	 * exception address. Define CONFIG_ROM_STUBS to prevent
	 * the copy (e.g. exception handler in flash or in another
	 * software/firmware component).
	 */
#if !defined(CONFIG_ROM_STUBS)
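	/* r4/r5 bound the trampoline in the relocated image; r6 is
	 * the destination exception address.
	 */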
	movhi	r4, %hi(_except_start)
	ori	r4, r4, %lo(_except_start)
	movhi	r5, %hi(_except_end)
	ori	r5, r5, %lo(_except_end)
	movhi	r6, %hi(CONFIG_SYS_EXCEPTION_ADDR)
	ori	r6, r6, %lo(CONFIG_SYS_EXCEPTION_ADDR)
	beq	r4, r6, 7f	/* Skip if at proper addr */

6:	ldwio	r7, 0(r4)
	stwio	r7, 0(r6)
	addi	r4, r4, 4
	addi	r6, r6, 4
	bne	r4, r5, 6b
7:
#endif

	/* STACK INIT -- zero the top two words to terminate the
	 * call back chain.
	 */
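	/* sp and fp both start just below CONFIG_SYS_INIT_SP; the two
	 * zeroed words give early C code a terminated frame chain.
	 */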
	movhi	sp, %hi(CONFIG_SYS_INIT_SP)
	ori	sp, sp, %lo(CONFIG_SYS_INIT_SP)
	addi	sp, sp, -8
	stw	r0, 0(sp)
	stw	r0, 4(sp)
	mov	fp, sp

	/*
	 * Call board_init -- never returns
	 */
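	/* The call goes through a register (callr), presumably so it
	 * works no matter where board_init ends up relative to this
	 * code.
	 */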
	movhi	r4, %hi(board_init@h)
	ori	r4, r4, %lo(board_init@h)
	callr	r4

	/* NEVER RETURNS -- but branch back to _start just
	 * in case ;-)
	 */
	br	_start


/*
 * dly_clks -- Nios2 (like Nios1) doesn't have a timebase in
 * the core. For simple delay loops, we do our best by counting
 * instruction cycles.
 *
 * Instruction performance varies based on the core. For cores
 * with icache and static/dynamic branch prediction (II/f, II/s):
 *
 *	Normal ALU (e.g. add, cmp, etc):	1 cycle
 *	Branch (correctly predicted, taken):	2 cycles
 *	A negative branch offset is statically predicted taken (II/s).
 *
 * For cores with no icache and no branch prediction (II/e):
 *
 *	Normal ALU (e.g. add, cmp, etc):	6 cycles
 *	Branch (no prediction):			6 cycles
 *
 * For simplicity, if an instruction cache is implemented we
 * assume II/f or II/s. Otherwise, we assume II/e.
 *
 */
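/*
 * r4 = number of clocks to burn. Each pass through the loop below
 * costs roughly 3 clocks on cached cores and 12 on a II/e, so that
 * amount is subtracted per iteration until r4 goes negative.
 *
 * Example (assuming a 100 MHz cpu clock): calling dly_clks with
 * r4 = 100000 delays for roughly 1 ms.
 */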
	.globl dly_clks

dly_clks:

#if (CONFIG_SYS_ICACHE_SIZE > 0)
	subi	r4, r4, 3		/* 3 clocks/loop	*/
#else
	subi	r4, r4, 12		/* 12 clocks/loop	*/
#endif
	bge	r4, r0, dly_clks
	ret

	.data
	.globl	version_string

version_string:
	.ascii U_BOOT_VERSION_STRING, "\0"
203