/*
 * (C) Copyright 2004, Psyent Corporation <www.psyent.com>
 * Scott McNutt <smcnutt@psyent.com>
 *
 * See file CREDITS for list of people who contributed to this
 * project.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 * MA 02111-1307 USA
 */

#include <asm-offsets.h>
#include <config.h>
#include <timestamp.h>
#include <version.h>

/*************************************************************************
 * RESTART
 ************************************************************************/

	.text
	.global _start

_start:
	wrctl	status, r0		/* Disable interrupts */
	/* ICACHE INIT -- only the icache line at the reset address
	 * is invalidated at reset. So the init must stay within
	 * the cache line size (8 words). If GERMS is used, we'll
	 * just be invalidating the cache a second time. If the cache
	 * is not implemented, initi behaves as a nop.
	 */
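	/* The loop below steps r5 down from CONFIG_SYS_ICACHE_SIZE to
	 * zero, one cache line at a time; initi picks the line to
	 * invalidate by address index alone (no tag compare), so every
	 * line is hit once. Note the eight instructions from _start
	 * through the br are exactly the 8 words allowed at reset.
	 */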
	ori	r4, r0, %lo(CONFIG_SYS_ICACHELINE_SIZE)
	movhi	r5, %hi(CONFIG_SYS_ICACHE_SIZE)
	ori	r5, r5, %lo(CONFIG_SYS_ICACHE_SIZE)
0:	initi	r5
	sub	r5, r5, r4
	bgt	r5, r0, 0b
	br	_except_end	/* Skip the tramp */

	/* EXCEPTION TRAMPOLINE -- the following gets copied
	 * to the exception address (below), but is otherwise at the
	 * default exception vector offset (0x0020).
	 */
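	/* The trampoline clobbers only et (r24), which the Nios2 ABI
	 * reserves as the exception temporary, so no user state needs
	 * to be saved before the jump to _exception.
	 */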
_except_start:
	movhi	et, %hi(_exception)
	ori	et, et, %lo(_exception)
	jmp	et
_except_end:

	/* INTERRUPTS -- for now, all interrupts masked and globally
	 * disabled.
	 */
	wrctl	ienable, r0		/* All disabled	*/

	/* DCACHE INIT -- if the dcache is not implemented, initd
	 * behaves as a nop.
	 */
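	/* Unlike flushd, initd invalidates the indexed line without
	 * writing back dirty data -- fine here since the dcache holds
	 * nothing valid yet. The loop walks addresses 0 up to
	 * CONFIG_SYS_DCACHE_SIZE in line-size steps to cover all lines.
	 */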
	movhi	r4, %hi(CONFIG_SYS_DCACHELINE_SIZE)
	ori	r4, r4, %lo(CONFIG_SYS_DCACHELINE_SIZE)
	movhi	r5, %hi(CONFIG_SYS_DCACHE_SIZE)
	ori	r5, r5, %lo(CONFIG_SYS_DCACHE_SIZE)
	mov	r6, r0
1:	initd	0(r6)
	add	r6, r6, r4
	bltu	r6, r5, 1b

	/* RELOCATE CODE, DATA & COMMAND TABLE -- the following code
	 * assumes code, data and the command table are all
	 * contiguous. This lets us relocate everything as a single
	 * block. Make sure the linker script matches this ;-)
	 */
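	/* nextpc yields the run-time address of the instruction that
	 * follows it (i.e. _cur), so subtracting the link-time offset
	 * _cur - _start gives the address we actually booted from
	 * (kept in r4 and saved in r8). If that already equals the
	 * linked _start, the word-by-word copy below is skipped.
	 */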
	nextpc	r4
_cur:	movhi	r5, %hi(_cur - _start)
	ori	r5, r5, %lo(_cur - _start)
	sub	r4, r4, r5		/* r4 <- cur _start */
	mov	r8, r4
	movhi	r5, %hi(_start)
	ori	r5, r5, %lo(_start)	/* r5 <- linked _start */
	beq	r4, r5, 3f

	movhi	r6, %hi(_edata)
	ori	r6, r6, %lo(_edata)
2:	ldwio	r7, 0(r4)
	addi	r4, r4, 4
	stwio	r7, 0(r5)
	addi	r5, r5, 4
	bne	r5, r6, 2b
3:

	/* ZERO BSS/SBSS -- bss and sbss are assumed to be adjacent
	 * and between __bss_start and __bss_end__.
	 */
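	/* stwio stores bypass the dcache, so the zeroes land in memory
	 * regardless of cache state. __bss_start and __bss_end__ come
	 * from the linker script and are assumed to be word-aligned.
	 */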
	movhi	r5, %hi(__bss_start)
	ori	r5, r5, %lo(__bss_start)
	movhi	r6, %hi(__bss_end__)
	ori	r6, r6, %lo(__bss_end__)
	beq	r5, r6, 5f

4:	stwio	r0, 0(r5)
	addi	r5, r5, 4
	bne	r5, r6, 4b
5:

	/* JUMP TO RELOC ADDR */
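	/* Until now we may still be executing from the boot copy; the
	 * absolute jmp below moves the PC into the relocated image so
	 * that subsequent absolute references (such as the trampoline
	 * copy) resolve against the linked addresses.
	 */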
	movhi	r4, %hi(_reloc)
	ori	r4, r4, %lo(_reloc)
	jmp	r4
_reloc:

	/* COPY EXCEPTION TRAMPOLINE -- copy the tramp to the
	 * exception address. Define CONFIG_ROM_STUBS to prevent
	 * the copy (e.g. exception handled in flash or in another
	 * software/firmware component).
	 */
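	/* CONFIG_SYS_EXCEPTION_ADDR is the exception vector the CPU was
	 * generated with; if the image is linked so that _except_start
	 * already sits at that address, the copy is skipped.
	 */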
#if !defined(CONFIG_ROM_STUBS)
	movhi	r4, %hi(_except_start)
	ori	r4, r4, %lo(_except_start)
	movhi	r5, %hi(_except_end)
	ori	r5, r5, %lo(_except_end)
	movhi	r6, %hi(CONFIG_SYS_EXCEPTION_ADDR)
	ori	r6, r6, %lo(CONFIG_SYS_EXCEPTION_ADDR)
	beq	r4, r6, 7f	/* Skip if at proper addr */

6:	ldwio	r7, 0(r4)
	stwio	r7, 0(r6)
	addi	r4, r4, 4
	addi	r6, r6, 4
	bne	r4, r5, 6b
7:
#endif

	/* STACK INIT -- zero top two words for call back chain.
	 */
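	/* The two zeroed words give frame/back-trace walkers a NULL
	 * terminator. CONFIG_SYS_INIT_SP must point at memory usable
	 * before main RAM is set up (typically on-chip memory).
	 */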
	movhi	sp, %hi(CONFIG_SYS_INIT_SP)
	ori	sp, sp, %lo(CONFIG_SYS_INIT_SP)
	addi	sp, sp, -8
	stw	r0, 0(sp)
	stw	r0, 4(sp)
	mov	fp, sp

	/*
	 * Call board_init -- never returns
	 */
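	/* The target address is materialized in r4 and reached with
	 * callr, so the call is not limited to the 256 MB segment a
	 * direct call instruction can address. board_init() is the
	 * C-side entry point and never returns here.
	 */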
	movhi	r4, %hi(board_init@h)
	ori	r4, r4, %lo(board_init@h)
	callr	r4

	/* NEVER RETURNS -- but branch to the _start just
	 * in case ;-)
	 */
	br	_start


/*
 * dly_clks -- Nios2 (like Nios1) doesn't have a timebase in
 * the core. For simple delay loops, we do our best by counting
 * instruction cycles.
 *
 * Instruction performance varies based on the core. For cores
 * with icache and static/dynamic branch prediction (II/f, II/s):
 *
 *	Normal ALU (e.g. add, cmp, etc):	1 cycle
 *	Branch (correctly predicted, taken):	2 cycles
 *	A negative branch offset is predicted taken (II/s).
 *
 * For cores without icache and no branch prediction (II/e):
 *
 *	Normal ALU (e.g. add, cmp, etc):	6 cycles
 *	Branch (no prediction):			6 cycles
 *
 * For simplicity, if an instruction cache is implemented we
 * assume II/f or II/s timing; otherwise we assume II/e timing.
 *
 */
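/*
 * Each pass of the loop below costs one ALU op (subi) plus one taken
 * branch (bge): 1 + 2 = 3 clocks with icache/prediction, 6 + 6 = 12
 * clocks without, matching the per-loop constants used below. The
 * delay is passed in clocks in r4 (the first argument register);
 * for example, assuming a 100 MHz core clock, a caller wanting
 * roughly 10 microseconds would pass 10 * 100 = 1000.
 */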
	.globl dly_clks

dly_clks:

#if (CONFIG_SYS_ICACHE_SIZE > 0)
	subi	r4, r4, 3		/* 3 clocks/loop	*/
#else
	subi	r4, r4, 12		/* 12 clocks/loop	*/
#endif
	bge	r4, r0, dly_clks
	ret


#if !defined(CONFIG_IDENT_STRING)
#define CONFIG_IDENT_STRING ""
#endif
	.data
	.globl	version_string

version_string:
	.ascii U_BOOT_VERSION
	.ascii " (", U_BOOT_DATE, " - ", U_BOOT_TIME, ")"
	.ascii CONFIG_IDENT_STRING, "\0"
