xref: /openbmc/u-boot/arch/mips/lib/cache_init.S (revision 4c0411eb)
1/* SPDX-License-Identifier: GPL-2.0+ */
2/*
 *  Cache-handling routines for MIPS CPUs
4 *
5 *  Copyright (c) 2003	Wolfgang Denk <wd@denx.de>
6 */
7
8#include <asm-offsets.h>
9#include <config.h>
10#include <asm/asm.h>
11#include <asm/regdef.h>
12#include <asm/mipsregs.h>
13#include <asm/addrspace.h>
14#include <asm/cacheops.h>
15#include <asm/cm.h>
16
/*
 * f_fill64 - store \val to 64 bytes of memory at \offset(\dst)
 *
 * Emits 8 LONG_S stores on 64-bit builds, or 16 when LONGSIZE == 4
 * (32-bit builds), so that exactly 64 bytes are written either way.
 * \dst is left unmodified; \offset may be negative (the caller below
 * post-increments \dst by 64 and passes -64).
 */
	.macro	f_fill64 dst, offset, val
	LONG_S	\val, (\offset +  0 * LONGSIZE)(\dst)
	LONG_S	\val, (\offset +  1 * LONGSIZE)(\dst)
	LONG_S	\val, (\offset +  2 * LONGSIZE)(\dst)
	LONG_S	\val, (\offset +  3 * LONGSIZE)(\dst)
	LONG_S	\val, (\offset +  4 * LONGSIZE)(\dst)
	LONG_S	\val, (\offset +  5 * LONGSIZE)(\dst)
	LONG_S	\val, (\offset +  6 * LONGSIZE)(\dst)
	LONG_S	\val, (\offset +  7 * LONGSIZE)(\dst)
#if LONGSIZE == 4
	/* LONG_S stores only 4 bytes here, so 8 more stores reach 64 bytes */
	LONG_S	\val, (\offset +  8 * LONGSIZE)(\dst)
	LONG_S	\val, (\offset +  9 * LONGSIZE)(\dst)
	LONG_S	\val, (\offset + 10 * LONGSIZE)(\dst)
	LONG_S	\val, (\offset + 11 * LONGSIZE)(\dst)
	LONG_S	\val, (\offset + 12 * LONGSIZE)(\dst)
	LONG_S	\val, (\offset + 13 * LONGSIZE)(\dst)
	LONG_S	\val, (\offset + 14 * LONGSIZE)(\dst)
	LONG_S	\val, (\offset + 15 * LONGSIZE)(\dst)
#endif
	.endm
37
/*
 * cache_loop - apply cache operation \op to every line in [\curr, \end)
 *
 * Steps \curr forward by \line_sz per iteration; on exit \curr == \end
 * (the register is consumed). \end - \curr must be a non-zero multiple
 * of \line_sz or the bne termination test will never match.
 */
	.macro cache_loop	curr, end, line_sz, op
10:	cache		\op, 0(\curr)
	PTR_ADDU	\curr, \curr, \line_sz
	bne		\curr, \end, 10b
	.endm
43
/*
 * l1_info - probe an L1 cache's geometry from the CP0 Config1 register
 *
 * \sz:      output - total cache size in bytes (0 if no cache present)
 * \line_sz: output - cache line size in bytes (0 if no cache present)
 * \off:     MIPS_CONF1_IA_SHF to probe the I-cache, or MIPS_CONF1_DA_SHF
 *           for the D-cache. The shift amounts below are written relative
 *           to the D-side field positions so that the one macro serves
 *           both caches via \off.
 *
 * Clobbers $1 ($at), hence the noat region.
 */
	.macro	l1_info		sz, line_sz, off
	.set	push
	.set	noat

	mfc0	$1, CP0_CONFIG, 1

	/* detect line size: line_sz = 2 << IL/DL field, 0 => no cache */
	srl	\line_sz, $1, \off + MIPS_CONF1_DL_SHF - MIPS_CONF1_DA_SHF
	andi	\line_sz, \line_sz, (MIPS_CONF1_DL >> MIPS_CONF1_DL_SHF)
	move	\sz, zero		/* default: no cache => sz = 0 */
	beqz	\line_sz, 10f
	li	\sz, 2
	sllv	\line_sz, \sz, \line_sz

	/* detect associativity: ways = IA/DA field + 1 */
	srl	\sz, $1, \off + MIPS_CONF1_DA_SHF - MIPS_CONF1_DA_SHF
	andi	\sz, \sz, (MIPS_CONF1_DA >> MIPS_CONF1_DA_SHF)
	addiu	\sz, \sz, 1

	/* sz *= line_sz */
	mul	\sz, \sz, \line_sz

	/* detect log32(sets) */
	srl	$1, $1, \off + MIPS_CONF1_DS_SHF - MIPS_CONF1_DA_SHF
	andi	$1, $1, (MIPS_CONF1_DS >> MIPS_CONF1_DS_SHF)
	addiu	$1, $1, 1
	andi	$1, $1, 0x7

	/* sz <<= log32(sets) */
	sllv	\sz, \sz, $1

	/* sz *= 32 */
	li	$1, 32
	mul	\sz, \sz, $1
10:
	.set	pop
	.endm
81
82/*
83 * mips_cache_reset - low level initialisation of the primary caches
84 *
85 * This routine initialises the primary caches to ensure that they have good
86 * parity.  It must be called by the ROM before any cached locations are used
87 * to prevent the possibility of data with bad parity being written to memory.
88 *
89 * To initialise the instruction cache it is essential that a source of data
90 * with good parity is available. This routine will initialise an area of
91 * memory starting at location zero to be used as a source of parity.
92 *
93 * Note that this function does not follow the standard calling convention &
94 * may clobber typically callee-saved registers.
95 *
96 * RETURNS: N/A
97 *
98 */
/* Register roles - values that must survive the whole routine */
#define R_RETURN	s0	/* saved return address */
#define R_IC_SIZE	s1	/* I-cache total size in bytes */
#define R_IC_LINE	s2	/* I-cache line size in bytes */
#define R_DC_SIZE	s3	/* D-cache total size in bytes */
#define R_DC_LINE	s4	/* D-cache line size in bytes */
#define R_L2_SIZE	s5	/* L2 total size in bytes (0 if absent) */
#define R_L2_LINE	s6	/* L2 line size in bytes (0 if absent) */
#define R_L2_BYPASSED	s7	/* non-zero if the L2 was placed in bypass */
#define R_L2_L2C	t8	/* non-zero if L2 config came via Config5.L2C */
LEAF(mips_cache_reset)
	/* save ra: change_k0_cca is invoked via jalr below, clobbering ra */
	move	R_RETURN, ra

#ifdef CONFIG_MIPS_L2_CACHE
	/*
	 * For there to be an L2 present, Config2 must be present. If it isn't
	 * then we proceed knowing there's no L2 cache.
	 */
	move	R_L2_SIZE, zero
	move	R_L2_LINE, zero
	move	R_L2_BYPASSED, zero
	move	R_L2_L2C, zero
	mfc0	t0, CP0_CONFIG, 1
	bgez	t0, l2_probe_done	/* Config1.M clear: no Config2, no L2 */

	/*
	 * From MIPSr6 onwards the L2 cache configuration might not be reported
	 * by Config2. The Config5.L2C bit indicates whether this is the case,
	 * and if it is then we need knowledge of where else to look. For cores
	 * from Imagination Technologies this is a CM GCR.
	 */
# if __mips_isa_rev >= 6
	/* Check that Config5 exists, by walking the Config2..4 M bits */
	mfc0	t0, CP0_CONFIG, 2
	bgez	t0, l2_probe_cop0
	mfc0	t0, CP0_CONFIG, 3
	bgez	t0, l2_probe_cop0
	mfc0	t0, CP0_CONFIG, 4
	bgez	t0, l2_probe_cop0

	/* Check Config5.L2C is set */
	mfc0	t0, CP0_CONFIG, 5
	and	R_L2_L2C, t0, MIPS_CONF5_L2C
	beqz	R_L2_L2C, l2_probe_cop0

	/* Config5.L2C is set */
#  ifdef CONFIG_MIPS_CM
	/* The CM will provide L2 configuration */
	PTR_LI	t0, CKSEG1ADDR(CONFIG_MIPS_CM_BASE)
	lw	t1, GCR_L2_CONFIG(t0)
	/* top bit set: presumably no usable L2 config - verify vs CM spec */
	bgez	t1, l2_probe_done

	/* line size = 2 << LINESZ field; field == 0 => no L2 present */
	ext	R_L2_LINE, t1, \
		GCR_L2_CONFIG_LINESZ_SHIFT, GCR_L2_CONFIG_LINESZ_BITS
	beqz	R_L2_LINE, l2_probe_done
	li	t2, 2
	sllv	R_L2_LINE, t2, R_L2_LINE

	/* size = line size * (ASSOC field + 1) ways */
	ext	t2, t1, GCR_L2_CONFIG_ASSOC_SHIFT, GCR_L2_CONFIG_ASSOC_BITS
	addiu	t2, t2, 1
	mul	R_L2_SIZE, R_L2_LINE, t2

	/* size <<= SETSZ field, then scaled by the 64-set granule */
	ext	t2, t1, GCR_L2_CONFIG_SETSZ_SHIFT, GCR_L2_CONFIG_SETSZ_BITS
	sllv	R_L2_SIZE, R_L2_SIZE, t2
	li	t2, 64
	mul	R_L2_SIZE, R_L2_SIZE, t2

	/* Bypass the L2 cache so that we can init the L1s early */
	or	t1, t1, GCR_L2_CONFIG_BYPASS
	sw	t1, GCR_L2_CONFIG(t0)
	sync
	li	R_L2_BYPASSED, 1

	/* Zero the L2 tag registers */
	sw	zero, GCR_L2_TAG_ADDR(t0)
	sw	zero, GCR_L2_TAG_ADDR_UPPER(t0)
	sw	zero, GCR_L2_TAG_STATE(t0)
	sw	zero, GCR_L2_TAG_STATE_UPPER(t0)
	sw	zero, GCR_L2_DATA(t0)
	sw	zero, GCR_L2_DATA_UPPER(t0)
	sync
#  else
	/* We don't know how to retrieve L2 configuration on this system */
#  endif
	b	l2_probe_done
# endif

	/*
	 * For pre-r6 systems, or r6 systems with Config5.L2C==0, probe the L2
	 * cache configuration from the cop0 Config2 register.
	 */
l2_probe_cop0:
	mfc0	t0, CP0_CONFIG, 2

	/* line size = 2 << Config2.SL; SL == 0 => no L2 present */
	srl	R_L2_LINE, t0, MIPS_CONF2_SL_SHF
	andi	R_L2_LINE, R_L2_LINE, MIPS_CONF2_SL >> MIPS_CONF2_SL_SHF
	beqz	R_L2_LINE, l2_probe_done
	li	t1, 2
	sllv	R_L2_LINE, t1, R_L2_LINE

	/* size = line size * (Config2.SA + 1) ways */
	srl	t1, t0, MIPS_CONF2_SA_SHF
	andi	t1, t1, MIPS_CONF2_SA >> MIPS_CONF2_SA_SHF
	addiu	t1, t1, 1
	mul	R_L2_SIZE, R_L2_LINE, t1

	/* size <<= Config2.SS, then scaled by the 64-set granule */
	srl	t1, t0, MIPS_CONF2_SS_SHF
	andi	t1, t1, MIPS_CONF2_SS >> MIPS_CONF2_SS_SHF
	sllv	R_L2_SIZE, R_L2_SIZE, t1
	li	t1, 64
	mul	R_L2_SIZE, R_L2_SIZE, t1

	/* Attempt to bypass the L2 so that we can init the L1s early */
	or	t0, t0, MIPS_CONF2_L2B
	mtc0	t0, CP0_CONFIG, 2
	ehb
	/* read back: record whether the L2B bypass request actually took */
	mfc0	t0, CP0_CONFIG, 2
	and	R_L2_BYPASSED, t0, MIPS_CONF2_L2B

	/* Zero the L2 tag registers */
	mtc0	zero, CP0_TAGLO, 4
	ehb
l2_probe_done:
#endif

#ifndef CONFIG_SYS_CACHE_SIZE_AUTO
	/* I-cache geometry fixed by board configuration */
	li	R_IC_SIZE, CONFIG_SYS_ICACHE_SIZE
	li	R_IC_LINE, CONFIG_SYS_ICACHE_LINE_SIZE
#else
	/* probe I-cache geometry from Config1 */
	l1_info	R_IC_SIZE, R_IC_LINE, MIPS_CONF1_IA_SHF
#endif

#ifndef CONFIG_SYS_CACHE_SIZE_AUTO
	/* D-cache geometry fixed by board configuration */
	li	R_DC_SIZE, CONFIG_SYS_DCACHE_SIZE
	li	R_DC_LINE, CONFIG_SYS_DCACHE_LINE_SIZE
#else
	/* probe D-cache geometry from Config1 */
	l1_info	R_DC_SIZE, R_DC_LINE, MIPS_CONF1_DA_SHF
#endif

#ifdef CONFIG_SYS_MIPS_CACHE_INIT_RAM_LOAD

	/* Determine the largest L1 cache size */
#ifndef CONFIG_SYS_CACHE_SIZE_AUTO
#if CONFIG_SYS_ICACHE_SIZE > CONFIG_SYS_DCACHE_SIZE
	li	v0, CONFIG_SYS_ICACHE_SIZE
#else
	li	v0, CONFIG_SYS_DCACHE_SIZE
#endif
#else
	/* v0 = max(R_IC_SIZE, R_DC_SIZE) */
	move	v0, R_IC_SIZE
	sltu	t1, R_IC_SIZE, R_DC_SIZE
	movn	v0, R_DC_SIZE, t1
#endif
	/*
	 * Now clear that much memory starting from zero.
	 */
	PTR_LI		a0, CKSEG1ADDR(CONFIG_MIPS_CACHE_INDEX_BASE)
	PTR_ADDU	a1, a0, v0
2:	PTR_ADDIU	a0, 64
	f_fill64	a0, -64, zero	/* zero the 64 bytes just stepped over */
	bne		a0, a1, 2b

#endif /* CONFIG_SYS_MIPS_CACHE_INIT_RAM_LOAD */

#ifdef CONFIG_MIPS_L2_CACHE
	/*
	 * If the L2 is bypassed, init the L1 first so that we can execute the
	 * rest of the cache initialisation using the L1 instruction cache.
	 */
	bnez		R_L2_BYPASSED, l1_init

l2_init:
	/* write the (zeroed) tag to every L2 line to invalidate it */
	PTR_LI		t0, CKSEG0ADDR(CONFIG_MIPS_CACHE_INDEX_BASE)
	PTR_ADDU	t1, t0, R_L2_SIZE
1:	cache		INDEX_STORE_TAG_SD, 0(t0)
	PTR_ADDU	t0, t0, R_L2_LINE
	bne		t0, t1, 1b

	/*
	 * If the L2 was bypassed then we already initialised the L1s before
	 * the L2, so we are now done.
	 */
	bnez		R_L2_BYPASSED, l2_unbypass
#endif

	/*
	 * The TagLo registers used depend upon the CPU implementation, but the
	 * architecture requires that it is safe for software to write to both
	 * TagLo selects 0 & 2 covering supported cases.
	 */
l1_init:
	mtc0		zero, CP0_TAGLO
	mtc0		zero, CP0_TAGLO, 2
	ehb

	/*
	 * The caches are probably in an indeterminate state, so we force good
	 * parity into them by doing an invalidate for each line. If
	 * CONFIG_SYS_MIPS_CACHE_INIT_RAM_LOAD is set then we'll proceed to
	 * perform a load/fill & a further invalidate for each line, assuming
	 * that the bottom of RAM (having just been cleared) will generate good
	 * parity for the cache.
	 */

	/*
	 * Initialize the I-cache first,
	 */
	blez		R_IC_SIZE, 1f	/* skip when there is no I-cache */
	PTR_LI		t0, CKSEG0ADDR(CONFIG_MIPS_CACHE_INDEX_BASE)
	PTR_ADDU	t1, t0, R_IC_SIZE
	/* clear tag to invalidate */
	cache_loop	t0, t1, R_IC_LINE, INDEX_STORE_TAG_I
#ifdef CONFIG_SYS_MIPS_CACHE_INIT_RAM_LOAD
	/* fill once, so data field parity is correct */
	PTR_LI		t0, CKSEG0ADDR(CONFIG_MIPS_CACHE_INDEX_BASE)
	cache_loop	t0, t1, R_IC_LINE, FILL
	/* invalidate again - prudent but not strictly necessary */
	PTR_LI		t0, CKSEG0ADDR(CONFIG_MIPS_CACHE_INDEX_BASE)
	cache_loop	t0, t1, R_IC_LINE, INDEX_STORE_TAG_I
#endif
	sync

	/*
	 * Enable use of the I-cache by setting Config.K0. The code for this
	 * must be executed from KSEG1. Jump from KSEG0 to KSEG1 to do this.
	 * Jump back to KSEG0 after caches are enabled and insert an
	 * instruction hazard barrier.
	 */
	PTR_LA		t0, change_k0_cca
	li		t1, CPHYSADDR(~0)
	and		t0, t0, t1	/* t0 = physical address of helper */
	PTR_LI		t1, CKSEG1
	or		t0, t0, t1	/* ... re-mapped into uncached KSEG1 */
	li		a0, CONF_CM_CACHABLE_NONCOHERENT
	jalr.hb		t0

	/*
	 * then initialize D-cache.
	 */
1:	blez		R_DC_SIZE, 3f	/* skip when there is no D-cache */
	PTR_LI		t0, CKSEG0ADDR(CONFIG_MIPS_CACHE_INDEX_BASE)
	PTR_ADDU	t1, t0, R_DC_SIZE
	/* clear all tags */
	cache_loop	t0, t1, R_DC_LINE, INDEX_STORE_TAG_D
#ifdef CONFIG_SYS_MIPS_CACHE_INIT_RAM_LOAD
	/* load from each line (in cached space) */
	PTR_LI		t0, CKSEG0ADDR(CONFIG_MIPS_CACHE_INDEX_BASE)
2:	LONG_L		zero, 0(t0)
	PTR_ADDU	t0, R_DC_LINE
	bne		t0, t1, 2b
	/* clear all tags */
	PTR_LI		t0, CKSEG0ADDR(CONFIG_MIPS_CACHE_INDEX_BASE)
	cache_loop	t0, t1, R_DC_LINE, INDEX_STORE_TAG_D
#endif
3:

#ifdef CONFIG_MIPS_L2_CACHE
	/* If the L2 isn't bypassed then we're done */
	beqz		R_L2_BYPASSED, return

	/* The L2 is bypassed - go initialise it */
	b		l2_init

l2_unbypass:
# if __mips_isa_rev >= 6
	beqz		R_L2_L2C, 1f

	/*
	 * NOTE(review): the other CM GCR base loads in this file use PTR_LI;
	 * plain li sign-extends the 32-bit CKSEG1 address, which should still
	 * be canonical on 64-bit cores, but PTR_LI would be consistent -
	 * confirm.
	 */
	li		t0, CKSEG1ADDR(CONFIG_MIPS_CM_BASE)
	lw		t1, GCR_L2_CONFIG(t0)
	xor		t1, t1, GCR_L2_CONFIG_BYPASS	/* clear bypass set above */
	sw		t1, GCR_L2_CONFIG(t0)
	sync
	ehb
	b		2f
# endif
1:	mfc0		t0, CP0_CONFIG, 2
	xor		t0, t0, MIPS_CONF2_L2B	/* clear Config2.L2B set above */
	mtc0		t0, CP0_CONFIG, 2
	ehb

2:
# ifdef CONFIG_MIPS_CM
	/* Config3 must exist for a CM to be present */
	mfc0		t0, CP0_CONFIG, 1
	bgez		t0, 2f
	mfc0		t0, CP0_CONFIG, 2
	bgez		t0, 2f

	/* Check Config3.CMGCR to determine CM presence */
	mfc0		t0, CP0_CONFIG, 3
	and		t0, t0, MIPS_CONF3_CMGCR
	beqz		t0, 2f

	/* Change Config.K0 to a coherent CCA */
	PTR_LA		t0, change_k0_cca
	li		a0, CONF_CM_CACHABLE_COW
	jalr		t0

	/*
	 * Join the coherent domain such that the caches of this core are kept
	 * coherent with those of other cores.
	 */
	PTR_LI		t0, CKSEG1ADDR(CONFIG_MIPS_CM_BASE)
	lw		t1, GCR_REV(t0)
	li		t2, GCR_REV_CM3
	li		t3, GCR_Cx_COHERENCE_EN		/* CM >= 3 form */
	bge		t1, t2, 1f
	li		t3, GCR_Cx_COHERENCE_DOM_EN	/* pre-CM3 form */
1:	sw		t3, GCR_Cx_COHERENCE(t0)
	ehb
2:
# endif
#endif

return:
	/* Ensure all cache operations complete before returning */
	sync
	jr	R_RETURN
	END(mips_cache_reset)
416
/*
 * change_k0_cca - change the cache coherency attribute (CCA) of (c)kseg0
 *
 * In:       a0 = new CCA value for Config.K0 (low 3 bits used)
 * Clobbers: t0, a0
 *
 * Writes the CCA into the K0 field of CP0 Config whilst preserving all
 * other Config bits, then returns via jr.hb so the change is in effect
 * for the caller. Callers invoke this via jalr so they may relocate the
 * call target into KSEG1 while caches are being enabled.
 */
LEAF(change_k0_cca)
	mfc0		t0, CP0_CONFIG
#if __mips_isa_rev >= 2
	/*
	 * ins merges the new CCA into its destination, t0. Copy the merged
	 * value back to a0 so the mtc0 below writes the full Config value;
	 * previously the r2+ path wrote the unmerged a0, i.e. the bare CCA
	 * with every other writable Config field zeroed, unlike the pre-r2
	 * path which correctly produces the merged value in a0.
	 */
	ins		t0, a0, 0, 3
	move		a0, t0
#else
	/* a0 = t0 with its K0 field replaced by a0 (xor/and/xor merge) */
	xor		a0, a0, t0
	andi		a0, a0, CONF_CM_CMASK
	xor		a0, a0, t0
#endif
	mtc0		a0, CP0_CONFIG

	jr.hb		ra
	END(change_k0_cca)
430