xref: /openbmc/u-boot/arch/arm/cpu/armv8/cache.S (revision fbe44dd1)
1/*
2 * (C) Copyright 2013
3 * David Feng <fenghua@phytium.com.cn>
4 *
5 * This file is based on sample code from ARMv8 ARM.
6 *
7 * SPDX-License-Identifier:	GPL-2.0+
8 */
9
10#include <asm-offsets.h>
11#include <config.h>
12#include <asm/macro.h>
13#include <asm/system.h>
14#include <linux/linkage.h>
15
16/*
17 * void __asm_dcache_level(level)
18 *
19 * flush or invalidate one level cache.
20 *
21 * x0: cache level
22 * x1: 0 clean & invalidate, 1 invalidate only
23 * x2~x9: clobbered
24 */
ENTRY(__asm_dcache_level)
	lsl	x12, x0, #1
	msr	csselr_el1, x12		/* select cache level */
	isb				/* sync change of csselr_el1 */
	mrs	x6, ccsidr_el1		/* read the new ccsidr_el1 */
	and	x2, x6, #7		/* x2 <- log2(cache line size)-4 */
	add	x2, x2, #4		/* x2 <- log2(cache line size) */
	mov	x3, #0x3ff
	and	x3, x3, x6, lsr #3	/* x3 <- max number of #ways */
	clz	w5, w3			/* bit position of #ways */
	mov	x4, #0x7fff
	and	x4, x4, x6, lsr #13	/* x4 <- max number of #sets */
	/* x12 <- cache level << 1 */
	/* x2 <- line length offset */
	/* x3 <- number of cache ways - 1 */
	/* x4 <- number of cache sets - 1 */
	/* x5 <- bit position of #ways */

loop_set:
	mov	x6, x3			/* x6 <- working copy of #ways */
loop_way:
	lsl	x7, x6, x5
	orr	x9, x12, x7		/* map way and level to cisw value */
	lsl	x7, x4, x2
	orr	x9, x9, x7		/* map set number to cisw value */
	tbz	w1, #0, 1f		/* bit0 of x1 clear -> clean+invalidate */
	dc	isw, x9			/* invalidate by set/way only */
	b	2f
1:	dc	cisw, x9		/* clean & invalidate by set/way */
2:	subs	x6, x6, #1		/* decrement the way */
	b.ge	loop_way
	subs	x4, x4, #1		/* decrement the set */
	b.ge	loop_set

	ret
ENDPROC(__asm_dcache_level)
61
62/*
 * void __asm_dcache_all(int invalidate_only)
64 *
65 * x0: 0 clean & invalidate, 1 invalidate only
66 *
67 * flush or invalidate all data cache by SET/WAY.
68 */
ENTRY(__asm_dcache_all)
	mov	x1, x0			/* x1 <- 0 flush, 1 invalidate only */
	dsb	sy			/* complete outstanding writes first */
	mrs	x10, clidr_el1		/* read clidr_el1 */
	lsr	x11, x10, #24
	and	x11, x11, #0x7		/* x11 <- loc (level of coherence) */
	cbz	x11, finished		/* if loc is 0, exit */
	mov	x15, lr			/* save lr: bl below clobbers it */
	mov	x0, #0			/* start flush at cache level 0 */
	/* x0  <- cache level */
	/* x10 <- clidr_el1 */
	/* x11 <- loc */
	/* x15 <- return address */

loop_level:
	lsl	x12, x0, #1
	add	x12, x12, x0		/* x12 <- tripled cache level */
	lsr	x12, x10, x12
	and	x12, x12, #7		/* x12 <- cache type */
	cmp	x12, #2
	b.lt	skip			/* skip if no cache or icache */
	bl	__asm_dcache_level	/* x1 = 0 flush, 1 invalidate */
skip:
	add	x0, x0, #1		/* increment cache level */
	cmp	x11, x0
	b.gt	loop_level

	mov	x0, #0
	msr	csselr_el1, x0		/* restore csselr_el1 */
	dsb	sy
	isb
	mov	lr, x15			/* restore the saved return address */

finished:
	ret
ENDPROC(__asm_dcache_all)
105
ENTRY(__asm_flush_dcache_all)
	/* Tail-call the set/way walker with x0 = 0: clean & invalidate. */
	mov	x0, xzr
	b	__asm_dcache_all
ENDPROC(__asm_flush_dcache_all)
110
ENTRY(__asm_invalidate_dcache_all)
	/* Tail-call the set/way walker with x0 = 1: invalidate only. */
	mov	w0, #1			/* writing w0 zero-extends into x0 */
	b	__asm_dcache_all
ENDPROC(__asm_invalidate_dcache_all)
115
116/*
117 * void __asm_flush_dcache_range(start, end)
118 *
119 * clean & invalidate data cache in the range
120 *
121 * x0: start address
122 * x1: end address
123 */
ENTRY(__asm_flush_dcache_range)
	/*
	 * CTR_EL0.DminLine (bits [19:16]) holds log2 of the smallest
	 * D-cache line in the system, counted in 4-byte words.
	 */
	mrs	x3, ctr_el0
	ubfx	x3, x3, #16, #4		/* x3 <- log2(words per line) */
	mov	x2, #4
	lsl	x2, x2, x3		/* x2 <- line size in bytes */

	sub	x3, x2, #1		/* x3 <- line-offset mask */
	bic	x0, x0, x3		/* align start down to a line */
1:	dc	civac, x0	/* clean & invalidate data or unified cache */
	add	x0, x0, x2		/* advance one line */
	cmp	x0, x1
	b.lo	1b
	dsb	sy			/* ensure completion before return */
	ret
ENDPROC(__asm_flush_dcache_range)
141/*
142 * void __asm_invalidate_dcache_range(start, end)
143 *
144 * invalidate data cache in the range
145 *
146 * x0: start address
147 * x1: end address
148 */
ENTRY(__asm_invalidate_dcache_range)
	/*
	 * CTR_EL0.DminLine (bits [19:16]) holds log2 of the smallest
	 * D-cache line in the system, counted in 4-byte words.
	 */
	mrs	x3, ctr_el0
	lsr	x3, x3, #16
	and	x3, x3, #0xf		/* x3 <- log2(words per line) */
	mov	x2, #4
	lsl	x2, x2, x3		/* x2 <- line size in bytes */

	sub	x3, x2, #1		/* x3 <- line-offset mask */
	bic	x0, x0, x3		/* align start down to a line */
1:	dc	ivac, x0	/* invalidate data or unified cache */
	add	x0, x0, x2		/* advance one line */
	cmp	x0, x1
	b.lo	1b
	dsb	sy			/* ensure completion before return */
	ret
ENDPROC(__asm_invalidate_dcache_range)
165
166/*
167 * void __asm_invalidate_icache_all(void)
168 *
 * invalidate all icache entries (inner shareable domain).
170 */
ENTRY(__asm_invalidate_icache_all)
	ic	ialluis			/* invalidate all icache, inner shareable */
	isb				/* isb defaults to sy; sync the invalidate */
	ret
ENDPROC(__asm_invalidate_icache_all)
176
ENTRY(__asm_invalidate_l3_dcache)
	/* Weak default: no L3 to maintain; report success. */
	mov	x0, xzr
	ret
ENDPROC(__asm_invalidate_l3_dcache)
	.weak	__asm_invalidate_l3_dcache
182
ENTRY(__asm_flush_l3_dcache)
	/* Weak default: no L3 to maintain; report success. */
	mov	x0, xzr
	ret
ENDPROC(__asm_flush_l3_dcache)
	.weak	__asm_flush_l3_dcache
188
ENTRY(__asm_invalidate_l3_icache)
	/* Weak default: no L3 to maintain; report success. */
	mov	x0, xzr
	ret
ENDPROC(__asm_invalidate_l3_icache)
	.weak	__asm_invalidate_l3_icache
194
195/*
196 * void __asm_switch_ttbr(ulong new_ttbr)
197 *
198 * Safely switches to a new page table.
199 */
ENTRY(__asm_switch_ttbr)
	/* x2 = SCTLR (alive throughout the function) */
	switch_el x4, 3f, 2f, 1f
3:	mrs	x2, sctlr_el3
	b	0f
2:	mrs	x2, sctlr_el2
	b	0f
1:	mrs	x2, sctlr_el1
0:

	/* Unset CR_M | CR_C | CR_I from SCTLR to disable all caches */
	movn	x1, #(CR_M | CR_C | CR_I)
	and	x1, x2, x1
	switch_el x4, 3f, 2f, 1f
3:	msr	sctlr_el3, x1
	b	0f
2:	msr	sctlr_el2, x1
	b	0f
1:	msr	sctlr_el1, x1
0:	isb

	/* This call only clobbers x30 (lr) and x9 (unused) */
	mov	x3, x30			/* preserve return address across bl */
	bl	__asm_invalidate_tlb_all

	/* From here on we're running safely with caches disabled */

	/* Set TTBR to our first argument */
	switch_el x4, 3f, 2f, 1f
3:	msr	ttbr0_el3, x0
	b	0f
2:	msr	ttbr0_el2, x0
	b	0f
1:	msr	ttbr0_el1, x0
0:	isb

	/* Restore original SCTLR and thus enable caches again */
	switch_el x4, 3f, 2f, 1f
3:	msr	sctlr_el3, x2
	b	0f
2:	msr	sctlr_el2, x2
	b	0f
1:	msr	sctlr_el1, x2
0:	isb

	ret	x3			/* return via x3: x30 was clobbered above */
ENDPROC(__asm_switch_ttbr)
247