/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 *
 * This file is based on sample code from ARMv8 ARM.
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <asm-offsets.h>
#include <config.h>
#include <asm/macro.h>
#include <linux/linkage.h>

/*
 * void __asm_flush_dcache_level(level, invalidate_only)
 *
 * clean and/or invalidate one level of data cache by set/way.
 *
 * x0: cache level
 * x1: 0 clean & invalidate, 1 invalidate only
 * x2~x9, x12: clobbered
 */
ENTRY(__asm_flush_dcache_level)
	lsl	x12, x0, #1
	msr	csselr_el1, x12		/* select cache level */
	isb				/* sync change of csselr_el1 */
	mrs	x6, ccsidr_el1		/* read the new ccsidr_el1 */
	and	x2, x6, #7		/* x2 <- log2(cache line size)-4 */
	add	x2, x2, #4		/* x2 <- log2(cache line size) */
	mov	x3, #0x3ff
	and	x3, x3, x6, lsr #3	/* x3 <- max way number (#ways - 1) */
	clz	w5, w3			/* bit position of #ways */
	mov	x4, #0x7fff
	and	x4, x4, x6, lsr #13	/* x4 <- max set number (#sets - 1) */
	/* x12 <- cache level << 1 */
	/* x2 <- line length offset */
	/* x3 <- number of cache ways - 1 */
	/* x4 <- number of cache sets - 1 */
	/* x5 <- bit position of #ways */

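	/*
	 * The dc cisw/isw operand assembled below follows the ARMv8
	 * set/way format: (way << (32 - log2(#ways))) |
	 * (set << log2(line length)) | (level << 1).  Worked example
	 * with illustrative geometry: a 4-way cache with 64-byte lines
	 * gives x3 = 3, so x5 = clz(3) = 30 (ways occupy bits [31:30])
	 * and x2 = 6 (set numbers start at bit 6).
	 */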
loop_set:
	mov	x6, x3			/* x6 <- working copy of #ways */
loop_way:
	lsl	x7, x6, x5
	orr	x9, x12, x7		/* map way and level to cisw value */
	lsl	x7, x4, x2
	orr	x9, x9, x7		/* map set number to cisw value */
	tbz	w1, #0, 1f
	dc	isw, x9
	b	2f
1:	dc	cisw, x9		/* clean & invalidate by set/way */
2:	subs	x6, x6, #1		/* decrement the way */
	b.ge	loop_way
	subs	x4, x4, #1		/* decrement the set */
	b.ge	loop_set

	ret
ENDPROC(__asm_flush_dcache_level)

/*
 * void __asm_dcache_all(int invalidate_only)
 *
 * x0: 0 clean & invalidate, 1 invalidate only
 *
 * clean and/or invalidate all data cache by set/way.
 */
ENTRY(__asm_dcache_all)
	mov	x1, x0
	dsb	sy
	mrs	x10, clidr_el1		/* read clidr_el1 */
	lsr	x11, x10, #24
	and	x11, x11, #0x7		/* x11 <- loc (level of coherence) */
	cbz	x11, finished		/* if loc is 0, exit */
	mov	x15, lr
	mov	x0, #0			/* start flush at cache level 0 */
	/* x0  <- cache level */
	/* x10 <- clidr_el1 */
	/* x11 <- loc */
	/* x15 <- return address */

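	/*
	 * clidr_el1 carries one 3-bit cache-type field per level at
	 * bits [3n+2:3n]: 0 = no cache, 1 = icache only, 2 = dcache
	 * only, 3 = separate i+d, 4 = unified.  Only types >= 2 hold
	 * data to clean, hence the cmp against #2 below.
	 */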
loop_level:
	lsl	x12, x0, #1
	add	x12, x12, x0		/* x12 <- tripled cache level */
	lsr	x12, x10, x12
	and	x12, x12, #7		/* x12 <- cache type */
	cmp	x12, #2
	b.lt	skip			/* skip if no cache or icache only */
	bl	__asm_flush_dcache_level	/* x1 = 0 flush, 1 invalidate */
skip:
	add	x0, x0, #1		/* increment cache level */
	cmp	x11, x0
	b.gt	loop_level

	mov	x0, #0
	msr	csselr_el1, x0		/* restore csselr_el1 */
	dsb	sy
	isb
	mov	lr, x15

finished:
	ret
ENDPROC(__asm_dcache_all)
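
/*
 * Note that set/way operations act only on the caches of the core
 * executing them and are not broadcast to other masters, so the
 * ARMv8 ARM recommends them chiefly for power-up/power-down
 * sequences; for DMA coherency, range-based maintenance such as
 * __asm_flush_dcache_range below is the safer tool.
 */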

ENTRY(__asm_flush_dcache_all)
	mov	x16, lr
	mov	x0, #0
	bl	__asm_dcache_all
	mov	lr, x16
	ret
ENDPROC(__asm_flush_dcache_all)

ENTRY(__asm_invalidate_dcache_all)
	mov	x16, lr
	mov	x0, #0xffff
	bl	__asm_dcache_all
	mov	lr, x16
	ret
ENDPROC(__asm_invalidate_dcache_all)
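
/*
 * Illustrative C-side view (these routines are reached through the
 * flush_dcache_all()/invalidate_dcache_all() wrappers in cache_v8.c):
 *
 *	void __asm_flush_dcache_all(void);
 *	void __asm_invalidate_dcache_all(void);
 *
 * Only bit 0 of the argument to __asm_dcache_all is tested (tbz),
 * so any odd value, such as the 0xffff above, selects
 * invalidate-only.
 */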

/*
 * void __asm_flush_dcache_range(start, end)
 *
 * clean & invalidate the data cache in the range [start, end)
 *
 * x0: start address
 * x1: end address
 */
ENTRY(__asm_flush_dcache_range)
	mrs	x3, ctr_el0
	lsr	x3, x3, #16
	and	x3, x3, #0xf
	mov	x2, #4
	lsl	x2, x2, x3		/* cache line size */
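	/*
	 * Bits [19:16] of ctr_el0 are DminLine: log2 of the smallest
	 * data cache line, in words.  For example, DminLine = 4 gives
	 * 4 << 4 = 64-byte lines, the common Cortex-A case.
	 */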

	/* x2 <- minimal cache line size in cache system */
	sub	x3, x2, #1
	bic	x0, x0, x3
1:	dc	civac, x0	/* clean & invalidate data or unified cache */
	add	x0, x0, x2
	cmp	x0, x1
	b.lo	1b
	dsb	sy
	ret
ENDPROC(__asm_flush_dcache_range)
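
/*
 * Illustrative C-side call (flush_dcache_range() in cache_v8.c
 * forwards here):
 *
 *	flush_dcache_range((ulong)buf, (ulong)buf + size);
 *
 * Unaligned addresses are safe: the start is rounded down above,
 * and the b.lo loop still covers the line holding the final byte.
 */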

/*
 * void __asm_invalidate_icache_all(void)
 *
 * invalidate all icache entries.
 */
ENTRY(__asm_invalidate_icache_all)
	ic	ialluis
	isb	sy
	ret
ENDPROC(__asm_invalidate_icache_all)

ENTRY(__asm_flush_l3_cache)
	mov	x0, #0			/* return status as success */
	ret
ENDPROC(__asm_flush_l3_cache)
	.weak	__asm_flush_l3_cache
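
/*
 * The weak stub above lets SoC-specific code override it when an
 * external L3 cache needs explicit maintenance (U-Boot's
 * fsl-layerscape support, for instance, supplies its own
 * __asm_flush_l3_cache).  An override must follow the AAPCS64
 * calling convention and return its status in x0, 0 for success.
 */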