/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Cache maintenance
 *
 * Copyright (C) 2001 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/errno.h>
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/alternative.h>
#include <asm/asm-uaccess.h>

/*
 *	flush_icache_range(start,end)
 *
 *	Ensure that the I and D caches are coherent within the specified
 *	region. This is typically used when code has been written to a
 *	memory region, and will be executed.
 *
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
SYM_FUNC_START(__flush_icache_range)
	/* FALLTHROUGH */

/*
 *	__flush_cache_user_range(start,end)
 *
 *	Ensure that the I and D caches are coherent within the specified
 *	region. This is typically used when code has been written to a
 *	memory region, and will be executed.
 *
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
SYM_FUNC_START(__flush_cache_user_range)
	uaccess_ttbr0_enable x2, x3, x4
alternative_if ARM64_HAS_CACHE_IDC
	dsb	ishst				// IDC: no D-side clean to PoU needed
	b	7f
alternative_else_nop_endif
	dcache_line_size x2, x3			// x2 = D-cache line size
	sub	x3, x2, #1
	bic	x4, x0, x3			// x4 = start, line aligned
1:
user_alt 9f, "dc cvau, x4",  "dc civac, x4",  ARM64_WORKAROUND_CLEAN_CACHE
	add	x4, x4, x2
	cmp	x4, x1
	b.lo	1b
	dsb	ish

7:
alternative_if ARM64_HAS_CACHE_DIC
	isb					// DIC: no I-side invalidation needed
	b	8f
alternative_else_nop_endif
	invalidate_icache_by_line x0, x1, x2, x3, 9f
8:	mov	x0, #0
1:
	uaccess_ttbr0_disable x1, x2
	ret
9:
	mov	x0, #-EFAULT
	b	1b
SYM_FUNC_END(__flush_icache_range)
SYM_FUNC_END(__flush_cache_user_range)
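
/*
 * Illustrative caller (C; publish_insns() is a hypothetical helper):
 * after writing instructions into an executable mapping, the range must
 * be made coherent before the new code runs. flush_icache_range() is the
 * usual kernel wrapper around __flush_icache_range.
 *
 *	static void publish_insns(void *dst, const void *src, size_t len)
 *	{
 *		memcpy(dst, src, len);
 *		flush_icache_range((unsigned long)dst,
 *				   (unsigned long)dst + len);
 *	}
 */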

/*
 *	invalidate_icache_range(start,end)
 *
 *	Ensure that the I-cache is invalidated within the specified region.
 *
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
SYM_FUNC_START(invalidate_icache_range)
alternative_if ARM64_HAS_CACHE_DIC
	mov	x0, xzr
	isb
	ret
alternative_else_nop_endif

	uaccess_ttbr0_enable x2, x3, x4

	invalidate_icache_by_line x0, x1, x2, x3, 2f
	mov	x0, xzr
1:
	uaccess_ttbr0_disable x1, x2
	ret
2:
	mov	x0, #-EFAULT
	b	1b
SYM_FUNC_END(invalidate_icache_range)
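
/*
 * Illustrative use (C, hypothetical): when the D-cache side of a range is
 * already clean to the PoU, only stale I-cache lines need discarding.
 * The return value matters: the maintenance may fault on user addresses.
 *
 *	ret = invalidate_icache_range(addr, addr + len);
 *	if (ret)		// -EFAULT on a faulting user address
 *		return ret;
 */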

/*
 *	__flush_dcache_area(kaddr, size)
 *
 *	Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
 *	are cleaned and invalidated to the PoC.
 *
 *	- kaddr   - kernel address
 *	- size    - size of region in bytes
 */
SYM_FUNC_START_PI(__flush_dcache_area)
	dcache_by_line_op civac, sy, x0, x1, x2, x3
	ret
SYM_FUNC_END_PI(__flush_dcache_area)
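
/*
 * Illustrative use (C, hypothetical descriptor): clean+invalidate to the
 * Point of Coherency so a non-coherent observer sees the latest data and
 * the CPU refetches from memory afterwards.
 *
 *	desc->addr = buf_phys;			// desc, buf_phys: hypothetical
 *	desc->len  = buf_len;
 *	__flush_dcache_area(desc, sizeof(*desc));
 */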

/*
 *	__clean_dcache_area_pou(kaddr, size)
 *
 *	Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
 *	are cleaned to the PoU.
 *
 *	- kaddr   - kernel address
 *	- size    - size of region in bytes
 */
SYM_FUNC_START(__clean_dcache_area_pou)
alternative_if ARM64_HAS_CACHE_IDC
	dsb	ishst
	ret
alternative_else_nop_endif
	dcache_by_line_op cvau, ish, x0, x1, x2, x3
	ret
SYM_FUNC_END(__clean_dcache_area_pou)
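
/*
 * Illustrative pairing (C, paraphrasing the aliasing-icache path of the
 * arch flush code): cleaning to the PoU is only the D-side half of
 * instruction coherency; the I-side must still be invalidated.
 *
 *	__clean_dcache_area_pou(kaddr, len);	// push data to the PoU
 *	__flush_icache_all();			// then discard stale I-lines
 */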

/*
 *	__inval_dcache_area(kaddr, size)
 *
 *	Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
 *	are invalidated. Any partial lines at the ends of the interval are
 *	also cleaned to the PoC to prevent data loss.
 *
 *	- kaddr   - kernel address
 *	- size    - size of region in bytes
 */
SYM_FUNC_START_LOCAL(__dma_inv_area)
SYM_FUNC_START_PI(__inval_dcache_area)
	/* FALLTHROUGH */

/*
 *	__dma_inv_area(start, size)
 *	- start   - virtual start address of region
 *	- size    - size of region in bytes
 */
	add	x1, x1, x0			// x1 = end of region
	dcache_line_size x2, x3
	sub	x3, x2, #1
	tst	x1, x3				// end cache line aligned?
	bic	x1, x1, x3
	b.eq	1f
	dc	civac, x1			// clean & invalidate D / U line
1:	tst	x0, x3				// start cache line aligned?
	bic	x0, x0, x3
	b.eq	2f
	dc	civac, x0			// clean & invalidate D / U line
	b	3f
2:	dc	ivac, x0			// invalidate D / U line
3:	add	x0, x0, x2
	cmp	x0, x1
	b.lo	2b
	dsb	sy
	ret
SYM_FUNC_END_PI(__inval_dcache_area)
SYM_FUNC_END(__dma_inv_area)
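
/*
 * Worked example of the partial-line handling above (illustrative,
 * assuming 64-byte cache lines): for kaddr = 0x1010, size = 0xE0 the
 * interval is [0x1010, 0x10F0). Both ends are mid-line, so the lines at
 * 0x1000 and 0x10C0 are cleaned & invalidated (dc civac) to avoid losing
 * unrelated dirty data sharing those lines, while the fully covered lines
 * at 0x1040 and 0x1080 are simply invalidated (dc ivac).
 */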

/*
 *	__clean_dcache_area_poc(kaddr, size)
 *
 *	Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
 *	are cleaned to the PoC.
 *
 *	- kaddr   - kernel address
 *	- size    - size of region in bytes
 */
SYM_FUNC_START_LOCAL(__dma_clean_area)
SYM_FUNC_START_PI(__clean_dcache_area_poc)
	/* FALLTHROUGH */

/*
 *	__dma_clean_area(start, size)
 *	- start   - virtual start address of region
 *	- size    - size of region in bytes
 */
	dcache_by_line_op cvac, sy, x0, x1, x2, x3
	ret
SYM_FUNC_END_PI(__clean_dcache_area_poc)
SYM_FUNC_END(__dma_clean_area)
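
/*
 * Illustrative use (C; kick_device_tx() is a hypothetical doorbell
 * helper): clean to the PoC before a device reads the buffer. Dirty CPU
 * lines are written back but remain valid for subsequent CPU reads.
 *
 *	fill_tx_buffer(buf, len);		// buf, len: hypothetical
 *	__clean_dcache_area_poc(buf, len);	// device now sees the data
 *	kick_device_tx(dev);
 */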

/*
 *	__clean_dcache_area_pop(kaddr, size)
 *
 *	Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
 *	are cleaned to the PoP.
 *
 *	- kaddr   - kernel address
 *	- size    - size of region in bytes
 */
SYM_FUNC_START_PI(__clean_dcache_area_pop)
	alternative_if_not ARM64_HAS_DCPOP
	b	__clean_dcache_area_poc
	alternative_else_nop_endif
	dcache_by_line_op cvap, sy, x0, x1, x2, x3
	ret
SYM_FUNC_END_PI(__clean_dcache_area_pop)
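
/*
 * Illustrative use (C, hypothetical caller; arch_wb_cache_pmem() is the
 * usual kernel wrapper around this routine): flush a persistent-memory
 * write so it survives power loss. Note the fallback above: without
 * ARM64_HAS_DCPOP (DC CVAP, from ARMv8.2-DCPoP), a clean to the PoC is
 * the closest guarantee available.
 *
 *	memcpy(pmem_dst, src, len);		// pmem_dst, len: hypothetical
 *	__clean_dcache_area_pop(pmem_dst, len);
 */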

/*
 *	__dma_flush_area(start, size)
 *
 *	Clean and invalidate the D / U cache lines in the region.
 *
 *	- start   - virtual start address of region
 *	- size    - size of region in bytes
 */
SYM_FUNC_START_PI(__dma_flush_area)
	dcache_by_line_op civac, sy, x0, x1, x2, x3
	ret
SYM_FUNC_END_PI(__dma_flush_area)
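
/*
 * Illustrative use (C, paraphrasing the arch DMA code of this era): a
 * freshly allocated non-coherent DMA buffer is flushed so that no stale
 * dirty lines can later be evicted on top of device writes.
 *
 *	void arch_dma_prep_coherent(struct page *page, size_t size)
 *	{
 *		__dma_flush_area(page_address(page), size);
 *	}
 */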

/*
 *	__dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
SYM_FUNC_START_PI(__dma_map_area)
	cmp	w2, #DMA_FROM_DEVICE
	b.eq	__dma_inv_area			// device will write: invalidate
	b	__dma_clean_area		// device will read: clean to PoC
SYM_FUNC_END_PI(__dma_map_area)
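
/*
 * C equivalent of the dispatch above (illustrative):
 *
 *	if (dir == DMA_FROM_DEVICE)
 *		__dma_inv_area(start, size);	// device writes, CPU reads
 *	else
 *		__dma_clean_area(start, size);	// TO_DEVICE / BIDIRECTIONAL
 */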

/*
 *	__dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
SYM_FUNC_START_PI(__dma_unmap_area)
	cmp	w2, #DMA_TO_DEVICE
	b.ne	__dma_inv_area			// device may have written: invalidate
	ret					// TO_DEVICE: cleaned at map time
SYM_FUNC_END_PI(__dma_unmap_area)
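
/*
 * C equivalent of the dispatch above (illustrative): a buffer the device
 * may have written must be invalidated before the CPU reads it; for
 * DMA_TO_DEVICE the clean done at map time is sufficient.
 *
 *	if (dir != DMA_TO_DEVICE)
 *		__dma_inv_area(start, size);
 */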