xref: /openbmc/linux/arch/arm64/mm/cache.S (revision d8bcaabe)
/*
 * Cache maintenance
 *
 * Copyright (C) 2001 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/errno.h>
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/alternative.h>
#include <asm/asm-uaccess.h>

/*
 *	flush_icache_range(start,end)
 *
 *	Ensure that the I and D caches are coherent within the specified
 *	region. This is typically used when code has been written to a
 *	memory region, and will be executed.
 *
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
ENTRY(flush_icache_range)
	/* FALLTHROUGH */

/*
 *	__flush_cache_user_range(start,end)
 *
 *	Ensure that the I and D caches are coherent within the specified
 *	region. This is typically used when code has been written to a
 *	memory region, and will be executed.
 *
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
ENTRY(__flush_cache_user_range)
	uaccess_ttbr0_enable x2, x3
	dcache_line_size x2, x3
	sub	x3, x2, #1
	bic	x4, x0, x3			// round start down to line size
1:
user_alt 9f, "dc cvau, x4",  "dc civac, x4",  ARM64_WORKAROUND_CLEAN_CACHE
	add	x4, x4, x2
	cmp	x4, x1
	b.lo	1b
	dsb	ish

	icache_line_size x2, x3
	sub	x3, x2, #1
	bic	x4, x0, x3			// round start down to line size
1:
USER(9f, ic	ivau, x4	)		// invalidate I line PoU
	add	x4, x4, x2
	cmp	x4, x1
	b.lo	1b
	dsb	ish
	isb
	mov	x0, #0
1:
	uaccess_ttbr0_disable x1
	ret
9:
	mov	x0, #-EFAULT
	b	1b
ENDPROC(flush_icache_range)
ENDPROC(__flush_cache_user_range)
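/*
 *	Usage sketch (illustrative only; the C prototypes below are
 *	assumptions inferred from the register usage above, not taken
 *	from this file):
 *
 *	extern void flush_icache_range(unsigned long start,
 *				       unsigned long end);
 *	extern long __flush_cache_user_range(unsigned long start,
 *					     unsigned long end);
 *
 *	After writing instructions to a buffer at 'addr', e.g.:
 *
 *	if (__flush_cache_user_range(addr, addr + len))
 *		return -EFAULT;		// fault taken at label 9 above
 */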

/*
 *	__flush_dcache_area(kaddr, size)
 *
 *	Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
 *	are cleaned and invalidated to the PoC.
 *
 *	- kaddr   - kernel address
 *	- size    - size in question
 */
ENTRY(__flush_dcache_area)
	dcache_by_line_op civac, sy, x0, x1, x2, x3
	ret
ENDPIPROC(__flush_dcache_area)
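/*
 *	Usage sketch (illustrative; prototype assumed, not taken from
 *	this file):
 *
 *	extern void __flush_dcache_area(void *addr, size_t len);
 *
 *	__flush_dcache_area(desc, sizeof(*desc));	// e.g. push a
 *							// descriptor to PoC
 *							// before a non-coherent
 *							// agent reads it
 */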

/*
 *	__clean_dcache_area_pou(kaddr, size)
 *
 *	Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
 *	are cleaned to the PoU.
 *
 *	- kaddr   - kernel address
 *	- size    - size in question
 */
ENTRY(__clean_dcache_area_pou)
	dcache_by_line_op cvau, ish, x0, x1, x2, x3
	ret
ENDPROC(__clean_dcache_area_pou)
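/*
 *	Usage sketch (illustrative; prototype assumed):
 *
 *	extern void __clean_dcache_area_pou(void *addr, size_t len);
 *
 *	__clean_dcache_area_pou(page_addr, PAGE_SIZE);	// make D-side writes
 *							// visible to the
 *							// I-side at the PoU
 */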

/*
 *	__inval_dcache_area(kaddr, size)
 *
 *	Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
 *	are invalidated. Any partial lines at the ends of the interval are
 *	also cleaned to PoC to prevent data loss.
 *
 *	- kaddr   - kernel address
 *	- size    - size in question
 */
ENTRY(__inval_dcache_area)
	/* FALLTHROUGH */

/*
 *	__dma_inv_area(start, size)
 *	- start   - virtual start address of region
 *	- size    - size in question
 */
__dma_inv_area:
	add	x1, x1, x0			// end = start + size
	dcache_line_size x2, x3
	sub	x3, x2, #1
	tst	x1, x3				// end cache line aligned?
	bic	x1, x1, x3
	b.eq	1f
	dc	civac, x1			// clean & invalidate D / U line
1:	tst	x0, x3				// start cache line aligned?
	bic	x0, x0, x3
	b.eq	2f
	dc	civac, x0			// clean & invalidate D / U line
	b	3f
2:	dc	ivac, x0			// invalidate D / U line
3:	add	x0, x0, x2
	cmp	x0, x1
	b.lo	2b
	dsb	sy
	ret
ENDPIPROC(__inval_dcache_area)
ENDPROC(__dma_inv_area)
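/*
 *	Usage sketch (illustrative; prototype assumed):
 *
 *	extern void __inval_dcache_area(void *addr, size_t len);
 *
 *	__inval_dcache_area(buf, len);	// discard stale lines before reading
 *					// data written by another agent;
 *					// partial head/tail lines are
 *					// cleaned & invalidated instead
 */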

/*
 *	__clean_dcache_area_poc(kaddr, size)
 *
 *	Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
 *	are cleaned to the PoC.
 *
 *	- kaddr   - kernel address
 *	- size    - size in question
 */
ENTRY(__clean_dcache_area_poc)
	/* FALLTHROUGH */

/*
 *	__dma_clean_area(start, size)
 *	- start   - virtual start address of region
 *	- size    - size in question
 */
__dma_clean_area:
	dcache_by_line_op cvac, sy, x0, x1, x2, x3
	ret
ENDPIPROC(__clean_dcache_area_poc)
ENDPROC(__dma_clean_area)
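/*
 *	Usage sketch (illustrative; prototype assumed):
 *
 *	extern void __clean_dcache_area_poc(void *addr, size_t len);
 *
 *	__clean_dcache_area_poc(buf, len);	// write dirty lines back to
 *						// PoC; the data stays valid
 *						// in the cache afterwards
 */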

/*
 *	__clean_dcache_area_pop(kaddr, size)
 *
 *	Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
 *	are cleaned to the PoP.
 *
 *	- kaddr   - kernel address
 *	- size    - size in question
 */
ENTRY(__clean_dcache_area_pop)
	dcache_by_line_op cvap, sy, x0, x1, x2, x3
	ret
ENDPIPROC(__clean_dcache_area_pop)
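/*
 *	Usage sketch (illustrative; prototype and use case assumed):
 *
 *	extern void __clean_dcache_area_pop(void *addr, size_t len);
 *
 *	__clean_dcache_area_pop(pmem_buf, len);	// push writes to the Point
 *						// of Persistence, e.g. for
 *						// persistent memory
 */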

/*
 *	__dma_flush_area(start, size)
 *
 *	clean & invalidate D / U lines in the region
 *
 *	- start   - virtual start address of region
 *	- size    - size in question
 */
ENTRY(__dma_flush_area)
	dcache_by_line_op civac, sy, x0, x1, x2, x3
	ret
ENDPIPROC(__dma_flush_area)
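/*
 *	Usage sketch (illustrative; prototype assumed):
 *
 *	extern void __dma_flush_area(const void *addr, size_t len);
 *
 *	__dma_flush_area(buf, len);	// clean & invalidate, e.g. for a
 *					// bidirectional non-coherent DMA
 *					// buffer
 */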

/*
 *	__dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(__dma_map_area)
	cmp	w2, #DMA_FROM_DEVICE
	b.eq	__dma_inv_area			// device will write: invalidate
	b	__dma_clean_area		// otherwise clean to PoC
ENDPIPROC(__dma_map_area)
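/*
 *	Usage sketch (illustrative; prototype and call site assumed):
 *
 *	extern void __dma_map_area(const void *addr, size_t len, int dir);
 *
 *	__dma_map_area(vaddr, len, DMA_TO_DEVICE);	// clean, so the device
 *							// reads up-to-date
 *							// data from memory
 */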

/*
 *	__dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(__dma_unmap_area)
	cmp	w2, #DMA_TO_DEVICE
	b.ne	__dma_inv_area			// device may have written:
						// invalidate stale lines
	ret					// DMA_TO_DEVICE: nothing to do
ENDPIPROC(__dma_unmap_area)
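/*
 *	Usage sketch (illustrative; prototype assumed):
 *
 *	extern void __dma_unmap_area(const void *addr, size_t len, int dir);
 *
 *	__dma_unmap_area(vaddr, len, DMA_FROM_DEVICE);	// invalidate, so the
 *							// CPU sees what the
 *							// device wrote
 */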