/* xref: /openbmc/linux/arch/arm/mm/proc-arm946.S (revision 6a143a7c) */
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/mm/arm946.S: utility functions for ARM946E-S
 *
 *  Copyright (C) 2004-2006 Hyok S. Choi (hyok.choi@samsung.com)
 *
 *  (Many of the cache routines are derived from proc-arm926.S)
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptrace.h>
#include "proc-macros.S"

/*
 * ARM946E-S is synthesizable to have 0KB to 1MB sized D-Cache,
 * comprising 256 lines of 32 bytes (8 words).
 */
#define CACHE_DSIZE	(CONFIG_CPU_DCACHE_SIZE) /* typically 8KB. */
#define CACHE_DLINESIZE	32			/* fixed */
#define CACHE_DSEGMENTS	4			/* fixed */
#define CACHE_DENTRIES	(CACHE_DSIZE / CACHE_DSEGMENTS / CACHE_DLINESIZE)
#define CACHE_DLIMIT	(CACHE_DSIZE * 4)	/* benchmark needed */

28	.text
29/*
30 * cpu_arm946_proc_init()
31 * cpu_arm946_switch_mm()
32 *
33 * These are not required.
34 */
35ENTRY(cpu_arm946_proc_init)
36ENTRY(cpu_arm946_switch_mm)
37	ret	lr
38
/*
 * cpu_arm946_proc_fin()
 *
 * Processor shutdown: turn off the I-cache and D-cache enable bits
 * in the cp15 control register.
 */
ENTRY(cpu_arm946_proc_fin)
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x00001000		@ i-cache
	bic	r0, r0, #0x00000004		@ d-cache
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	ret	lr

/*
 * cpu_arm946_reset(loc)
 * Params  : r0 = address to jump to
 * Notes   : This sets up everything for a reset
 *
 * Placed in .idmap.text so the code keeps running at the same
 * address once caches/protection are disabled.
 */
	.pushsection	.idmap.text, "ax"
ENTRY(cpu_arm946_reset)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c5, 0		@ flush I cache
	mcr	p15, 0, ip, c7, c6, 0		@ flush D cache
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
	bic	ip, ip, #0x00000005		@ .............c.p
	bic	ip, ip, #0x00001000		@ i-cache
	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
	ret	r0
ENDPROC(cpu_arm946_reset)
	.popsection

/*
 * cpu_arm946_do_idle()
 *
 * Idle the CPU until the next interrupt (cp15 wait-for-interrupt).
 */
	.align	5
ENTRY(cpu_arm946_do_idle)
	mcr	p15, 0, r0, c7, c0, 4		@ Wait for interrupt
	ret	lr

/*
 *	flush_icache_all()
 *
 *	Unconditionally clean and invalidate the entire icache.
 */
ENTRY(arm946_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	ret	lr
ENDPROC(arm946_flush_icache_all)

/*
 *	flush_user_cache_all()
 */
ENTRY(arm946_flush_user_cache_all)
	/* FALLTHROUGH */

/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 *
 *	The set/way loop walks segment bits [30:29] and index bits
 *	starting at bit 4, per the c7,c14,2 index format.
 */
ENTRY(arm946_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	mov	ip, #0
__flush_whole_cache:
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, ip, c7, c6, 0		@ flush D cache
#else
	mov	r1, #(CACHE_DSEGMENTS - 1) << 29 @ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 4 @ n entries
2:	mcr	p15, 0, r3, c7, c14, 2		@ clean/flush D index
	subs	r3, r3, #1 << 4
	bcs	2b				@ entries n to 0
	subs	r1, r1, #1 << 29
	bcs	1b				@ segments 3 to 0
#endif
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ flush I cache
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr

/*
 *	flush_user_cache_range(start, end, flags)
 *
 *	Clean and invalidate a range of cache entries in the
 *	specified address range.
 *
 *	- start	- start address (inclusive)
 *	- end	- end address (exclusive)
 *	- flags	- vm_flags describing address space
 * (same as arm926)
 */
ENTRY(arm946_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #CACHE_DLIMIT		@ large range: whole-cache flush is cheaper
	bhs	__flush_whole_cache

1:	tst	r2, #VM_EXEC			@ executable mapping: also do I entries
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
#else
	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D entry
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D entry
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
#endif
	cmp	r0, r1
	blo	1b
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr

/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start, end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
ENTRY(arm946_coherent_kern_range)
	/* FALLTHROUGH */

/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start, end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 * (same as arm926)
 */
ENTRY(arm946_coherent_user_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	r0, #0				@ return 0 (success)
	ret	lr

/*
 *	flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache
 *
 *	- addr	- kernel address
 *	- size	- region size
 * (same as arm926)
 */
ENTRY(arm946_flush_kern_dcache_area)
	add	r1, r0, r1			@ r1 = end address
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

/*
 *	dma_inv_range(start, end)
 *
 *	Invalidate (discard) the specified virtual address range.
 *	May not write back any entries.  If 'start' or 'end'
 *	are not cache line aligned, those lines must be written
 *	back.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 * (same as arm926)
 */
arm946_dma_inv_range:
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	tst	r0, #CACHE_DLINESIZE - 1	@ unaligned start: clean the partial line
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
	tst	r1, #CACHE_DLINESIZE - 1	@ unaligned end: clean the partial line
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
#endif
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

/*
 *	dma_clean_range(start, end)
 *
 *	Clean the specified virtual address range.
 *	With a write-through D-cache there is nothing to clean;
 *	only the write buffer needs draining.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 *
 * (same as arm926)
 */
arm946_dma_clean_range:
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
#endif
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

/*
 *	dma_flush_range(start, end)
 *
 *	Clean and invalidate the specified virtual address range.
 *	With a write-through D-cache an invalidate alone suffices.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 *
 * (same as arm926)
 */
ENTRY(arm946_dma_flush_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
#else
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
#endif
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

/*
 *	dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(arm946_dma_map_area)
	add	r1, r1, r0			@ convert size to end address
	cmp	r2, #DMA_TO_DEVICE
	beq	arm946_dma_clean_range		@ TO_DEVICE: clean only
	bcs	arm946_dma_inv_range		@ FROM_DEVICE: invalidate only
	b	arm946_dma_flush_range		@ BIDIRECTIONAL: clean+invalidate
ENDPROC(arm946_dma_map_area)

/*
 *	dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 *
 *	No-op here: all maintenance is performed in dma_map_area.
 */
ENTRY(arm946_dma_unmap_area)
	ret	lr
ENDPROC(arm946_dma_unmap_area)

309	.globl	arm946_flush_kern_cache_louis
310	.equ	arm946_flush_kern_cache_louis, arm946_flush_kern_cache_all
311
312	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
313	define_cache_functions arm946
314
/*
 * cpu_arm946_dcache_clean_area(addr, size)
 *
 * Write back the D-cache lines covering [r0, r0 + r1).  With a
 * write-through D-cache only the write buffer needs draining.
 */
ENTRY(cpu_arm946_dcache_clean_area)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	subs	r1, r1, #CACHE_DLINESIZE
	bhi	1b
#endif
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

/*
 * __arm946_setup()
 *
 * Initialise caches and the protection unit (MPU) regions, then
 * return the desired cp15 control register value in r0 for the
 * caller to write.
 */
	.type	__arm946_setup, #function
__arm946_setup:
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c6, 0		@ invalidate D cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB

	mcr	p15, 0, r0, c6, c3, 0		@ disable memory region 3~7
	mcr	p15, 0, r0, c6, c4, 0
	mcr	p15, 0, r0, c6, c5, 0
	mcr	p15, 0, r0, c6, c6, 0
	mcr	p15, 0, r0, c6, c7, 0

	mov	r0, #0x0000003F			@ base = 0, size = 4GB
	mcr	p15, 0, r0, c6,	c0, 0		@ set region 0, default

	ldr	r0, =(CONFIG_DRAM_BASE & 0xFFFFF000) @ base[31:12] of RAM
	ldr	r7, =CONFIG_DRAM_SIZE		@ size of RAM (must be >= 4KB)
	pr_val	r3, r0, r7, #1
	mcr	p15, 0, r3, c6, c1, 0		@ set region 1, RAM

	ldr	r0, =(CONFIG_FLASH_MEM_BASE & 0xFFFFF000) @ base[31:12] of FLASH
	ldr	r7, =CONFIG_FLASH_SIZE		@ size of FLASH (must be >= 4KB)
	pr_val	r3, r0, r7, #1
	mcr	p15, 0, r3, c6, c2, 0		@ set region 2, FLASH

	mov	r0, #0x06
	mcr	p15, 0, r0, c2, c0, 0		@ region 1,2 d-cacheable
	mcr	p15, 0, r0, c2, c0, 1		@ region 1,2 i-cacheable
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	mov	r0, #0x00			@ disable whole write buffer
#else
	mov	r0, #0x02			@ region 1 write buffered
#endif
	mcr	p15, 0, r0, c3, c0, 0

/*
 *  Access Permission Settings for future permission control by PU.
 *
 *				priv.	user
 * 	region 0 (whole)	rw	--	: b0001
 * 	region 1 (RAM)		rw	rw	: b0011
 * 	region 2 (FLASH)	rw	r-	: b0010
 *	region 3~7 (none)	--	--	: b0000
 */
	mov	r0, #0x00000031			@ regions 0,1 per table above
	orr	r0, r0, #0x00000200		@ region 2 per table above
	mcr	p15, 0, r0, c5, c0, 2		@ set data access permission
	mcr	p15, 0, r0, c5, c0, 3		@ set inst. access permission

	mrc	p15, 0, r0, c1, c0		@ get control register
	orr	r0, r0, #0x00001000		@ I-cache
	orr	r0, r0, #0x00000005		@ MPU/D-cache
#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
	orr	r0, r0, #0x00004000		@ .1.. .... .... ....
#endif
	ret	lr				@ caller writes r0 to cp15 c1

	.size	__arm946_setup, . - __arm946_setup

385	__INITDATA
386
387	@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
388	define_processor_functions arm946, dabort=nommu_early_abort, pabort=legacy_pabort, nommu=1
389
390	.section ".rodata"
391
392	string	cpu_arch_name, "armv5te"
393	string	cpu_elf_name, "v5t"
394	string	cpu_arm946_name, "ARM946E-S"
395
396	.align
397
398	.section ".proc.info.init", "a"
399	.type	__arm946_proc_info,#object
400__arm946_proc_info:
401	.long	0x41009460
402	.long	0xff00fff0
403	.long	0
404	.long	0
405	initfn	__arm946_setup, __arm946_proc_info
406	.long	cpu_arch_name
407	.long	cpu_elf_name
408	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
409	.long	cpu_arm946_name
410	.long	arm946_processor_functions
411	.long	0
412	.long	0
413	.long	arm946_cache_fns
414	.size	__arm946_proc_info, . - __arm946_proc_info
415
416