xref: /openbmc/linux/arch/arm/mm/proc-arm1026.S (revision d5cb9783536a41df9f9cba5b0a1d78047ed787f7)
1/*
2 *  linux/arch/arm/mm/proc-arm1026.S: MMU functions for ARM1026EJ-S
3 *
4 *  Copyright (C) 2000 ARM Limited
5 *  Copyright (C) 2000 Deep Blue Solutions Ltd.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 *
13 * These are the low level assembler for performing cache and TLB
14 * functions on the ARM1026EJ-S.
15 */
16#include <linux/linkage.h>
17#include <linux/config.h>
18#include <linux/init.h>
19#include <asm/assembler.h>
20#include <asm/asm-offsets.h>
21#include <asm/pgtable.h>
22#include <asm/procinfo.h>
23#include <asm/ptrace.h>
24
25/*
26 * This is the maximum size of an area which will be invalidated
27 * using the single invalidate entry instructions.  Anything larger
28 * than this, and we go for the whole cache.
29 *
30 * This value should be chosen such that we choose the cheapest
31 * alternative.
32 */
33#define MAX_AREA_SIZE	32768
34
35/*
36 * The size of one data cache line.
37 */
38#define CACHE_DLINESIZE	32
39
40/*
41 * The number of data cache segments.
42 */
43#define CACHE_DSEGMENTS	16
44
45/*
46 * The number of lines in a cache segment.
47 */
48#define CACHE_DENTRIES	64
49
50/*
51 * This is the size at which it becomes more efficient to
52 * clean the whole cache, rather than using the individual
53 * cache line maintenance instructions.
54 */
55#define CACHE_DLIMIT	32768
56
57	.text
58/*
59 * cpu_arm1026_proc_init()
60 */
61ENTRY(cpu_arm1026_proc_init)
	@ No per-CPU initialisation is required for the ARM1026EJ-S;
	@ simply return to the caller.
62	mov	pc, lr
63
64/*
65 * cpu_arm1026_proc_fin()
66 */
67ENTRY(cpu_arm1026_proc_fin)
	@ Shut the processor down cleanly: mask interrupts, flush the
	@ caches, then clear the cache-enable bits in the CP15 control
	@ register so the caches are off when we return.
68	stmfd	sp!, {lr}			@ save return address
69	mov	ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
70	msr	cpsr_c, ip			@ mask FIQ/IRQ, stay in SVC mode
71	bl	arm1026_flush_kern_cache_all	@ clean+invalidate whole cache
72	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
73	bic	r0, r0, #0x1000 		@ ...i............ (clear I-cache enable)
74	bic	r0, r0, #0x000e 		@ ............wca. (clear WB/D-cache/align)
75	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
76	ldmfd	sp!, {pc}			@ return
77
78/*
79 * cpu_arm1026_reset(loc)
80 *
81 * Perform a soft reset of the system.	Put the CPU into the
82 * same state as it would be if it had been reset, and branch
83 * to what would be the reset vector.
84 *
85 * loc: location to jump to for soft reset
86 */
87	.align	5
88ENTRY(cpu_arm1026_reset)
	@ r0 = address to branch to after the soft reset.
	@ Invalidate caches and TLBs and turn off the MMU and caches so
	@ the branch target runs with the CPU in a reset-like state.
89	mov	ip, #0
90	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
91	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
92	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
93	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
94	bic	ip, ip, #0x000f 		@ ............wcam (WB, D-cache, align, MMU off)
95	bic	ip, ip, #0x1100 		@ ...i...s........ (I-cache, system bit off)
96	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
97	mov	pc, r0				@ jump to the reset location
98
99/*
100 * cpu_arm1026_do_idle()
101 */
102	.align	5
103ENTRY(cpu_arm1026_do_idle)
	@ Enter the low-power wait-for-interrupt state; execution resumes
	@ here when an interrupt arrives.
104	mcr	p15, 0, r0, c7, c0, 4		@ Wait for interrupt
105	mov	pc, lr
106
107/* ================================= CACHE ================================ */
108
109	.align	5
110/*
111 *	flush_user_cache_all()
112 *
113 *	Invalidate all cache entries in a particular address
114 *	space.
115 */
116ENTRY(arm1026_flush_user_cache_all)
117	/* FALLTHROUGH */
118/*
119 *	flush_kern_cache_all()
120 *
121 *	Clean and invalidate the entire cache.
122 */
123ENTRY(arm1026_flush_kern_cache_all)
124	mov	r2, #VM_EXEC			@ force the VM_EXEC path so I-cache is done too
125	mov	ip, #0
126__flush_whole_cache:
127#ifndef CONFIG_CPU_DCACHE_DISABLE
	@ "Test, clean and invalidate": with r15 as the destination the
	@ CP15 result is written to the CPSR flags, so we loop until the
	@ entire D-cache has been cleaned and invalidated.
1281:	mrc	p15, 0, r15, c7, c14, 3		@ test, clean, invalidate
129	bne	1b
130#endif
131	tst	r2, #VM_EXEC			@ executable mapping?
132#ifndef CONFIG_CPU_ICACHE_DISABLE
133	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
134#endif
135	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
136	mov	pc, lr
137
138/*
139 *	flush_user_cache_range(start, end, flags)
140 *
141 *	Invalidate a range of cache entries in the specified
142 *	address space.
143 *
144 *	- start	- start address (inclusive)
145 *	- end	- end address (exclusive)
146 *	- flags	- vm_flags for this space
147 */
148ENTRY(arm1026_flush_user_cache_range)
	@ r0 = start (inclusive), r1 = end (exclusive), r2 = vm_flags.
	@ For large ranges it is cheaper to flush the whole cache.
149	mov	ip, #0
150	sub	r3, r1, r0			@ calculate total size
151	cmp	r3, #CACHE_DLIMIT		@ range too big to do line-by-line?
152	bhs	__flush_whole_cache
153
154#ifndef CONFIG_CPU_DCACHE_DISABLE
	@ Walk the range one cache line at a time.
1551:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
156	add	r0, r0, #CACHE_DLINESIZE
157	cmp	r0, r1
158	blo	1b
159#endif
160	tst	r2, #VM_EXEC			@ executable mapping?
161#ifndef CONFIG_CPU_ICACHE_DISABLE
162	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
163#endif
164	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
165	mov	pc, lr
166
167/*
168 *	coherent_kern_range(start, end)
169 *
170 *	Ensure coherency between the Icache and the Dcache in the
171 *	region described by start.  If you have non-snooping
172 *	Harvard caches, you need to implement this function.
173 *
174 *	- start	- virtual start address
175 *	- end	- virtual end address
176 */
177ENTRY(arm1026_coherent_kern_range)
178	/* FALLTHROUGH */
179/*
180 *	coherent_user_range(start, end)
181 *
182 *	Ensure coherency between the Icache and the Dcache in the
183 *	region described by start.  If you have non-snooping
184 *	Harvard caches, you need to implement this function.
185 *
186 *	- start	- virtual start address
187 *	- end	- virtual end address
188 */
189ENTRY(arm1026_coherent_user_range)
	@ For each line in [start, end): write the D-cache line back to
	@ memory, then invalidate the corresponding I-cache line, so
	@ newly written code becomes visible to instruction fetch.
190	mov	ip, #0
191	bic	r0, r0, #CACHE_DLINESIZE - 1	@ align start down to a cache line
1921:
193#ifndef CONFIG_CPU_DCACHE_DISABLE
194	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
195#endif
196#ifndef CONFIG_CPU_ICACHE_DISABLE
197	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
198#endif
199	add	r0, r0, #CACHE_DLINESIZE
200	cmp	r0, r1
201	blo	1b
202	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
203	mov	pc, lr
204
205/*
206 *	flush_kern_dcache_page(void *page)
207 *
208 *	Ensure no D cache aliasing occurs, either with itself or
209 *	the I cache
210 *
211 *	- page	- page aligned address
212 */
213ENTRY(arm1026_flush_kern_dcache_page)
	@ r0 = page-aligned address; clean+invalidate every D-cache line
	@ covering that one page, then drain the write buffer.
214	mov	ip, #0
215#ifndef CONFIG_CPU_DCACHE_DISABLE
216	add	r1, r0, #PAGE_SZ		@ r1 = end of page (exclusive)
2171:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
218	add	r0, r0, #CACHE_DLINESIZE
219	cmp	r0, r1
220	blo	1b
221#endif
222	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
223	mov	pc, lr
224
225/*
226 *	dma_inv_range(start, end)
227 *
228 *	Invalidate (discard) the specified virtual address range.
229 *	May not write back any entries.  If 'start' or 'end'
230 *	are not cache line aligned, those lines must be written
231 *	back.
232 *
233 *	- start	- virtual start address
234 *	- end	- virtual end address
235 *
236 * (same as v4wb)
237 */
238ENTRY(arm1026_dma_inv_range)
	@ Discard D-cache lines in [r0, r1) without writing them back.
	@ Lines only partially covered at an unaligned start or end are
	@ cleaned first so data outside the range is not lost.
239	mov	ip, #0
240#ifndef CONFIG_CPU_DCACHE_DISABLE
241	tst	r0, #CACHE_DLINESIZE - 1	@ start not line aligned?
242	bic	r0, r0, #CACHE_DLINESIZE - 1
243	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry (partial first line)
244	tst	r1, #CACHE_DLINESIZE - 1	@ end not line aligned?
245	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry (partial last line)
2461:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
247	add	r0, r0, #CACHE_DLINESIZE
248	cmp	r0, r1
249	blo	1b
250#endif
251	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
252	mov	pc, lr
253
254/*
255 *	dma_clean_range(start, end)
256 *
257 *	Clean the specified virtual address range.
258 *
259 *	- start	- virtual start address
260 *	- end	- virtual end address
261 *
262 * (same as v4wb)
263 */
264ENTRY(arm1026_dma_clean_range)
	@ Write back (clean) every D-cache line covering [r0, r1) to
	@ memory; the lines remain valid in the cache.
265	mov	ip, #0
266#ifndef CONFIG_CPU_DCACHE_DISABLE
267	bic	r0, r0, #CACHE_DLINESIZE - 1	@ align start down to a cache line
2681:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
269	add	r0, r0, #CACHE_DLINESIZE
270	cmp	r0, r1
271	blo	1b
272#endif
273	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
274	mov	pc, lr
275
276/*
277 *	dma_flush_range(start, end)
278 *
279 *	Clean and invalidate the specified virtual address range.
280 *
281 *	- start	- virtual start address
282 *	- end	- virtual end address
283 */
284ENTRY(arm1026_dma_flush_range)
	@ Clean and invalidate every D-cache line covering [r0, r1):
	@ dirty data is written back and the lines are then discarded.
285	mov	ip, #0
286#ifndef CONFIG_CPU_DCACHE_DISABLE
287	bic	r0, r0, #CACHE_DLINESIZE - 1	@ align start down to a cache line
2881:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
289	add	r0, r0, #CACHE_DLINESIZE
290	cmp	r0, r1
291	blo	1b
292#endif
293	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
294	mov	pc, lr
295
	@ Cache function-pointer table referenced from __arm1026_proc_info.
	@ NOTE(review): entry order is ABI — it must match the kernel's
	@ cpu_cache_fns structure layout; verify against asm/cacheflush.h
	@ before reordering.
296ENTRY(arm1026_cache_fns)
297	.long	arm1026_flush_kern_cache_all
298	.long	arm1026_flush_user_cache_all
299	.long	arm1026_flush_user_cache_range
300	.long	arm1026_coherent_kern_range
301	.long	arm1026_coherent_user_range
302	.long	arm1026_flush_kern_dcache_page
303	.long	arm1026_dma_inv_range
304	.long	arm1026_dma_clean_range
305	.long	arm1026_dma_flush_range
306
307	.align	5
	@ cpu_arm1026_dcache_clean_area(addr, size)
	@ r0 = start address, r1 = size in bytes; clean (write back) the
	@ D-cache lines covering the area, one line at a time.
308ENTRY(cpu_arm1026_dcache_clean_area)
309#ifndef CONFIG_CPU_DCACHE_DISABLE
310	mov	ip, #0
3111:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
312	add	r0, r0, #CACHE_DLINESIZE
313	subs	r1, r1, #CACHE_DLINESIZE	@ r1 = bytes remaining
314	bhi	1b
315#endif
316	mov	pc, lr
317
318/* =============================== PageTable ============================== */
319
320/*
321 * cpu_arm1026_switch_mm(pgd)
322 *
323 * Set the translation base pointer to be as described by pgd.
324 *
325 * pgd: new page tables
326 */
327	.align	5
328ENTRY(cpu_arm1026_switch_mm)
	@ r0 = physical address of the new page table.  The caches are
	@ flushed before switching because these are virtually-addressed
	@ caches: stale entries must not survive an address-space change.
329	mov	r1, #0
330#ifndef CONFIG_CPU_DCACHE_DISABLE
	@ r15 destination => CP15 result sets the CPSR flags; loop until
	@ the whole D-cache is clean and invalidated.
3311:	mrc	p15, 0, r15, c7, c14, 3		@ test, clean, invalidate
332	bne	1b
333#endif
334#ifndef CONFIG_CPU_ICACHE_DISABLE
335	mcr	p15, 0, r1, c7, c5, 0		@ invalidate I cache
336#endif
337	mcr	p15, 0, r1, c7, c10, 4		@ drain WB
338	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
339	mcr	p15, 0, r1, c8, c7, 0		@ invalidate I & D TLBs
340	mov	pc, lr
341
342/*
343 * cpu_arm1026_set_pte(ptep, pte)
344 *
345 * Set a PTE and flush it out
346 */
347	.align	5
348ENTRY(cpu_arm1026_set_pte)
	@ r0 = ptep (Linux PTE location), r1 = pte value.  Store the Linux
	@ view of the PTE, then derive and store the hardware small-page
	@ descriptor 2048 bytes below it (post-decrement moves r0 there).
349	str	r1, [r0], #-2048		@ linux version
350
	@ Invert PRESENT/YOUNG/WRITE/DIRTY so the tst tests below can use
	@ simple eq/ne conditions on the combined bit groups.
351	eor	r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
352
	@ Build the hardware descriptor in r2: clear AP and type fields,
	@ then mark it as a small page.
353	bic	r2, r1, #PTE_SMALL_AP_MASK
354	bic	r2, r2, #PTE_TYPE_MASK
355	orr	r2, r2, #PTE_TYPE_SMALL
356
357	tst	r1, #L_PTE_USER			@ User?
358	orrne	r2, r2, #PTE_SMALL_AP_URO_SRW
359
	@ Bits were inverted above: eq here means both WRITE and DIRTY
	@ were set, so leave the page writable; otherwise user no-access.
360	tst	r1, #L_PTE_WRITE | L_PTE_DIRTY	@ Write and Dirty?
361	orreq	r2, r2, #PTE_SMALL_AP_UNO_SRW
362
	@ ne (after the inversion) means not both PRESENT and YOUNG:
	@ install a zero (faulting) hardware entry instead.
363	tst	r1, #L_PTE_PRESENT | L_PTE_YOUNG	@ Present and Young?
364	movne	r2, #0
365
366#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	@ In write-through mode, clear the bufferable bit (bit 2) for
	@ cacheable small pages.
367	eor	r3, r1, #0x0a			@ C & small page?
368	tst	r3, #0x0b
369	biceq	r2, r2, #4
370#endif
371	str	r2, [r0]			@ hardware version
	@ NOTE(review): apparent no-op below — presumably a required
	@ delay/barrier before the cache operation; confirm against the
	@ ARM1026EJ-S TRM before removing.
372	mov	r0, r0
373#ifndef CONFIG_CPU_DCACHE_DISABLE
374	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
375#endif
376	mov	pc, lr
377
378
379	__INIT
380
381	.type	__arm1026_setup, #function
	@ Early CPU setup, branched to from __arm1026_proc_info during
	@ boot.  Invalidates caches/TLBs, loads the page table pointer
	@ (r4 — presumably set up by the boot code; verify against the
	@ caller in head.S), and returns the desired CP15 control
	@ register value in r0 for the caller to install.
382__arm1026_setup:
383	mov	r0, #0
384	mcr	p15, 0, r0, c7, c7		@ invalidate I,D caches on v4
385	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer on v4
386	mcr	p15, 0, r0, c8, c7		@ invalidate I,D TLBs on v4
387	mcr	p15, 0, r4, c2, c0		@ load page table pointer
388#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
389	mov	r0, #4				@ explicitly disable writeback
390	mcr	p15, 7, r0, c15, c0, 0
391#endif
	@ Compose the control register value: clear the bits in
	@ arm1026_cr1_clear, then set the bits in arm1026_cr1_set.
392	mrc	p15, 0, r0, c1, c0		@ get control register v4
393	ldr	r5, arm1026_cr1_clear
394	bic	r0, r0, r5
395	ldr	r5, arm1026_cr1_set
396	orr	r0, r0, r5
397#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
398	orr	r0, r0, #0x4000 		@ .R.. .... .... .... (round-robin replacement)
399#endif
400	mov	pc, lr				@ return; r0 = control register value
401	.size	__arm1026_setup, . - __arm1026_setup
402
403	/*
404	 *  R
405	 * .RVI ZFRS BLDP WCAM
406	 * .011 1001 ..11 0101
407	 *
408	 */
	@ CP15 control register masks used by __arm1026_setup: bits to
	@ clear and bits to set, per the bit chart above.
409	.type	arm1026_cr1_clear, #object
410	.type	arm1026_cr1_set, #object
411arm1026_cr1_clear:
412	.word	0x7f3f
413arm1026_cr1_set:
414	.word	0x3935
415
416	__INITDATA
417
418/*
419 * Purpose : Function pointers used to access above functions - all calls
420 *	     come through these
421 */
	@ NOTE(review): entry order is ABI — it must match the kernel's
	@ processor function-table layout (abort handler first, then the
	@ cpu_* operations); verify against asm/cpu-*.h before reordering.
422	.type	arm1026_processor_functions, #object
423arm1026_processor_functions:
424	.word	v5t_early_abort
425	.word	cpu_arm1026_proc_init
426	.word	cpu_arm1026_proc_fin
427	.word	cpu_arm1026_reset
428	.word	cpu_arm1026_do_idle
429	.word	cpu_arm1026_dcache_clean_area
430	.word	cpu_arm1026_switch_mm
431	.word	cpu_arm1026_set_pte
432	.size	arm1026_processor_functions, . - arm1026_processor_functions
433
434	.section .rodata
435
	@ Architecture and ELF-name strings referenced from the
	@ proc.info.init record below.
436	.type	cpu_arch_name, #object
437cpu_arch_name:
438	.asciz	"armv5tej"
439	.size	cpu_arch_name, . - cpu_arch_name
440
441	.type	cpu_elf_name, #object
442cpu_elf_name:
443	.asciz	"v5"
444	.size	cpu_elf_name, . - cpu_elf_name
445	.align
446
	@ Human-readable CPU name; suffix letters are appended at build
	@ time to reflect the configured features: i = I-cache enabled,
	@ d(wt)/d(wb) = D-cache write-through/write-back, B = branch
	@ prediction, RR = round-robin cache replacement.
447	.type	cpu_arm1026_name, #object
448cpu_arm1026_name:
449	.ascii	"ARM1026EJ-S"
450#ifndef CONFIG_CPU_ICACHE_DISABLE
451	.ascii	"i"
452#endif
453#ifndef CONFIG_CPU_DCACHE_DISABLE
454	.ascii	"d"
455#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
456	.ascii	"(wt)"
457#else
458	.ascii	"(wb)"
459#endif
460#endif
461#ifndef CONFIG_CPU_BPREDICT_DISABLE
462	.ascii	"B"
463#endif
464#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
465	.ascii	"RR"
466#endif
467	.ascii	"\0"
468	.size	cpu_arm1026_name, . - cpu_arm1026_name
469
470	.align
471
472	.section ".proc.info.init", #alloc, #execinstr
473
	@ Processor info record matched by the boot code against the CPU
	@ ID register: ID value, mask, initial section-mapping flags, the
	@ setup entry point, name/capability strings, and the function
	@ tables for this CPU.  NOTE(review): field order is ABI — it
	@ must match struct proc_info_list; verify before reordering.
474	.type	__arm1026_proc_info,#object
475__arm1026_proc_info:
476	.long	0x4106a260			@ ARM 1026EJ-S (v5TEJ)
477	.long	0xff0ffff0			@ CPU ID mask
478	.long   PMD_TYPE_SECT | \
479		PMD_BIT4 | \
480		PMD_SECT_AP_WRITE | \
481		PMD_SECT_AP_READ
482	b	__arm1026_setup			@ early CPU setup entry point
483	.long	cpu_arch_name
484	.long	cpu_elf_name
485	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_JAVA
486	.long	cpu_arm1026_name
487	.long	arm1026_processor_functions
488	.long	v4wbi_tlb_fns
489	.long	v4wb_user_fns
490	.long	arm1026_cache_fns
491	.size	__arm1026_proc_info, . - __arm1026_proc_info
492