/*
 *  linux/arch/arm/mm/arm925.S: MMU functions for ARM925
 *
 *  Copyright (C) 1999,2000 ARM Limited
 *  Copyright (C) 2000 Deep Blue Solutions Ltd.
 *  Copyright (C) 2002 RidgeRun, Inc.
 *  Copyright (C) 2002-2003 MontaVista Software, Inc.
 *
 *  Update for Linux-2.6 and cache flush improvements
 *  Copyright (C) 2004 Nokia Corporation by Tony Lindgren <tony@atomide.com>
 *
 *  hacked for non-paged-MM by Hyok S. Choi, 2004.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 *
 * This is the low-level assembler for performing cache and TLB
 * functions on the arm925.
 *
 *  CONFIG_CPU_ARM925_CPU_IDLE -> nohlt
 *
 * Some additional notes based on deciphering the TI TRM on OMAP-5910:
 *
 * NOTE1: The TI925T Configuration Register bit "D-cache clean and flush
 *	  entry mode" must be 0 to flush the entries in both segments
 *	  at once. This is the default value. See TRM 2-20 and 2-24 for
 *	  more information.
 *
 * NOTE2: Default is the "D-cache clean and flush entry mode". It looks
 *	  like "Transparent mode" must be on for partial cache flushes
 *	  to work in this mode. This mode only works with 16-bit external
 *	  memory. See TRM 2-24 for more information.
 *
 * NOTE3: Write-back cache flushing seems to be flaky with devices using
 *        direct memory access, such as USB OHCI. The workaround is to use
 *        write-through cache with CONFIG_CPU_DCACHE_WRITETHROUGH (this is
 *        the default for OMAP-1510).
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/elf.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"

/*
 * The size of one data cache line.
 */
#define CACHE_DLINESIZE	16

/*
 * The number of data cache segments.
 */
#define CACHE_DSEGMENTS	2

/*
 * The number of lines in a cache segment.
 */
#define CACHE_DENTRIES	256

/*
 * This is the size at which it becomes more efficient to
 * clean the whole cache, rather than using the individual
 * cache line maintenance instructions.
 */
#define CACHE_DLIMIT	8192
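/*
 * Note: 2 segments x 256 lines x 16 bytes = 8192 bytes, so this limit
 * equals the total D-cache size; ranges larger than the cache fall
 * back to the whole-cache flush loop.
 */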

	.text
/*
 * cpu_arm925_proc_init()
 */
ENTRY(cpu_arm925_proc_init)
	mov	pc, lr

/*
 * cpu_arm925_proc_fin()
 */
ENTRY(cpu_arm925_proc_fin)
	stmfd	sp!, {lr}
	mov	ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
	msr	cpsr_c, ip
	bl	arm925_flush_kern_cache_all
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x1000			@ ...i............
	bic	r0, r0, #0x000e			@ ............wca.
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	ldmfd	sp!, {pc}

/*
 * cpu_arm925_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset
 */
	.align	5
ENTRY(cpu_arm925_reset)
	/* Send software reset to MPU and DSP */
	mov	ip, #0xff000000
	orr	ip, ip, #0x00fe0000
	orr	ip, ip, #0x0000ce00
	mov	r4, #1
	strh	r4, [ip, #0x10]
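	/* The halfword write above lands at 0xfffece10, which appears to
	 * be the OMAP ARM_RSTCT1 software reset control register (register
	 * name assumed from the OMAP-5910 TRM). */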

	mov	ip, #0
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
#ifdef CONFIG_MMU
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
#endif
	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
	bic	ip, ip, #0x000f			@ ............wcam
	bic	ip, ip, #0x1100			@ ...i...s........
	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
	mov	pc, r0

/*
 * cpu_arm925_do_idle()
 *
 * Called with IRQs disabled
 */
	.align	10
ENTRY(cpu_arm925_do_idle)
	mov	r0, #0
	mrc	p15, 0, r1, c1, c0, 0		@ Read control register
	mcr	p15, 0, r0, c7, c10, 4		@ Drain write buffer
	bic	r2, r1, #1 << 12
	mcr	p15, 0, r2, c1, c0, 0		@ Disable I cache
	mcr	p15, 0, r0, c7, c0, 4		@ Wait for interrupt
	mcr	p15, 0, r1, c1, c0, 0		@ Restore ICache enable
	mov	pc, lr

/*
 *	flush_user_cache_all()
 *
 *	Clean and invalidate all cache entries in a particular
 *	address space.
 */
ENTRY(arm925_flush_user_cache_all)
	/* FALLTHROUGH */

/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
ENTRY(arm925_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	mov	ip, #0
__flush_whole_cache:
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, ip, c7, c6, 0		@ invalidate D cache
#else
	/* Flush entries in both segments at once, see NOTE1 above */
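	/* r3 holds the line index in bits [11:4] (16-byte lines, 256
	 * entries); with the TI "entry mode" bit clear, each index-based
	 * clean+invalidate below hits that line in both segments. */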
	mov	r3, #(CACHE_DENTRIES - 1) << 4	@ 256 entries in segment
2:	mcr	p15, 0, r3, c7, c14, 2		@ clean+invalidate D index
	subs	r3, r3, #1 << 4
	bcs	2b				@ entries 255 to 0
#endif
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 *	flush_user_cache_range(start, end, flags)
 *
 *	Clean and invalidate a range of cache entries in the
 *	specified address range.
 *
 *	- start	- start address (inclusive)
 *	- end	- end address (exclusive)
 *	- flags	- vm_flags describing address space
 */
ENTRY(arm925_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #CACHE_DLIMIT
	bgt	__flush_whole_cache
1:	tst	r2, #VM_EXEC
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
#else
	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D entry
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D entry
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
#endif
	cmp	r0, r1
	blo	1b
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start, end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
ENTRY(arm925_coherent_kern_range)
	/* FALLTHROUGH */

/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start, end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
ENTRY(arm925_coherent_user_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 *	flush_kern_dcache_page(void *page)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache
 *
 *	- addr	- page aligned address
 */
ENTRY(arm925_flush_kern_dcache_page)
	add	r1, r0, #PAGE_SZ
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 *	dma_inv_range(start, end)
 *
 *	Invalidate (discard) the specified virtual address range.
 *	May not write back any entries.  If 'start' or 'end'
 *	are not cache line aligned, those lines must be written
 *	back.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 *
 * (same as v4wb)
 */
ENTRY(arm925_dma_inv_range)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	tst	r0, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
	tst	r1, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
#endif
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 *	dma_clean_range(start, end)
 *
 *	Clean the specified virtual address range.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 *
 * (same as v4wb)
 */
ENTRY(arm925_dma_clean_range)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
#endif
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 *	dma_flush_range(start, end)
 *
 *	Clean and invalidate the specified virtual address range.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
ENTRY(arm925_dma_flush_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
#else
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
#endif
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr

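/*
 * Function pointer table for the cache operations above; the entry
 * order must match struct cpu_cache_fns in <asm/cacheflush.h>.
 */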
ENTRY(arm925_cache_fns)
	.long	arm925_flush_kern_cache_all
	.long	arm925_flush_user_cache_all
	.long	arm925_flush_user_cache_range
	.long	arm925_coherent_kern_range
	.long	arm925_coherent_user_range
	.long	arm925_flush_kern_dcache_page
	.long	arm925_dma_inv_range
	.long	arm925_dma_clean_range
	.long	arm925_dma_flush_range

ENTRY(cpu_arm925_dcache_clean_area)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	subs	r1, r1, #CACHE_DLINESIZE
	bhi	1b
#endif
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr

/* =============================== PageTable ============================== */

/*
 * cpu_arm925_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables
 */
	.align	5
ENTRY(cpu_arm925_switch_mm)
#ifdef CONFIG_MMU
	mov	ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, ip, c7, c6, 0		@ invalidate D cache
#else
	/* Flush entries in both segments at once, see NOTE1 above */
	mov	r3, #(CACHE_DENTRIES - 1) << 4	@ 256 entries in segment
2:	mcr	p15, 0, r3, c7, c14, 2		@ clean & invalidate D index
	subs	r3, r3, #1 << 4
	bcs	2b				@ entries 255 to 0
#endif
	mcr	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
#endif
	mov	pc, lr

/*
 * cpu_arm925_set_pte_ext(ptep, pte, ext)
 *
 * Set a PTE and flush it out
 */
	.align	5
ENTRY(cpu_arm925_set_pte_ext)
#ifdef CONFIG_MMU
	str	r1, [r0], #-2048		@ linux version
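	/* An L2 table page keeps the hardware page tables in its first
	 * 2048 bytes and the Linux shadow PTEs above them; the
	 * post-decrement leaves r0 at the hardware copy of this entry. */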

	eor	r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
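	/* The eor inverts the present/young/write/dirty flags so the tst
	 * instructions below test for *cleared* bits: eq now means every
	 * flag in the tested mask was set in the original PTE. */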

	bic	r2, r1, #PTE_SMALL_AP_MASK
	bic	r2, r2, #PTE_TYPE_MASK
	orr	r2, r2, #PTE_TYPE_SMALL

	tst	r1, #L_PTE_USER			@ User?
	orrne	r2, r2, #PTE_SMALL_AP_URO_SRW

	tst	r1, #L_PTE_WRITE | L_PTE_DIRTY	@ Write and Dirty?
	orreq	r2, r2, #PTE_SMALL_AP_UNO_SRW

	tst	r1, #L_PTE_PRESENT | L_PTE_YOUNG	@ Present and Young?
	movne	r2, #0

#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	eor	r3, r2, #0x0a			@ C & small page?
	tst	r3, #0x0b
	biceq	r2, r2, #4
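	/* The eor/tst pair matches a cacheable small page: Z is set only
	 * for type bits [1:0] == 10 (small page) with C=1; biceq then
	 * clears the B bit so such pages become write-through (C=1, B=0). */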
#endif
	str	r2, [r0]			@ hardware version
	mov	r0, r0
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
#endif
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
#endif /* CONFIG_MMU */
	mov	pc, lr

	__INIT

	.type	__arm925_setup, #function
__arm925_setup:
	mov	r0, #0
#if defined(CONFIG_CPU_ICACHE_STREAMING_DISABLE)
	orr	r0, r0, #1 << 7
#endif

	/* Transparent on, D-cache clean & flush mode. See NOTE2 above */
	orr	r0, r0, #1 << 1			@ transparent mode on
	mcr	p15, 0, r0, c15, c1, 0		@ write TI config register

	mov	r0, #0
	mcr	p15, 0, r0, c7, c7		@ invalidate I,D caches on v4
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer on v4
#ifdef CONFIG_MMU
	mcr	p15, 0, r0, c8, c7		@ invalidate I,D TLBs on v4
#endif

#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	mov	r0, #4				@ disable write-back on caches explicitly
	mcr	p15, 7, r0, c15, c0, 0
#endif

	adr	r5, arm925_crval
	ldmia	r5, {r5, r6}
	mrc	p15, 0, r0, c1, c0		@ get control register v4
	bic	r0, r0, r5
	orr	r0, r0, r6
#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
	orr	r0, r0, #0x4000			@ .1.. .... .... ....
#endif
	mov	pc, lr
	.size	__arm925_setup, . - __arm925_setup

	/*
	 *  R
	 * .RVI ZFRS BLDP WCAM
	 * .011 0001 ..11 1101
	 *
	 */
	.type	arm925_crval, #object
arm925_crval:
	crval	clear=0x00007f3f, mmuset=0x0000313d, ucset=0x00001130
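	/* crval (proc-macros.S) emits a clear/set word pair: __arm925_setup
	 * above clears the 'clear' bits in the control register, then ORs
	 * in 'mmuset' (or 'ucset' on no-MMU builds). */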

	__INITDATA

/*
 * Purpose : Function pointers used to access above functions - all calls
 *	     come through these
 */
	.type	arm925_processor_functions, #object
arm925_processor_functions:
	.word	v4t_early_abort
	.word	pabort_noifar
	.word	cpu_arm925_proc_init
	.word	cpu_arm925_proc_fin
	.word	cpu_arm925_reset
	.word	cpu_arm925_do_idle
	.word	cpu_arm925_dcache_clean_area
	.word	cpu_arm925_switch_mm
	.word	cpu_arm925_set_pte_ext
	.size	arm925_processor_functions, . - arm925_processor_functions

	.section ".rodata"

	.type	cpu_arch_name, #object
cpu_arch_name:
	.asciz	"armv4t"
	.size	cpu_arch_name, . - cpu_arch_name

	.type	cpu_elf_name, #object
cpu_elf_name:
	.asciz	"v4"
	.size	cpu_elf_name, . - cpu_elf_name

	.type	cpu_arm925_name, #object
cpu_arm925_name:
	.asciz	"ARM925T"
	.size	cpu_arm925_name, . - cpu_arm925_name

	.align

	.section ".proc.info.init", #alloc, #execinstr

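/*
 * Each proc_info record below begins with a CPU ID value/mask pair used
 * to match the processor at boot, followed by the section-mapping PMD
 * flags, the setup branch, and the name/function tables defined above.
 */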
	.type	__arm925_proc_info,#object
__arm925_proc_info:
	.long	0x54029250
	.long	0xfffffff0
	.long	PMD_TYPE_SECT | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__arm925_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
	.long	cpu_arm925_name
	.long	arm925_processor_functions
	.long	v4wbi_tlb_fns
	.long	v4wb_user_fns
	.long	arm925_cache_fns
	.size	__arm925_proc_info, . - __arm925_proc_info

	.type	__arm915_proc_info,#object
__arm915_proc_info:
	.long	0x54029150
	.long	0xfffffff0
	.long	PMD_TYPE_SECT | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__arm925_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
	.long	cpu_arm925_name
	.long	arm925_processor_functions
	.long	v4wbi_tlb_fns
	.long	v4wb_user_fns
	.long	arm925_cache_fns
	.size	__arm915_proc_info, . - __arm915_proc_info
563