xref: /openbmc/linux/arch/arm/mm/proc-arm940.S (revision 4800cd83)
/*
 *  linux/arch/arm/mm/arm940.S: utility functions for ARM940T
 *
 *  Copyright (C) 2004-2006 Hyok S. Choi (hyok.choi@samsung.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include "proc-macros.S"

/* ARM940T has a 4KB DCache comprising 256 lines of 4 words */
#define CACHE_DLINESIZE	16
#define CACHE_DSEGMENTS	4
#define CACHE_DENTRIES	64

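/*
 * The geometry above works out to 4 segments x 64 entries x 16 bytes = 4KB.
 * The per-index c7 operations used below place the entry number in bits
 * [31:26] and the segment in bits [5:4] of the operand register, so each
 * clean/flush loop walks the whole D cache.  As a rough C sketch
 * (illustrative only; dcache_op_index() is a hypothetical stand-in for the
 * relevant MCR):
 *
 *	for (seg = CACHE_DSEGMENTS - 1; seg >= 0; seg--)
 *		for (idx = CACHE_DENTRIES - 1; idx >= 0; idx--)
 *			dcache_op_index((idx << 26) | (seg << 4));
 */
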
	.text
/*
 * cpu_arm940_proc_init()
 * cpu_arm940_switch_mm()
 *
 * These are not required.
 */
ENTRY(cpu_arm940_proc_init)
ENTRY(cpu_arm940_switch_mm)
	mov	pc, lr

/*
 * cpu_arm940_proc_fin()
 */
ENTRY(cpu_arm940_proc_fin)
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x00001000		@ i-cache
	bic	r0, r0, #0x00000004		@ d-cache
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	mov	pc, lr

/*
 * cpu_arm940_reset(loc)
 * Params  : r0 = address to jump to
 * Notes   : This sets up everything for a reset
 */
ENTRY(cpu_arm940_reset)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c5, 0		@ flush I cache
	mcr	p15, 0, ip, c7, c6, 0		@ flush D cache
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
	bic	ip, ip, #0x00000005		@ .............c.p
	bic	ip, ip, #0x00001000		@ i-cache
	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
	mov	pc, r0

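/*
 * The control register masks used in cpu_arm940_proc_fin() and
 * cpu_arm940_reset() above, and again in __arm940_setup() below, refer to
 * individual enable bits of CP15 register 1: 0x00001000 is bit 12 (I cache),
 * and 0x00000005 covers bit 2 (D cache) and bit 0 (protection unit), i.e.
 * the "c" and "p" marked in the ".............c.p" comment.
 */
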
/*
 * cpu_arm940_do_idle()
 */
	.align	5
ENTRY(cpu_arm940_do_idle)
	mcr	p15, 0, r0, c7, c0, 4		@ Wait for interrupt
	mov	pc, lr

/*
 *	flush_icache_all()
 *
 *	Unconditionally clean and invalidate the entire icache.
 */
ENTRY(arm940_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mov	pc, lr
ENDPROC(arm940_flush_icache_all)

/*
 *	flush_user_cache_all()
 */
ENTRY(arm940_flush_user_cache_all)
	/* FALLTHROUGH */

/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
ENTRY(arm940_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	/* FALLTHROUGH */

/*
 *	flush_user_cache_range(start, end, flags)
 *
 *	There is no efficient way to flush a range of cache entries
 *	in the specified address range. Thus, the entire cache is flushed.
 *
 *	- start	- start address (inclusive)
 *	- end	- end address (exclusive)
 *	- flags	- vm_flags describing address space
 */
ENTRY(arm940_flush_user_cache_range)
	mov	ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, ip, c7, c6, 0		@ flush D cache
#else
	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:	mcr	p15, 0, r3, c7, c14, 2		@ clean/flush D index
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 4
	bcs	1b				@ segments 3 to 0
#endif
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr
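
/*
 * With CONFIG_CPU_DCACHE_WRITETHROUGH the D cache never holds dirty data,
 * so the routine above can simply invalidate the whole D cache with a
 * single MCR; in the write-back case every segment/entry index has to be
 * cleaned and flushed individually, which is what the nested loop does.
 */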

/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start, end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
ENTRY(arm940_coherent_kern_range)
	/* FALLTHROUGH */

/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start, end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
ENTRY(arm940_coherent_user_range)
	/* FALLTHROUGH */

/*
 *	flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
ENTRY(arm940_flush_kern_dcache_area)
	mov	ip, #0
	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:	mcr	p15, 0, r3, c7, c14, 2		@ clean/flush D index
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 4
	bcs	1b				@ segments 3 to 0
	mcr	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 *	dma_inv_range(start, end)
 *
 *	There is no efficient way to invalidate a specified virtual
 *	address range. Thus, the entire cache is invalidated.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
arm940_dma_inv_range:
	mov	ip, #0
	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:	mcr	p15, 0, r3, c7, c6, 2		@ flush D entry
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 4
	bcs	1b				@ segments 3 to 0
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 *	dma_clean_range(start, end)
 *
 *	There is no efficient way to clean a specified virtual
 *	address range. Thus, the entire cache is cleaned.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
arm940_dma_clean_range:
ENTRY(cpu_arm940_dcache_clean_area)
	mov	ip, #0
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:	mcr	p15, 0, r3, c7, c10, 2		@ clean D entry
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 4
	bcs	1b				@ segments 3 to 0
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 *	dma_flush_range(start, end)
 *
 *	There is no efficient way to clean and invalidate a specified
 *	virtual address range.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
ENTRY(arm940_dma_flush_range)
	mov	ip, #0
	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, r3, c7, c14, 2		@ clean/flush D entry
#else
	mcr	p15, 0, r3, c7, c6, 2		@ invalidate D entry
#endif
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 4
	bcs	1b				@ segments 3 to 0
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 *	dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(arm940_dma_map_area)
	add	r1, r1, r0
	cmp	r2, #DMA_TO_DEVICE
	beq	arm940_dma_clean_range
	bcs	arm940_dma_inv_range
	b	arm940_dma_flush_range
ENDPROC(arm940_dma_map_area)
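
/*
 * In the routine above, "add r1, r1, r0" turns the size into an end address
 * (the whole-cache helpers here ignore the range anyway).  The dispatch
 * relies on the dma_data_direction values (DMA_BIDIRECTIONAL = 0,
 * DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2): after "cmp r2, #DMA_TO_DEVICE",
 * eq selects clean (CPU data must reach memory before the device reads it),
 * cs catches the higher value DMA_FROM_DEVICE and selects invalidate, and
 * anything lower is DMA_BIDIRECTIONAL, which gets the combined
 * clean+invalidate.  A C sketch of the same logic (illustrative only):
 *
 *	if (dir == DMA_TO_DEVICE)
 *		arm940_dma_clean_range(start, end);
 *	else if (dir == DMA_FROM_DEVICE)
 *		arm940_dma_inv_range(start, end);
 *	else
 *		arm940_dma_flush_range(start, end);
 */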

/*
 *	dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(arm940_dma_unmap_area)
	mov	pc, lr
ENDPROC(arm940_dma_unmap_area)

ENTRY(arm940_cache_fns)
	.long	arm940_flush_icache_all
	.long	arm940_flush_kern_cache_all
	.long	arm940_flush_user_cache_all
	.long	arm940_flush_user_cache_range
	.long	arm940_coherent_kern_range
	.long	arm940_coherent_user_range
	.long	arm940_flush_kern_dcache_area
	.long	arm940_dma_map_area
	.long	arm940_dma_unmap_area
	.long	arm940_dma_flush_range
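
/*
 * The entries above must stay in the order of the fields of
 * struct cpu_cache_fns (see asm/cacheflush.h); that table is what the
 * generic cache glue indirects through when the kernel is built with more
 * than one cache type.
 */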

	__CPUINIT

	.type	__arm940_setup, #function
__arm940_setup:
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c6, 0		@ invalidate D cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB

	mcr	p15, 0, r0, c6, c3, 0		@ disable data area 3~7
	mcr	p15, 0, r0, c6, c4, 0
	mcr	p15, 0, r0, c6, c5, 0
	mcr	p15, 0, r0, c6, c6, 0
	mcr	p15, 0, r0, c6, c7, 0

	mcr	p15, 0, r0, c6, c3, 1		@ disable instruction area 3~7
	mcr	p15, 0, r0, c6, c4, 1
	mcr	p15, 0, r0, c6, c5, 1
	mcr	p15, 0, r0, c6, c6, 1
	mcr	p15, 0, r0, c6, c7, 1

	mov	r0, #0x0000003F			@ base = 0, size = 4GB
	mcr	p15, 0, r0, c6, c0, 0		@ set area 0, default
	mcr	p15, 0, r0, c6, c0, 1

	ldr	r0, =(CONFIG_DRAM_BASE & 0xFFFFF000) @ base[31:12] of RAM
	ldr	r1, =(CONFIG_DRAM_SIZE >> 12)	@ size of RAM (must be >= 4KB)
	mov	r2, #10				@ 11 is the minimum (4KB)
1:	add	r2, r2, #1			@ area size *= 2
	movs	r1, r1, lsr #1
	bne	1b				@ count not zero r-shift
	orr	r0, r0, r2, lsl #1		@ the area register value
	orr	r0, r0, #1			@ set enable bit
	mcr	p15, 0, r0, c6, c1, 0		@ set area 1, RAM
	mcr	p15, 0, r0, c6, c1, 1

	ldr	r0, =(CONFIG_FLASH_MEM_BASE & 0xFFFFF000) @ base[31:12] of FLASH
	ldr	r1, =(CONFIG_FLASH_SIZE >> 12)	@ size of FLASH (must be >= 4KB)
	mov	r2, #10				@ 11 is the minimum (4KB)
1:	add	r2, r2, #1			@ area size *= 2
	movs	r1, r1, lsr #1
	bne	1b				@ count not zero r-shift
	orr	r0, r0, r2, lsl #1		@ the area register value
	orr	r0, r0, #1			@ set enable bit
	mcr	p15, 0, r0, c6, c2, 0		@ set area 2, ROM/FLASH
	mcr	p15, 0, r0, c6, c2, 1
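
/*
 * The two loops above compute the protection-region size field: each area
 * register holds base[31:12], a size code in bits [5:1] where a value N
 * selects a region of 2^(N+1) bytes (so 11 -> 4KB and 31 -> 4GB, matching
 * the 0x3F "4GB" default written to area 0 above), and an enable bit 0.
 * Roughly, in C (illustrative sketch only; mpu_region() is hypothetical):
 *
 *	unsigned long mpu_region(unsigned long base, unsigned long size)
 *	{
 *		unsigned int n = 10;		// 11 is the minimum, i.e. 4KB
 *		unsigned long pages = size >> 12;
 *
 *		do {
 *			n++;			// area size *= 2
 *		} while (pages >>= 1);		// count not zero r-shift
 *		return (base & 0xFFFFF000) | (n << 1) | 1;
 *	}
 */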

	mov	r0, #0x06
	mcr	p15, 0, r0, c2, c0, 0		@ Region 1&2 cacheable
	mcr	p15, 0, r0, c2, c0, 1
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	mov	r0, #0x00			@ disable whole write buffer
#else
	mov	r0, #0x02			@ Region 1 write buffered
#endif
	mcr	p15, 0, r0, c3, c0, 0

	mov	r0, #0x10000
	sub	r0, r0, #1			@ r0 = 0xffff
	mcr	p15, 0, r0, c5, c0, 0		@ all read/write access
	mcr	p15, 0, r0, c5, c0, 1
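
/*
 * The region attribute registers written above are bitmaps with one field
 * per protection region: 0x06 in c2 marks regions 1 and 2 (RAM and flash)
 * cacheable, 0x02 in c3 makes only region 1 bufferable, and 0xffff in the
 * c5 access permission registers grants full read/write access to every
 * region (two AP bits per region, all set).
 */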

	mrc	p15, 0, r0, c1, c0		@ get control register
	orr	r0, r0, #0x00001000		@ I-cache
	orr	r0, r0, #0x00000005		@ MPU/D-cache

	mov	pc, lr

	.size	__arm940_setup, . - __arm940_setup

	__INITDATA

/*
 * Purpose : Function pointers used to access above functions - all calls
 *	     come through these
 */
	.type	arm940_processor_functions, #object
ENTRY(arm940_processor_functions)
	.word	nommu_early_abort
	.word	legacy_pabort
	.word	cpu_arm940_proc_init
	.word	cpu_arm940_proc_fin
	.word	cpu_arm940_reset
	.word	cpu_arm940_do_idle
	.word	cpu_arm940_dcache_clean_area
	.word	cpu_arm940_switch_mm
	.word	0		@ cpu_*_set_pte
	.size	arm940_processor_functions, . - arm940_processor_functions
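
/*
 * As with arm940_cache_fns, the .word list above has to line up with the
 * layout the generic code expects (struct processor): the abort and pabort
 * handlers first, then proc_init, proc_fin, reset, do_idle,
 * dcache_clean_area, switch_mm and finally the set_pte slot, which is 0
 * here because the ARM940T has no MMU.
 */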

	.section ".rodata"

	.type	cpu_arch_name, #object
cpu_arch_name:
	.asciz	"armv4t"
	.size	cpu_arch_name, . - cpu_arch_name

	.type	cpu_elf_name, #object
cpu_elf_name:
	.asciz	"v4"
	.size	cpu_elf_name, . - cpu_elf_name

	.type	cpu_arm940_name, #object
cpu_arm940_name:
	.ascii	"ARM940T"
	.size	cpu_arm940_name, . - cpu_arm940_name

	.align

	.section ".proc.info.init", #alloc, #execinstr

	.type	__arm940_proc_info,#object
__arm940_proc_info:
	.long	0x41009400
	.long	0xff00fff0
	.long	0
	b	__arm940_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
	.long	cpu_arm940_name
	.long	arm940_processor_functions
	.long	0
	.long	0
	.long	arm940_cache_fns
	.size	__arm940_proc_info, . - __arm940_proc_info
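
/*
 * The first two words above are the CPU ID match: a main ID of 0x41009400
 * under the mask 0xff00fff0 selects implementer 0x41 (ARM) and part number
 * 0x940, i.e. the ARM940T, ignoring the variant and revision fields.  The
 * "b __arm940_setup" slot is the per-CPU setup hook taken early in boot,
 * before the caches and protection unit are enabled; __arm940_setup leaves
 * the control register image it wants programmed in r0.
 */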