xref: /openbmc/linux/arch/arm/mm/proc-mohawk.S (revision e290ed81)
1/*
2 *  linux/arch/arm/mm/proc-mohawk.S: MMU functions for Marvell PJ1 core
3 *
4 *  PJ1 (codename Mohawk) is a hybrid of the xscale3 and Marvell's own core.
5 *
6 *  Heavily based on proc-arm926.S and proc-xsc3.S
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
21 */
22
23#include <linux/linkage.h>
24#include <linux/init.h>
25#include <asm/assembler.h>
26#include <asm/hwcap.h>
27#include <asm/pgtable-hwdef.h>
28#include <asm/pgtable.h>
29#include <asm/page.h>
30#include <asm/ptrace.h>
31#include "proc-macros.S"
32
33/*
34 * This is the maximum size of an area which will be flushed.  If the
35 * area is larger than this, then we flush the whole cache instead of
36 * iterating line by line (whole-cache maintenance is cheaper past this point).
37 */
38#define CACHE_DLIMIT	32768
39
40/*
41 * The cache line size of the L1 D cache, in bytes.  All range loops
42 * below step by this amount.
43 */
44#define CACHE_DLINESIZE	32
43
44/*
45 * cpu_mohawk_proc_init() - per-CPU initialisation; nothing to do on PJ1.
46 */
47ENTRY(cpu_mohawk_proc_init)
48	mov	pc, lr
49
50/*
51 * cpu_mohawk_proc_fin() - disable caching before halt/reboot.
52 */
53ENTRY(cpu_mohawk_proc_fin)
54	mrc	p15, 0, r0, c1, c0, 0		@ read CP15 control register
55	bic	r0, r0, #0x1800			@ clear I (bit 12) and Z/BTB (bit 11)
56	bic	r0, r0, #0x0006			@ clear C (bit 2) and A/alignment (bit 1)
57	mcr	p15, 0, r0, c1, c0, 0		@ write back: caches now disabled
58	mov	pc, lr
59
60/*
61 * cpu_mohawk_reset(loc)
62 *
63 * Perform a soft reset of the system.  Put the CPU into the
64 * same state as it would be if it had been reset, and branch
65 * to what would be the reset vector.
66 *
67 * loc: location to jump to for soft reset (in r0)
68 *
69 * (same as arm926)
70 */
71	.align	5
72ENTRY(cpu_mohawk_reset)
73	mov	ip, #0
74	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
75	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
76	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
77	mrc	p15, 0, ip, c1, c0, 0		@ read CP15 control register
78	bic	ip, ip, #0x0007			@ clear C, A, M (caches, alignment, MMU)
79	bic	ip, ip, #0x1100			@ clear I (bit 12) and S (bit 8)
80	mcr	p15, 0, ip, c1, c0, 0		@ write back: MMU and caches now off
81	mov	pc, r0				@ jump to loc as the reset vector would
82
83/*
84 * cpu_mohawk_do_idle()
85 *
86 * Put the CPU to sleep until the next interrupt.
87 * Called with IRQs disabled.
88 */
89	.align	5
90ENTRY(cpu_mohawk_do_idle)
91	mov	r0, #0
92	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer before sleeping
93	mcr	p15, 0, r0, c7, c0, 4		@ wait for interrupt (CPU sleeps here)
94	mov	pc, lr
94
95/*
96 *	flush_icache_all()
97 *
98 *	Unconditionally invalidate the entire icache.
99 */
100ENTRY(mohawk_flush_icache_all)
101	mov	r0, #0
102	mcr	p15, 0, r0, c7, c5, 0		@ invalidate whole I cache
103	mov	pc, lr
104ENDPROC(mohawk_flush_icache_all)
105
106/*
107 *	flush_user_cache_all()
108 *
109 *	Clean and invalidate all cache entries in a particular
110 *	address space.  With no cache colouring to worry about,
111 *	this is identical to the kernel variant below.
112 */
113ENTRY(mohawk_flush_user_cache_all)
114	/* FALLTHROUGH to mohawk_flush_kern_cache_all */
115
116/*
117 *	flush_kern_cache_all()
118 *
119 *	Clean and invalidate the entire cache.
120 */
121ENTRY(mohawk_flush_kern_cache_all)
122	mov	r2, #VM_EXEC			@ kernel space counts as executable
123	mov	ip, #0
124__flush_whole_cache:
125	mcr	p15, 0, ip, c7, c14, 0		@ clean & invalidate all D cache
126	tst	r2, #VM_EXEC			@ executable mapping?
127	mcrne	p15, 0, ip, c7, c5, 0		@ ... also invalidate whole I cache
128	mcrne	p15, 0, ip, c7, c10, 0		@ drain write buffer (NOTE(review): arm926/xsc3 use c7, c10, 4 for this -- confirm PJ1 encoding against the TRM)
129	mov	pc, lr
129
130/*
131 *	flush_user_cache_range(start, end, flags)
132 *
133 *	Clean and invalidate a range of cache entries in the
134 *	specified address range.
135 *
136 *	- start	- start address (inclusive)
137 *	- end	- end address (exclusive)
138 *	- flags	- vm_flags describing address space
139 *
140 * (same as arm926)
141 */
142ENTRY(mohawk_flush_user_cache_range)
143	mov	ip, #0
144	sub	r3, r1, r0			@ calculate total size
145	cmp	r3, #CACHE_DLIMIT		@ over the per-line limit?
146	bgt	__flush_whole_cache		@ ... whole-cache flush is cheaper
1471:	tst	r2, #VM_EXEC			@ loop is unrolled x2 (two lines/iter)
148	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D entry
149	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry (executable vma only)
150	add	r0, r0, #CACHE_DLINESIZE
151	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D entry
152	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
153	add	r0, r0, #CACHE_DLINESIZE
154	cmp	r0, r1
155	blo	1b
156	tst	r2, #VM_EXEC
157	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
158	mov	pc, lr
159
160/*
161 *	coherent_kern_range(start, end)
162 *
163 *	Ensure coherency between the Icache and the Dcache in the
164 *	region described by start, end.  If you have non-snooping
165 *	Harvard caches, you need to implement this function.
166 *
167 *	- start	- virtual start address
168 *	- end	- virtual end address
169 */
170ENTRY(mohawk_coherent_kern_range)
171	/* FALLTHROUGH to mohawk_coherent_user_range */
172
173/*
174 *	coherent_user_range(start, end)
175 *
176 *	Ensure coherency between the Icache and the Dcache in the
177 *	region described by start, end.  If you have non-snooping
178 *	Harvard caches, you need to implement this function.
179 *
180 *	- start	- virtual start address
181 *	- end	- virtual end address (exclusive)
182 *
183 * (same as arm926)
184 */
185ENTRY(mohawk_coherent_user_range)
186	bic	r0, r0, #CACHE_DLINESIZE - 1	@ round start down to a cache line
1871:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry (push to memory)
188	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry (refetch later)
189	add	r0, r0, #CACHE_DLINESIZE
190	cmp	r0, r1
191	blo	1b
192	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
193	mov	pc, lr
194
195/*
196 *	flush_kern_dcache_area(void *addr, size_t size)
197 *
198 *	Ensure no D cache aliasing occurs, either with itself or
199 *	the I cache.
200 *
201 *	- addr	- kernel address (r0)
202 *	- size	- region size in bytes (r1)
203 */
204ENTRY(mohawk_flush_kern_dcache_area)
205	add	r1, r0, r1			@ r1 = end address
2061:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
207	add	r0, r0, #CACHE_DLINESIZE
208	cmp	r0, r1
209	blo	1b
210	mov	r0, #0
211	mcr	p15, 0, r0, c7, c5, 0		@ invalidate whole I cache
212	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
213	mov	pc, lr
214
215/*
216 *	dma_inv_range(start, end)
217 *
218 *	Invalidate (discard) the specified virtual address range.
219 *	May not write back any entries.  If 'start' or 'end'
220 *	are not cache line aligned, those lines must be written
221 *	back first so adjacent data outside the range is not lost.
222 *
223 *	- start	- virtual start address
224 *	- end	- virtual end address (exclusive)
225 *
226 * (same as v4wb)
227 */
228mohawk_dma_inv_range:
229	tst	r0, #CACHE_DLINESIZE - 1	@ start mid-line?
230	mcrne	p15, 0, r0, c7, c10, 1		@ ... clean the straddled first line
231	tst	r1, #CACHE_DLINESIZE - 1	@ end mid-line?
232	mcrne	p15, 0, r1, c7, c10, 1		@ ... clean the straddled last line
233	bic	r0, r0, #CACHE_DLINESIZE - 1	@ align start down to a line
2341:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
235	add	r0, r0, #CACHE_DLINESIZE
236	cmp	r0, r1
237	blo	1b
238	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
239	mov	pc, lr
240
241/*
242 *	dma_clean_range(start, end)
243 *
244 *	Clean (write back, do not discard) the specified virtual
245 *	address range.
246 *
247 *	- start	- virtual start address
248 *	- end	- virtual end address (exclusive)
249 *
250 * (same as v4wb)
251 */
252mohawk_dma_clean_range:
253	bic	r0, r0, #CACHE_DLINESIZE - 1	@ align start down to a line
2541:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
255	add	r0, r0, #CACHE_DLINESIZE
256	cmp	r0, r1
257	blo	1b
258	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
259	mov	pc, lr
259
260/*
261 *	dma_flush_range(start, end)
262 *
263 *	Clean and invalidate the specified virtual address range.
264 *
265 *	- start	- virtual start address
266 *	- end	- virtual end address (exclusive)
267 */
268ENTRY(mohawk_dma_flush_range)
269	bic	r0, r0, #CACHE_DLINESIZE - 1	@ align start down to a line
2701:
271	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
272	add	r0, r0, #CACHE_DLINESIZE
273	cmp	r0, r1
274	blo	1b
275	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
276	mov	pc, lr
277
278/*
279 *	dma_map_area(start, size, dir)
280 *	- start	- kernel virtual start address
281 *	- size	- size of region
282 *	- dir	- DMA direction (selects clean/invalidate/flush below)
283 */
284ENTRY(mohawk_dma_map_area)
285	add	r1, r1, r0			@ r1 = end address
286	cmp	r2, #DMA_TO_DEVICE
287	beq	mohawk_dma_clean_range		@ DMA_TO_DEVICE: clean only
288	bcs	mohawk_dma_inv_range		@ dir > TO_DEVICE (FROM_DEVICE): invalidate
289	b	mohawk_dma_flush_range		@ otherwise (BIDIRECTIONAL): clean+invalidate
290ENDPROC(mohawk_dma_map_area)
291
292/*
293 *	dma_unmap_area(start, size, dir)
294 *	- start	- kernel virtual start address
295 *	- size	- size of region
296 *	- dir	- DMA direction
297 *
298 * No maintenance needed at unmap time on this core; all work is
299 * done in dma_map_area above.
300 */
301ENTRY(mohawk_dma_unmap_area)
302	mov	pc, lr
303ENDPROC(mohawk_dma_unmap_area)
304
305	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
306	define_cache_functions mohawk
304
/*
 * cpu_mohawk_dcache_clean_area(addr, size)
 *
 * Clean (write back) a kernel address range from the D cache.
 *	- r0 = start address
 *	- r1 = size in bytes
 */
305ENTRY(cpu_mohawk_dcache_clean_area)
3061:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
307	add	r0, r0, #CACHE_DLINESIZE
308	subs	r1, r1, #CACHE_DLINESIZE	@ size -= line; loop while > 0
309	bhi	1b
310	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
311	mov	pc, lr
312
313/*
314 * cpu_mohawk_switch_mm(pgd)
315 *
316 * Set the translation base pointer to be as described by pgd.
317 * Caches are cleaned/invalidated first since they are virtually
318 * indexed and stale entries must not survive the address-space switch.
319 *
320 * pgd: physical address of the new page tables (r0)
321 */
322	.align	5
323ENTRY(cpu_mohawk_switch_mm)
324	mov	ip, #0
325	mcr	p15, 0, ip, c7, c14, 0		@ clean & invalidate all D cache
326	mcr	p15, 0, ip, c7, c5, 0		@ invalidate I cache
327	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
328	orr	r0, r0, #0x18			@ cache the page table in L2
329	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer (TTB)
330	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
331	mov	pc, lr
330
331/*
332 * cpu_mohawk_set_pte_ext(ptep, pte, ext)
333 *
334 * Set a PTE and flush it out so the table walker sees it in memory.
335 */
336	.align	5
337ENTRY(cpu_mohawk_set_pte_ext)
338	armv3_set_pte_ext			@ store Linux + HW PTE (proc-macros.S)
339	mov	r0, r0				@ nop
340	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry holding the PTE
341	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
342	mov	pc, lr
343
344	__CPUINIT
345
/*
 * __mohawk_setup
 *
 * Boot-time CPU initialisation.  Presumably entered from head.S with
 * r4 = physical page-table pointer (standard for __xxx_setup routines --
 * TODO confirm).  Returns in r0 the value to write to the CP15 control
 * register, composed from mohawk_crval below.
 */
346	.type	__mohawk_setup, #function
347__mohawk_setup:
348	mov	r0, #0
349	mcr	p15, 0, r0, c7, c7		@ invalidate I,D caches
350	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
351	mcr	p15, 0, r0, c8, c7		@ invalidate I,D TLBs
352	orr	r4, r4, #0x18			@ cache the page table in L2
353	mcr	p15, 0, r4, c2, c0, 0		@ load page table pointer (TTB)
354
355	mov	r0, #0				@ don't allow CP access
356	mcr	p15, 0, r0, c15, c1, 0		@ write CP access register
357
358	adr	r5, mohawk_crval
359	ldmia	r5, {r5, r6}			@ r5 = bits to clear, r6 = bits to set
360	mrc	p15, 0, r0, c1, c0		@ get control register
361	bic	r0, r0, r5
362	orr	r0, r0, r6
363	mov	pc, lr				@ return CR value in r0
364
365	.size	__mohawk_setup, . - __mohawk_setup
366
367	/*
368	 * Control register bit layout (see crval macro in proc-macros.S):
369	 *  R
370	 * .RVI ZFRS BLDP WCAM
371	 * .011 1001 ..00 0101
372	 *
373	 */
374	.type	mohawk_crval, #object
375mohawk_crval:
376	crval	clear=0x00007f3f, mmuset=0x00003905, ucset=0x00001134
376
377	__INITDATA
378
379	@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
380	define_processor_functions mohawk, dabort=v5t_early_abort, pabort=legacy_pabort
381
382	.section ".rodata"
383
384	string	cpu_arch_name, "armv5te"
385	string	cpu_elf_name, "v5"
386	string	cpu_mohawk_name, "Marvell 88SV331x"
387
388	.align
389
390	.section ".proc.info.init", #alloc, #execinstr

/*
 * CPU match entry consumed by head.S at boot: CPU id value/mask, MMU
 * section flags for memory and I/O mappings, the setup function, and
 * pointers to this core's processor/TLB/user/cache function tables.
 */
392	.type	__88sv331x_proc_info,#object
393__88sv331x_proc_info:
394	.long	0x56158000			@ Marvell 88SV331x (MOHAWK) CPU id value
395	.long	0xfffff000			@ CPU id mask
396	.long   PMD_TYPE_SECT | \
397		PMD_SECT_BUFFERABLE | \
398		PMD_SECT_CACHEABLE | \
399		PMD_BIT4 | \
400		PMD_SECT_AP_WRITE | \
401		PMD_SECT_AP_READ		@ section flags for memory mappings
402	.long   PMD_TYPE_SECT | \
403		PMD_BIT4 | \
404		PMD_SECT_AP_WRITE | \
405		PMD_SECT_AP_READ		@ section flags for I/O (uncached)
406	b	__mohawk_setup			@ CPU setup function
407	.long	cpu_arch_name
408	.long	cpu_elf_name
409	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
410	.long	cpu_mohawk_name
411	.long	mohawk_processor_functions
412	.long	v4wbi_tlb_fns
413	.long	v4wb_user_fns
414	.long	mohawk_cache_fns
415	.size	__88sv331x_proc_info, . - __88sv331x_proc_info
416