/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/mm/proc-xscale.S
 *
 *  Author:	Nicolas Pitre
 *  Created:	November 2000
 *  Copyright:	(C) 2000, 2001 MontaVista Software Inc.
 *
 * MMU functions for the Intel XScale CPUs
 *
 * 2001 Aug 21:
 *	some contributions by Brett Gaines <brett.w.gaines@intel.com>
 *	Copyright 2001 by Intel Corp.
 *
 * 2001 Sep 08:
 *	Completely revisited, many important fixes
 *	Nicolas Pitre <nico@fluxnic.net>
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"

/*
 * This is the maximum size of an area which will be flushed.  If the area
 * is larger than this, then we flush the whole cache.
 */
#define MAX_AREA_SIZE	32768
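/*
 * Note that MAX_AREA_SIZE equals CACHESIZE below, so any range at least
 * as large as the data cache falls back to a whole-cache flush.
 */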

/*
 * the cache line size of the I and D cache
 */
#define CACHELINESIZE	32

/*
 * the size of the data cache
 */
#define CACHESIZE	32768

/*
 * Virtual address used to allocate the cache when flushed
 *
 * This must be an address range which is _never_ used.  It should
 * apparently have a mapping in the corresponding page table for
 * compatibility with future CPUs that _could_ require it; for now we
 * do not bother.
 *
 * This must be aligned on a 2*CACHESIZE boundary.  The code alternates
 * between the two areas each time the clean_d_cache macro is used.
 * Without this the XScale core exhibits cache eviction problems and no one
 * knows why.
 *
 * Reminder: the vector table is located at 0xffff0000-0xffff0fff.
 */
#define CLEAN_ADDR	0xfffe0000
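/*
 * For reference: with CACHESIZE = 32768 the two alternating areas are
 * 0xfffe0000-0xfffe7fff and 0xfffe8000-0xfffeffff, i.e. the 64KB block
 * immediately below the vector page at 0xffff0000.
 */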

/*
 * This macro is used to wait for a CP15 write and is needed when we
 * have to ensure that the last operation to the coprocessor has
 * completed before continuing.
 */
	.macro	cpwait, rd
	mrc	p15, 0, \rd, c2, c0, 0		@ arbitrary read of cp15
	mov	\rd, \rd			@ wait for completion
	sub	pc, pc, #4			@ flush instruction pipeline
	.endm

	.macro	cpwait_ret, lr, rd
	mrc	p15, 0, \rd, c2, c0, 0		@ arbitrary read of cp15
	sub	pc, \lr, \rd, LSR #32		@ wait for completion and
						@ flush instruction pipeline
	.endm
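/*
 * In cpwait_ret, "\rd, LSR #32" always evaluates to zero, so the sub is
 * effectively "mov pc, \lr"; the data dependency on \rd forces the mrc
 * (and hence the preceding CP15 write) to complete first, and writing
 * to pc flushes the instruction pipeline.
 */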

/*
 * This macro cleans the entire dcache using line allocate.
 * The main loop has been unrolled to reduce loop overhead.
 * rd and rs are two scratch registers.
 */
	.macro  clean_d_cache, rd, rs
	ldr	\rs, =clean_addr
	ldr	\rd, [\rs]
	eor	\rd, \rd, #CACHESIZE
	str	\rd, [\rs]
	add	\rs, \rd, #CACHESIZE
1:	mcr	p15, 0, \rd, c7, c2, 5		@ allocate D cache line
	add	\rd, \rd, #CACHELINESIZE
	mcr	p15, 0, \rd, c7, c2, 5		@ allocate D cache line
	add	\rd, \rd, #CACHELINESIZE
	mcr	p15, 0, \rd, c7, c2, 5		@ allocate D cache line
	add	\rd, \rd, #CACHELINESIZE
	mcr	p15, 0, \rd, c7, c2, 5		@ allocate D cache line
	add	\rd, \rd, #CACHELINESIZE
	teq	\rd, \rs
	bne	1b
	.endm
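/*
 * A line allocate (c7, c2, 5) claims a D cache line for the given
 * virtual address without fetching data from memory, so sweeping
 * CACHESIZE bytes of the dedicated clean area evicts, and therefore
 * writes back, every line the data cache previously held.
 */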

	.data
	.align	2
clean_addr:	.word	CLEAN_ADDR

	.text

/*
 * cpu_xscale_proc_init()
 *
 * Nothing too exciting at the moment
 */
ENTRY(cpu_xscale_proc_init)
	@ enable write buffer coalescing. Some bootloaders disable it
	mrc	p15, 0, r1, c1, c0, 1
	bic	r1, r1, #1
	mcr	p15, 0, r1, c1, c0, 1
	ret	lr

/*
 * cpu_xscale_proc_fin()
 */
ENTRY(cpu_xscale_proc_fin)
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x1800			@ ...IZ...........
	bic	r0, r0, #0x0006			@ .............CA.
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	ret	lr

/*
 * cpu_xscale_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset
 *
 * Beware PXA270 erratum E7.
 */
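/*
 * The .align 5 below puts the entry on a 32-byte I-cache line boundary,
 * so the MMU-disabling sequence starting at the "cache line aligned"
 * marker fits within a single cache line.
 */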
	.align	5
	.pushsection	.idmap.text, "ax"
ENTRY(cpu_xscale_reset)
	mov	r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
	msr	cpsr_c, r1			@ reset CPSR
	mcr	p15, 0, r1, c10, c4, 1		@ unlock I-TLB
	mcr	p15, 0, r1, c8, c5, 0		@ invalidate I-TLB
	mrc	p15, 0, r1, c1, c0, 0		@ ctrl register
	bic	r1, r1, #0x0086			@ ........B....CA.
	bic	r1, r1, #0x3900			@ ..VIZ..S........
	sub	pc, pc, #4			@ flush pipeline
	@ *** cache line aligned ***
	mcr	p15, 0, r1, c1, c0, 0		@ ctrl register
	bic	r1, r1, #0x0001			@ ...............M
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches & BTB
	mcr	p15, 0, r1, c1, c0, 0		@ ctrl register
	@ CAUTION: MMU turned off from this point. We count on the pipeline
	@ already containing those two last instructions to survive.
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
	ret	r0
ENDPROC(cpu_xscale_reset)
	.popsection

/*
 * cpu_xscale_do_idle()
 *
 * Cause the processor to idle
 *
 * For now we simply enter idle mode in every case.
 *
 * XScale supports clock switching, but using idle mode support
 * allows external hardware to react to system state changes.
 */
	.align	5

ENTRY(cpu_xscale_do_idle)
	mov	r0, #1
	mcr	p14, 0, r0, c7, c0, 0		@ Go to IDLE
	ret	lr

/* ================================= CACHE ================================ */

/*
 *	flush_icache_all()
 *
 *	Unconditionally clean and invalidate the entire icache.
 */
ENTRY(xscale_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	ret	lr
ENDPROC(xscale_flush_icache_all)

/*
 *	flush_user_cache_all()
 *
 *	Invalidate all cache entries in a particular address
 *	space.
 */
ENTRY(xscale_flush_user_cache_all)
	/* FALLTHROUGH */

/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
ENTRY(xscale_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	mov	ip, #0
__flush_whole_cache:
	clean_d_cache r0, r1
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ Invalidate I cache & BTB
	mcrne	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
	ret	lr

/*
 *	flush_user_cache_range(start, end, vm_flags)
 *
 *	Invalidate a range of cache entries in the specified
 *	address space.
 *
 *	- start - start address (may not be aligned)
 *	- end	- end address (exclusive, may not be aligned)
 *	- vm_flags - vm_area_struct flags describing the address space
 *		     (only VM_EXEC is tested)
 */
	.align	5
ENTRY(xscale_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #MAX_AREA_SIZE
	bhs	__flush_whole_cache

1:	tst	r2, #VM_EXEC
	mcrne	p15, 0, r0, c7, c5, 1		@ Invalidate I cache line
	mcr	p15, 0, r0, c7, c10, 1		@ Clean D cache line
	mcr	p15, 0, r0, c7, c6, 1		@ Invalidate D cache line
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 6		@ Invalidate BTB
	mcrne	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
	ret	lr

/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start and end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 *
 *	Note: single I-cache line invalidation isn't used here since
 *	it also trashes the mini I-cache used by JTAG debuggers.
 */
ENTRY(xscale_coherent_kern_range)
	bic	r0, r0, #CACHELINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ Invalidate I cache & BTB
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	ret	lr

/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start and end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(xscale_coherent_user_range)
	bic	r0, r0, #CACHELINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c5, 1		@ Invalidate I cache entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 6		@ Invalidate BTB
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	ret	lr

/*
 *	flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
ENTRY(xscale_flush_kern_dcache_area)
	add	r1, r0, r1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ Invalidate I cache & BTB
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	ret	lr

/*
 *	dma_inv_range(start, end)
 *
 *	Invalidate (discard) the specified virtual address range.
 *	May not write back any entries.  If 'start' or 'end'
 *	are not cache line aligned, those lines must be written
 *	back.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
xscale_dma_inv_range:
	tst	r0, #CACHELINESIZE - 1
	bic	r0, r0, #CACHELINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
	tst	r1, #CACHELINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	ret	lr

/*
 *	dma_clean_range(start, end)
 *
 *	Clean the specified virtual address range.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
xscale_dma_clean_range:
	bic	r0, r0, #CACHELINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	ret	lr

/*
 *	dma_flush_range(start, end)
 *
 *	Clean and invalidate the specified virtual address range.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(xscale_dma_flush_range)
	bic	r0, r0, #CACHELINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	ret	lr

/*
 *	dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
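/*
 * Dispatch by direction (see the branches below): DMA_TO_DEVICE uses a
 * clean only, DMA_FROM_DEVICE an invalidate only, and DMA_BIDIRECTIONAL
 * a combined clean and invalidate.
 */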
ENTRY(xscale_dma_map_area)
	add	r1, r1, r0
	cmp	r2, #DMA_TO_DEVICE
	beq	xscale_dma_clean_range
	bcs	xscale_dma_inv_range
	b	xscale_dma_flush_range
ENDPROC(xscale_dma_map_area)

/*
 *	dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
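/*
 * 80200 A0/A1 variant: a plain invalidate is unsafe on these steppings
 * (see the erratum note further down), so every direction other than
 * DMA_TO_DEVICE is routed to dma_flush_range instead.
 */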
ENTRY(xscale_80200_A0_A1_dma_map_area)
	add	r1, r1, r0
	teq	r2, #DMA_TO_DEVICE
	beq	xscale_dma_clean_range
	b	xscale_dma_flush_range
ENDPROC(xscale_80200_A0_A1_dma_map_area)

/*
 *	dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(xscale_dma_unmap_area)
	ret	lr
ENDPROC(xscale_dma_unmap_area)

	.globl	xscale_flush_kern_cache_louis
	.equ	xscale_flush_kern_cache_louis, xscale_flush_kern_cache_all

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions xscale

/*
 * On stepping A0/A1 of the 80200, invalidating D-cache by line doesn't
 * clear the dirty bits, which means that if we invalidate a dirty line,
 * the dirty data can still be written back to external memory later on.
 *
 * The recommended workaround is to always do a clean D-cache line before
 * doing an invalidate D-cache line, so on the affected processors,
 * dma_inv_range() is implemented as dma_flush_range().
 *
 * See erratum #25 of "Intel 80200 Processor Specification Update",
 * revision January 22, 2003, available at:
 *     http://www.intel.com/design/iio/specupdt/273415.htm
 */
.macro a0_alias basename
	.globl xscale_80200_A0_A1_\basename
	.type xscale_80200_A0_A1_\basename , %function
	.equ xscale_80200_A0_A1_\basename , xscale_\basename
.endm

/*
 * Most of the cache functions are unchanged for these processor revisions.
 * Export suitable alias symbols for the unchanged functions:
 */
	a0_alias flush_icache_all
	a0_alias flush_user_cache_all
	a0_alias flush_kern_cache_all
	a0_alias flush_kern_cache_louis
	a0_alias flush_user_cache_range
	a0_alias coherent_kern_range
	a0_alias coherent_user_range
	a0_alias flush_kern_dcache_area
	a0_alias dma_flush_range
	a0_alias dma_unmap_area

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions xscale_80200_A0_A1

ENTRY(cpu_xscale_dcache_clean_area)
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHELINESIZE
	subs	r1, r1, #CACHELINESIZE
	bhi	1b
	ret	lr

/* =============================== PageTable ============================== */

/*
 * cpu_xscale_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables
 */
	.align	5
ENTRY(cpu_xscale_switch_mm)
	clean_d_cache r1, r2
	mcr	p15, 0, ip, c7, c5, 0		@ Invalidate I cache & BTB
	mcr	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
	cpwait_ret lr, ip

/*
 * cpu_xscale_set_pte_ext(ptep, pte, ext)
 *
 * Set a PTE and flush it out
 *
 * Erratum 40: must set memory to write-through for user read-only pages.
 */
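/*
 * This table maps the Linux memory-type field of the PTE (the
 * L_PTE_MT_* index) to the XScale hardware C/B/TEX bits.  The
 * L_PTE_MT_* values are already shifted, so the value masked with
 * L_PTE_MT_MASK below is used directly as a byte offset into this
 * table of words.
 */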
cpu_xscale_mt_table:
	.long	0x00						@ L_PTE_MT_UNCACHED
	.long	PTE_BUFFERABLE					@ L_PTE_MT_BUFFERABLE
	.long	PTE_CACHEABLE					@ L_PTE_MT_WRITETHROUGH
	.long	PTE_CACHEABLE | PTE_BUFFERABLE			@ L_PTE_MT_WRITEBACK
	.long	PTE_EXT_TEX(1) | PTE_BUFFERABLE			@ L_PTE_MT_DEV_SHARED
	.long	0x00						@ unused
	.long	PTE_EXT_TEX(1) | PTE_CACHEABLE			@ L_PTE_MT_MINICACHE
	.long	PTE_EXT_TEX(1) | PTE_CACHEABLE | PTE_BUFFERABLE	@ L_PTE_MT_WRITEALLOC
	.long	0x00						@ unused
	.long	PTE_BUFFERABLE					@ L_PTE_MT_DEV_WC
	.long	0x00						@ unused
	.long	PTE_CACHEABLE | PTE_BUFFERABLE			@ L_PTE_MT_DEV_CACHED
	.long	0x00						@ L_PTE_MT_DEV_NONSHARED
	.long	0x00						@ unused
	.long	0x00						@ unused
	.long	0x00						@ unused

	.align	5
ENTRY(cpu_xscale_set_pte_ext)
	xscale_set_pte_ext_prologue

	@
	@ Erratum 40: must set memory to write-through for user read-only pages
	@
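	@ The "& ~(4 << 2)" mask strips the bit that distinguishes
	@ L_PTE_MT_WRITEALLOC from L_PTE_MT_WRITEBACK, so both cacheable
	@ write types are caught by the test below.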
	and	ip, r1, #(L_PTE_MT_MASK | L_PTE_USER | L_PTE_RDONLY) & ~(4 << 2)
	teq	ip, #L_PTE_MT_WRITEBACK | L_PTE_USER | L_PTE_RDONLY

	moveq	r1, #L_PTE_MT_WRITETHROUGH
	and	r1, r1, #L_PTE_MT_MASK
	adr	ip, cpu_xscale_mt_table
	ldr	ip, [ip, r1]
	bic	r2, r2, #0x0c
	orr	r2, r2, ip

	xscale_set_pte_ext_epilogue
	ret	lr

	.ltorg
	.align

.globl	cpu_xscale_suspend_size
.equ	cpu_xscale_suspend_size, 4 * 6
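@ 4 * 6 = 24 bytes: room for the six words (r4-r9) saved by cpu_xscale_do_suspend.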
#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_xscale_do_suspend)
	stmfd	sp!, {r4 - r9, lr}
	mrc	p14, 0, r4, c6, c0, 0	@ clock configuration, for turbo mode
	mrc	p15, 0, r5, c15, c1, 0	@ CP access reg
	mrc	p15, 0, r6, c13, c0, 0	@ PID
	mrc	p15, 0, r7, c3, c0, 0	@ domain ID
	mrc	p15, 0, r8, c1, c0, 1	@ auxiliary control reg
	mrc	p15, 0, r9, c1, c0, 0	@ control reg
	bic	r4, r4, #2		@ clear frequency change bit
	stmia	r0, {r4 - r9}		@ store cp regs
	ldmfd	sp!, {r4 - r9, pc}
ENDPROC(cpu_xscale_do_suspend)

ENTRY(cpu_xscale_do_resume)
	ldmia	r0, {r4 - r9}		@ load cp regs
	mov	ip, #0
	mcr	p15, 0, ip, c8, c7, 0	@ invalidate I & D TLBs
	mcr	p15, 0, ip, c7, c7, 0	@ invalidate I & D caches, BTB
	mcr	p14, 0, r4, c6, c0, 0	@ clock configuration, turbo mode.
	mcr	p15, 0, r5, c15, c1, 0	@ CP access reg
	mcr	p15, 0, r6, c13, c0, 0	@ PID
	mcr	p15, 0, r7, c3, c0, 0	@ domain ID
	mcr	p15, 0, r1, c2, c0, 0	@ translation table base addr
	mcr	p15, 0, r8, c1, c0, 1	@ auxiliary control reg
	mov	r0, r9			@ control register
	b	cpu_resume_mmu
ENDPROC(cpu_xscale_do_resume)
#endif

	.type	__xscale_setup, #function
__xscale_setup:
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I, D caches & BTB
	mcr	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I, D TLBs
	mov	r0, #1 << 6			@ cp6 for IOP3xx and Bulverde
	orr	r0, r0, #1 << 13		@ It's undefined whether this
	mcr	p15, 0, r0, c15, c1, 0		@ affects USR or SVC modes

	adr	r5, xscale_crval
	ldmia	r5, {r5, r6}
	mrc	p15, 0, r0, c1, c0, 0		@ get control register
	bic	r0, r0, r5
	orr	r0, r0, r6
	ret	lr
	.size	__xscale_setup, . - __xscale_setup

	/*
	 *  R
	 * .RVI ZFRS BLDP WCAM
	 * ..11 1.01 .... .101
	 *
	 */
	.type	xscale_crval, #object
xscale_crval:
	crval	clear=0x00003b07, mmuset=0x00003905, ucset=0x00001900
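	/*
	 * For reference, mmuset=0x00003905 sets M (MMU), C (D cache),
	 * S (system protection), Z (BTB), I (I cache) and V (high
	 * vectors) in the control register; clear=0x00003b07 is the
	 * set of bits masked off first.
	 */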

	__INITDATA

	@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
	define_processor_functions xscale, dabort=v5t_early_abort, pabort=legacy_pabort, suspend=1

	.section ".rodata"

	string	cpu_arch_name, "armv5te"
	string	cpu_elf_name, "v5"

	string	cpu_80200_A0_A1_name, "XScale-80200 A0/A1"
	string	cpu_80200_name, "XScale-80200"
	string	cpu_80219_name, "XScale-80219"
	string	cpu_8032x_name, "XScale-IOP8032x Family"
	string	cpu_8033x_name, "XScale-IOP8033x Family"
	string	cpu_pxa250_name, "XScale-PXA250"
	string	cpu_pxa210_name, "XScale-PXA210"
	string	cpu_ixp42x_name, "XScale-IXP42x Family"
	string	cpu_ixp43x_name, "XScale-IXP43x Family"
	string	cpu_ixp46x_name, "XScale-IXP46x Family"
	string	cpu_ixp2400_name, "XScale-IXP2400"
	string	cpu_ixp2800_name, "XScale-IXP2800"
	string	cpu_pxa255_name, "XScale-PXA255"
	string	cpu_pxa270_name, "XScale-PXA270"

	.align

	.section ".proc.info.init", "a"

.macro xscale_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cache
	.type	__\name\()_proc_info,#object
__\name\()_proc_info:
	.long	\cpu_val
	.long	\cpu_mask
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	initfn	__xscale_setup, __\name\()_proc_info
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	\cpu_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.ifb \cache
		.long	xscale_cache_fns
	.else
		.long	\cache
	.endif
	.size	__\name\()_proc_info, . - __\name\()_proc_info
.endm
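/*
 * Each invocation below emits one proc_info_list entry: the boot code
 * matches the CPU ID register against cpu_val under cpu_mask, and the
 * optional cache argument overrides the default xscale_cache_fns (used
 * only by the 80200 A0/A1 entry).
 */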

	xscale_proc_info 80200_A0_A1, 0x69052000, 0xfffffffe, cpu_80200_name, \
		cache=xscale_80200_A0_A1_cache_fns
	xscale_proc_info 80200, 0x69052000, 0xfffffff0, cpu_80200_name
	xscale_proc_info 80219, 0x69052e20, 0xffffffe0, cpu_80219_name
	xscale_proc_info 8032x, 0x69052420, 0xfffff7e0, cpu_8032x_name
	xscale_proc_info 8033x, 0x69054010, 0xfffffd30, cpu_8033x_name
	xscale_proc_info pxa250, 0x69052100, 0xfffff7f0, cpu_pxa250_name
	xscale_proc_info pxa210, 0x69052120, 0xfffff3f0, cpu_pxa210_name
	xscale_proc_info ixp2400, 0x69054190, 0xfffffff0, cpu_ixp2400_name
	xscale_proc_info ixp2800, 0x690541a0, 0xfffffff0, cpu_ixp2800_name
	xscale_proc_info ixp42x, 0x690541c0, 0xffffffc0, cpu_ixp42x_name
	xscale_proc_info ixp43x, 0x69054040, 0xfffffff0, cpu_ixp43x_name
	xscale_proc_info ixp46x, 0x69054200, 0xffffff00, cpu_ixp46x_name
	xscale_proc_info pxa255, 0x69052d00, 0xfffffff0, cpu_pxa255_name
	xscale_proc_info pxa270, 0x69054110, 0xfffffff0, cpu_pxa270_name
