/*
 *  linux/arch/arm/mm/proc-xscale.S
 *
 *  Author:	Nicolas Pitre
 *  Created:	November 2000
 *  Copyright:	(C) 2000, 2001 MontaVista Software Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * MMU functions for the Intel XScale CPUs
 *
 * 2001 Aug 21:
 *	some contributions by Brett Gaines <brett.w.gaines@intel.com>
 *	Copyright 2001 by Intel Corp.
 *
 * 2001 Sep 08:
 *	Completely revisited, many important fixes
 *	Nicolas Pitre <nico@fluxnic.net>
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable.h>
#include <asm/pgtable-hwdef.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"

/*
 * This is the maximum size of an area which will be flushed.  If the area
 * is larger than this, then we flush the whole cache.
 */
#define MAX_AREA_SIZE	32768

/*
 * The cache line size of the I and D caches.
 */
#define CACHELINESIZE	32

/*
 * The size of the data cache.
 */
#define CACHESIZE	32768

/*
 * Virtual address used to allocate the cache when flushed
 *
 * This must be an address range which is _never_ used.  It should
 * apparently have a mapping in the corresponding page table for
 * compatibility with future CPUs that _could_ require it.  For now we
 * don't care.
 *
 * This must be aligned on a 2*CACHESIZE boundary.  The code alternates
 * between the 2 areas each time the clean_d_cache macro is used.
 * Without this the XScale core exhibits cache eviction problems and no
 * one knows why.
 *
 * Reminder: the vector table is located at 0xffff0000-0xffff0fff.
 */
#define CLEAN_ADDR	0xfffe0000
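
/*
 * Concretely: with CACHESIZE = 0x8000 this gives two 32 KiB areas, at
 * 0xfffe0000 and 0xfffe8000.  The "eor \rd, \rd, #CACHESIZE" in
 * clean_d_cache below toggles bit 15 of clean_addr, so successive
 * whole-cache cleans allocate from alternating areas, both of which
 * sit just below the vector page at 0xffff0000.
 */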

/*
 * This macro is used to wait for a CP15 write and is needed when we
 * have to ensure that the last operation to the co-processor was
 * completed before continuing with the next operation.
 */
	.macro	cpwait, rd
	mrc	p15, 0, \rd, c2, c0, 0		@ arbitrary read of cp15
	mov	\rd, \rd			@ wait for completion
	sub	pc, pc, #4			@ flush instruction pipeline
	.endm

	.macro	cpwait_ret, lr, rd
	mrc	p15, 0, \rd, c2, c0, 0		@ arbitrary read of cp15
	sub	pc, \lr, \rd, LSR #32		@ wait for completion and
						@ flush instruction pipeline
	.endm
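
/*
 * Note on cpwait_ret: an immediate shift of LSR #32 yields zero, so
 * "sub pc, \lr, \rd, LSR #32" computes pc = lr - 0.  Since the mrc
 * result \rd is consumed as a shifter operand, the return cannot issue
 * until the CP15 read has completed, and the write to pc then flushes
 * the pipeline: the same effect as cpwait, folded into the return.
 */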

/*
 * This macro cleans the entire dcache using line allocate.
 * The main loop has been unrolled to reduce loop overhead.
 * rd and rs are two scratch registers.
 */
	.macro  clean_d_cache, rd, rs
	ldr	\rs, =clean_addr
	ldr	\rd, [\rs]
	eor	\rd, \rd, #CACHESIZE
	str	\rd, [\rs]
	add	\rs, \rd, #CACHESIZE
1:	mcr	p15, 0, \rd, c7, c2, 5		@ allocate D cache line
	add	\rd, \rd, #CACHELINESIZE
	mcr	p15, 0, \rd, c7, c2, 5		@ allocate D cache line
	add	\rd, \rd, #CACHELINESIZE
	mcr	p15, 0, \rd, c7, c2, 5		@ allocate D cache line
	add	\rd, \rd, #CACHELINESIZE
	mcr	p15, 0, \rd, c7, c2, 5		@ allocate D cache line
	add	\rd, \rd, #CACHELINESIZE
	teq	\rd, \rs
	bne	1b
	.endm
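
/*
 * Why a line allocate cleans the cache: allocating D-cache lines
 * (c7, c2, 5) across CACHESIZE bytes of otherwise unused address space
 * forces every existing line to be replaced, and evicting a dirty line
 * writes it back to memory.  As a rough C sketch of the macro above,
 * with allocate_dcache_line() a hypothetical stand-in for the mcr:
 *
 *	addr = clean_addr ^ CACHESIZE;		// switch areas
 *	clean_addr = addr;
 *	for (end = addr + CACHESIZE; addr != end; addr += CACHELINESIZE)
 *		allocate_dcache_line(addr);
 */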

	.data
clean_addr:	.word	CLEAN_ADDR

	.text

/*
 * cpu_xscale_proc_init()
 *
 * Nothing too exciting at the moment
 */
ENTRY(cpu_xscale_proc_init)
	@ enable write buffer coalescing. Some bootloaders disable it
	mrc	p15, 0, r1, c1, c0, 1
	bic	r1, r1, #1
	mcr	p15, 0, r1, c1, c0, 1
	mov	pc, lr

/*
 * cpu_xscale_proc_fin()
 */
ENTRY(cpu_xscale_proc_fin)
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x1800			@ ...IZ...........
	bic	r0, r0, #0x0006			@ .............CA.
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	mov	pc, lr

/*
 * cpu_xscale_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset
 *
 * Beware PXA270 erratum E7.
 */
	.align	5
ENTRY(cpu_xscale_reset)
	mov	r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
	msr	cpsr_c, r1			@ reset CPSR
	mcr	p15, 0, r1, c10, c4, 1		@ unlock I-TLB
	mcr	p15, 0, r1, c8, c5, 0		@ invalidate I-TLB
	mrc	p15, 0, r1, c1, c0, 0		@ ctrl register
	bic	r1, r1, #0x0086			@ ........B....CA.
	bic	r1, r1, #0x3900			@ ..VIZ..S........
	sub	pc, pc, #4			@ flush pipeline
	@ *** cache line aligned ***
	mcr	p15, 0, r1, c1, c0, 0		@ ctrl register
	bic	r1, r1, #0x0001			@ ...............M
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches & BTB
	mcr	p15, 0, r1, c1, c0, 0		@ ctrl register
	@ CAUTION: MMU turned off from this point.  We count on the pipeline
	@ already containing those two last instructions to survive.
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
	mov	pc, r0
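
/*
 * Note: the ".align 5" above puts cpu_xscale_reset on a 32-byte (cache
 * line) boundary, so the instructions that run after the MMU is turned
 * off fall within the single line flagged "*** cache line aligned ***";
 * together with the explicit pipeline flush this is what lets the
 * sequence survive the switch to physical addressing.
 */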

/*
 * cpu_xscale_do_idle()
 *
 * Cause the processor to idle
 *
 * For now we do nothing but go to idle mode for every case
 *
 * XScale supports clock switching, but using idle mode support
 * allows external hardware to react to system state changes.
 */
	.align	5

ENTRY(cpu_xscale_do_idle)
	mov	r0, #1
	mcr	p14, 0, r0, c7, c0, 0		@ Go to IDLE
	mov	pc, lr

/* ================================= CACHE ================================ */

/*
 *	flush_icache_all()
 *
 *	Unconditionally clean and invalidate the entire icache.
 */
ENTRY(xscale_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mov	pc, lr
ENDPROC(xscale_flush_icache_all)

/*
 *	flush_user_cache_all()
 *
 *	Invalidate all cache entries in a particular address
 *	space.
 */
ENTRY(xscale_flush_user_cache_all)
	/* FALLTHROUGH */

/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
ENTRY(xscale_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	mov	ip, #0
__flush_whole_cache:
	clean_d_cache r0, r1
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ Invalidate I cache & BTB
	mcrne	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

/*
 *	flush_user_cache_range(start, end, vm_flags)
 *
 *	Invalidate a range of cache entries in the specified
 *	address space.
 *
 *	- start - start address (may not be aligned)
 *	- end	- end address (exclusive, may not be aligned)
 *	- vm_flags - vma->vm_flags describing the address space
 */
	.align	5
ENTRY(xscale_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #MAX_AREA_SIZE
	bhs	__flush_whole_cache

1:	tst	r2, #VM_EXEC
	mcrne	p15, 0, r0, c7, c5, 1		@ Invalidate I cache line
	mcr	p15, 0, r0, c7, c10, 1		@ Clean D cache line
	mcr	p15, 0, r0, c7, c6, 1		@ Invalidate D cache line
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 6		@ Invalidate BTB
	mcrne	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start and end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 *
 *	Note: single I-cache line invalidation isn't used here since
 *	it also trashes the mini I-cache used by JTAG debuggers.
 */
ENTRY(xscale_coherent_kern_range)
	bic	r0, r0, #CACHELINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ Invalidate I cache & BTB
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start and end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(xscale_coherent_user_range)
	bic	r0, r0, #CACHELINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c5, 1		@ Invalidate I cache entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 6		@ Invalidate BTB
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

/*
 *	flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
ENTRY(xscale_flush_kern_dcache_area)
	add	r1, r0, r1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ Invalidate I cache & BTB
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

/*
 *	dma_inv_range(start, end)
 *
 *	Invalidate (discard) the specified virtual address range.
 *	May not write back any entries.  If 'start' or 'end'
 *	are not cache line aligned, those lines must be written
 *	back.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
xscale_dma_inv_range:
	tst	r0, #CACHELINESIZE - 1
	bic	r0, r0, #CACHELINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
	tst	r1, #CACHELINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

/*
 *	dma_clean_range(start, end)
 *
 *	Clean the specified virtual address range.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
xscale_dma_clean_range:
	bic	r0, r0, #CACHELINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

/*
 *	dma_flush_range(start, end)
 *
 *	Clean and invalidate the specified virtual address range.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(xscale_dma_flush_range)
	bic	r0, r0, #CACHELINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

/*
 *	dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(xscale_dma_map_area)
	add	r1, r1, r0
	cmp	r2, #DMA_TO_DEVICE
	beq	xscale_dma_clean_range
	bcs	xscale_dma_inv_range
	b	xscale_dma_flush_range
ENDPROC(xscale_dma_map_area)
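
/*
 * The dispatch above relies on the ordering of the DMA direction
 * values (DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2),
 * so in rough C terms it behaves like:
 *
 *	if (dir == DMA_TO_DEVICE)
 *		xscale_dma_clean_range(start, start + size);
 *	else if (dir > DMA_TO_DEVICE)		// DMA_FROM_DEVICE
 *		xscale_dma_inv_range(start, start + size);
 *	else					// DMA_BIDIRECTIONAL
 *		xscale_dma_flush_range(start, start + size);
 */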

/*
 *	dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 *
 *	Variant for 80200 steppings A0/A1, where dma_inv_range() must
 *	behave as dma_flush_range() (see the erratum #25 note below).
 */
ENTRY(xscale_dma_a0_map_area)
	add	r1, r1, r0
	teq	r2, #DMA_TO_DEVICE
	beq	xscale_dma_clean_range
	b	xscale_dma_flush_range
ENDPROC(xscale_dma_a0_map_area)

/*
 *	dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(xscale_dma_unmap_area)
	mov	pc, lr
ENDPROC(xscale_dma_unmap_area)

ENTRY(xscale_cache_fns)
	.long	xscale_flush_icache_all
	.long	xscale_flush_kern_cache_all
	.long	xscale_flush_user_cache_all
	.long	xscale_flush_user_cache_range
	.long	xscale_coherent_kern_range
	.long	xscale_coherent_user_range
	.long	xscale_flush_kern_dcache_area
	.long	xscale_dma_map_area
	.long	xscale_dma_unmap_area
	.long	xscale_dma_flush_range

/*
 * On stepping A0/A1 of the 80200, invalidating D-cache by line doesn't
 * clear the dirty bits, which means that if we invalidate a dirty line,
 * the dirty data can still be written back to external memory later on.
 *
 * The recommended workaround is to always do a clean D-cache line before
 * doing an invalidate D-cache line, so on the affected processors,
 * dma_inv_range() is implemented as dma_flush_range().
 *
 * See erratum #25 of "Intel 80200 Processor Specification Update",
 * revision January 22, 2003, available at:
 *     http://www.intel.com/design/iio/specupdt/273415.htm
 */
ENTRY(xscale_80200_A0_A1_cache_fns)
	.long	xscale_flush_icache_all
	.long	xscale_flush_kern_cache_all
	.long	xscale_flush_user_cache_all
	.long	xscale_flush_user_cache_range
	.long	xscale_coherent_kern_range
	.long	xscale_coherent_user_range
	.long	xscale_flush_kern_dcache_area
	.long	xscale_dma_a0_map_area
	.long	xscale_dma_unmap_area
	.long	xscale_dma_flush_range

ENTRY(cpu_xscale_dcache_clean_area)
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHELINESIZE
	subs	r1, r1, #CACHELINESIZE
	bhi	1b
	mov	pc, lr

/* =============================== PageTable ============================== */

/*
 * cpu_xscale_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables
 */
	.align	5
ENTRY(cpu_xscale_switch_mm)
	clean_d_cache r1, r2
	mcr	p15, 0, ip, c7, c5, 0		@ Invalidate I cache & BTB
	mcr	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
	cpwait_ret lr, ip

/*
 * cpu_xscale_set_pte_ext(ptep, pte, ext)
 *
 * Set a PTE and flush it out
 *
 * Erratum 40: must set memory to write-through for user read-only pages.
 */
cpu_xscale_mt_table:
	.long	0x00						@ L_PTE_MT_UNCACHED
	.long	PTE_BUFFERABLE					@ L_PTE_MT_BUFFERABLE
	.long	PTE_CACHEABLE					@ L_PTE_MT_WRITETHROUGH
	.long	PTE_CACHEABLE | PTE_BUFFERABLE			@ L_PTE_MT_WRITEBACK
	.long	PTE_EXT_TEX(1) | PTE_BUFFERABLE			@ L_PTE_MT_DEV_SHARED
	.long	0x00						@ unused
	.long	PTE_EXT_TEX(1) | PTE_CACHEABLE			@ L_PTE_MT_MINICACHE
	.long	PTE_EXT_TEX(1) | PTE_CACHEABLE | PTE_BUFFERABLE	@ L_PTE_MT_WRITEALLOC
	.long	0x00						@ unused
	.long	PTE_BUFFERABLE					@ L_PTE_MT_DEV_WC
	.long	0x00						@ unused
	.long	PTE_CACHEABLE | PTE_BUFFERABLE			@ L_PTE_MT_DEV_CACHED
	.long	0x00						@ L_PTE_MT_DEV_NONSHARED
	.long	0x00						@ unused
	.long	0x00						@ unused
	.long	0x00						@ unused

	.align	5
ENTRY(cpu_xscale_set_pte_ext)
	xscale_set_pte_ext_prologue

	@
	@ Erratum 40: must set memory to write-through for user read-only pages
	@
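	@ Bit 4 (4 << 2) is what distinguishes L_PTE_MT_WRITEALLOC from
	@ L_PTE_MT_WRITEBACK; masking it out below makes the comparison
	@ treat write-allocate mappings as write-back, so they are
	@ downgraded too (this reading is inferred from the constants).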
	and	ip, r1, #(L_PTE_MT_MASK | L_PTE_USER | L_PTE_RDONLY) & ~(4 << 2)
	teq	ip, #L_PTE_MT_WRITEBACK | L_PTE_USER | L_PTE_RDONLY

	moveq	r1, #L_PTE_MT_WRITETHROUGH
	and	r1, r1, #L_PTE_MT_MASK
	adr	ip, cpu_xscale_mt_table
	ldr	ip, [ip, r1]
	bic	r2, r2, #0x0c
	orr	r2, r2, ip

	xscale_set_pte_ext_epilogue
	mov	pc, lr


	.ltorg

	.align

	__CPUINIT

	.type	__xscale_setup, #function
__xscale_setup:
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I, D caches & BTB
	mcr	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I, D TLBs
	mov	r0, #1 << 6			@ cp6 for IOP3xx and Bulverde
	orr	r0, r0, #1 << 13		@ It's undefined whether this
	mcr	p15, 0, r0, c15, c1, 0		@ affects USR or SVC modes

	adr	r5, xscale_crval
	ldmia	r5, {r5, r6}
	mrc	p15, 0, r0, c1, c0, 0		@ get control register
	bic	r0, r0, r5
	orr	r0, r0, r6
	mov	pc, lr
	.size	__xscale_setup, . - __xscale_setup

	/*
	 *  R
	 * .RVI ZFRS BLDP WCAM
	 * ..11 1.01 .... .101
	 *
	 */
	.type	xscale_crval, #object
xscale_crval:
	crval	clear=0x00003b07, mmuset=0x00003905, ucset=0x00001900
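
/*
 * crval (defined in proc-macros.S) emits a pair of words: the control
 * register bits to clear, then the bits to set (mmuset when CONFIG_MMU
 * is enabled, ucset otherwise).  __xscale_setup loads the pair into
 * r5/r6 and applies it with bic/orr, returning the resulting control
 * register value in r0 for the caller to write.
 */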

	__INITDATA

/*
 * Purpose : Function pointers used to access above functions - all calls
 *	     come through these
 */

	.type	xscale_processor_functions, #object
ENTRY(xscale_processor_functions)
	.word	v5t_early_abort
	.word	legacy_pabort
	.word	cpu_xscale_proc_init
	.word	cpu_xscale_proc_fin
	.word	cpu_xscale_reset
	.word	cpu_xscale_do_idle
	.word	cpu_xscale_dcache_clean_area
	.word	cpu_xscale_switch_mm
	.word	cpu_xscale_set_pte_ext
	.size	xscale_processor_functions, . - xscale_processor_functions
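
/*
 * The slot order above follows struct processor (asm/proc-fns.h): data
 * abort handler, prefetch abort handler, _proc_init, _proc_fin, reset,
 * _do_idle, dcache_clean_area, switch_mm and set_pte_ext.
 */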

	.section ".rodata"

	.type	cpu_arch_name, #object
cpu_arch_name:
	.asciz	"armv5te"
	.size	cpu_arch_name, . - cpu_arch_name

	.type	cpu_elf_name, #object
cpu_elf_name:
	.asciz	"v5"
	.size	cpu_elf_name, . - cpu_elf_name

	.type	cpu_80200_A0_A1_name, #object
cpu_80200_A0_A1_name:
	.asciz	"XScale-80200 A0/A1"
	.size	cpu_80200_A0_A1_name, . - cpu_80200_A0_A1_name

	.type	cpu_80200_name, #object
cpu_80200_name:
	.asciz	"XScale-80200"
	.size	cpu_80200_name, . - cpu_80200_name

	.type	cpu_80219_name, #object
cpu_80219_name:
	.asciz	"XScale-80219"
	.size	cpu_80219_name, . - cpu_80219_name

	.type	cpu_8032x_name, #object
cpu_8032x_name:
	.asciz	"XScale-IOP8032x Family"
	.size	cpu_8032x_name, . - cpu_8032x_name

	.type	cpu_8033x_name, #object
cpu_8033x_name:
	.asciz	"XScale-IOP8033x Family"
	.size	cpu_8033x_name, . - cpu_8033x_name

	.type	cpu_pxa250_name, #object
cpu_pxa250_name:
	.asciz	"XScale-PXA250"
	.size	cpu_pxa250_name, . - cpu_pxa250_name

	.type	cpu_pxa210_name, #object
cpu_pxa210_name:
	.asciz	"XScale-PXA210"
	.size	cpu_pxa210_name, . - cpu_pxa210_name

	.type	cpu_ixp42x_name, #object
cpu_ixp42x_name:
	.asciz	"XScale-IXP42x Family"
	.size	cpu_ixp42x_name, . - cpu_ixp42x_name

	.type	cpu_ixp43x_name, #object
cpu_ixp43x_name:
	.asciz	"XScale-IXP43x Family"
	.size	cpu_ixp43x_name, . - cpu_ixp43x_name

	.type	cpu_ixp46x_name, #object
cpu_ixp46x_name:
	.asciz	"XScale-IXP46x Family"
	.size	cpu_ixp46x_name, . - cpu_ixp46x_name

	.type	cpu_ixp2400_name, #object
cpu_ixp2400_name:
	.asciz	"XScale-IXP2400"
	.size	cpu_ixp2400_name, . - cpu_ixp2400_name

	.type	cpu_ixp2800_name, #object
cpu_ixp2800_name:
	.asciz	"XScale-IXP2800"
	.size	cpu_ixp2800_name, . - cpu_ixp2800_name

	.type	cpu_pxa255_name, #object
cpu_pxa255_name:
	.asciz	"XScale-PXA255"
	.size	cpu_pxa255_name, . - cpu_pxa255_name

	.type	cpu_pxa270_name, #object
cpu_pxa270_name:
	.asciz	"XScale-PXA270"
	.size	cpu_pxa270_name, . - cpu_pxa270_name

	.align

	.section ".proc.info.init", #alloc, #execinstr

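/*
 * Each record below follows struct proc_info_list (asm/procinfo.h):
 * CPU id value and mask, MMU flags for kernel section mappings, MMU
 * flags for I/O section mappings, the initialisation branch, arch and
 * ELF names, hwcaps, the CPU name, then the processor, TLB, user-page
 * and cache function tables.
 */
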
	.type	__80200_A0_A1_proc_info,#object
__80200_A0_A1_proc_info:
	.long	0x69052000
	.long	0xfffffffe
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_80200_A0_A1_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_80200_A0_A1_cache_fns
	.size	__80200_A0_A1_proc_info, . - __80200_A0_A1_proc_info

	.type	__80200_proc_info,#object
__80200_proc_info:
	.long	0x69052000
	.long	0xfffffff0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_80200_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__80200_proc_info, . - __80200_proc_info

	.type	__80219_proc_info,#object
__80219_proc_info:
	.long	0x69052e20
	.long	0xffffffe0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_80219_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__80219_proc_info, . - __80219_proc_info

	.type	__8032x_proc_info,#object
__8032x_proc_info:
	.long	0x69052420
	.long	0xfffff7e0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_8032x_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__8032x_proc_info, . - __8032x_proc_info

	.type	__8033x_proc_info,#object
__8033x_proc_info:
	.long	0x69054010
	.long	0xfffffd30
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_8033x_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__8033x_proc_info, . - __8033x_proc_info

	.type	__pxa250_proc_info,#object
__pxa250_proc_info:
	.long	0x69052100
	.long	0xfffff7f0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_pxa250_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__pxa250_proc_info, . - __pxa250_proc_info

	.type	__pxa210_proc_info,#object
__pxa210_proc_info:
	.long	0x69052120
	.long	0xfffff3f0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_pxa210_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__pxa210_proc_info, . - __pxa210_proc_info

	.type	__ixp2400_proc_info, #object
__ixp2400_proc_info:
	.long	0x69054190
	.long	0xfffffff0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_ixp2400_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__ixp2400_proc_info, . - __ixp2400_proc_info

	.type	__ixp2800_proc_info, #object
__ixp2800_proc_info:
	.long	0x690541a0
	.long	0xfffffff0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_ixp2800_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__ixp2800_proc_info, . - __ixp2800_proc_info

	.type	__ixp42x_proc_info, #object
__ixp42x_proc_info:
	.long	0x690541c0
	.long	0xffffffc0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_ixp42x_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__ixp42x_proc_info, . - __ixp42x_proc_info

	.type	__ixp43x_proc_info, #object
__ixp43x_proc_info:
	.long	0x69054040
	.long	0xfffffff0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_ixp43x_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__ixp43x_proc_info, . - __ixp43x_proc_info

	.type	__ixp46x_proc_info, #object
__ixp46x_proc_info:
	.long	0x69054200
	.long	0xffffff00
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_ixp46x_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__ixp46x_proc_info, . - __ixp46x_proc_info

	.type	__pxa255_proc_info,#object
__pxa255_proc_info:
	.long	0x69052d00
	.long	0xfffffff0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_pxa255_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__pxa255_proc_info, . - __pxa255_proc_info

	.type	__pxa270_proc_info,#object
__pxa270_proc_info:
	.long	0x69054110
	.long	0xfffffff0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_pxa270_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__pxa270_proc_info, . - __pxa270_proc_info