/*
 *  linux/arch/arm/mm/proc-v7.S
 *
 *  Copyright (C) 2001 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  This is the "shell" of the ARMv7 processor support.
 */
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>

#include "proc-macros.S"

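/*
 * TTBR attribute bits ORed into the translation table base address
 * below: C marks the table walk inner cacheable, S marks it shareable,
 * and RGN[4:3] selects its outer cacheability (2 = outer write-through,
 * 3 = outer write-back).  Only TTB_RGN_OC_WB is used in this file.
 */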
#define TTB_C		(1 << 0)
#define TTB_S		(1 << 1)
#define TTB_RGN_OC_WT	(2 << 3)
#define TTB_RGN_OC_WB	(3 << 3)

ENTRY(cpu_v7_proc_init)
	mov	pc, lr
ENDPROC(cpu_v7_proc_init)

ENTRY(cpu_v7_proc_fin)
	mov	pc, lr
ENDPROC(cpu_v7_proc_fin)

/*
 *	cpu_v7_reset(loc)
 *
 *	Perform a soft reset of the system.  Put the CPU into the
 *	same state as it would be if it had been reset, and branch
 *	to what would be the reset vector.
 *
 *	- loc   - location to jump to for soft reset
 */
	.align	5
ENTRY(cpu_v7_reset)
	mov	pc, r0
ENDPROC(cpu_v7_reset)

/*
 *	cpu_v7_do_idle()
 *
 *	Idle the processor (eg, wait for interrupt).
 *
 *	IRQs are already disabled.
 */
ENTRY(cpu_v7_do_idle)
	wfi
	mov	pc, lr
ENDPROC(cpu_v7_do_idle)

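/*
 *	cpu_v7_dcache_clean_area(addr, size)
 *
 *	Clean the data cache over the given region so that newly written
 *	page table entries are visible to the hardware table walker.
 *	This is a no-op when the walker can read from the L1 cache
 *	(TLB_CAN_READ_FROM_L1_CACHE).
 *
 *	- addr  - virtual start address of region
 *	- size  - size of region in bytes
 */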
ENTRY(cpu_v7_dcache_clean_area)
#ifndef TLB_CAN_READ_FROM_L1_CACHE
	dcache_line_size r2, r3
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, r2
	subs	r1, r1, r2
	bhi	1b
	dsb
#endif
	mov	pc, lr
ENDPROC(cpu_v7_dcache_clean_area)

/*
 *	cpu_v7_switch_mm(pgd_phys, mm)
 *
 *	Set the translation table base pointer to be pgd_phys
 *
 *	- pgd_phys - physical address of new TTB
 *	- mm       - the mm_struct being switched to
 *
 *	It is assumed that:
 *	- we are not using split page tables
 */
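/*
 * The ASID is changed in two steps around the TTB write below: the
 * reserved context ID (0) is installed first, then the new TTB, and
 * only then the real context ID, with an ISB after each write.  This
 * closes the window in which the old ASID could otherwise be paired
 * with the new page tables (or the new ASID with the old tables) by
 * speculative translations.
 */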
ENTRY(cpu_v7_switch_mm)
#ifdef CONFIG_MMU
	mov	r2, #0
	ldr	r1, [r1, #MM_CONTEXT_ID]	@ get mm->context.id
	orr	r0, r0, #TTB_RGN_OC_WB		@ mark PTWs outer cacheable, WB
	mcr	p15, 0, r2, c13, c0, 1		@ set reserved context ID
	isb
1:	mcr	p15, 0, r0, c2, c0, 0		@ set TTB 0
	isb
	mcr	p15, 0, r1, c13, c0, 1		@ set context ID
	isb
#endif
	mov	pc, lr
ENDPROC(cpu_v7_switch_mm)

/*
 *	cpu_v7_set_pte_ext(ptep, pte, ext)
 *
 *	Set a level 2 translation table entry.
 *
 *	- ptep  - pointer to level 2 translation table entry
 *		  (hardware version is stored at -2048 bytes)
 *	- pte   - PTE value to store
 *	- ext	- value for extended PTE bits
 */
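/*
 * The code below builds the hardware PTE in r3 from the Linux PTE in
 * r1 and the extension bits in r2:
 *
 *  - APX is set unless the page is both writable and dirty, so the
 *    first write to a clean page faults and the dirty bit can be
 *    simulated in software
 *  - AP1 is set for user pages (with the APX/AP0 fixup for user
 *    read-only mappings)
 *  - XN is set for pages without L_PTE_EXEC
 *  - the hardware entry is written as 0 unless the page is both young
 *    and present, so the first access faults and the young bit can be
 *    simulated as well
 */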
ENTRY(cpu_v7_set_pte_ext)
#ifdef CONFIG_MMU
	str	r1, [r0], #-2048		@ linux version

	bic	r3, r1, #0x000003f0
	bic	r3, r3, #PTE_TYPE_MASK
	orr	r3, r3, r2
	orr	r3, r3, #PTE_EXT_AP0 | 2

	tst	r2, #1 << 4
	orrne	r3, r3, #PTE_EXT_TEX(1)

	tst	r1, #L_PTE_WRITE
	tstne	r1, #L_PTE_DIRTY
	orreq	r3, r3, #PTE_EXT_APX

	tst	r1, #L_PTE_USER
	orrne	r3, r3, #PTE_EXT_AP1
	tstne	r3, #PTE_EXT_APX
	bicne	r3, r3, #PTE_EXT_APX | PTE_EXT_AP0

	tst	r1, #L_PTE_EXEC
	orreq	r3, r3, #PTE_EXT_XN

	tst	r1, #L_PTE_YOUNG
	tstne	r1, #L_PTE_PRESENT
	moveq	r3, #0

	str	r3, [r0]
	mcr	p15, 0, r0, c7, c10, 1		@ flush_pte
#endif
	mov	pc, lr
ENDPROC(cpu_v7_set_pte_ext)

cpu_v7_name:
	.ascii	"ARMv7 Processor"
	.align

	.section ".text.init", #alloc, #execinstr

/*
 *	__v7_setup
 *
 *	Initialise TLB, Caches, and MMU state ready to switch the MMU
 *	on.  Return in r0 the new CP15 C1 control register setting.
 *
 *	We automatically detect if we have a Harvard cache, and use the
 *	Harvard cache control instructions instead of the unified cache
 *	control instructions.
 *
 *	This should be able to cover all ARMv7 cores.
 *
 *	It is assumed that:
 *	- cache type register is implemented
 */
__v7_setup:
	adr	r12, __v7_setup_stack		@ the local stack
	stmia	r12, {r0-r5, r7, r9, r11, lr}
	bl	v7_flush_dcache_all
	ldmia	r12, {r0-r5, r7, r9, r11, lr}
	mov	r10, #0
#ifdef HARVARD_CACHE
	mcr	p15, 0, r10, c7, c5, 0		@ I+BTB cache invalidate
#endif
	dsb
#ifdef CONFIG_MMU
	mcr	p15, 0, r10, c8, c7, 0		@ invalidate I + D TLBs
	mcr	p15, 0, r10, c2, c0, 2		@ TTB control register
	orr	r4, r4, #TTB_RGN_OC_WB		@ mark PTWs outer cacheable, WB
	mcr	p15, 0, r4, c2, c0, 0		@ load TTB0
	mcr	p15, 0, r4, c2, c0, 1		@ load TTB1
	mov	r10, #0x1f			@ domains 0, 1 = manager
	mcr	p15, 0, r10, c3, c0, 0		@ load domain access register
#endif
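	/*
	 * PRRR and NMRR hold the TEX remap tables: with the TRE bit set
	 * in the control register (see v7_crval below), the memory type,
	 * shareability and cacheability of a mapping are looked up in
	 * these registers, indexed by the TEX[0], C and B bits of the
	 * page table entry.
	 */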
	ldr	r5, =0xff0aa1a8
	ldr	r6, =0x40e040e0
	mcr	p15, 0, r5, c10, c2, 0		@ write PRRR
	mcr	p15, 0, r6, c10, c2, 1		@ write NMRR
	adr	r5, v7_crval
	ldmia	r5, {r5, r6}
	mrc	p15, 0, r0, c1, c0, 0		@ read control register
	bic	r0, r0, r5			@ clear them
	orr	r0, r0, r6			@ set them
	mov	pc, lr				@ return to head.S:__ret
ENDPROC(__v7_setup)

	/*
	 *         V X F   I D LR
	 * .... ...E PUI. .T.T 4RVI ZFRS BLDP WCAM
	 * rrrr rrrx xxx0 0101 xxxx xxxx x111 xxxx < forced
	 *         0 110       0011 1.00 .111 1101 < we want
	 */
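	/*
	 * crval (see proc-macros.S) emits a { clear, set } pair of
	 * control register masks; __v7_setup loads the pair above and
	 * uses mmuset as the set mask when CONFIG_MMU is enabled,
	 * ucset otherwise.
	 */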
	.type	v7_crval, #object
v7_crval:
	crval	clear=0x0120c302, mmuset=0x10c0387d, ucset=0x00c0187c

__v7_setup_stack:
	.space	4 * 11				@ 11 registers

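/*
 * The order of the entries below must match struct processor (the CPU
 * method table used by the generic ARM processor glue): data abort
 * handler, prefetch abort handler, then the cpu_v7_* methods.
 */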
	.type	v7_processor_functions, #object
ENTRY(v7_processor_functions)
	.word	v7_early_abort
	.word	pabort_ifar
	.word	cpu_v7_proc_init
	.word	cpu_v7_proc_fin
	.word	cpu_v7_reset
	.word	cpu_v7_do_idle
	.word	cpu_v7_dcache_clean_area
	.word	cpu_v7_switch_mm
	.word	cpu_v7_set_pte_ext
	.size	v7_processor_functions, . - v7_processor_functions

	.type	cpu_arch_name, #object
cpu_arch_name:
	.asciz	"armv7"
	.size	cpu_arch_name, . - cpu_arch_name

	.type	cpu_elf_name, #object
cpu_elf_name:
	.asciz	"v7"
	.size	cpu_elf_name, . - cpu_elf_name
	.align

	.section ".proc.info.init", #alloc, #execinstr

	/*
	 * Match any ARMv7 processor core.
	 */
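	/*
	 * The record below follows the struct proc_info_list layout
	 * (asm/procinfo.h): CPU ID value and mask, MMU flags for kernel
	 * section mappings and for I/O mappings, the __cpu_flush entry
	 * (the branch to __v7_setup), then the names, ELF hwcaps and the
	 * function tables used to hook up this CPU at boot.
	 */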
	.type	__v7_proc_info, #object
__v7_proc_info:
	.long	0x000f0000		@ Required ID value
	.long	0x000f0000		@ Mask for ID
	.long   PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long   PMD_TYPE_SECT | \
		PMD_SECT_XN | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__v7_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_v7_name
	.long	v7_processor_functions
	.long	v7wbi_tlb_fns
	.long	v6_user_fns
	.long	v7_cache_fns
	.size	__v7_proc_info, . - __v7_proc_info