/*
 *  linux/arch/arm/mm/proc-v7.S
 *
 *  Copyright (C) 2001 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  This is the "shell" of the ARMv7 processor support.
 */
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>

#include "proc-macros.S"

#define TTB_C		(1 << 0)
#define TTB_S		(1 << 1)
#define TTB_RGN_NC	(0 << 3)
#define TTB_RGN_OC_WBWA	(1 << 3)
#define TTB_RGN_OC_WT	(2 << 3)
#define TTB_RGN_OC_WB	(3 << 3)

#ifndef CONFIG_SMP
#define TTB_FLAGS	TTB_C|TTB_RGN_OC_WB		@ mark PTWs cacheable, outer WB
#else
#define TTB_FLAGS	TTB_C|TTB_S|TTB_RGN_OC_WBWA	@ mark PTWs cacheable and shared, outer WBWA
#endif

ENTRY(cpu_v7_proc_init)
	mov	pc, lr
ENDPROC(cpu_v7_proc_init)

ENTRY(cpu_v7_proc_fin)
	mov	pc, lr
ENDPROC(cpu_v7_proc_fin)

/*
 *	cpu_v7_reset(loc)
 *
 *	Perform a soft reset of the system.  Put the CPU into the
 *	same state as it would be if it had been reset, and branch
 *	to what would be the reset vector.
 *
 *	- loc - location to jump to for soft reset
 */
	.align	5
ENTRY(cpu_v7_reset)
	mov	pc, r0
ENDPROC(cpu_v7_reset)

/*
 *	cpu_v7_do_idle()
 *
 *	Idle the processor (e.g. wait for interrupt).
 *
 *	IRQs are already disabled.
 */
ENTRY(cpu_v7_do_idle)
	dsb					@ WFI may enter a low-power mode
	wfi
	mov	pc, lr
ENDPROC(cpu_v7_do_idle)

ENTRY(cpu_v7_dcache_clean_area)
#ifndef TLB_CAN_READ_FROM_L1_CACHE
	dcache_line_size r2, r3
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, r2
	subs	r1, r1, r2
	bhi	1b
	dsb
#endif
	mov	pc, lr
ENDPROC(cpu_v7_dcache_clean_area)

/*
 *	cpu_v7_switch_mm(pgd_phys, tsk)
 *
 *	Set the translation table base pointer to be pgd_phys
 *
 *	- pgd_phys - physical address of new TTB
 *
 *	It is assumed that:
 *	- we are not using split page tables
 */
ENTRY(cpu_v7_switch_mm)
#ifdef CONFIG_MMU
	mov	r2, #0
	ldr	r1, [r1, #MM_CONTEXT_ID]	@ get mm->context.id
	orr	r0, r0, #TTB_FLAGS
#ifdef CONFIG_ARM_ERRATA_430973
	mcr	p15, 0, r2, c7, c5, 6		@ flush BTAC/BTB
#endif
	mcr	p15, 0, r2, c13, c0, 1		@ set reserved context ID
	isb
1:	mcr	p15, 0, r0, c2, c0, 0		@ set TTB 0
	isb
	mcr	p15, 0, r1, c13, c0, 1		@ set context ID
	isb
#endif
	mov	pc, lr
ENDPROC(cpu_v7_switch_mm)

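/*
 * Note on the sequence above: the new TTB is installed while the
 * reserved (zero) context ID is current, and the new context ID is
 * only written once the TTB update has been synchronised by the ISB.
 * This closes the window in which speculative fetches could pair the
 * old ASID with the new page tables (or the new ASID with the old
 * ones).
 */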

/*
 *	cpu_v7_set_pte_ext(ptep, pte)
 *
 *	Set a level 2 translation table entry.
 *
 *	- ptep - pointer to level 2 translation table entry
 *		 (hardware version is stored at -2048 bytes)
 *	- pte - PTE value to store
 *	- ext - value for extended PTE bits
 */
ENTRY(cpu_v7_set_pte_ext)
#ifdef CONFIG_MMU
	str	r1, [r0], #-2048		@ linux version

	bic	r3, r1, #0x000003f0		@ clear Linux-only status bits
	bic	r3, r3, #PTE_TYPE_MASK
	orr	r3, r3, r2			@ merge in the extended bits
	orr	r3, r3, #PTE_EXT_AP0 | 2	@ AP0, small page type

	tst	r1, #1 << 4
	orrne	r3, r3, #PTE_EXT_TEX(1)		@ propagate bit 4 into TEX[0]

	tst	r1, #L_PTE_WRITE
	tstne	r1, #L_PTE_DIRTY
	orreq	r3, r3, #PTE_EXT_APX		@ read-only unless writable and dirty

	tst	r1, #L_PTE_USER
	orrne	r3, r3, #PTE_EXT_AP1		@ allow user access
	tstne	r3, #PTE_EXT_APX
	bicne	r3, r3, #PTE_EXT_APX | PTE_EXT_AP0	@ user read-only: clear APX and AP0

	tst	r1, #L_PTE_EXEC
	orreq	r3, r3, #PTE_EXT_XN		@ execute-never if not executable

	tst	r1, #L_PTE_YOUNG
	tstne	r1, #L_PTE_PRESENT
	moveq	r3, #0				@ fault if not present or not young

	str	r3, [r0]			@ hardware version
	mcr	p15, 0, r0, c7, c10, 1		@ flush_pte
#endif
	mov	pc, lr
ENDPROC(cpu_v7_set_pte_ext)

cpu_v7_name:
	.ascii	"ARMv7 Processor"
	.align

	__INIT

/*
 *	__v7_setup
 *
 *	Initialise TLB, Caches, and MMU state ready to switch the MMU
 *	on.  Return in r0 the new CP15 C1 control register setting.
 *
 *	We automatically detect if we have a Harvard cache, and use the
 *	Harvard cache control instructions instead of the unified cache
 *	control instructions.
 *
 *	This should be able to cover all ARMv7 cores.
 *
 *	It is assumed that:
 *	- cache type register is implemented
 */
__v7_setup:
#ifdef CONFIG_SMP
	mrc	p15, 0, r0, c1, c0, 1		@ Enable SMP/nAMP mode
	orr	r0, r0, #(0x1 << 6)
	mcr	p15, 0, r0, c1, c0, 1
#endif
	adr	r12, __v7_setup_stack		@ the local stack
	stmia	r12, {r0-r5, r7, r9, r11, lr}
	bl	v7_flush_dcache_all
	ldmia	r12, {r0-r5, r7, r9, r11, lr}
#ifdef CONFIG_ARM_ERRATA_430973
	mrc	p15, 0, r10, c1, c0, 1		@ read aux control register
	orr	r10, r10, #(1 << 6)		@ set IBE to 1
	mcr	p15, 0, r10, c1, c0, 1		@ write aux control register
#endif
#ifdef CONFIG_ARM_ERRATA_458693
	mrc	p15, 0, r10, c1, c0, 1		@ read aux control register
	orr	r10, r10, #(1 << 5)		@ set L1NEON to 1
	orr	r10, r10, #(1 << 9)		@ set PLDNOP to 1
	mcr	p15, 0, r10, c1, c0, 1		@ write aux control register
#endif
#ifdef CONFIG_ARM_ERRATA_460075
	mrc	p15, 1, r10, c9, c0, 2		@ read L2 cache aux ctrl register
	orr	r10, r10, #(1 << 22)		@ set the Write Allocate disable bit
	mcr	p15, 1, r10, c9, c0, 2		@ write the L2 cache aux ctrl register
#endif
	mov	r10, #0
#ifdef HARVARD_CACHE
	mcr	p15, 0, r10, c7, c5, 0		@ I+BTB cache invalidate
#endif
	dsb
#ifdef CONFIG_MMU
	mcr	p15, 0, r10, c8, c7, 0		@ invalidate I + D TLBs
	mcr	p15, 0, r10, c2, c0, 2		@ TTB control register
	orr	r4, r4, #TTB_FLAGS
	mcr	p15, 0, r4, c2, c0, 1		@ load TTB1
	mov	r10, #0x1f			@ domains 0, 1 = manager
	mcr	p15, 0, r10, c3, c0, 0		@ load domain access register
#endif
	ldr	r5, =0xff0aa1a8
	ldr	r6, =0x40e040e0
	mcr	p15, 0, r5, c10, c2, 0		@ write PRRR
	mcr	p15, 0, r6, c10, c2, 1		@ write NMRR
	adr	r5, v7_crval
	ldmia	r5, {r5, r6}
	mrc	p15, 0, r0, c1, c0, 0		@ read control register
	bic	r0, r0, r5			@ clear bits
	orr	r0, r0, r6			@ set them
	mov	pc, lr				@ return to head.S:__enable_mmu
ENDPROC(__v7_setup)

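/*
 * __v7_setup only computes the control register value; writing it is
 * left to the caller.  A minimal sketch of the expected caller-side
 * sequence (assumed from __enable_mmu/__turn_mmu_on in head.S):
 *
 *	mcr	p15, 0, r0, c1, c0, 0	@ write control register, MMU on
 *	mrc	p15, 0, r3, c0, c0, 0	@ read id reg to settle the write
 */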

	/*   AT
	 *  TFR   EV X F   I D LR
	 * .EEE ..EE PUI. .T.T 4RVI ZFRS BLDP WCAM
	 * rxxx rrxx xxx0 0101 xxxx xxxx x111 xxxx	< forced
	 *    1    0 110       0011 1.00 .111 1101	< we want
	 */
	.type	v7_crval, #object
v7_crval:
	crval	clear=0x0120c302, mmuset=0x10c0387d, ucset=0x00c0187c

__v7_setup_stack:
	.space	4 * 11				@ 11 registers

	.type	v7_processor_functions, #object
ENTRY(v7_processor_functions)
	.word	v7_early_abort
	.word	pabort_ifar
	.word	cpu_v7_proc_init
	.word	cpu_v7_proc_fin
	.word	cpu_v7_reset
	.word	cpu_v7_do_idle
	.word	cpu_v7_dcache_clean_area
	.word	cpu_v7_switch_mm
	.word	cpu_v7_set_pte_ext
	.size	v7_processor_functions, . - v7_processor_functions

	.type	cpu_arch_name, #object
cpu_arch_name:
	.asciz	"armv7"
	.size	cpu_arch_name, . - cpu_arch_name

	.type	cpu_elf_name, #object
cpu_elf_name:
	.asciz	"v7"
	.size	cpu_elf_name, . - cpu_elf_name
	.align

	.section ".proc.info.init", #alloc, #execinstr

	/*
	 * Match any ARMv7 processor core.
	 */
	.type	__v7_proc_info, #object
__v7_proc_info:
	.long	0x000f0000		@ Required ID value
	.long	0x000f0000		@ Mask for ID
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_XN | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__v7_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_v7_name
	.long	v7_processor_functions
	.long	v7wbi_tlb_fns
	.long	v6_user_fns
	.long	v7_cache_fns
	.size	__v7_proc_info, . - __v7_proc_info

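/*
 * How this record is matched (a sketch, assuming the usual
 * __lookup_processor_type logic in head-common.S): the boot code reads
 * the main ID register, masks it with the mask word above, and
 * compares the result with the required ID value:
 *
 *	mrc	p15, 0, r9, c0, c0		@ get processor ID
 *	ldmia	r5, {r3, r4}			@ value, mask
 *	and	r4, r4, r9			@ mask wanted bits
 *	teq	r3, r4				@ match?
 *
 * With value and mask both 0x000f0000, any MIDR whose architecture
 * field (bits 19:16) reads 0xf matches, i.e. every CPUID-scheme
 * (ARMv7) core.
 */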