/*
 * linux/arch/arm/mm/proc-v6.S
 *
 * Copyright (C) 2001 Deep Blue Solutions Ltd.
 * Modified by Catalin Marinas for noMMU support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This is the "shell" of the ARMv6 processor support.
 */
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/elf.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>

#ifdef CONFIG_SMP
#include <asm/hardware/arm_scu.h>
#endif

#include "proc-macros.S"

#define D_CACHE_LINE_SIZE	32

/*
 * Attribute bits OR'ed into the low bits of the translation table base
 * register (TTBR) value alongside the page table physical address.
 */
#define TTB_C		(1 << 0)	@ page table walks inner cacheable
#define TTB_S		(1 << 1)	@ page table walks shareable
#define TTB_IMP		(1 << 2)	@ implementation defined
#define TTB_RGN_NC	(0 << 3)	@ outer region non-cacheable
#define TTB_RGN_WBWA	(1 << 3)	@ outer region write-back, write-allocate
#define TTB_RGN_WT	(2 << 3)	@ outer region write-through
#define TTB_RGN_WB	(3 << 3)	@ outer region write-back, no allocate

#ifndef CONFIG_SMP
#define TTB_FLAGS	TTB_RGN_WBWA
#else
#define TTB_FLAGS	TTB_RGN_WBWA|TTB_S	@ SMP: walks must also be shareable
#endif

/*
 * cpu_v6_proc_init()
 *
 * Nothing to initialise for an ARMv6 core; return immediately.
 */
ENTRY(cpu_v6_proc_init)
	mov	pc, lr

/*
 * cpu_v6_proc_fin()
 *
 * Shut the caches down cleanly (e.g. before reboot): flush everything,
 * then turn off the I-cache, D-cache and alignment checking.
 * Returns with the final CP15 control register value in r0.
 */
ENTRY(cpu_v6_proc_fin)
	stmfd	sp!, {lr}
	cpsid	if				@ disable interrupts
	bl	v6_flush_kern_cache_all
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x1000			@ ...i............ (I-cache off)
	bic	r0, r0, #0x0006			@ .............ca. (D-cache/align off)
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	ldmfd	sp!, {pc}

/*
 * cpu_v6_reset(loc)
 *
 * Perform a soft reset of the system. Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * - loc - location to jump to for soft reset
 *
 * It is assumed that:
 */
	.align	5
ENTRY(cpu_v6_reset)
	mov	pc, r0

/*
 * cpu_v6_do_idle()
 *
 * Idle the processor (eg, wait for interrupt).
 *
 * IRQs are already disabled.
 */
ENTRY(cpu_v6_do_idle)
	mcr	p15, 0, r1, c7, c0, 4		@ wait for interrupt
	mov	pc, lr

/*
 * cpu_v6_dcache_clean_area(addr, size)
 *
 * Clean (write back) the D-cache over [r0, r0 + r1), one cache line
 * at a time.  Compiled out entirely when the TLB can fetch page table
 * entries directly from the L1 cache, making the clean unnecessary.
 */
ENTRY(cpu_v6_dcache_clean_area)
#ifndef TLB_CAN_READ_FROM_L1_CACHE
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #D_CACHE_LINE_SIZE
	subs	r1, r1, #D_CACHE_LINE_SIZE
	bhi	1b
#endif
	mov	pc, lr

/*
 * cpu_v6_switch_mm(pgd_phys, tsk)
 *
 * Set the translation table base pointer to be pgd_phys
 *
 * - pgd_phys - physical address of new TTB
 *
 * It is assumed that:
 * - we are not using split page tables
 */
ENTRY(cpu_v6_switch_mm)
#ifdef CONFIG_MMU
	mov	r2, #0
	ldr	r1, [r1, #MM_CONTEXT_ID]	@ get mm->context.id
	orr	r0, r0, #TTB_FLAGS		@ merge TTB cacheability/shareability bits
	mcr	p15, 0, r2, c7, c5, 6		@ flush BTAC/BTB
	mcr	p15, 0, r2, c7, c10, 4		@ drain write buffer
	mcr	p15, 0, r0, c2, c0, 0		@ set TTB 0
	mcr	p15, 0, r1, c13, c0, 1		@ set context ID
#endif
	mov	pc, lr

/*
 * cpu_v6_set_pte_ext(ptep, pte, ext)
 *
 * Set a level 2 translation table entry.
116 * 117 * - ptep - pointer to level 2 translation table entry 118 * (hardware version is stored at -1024 bytes) 119 * - pte - PTE value to store 120 * - ext - value for extended PTE bits 121 * 122 * Permissions: 123 * YUWD APX AP1 AP0 SVC User 124 * 0xxx 0 0 0 no acc no acc 125 * 100x 1 0 1 r/o no acc 126 * 10x0 1 0 1 r/o no acc 127 * 1011 0 0 1 r/w no acc 128 * 110x 0 1 0 r/w r/o 129 * 11x0 0 1 0 r/w r/o 130 * 1111 0 1 1 r/w r/w 131 */ 132ENTRY(cpu_v6_set_pte_ext) 133#ifdef CONFIG_MMU 134 str r1, [r0], #-2048 @ linux version 135 136 bic r3, r1, #0x000003f0 137 bic r3, r3, #0x00000003 138 orr r3, r3, r2 139 orr r3, r3, #PTE_EXT_AP0 | 2 140 141 tst r1, #L_PTE_WRITE 142 tstne r1, #L_PTE_DIRTY 143 orreq r3, r3, #PTE_EXT_APX 144 145 tst r1, #L_PTE_USER 146 orrne r3, r3, #PTE_EXT_AP1 147 tstne r3, #PTE_EXT_APX 148 bicne r3, r3, #PTE_EXT_APX | PTE_EXT_AP0 149 150 tst r1, #L_PTE_YOUNG 151 biceq r3, r3, #PTE_EXT_APX | PTE_EXT_AP_MASK 152 153 tst r1, #L_PTE_EXEC 154 orreq r3, r3, #PTE_EXT_XN 155 156 tst r1, #L_PTE_PRESENT 157 moveq r3, #0 158 159 str r3, [r0] 160 mcr p15, 0, r0, c7, c10, 1 @ flush_pte 161#endif 162 mov pc, lr 163 164 165 166 167cpu_v6_name: 168 .asciz "ARMv6-compatible processor" 169 .align 170 171 .section ".text.init", #alloc, #execinstr 172 173/* 174 * __v6_setup 175 * 176 * Initialise TLB, Caches, and MMU state ready to switch the MMU 177 * on. Return in r0 the new CP15 C1 control register setting. 178 * 179 * We automatically detect if we have a Harvard cache, and use the 180 * Harvard cache control instructions insead of the unified cache 181 * control instructions. 182 * 183 * This should be able to cover all ARMv6 cores. 
 *
 * It is assumed that:
 * - cache type register is implemented
 */
__v6_setup:
#ifdef CONFIG_SMP
	/* Set up the SCU on core 0 only */
	mrc	p15, 0, r0, c0, c0, 5		@ CPU core number
	ands	r0, r0, #15
	ldreq	r0, =SCU_BASE			@ core 0 only from here (eq)
	ldreq	r5, [r0, #SCU_CTRL]
	orreq	r5, r5, #1			@ set SCU enable bit
	streq	r5, [r0, #SCU_CTRL]

#ifndef CONFIG_CPU_DCACHE_DISABLE
	mrc	p15, 0, r0, c1, c0, 1		@ Enable SMP/nAMP mode
	orr	r0, r0, #0x20
	mcr	p15, 0, r0, c1, c0, 1
#endif
#endif

	mov	r0, #0
	mcr	p15, 0, r0, c7, c14, 0		@ clean+invalidate D cache
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c15, 0		@ clean+invalidate cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
#ifdef CONFIG_MMU
	mcr	p15, 0, r0, c8, c7, 0		@ invalidate I + D TLBs
	mcr	p15, 0, r0, c2, c0, 2		@ TTB control register
	orr	r4, r4, #TTB_FLAGS		@ r4 = TTB base set up by caller
	mcr	p15, 0, r4, c2, c0, 1		@ load TTB1
#endif /* CONFIG_MMU */
	adr	r5, v6_crval
	ldmia	r5, {r5, r6}			@ r5 = clear mask, r6 = set mask
	mrc	p15, 0, r0, c1, c0, 0		@ read control register
	bic	r0, r0, r5			@ clear the bits we force off
	orr	r0, r0, r6			@ set the bits we want
	mov	pc, lr				@ return to head.S:__ret

	/*
	 *         V X F   I D LR
	 * .... ...E PUI. .T.T 4RVI ZFRS BLDP WCAM
	 * rrrr rrrx xxx0 0101 xxxx xxxx x111 xxxx < forced
	 *         0 110 0011 1.00 .111 1101      < we want
	 */
	.type	v6_crval, #object
v6_crval:
	crval	clear=0x01e0fb7f, mmuset=0x00c0387d, ucset=0x00c0187c

	/* Function table exported to the generic ARM processor glue. */
	.type	v6_processor_functions, #object
ENTRY(v6_processor_functions)
	.word	v6_early_abort
	.word	cpu_v6_proc_init
	.word	cpu_v6_proc_fin
	.word	cpu_v6_reset
	.word	cpu_v6_do_idle
	.word	cpu_v6_dcache_clean_area
	.word	cpu_v6_switch_mm
	.word	cpu_v6_set_pte_ext
	.size	v6_processor_functions, . - v6_processor_functions

	.type	cpu_arch_name, #object
cpu_arch_name:
	.asciz	"armv6"
	.size	cpu_arch_name, . - cpu_arch_name

	.type	cpu_elf_name, #object
cpu_elf_name:
	.asciz	"v6"
	.size	cpu_elf_name, . - cpu_elf_name
	.align

	.section ".proc.info.init", #alloc, #execinstr

	/*
	 * Match any ARMv6 processor core.
	 */
	.type	__v6_proc_info, #object
__v6_proc_info:
	.long	0x0007b000			@ CPU ID value to match
	.long	0x0007f000			@ CPU ID mask
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_XN | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__v6_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_JAVA
	.long	cpu_v6_name
	.long	v6_processor_functions
	.long	v6wbi_tlb_fns
	.long	v6_user_fns
	.long	v6_cache_fns
	.size	__v6_proc_info, . - __v6_proc_info