/*
 * Based on arch/arm/mm/proc.S
 *
 * Copyright (C) 2001 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>

#include "proc-macros.S"

#ifdef CONFIG_ARM64_64K_PAGES
#define TCR_TG_FLAGS	TCR_TG0_64K | TCR_TG1_64K
#else
#define TCR_TG_FLAGS	TCR_TG0_4K | TCR_TG1_4K
#endif

#ifdef CONFIG_SMP
#define TCR_SMP_FLAGS	TCR_SHARED
#else
#define TCR_SMP_FLAGS	0
#endif

/* PTWs cacheable, inner/outer WBWA */
#define TCR_CACHE_FLAGS	TCR_IRGN_WBWA | TCR_ORGN_WBWA

#define MAIR(attr, mt)	((attr) << ((mt) * 8))

/*
 *	cpu_cache_off()
 *
 *	Turn the CPU D-cache off.
 */
ENTRY(cpu_cache_off)
	mrs	x0, sctlr_el1
	bic	x0, x0, #1 << 2			// clear SCTLR.C
	msr	sctlr_el1, x0
	isb
	ret
ENDPROC(cpu_cache_off)

/*
 *	cpu_reset(loc)
 *
 *	Perform a soft reset of the system.  Put the CPU into the same state
 *	as it would be if it had been reset, and branch to what would be the
 *	reset vector.  It must be executed with the flat identity mapping.
 *
 *	- loc   - location to jump to for soft reset
 */
	.align	5
ENTRY(cpu_reset)
	mrs	x1, sctlr_el1
	bic	x1, x1, #1
	msr	sctlr_el1, x1			// disable the MMU
	isb
	ret	x0
ENDPROC(cpu_reset)
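/*
 * Illustrative caller sketch (hypothetical, hedged): cpu_reset must be
 * entered via the flat identity mapping with the caches cleaned, so a
 * caller would do something along these lines before branching here:
 *
 *	bl	flush_cache_all		// assumed cache-maintenance helper
 *	// ...switch ttbr0_el1 to the identity page tables...
 *	ldr	x0, =reset_address	// hypothetical physical target
 *	b	cpu_reset
 */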
/*
 *	cpu_do_idle()
 *
 *	Idle the processor (wait for interrupt).
 */
ENTRY(cpu_do_idle)
	dsb	sy				// WFI may enter a low-power mode
	wfi
	ret
ENDPROC(cpu_do_idle)

#ifdef CONFIG_ARM64_CPU_SUSPEND
/**
 * cpu_do_suspend - save CPU registers context
 *
 * x0: virtual address of context pointer
 */
ENTRY(cpu_do_suspend)
	mrs	x2, tpidr_el0
	mrs	x3, tpidrro_el0
	mrs	x4, contextidr_el1
	mrs	x5, mair_el1
	mrs	x6, cpacr_el1
	mrs	x7, ttbr1_el1
	mrs	x8, tcr_el1
	mrs	x9, vbar_el1
	mrs	x10, mdscr_el1
	mrs	x11, oslsr_el1
	mrs	x12, sctlr_el1
	stp	x2, x3, [x0]
	stp	x4, x5, [x0, #16]
	stp	x6, x7, [x0, #32]
	stp	x8, x9, [x0, #48]
	stp	x10, x11, [x0, #64]
	str	x12, [x0, #80]
	ret
ENDPROC(cpu_do_suspend)

/**
 * cpu_do_resume - restore CPU register context
 *
 * x0: Physical address of context pointer
 * x1: ttbr0_el1 to be restored
 *
 * Returns:
 *	sctlr_el1 value in x0
 */
ENTRY(cpu_do_resume)
	/*
	 * Invalidate local tlb entries before turning on MMU
	 */
	tlbi	vmalle1
	ldp	x2, x3, [x0]
	ldp	x4, x5, [x0, #16]
	ldp	x6, x7, [x0, #32]
	ldp	x8, x9, [x0, #48]
	ldp	x10, x11, [x0, #64]
	ldr	x12, [x0, #80]
	msr	tpidr_el0, x2
	msr	tpidrro_el0, x3
	msr	contextidr_el1, x4
	msr	mair_el1, x5
	msr	cpacr_el1, x6
	msr	ttbr0_el1, x1
	msr	ttbr1_el1, x7
	msr	tcr_el1, x8
	msr	vbar_el1, x9
	msr	mdscr_el1, x10
	/*
	 * Restore oslsr_el1 by writing oslar_el1
	 */
	ubfx	x11, x11, #1, #1
	msr	oslar_el1, x11
	mov	x0, x12
	dsb	nsh				// Make sure local tlb invalidation completed
	isb
	ret
ENDPROC(cpu_do_resume)
#endif

/*
 *	cpu_do_switch_mm(pgd_phys, tsk)
 *
 *	Set the translation table base pointer to be pgd_phys.
 *
 *	- pgd_phys - physical address of new TTB
 */
ENTRY(cpu_do_switch_mm)
	mmid	w1, x1				// get mm->context.id
	bfi	x0, x1, #48, #16		// set the ASID
	msr	ttbr0_el1, x0			// set TTBR0
	isb
	ret
ENDPROC(cpu_do_switch_mm)

	.section ".text.init", #alloc, #execinstr

/*
 *	__cpu_setup
 *
 *	Initialise the processor for turning the MMU on.  Return in x0 the
 *	value of the SCTLR_EL1 register.
 */
ENTRY(__cpu_setup)
	ic	iallu				// I+BTB cache invalidate
	tlbi	vmalle1is			// invalidate I + D TLBs
	dsb	sy

	mov	x0, #3 << 20
	msr	cpacr_el1, x0			// Enable FP/ASIMD
	msr	mdscr_el1, xzr			// Reset mdscr_el1
	/*
	 * Memory region attributes for LPAE:
	 *
	 *   n = AttrIndx[2:0]
	 *			n	MAIR
	 *   DEVICE_nGnRnE	000	00000000
	 *   DEVICE_nGnRE	001	00000100
	 *   DEVICE_GRE		010	00001100
	 *   NORMAL_NC		011	01000100
	 *   NORMAL		100	11111111
	 */
	ldr	x5, =MAIR(0x00, MT_DEVICE_nGnRnE) | \
		     MAIR(0x04, MT_DEVICE_nGnRE) | \
		     MAIR(0x0c, MT_DEVICE_GRE) | \
		     MAIR(0x44, MT_NORMAL_NC) | \
		     MAIR(0xff, MT_NORMAL)
	msr	mair_el1, x5
	/*
	 * Prepare SCTLR
	 */
	adr	x5, crval
	ldp	w5, w6, [x5]
	mrs	x0, sctlr_el1
	bic	x0, x0, x5			// clear bits
	orr	x0, x0, x6			// set bits
	/*
	 * Set/prepare TCR and TTBR. We use 512GB (39-bit) address range for
	 * both user and kernel.
	 */
	ldr	x10, =TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
		      TCR_TG_FLAGS | TCR_ASID16 | TCR_TBI0
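	/*
	 * Worked example (illustrative): TCR_TxSZ sets both T0SZ and
	 * T1SZ to 64 - VA_BITS, so with VA_BITS == 39 each field
	 * becomes 25, giving the 2^39 = 512GB translation range per
	 * TTBR mentioned above.
	 */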
	/*
	 * Read the PARange bits from ID_AA64MMFR0_EL1 and set the IPS bits in
	 * TCR_EL1.
	 */
	mrs	x9, ID_AA64MMFR0_EL1
	bfi	x10, x9, #32, #3
	msr	tcr_el1, x10
	ret					// return to head.S
ENDPROC(__cpu_setup)

	/*
	 *                 n n            T
	 *       U E      WT T UD     US IHBS
	 *       CE0      XWHW CZ     ME TEEA   S
	 * .... .IEE .... NEAI TE.I ..AD DEN0 ACAM
	 * 0011 0... 1101 ..0. ..0. 10.. .... .... < hardware reserved
	 * .... .1.. .... 01.1 11.1 ..01 0001 1101 < software settings
	 */
	.type	crval, #object
crval:
	.word	0x000802e2			// clear
	.word	0x0405d11d			// set
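	/*
	 * Informal decoding of the "set" word (based on the ARMv8
	 * SCTLR_EL1 bit assignments; for reference only): bit 0 (M)
	 * enables the MMU, bit 2 (C) the D-cache, bit 12 (I) the
	 * I-cache, and bits 3/4 (SA/SA0) the EL1/EL0 stack alignment
	 * checks; the remaining set bits correspond to the "software
	 * settings" row in the table above.
	 */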