/*
 * Based on arch/arm/mm/proc.S
 *
 * Copyright (C) 2001 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>

#include "proc-macros.S"

#ifndef CONFIG_SMP
/* PTWs cacheable, inner/outer WBWA not shareable */
#define TCR_FLAGS	TCR_IRGN_WBWA | TCR_ORGN_WBWA
#else
/* PTWs cacheable, inner/outer WBWA shareable */
#define TCR_FLAGS	TCR_IRGN_WBWA | TCR_ORGN_WBWA | TCR_SHARED
#endif

#define MAIR(attr, mt)	((attr) << ((mt) * 8))

/*
 *	cpu_cache_off()
 *
 *	Turn the CPU D-cache off.
 */
ENTRY(cpu_cache_off)
	mrs	x0, sctlr_el1
	bic	x0, x0, #1 << 2			// clear SCTLR.C
	msr	sctlr_el1, x0
	isb
	ret
ENDPROC(cpu_cache_off)

/*
 *	cpu_reset(loc)
 *
 *	Perform a soft reset of the system. Put the CPU into the same state
 *	as it would be if it had been reset, and branch to what would be the
 *	reset vector. It must be executed with the flat identity mapping.
 *
 *	- loc	- location to jump to for soft reset
 */
	.align	5
ENTRY(cpu_reset)
	mrs	x1, sctlr_el1
	bic	x1, x1, #1
	msr	sctlr_el1, x1			// disable the MMU
	isb
	ret	x0
ENDPROC(cpu_reset)

/*
 *	cpu_do_idle()
 *
 *	Idle the processor (wait for interrupt).
 */
ENTRY(cpu_do_idle)
	dsb	sy				// WFI may enter a low-power mode
	wfi
	ret
ENDPROC(cpu_do_idle)
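
/*
 * Layout note (illustration only, not used by the assembler): the
 * context buffer that cpu_do_suspend() fills and cpu_do_resume() reads
 * below holds eleven 64-bit system register values at fixed byte
 * offsets, in store order. A C-side sketch, with a hypothetical struct
 * name, of what any caller-supplied buffer must look like:
 *
 *	struct saved_sysregs {			// hypothetical name
 *		u64 tpidr_el0;			// offset  0
 *		u64 tpidrro_el0;		// offset  8
 *		u64 contextidr_el1;		// offset 16
 *		u64 mair_el1;			// offset 24
 *		u64 cpacr_el1;			// offset 32
 *		u64 ttbr1_el1;			// offset 40
 *		u64 tcr_el1;			// offset 48
 *		u64 vbar_el1;			// offset 56
 *		u64 mdscr_el1;			// offset 64
 *		u64 oslsr_el1;			// offset 72
 *		u64 sctlr_el1;			// offset 80
 *	};
 *
 * The stp/ldp offsets below are hard-coded, so any C-side definition
 * must match this layout exactly.
 */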

#ifdef CONFIG_ARM64_CPU_SUSPEND
/**
 * cpu_do_suspend - save CPU register context
 *
 * x0: virtual address of context pointer
 */
ENTRY(cpu_do_suspend)
	mrs	x2, tpidr_el0
	mrs	x3, tpidrro_el0
	mrs	x4, contextidr_el1
	mrs	x5, mair_el1
	mrs	x6, cpacr_el1
	mrs	x7, ttbr1_el1
	mrs	x8, tcr_el1
	mrs	x9, vbar_el1
	mrs	x10, mdscr_el1
	mrs	x11, oslsr_el1
	mrs	x12, sctlr_el1
	stp	x2, x3, [x0]
	stp	x4, x5, [x0, #16]
	stp	x6, x7, [x0, #32]
	stp	x8, x9, [x0, #48]
	stp	x10, x11, [x0, #64]
	str	x12, [x0, #80]
	ret
ENDPROC(cpu_do_suspend)

/**
 * cpu_do_resume - restore CPU register context
 *
 * x0: physical address of context pointer
 * x1: ttbr0_el1 to be restored
 *
 * Returns:
 *	sctlr_el1 value in x0
 */
ENTRY(cpu_do_resume)
	/*
	 * Invalidate local TLB entries before turning on the MMU
	 */
	tlbi	vmalle1
	ldp	x2, x3, [x0]
	ldp	x4, x5, [x0, #16]
	ldp	x6, x7, [x0, #32]
	ldp	x8, x9, [x0, #48]
	ldp	x10, x11, [x0, #64]
	ldr	x12, [x0, #80]
	msr	tpidr_el0, x2
	msr	tpidrro_el0, x3
	msr	contextidr_el1, x4
	msr	mair_el1, x5
	msr	cpacr_el1, x6
	msr	ttbr0_el1, x1
	msr	ttbr1_el1, x7
	msr	tcr_el1, x8
	msr	vbar_el1, x9
	msr	mdscr_el1, x10
	/*
	 * Restore oslsr_el1 by writing oslar_el1
	 */
	ubfx	x11, x11, #1, #1
	msr	oslar_el1, x11
	mov	x0, x12
	dsb	nsh				// Make sure local TLB invalidation completed
	isb
	ret
ENDPROC(cpu_do_resume)
#endif

/*
 *	cpu_do_switch_mm(pgd_phys, tsk)
 *
 *	Set the translation table base pointer to be pgd_phys.
 *
 *	- pgd_phys - physical address of new TTB
 */
ENTRY(cpu_do_switch_mm)
	mmid	w1, x1				// get mm->context.id
	bfi	x0, x1, #48, #16		// set the ASID
	msr	ttbr0_el1, x0			// set TTBR0
	isb
	ret
ENDPROC(cpu_do_switch_mm)
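
/*
 * Worked example for cpu_do_switch_mm() above (illustration only):
 * with TCR_EL1.AS set via TCR_ASID16 in __cpu_setup() below, TTBR0_EL1
 * bits [63:48] carry a 16-bit ASID. mmid loads mm->context.id into w1
 * (zero-extending x1), and the bfi deposits its low 16 bits into the
 * top of the page table base. For a hypothetical pgd_phys of
 * 0x800001000 and context.id of 0x42:
 *
 *	x0 = 0x0000000800001000		// pgd_phys on entry
 *	bfi x0, x1, #48, #16		// x1 = 0x42
 *	x0 = 0x0042000800001000		// ASID in [63:48], TTB below
 *
 * A single msr then switches the page tables and retags new TLB
 * entries at once, which is why no TLB flush appears here.
 */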

	.section ".text.init", #alloc, #execinstr

/*
 *	__cpu_setup
 *
 *	Initialise the processor for turning the MMU on. Return in x0 the
 *	value of the SCTLR_EL1 register.
 */
ENTRY(__cpu_setup)
	/*
	 * Preserve the link register across the function call.
	 */
	mov	x28, lr
	bl	__flush_dcache_all
	mov	lr, x28
	ic	iallu				// I+BTB cache invalidate
	tlbi	vmalle1is			// invalidate I + D TLBs
	dsb	sy

	mov	x0, #3 << 20
	msr	cpacr_el1, x0			// Enable FP/ASIMD
	msr	mdscr_el1, xzr			// Reset mdscr_el1
	/*
	 * Memory region attributes for LPAE:
	 *
	 *   n = AttrIndx[2:0]
	 *			n	MAIR
	 *   DEVICE_nGnRnE	000	00000000
	 *   DEVICE_nGnRE	001	00000100
	 *   DEVICE_GRE		010	00001100
	 *   NORMAL_NC		011	01000100
	 *   NORMAL		100	11111111
	 */
	ldr	x5, =MAIR(0x00, MT_DEVICE_nGnRnE) | \
		     MAIR(0x04, MT_DEVICE_nGnRE) | \
		     MAIR(0x0c, MT_DEVICE_GRE) | \
		     MAIR(0x44, MT_NORMAL_NC) | \
		     MAIR(0xff, MT_NORMAL)
	msr	mair_el1, x5
	/*
	 * Prepare SCTLR
	 */
	adr	x5, crval
	ldp	w5, w6, [x5]
	mrs	x0, sctlr_el1
	bic	x0, x0, x5			// clear bits
	orr	x0, x0, x6			// set bits
	/*
	 * Set/prepare TCR and TTBR. We use 512GB (39-bit) address range for
	 * both user and kernel.
	 */
	ldr	x10, =TCR_TxSZ(VA_BITS) | TCR_FLAGS | TCR_IPS_40BIT | \
		      TCR_ASID16 | TCR_TBI0 | (1 << 31)
#ifdef CONFIG_ARM64_64K_PAGES
	orr	x10, x10, TCR_TG0_64K
	orr	x10, x10, TCR_TG1_64K
#endif
	msr	tcr_el1, x10
	ret					// return to head.S
ENDPROC(__cpu_setup)

	/*
	 *                 n n            T
	 *       U E      WT T UD     US IHBS
	 *       CE0      XWHW CZ     ME TEEA   S
	 * .... .IEE .... NEAI TE.I ..AD DEN0 ACAM
	 * 0011 0... 1101 ..0. ..0. 10.. .... .... < hardware reserved
	 * .... .1.. .... 01.1 11.1 ..01 0001 1101 < software settings
	 */
	.type	crval, #object
crval:
	.word	0x000802e2			// clear
	.word	0x0405d11d			// set
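
/*
 * Worked example of how crval is consumed (illustration only): the
 * "adr x5, crval; ldp w5, w6, [x5]" sequence in __cpu_setup() above
 * computes
 *
 *	x0 = (sctlr_el1 & ~0x000802e2) | 0x0405d11d;
 *
 * The clear word masks off bits such as A (alignment checking, bit 1)
 * and WXN (bit 19); the set word forces on the software settings listed
 * above, among them M (MMU, bit 0), C (D-cache, bit 2) and I (I-cache,
 * bit 12). The result is only returned in x0; head.S writes it to
 * sctlr_el1 when it actually enables the MMU.
 */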