/*
 * include/asm-arm/macro.h
 *
 * Copyright (C) 2009 Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcrosoft.com>
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#ifndef __ASM_ARM_MACRO_H__
#define __ASM_ARM_MACRO_H__
#ifdef __ASSEMBLY__

/*
 * These macros provide a convenient way to write 8, 16 and 32 bit data
 * to any address.
 * Registers r4 and r5 are used; any data in these registers is
 * overwritten by the macros.
 * The macros are valid for any ARM architecture. They do not implement
 * any memory barriers, so caution is recommended when using them while
 * the caches are enabled or on a multi-core system.
 */

.macro	write32, addr, data
	ldr	r4, =\addr
	ldr	r5, =\data
	str	r5, [r4]
.endm

.macro	write16, addr, data
	ldr	r4, =\addr
	ldrh	r5, =\data
	strh	r5, [r4]
.endm

.macro	write8, addr, data
	ldr	r4, =\addr
	ldrb	r5, =\data
	strb	r5, [r4]
.endm
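
/*
 * Illustrative only, kept inside a comment so nothing extra is assembled:
 * a minimal sketch of how the write macros above might be invoked. The
 * addresses and values are hypothetical placeholders, not real device
 * registers. Remember that every invocation clobbers r4 and r5.
 *
 *	write32	0x4a001000, 0x00000001	@ hypothetical 32-bit control reg
 *	write16	0x4a001004, 0xbeef	@ hypothetical 16-bit data reg
 *	write8	0x4a001008, 0x41	@ hypothetical 8-bit FIFO reg
 */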

/*
 * This macro generates a loop that can be used for delays in the code.
 * Register r4 is used; any data in this register is overwritten by the
 * macro.
 * The macro is valid for any ARM architecture. The actual time spent in
 * the loop will vary from CPU to CPU though.
 */

.macro	wait_timer, time
	ldr	r4, =\time
1:
	nop
	subs	r4, r4, #1
	bcs	1b
.endm

#ifdef CONFIG_ARM64
/*
 * Register aliases.
 */
lr	.req	x30

/*
 * Branch according to exception level
 */
.macro	switch_el, xreg, el3_label, el2_label, el1_label
	mrs	\xreg, CurrentEL
	cmp	\xreg, 0xc		/* EL3 */
	b.eq	\el3_label
	cmp	\xreg, 0x8		/* EL2 */
	b.eq	\el2_label
	cmp	\xreg, 0x4		/* EL1 */
	b.eq	\el1_label
.endm

/*
 * Branch if the current processor is a slave;
 * the processor whose affinity value is all zero is the master.
 */
.macro	branch_if_slave, xreg, slave_label
	mrs	\xreg, mpidr_el1
	tst	\xreg, #0xff		/* Test Affinity 0 */
	b.ne	\slave_label
	lsr	\xreg, \xreg, #8
	tst	\xreg, #0xff		/* Test Affinity 1 */
	b.ne	\slave_label
	lsr	\xreg, \xreg, #8
	tst	\xreg, #0xff		/* Test Affinity 2 */
	b.ne	\slave_label
	lsr	\xreg, \xreg, #16
	tst	\xreg, #0xff		/* Test Affinity 3 */
	b.ne	\slave_label
.endm

/*
 * Branch if the current processor is the master;
 * the processor whose affinity value is all zero is the master.
 */
.macro	branch_if_master, xreg1, xreg2, master_label
	mrs	\xreg1, mpidr_el1
	lsr	\xreg2, \xreg1, #32
	lsl	\xreg1, \xreg1, #40
	lsr	\xreg1, \xreg1, #40
	orr	\xreg1, \xreg1, \xreg2
	cbz	\xreg1, \master_label
.endm

.macro	armv8_switch_to_el2_m, xreg1
	/* 64bit EL2 | HCE | SMD | RES1 (Bits[5:4]) | Non-secure EL0/EL1 */
	mov	\xreg1, #0x5b1
	msr	scr_el3, \xreg1
	msr	cptr_el3, xzr		/* Disable coprocessor traps to EL3 */
	mov	\xreg1, #0x33ff
	msr	cptr_el2, \xreg1	/* Disable coprocessor traps to EL2 */

	/* Initialize SCTLR_EL2
	 *
	 * setting RES1 bits (29,28,23,22,18,16,11,5,4) to 1
	 * and RES0 bits (31,30,27,26,24,21,20,17,15-13,10-6) +
	 * EE,WXN,I,SA,C,A,M to 0
	 */
	mov	\xreg1, #0x0830
	movk	\xreg1, #0x30C5, lsl #16
	msr	sctlr_el2, \xreg1

	/* Return to the EL2_SP2 mode from EL3 */
	mov	\xreg1, sp
	msr	sp_el2, \xreg1		/* Migrate SP */
	mrs	\xreg1, vbar_el3
	msr	vbar_el2, \xreg1	/* Migrate VBAR */
	mov	\xreg1, #0x3c9
	msr	spsr_el3, \xreg1	/* EL2_SP2 | D | A | I | F */
	msr	elr_el3, lr
	eret
.endm

.macro	armv8_switch_to_el1_m, xreg1, xreg2
	/* Initialize Generic Timers */
	mrs	\xreg1, cnthctl_el2
	orr	\xreg1, \xreg1, #0x3	/* Enable EL1 access to timers */
	msr	cnthctl_el2, \xreg1
	msr	cntvoff_el2, xzr

	/* Initialize MPID/MPIDR registers */
	mrs	\xreg1, midr_el1
	mrs	\xreg2, mpidr_el1
	msr	vpidr_el2, \xreg1
	msr	vmpidr_el2, \xreg2

	/* Disable coprocessor traps */
	mov	\xreg1, #0x33ff
	msr	cptr_el2, \xreg1	/* Disable coprocessor traps to EL2 */
	msr	hstr_el2, xzr		/* Disable coprocessor traps to EL2 */
	mov	\xreg1, #3 << 20
	msr	cpacr_el1, \xreg1	/* Enable FP/SIMD at EL1 */

	/* Initialize HCR_EL2 */
	mov	\xreg1, #(1 << 31)	/* 64bit EL1 */
	orr	\xreg1, \xreg1, #(1 << 29)	/* Disable HVC */
	msr	hcr_el2, \xreg1

	/* SCTLR_EL1 initialization
	 *
	 * setting RES1 bits (29,28,23,22,20,11) to 1
	 * and RES0 bits (31,30,27,21,17,13,10,6) +
	 * UCI,EE,EOE,WXN,nTWE,nTWI,UCT,DZE,I,UMA,SED,ITD,
	 * CP15BEN,SA0,SA,C,A,M to 0
	 */
	mov	\xreg1, #0x0800
	movk	\xreg1, #0x30d0, lsl #16
	msr	sctlr_el1, \xreg1

	/* Return to the EL1_SP1 mode from EL2 */
	mov	\xreg1, sp
	msr	sp_el1, \xreg1		/* Migrate SP */
	mrs	\xreg1, vbar_el2
	msr	vbar_el1, \xreg1	/* Migrate VBAR */
	mov	\xreg1, #0x3c5
	msr	spsr_el2, \xreg1	/* EL1_SP1 | D | A | I | F */
	msr	elr_el2, lr
	eret
.endm

#if defined(CONFIG_GICV3)
.macro	gic_wait_for_interrupt_m xreg1
0:	wfi
	mrs	\xreg1, ICC_IAR1_EL1
	msr	ICC_EOIR1_EL1, \xreg1
	cbnz	\xreg1, 0b
.endm
#elif defined(CONFIG_GICV2)
.macro	gic_wait_for_interrupt_m xreg1, wreg2
0:	wfi
	ldr	\wreg2, [\xreg1, GICC_AIAR]
	str	\wreg2, [\xreg1, GICC_AEOIR]
	and	\wreg2, \wreg2, #0x3ff
	cbnz	\wreg2, 0b
.endm
#endif

#endif /* CONFIG_ARM64 */

#endif /* __ASSEMBLY__ */
#endif /* __ASM_ARM_MACRO_H__ */
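
/*
 * Illustrative only, kept inside a comment so nothing extra is assembled:
 * a minimal sketch of how the ARMv8 helpers above are typically combined
 * in early startup code, loosely modelled on U-Boot's start.S. All label
 * names below are hypothetical. armv8_switch_to_el2_m returns through
 * elr_el3 (loaded from lr) via eret, so it is meant to be reached with
 * bl: execution resumes at the caller, now running at EL2.
 *
 *	reset_sketch:
 *		bl	drop_to_el2		// comes back here at EL2
 *		branch_if_master x0, x1, master_cpu
 *	slave_cpu:				// park the secondary cores
 *		wfe
 *		b	slave_cpu
 *	master_cpu:
 *		b	continue_boot		// hypothetical entry point
 *
 *	drop_to_el2:
 *		switch_el x0, 1f, 0f, 0f	// dispatch on CurrentEL
 *	0:	ret				// already at EL2 or EL1
 *	1:	armv8_switch_to_el2_m x0	// EL3 -> EL2, eret to caller
 */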