/*
 * Copyright (C) 2017 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kvm_host.h>
#include <linux/random.h>
#include <linux/memblock.h>
#include <asm/alternative.h>
#include <asm/debug-monitors.h>
#include <asm/insn.h>
#include <asm/kvm_mmu.h>

/*
 * The LSB of the random hyp VA tag or 0 if no randomization is used.
 */
static u8 tag_lsb;
/*
 * The random hyp VA tag value with the region bit if hyp randomization is used
 */
static u64 tag_val;
static u64 va_mask;

static void compute_layout(void)
{
	phys_addr_t idmap_addr = __pa_symbol(__hyp_idmap_text_start);
	u64 hyp_va_msb;
	int kva_msb;

	/* Where is my RAM region? */
	hyp_va_msb  = idmap_addr & BIT(VA_BITS - 1);
	hyp_va_msb ^= BIT(VA_BITS - 1);

	kva_msb = fls64((u64)phys_to_virt(memblock_start_of_DRAM()) ^
			(u64)(high_memory - 1));

	if (kva_msb == (VA_BITS - 1)) {
		/*
		 * No space in the address, let's compute the mask so
		 * that it covers (VA_BITS - 1) bits, and the region
		 * bit. The tag stays set to zero.
		 */
		va_mask  = BIT(VA_BITS - 1) - 1;
		va_mask |= hyp_va_msb;
	} else {
		/*
		 * We do have some free bits to insert a random tag.
		 * Hyp VAs are now created from kernel linear map VAs
		 * using the following formula (with V == VA_BITS):
		 *
		 *  63 ... V |     V-1    | V-2 .. tag_lsb | tag_lsb - 1 .. 0
		 *  ---------------------------------------------------------
		 * | 0000000 | hyp_va_msb |   random tag   |  kern linear VA |
		 */
		tag_lsb = kva_msb;
		va_mask = GENMASK_ULL(tag_lsb - 1, 0);
		tag_val = get_random_long() & GENMASK_ULL(VA_BITS - 2, tag_lsb);
		tag_val |= hyp_va_msb;
		tag_val >>= tag_lsb;
	}
}
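
/*
 * For example (values below are purely illustrative): with VA_BITS == 48
 * and RAM small enough that the start and end of its linear mapping only
 * differ in their low 38 bits, compute_layout() could end up with:
 *
 *	tag_lsb = 38
 *	va_mask = GENMASK_ULL(37, 0)
 *	tag_val = ((random bits [46:38]) | hyp_va_msb) >> 38
 *
 * so that the patched kern_hyp_va() computes:
 *
 *	hyp_va = (kern_va & va_mask) | (tag_val << tag_lsb)
 *
 * The kernel linear offset is preserved, the bits above it are replaced
 * by the random tag, and hyp_va_msb selects the half of the HYP VA space
 * opposite to the idmap.
 */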
static u32 compute_instruction(int n, u32 rd, u32 rn)
{
	u32 insn = AARCH64_BREAK_FAULT;

	switch (n) {
	case 0:
		insn = aarch64_insn_gen_logical_immediate(AARCH64_INSN_LOGIC_AND,
							  AARCH64_INSN_VARIANT_64BIT,
							  rn, rd, va_mask);
		break;

	case 1:
		/* ROR is a variant of EXTR with Rm = Rn */
		insn = aarch64_insn_gen_extr(AARCH64_INSN_VARIANT_64BIT,
					     rn, rn, rd,
					     tag_lsb);
		break;

	case 2:
		insn = aarch64_insn_gen_add_sub_imm(rd, rn,
						    tag_val & GENMASK(11, 0),
						    AARCH64_INSN_VARIANT_64BIT,
						    AARCH64_INSN_ADSB_ADD);
		break;

	case 3:
		insn = aarch64_insn_gen_add_sub_imm(rd, rn,
						    tag_val & GENMASK(23, 12),
						    AARCH64_INSN_VARIANT_64BIT,
						    AARCH64_INSN_ADSB_ADD);
		break;

	case 4:
		/* ROR is a variant of EXTR with Rm = Rn */
		insn = aarch64_insn_gen_extr(AARCH64_INSN_VARIANT_64BIT,
					     rn, rn, rd, 64 - tag_lsb);
		break;
	}

	return insn;
}

void __init kvm_update_va_mask(struct alt_instr *alt,
			       __le32 *origptr, __le32 *updptr, int nr_inst)
{
	int i;

	BUG_ON(nr_inst != 5);

	if (!has_vhe() && !va_mask)
		compute_layout();

	for (i = 0; i < nr_inst; i++) {
		u32 rd, rn, insn, oinsn;

		/*
		 * VHE doesn't need any address translation, let's NOP
		 * everything.
		 *
		 * Alternatively, if we don't have any spare bits in
		 * the address, NOP everything after masking that
		 * kernel VA.
		 */
		if (has_vhe() || (!tag_lsb && i > 0)) {
			updptr[i] = cpu_to_le32(aarch64_insn_gen_nop());
			continue;
		}

		oinsn = le32_to_cpu(origptr[i]);
		rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, oinsn);
		rn = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RN, oinsn);

		insn = compute_instruction(i, rd, rn);
		BUG_ON(insn == AARCH64_BREAK_FAULT);

		updptr[i] = cpu_to_le32(insn);
	}
}
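
/*
 * Sketch of the patched sequence at each kern_hyp_va() site (register
 * names and immediates are illustrative; they come from the original
 * instructions and from compute_layout()):
 *
 *	and	rd, rn, #va_mask		// keep the linear map offset
 *	ror	rd, rd, #tag_lsb		// rotate the (cleared) tag field down to bit 0
 *	add	rd, rd, #(tag_val & 0xfff)	// insert tag bits [11:0]
 *	add	rd, rd, #(tag_val & 0xfff000)	// insert tag bits [23:12]
 *	ror	rd, rd, #(64 - tag_lsb)		// rotate back: offset at bit 0, tag at tag_lsb
 *
 * On VHE no translation is needed and all five slots become NOPs; when
 * there is no room for a random tag, only the initial "and" (whose mask
 * then also covers the region bit) is kept.
 */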
void *__kvm_bp_vect_base;
int __kvm_harden_el2_vector_slot;

void kvm_patch_vector_branch(struct alt_instr *alt,
			     __le32 *origptr, __le32 *updptr, int nr_inst)
{
	u64 addr;
	u32 insn;

	BUG_ON(nr_inst != 5);

	if (has_vhe() || !cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
		WARN_ON_ONCE(cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS));
		return;
	}

	if (!va_mask)
		compute_layout();

	/*
	 * Compute HYP VA by using the same computation as kern_hyp_va()
	 */
	addr = (uintptr_t)kvm_ksym_ref(__kvm_hyp_vector);
	addr &= va_mask;
	addr |= tag_val << tag_lsb;

	/* Use PC[10:7] to branch to the same vector in KVM */
	addr |= ((u64)origptr & GENMASK_ULL(10, 7));

	/*
	 * Branch to the second instruction in the vectors in order to
	 * avoid the initial store on the stack (which we already
	 * perform in the hardening vectors).
	 */
	addr += AARCH64_INSN_SIZE;

	/* stp x0, x1, [sp, #-16]! */
	insn = aarch64_insn_gen_load_store_pair(AARCH64_INSN_REG_0,
						AARCH64_INSN_REG_1,
						AARCH64_INSN_REG_SP,
						-16,
						AARCH64_INSN_VARIANT_64BIT,
						AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX);
	*updptr++ = cpu_to_le32(insn);

	/* movz x0, #(addr & 0xffff) */
	insn = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0,
					 (u16)addr,
					 0,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_ZERO);
	*updptr++ = cpu_to_le32(insn);

	/* movk x0, #((addr >> 16) & 0xffff), lsl #16 */
	insn = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0,
					 (u16)(addr >> 16),
					 16,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_KEEP);
	*updptr++ = cpu_to_le32(insn);

	/* movk x0, #((addr >> 32) & 0xffff), lsl #32 */
	insn = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0,
					 (u16)(addr >> 32),
					 32,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_KEEP);
	*updptr++ = cpu_to_le32(insn);

	/* br x0 */
	insn = aarch64_insn_gen_branch_reg(AARCH64_INSN_REG_0,
					   AARCH64_INSN_BRANCH_NOLINK);
	*updptr++ = cpu_to_le32(insn);
}
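
/*
 * The patched slot therefore ends up containing, schematically (with
 * "addr" being the HYP VA of the second instruction of the matching
 * vector):
 *
 *	stp	x0, x1, [sp, #-16]!
 *	movz	x0, #(addr & 0xffff)
 *	movk	x0, #((addr >> 16) & 0xffff), lsl #16
 *	movk	x0, #((addr >> 32) & 0xffff), lsl #32
 *	br	x0
 *
 * Three move-wide instructions are enough because the masked and tagged
 * address fits in VA_BITS (at most 48) bits, leaving bits [63:48] clear.
 * x0 can be used to hold the target address since the branch target
 * expects x0/x1 to have been saved to the stack already, which the stp
 * above has done in place of the skipped first vector instruction.
 */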