// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright IBM Corp. 2007
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <linux/jiffies.h>
#include <linux/hrtimer.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>
#include <linux/clockchips.h>

#include <asm/reg.h>
#include <asm/time.h>
#include <asm/byteorder.h>
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>
#include <asm/sstep.h>
#include "timing.h"
#include "trace.h"

#ifdef CONFIG_PPC_FPU
static bool kvmppc_check_fp_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_FP)) {
		kvmppc_core_queue_fpunavail(vcpu);
		return true;
	}

	return false;
}
#endif /* CONFIG_PPC_FPU */

#ifdef CONFIG_VSX
static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_VSX)) {
		kvmppc_core_queue_vsx_unavail(vcpu);
		return true;
	}

	return false;
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_ALTIVEC
static bool kvmppc_check_altivec_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_VEC)) {
		kvmppc_core_queue_vec_unavail(vcpu);
		return true;
	}

	return false;
}
#endif /* CONFIG_ALTIVEC */

/*
 * XXX to do:
 * lfiwax, lfiwzx
 * vector loads and stores
 *
 * Instructions that trap when used on cache-inhibited mappings
 * are not emulated here: multiple and string instructions,
 * lq/stq, and the load-reserve/store-conditional instructions.
 */
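/*
 * kvmppc_emulate_loadstore() is reached when a guest load or store has
 * faulted on an emulated (MMIO) mapping.  analyse_instr() decodes the
 * instruction, the switch below records how data should move between
 * the register file and the MMIO buffer in the vcpu->arch.mmio_*
 * fields, and the kvmppc_handle_*() helpers carry out the access,
 * possibly completing it via a KVM_EXIT_MMIO exit to userspace.
 */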
int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 inst;
	int ra, rs, rt;
	enum emulation_result emulated = EMULATE_FAIL;
	int advance = 1;
	struct instruction_op op;

	/* this default type might be overwritten by subcategories */
	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

	emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
	if (emulated != EMULATE_DONE)
		return emulated;

	ra = get_ra(inst);
	rs = get_rs(inst);
	rt = get_rt(inst);

	vcpu->arch.mmio_vsx_copy_nums = 0;
	vcpu->arch.mmio_vsx_offset = 0;
	vcpu->arch.mmio_copy_type = KVMPPC_VSX_COPY_NONE;
	vcpu->arch.mmio_sp64_extend = 0;
	vcpu->arch.mmio_sign_extend = 0;
	vcpu->arch.mmio_vmx_copy_nums = 0;
	vcpu->arch.mmio_vmx_offset = 0;
	vcpu->arch.mmio_host_swabbed = 0;

	emulated = EMULATE_FAIL;
	vcpu->arch.regs.msr = vcpu->arch.shared->msr;
	if (analyse_instr(&op, &vcpu->arch.regs, inst) == 0) {
		int type = op.type & INSTR_TYPE_MASK;
		int size = GETSIZE(op.type);

		switch (type) {
		case LOAD: {
			int instr_byte_swap = op.type & BYTEREV;

			if (op.type & SIGNEXT)
				emulated = kvmppc_handle_loads(run, vcpu,
						op.reg, size, !instr_byte_swap);
			else
				emulated = kvmppc_handle_load(run, vcpu,
						op.reg, size, !instr_byte_swap);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

			break;
		}
#ifdef CONFIG_PPC_FPU
		case LOAD_FP:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;

			if (op.type & FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			if (op.type & SIGNEXT)
				emulated = kvmppc_handle_loads(run, vcpu,
						KVM_MMIO_REG_FPR|op.reg, size, 1);
			else
				emulated = kvmppc_handle_load(run, vcpu,
						KVM_MMIO_REG_FPR|op.reg, size, 1);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

			break;
#endif
#ifdef CONFIG_ALTIVEC
		case LOAD_VMX:
			if (kvmppc_check_altivec_disabled(vcpu))
				return EMULATE_DONE;

			/* Hardware enforces alignment of VMX accesses */
			vcpu->arch.vaddr_accessed &= ~((unsigned long)size - 1);
			vcpu->arch.paddr_accessed &= ~((unsigned long)size - 1);

			if (size == 16) { /* lvx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_DWORD;
			} else if (size == 4) { /* lvewx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_WORD;
			} else if (size == 2) { /* lvehx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_HWORD;
			} else if (size == 1) { /* lvebx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_BYTE;
			} else
				break;

			vcpu->arch.mmio_vmx_offset =
				(vcpu->arch.vaddr_accessed & 0xf)/size;

			if (size == 16) {
				vcpu->arch.mmio_vmx_copy_nums = 2;
				emulated = kvmppc_handle_vmx_load(run,
						vcpu, KVM_MMIO_REG_VMX|op.reg,
						8, 1);
			} else {
				vcpu->arch.mmio_vmx_copy_nums = 1;
				emulated = kvmppc_handle_vmx_load(run, vcpu,
						KVM_MMIO_REG_VMX|op.reg,
						size, 1);
			}
			break;
#endif
#ifdef CONFIG_VSX
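		/*
		 * VSX accesses are carried out as one or more narrower MMIO
		 * copies: mmio_vsx_copy_nums counts how many copies remain,
		 * and mmio_copy_type tells the MMIO completion code how each
		 * piece maps onto the target VSR.
		 */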
		case LOAD_VSX: {
			int io_size_each;

			if (op.vsx_flags & VSX_CHECK_VEC) {
				if (kvmppc_check_altivec_disabled(vcpu))
					return EMULATE_DONE;
			} else {
				if (kvmppc_check_vsx_disabled(vcpu))
					return EMULATE_DONE;
			}

			if (op.vsx_flags & VSX_FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			if (op.element_size == 8) {
				if (op.vsx_flags & VSX_SPLAT)
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_DWORD_LOAD_DUMP;
				else
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_DWORD;
			} else if (op.element_size == 4) {
				if (op.vsx_flags & VSX_SPLAT)
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_WORD_LOAD_DUMP;
				else
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_WORD;
			} else
				break;

			if (size < op.element_size) {
				/* precision convert case: lxsspx, etc */
				vcpu->arch.mmio_vsx_copy_nums = 1;
				io_size_each = size;
			} else { /* lxvw4x, lxvd2x, etc */
				vcpu->arch.mmio_vsx_copy_nums =
					size/op.element_size;
				io_size_each = op.element_size;
			}

			emulated = kvmppc_handle_vsx_load(run, vcpu,
					KVM_MMIO_REG_VSX|op.reg, io_size_each,
					1, op.type & SIGNEXT);
			break;
		}
#endif
		case STORE:
			/* If byte reversal is needed, op.val has already been
			 * reversed by analyse_instr().
			 */
			emulated = kvmppc_handle_store(run, vcpu, op.val,
					size, 1);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

			break;
#ifdef CONFIG_PPC_FPU
		case STORE_FP:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;

			/* The FP registers need to be flushed so that
			 * kvmppc_handle_store() can read actual FP vals
			 * from vcpu->arch.
			 */
			if (vcpu->kvm->arch.kvm_ops->giveup_ext)
				vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
						MSR_FP);

			if (op.type & FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			emulated = kvmppc_handle_store(run, vcpu,
					VCPU_FPR(vcpu, op.reg), size, 1);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

			break;
#endif
#ifdef CONFIG_ALTIVEC
		case STORE_VMX:
			if (kvmppc_check_altivec_disabled(vcpu))
				return EMULATE_DONE;

			/* Hardware enforces alignment of VMX accesses. */
			vcpu->arch.vaddr_accessed &= ~((unsigned long)size - 1);
			vcpu->arch.paddr_accessed &= ~((unsigned long)size - 1);

			if (vcpu->kvm->arch.kvm_ops->giveup_ext)
				vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
						MSR_VEC);
			if (size == 16) { /* stvx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_DWORD;
			} else if (size == 4) { /* stvewx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_WORD;
			} else if (size == 2) { /* stvehx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_HWORD;
			} else if (size == 1) { /* stvebx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_BYTE;
			} else
				break;

			vcpu->arch.mmio_vmx_offset =
				(vcpu->arch.vaddr_accessed & 0xf)/size;

			if (size == 16) {
				vcpu->arch.mmio_vmx_copy_nums = 2;
				emulated = kvmppc_handle_vmx_store(run,
						vcpu, op.reg, 8, 1);
			} else {
				vcpu->arch.mmio_vmx_copy_nums = 1;
				emulated = kvmppc_handle_vmx_store(run,
						vcpu, op.reg, size, 1);
			}

			break;
#endif
#ifdef CONFIG_VSX
		case STORE_VSX: {
			int io_size_each;

			if (op.vsx_flags & VSX_CHECK_VEC) {
				if (kvmppc_check_altivec_disabled(vcpu))
					return EMULATE_DONE;
			} else {
				if (kvmppc_check_vsx_disabled(vcpu))
					return EMULATE_DONE;
			}

			if (vcpu->kvm->arch.kvm_ops->giveup_ext)
				vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
						MSR_VSX);

			if (op.vsx_flags & VSX_FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			if (op.element_size == 8)
				vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_DWORD;
			else if (op.element_size == 4)
				vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_WORD;
			else
				break;

			if (size < op.element_size) {
				/* precise conversion case, like stxsspx */
				vcpu->arch.mmio_vsx_copy_nums = 1;
				io_size_each = size;
			} else { /* stxvw4x, stxvd2x, etc */
				vcpu->arch.mmio_vsx_copy_nums =
						size/op.element_size;
				io_size_each = op.element_size;
			}

			emulated = kvmppc_handle_vsx_store(run, vcpu,
					op.reg, io_size_each, 1);
			break;
		}
#endif
		case CACHEOP:
			/* Do nothing. The guest is performing dcbi because
			 * hardware DMA is not snooped by the dcache, but
			 * emulated DMA either goes through the dcache as
			 * normal writes, or the host kernel has handled dcache
			 * coherence.
			 */
			emulated = EMULATE_DONE;
			break;
		default:
			break;
		}
	}

	if (emulated == EMULATE_FAIL) {
		advance = 0;
		kvmppc_core_queue_program(vcpu, 0);
	}

	trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);

	/* Advance past emulated instruction. */
	if (advance)
		kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);

	return emulated;
}