// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright IBM Corp. 2008
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <linux/kvm_host.h>
#include <asm/disassemble.h>

#include "booke.h"

#define OP_19_XOP_RFI     50
#define OP_19_XOP_RFCI    51
#define OP_19_XOP_RFDI    39

#define OP_31_XOP_MFMSR   83
#define OP_31_XOP_WRTEE   131
#define OP_31_XOP_MTMSR   146
#define OP_31_XOP_WRTEEI  163

static void kvmppc_emul_rfi(struct kvm_vcpu *vcpu)
{
	vcpu->arch.regs.nip = vcpu->arch.shared->srr0;
	kvmppc_set_msr(vcpu, vcpu->arch.shared->srr1);
}

static void kvmppc_emul_rfdi(struct kvm_vcpu *vcpu)
{
	vcpu->arch.regs.nip = vcpu->arch.dsrr0;
	kvmppc_set_msr(vcpu, vcpu->arch.dsrr1);
}

static void kvmppc_emul_rfci(struct kvm_vcpu *vcpu)
{
	vcpu->arch.regs.nip = vcpu->arch.csrr0;
	kvmppc_set_msr(vcpu, vcpu->arch.csrr1);
}

int kvmppc_booke_emulate_op(struct kvm_vcpu *vcpu,
			    unsigned int inst, int *advance)
{
	int emulated = EMULATE_DONE;
	int rs = get_rs(inst);
	int rt = get_rt(inst);

	switch (get_op(inst)) {
	case 19:
		switch (get_xop(inst)) {
		case OP_19_XOP_RFI:
			kvmppc_emul_rfi(vcpu);
			kvmppc_set_exit_type(vcpu, EMULATED_RFI_EXITS);
			*advance = 0;
			break;

		case OP_19_XOP_RFCI:
			kvmppc_emul_rfci(vcpu);
			kvmppc_set_exit_type(vcpu, EMULATED_RFCI_EXITS);
			*advance = 0;
			break;

		case OP_19_XOP_RFDI:
			kvmppc_emul_rfdi(vcpu);
			kvmppc_set_exit_type(vcpu, EMULATED_RFDI_EXITS);
			*advance = 0;
			break;

		default:
			emulated = EMULATE_FAIL;
			break;
		}
		break;

	case 31:
		switch (get_xop(inst)) {

		case OP_31_XOP_MFMSR:
			kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->msr);
			kvmppc_set_exit_type(vcpu, EMULATED_MFMSR_EXITS);
			break;

		case OP_31_XOP_MTMSR:
			kvmppc_set_exit_type(vcpu, EMULATED_MTMSR_EXITS);
			kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs));
			break;

		case OP_31_XOP_WRTEE:
			vcpu->arch.shared->msr = (vcpu->arch.shared->msr & ~MSR_EE)
					| (kvmppc_get_gpr(vcpu, rs) & MSR_EE);
			kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS);
			break;

		case OP_31_XOP_WRTEEI:
			/* The E field of wrteei sits at the same bit position as MSR[EE]. */
			vcpu->arch.shared->msr = (vcpu->arch.shared->msr & ~MSR_EE)
					| (inst & MSR_EE);
			kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS);
			break;

		default:
			emulated = EMULATE_FAIL;
		}

		break;

	default:
		emulated = EMULATE_FAIL;
	}

	return emulated;
}

/*
 * NOTE: some of these registers are not emulated on BOOKE_HV (GS-mode).
 * Their backing store is in real registers, and these functions
 * will return the wrong result if called for them in another context
 * (such as debugging).
 */
int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
{
	int emulated = EMULATE_DONE;
	bool debug_inst = false;

	switch (sprn) {
	case SPRN_DEAR:
		vcpu->arch.shared->dar = spr_val;
		break;
	case SPRN_ESR:
		vcpu->arch.shared->esr = spr_val;
		break;
	case SPRN_CSRR0:
		vcpu->arch.csrr0 = spr_val;
		break;
	case SPRN_CSRR1:
		vcpu->arch.csrr1 = spr_val;
		break;
	case SPRN_DSRR0:
		vcpu->arch.dsrr0 = spr_val;
		break;
	case SPRN_DSRR1:
		vcpu->arch.dsrr1 = spr_val;
		break;
	case SPRN_IAC1:
		/*
		 * If userspace is debugging the guest then the guest
		 * cannot access debug registers.
		 */
		if (vcpu->guest_debug)
			break;

		debug_inst = true;
		vcpu->arch.dbg_reg.iac1 = spr_val;
		break;
	case SPRN_IAC2:
		/*
		 * If userspace is debugging the guest then the guest
		 * cannot access debug registers.
		 */
		if (vcpu->guest_debug)
			break;

		debug_inst = true;
		vcpu->arch.dbg_reg.iac2 = spr_val;
		break;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	case SPRN_IAC3:
		/*
		 * If userspace is debugging the guest then the guest
		 * cannot access debug registers.
		 */
		if (vcpu->guest_debug)
			break;

		debug_inst = true;
		vcpu->arch.dbg_reg.iac3 = spr_val;
		break;
	case SPRN_IAC4:
		/*
		 * If userspace is debugging the guest then the guest
		 * cannot access debug registers.
		 */
		if (vcpu->guest_debug)
			break;

		debug_inst = true;
		vcpu->arch.dbg_reg.iac4 = spr_val;
		break;
#endif
	case SPRN_DAC1:
		/*
		 * If userspace is debugging the guest then the guest
		 * cannot access debug registers.
		 */
		if (vcpu->guest_debug)
			break;

		debug_inst = true;
		vcpu->arch.dbg_reg.dac1 = spr_val;
		break;
	case SPRN_DAC2:
		/*
		 * If userspace is debugging the guest then the guest
		 * cannot access debug registers.
		 */
		if (vcpu->guest_debug)
			break;

		debug_inst = true;
		vcpu->arch.dbg_reg.dac2 = spr_val;
		break;
	case SPRN_DBCR0:
		/*
		 * If userspace is debugging the guest then the guest
		 * cannot access debug registers.
		 */
		if (vcpu->guest_debug)
			break;

		debug_inst = true;
		spr_val &= (DBCR0_IDM | DBCR0_IC | DBCR0_BT | DBCR0_TIE |
			    DBCR0_IAC1 | DBCR0_IAC2 | DBCR0_IAC3 | DBCR0_IAC4 |
			    DBCR0_DAC1R | DBCR0_DAC1W | DBCR0_DAC2R | DBCR0_DAC2W);

		vcpu->arch.dbg_reg.dbcr0 = spr_val;
		break;
	case SPRN_DBCR1:
		/*
		 * If userspace is debugging the guest then the guest
		 * cannot access debug registers.
		 */
		if (vcpu->guest_debug)
			break;

		debug_inst = true;
		vcpu->arch.dbg_reg.dbcr1 = spr_val;
		break;
	case SPRN_DBCR2:
		/*
		 * If userspace is debugging the guest then the guest
		 * cannot access debug registers.
		 */
		if (vcpu->guest_debug)
			break;

		debug_inst = true;
		vcpu->arch.dbg_reg.dbcr2 = spr_val;
		break;
	case SPRN_DBSR:
		/*
		 * If userspace is debugging the guest then the guest
		 * cannot access debug registers.
		 */
		if (vcpu->guest_debug)
			break;

		vcpu->arch.dbsr &= ~spr_val;
		if (!(vcpu->arch.dbsr & ~DBSR_IDE))
			kvmppc_core_dequeue_debug(vcpu);
		break;
	case SPRN_TSR:
		kvmppc_clr_tsr_bits(vcpu, spr_val);
		break;
	case SPRN_TCR:
		/*
		 * WRC is a 2-bit field that is supposed to preserve its
		 * value once written to non-zero.
		 */
		if (vcpu->arch.tcr & TCR_WRC_MASK) {
			spr_val &= ~TCR_WRC_MASK;
			spr_val |= vcpu->arch.tcr & TCR_WRC_MASK;
		}
		kvmppc_set_tcr(vcpu, spr_val);
		break;

	case SPRN_DECAR:
		vcpu->arch.decar = spr_val;
		break;
	/*
	 * Note: SPRG4-7 are user-readable.
	 * These values are loaded into the real SPRGs when resuming the
	 * guest (PR-mode only).
	 */
	case SPRN_SPRG4:
		kvmppc_set_sprg4(vcpu, spr_val);
		break;
	case SPRN_SPRG5:
		kvmppc_set_sprg5(vcpu, spr_val);
		break;
	case SPRN_SPRG6:
		kvmppc_set_sprg6(vcpu, spr_val);
		break;
	case SPRN_SPRG7:
		kvmppc_set_sprg7(vcpu, spr_val);
		break;

	case SPRN_IVPR:
		vcpu->arch.ivpr = spr_val;
#ifdef CONFIG_KVM_BOOKE_HV
		mtspr(SPRN_GIVPR, spr_val);
#endif
		break;
	case SPRN_IVOR0:
		vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = spr_val;
		break;
	case SPRN_IVOR1:
		vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = spr_val;
		break;
	case SPRN_IVOR2:
		vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = spr_val;
#ifdef CONFIG_KVM_BOOKE_HV
		mtspr(SPRN_GIVOR2, spr_val);
#endif
		break;
	case SPRN_IVOR3:
		vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = spr_val;
		break;
	case SPRN_IVOR4:
		vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = spr_val;
		break;
	case SPRN_IVOR5:
		vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = spr_val;
		break;
	case SPRN_IVOR6:
		vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = spr_val;
		break;
	case SPRN_IVOR7:
		vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = spr_val;
		break;
	case SPRN_IVOR8:
		vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = spr_val;
#ifdef CONFIG_KVM_BOOKE_HV
		mtspr(SPRN_GIVOR8, spr_val);
#endif
		break;
	case SPRN_IVOR9:
		vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = spr_val;
		break;
	case SPRN_IVOR10:
		vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = spr_val;
		break;
	case SPRN_IVOR11:
		vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = spr_val;
		break;
	case SPRN_IVOR12:
		vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = spr_val;
		break;
	case SPRN_IVOR13:
		vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = spr_val;
		break;
	case SPRN_IVOR14:
		vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = spr_val;
		break;
	case SPRN_IVOR15:
		vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = spr_val;
		break;
	case SPRN_MCSR:
		vcpu->arch.mcsr &= ~spr_val;
		break;
#if defined(CONFIG_64BIT)
	case SPRN_EPCR:
		kvmppc_set_epcr(vcpu, spr_val);
#ifdef CONFIG_KVM_BOOKE_HV
		mtspr(SPRN_EPCR, vcpu->arch.shadow_epcr);
#endif
		break;
#endif
	default:
		emulated = EMULATE_FAIL;
	}

	if (debug_inst) {
		current->thread.debug = vcpu->arch.dbg_reg;
		switch_booke_debug_regs(&vcpu->arch.dbg_reg);
	}
	return emulated;
}

int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
{
	int emulated = EMULATE_DONE;

	switch (sprn) {
	case SPRN_IVPR:
		*spr_val = vcpu->arch.ivpr;
		break;
	case SPRN_DEAR:
		*spr_val = vcpu->arch.shared->dar;
		break;
	case SPRN_ESR:
		*spr_val = vcpu->arch.shared->esr;
		break;
	case SPRN_EPR:
		*spr_val = vcpu->arch.epr;
		break;
	case SPRN_CSRR0:
		*spr_val = vcpu->arch.csrr0;
		break;
	case SPRN_CSRR1:
		*spr_val = vcpu->arch.csrr1;
		break;
	case SPRN_DSRR0:
		*spr_val = vcpu->arch.dsrr0;
		break;
	case SPRN_DSRR1:
		*spr_val = vcpu->arch.dsrr1;
		break;
	case SPRN_IAC1:
		*spr_val = vcpu->arch.dbg_reg.iac1;
		break;
	case SPRN_IAC2:
		*spr_val = vcpu->arch.dbg_reg.iac2;
		break;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	case SPRN_IAC3:
		*spr_val = vcpu->arch.dbg_reg.iac3;
		break;
	case SPRN_IAC4:
		*spr_val = vcpu->arch.dbg_reg.iac4;
		break;
#endif
	case SPRN_DAC1:
		*spr_val = vcpu->arch.dbg_reg.dac1;
		break;
	case SPRN_DAC2:
		*spr_val = vcpu->arch.dbg_reg.dac2;
		break;
	case SPRN_DBCR0:
		*spr_val = vcpu->arch.dbg_reg.dbcr0;
		if (vcpu->guest_debug)
			*spr_val = *spr_val | DBCR0_EDM;
		break;
	case SPRN_DBCR1:
		*spr_val = vcpu->arch.dbg_reg.dbcr1;
		break;
	case SPRN_DBCR2:
		*spr_val = vcpu->arch.dbg_reg.dbcr2;
		break;
	case SPRN_DBSR:
		*spr_val = vcpu->arch.dbsr;
		break;
	case SPRN_TSR:
		*spr_val = vcpu->arch.tsr;
		break;
	case SPRN_TCR:
		*spr_val = vcpu->arch.tcr;
		break;

	case SPRN_IVOR0:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL];
		break;
	case SPRN_IVOR1:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK];
		break;
	case SPRN_IVOR2:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE];
		break;
	case SPRN_IVOR3:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE];
		break;
	case SPRN_IVOR4:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL];
		break;
	case SPRN_IVOR5:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT];
		break;
	case SPRN_IVOR6:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM];
		break;
	case SPRN_IVOR7:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL];
		break;
	case SPRN_IVOR8:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL];
		break;
	case SPRN_IVOR9:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL];
		break;
	case SPRN_IVOR10:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER];
		break;
	case SPRN_IVOR11:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT];
		break;
	case SPRN_IVOR12:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG];
		break;
	case SPRN_IVOR13:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS];
		break;
	case SPRN_IVOR14:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS];
		break;
	case SPRN_IVOR15:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG];
		break;
	case SPRN_MCSR:
		*spr_val = vcpu->arch.mcsr;
		break;
#if defined(CONFIG_64BIT)
	case SPRN_EPCR:
		*spr_val = vcpu->arch.epcr;
		break;
#endif

	default:
		emulated = EMULATE_FAIL;
	}

	return emulated;
}