/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <linux/jiffies.h>
#include <linux/hrtimer.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>

#include <asm/reg.h>
#include <asm/time.h>
#include <asm/byteorder.h>
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include "timing.h"
#include "trace.h"

#define OP_TRAP 3
#define OP_TRAP_64 2

#define OP_31_XOP_LWZX      23
#define OP_31_XOP_LBZX      87
#define OP_31_XOP_STWX      151
#define OP_31_XOP_STBX      215
#define OP_31_XOP_LBZUX     119
#define OP_31_XOP_STBUX     247
#define OP_31_XOP_LHZX      279
#define OP_31_XOP_LHZUX     311
#define OP_31_XOP_MFSPR     339
#define OP_31_XOP_LHAX      343
#define OP_31_XOP_STHX      407
#define OP_31_XOP_STHUX     439
#define OP_31_XOP_MTSPR     467
#define OP_31_XOP_DCBI      470
#define OP_31_XOP_LWBRX     534
#define OP_31_XOP_TLBSYNC   566
#define OP_31_XOP_STWBRX    662
#define OP_31_XOP_LHBRX     790
#define OP_31_XOP_STHBRX    918

#define OP_LWZ  32
#define OP_LWZU 33
#define OP_LBZ  34
#define OP_LBZU 35
#define OP_STW  36
#define OP_STWU 37
#define OP_STB  38
#define OP_STBU 39
#define OP_LHZ  40
#define OP_LHZU 41
#define OP_LHA  42
#define OP_LHAU 43
#define OP_STH  44
#define OP_STHU 45

#ifdef CONFIG_PPC_BOOK3S
static int kvmppc_dec_enabled(struct kvm_vcpu *vcpu)
{
	return 1;
}
#else
static int kvmppc_dec_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.tcr & TCR_DIE;
}
#endif

void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
{
	unsigned long dec_nsec;

	pr_debug("mtDEC: %x\n", vcpu->arch.dec);
#ifdef CONFIG_PPC_BOOK3S
	/* mtdec lowers the interrupt line when positive. */
	kvmppc_core_dequeue_dec(vcpu);

	/* POWER4+ triggers a dec interrupt if the value is < 0 */
	if (vcpu->arch.dec & 0x80000000) {
		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
		kvmppc_core_queue_dec(vcpu);
		return;
	}
#endif
	if (kvmppc_dec_enabled(vcpu)) {
		/* The decrementer ticks at the same rate as the timebase, so
		 * that's how we convert the guest DEC value to the number of
		 * host ticks. */

		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
		dec_nsec = vcpu->arch.dec;
		dec_nsec *= 1000;
		dec_nsec /= tb_ticks_per_usec;
		hrtimer_start(&vcpu->arch.dec_timer, ktime_set(0, dec_nsec),
			      HRTIMER_MODE_REL);
		vcpu->arch.dec_jiffies = get_tb();
	} else {
		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
	}
}
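
/*
 * Worked example of the conversion above (illustrative only, assuming
 * a 512 MHz timebase, i.e. tb_ticks_per_usec == 512): a guest mtdec of
 * 0x00100000 (1048576 ticks) gives 1048576 * 1000 / 512 = 2048000 ns,
 * so the hrtimer fires roughly 2 ms after the write.
 */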

/* XXX to do:
 * lhax
 * lhaux
 * lswx
 * lswi
 * stswx
 * stswi
 * lha
 * lhau
 * lmw
 * stmw
 *
 * XXX is_bigendian should depend on MMU mapping or MSR[LE]
 */
/* XXX Should probably auto-generate instruction decoding for a particular core
 * from opcode tables in the future. */
int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	u32 inst = kvmppc_get_last_inst(vcpu);
	u32 ea;
	int ra;
	int rb;
	int rs;
	int rt;
	int sprn;
	enum emulation_result emulated = EMULATE_DONE;
	int advance = 1;

	/* this default type might be overwritten by subcategories */
	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

	pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));

	switch (get_op(inst)) {
	case OP_TRAP:
#ifdef CONFIG_PPC_BOOK3S
	case OP_TRAP_64:
		kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
#else
		kvmppc_core_queue_program(vcpu, vcpu->arch.esr | ESR_PTR);
#endif
		advance = 0;
		break;

	case 31:
		switch (get_xop(inst)) {

		case OP_31_XOP_LWZX:
			rt = get_rt(inst);
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
			break;

		case OP_31_XOP_LBZX:
			rt = get_rt(inst);
			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
			break;

		case OP_31_XOP_LBZUX:
			rt = get_rt(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			ea = kvmppc_get_gpr(vcpu, rb);
			if (ra)
				ea += kvmppc_get_gpr(vcpu, ra);

			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
			kvmppc_set_gpr(vcpu, ra, ea);
			break;

		case OP_31_XOP_STWX:
			rs = get_rs(inst);
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
						       4, 1);
			break;

		case OP_31_XOP_STBX:
			rs = get_rs(inst);
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
						       1, 1);
			break;

		case OP_31_XOP_STBUX:
			rs = get_rs(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			ea = kvmppc_get_gpr(vcpu, rb);
			if (ra)
				ea += kvmppc_get_gpr(vcpu, ra);

			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
						       1, 1);
			/* stbux writes the EA back to RA, not RS */
			kvmppc_set_gpr(vcpu, ra, ea);
			break;

		case OP_31_XOP_LHAX:
			rt = get_rt(inst);
			emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
			break;

		case OP_31_XOP_LHZX:
			rt = get_rt(inst);
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			break;

		case OP_31_XOP_LHZUX:
			rt = get_rt(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			ea = kvmppc_get_gpr(vcpu, rb);
			if (ra)
				ea += kvmppc_get_gpr(vcpu, ra);

			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			kvmppc_set_gpr(vcpu, ra, ea);
			break;
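
		/*
		 * The update-form cases above (lbzux, stbux, lhzux, and
		 * sthux below) all share one addressing pattern:
		 * EA = (RA|0) + (RB), with the EA then written back to RA.
		 * A minimal sketch of that pattern as a helper (hypothetical,
		 * not part of this file):
		 *
		 *	static u32 kvmppc_get_ea_indexed(struct kvm_vcpu *vcpu,
		 *					 int ra, int rb)
		 *	{
		 *		u32 ea = kvmppc_get_gpr(vcpu, rb);
		 *		if (ra)
		 *			ea += kvmppc_get_gpr(vcpu, ra);
		 *		return ea;
		 *	}
		 *
		 * The Power ISA makes RA=0 (and RA=RT for loads) an invalid
		 * form for update-mode instructions, so the RA check is
		 * purely defensive.
		 */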

		case OP_31_XOP_MFSPR:
			sprn = get_sprn(inst);
			rt = get_rt(inst);

			switch (sprn) {
			case SPRN_SRR0:
				kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->srr0);
				break;
			case SPRN_SRR1:
				kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->srr1);
				break;
			case SPRN_PVR:
				kvmppc_set_gpr(vcpu, rt, vcpu->arch.pvr); break;
			case SPRN_PIR:
				kvmppc_set_gpr(vcpu, rt, vcpu->vcpu_id); break;
			case SPRN_MSSSR0:
				kvmppc_set_gpr(vcpu, rt, 0); break;

			/* Note: mftb and TBRL/TBWL are user-accessible, so
			 * the guest can always access the real TB anyway.
			 * In fact, we probably will never see these traps. */
			case SPRN_TBWL:
				kvmppc_set_gpr(vcpu, rt, get_tb() >> 32); break;
			case SPRN_TBWU:
				kvmppc_set_gpr(vcpu, rt, get_tb()); break;

			case SPRN_SPRG0:
				kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg0);
				break;
			case SPRN_SPRG1:
				kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg1);
				break;
			case SPRN_SPRG2:
				kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg2);
				break;
			case SPRN_SPRG3:
				kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg3);
				break;
			/* Note: SPRG4-7 are user-readable, so we don't get
			 * a trap. */

			case SPRN_DEC:
			{
				u64 jd = get_tb() - vcpu->arch.dec_jiffies;
				kvmppc_set_gpr(vcpu, rt, vcpu->arch.dec - jd);
				pr_debug("mfDEC: %x - %llx = %lx\n",
					 vcpu->arch.dec, jd,
					 kvmppc_get_gpr(vcpu, rt));
				break;
			}
			default:
				emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, rt);
				if (emulated == EMULATE_FAIL) {
					printk(KERN_INFO "mfspr: unknown spr %x\n", sprn);
					kvmppc_set_gpr(vcpu, rt, 0);
				}
				break;
			}
			break;

		case OP_31_XOP_STHX:
			rs = get_rs(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
						       2, 1);
			break;

		case OP_31_XOP_STHUX:
			rs = get_rs(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			ea = kvmppc_get_gpr(vcpu, rb);
			if (ra)
				ea += kvmppc_get_gpr(vcpu, ra);

			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
						       2, 1);
			kvmppc_set_gpr(vcpu, ra, ea);
			break;

		case OP_31_XOP_MTSPR:
			sprn = get_sprn(inst);
			rs = get_rs(inst);
			switch (sprn) {
			case SPRN_SRR0:
				vcpu->arch.shared->srr0 = kvmppc_get_gpr(vcpu, rs);
				break;
			case SPRN_SRR1:
				vcpu->arch.shared->srr1 = kvmppc_get_gpr(vcpu, rs);
				break;

			/* XXX We need to context-switch the timebase for
			 * watchdog and FIT. */
			case SPRN_TBWL: break;
			case SPRN_TBWU: break;

			case SPRN_MSSSR0: break;

			case SPRN_DEC:
				vcpu->arch.dec = kvmppc_get_gpr(vcpu, rs);
				kvmppc_emulate_dec(vcpu);
				break;

			case SPRN_SPRG0:
				vcpu->arch.shared->sprg0 = kvmppc_get_gpr(vcpu, rs);
				break;
			case SPRN_SPRG1:
				vcpu->arch.shared->sprg1 = kvmppc_get_gpr(vcpu, rs);
				break;
			case SPRN_SPRG2:
				vcpu->arch.shared->sprg2 = kvmppc_get_gpr(vcpu, rs);
				break;
			case SPRN_SPRG3:
				vcpu->arch.shared->sprg3 = kvmppc_get_gpr(vcpu, rs);
				break;

			default:
				emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, rs);
				if (emulated == EMULATE_FAIL)
					printk(KERN_INFO "mtspr: unknown spr %x\n", sprn);
				break;
			}
			break;

		case OP_31_XOP_DCBI:
			/* Do nothing. The guest is performing dcbi because
			 * hardware DMA is not snooped by the dcache, but
			 * emulated DMA either goes through the dcache as
			 * normal writes, or the host kernel has handled dcache
			 * coherence. */
			break;
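
		/*
		 * The byte-reversed forms below (lwbrx, stwbrx, lhbrx,
		 * sthbrx) differ from their plain counterparts only in the
		 * final argument to kvmppc_handle_load()/kvmppc_handle_store():
		 * 1 requests the normal big-endian byte order, 0 asks the
		 * MMIO layer to byte-swap, e.g.
		 *	kvmppc_handle_load(run, vcpu, rt, 4, 0);
		 * loads a 32-bit value and reverses its bytes before it
		 * lands in GPR rt.
		 */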

		case OP_31_XOP_LWBRX:
			rt = get_rt(inst);
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
			break;

		case OP_31_XOP_TLBSYNC:
			break;

		case OP_31_XOP_STWBRX:
			rs = get_rs(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
						       4, 0);
			break;

		case OP_31_XOP_LHBRX:
			rt = get_rt(inst);
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
			break;

		case OP_31_XOP_STHBRX:
			rs = get_rs(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
						       2, 0);
			break;

		default:
			/* Attempt core-specific emulation below. */
			emulated = EMULATE_FAIL;
		}
		break;

	case OP_LWZ:
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		break;

	case OP_LWZU:
		ra = get_ra(inst);
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
		break;

	case OP_LBZ:
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
		break;

	case OP_LBZU:
		ra = get_ra(inst);
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
		break;

	case OP_STW:
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
					       4, 1);
		break;

	case OP_STWU:
		ra = get_ra(inst);
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
					       4, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
		break;

	case OP_STB:
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
					       1, 1);
		break;

	case OP_STBU:
		ra = get_ra(inst);
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
					       1, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
		break;

	case OP_LHZ:
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
		break;

	case OP_LHZU:
		ra = get_ra(inst);
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
		break;

	case OP_LHA:
		rt = get_rt(inst);
		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
		break;

	case OP_LHAU:
		ra = get_ra(inst);
		rt = get_rt(inst);
		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
		break;

	case OP_STH:
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
					       2, 1);
		break;

	case OP_STHU:
		ra = get_ra(inst);
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
					       2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
		break;

	default:
		emulated = EMULATE_FAIL;
	}
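
	/*
	 * Anything not matched above falls through with EMULATE_FAIL and
	 * gets a second chance in the core-specific hook below
	 * (kvmppc_core_emulate_op); only if that also fails do we log the
	 * instruction and queue a program interrupt for the guest.
	 */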
	if (emulated == EMULATE_FAIL) {
		emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance);
		if (emulated == EMULATE_AGAIN) {
			advance = 0;
		} else if (emulated == EMULATE_FAIL) {
			advance = 0;
			printk(KERN_ERR "Couldn't emulate instruction 0x%08x "
			       "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst));
			kvmppc_core_queue_program(vcpu, 0);
		}
	}

	trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);

	/* Advance past emulated instruction. */
	if (advance)
		kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);

	return emulated;
}