1 /* 2 * This program is free software; you can redistribute it and/or modify 3 * it under the terms of the GNU General Public License, version 2, as 4 * published by the Free Software Foundation. 5 * 6 * This program is distributed in the hope that it will be useful, 7 * but WITHOUT ANY WARRANTY; without even the implied warranty of 8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 9 * GNU General Public License for more details. 10 * 11 * You should have received a copy of the GNU General Public License 12 * along with this program; if not, write to the Free Software 13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 14 * 15 * Copyright IBM Corp. 2007 16 * Copyright 2011 Freescale Semiconductor, Inc. 17 * 18 * Authors: Hollis Blanchard <hollisb@us.ibm.com> 19 */ 20 21 #include <linux/jiffies.h> 22 #include <linux/hrtimer.h> 23 #include <linux/types.h> 24 #include <linux/string.h> 25 #include <linux/kvm_host.h> 26 27 #include <asm/reg.h> 28 #include <asm/time.h> 29 #include <asm/byteorder.h> 30 #include <asm/kvm_ppc.h> 31 #include <asm/disassemble.h> 32 #include "timing.h" 33 #include "trace.h" 34 35 #define OP_TRAP 3 36 #define OP_TRAP_64 2 37 38 #define OP_31_XOP_LWZX 23 39 #define OP_31_XOP_LBZX 87 40 #define OP_31_XOP_STWX 151 41 #define OP_31_XOP_STBX 215 42 #define OP_31_XOP_LBZUX 119 43 #define OP_31_XOP_STBUX 247 44 #define OP_31_XOP_LHZX 279 45 #define OP_31_XOP_LHZUX 311 46 #define OP_31_XOP_MFSPR 339 47 #define OP_31_XOP_LHAX 343 48 #define OP_31_XOP_STHX 407 49 #define OP_31_XOP_STHUX 439 50 #define OP_31_XOP_MTSPR 467 51 #define OP_31_XOP_DCBI 470 52 #define OP_31_XOP_LWBRX 534 53 #define OP_31_XOP_TLBSYNC 566 54 #define OP_31_XOP_STWBRX 662 55 #define OP_31_XOP_LHBRX 790 56 #define OP_31_XOP_STHBRX 918 57 58 #define OP_LWZ 32 59 #define OP_LWZU 33 60 #define OP_LBZ 34 61 #define OP_LBZU 35 62 #define OP_STW 36 63 #define OP_STWU 37 64 #define OP_STB 38 65 #define OP_STBU 39 66 #define OP_LHZ 40 67 #define OP_LHZU 41 68 #define OP_LHA 42 69 #define OP_LHAU 43 70 #define OP_STH 44 71 #define OP_STHU 45 72 73 void kvmppc_emulate_dec(struct kvm_vcpu *vcpu) 74 { 75 unsigned long dec_nsec; 76 unsigned long long dec_time; 77 78 pr_debug("mtDEC: %x\n", vcpu->arch.dec); 79 hrtimer_try_to_cancel(&vcpu->arch.dec_timer); 80 81 #ifdef CONFIG_PPC_BOOK3S 82 /* mtdec lowers the interrupt line when positive. */ 83 kvmppc_core_dequeue_dec(vcpu); 84 85 /* POWER4+ triggers a dec interrupt if the value is < 0 */ 86 if (vcpu->arch.dec & 0x80000000) { 87 kvmppc_core_queue_dec(vcpu); 88 return; 89 } 90 #endif 91 92 #ifdef CONFIG_BOOKE 93 /* On BOOKE, DEC = 0 is as good as decrementer not enabled */ 94 if (vcpu->arch.dec == 0) 95 return; 96 #endif 97 98 /* 99 * The decrementer ticks at the same rate as the timebase, so 100 * that's how we convert the guest DEC value to the number of 101 * host ticks. 102 */ 103 104 dec_time = vcpu->arch.dec; 105 dec_time *= 1000; 106 do_div(dec_time, tb_ticks_per_usec); 107 dec_nsec = do_div(dec_time, NSEC_PER_SEC); 108 hrtimer_start(&vcpu->arch.dec_timer, 109 ktime_set(dec_time, dec_nsec), HRTIMER_MODE_REL); 110 vcpu->arch.dec_jiffies = get_tb(); 111 } 112 113 u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb) 114 { 115 u64 jd = tb - vcpu->arch.dec_jiffies; 116 117 #ifdef CONFIG_BOOKE 118 if (vcpu->arch.dec < jd) 119 return 0; 120 #endif 121 122 return vcpu->arch.dec - jd; 123 } 124 125 /* XXX to do: 126 * lhax 127 * lhaux 128 * lswx 129 * lswi 130 * stswx 131 * stswi 132 * lha 133 * lhau 134 * lmw 135 * stmw 136 * 137 * XXX is_bigendian should depend on MMU mapping or MSR[LE] 138 */ 139 /* XXX Should probably auto-generate instruction decoding for a particular core 140 * from opcode tables in the future. */ 141 int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) 142 { 143 u32 inst = kvmppc_get_last_inst(vcpu); 144 u32 ea; 145 int ra; 146 int rb; 147 int rs; 148 int rt; 149 int sprn; 150 enum emulation_result emulated = EMULATE_DONE; 151 int advance = 1; 152 153 /* this default type might be overwritten by subcategories */ 154 kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS); 155 156 pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst)); 157 158 switch (get_op(inst)) { 159 case OP_TRAP: 160 #ifdef CONFIG_PPC_BOOK3S 161 case OP_TRAP_64: 162 kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP); 163 #else 164 kvmppc_core_queue_program(vcpu, 165 vcpu->arch.shared->esr | ESR_PTR); 166 #endif 167 advance = 0; 168 break; 169 170 case 31: 171 switch (get_xop(inst)) { 172 173 case OP_31_XOP_LWZX: 174 rt = get_rt(inst); 175 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); 176 break; 177 178 case OP_31_XOP_LBZX: 179 rt = get_rt(inst); 180 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); 181 break; 182 183 case OP_31_XOP_LBZUX: 184 rt = get_rt(inst); 185 ra = get_ra(inst); 186 rb = get_rb(inst); 187 188 ea = kvmppc_get_gpr(vcpu, rb); 189 if (ra) 190 ea += kvmppc_get_gpr(vcpu, ra); 191 192 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); 193 kvmppc_set_gpr(vcpu, ra, ea); 194 break; 195 196 case OP_31_XOP_STWX: 197 rs = get_rs(inst); 198 emulated = kvmppc_handle_store(run, vcpu, 199 kvmppc_get_gpr(vcpu, rs), 200 4, 1); 201 break; 202 203 case OP_31_XOP_STBX: 204 rs = get_rs(inst); 205 emulated = kvmppc_handle_store(run, vcpu, 206 kvmppc_get_gpr(vcpu, rs), 207 1, 1); 208 break; 209 210 case OP_31_XOP_STBUX: 211 rs = get_rs(inst); 212 ra = get_ra(inst); 213 rb = get_rb(inst); 214 215 ea = kvmppc_get_gpr(vcpu, rb); 216 if (ra) 217 ea += kvmppc_get_gpr(vcpu, ra); 218 219 emulated = kvmppc_handle_store(run, vcpu, 220 kvmppc_get_gpr(vcpu, rs), 221 1, 1); 222 kvmppc_set_gpr(vcpu, rs, ea); 223 break; 224 225 case OP_31_XOP_LHAX: 226 rt = get_rt(inst); 227 emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); 228 break; 229 230 case OP_31_XOP_LHZX: 231 rt = get_rt(inst); 232 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); 233 break; 234 235 case OP_31_XOP_LHZUX: 236 rt = get_rt(inst); 237 ra = get_ra(inst); 238 rb = get_rb(inst); 239 240 ea = kvmppc_get_gpr(vcpu, rb); 241 if (ra) 242 ea += kvmppc_get_gpr(vcpu, ra); 243 244 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); 245 kvmppc_set_gpr(vcpu, ra, ea); 246 break; 247 248 case OP_31_XOP_MFSPR: 249 sprn = get_sprn(inst); 250 rt = get_rt(inst); 251 252 switch (sprn) { 253 case SPRN_SRR0: 254 kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->srr0); 255 break; 256 case SPRN_SRR1: 257 kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->srr1); 258 break; 259 case SPRN_PVR: 260 kvmppc_set_gpr(vcpu, rt, vcpu->arch.pvr); break; 261 case SPRN_PIR: 262 kvmppc_set_gpr(vcpu, rt, vcpu->vcpu_id); break; 263 case SPRN_MSSSR0: 264 kvmppc_set_gpr(vcpu, rt, 0); break; 265 266 /* Note: mftb and TBRL/TBWL are user-accessible, so 267 * the guest can always access the real TB anyways. 268 * In fact, we probably will never see these traps. */ 269 case SPRN_TBWL: 270 kvmppc_set_gpr(vcpu, rt, get_tb() >> 32); break; 271 case SPRN_TBWU: 272 kvmppc_set_gpr(vcpu, rt, get_tb()); break; 273 274 case SPRN_SPRG0: 275 kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg0); 276 break; 277 case SPRN_SPRG1: 278 kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg1); 279 break; 280 case SPRN_SPRG2: 281 kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg2); 282 break; 283 case SPRN_SPRG3: 284 kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg3); 285 break; 286 /* Note: SPRG4-7 are user-readable, so we don't get 287 * a trap. */ 288 289 case SPRN_DEC: 290 { 291 kvmppc_set_gpr(vcpu, rt, 292 kvmppc_get_dec(vcpu, get_tb())); 293 break; 294 } 295 default: 296 emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, rt); 297 if (emulated == EMULATE_FAIL) { 298 printk("mfspr: unknown spr %x\n", sprn); 299 kvmppc_set_gpr(vcpu, rt, 0); 300 } 301 break; 302 } 303 kvmppc_set_exit_type(vcpu, EMULATED_MFSPR_EXITS); 304 break; 305 306 case OP_31_XOP_STHX: 307 rs = get_rs(inst); 308 ra = get_ra(inst); 309 rb = get_rb(inst); 310 311 emulated = kvmppc_handle_store(run, vcpu, 312 kvmppc_get_gpr(vcpu, rs), 313 2, 1); 314 break; 315 316 case OP_31_XOP_STHUX: 317 rs = get_rs(inst); 318 ra = get_ra(inst); 319 rb = get_rb(inst); 320 321 ea = kvmppc_get_gpr(vcpu, rb); 322 if (ra) 323 ea += kvmppc_get_gpr(vcpu, ra); 324 325 emulated = kvmppc_handle_store(run, vcpu, 326 kvmppc_get_gpr(vcpu, rs), 327 2, 1); 328 kvmppc_set_gpr(vcpu, ra, ea); 329 break; 330 331 case OP_31_XOP_MTSPR: 332 sprn = get_sprn(inst); 333 rs = get_rs(inst); 334 switch (sprn) { 335 case SPRN_SRR0: 336 vcpu->arch.shared->srr0 = kvmppc_get_gpr(vcpu, rs); 337 break; 338 case SPRN_SRR1: 339 vcpu->arch.shared->srr1 = kvmppc_get_gpr(vcpu, rs); 340 break; 341 342 /* XXX We need to context-switch the timebase for 343 * watchdog and FIT. */ 344 case SPRN_TBWL: break; 345 case SPRN_TBWU: break; 346 347 case SPRN_MSSSR0: break; 348 349 case SPRN_DEC: 350 vcpu->arch.dec = kvmppc_get_gpr(vcpu, rs); 351 kvmppc_emulate_dec(vcpu); 352 break; 353 354 case SPRN_SPRG0: 355 vcpu->arch.shared->sprg0 = kvmppc_get_gpr(vcpu, rs); 356 break; 357 case SPRN_SPRG1: 358 vcpu->arch.shared->sprg1 = kvmppc_get_gpr(vcpu, rs); 359 break; 360 case SPRN_SPRG2: 361 vcpu->arch.shared->sprg2 = kvmppc_get_gpr(vcpu, rs); 362 break; 363 case SPRN_SPRG3: 364 vcpu->arch.shared->sprg3 = kvmppc_get_gpr(vcpu, rs); 365 break; 366 367 default: 368 emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, rs); 369 if (emulated == EMULATE_FAIL) 370 printk("mtspr: unknown spr %x\n", sprn); 371 break; 372 } 373 kvmppc_set_exit_type(vcpu, EMULATED_MTSPR_EXITS); 374 break; 375 376 case OP_31_XOP_DCBI: 377 /* Do nothing. The guest is performing dcbi because 378 * hardware DMA is not snooped by the dcache, but 379 * emulated DMA either goes through the dcache as 380 * normal writes, or the host kernel has handled dcache 381 * coherence. */ 382 break; 383 384 case OP_31_XOP_LWBRX: 385 rt = get_rt(inst); 386 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0); 387 break; 388 389 case OP_31_XOP_TLBSYNC: 390 break; 391 392 case OP_31_XOP_STWBRX: 393 rs = get_rs(inst); 394 ra = get_ra(inst); 395 rb = get_rb(inst); 396 397 emulated = kvmppc_handle_store(run, vcpu, 398 kvmppc_get_gpr(vcpu, rs), 399 4, 0); 400 break; 401 402 case OP_31_XOP_LHBRX: 403 rt = get_rt(inst); 404 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0); 405 break; 406 407 case OP_31_XOP_STHBRX: 408 rs = get_rs(inst); 409 ra = get_ra(inst); 410 rb = get_rb(inst); 411 412 emulated = kvmppc_handle_store(run, vcpu, 413 kvmppc_get_gpr(vcpu, rs), 414 2, 0); 415 break; 416 417 default: 418 /* Attempt core-specific emulation below. */ 419 emulated = EMULATE_FAIL; 420 } 421 break; 422 423 case OP_LWZ: 424 rt = get_rt(inst); 425 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); 426 break; 427 428 case OP_LWZU: 429 ra = get_ra(inst); 430 rt = get_rt(inst); 431 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); 432 kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); 433 break; 434 435 case OP_LBZ: 436 rt = get_rt(inst); 437 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); 438 break; 439 440 case OP_LBZU: 441 ra = get_ra(inst); 442 rt = get_rt(inst); 443 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); 444 kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); 445 break; 446 447 case OP_STW: 448 rs = get_rs(inst); 449 emulated = kvmppc_handle_store(run, vcpu, 450 kvmppc_get_gpr(vcpu, rs), 451 4, 1); 452 break; 453 454 case OP_STWU: 455 ra = get_ra(inst); 456 rs = get_rs(inst); 457 emulated = kvmppc_handle_store(run, vcpu, 458 kvmppc_get_gpr(vcpu, rs), 459 4, 1); 460 kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); 461 break; 462 463 case OP_STB: 464 rs = get_rs(inst); 465 emulated = kvmppc_handle_store(run, vcpu, 466 kvmppc_get_gpr(vcpu, rs), 467 1, 1); 468 break; 469 470 case OP_STBU: 471 ra = get_ra(inst); 472 rs = get_rs(inst); 473 emulated = kvmppc_handle_store(run, vcpu, 474 kvmppc_get_gpr(vcpu, rs), 475 1, 1); 476 kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); 477 break; 478 479 case OP_LHZ: 480 rt = get_rt(inst); 481 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); 482 break; 483 484 case OP_LHZU: 485 ra = get_ra(inst); 486 rt = get_rt(inst); 487 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); 488 kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); 489 break; 490 491 case OP_LHA: 492 rt = get_rt(inst); 493 emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); 494 break; 495 496 case OP_LHAU: 497 ra = get_ra(inst); 498 rt = get_rt(inst); 499 emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); 500 kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); 501 break; 502 503 case OP_STH: 504 rs = get_rs(inst); 505 emulated = kvmppc_handle_store(run, vcpu, 506 kvmppc_get_gpr(vcpu, rs), 507 2, 1); 508 break; 509 510 case OP_STHU: 511 ra = get_ra(inst); 512 rs = get_rs(inst); 513 emulated = kvmppc_handle_store(run, vcpu, 514 kvmppc_get_gpr(vcpu, rs), 515 2, 1); 516 kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); 517 break; 518 519 default: 520 emulated = EMULATE_FAIL; 521 } 522 523 if (emulated == EMULATE_FAIL) { 524 emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance); 525 if (emulated == EMULATE_AGAIN) { 526 advance = 0; 527 } else if (emulated == EMULATE_FAIL) { 528 advance = 0; 529 printk(KERN_ERR "Couldn't emulate instruction 0x%08x " 530 "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst)); 531 kvmppc_core_queue_program(vcpu, 0); 532 } 533 } 534 535 trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated); 536 537 /* Advance past emulated instruction. */ 538 if (advance) 539 kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4); 540 541 return emulated; 542 } 543