/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <linux/jiffies.h>
#include <linux/hrtimer.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>

#include <asm/reg.h>
#include <asm/time.h>
#include <asm/byteorder.h>
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include "timing.h"
#include "trace.h"

#define OP_TRAP 3
#define OP_TRAP_64 2

#define OP_31_XOP_LWZX      23
#define OP_31_XOP_LBZX      87
#define OP_31_XOP_STWX      151
#define OP_31_XOP_STBX      215
#define OP_31_XOP_STBUX     247
#define OP_31_XOP_LHZX      279
#define OP_31_XOP_LHZUX     311
#define OP_31_XOP_MFSPR     339
#define OP_31_XOP_STHX      407
#define OP_31_XOP_STHUX     439
#define OP_31_XOP_MTSPR     467
#define OP_31_XOP_DCBI      470
#define OP_31_XOP_LWBRX     534
#define OP_31_XOP_TLBSYNC   566
#define OP_31_XOP_STWBRX    662
#define OP_31_XOP_LHBRX     790
#define OP_31_XOP_STHBRX    918

#define OP_LWZ  32
#define OP_LWZU 33
#define OP_LBZ  34
#define OP_LBZU 35
#define OP_STW  36
#define OP_STWU 37
#define OP_STB  38
#define OP_STBU 39
#define OP_LHZ  40
#define OP_LHZU 41
#define OP_STH  44
#define OP_STHU 45

#ifdef CONFIG_PPC64
static int kvmppc_dec_enabled(struct kvm_vcpu *vcpu)
{
	return 1;
}
#else
static int kvmppc_dec_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.tcr & TCR_DIE;
}
#endif

void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
{
	unsigned long dec_nsec;

	pr_debug("mtDEC: %x\n", vcpu->arch.dec);
#ifdef CONFIG_PPC64
	/* POWER4+ triggers a dec interrupt if the value is < 0 */
	if (vcpu->arch.dec & 0x80000000) {
		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
		kvmppc_core_queue_dec(vcpu);
		return;
	}
#endif
	if (kvmppc_dec_enabled(vcpu)) {
		/* The decrementer ticks at the same rate as the timebase, so
		 * that's how we convert the guest DEC value to the number of
		 * host ticks. */

		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
		dec_nsec = vcpu->arch.dec;
		dec_nsec *= 1000;
		dec_nsec /= tb_ticks_per_usec;
		hrtimer_start(&vcpu->arch.dec_timer, ktime_set(0, dec_nsec),
			      HRTIMER_MODE_REL);
		vcpu->arch.dec_jiffies = get_tb();
	} else {
		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
	}
}

/* XXX to do:
 * lhax
 * lhaux
 * lswx
 * lswi
 * stswx
 * stswi
 * lha
 * lhau
 * lmw
 * stmw
 *
 * XXX is_bigendian should depend on MMU mapping or MSR[LE]
 */
/* XXX Should probably auto-generate instruction decoding for a particular core
 * from opcode tables in the future. */
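/*
 * Worked example for the DEC conversion in kvmppc_emulate_dec() above
 * (illustrative figures, not from the source): the decrementer counts
 * timebase ticks, so with a hypothetical 512 MHz timebase,
 * tb_ticks_per_usec == 512, and a guest mtDEC of 0x00100000 (1048576
 * ticks) programs the hrtimer for 1048576 * 1000 / 512 == 2048000 ns,
 * i.e. roughly 2 ms from now. Note that on 32-bit hosts the multiply is
 * performed in a 32-bit unsigned long and can wrap for very large DEC
 * values.
 */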
int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	u32 inst = vcpu->arch.last_inst;
	u32 ea;
	int ra;
	int rb;
	int rs;
	int rt;
	int sprn;
	enum emulation_result emulated = EMULATE_DONE;
	int advance = 1;

	/* this default type might be overwritten by subcategories */
	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

	pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));

	switch (get_op(inst)) {
	case OP_TRAP:
#ifdef CONFIG_PPC64
	case OP_TRAP_64:
#else
		vcpu->arch.esr |= ESR_PTR;
#endif
		kvmppc_core_queue_program(vcpu);
		advance = 0;
		break;

	case 31:
		switch (get_xop(inst)) {

		case OP_31_XOP_LWZX:
			rt = get_rt(inst);
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
			break;

		case OP_31_XOP_LBZX:
			rt = get_rt(inst);
			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
			break;

		case OP_31_XOP_STWX:
			rs = get_rs(inst);
			emulated = kvmppc_handle_store(run, vcpu,
						       vcpu->arch.gpr[rs],
						       4, 1);
			break;

		case OP_31_XOP_STBX:
			rs = get_rs(inst);
			emulated = kvmppc_handle_store(run, vcpu,
						       vcpu->arch.gpr[rs],
						       1, 1);
			break;

		case OP_31_XOP_STBUX:
			rs = get_rs(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			ea = vcpu->arch.gpr[rb];
			if (ra)
				ea += vcpu->arch.gpr[ra];

			emulated = kvmppc_handle_store(run, vcpu,
						       vcpu->arch.gpr[rs],
						       1, 1);
			/* The update form writes the EA back to ra, not rs. */
			vcpu->arch.gpr[ra] = ea;
			break;

		case OP_31_XOP_LHZX:
			rt = get_rt(inst);
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			break;

		case OP_31_XOP_LHZUX:
			rt = get_rt(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			ea = vcpu->arch.gpr[rb];
			if (ra)
				ea += vcpu->arch.gpr[ra];

			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			vcpu->arch.gpr[ra] = ea;
			break;

		case OP_31_XOP_MFSPR:
			sprn = get_sprn(inst);
			rt = get_rt(inst);

			switch (sprn) {
			case SPRN_SRR0:
				vcpu->arch.gpr[rt] = vcpu->arch.srr0; break;
			case SPRN_SRR1:
				vcpu->arch.gpr[rt] = vcpu->arch.srr1; break;
			case SPRN_PVR:
				vcpu->arch.gpr[rt] = vcpu->arch.pvr; break;
			case SPRN_PIR:
				vcpu->arch.gpr[rt] = vcpu->vcpu_id; break;
			case SPRN_MSSSR0:
				vcpu->arch.gpr[rt] = 0; break;

			/* Note: mftb and TBRL/TBWL are user-accessible, so
			 * the guest can always access the real TB anyways.
			 * In fact, we probably will never see these traps. */
			case SPRN_TBWL:
				vcpu->arch.gpr[rt] = get_tb() >> 32; break;
			case SPRN_TBWU:
				vcpu->arch.gpr[rt] = get_tb(); break;

			case SPRN_SPRG0:
				vcpu->arch.gpr[rt] = vcpu->arch.sprg0; break;
			case SPRN_SPRG1:
				vcpu->arch.gpr[rt] = vcpu->arch.sprg1; break;
			case SPRN_SPRG2:
				vcpu->arch.gpr[rt] = vcpu->arch.sprg2; break;
			case SPRN_SPRG3:
				vcpu->arch.gpr[rt] = vcpu->arch.sprg3; break;
			/* Note: SPRG4-7 are user-readable, so we don't get
			 * a trap. */
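			/*
			 * Reading DEC back (next case): the guest's
			 * decrementer keeps counting down in real time, so it
			 * is reconstructed as the value written at mtDEC minus
			 * the timebase ticks elapsed since then. Illustrative
			 * numbers (not from the source): a guest that wrote
			 * 0x10000 and reads DEC after 0x4000 ticks sees
			 * 0x10000 - 0x4000 == 0xc000. Despite its name,
			 * dec_jiffies holds a timebase snapshot, not jiffies.
			 */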
			case SPRN_DEC:
			{
				u64 jd = get_tb() - vcpu->arch.dec_jiffies;
				vcpu->arch.gpr[rt] = vcpu->arch.dec - jd;
				pr_debug("mfDEC: %x - %llx = %lx\n",
					 vcpu->arch.dec, jd,
					 vcpu->arch.gpr[rt]);
				break;
			}
			default:
				emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, rt);
				if (emulated == EMULATE_FAIL) {
					printk(KERN_INFO "mfspr: unknown spr %x\n", sprn);
					vcpu->arch.gpr[rt] = 0;
				}
				break;
			}
			break;

		case OP_31_XOP_STHX:
			rs = get_rs(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			emulated = kvmppc_handle_store(run, vcpu,
						       vcpu->arch.gpr[rs],
						       2, 1);
			break;

		case OP_31_XOP_STHUX:
			rs = get_rs(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			ea = vcpu->arch.gpr[rb];
			if (ra)
				ea += vcpu->arch.gpr[ra];

			emulated = kvmppc_handle_store(run, vcpu,
						       vcpu->arch.gpr[rs],
						       2, 1);
			vcpu->arch.gpr[ra] = ea;
			break;

		case OP_31_XOP_MTSPR:
			sprn = get_sprn(inst);
			rs = get_rs(inst);
			switch (sprn) {
			case SPRN_SRR0:
				vcpu->arch.srr0 = vcpu->arch.gpr[rs]; break;
			case SPRN_SRR1:
				vcpu->arch.srr1 = vcpu->arch.gpr[rs]; break;

			/* XXX We need to context-switch the timebase for
			 * watchdog and FIT. */
			case SPRN_TBWL: break;
			case SPRN_TBWU: break;

			case SPRN_MSSSR0: break;

			case SPRN_DEC:
				vcpu->arch.dec = vcpu->arch.gpr[rs];
				kvmppc_emulate_dec(vcpu);
				break;

			case SPRN_SPRG0:
				vcpu->arch.sprg0 = vcpu->arch.gpr[rs]; break;
			case SPRN_SPRG1:
				vcpu->arch.sprg1 = vcpu->arch.gpr[rs]; break;
			case SPRN_SPRG2:
				vcpu->arch.sprg2 = vcpu->arch.gpr[rs]; break;
			case SPRN_SPRG3:
				vcpu->arch.sprg3 = vcpu->arch.gpr[rs]; break;

			default:
				emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, rs);
				if (emulated == EMULATE_FAIL)
					printk(KERN_INFO "mtspr: unknown spr %x\n", sprn);
				break;
			}
			break;

		case OP_31_XOP_DCBI:
			/* Do nothing. The guest is performing dcbi because
			 * hardware DMA is not snooped by the dcache, but
			 * emulated DMA either goes through the dcache as
			 * normal writes, or the host kernel has handled dcache
			 * coherence. */
			break;

		case OP_31_XOP_LWBRX:
			rt = get_rt(inst);
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
			break;

		case OP_31_XOP_TLBSYNC:
			break;

		case OP_31_XOP_STWBRX:
			rs = get_rs(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			emulated = kvmppc_handle_store(run, vcpu,
						       vcpu->arch.gpr[rs],
						       4, 0);
			break;

		case OP_31_XOP_LHBRX:
			rt = get_rt(inst);
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
			break;

		case OP_31_XOP_STHBRX:
			rs = get_rs(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			emulated = kvmppc_handle_store(run, vcpu,
						       vcpu->arch.gpr[rs],
						       2, 0);
			break;
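		/*
		 * In the byte-reversed cases above, the final argument to
		 * kvmppc_handle_load()/kvmppc_handle_store() is the
		 * is_bigendian flag: 1 for the plain forms, 0 for
		 * lwbrx/stwbrx/lhbrx/sthbrx. Illustrative example (not from
		 * the source): lwbrx of the bytes 0x11 0x22 0x33 0x44 loads
		 * 0x44332211 into rt.
		 */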
		default:
			/* Attempt core-specific emulation below. */
			emulated = EMULATE_FAIL;
		}
		break;

	case OP_LWZ:
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		break;

	case OP_LWZU:
		ra = get_ra(inst);
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
		break;

	case OP_LBZ:
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
		break;

	case OP_LBZU:
		ra = get_ra(inst);
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
		vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
		break;

	case OP_STW:
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
					       4, 1);
		break;

	case OP_STWU:
		ra = get_ra(inst);
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
					       4, 1);
		vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
		break;

	case OP_STB:
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
					       1, 1);
		break;

	case OP_STBU:
		ra = get_ra(inst);
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
					       1, 1);
		vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
		break;

	case OP_LHZ:
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
		break;

	case OP_LHZU:
		ra = get_ra(inst);
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
		vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
		break;

	case OP_STH:
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
					       2, 1);
		break;

	case OP_STHU:
		ra = get_ra(inst);
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
					       2, 1);
		vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
		break;

	default:
		emulated = EMULATE_FAIL;
	}

	if (emulated == EMULATE_FAIL) {
		emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance);
		if (emulated == EMULATE_FAIL) {
			advance = 0;
			printk(KERN_ERR "Couldn't emulate instruction 0x%08x "
			       "(op %d xop %d)\n", inst, get_op(inst),
			       get_xop(inst));
		}
	}

	trace_kvm_ppc_instr(inst, vcpu->arch.pc, emulated);

	if (advance)
		vcpu->arch.pc += 4; /* Advance past emulated instruction. */

	return emulated;
}
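/*
 * Usage sketch (an assumption about the caller, not part of this file): a
 * typical exit handler invokes kvmppc_emulate_instruction() after an
 * emulation trap and dispatches on the result, roughly:
 *
 *	switch (kvmppc_emulate_instruction(run, vcpu)) {
 *	case EMULATE_DONE:	// resume the guest
 *		break;
 *	case EMULATE_DO_MMIO:	// exit to userspace to complete the MMIO
 *		break;
 *	case EMULATE_FAIL:	// report the unhandled instruction
 *		break;
 *	}
 */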