1 /* 2 * Copyright (C) 2016 Veertu Inc, 3 * Copyright (C) 2017 Google Inc, 4 * 5 * This program is free software; you can redistribute it and/or 6 * modify it under the terms of the GNU Lesser General Public 7 * License as published by the Free Software Foundation; either 8 * version 2.1 of the License, or (at your option) any later version. 9 * 10 * This program is distributed in the hope that it will be useful, 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 13 * Lesser General Public License for more details. 14 * 15 * You should have received a copy of the GNU Lesser General Public 16 * License along with this program; if not, see <http://www.gnu.org/licenses/>. 17 */ 18 19 ///////////////////////////////////////////////////////////////////////// 20 // 21 // Copyright (C) 2001-2012 The Bochs Project 22 // 23 // This library is free software; you can redistribute it and/or 24 // modify it under the terms of the GNU Lesser General Public 25 // License as published by the Free Software Foundation; either 26 // version 2.1 of the License, or (at your option) any later version. 27 // 28 // This library is distributed in the hope that it will be useful, 29 // but WITHOUT ANY WARRANTY; without even the implied warranty of 30 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 31 // Lesser General Public License for more details. 32 // 33 // You should have received a copy of the GNU Lesser General Public 34 // License along with this library; if not, write to the Free Software 35 // Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA B 02110-1301 USA 36 ///////////////////////////////////////////////////////////////////////// 37 38 #include "qemu/osdep.h" 39 #include "panic.h" 40 #include "x86_decode.h" 41 #include "x86.h" 42 #include "x86_emu.h" 43 #include "x86_mmu.h" 44 #include "x86_flags.h" 45 #include "vmcs.h" 46 #include "vmx.h" 47 48 void hvf_handle_io(struct CPUState *cpu, uint16_t port, void *data, 49 int direction, int size, uint32_t count); 50 51 #define EXEC_2OP_FLAGS_CMD(env, decode, cmd, FLAGS_FUNC, save_res) \ 52 { \ 53 fetch_operands(env, decode, 2, true, true, false); \ 54 switch (decode->operand_size) { \ 55 case 1: \ 56 { \ 57 uint8_t v1 = (uint8_t)decode->op[0].val; \ 58 uint8_t v2 = (uint8_t)decode->op[1].val; \ 59 uint8_t diff = v1 cmd v2; \ 60 if (save_res) { \ 61 write_val_ext(env, decode->op[0].ptr, diff, 1); \ 62 } \ 63 FLAGS_FUNC##8(env, v1, v2, diff); \ 64 break; \ 65 } \ 66 case 2: \ 67 { \ 68 uint16_t v1 = (uint16_t)decode->op[0].val; \ 69 uint16_t v2 = (uint16_t)decode->op[1].val; \ 70 uint16_t diff = v1 cmd v2; \ 71 if (save_res) { \ 72 write_val_ext(env, decode->op[0].ptr, diff, 2); \ 73 } \ 74 FLAGS_FUNC##16(env, v1, v2, diff); \ 75 break; \ 76 } \ 77 case 4: \ 78 { \ 79 uint32_t v1 = (uint32_t)decode->op[0].val; \ 80 uint32_t v2 = (uint32_t)decode->op[1].val; \ 81 uint32_t diff = v1 cmd v2; \ 82 if (save_res) { \ 83 write_val_ext(env, decode->op[0].ptr, diff, 4); \ 84 } \ 85 FLAGS_FUNC##32(env, v1, v2, diff); \ 86 break; \ 87 } \ 88 default: \ 89 VM_PANIC("bad size\n"); \ 90 } \ 91 } \ 92 93 target_ulong read_reg(CPUX86State *env, int reg, int size) 94 { 95 switch (size) { 96 case 1: 97 return x86_reg(env, reg)->lx; 98 case 2: 99 return x86_reg(env, reg)->rx; 100 case 4: 101 return x86_reg(env, reg)->erx; 102 case 8: 103 return x86_reg(env, reg)->rrx; 104 default: 105 abort(); 106 } 107 return 0; 108 } 109 110 void write_reg(CPUX86State *env, int reg, target_ulong val, int size) 111 { 112 switch (size) { 113 case 1: 114 x86_reg(env, reg)->lx = val; 115 break; 116 case 2: 117 x86_reg(env, reg)->rx = val; 118 break; 119 case 4: 120 x86_reg(env, reg)->rrx = (uint32_t)val; 121 break; 122 case 8: 123 x86_reg(env, reg)->rrx = val; 124 break; 125 default: 126 abort(); 127 } 128 } 129 130 target_ulong read_val_from_reg(target_ulong reg_ptr, int size) 131 { 132 target_ulong val; 133 134 switch (size) { 135 case 1: 136 val = *(uint8_t *)reg_ptr; 137 break; 138 case 2: 139 val = *(uint16_t *)reg_ptr; 140 break; 141 case 4: 142 val = *(uint32_t *)reg_ptr; 143 break; 144 case 8: 145 val = *(uint64_t *)reg_ptr; 146 break; 147 default: 148 abort(); 149 } 150 return val; 151 } 152 153 void write_val_to_reg(target_ulong reg_ptr, target_ulong val, int size) 154 { 155 switch (size) { 156 case 1: 157 *(uint8_t *)reg_ptr = val; 158 break; 159 case 2: 160 *(uint16_t *)reg_ptr = val; 161 break; 162 case 4: 163 *(uint64_t *)reg_ptr = (uint32_t)val; 164 break; 165 case 8: 166 *(uint64_t *)reg_ptr = val; 167 break; 168 default: 169 abort(); 170 } 171 } 172 173 static bool is_host_reg(CPUX86State *env, target_ulong ptr) 174 { 175 return (ptr - (target_ulong)&env->regs[0]) < sizeof(env->regs); 176 } 177 178 void write_val_ext(CPUX86State *env, target_ulong ptr, target_ulong val, int size) 179 { 180 if (is_host_reg(env, ptr)) { 181 write_val_to_reg(ptr, val, size); 182 return; 183 } 184 vmx_write_mem(env_cpu(env), ptr, &val, size); 185 } 186 187 uint8_t *read_mmio(CPUX86State *env, target_ulong ptr, int bytes) 188 { 189 vmx_read_mem(env_cpu(env), env->hvf_mmio_buf, ptr, bytes); 190 return env->hvf_mmio_buf; 191 } 192 193 194 target_ulong read_val_ext(CPUX86State *env, target_ulong ptr, int size) 195 { 196 target_ulong val; 197 uint8_t *mmio_ptr; 198 199 if (is_host_reg(env, ptr)) { 200 return read_val_from_reg(ptr, size); 201 } 202 203 mmio_ptr = read_mmio(env, ptr, size); 204 switch (size) { 205 case 1: 206 val = *(uint8_t *)mmio_ptr; 207 break; 208 case 2: 209 val = *(uint16_t *)mmio_ptr; 210 break; 211 case 4: 212 val = *(uint32_t *)mmio_ptr; 213 break; 214 case 8: 215 val = *(uint64_t *)mmio_ptr; 216 break; 217 default: 218 VM_PANIC("bad size\n"); 219 break; 220 } 221 return val; 222 } 223 224 static void fetch_operands(CPUX86State *env, struct x86_decode *decode, 225 int n, bool val_op0, bool val_op1, bool val_op2) 226 { 227 int i; 228 bool calc_val[3] = {val_op0, val_op1, val_op2}; 229 230 for (i = 0; i < n; i++) { 231 switch (decode->op[i].type) { 232 case X86_VAR_IMMEDIATE: 233 break; 234 case X86_VAR_REG: 235 VM_PANIC_ON(!decode->op[i].ptr); 236 if (calc_val[i]) { 237 decode->op[i].val = read_val_from_reg(decode->op[i].ptr, 238 decode->operand_size); 239 } 240 break; 241 case X86_VAR_RM: 242 calc_modrm_operand(env, decode, &decode->op[i]); 243 if (calc_val[i]) { 244 decode->op[i].val = read_val_ext(env, decode->op[i].ptr, 245 decode->operand_size); 246 } 247 break; 248 case X86_VAR_OFFSET: 249 decode->op[i].ptr = decode_linear_addr(env, decode, 250 decode->op[i].ptr, 251 R_DS); 252 if (calc_val[i]) { 253 decode->op[i].val = read_val_ext(env, decode->op[i].ptr, 254 decode->operand_size); 255 } 256 break; 257 default: 258 break; 259 } 260 } 261 } 262 263 static void exec_mov(CPUX86State *env, struct x86_decode *decode) 264 { 265 fetch_operands(env, decode, 2, false, true, false); 266 write_val_ext(env, decode->op[0].ptr, decode->op[1].val, 267 decode->operand_size); 268 269 env->eip += decode->len; 270 } 271 272 static void exec_add(CPUX86State *env, struct x86_decode *decode) 273 { 274 EXEC_2OP_FLAGS_CMD(env, decode, +, SET_FLAGS_OSZAPC_ADD, true); 275 env->eip += decode->len; 276 } 277 278 static void exec_or(CPUX86State *env, struct x86_decode *decode) 279 { 280 EXEC_2OP_FLAGS_CMD(env, decode, |, SET_FLAGS_OSZAPC_LOGIC, true); 281 env->eip += decode->len; 282 } 283 284 static void exec_adc(CPUX86State *env, struct x86_decode *decode) 285 { 286 EXEC_2OP_FLAGS_CMD(env, decode, +get_CF(env)+, SET_FLAGS_OSZAPC_ADD, true); 287 env->eip += decode->len; 288 } 289 290 static void exec_sbb(CPUX86State *env, struct x86_decode *decode) 291 { 292 EXEC_2OP_FLAGS_CMD(env, decode, -get_CF(env)-, SET_FLAGS_OSZAPC_SUB, true); 293 env->eip += decode->len; 294 } 295 296 static void exec_and(CPUX86State *env, struct x86_decode *decode) 297 { 298 EXEC_2OP_FLAGS_CMD(env, decode, &, SET_FLAGS_OSZAPC_LOGIC, true); 299 env->eip += decode->len; 300 } 301 302 static void exec_sub(CPUX86State *env, struct x86_decode *decode) 303 { 304 EXEC_2OP_FLAGS_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, true); 305 env->eip += decode->len; 306 } 307 308 static void exec_xor(CPUX86State *env, struct x86_decode *decode) 309 { 310 EXEC_2OP_FLAGS_CMD(env, decode, ^, SET_FLAGS_OSZAPC_LOGIC, true); 311 env->eip += decode->len; 312 } 313 314 static void exec_neg(CPUX86State *env, struct x86_decode *decode) 315 { 316 /*EXEC_2OP_FLAGS_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, false);*/ 317 int32_t val; 318 fetch_operands(env, decode, 2, true, true, false); 319 320 val = 0 - sign(decode->op[1].val, decode->operand_size); 321 write_val_ext(env, decode->op[1].ptr, val, decode->operand_size); 322 323 if (4 == decode->operand_size) { 324 SET_FLAGS_OSZAPC_SUB32(env, 0, 0 - val, val); 325 } else if (2 == decode->operand_size) { 326 SET_FLAGS_OSZAPC_SUB16(env, 0, 0 - val, val); 327 } else if (1 == decode->operand_size) { 328 SET_FLAGS_OSZAPC_SUB8(env, 0, 0 - val, val); 329 } else { 330 VM_PANIC("bad op size\n"); 331 } 332 333 /*lflags_to_rflags(env);*/ 334 env->eip += decode->len; 335 } 336 337 static void exec_cmp(CPUX86State *env, struct x86_decode *decode) 338 { 339 EXEC_2OP_FLAGS_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, false); 340 env->eip += decode->len; 341 } 342 343 static void exec_inc(CPUX86State *env, struct x86_decode *decode) 344 { 345 decode->op[1].type = X86_VAR_IMMEDIATE; 346 decode->op[1].val = 0; 347 348 EXEC_2OP_FLAGS_CMD(env, decode, +1+, SET_FLAGS_OSZAP_ADD, true); 349 350 env->eip += decode->len; 351 } 352 353 static void exec_dec(CPUX86State *env, struct x86_decode *decode) 354 { 355 decode->op[1].type = X86_VAR_IMMEDIATE; 356 decode->op[1].val = 0; 357 358 EXEC_2OP_FLAGS_CMD(env, decode, -1-, SET_FLAGS_OSZAP_SUB, true); 359 env->eip += decode->len; 360 } 361 362 static void exec_tst(CPUX86State *env, struct x86_decode *decode) 363 { 364 EXEC_2OP_FLAGS_CMD(env, decode, &, SET_FLAGS_OSZAPC_LOGIC, false); 365 env->eip += decode->len; 366 } 367 368 static void exec_not(CPUX86State *env, struct x86_decode *decode) 369 { 370 fetch_operands(env, decode, 1, true, false, false); 371 372 write_val_ext(env, decode->op[0].ptr, ~decode->op[0].val, 373 decode->operand_size); 374 env->eip += decode->len; 375 } 376 377 void exec_movzx(CPUX86State *env, struct x86_decode *decode) 378 { 379 int src_op_size; 380 int op_size = decode->operand_size; 381 382 fetch_operands(env, decode, 1, false, false, false); 383 384 if (0xb6 == decode->opcode[1]) { 385 src_op_size = 1; 386 } else { 387 src_op_size = 2; 388 } 389 decode->operand_size = src_op_size; 390 calc_modrm_operand(env, decode, &decode->op[1]); 391 decode->op[1].val = read_val_ext(env, decode->op[1].ptr, src_op_size); 392 write_val_ext(env, decode->op[0].ptr, decode->op[1].val, op_size); 393 394 env->eip += decode->len; 395 } 396 397 static void exec_out(CPUX86State *env, struct x86_decode *decode) 398 { 399 switch (decode->opcode[0]) { 400 case 0xe6: 401 hvf_handle_io(env_cpu(env), decode->op[0].val, &AL(env), 1, 1, 1); 402 break; 403 case 0xe7: 404 hvf_handle_io(env_cpu(env), decode->op[0].val, &RAX(env), 1, 405 decode->operand_size, 1); 406 break; 407 case 0xee: 408 hvf_handle_io(env_cpu(env), DX(env), &AL(env), 1, 1, 1); 409 break; 410 case 0xef: 411 hvf_handle_io(env_cpu(env), DX(env), &RAX(env), 1, 412 decode->operand_size, 1); 413 break; 414 default: 415 VM_PANIC("Bad out opcode\n"); 416 break; 417 } 418 env->eip += decode->len; 419 } 420 421 static void exec_in(CPUX86State *env, struct x86_decode *decode) 422 { 423 target_ulong val = 0; 424 switch (decode->opcode[0]) { 425 case 0xe4: 426 hvf_handle_io(env_cpu(env), decode->op[0].val, &AL(env), 0, 1, 1); 427 break; 428 case 0xe5: 429 hvf_handle_io(env_cpu(env), decode->op[0].val, &val, 0, 430 decode->operand_size, 1); 431 if (decode->operand_size == 2) { 432 AX(env) = val; 433 } else { 434 RAX(env) = (uint32_t)val; 435 } 436 break; 437 case 0xec: 438 hvf_handle_io(env_cpu(env), DX(env), &AL(env), 0, 1, 1); 439 break; 440 case 0xed: 441 hvf_handle_io(env_cpu(env), DX(env), &val, 0, decode->operand_size, 1); 442 if (decode->operand_size == 2) { 443 AX(env) = val; 444 } else { 445 RAX(env) = (uint32_t)val; 446 } 447 448 break; 449 default: 450 VM_PANIC("Bad in opcode\n"); 451 break; 452 } 453 454 env->eip += decode->len; 455 } 456 457 static inline void string_increment_reg(CPUX86State *env, int reg, 458 struct x86_decode *decode) 459 { 460 target_ulong val = read_reg(env, reg, decode->addressing_size); 461 if (env->eflags & DF_MASK) { 462 val -= decode->operand_size; 463 } else { 464 val += decode->operand_size; 465 } 466 write_reg(env, reg, val, decode->addressing_size); 467 } 468 469 static inline void string_rep(CPUX86State *env, struct x86_decode *decode, 470 void (*func)(CPUX86State *env, 471 struct x86_decode *ins), int rep) 472 { 473 target_ulong rcx = read_reg(env, R_ECX, decode->addressing_size); 474 while (rcx--) { 475 func(env, decode); 476 write_reg(env, R_ECX, rcx, decode->addressing_size); 477 if ((PREFIX_REP == rep) && !get_ZF(env)) { 478 break; 479 } 480 if ((PREFIX_REPN == rep) && get_ZF(env)) { 481 break; 482 } 483 } 484 } 485 486 static void exec_ins_single(CPUX86State *env, struct x86_decode *decode) 487 { 488 target_ulong addr = linear_addr_size(env_cpu(env), RDI(env), 489 decode->addressing_size, R_ES); 490 491 hvf_handle_io(env_cpu(env), DX(env), env->hvf_mmio_buf, 0, 492 decode->operand_size, 1); 493 vmx_write_mem(env_cpu(env), addr, env->hvf_mmio_buf, 494 decode->operand_size); 495 496 string_increment_reg(env, R_EDI, decode); 497 } 498 499 static void exec_ins(CPUX86State *env, struct x86_decode *decode) 500 { 501 if (decode->rep) { 502 string_rep(env, decode, exec_ins_single, 0); 503 } else { 504 exec_ins_single(env, decode); 505 } 506 507 env->eip += decode->len; 508 } 509 510 static void exec_outs_single(CPUX86State *env, struct x86_decode *decode) 511 { 512 target_ulong addr = decode_linear_addr(env, decode, RSI(env), R_DS); 513 514 vmx_read_mem(env_cpu(env), env->hvf_mmio_buf, addr, 515 decode->operand_size); 516 hvf_handle_io(env_cpu(env), DX(env), env->hvf_mmio_buf, 1, 517 decode->operand_size, 1); 518 519 string_increment_reg(env, R_ESI, decode); 520 } 521 522 static void exec_outs(CPUX86State *env, struct x86_decode *decode) 523 { 524 if (decode->rep) { 525 string_rep(env, decode, exec_outs_single, 0); 526 } else { 527 exec_outs_single(env, decode); 528 } 529 530 env->eip += decode->len; 531 } 532 533 static void exec_movs_single(CPUX86State *env, struct x86_decode *decode) 534 { 535 target_ulong src_addr; 536 target_ulong dst_addr; 537 target_ulong val; 538 539 src_addr = decode_linear_addr(env, decode, RSI(env), R_DS); 540 dst_addr = linear_addr_size(env_cpu(env), RDI(env), 541 decode->addressing_size, R_ES); 542 543 val = read_val_ext(env, src_addr, decode->operand_size); 544 write_val_ext(env, dst_addr, val, decode->operand_size); 545 546 string_increment_reg(env, R_ESI, decode); 547 string_increment_reg(env, R_EDI, decode); 548 } 549 550 static void exec_movs(CPUX86State *env, struct x86_decode *decode) 551 { 552 if (decode->rep) { 553 string_rep(env, decode, exec_movs_single, 0); 554 } else { 555 exec_movs_single(env, decode); 556 } 557 558 env->eip += decode->len; 559 } 560 561 static void exec_cmps_single(CPUX86State *env, struct x86_decode *decode) 562 { 563 target_ulong src_addr; 564 target_ulong dst_addr; 565 566 src_addr = decode_linear_addr(env, decode, RSI(env), R_DS); 567 dst_addr = linear_addr_size(env_cpu(env), RDI(env), 568 decode->addressing_size, R_ES); 569 570 decode->op[0].type = X86_VAR_IMMEDIATE; 571 decode->op[0].val = read_val_ext(env, src_addr, decode->operand_size); 572 decode->op[1].type = X86_VAR_IMMEDIATE; 573 decode->op[1].val = read_val_ext(env, dst_addr, decode->operand_size); 574 575 EXEC_2OP_FLAGS_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, false); 576 577 string_increment_reg(env, R_ESI, decode); 578 string_increment_reg(env, R_EDI, decode); 579 } 580 581 static void exec_cmps(CPUX86State *env, struct x86_decode *decode) 582 { 583 if (decode->rep) { 584 string_rep(env, decode, exec_cmps_single, decode->rep); 585 } else { 586 exec_cmps_single(env, decode); 587 } 588 env->eip += decode->len; 589 } 590 591 592 static void exec_stos_single(CPUX86State *env, struct x86_decode *decode) 593 { 594 target_ulong addr; 595 target_ulong val; 596 597 addr = linear_addr_size(env_cpu(env), RDI(env), 598 decode->addressing_size, R_ES); 599 val = read_reg(env, R_EAX, decode->operand_size); 600 vmx_write_mem(env_cpu(env), addr, &val, decode->operand_size); 601 602 string_increment_reg(env, R_EDI, decode); 603 } 604 605 606 static void exec_stos(CPUX86State *env, struct x86_decode *decode) 607 { 608 if (decode->rep) { 609 string_rep(env, decode, exec_stos_single, 0); 610 } else { 611 exec_stos_single(env, decode); 612 } 613 614 env->eip += decode->len; 615 } 616 617 static void exec_scas_single(CPUX86State *env, struct x86_decode *decode) 618 { 619 target_ulong addr; 620 621 addr = linear_addr_size(env_cpu(env), RDI(env), 622 decode->addressing_size, R_ES); 623 decode->op[1].type = X86_VAR_IMMEDIATE; 624 vmx_read_mem(env_cpu(env), &decode->op[1].val, addr, decode->operand_size); 625 626 EXEC_2OP_FLAGS_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, false); 627 string_increment_reg(env, R_EDI, decode); 628 } 629 630 static void exec_scas(CPUX86State *env, struct x86_decode *decode) 631 { 632 decode->op[0].type = X86_VAR_REG; 633 decode->op[0].reg = R_EAX; 634 if (decode->rep) { 635 string_rep(env, decode, exec_scas_single, decode->rep); 636 } else { 637 exec_scas_single(env, decode); 638 } 639 640 env->eip += decode->len; 641 } 642 643 static void exec_lods_single(CPUX86State *env, struct x86_decode *decode) 644 { 645 target_ulong addr; 646 target_ulong val = 0; 647 648 addr = decode_linear_addr(env, decode, RSI(env), R_DS); 649 vmx_read_mem(env_cpu(env), &val, addr, decode->operand_size); 650 write_reg(env, R_EAX, val, decode->operand_size); 651 652 string_increment_reg(env, R_ESI, decode); 653 } 654 655 static void exec_lods(CPUX86State *env, struct x86_decode *decode) 656 { 657 if (decode->rep) { 658 string_rep(env, decode, exec_lods_single, 0); 659 } else { 660 exec_lods_single(env, decode); 661 } 662 663 env->eip += decode->len; 664 } 665 666 void simulate_rdmsr(struct CPUState *cpu) 667 { 668 X86CPU *x86_cpu = X86_CPU(cpu); 669 CPUX86State *env = &x86_cpu->env; 670 CPUState *cs = env_cpu(env); 671 uint32_t msr = ECX(env); 672 uint64_t val = 0; 673 674 switch (msr) { 675 case MSR_IA32_TSC: 676 val = rdtscp() + rvmcs(cpu->hvf->fd, VMCS_TSC_OFFSET); 677 break; 678 case MSR_IA32_APICBASE: 679 val = cpu_get_apic_base(X86_CPU(cpu)->apic_state); 680 break; 681 case MSR_IA32_UCODE_REV: 682 val = x86_cpu->ucode_rev; 683 break; 684 case MSR_EFER: 685 val = rvmcs(cpu->hvf->fd, VMCS_GUEST_IA32_EFER); 686 break; 687 case MSR_FSBASE: 688 val = rvmcs(cpu->hvf->fd, VMCS_GUEST_FS_BASE); 689 break; 690 case MSR_GSBASE: 691 val = rvmcs(cpu->hvf->fd, VMCS_GUEST_GS_BASE); 692 break; 693 case MSR_KERNELGSBASE: 694 val = rvmcs(cpu->hvf->fd, VMCS_HOST_FS_BASE); 695 break; 696 case MSR_STAR: 697 abort(); 698 break; 699 case MSR_LSTAR: 700 abort(); 701 break; 702 case MSR_CSTAR: 703 abort(); 704 break; 705 case MSR_IA32_MISC_ENABLE: 706 val = env->msr_ia32_misc_enable; 707 break; 708 case MSR_MTRRphysBase(0): 709 case MSR_MTRRphysBase(1): 710 case MSR_MTRRphysBase(2): 711 case MSR_MTRRphysBase(3): 712 case MSR_MTRRphysBase(4): 713 case MSR_MTRRphysBase(5): 714 case MSR_MTRRphysBase(6): 715 case MSR_MTRRphysBase(7): 716 val = env->mtrr_var[(ECX(env) - MSR_MTRRphysBase(0)) / 2].base; 717 break; 718 case MSR_MTRRphysMask(0): 719 case MSR_MTRRphysMask(1): 720 case MSR_MTRRphysMask(2): 721 case MSR_MTRRphysMask(3): 722 case MSR_MTRRphysMask(4): 723 case MSR_MTRRphysMask(5): 724 case MSR_MTRRphysMask(6): 725 case MSR_MTRRphysMask(7): 726 val = env->mtrr_var[(ECX(env) - MSR_MTRRphysMask(0)) / 2].mask; 727 break; 728 case MSR_MTRRfix64K_00000: 729 val = env->mtrr_fixed[0]; 730 break; 731 case MSR_MTRRfix16K_80000: 732 case MSR_MTRRfix16K_A0000: 733 val = env->mtrr_fixed[ECX(env) - MSR_MTRRfix16K_80000 + 1]; 734 break; 735 case MSR_MTRRfix4K_C0000: 736 case MSR_MTRRfix4K_C8000: 737 case MSR_MTRRfix4K_D0000: 738 case MSR_MTRRfix4K_D8000: 739 case MSR_MTRRfix4K_E0000: 740 case MSR_MTRRfix4K_E8000: 741 case MSR_MTRRfix4K_F0000: 742 case MSR_MTRRfix4K_F8000: 743 val = env->mtrr_fixed[ECX(env) - MSR_MTRRfix4K_C0000 + 3]; 744 break; 745 case MSR_MTRRdefType: 746 val = env->mtrr_deftype; 747 break; 748 case MSR_CORE_THREAD_COUNT: 749 val = cs->nr_threads * cs->nr_cores; /* thread count, bits 15..0 */ 750 val |= ((uint32_t)cs->nr_cores << 16); /* core count, bits 31..16 */ 751 break; 752 default: 753 /* fprintf(stderr, "%s: unknown msr 0x%x\n", __func__, msr); */ 754 val = 0; 755 break; 756 } 757 758 RAX(env) = (uint32_t)val; 759 RDX(env) = (uint32_t)(val >> 32); 760 } 761 762 static void exec_rdmsr(CPUX86State *env, struct x86_decode *decode) 763 { 764 simulate_rdmsr(env_cpu(env)); 765 env->eip += decode->len; 766 } 767 768 void simulate_wrmsr(struct CPUState *cpu) 769 { 770 X86CPU *x86_cpu = X86_CPU(cpu); 771 CPUX86State *env = &x86_cpu->env; 772 uint32_t msr = ECX(env); 773 uint64_t data = ((uint64_t)EDX(env) << 32) | EAX(env); 774 775 switch (msr) { 776 case MSR_IA32_TSC: 777 break; 778 case MSR_IA32_APICBASE: 779 cpu_set_apic_base(X86_CPU(cpu)->apic_state, data); 780 break; 781 case MSR_FSBASE: 782 wvmcs(cpu->hvf->fd, VMCS_GUEST_FS_BASE, data); 783 break; 784 case MSR_GSBASE: 785 wvmcs(cpu->hvf->fd, VMCS_GUEST_GS_BASE, data); 786 break; 787 case MSR_KERNELGSBASE: 788 wvmcs(cpu->hvf->fd, VMCS_HOST_FS_BASE, data); 789 break; 790 case MSR_STAR: 791 abort(); 792 break; 793 case MSR_LSTAR: 794 abort(); 795 break; 796 case MSR_CSTAR: 797 abort(); 798 break; 799 case MSR_EFER: 800 /*printf("new efer %llx\n", EFER(cpu));*/ 801 wvmcs(cpu->hvf->fd, VMCS_GUEST_IA32_EFER, data); 802 if (data & MSR_EFER_NXE) { 803 hv_vcpu_invalidate_tlb(cpu->hvf->fd); 804 } 805 break; 806 case MSR_MTRRphysBase(0): 807 case MSR_MTRRphysBase(1): 808 case MSR_MTRRphysBase(2): 809 case MSR_MTRRphysBase(3): 810 case MSR_MTRRphysBase(4): 811 case MSR_MTRRphysBase(5): 812 case MSR_MTRRphysBase(6): 813 case MSR_MTRRphysBase(7): 814 env->mtrr_var[(ECX(env) - MSR_MTRRphysBase(0)) / 2].base = data; 815 break; 816 case MSR_MTRRphysMask(0): 817 case MSR_MTRRphysMask(1): 818 case MSR_MTRRphysMask(2): 819 case MSR_MTRRphysMask(3): 820 case MSR_MTRRphysMask(4): 821 case MSR_MTRRphysMask(5): 822 case MSR_MTRRphysMask(6): 823 case MSR_MTRRphysMask(7): 824 env->mtrr_var[(ECX(env) - MSR_MTRRphysMask(0)) / 2].mask = data; 825 break; 826 case MSR_MTRRfix64K_00000: 827 env->mtrr_fixed[ECX(env) - MSR_MTRRfix64K_00000] = data; 828 break; 829 case MSR_MTRRfix16K_80000: 830 case MSR_MTRRfix16K_A0000: 831 env->mtrr_fixed[ECX(env) - MSR_MTRRfix16K_80000 + 1] = data; 832 break; 833 case MSR_MTRRfix4K_C0000: 834 case MSR_MTRRfix4K_C8000: 835 case MSR_MTRRfix4K_D0000: 836 case MSR_MTRRfix4K_D8000: 837 case MSR_MTRRfix4K_E0000: 838 case MSR_MTRRfix4K_E8000: 839 case MSR_MTRRfix4K_F0000: 840 case MSR_MTRRfix4K_F8000: 841 env->mtrr_fixed[ECX(env) - MSR_MTRRfix4K_C0000 + 3] = data; 842 break; 843 case MSR_MTRRdefType: 844 env->mtrr_deftype = data; 845 break; 846 default: 847 break; 848 } 849 850 /* Related to support known hypervisor interface */ 851 /* if (g_hypervisor_iface) 852 g_hypervisor_iface->wrmsr_handler(cpu, msr, data); 853 854 printf("write msr %llx\n", RCX(cpu));*/ 855 } 856 857 static void exec_wrmsr(CPUX86State *env, struct x86_decode *decode) 858 { 859 simulate_wrmsr(env_cpu(env)); 860 env->eip += decode->len; 861 } 862 863 /* 864 * flag: 865 * 0 - bt, 1 - btc, 2 - bts, 3 - btr 866 */ 867 static void do_bt(CPUX86State *env, struct x86_decode *decode, int flag) 868 { 869 int32_t displacement; 870 uint8_t index; 871 bool cf; 872 int mask = (4 == decode->operand_size) ? 0x1f : 0xf; 873 874 VM_PANIC_ON(decode->rex.rex); 875 876 fetch_operands(env, decode, 2, false, true, false); 877 index = decode->op[1].val & mask; 878 879 if (decode->op[0].type != X86_VAR_REG) { 880 if (4 == decode->operand_size) { 881 displacement = ((int32_t) (decode->op[1].val & 0xffffffe0)) / 32; 882 decode->op[0].ptr += 4 * displacement; 883 } else if (2 == decode->operand_size) { 884 displacement = ((int16_t) (decode->op[1].val & 0xfff0)) / 16; 885 decode->op[0].ptr += 2 * displacement; 886 } else { 887 VM_PANIC("bt 64bit\n"); 888 } 889 } 890 decode->op[0].val = read_val_ext(env, decode->op[0].ptr, 891 decode->operand_size); 892 cf = (decode->op[0].val >> index) & 0x01; 893 894 switch (flag) { 895 case 0: 896 set_CF(env, cf); 897 return; 898 case 1: 899 decode->op[0].val ^= (1u << index); 900 break; 901 case 2: 902 decode->op[0].val |= (1u << index); 903 break; 904 case 3: 905 decode->op[0].val &= ~(1u << index); 906 break; 907 } 908 write_val_ext(env, decode->op[0].ptr, decode->op[0].val, 909 decode->operand_size); 910 set_CF(env, cf); 911 } 912 913 static void exec_bt(CPUX86State *env, struct x86_decode *decode) 914 { 915 do_bt(env, decode, 0); 916 env->eip += decode->len; 917 } 918 919 static void exec_btc(CPUX86State *env, struct x86_decode *decode) 920 { 921 do_bt(env, decode, 1); 922 env->eip += decode->len; 923 } 924 925 static void exec_btr(CPUX86State *env, struct x86_decode *decode) 926 { 927 do_bt(env, decode, 3); 928 env->eip += decode->len; 929 } 930 931 static void exec_bts(CPUX86State *env, struct x86_decode *decode) 932 { 933 do_bt(env, decode, 2); 934 env->eip += decode->len; 935 } 936 937 void exec_shl(CPUX86State *env, struct x86_decode *decode) 938 { 939 uint8_t count; 940 int of = 0, cf = 0; 941 942 fetch_operands(env, decode, 2, true, true, false); 943 944 count = decode->op[1].val; 945 count &= 0x1f; /* count is masked to 5 bits*/ 946 if (!count) { 947 goto exit; 948 } 949 950 switch (decode->operand_size) { 951 case 1: 952 { 953 uint8_t res = 0; 954 if (count <= 8) { 955 res = (decode->op[0].val << count); 956 cf = (decode->op[0].val >> (8 - count)) & 0x1; 957 of = cf ^ (res >> 7); 958 } 959 960 write_val_ext(env, decode->op[0].ptr, res, 1); 961 SET_FLAGS_OSZAPC_LOGIC8(env, 0, 0, res); 962 SET_FLAGS_OxxxxC(env, of, cf); 963 break; 964 } 965 case 2: 966 { 967 uint16_t res = 0; 968 969 /* from bochs */ 970 if (count <= 16) { 971 res = (decode->op[0].val << count); 972 cf = (decode->op[0].val >> (16 - count)) & 0x1; 973 of = cf ^ (res >> 15); /* of = cf ^ result15 */ 974 } 975 976 write_val_ext(env, decode->op[0].ptr, res, 2); 977 SET_FLAGS_OSZAPC_LOGIC16(env, 0, 0, res); 978 SET_FLAGS_OxxxxC(env, of, cf); 979 break; 980 } 981 case 4: 982 { 983 uint32_t res = decode->op[0].val << count; 984 985 write_val_ext(env, decode->op[0].ptr, res, 4); 986 SET_FLAGS_OSZAPC_LOGIC32(env, 0, 0, res); 987 cf = (decode->op[0].val >> (32 - count)) & 0x1; 988 of = cf ^ (res >> 31); /* of = cf ^ result31 */ 989 SET_FLAGS_OxxxxC(env, of, cf); 990 break; 991 } 992 default: 993 abort(); 994 } 995 996 exit: 997 /* lflags_to_rflags(env); */ 998 env->eip += decode->len; 999 } 1000 1001 void exec_movsx(CPUX86State *env, struct x86_decode *decode) 1002 { 1003 int src_op_size; 1004 int op_size = decode->operand_size; 1005 1006 fetch_operands(env, decode, 2, false, false, false); 1007 1008 if (0xbe == decode->opcode[1]) { 1009 src_op_size = 1; 1010 } else { 1011 src_op_size = 2; 1012 } 1013 1014 decode->operand_size = src_op_size; 1015 calc_modrm_operand(env, decode, &decode->op[1]); 1016 decode->op[1].val = sign(read_val_ext(env, decode->op[1].ptr, src_op_size), 1017 src_op_size); 1018 1019 write_val_ext(env, decode->op[0].ptr, decode->op[1].val, op_size); 1020 1021 env->eip += decode->len; 1022 } 1023 1024 void exec_ror(CPUX86State *env, struct x86_decode *decode) 1025 { 1026 uint8_t count; 1027 1028 fetch_operands(env, decode, 2, true, true, false); 1029 count = decode->op[1].val; 1030 1031 switch (decode->operand_size) { 1032 case 1: 1033 { 1034 uint32_t bit6, bit7; 1035 uint8_t res; 1036 1037 if ((count & 0x07) == 0) { 1038 if (count & 0x18) { 1039 bit6 = ((uint8_t)decode->op[0].val >> 6) & 1; 1040 bit7 = ((uint8_t)decode->op[0].val >> 7) & 1; 1041 SET_FLAGS_OxxxxC(env, bit6 ^ bit7, bit7); 1042 } 1043 } else { 1044 count &= 0x7; /* use only bottom 3 bits */ 1045 res = ((uint8_t)decode->op[0].val >> count) | 1046 ((uint8_t)decode->op[0].val << (8 - count)); 1047 write_val_ext(env, decode->op[0].ptr, res, 1); 1048 bit6 = (res >> 6) & 1; 1049 bit7 = (res >> 7) & 1; 1050 /* set eflags: ROR count affects the following flags: C, O */ 1051 SET_FLAGS_OxxxxC(env, bit6 ^ bit7, bit7); 1052 } 1053 break; 1054 } 1055 case 2: 1056 { 1057 uint32_t bit14, bit15; 1058 uint16_t res; 1059 1060 if ((count & 0x0f) == 0) { 1061 if (count & 0x10) { 1062 bit14 = ((uint16_t)decode->op[0].val >> 14) & 1; 1063 bit15 = ((uint16_t)decode->op[0].val >> 15) & 1; 1064 /* of = result14 ^ result15 */ 1065 SET_FLAGS_OxxxxC(env, bit14 ^ bit15, bit15); 1066 } 1067 } else { 1068 count &= 0x0f; /* use only 4 LSB's */ 1069 res = ((uint16_t)decode->op[0].val >> count) | 1070 ((uint16_t)decode->op[0].val << (16 - count)); 1071 write_val_ext(env, decode->op[0].ptr, res, 2); 1072 1073 bit14 = (res >> 14) & 1; 1074 bit15 = (res >> 15) & 1; 1075 /* of = result14 ^ result15 */ 1076 SET_FLAGS_OxxxxC(env, bit14 ^ bit15, bit15); 1077 } 1078 break; 1079 } 1080 case 4: 1081 { 1082 uint32_t bit31, bit30; 1083 uint32_t res; 1084 1085 count &= 0x1f; 1086 if (count) { 1087 res = ((uint32_t)decode->op[0].val >> count) | 1088 ((uint32_t)decode->op[0].val << (32 - count)); 1089 write_val_ext(env, decode->op[0].ptr, res, 4); 1090 1091 bit31 = (res >> 31) & 1; 1092 bit30 = (res >> 30) & 1; 1093 /* of = result30 ^ result31 */ 1094 SET_FLAGS_OxxxxC(env, bit30 ^ bit31, bit31); 1095 } 1096 break; 1097 } 1098 } 1099 env->eip += decode->len; 1100 } 1101 1102 void exec_rol(CPUX86State *env, struct x86_decode *decode) 1103 { 1104 uint8_t count; 1105 1106 fetch_operands(env, decode, 2, true, true, false); 1107 count = decode->op[1].val; 1108 1109 switch (decode->operand_size) { 1110 case 1: 1111 { 1112 uint32_t bit0, bit7; 1113 uint8_t res; 1114 1115 if ((count & 0x07) == 0) { 1116 if (count & 0x18) { 1117 bit0 = ((uint8_t)decode->op[0].val & 1); 1118 bit7 = ((uint8_t)decode->op[0].val >> 7); 1119 SET_FLAGS_OxxxxC(env, bit0 ^ bit7, bit0); 1120 } 1121 } else { 1122 count &= 0x7; /* use only lowest 3 bits */ 1123 res = ((uint8_t)decode->op[0].val << count) | 1124 ((uint8_t)decode->op[0].val >> (8 - count)); 1125 1126 write_val_ext(env, decode->op[0].ptr, res, 1); 1127 /* set eflags: 1128 * ROL count affects the following flags: C, O 1129 */ 1130 bit0 = (res & 1); 1131 bit7 = (res >> 7); 1132 SET_FLAGS_OxxxxC(env, bit0 ^ bit7, bit0); 1133 } 1134 break; 1135 } 1136 case 2: 1137 { 1138 uint32_t bit0, bit15; 1139 uint16_t res; 1140 1141 if ((count & 0x0f) == 0) { 1142 if (count & 0x10) { 1143 bit0 = ((uint16_t)decode->op[0].val & 0x1); 1144 bit15 = ((uint16_t)decode->op[0].val >> 15); 1145 /* of = cf ^ result15 */ 1146 SET_FLAGS_OxxxxC(env, bit0 ^ bit15, bit0); 1147 } 1148 } else { 1149 count &= 0x0f; /* only use bottom 4 bits */ 1150 res = ((uint16_t)decode->op[0].val << count) | 1151 ((uint16_t)decode->op[0].val >> (16 - count)); 1152 1153 write_val_ext(env, decode->op[0].ptr, res, 2); 1154 bit0 = (res & 0x1); 1155 bit15 = (res >> 15); 1156 /* of = cf ^ result15 */ 1157 SET_FLAGS_OxxxxC(env, bit0 ^ bit15, bit0); 1158 } 1159 break; 1160 } 1161 case 4: 1162 { 1163 uint32_t bit0, bit31; 1164 uint32_t res; 1165 1166 count &= 0x1f; 1167 if (count) { 1168 res = ((uint32_t)decode->op[0].val << count) | 1169 ((uint32_t)decode->op[0].val >> (32 - count)); 1170 1171 write_val_ext(env, decode->op[0].ptr, res, 4); 1172 bit0 = (res & 0x1); 1173 bit31 = (res >> 31); 1174 /* of = cf ^ result31 */ 1175 SET_FLAGS_OxxxxC(env, bit0 ^ bit31, bit0); 1176 } 1177 break; 1178 } 1179 } 1180 env->eip += decode->len; 1181 } 1182 1183 1184 void exec_rcl(CPUX86State *env, struct x86_decode *decode) 1185 { 1186 uint8_t count; 1187 int of = 0, cf = 0; 1188 1189 fetch_operands(env, decode, 2, true, true, false); 1190 count = decode->op[1].val & 0x1f; 1191 1192 switch (decode->operand_size) { 1193 case 1: 1194 { 1195 uint8_t op1_8 = decode->op[0].val; 1196 uint8_t res; 1197 count %= 9; 1198 if (!count) { 1199 break; 1200 } 1201 1202 if (1 == count) { 1203 res = (op1_8 << 1) | get_CF(env); 1204 } else { 1205 res = (op1_8 << count) | (get_CF(env) << (count - 1)) | 1206 (op1_8 >> (9 - count)); 1207 } 1208 1209 write_val_ext(env, decode->op[0].ptr, res, 1); 1210 1211 cf = (op1_8 >> (8 - count)) & 0x01; 1212 of = cf ^ (res >> 7); /* of = cf ^ result7 */ 1213 SET_FLAGS_OxxxxC(env, of, cf); 1214 break; 1215 } 1216 case 2: 1217 { 1218 uint16_t res; 1219 uint16_t op1_16 = decode->op[0].val; 1220 1221 count %= 17; 1222 if (!count) { 1223 break; 1224 } 1225 1226 if (1 == count) { 1227 res = (op1_16 << 1) | get_CF(env); 1228 } else if (count == 16) { 1229 res = (get_CF(env) << 15) | (op1_16 >> 1); 1230 } else { /* 2..15 */ 1231 res = (op1_16 << count) | (get_CF(env) << (count - 1)) | 1232 (op1_16 >> (17 - count)); 1233 } 1234 1235 write_val_ext(env, decode->op[0].ptr, res, 2); 1236 1237 cf = (op1_16 >> (16 - count)) & 0x1; 1238 of = cf ^ (res >> 15); /* of = cf ^ result15 */ 1239 SET_FLAGS_OxxxxC(env, of, cf); 1240 break; 1241 } 1242 case 4: 1243 { 1244 uint32_t res; 1245 uint32_t op1_32 = decode->op[0].val; 1246 1247 if (!count) { 1248 break; 1249 } 1250 1251 if (1 == count) { 1252 res = (op1_32 << 1) | get_CF(env); 1253 } else { 1254 res = (op1_32 << count) | (get_CF(env) << (count - 1)) | 1255 (op1_32 >> (33 - count)); 1256 } 1257 1258 write_val_ext(env, decode->op[0].ptr, res, 4); 1259 1260 cf = (op1_32 >> (32 - count)) & 0x1; 1261 of = cf ^ (res >> 31); /* of = cf ^ result31 */ 1262 SET_FLAGS_OxxxxC(env, of, cf); 1263 break; 1264 } 1265 } 1266 env->eip += decode->len; 1267 } 1268 1269 void exec_rcr(CPUX86State *env, struct x86_decode *decode) 1270 { 1271 uint8_t count; 1272 int of = 0, cf = 0; 1273 1274 fetch_operands(env, decode, 2, true, true, false); 1275 count = decode->op[1].val & 0x1f; 1276 1277 switch (decode->operand_size) { 1278 case 1: 1279 { 1280 uint8_t op1_8 = decode->op[0].val; 1281 uint8_t res; 1282 1283 count %= 9; 1284 if (!count) { 1285 break; 1286 } 1287 res = (op1_8 >> count) | (get_CF(env) << (8 - count)) | 1288 (op1_8 << (9 - count)); 1289 1290 write_val_ext(env, decode->op[0].ptr, res, 1); 1291 1292 cf = (op1_8 >> (count - 1)) & 0x1; 1293 of = (((res << 1) ^ res) >> 7) & 0x1; /* of = result6 ^ result7 */ 1294 SET_FLAGS_OxxxxC(env, of, cf); 1295 break; 1296 } 1297 case 2: 1298 { 1299 uint16_t op1_16 = decode->op[0].val; 1300 uint16_t res; 1301 1302 count %= 17; 1303 if (!count) { 1304 break; 1305 } 1306 res = (op1_16 >> count) | (get_CF(env) << (16 - count)) | 1307 (op1_16 << (17 - count)); 1308 1309 write_val_ext(env, decode->op[0].ptr, res, 2); 1310 1311 cf = (op1_16 >> (count - 1)) & 0x1; 1312 of = ((uint16_t)((res << 1) ^ res) >> 15) & 0x1; /* of = result15 ^ 1313 result14 */ 1314 SET_FLAGS_OxxxxC(env, of, cf); 1315 break; 1316 } 1317 case 4: 1318 { 1319 uint32_t res; 1320 uint32_t op1_32 = decode->op[0].val; 1321 1322 if (!count) { 1323 break; 1324 } 1325 1326 if (1 == count) { 1327 res = (op1_32 >> 1) | (get_CF(env) << 31); 1328 } else { 1329 res = (op1_32 >> count) | (get_CF(env) << (32 - count)) | 1330 (op1_32 << (33 - count)); 1331 } 1332 1333 write_val_ext(env, decode->op[0].ptr, res, 4); 1334 1335 cf = (op1_32 >> (count - 1)) & 0x1; 1336 of = ((res << 1) ^ res) >> 31; /* of = result30 ^ result31 */ 1337 SET_FLAGS_OxxxxC(env, of, cf); 1338 break; 1339 } 1340 } 1341 env->eip += decode->len; 1342 } 1343 1344 static void exec_xchg(CPUX86State *env, struct x86_decode *decode) 1345 { 1346 fetch_operands(env, decode, 2, true, true, false); 1347 1348 write_val_ext(env, decode->op[0].ptr, decode->op[1].val, 1349 decode->operand_size); 1350 write_val_ext(env, decode->op[1].ptr, decode->op[0].val, 1351 decode->operand_size); 1352 1353 env->eip += decode->len; 1354 } 1355 1356 static void exec_xadd(CPUX86State *env, struct x86_decode *decode) 1357 { 1358 EXEC_2OP_FLAGS_CMD(env, decode, +, SET_FLAGS_OSZAPC_ADD, true); 1359 write_val_ext(env, decode->op[1].ptr, decode->op[0].val, 1360 decode->operand_size); 1361 1362 env->eip += decode->len; 1363 } 1364 1365 static struct cmd_handler { 1366 enum x86_decode_cmd cmd; 1367 void (*handler)(CPUX86State *env, struct x86_decode *ins); 1368 } handlers[] = { 1369 {X86_DECODE_CMD_INVL, NULL,}, 1370 {X86_DECODE_CMD_MOV, exec_mov}, 1371 {X86_DECODE_CMD_ADD, exec_add}, 1372 {X86_DECODE_CMD_OR, exec_or}, 1373 {X86_DECODE_CMD_ADC, exec_adc}, 1374 {X86_DECODE_CMD_SBB, exec_sbb}, 1375 {X86_DECODE_CMD_AND, exec_and}, 1376 {X86_DECODE_CMD_SUB, exec_sub}, 1377 {X86_DECODE_CMD_NEG, exec_neg}, 1378 {X86_DECODE_CMD_XOR, exec_xor}, 1379 {X86_DECODE_CMD_CMP, exec_cmp}, 1380 {X86_DECODE_CMD_INC, exec_inc}, 1381 {X86_DECODE_CMD_DEC, exec_dec}, 1382 {X86_DECODE_CMD_TST, exec_tst}, 1383 {X86_DECODE_CMD_NOT, exec_not}, 1384 {X86_DECODE_CMD_MOVZX, exec_movzx}, 1385 {X86_DECODE_CMD_OUT, exec_out}, 1386 {X86_DECODE_CMD_IN, exec_in}, 1387 {X86_DECODE_CMD_INS, exec_ins}, 1388 {X86_DECODE_CMD_OUTS, exec_outs}, 1389 {X86_DECODE_CMD_RDMSR, exec_rdmsr}, 1390 {X86_DECODE_CMD_WRMSR, exec_wrmsr}, 1391 {X86_DECODE_CMD_BT, exec_bt}, 1392 {X86_DECODE_CMD_BTR, exec_btr}, 1393 {X86_DECODE_CMD_BTC, exec_btc}, 1394 {X86_DECODE_CMD_BTS, exec_bts}, 1395 {X86_DECODE_CMD_SHL, exec_shl}, 1396 {X86_DECODE_CMD_ROL, exec_rol}, 1397 {X86_DECODE_CMD_ROR, exec_ror}, 1398 {X86_DECODE_CMD_RCR, exec_rcr}, 1399 {X86_DECODE_CMD_RCL, exec_rcl}, 1400 /*{X86_DECODE_CMD_CPUID, exec_cpuid},*/ 1401 {X86_DECODE_CMD_MOVS, exec_movs}, 1402 {X86_DECODE_CMD_CMPS, exec_cmps}, 1403 {X86_DECODE_CMD_STOS, exec_stos}, 1404 {X86_DECODE_CMD_SCAS, exec_scas}, 1405 {X86_DECODE_CMD_LODS, exec_lods}, 1406 {X86_DECODE_CMD_MOVSX, exec_movsx}, 1407 {X86_DECODE_CMD_XCHG, exec_xchg}, 1408 {X86_DECODE_CMD_XADD, exec_xadd}, 1409 }; 1410 1411 static struct cmd_handler _cmd_handler[X86_DECODE_CMD_LAST]; 1412 1413 static void init_cmd_handler() 1414 { 1415 int i; 1416 for (i = 0; i < ARRAY_SIZE(handlers); i++) { 1417 _cmd_handler[handlers[i].cmd] = handlers[i]; 1418 } 1419 } 1420 1421 void load_regs(struct CPUState *cpu) 1422 { 1423 X86CPU *x86_cpu = X86_CPU(cpu); 1424 CPUX86State *env = &x86_cpu->env; 1425 1426 int i = 0; 1427 RRX(env, R_EAX) = rreg(cpu->hvf->fd, HV_X86_RAX); 1428 RRX(env, R_EBX) = rreg(cpu->hvf->fd, HV_X86_RBX); 1429 RRX(env, R_ECX) = rreg(cpu->hvf->fd, HV_X86_RCX); 1430 RRX(env, R_EDX) = rreg(cpu->hvf->fd, HV_X86_RDX); 1431 RRX(env, R_ESI) = rreg(cpu->hvf->fd, HV_X86_RSI); 1432 RRX(env, R_EDI) = rreg(cpu->hvf->fd, HV_X86_RDI); 1433 RRX(env, R_ESP) = rreg(cpu->hvf->fd, HV_X86_RSP); 1434 RRX(env, R_EBP) = rreg(cpu->hvf->fd, HV_X86_RBP); 1435 for (i = 8; i < 16; i++) { 1436 RRX(env, i) = rreg(cpu->hvf->fd, HV_X86_RAX + i); 1437 } 1438 1439 env->eflags = rreg(cpu->hvf->fd, HV_X86_RFLAGS); 1440 rflags_to_lflags(env); 1441 env->eip = rreg(cpu->hvf->fd, HV_X86_RIP); 1442 } 1443 1444 void store_regs(struct CPUState *cpu) 1445 { 1446 X86CPU *x86_cpu = X86_CPU(cpu); 1447 CPUX86State *env = &x86_cpu->env; 1448 1449 int i = 0; 1450 wreg(cpu->hvf->fd, HV_X86_RAX, RAX(env)); 1451 wreg(cpu->hvf->fd, HV_X86_RBX, RBX(env)); 1452 wreg(cpu->hvf->fd, HV_X86_RCX, RCX(env)); 1453 wreg(cpu->hvf->fd, HV_X86_RDX, RDX(env)); 1454 wreg(cpu->hvf->fd, HV_X86_RSI, RSI(env)); 1455 wreg(cpu->hvf->fd, HV_X86_RDI, RDI(env)); 1456 wreg(cpu->hvf->fd, HV_X86_RBP, RBP(env)); 1457 wreg(cpu->hvf->fd, HV_X86_RSP, RSP(env)); 1458 for (i = 8; i < 16; i++) { 1459 wreg(cpu->hvf->fd, HV_X86_RAX + i, RRX(env, i)); 1460 } 1461 1462 lflags_to_rflags(env); 1463 wreg(cpu->hvf->fd, HV_X86_RFLAGS, env->eflags); 1464 macvm_set_rip(cpu, env->eip); 1465 } 1466 1467 bool exec_instruction(CPUX86State *env, struct x86_decode *ins) 1468 { 1469 /*if (hvf_vcpu_id(cpu)) 1470 printf("%d, %llx: exec_instruction %s\n", hvf_vcpu_id(cpu), env->eip, 1471 decode_cmd_to_string(ins->cmd));*/ 1472 1473 if (!_cmd_handler[ins->cmd].handler) { 1474 printf("Unimplemented handler (%llx) for %d (%x %x) \n", env->eip, 1475 ins->cmd, ins->opcode[0], 1476 ins->opcode_len > 1 ? ins->opcode[1] : 0); 1477 env->eip += ins->len; 1478 return true; 1479 } 1480 1481 _cmd_handler[ins->cmd].handler(env, ins); 1482 return true; 1483 } 1484 1485 void init_emu() 1486 { 1487 init_cmd_handler(); 1488 } 1489