1 /*
2 * Copyright (C) 2016 Veertu Inc,
3 * Copyright (C) 2017 Google Inc,
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public
7 * License as published by the Free Software Foundation; either
8 * version 2.1 of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
14 *
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this program; if not, see <http://www.gnu.org/licenses/>.
17 */
18
19 /////////////////////////////////////////////////////////////////////////
20 //
21 // Copyright (C) 2001-2012 The Bochs Project
22 //
23 // This library is free software; you can redistribute it and/or
24 // modify it under the terms of the GNU Lesser General Public
25 // License as published by the Free Software Foundation; either
26 // version 2.1 of the License, or (at your option) any later version.
27 //
28 // This library is distributed in the hope that it will be useful,
29 // but WITHOUT ANY WARRANTY; without even the implied warranty of
30 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
31 // Lesser General Public License for more details.
32 //
33 // You should have received a copy of the GNU Lesser General Public
34 // License along with this library; if not, write to the Free Software
35 // Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
36 /////////////////////////////////////////////////////////////////////////
37
38 #include "qemu/osdep.h"
39 #include "panic.h"
40 #include "x86_decode.h"
41 #include "x86.h"
42 #include "x86_emu.h"
43 #include "x86_mmu.h"
44 #include "x86_flags.h"
45 #include "vmcs.h"
46 #include "vmx.h"
47
48 void hvf_handle_io(CPUState *cs, uint16_t port, void *data,
49 int direction, int size, uint32_t count);
50
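/*
 * Evaluate a two-operand ALU operation ("v1 cmd v2") at the decoded operand
 * size, optionally write the result back to operand 0, and update the
 * arithmetic flags via the size-specific FLAGS_FUNC##8/16/32 helper.
 */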
51 #define EXEC_2OP_FLAGS_CMD(env, decode, cmd, FLAGS_FUNC, save_res) \
52 { \
53 fetch_operands(env, decode, 2, true, true, false); \
54 switch (decode->operand_size) { \
55 case 1: \
56 { \
57 uint8_t v1 = (uint8_t)decode->op[0].val; \
58 uint8_t v2 = (uint8_t)decode->op[1].val; \
59 uint8_t diff = v1 cmd v2; \
60 if (save_res) { \
61 write_val_ext(env, decode->op[0].ptr, diff, 1); \
62 } \
63 FLAGS_FUNC##8(env, v1, v2, diff); \
64 break; \
65 } \
66 case 2: \
67 { \
68 uint16_t v1 = (uint16_t)decode->op[0].val; \
69 uint16_t v2 = (uint16_t)decode->op[1].val; \
70 uint16_t diff = v1 cmd v2; \
71 if (save_res) { \
72 write_val_ext(env, decode->op[0].ptr, diff, 2); \
73 } \
74 FLAGS_FUNC##16(env, v1, v2, diff); \
75 break; \
76 } \
77 case 4: \
78 { \
79 uint32_t v1 = (uint32_t)decode->op[0].val; \
80 uint32_t v2 = (uint32_t)decode->op[1].val; \
81 uint32_t diff = v1 cmd v2; \
82 if (save_res) { \
83 write_val_ext(env, decode->op[0].ptr, diff, 4); \
84 } \
85 FLAGS_FUNC##32(env, v1, v2, diff); \
86 break; \
87 } \
88 default: \
89 VM_PANIC("bad size\n"); \
90 } \
91 } \
92
93 target_ulong read_reg(CPUX86State *env, int reg, int size)
94 {
95 switch (size) {
96 case 1:
97 return x86_reg(env, reg)->lx;
98 case 2:
99 return x86_reg(env, reg)->rx;
100 case 4:
101 return x86_reg(env, reg)->erx;
102 case 8:
103 return x86_reg(env, reg)->rrx;
104 default:
105 abort();
106 }
107 return 0;
108 }
109
110 void write_reg(CPUX86State *env, int reg, target_ulong val, int size)
111 {
112 switch (size) {
113 case 1:
114 x86_reg(env, reg)->lx = val;
115 break;
116 case 2:
117 x86_reg(env, reg)->rx = val;
118 break;
119 case 4:
120 x86_reg(env, reg)->rrx = (uint32_t)val;
121 break;
122 case 8:
123 x86_reg(env, reg)->rrx = val;
124 break;
125 default:
126 abort();
127 }
128 }
129
130 target_ulong read_val_from_reg(target_ulong reg_ptr, int size)
131 {
132 target_ulong val;
133
134 switch (size) {
135 case 1:
136 val = *(uint8_t *)reg_ptr;
137 break;
138 case 2:
139 val = *(uint16_t *)reg_ptr;
140 break;
141 case 4:
142 val = *(uint32_t *)reg_ptr;
143 break;
144 case 8:
145 val = *(uint64_t *)reg_ptr;
146 break;
147 default:
148 abort();
149 }
150 return val;
151 }
152
153 void write_val_to_reg(target_ulong reg_ptr, target_ulong val, int size)
154 {
155 switch (size) {
156 case 1:
157 *(uint8_t *)reg_ptr = val;
158 break;
159 case 2:
160 *(uint16_t *)reg_ptr = val;
161 break;
162 case 4:
163 *(uint64_t *)reg_ptr = (uint32_t)val;
164 break;
165 case 8:
166 *(uint64_t *)reg_ptr = val;
167 break;
168 default:
169 abort();
170 }
171 }
172
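/* True when ptr points into env->regs[], i.e. the operand is a CPU register
 * cached on the host rather than guest memory. */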
173 static bool is_host_reg(CPUX86State *env, target_ulong ptr)
174 {
175 return (ptr - (target_ulong)&env->regs[0]) < sizeof(env->regs);
176 }
177
178 void write_val_ext(CPUX86State *env, target_ulong ptr, target_ulong val, int size)
179 {
180 if (is_host_reg(env, ptr)) {
181 write_val_to_reg(ptr, val, size);
182 return;
183 }
184 vmx_write_mem(env_cpu(env), ptr, &val, size);
185 }
186
187 uint8_t *read_mmio(CPUX86State *env, target_ulong ptr, int bytes)
188 {
189 vmx_read_mem(env_cpu(env), env->hvf_mmio_buf, ptr, bytes);
190 return env->hvf_mmio_buf;
191 }
192
193
194 target_ulong read_val_ext(CPUX86State *env, target_ulong ptr, int size)
195 {
196 target_ulong val;
197 uint8_t *mmio_ptr;
198
199 if (is_host_reg(env, ptr)) {
200 return read_val_from_reg(ptr, size);
201 }
202
203 mmio_ptr = read_mmio(env, ptr, size);
204 switch (size) {
205 case 1:
206 val = *(uint8_t *)mmio_ptr;
207 break;
208 case 2:
209 val = *(uint16_t *)mmio_ptr;
210 break;
211 case 4:
212 val = *(uint32_t *)mmio_ptr;
213 break;
214 case 8:
215 val = *(uint64_t *)mmio_ptr;
216 break;
217 default:
218 VM_PANIC("bad size\n");
219 break;
220 }
221 return val;
222 }
223
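/*
 * Resolve up to n operands of the decoded instruction: compute the pointer
 * for register/memory operands and, where the corresponding val_opN flag is
 * set, read the current value at the decoded operand size.
 */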
224 static void fetch_operands(CPUX86State *env, struct x86_decode *decode,
225 int n, bool val_op0, bool val_op1, bool val_op2)
226 {
227 int i;
228 bool calc_val[3] = {val_op0, val_op1, val_op2};
229
230 for (i = 0; i < n; i++) {
231 switch (decode->op[i].type) {
232 case X86_VAR_IMMEDIATE:
233 break;
234 case X86_VAR_REG:
235 VM_PANIC_ON(!decode->op[i].ptr);
236 if (calc_val[i]) {
237 decode->op[i].val = read_val_from_reg(decode->op[i].ptr,
238 decode->operand_size);
239 }
240 break;
241 case X86_VAR_RM:
242 calc_modrm_operand(env, decode, &decode->op[i]);
243 if (calc_val[i]) {
244 decode->op[i].val = read_val_ext(env, decode->op[i].ptr,
245 decode->operand_size);
246 }
247 break;
248 case X86_VAR_OFFSET:
249 decode->op[i].ptr = decode_linear_addr(env, decode,
250 decode->op[i].ptr,
251 R_DS);
252 if (calc_val[i]) {
253 decode->op[i].val = read_val_ext(env, decode->op[i].ptr,
254 decode->operand_size);
255 }
256 break;
257 default:
258 break;
259 }
260 }
261 }
262
263 static void exec_mov(CPUX86State *env, struct x86_decode *decode)
264 {
265 fetch_operands(env, decode, 2, false, true, false);
266 write_val_ext(env, decode->op[0].ptr, decode->op[1].val,
267 decode->operand_size);
268
269 env->eip += decode->len;
270 }
271
272 static void exec_add(CPUX86State *env, struct x86_decode *decode)
273 {
274 EXEC_2OP_FLAGS_CMD(env, decode, +, SET_FLAGS_OSZAPC_ADD, true);
275 env->eip += decode->len;
276 }
277
278 static void exec_or(CPUX86State *env, struct x86_decode *decode)
279 {
280 EXEC_2OP_FLAGS_CMD(env, decode, |, SET_FLAGS_OSZAPC_LOGIC, true);
281 env->eip += decode->len;
282 }
283
284 static void exec_adc(CPUX86State *env, struct x86_decode *decode)
285 {
286 EXEC_2OP_FLAGS_CMD(env, decode, +get_CF(env)+, SET_FLAGS_OSZAPC_ADD, true);
287 env->eip += decode->len;
288 }
289
290 static void exec_sbb(CPUX86State *env, struct x86_decode *decode)
291 {
292 EXEC_2OP_FLAGS_CMD(env, decode, -get_CF(env)-, SET_FLAGS_OSZAPC_SUB, true);
293 env->eip += decode->len;
294 }
295
296 static void exec_and(CPUX86State *env, struct x86_decode *decode)
297 {
298 EXEC_2OP_FLAGS_CMD(env, decode, &, SET_FLAGS_OSZAPC_LOGIC, true);
299 env->eip += decode->len;
300 }
301
302 static void exec_sub(CPUX86State *env, struct x86_decode *decode)
303 {
304 EXEC_2OP_FLAGS_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, true);
305 env->eip += decode->len;
306 }
307
308 static void exec_xor(CPUX86State *env, struct x86_decode *decode)
309 {
310 EXEC_2OP_FLAGS_CMD(env, decode, ^, SET_FLAGS_OSZAPC_LOGIC, true);
311 env->eip += decode->len;
312 }
313
314 static void exec_neg(CPUX86State *env, struct x86_decode *decode)
315 {
316 /*EXEC_2OP_FLAGS_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, false);*/
317 int32_t val;
318 fetch_operands(env, decode, 2, true, true, false);
319
320 val = 0 - sign(decode->op[1].val, decode->operand_size);
321 write_val_ext(env, decode->op[1].ptr, val, decode->operand_size);
322
323 if (4 == decode->operand_size) {
324 SET_FLAGS_OSZAPC_SUB32(env, 0, 0 - val, val);
325 } else if (2 == decode->operand_size) {
326 SET_FLAGS_OSZAPC_SUB16(env, 0, 0 - val, val);
327 } else if (1 == decode->operand_size) {
328 SET_FLAGS_OSZAPC_SUB8(env, 0, 0 - val, val);
329 } else {
330 VM_PANIC("bad op size\n");
331 }
332
333 /*lflags_to_rflags(env);*/
334 env->eip += decode->len;
335 }
336
337 static void exec_cmp(CPUX86State *env, struct x86_decode *decode)
338 {
339 EXEC_2OP_FLAGS_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, false);
340 env->eip += decode->len;
341 }
342
343 static void exec_inc(CPUX86State *env, struct x86_decode *decode)
344 {
345 decode->op[1].type = X86_VAR_IMMEDIATE;
346 decode->op[1].val = 0;
347
348 EXEC_2OP_FLAGS_CMD(env, decode, +1+, SET_FLAGS_OSZAP_ADD, true);
349
350 env->eip += decode->len;
351 }
352
353 static void exec_dec(CPUX86State *env, struct x86_decode *decode)
354 {
355 decode->op[1].type = X86_VAR_IMMEDIATE;
356 decode->op[1].val = 0;
357
358 EXEC_2OP_FLAGS_CMD(env, decode, -1-, SET_FLAGS_OSZAP_SUB, true);
359 env->eip += decode->len;
360 }
361
362 static void exec_tst(CPUX86State *env, struct x86_decode *decode)
363 {
364 EXEC_2OP_FLAGS_CMD(env, decode, &, SET_FLAGS_OSZAPC_LOGIC, false);
365 env->eip += decode->len;
366 }
367
368 static void exec_not(CPUX86State *env, struct x86_decode *decode)
369 {
370 fetch_operands(env, decode, 1, true, false, false);
371
372 write_val_ext(env, decode->op[0].ptr, ~decode->op[0].val,
373 decode->operand_size);
374 env->eip += decode->len;
375 }
376
377 void exec_movzx(CPUX86State *env, struct x86_decode *decode)
378 {
379 int src_op_size;
380 int op_size = decode->operand_size;
381
382 fetch_operands(env, decode, 1, false, false, false);
383
384 if (0xb6 == decode->opcode[1]) {
385 src_op_size = 1;
386 } else {
387 src_op_size = 2;
388 }
389 decode->operand_size = src_op_size;
390 calc_modrm_operand(env, decode, &decode->op[1]);
391 decode->op[1].val = read_val_ext(env, decode->op[1].ptr, src_op_size);
392 write_val_ext(env, decode->op[0].ptr, decode->op[1].val, op_size);
393
394 env->eip += decode->len;
395 }
396
397 static void exec_out(CPUX86State *env, struct x86_decode *decode)
398 {
399 switch (decode->opcode[0]) {
400 case 0xe6:
401 hvf_handle_io(env_cpu(env), decode->op[0].val, &AL(env), 1, 1, 1);
402 break;
403 case 0xe7:
404 hvf_handle_io(env_cpu(env), decode->op[0].val, &RAX(env), 1,
405 decode->operand_size, 1);
406 break;
407 case 0xee:
408 hvf_handle_io(env_cpu(env), DX(env), &AL(env), 1, 1, 1);
409 break;
410 case 0xef:
411 hvf_handle_io(env_cpu(env), DX(env), &RAX(env), 1,
412 decode->operand_size, 1);
413 break;
414 default:
415 VM_PANIC("Bad out opcode\n");
416 break;
417 }
418 env->eip += decode->len;
419 }
420
421 static void exec_in(CPUX86State *env, struct x86_decode *decode)
422 {
423 target_ulong val = 0;
424 switch (decode->opcode[0]) {
425 case 0xe4:
426 hvf_handle_io(env_cpu(env), decode->op[0].val, &AL(env), 0, 1, 1);
427 break;
428 case 0xe5:
429 hvf_handle_io(env_cpu(env), decode->op[0].val, &val, 0,
430 decode->operand_size, 1);
431 if (decode->operand_size == 2) {
432 AX(env) = val;
433 } else {
434 RAX(env) = (uint32_t)val;
435 }
436 break;
437 case 0xec:
438 hvf_handle_io(env_cpu(env), DX(env), &AL(env), 0, 1, 1);
439 break;
440 case 0xed:
441 hvf_handle_io(env_cpu(env), DX(env), &val, 0, decode->operand_size, 1);
442 if (decode->operand_size == 2) {
443 AX(env) = val;
444 } else {
445 RAX(env) = (uint32_t)val;
446 }
447
448 break;
449 default:
450 VM_PANIC("Bad in opcode\n");
451 break;
452 }
453
454 env->eip += decode->len;
455 }
456
457 static inline void string_increment_reg(CPUX86State *env, int reg,
458 struct x86_decode *decode)
459 {
460 target_ulong val = read_reg(env, reg, decode->addressing_size);
461 if (env->eflags & DF_MASK) {
462 val -= decode->operand_size;
463 } else {
464 val += decode->operand_size;
465 }
466 write_reg(env, reg, val, decode->addressing_size);
467 }
468
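/*
 * Run one iteration of a string instruction per loop pass while RCX counts
 * down; REP/REPZ stops early when ZF is cleared and REPNZ stops when ZF is
 * set (rep == 0 means no conditional termination).
 */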
469 static inline void string_rep(CPUX86State *env, struct x86_decode *decode,
470 void (*func)(CPUX86State *env,
471 struct x86_decode *ins), int rep)
472 {
473 target_ulong rcx = read_reg(env, R_ECX, decode->addressing_size);
474 while (rcx--) {
475 func(env, decode);
476 write_reg(env, R_ECX, rcx, decode->addressing_size);
477 if ((PREFIX_REP == rep) && !get_ZF(env)) {
478 break;
479 }
480 if ((PREFIX_REPN == rep) && get_ZF(env)) {
481 break;
482 }
483 }
484 }
485
486 static void exec_ins_single(CPUX86State *env, struct x86_decode *decode)
487 {
488 target_ulong addr = linear_addr_size(env_cpu(env), RDI(env),
489 decode->addressing_size, R_ES);
490
491 hvf_handle_io(env_cpu(env), DX(env), env->hvf_mmio_buf, 0,
492 decode->operand_size, 1);
493 vmx_write_mem(env_cpu(env), addr, env->hvf_mmio_buf,
494 decode->operand_size);
495
496 string_increment_reg(env, R_EDI, decode);
497 }
498
499 static void exec_ins(CPUX86State *env, struct x86_decode *decode)
500 {
501 if (decode->rep) {
502 string_rep(env, decode, exec_ins_single, 0);
503 } else {
504 exec_ins_single(env, decode);
505 }
506
507 env->eip += decode->len;
508 }
509
510 static void exec_outs_single(CPUX86State *env, struct x86_decode *decode)
511 {
512 target_ulong addr = decode_linear_addr(env, decode, RSI(env), R_DS);
513
514 vmx_read_mem(env_cpu(env), env->hvf_mmio_buf, addr,
515 decode->operand_size);
516 hvf_handle_io(env_cpu(env), DX(env), env->hvf_mmio_buf, 1,
517 decode->operand_size, 1);
518
519 string_increment_reg(env, R_ESI, decode);
520 }
521
522 static void exec_outs(CPUX86State *env, struct x86_decode *decode)
523 {
524 if (decode->rep) {
525 string_rep(env, decode, exec_outs_single, 0);
526 } else {
527 exec_outs_single(env, decode);
528 }
529
530 env->eip += decode->len;
531 }
532
533 static void exec_movs_single(CPUX86State *env, struct x86_decode *decode)
534 {
535 target_ulong src_addr;
536 target_ulong dst_addr;
537 target_ulong val;
538
539 src_addr = decode_linear_addr(env, decode, RSI(env), R_DS);
540 dst_addr = linear_addr_size(env_cpu(env), RDI(env),
541 decode->addressing_size, R_ES);
542
543 val = read_val_ext(env, src_addr, decode->operand_size);
544 write_val_ext(env, dst_addr, val, decode->operand_size);
545
546 string_increment_reg(env, R_ESI, decode);
547 string_increment_reg(env, R_EDI, decode);
548 }
549
550 static void exec_movs(CPUX86State *env, struct x86_decode *decode)
551 {
552 if (decode->rep) {
553 string_rep(env, decode, exec_movs_single, 0);
554 } else {
555 exec_movs_single(env, decode);
556 }
557
558 env->eip += decode->len;
559 }
560
561 static void exec_cmps_single(CPUX86State *env, struct x86_decode *decode)
562 {
563 target_ulong src_addr;
564 target_ulong dst_addr;
565
566 src_addr = decode_linear_addr(env, decode, RSI(env), R_DS);
567 dst_addr = linear_addr_size(env_cpu(env), RDI(env),
568 decode->addressing_size, R_ES);
569
570 decode->op[0].type = X86_VAR_IMMEDIATE;
571 decode->op[0].val = read_val_ext(env, src_addr, decode->operand_size);
572 decode->op[1].type = X86_VAR_IMMEDIATE;
573 decode->op[1].val = read_val_ext(env, dst_addr, decode->operand_size);
574
575 EXEC_2OP_FLAGS_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, false);
576
577 string_increment_reg(env, R_ESI, decode);
578 string_increment_reg(env, R_EDI, decode);
579 }
580
581 static void exec_cmps(CPUX86State *env, struct x86_decode *decode)
582 {
583 if (decode->rep) {
584 string_rep(env, decode, exec_cmps_single, decode->rep);
585 } else {
586 exec_cmps_single(env, decode);
587 }
588 env->eip += decode->len;
589 }
590
591
592 static void exec_stos_single(CPUX86State *env, struct x86_decode *decode)
593 {
594 target_ulong addr;
595 target_ulong val;
596
597 addr = linear_addr_size(env_cpu(env), RDI(env),
598 decode->addressing_size, R_ES);
599 val = read_reg(env, R_EAX, decode->operand_size);
600 vmx_write_mem(env_cpu(env), addr, &val, decode->operand_size);
601
602 string_increment_reg(env, R_EDI, decode);
603 }
604
605
606 static void exec_stos(CPUX86State *env, struct x86_decode *decode)
607 {
608 if (decode->rep) {
609 string_rep(env, decode, exec_stos_single, 0);
610 } else {
611 exec_stos_single(env, decode);
612 }
613
614 env->eip += decode->len;
615 }
616
617 static void exec_scas_single(CPUX86State *env, struct x86_decode *decode)
618 {
619 target_ulong addr;
620
621 addr = linear_addr_size(env_cpu(env), RDI(env),
622 decode->addressing_size, R_ES);
623 decode->op[1].type = X86_VAR_IMMEDIATE;
624 vmx_read_mem(env_cpu(env), &decode->op[1].val, addr, decode->operand_size);
625
626 EXEC_2OP_FLAGS_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, false);
627 string_increment_reg(env, R_EDI, decode);
628 }
629
630 static void exec_scas(CPUX86State *env, struct x86_decode *decode)
631 {
632 decode->op[0].type = X86_VAR_REG;
633 decode->op[0].reg = R_EAX;
634 if (decode->rep) {
635 string_rep(env, decode, exec_scas_single, decode->rep);
636 } else {
637 exec_scas_single(env, decode);
638 }
639
640 env->eip += decode->len;
641 }
642
643 static void exec_lods_single(CPUX86State *env, struct x86_decode *decode)
644 {
645 target_ulong addr;
646 target_ulong val = 0;
647
648 addr = decode_linear_addr(env, decode, RSI(env), R_DS);
649 vmx_read_mem(env_cpu(env), &val, addr, decode->operand_size);
650 write_reg(env, R_EAX, val, decode->operand_size);
651
652 string_increment_reg(env, R_ESI, decode);
653 }
654
655 static void exec_lods(CPUX86State *env, struct x86_decode *decode)
656 {
657 if (decode->rep) {
658 string_rep(env, decode, exec_lods_single, 0);
659 } else {
660 exec_lods_single(env, decode);
661 }
662
663 env->eip += decode->len;
664 }
665
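/* Record a pending exception (with error code) for injection into the guest. */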
666 static void raise_exception(CPUX86State *env, int exception_index,
667 int error_code)
668 {
669 env->exception_nr = exception_index;
670 env->error_code = error_code;
671 env->has_error_code = true;
672 env->exception_injected = 1;
673 }
674
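/* Emulate RDMSR: read the MSR selected by ECX and return it in EDX:EAX. */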
675 void simulate_rdmsr(CPUX86State *env)
676 {
677 X86CPU *cpu = env_archcpu(env);
678 CPUState *cs = env_cpu(env);
679 uint32_t msr = ECX(env);
680 uint64_t val = 0;
681
682 switch (msr) {
683 case MSR_IA32_TSC:
684 val = rdtscp() + rvmcs(cs->accel->fd, VMCS_TSC_OFFSET);
685 break;
686 case MSR_IA32_APICBASE:
687 val = cpu_get_apic_base(cpu->apic_state);
688 break;
689 case MSR_APIC_START ... MSR_APIC_END: {
690 int ret;
691 int index = (uint32_t)env->regs[R_ECX] - MSR_APIC_START;
692
693 ret = apic_msr_read(index, &val);
694 if (ret < 0) {
695 raise_exception(env, EXCP0D_GPF, 0);
696 }
697
698 break;
699 }
700 case MSR_IA32_UCODE_REV:
701 val = cpu->ucode_rev;
702 break;
703 case MSR_EFER:
704 val = rvmcs(cs->accel->fd, VMCS_GUEST_IA32_EFER);
705 break;
706 case MSR_FSBASE:
707 val = rvmcs(cs->accel->fd, VMCS_GUEST_FS_BASE);
708 break;
709 case MSR_GSBASE:
710 val = rvmcs(cs->accel->fd, VMCS_GUEST_GS_BASE);
711 break;
712 case MSR_KERNELGSBASE:
713 val = rvmcs(cs->accel->fd, VMCS_HOST_FS_BASE);
714 break;
715 case MSR_STAR:
716 abort();
717 break;
718 case MSR_LSTAR:
719 abort();
720 break;
721 case MSR_CSTAR:
722 abort();
723 break;
724 case MSR_IA32_MISC_ENABLE:
725 val = env->msr_ia32_misc_enable;
726 break;
727 case MSR_MTRRphysBase(0):
728 case MSR_MTRRphysBase(1):
729 case MSR_MTRRphysBase(2):
730 case MSR_MTRRphysBase(3):
731 case MSR_MTRRphysBase(4):
732 case MSR_MTRRphysBase(5):
733 case MSR_MTRRphysBase(6):
734 case MSR_MTRRphysBase(7):
735 val = env->mtrr_var[(ECX(env) - MSR_MTRRphysBase(0)) / 2].base;
736 break;
737 case MSR_MTRRphysMask(0):
738 case MSR_MTRRphysMask(1):
739 case MSR_MTRRphysMask(2):
740 case MSR_MTRRphysMask(3):
741 case MSR_MTRRphysMask(4):
742 case MSR_MTRRphysMask(5):
743 case MSR_MTRRphysMask(6):
744 case MSR_MTRRphysMask(7):
745 val = env->mtrr_var[(ECX(env) - MSR_MTRRphysMask(0)) / 2].mask;
746 break;
747 case MSR_MTRRfix64K_00000:
748 val = env->mtrr_fixed[0];
749 break;
750 case MSR_MTRRfix16K_80000:
751 case MSR_MTRRfix16K_A0000:
752 val = env->mtrr_fixed[ECX(env) - MSR_MTRRfix16K_80000 + 1];
753 break;
754 case MSR_MTRRfix4K_C0000:
755 case MSR_MTRRfix4K_C8000:
756 case MSR_MTRRfix4K_D0000:
757 case MSR_MTRRfix4K_D8000:
758 case MSR_MTRRfix4K_E0000:
759 case MSR_MTRRfix4K_E8000:
760 case MSR_MTRRfix4K_F0000:
761 case MSR_MTRRfix4K_F8000:
762 val = env->mtrr_fixed[ECX(env) - MSR_MTRRfix4K_C0000 + 3];
763 break;
764 case MSR_MTRRdefType:
765 val = env->mtrr_deftype;
766 break;
767 case MSR_CORE_THREAD_COUNT:
768 val = cs->nr_threads * cs->nr_cores; /* thread count, bits 15..0 */
769 val |= ((uint32_t)cs->nr_cores << 16); /* core count, bits 31..16 */
770 break;
771 default:
772 /* fprintf(stderr, "%s: unknown msr 0x%x\n", __func__, msr); */
773 val = 0;
774 break;
775 }
776
777 RAX(env) = (uint32_t)val;
778 RDX(env) = (uint32_t)(val >> 32);
779 }
780
781 static void exec_rdmsr(CPUX86State *env, struct x86_decode *decode)
782 {
783 simulate_rdmsr(env);
784 env->eip += decode->len;
785 }
786
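/* Emulate WRMSR: write the EDX:EAX value to the MSR selected by ECX. */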
787 void simulate_wrmsr(CPUX86State *env)
788 {
789 X86CPU *cpu = env_archcpu(env);
790 CPUState *cs = env_cpu(env);
791 uint32_t msr = ECX(env);
792 uint64_t data = ((uint64_t)EDX(env) << 32) | EAX(env);
793
794 switch (msr) {
795 case MSR_IA32_TSC:
796 break;
797 case MSR_IA32_APICBASE: {
798 int r;
799
800 r = cpu_set_apic_base(cpu->apic_state, data);
801 if (r < 0) {
802 raise_exception(env, EXCP0D_GPF, 0);
803 }
804
805 break;
806 }
807 case MSR_APIC_START ... MSR_APIC_END: {
808 int ret;
809 int index = (uint32_t)env->regs[R_ECX] - MSR_APIC_START;
810
811 ret = apic_msr_write(index, data);
812 if (ret < 0) {
813 raise_exception(env, EXCP0D_GPF, 0);
814 }
815
816 break;
817 }
818 case MSR_FSBASE:
819 wvmcs(cs->accel->fd, VMCS_GUEST_FS_BASE, data);
820 break;
821 case MSR_GSBASE:
822 wvmcs(cs->accel->fd, VMCS_GUEST_GS_BASE, data);
823 break;
824 case MSR_KERNELGSBASE:
825 wvmcs(cs->accel->fd, VMCS_HOST_FS_BASE, data);
826 break;
827 case MSR_STAR:
828 abort();
829 break;
830 case MSR_LSTAR:
831 abort();
832 break;
833 case MSR_CSTAR:
834 abort();
835 break;
836 case MSR_EFER:
837 /*printf("new efer %llx\n", EFER(cs));*/
838 wvmcs(cs->accel->fd, VMCS_GUEST_IA32_EFER, data);
839 if (data & MSR_EFER_NXE) {
840 hv_vcpu_invalidate_tlb(cs->accel->fd);
841 }
842 break;
843 case MSR_MTRRphysBase(0):
844 case MSR_MTRRphysBase(1):
845 case MSR_MTRRphysBase(2):
846 case MSR_MTRRphysBase(3):
847 case MSR_MTRRphysBase(4):
848 case MSR_MTRRphysBase(5):
849 case MSR_MTRRphysBase(6):
850 case MSR_MTRRphysBase(7):
851 env->mtrr_var[(ECX(env) - MSR_MTRRphysBase(0)) / 2].base = data;
852 break;
853 case MSR_MTRRphysMask(0):
854 case MSR_MTRRphysMask(1):
855 case MSR_MTRRphysMask(2):
856 case MSR_MTRRphysMask(3):
857 case MSR_MTRRphysMask(4):
858 case MSR_MTRRphysMask(5):
859 case MSR_MTRRphysMask(6):
860 case MSR_MTRRphysMask(7):
861 env->mtrr_var[(ECX(env) - MSR_MTRRphysMask(0)) / 2].mask = data;
862 break;
863 case MSR_MTRRfix64K_00000:
864 env->mtrr_fixed[ECX(env) - MSR_MTRRfix64K_00000] = data;
865 break;
866 case MSR_MTRRfix16K_80000:
867 case MSR_MTRRfix16K_A0000:
868 env->mtrr_fixed[ECX(env) - MSR_MTRRfix16K_80000 + 1] = data;
869 break;
870 case MSR_MTRRfix4K_C0000:
871 case MSR_MTRRfix4K_C8000:
872 case MSR_MTRRfix4K_D0000:
873 case MSR_MTRRfix4K_D8000:
874 case MSR_MTRRfix4K_E0000:
875 case MSR_MTRRfix4K_E8000:
876 case MSR_MTRRfix4K_F0000:
877 case MSR_MTRRfix4K_F8000:
878 env->mtrr_fixed[ECX(env) - MSR_MTRRfix4K_C0000 + 3] = data;
879 break;
880 case MSR_MTRRdefType:
881 env->mtrr_deftype = data;
882 break;
883 default:
884 break;
885 }
886
887     /* Hook for forwarding the write to a known hypervisor interface (currently disabled): */
888 /* if (g_hypervisor_iface)
889 g_hypervisor_iface->wrmsr_handler(cs, msr, data);
890
891 printf("write msr %llx\n", RCX(cs));*/
892 }
893
894 static void exec_wrmsr(CPUX86State *env, struct x86_decode *decode)
895 {
896 simulate_wrmsr(env);
897 env->eip += decode->len;
898 }
899
900 /*
901 * flag:
902 * 0 - bt, 1 - btc, 2 - bts, 3 - btr
903 */
904 static void do_bt(CPUX86State *env, struct x86_decode *decode, int flag)
905 {
906 int32_t displacement;
907 uint8_t index;
908 bool cf;
909 int mask = (4 == decode->operand_size) ? 0x1f : 0xf;
910
911 VM_PANIC_ON(decode->rex.rex);
912
913 fetch_operands(env, decode, 2, false, true, false);
914 index = decode->op[1].val & mask;
915
916 if (decode->op[0].type != X86_VAR_REG) {
917 if (4 == decode->operand_size) {
918 displacement = ((int32_t) (decode->op[1].val & 0xffffffe0)) / 32;
919 decode->op[0].ptr += 4 * displacement;
920 } else if (2 == decode->operand_size) {
921 displacement = ((int16_t) (decode->op[1].val & 0xfff0)) / 16;
922 decode->op[0].ptr += 2 * displacement;
923 } else {
924 VM_PANIC("bt 64bit\n");
925 }
926 }
927 decode->op[0].val = read_val_ext(env, decode->op[0].ptr,
928 decode->operand_size);
929 cf = (decode->op[0].val >> index) & 0x01;
930
931 switch (flag) {
932 case 0:
933 set_CF(env, cf);
934 return;
935 case 1:
936 decode->op[0].val ^= (1u << index);
937 break;
938 case 2:
939 decode->op[0].val |= (1u << index);
940 break;
941 case 3:
942 decode->op[0].val &= ~(1u << index);
943 break;
944 }
945 write_val_ext(env, decode->op[0].ptr, decode->op[0].val,
946 decode->operand_size);
947 set_CF(env, cf);
948 }
949
950 static void exec_bt(CPUX86State *env, struct x86_decode *decode)
951 {
952 do_bt(env, decode, 0);
953 env->eip += decode->len;
954 }
955
956 static void exec_btc(CPUX86State *env, struct x86_decode *decode)
957 {
958 do_bt(env, decode, 1);
959 env->eip += decode->len;
960 }
961
962 static void exec_btr(CPUX86State *env, struct x86_decode *decode)
963 {
964 do_bt(env, decode, 3);
965 env->eip += decode->len;
966 }
967
968 static void exec_bts(CPUX86State *env, struct x86_decode *decode)
969 {
970 do_bt(env, decode, 2);
971 env->eip += decode->len;
972 }
973
974 void exec_shl(CPUX86State *env, struct x86_decode *decode)
975 {
976 uint8_t count;
977 int of = 0, cf = 0;
978
979 fetch_operands(env, decode, 2, true, true, false);
980
981 count = decode->op[1].val;
982     count &= 0x1f; /* count is masked to 5 bits */
983 if (!count) {
984 goto exit;
985 }
986
987 switch (decode->operand_size) {
988 case 1:
989 {
990 uint8_t res = 0;
991 if (count <= 8) {
992 res = (decode->op[0].val << count);
993 cf = (decode->op[0].val >> (8 - count)) & 0x1;
994 of = cf ^ (res >> 7);
995 }
996
997 write_val_ext(env, decode->op[0].ptr, res, 1);
998 SET_FLAGS_OSZAPC_LOGIC8(env, 0, 0, res);
999 SET_FLAGS_OxxxxC(env, of, cf);
1000 break;
1001 }
1002 case 2:
1003 {
1004 uint16_t res = 0;
1005
1006 /* from bochs */
1007 if (count <= 16) {
1008 res = (decode->op[0].val << count);
1009 cf = (decode->op[0].val >> (16 - count)) & 0x1;
1010 of = cf ^ (res >> 15); /* of = cf ^ result15 */
1011 }
1012
1013 write_val_ext(env, decode->op[0].ptr, res, 2);
1014 SET_FLAGS_OSZAPC_LOGIC16(env, 0, 0, res);
1015 SET_FLAGS_OxxxxC(env, of, cf);
1016 break;
1017 }
1018 case 4:
1019 {
1020 uint32_t res = decode->op[0].val << count;
1021
1022 write_val_ext(env, decode->op[0].ptr, res, 4);
1023 SET_FLAGS_OSZAPC_LOGIC32(env, 0, 0, res);
1024 cf = (decode->op[0].val >> (32 - count)) & 0x1;
1025 of = cf ^ (res >> 31); /* of = cf ^ result31 */
1026 SET_FLAGS_OxxxxC(env, of, cf);
1027 break;
1028 }
1029 default:
1030 abort();
1031 }
1032
1033 exit:
1034 /* lflags_to_rflags(env); */
1035 env->eip += decode->len;
1036 }
1037
1038 void exec_movsx(CPUX86State *env, struct x86_decode *decode)
1039 {
1040 int src_op_size;
1041 int op_size = decode->operand_size;
1042
1043 fetch_operands(env, decode, 2, false, false, false);
1044
1045 if (0xbe == decode->opcode[1]) {
1046 src_op_size = 1;
1047 } else {
1048 src_op_size = 2;
1049 }
1050
1051 decode->operand_size = src_op_size;
1052 calc_modrm_operand(env, decode, &decode->op[1]);
1053 decode->op[1].val = sign(read_val_ext(env, decode->op[1].ptr, src_op_size),
1054 src_op_size);
1055
1056 write_val_ext(env, decode->op[0].ptr, decode->op[1].val, op_size);
1057
1058 env->eip += decode->len;
1059 }
1060
1061 void exec_ror(CPUX86State *env, struct x86_decode *decode)
1062 {
1063 uint8_t count;
1064
1065 fetch_operands(env, decode, 2, true, true, false);
1066 count = decode->op[1].val;
1067
1068 switch (decode->operand_size) {
1069 case 1:
1070 {
1071 uint32_t bit6, bit7;
1072 uint8_t res;
1073
1074 if ((count & 0x07) == 0) {
1075 if (count & 0x18) {
1076 bit6 = ((uint8_t)decode->op[0].val >> 6) & 1;
1077 bit7 = ((uint8_t)decode->op[0].val >> 7) & 1;
1078 SET_FLAGS_OxxxxC(env, bit6 ^ bit7, bit7);
1079 }
1080 } else {
1081 count &= 0x7; /* use only bottom 3 bits */
1082 res = ((uint8_t)decode->op[0].val >> count) |
1083 ((uint8_t)decode->op[0].val << (8 - count));
1084 write_val_ext(env, decode->op[0].ptr, res, 1);
1085 bit6 = (res >> 6) & 1;
1086 bit7 = (res >> 7) & 1;
1087 /* set eflags: ROR count affects the following flags: C, O */
1088 SET_FLAGS_OxxxxC(env, bit6 ^ bit7, bit7);
1089 }
1090 break;
1091 }
1092 case 2:
1093 {
1094 uint32_t bit14, bit15;
1095 uint16_t res;
1096
1097 if ((count & 0x0f) == 0) {
1098 if (count & 0x10) {
1099 bit14 = ((uint16_t)decode->op[0].val >> 14) & 1;
1100 bit15 = ((uint16_t)decode->op[0].val >> 15) & 1;
1101 /* of = result14 ^ result15 */
1102 SET_FLAGS_OxxxxC(env, bit14 ^ bit15, bit15);
1103 }
1104 } else {
1105 count &= 0x0f; /* use only 4 LSB's */
1106 res = ((uint16_t)decode->op[0].val >> count) |
1107 ((uint16_t)decode->op[0].val << (16 - count));
1108 write_val_ext(env, decode->op[0].ptr, res, 2);
1109
1110 bit14 = (res >> 14) & 1;
1111 bit15 = (res >> 15) & 1;
1112 /* of = result14 ^ result15 */
1113 SET_FLAGS_OxxxxC(env, bit14 ^ bit15, bit15);
1114 }
1115 break;
1116 }
1117 case 4:
1118 {
1119 uint32_t bit31, bit30;
1120 uint32_t res;
1121
1122 count &= 0x1f;
1123 if (count) {
1124 res = ((uint32_t)decode->op[0].val >> count) |
1125 ((uint32_t)decode->op[0].val << (32 - count));
1126 write_val_ext(env, decode->op[0].ptr, res, 4);
1127
1128 bit31 = (res >> 31) & 1;
1129 bit30 = (res >> 30) & 1;
1130 /* of = result30 ^ result31 */
1131 SET_FLAGS_OxxxxC(env, bit30 ^ bit31, bit31);
1132 }
1133 break;
1134 }
1135 }
1136 env->eip += decode->len;
1137 }
1138
1139 void exec_rol(CPUX86State *env, struct x86_decode *decode)
1140 {
1141 uint8_t count;
1142
1143 fetch_operands(env, decode, 2, true, true, false);
1144 count = decode->op[1].val;
1145
1146 switch (decode->operand_size) {
1147 case 1:
1148 {
1149 uint32_t bit0, bit7;
1150 uint8_t res;
1151
1152 if ((count & 0x07) == 0) {
1153 if (count & 0x18) {
1154 bit0 = ((uint8_t)decode->op[0].val & 1);
1155 bit7 = ((uint8_t)decode->op[0].val >> 7);
1156 SET_FLAGS_OxxxxC(env, bit0 ^ bit7, bit0);
1157 }
1158 } else {
1159 count &= 0x7; /* use only lowest 3 bits */
1160 res = ((uint8_t)decode->op[0].val << count) |
1161 ((uint8_t)decode->op[0].val >> (8 - count));
1162
1163 write_val_ext(env, decode->op[0].ptr, res, 1);
1164 /* set eflags:
1165 * ROL count affects the following flags: C, O
1166 */
1167 bit0 = (res & 1);
1168 bit7 = (res >> 7);
1169 SET_FLAGS_OxxxxC(env, bit0 ^ bit7, bit0);
1170 }
1171 break;
1172 }
1173 case 2:
1174 {
1175 uint32_t bit0, bit15;
1176 uint16_t res;
1177
1178 if ((count & 0x0f) == 0) {
1179 if (count & 0x10) {
1180 bit0 = ((uint16_t)decode->op[0].val & 0x1);
1181 bit15 = ((uint16_t)decode->op[0].val >> 15);
1182 /* of = cf ^ result15 */
1183 SET_FLAGS_OxxxxC(env, bit0 ^ bit15, bit0);
1184 }
1185 } else {
1186 count &= 0x0f; /* only use bottom 4 bits */
1187 res = ((uint16_t)decode->op[0].val << count) |
1188 ((uint16_t)decode->op[0].val >> (16 - count));
1189
1190 write_val_ext(env, decode->op[0].ptr, res, 2);
1191 bit0 = (res & 0x1);
1192 bit15 = (res >> 15);
1193 /* of = cf ^ result15 */
1194 SET_FLAGS_OxxxxC(env, bit0 ^ bit15, bit0);
1195 }
1196 break;
1197 }
1198 case 4:
1199 {
1200 uint32_t bit0, bit31;
1201 uint32_t res;
1202
1203 count &= 0x1f;
1204 if (count) {
1205 res = ((uint32_t)decode->op[0].val << count) |
1206 ((uint32_t)decode->op[0].val >> (32 - count));
1207
1208 write_val_ext(env, decode->op[0].ptr, res, 4);
1209 bit0 = (res & 0x1);
1210 bit31 = (res >> 31);
1211 /* of = cf ^ result31 */
1212 SET_FLAGS_OxxxxC(env, bit0 ^ bit31, bit0);
1213 }
1214 break;
1215 }
1216 }
1217 env->eip += decode->len;
1218 }
1219
1220
1221 void exec_rcl(CPUX86State *env, struct x86_decode *decode)
1222 {
1223 uint8_t count;
1224 int of = 0, cf = 0;
1225
1226 fetch_operands(env, decode, 2, true, true, false);
1227 count = decode->op[1].val & 0x1f;
1228
1229 switch (decode->operand_size) {
1230 case 1:
1231 {
1232 uint8_t op1_8 = decode->op[0].val;
1233 uint8_t res;
1234 count %= 9;
1235 if (!count) {
1236 break;
1237 }
1238
1239 if (1 == count) {
1240 res = (op1_8 << 1) | get_CF(env);
1241 } else {
1242 res = (op1_8 << count) | (get_CF(env) << (count - 1)) |
1243 (op1_8 >> (9 - count));
1244 }
1245
1246 write_val_ext(env, decode->op[0].ptr, res, 1);
1247
1248 cf = (op1_8 >> (8 - count)) & 0x01;
1249 of = cf ^ (res >> 7); /* of = cf ^ result7 */
1250 SET_FLAGS_OxxxxC(env, of, cf);
1251 break;
1252 }
1253 case 2:
1254 {
1255 uint16_t res;
1256 uint16_t op1_16 = decode->op[0].val;
1257
1258 count %= 17;
1259 if (!count) {
1260 break;
1261 }
1262
1263 if (1 == count) {
1264 res = (op1_16 << 1) | get_CF(env);
1265 } else if (count == 16) {
1266 res = (get_CF(env) << 15) | (op1_16 >> 1);
1267 } else { /* 2..15 */
1268 res = (op1_16 << count) | (get_CF(env) << (count - 1)) |
1269 (op1_16 >> (17 - count));
1270 }
1271
1272 write_val_ext(env, decode->op[0].ptr, res, 2);
1273
1274 cf = (op1_16 >> (16 - count)) & 0x1;
1275 of = cf ^ (res >> 15); /* of = cf ^ result15 */
1276 SET_FLAGS_OxxxxC(env, of, cf);
1277 break;
1278 }
1279 case 4:
1280 {
1281 uint32_t res;
1282 uint32_t op1_32 = decode->op[0].val;
1283
1284 if (!count) {
1285 break;
1286 }
1287
1288 if (1 == count) {
1289 res = (op1_32 << 1) | get_CF(env);
1290 } else {
1291 res = (op1_32 << count) | (get_CF(env) << (count - 1)) |
1292 (op1_32 >> (33 - count));
1293 }
1294
1295 write_val_ext(env, decode->op[0].ptr, res, 4);
1296
1297 cf = (op1_32 >> (32 - count)) & 0x1;
1298 of = cf ^ (res >> 31); /* of = cf ^ result31 */
1299 SET_FLAGS_OxxxxC(env, of, cf);
1300 break;
1301 }
1302 }
1303 env->eip += decode->len;
1304 }
1305
1306 void exec_rcr(CPUX86State *env, struct x86_decode *decode)
1307 {
1308 uint8_t count;
1309 int of = 0, cf = 0;
1310
1311 fetch_operands(env, decode, 2, true, true, false);
1312 count = decode->op[1].val & 0x1f;
1313
1314 switch (decode->operand_size) {
1315 case 1:
1316 {
1317 uint8_t op1_8 = decode->op[0].val;
1318 uint8_t res;
1319
1320 count %= 9;
1321 if (!count) {
1322 break;
1323 }
1324 res = (op1_8 >> count) | (get_CF(env) << (8 - count)) |
1325 (op1_8 << (9 - count));
1326
1327 write_val_ext(env, decode->op[0].ptr, res, 1);
1328
1329 cf = (op1_8 >> (count - 1)) & 0x1;
1330 of = (((res << 1) ^ res) >> 7) & 0x1; /* of = result6 ^ result7 */
1331 SET_FLAGS_OxxxxC(env, of, cf);
1332 break;
1333 }
1334 case 2:
1335 {
1336 uint16_t op1_16 = decode->op[0].val;
1337 uint16_t res;
1338
1339 count %= 17;
1340 if (!count) {
1341 break;
1342 }
1343 res = (op1_16 >> count) | (get_CF(env) << (16 - count)) |
1344 (op1_16 << (17 - count));
1345
1346 write_val_ext(env, decode->op[0].ptr, res, 2);
1347
1348 cf = (op1_16 >> (count - 1)) & 0x1;
1349 of = ((uint16_t)((res << 1) ^ res) >> 15) & 0x1; /* of = result15 ^
1350 result14 */
1351 SET_FLAGS_OxxxxC(env, of, cf);
1352 break;
1353 }
1354 case 4:
1355 {
1356 uint32_t res;
1357 uint32_t op1_32 = decode->op[0].val;
1358
1359 if (!count) {
1360 break;
1361 }
1362
1363 if (1 == count) {
1364 res = (op1_32 >> 1) | (get_CF(env) << 31);
1365 } else {
1366 res = (op1_32 >> count) | (get_CF(env) << (32 - count)) |
1367 (op1_32 << (33 - count));
1368 }
1369
1370 write_val_ext(env, decode->op[0].ptr, res, 4);
1371
1372 cf = (op1_32 >> (count - 1)) & 0x1;
1373 of = ((res << 1) ^ res) >> 31; /* of = result30 ^ result31 */
1374 SET_FLAGS_OxxxxC(env, of, cf);
1375 break;
1376 }
1377 }
1378 env->eip += decode->len;
1379 }
1380
1381 static void exec_xchg(CPUX86State *env, struct x86_decode *decode)
1382 {
1383 fetch_operands(env, decode, 2, true, true, false);
1384
1385 write_val_ext(env, decode->op[0].ptr, decode->op[1].val,
1386 decode->operand_size);
1387 write_val_ext(env, decode->op[1].ptr, decode->op[0].val,
1388 decode->operand_size);
1389
1390 env->eip += decode->len;
1391 }
1392
1393 static void exec_xadd(CPUX86State *env, struct x86_decode *decode)
1394 {
1395 EXEC_2OP_FLAGS_CMD(env, decode, +, SET_FLAGS_OSZAPC_ADD, true);
1396 write_val_ext(env, decode->op[1].ptr, decode->op[0].val,
1397 decode->operand_size);
1398
1399 env->eip += decode->len;
1400 }
1401
1402 static struct cmd_handler {
1403 enum x86_decode_cmd cmd;
1404 void (*handler)(CPUX86State *env, struct x86_decode *ins);
1405 } handlers[] = {
1406 {X86_DECODE_CMD_INVL, NULL,},
1407 {X86_DECODE_CMD_MOV, exec_mov},
1408 {X86_DECODE_CMD_ADD, exec_add},
1409 {X86_DECODE_CMD_OR, exec_or},
1410 {X86_DECODE_CMD_ADC, exec_adc},
1411 {X86_DECODE_CMD_SBB, exec_sbb},
1412 {X86_DECODE_CMD_AND, exec_and},
1413 {X86_DECODE_CMD_SUB, exec_sub},
1414 {X86_DECODE_CMD_NEG, exec_neg},
1415 {X86_DECODE_CMD_XOR, exec_xor},
1416 {X86_DECODE_CMD_CMP, exec_cmp},
1417 {X86_DECODE_CMD_INC, exec_inc},
1418 {X86_DECODE_CMD_DEC, exec_dec},
1419 {X86_DECODE_CMD_TST, exec_tst},
1420 {X86_DECODE_CMD_NOT, exec_not},
1421 {X86_DECODE_CMD_MOVZX, exec_movzx},
1422 {X86_DECODE_CMD_OUT, exec_out},
1423 {X86_DECODE_CMD_IN, exec_in},
1424 {X86_DECODE_CMD_INS, exec_ins},
1425 {X86_DECODE_CMD_OUTS, exec_outs},
1426 {X86_DECODE_CMD_RDMSR, exec_rdmsr},
1427 {X86_DECODE_CMD_WRMSR, exec_wrmsr},
1428 {X86_DECODE_CMD_BT, exec_bt},
1429 {X86_DECODE_CMD_BTR, exec_btr},
1430 {X86_DECODE_CMD_BTC, exec_btc},
1431 {X86_DECODE_CMD_BTS, exec_bts},
1432 {X86_DECODE_CMD_SHL, exec_shl},
1433 {X86_DECODE_CMD_ROL, exec_rol},
1434 {X86_DECODE_CMD_ROR, exec_ror},
1435 {X86_DECODE_CMD_RCR, exec_rcr},
1436 {X86_DECODE_CMD_RCL, exec_rcl},
1437 /*{X86_DECODE_CMD_CPUID, exec_cpuid},*/
1438 {X86_DECODE_CMD_MOVS, exec_movs},
1439 {X86_DECODE_CMD_CMPS, exec_cmps},
1440 {X86_DECODE_CMD_STOS, exec_stos},
1441 {X86_DECODE_CMD_SCAS, exec_scas},
1442 {X86_DECODE_CMD_LODS, exec_lods},
1443 {X86_DECODE_CMD_MOVSX, exec_movsx},
1444 {X86_DECODE_CMD_XCHG, exec_xchg},
1445 {X86_DECODE_CMD_XADD, exec_xadd},
1446 };
1447
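/* Dispatch table indexed by decoded command, filled from handlers[] at init. */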
1448 static struct cmd_handler _cmd_handler[X86_DECODE_CMD_LAST];
1449
1450 static void init_cmd_handler(void)
1451 {
1452 int i;
1453 for (i = 0; i < ARRAY_SIZE(handlers); i++) {
1454 _cmd_handler[handlers[i].cmd] = handlers[i];
1455 }
1456 }
1457
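/* Copy the vCPU's general-purpose registers, RFLAGS and RIP from the
 * Hypervisor.framework vCPU into CPUX86State. */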
1458 void load_regs(CPUState *cs)
1459 {
1460 X86CPU *cpu = X86_CPU(cs);
1461 CPUX86State *env = &cpu->env;
1462
1463 int i = 0;
1464 RRX(env, R_EAX) = rreg(cs->accel->fd, HV_X86_RAX);
1465 RRX(env, R_EBX) = rreg(cs->accel->fd, HV_X86_RBX);
1466 RRX(env, R_ECX) = rreg(cs->accel->fd, HV_X86_RCX);
1467 RRX(env, R_EDX) = rreg(cs->accel->fd, HV_X86_RDX);
1468 RRX(env, R_ESI) = rreg(cs->accel->fd, HV_X86_RSI);
1469 RRX(env, R_EDI) = rreg(cs->accel->fd, HV_X86_RDI);
1470 RRX(env, R_ESP) = rreg(cs->accel->fd, HV_X86_RSP);
1471 RRX(env, R_EBP) = rreg(cs->accel->fd, HV_X86_RBP);
1472 for (i = 8; i < 16; i++) {
1473 RRX(env, i) = rreg(cs->accel->fd, HV_X86_RAX + i);
1474 }
1475
1476 env->eflags = rreg(cs->accel->fd, HV_X86_RFLAGS);
1477 rflags_to_lflags(env);
1478 env->eip = rreg(cs->accel->fd, HV_X86_RIP);
1479 }
1480
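/* Write the general-purpose registers, RFLAGS and RIP from CPUX86State back
 * to the Hypervisor.framework vCPU. */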
1481 void store_regs(CPUState *cs)
1482 {
1483 X86CPU *cpu = X86_CPU(cs);
1484 CPUX86State *env = &cpu->env;
1485
1486 int i = 0;
1487 wreg(cs->accel->fd, HV_X86_RAX, RAX(env));
1488 wreg(cs->accel->fd, HV_X86_RBX, RBX(env));
1489 wreg(cs->accel->fd, HV_X86_RCX, RCX(env));
1490 wreg(cs->accel->fd, HV_X86_RDX, RDX(env));
1491 wreg(cs->accel->fd, HV_X86_RSI, RSI(env));
1492 wreg(cs->accel->fd, HV_X86_RDI, RDI(env));
1493 wreg(cs->accel->fd, HV_X86_RBP, RBP(env));
1494 wreg(cs->accel->fd, HV_X86_RSP, RSP(env));
1495 for (i = 8; i < 16; i++) {
1496 wreg(cs->accel->fd, HV_X86_RAX + i, RRX(env, i));
1497 }
1498
1499 lflags_to_rflags(env);
1500 wreg(cs->accel->fd, HV_X86_RFLAGS, env->eflags);
1501 macvm_set_rip(cs, env->eip);
1502 }
1503
1504 bool exec_instruction(CPUX86State *env, struct x86_decode *ins)
1505 {
1506 /*if (hvf_vcpu_id(cs))
1507 printf("%d, %llx: exec_instruction %s\n", hvf_vcpu_id(cs), env->eip,
1508 decode_cmd_to_string(ins->cmd));*/
1509
1510 if (!_cmd_handler[ins->cmd].handler) {
1511 printf("Unimplemented handler (%llx) for %d (%x %x) \n", env->eip,
1512 ins->cmd, ins->opcode[0],
1513 ins->opcode_len > 1 ? ins->opcode[1] : 0);
1514 env->eip += ins->len;
1515 return true;
1516 }
1517
1518 _cmd_handler[ins->cmd].handler(env, ins);
1519 return true;
1520 }
1521
1522 void init_emu(void)
1523 {
1524 init_cmd_handler();
1525 }
1526