/*
 *  vm86 linux syscall support
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>

#include "qemu.h"

//#define DEBUG_VM86

#define set_flags(X, new, mask) \
    ((X) = ((X) & ~(mask)) | ((new) & (mask)))

#define SAFE_MASK   (0xDD5)
#define RETURN_MASK (0xDFF)

static inline int is_revectored(int nr, struct target_revectored_struct *bitmap)
{
    return (((uint8_t *)bitmap)[nr >> 3] >> (nr & 7)) & 1;
}

static inline void vm_putw(uint8_t *segptr, unsigned int reg16, unsigned int val)
{
    stw(segptr + (reg16 & 0xffff), val);
}

static inline void vm_putl(uint8_t *segptr, unsigned int reg16, unsigned int val)
{
    stl(segptr + (reg16 & 0xffff), val);
}

static inline unsigned int vm_getw(uint8_t *segptr, unsigned int reg16)
{
    return lduw(segptr + (reg16 & 0xffff));
}

static inline unsigned int vm_getl(uint8_t *segptr, unsigned int reg16)
{
    return ldl(segptr + (reg16 & 0xffff));
}
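
/*
 * The vm_putw/vm_putl/vm_getw/vm_getl helpers above access guest memory
 * using real-mode segment:offset addressing: callers pass
 * segptr = selector << 4 plus a 16-bit offset, which wraps within the
 * 64KB segment (reg16 & 0xffff).
 */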

void save_v86_state(CPUX86State *env)
{
    TaskState *ts = env->opaque;

    /* put the VM86 registers in the userspace register structure */
    ts->target_v86->regs.eax = tswap32(env->regs[R_EAX]);
    ts->target_v86->regs.ebx = tswap32(env->regs[R_EBX]);
    ts->target_v86->regs.ecx = tswap32(env->regs[R_ECX]);
    ts->target_v86->regs.edx = tswap32(env->regs[R_EDX]);
    ts->target_v86->regs.esi = tswap32(env->regs[R_ESI]);
    ts->target_v86->regs.edi = tswap32(env->regs[R_EDI]);
    ts->target_v86->regs.ebp = tswap32(env->regs[R_EBP]);
    ts->target_v86->regs.esp = tswap32(env->regs[R_ESP]);
    ts->target_v86->regs.eip = tswap32(env->eip);
    ts->target_v86->regs.cs = tswap16(env->segs[R_CS].selector);
    ts->target_v86->regs.ss = tswap16(env->segs[R_SS].selector);
    ts->target_v86->regs.ds = tswap16(env->segs[R_DS].selector);
    ts->target_v86->regs.es = tswap16(env->segs[R_ES].selector);
    ts->target_v86->regs.fs = tswap16(env->segs[R_FS].selector);
    ts->target_v86->regs.gs = tswap16(env->segs[R_GS].selector);
    set_flags(env->eflags, ts->v86flags, VIF_MASK | ts->v86mask);
    ts->target_v86->regs.eflags = tswap32(env->eflags);
#ifdef DEBUG_VM86
    fprintf(logfile, "save_v86_state: eflags=%08x cs:ip=%04x:%04x\n",
            env->eflags, env->segs[R_CS].selector, env->eip);
#endif

    /* restore 32 bit registers */
    env->regs[R_EAX] = ts->vm86_saved_regs.eax;
    env->regs[R_EBX] = ts->vm86_saved_regs.ebx;
    env->regs[R_ECX] = ts->vm86_saved_regs.ecx;
    env->regs[R_EDX] = ts->vm86_saved_regs.edx;
    env->regs[R_ESI] = ts->vm86_saved_regs.esi;
    env->regs[R_EDI] = ts->vm86_saved_regs.edi;
    env->regs[R_EBP] = ts->vm86_saved_regs.ebp;
    env->regs[R_ESP] = ts->vm86_saved_regs.esp;
    env->eflags = ts->vm86_saved_regs.eflags;
    env->eip = ts->vm86_saved_regs.eip;

    cpu_x86_load_seg(env, R_CS, ts->vm86_saved_regs.cs);
    cpu_x86_load_seg(env, R_SS, ts->vm86_saved_regs.ss);
    cpu_x86_load_seg(env, R_DS, ts->vm86_saved_regs.ds);
    cpu_x86_load_seg(env, R_ES, ts->vm86_saved_regs.es);
    cpu_x86_load_seg(env, R_FS, ts->vm86_saved_regs.fs);
    cpu_x86_load_seg(env, R_GS, ts->vm86_saved_regs.gs);
}

/* return from vm86 mode to 32 bit. The vm86() syscall will return
   'retval' */
static inline void return_to_32bit(CPUX86State *env, int retval)
{
#ifdef DEBUG_VM86
    fprintf(logfile, "return_to_32bit: ret=0x%x\n", retval);
#endif
    save_v86_state(env);
    env->regs[R_EAX] = retval;
}

static inline int set_IF(CPUX86State *env)
{
    TaskState *ts = env->opaque;

    ts->v86flags |= VIF_MASK;
    if (ts->v86flags & VIP_MASK) {
        return_to_32bit(env, TARGET_VM86_STI);
        return 1;
    }
    return 0;
}

static inline void clear_IF(CPUX86State *env)
{
    TaskState *ts = env->opaque;

    ts->v86flags &= ~VIF_MASK;
}

static inline void clear_TF(CPUX86State *env)
{
    env->eflags &= ~TF_MASK;
}

static inline void clear_AC(CPUX86State *env)
{
    env->eflags &= ~AC_MASK;
}

static inline int set_vflags_long(unsigned long eflags, CPUX86State *env)
{
    TaskState *ts = env->opaque;

    set_flags(ts->v86flags, eflags, ts->v86mask);
    set_flags(env->eflags, eflags, SAFE_MASK);
    if (eflags & IF_MASK)
        return set_IF(env);
    else
        clear_IF(env);
    return 0;
}

static inline int set_vflags_short(unsigned short flags, CPUX86State *env)
{
    TaskState *ts = env->opaque;

    set_flags(ts->v86flags, flags, ts->v86mask & 0xffff);
    set_flags(env->eflags, flags, SAFE_MASK);
    if (flags & IF_MASK)
        return set_IF(env);
    else
        clear_IF(env);
    return 0;
}

static inline unsigned int get_vflags(CPUX86State *env)
{
    TaskState *ts = env->opaque;
    unsigned int flags;

    flags = env->eflags & RETURN_MASK;
    if (ts->v86flags & VIF_MASK)
        flags |= IF_MASK;
    flags |= IOPL_MASK;
    return flags | (ts->v86flags & ts->v86mask);
}

#define ADD16(reg, val) reg = (reg & ~0xffff) | ((reg + (val)) & 0xffff)
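
/*
 * Interrupt flag virtualization: the guest's IF is never loaded into the
 * real eflags (SAFE_MASK and RETURN_MASK both exclude it).  set_IF() and
 * clear_IF() track it as VIF in ts->v86flags, and get_vflags() folds it
 * back into the IF bit of the flag image that gets pushed on the vm86
 * stack by pushf and by the emulated interrupt frames.  If the guest
 * enables interrupts while VIP is pending, set_IF() leaves vm86 mode with
 * TARGET_VM86_STI so the 32-bit caller can deliver the pending event.
 */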

/* handle VM86 interrupt (NOTE: the CPU core currently does not
   support TSS interrupt revectoring, so this code is always executed) */
static void do_int(CPUX86State *env, int intno)
{
    TaskState *ts = env->opaque;
    uint32_t *int_ptr, segoffs;
    uint8_t *ssp;
    unsigned int sp;

    if (env->segs[R_CS].selector == TARGET_BIOSSEG)
        goto cannot_handle;
    if (is_revectored(intno, &ts->vm86plus.int_revectored))
        goto cannot_handle;
    if (intno == 0x21 && is_revectored((env->regs[R_EAX] >> 8) & 0xff,
                                       &ts->vm86plus.int21_revectored))
        goto cannot_handle;
    int_ptr = (uint32_t *)(intno << 2);
    segoffs = tswap32(*int_ptr);
    if ((segoffs >> 16) == TARGET_BIOSSEG)
        goto cannot_handle;
#if defined(DEBUG_VM86)
    fprintf(logfile, "VM86: emulating int 0x%x. CS:IP=%04x:%04x\n",
            intno, segoffs >> 16, segoffs & 0xffff);
#endif
    /* save old state */
    ssp = (uint8_t *)(env->segs[R_SS].selector << 4);
    sp = env->regs[R_ESP] & 0xffff;
    vm_putw(ssp, sp - 2, get_vflags(env));
    vm_putw(ssp, sp - 4, env->segs[R_CS].selector);
    vm_putw(ssp, sp - 6, env->eip);
    ADD16(env->regs[R_ESP], -6);
    /* goto interrupt handler */
    env->eip = segoffs & 0xffff;
    cpu_x86_load_seg(env, R_CS, segoffs >> 16);
    clear_TF(env);
    clear_IF(env);
    clear_AC(env);
    return;
 cannot_handle:
#if defined(DEBUG_VM86)
    fprintf(logfile, "VM86: return to 32 bits int 0x%x\n", intno);
#endif
    return_to_32bit(env, TARGET_VM86_INTx | (intno << 8));
}

void handle_vm86_trap(CPUX86State *env, int trapno)
{
    if (trapno == 1 || trapno == 3) {
        return_to_32bit(env, TARGET_VM86_TRAP + (trapno << 8));
    } else {
        do_int(env, trapno);
    }
}

#define CHECK_IF_IN_TRAP() \
    if ((ts->vm86plus.vm86plus.flags & TARGET_vm86dbg_active) && \
        (ts->vm86plus.vm86plus.flags & TARGET_vm86dbg_TFpendig)) \
        newflags |= TF_MASK

#define VM86_FAULT_RETURN \
    if ((ts->vm86plus.vm86plus.flags & TARGET_force_return_for_pic) && \
        (ts->v86flags & (IF_MASK | VIF_MASK))) \
        return_to_32bit(env, TARGET_VM86_PICRETURN); \
    return

void handle_vm86_fault(CPUX86State *env)
{
    TaskState *ts = env->opaque;
    uint8_t *csp, *pc, *ssp;
    unsigned int ip, sp, newflags, newip, newcs, opcode, intno;
    int data32, pref_done;

    csp = (uint8_t *)(env->segs[R_CS].selector << 4);
    ip = env->eip & 0xffff;
    pc = csp + ip;

    ssp = (uint8_t *)(env->segs[R_SS].selector << 4);
    sp = env->regs[R_ESP] & 0xffff;

#if defined(DEBUG_VM86)
    fprintf(logfile, "VM86 exception %04x:%08x %02x %02x\n",
            env->segs[R_CS].selector, env->eip, pc[0], pc[1]);
#endif

    data32 = 0;
    pref_done = 0;
    do {
        opcode = csp[ip];
        ADD16(ip, 1);
        switch (opcode) {
        case 0x66:      /* 32-bit data */     data32 = 1; break;
        case 0x67:      /* 32-bit address */  break;
        case 0x2e:      /* CS */              break;
        case 0x3e:      /* DS */              break;
        case 0x26:      /* ES */              break;
        case 0x36:      /* SS */              break;
        case 0x65:      /* GS */              break;
        case 0x64:      /* FS */              break;
        case 0xf2:      /* repnz */           break;
        case 0xf3:      /* rep */             break;
        default: pref_done = 1;
        }
    } while (!pref_done);

    /* VM86 mode */
    switch (opcode) {
    case 0x9c: /* pushf */
        if (data32) {
            vm_putl(ssp, sp - 4, get_vflags(env));
            ADD16(env->regs[R_ESP], -4);
        } else {
            vm_putw(ssp, sp - 2, get_vflags(env));
            ADD16(env->regs[R_ESP], -2);
        }
        env->eip = ip;
        VM86_FAULT_RETURN;

    case 0x9d: /* popf */
        if (data32) {
            newflags = vm_getl(ssp, sp);
            ADD16(env->regs[R_ESP], 4);
        } else {
            newflags = vm_getw(ssp, sp);
            ADD16(env->regs[R_ESP], 2);
        }
        env->eip = ip;
        CHECK_IF_IN_TRAP();
        if (data32) {
            if (set_vflags_long(newflags, env))
                return;
        } else {
            if (set_vflags_short(newflags, env))
                return;
        }
        VM86_FAULT_RETURN;

    case 0xcd: /* int */
        intno = csp[ip];
        ADD16(ip, 1);
        env->eip = ip;
        if (ts->vm86plus.vm86plus.flags & TARGET_vm86dbg_active) {
            if ((ts->vm86plus.vm86plus.vm86dbg_intxxtab[intno >> 3] >>
                 (intno & 7)) & 1) {
                return_to_32bit(env, TARGET_VM86_INTx + (intno << 8));
                return;
            }
        }
        do_int(env, intno);
        break;

    case 0xcf: /* iret */
        if (data32) {
            newip = vm_getl(ssp, sp) & 0xffff;
            newcs = vm_getl(ssp, sp + 4) & 0xffff;
            newflags = vm_getl(ssp, sp + 8);
            ADD16(env->regs[R_ESP], 12);
        } else {
            newip = vm_getw(ssp, sp);
            newcs = vm_getw(ssp, sp + 2);
            newflags = vm_getw(ssp, sp + 4);
            ADD16(env->regs[R_ESP], 6);
        }
        env->eip = newip;
        cpu_x86_load_seg(env, R_CS, newcs);
        CHECK_IF_IN_TRAP();
        if (data32) {
            if (set_vflags_long(newflags, env))
                return;
        } else {
            if (set_vflags_short(newflags, env))
                return;
        }
        VM86_FAULT_RETURN;

    case 0xfa: /* cli */
        env->eip = ip;
        clear_IF(env);
        VM86_FAULT_RETURN;

    case 0xfb: /* sti */
        env->eip = ip;
        if (set_IF(env))
            return;
        VM86_FAULT_RETURN;

    default:
        /* real VM86 GPF exception */
        return_to_32bit(env, TARGET_VM86_UNKNOWN);
        break;
    }
}
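
/*
 * Entry point for the vm86/vm86plus syscall: the current 32-bit register
 * state is saved in ts->vm86_saved_regs, the guest image described by
 * target_v86 is loaded into the virtual CPU, and execution then resumes
 * in vm86 mode.  Control comes back through return_to_32bit() and
 * save_v86_state() when the guest needs service from the 32-bit caller.
 */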

int do_vm86(CPUX86State *env, long subfunction,
            struct target_vm86plus_struct *target_v86)
{
    TaskState *ts = env->opaque;
    int ret;

    switch (subfunction) {
    case TARGET_VM86_REQUEST_IRQ:
    case TARGET_VM86_FREE_IRQ:
    case TARGET_VM86_GET_IRQ_BITS:
    case TARGET_VM86_GET_AND_RESET_IRQ:
        gemu_log("qemu: unsupported vm86 subfunction (%ld)\n", subfunction);
        ret = -EINVAL;
        goto out;
    case TARGET_VM86_PLUS_INSTALL_CHECK:
        /* NOTE: on old vm86 stuff this will return the error
           from verify_area(), because the subfunction is
           interpreted as (invalid) address to vm86_struct.
           So the installation check works.
         */
        ret = 0;
        goto out;
    }

    ts->target_v86 = target_v86;
    /* save current CPU regs */
    ts->vm86_saved_regs.eax = 0; /* default vm86 syscall return code */
    ts->vm86_saved_regs.ebx = env->regs[R_EBX];
    ts->vm86_saved_regs.ecx = env->regs[R_ECX];
    ts->vm86_saved_regs.edx = env->regs[R_EDX];
    ts->vm86_saved_regs.esi = env->regs[R_ESI];
    ts->vm86_saved_regs.edi = env->regs[R_EDI];
    ts->vm86_saved_regs.ebp = env->regs[R_EBP];
    ts->vm86_saved_regs.esp = env->regs[R_ESP];
    ts->vm86_saved_regs.eflags = env->eflags;
    ts->vm86_saved_regs.eip = env->eip;
    ts->vm86_saved_regs.cs = env->segs[R_CS].selector;
    ts->vm86_saved_regs.ss = env->segs[R_SS].selector;
    ts->vm86_saved_regs.ds = env->segs[R_DS].selector;
    ts->vm86_saved_regs.es = env->segs[R_ES].selector;
    ts->vm86_saved_regs.fs = env->segs[R_FS].selector;
    ts->vm86_saved_regs.gs = env->segs[R_GS].selector;

    /* build vm86 CPU state */
    ts->v86flags = tswap32(target_v86->regs.eflags);
    env->eflags = (env->eflags & ~SAFE_MASK) |
        (tswap32(target_v86->regs.eflags) & SAFE_MASK) | VM_MASK;

    ts->vm86plus.cpu_type = tswapl(target_v86->cpu_type);
    switch (ts->vm86plus.cpu_type) {
    case TARGET_CPU_286:
        ts->v86mask = 0;
        break;
    case TARGET_CPU_386:
        ts->v86mask = NT_MASK | IOPL_MASK;
        break;
    case TARGET_CPU_486:
        ts->v86mask = AC_MASK | NT_MASK | IOPL_MASK;
        break;
    default:
        ts->v86mask = ID_MASK | AC_MASK | NT_MASK | IOPL_MASK;
        break;
    }

    env->regs[R_EBX] = tswap32(target_v86->regs.ebx);
    env->regs[R_ECX] = tswap32(target_v86->regs.ecx);
    env->regs[R_EDX] = tswap32(target_v86->regs.edx);
    env->regs[R_ESI] = tswap32(target_v86->regs.esi);
    env->regs[R_EDI] = tswap32(target_v86->regs.edi);
    env->regs[R_EBP] = tswap32(target_v86->regs.ebp);
    env->regs[R_ESP] = tswap32(target_v86->regs.esp);
    env->eip = tswap32(target_v86->regs.eip);
    cpu_x86_load_seg(env, R_CS, tswap16(target_v86->regs.cs));
    cpu_x86_load_seg(env, R_SS, tswap16(target_v86->regs.ss));
    cpu_x86_load_seg(env, R_DS, tswap16(target_v86->regs.ds));
    cpu_x86_load_seg(env, R_ES, tswap16(target_v86->regs.es));
    cpu_x86_load_seg(env, R_FS, tswap16(target_v86->regs.fs));
    cpu_x86_load_seg(env, R_GS, tswap16(target_v86->regs.gs));
    ret = tswap32(target_v86->regs.eax); /* eax will be restored at
                                            the end of the syscall */
    memcpy(&ts->vm86plus.int_revectored,
           &target_v86->int_revectored, 32);
    memcpy(&ts->vm86plus.int21_revectored,
           &target_v86->int21_revectored, 32);
    ts->vm86plus.vm86plus.flags = tswapl(target_v86->vm86plus.flags);
    memcpy(&ts->vm86plus.vm86plus.vm86dbg_intxxtab,
           target_v86->vm86plus.vm86dbg_intxxtab, 32);

#ifdef DEBUG_VM86
    fprintf(logfile, "do_vm86: cs:ip=%04x:%04x\n",
            env->segs[R_CS].selector, env->eip);
#endif
    /* now the virtual CPU is ready for vm86 execution! */
 out:
    return ret;
}
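
/*
 * Illustrative sketch (not compiled): roughly how a 32-bit x86 Linux guest
 * might use the vm86plus interface that do_vm86() emulates; under qemu-i386
 * user emulation the vm86 syscall is routed to do_vm86() above.  It assumes
 * the guest-side definitions from <asm/vm86.h> (struct vm86plus_struct,
 * VM86_ENTER, VM86_PLUS_INSTALL_CHECK, VM86_TYPE/VM86_ARG, CPU_586) and that
 * real-mode code has already been mapped below 1MB; the enter_vm86() helper
 * and the segment/offset/stack values are hypothetical.
 */
#if 0
#include <string.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/vm86.h>

static int enter_vm86(unsigned short seg, unsigned short off)
{
    struct vm86plus_struct v86;
    int ret;

    /* Installation check: mirrors the TARGET_VM86_PLUS_INSTALL_CHECK case
       above, which returns 0 when the vm86plus interface is available. */
    if (syscall(__NR_vm86, VM86_PLUS_INSTALL_CHECK, 0) != 0)
        return -1;

    memset(&v86, 0, sizeof(v86));
    v86.cpu_type = CPU_586;
    /* Entry point and stack of the already-mapped real-mode code; the
       actual values depend on how the caller laid out low memory. */
    v86.regs.cs = seg;
    v86.regs.eip = off;
    v86.regs.ss = seg;
    v86.regs.esp = 0xfffe;

    /* Runs in vm86 mode until handle_vm86_fault()/handle_vm86_trap()
       returns to 32-bit mode with an encoded reason in eax. */
    ret = syscall(__NR_vm86, VM86_ENTER, &v86);

    switch (VM86_TYPE(ret)) {
    case VM86_INTx:
        printf("unhandled software interrupt 0x%x\n", VM86_ARG(ret));
        break;
    case VM86_UNKNOWN:
        printf("unhandled general protection fault\n");
        break;
    default:
        printf("vm86 returned type %d\n", VM86_TYPE(ret));
        break;
    }
    return 0;
}
#endif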