// SPDX-License-Identifier: GPL-2.0
/*
 *  Copyright (C) 1994  Linus Torvalds
 *
 *  29 dec 2001 - Fixed oopses caused by unchecked access to the vm86
 *                stack - Manfred Spraul <manfred@colorfullife.com>
 *
 *  22 mar 2002 - Manfred detected the stack faults, but didn't handle
 *                them correctly. Now the emulation will be in a
 *                consistent state after stack faults - Kasper Dupont
 *                <kasperd@daimi.au.dk>
 *
 *  22 mar 2002 - Added missing clear_IF in set_vflags_* Kasper Dupont
 *                <kasperd@daimi.au.dk>
 *
 *  ?? ??? 2002 - Fixed premature returns from handle_vm86_fault
 *                caused by Kasper Dupont's changes - Stas Sergeev
 *
 *   4 apr 2002 - Fixed CHECK_IF_IN_TRAP broken by Stas' changes.
 *                Kasper Dupont <kasperd@daimi.au.dk>
 *
 *   9 apr 2002 - Changed syntax of macros in handle_vm86_fault.
 *                Kasper Dupont <kasperd@daimi.au.dk>
 *
 *   9 apr 2002 - Changed stack access macros to jump to a label
 *                instead of returning to userspace. This simplifies
 *                do_int, and is needed by handle_vm86_fault. Kasper
 *                Dupont <kasperd@daimi.au.dk>
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/syscalls.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/audit.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/security.h>

#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/tlbflush.h>
#include <asm/irq.h>
#include <asm/traps.h>
#include <asm/vm86.h>
#include <asm/switch_to.h>

/*
 * Known problems:
 *
 * Interrupt handling is not guaranteed:
 * - a real x86 will disable all interrupts for one instruction
 *   after a "mov ss,xx" to make stack handling atomic even without
 *   the 'lss' instruction. We can't guarantee this in v86 mode,
 *   as the next instruction might result in a page fault or similar.
 * - a real x86 will have interrupts disabled for one instruction
 *   past the 'sti' that enables them. We don't bother with all the
 *   details yet.
 *
 * Let's hope these problems do not actually matter for anything.
 */


/*
 * 8- and 16-bit register defines.
 */
#define AL(regs)        (((unsigned char *)&((regs)->pt.ax))[0])
#define AH(regs)        (((unsigned char *)&((regs)->pt.ax))[1])
#define IP(regs)        (*(unsigned short *)&((regs)->pt.ip))
#define SP(regs)        (*(unsigned short *)&((regs)->pt.sp))

/*
 * virtual flags (16 and 32-bit versions)
 */
#define VFLAGS  (*(unsigned short *)&(current->thread.vm86->veflags))
#define VEFLAGS (current->thread.vm86->veflags)

#define set_flags(X, new, mask) \
((X) = ((X) & ~(mask)) | ((new) & (mask)))

#define SAFE_MASK       (0xDD5)
#define RETURN_MASK     (0xDFF)
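
/*
 * Editor's note -- a worked example of set_flags() (illustration only,
 * not used by the code below). set_flags() keeps the bits of X outside
 * "mask" and adopts the bits of "new" inside it. SAFE_MASK (0xDD5)
 * covers CF, PF, AF, ZF, SF, TF, DF and OF -- the flags a vm86 guest may
 * change directly; IF (bit 9), IOPL and NT are deliberately excluded:
 *
 *      unsigned long x = 0x0202;               x has IF set
 *      set_flags(x, 0x0001, SAFE_MASK);        guest asks for CF only
 *      now x == 0x0203: CF adopted, IF untouched
 *
 * RETURN_MASK (0xDFF) is SAFE_MASK plus the fixed/reserved low eflags
 * bits (1, 3 and 5); it selects what get_vflags() copies from the real
 * flags when building the guest-visible value.
 */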

void save_v86_state(struct kernel_vm86_regs *regs, int retval)
{
        struct task_struct *tsk = current;
        struct vm86plus_struct __user *user;
        struct vm86 *vm86 = current->thread.vm86;
        long err = 0;

        /*
         * This gets called from entry.S with interrupts disabled, but
         * from process context. Enable interrupts here, before trying
         * to access user space.
         */
        local_irq_enable();

        if (!vm86 || !vm86->user_vm86) {
                pr_alert("no user_vm86: BAD\n");
                do_exit(SIGSEGV);
        }
        set_flags(regs->pt.flags, VEFLAGS, X86_EFLAGS_VIF | vm86->veflags_mask);
        user = vm86->user_vm86;

        if (!access_ok(user, vm86->vm86plus.is_vm86pus ?
                       sizeof(struct vm86plus_struct) :
                       sizeof(struct vm86_struct))) {
                pr_alert("could not access userspace vm86 info\n");
                do_exit(SIGSEGV);
        }

        put_user_try {
                put_user_ex(regs->pt.bx, &user->regs.ebx);
                put_user_ex(regs->pt.cx, &user->regs.ecx);
                put_user_ex(regs->pt.dx, &user->regs.edx);
                put_user_ex(regs->pt.si, &user->regs.esi);
                put_user_ex(regs->pt.di, &user->regs.edi);
                put_user_ex(regs->pt.bp, &user->regs.ebp);
                put_user_ex(regs->pt.ax, &user->regs.eax);
                put_user_ex(regs->pt.ip, &user->regs.eip);
                put_user_ex(regs->pt.cs, &user->regs.cs);
                put_user_ex(regs->pt.flags, &user->regs.eflags);
                put_user_ex(regs->pt.sp, &user->regs.esp);
                put_user_ex(regs->pt.ss, &user->regs.ss);
                put_user_ex(regs->es, &user->regs.es);
                put_user_ex(regs->ds, &user->regs.ds);
                put_user_ex(regs->fs, &user->regs.fs);
                put_user_ex(regs->gs, &user->regs.gs);

                put_user_ex(vm86->screen_bitmap, &user->screen_bitmap);
        } put_user_catch(err);
        if (err) {
                pr_alert("could not access userspace vm86 info\n");
                do_exit(SIGSEGV);
        }

        preempt_disable();
        tsk->thread.sp0 = vm86->saved_sp0;
        tsk->thread.sysenter_cs = __KERNEL_CS;
        update_task_stack(tsk);
        refresh_sysenter_cs(&tsk->thread);
        vm86->saved_sp0 = 0;
        preempt_enable();

        memcpy(&regs->pt, &vm86->regs32, sizeof(struct pt_regs));

        lazy_load_gs(vm86->regs32.gs);

        regs->pt.ax = retval;
}

static void mark_screen_rdonly(struct mm_struct *mm)
{
        struct vm_area_struct *vma;
        spinlock_t *ptl;
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        int i;

        down_write(&mm->mmap_sem);
        pgd = pgd_offset(mm, 0xA0000);
        if (pgd_none_or_clear_bad(pgd))
                goto out;
        p4d = p4d_offset(pgd, 0xA0000);
        if (p4d_none_or_clear_bad(p4d))
                goto out;
        pud = pud_offset(p4d, 0xA0000);
        if (pud_none_or_clear_bad(pud))
                goto out;
        pmd = pmd_offset(pud, 0xA0000);

        if (pmd_trans_huge(*pmd)) {
                vma = find_vma(mm, 0xA0000);
                split_huge_pmd(vma, pmd, 0xA0000);
        }
        if (pmd_none_or_clear_bad(pmd))
                goto out;
        pte = pte_offset_map_lock(mm, pmd, 0xA0000, &ptl);
        for (i = 0; i < 32; i++) {
                if (pte_present(*pte))
                        set_pte(pte, pte_wrprotect(*pte));
                pte++;
        }
        pte_unmap_unlock(pte, ptl);
out:
        up_write(&mm->mmap_sem);
        flush_tlb_mm_range(mm, 0xA0000, 0xA0000 + 32*PAGE_SIZE, PAGE_SHIFT, false);
}



static int do_vm86_irq_handling(int subfunction, int irqnumber);
static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus);

SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, user_vm86)
{
        return do_sys_vm86((struct vm86plus_struct __user *) user_vm86, false);
}


SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
{
        switch (cmd) {
        case VM86_REQUEST_IRQ:
        case VM86_FREE_IRQ:
        case VM86_GET_IRQ_BITS:
        case VM86_GET_AND_RESET_IRQ:
                return do_vm86_irq_handling(cmd, (int)arg);
        case VM86_PLUS_INSTALL_CHECK:
                /*
                 * NOTE: on old vm86 stuff this will return the error
                 * from access_ok(), because the subfunction is
                 * interpreted as an (invalid) address of a vm86_struct.
                 * So the installation check works.
                 */
                return 0;
        }

        /* we come here only for functions VM86_ENTER, VM86_ENTER_NO_BYPASS */
        return do_sys_vm86((struct vm86plus_struct __user *) arg, true);
}
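
/*
 * Editor's note -- a rough userspace sketch of entering vm86 mode, for
 * orientation only (error handling omitted; the constants and struct
 * fields come from the uapi <asm/vm86.h>, the segment values are made
 * up):
 *
 *      struct vm86plus_struct v = {0};
 *
 *      mmap(0, 0x100000, PROT_READ|PROT_WRITE|PROT_EXEC,
 *           MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
 *      v.regs.cs  = 0x0100;            entry point 0100:0000
 *      v.regs.eip = 0;
 *      v.regs.ss  = 0x0200;
 *      v.regs.esp = 0xfffe;
 *      v.cpu_type = CPU_386;
 *      int ret = syscall(SYS_vm86, VM86_ENTER, &v);
 *
 * The syscall "returns" whenever the emulation needs help; see the
 * VM86_TYPE()/VM86_ARG() note further down. As the comment in
 * do_sys_vm86() explains, the mmap() at address zero is why
 * vm.mmap_min_addr must permit low mappings.
 */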

static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus)
{
        struct task_struct *tsk = current;
        struct vm86 *vm86 = tsk->thread.vm86;
        struct kernel_vm86_regs vm86regs;
        struct pt_regs *regs = current_pt_regs();
        unsigned long err = 0;
        struct vm86_struct v;

        err = security_mmap_addr(0);
        if (err) {
                /*
                 * vm86 cannot virtualize the address space, so vm86 users
                 * need to manage the low 1MB themselves using mmap. Given
                 * that BIOS places important data in the first page, vm86
                 * is essentially useless if mmap_min_addr != 0. DOSEMU,
                 * for example, won't even bother trying to use vm86 if it
                 * can't map a page at virtual address 0.
                 *
                 * To reduce the available kernel attack surface, simply
                 * disallow vm86(old) for users who cannot mmap at va 0.
                 *
                 * The implementation of security_mmap_addr will allow
                 * suitably privileged users to map va 0 even if
                 * vm.mmap_min_addr is set above 0, and we want this
                 * behavior for vm86 as well, as it ensures that legacy
                 * tools like vbetool will not fail just because of
                 * vm.mmap_min_addr.
                 */
                pr_info_once("Denied a call to vm86(old) from %s[%d] (uid: %d). Set the vm.mmap_min_addr sysctl to 0 and/or adjust LSM mmap_min_addr policy to enable vm86 if you are using a vm86-based DOS emulator.\n",
                             current->comm, task_pid_nr(current),
                             from_kuid_munged(&init_user_ns, current_uid()));
                return -EPERM;
        }

        if (!vm86) {
                if (!(vm86 = kzalloc(sizeof(*vm86), GFP_KERNEL)))
                        return -ENOMEM;
                tsk->thread.vm86 = vm86;
        }
        if (vm86->saved_sp0)
                return -EPERM;

        if (copy_from_user(&v, user_vm86,
                           offsetof(struct vm86_struct, int_revectored)))
                return -EFAULT;

        memset(&vm86regs, 0, sizeof(vm86regs));

        vm86regs.pt.bx = v.regs.ebx;
        vm86regs.pt.cx = v.regs.ecx;
        vm86regs.pt.dx = v.regs.edx;
        vm86regs.pt.si = v.regs.esi;
        vm86regs.pt.di = v.regs.edi;
        vm86regs.pt.bp = v.regs.ebp;
        vm86regs.pt.ax = v.regs.eax;
        vm86regs.pt.ip = v.regs.eip;
        vm86regs.pt.cs = v.regs.cs;
        vm86regs.pt.flags = v.regs.eflags;
        vm86regs.pt.sp = v.regs.esp;
        vm86regs.pt.ss = v.regs.ss;
        vm86regs.es = v.regs.es;
        vm86regs.ds = v.regs.ds;
        vm86regs.fs = v.regs.fs;
        vm86regs.gs = v.regs.gs;

        vm86->flags = v.flags;
        vm86->screen_bitmap = v.screen_bitmap;
        vm86->cpu_type = v.cpu_type;

        if (copy_from_user(&vm86->int_revectored,
                           &user_vm86->int_revectored,
                           sizeof(struct revectored_struct)))
                return -EFAULT;
        if (copy_from_user(&vm86->int21_revectored,
                           &user_vm86->int21_revectored,
                           sizeof(struct revectored_struct)))
                return -EFAULT;
        if (plus) {
                if (copy_from_user(&vm86->vm86plus, &user_vm86->vm86plus,
                                   sizeof(struct vm86plus_info_struct)))
                        return -EFAULT;
                vm86->vm86plus.is_vm86pus = 1;
        } else
                memset(&vm86->vm86plus, 0,
                       sizeof(struct vm86plus_info_struct));

        memcpy(&vm86->regs32, regs, sizeof(struct pt_regs));
        vm86->user_vm86 = user_vm86;

        /*
         * The flags register is also special: we cannot trust that the
         * user has set it up safely, so this makes sure the interrupt
         * flag and other security-sensitive flags are inherited from
         * protected mode.
         */
        VEFLAGS = vm86regs.pt.flags;
        vm86regs.pt.flags &= SAFE_MASK;
        vm86regs.pt.flags |= regs->flags & ~SAFE_MASK;
        vm86regs.pt.flags |= X86_VM_MASK;

        vm86regs.pt.orig_ax = regs->orig_ax;

        switch (vm86->cpu_type) {
        case CPU_286:
                vm86->veflags_mask = 0;
                break;
        case CPU_386:
                vm86->veflags_mask = X86_EFLAGS_NT | X86_EFLAGS_IOPL;
                break;
        case CPU_486:
                vm86->veflags_mask = X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
                break;
        default:
                vm86->veflags_mask = X86_EFLAGS_ID | X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
                break;
        }

        /*
         * Save old state
         */
        vm86->saved_sp0 = tsk->thread.sp0;
        lazy_save_gs(vm86->regs32.gs);

        /* make room for real-mode segments */
        preempt_disable();
        tsk->thread.sp0 += 16;

        if (boot_cpu_has(X86_FEATURE_SEP)) {
                tsk->thread.sysenter_cs = 0;
                refresh_sysenter_cs(&tsk->thread);
        }

        update_task_stack(tsk);
        preempt_enable();

        if (vm86->flags & VM86_SCREEN_BITMAP)
                mark_screen_rdonly(tsk->mm);

        memcpy((struct kernel_vm86_regs *)regs, &vm86regs, sizeof(vm86regs));
        return regs->ax;
}

static inline void set_IF(struct kernel_vm86_regs *regs)
{
        VEFLAGS |= X86_EFLAGS_VIF;
}

static inline void clear_IF(struct kernel_vm86_regs *regs)
{
        VEFLAGS &= ~X86_EFLAGS_VIF;
}

static inline void clear_TF(struct kernel_vm86_regs *regs)
{
        regs->pt.flags &= ~X86_EFLAGS_TF;
}

static inline void clear_AC(struct kernel_vm86_regs *regs)
{
        regs->pt.flags &= ~X86_EFLAGS_AC;
}

/*
 * It is correct to call set_IF(regs) from the set_vflags_*
 * functions. However, someone forgot to call clear_IF(regs)
 * in the opposite case.
 * After the instruction sequence CLI PUSHF STI POPF you should
 * end up with interrupts disabled, but you ended up with
 * interrupts enabled.
 *  ( I was testing my own changes, but the only bug I
 *    could find was in a function I had not changed. )
 * [KD]
 */

static inline void set_vflags_long(unsigned long flags, struct kernel_vm86_regs *regs)
{
        set_flags(VEFLAGS, flags, current->thread.vm86->veflags_mask);
        set_flags(regs->pt.flags, flags, SAFE_MASK);
        if (flags & X86_EFLAGS_IF)
                set_IF(regs);
        else
                clear_IF(regs);
}

static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs *regs)
{
        set_flags(VFLAGS, flags, current->thread.vm86->veflags_mask);
        set_flags(regs->pt.flags, flags, SAFE_MASK);
        if (flags & X86_EFLAGS_IF)
                set_IF(regs);
        else
                clear_IF(regs);
}

static inline unsigned long get_vflags(struct kernel_vm86_regs *regs)
{
        unsigned long flags = regs->pt.flags & RETURN_MASK;

        if (VEFLAGS & X86_EFLAGS_VIF)
                flags |= X86_EFLAGS_IF;
        flags |= X86_EFLAGS_IOPL;
        return flags | (VEFLAGS & current->thread.vm86->veflags_mask);
}
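
/*
 * Editor's note -- how the VIF virtualization plays out for the
 * CLI PUSHF STI POPF sequence mentioned above (illustration only):
 *
 *      CLI   -> clear_IF():  VIF in VEFLAGS is cleared
 *      PUSHF -> get_vflags(): pushed image has IF = 0 (copied from VIF)
 *      STI   -> set_IF():    VIF is set
 *      POPF  -> set_vflags_short(): popped image has IF = 0, so
 *               clear_IF() runs and VIF ends up clear again
 *
 * The guest never touches the real IF: the CPU's eflags keep IF set the
 * whole time, and only the virtual flag changes.
 */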

static inline int is_revectored(int nr, struct revectored_struct *bitmap)
{
        return test_bit(nr, bitmap->__map);
}

#define val_byte(val, n) (((__u8 *)&val)[n])

#define pushb(base, ptr, val, err_label) \
        do { \
                __u8 __val = val; \
                ptr--; \
                if (put_user(__val, base + ptr) < 0) \
                        goto err_label; \
        } while (0)

#define pushw(base, ptr, val, err_label) \
        do { \
                __u16 __val = val; \
                ptr--; \
                if (put_user(val_byte(__val, 1), base + ptr) < 0) \
                        goto err_label; \
                ptr--; \
                if (put_user(val_byte(__val, 0), base + ptr) < 0) \
                        goto err_label; \
        } while (0)

#define pushl(base, ptr, val, err_label) \
        do { \
                __u32 __val = val; \
                ptr--; \
                if (put_user(val_byte(__val, 3), base + ptr) < 0) \
                        goto err_label; \
                ptr--; \
                if (put_user(val_byte(__val, 2), base + ptr) < 0) \
                        goto err_label; \
                ptr--; \
                if (put_user(val_byte(__val, 1), base + ptr) < 0) \
                        goto err_label; \
                ptr--; \
                if (put_user(val_byte(__val, 0), base + ptr) < 0) \
                        goto err_label; \
        } while (0)

#define popb(base, ptr, err_label) \
        ({ \
                __u8 __res; \
                if (get_user(__res, base + ptr) < 0) \
                        goto err_label; \
                ptr++; \
                __res; \
        })

#define popw(base, ptr, err_label) \
        ({ \
                __u16 __res; \
                if (get_user(val_byte(__res, 0), base + ptr) < 0) \
                        goto err_label; \
                ptr++; \
                if (get_user(val_byte(__res, 1), base + ptr) < 0) \
                        goto err_label; \
                ptr++; \
                __res; \
        })

#define popl(base, ptr, err_label) \
        ({ \
                __u32 __res; \
                if (get_user(val_byte(__res, 0), base + ptr) < 0) \
                        goto err_label; \
                ptr++; \
                if (get_user(val_byte(__res, 1), base + ptr) < 0) \
                        goto err_label; \
                ptr++; \
                if (get_user(val_byte(__res, 2), base + ptr) < 0) \
                        goto err_label; \
                ptr++; \
                if (get_user(val_byte(__res, 3), base + ptr) < 0) \
                        goto err_label; \
                ptr++; \
                __res; \
        })
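
/*
 * Editor's note -- an illustration of the byte-wise stack access above
 * (not used by the code). "ptr" is a 16-bit quantity in the callers, so
 * it wraps at 64K exactly like SP does inside one real-mode segment.
 * Pushing the word 0x1234 with sp == 0x0001:
 *
 *      pushw(ssp, sp, 0x1234, err);
 *              sp: 0x0001 -> 0x0000, byte 0x12 stored at ssp + 0x0000
 *              sp: 0x0000 -> 0xffff, byte 0x34 stored at ssp + 0xffff
 *
 * which is why the words are stored one byte at a time instead of with a
 * single 16-bit put_user() that would cross the segment limit rather
 * than wrap.
 */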

/*
 * There are so many possible reasons for this function to return
 * VM86_INTx that adding another doesn't bother me. We can expect
 * userspace programs to be able to handle it. (Getting a problem
 * in userspace is always better than an Oops anyway.)
 * [KD]
 */
static void do_int(struct kernel_vm86_regs *regs, int i,
                   unsigned char __user *ssp, unsigned short sp)
{
        unsigned long __user *intr_ptr;
        unsigned long segoffs;
        struct vm86 *vm86 = current->thread.vm86;

        if (regs->pt.cs == BIOSSEG)
                goto cannot_handle;
        if (is_revectored(i, &vm86->int_revectored))
                goto cannot_handle;
        if (i == 0x21 && is_revectored(AH(regs), &vm86->int21_revectored))
                goto cannot_handle;
        intr_ptr = (unsigned long __user *) (i << 2);
        if (get_user(segoffs, intr_ptr))
                goto cannot_handle;
        if ((segoffs >> 16) == BIOSSEG)
                goto cannot_handle;
        pushw(ssp, sp, get_vflags(regs), cannot_handle);
        pushw(ssp, sp, regs->pt.cs, cannot_handle);
        pushw(ssp, sp, IP(regs), cannot_handle);
        regs->pt.cs = segoffs >> 16;
        SP(regs) -= 6;
        IP(regs) = segoffs & 0xffff;
        clear_TF(regs);
        clear_IF(regs);
        clear_AC(regs);
        return;

cannot_handle:
        save_v86_state(regs, VM86_INTx + (i << 8));
}
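
/*
 * Editor's note -- the real-mode address arithmetic used above, spelled
 * out (illustration only). The interrupt vector table lives at linear
 * address 0, four bytes per vector, stored offset first, then segment:
 *
 *      INT 0x10: vector at linear 0x10 << 2 = 0x40
 *                segoffs = 0xC0000123 means segment 0xC000, offset 0x0123
 *                handler linear address = (0xC000 << 4) + 0x0123 = 0xC0123
 *
 * The same "segment << 4" rule produces csp and ssp in the callers.
 */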

int handle_vm86_trap(struct kernel_vm86_regs *regs, long error_code, int trapno)
{
        struct vm86 *vm86 = current->thread.vm86;

        if (vm86->vm86plus.is_vm86pus) {
                if ((trapno == 3) || (trapno == 1)) {
                        save_v86_state(regs, VM86_TRAP + (trapno << 8));
                        return 0;
                }
                do_int(regs, trapno, (unsigned char __user *) (regs->pt.ss << 4), SP(regs));
                return 0;
        }
        if (trapno != 1)
                return 1; /* we let this be handled by the calling routine */
        current->thread.trap_nr = trapno;
        current->thread.error_code = error_code;
        force_sig(SIGTRAP);
        return 0;
}

void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code)
{
        unsigned char opcode;
        unsigned char __user *csp;
        unsigned char __user *ssp;
        unsigned short ip, sp, orig_flags;
        int data32, pref_done;
        struct vm86plus_info_struct *vmpi = &current->thread.vm86->vm86plus;

#define CHECK_IF_IN_TRAP \
        if (vmpi->vm86dbg_active && vmpi->vm86dbg_TFpendig) \
                newflags |= X86_EFLAGS_TF

        orig_flags = *(unsigned short *)&regs->pt.flags;

        csp = (unsigned char __user *) (regs->pt.cs << 4);
        ssp = (unsigned char __user *) (regs->pt.ss << 4);
        sp = SP(regs);
        ip = IP(regs);

        data32 = 0;
        pref_done = 0;
        do {
                switch (opcode = popb(csp, ip, simulate_sigsegv)) {
                case 0x66:      /* 32-bit data */     data32 = 1; break;
                case 0x67:      /* 32-bit address */  break;
                case 0x2e:      /* CS */              break;
                case 0x3e:      /* DS */              break;
                case 0x26:      /* ES */              break;
                case 0x36:      /* SS */              break;
                case 0x65:      /* GS */              break;
                case 0x64:      /* FS */              break;
                case 0xf2:      /* repnz */           break;
                case 0xf3:      /* rep */             break;
                default: pref_done = 1;
                }
        } while (!pref_done);
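
        /*
         * Editor's note -- an example of what the prefix scan above sees
         * (illustration only). For the instruction bytes 66 9c (PUSHFD,
         * i.e. PUSHF with an operand-size prefix):
         *
         *      iteration 1: opcode = 0x66 -> data32 = 1
         *      iteration 2: opcode = 0x9c -> pref_done = 1
         *
         * so the switch below runs the "pushf" case with data32 set and
         * pushes a 32-bit flags image instead of a 16-bit one.
         */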

        switch (opcode) {

        /* pushf */
        case 0x9c:
                if (data32) {
                        pushl(ssp, sp, get_vflags(regs), simulate_sigsegv);
                        SP(regs) -= 4;
                } else {
                        pushw(ssp, sp, get_vflags(regs), simulate_sigsegv);
                        SP(regs) -= 2;
                }
                IP(regs) = ip;
                goto vm86_fault_return;

        /* popf */
        case 0x9d:
                {
                unsigned long newflags;
                if (data32) {
                        newflags = popl(ssp, sp, simulate_sigsegv);
                        SP(regs) += 4;
                } else {
                        newflags = popw(ssp, sp, simulate_sigsegv);
                        SP(regs) += 2;
                }
                IP(regs) = ip;
                CHECK_IF_IN_TRAP;
                if (data32)
                        set_vflags_long(newflags, regs);
                else
                        set_vflags_short(newflags, regs);

                goto check_vip;
                }

        /* int xx */
        case 0xcd: {
                int intno = popb(csp, ip, simulate_sigsegv);
                IP(regs) = ip;
                if (vmpi->vm86dbg_active) {
                        if ((1 << (intno & 7)) & vmpi->vm86dbg_intxxtab[intno >> 3]) {
                                save_v86_state(regs, VM86_INTx + (intno << 8));
                                return;
                        }
                }
                do_int(regs, intno, ssp, sp);
                return;
        }

        /* iret */
        case 0xcf:
                {
                unsigned long newip;
                unsigned long newcs;
                unsigned long newflags;
                if (data32) {
                        newip = popl(ssp, sp, simulate_sigsegv);
                        newcs = popl(ssp, sp, simulate_sigsegv);
                        newflags = popl(ssp, sp, simulate_sigsegv);
                        SP(regs) += 12;
                } else {
                        newip = popw(ssp, sp, simulate_sigsegv);
                        newcs = popw(ssp, sp, simulate_sigsegv);
                        newflags = popw(ssp, sp, simulate_sigsegv);
                        SP(regs) += 6;
                }
                IP(regs) = newip;
                regs->pt.cs = newcs;
                CHECK_IF_IN_TRAP;
                if (data32) {
                        set_vflags_long(newflags, regs);
                } else {
                        set_vflags_short(newflags, regs);
                }
                goto check_vip;
                }

        /* cli */
        case 0xfa:
                IP(regs) = ip;
                clear_IF(regs);
                goto vm86_fault_return;

        /* sti */
        /*
         * Damn. This is incorrect: the 'sti' instruction should actually
         * enable interrupts after the /next/ instruction. Not good.
         *
         * Probably needs some horsing around with the TF flag. Aiee..
         */
        case 0xfb:
                IP(regs) = ip;
                set_IF(regs);
                goto check_vip;

        default:
                save_v86_state(regs, VM86_UNKNOWN);
        }

        return;

check_vip:
        if ((VEFLAGS & (X86_EFLAGS_VIP | X86_EFLAGS_VIF)) ==
            (X86_EFLAGS_VIP | X86_EFLAGS_VIF)) {
                save_v86_state(regs, VM86_STI);
                return;
        }

vm86_fault_return:
        if (vmpi->force_return_for_pic && (VEFLAGS & (X86_EFLAGS_IF | X86_EFLAGS_VIF))) {
                save_v86_state(regs, VM86_PICRETURN);
                return;
        }
        if (orig_flags & X86_EFLAGS_TF)
                handle_vm86_trap(regs, 0, X86_TRAP_DB);
        return;

simulate_sigsegv:
        /* FIXME: After a long discussion with Stas we finally
         *        agreed, that this is wrong. Here we should
         *        really send a SIGSEGV to the user program.
         *        But how do we create the correct context? We
         *        are inside a general protection fault handler
         *        and have just returned from a page fault
         *        handler. The correct context for the signal
         *        handler should be a mixture of the two, but
         *        how do we get the information? [KD]
         */
        save_v86_state(regs, VM86_UNKNOWN);
}
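
/*
 * Editor's note -- how userspace decodes the retval handed to
 * save_v86_state() above (illustration only; VM86_TYPE() and VM86_ARG()
 * come from the uapi <asm/vm86.h>):
 *
 *      int ret = syscall(SYS_vm86, VM86_ENTER, &v);
 *      switch (VM86_TYPE(ret)) {
 *      case VM86_INTx:    emulate INT number VM86_ARG(ret)
 *      case VM86_STI:     a pending virtual interrupt can be injected
 *      case VM86_TRAP:    debug trap number VM86_ARG(ret)
 *      case VM86_UNKNOWN: unhandled GP fault, typically fatal
 *      }
 */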

/* ---------------- vm86 special IRQ passing stuff ----------------- */

#define VM86_IRQNAME    "vm86irq"

static struct vm86_irqs {
        struct task_struct *tsk;
        int sig;
} vm86_irqs[16];

static DEFINE_SPINLOCK(irqbits_lock);
static int irqbits;

#define ALLOWED_SIGS (1 /* 0 = don't send a signal */ \
        | (1 << SIGUSR1) | (1 << SIGUSR2) | (1 << SIGIO)  | (1 << SIGURG) \
        | (1 << SIGUNUSED))

static irqreturn_t irq_handler(int intno, void *dev_id)
{
        int irq_bit;
        unsigned long flags;

        spin_lock_irqsave(&irqbits_lock, flags);
        irq_bit = 1 << intno;
        if ((irqbits & irq_bit) || !vm86_irqs[intno].tsk)
                goto out;
        irqbits |= irq_bit;
        if (vm86_irqs[intno].sig)
                send_sig(vm86_irqs[intno].sig, vm86_irqs[intno].tsk, 1);
        /*
         * IRQ will be re-enabled when user asks for the irq (whether
         * polling or as a result of the signal)
         */
        disable_irq_nosync(intno);
        spin_unlock_irqrestore(&irqbits_lock, flags);
        return IRQ_HANDLED;

out:
        spin_unlock_irqrestore(&irqbits_lock, flags);
        return IRQ_NONE;
}

static inline void free_vm86_irq(int irqnumber)
{
        unsigned long flags;

        free_irq(irqnumber, NULL);
        vm86_irqs[irqnumber].tsk = NULL;

        spin_lock_irqsave(&irqbits_lock, flags);
        irqbits &= ~(1 << irqnumber);
        spin_unlock_irqrestore(&irqbits_lock, flags);
}

void release_vm86_irqs(struct task_struct *task)
{
        int i;

        for (i = FIRST_VM86_IRQ; i <= LAST_VM86_IRQ; i++)
                if (vm86_irqs[i].tsk == task)
                        free_vm86_irq(i);
}

static inline int get_and_reset_irq(int irqnumber)
{
        int bit;
        unsigned long flags;
        int ret = 0;

        if (invalid_vm86_irq(irqnumber))
                return 0;
        if (vm86_irqs[irqnumber].tsk != current)
                return 0;
        spin_lock_irqsave(&irqbits_lock, flags);
        bit = irqbits & (1 << irqnumber);
        irqbits &= ~bit;
        if (bit) {
                enable_irq(irqnumber);
                ret = 1;
        }

        spin_unlock_irqrestore(&irqbits_lock, flags);
        return ret;
}


static int do_vm86_irq_handling(int subfunction, int irqnumber)
{
        int ret;

        switch (subfunction) {
        case VM86_GET_AND_RESET_IRQ: {
                return get_and_reset_irq(irqnumber);
        }
        case VM86_GET_IRQ_BITS: {
                return irqbits;
        }
        case VM86_REQUEST_IRQ: {
                int sig = irqnumber >> 8;
                int irq = irqnumber & 255;

                if (!capable(CAP_SYS_ADMIN))
                        return -EPERM;
                if (!((1 << sig) & ALLOWED_SIGS))
                        return -EPERM;
                if (invalid_vm86_irq(irq))
                        return -EPERM;
                if (vm86_irqs[irq].tsk)
                        return -EPERM;
                ret = request_irq(irq, &irq_handler, 0, VM86_IRQNAME, NULL);
                if (ret)
                        return ret;
                vm86_irqs[irq].sig = sig;
                vm86_irqs[irq].tsk = current;
                return irq;
        }
        case VM86_FREE_IRQ: {
                if (invalid_vm86_irq(irqnumber))
                        return -EPERM;
                if (!vm86_irqs[irqnumber].tsk)
                        return 0;
                if (vm86_irqs[irqnumber].tsk != current)
                        return -EPERM;
                free_vm86_irq(irqnumber);
                return 0;
        }
        }
        return -EINVAL;
}
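
/*
 * Editor's note -- a rough userspace sketch of the IRQ passing API above
 * (illustration only; the constants are from <asm/vm86.h>, the IRQ
 * number and signal choice are made up):
 *
 *      request SIGUSR1 on IRQ 5 (needs CAP_SYS_ADMIN):
 *              syscall(SYS_vm86, VM86_REQUEST_IRQ, (SIGUSR1 << 8) | 5);
 *      poll and re-arm after handling it:
 *              if (syscall(SYS_vm86, VM86_GET_AND_RESET_IRQ, 5))
 *                      inject the interrupt into the guest;
 *      release it:
 *              syscall(SYS_vm86, VM86_FREE_IRQ, 5);
 */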