/*
 * Copyright (C) 1994 Linus Torvalds
 *
 *  29 dec 2001 - Fixed oopses caused by unchecked access to the vm86
 *                stack - Manfred Spraul <manfred@colorfullife.com>
 *
 *  22 mar 2002 - Manfred detected the stackfaults, but didn't handle
 *                them correctly. Now the emulation will be in a
 *                consistent state after stackfaults - Kasper Dupont
 *                <kasperd@daimi.au.dk>
 *
 *  22 mar 2002 - Added missing clear_IF in set_vflags_* Kasper Dupont
 *                <kasperd@daimi.au.dk>
 *
 *  ?? ??? 2002 - Fixed premature returns from handle_vm86_fault
 *                caused by Kasper Dupont's changes - Stas Sergeev
 *
 *   4 apr 2002 - Fixed CHECK_IF_IN_TRAP broken by Stas' changes.
 *                Kasper Dupont <kasperd@daimi.au.dk>
 *
 *   9 apr 2002 - Changed syntax of macros in handle_vm86_fault.
 *                Kasper Dupont <kasperd@daimi.au.dk>
 *
 *   9 apr 2002 - Changed stack access macros to jump to a label
 *                instead of returning to userspace. This simplifies
 *                do_int, and is needed by handle_vm86_fault. Kasper
 *                Dupont <kasperd@daimi.au.dk>
 *
 */

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/audit.h>
#include <linux/stddef.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/tlbflush.h>
#include <asm/irq.h>

/*
 * Known problems:
 *
 * Interrupt handling is not guaranteed:
 * - a real x86 will disable all interrupts for one instruction
 *   after a "mov ss,xx" to make stack handling atomic even without
 *   the 'lss' instruction. We can't guarantee this in v86 mode,
 *   as the next instruction might result in a page fault or similar.
 * - a real x86 will have interrupts disabled for one instruction
 *   past the 'sti' that enables them. We don't bother with all the
 *   details yet.
 *
 * Let's hope these problems do not actually matter for anything.
 */


#define KVM86   ((struct kernel_vm86_struct *)regs)
#define VMPI    KVM86->vm86plus


/*
 * 8- and 16-bit register defines..
 */
#define AL(regs)        (((unsigned char *)&((regs)->pt.ax))[0])
#define AH(regs)        (((unsigned char *)&((regs)->pt.ax))[1])
#define IP(regs)        (*(unsigned short *)&((regs)->pt.ip))
#define SP(regs)        (*(unsigned short *)&((regs)->pt.sp))

/*
 * virtual flags (16 and 32-bit versions)
 */
#define VFLAGS  (*(unsigned short *)&(current->thread.v86flags))
#define VEFLAGS (current->thread.v86flags)

#define set_flags(X, new, mask) \
        ((X) = ((X) & ~(mask)) | ((new) & (mask)))

#define SAFE_MASK       (0xDD5)
#define RETURN_MASK     (0xDFF)

/* convert kernel_vm86_regs to vm86_regs */
static int copy_vm86_regs_to_user(struct vm86_regs __user *user,
                                  const struct kernel_vm86_regs *regs)
{
        int ret = 0;

        /*
         * kernel_vm86_regs is missing gs, so copy everything up to
         * (but not including) orig_eax, and then rest including orig_eax.
         */
        ret += copy_to_user(user, regs, offsetof(struct kernel_vm86_regs, pt.orig_ax));
        ret += copy_to_user(&user->orig_eax, &regs->pt.orig_ax,
                            sizeof(struct kernel_vm86_regs) -
                            offsetof(struct kernel_vm86_regs, pt.orig_ax));

        return ret;
}

/* convert vm86_regs to kernel_vm86_regs */
static int copy_vm86_regs_from_user(struct kernel_vm86_regs *regs,
                                    const struct vm86_regs __user *user,
                                    unsigned extra)
{
        int ret = 0;

        /* copy ax-fs inclusive */
        ret += copy_from_user(regs, user, offsetof(struct kernel_vm86_regs, pt.orig_ax));
        /* copy orig_ax-__gsh+extra */
        ret += copy_from_user(&regs->pt.orig_ax, &user->orig_eax,
                              sizeof(struct kernel_vm86_regs) -
                              offsetof(struct kernel_vm86_regs, pt.orig_ax) +
                              extra);
        return ret;
}

struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
{
        struct tss_struct *tss;
        struct pt_regs *ret;
        unsigned long tmp;

        /*
         * This gets called from entry.S with interrupts disabled, but
         * from process context. Enable interrupts here, before trying
         * to access user space.
         */
        local_irq_enable();

        if (!current->thread.vm86_info) {
                printk("no vm86_info: BAD\n");
                do_exit(SIGSEGV);
        }
        set_flags(regs->pt.flags, VEFLAGS, X86_EFLAGS_VIF | current->thread.v86mask);
        tmp = copy_vm86_regs_to_user(&current->thread.vm86_info->regs, regs);
        tmp += put_user(current->thread.screen_bitmap, &current->thread.vm86_info->screen_bitmap);
        if (tmp) {
                printk("vm86: could not access userspace vm86_info\n");
                do_exit(SIGSEGV);
        }

        tss = &per_cpu(init_tss, get_cpu());
        current->thread.sp0 = current->thread.saved_sp0;
        current->thread.sysenter_cs = __KERNEL_CS;
        load_sp0(tss, &current->thread);
        current->thread.saved_sp0 = 0;
        put_cpu();

        ret = KVM86->regs32;

        ret->fs = current->thread.saved_fs;
        loadsegment(gs, current->thread.saved_gs);

        return ret;
}

static void mark_screen_rdonly(struct mm_struct *mm)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        spinlock_t *ptl;
        int i;

        pgd = pgd_offset(mm, 0xA0000);
        if (pgd_none_or_clear_bad(pgd))
                goto out;
        pud = pud_offset(pgd, 0xA0000);
        if (pud_none_or_clear_bad(pud))
                goto out;
        pmd = pmd_offset(pud, 0xA0000);
        if (pmd_none_or_clear_bad(pmd))
                goto out;
        pte = pte_offset_map_lock(mm, pmd, 0xA0000, &ptl);
        for (i = 0; i < 32; i++) {
                if (pte_present(*pte))
                        set_pte(pte, pte_wrprotect(*pte));
                pte++;
        }
        pte_unmap_unlock(pte, ptl);
out:
        flush_tlb();
}



static int do_vm86_irq_handling(int subfunction, int irqnumber);
static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk);

asmlinkage int sys_vm86old(struct pt_regs regs)
{
        struct vm86_struct __user *v86 = (struct vm86_struct __user *)regs.bx;
        struct kernel_vm86_struct info; /* declare this _on top_,
                                         * this avoids wasting of stack space.
                                         * This remains on the stack until we
                                         * return to 32 bit user space.
                                         */
        struct task_struct *tsk;
        int tmp, ret = -EPERM;

        tsk = current;
        if (tsk->thread.saved_sp0)
                goto out;
        tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
                                       offsetof(struct kernel_vm86_struct, vm86plus) -
                                       sizeof(info.regs));
        ret = -EFAULT;
        if (tmp)
                goto out;
        memset(&info.vm86plus, 0, (int)&info.regs32 - (int)&info.vm86plus);
        info.regs32 = &regs;
        tsk->thread.vm86_info = v86;
        do_sys_vm86(&info, tsk);
        ret = 0;        /* we never return here */
out:
        return ret;
}


asmlinkage int sys_vm86(struct pt_regs regs)
{
        struct kernel_vm86_struct info; /* declare this _on top_,
                                         * this avoids wasting of stack space.
                                         * This remains on the stack until we
                                         * return to 32 bit user space.
                                         */
        struct task_struct *tsk;
        int tmp, ret;
        struct vm86plus_struct __user *v86;

        tsk = current;
        switch (regs.bx) {
        case VM86_REQUEST_IRQ:
        case VM86_FREE_IRQ:
        case VM86_GET_IRQ_BITS:
        case VM86_GET_AND_RESET_IRQ:
                ret = do_vm86_irq_handling(regs.bx, (int)regs.cx);
                goto out;
        case VM86_PLUS_INSTALL_CHECK:
                /*
                 * NOTE: on old vm86 stuff this will return the error
                 * from access_ok(), because the subfunction is
                 * interpreted as (invalid) address to vm86_struct.
                 * So the installation check works.
                 */
                ret = 0;
                goto out;
        }

        /* we come here only for functions VM86_ENTER, VM86_ENTER_NO_BYPASS */
        ret = -EPERM;
        if (tsk->thread.saved_sp0)
                goto out;
        v86 = (struct vm86plus_struct __user *)regs.cx;
        tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
                                       offsetof(struct kernel_vm86_struct, regs32) -
                                       sizeof(info.regs));
        ret = -EFAULT;
        if (tmp)
                goto out;
        info.regs32 = &regs;
        info.vm86plus.is_vm86pus = 1;
        tsk->thread.vm86_info = (struct vm86_struct __user *)v86;
        do_sys_vm86(&info, tsk);
        ret = 0;        /* we never return here */
out:
        return ret;
}


static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk)
{
        struct tss_struct *tss;
        /*
         * make sure the vm86() system call doesn't try to do anything silly
         */
        info->regs.pt.ds = 0;
        info->regs.pt.es = 0;
        info->regs.pt.fs = 0;

        /* we are clearing gs later just before "jmp resume_userspace",
         * because it is not saved/restored.
         */

        /*
         * The flags register is also special: we cannot trust that the user
         * has set it up safely, so this makes sure interrupt etc flags are
         * inherited from protected mode.
         */
        VEFLAGS = info->regs.pt.flags;
        info->regs.pt.flags &= SAFE_MASK;
        info->regs.pt.flags |= info->regs32->flags & ~SAFE_MASK;
        info->regs.pt.flags |= X86_VM_MASK;

        switch (info->cpu_type) {
        case CPU_286:
                tsk->thread.v86mask = 0;
                break;
        case CPU_386:
                tsk->thread.v86mask = X86_EFLAGS_NT | X86_EFLAGS_IOPL;
                break;
        case CPU_486:
                tsk->thread.v86mask = X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
                break;
        default:
                tsk->thread.v86mask = X86_EFLAGS_ID | X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
                break;
        }

        /*
         * Save old state, set default return value (%ax) to 0
         */
        info->regs32->ax = 0;
        tsk->thread.saved_sp0 = tsk->thread.sp0;
        tsk->thread.saved_fs = info->regs32->fs;
        savesegment(gs, tsk->thread.saved_gs);

        tss = &per_cpu(init_tss, get_cpu());
        tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
        if (cpu_has_sep)
                tsk->thread.sysenter_cs = 0;
        load_sp0(tss, &tsk->thread);
        put_cpu();

        tsk->thread.screen_bitmap = info->screen_bitmap;
        if (info->flags & VM86_SCREEN_BITMAP)
                mark_screen_rdonly(tsk->mm);

        /* call audit_syscall_exit since we do not exit via the normal paths */
        if (unlikely(current->audit_context))
                audit_syscall_exit(AUDITSC_RESULT(0), 0);

        __asm__ __volatile__(
                "movl %0,%%esp\n\t"
                "movl %1,%%ebp\n\t"
                "mov %2, %%gs\n\t"
                "jmp resume_userspace"
                : /* no outputs */
                :"r" (&info->regs), "r" (task_thread_info(tsk)), "r" (0));
        /* we never return here */
}

static inline void return_to_32bit(struct kernel_vm86_regs *regs16, int retval)
{
        struct pt_regs *regs32;

        regs32 = save_v86_state(regs16);
        regs32->ax = retval;
        __asm__ __volatile__("movl %0,%%esp\n\t"
                "movl %1,%%ebp\n\t"
                "jmp resume_userspace"
                : : "r" (regs32), "r" (current_thread_info()));
}

static inline void set_IF(struct kernel_vm86_regs *regs)
{
        VEFLAGS |= X86_EFLAGS_VIF;
        if (VEFLAGS & X86_EFLAGS_VIP)
                return_to_32bit(regs, VM86_STI);
}

static inline void clear_IF(struct kernel_vm86_regs *regs)
{
        VEFLAGS &= ~X86_EFLAGS_VIF;
}

static inline void clear_TF(struct kernel_vm86_regs *regs)
{
        regs->pt.flags &= ~X86_EFLAGS_TF;
}

static inline void clear_AC(struct kernel_vm86_regs *regs)
{
        regs->pt.flags &= ~X86_EFLAGS_AC;
}

/*
 * It is correct to call set_IF(regs) from the set_vflags_*
 * functions. However someone forgot to call clear_IF(regs)
 * in the opposite case.
 * After the command sequence CLI PUSHF STI POPF you should
 * end up with interrupts disabled, but you ended up with
 * interrupts enabled.
 *  ( I was testing my own changes, but the only bug I
 *    could find was in a function I had not changed. )
 * [KD]
 */

static inline void set_vflags_long(unsigned long flags, struct kernel_vm86_regs *regs)
{
        set_flags(VEFLAGS, flags, current->thread.v86mask);
        set_flags(regs->pt.flags, flags, SAFE_MASK);
        if (flags & X86_EFLAGS_IF)
                set_IF(regs);
        else
                clear_IF(regs);
}

static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs *regs)
{
        set_flags(VFLAGS, flags, current->thread.v86mask);
        set_flags(regs->pt.flags, flags, SAFE_MASK);
        if (flags & X86_EFLAGS_IF)
                set_IF(regs);
        else
                clear_IF(regs);
}

static inline unsigned long get_vflags(struct kernel_vm86_regs *regs)
{
        unsigned long flags = regs->pt.flags & RETURN_MASK;

        if (VEFLAGS & X86_EFLAGS_VIF)
                flags |= X86_EFLAGS_IF;
        flags |= X86_EFLAGS_IOPL;
        return flags | (VEFLAGS & current->thread.v86mask);
}

static inline int is_revectored(int nr, struct revectored_struct *bitmap)
{
        __asm__ __volatile__("btl %2,%1\n\tsbbl %0,%0"
                :"=r" (nr)
                :"m" (*bitmap), "r" (nr));
        return nr;
}

#define val_byte(val, n) (((__u8 *)&val)[n])

#define pushb(base, ptr, val, err_label) \
        do { \
                __u8 __val = val; \
                ptr--; \
                if (put_user(__val, base + ptr) < 0) \
                        goto err_label; \
        } while (0)

#define pushw(base, ptr, val, err_label) \
        do { \
                __u16 __val = val; \
                ptr--; \
                if (put_user(val_byte(__val, 1), base + ptr) < 0) \
                        goto err_label; \
                ptr--; \
                if (put_user(val_byte(__val, 0), base + ptr) < 0) \
                        goto err_label; \
        } while (0)

#define pushl(base, ptr, val, err_label) \
        do { \
                __u32 __val = val; \
                ptr--; \
                if (put_user(val_byte(__val, 3), base + ptr) < 0) \
                        goto err_label; \
                ptr--; \
                if (put_user(val_byte(__val, 2), base + ptr) < 0) \
                        goto err_label; \
                ptr--; \
                if (put_user(val_byte(__val, 1), base + ptr) < 0) \
                        goto err_label; \
                ptr--; \
                if (put_user(val_byte(__val, 0), base + ptr) < 0) \
                        goto err_label; \
        } while (0)

#define popb(base, ptr, err_label) \
        ({ \
                __u8 __res; \
                if (get_user(__res, base + ptr) < 0) \
                        goto err_label; \
                ptr++; \
                __res; \
        })

#define popw(base, ptr, err_label) \
        ({ \
                __u16 __res; \
                if (get_user(val_byte(__res, 0), base + ptr) < 0) \
                        goto err_label; \
                ptr++; \
                if (get_user(val_byte(__res, 1), base + ptr) < 0) \
                        goto err_label; \
                ptr++; \
                __res; \
        })

#define popl(base, ptr, err_label) \
        ({ \
                __u32 __res; \
                if (get_user(val_byte(__res, 0), base + ptr) < 0) \
                        goto err_label; \
                ptr++; \
                if (get_user(val_byte(__res, 1), base + ptr) < 0) \
                        goto err_label; \
                ptr++; \
                if (get_user(val_byte(__res, 2), base + ptr) < 0) \
                        goto err_label; \
                ptr++; \
                if (get_user(val_byte(__res, 3), base + ptr) < 0) \
                        goto err_label; \
                ptr++; \
                __res; \
        })

/* There are so many possible reasons for this function to return
 * VM86_INTx, so adding another doesn't bother me. We can expect
 * userspace programs to be able to handle it. (Getting a problem
 * in userspace is always better than an Oops anyway.)
 * [KD]
 */
static void do_int(struct kernel_vm86_regs *regs, int i,
                   unsigned char __user *ssp, unsigned short sp)
{
        unsigned long __user *intr_ptr;
        unsigned long segoffs;

        if (regs->pt.cs == BIOSSEG)
                goto cannot_handle;
        if (is_revectored(i, &KVM86->int_revectored))
                goto cannot_handle;
        if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
                goto cannot_handle;
        intr_ptr = (unsigned long __user *) (i << 2);
        if (get_user(segoffs, intr_ptr))
                goto cannot_handle;
        if ((segoffs >> 16) == BIOSSEG)
                goto cannot_handle;
        pushw(ssp, sp, get_vflags(regs), cannot_handle);
        pushw(ssp, sp, regs->pt.cs, cannot_handle);
        pushw(ssp, sp, IP(regs), cannot_handle);
        regs->pt.cs = segoffs >> 16;
        SP(regs) -= 6;
        IP(regs) = segoffs & 0xffff;
        clear_TF(regs);
        clear_IF(regs);
        clear_AC(regs);
        return;

cannot_handle:
        return_to_32bit(regs, VM86_INTx + (i << 8));
}

int handle_vm86_trap(struct kernel_vm86_regs *regs, long error_code, int trapno)
{
        if (VMPI.is_vm86pus) {
                if ((trapno == 3) || (trapno == 1))
                        return_to_32bit(regs, VM86_TRAP + (trapno << 8));
                do_int(regs, trapno, (unsigned char __user *) (regs->pt.ss << 4), SP(regs));
                return 0;
        }
        if (trapno != 1)
                return 1; /* we let this be handled by the calling routine */
        current->thread.trap_no = trapno;
        current->thread.error_code = error_code;
        force_sig(SIGTRAP, current);
        return 0;
}

void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code)
{
        unsigned char opcode;
        unsigned char __user *csp;
        unsigned char __user *ssp;
        unsigned short ip, sp, orig_flags;
        int data32, pref_done;

#define CHECK_IF_IN_TRAP \
        if (VMPI.vm86dbg_active && VMPI.vm86dbg_TFpendig) \
                newflags |= X86_EFLAGS_TF

#define VM86_FAULT_RETURN do { \
        if (VMPI.force_return_for_pic && (VEFLAGS & (X86_EFLAGS_IF | X86_EFLAGS_VIF))) \
                return_to_32bit(regs, VM86_PICRETURN); \
        if (orig_flags & X86_EFLAGS_TF) \
                handle_vm86_trap(regs, 0, 1); \
        return; } while (0)

        orig_flags = *(unsigned short *)&regs->pt.flags;

        csp = (unsigned char __user *) (regs->pt.cs << 4);
        ssp = (unsigned char __user *) (regs->pt.ss << 4);
        sp = SP(regs);
        ip = IP(regs);

        data32 = 0;
        pref_done = 0;
        do {
                switch (opcode = popb(csp, ip, simulate_sigsegv)) {
                case 0x66:      /* 32-bit data */     data32 = 1; break;
                case 0x67:      /* 32-bit address */  break;
                case 0x2e:      /* CS */              break;
                case 0x3e:      /* DS */              break;
                case 0x26:      /* ES */              break;
                case 0x36:      /* SS */              break;
                case 0x65:      /* GS */              break;
                case 0x64:      /* FS */              break;
                case 0xf2:      /* repnz */           break;
                case 0xf3:      /* rep */             break;
                default: pref_done = 1;
                }
        } while (!pref_done);

        switch (opcode) {

        /* pushf */
        case 0x9c:
                if (data32) {
                        pushl(ssp, sp, get_vflags(regs), simulate_sigsegv);
                        SP(regs) -= 4;
                } else {
                        pushw(ssp, sp, get_vflags(regs), simulate_sigsegv);
                        SP(regs) -= 2;
                }
                IP(regs) = ip;
                VM86_FAULT_RETURN;

        /* popf */
        case 0x9d:
        {
                unsigned long newflags;
                if (data32) {
                        newflags = popl(ssp, sp, simulate_sigsegv);
                        SP(regs) += 4;
                } else {
                        newflags = popw(ssp, sp, simulate_sigsegv);
                        SP(regs) += 2;
                }
                IP(regs) = ip;
                CHECK_IF_IN_TRAP;
                if (data32)
                        set_vflags_long(newflags, regs);
                else
                        set_vflags_short(newflags, regs);

                VM86_FAULT_RETURN;
        }
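        /*
         * Note on the INT xx case below: the vector byte is read from the
         * instruction stream; if the vm86plus debugger has claimed that
         * vector in vm86dbg_intxxtab we bail out to 32-bit mode with
         * VM86_INTx, otherwise do_int() emulates the interrupt by pushing
         * FLAGS, CS and IP on the vm86 stack and vectoring through the
         * real-mode interrupt table entry at linear address intno * 4
         * (revectored and BIOS-owned vectors also end up as VM86_INTx).
         */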

        /* int xx */
        case 0xcd: {
                int intno = popb(csp, ip, simulate_sigsegv);
                IP(regs) = ip;
                if (VMPI.vm86dbg_active) {
                        if ((1 << (intno & 7)) & VMPI.vm86dbg_intxxtab[intno >> 3])
                                return_to_32bit(regs, VM86_INTx + (intno << 8));
                }
                do_int(regs, intno, ssp, sp);
                return;
        }

        /* iret */
        case 0xcf:
        {
                unsigned long newip;
                unsigned long newcs;
                unsigned long newflags;
                if (data32) {
                        newip = popl(ssp, sp, simulate_sigsegv);
                        newcs = popl(ssp, sp, simulate_sigsegv);
                        newflags = popl(ssp, sp, simulate_sigsegv);
                        SP(regs) += 12;
                } else {
                        newip = popw(ssp, sp, simulate_sigsegv);
                        newcs = popw(ssp, sp, simulate_sigsegv);
                        newflags = popw(ssp, sp, simulate_sigsegv);
                        SP(regs) += 6;
                }
                IP(regs) = newip;
                regs->pt.cs = newcs;
                CHECK_IF_IN_TRAP;
                if (data32) {
                        set_vflags_long(newflags, regs);
                } else {
                        set_vflags_short(newflags, regs);
                }
                VM86_FAULT_RETURN;
        }

        /* cli */
        case 0xfa:
                IP(regs) = ip;
                clear_IF(regs);
                VM86_FAULT_RETURN;

        /* sti */
        /*
         * Damn. This is incorrect: the 'sti' instruction should actually
         * enable interrupts after the /next/ instruction. Not good.
         *
         * Probably needs some horsing around with the TF flag. Aiee..
         */
        case 0xfb:
                IP(regs) = ip;
                set_IF(regs);
                VM86_FAULT_RETURN;

        default:
                return_to_32bit(regs, VM86_UNKNOWN);
        }

        return;

simulate_sigsegv:
        /* FIXME: After a long discussion with Stas we finally
         *        agreed that this is wrong. Here we should
         *        really send a SIGSEGV to the user program.
         *        But how do we create the correct context? We
         *        are inside a general protection fault handler
         *        and have just returned from a page fault handler.
         *        The correct context for the signal handler
         *        should be a mixture of the two, but how do we
         *        get the information?
         *        [KD]
         */
        return_to_32bit(regs, VM86_UNKNOWN);
}

/* ---------------- vm86 special IRQ passing stuff ----------------- */

#define VM86_IRQNAME    "vm86irq"

static struct vm86_irqs {
        struct task_struct *tsk;
        int sig;
} vm86_irqs[16];

static DEFINE_SPINLOCK(irqbits_lock);
static int irqbits;

#define ALLOWED_SIGS    (1 /* 0 = don't send a signal */ \
        | (1 << SIGUSR1) | (1 << SIGUSR2) | (1 << SIGIO) | (1 << SIGURG) \
        | (1 << SIGUNUSED))

static irqreturn_t irq_handler(int intno, void *dev_id)
{
        int irq_bit;
        unsigned long flags;

        spin_lock_irqsave(&irqbits_lock, flags);
        irq_bit = 1 << intno;
        if ((irqbits & irq_bit) || !vm86_irqs[intno].tsk)
                goto out;
        irqbits |= irq_bit;
        if (vm86_irqs[intno].sig)
                send_sig(vm86_irqs[intno].sig, vm86_irqs[intno].tsk, 1);
        /*
         * IRQ will be re-enabled when user asks for the irq (whether
         * polling or as a result of the signal)
         */
        disable_irq_nosync(intno);
        spin_unlock_irqrestore(&irqbits_lock, flags);
        return IRQ_HANDLED;

out:
        spin_unlock_irqrestore(&irqbits_lock, flags);
        return IRQ_NONE;
}

static inline void free_vm86_irq(int irqnumber)
{
        unsigned long flags;

        free_irq(irqnumber, NULL);
        vm86_irqs[irqnumber].tsk = NULL;

        spin_lock_irqsave(&irqbits_lock, flags);
        irqbits &= ~(1 << irqnumber);
        spin_unlock_irqrestore(&irqbits_lock, flags);
}

void release_vm86_irqs(struct task_struct *task)
{
        int i;
        for (i = FIRST_VM86_IRQ; i <= LAST_VM86_IRQ; i++)
                if (vm86_irqs[i].tsk == task)
                        free_vm86_irq(i);
}

static inline int get_and_reset_irq(int irqnumber)
{
        int bit;
        unsigned long flags;
        int ret = 0;

        if (invalid_vm86_irq(irqnumber))
                return 0;
        if (vm86_irqs[irqnumber].tsk != current)
                return 0;
        spin_lock_irqsave(&irqbits_lock, flags);
        bit = irqbits & (1 << irqnumber);
        irqbits &= ~bit;
        if (bit) {
                enable_irq(irqnumber);
                ret = 1;
        }

        spin_unlock_irqrestore(&irqbits_lock, flags);
        return ret;
}


static int do_vm86_irq_handling(int subfunction, int irqnumber)
{
        int ret;
        switch (subfunction) {
        case VM86_GET_AND_RESET_IRQ: {
                return get_and_reset_irq(irqnumber);
        }
        case VM86_GET_IRQ_BITS: {
                return irqbits;
        }
        case VM86_REQUEST_IRQ: {
                int sig = irqnumber >> 8;
                int irq = irqnumber & 255;
                if (!capable(CAP_SYS_ADMIN))
                        return -EPERM;
                if (!((1 << sig) & ALLOWED_SIGS))
                        return -EPERM;
                if (invalid_vm86_irq(irq))
                        return -EPERM;
                if (vm86_irqs[irq].tsk)
                        return -EPERM;
                ret = request_irq(irq, &irq_handler, 0, VM86_IRQNAME, NULL);
                if (ret)
                        return ret;
                vm86_irqs[irq].sig = sig;
                vm86_irqs[irq].tsk = current;
                return irq;
        }
        case VM86_FREE_IRQ: {
                if (invalid_vm86_irq(irqnumber))
                        return -EPERM;
                if (!vm86_irqs[irqnumber].tsk)
                        return 0;
                if (vm86_irqs[irqnumber].tsk != current)
                        return -EPERM;
                free_vm86_irq(irqnumber);
                return 0;
        }
        }
        return -EINVAL;
}