/*
 *  Copyright (C) 1994  Linus Torvalds
 *
 *  29 dec 2001 - Fixed oopses caused by unchecked access to the vm86
 *                stack - Manfred Spraul <manfred@colorfullife.com>
 *
 *  22 mar 2002 - Manfred detected the stackfaults, but didn't handle
 *                them correctly. Now the emulation will be in a
 *                consistent state after stackfaults - Kasper Dupont
 *                <kasperd@daimi.au.dk>
 *
 *  22 mar 2002 - Added missing clear_IF in set_vflags_* Kasper Dupont
 *                <kasperd@daimi.au.dk>
 *
 *  ?? ??? 2002 - Fixed premature returns from handle_vm86_fault
 *                caused by Kasper Dupont's changes - Stas Sergeev
 *
 *   4 apr 2002 - Fixed CHECK_IF_IN_TRAP broken by Stas' changes.
 *                Kasper Dupont <kasperd@daimi.au.dk>
 *
 *   9 apr 2002 - Changed syntax of macros in handle_vm86_fault.
 *                Kasper Dupont <kasperd@daimi.au.dk>
 *
 *   9 apr 2002 - Changed stack access macros to jump to a label
 *                instead of returning to userspace. This simplifies
 *                do_int, and is needed by handle_vm86_fault. Kasper
 *                Dupont <kasperd@daimi.au.dk>
 *
 */

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/audit.h>
#include <linux/stddef.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/tlbflush.h>
#include <asm/irq.h>
#include <asm/syscalls.h>

/*
 * Known problems:
 *
 * Interrupt handling is not guaranteed:
 * - a real x86 will disable all interrupts for one instruction
 *   after a "mov ss,xx" to make stack handling atomic even without
 *   the 'lss' instruction. We can't guarantee this in v86 mode,
 *   as the next instruction might result in a page fault or similar.
 * - a real x86 will have interrupts disabled for one instruction
 *   past the 'sti' that enables them. We don't bother with all the
 *   details yet.
 *
 * Let's hope these problems do not actually matter for anything.
 */


#define KVM86	((struct kernel_vm86_struct *)regs)
#define VMPI	KVM86->vm86plus


/*
 * 8- and 16-bit register defines..
 */
#define AL(regs)	(((unsigned char *)&((regs)->pt.ax))[0])
#define AH(regs)	(((unsigned char *)&((regs)->pt.ax))[1])
#define IP(regs)	(*(unsigned short *)&((regs)->pt.ip))
#define SP(regs)	(*(unsigned short *)&((regs)->pt.sp))

/*
 * virtual flags (16 and 32-bit versions)
 */
#define VFLAGS	(*(unsigned short *)&(current->thread.v86flags))
#define VEFLAGS	(current->thread.v86flags)

#define set_flags(X, new, mask) \
	((X) = ((X) & ~(mask)) | ((new) & (mask)))

#define SAFE_MASK	(0xDD5)
#define RETURN_MASK	(0xDFF)
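/*
 * Bit-for-bit, SAFE_MASK (0xDD5) selects the arithmetic/control flags
 * CF, PF, AF, ZF, SF, TF, DF and OF, i.e. the flags a vm86 task may
 * modify directly; RETURN_MASK (0xDFF) additionally keeps the always-set
 * reserved bits 1, 3 and 5.  Neither mask includes IF, IOPL, NT or the
 * VME/AC/ID bits, which are handled separately through v86mask and the
 * virtual flags above.
 */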

/* convert kernel_vm86_regs to vm86_regs */
static int copy_vm86_regs_to_user(struct vm86_regs __user *user,
				  const struct kernel_vm86_regs *regs)
{
	int ret = 0;

	/*
	 * kernel_vm86_regs is missing gs, so copy everything up to
	 * (but not including) orig_eax, and then the rest including orig_eax.
	 */
	ret += copy_to_user(user, regs, offsetof(struct kernel_vm86_regs, pt.orig_ax));
	ret += copy_to_user(&user->orig_eax, &regs->pt.orig_ax,
			    sizeof(struct kernel_vm86_regs) -
			    offsetof(struct kernel_vm86_regs, pt.orig_ax));

	return ret;
}

/* convert vm86_regs to kernel_vm86_regs */
static int copy_vm86_regs_from_user(struct kernel_vm86_regs *regs,
				    const struct vm86_regs __user *user,
				    unsigned extra)
{
	int ret = 0;

	/* copy ax-fs inclusive */
	ret += copy_from_user(regs, user, offsetof(struct kernel_vm86_regs, pt.orig_ax));
	/* copy orig_ax-__gsh+extra */
	ret += copy_from_user(&regs->pt.orig_ax, &user->orig_eax,
			      sizeof(struct kernel_vm86_regs) -
			      offsetof(struct kernel_vm86_regs, pt.orig_ax) +
			      extra);
	return ret;
}

struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
{
	struct tss_struct *tss;
	struct pt_regs *ret;
	unsigned long tmp;

	/*
	 * This gets called from entry.S with interrupts disabled, but
	 * from process context. Enable interrupts here, before trying
	 * to access user space.
	 */
	local_irq_enable();

	if (!current->thread.vm86_info) {
		printk("no vm86_info: BAD\n");
		do_exit(SIGSEGV);
	}
	set_flags(regs->pt.flags, VEFLAGS, X86_EFLAGS_VIF | current->thread.v86mask);
	tmp = copy_vm86_regs_to_user(&current->thread.vm86_info->regs, regs);
	tmp += put_user(current->thread.screen_bitmap, &current->thread.vm86_info->screen_bitmap);
	if (tmp) {
		printk("vm86: could not access userspace vm86_info\n");
		do_exit(SIGSEGV);
	}

	tss = &per_cpu(init_tss, get_cpu());
	current->thread.sp0 = current->thread.saved_sp0;
	current->thread.sysenter_cs = __KERNEL_CS;
	load_sp0(tss, &current->thread);
	current->thread.saved_sp0 = 0;
	put_cpu();

	ret = KVM86->regs32;

	ret->fs = current->thread.saved_fs;
	set_user_gs(ret, current->thread.saved_gs);

	return ret;
}

static void mark_screen_rdonly(struct mm_struct *mm)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;
	int i;

	down_write(&mm->mmap_sem);
	pgd = pgd_offset(mm, 0xA0000);
	if (pgd_none_or_clear_bad(pgd))
		goto out;
	pud = pud_offset(pgd, 0xA0000);
	if (pud_none_or_clear_bad(pud))
		goto out;
	pmd = pmd_offset(pud, 0xA0000);
	split_huge_page_pmd(mm, pmd);
	if (pmd_none_or_clear_bad(pmd))
		goto out;
	pte = pte_offset_map_lock(mm, pmd, 0xA0000, &ptl);
	for (i = 0; i < 32; i++) {
		if (pte_present(*pte))
			set_pte(pte, pte_wrprotect(*pte));
		pte++;
	}
	pte_unmap_unlock(pte, ptl);
out:
	up_write(&mm->mmap_sem);
	flush_tlb();
}
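/*
 * The 32 write-protected PTEs above cover the 128 KiB legacy video window
 * at 0xA0000-0xBFFFF.  With VM86_SCREEN_BITMAP set, the intent is that the
 * first write to each of these pages faults, letting the page-fault handler
 * record the touched page in thread.screen_bitmap for the vm86 monitor.
 */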


static int do_vm86_irq_handling(int subfunction, int irqnumber);
static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk);

int sys_vm86old(struct vm86_struct __user *v86, struct pt_regs *regs)
{
	struct kernel_vm86_struct info; /* declare this _on top_,
					 * this avoids wasting stack space.
					 * This remains on the stack until we
					 * return to 32 bit user space.
					 */
	struct task_struct *tsk;
	int tmp, ret = -EPERM;

	tsk = current;
	if (tsk->thread.saved_sp0)
		goto out;
	tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
				       offsetof(struct kernel_vm86_struct, vm86plus) -
				       sizeof(info.regs));
	ret = -EFAULT;
	if (tmp)
		goto out;
	memset(&info.vm86plus, 0, (int)&info.regs32 - (int)&info.vm86plus);
	info.regs32 = regs;
	tsk->thread.vm86_info = v86;
	do_sys_vm86(&info, tsk);
	ret = 0;	/* we never return here */
out:
	return ret;
}


int sys_vm86(unsigned long cmd, unsigned long arg, struct pt_regs *regs)
{
	struct kernel_vm86_struct info; /* declare this _on top_,
					 * this avoids wasting stack space.
					 * This remains on the stack until we
					 * return to 32 bit user space.
					 */
	struct task_struct *tsk;
	int tmp, ret;
	struct vm86plus_struct __user *v86;

	tsk = current;
	switch (cmd) {
	case VM86_REQUEST_IRQ:
	case VM86_FREE_IRQ:
	case VM86_GET_IRQ_BITS:
	case VM86_GET_AND_RESET_IRQ:
		ret = do_vm86_irq_handling(cmd, (int)arg);
		goto out;
	case VM86_PLUS_INSTALL_CHECK:
		/*
		 * NOTE: on old vm86 stuff this will return the error
		 * from access_ok(), because the subfunction is
		 * interpreted as (invalid) address to vm86_struct.
		 * So the installation check works.
		 */
		ret = 0;
		goto out;
	}

	/* we come here only for functions VM86_ENTER, VM86_ENTER_NO_BYPASS */
	ret = -EPERM;
	if (tsk->thread.saved_sp0)
		goto out;
	v86 = (struct vm86plus_struct __user *)arg;
	tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
				       offsetof(struct kernel_vm86_struct, regs32) -
				       sizeof(info.regs));
	ret = -EFAULT;
	if (tmp)
		goto out;
	info.regs32 = regs;
	info.vm86plus.is_vm86pus = 1;
	tsk->thread.vm86_info = (struct vm86_struct __user *)v86;
	do_sys_vm86(&info, tsk);
	ret = 0;	/* we never return here */
out:
	return ret;
}


static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk)
{
	struct tss_struct *tss;
/*
 * make sure the vm86() system call doesn't try to do anything silly
 */
	info->regs.pt.ds = 0;
	info->regs.pt.es = 0;
	info->regs.pt.fs = 0;
#ifndef CONFIG_X86_32_LAZY_GS
	info->regs.pt.gs = 0;
#endif

/*
 * The flags register is also special: we cannot trust that the user
 * has set it up safely, so this makes sure interrupt etc flags are
 * inherited from protected mode.
 */
	VEFLAGS = info->regs.pt.flags;
	info->regs.pt.flags &= SAFE_MASK;
	info->regs.pt.flags |= info->regs32->flags & ~SAFE_MASK;
	info->regs.pt.flags |= X86_VM_MASK;

	switch (info->cpu_type) {
	case CPU_286:
		tsk->thread.v86mask = 0;
		break;
	case CPU_386:
		tsk->thread.v86mask = X86_EFLAGS_NT | X86_EFLAGS_IOPL;
		break;
	case CPU_486:
		tsk->thread.v86mask = X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
		break;
	default:
		tsk->thread.v86mask = X86_EFLAGS_ID | X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
		break;
	}

/*
 * Save old state, set default return value (%ax) to 0 (VM86_SIGNAL)
 */
	info->regs32->ax = VM86_SIGNAL;
	tsk->thread.saved_sp0 = tsk->thread.sp0;
	tsk->thread.saved_fs = info->regs32->fs;
	tsk->thread.saved_gs = get_user_gs(info->regs32);

	tss = &per_cpu(init_tss, get_cpu());
	tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
	if (cpu_has_sep)
		tsk->thread.sysenter_cs = 0;
	load_sp0(tss, &tsk->thread);
	put_cpu();

	tsk->thread.screen_bitmap = info->screen_bitmap;
	if (info->flags & VM86_SCREEN_BITMAP)
		mark_screen_rdonly(tsk->mm);

	/* call __audit_syscall_exit since we do not exit via the normal paths */
#ifdef CONFIG_AUDITSYSCALL
	if (unlikely(current->audit_context))
		__audit_syscall_exit(1, 0);
#endif

	__asm__ __volatile__(
		"movl %0,%%esp\n\t"
		"movl %1,%%ebp\n\t"
#ifdef CONFIG_X86_32_LAZY_GS
		"mov %2, %%gs\n\t"
#endif
		"jmp resume_userspace"
		: /* no outputs */
		:"r" (&info->regs), "r" (task_thread_info(tsk)), "r" (0));
	/* we never return here */
}

static inline void return_to_32bit(struct kernel_vm86_regs *regs16, int retval)
{
	struct pt_regs *regs32;

	regs32 = save_v86_state(regs16);
	regs32->ax = retval;
	__asm__ __volatile__("movl %0,%%esp\n\t"
		"movl %1,%%ebp\n\t"
		"jmp resume_userspace"
		: : "r" (regs32), "r" (current_thread_info()));
}

static inline void set_IF(struct kernel_vm86_regs *regs)
{
	VEFLAGS |= X86_EFLAGS_VIF;
	if (VEFLAGS & X86_EFLAGS_VIP)
		return_to_32bit(regs, VM86_STI);
}

static inline void clear_IF(struct kernel_vm86_regs *regs)
{
	VEFLAGS &= ~X86_EFLAGS_VIF;
}

static inline void clear_TF(struct kernel_vm86_regs *regs)
{
	regs->pt.flags &= ~X86_EFLAGS_TF;
}

static inline void clear_AC(struct kernel_vm86_regs *regs)
{
	regs->pt.flags &= ~X86_EFLAGS_AC;
}
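/*
 * A note on the virtual interrupt flag handling above: the guest's IF is
 * never placed in the hardware flags; it lives in VEFLAGS as
 * X86_EFLAGS_VIF.  X86_EFLAGS_VIP is presumably set in the flags supplied
 * by the vm86 monitor when it has an event pending, so set_IF() returning
 * with VM86_STI as soon as both bits are set gives the monitor a chance
 * to deliver that event.
 */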

/*
 * It is correct to call set_IF(regs) from the set_vflags_*
 * functions. However, someone forgot to call clear_IF(regs)
 * in the opposite case.
 * After the command sequence CLI PUSHF STI POPF you should
 * end up with interrupts disabled, but you ended up with
 * interrupts enabled.
 *  ( I was testing my own changes, but the only bug I
 *    could find was in a function I had not changed. )
 * [KD]
 */

static inline void set_vflags_long(unsigned long flags, struct kernel_vm86_regs *regs)
{
	set_flags(VEFLAGS, flags, current->thread.v86mask);
	set_flags(regs->pt.flags, flags, SAFE_MASK);
	if (flags & X86_EFLAGS_IF)
		set_IF(regs);
	else
		clear_IF(regs);
}

static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs *regs)
{
	set_flags(VFLAGS, flags, current->thread.v86mask);
	set_flags(regs->pt.flags, flags, SAFE_MASK);
	if (flags & X86_EFLAGS_IF)
		set_IF(regs);
	else
		clear_IF(regs);
}

static inline unsigned long get_vflags(struct kernel_vm86_regs *regs)
{
	unsigned long flags = regs->pt.flags & RETURN_MASK;

	if (VEFLAGS & X86_EFLAGS_VIF)
		flags |= X86_EFLAGS_IF;
	flags |= X86_EFLAGS_IOPL;
	return flags | (VEFLAGS & current->thread.v86mask);
}

static inline int is_revectored(int nr, struct revectored_struct *bitmap)
{
	__asm__ __volatile__("btl %2,%1\n\tsbbl %0,%0"
		:"=r" (nr)
		:"m" (*bitmap), "r" (nr));
	return nr;
}

#define val_byte(val, n) (((__u8 *)&val)[n])

#define pushb(base, ptr, val, err_label) \
	do { \
		__u8 __val = val; \
		ptr--; \
		if (put_user(__val, base + ptr) < 0) \
			goto err_label; \
	} while (0)

#define pushw(base, ptr, val, err_label) \
	do { \
		__u16 __val = val; \
		ptr--; \
		if (put_user(val_byte(__val, 1), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 0), base + ptr) < 0) \
			goto err_label; \
	} while (0)

#define pushl(base, ptr, val, err_label) \
	do { \
		__u32 __val = val; \
		ptr--; \
		if (put_user(val_byte(__val, 3), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 2), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 1), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 0), base + ptr) < 0) \
			goto err_label; \
	} while (0)

#define popb(base, ptr, err_label) \
	({ \
		__u8 __res; \
		if (get_user(__res, base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		__res; \
	})

#define popw(base, ptr, err_label) \
	({ \
		__u16 __res; \
		if (get_user(val_byte(__res, 0), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 1), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		__res; \
	})

#define popl(base, ptr, err_label) \
	({ \
		__u32 __res; \
		if (get_user(val_byte(__res, 0), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 1), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 2), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 3), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		__res; \
	})
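/*
 * These push/pop macros access the vm86 stack one byte at a time rather
 * than with a single 16/32-bit transfer, presumably so that the 16-bit
 * offset wraps within the 64 KiB segment the way a real-mode stack would,
 * and so that a fault on any individual byte can bail out through
 * err_label.
 */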

/* There are so many possible reasons for this function to return
 * VM86_INTx, so adding another doesn't bother me. We can expect
 * userspace programs to be able to handle it. (Getting a problem
 * in userspace is always better than an Oops anyway.) [KD]
 */
static void do_int(struct kernel_vm86_regs *regs, int i,
    unsigned char __user *ssp, unsigned short sp)
{
	unsigned long __user *intr_ptr;
	unsigned long segoffs;

	if (regs->pt.cs == BIOSSEG)
		goto cannot_handle;
	if (is_revectored(i, &KVM86->int_revectored))
		goto cannot_handle;
	if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
		goto cannot_handle;
	intr_ptr = (unsigned long __user *) (i << 2);
	if (get_user(segoffs, intr_ptr))
		goto cannot_handle;
	if ((segoffs >> 16) == BIOSSEG)
		goto cannot_handle;
	pushw(ssp, sp, get_vflags(regs), cannot_handle);
	pushw(ssp, sp, regs->pt.cs, cannot_handle);
	pushw(ssp, sp, IP(regs), cannot_handle);
	regs->pt.cs = segoffs >> 16;
	SP(regs) -= 6;
	IP(regs) = segoffs & 0xffff;
	clear_TF(regs);
	clear_IF(regs);
	clear_AC(regs);
	return;

cannot_handle:
	return_to_32bit(regs, VM86_INTx + (i << 8));
}
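/*
 * do_int() above reflects the interrupt back into the vm86 image the way
 * real mode would: vector i's handler address is read from the interrupt
 * vector table at linear address i * 4 (offset in the low word, segment
 * in the high word), a 16-bit FLAGS/CS/IP frame is pushed on the guest
 * stack, and execution continues at segment:offset with TF, AC and the
 * virtual IF cleared.
 */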

int handle_vm86_trap(struct kernel_vm86_regs *regs, long error_code, int trapno)
{
	if (VMPI.is_vm86pus) {
		if ((trapno == 3) || (trapno == 1)) {
			KVM86->regs32->ax = VM86_TRAP + (trapno << 8);
			/* setting this flag forces the code in entry_32.S to
			   call save_v86_state() and change the stack pointer
			   to KVM86->regs32 */
			set_thread_flag(TIF_IRET);
			return 0;
		}
		do_int(regs, trapno, (unsigned char __user *) (regs->pt.ss << 4), SP(regs));
		return 0;
	}
	if (trapno != 1)
		return 1; /* we let this be handled by the calling routine */
	current->thread.trap_nr = trapno;
	current->thread.error_code = error_code;
	force_sig(SIGTRAP, current);
	return 0;
}

void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code)
{
	unsigned char opcode;
	unsigned char __user *csp;
	unsigned char __user *ssp;
	unsigned short ip, sp, orig_flags;
	int data32, pref_done;

#define CHECK_IF_IN_TRAP \
	if (VMPI.vm86dbg_active && VMPI.vm86dbg_TFpendig) \
		newflags |= X86_EFLAGS_TF
#define VM86_FAULT_RETURN do { \
	if (VMPI.force_return_for_pic && (VEFLAGS & (X86_EFLAGS_IF | X86_EFLAGS_VIF))) \
		return_to_32bit(regs, VM86_PICRETURN); \
	if (orig_flags & X86_EFLAGS_TF) \
		handle_vm86_trap(regs, 0, 1); \
	return; } while (0)

	orig_flags = *(unsigned short *)&regs->pt.flags;

	csp = (unsigned char __user *) (regs->pt.cs << 4);
	ssp = (unsigned char __user *) (regs->pt.ss << 4);
	sp = SP(regs);
	ip = IP(regs);

	data32 = 0;
	pref_done = 0;
	do {
		switch (opcode = popb(csp, ip, simulate_sigsegv)) {
		case 0x66:	/* 32-bit data */	data32 = 1; break;
		case 0x67:	/* 32-bit address */	break;
		case 0x2e:	/* CS */		break;
		case 0x3e:	/* DS */		break;
		case 0x26:	/* ES */		break;
		case 0x36:	/* SS */		break;
		case 0x65:	/* GS */		break;
		case 0x64:	/* FS */		break;
		case 0xf2:	/* repnz */		break;
		case 0xf3:	/* rep */		break;
		default: pref_done = 1;
		}
	} while (!pref_done);

	switch (opcode) {

	/* pushf */
	case 0x9c:
		if (data32) {
			pushl(ssp, sp, get_vflags(regs), simulate_sigsegv);
			SP(regs) -= 4;
		} else {
			pushw(ssp, sp, get_vflags(regs), simulate_sigsegv);
			SP(regs) -= 2;
		}
		IP(regs) = ip;
		VM86_FAULT_RETURN;

	/* popf */
	case 0x9d:
		{
		unsigned long newflags;
		if (data32) {
			newflags = popl(ssp, sp, simulate_sigsegv);
			SP(regs) += 4;
		} else {
			newflags = popw(ssp, sp, simulate_sigsegv);
			SP(regs) += 2;
		}
		IP(regs) = ip;
		CHECK_IF_IN_TRAP;
		if (data32)
			set_vflags_long(newflags, regs);
		else
			set_vflags_short(newflags, regs);

		VM86_FAULT_RETURN;
		}

	/* int xx */
	case 0xcd: {
		int intno = popb(csp, ip, simulate_sigsegv);
		IP(regs) = ip;
		if (VMPI.vm86dbg_active) {
			if ((1 << (intno & 7)) & VMPI.vm86dbg_intxxtab[intno >> 3])
				return_to_32bit(regs, VM86_INTx + (intno << 8));
		}
		do_int(regs, intno, ssp, sp);
		return;
	}

	/* iret */
	case 0xcf:
		{
		unsigned long newip;
		unsigned long newcs;
		unsigned long newflags;
		if (data32) {
			newip = popl(ssp, sp, simulate_sigsegv);
			newcs = popl(ssp, sp, simulate_sigsegv);
			newflags = popl(ssp, sp, simulate_sigsegv);
			SP(regs) += 12;
		} else {
			newip = popw(ssp, sp, simulate_sigsegv);
			newcs = popw(ssp, sp, simulate_sigsegv);
			newflags = popw(ssp, sp, simulate_sigsegv);
			SP(regs) += 6;
		}
		IP(regs) = newip;
		regs->pt.cs = newcs;
		CHECK_IF_IN_TRAP;
		if (data32) {
			set_vflags_long(newflags, regs);
		} else {
			set_vflags_short(newflags, regs);
		}
		VM86_FAULT_RETURN;
		}

	/* cli */
	case 0xfa:
		IP(regs) = ip;
		clear_IF(regs);
		VM86_FAULT_RETURN;

	/* sti */
	/*
	 * Damn. This is incorrect: the 'sti' instruction should actually
	 * enable interrupts after the /next/ instruction. Not good.
	 *
	 * Probably needs some horsing around with the TF flag. Aiee..
	 */
	case 0xfb:
		IP(regs) = ip;
		set_IF(regs);
		VM86_FAULT_RETURN;

	default:
		return_to_32bit(regs, VM86_UNKNOWN);
	}

	return;

simulate_sigsegv:
	/* FIXME: After a long discussion with Stas we finally
	 *        agreed that this is wrong. Here we should
	 *        really send a SIGSEGV to the user program.
	 *        But how do we create the correct context? We
	 *        are inside a general protection fault handler
	 *        and have just returned from a page fault handler.
	 *        The correct context for the signal handler
	 *        should be a mixture of the two, but how do we
	 *        get the information? [KD]
	 */
	return_to_32bit(regs, VM86_UNKNOWN);
}

/* ---------------- vm86 special IRQ passing stuff ----------------- */

#define VM86_IRQNAME	"vm86irq"

static struct vm86_irqs {
	struct task_struct *tsk;
	int sig;
} vm86_irqs[16];

static DEFINE_SPINLOCK(irqbits_lock);
static int irqbits;

#define ALLOWED_SIGS (1 /* 0 = don't send a signal */ \
	| (1 << SIGUSR1) | (1 << SIGUSR2) | (1 << SIGIO)  | (1 << SIGURG) \
	| (1 << SIGUNUSED))

static irqreturn_t irq_handler(int intno, void *dev_id)
{
	int irq_bit;
	unsigned long flags;

	spin_lock_irqsave(&irqbits_lock, flags);
	irq_bit = 1 << intno;
	if ((irqbits & irq_bit) || !vm86_irqs[intno].tsk)
		goto out;
	irqbits |= irq_bit;
	if (vm86_irqs[intno].sig)
		send_sig(vm86_irqs[intno].sig, vm86_irqs[intno].tsk, 1);
	/*
	 * IRQ will be re-enabled when user asks for the irq (whether
	 * polling or as a result of the signal)
	 */
	disable_irq_nosync(intno);
	spin_unlock_irqrestore(&irqbits_lock, flags);
	return IRQ_HANDLED;

out:
	spin_unlock_irqrestore(&irqbits_lock, flags);
	return IRQ_NONE;
}

static inline void free_vm86_irq(int irqnumber)
{
	unsigned long flags;

	free_irq(irqnumber, NULL);
	vm86_irqs[irqnumber].tsk = NULL;

	spin_lock_irqsave(&irqbits_lock, flags);
	irqbits &= ~(1 << irqnumber);
	spin_unlock_irqrestore(&irqbits_lock, flags);
}

void release_vm86_irqs(struct task_struct *task)
{
	int i;
	for (i = FIRST_VM86_IRQ ; i <= LAST_VM86_IRQ; i++)
		if (vm86_irqs[i].tsk == task)
			free_vm86_irq(i);
}

static inline int get_and_reset_irq(int irqnumber)
{
	int bit;
	unsigned long flags;
	int ret = 0;

	if (invalid_vm86_irq(irqnumber)) return 0;
	if (vm86_irqs[irqnumber].tsk != current) return 0;
	spin_lock_irqsave(&irqbits_lock, flags);
	bit = irqbits & (1 << irqnumber);
	irqbits &= ~bit;
	if (bit) {
		enable_irq(irqnumber);
		ret = 1;
	}

	spin_unlock_irqrestore(&irqbits_lock, flags);
	return ret;
}


static int do_vm86_irq_handling(int subfunction, int irqnumber)
{
	int ret;
	switch (subfunction) {
	case VM86_GET_AND_RESET_IRQ: {
		return get_and_reset_irq(irqnumber);
	}
	case VM86_GET_IRQ_BITS: {
		return irqbits;
	}
	case VM86_REQUEST_IRQ: {
		int sig = irqnumber >> 8;
		int irq = irqnumber & 255;
		if (!capable(CAP_SYS_ADMIN)) return -EPERM;
		if (!((1 << sig) & ALLOWED_SIGS)) return -EPERM;
		if (invalid_vm86_irq(irq)) return -EPERM;
		if (vm86_irqs[irq].tsk) return -EPERM;
		ret = request_irq(irq, &irq_handler, 0, VM86_IRQNAME, NULL);
		if (ret) return ret;
		vm86_irqs[irq].sig = sig;
		vm86_irqs[irq].tsk = current;
		return irq;
	}
	case VM86_FREE_IRQ: {
		if (invalid_vm86_irq(irqnumber)) return -EPERM;
		if (!vm86_irqs[irqnumber].tsk) return 0;
		if (vm86_irqs[irqnumber].tsk != current) return -EPERM;
		free_vm86_irq(irqnumber);
		return 0;
	}
	}
	return -EINVAL;
}
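
/*
 * For reference: the retval passed to return_to_32bit() and stored in
 * regs32->ax above is encoded as (type | (arg << 8)), the type being one
 * of VM86_SIGNAL, VM86_UNKNOWN, VM86_INTx, VM86_STI, VM86_PICRETURN or
 * VM86_TRAP.  Userspace is expected to decode it with the VM86_TYPE() and
 * VM86_ARG() macros from <asm/vm86.h>.
 */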