// SPDX-License-Identifier: GPL-2.0
/*
 *  Copyright (C) 1994  Linus Torvalds
 *
 *  29 dec 2001 - Fixed oopses caused by unchecked access to the vm86
 *                stack - Manfred Spraul <manfred@colorfullife.com>
 *
 *  22 mar 2002 - Manfred detected the stackfaults, but didn't handle
 *                them correctly. Now the emulation will be in a
 *                consistent state after stackfaults - Kasper Dupont
 *                <kasperd@daimi.au.dk>
 *
 *  22 mar 2002 - Added missing clear_IF in set_vflags_* Kasper Dupont
 *                <kasperd@daimi.au.dk>
 *
 *  ?? ??? 2002 - Fixed premature returns from handle_vm86_fault
 *                caused by Kasper Dupont's changes - Stas Sergeev
 *
 *   4 apr 2002 - Fixed CHECK_IF_IN_TRAP broken by Stas' changes.
 *                Kasper Dupont <kasperd@daimi.au.dk>
 *
 *   9 apr 2002 - Changed syntax of macros in handle_vm86_fault.
 *                Kasper Dupont <kasperd@daimi.au.dk>
 *
 *   9 apr 2002 - Changed stack access macros to jump to a label
 *                instead of returning to userspace. This simplifies
 *                do_int, and is needed by handle_vm86_fault. Kasper
 *                Dupont <kasperd@daimi.au.dk>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/syscalls.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/audit.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/security.h>

#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/tlbflush.h>
#include <asm/irq.h>
#include <asm/traps.h>
#include <asm/vm86.h>
#include <asm/switch_to.h>

/*
 * Known problems:
 *
 * Interrupt handling is not guaranteed:
 * - a real x86 will disable all interrupts for one instruction
 *   after a "mov ss,xx" to make stack handling atomic even without
 *   the 'lss' instruction. We can't guarantee this in v86 mode,
 *   as the next instruction might result in a page fault or similar.
 * - a real x86 will have interrupts disabled for one instruction
 *   past the 'sti' that enables them. We don't bother with all the
 *   details yet.
 *
 * Let's hope these problems do not actually matter for anything.
 */


/*
 * 8- and 16-bit register defines..
 */
#define AL(regs)	(((unsigned char *)&((regs)->pt.ax))[0])
#define AH(regs)	(((unsigned char *)&((regs)->pt.ax))[1])
#define IP(regs)	(*(unsigned short *)&((regs)->pt.ip))
#define SP(regs)	(*(unsigned short *)&((regs)->pt.sp))

/*
 * virtual flags (16 and 32-bit versions)
 */
#define VFLAGS	(*(unsigned short *)&(current->thread.vm86->veflags))
#define VEFLAGS	(current->thread.vm86->veflags)

#define set_flags(X, new, mask) \
	((X) = ((X) & ~(mask)) | ((new) & (mask)))

#define SAFE_MASK	(0xDD5)
#define RETURN_MASK	(0xDFF)
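
/*
 * Worked example (illustration only, not compiled): set_flags() keeps
 * the bits of X outside 'mask' and takes the bits inside 'mask' from
 * 'new'.  With X = 0x0246 (IF and ZF set), new = 0x0001 (CF) and
 * mask = SAFE_MASK:
 *
 *	X = (0x0246 & ~0xDD5) | (0x0001 & 0xDD5) = 0x0202 | 0x0001 = 0x0203
 *
 * i.e. CF is taken from 'new', while IF (bit 9, outside SAFE_MASK)
 * keeps its old value.  Neither mask includes IF or IOPL (bits 12-13);
 * those are virtualized separately below.
 */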

void save_v86_state(struct kernel_vm86_regs *regs, int retval)
{
	struct task_struct *tsk = current;
	struct vm86plus_struct __user *user;
	struct vm86 *vm86 = current->thread.vm86;

	/*
	 * This gets called from entry.S with interrupts disabled, but
	 * from process context. Enable interrupts here, before trying
	 * to access user space.
	 */
	local_irq_enable();

	if (!vm86 || !vm86->user_vm86) {
		pr_alert("no user_vm86: BAD\n");
		do_exit(SIGSEGV);
	}
	set_flags(regs->pt.flags, VEFLAGS, X86_EFLAGS_VIF | vm86->veflags_mask);
	user = vm86->user_vm86;

	if (!user_access_begin(user, vm86->vm86plus.is_vm86pus ?
			       sizeof(struct vm86plus_struct) :
			       sizeof(struct vm86_struct)))
		goto Efault;

	unsafe_put_user(regs->pt.bx, &user->regs.ebx, Efault_end);
	unsafe_put_user(regs->pt.cx, &user->regs.ecx, Efault_end);
	unsafe_put_user(regs->pt.dx, &user->regs.edx, Efault_end);
	unsafe_put_user(regs->pt.si, &user->regs.esi, Efault_end);
	unsafe_put_user(regs->pt.di, &user->regs.edi, Efault_end);
	unsafe_put_user(regs->pt.bp, &user->regs.ebp, Efault_end);
	unsafe_put_user(regs->pt.ax, &user->regs.eax, Efault_end);
	unsafe_put_user(regs->pt.ip, &user->regs.eip, Efault_end);
	unsafe_put_user(regs->pt.cs, &user->regs.cs, Efault_end);
	unsafe_put_user(regs->pt.flags, &user->regs.eflags, Efault_end);
	unsafe_put_user(regs->pt.sp, &user->regs.esp, Efault_end);
	unsafe_put_user(regs->pt.ss, &user->regs.ss, Efault_end);
	unsafe_put_user(regs->es, &user->regs.es, Efault_end);
	unsafe_put_user(regs->ds, &user->regs.ds, Efault_end);
	unsafe_put_user(regs->fs, &user->regs.fs, Efault_end);
	unsafe_put_user(regs->gs, &user->regs.gs, Efault_end);

	/*
	 * Don't write screen_bitmap in case some user had a value there
	 * and expected it to remain unchanged.
	 */

	user_access_end();

exit_vm86:
	preempt_disable();
	tsk->thread.sp0 = vm86->saved_sp0;
	tsk->thread.sysenter_cs = __KERNEL_CS;
	update_task_stack(tsk);
	refresh_sysenter_cs(&tsk->thread);
	vm86->saved_sp0 = 0;
	preempt_enable();

	memcpy(&regs->pt, &vm86->regs32, sizeof(struct pt_regs));

	lazy_load_gs(vm86->regs32.gs);

	regs->pt.ax = retval;
	return;

Efault_end:
	user_access_end();
Efault:
	pr_alert("could not access userspace vm86 info\n");
	force_fatal_sig(SIGSEGV);
	goto exit_vm86;
}

static int do_vm86_irq_handling(int subfunction, int irqnumber);
static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus);

SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, user_vm86)
{
	return do_sys_vm86((struct vm86plus_struct __user *) user_vm86, false);
}
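
/*
 * Illustration only (userspace, hypothetical): a legacy caller enters
 * v86 mode through vm86old() roughly like this, after mapping the low
 * 1MB itself (see do_sys_vm86() below).  The register values are made
 * up for the example; SYS_vm86old is the i386 syscall number macro:
 *
 *	struct vm86_struct vm = { 0 };
 *	vm.regs.cs  = 0x0000;
 *	vm.regs.eip = 0x7c00;		// hypothetical real-mode entry
 *	vm.regs.ss  = 0x9000;		// hypothetical real-mode stack
 *	vm.regs.esp = 0xfffe;
 *	vm.cpu_type = CPU_486;
 *	int ret = syscall(SYS_vm86old, &vm);	// blocks until save_v86_state()
 *						// hands back a VM86_* status in eax
 */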

SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
{
	switch (cmd) {
	case VM86_REQUEST_IRQ:
	case VM86_FREE_IRQ:
	case VM86_GET_IRQ_BITS:
	case VM86_GET_AND_RESET_IRQ:
		return do_vm86_irq_handling(cmd, (int)arg);
	case VM86_PLUS_INSTALL_CHECK:
		/*
		 * NOTE: on old vm86 stuff this will return the error
		 * from access_ok(), because the subfunction is
		 * interpreted as (invalid) address to vm86_struct.
		 * So the installation check works.
		 */
		return 0;
	}

	/* we come here only for functions VM86_ENTER, VM86_ENTER_NO_BYPASS */
	return do_sys_vm86((struct vm86plus_struct __user *) arg, true);
}


static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus)
{
	struct task_struct *tsk = current;
	struct vm86 *vm86 = tsk->thread.vm86;
	struct kernel_vm86_regs vm86regs;
	struct pt_regs *regs = current_pt_regs();
	unsigned long err = 0;
	struct vm86_struct v;

	err = security_mmap_addr(0);
	if (err) {
		/*
		 * vm86 cannot virtualize the address space, so vm86 users
		 * need to manage the low 1MB themselves using mmap. Given
		 * that BIOS places important data in the first page, vm86
		 * is essentially useless if mmap_min_addr != 0. DOSEMU,
		 * for example, won't even bother trying to use vm86 if it
		 * can't map a page at virtual address 0.
		 *
		 * To reduce the available kernel attack surface, simply
		 * disallow vm86(old) for users who cannot mmap at va 0.
		 *
		 * The implementation of security_mmap_addr will allow
		 * suitably privileged users to map va 0 even if
		 * vm.mmap_min_addr is set above 0, and we want this
		 * behavior for vm86 as well, as it ensures that legacy
		 * tools like vbetool will not fail just because of
		 * vm.mmap_min_addr.
		 */
		pr_info_once("Denied a call to vm86(old) from %s[%d] (uid: %d). Set the vm.mmap_min_addr sysctl to 0 and/or adjust LSM mmap_min_addr policy to enable vm86 if you are using a vm86-based DOS emulator.\n",
			     current->comm, task_pid_nr(current),
			     from_kuid_munged(&init_user_ns, current_uid()));
		return -EPERM;
	}

	if (!vm86) {
		if (!(vm86 = kzalloc(sizeof(*vm86), GFP_KERNEL)))
			return -ENOMEM;
		tsk->thread.vm86 = vm86;
	}
	if (vm86->saved_sp0)
		return -EPERM;

	if (copy_from_user(&v, user_vm86,
			   offsetof(struct vm86_struct, int_revectored)))
		return -EFAULT;
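
	/*
	 * Illustration only (userspace, hypothetical): the mapping that
	 * the security_mmap_addr() check above is really gating.  A DOS
	 * emulator prepares the low 1MB before calling vm86():
	 *
	 *	void *low = mmap((void *)0, 0x100000,
	 *			 PROT_READ | PROT_WRITE | PROT_EXEC,
	 *			 MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	 *
	 * which only succeeds when vm.mmap_min_addr (and any LSM policy)
	 * permits mapping virtual address 0.
	 */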

	/* VM86_SCREEN_BITMAP had numerous bugs and appears to have no users. */
	if (v.flags & VM86_SCREEN_BITMAP) {
		char comm[TASK_COMM_LEN];

		pr_info_once("vm86: '%s' uses VM86_SCREEN_BITMAP, which is no longer supported\n",
			     get_task_comm(comm, current));
		return -EINVAL;
	}

	memset(&vm86regs, 0, sizeof(vm86regs));

	vm86regs.pt.bx = v.regs.ebx;
	vm86regs.pt.cx = v.regs.ecx;
	vm86regs.pt.dx = v.regs.edx;
	vm86regs.pt.si = v.regs.esi;
	vm86regs.pt.di = v.regs.edi;
	vm86regs.pt.bp = v.regs.ebp;
	vm86regs.pt.ax = v.regs.eax;
	vm86regs.pt.ip = v.regs.eip;
	vm86regs.pt.cs = v.regs.cs;
	vm86regs.pt.flags = v.regs.eflags;
	vm86regs.pt.sp = v.regs.esp;
	vm86regs.pt.ss = v.regs.ss;
	vm86regs.es = v.regs.es;
	vm86regs.ds = v.regs.ds;
	vm86regs.fs = v.regs.fs;
	vm86regs.gs = v.regs.gs;

	vm86->flags = v.flags;
	vm86->cpu_type = v.cpu_type;

	if (copy_from_user(&vm86->int_revectored,
			   &user_vm86->int_revectored,
			   sizeof(struct revectored_struct)))
		return -EFAULT;
	if (copy_from_user(&vm86->int21_revectored,
			   &user_vm86->int21_revectored,
			   sizeof(struct revectored_struct)))
		return -EFAULT;
	if (plus) {
		if (copy_from_user(&vm86->vm86plus, &user_vm86->vm86plus,
				   sizeof(struct vm86plus_info_struct)))
			return -EFAULT;
		vm86->vm86plus.is_vm86pus = 1;
	} else
		memset(&vm86->vm86plus, 0,
		       sizeof(struct vm86plus_info_struct));

	memcpy(&vm86->regs32, regs, sizeof(struct pt_regs));
	vm86->user_vm86 = user_vm86;

	/*
	 * The flags register is also special: we cannot trust that the user
	 * has set it up safely, so this makes sure interrupt etc flags are
	 * inherited from protected mode.
	 */
	VEFLAGS = vm86regs.pt.flags;
	vm86regs.pt.flags &= SAFE_MASK;
	vm86regs.pt.flags |= regs->flags & ~SAFE_MASK;
	vm86regs.pt.flags |= X86_VM_MASK;

	vm86regs.pt.orig_ax = regs->orig_ax;

	switch (vm86->cpu_type) {
	case CPU_286:
		vm86->veflags_mask = 0;
		break;
	case CPU_386:
		vm86->veflags_mask = X86_EFLAGS_NT | X86_EFLAGS_IOPL;
		break;
	case CPU_486:
		vm86->veflags_mask = X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
		break;
	default:
		vm86->veflags_mask = X86_EFLAGS_ID | X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
		break;
	}

	/*
	 * Save old state
	 */
	vm86->saved_sp0 = tsk->thread.sp0;
	lazy_save_gs(vm86->regs32.gs);

	/* make room for real-mode segments */
	preempt_disable();
	tsk->thread.sp0 += 16;

	if (boot_cpu_has(X86_FEATURE_SEP)) {
		tsk->thread.sysenter_cs = 0;
		refresh_sysenter_cs(&tsk->thread);
	}

	update_task_stack(tsk);
	preempt_enable();

	memcpy((struct kernel_vm86_regs *)regs, &vm86regs, sizeof(vm86regs));
	return regs->ax;
}

static inline void set_IF(struct kernel_vm86_regs *regs)
{
	VEFLAGS |= X86_EFLAGS_VIF;
}

static inline void clear_IF(struct kernel_vm86_regs *regs)
{
	VEFLAGS &= ~X86_EFLAGS_VIF;
}

static inline void clear_TF(struct kernel_vm86_regs *regs)
{
	regs->pt.flags &= ~X86_EFLAGS_TF;
}

static inline void clear_AC(struct kernel_vm86_regs *regs)
{
	regs->pt.flags &= ~X86_EFLAGS_AC;
}
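
/*
 * Illustration (not compiled): the guest's IF is purely virtual.  A
 * v86 CLI/PUSHF pair goes through the helpers above and below as:
 *
 *	clear_IF(regs);			// CLI:   VEFLAGS &= ~X86_EFLAGS_VIF
 *	flags = get_vflags(regs);	// PUSHF: IF bit rebuilt from VIF == 0
 *
 * so the task's real EFLAGS.IF is never exposed to, or writable by,
 * the v86 guest.
 */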

/*
 * It is correct to call set_IF(regs) from the set_vflags_*
 * functions. However someone forgot to call clear_IF(regs)
 * in the opposite case.
 * After the command sequence CLI PUSHF STI POPF you should
 * end up with interrupts disabled, but you ended up with
 * interrupts enabled.
 *  ( I was testing my own changes, but the only bug I
 *    could find was in a function I had not changed. )
 * [KD]
 */

static inline void set_vflags_long(unsigned long flags, struct kernel_vm86_regs *regs)
{
	set_flags(VEFLAGS, flags, current->thread.vm86->veflags_mask);
	set_flags(regs->pt.flags, flags, SAFE_MASK);
	if (flags & X86_EFLAGS_IF)
		set_IF(regs);
	else
		clear_IF(regs);
}

static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs *regs)
{
	set_flags(VFLAGS, flags, current->thread.vm86->veflags_mask);
	set_flags(regs->pt.flags, flags, SAFE_MASK);
	if (flags & X86_EFLAGS_IF)
		set_IF(regs);
	else
		clear_IF(regs);
}

static inline unsigned long get_vflags(struct kernel_vm86_regs *regs)
{
	unsigned long flags = regs->pt.flags & RETURN_MASK;

	if (VEFLAGS & X86_EFLAGS_VIF)
		flags |= X86_EFLAGS_IF;
	flags |= X86_EFLAGS_IOPL;
	return flags | (VEFLAGS & current->thread.vm86->veflags_mask);
}

static inline int is_revectored(int nr, struct revectored_struct *bitmap)
{
	return test_bit(nr, bitmap->__map);
}

#define val_byte(val, n) (((__u8 *)&val)[n])

#define pushb(base, ptr, val, err_label) \
	do { \
		__u8 __val = val; \
		ptr--; \
		if (put_user(__val, base + ptr) < 0) \
			goto err_label; \
	} while (0)

#define pushw(base, ptr, val, err_label) \
	do { \
		__u16 __val = val; \
		ptr--; \
		if (put_user(val_byte(__val, 1), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 0), base + ptr) < 0) \
			goto err_label; \
	} while (0)

#define pushl(base, ptr, val, err_label) \
	do { \
		__u32 __val = val; \
		ptr--; \
		if (put_user(val_byte(__val, 3), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 2), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 1), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 0), base + ptr) < 0) \
			goto err_label; \
	} while (0)

#define popb(base, ptr, err_label) \
	({ \
		__u8 __res; \
		if (get_user(__res, base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		__res; \
	})

#define popw(base, ptr, err_label) \
	({ \
		__u16 __res; \
		if (get_user(val_byte(__res, 0), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 1), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		__res; \
	})

#define popl(base, ptr, err_label) \
	({ \
		__u32 __res; \
		if (get_user(val_byte(__res, 0), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 1), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 2), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 3), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		__res; \
	})
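
/*
 * Illustration (not compiled): the helpers above deliberately access
 * the stack one byte at a time so that the 16-bit 'ptr' offset wraps
 * around inside the 64K segment exactly like a real-mode SP would.
 * Building an interrupt frame, as do_int() does below:
 *
 *	pushw(ssp, sp, get_vflags(regs), fault);	// sp -= 2, may wrap
 *	pushw(ssp, sp, regs->pt.cs, fault);
 *	pushw(ssp, sp, IP(regs), fault);		// 'fault' is a local label
 */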

/*
 * There are so many possible reasons for this function to return
 * VM86_INTx, so adding another doesn't bother me. We can expect
 * userspace programs to be able to handle it. (Getting a problem
 * in userspace is always better than an Oops anyway.)
 * [KD]
 */
static void do_int(struct kernel_vm86_regs *regs, int i,
		   unsigned char __user *ssp, unsigned short sp)
{
	unsigned long __user *intr_ptr;
	unsigned long segoffs;
	struct vm86 *vm86 = current->thread.vm86;

	if (regs->pt.cs == BIOSSEG)
		goto cannot_handle;
	if (is_revectored(i, &vm86->int_revectored))
		goto cannot_handle;
	if (i == 0x21 && is_revectored(AH(regs), &vm86->int21_revectored))
		goto cannot_handle;
	intr_ptr = (unsigned long __user *) (i << 2);
	if (get_user(segoffs, intr_ptr))
		goto cannot_handle;
	if ((segoffs >> 16) == BIOSSEG)
		goto cannot_handle;
	pushw(ssp, sp, get_vflags(regs), cannot_handle);
	pushw(ssp, sp, regs->pt.cs, cannot_handle);
	pushw(ssp, sp, IP(regs), cannot_handle);
	regs->pt.cs = segoffs >> 16;
	SP(regs) -= 6;
	IP(regs) = segoffs & 0xffff;
	clear_TF(regs);
	clear_IF(regs);
	clear_AC(regs);
	return;

cannot_handle:
	save_v86_state(regs, VM86_INTx + (i << 8));
}

int handle_vm86_trap(struct kernel_vm86_regs *regs, long error_code, int trapno)
{
	struct vm86 *vm86 = current->thread.vm86;

	if (vm86->vm86plus.is_vm86pus) {
		if ((trapno == 3) || (trapno == 1)) {
			save_v86_state(regs, VM86_TRAP + (trapno << 8));
			return 0;
		}
		do_int(regs, trapno, (unsigned char __user *) (regs->pt.ss << 4), SP(regs));
		return 0;
	}
	if (trapno != 1)
		return 1; /* we let this be handled by the calling routine */
	current->thread.trap_nr = trapno;
	current->thread.error_code = error_code;
	force_sig(SIGTRAP);
	return 0;
}

void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code)
{
	unsigned char opcode;
	unsigned char __user *csp;
	unsigned char __user *ssp;
	unsigned short ip, sp, orig_flags;
	int data32, pref_done;
	struct vm86plus_info_struct *vmpi = &current->thread.vm86->vm86plus;

#define CHECK_IF_IN_TRAP \
	if (vmpi->vm86dbg_active && vmpi->vm86dbg_TFpendig) \
		newflags |= X86_EFLAGS_TF

	orig_flags = *(unsigned short *)&regs->pt.flags;

	csp = (unsigned char __user *) (regs->pt.cs << 4);
	ssp = (unsigned char __user *) (regs->pt.ss << 4);
	sp = SP(regs);
	ip = IP(regs);

	data32 = 0;
	pref_done = 0;
	do {
		switch (opcode = popb(csp, ip, simulate_sigsegv)) {
		case 0x66:	/* 32-bit data */	data32 = 1; break;
		case 0x67:	/* 32-bit address */	break;
		case 0x2e:	/* CS */		break;
		case 0x3e:	/* DS */		break;
		case 0x26:	/* ES */		break;
		case 0x36:	/* SS */		break;
		case 0x65:	/* GS */		break;
		case 0x64:	/* FS */		break;
		case 0xf2:	/* repnz */		break;
		case 0xf3:	/* rep */		break;
		default: pref_done = 1;
		}
	} while (!pref_done);
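
	/*
	 * Illustration: the loop above strips prefix bytes before the
	 * operative opcode.  For the byte sequence 66 9c ("pushfd") we
	 * leave with data32 = 1 and opcode = 0x9c; a bare 9c ("pushf")
	 * leaves data32 = 0, selecting the 16-bit push below.
	 */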

	switch (opcode) {

	/* pushf */
	case 0x9c:
		if (data32) {
			pushl(ssp, sp, get_vflags(regs), simulate_sigsegv);
			SP(regs) -= 4;
		} else {
			pushw(ssp, sp, get_vflags(regs), simulate_sigsegv);
			SP(regs) -= 2;
		}
		IP(regs) = ip;
		goto vm86_fault_return;

	/* popf */
	case 0x9d:
	{
		unsigned long newflags;
		if (data32) {
			newflags = popl(ssp, sp, simulate_sigsegv);
			SP(regs) += 4;
		} else {
			newflags = popw(ssp, sp, simulate_sigsegv);
			SP(regs) += 2;
		}
		IP(regs) = ip;
		CHECK_IF_IN_TRAP;
		if (data32)
			set_vflags_long(newflags, regs);
		else
			set_vflags_short(newflags, regs);

		goto check_vip;
	}

	/* int xx */
	case 0xcd: {
		int intno = popb(csp, ip, simulate_sigsegv);
		IP(regs) = ip;
		if (vmpi->vm86dbg_active) {
			if ((1 << (intno & 7)) & vmpi->vm86dbg_intxxtab[intno >> 3]) {
				save_v86_state(regs, VM86_INTx + (intno << 8));
				return;
			}
		}
		do_int(regs, intno, ssp, sp);
		return;
	}

	/* iret */
	case 0xcf:
	{
		unsigned long newip;
		unsigned long newcs;
		unsigned long newflags;
		if (data32) {
			newip = popl(ssp, sp, simulate_sigsegv);
			newcs = popl(ssp, sp, simulate_sigsegv);
			newflags = popl(ssp, sp, simulate_sigsegv);
			SP(regs) += 12;
		} else {
			newip = popw(ssp, sp, simulate_sigsegv);
			newcs = popw(ssp, sp, simulate_sigsegv);
			newflags = popw(ssp, sp, simulate_sigsegv);
			SP(regs) += 6;
		}
		IP(regs) = newip;
		regs->pt.cs = newcs;
		CHECK_IF_IN_TRAP;
		if (data32) {
			set_vflags_long(newflags, regs);
		} else {
			set_vflags_short(newflags, regs);
		}
		goto check_vip;
	}

	/* cli */
	case 0xfa:
		IP(regs) = ip;
		clear_IF(regs);
		goto vm86_fault_return;

	/* sti */
	/*
	 * Damn. This is incorrect: the 'sti' instruction should actually
	 * enable interrupts after the /next/ instruction. Not good.
	 *
	 * Probably needs some horsing around with the TF flag. Aiee..
	 */
	case 0xfb:
		IP(regs) = ip;
		set_IF(regs);
		goto check_vip;

	default:
		save_v86_state(regs, VM86_UNKNOWN);
	}

	return;

check_vip:
	if ((VEFLAGS & (X86_EFLAGS_VIP | X86_EFLAGS_VIF)) ==
	    (X86_EFLAGS_VIP | X86_EFLAGS_VIF)) {
		save_v86_state(regs, VM86_STI);
		return;
	}

vm86_fault_return:
	if (vmpi->force_return_for_pic && (VEFLAGS & (X86_EFLAGS_IF | X86_EFLAGS_VIF))) {
		save_v86_state(regs, VM86_PICRETURN);
		return;
	}
	if (orig_flags & X86_EFLAGS_TF)
		handle_vm86_trap(regs, 0, X86_TRAP_DB);
	return;

simulate_sigsegv:
	/* FIXME: After a long discussion with Stas we finally
	 *        agreed, that this is wrong. Here we should
	 *        really send a SIGSEGV to the user program.
	 *        But how do we create the correct context? We
	 *        are inside a general protection fault handler
	 *        and have just returned from a page fault handler.
	 *        The correct context for the signal handler
	 *        should be a mixture of the two, but how do we
	 *        get the information?
	 *        [KD]
	 */
	save_v86_state(regs, VM86_UNKNOWN);
}
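
/*
 * Illustration only (userspace, hypothetical): a v86 monitor dispatches
 * on the VM86_* status that save_v86_state() places in eax when control
 * drops back out of the guest:
 *
 *	int ret = syscall(SYS_vm86, VM86_ENTER, &v86);	// v86: caller's state
 *	switch (VM86_TYPE(ret)) {
 *	case VM86_INTx:		// emulate int VM86_ARG(ret), e.g. 0x10
 *	case VM86_STI:		// VIP was set and the guest ran sti/popf/iret
 *	case VM86_PICRETURN:	// force_return_for_pic request
 *	case VM86_UNKNOWN:	// unhandled GP fault, see VM86_UNKNOWN above
 *		break;
 *	}
 */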

/* ---------------- vm86 special IRQ passing stuff ----------------- */

#define VM86_IRQNAME	"vm86irq"

static struct vm86_irqs {
	struct task_struct *tsk;
	int sig;
} vm86_irqs[16];

static DEFINE_SPINLOCK(irqbits_lock);
static int irqbits;

#define ALLOWED_SIGS (1 /* 0 = don't send a signal */ \
	| (1 << SIGUSR1) | (1 << SIGUSR2) | (1 << SIGIO)  | (1 << SIGURG) \
	| (1 << SIGUNUSED))

static irqreturn_t irq_handler(int intno, void *dev_id)
{
	int irq_bit;
	unsigned long flags;

	spin_lock_irqsave(&irqbits_lock, flags);
	irq_bit = 1 << intno;
	if ((irqbits & irq_bit) || !vm86_irqs[intno].tsk)
		goto out;
	irqbits |= irq_bit;
	if (vm86_irqs[intno].sig)
		send_sig(vm86_irqs[intno].sig, vm86_irqs[intno].tsk, 1);
	/*
	 * IRQ will be re-enabled when user asks for the irq (whether
	 * polling or as a result of the signal)
	 */
	disable_irq_nosync(intno);
	spin_unlock_irqrestore(&irqbits_lock, flags);
	return IRQ_HANDLED;

out:
	spin_unlock_irqrestore(&irqbits_lock, flags);
	return IRQ_NONE;
}

static inline void free_vm86_irq(int irqnumber)
{
	unsigned long flags;

	free_irq(irqnumber, NULL);
	vm86_irqs[irqnumber].tsk = NULL;

	spin_lock_irqsave(&irqbits_lock, flags);
	irqbits &= ~(1 << irqnumber);
	spin_unlock_irqrestore(&irqbits_lock, flags);
}

void release_vm86_irqs(struct task_struct *task)
{
	int i;

	for (i = FIRST_VM86_IRQ; i <= LAST_VM86_IRQ; i++)
		if (vm86_irqs[i].tsk == task)
			free_vm86_irq(i);
}

static inline int get_and_reset_irq(int irqnumber)
{
	int bit;
	unsigned long flags;
	int ret = 0;

	if (invalid_vm86_irq(irqnumber))
		return 0;
	if (vm86_irqs[irqnumber].tsk != current)
		return 0;
	spin_lock_irqsave(&irqbits_lock, flags);
	bit = irqbits & (1 << irqnumber);
	irqbits &= ~bit;
	if (bit) {
		enable_irq(irqnumber);
		ret = 1;
	}

	spin_unlock_irqrestore(&irqbits_lock, flags);
	return ret;
}


static int do_vm86_irq_handling(int subfunction, int irqnumber)
{
	int ret;

	switch (subfunction) {
	case VM86_GET_AND_RESET_IRQ: {
		return get_and_reset_irq(irqnumber);
	}
	case VM86_GET_IRQ_BITS: {
		return irqbits;
	}
	case VM86_REQUEST_IRQ: {
		int sig = irqnumber >> 8;
		int irq = irqnumber & 255;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (!((1 << sig) & ALLOWED_SIGS))
			return -EPERM;
		if (invalid_vm86_irq(irq))
			return -EPERM;
		if (vm86_irqs[irq].tsk)
			return -EPERM;
		ret = request_irq(irq, &irq_handler, 0, VM86_IRQNAME, NULL);
		if (ret)
			return ret;
		vm86_irqs[irq].sig = sig;
		vm86_irqs[irq].tsk = current;
		return irq;
	}
	case VM86_FREE_IRQ: {
		if (invalid_vm86_irq(irqnumber))
			return -EPERM;
		if (!vm86_irqs[irqnumber].tsk)
			return 0;
		if (vm86_irqs[irqnumber].tsk != current)
			return -EPERM;
		free_vm86_irq(irqnumber);
		return 0;
	}
	}
	return -EINVAL;
}
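
/*
 * Illustration only (userspace, hypothetical): the VM86_REQUEST_IRQ
 * argument packs the signal number into the high byte and the IRQ
 * into the low byte, matching the decoding in do_vm86_irq_handling()
 * above:
 *
 *	syscall(SYS_vm86, VM86_REQUEST_IRQ, (SIGUSR1 << 8) | 3);
 *	...
 *	if (syscall(SYS_vm86, VM86_GET_AND_RESET_IRQ, 3))
 *		inject_irq(3);		// hypothetical helper: reflect IRQ 3
 *					// into the guest; the call above also
 *					// re-enables the IRQ line
 */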