/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_VM86_H
#define _ASM_X86_VM86_H

#include <asm/ptrace.h>
#include <uapi/asm/vm86.h>

/*
 * This is the (kernel) stack-layout when we have done a "SAVE_ALL" from vm86
 * mode - the main change is that the old segment descriptors aren't
 * useful any more and are forced to be zero by the kernel (and the
 * hardware when a trap occurs), and the real segment descriptors are
 * at the end of the structure. Look at ptrace.h to see the "normal"
 * setup. For user space layout see 'struct vm86_regs' above.
 */

struct kernel_vm86_regs {
/*
 * normal regs, with special meaning for the segment descriptors..
 */
	struct pt_regs pt;
/*
 * these are specific to v86 mode:
 */
	unsigned short es, __esh;
	unsigned short ds, __dsh;
	unsigned short fs, __fsh;
	unsigned short gs, __gsh;
};

struct vm86 {
	struct vm86plus_struct __user *user_vm86;
	struct pt_regs regs32;
	unsigned long veflags;
	unsigned long veflags_mask;
	unsigned long saved_sp0;

	unsigned long flags;
	unsigned long screen_bitmap;
	unsigned long cpu_type;
	struct revectored_struct int_revectored;
	struct revectored_struct int21_revectored;
	struct vm86plus_info_struct vm86plus;
};

#ifdef CONFIG_VM86

void handle_vm86_fault(struct kernel_vm86_regs *, long);
int handle_vm86_trap(struct kernel_vm86_regs *, long, int);
void save_v86_state(struct kernel_vm86_regs *, int);

struct task_struct;

#define free_vm86(t) do {				\
	struct thread_struct *__t = (t);		\
	if (__t->vm86 != NULL) {			\
		kfree(__t->vm86);			\
		__t->vm86 = NULL;			\
	}						\
} while (0)

/*
 * Support for VM86 programs to request interrupts for
 * real mode hardware drivers:
 */
#define FIRST_VM86_IRQ		 3
#define LAST_VM86_IRQ		15

static inline int invalid_vm86_irq(int irq)
{
	return irq < FIRST_VM86_IRQ || irq > LAST_VM86_IRQ;
}

void release_vm86_irqs(struct task_struct *);

#else

#define handle_vm86_fault(a, b)
#define release_vm86_irqs(a)

static inline int handle_vm86_trap(struct kernel_vm86_regs *a, long b, int c)
{
	return 0;
}

static inline void save_v86_state(struct kernel_vm86_regs *a, int b) { }

#define free_vm86(t) do { } while(0)

#endif /* CONFIG_VM86 */

#endif /* _ASM_X86_VM86_H */
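
/*
 * Usage sketch (illustration only, not part of the header proper): a caller
 * in the vm86 IRQ-request path would be expected to reject IRQ numbers
 * outside the FIRST_VM86_IRQ..LAST_VM86_IRQ window before doing anything
 * else, roughly along these lines (hypothetical caller; the -EPERM error
 * code is an assumption here):
 *
 *	if (invalid_vm86_irq(irqnumber))
 *		return -EPERM;
 *	... irqnumber is now known to be in the 3..15 range ...
 */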