xref: /openbmc/qemu/target/i386/tcg/seg_helper.c (revision 744c72a8)
1 /*
2  *  x86 segmentation related helpers:
3  *  TSS, interrupts, system calls, jumps and call/task gates, descriptors
4  *
5  *  Copyright (c) 2003 Fabrice Bellard
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "qemu/log.h"
24 #include "exec/helper-proto.h"
25 #include "exec/exec-all.h"
26 #include "exec/cpu_ldst.h"
27 #include "exec/log.h"
28 #include "helper-tcg.h"
29 #include "seg_helper.h"
30 
31 /* return non-zero on error */
32 static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr,
33                                uint32_t *e2_ptr, int selector,
34                                uintptr_t retaddr)
35 {
36     SegmentCache *dt;
37     int index;
38     target_ulong ptr;
39 
40     if (selector & 0x4) {
41         dt = &env->ldt;
42     } else {
43         dt = &env->gdt;
44     }
45     index = selector & ~7;
46     if ((index + 7) > dt->limit) {
47         return -1;
48     }
49     ptr = dt->base + index;
50     *e1_ptr = cpu_ldl_kernel_ra(env, ptr, retaddr);
51     *e2_ptr = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
52     return 0;
53 }
54 
55 static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
56                                uint32_t *e2_ptr, int selector)
57 {
58     return load_segment_ra(env, e1_ptr, e2_ptr, selector, 0);
59 }
60 
61 static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
62 {
63     unsigned int limit;
64 
65     limit = (e1 & 0xffff) | (e2 & 0x000f0000);
66     if (e2 & DESC_G_MASK) {
67         limit = (limit << 12) | 0xfff;
68     }
69     return limit;
70 }
71 
72 static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
73 {
74     return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
75 }
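
/*
 * Worked example (illustrative sketch, not part of the original file): a flat
 * 32-bit code descriptor is encoded as e1 = 0x0000ffff, e2 = 0x00cf9a00.
 * get_seg_base() yields (0x0000ffff >> 16) | (0x00 << 16) | 0x00000000 = 0,
 * and get_seg_limit() yields a raw limit of 0xfffff which, since DESC_G_MASK
 * is set, is expanded to (0xfffff << 12) | 0xfff = 0xffffffff.
 */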
76 
77 static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
78                                          uint32_t e2)
79 {
80     sc->base = get_seg_base(e1, e2);
81     sc->limit = get_seg_limit(e1, e2);
82     sc->flags = e2;
83 }
84 
85 /* Initialize the segment cache for vm86 mode: the base is simply selector << 4. */
86 static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
87 {
88     selector &= 0xffff;
89 
90     cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff,
91                            DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
92                            DESC_A_MASK | (3 << DESC_DPL_SHIFT));
93 }
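
/*
 * Illustrative note (not part of the original file): in vm86/real mode a
 * segment value is just a paragraph number, so loading e.g. selector 0xb800
 * here gives base 0xb800 << 4 = 0xb8000, a 64 KiB limit and DPL 3, matching
 * what the hardware segment cache would hold.
 */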
94 
95 static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
96                                        uint32_t *esp_ptr, int dpl,
97                                        uintptr_t retaddr)
98 {
99     X86CPU *cpu = env_archcpu(env);
100     int type, index, shift;
101 
102 #if 0
103     {
104         int i;
105         printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
106         for (i = 0; i < env->tr.limit; i++) {
107             printf("%02x ", env->tr.base[i]);
108             if ((i & 7) == 7) {
109                 printf("\n");
110             }
111         }
112         printf("\n");
113     }
114 #endif
115 
116     if (!(env->tr.flags & DESC_P_MASK)) {
117         cpu_abort(CPU(cpu), "invalid tss");
118     }
119     type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
120     if ((type & 7) != 1) {
121         cpu_abort(CPU(cpu), "invalid tss type");
122     }
123     shift = type >> 3;
124     index = (dpl * 4 + 2) << shift;
125     if (index + (4 << shift) - 1 > env->tr.limit) {
126         raise_exception_err_ra(env, EXCP0A_TSS, env->tr.selector & 0xfffc, retaddr);
127     }
128     if (shift == 0) {
129         *esp_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index, retaddr);
130         *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 2, retaddr);
131     } else {
132         *esp_ptr = cpu_ldl_kernel_ra(env, env->tr.base + index, retaddr);
133         *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 4, retaddr);
134     }
135 }
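
/*
 * Worked example (illustrative, not part of the original file): for a 32-bit
 * TSS (shift = 1) and dpl = 0, index = (0 * 4 + 2) << 1 = 4, so ESP0 is read
 * from TSS offset 4 and SS0 from offset 8.  For a 16-bit TSS (shift = 0) the
 * same dpl gives index = 2, i.e. SP0 at offset 2 and SS0 at offset 4,
 * matching the architectural TSS layouts.
 */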
136 
137 static void tss_load_seg(CPUX86State *env, X86Seg seg_reg, int selector,
138                          int cpl, uintptr_t retaddr)
139 {
140     uint32_t e1, e2;
141     int rpl, dpl;
142 
143     if ((selector & 0xfffc) != 0) {
144         if (load_segment_ra(env, &e1, &e2, selector, retaddr) != 0) {
145             raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
146         }
147         if (!(e2 & DESC_S_MASK)) {
148             raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
149         }
150         rpl = selector & 3;
151         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
152         if (seg_reg == R_CS) {
153             if (!(e2 & DESC_CS_MASK)) {
154                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
155             }
156             if (dpl != rpl) {
157                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
158             }
159         } else if (seg_reg == R_SS) {
160             /* SS must be writable data */
161             if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
162                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
163             }
164             if (dpl != cpl || dpl != rpl) {
165                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
166             }
167         } else {
168             /* reject execute-only (non-readable) code segments */
169             if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
170                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
171             }
172             /* if data or non-conforming code, check the privilege rights */
173             if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
174                 if (dpl < cpl || dpl < rpl) {
175                     raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
176                 }
177             }
178         }
179         if (!(e2 & DESC_P_MASK)) {
180             raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, retaddr);
181         }
182         cpu_x86_load_seg_cache(env, seg_reg, selector,
183                                get_seg_base(e1, e2),
184                                get_seg_limit(e1, e2),
185                                e2);
186     } else {
187         if (seg_reg == R_SS || seg_reg == R_CS) {
188             raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
189         }
190     }
191 }
192 
193 #define SWITCH_TSS_JMP  0
194 #define SWITCH_TSS_IRET 1
195 #define SWITCH_TSS_CALL 2
196 
197 /* XXX: restore CPU state in registers (PowerPC case) */
198 static void switch_tss_ra(CPUX86State *env, int tss_selector,
199                           uint32_t e1, uint32_t e2, int source,
200                           uint32_t next_eip, uintptr_t retaddr)
201 {
202     int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
203     target_ulong tss_base;
204     uint32_t new_regs[8], new_segs[6];
205     uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
206     uint32_t old_eflags, eflags_mask;
207     SegmentCache *dt;
208     int index;
209     target_ulong ptr;
210 
211     type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
212     LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
213               source);
214 
215     /* if it is a task gate, read the TSS descriptor it references and load that */
216     if (type == 5) {
217         if (!(e2 & DESC_P_MASK)) {
218             raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
219         }
220         tss_selector = e1 >> 16;
221         if (tss_selector & 4) {
222             raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
223         }
224         if (load_segment_ra(env, &e1, &e2, tss_selector, retaddr) != 0) {
225             raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
226         }
227         if (e2 & DESC_S_MASK) {
228             raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
229         }
230         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
231         if ((type & 7) != 1) {
232             raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
233         }
234     }
235 
236     if (!(e2 & DESC_P_MASK)) {
237         raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
238     }
239 
240     if (type & 8) {
241         tss_limit_max = 103;
242     } else {
243         tss_limit_max = 43;
244     }
245     tss_limit = get_seg_limit(e1, e2);
246     tss_base = get_seg_base(e1, e2);
247     if ((tss_selector & 4) != 0 ||
248         tss_limit < tss_limit_max) {
249         raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
250     }
251     old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
252     if (old_type & 8) {
253         old_tss_limit_max = 103;
254     } else {
255         old_tss_limit_max = 43;
256     }
257 
258     /* read all the registers from the new TSS */
259     if (type & 8) {
260         /* 32 bit */
261         new_cr3 = cpu_ldl_kernel_ra(env, tss_base + 0x1c, retaddr);
262         new_eip = cpu_ldl_kernel_ra(env, tss_base + 0x20, retaddr);
263         new_eflags = cpu_ldl_kernel_ra(env, tss_base + 0x24, retaddr);
264         for (i = 0; i < 8; i++) {
265             new_regs[i] = cpu_ldl_kernel_ra(env, tss_base + (0x28 + i * 4),
266                                             retaddr);
267         }
268         for (i = 0; i < 6; i++) {
269             new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x48 + i * 4),
270                                              retaddr);
271         }
272         new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x60, retaddr);
273         new_trap = cpu_ldl_kernel_ra(env, tss_base + 0x64, retaddr);
274     } else {
275         /* 16 bit */
276         new_cr3 = 0;
277         new_eip = cpu_lduw_kernel_ra(env, tss_base + 0x0e, retaddr);
278         new_eflags = cpu_lduw_kernel_ra(env, tss_base + 0x10, retaddr);
279         for (i = 0; i < 8; i++) {
280             new_regs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x12 + i * 2),
281                                              retaddr) | 0xffff0000;
282         }
283         for (i = 0; i < 4; i++) {
284             new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x22 + i * 4),
285                                              retaddr);
286         }
287         new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x2a, retaddr);
288         new_segs[R_FS] = 0;
289         new_segs[R_GS] = 0;
290         new_trap = 0;
291     }
292     /* XXX: avoid a compiler warning; see
293      http://support.amd.com/us/Processor_TechDocs/24593.pdf
294      chapters 12.2.5 and 13.2.4 for how to implement the TSS trap bit */
295     (void)new_trap;
296 
297     /* NOTE: we must avoid memory exceptions during the task switch,
298        so we perform dummy accesses beforehand */
299     /* XXX: this can still fail in some cases, so a bigger hack is
300        necessary to validate the TLB after the accesses have been done */
301 
302     v1 = cpu_ldub_kernel_ra(env, env->tr.base, retaddr);
303     v2 = cpu_ldub_kernel_ra(env, env->tr.base + old_tss_limit_max, retaddr);
304     cpu_stb_kernel_ra(env, env->tr.base, v1, retaddr);
305     cpu_stb_kernel_ra(env, env->tr.base + old_tss_limit_max, v2, retaddr);
306 
307     /* clear busy bit (it is restartable) */
308     if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
309         target_ulong ptr;
310         uint32_t e2;
311 
312         ptr = env->gdt.base + (env->tr.selector & ~7);
313         e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
314         e2 &= ~DESC_TSS_BUSY_MASK;
315         cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
316     }
317     old_eflags = cpu_compute_eflags(env);
318     if (source == SWITCH_TSS_IRET) {
319         old_eflags &= ~NT_MASK;
320     }
321 
322     /* save the current state in the old TSS */
323     if (type & 8) {
324         /* 32 bit */
325         cpu_stl_kernel_ra(env, env->tr.base + 0x20, next_eip, retaddr);
326         cpu_stl_kernel_ra(env, env->tr.base + 0x24, old_eflags, retaddr);
327         cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX], retaddr);
328         cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX], retaddr);
329         cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX], retaddr);
330         cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX], retaddr);
331         cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP], retaddr);
332         cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP], retaddr);
333         cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI], retaddr);
334         cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI], retaddr);
335         for (i = 0; i < 6; i++) {
336             cpu_stw_kernel_ra(env, env->tr.base + (0x48 + i * 4),
337                               env->segs[i].selector, retaddr);
338         }
339     } else {
340         /* 16 bit */
341         cpu_stw_kernel_ra(env, env->tr.base + 0x0e, next_eip, retaddr);
342         cpu_stw_kernel_ra(env, env->tr.base + 0x10, old_eflags, retaddr);
343         cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX], retaddr);
344         cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX], retaddr);
345         cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX], retaddr);
346         cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX], retaddr);
347         cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP], retaddr);
348         cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP], retaddr);
349         cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI], retaddr);
350         cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI], retaddr);
351         for (i = 0; i < 4; i++) {
352             cpu_stw_kernel_ra(env, env->tr.base + (0x22 + i * 4),
353                               env->segs[i].selector, retaddr);
354         }
355     }
356 
357     /* from now on, if an exception occurs, it occurs in the context of
358        the next task */
359 
360     if (source == SWITCH_TSS_CALL) {
361         cpu_stw_kernel_ra(env, tss_base, env->tr.selector, retaddr);
362         new_eflags |= NT_MASK;
363     }
364 
365     /* set busy bit */
366     if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
367         target_ulong ptr;
368         uint32_t e2;
369 
370         ptr = env->gdt.base + (tss_selector & ~7);
371         e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
372         e2 |= DESC_TSS_BUSY_MASK;
373         cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
374     }
375 
376     /* set the new CPU state */
377     /* from this point on, any exception that occurs can cause problems */
378     env->cr[0] |= CR0_TS_MASK;
379     env->hflags |= HF_TS_MASK;
380     env->tr.selector = tss_selector;
381     env->tr.base = tss_base;
382     env->tr.limit = tss_limit;
383     env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
384 
385     if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
386         cpu_x86_update_cr3(env, new_cr3);
387     }
388 
389     /* first load everything that cannot fault, then reload the parts
390        that may raise an exception */
391     env->eip = new_eip;
392     eflags_mask = TF_MASK | AC_MASK | ID_MASK |
393         IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
394     if (!(type & 8)) {
395         eflags_mask &= 0xffff;
396     }
397     cpu_load_eflags(env, new_eflags, eflags_mask);
398     /* XXX: what to do in 16 bit case? */
399     env->regs[R_EAX] = new_regs[0];
400     env->regs[R_ECX] = new_regs[1];
401     env->regs[R_EDX] = new_regs[2];
402     env->regs[R_EBX] = new_regs[3];
403     env->regs[R_ESP] = new_regs[4];
404     env->regs[R_EBP] = new_regs[5];
405     env->regs[R_ESI] = new_regs[6];
406     env->regs[R_EDI] = new_regs[7];
407     if (new_eflags & VM_MASK) {
408         for (i = 0; i < 6; i++) {
409             load_seg_vm(env, i, new_segs[i]);
410         }
411     } else {
412         /* load just the selectors first; loading the descriptors may trigger exceptions */
413         for (i = 0; i < 6; i++) {
414             cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
415         }
416     }
417 
418     env->ldt.selector = new_ldt & ~4;
419     env->ldt.base = 0;
420     env->ldt.limit = 0;
421     env->ldt.flags = 0;
422 
423     /* load the LDT */
424     if (new_ldt & 4) {
425         raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
426     }
427 
428     if ((new_ldt & 0xfffc) != 0) {
429         dt = &env->gdt;
430         index = new_ldt & ~7;
431         if ((index + 7) > dt->limit) {
432             raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
433         }
434         ptr = dt->base + index;
435         e1 = cpu_ldl_kernel_ra(env, ptr, retaddr);
436         e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
437         if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
438             raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
439         }
440         if (!(e2 & DESC_P_MASK)) {
441             raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
442         }
443         load_seg_cache_raw_dt(&env->ldt, e1, e2);
444     }
445 
446     /* load the segments */
447     if (!(new_eflags & VM_MASK)) {
448         int cpl = new_segs[R_CS] & 3;
449         tss_load_seg(env, R_CS, new_segs[R_CS], cpl, retaddr);
450         tss_load_seg(env, R_SS, new_segs[R_SS], cpl, retaddr);
451         tss_load_seg(env, R_ES, new_segs[R_ES], cpl, retaddr);
452         tss_load_seg(env, R_DS, new_segs[R_DS], cpl, retaddr);
453         tss_load_seg(env, R_FS, new_segs[R_FS], cpl, retaddr);
454         tss_load_seg(env, R_GS, new_segs[R_GS], cpl, retaddr);
455     }
456 
457     /* check that env->eip is within the CS segment limit */
458     if (new_eip > env->segs[R_CS].limit) {
459         /* XXX: different exception if CALL? */
460         raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
461     }
462 
463 #ifndef CONFIG_USER_ONLY
464     /* reset local breakpoints */
465     if (env->dr[7] & DR7_LOCAL_BP_MASK) {
466         cpu_x86_update_dr7(env, env->dr[7] & ~DR7_LOCAL_BP_MASK);
467     }
468 #endif
469 }
470 
471 static void switch_tss(CPUX86State *env, int tss_selector,
472                        uint32_t e1, uint32_t e2, int source,
473                         uint32_t next_eip)
474 {
475     switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0);
476 }
477 
478 static inline unsigned int get_sp_mask(unsigned int e2)
479 {
480 #ifdef TARGET_X86_64
481     if (e2 & DESC_L_MASK) {
482         return 0;
483     } else
484 #endif
485     if (e2 & DESC_B_MASK) {
486         return 0xffffffff;
487     } else {
488         return 0xffff;
489     }
490 }
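
/*
 * Illustrative note (not part of the original file): a stack segment with
 * DESC_B_MASK set masks ESP arithmetic with 0xffffffff, a 16-bit stack keeps
 * the upper half of ESP untouched via the 0xffff mask, and in long mode the
 * returned mask of 0 makes SET_ESP() below write the full 64-bit RSP.
 */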
491 
492 int exception_has_error_code(int intno)
493 {
494     switch (intno) {
495     case 8:
496     case 10:
497     case 11:
498     case 12:
499     case 13:
500     case 14:
501     case 17:
502         return 1;
503     }
504     return 0;
505 }
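
/*
 * For reference (added comment, not in the original file): the vectors above
 * are #DF (8), #TS (10), #NP (11), #SS (12), #GP (13), #PF (14) and #AC (17),
 * the x86 exceptions that push an error code.
 */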
506 
507 #ifdef TARGET_X86_64
508 #define SET_ESP(val, sp_mask)                                   \
509     do {                                                        \
510         if ((sp_mask) == 0xffff) {                              \
511             env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) |   \
512                 ((val) & 0xffff);                               \
513         } else if ((sp_mask) == 0xffffffffLL) {                 \
514             env->regs[R_ESP] = (uint32_t)(val);                 \
515         } else {                                                \
516             env->regs[R_ESP] = (val);                           \
517         }                                                       \
518     } while (0)
519 #else
520 #define SET_ESP(val, sp_mask)                                   \
521     do {                                                        \
522         env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) |    \
523             ((val) & (sp_mask));                                \
524     } while (0)
525 #endif
526 
527 /* On 64-bit targets this addition can overflow 32 bits, so this segment
528  * addition macro is used to trim the value to 32 bits whenever needed. */
529 #define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
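
/*
 * Worked example (illustrative, not part of the original file): with a 64-bit
 * target_ulong, ssp = 0xfffff000 and sp = 0x2000 under a 0xffffffff sp_mask
 * add up to 0x100001000; the uint32_t cast trims this to 0x00001000,
 * reproducing the wrap-around a real 32-bit segmented access would perform.
 */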
530 
531 /* XXX: add an is_user flag to have proper security support */
532 #define PUSHW_RA(ssp, sp, sp_mask, val, ra)                      \
533     {                                                            \
534         sp -= 2;                                                 \
535         cpu_stw_kernel_ra(env, (ssp) + (sp & (sp_mask)), (val), ra); \
536     }
537 
538 #define PUSHL_RA(ssp, sp, sp_mask, val, ra)                             \
539     {                                                                   \
540         sp -= 4;                                                        \
541         cpu_stl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val), ra); \
542     }
543 
544 #define POPW_RA(ssp, sp, sp_mask, val, ra)                       \
545     {                                                            \
546         val = cpu_lduw_kernel_ra(env, (ssp) + (sp & (sp_mask)), ra); \
547         sp += 2;                                                 \
548     }
549 
550 #define POPL_RA(ssp, sp, sp_mask, val, ra)                              \
551     {                                                                   \
552         val = (uint32_t)cpu_ldl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), ra); \
553         sp += 4;                                                        \
554     }
555 
556 #define PUSHW(ssp, sp, sp_mask, val) PUSHW_RA(ssp, sp, sp_mask, val, 0)
557 #define PUSHL(ssp, sp, sp_mask, val) PUSHL_RA(ssp, sp, sp_mask, val, 0)
558 #define POPW(ssp, sp, sp_mask, val) POPW_RA(ssp, sp, sp_mask, val, 0)
559 #define POPL(ssp, sp, sp_mask, val) POPL_RA(ssp, sp, sp_mask, val, 0)
560 
561 /* protected mode interrupt */
562 static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
563                                    int error_code, unsigned int next_eip,
564                                    int is_hw)
565 {
566     SegmentCache *dt;
567     target_ulong ptr, ssp;
568     int type, dpl, selector, ss_dpl, cpl;
569     int has_error_code, new_stack, shift;
570     uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
571     uint32_t old_eip, sp_mask;
572     int vm86 = env->eflags & VM_MASK;
573 
574     has_error_code = 0;
575     if (!is_int && !is_hw) {
576         has_error_code = exception_has_error_code(intno);
577     }
578     if (is_int) {
579         old_eip = next_eip;
580     } else {
581         old_eip = env->eip;
582     }
583 
584     dt = &env->idt;
585     if (intno * 8 + 7 > dt->limit) {
586         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
587     }
588     ptr = dt->base + intno * 8;
589     e1 = cpu_ldl_kernel(env, ptr);
590     e2 = cpu_ldl_kernel(env, ptr + 4);
591     /* check gate type */
592     type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
593     switch (type) {
594     case 5: /* task gate */
595     case 6: /* 286 interrupt gate */
596     case 7: /* 286 trap gate */
597     case 14: /* 386 interrupt gate */
598     case 15: /* 386 trap gate */
599         break;
600     default:
601         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
602         break;
603     }
604     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
605     cpl = env->hflags & HF_CPL_MASK;
606     /* check privilege if software int */
607     if (is_int && dpl < cpl) {
608         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
609     }
610 
611     if (type == 5) {
612         /* task gate */
613         /* this check must be done here so that the correct error code is returned */
614         if (!(e2 & DESC_P_MASK)) {
615             raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
616         }
617         switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
618         if (has_error_code) {
619             int type;
620             uint32_t mask;
621 
622             /* push the error code */
623             type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
624             shift = type >> 3;
625             if (env->segs[R_SS].flags & DESC_B_MASK) {
626                 mask = 0xffffffff;
627             } else {
628                 mask = 0xffff;
629             }
630             esp = (env->regs[R_ESP] - (2 << shift)) & mask;
631             ssp = env->segs[R_SS].base + esp;
632             if (shift) {
633                 cpu_stl_kernel(env, ssp, error_code);
634             } else {
635                 cpu_stw_kernel(env, ssp, error_code);
636             }
637             SET_ESP(esp, mask);
638         }
639         return;
640     }
641 
642     /* Otherwise, trap or interrupt gate */
643 
644     /* check valid bit */
645     if (!(e2 & DESC_P_MASK)) {
646         raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
647     }
648     selector = e1 >> 16;
649     offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
650     if ((selector & 0xfffc) == 0) {
651         raise_exception_err(env, EXCP0D_GPF, 0);
652     }
653     if (load_segment(env, &e1, &e2, selector) != 0) {
654         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
655     }
656     if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
657         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
658     }
659     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
660     if (dpl > cpl) {
661         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
662     }
663     if (!(e2 & DESC_P_MASK)) {
664         raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
665     }
666     if (e2 & DESC_C_MASK) {
667         dpl = cpl;
668     }
669     if (dpl < cpl) {
670         /* to inner privilege */
671         get_ss_esp_from_tss(env, &ss, &esp, dpl, 0);
672         if ((ss & 0xfffc) == 0) {
673             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
674         }
675         if ((ss & 3) != dpl) {
676             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
677         }
678         if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
679             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
680         }
681         ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
682         if (ss_dpl != dpl) {
683             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
684         }
685         if (!(ss_e2 & DESC_S_MASK) ||
686             (ss_e2 & DESC_CS_MASK) ||
687             !(ss_e2 & DESC_W_MASK)) {
688             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
689         }
690         if (!(ss_e2 & DESC_P_MASK)) {
691             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
692         }
693         new_stack = 1;
694         sp_mask = get_sp_mask(ss_e2);
695         ssp = get_seg_base(ss_e1, ss_e2);
696     } else  {
697         /* to same privilege */
698         if (vm86) {
699             raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
700         }
701         new_stack = 0;
702         sp_mask = get_sp_mask(env->segs[R_SS].flags);
703         ssp = env->segs[R_SS].base;
704         esp = env->regs[R_ESP];
705     }
706 
707     shift = type >> 3;
708 
709 #if 0
710     /* XXX: check that enough room is available */
711     push_size = 6 + (new_stack << 2) + (has_error_code << 1);
712     if (vm86) {
713         push_size += 8;
714     }
715     push_size <<= shift;
716 #endif
717     if (shift == 1) {
718         if (new_stack) {
719             if (vm86) {
720                 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
721                 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
722                 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
723                 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
724             }
725             PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
726             PUSHL(ssp, esp, sp_mask, env->regs[R_ESP]);
727         }
728         PUSHL(ssp, esp, sp_mask, cpu_compute_eflags(env));
729         PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
730         PUSHL(ssp, esp, sp_mask, old_eip);
731         if (has_error_code) {
732             PUSHL(ssp, esp, sp_mask, error_code);
733         }
734     } else {
735         if (new_stack) {
736             if (vm86) {
737                 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
738                 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
739                 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
740                 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
741             }
742             PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
743             PUSHW(ssp, esp, sp_mask, env->regs[R_ESP]);
744         }
745         PUSHW(ssp, esp, sp_mask, cpu_compute_eflags(env));
746         PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
747         PUSHW(ssp, esp, sp_mask, old_eip);
748         if (has_error_code) {
749             PUSHW(ssp, esp, sp_mask, error_code);
750         }
751     }
752 
753     /* interrupt gates clear the IF flag */
754     if ((type & 1) == 0) {
755         env->eflags &= ~IF_MASK;
756     }
757     env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
758 
759     if (new_stack) {
760         if (vm86) {
761             cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
762             cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
763             cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
764             cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
765         }
766         ss = (ss & ~3) | dpl;
767         cpu_x86_load_seg_cache(env, R_SS, ss,
768                                ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
769     }
770     SET_ESP(esp, sp_mask);
771 
772     selector = (selector & ~3) | dpl;
773     cpu_x86_load_seg_cache(env, R_CS, selector,
774                    get_seg_base(e1, e2),
775                    get_seg_limit(e1, e2),
776                    e2);
777     env->eip = offset;
778 }
779 
780 #ifdef TARGET_X86_64
781 
782 #define PUSHQ_RA(sp, val, ra)                   \
783     {                                           \
784         sp -= 8;                                \
785         cpu_stq_kernel_ra(env, sp, (val), ra);  \
786     }
787 
788 #define POPQ_RA(sp, val, ra)                    \
789     {                                           \
790         val = cpu_ldq_kernel_ra(env, sp, ra);   \
791         sp += 8;                                \
792     }
793 
794 #define PUSHQ(sp, val) PUSHQ_RA(sp, val, 0)
795 #define POPQ(sp, val) POPQ_RA(sp, val, 0)
796 
797 static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
798 {
799     X86CPU *cpu = env_archcpu(env);
800     int index;
801 
802 #if 0
803     printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
804            env->tr.base, env->tr.limit);
805 #endif
806 
807     if (!(env->tr.flags & DESC_P_MASK)) {
808         cpu_abort(CPU(cpu), "invalid tss");
809     }
810     index = 8 * level + 4;
811     if ((index + 7) > env->tr.limit) {
812         raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
813     }
814     return cpu_ldq_kernel(env, env->tr.base + index);
815 }
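
/*
 * Layout note (added comment, not in the original file): in the 64-bit TSS
 * the quadword at offset 8 * level + 4 is RSP0/RSP1/RSP2 for level 0..2 and
 * IST1..IST7 for level 4..10, so the "ist != 0 ? ist + 3 : dpl" argument
 * passed by do_interrupt64() below maps IST1 to offset 0x24 and IST7 to 0x54.
 */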
816 
817 /* 64 bit interrupt */
818 static void do_interrupt64(CPUX86State *env, int intno, int is_int,
819                            int error_code, target_ulong next_eip, int is_hw)
820 {
821     SegmentCache *dt;
822     target_ulong ptr;
823     int type, dpl, selector, cpl, ist;
824     int has_error_code, new_stack;
825     uint32_t e1, e2, e3, ss;
826     target_ulong old_eip, esp, offset;
827 
828     has_error_code = 0;
829     if (!is_int && !is_hw) {
830         has_error_code = exception_has_error_code(intno);
831     }
832     if (is_int) {
833         old_eip = next_eip;
834     } else {
835         old_eip = env->eip;
836     }
837 
838     dt = &env->idt;
839     if (intno * 16 + 15 > dt->limit) {
840         raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
841     }
842     ptr = dt->base + intno * 16;
843     e1 = cpu_ldl_kernel(env, ptr);
844     e2 = cpu_ldl_kernel(env, ptr + 4);
845     e3 = cpu_ldl_kernel(env, ptr + 8);
846     /* check gate type */
847     type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
848     switch (type) {
849     case 14: /* 386 interrupt gate */
850     case 15: /* 386 trap gate */
851         break;
852     default:
853         raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
854         break;
855     }
856     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
857     cpl = env->hflags & HF_CPL_MASK;
858     /* check privilege if software int */
859     if (is_int && dpl < cpl) {
860         raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
861     }
862     /* check valid bit */
863     if (!(e2 & DESC_P_MASK)) {
864         raise_exception_err(env, EXCP0B_NOSEG, intno * 16 + 2);
865     }
866     selector = e1 >> 16;
867     offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
868     ist = e2 & 7;
869     if ((selector & 0xfffc) == 0) {
870         raise_exception_err(env, EXCP0D_GPF, 0);
871     }
872 
873     if (load_segment(env, &e1, &e2, selector) != 0) {
874         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
875     }
876     if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
877         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
878     }
879     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
880     if (dpl > cpl) {
881         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
882     }
883     if (!(e2 & DESC_P_MASK)) {
884         raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
885     }
886     if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
887         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
888     }
889     if (e2 & DESC_C_MASK) {
890         dpl = cpl;
891     }
892     if (dpl < cpl || ist != 0) {
893         /* to inner privilege */
894         new_stack = 1;
895         esp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl);
896         ss = 0;
897     } else {
898         /* to same privilege */
899         if (env->eflags & VM_MASK) {
900             raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
901         }
902         new_stack = 0;
903         esp = env->regs[R_ESP];
904     }
905     esp &= ~0xfLL; /* align stack */
906 
907     PUSHQ(esp, env->segs[R_SS].selector);
908     PUSHQ(esp, env->regs[R_ESP]);
909     PUSHQ(esp, cpu_compute_eflags(env));
910     PUSHQ(esp, env->segs[R_CS].selector);
911     PUSHQ(esp, old_eip);
912     if (has_error_code) {
913         PUSHQ(esp, error_code);
914     }
915 
916     /* interrupt gates clear the IF flag */
917     if ((type & 1) == 0) {
918         env->eflags &= ~IF_MASK;
919     }
920     env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
921 
922     if (new_stack) {
923         ss = 0 | dpl;
924         cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, dpl << DESC_DPL_SHIFT);
925     }
926     env->regs[R_ESP] = esp;
927 
928     selector = (selector & ~3) | dpl;
929     cpu_x86_load_seg_cache(env, R_CS, selector,
930                    get_seg_base(e1, e2),
931                    get_seg_limit(e1, e2),
932                    e2);
933     env->eip = offset;
934 }
935 #endif
936 
937 #ifdef TARGET_X86_64
938 void helper_sysret(CPUX86State *env, int dflag)
939 {
940     int cpl, selector;
941 
942     if (!(env->efer & MSR_EFER_SCE)) {
943         raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
944     }
945     cpl = env->hflags & HF_CPL_MASK;
946     if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
947         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
948     }
949     selector = (env->star >> 48) & 0xffff;
950     if (env->hflags & HF_LMA_MASK) {
951         cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
952                         | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
953                         NT_MASK);
954         if (dflag == 2) {
955             cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
956                                    0, 0xffffffff,
957                                    DESC_G_MASK | DESC_P_MASK |
958                                    DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
959                                    DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
960                                    DESC_L_MASK);
961             env->eip = env->regs[R_ECX];
962         } else {
963             cpu_x86_load_seg_cache(env, R_CS, selector | 3,
964                                    0, 0xffffffff,
965                                    DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
966                                    DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
967                                    DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
968             env->eip = (uint32_t)env->regs[R_ECX];
969         }
970         cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
971                                0, 0xffffffff,
972                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
973                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
974                                DESC_W_MASK | DESC_A_MASK);
975     } else {
976         env->eflags |= IF_MASK;
977         cpu_x86_load_seg_cache(env, R_CS, selector | 3,
978                                0, 0xffffffff,
979                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
980                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
981                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
982         env->eip = (uint32_t)env->regs[R_ECX];
983         cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
984                                0, 0xffffffff,
985                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
986                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
987                                DESC_W_MASK | DESC_A_MASK);
988     }
989 }
990 #endif
991 
992 /* real mode interrupt */
993 static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
994                               int error_code, unsigned int next_eip)
995 {
996     SegmentCache *dt;
997     target_ulong ptr, ssp;
998     int selector;
999     uint32_t offset, esp;
1000     uint32_t old_cs, old_eip;
1001 
1002     /* real mode (simpler!) */
1003     dt = &env->idt;
1004     if (intno * 4 + 3 > dt->limit) {
1005         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
1006     }
1007     ptr = dt->base + intno * 4;
1008     offset = cpu_lduw_kernel(env, ptr);
1009     selector = cpu_lduw_kernel(env, ptr + 2);
1010     esp = env->regs[R_ESP];
1011     ssp = env->segs[R_SS].base;
1012     if (is_int) {
1013         old_eip = next_eip;
1014     } else {
1015         old_eip = env->eip;
1016     }
1017     old_cs = env->segs[R_CS].selector;
1018     /* XXX: use SS segment size? */
1019     PUSHW(ssp, esp, 0xffff, cpu_compute_eflags(env));
1020     PUSHW(ssp, esp, 0xffff, old_cs);
1021     PUSHW(ssp, esp, 0xffff, old_eip);
1022 
1023     /* update processor state */
1024     env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
1025     env->eip = offset;
1026     env->segs[R_CS].selector = selector;
1027     env->segs[R_CS].base = (selector << 4);
1028     env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1029 }
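
/*
 * Worked example (illustrative, not part of the original file): for
 * intno = 0x10 the real-mode IVT entry sits at idt.base + 0x40; the new IP is
 * the 16-bit word at that address and the new CS the word at 0x42.  Only
 * FLAGS, CS and IP are pushed, each as a 16-bit value on the SS:SP stack.
 */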
1030 
1031 /*
1032  * Begin execution of an interrupt. is_int is TRUE if it comes from
1033  * the int instruction. next_eip is the env->eip value AFTER the interrupt
1034  * instruction; it is only relevant if is_int is TRUE.
1035  */
1036 void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
1037                       int error_code, target_ulong next_eip, int is_hw)
1038 {
1039     CPUX86State *env = &cpu->env;
1040 
1041     if (qemu_loglevel_mask(CPU_LOG_INT)) {
1042         if ((env->cr[0] & CR0_PE_MASK)) {
1043             static int count;
1044 
1045             qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
1046                      " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1047                      count, intno, error_code, is_int,
1048                      env->hflags & HF_CPL_MASK,
1049                      env->segs[R_CS].selector, env->eip,
1050                      (int)env->segs[R_CS].base + env->eip,
1051                      env->segs[R_SS].selector, env->regs[R_ESP]);
1052             if (intno == 0x0e) {
1053                 qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
1054             } else {
1055                 qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]);
1056             }
1057             qemu_log("\n");
1058             log_cpu_state(CPU(cpu), CPU_DUMP_CCOP);
1059 #if 0
1060             {
1061                 int i;
1062                 target_ulong ptr;
1063 
1064                 qemu_log("       code=");
1065                 ptr = env->segs[R_CS].base + env->eip;
1066                 for (i = 0; i < 16; i++) {
1067                     qemu_log(" %02x", ldub(ptr + i));
1068                 }
1069                 qemu_log("\n");
1070             }
1071 #endif
1072             count++;
1073         }
1074     }
1075     if (env->cr[0] & CR0_PE_MASK) {
1076 #if !defined(CONFIG_USER_ONLY)
1077         if (env->hflags & HF_GUEST_MASK) {
1078             handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
1079         }
1080 #endif
1081 #ifdef TARGET_X86_64
1082         if (env->hflags & HF_LMA_MASK) {
1083             do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
1084         } else
1085 #endif
1086         {
1087             do_interrupt_protected(env, intno, is_int, error_code, next_eip,
1088                                    is_hw);
1089         }
1090     } else {
1091 #if !defined(CONFIG_USER_ONLY)
1092         if (env->hflags & HF_GUEST_MASK) {
1093             handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
1094         }
1095 #endif
1096         do_interrupt_real(env, intno, is_int, error_code, next_eip);
1097     }
1098 
1099 #if !defined(CONFIG_USER_ONLY)
1100     if (env->hflags & HF_GUEST_MASK) {
1101         CPUState *cs = CPU(cpu);
1102         uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb +
1103                                       offsetof(struct vmcb,
1104                                                control.event_inj));
1105 
1106         x86_stl_phys(cs,
1107                  env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
1108                  event_inj & ~SVM_EVTINJ_VALID);
1109     }
1110 #endif
1111 }
1112 
1113 void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
1114 {
1115     do_interrupt_all(env_archcpu(env), intno, 0, 0, 0, is_hw);
1116 }
1117 
1118 bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
1119 {
1120     X86CPU *cpu = X86_CPU(cs);
1121     CPUX86State *env = &cpu->env;
1122     int intno;
1123 
1124     interrupt_request = x86_cpu_pending_interrupt(cs, interrupt_request);
1125     if (!interrupt_request) {
1126         return false;
1127     }
1128 
1129     /* Don't process multiple interrupt requests in a single call.
1130      * This is required to make icount-driven execution deterministic.
1131      */
1132     switch (interrupt_request) {
1133 #if !defined(CONFIG_USER_ONLY)
1134     case CPU_INTERRUPT_POLL:
1135         cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
1136         apic_poll_irq(cpu->apic_state);
1137         break;
1138 #endif
1139     case CPU_INTERRUPT_SIPI:
1140         do_cpu_sipi(cpu);
1141         break;
1142     case CPU_INTERRUPT_SMI:
1143         cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0, 0);
1144         cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
1145 #ifdef CONFIG_USER_ONLY
1146         cpu_abort(CPU(cpu), "SMI interrupt: cannot enter SMM in user-mode");
1147 #else
1148         do_smm_enter(cpu);
1149 #endif /* CONFIG_USER_ONLY */
1150         break;
1151     case CPU_INTERRUPT_NMI:
1152         cpu_svm_check_intercept_param(env, SVM_EXIT_NMI, 0, 0);
1153         cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
1154         env->hflags2 |= HF2_NMI_MASK;
1155         do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
1156         break;
1157     case CPU_INTERRUPT_MCE:
1158         cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
1159         do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
1160         break;
1161     case CPU_INTERRUPT_HARD:
1162         cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0, 0);
1163         cs->interrupt_request &= ~(CPU_INTERRUPT_HARD |
1164                                    CPU_INTERRUPT_VIRQ);
1165         intno = cpu_get_pic_interrupt(env);
1166         qemu_log_mask(CPU_LOG_TB_IN_ASM,
1167                       "Servicing hardware INT=0x%02x\n", intno);
1168         do_interrupt_x86_hardirq(env, intno, 1);
1169         break;
1170 #if !defined(CONFIG_USER_ONLY)
1171     case CPU_INTERRUPT_VIRQ:
1172         /* FIXME: this should respect TPR */
1173         cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0, 0);
1174         intno = x86_ldl_phys(cs, env->vm_vmcb
1175                              + offsetof(struct vmcb, control.int_vector));
1176         qemu_log_mask(CPU_LOG_TB_IN_ASM,
1177                       "Servicing virtual hardware INT=0x%02x\n", intno);
1178         do_interrupt_x86_hardirq(env, intno, 1);
1179         cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
1180         break;
1181 #endif
1182     }
1183 
1184     /* Ensure that no TB jump will be modified as the program flow was changed.  */
1185     return true;
1186 }
1187 
1188 void helper_lldt(CPUX86State *env, int selector)
1189 {
1190     SegmentCache *dt;
1191     uint32_t e1, e2;
1192     int index, entry_limit;
1193     target_ulong ptr;
1194 
1195     selector &= 0xffff;
1196     if ((selector & 0xfffc) == 0) {
1197         /* XXX: NULL selector case: invalid LDT */
1198         env->ldt.base = 0;
1199         env->ldt.limit = 0;
1200     } else {
1201         if (selector & 0x4) {
1202             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1203         }
1204         dt = &env->gdt;
1205         index = selector & ~7;
1206 #ifdef TARGET_X86_64
1207         if (env->hflags & HF_LMA_MASK) {
1208             entry_limit = 15;
1209         } else
1210 #endif
1211         {
1212             entry_limit = 7;
1213         }
1214         if ((index + entry_limit) > dt->limit) {
1215             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1216         }
1217         ptr = dt->base + index;
1218         e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1219         e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1220         if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
1221             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1222         }
1223         if (!(e2 & DESC_P_MASK)) {
1224             raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1225         }
1226 #ifdef TARGET_X86_64
1227         if (env->hflags & HF_LMA_MASK) {
1228             uint32_t e3;
1229 
1230             e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
1231             load_seg_cache_raw_dt(&env->ldt, e1, e2);
1232             env->ldt.base |= (target_ulong)e3 << 32;
1233         } else
1234 #endif
1235         {
1236             load_seg_cache_raw_dt(&env->ldt, e1, e2);
1237         }
1238     }
1239     env->ldt.selector = selector;
1240 }
1241 
1242 void helper_ltr(CPUX86State *env, int selector)
1243 {
1244     SegmentCache *dt;
1245     uint32_t e1, e2;
1246     int index, type, entry_limit;
1247     target_ulong ptr;
1248 
1249     selector &= 0xffff;
1250     if ((selector & 0xfffc) == 0) {
1251         /* NULL selector case: invalid TR */
1252         env->tr.base = 0;
1253         env->tr.limit = 0;
1254         env->tr.flags = 0;
1255     } else {
1256         if (selector & 0x4) {
1257             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1258         }
1259         dt = &env->gdt;
1260         index = selector & ~7;
1261 #ifdef TARGET_X86_64
1262         if (env->hflags & HF_LMA_MASK) {
1263             entry_limit = 15;
1264         } else
1265 #endif
1266         {
1267             entry_limit = 7;
1268         }
1269         if ((index + entry_limit) > dt->limit) {
1270             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1271         }
1272         ptr = dt->base + index;
1273         e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1274         e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1275         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1276         if ((e2 & DESC_S_MASK) ||
1277             (type != 1 && type != 9)) {
1278             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1279         }
1280         if (!(e2 & DESC_P_MASK)) {
1281             raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1282         }
1283 #ifdef TARGET_X86_64
1284         if (env->hflags & HF_LMA_MASK) {
1285             uint32_t e3, e4;
1286 
1287             e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
1288             e4 = cpu_ldl_kernel_ra(env, ptr + 12, GETPC());
1289             if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
1290                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1291             }
1292             load_seg_cache_raw_dt(&env->tr, e1, e2);
1293             env->tr.base |= (target_ulong)e3 << 32;
1294         } else
1295 #endif
1296         {
1297             load_seg_cache_raw_dt(&env->tr, e1, e2);
1298         }
1299         e2 |= DESC_TSS_BUSY_MASK;
1300         cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
1301     }
1302     env->tr.selector = selector;
1303 }
1304 
1305 /* only valid in protected mode and outside VM86 mode; seg_reg must be != R_CS */
1306 void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
1307 {
1308     uint32_t e1, e2;
1309     int cpl, dpl, rpl;
1310     SegmentCache *dt;
1311     int index;
1312     target_ulong ptr;
1313 
1314     selector &= 0xffff;
1315     cpl = env->hflags & HF_CPL_MASK;
1316     if ((selector & 0xfffc) == 0) {
1317         /* null selector case */
1318         if (seg_reg == R_SS
1319 #ifdef TARGET_X86_64
1320             && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
1321 #endif
1322             ) {
1323             raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1324         }
1325         cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
1326     } else {
1327 
1328         if (selector & 0x4) {
1329             dt = &env->ldt;
1330         } else {
1331             dt = &env->gdt;
1332         }
1333         index = selector & ~7;
1334         if ((index + 7) > dt->limit) {
1335             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1336         }
1337         ptr = dt->base + index;
1338         e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1339         e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1340 
1341         if (!(e2 & DESC_S_MASK)) {
1342             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1343         }
1344         rpl = selector & 3;
1345         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1346         if (seg_reg == R_SS) {
1347             /* must be writable segment */
1348             if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
1349                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1350             }
1351             if (rpl != cpl || dpl != cpl) {
1352                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1353             }
1354         } else {
1355             /* must be readable segment */
1356             if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
1357                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1358             }
1359 
1360             if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
1361             /* if not a conforming code segment, check the privilege rights */
1362                 if (dpl < cpl || dpl < rpl) {
1363                     raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1364                 }
1365             }
1366         }
1367 
1368         if (!(e2 & DESC_P_MASK)) {
1369             if (seg_reg == R_SS) {
1370                 raise_exception_err_ra(env, EXCP0C_STACK, selector & 0xfffc, GETPC());
1371             } else {
1372                 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1373             }
1374         }
1375 
1376         /* set the access bit if not already set */
1377         if (!(e2 & DESC_A_MASK)) {
1378             e2 |= DESC_A_MASK;
1379             cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
1380         }
1381 
1382         cpu_x86_load_seg_cache(env, seg_reg, selector,
1383                        get_seg_base(e1, e2),
1384                        get_seg_limit(e1, e2),
1385                        e2);
1386 #if 0
1387         qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
1388                 selector, (unsigned long)sc->base, sc->limit, sc->flags);
1389 #endif
1390     }
1391 }
1392 
1393 /* protected mode jump */
1394 void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
1395                            target_ulong next_eip)
1396 {
1397     int gate_cs, type;
1398     uint32_t e1, e2, cpl, dpl, rpl, limit;
1399 
1400     if ((new_cs & 0xfffc) == 0) {
1401         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1402     }
1403     if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
1404         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1405     }
1406     cpl = env->hflags & HF_CPL_MASK;
1407     if (e2 & DESC_S_MASK) {
1408         if (!(e2 & DESC_CS_MASK)) {
1409             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1410         }
1411         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1412         if (e2 & DESC_C_MASK) {
1413             /* conforming code segment */
1414             if (dpl > cpl) {
1415                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1416             }
1417         } else {
1418             /* non-conforming code segment */
1419             rpl = new_cs & 3;
1420             if (rpl > cpl) {
1421                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1422             }
1423             if (dpl != cpl) {
1424                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1425             }
1426         }
1427         if (!(e2 & DESC_P_MASK)) {
1428             raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1429         }
1430         limit = get_seg_limit(e1, e2);
1431         if (new_eip > limit &&
1432             (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) {
1433             raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1434         }
1435         cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1436                        get_seg_base(e1, e2), limit, e2);
1437         env->eip = new_eip;
1438     } else {
1439         /* jump through a system descriptor: TSS, task gate or call gate */
1440         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1441         rpl = new_cs & 3;
1442         cpl = env->hflags & HF_CPL_MASK;
1443         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1444 
1445 #ifdef TARGET_X86_64
1446         if (env->efer & MSR_EFER_LMA) {
1447             if (type != 12) {
1448                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1449             }
1450         }
1451 #endif
1452         switch (type) {
1453         case 1: /* 286 TSS */
1454         case 9: /* 386 TSS */
1455         case 5: /* task gate */
1456             if (dpl < cpl || dpl < rpl) {
1457                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1458             }
1459             switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip, GETPC());
1460             break;
1461         case 4: /* 286 call gate */
1462         case 12: /* 386 call gate */
1463             if ((dpl < cpl) || (dpl < rpl)) {
1464                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1465             }
1466             if (!(e2 & DESC_P_MASK)) {
1467                 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1468             }
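            /*
             * The gate packs its target as: selector in e1[31:16], offset
             * bits 15..0 in e1[15:0] and, for a 386 gate (type 12), offset
             * bits 31..16 in e2[31:16].
             */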
1469             gate_cs = e1 >> 16;
1470             new_eip = (e1 & 0xffff);
1471             if (type == 12) {
1472                 new_eip |= (e2 & 0xffff0000);
1473             }
1474 
1475 #ifdef TARGET_X86_64
1476             if (env->efer & MSR_EFER_LMA) {
1477                 /* load the upper 8 bytes of the 64-bit call gate */
1478                 if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) {
1479                     raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1480                                            GETPC());
1481                 }
1482                 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1483                 if (type != 0) {
1484                     raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1485                                            GETPC());
1486                 }
1487                 new_eip |= ((target_ulong)e1) << 32;
1488             }
1489 #endif
1490 
1491             if (load_segment_ra(env, &e1, &e2, gate_cs, GETPC()) != 0) {
1492                 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1493             }
1494             dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1495             /* must be code segment */
1496             if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
1497                  (DESC_S_MASK | DESC_CS_MASK))) {
1498                 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1499             }
1500             if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
1501                 (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
1502                 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1503             }
1504 #ifdef TARGET_X86_64
1505             if (env->efer & MSR_EFER_LMA) {
1506                 if (!(e2 & DESC_L_MASK)) {
1507                     raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1508                 }
1509                 if (e2 & DESC_B_MASK) {
1510                     raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1511                 }
1512             }
1513 #endif
1514             if (!(e2 & DESC_P_MASK)) {
1515                 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1516             }
1517             limit = get_seg_limit(e1, e2);
1518             if (new_eip > limit &&
1519                 (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) {
1520                 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1521             }
1522             cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
1523                                    get_seg_base(e1, e2), limit, e2);
1524             env->eip = new_eip;
1525             break;
1526         default:
1527             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1528             break;
1529         }
1530     }
1531 }
1532 
1533 /* real mode call */
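/*
 * Far call without protection checks: push the return CS:IP on the current
 * stack (32-bit pushes when shift != 0, 16-bit otherwise) and load CS with
 * selector << 4 as its base, as in real/vm86 mode.
 */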
1534 void helper_lcall_real(CPUX86State *env, int new_cs, target_ulong new_eip1,
1535                        int shift, int next_eip)
1536 {
1537     int new_eip;
1538     uint32_t esp, esp_mask;
1539     target_ulong ssp;
1540 
1541     new_eip = new_eip1;
1542     esp = env->regs[R_ESP];
1543     esp_mask = get_sp_mask(env->segs[R_SS].flags);
1544     ssp = env->segs[R_SS].base;
1545     if (shift) {
1546         PUSHL_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
1547         PUSHL_RA(ssp, esp, esp_mask, next_eip, GETPC());
1548     } else {
1549         PUSHW_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
1550         PUSHW_RA(ssp, esp, esp_mask, next_eip, GETPC());
1551     }
1552 
1553     SET_ESP(esp, esp_mask);
1554     env->eip = new_eip;
1555     env->segs[R_CS].selector = new_cs;
1556     env->segs[R_CS].base = (new_cs << 4);
1557 }
1558 
1559 /* protected mode call */
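/*
 * Protected mode far call.  Three cases: a plain code segment target (push
 * the return CS:IP on the current stack), a TSS or task gate (task switch),
 * or a call gate, which may raise CPL and therefore switch to the inner
 * stack taken from the TSS, copying param_count stack words.
 */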
1560 void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
1561                             int shift, target_ulong next_eip)
1562 {
1563     int new_stack, i;
1564     uint32_t e1, e2, cpl, dpl, rpl, selector, param_count;
1565     uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, type, ss_dpl, sp_mask;
1566     uint32_t val, limit, old_sp_mask;
1567     target_ulong ssp, old_ssp, offset, sp;
1568 
1569     LOG_PCALL("lcall %04x:" TARGET_FMT_lx " s=%d\n", new_cs, new_eip, shift);
1570     LOG_PCALL_STATE(env_cpu(env));
1571     if ((new_cs & 0xfffc) == 0) {
1572         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1573     }
1574     if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
1575         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1576     }
1577     cpl = env->hflags & HF_CPL_MASK;
1578     LOG_PCALL("desc=%08x:%08x\n", e1, e2);
1579     if (e2 & DESC_S_MASK) {
1580         if (!(e2 & DESC_CS_MASK)) {
1581             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1582         }
1583         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1584         if (e2 & DESC_C_MASK) {
1585             /* conforming code segment */
1586             if (dpl > cpl) {
1587                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1588             }
1589         } else {
1590             /* non conforming code segment */
1591             rpl = new_cs & 3;
1592             if (rpl > cpl) {
1593                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1594             }
1595             if (dpl != cpl) {
1596                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1597             }
1598         }
1599         if (!(e2 & DESC_P_MASK)) {
1600             raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1601         }
1602 
1603 #ifdef TARGET_X86_64
1604         /* XXX: check 16/32 bit cases in long mode */
1605         if (shift == 2) {
1606             target_ulong rsp;
1607 
1608             /* 64 bit case */
1609             rsp = env->regs[R_ESP];
1610             PUSHQ_RA(rsp, env->segs[R_CS].selector, GETPC());
1611             PUSHQ_RA(rsp, next_eip, GETPC());
1612             /* from this point, not restartable */
1613             env->regs[R_ESP] = rsp;
1614             cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1615                                    get_seg_base(e1, e2),
1616                                    get_seg_limit(e1, e2), e2);
1617             env->eip = new_eip;
1618         } else
1619 #endif
1620         {
1621             sp = env->regs[R_ESP];
1622             sp_mask = get_sp_mask(env->segs[R_SS].flags);
1623             ssp = env->segs[R_SS].base;
1624             if (shift) {
1625                 PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
1626                 PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
1627             } else {
1628                 PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
1629                 PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
1630             }
1631 
1632             limit = get_seg_limit(e1, e2);
1633             if (new_eip > limit) {
1634                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1635             }
1636             /* from this point, not restartable */
1637             SET_ESP(sp, sp_mask);
1638             cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1639                                    get_seg_base(e1, e2), limit, e2);
1640             env->eip = new_eip;
1641         }
1642     } else {
1643         /* check gate type */
1644         type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1645         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1646         rpl = new_cs & 3;
1647 
1648 #ifdef TARGET_X86_64
1649         if (env->efer & MSR_EFER_LMA) {
1650             if (type != 12) {
1651                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1652             }
1653         }
1654 #endif
1655 
1656         switch (type) {
1657         case 1: /* available 286 TSS */
1658         case 9: /* available 386 TSS */
1659         case 5: /* task gate */
1660             if (dpl < cpl || dpl < rpl) {
1661                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1662             }
1663             switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip, GETPC());
1664             return;
1665         case 4: /* 286 call gate */
1666         case 12: /* 386 call gate */
1667             break;
1668         default:
1669             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1670             break;
1671         }
1672         shift = type >> 3;
1673 
1674         if (dpl < cpl || dpl < rpl) {
1675             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1676         }
1677         /* check valid bit */
1678         if (!(e2 & DESC_P_MASK)) {
1679             raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1680         }
1681         selector = e1 >> 16;
1682         param_count = e2 & 0x1f;
1683         offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1684 #ifdef TARGET_X86_64
1685         if (env->efer & MSR_EFER_LMA) {
1686             /* load the upper 8 bytes of the 64-bit call gate */
1687             if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) {
1688                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1689                                        GETPC());
1690             }
1691             type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1692             if (type != 0) {
1693                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1694                                        GETPC());
1695             }
1696             offset |= ((target_ulong)e1) << 32;
1697         }
1698 #endif
1699         if ((selector & 0xfffc) == 0) {
1700             raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1701         }
1702 
1703         if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
1704             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1705         }
1706         if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
1707             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1708         }
1709         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1710         if (dpl > cpl) {
1711             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1712         }
1713 #ifdef TARGET_X86_64
1714         if (env->efer & MSR_EFER_LMA) {
1715             if (!(e2 & DESC_L_MASK)) {
1716                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1717             }
1718             if (e2 & DESC_B_MASK) {
1719                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1720             }
1721             shift++;
1722         }
1723 #endif
1724         if (!(e2 & DESC_P_MASK)) {
1725             raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1726         }
1727 
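        /*
         * A call gate to a non-conforming segment with DPL < CPL raises the
         * privilege level: load SS:ESP for the new CPL from the TSS, push
         * the old SS:ESP there, copy param_count parameters from the old
         * stack, then push the return CS:EIP.  64-bit call gates never copy
         * parameters and always use a NULL SS.
         */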
1728         if (!(e2 & DESC_C_MASK) && dpl < cpl) {
1729             /* to inner privilege */
1730 #ifdef TARGET_X86_64
1731             if (shift == 2) {
1732                 sp = get_rsp_from_tss(env, dpl);
1733                 ss = dpl;  /* SS = NULL selector with RPL = new CPL */
1734                 new_stack = 1;
1735                 sp_mask = 0;
1736                 ssp = 0;  /* SS base is always zero in IA-32e mode */
1737                 LOG_PCALL("new ss:rsp=%04x:" TARGET_FMT_lx " env->regs[R_ESP]="
1738                           TARGET_FMT_lx "\n", ss, sp, env->regs[R_ESP]);
1739             } else
1740 #endif
1741             {
1742                 uint32_t sp32;
1743                 get_ss_esp_from_tss(env, &ss, &sp32, dpl, GETPC());
1744                 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
1745                           TARGET_FMT_lx "\n", ss, sp32, param_count,
1746                           env->regs[R_ESP]);
1747                 sp = sp32;
1748                 if ((ss & 0xfffc) == 0) {
1749                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1750                 }
1751                 if ((ss & 3) != dpl) {
1752                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1753                 }
1754                 if (load_segment_ra(env, &ss_e1, &ss_e2, ss, GETPC()) != 0) {
1755                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1756                 }
1757                 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
1758                 if (ss_dpl != dpl) {
1759                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1760                 }
1761                 if (!(ss_e2 & DESC_S_MASK) ||
1762                     (ss_e2 & DESC_CS_MASK) ||
1763                     !(ss_e2 & DESC_W_MASK)) {
1764                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1765                 }
1766                 if (!(ss_e2 & DESC_P_MASK)) {
1767                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1768                 }
1769 
1770                 sp_mask = get_sp_mask(ss_e2);
1771                 ssp = get_seg_base(ss_e1, ss_e2);
1772             }
1773 
1774             /* push_size = ((param_count * 2) + 8) << shift; */
1775 
1776             old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
1777             old_ssp = env->segs[R_SS].base;
1778 #ifdef TARGET_X86_64
1779             if (shift == 2) {
1780                 /* XXX: verify if new stack address is canonical */
1781                 PUSHQ_RA(sp, env->segs[R_SS].selector, GETPC());
1782                 PUSHQ_RA(sp, env->regs[R_ESP], GETPC());
1783                 /* parameters aren't supported for 64-bit call gates */
1784             } else
1785 #endif
1786             if (shift == 1) {
1787                 PUSHL_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
1788                 PUSHL_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
1789                 for (i = param_count - 1; i >= 0; i--) {
1790                     val = cpu_ldl_kernel_ra(env, old_ssp +
1791                                             ((env->regs[R_ESP] + i * 4) &
1792                                              old_sp_mask), GETPC());
1793                     PUSHL_RA(ssp, sp, sp_mask, val, GETPC());
1794                 }
1795             } else {
1796                 PUSHW_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
1797                 PUSHW_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
1798                 for (i = param_count - 1; i >= 0; i--) {
1799                     val = cpu_lduw_kernel_ra(env, old_ssp +
1800                                              ((env->regs[R_ESP] + i * 2) &
1801                                               old_sp_mask), GETPC());
1802                     PUSHW_RA(ssp, sp, sp_mask, val, GETPC());
1803                 }
1804             }
1805             new_stack = 1;
1806         } else {
1807             /* to same privilege */
1808             sp = env->regs[R_ESP];
1809             sp_mask = get_sp_mask(env->segs[R_SS].flags);
1810             ssp = env->segs[R_SS].base;
1811             /* push_size = (4 << shift); */
1812             new_stack = 0;
1813         }
1814 
1815 #ifdef TARGET_X86_64
1816         if (shift == 2) {
1817             PUSHQ_RA(sp, env->segs[R_CS].selector, GETPC());
1818             PUSHQ_RA(sp, next_eip, GETPC());
1819         } else
1820 #endif
1821         if (shift == 1) {
1822             PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
1823             PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
1824         } else {
1825             PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
1826             PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
1827         }
1828 
1829         /* from this point, not restartable */
1830 
1831         if (new_stack) {
1832 #ifdef TARGET_X86_64
1833             if (shift == 2) {
1834                 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
1835             } else
1836 #endif
1837             {
1838                 ss = (ss & ~3) | dpl;
1839                 cpu_x86_load_seg_cache(env, R_SS, ss,
1840                                        ssp,
1841                                        get_seg_limit(ss_e1, ss_e2),
1842                                        ss_e2);
1843             }
1844         }
1845 
1846         selector = (selector & ~3) | dpl;
1847         cpu_x86_load_seg_cache(env, R_CS, selector,
1848                        get_seg_base(e1, e2),
1849                        get_seg_limit(e1, e2),
1850                        e2);
1851         SET_ESP(sp, sp_mask);
1852         env->eip = offset;
1853     }
1854 }
1855 
1856 /* real and vm86 mode iret */
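/*
 * Pop IP, CS and FLAGS with the operand size given by shift (1 = 32 bit,
 * 0 = 16 bit).  In vm86 mode IOPL is not writable, so it is kept out of
 * the eflags update mask.
 */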
1857 void helper_iret_real(CPUX86State *env, int shift)
1858 {
1859     uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
1860     target_ulong ssp;
1861     int eflags_mask;
1862 
1863     sp_mask = 0xffff; /* XXX: use SS segment size? */
1864     sp = env->regs[R_ESP];
1865     ssp = env->segs[R_SS].base;
1866     if (shift == 1) {
1867         /* 32 bits */
1868         POPL_RA(ssp, sp, sp_mask, new_eip, GETPC());
1869         POPL_RA(ssp, sp, sp_mask, new_cs, GETPC());
1870         new_cs &= 0xffff;
1871         POPL_RA(ssp, sp, sp_mask, new_eflags, GETPC());
1872     } else {
1873         /* 16 bits */
1874         POPW_RA(ssp, sp, sp_mask, new_eip, GETPC());
1875         POPW_RA(ssp, sp, sp_mask, new_cs, GETPC());
1876         POPW_RA(ssp, sp, sp_mask, new_eflags, GETPC());
1877     }
1878     env->regs[R_ESP] = (env->regs[R_ESP] & ~sp_mask) | (sp & sp_mask);
1879     env->segs[R_CS].selector = new_cs;
1880     env->segs[R_CS].base = (new_cs << 4);
1881     env->eip = new_eip;
1882     if (env->eflags & VM_MASK) {
1883         eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
1884             NT_MASK;
1885     } else {
1886         eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
1887             RF_MASK | NT_MASK;
1888     }
1889     if (shift == 0) {
1890         eflags_mask &= 0xffff;
1891     }
1892     cpu_load_eflags(env, new_eflags, eflags_mask);
1893     env->hflags2 &= ~HF2_NMI_MASK;
1894 }
1895 
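/*
 * On a return to an outer privilege level, a data or non-conforming code
 * segment register whose DPL is lower than the new CPL is no longer
 * accessible: load a NULL selector and clear the cached present bit so
 * later uses of the register fault.
 */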
1896 static inline void validate_seg(CPUX86State *env, X86Seg seg_reg, int cpl)
1897 {
1898     int dpl;
1899     uint32_t e2;
1900 
1901     /* XXX: on x86_64, we do not nullify FS and GS because they may
1902        still contain a valid base; how a real x86_64 CPU behaves here
1903        has not been verified. */
1904     if ((seg_reg == R_FS || seg_reg == R_GS) &&
1905         (env->segs[seg_reg].selector & 0xfffc) == 0) {
1906         return;
1907     }
1908 
1909     e2 = env->segs[seg_reg].flags;
1910     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1911     if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
1912         /* data or non conforming code segment */
1913         if (dpl < cpl) {
1914             cpu_x86_load_seg_cache(env, seg_reg, 0,
1915                                    env->segs[seg_reg].base,
1916                                    env->segs[seg_reg].limit,
1917                                    env->segs[seg_reg].flags & ~DESC_P_MASK);
1918         }
1919     }
1920 }
1921 
1922 /* protected mode far return and iret */
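/*
 * Common code for far RET and IRET (is_iret != 0).  shift selects the
 * operand size of the pops (0/1/2 = 16/32/64 bit) and addend is the
 * immediate of "ret n", added to the stack pointer(s) after the return
 * address is popped.  Returning to an outer level also pops SS:ESP and
 * re-validates the data segment registers; a popped EFLAGS with VM set
 * re-enters vm86 mode.
 */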
1923 static inline void helper_ret_protected(CPUX86State *env, int shift,
1924                                         int is_iret, int addend,
1925                                         uintptr_t retaddr)
1926 {
1927     uint32_t new_cs, new_eflags, new_ss;
1928     uint32_t new_es, new_ds, new_fs, new_gs;
1929     uint32_t e1, e2, ss_e1, ss_e2;
1930     int cpl, dpl, rpl, eflags_mask, iopl;
1931     target_ulong ssp, sp, new_eip, new_esp, sp_mask;
1932 
1933 #ifdef TARGET_X86_64
1934     if (shift == 2) {
1935         sp_mask = -1;
1936     } else
1937 #endif
1938     {
1939         sp_mask = get_sp_mask(env->segs[R_SS].flags);
1940     }
1941     sp = env->regs[R_ESP];
1942     ssp = env->segs[R_SS].base;
1943     new_eflags = 0; /* avoid warning */
1944 #ifdef TARGET_X86_64
1945     if (shift == 2) {
1946         POPQ_RA(sp, new_eip, retaddr);
1947         POPQ_RA(sp, new_cs, retaddr);
1948         new_cs &= 0xffff;
1949         if (is_iret) {
1950             POPQ_RA(sp, new_eflags, retaddr);
1951         }
1952     } else
1953 #endif
1954     {
1955         if (shift == 1) {
1956             /* 32 bits */
1957             POPL_RA(ssp, sp, sp_mask, new_eip, retaddr);
1958             POPL_RA(ssp, sp, sp_mask, new_cs, retaddr);
1959             new_cs &= 0xffff;
1960             if (is_iret) {
1961                 POPL_RA(ssp, sp, sp_mask, new_eflags, retaddr);
1962                 if (new_eflags & VM_MASK) {
1963                     goto return_to_vm86;
1964                 }
1965             }
1966         } else {
1967             /* 16 bits */
1968             POPW_RA(ssp, sp, sp_mask, new_eip, retaddr);
1969             POPW_RA(ssp, sp, sp_mask, new_cs, retaddr);
1970             if (is_iret) {
1971                 POPW_RA(ssp, sp, sp_mask, new_eflags, retaddr);
1972             }
1973         }
1974     }
1975     LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
1976               new_cs, new_eip, shift, addend);
1977     LOG_PCALL_STATE(env_cpu(env));
1978     if ((new_cs & 0xfffc) == 0) {
1979         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
1980     }
1981     if (load_segment_ra(env, &e1, &e2, new_cs, retaddr) != 0) {
1982         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
1983     }
1984     if (!(e2 & DESC_S_MASK) ||
1985         !(e2 & DESC_CS_MASK)) {
1986         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
1987     }
1988     cpl = env->hflags & HF_CPL_MASK;
1989     rpl = new_cs & 3;
1990     if (rpl < cpl) {
1991         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
1992     }
1993     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1994     if (e2 & DESC_C_MASK) {
1995         if (dpl > rpl) {
1996             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
1997         }
1998     } else {
1999         if (dpl != rpl) {
2000             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2001         }
2002     }
2003     if (!(e2 & DESC_P_MASK)) {
2004         raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, retaddr);
2005     }
2006 
2007     sp += addend;
2008     if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2009                        ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2010         /* return to same privilege level */
2011         cpu_x86_load_seg_cache(env, R_CS, new_cs,
2012                        get_seg_base(e1, e2),
2013                        get_seg_limit(e1, e2),
2014                        e2);
2015     } else {
2016         /* return to different privilege level */
2017 #ifdef TARGET_X86_64
2018         if (shift == 2) {
2019             POPQ_RA(sp, new_esp, retaddr);
2020             POPQ_RA(sp, new_ss, retaddr);
2021             new_ss &= 0xffff;
2022         } else
2023 #endif
2024         {
2025             if (shift == 1) {
2026                 /* 32 bits */
2027                 POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
2028                 POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
2029                 new_ss &= 0xffff;
2030             } else {
2031                 /* 16 bits */
2032                 POPW_RA(ssp, sp, sp_mask, new_esp, retaddr);
2033                 POPW_RA(ssp, sp, sp_mask, new_ss, retaddr);
2034             }
2035         }
2036         LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
2037                   new_ss, new_esp);
2038         if ((new_ss & 0xfffc) == 0) {
2039 #ifdef TARGET_X86_64
2040             /* NULL ss is allowed in long mode if the new CPL != 3 */
2041             /* XXX: test CS64? */
2042             if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2043                 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2044                                        0, 0xffffffff,
2045                                        DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2046                                        DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2047                                        DESC_W_MASK | DESC_A_MASK);
2048                 ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
2049             } else
2050 #endif
2051             {
2052                 raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
2053             }
2054         } else {
2055             if ((new_ss & 3) != rpl) {
2056                 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2057             }
2058             if (load_segment_ra(env, &ss_e1, &ss_e2, new_ss, retaddr) != 0) {
2059                 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2060             }
2061             if (!(ss_e2 & DESC_S_MASK) ||
2062                 (ss_e2 & DESC_CS_MASK) ||
2063                 !(ss_e2 & DESC_W_MASK)) {
2064                 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2065             }
2066             dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2067             if (dpl != rpl) {
2068                 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2069             }
2070             if (!(ss_e2 & DESC_P_MASK)) {
2071                 raise_exception_err_ra(env, EXCP0B_NOSEG, new_ss & 0xfffc, retaddr);
2072             }
2073             cpu_x86_load_seg_cache(env, R_SS, new_ss,
2074                                    get_seg_base(ss_e1, ss_e2),
2075                                    get_seg_limit(ss_e1, ss_e2),
2076                                    ss_e2);
2077         }
2078 
2079         cpu_x86_load_seg_cache(env, R_CS, new_cs,
2080                        get_seg_base(e1, e2),
2081                        get_seg_limit(e1, e2),
2082                        e2);
2083         sp = new_esp;
2084 #ifdef TARGET_X86_64
2085         if (env->hflags & HF_CS64_MASK) {
2086             sp_mask = -1;
2087         } else
2088 #endif
2089         {
2090             sp_mask = get_sp_mask(ss_e2);
2091         }
2092 
2093         /* validate data segments */
2094         validate_seg(env, R_ES, rpl);
2095         validate_seg(env, R_DS, rpl);
2096         validate_seg(env, R_FS, rpl);
2097         validate_seg(env, R_GS, rpl);
2098 
2099         sp += addend;
2100     }
2101     SET_ESP(sp, sp_mask);
2102     env->eip = new_eip;
2103     if (is_iret) {
2104         /* NOTE: 'cpl' is the _old_ CPL */
2105         eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2106         if (cpl == 0) {
2107             eflags_mask |= IOPL_MASK;
2108         }
2109         iopl = (env->eflags >> IOPL_SHIFT) & 3;
2110         if (cpl <= iopl) {
2111             eflags_mask |= IF_MASK;
2112         }
2113         if (shift == 0) {
2114             eflags_mask &= 0xffff;
2115         }
2116         cpu_load_eflags(env, new_eflags, eflags_mask);
2117     }
2118     return;
2119 
2120  return_to_vm86:
2121     POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
2122     POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
2123     POPL_RA(ssp, sp, sp_mask, new_es, retaddr);
2124     POPL_RA(ssp, sp, sp_mask, new_ds, retaddr);
2125     POPL_RA(ssp, sp, sp_mask, new_fs, retaddr);
2126     POPL_RA(ssp, sp, sp_mask, new_gs, retaddr);
2127 
2128     /* modify processor state */
2129     cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
2130                     IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
2131                     VIP_MASK);
2132     load_seg_vm(env, R_CS, new_cs & 0xffff);
2133     load_seg_vm(env, R_SS, new_ss & 0xffff);
2134     load_seg_vm(env, R_ES, new_es & 0xffff);
2135     load_seg_vm(env, R_DS, new_ds & 0xffff);
2136     load_seg_vm(env, R_FS, new_fs & 0xffff);
2137     load_seg_vm(env, R_GS, new_gs & 0xffff);
2138 
2139     env->eip = new_eip & 0xffff;
2140     env->regs[R_ESP] = new_esp;
2141 }
2142 
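/*
 * IRET: if NT is set, return from a nested task by switching to the TSS
 * named in the back-link field of the current TSS; otherwise do a normal
 * protected mode return.  Either way the NMI-blocked flag is cleared.
 */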
2143 void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
2144 {
2145     int tss_selector, type;
2146     uint32_t e1, e2;
2147 
2148     /* nested-task return through the TSS back link */
2149     if (env->eflags & NT_MASK) {
2150 #ifdef TARGET_X86_64
2151         if (env->hflags & HF_LMA_MASK) {
2152             raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2153         }
2154 #endif
2155         tss_selector = cpu_lduw_kernel_ra(env, env->tr.base + 0, GETPC());
2156         if (tss_selector & 4) {
2157             raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2158         }
2159         if (load_segment_ra(env, &e1, &e2, tss_selector, GETPC()) != 0) {
2160             raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2161         }
2162         type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2163         /* NOTE: only a busy TSS (16 or 32 bit) passes this check */
2164         if (type != 3) {
2165             raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2166         }
2167         switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip, GETPC());
2168     } else {
2169         helper_ret_protected(env, shift, 1, 0, GETPC());
2170     }
2171     env->hflags2 &= ~HF2_NMI_MASK;
2172 }
2173 
2174 void helper_lret_protected(CPUX86State *env, int shift, int addend)
2175 {
2176     helper_ret_protected(env, shift, 0, addend, GETPC());
2177 }
2178 
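/*
 * SYSENTER: force CPL 0 with flat segments derived from the SYSENTER_CS
 * MSR (CS = sysenter_cs, SS = sysenter_cs + 8) and load ESP/EIP from the
 * SYSENTER_ESP/SYSENTER_EIP MSRs.  A zero SYSENTER_CS raises #GP.
 */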
2179 void helper_sysenter(CPUX86State *env)
2180 {
2181     if (env->sysenter_cs == 0) {
2182         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2183     }
2184     env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2185 
2186 #ifdef TARGET_X86_64
2187     if (env->hflags & HF_LMA_MASK) {
2188         cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2189                                0, 0xffffffff,
2190                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2191                                DESC_S_MASK |
2192                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2193                                DESC_L_MASK);
2194     } else
2195 #endif
2196     {
2197         cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2198                                0, 0xffffffff,
2199                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2200                                DESC_S_MASK |
2201                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2202     }
2203     cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2204                            0, 0xffffffff,
2205                            DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2206                            DESC_S_MASK |
2207                            DESC_W_MASK | DESC_A_MASK);
2208     env->regs[R_ESP] = env->sysenter_esp;
2209     env->eip = env->sysenter_eip;
2210 }
2211 
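/*
 * SYSEXIT: only legal from CPL 0 with SYSENTER_CS set.  Return to CPL 3
 * with flat segments at sysenter_cs + 16/24 (32-bit) or + 32/40 (64-bit),
 * taking the new ESP from ECX and the new EIP from EDX.
 */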
2212 void helper_sysexit(CPUX86State *env, int dflag)
2213 {
2214     int cpl;
2215 
2216     cpl = env->hflags & HF_CPL_MASK;
2217     if (env->sysenter_cs == 0 || cpl != 0) {
2218         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2219     }
2220 #ifdef TARGET_X86_64
2221     if (dflag == 2) {
2222         cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
2223                                3, 0, 0xffffffff,
2224                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2225                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2226                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2227                                DESC_L_MASK);
2228         cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
2229                                3, 0, 0xffffffff,
2230                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2231                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2232                                DESC_W_MASK | DESC_A_MASK);
2233     } else
2234 #endif
2235     {
2236         cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
2237                                3, 0, 0xffffffff,
2238                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2239                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2240                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2241         cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
2242                                3, 0, 0xffffffff,
2243                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2244                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2245                                DESC_W_MASK | DESC_A_MASK);
2246     }
2247     env->regs[R_ESP] = env->regs[R_ECX];
2248     env->eip = env->regs[R_EDX];
2249 }
2250 
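/*
 * LSL: return the target segment limit and set ZF when the descriptor is
 * one whose limit is defined and that is visible at the current CPL/RPL;
 * otherwise clear ZF and return 0.
 */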
2251 target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
2252 {
2253     unsigned int limit;
2254     uint32_t e1, e2, eflags, selector;
2255     int rpl, dpl, cpl, type;
2256 
2257     selector = selector1 & 0xffff;
2258     eflags = cpu_cc_compute_all(env, CC_OP);
2259     if ((selector & 0xfffc) == 0) {
2260         goto fail;
2261     }
2262     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2263         goto fail;
2264     }
2265     rpl = selector & 3;
2266     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2267     cpl = env->hflags & HF_CPL_MASK;
2268     if (e2 & DESC_S_MASK) {
2269         if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2270             /* conforming */
2271         } else {
2272             if (dpl < cpl || dpl < rpl) {
2273                 goto fail;
2274             }
2275         }
2276     } else {
2277         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2278         switch (type) {
2279         case 1:
2280         case 2:
2281         case 3:
2282         case 9:
2283         case 11:
2284             break;
2285         default:
2286             goto fail;
2287         }
2288         if (dpl < cpl || dpl < rpl) {
2289         fail:
2290             CC_SRC = eflags & ~CC_Z;
2291             return 0;
2292         }
2293     }
2294     limit = get_seg_limit(e1, e2);
2295     CC_SRC = eflags | CC_Z;
2296     return limit;
2297 }
2298 
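/*
 * LAR: return the access-rights bytes of the descriptor (e2 masked with
 * 0x00f0ff00) and set ZF when the selector names a descriptor type that
 * LAR accepts and that is visible at the current CPL/RPL.
 */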
2299 target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
2300 {
2301     uint32_t e1, e2, eflags, selector;
2302     int rpl, dpl, cpl, type;
2303 
2304     selector = selector1 & 0xffff;
2305     eflags = cpu_cc_compute_all(env, CC_OP);
2306     if ((selector & 0xfffc) == 0) {
2307         goto fail;
2308     }
2309     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2310         goto fail;
2311     }
2312     rpl = selector & 3;
2313     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2314     cpl = env->hflags & HF_CPL_MASK;
2315     if (e2 & DESC_S_MASK) {
2316         if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2317             /* conforming */
2318         } else {
2319             if (dpl < cpl || dpl < rpl) {
2320                 goto fail;
2321             }
2322         }
2323     } else {
2324         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2325         switch (type) {
2326         case 1:
2327         case 2:
2328         case 3:
2329         case 4:
2330         case 5:
2331         case 9:
2332         case 11:
2333         case 12:
2334             break;
2335         default:
2336             goto fail;
2337         }
2338         if (dpl < cpl || dpl < rpl) {
2339         fail:
2340             CC_SRC = eflags & ~CC_Z;
2341             return 0;
2342         }
2343     }
2344     CC_SRC = eflags | CC_Z;
2345     return e2 & 0x00f0ff00;
2346 }
2347 
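/*
 * VERR: set ZF if the selector names a segment readable at the current
 * CPL/RPL (data, or readable code, with conforming code always allowed);
 * a bad selector only clears ZF instead of faulting.
 */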
2348 void helper_verr(CPUX86State *env, target_ulong selector1)
2349 {
2350     uint32_t e1, e2, eflags, selector;
2351     int rpl, dpl, cpl;
2352 
2353     selector = selector1 & 0xffff;
2354     eflags = cpu_cc_compute_all(env, CC_OP);
2355     if ((selector & 0xfffc) == 0) {
2356         goto fail;
2357     }
2358     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2359         goto fail;
2360     }
2361     if (!(e2 & DESC_S_MASK)) {
2362         goto fail;
2363     }
2364     rpl = selector & 3;
2365     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2366     cpl = env->hflags & HF_CPL_MASK;
2367     if (e2 & DESC_CS_MASK) {
2368         if (!(e2 & DESC_R_MASK)) {
2369             goto fail;
2370         }
2371         if (!(e2 & DESC_C_MASK)) {
2372             if (dpl < cpl || dpl < rpl) {
2373                 goto fail;
2374             }
2375         }
2376     } else {
2377         if (dpl < cpl || dpl < rpl) {
2378         fail:
2379             CC_SRC = eflags & ~CC_Z;
2380             return;
2381         }
2382     }
2383     CC_SRC = eflags | CC_Z;
2384 }
2385 
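/*
 * VERW: like VERR, but the segment must be a writable data segment that
 * is visible at the current CPL/RPL; code segments always fail.
 */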
2386 void helper_verw(CPUX86State *env, target_ulong selector1)
2387 {
2388     uint32_t e1, e2, eflags, selector;
2389     int rpl, dpl, cpl;
2390 
2391     selector = selector1 & 0xffff;
2392     eflags = cpu_cc_compute_all(env, CC_OP);
2393     if ((selector & 0xfffc) == 0) {
2394         goto fail;
2395     }
2396     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2397         goto fail;
2398     }
2399     if (!(e2 & DESC_S_MASK)) {
2400         goto fail;
2401     }
2402     rpl = selector & 3;
2403     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2404     cpl = env->hflags & HF_CPL_MASK;
2405     if (e2 & DESC_CS_MASK) {
2406         goto fail;
2407     } else {
2408         if (dpl < cpl || dpl < rpl) {
2409             goto fail;
2410         }
2411         if (!(e2 & DESC_W_MASK)) {
2412         fail:
2413             CC_SRC = eflags & ~CC_Z;
2414             return;
2415         }
2416     }
2417     CC_SRC = eflags | CC_Z;
2418 }
2419