xref: /openbmc/qemu/target/i386/tcg/seg_helper.c (revision 1f2146f7)
1 /*
2  *  x86 segmentation related helpers:
3  *  TSS, interrupts, system calls, jumps and call/task gates, descriptors
4  *
5  *  Copyright (c) 2003 Fabrice Bellard
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "qemu/log.h"
24 #include "exec/helper-proto.h"
25 #include "exec/exec-all.h"
26 #include "exec/cpu_ldst.h"
27 #include "exec/log.h"
28 #include "helper-tcg.h"
29 #include "seg_helper.h"
30 
31 int get_pg_mode(CPUX86State *env)
32 {
33     int pg_mode = 0;
34     if (!(env->cr[0] & CR0_PG_MASK)) {
35         return 0;
36     }
37     if (env->cr[0] & CR0_WP_MASK) {
38         pg_mode |= PG_MODE_WP;
39     }
40     if (env->cr[4] & CR4_PAE_MASK) {
41         pg_mode |= PG_MODE_PAE;
42         if (env->efer & MSR_EFER_NXE) {
43             pg_mode |= PG_MODE_NXE;
44         }
45     }
46     if (env->cr[4] & CR4_PSE_MASK) {
47         pg_mode |= PG_MODE_PSE;
48     }
49     if (env->cr[4] & CR4_SMEP_MASK) {
50         pg_mode |= PG_MODE_SMEP;
51     }
52     if (env->hflags & HF_LMA_MASK) {
53         pg_mode |= PG_MODE_LMA;
54         if (env->cr[4] & CR4_PKE_MASK) {
55             pg_mode |= PG_MODE_PKE;
56         }
57         if (env->cr[4] & CR4_PKS_MASK) {
58             pg_mode |= PG_MODE_PKS;
59         }
60         if (env->cr[4] & CR4_LA57_MASK) {
61             pg_mode |= PG_MODE_LA57;
62         }
63     }
64     return pg_mode;
65 }
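
/*
 * For reference (not part of the original code): on a typical 64-bit guest
 * with CR0.PG/CR0.WP, CR4.PAE/CR4.PSE and EFER.NXE set while running in long
 * mode, the value returned above would be
 * PG_MODE_WP | PG_MODE_PAE | PG_MODE_NXE | PG_MODE_PSE | PG_MODE_LMA,
 * with PG_MODE_SMEP/PKE/PKS/LA57 added only when the corresponding CR4 bits
 * are also enabled.
 */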
66 
67 /* return non-zero on error */
68 static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr,
69                                uint32_t *e2_ptr, int selector,
70                                uintptr_t retaddr)
71 {
72     SegmentCache *dt;
73     int index;
74     target_ulong ptr;
75 
76     if (selector & 0x4) {
77         dt = &env->ldt;
78     } else {
79         dt = &env->gdt;
80     }
81     index = selector & ~7;
82     if ((index + 7) > dt->limit) {
83         return -1;
84     }
85     ptr = dt->base + index;
86     *e1_ptr = cpu_ldl_kernel_ra(env, ptr, retaddr);
87     *e2_ptr = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
88     return 0;
89 }
90 
91 static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
92                                uint32_t *e2_ptr, int selector)
93 {
94     return load_segment_ra(env, e1_ptr, e2_ptr, selector, 0);
95 }
96 
97 static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
98 {
99     unsigned int limit;
100 
101     limit = (e1 & 0xffff) | (e2 & 0x000f0000);
102     if (e2 & DESC_G_MASK) {
103         limit = (limit << 12) | 0xfff;
104     }
105     return limit;
106 }
107 
108 static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
109 {
110     return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
111 }
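
/*
 * Illustrative sketch (hypothetical helper, not used below): a descriptor is
 * handled here as two little-endian 32-bit words, e1 (bytes 0-3) and e2
 * (bytes 4-7).  The common flat 4 GiB code segment descriptor
 * 0x00cf9a000000ffff splits into e1 = 0x0000ffff and e2 = 0x00cf9a00, which
 * the helpers above decode as base 0 and, since the granularity bit is set,
 * limit 0xffffffff.
 */
#if 0
static void decode_flat_code_descriptor_example(void)
{
    uint32_t e1 = 0x0000ffff; /* base[15:0] in the high half, limit[15:0] low */
    uint32_t e2 = 0x00cf9a00; /* base[31:24], G/D flags, limit[19:16], access byte, base[23:16] */

    assert(get_seg_base(e1, e2) == 0);
    assert(get_seg_limit(e1, e2) == 0xffffffff);
}
#endif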
112 
113 static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
114                                          uint32_t e2)
115 {
116     sc->base = get_seg_base(e1, e2);
117     sc->limit = get_seg_limit(e1, e2);
118     sc->flags = e2;
119 }
120 
121 /* init the segment cache in vm86 mode. */
122 static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
123 {
124     selector &= 0xffff;
125 
126     cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff,
127                            DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
128                            DESC_A_MASK | (3 << DESC_DPL_SHIFT));
129 }
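
/*
 * Illustrative sketch (hypothetical usage, not part of the original code):
 * a vm86 segment register behaves like a real mode one, so the cached base
 * is simply selector << 4 with a fixed 64 KiB writable data segment, e.g.:
 */
#if 0
static void load_seg_vm_example(CPUX86State *env)
{
    load_seg_vm(env, R_ES, 0xb800);              /* text-mode video segment */
    assert(env->segs[R_ES].base == 0xb8000);     /* 0xb800 << 4 */
    assert(env->segs[R_ES].limit == 0xffff);
}
#endif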
130 
131 static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
132                                        uint32_t *esp_ptr, int dpl,
133                                        uintptr_t retaddr)
134 {
135     X86CPU *cpu = env_archcpu(env);
136     int type, index, shift;
137 
138 #if 0
139     {
140         int i;
141         printf("TR: base=" TARGET_FMT_lx " limit=%x\n", env->tr.base, env->tr.limit);
142         for (i = 0; i < env->tr.limit; i++) {
143             printf("%02x ", cpu_ldub_kernel_ra(env, env->tr.base + i, retaddr));
144             if ((i & 7) == 7) {
145                 printf("\n");
146             }
147         }
148         printf("\n");
149     }
150 #endif
151 
152     if (!(env->tr.flags & DESC_P_MASK)) {
153         cpu_abort(CPU(cpu), "invalid tss");
154     }
155     type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
156     if ((type & 7) != 1) {
157         cpu_abort(CPU(cpu), "invalid tss type");
158     }
159     shift = type >> 3;
160     index = (dpl * 4 + 2) << shift;
161     if (index + (4 << shift) - 1 > env->tr.limit) {
162         raise_exception_err_ra(env, EXCP0A_TSS, env->tr.selector & 0xfffc, retaddr);
163     }
164     if (shift == 0) {
165         *esp_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index, retaddr);
166         *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 2, retaddr);
167     } else {
168         *esp_ptr = cpu_ldl_kernel_ra(env, env->tr.base + index, retaddr);
169         *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 4, retaddr);
170     }
171 }
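
/*
 * Illustrative sketch (hypothetical helper, not part of the original code)
 * of the index computation above: in a 32-bit TSS (shift == 1) the ring-0
 * stack pointer and selector live at offsets 4 and 8, while a 16-bit TSS
 * (shift == 0) keeps SP0 at offset 2 and SS0 at offset 4, so
 * "(dpl * 4 + 2) << shift" yields exactly those offsets:
 */
#if 0
static void tss_stack_offset_example(void)
{
    assert(((0 * 4 + 2) << 1) == 4);   /* 32-bit TSS: ESP0 at +4, SS0 at +8 */
    assert(((2 * 4 + 2) << 1) == 20);  /* 32-bit TSS: ESP2 at +20, SS2 at +24 */
    assert(((0 * 4 + 2) << 0) == 2);   /* 16-bit TSS: SP0 at +2, SS0 at +4 */
}
#endif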
172 
173 static void tss_load_seg(CPUX86State *env, X86Seg seg_reg, int selector,
174                          int cpl, uintptr_t retaddr)
175 {
176     uint32_t e1, e2;
177     int rpl, dpl;
178 
179     if ((selector & 0xfffc) != 0) {
180         if (load_segment_ra(env, &e1, &e2, selector, retaddr) != 0) {
181             raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
182         }
183         if (!(e2 & DESC_S_MASK)) {
184             raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
185         }
186         rpl = selector & 3;
187         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
188         if (seg_reg == R_CS) {
189             if (!(e2 & DESC_CS_MASK)) {
190                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
191             }
192             if (dpl != rpl) {
193                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
194             }
195         } else if (seg_reg == R_SS) {
196             /* SS must be writable data */
197             if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
198                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
199             }
200             if (dpl != cpl || dpl != rpl) {
201                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
202             }
203         } else {
204             /* fault if it is a non-readable code segment */
205             if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
206                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
207             }
208             /* for data or non-conforming code segments, check the privilege rights */
209             if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
210                 if (dpl < cpl || dpl < rpl) {
211                     raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
212                 }
213             }
214         }
215         if (!(e2 & DESC_P_MASK)) {
216             raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, retaddr);
217         }
218         cpu_x86_load_seg_cache(env, seg_reg, selector,
219                                get_seg_base(e1, e2),
220                                get_seg_limit(e1, e2),
221                                e2);
222     } else {
223         if (seg_reg == R_SS || seg_reg == R_CS) {
224             raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
225         }
226     }
227 }
228 
229 #define SWITCH_TSS_JMP  0
230 #define SWITCH_TSS_IRET 1
231 #define SWITCH_TSS_CALL 2
232 
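/*
 * The three sources differ in how switch_tss_ra() below handles the busy
 * bit, the back link and EFLAGS.NT: a JMP clears the old TSS's busy bit and
 * sets it in the new one; a CALL leaves the old TSS busy, marks the new one
 * busy, stores the old TR selector in the new TSS's back link and sets NT;
 * an IRET clears the old busy bit and does not mark the target busy (the
 * back-link task it returns to is already busy).
 */
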
233 /* XXX: restore CPU state in registers (PowerPC case) */
234 static void switch_tss_ra(CPUX86State *env, int tss_selector,
235                           uint32_t e1, uint32_t e2, int source,
236                           uint32_t next_eip, uintptr_t retaddr)
237 {
238     int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
239     target_ulong tss_base;
240     uint32_t new_regs[8], new_segs[6];
241     uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
242     uint32_t old_eflags, eflags_mask;
243     SegmentCache *dt;
244     int index;
245     target_ulong ptr;
246 
247     type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
248     LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
249               source);
250 
251     /* if it is a task gate, read the TSS descriptor it points to and use that instead */
252     if (type == 5) {
253         if (!(e2 & DESC_P_MASK)) {
254             raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
255         }
256         tss_selector = e1 >> 16;
257         if (tss_selector & 4) {
258             raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
259         }
260         if (load_segment_ra(env, &e1, &e2, tss_selector, retaddr) != 0) {
261             raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
262         }
263         if (e2 & DESC_S_MASK) {
264             raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
265         }
266         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
267         if ((type & 7) != 1) {
268             raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
269         }
270     }
271 
272     if (!(e2 & DESC_P_MASK)) {
273         raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
274     }
275 
276     if (type & 8) {
277         tss_limit_max = 103;
278     } else {
279         tss_limit_max = 43;
280     }
281     tss_limit = get_seg_limit(e1, e2);
282     tss_base = get_seg_base(e1, e2);
283     if ((tss_selector & 4) != 0 ||
284         tss_limit < tss_limit_max) {
285         raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
286     }
287     old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
288     if (old_type & 8) {
289         old_tss_limit_max = 103;
290     } else {
291         old_tss_limit_max = 43;
292     }
293 
294     /* read all the registers from the new TSS */
295     if (type & 8) {
296         /* 32 bit */
297         new_cr3 = cpu_ldl_kernel_ra(env, tss_base + 0x1c, retaddr);
298         new_eip = cpu_ldl_kernel_ra(env, tss_base + 0x20, retaddr);
299         new_eflags = cpu_ldl_kernel_ra(env, tss_base + 0x24, retaddr);
300         for (i = 0; i < 8; i++) {
301             new_regs[i] = cpu_ldl_kernel_ra(env, tss_base + (0x28 + i * 4),
302                                             retaddr);
303         }
304         for (i = 0; i < 6; i++) {
305             new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x48 + i * 4),
306                                              retaddr);
307         }
308         new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x60, retaddr);
309         new_trap = cpu_ldl_kernel_ra(env, tss_base + 0x64, retaddr);
310     } else {
311         /* 16 bit */
312         new_cr3 = 0;
313         new_eip = cpu_lduw_kernel_ra(env, tss_base + 0x0e, retaddr);
314         new_eflags = cpu_lduw_kernel_ra(env, tss_base + 0x10, retaddr);
315         for (i = 0; i < 8; i++) {
316             new_regs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x12 + i * 2), retaddr);
317         }
318         for (i = 0; i < 4; i++) {
319             new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x22 + i * 2),
320                                              retaddr);
321         }
322         new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x2a, retaddr);
323         new_segs[R_FS] = 0;
324         new_segs[R_GS] = 0;
325         new_trap = 0;
326     }
327     /* XXX: the (void) cast avoids a compiler warning; see
328      http://support.amd.com/us/Processor_TechDocs/24593.pdf
329      chapters 12.2.5 and 13.2.4 on how to implement the TSS trap bit */
330     (void)new_trap;
331 
332     /* NOTE: we must avoid memory exceptions during the task switch,
333        so we make dummy accesses beforehand */
334     /* XXX: this can still fail in some cases, so a bigger hack would be
335        necessary to validate the TLB after the accesses have been done */
336 
337     v1 = cpu_ldub_kernel_ra(env, env->tr.base, retaddr);
338     v2 = cpu_ldub_kernel_ra(env, env->tr.base + old_tss_limit_max, retaddr);
339     cpu_stb_kernel_ra(env, env->tr.base, v1, retaddr);
340     cpu_stb_kernel_ra(env, env->tr.base + old_tss_limit_max, v2, retaddr);
341 
342     /* clear the busy bit of the old TSS (the task switch is restartable) */
343     if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
344         target_ulong ptr;
345         uint32_t e2;
346 
347         ptr = env->gdt.base + (env->tr.selector & ~7);
348         e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
349         e2 &= ~DESC_TSS_BUSY_MASK;
350         cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
351     }
352     old_eflags = cpu_compute_eflags(env);
353     if (source == SWITCH_TSS_IRET) {
354         old_eflags &= ~NT_MASK;
355     }
356 
357     /* save the current state in the old TSS */
358     if (old_type & 8) {
359         /* 32 bit */
360         cpu_stl_kernel_ra(env, env->tr.base + 0x20, next_eip, retaddr);
361         cpu_stl_kernel_ra(env, env->tr.base + 0x24, old_eflags, retaddr);
362         cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX], retaddr);
363         cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX], retaddr);
364         cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX], retaddr);
365         cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX], retaddr);
366         cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP], retaddr);
367         cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP], retaddr);
368         cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI], retaddr);
369         cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI], retaddr);
370         for (i = 0; i < 6; i++) {
371             cpu_stw_kernel_ra(env, env->tr.base + (0x48 + i * 4),
372                               env->segs[i].selector, retaddr);
373         }
374     } else {
375         /* 16 bit */
376         cpu_stw_kernel_ra(env, env->tr.base + 0x0e, next_eip, retaddr);
377         cpu_stw_kernel_ra(env, env->tr.base + 0x10, old_eflags, retaddr);
378         cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX], retaddr);
379         cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX], retaddr);
380         cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX], retaddr);
381         cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX], retaddr);
382         cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP], retaddr);
383         cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP], retaddr);
384         cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI], retaddr);
385         cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI], retaddr);
386         for (i = 0; i < 4; i++) {
387             cpu_stw_kernel_ra(env, env->tr.base + (0x22 + i * 2),
388                               env->segs[i].selector, retaddr);
389         }
390     }
391 
392     /* from now on, if an exception occurs, it will occur in the new
393        task's context */
394 
395     if (source == SWITCH_TSS_CALL) {
396         cpu_stw_kernel_ra(env, tss_base, env->tr.selector, retaddr);
397         new_eflags |= NT_MASK;
398     }
399 
400     /* set busy bit */
401     if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
402         target_ulong ptr;
403         uint32_t e2;
404 
405         ptr = env->gdt.base + (tss_selector & ~7);
406         e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
407         e2 |= DESC_TSS_BUSY_MASK;
408         cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
409     }
410 
411     /* set the new CPU state */
412     /* from this point, any exception that occurs can cause problems */
413     env->cr[0] |= CR0_TS_MASK;
414     env->hflags |= HF_TS_MASK;
415     env->tr.selector = tss_selector;
416     env->tr.base = tss_base;
417     env->tr.limit = tss_limit;
418     env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
419 
420     if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
421         cpu_x86_update_cr3(env, new_cr3);
422     }
423 
424     /* first load everything that cannot fault (EIP, EFLAGS, registers and
425        bare selectors), then reload the LDT and segment descriptors, which may fault */
426     env->eip = new_eip;
427     eflags_mask = TF_MASK | AC_MASK | ID_MASK |
428         IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
429     if (type & 8) {
430         cpu_load_eflags(env, new_eflags, eflags_mask);
431         for (i = 0; i < 8; i++) {
432             env->regs[i] = new_regs[i];
433         }
434     } else {
435         cpu_load_eflags(env, new_eflags, eflags_mask & 0xffff);
436         for (i = 0; i < 8; i++) {
437             env->regs[i] = (env->regs[i] & 0xffff0000) | new_regs[i];
438         }
439     }
440     if (new_eflags & VM_MASK) {
441         for (i = 0; i < 6; i++) {
442             load_seg_vm(env, i, new_segs[i]);
443         }
444     } else {
445         /* first just selectors as the rest may trigger exceptions */
446         for (i = 0; i < 6; i++) {
447             cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
448         }
449     }
450 
451     env->ldt.selector = new_ldt & ~4;
452     env->ldt.base = 0;
453     env->ldt.limit = 0;
454     env->ldt.flags = 0;
455 
456     /* load the LDT */
457     if (new_ldt & 4) {
458         raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
459     }
460 
461     if ((new_ldt & 0xfffc) != 0) {
462         dt = &env->gdt;
463         index = new_ldt & ~7;
464         if ((index + 7) > dt->limit) {
465             raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
466         }
467         ptr = dt->base + index;
468         e1 = cpu_ldl_kernel_ra(env, ptr, retaddr);
469         e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
470         if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
471             raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
472         }
473         if (!(e2 & DESC_P_MASK)) {
474             raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
475         }
476         load_seg_cache_raw_dt(&env->ldt, e1, e2);
477     }
478 
479     /* load the segments */
480     if (!(new_eflags & VM_MASK)) {
481         int cpl = new_segs[R_CS] & 3;
482         tss_load_seg(env, R_CS, new_segs[R_CS], cpl, retaddr);
483         tss_load_seg(env, R_SS, new_segs[R_SS], cpl, retaddr);
484         tss_load_seg(env, R_ES, new_segs[R_ES], cpl, retaddr);
485         tss_load_seg(env, R_DS, new_segs[R_DS], cpl, retaddr);
486         tss_load_seg(env, R_FS, new_segs[R_FS], cpl, retaddr);
487         tss_load_seg(env, R_GS, new_segs[R_GS], cpl, retaddr);
488     }
489 
490     /* check that env->eip is in the CS segment limits */
491     if (new_eip > env->segs[R_CS].limit) {
492         /* XXX: different exception if CALL? */
493         raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
494     }
495 
496 #ifndef CONFIG_USER_ONLY
497     /* reset local breakpoints */
498     if (env->dr[7] & DR7_LOCAL_BP_MASK) {
499         cpu_x86_update_dr7(env, env->dr[7] & ~DR7_LOCAL_BP_MASK);
500     }
501 #endif
502 }
503 
504 static void switch_tss(CPUX86State *env, int tss_selector,
505                        uint32_t e1, uint32_t e2, int source,
506                         uint32_t next_eip)
507 {
508     switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0);
509 }
510 
511 static inline unsigned int get_sp_mask(unsigned int e2)
512 {
513 #ifdef TARGET_X86_64
514     if (e2 & DESC_L_MASK) {
515         return 0;
516     } else
517 #endif
518     if (e2 & DESC_B_MASK) {
519         return 0xffffffff;
520     } else {
521         return 0xffff;
522     }
523 }
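
/*
 * The mask returned here feeds SET_ESP() and SEG_ADDL() below: a 64-bit
 * (DESC_L_MASK) stack segment yields 0, which SET_ESP() treats as "write all
 * of RSP"; a big (DESC_B_MASK) segment yields 0xffffffff for a 32-bit stack,
 * and anything else falls back to the 16-bit 0xffff mask.
 */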
524 
525 int exception_has_error_code(int intno)
526 {
527     switch (intno) {
528     case 8:
529     case 10:
530     case 11:
531     case 12:
532     case 13:
533     case 14:
534     case 17:
535         return 1;
536     }
537     return 0;
538 }
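
/*
 * The vectors handled above are #DF(8), #TS(10), #NP(11), #SS(12), #GP(13),
 * #PF(14) and #AC(17), which are delivered with an error code.
 */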
539 
540 #ifdef TARGET_X86_64
541 #define SET_ESP(val, sp_mask)                                   \
542     do {                                                        \
543         if ((sp_mask) == 0xffff) {                              \
544             env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) |   \
545                 ((val) & 0xffff);                               \
546         } else if ((sp_mask) == 0xffffffffLL) {                 \
547             env->regs[R_ESP] = (uint32_t)(val);                 \
548         } else {                                                \
549             env->regs[R_ESP] = (val);                           \
550         }                                                       \
551     } while (0)
552 #else
553 #define SET_ESP(val, sp_mask)                                   \
554     do {                                                        \
555         env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) |    \
556             ((val) & (sp_mask));                                \
557     } while (0)
558 #endif
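
/*
 * Illustrative sketch (hypothetical helper, not part of the original code):
 * with a 16-bit stack only SP is replaced and the upper bits of ESP/RSP are
 * preserved, with a 32-bit stack the value is zero-extended, and a mask of
 * zero (64-bit stack) writes the whole register:
 */
#if 0
static void set_esp_example(CPUX86State *env)
{
    unsigned int sp_mask = 0xffff;

    env->regs[R_ESP] = 0x12345678;
    SET_ESP(0x00001111, sp_mask);
    assert(env->regs[R_ESP] == 0x12341111); /* only the low 16 bits changed */
}
#endif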
559 
560 /* On 64-bit targets this addition can overflow 32 bits, so this segment
561  * addition macro can be used to trim the value to 32 bits whenever needed */
562 #define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
563 
564 /* XXX: add an is_user flag to have proper security support */
565 #define PUSHW_RA(ssp, sp, sp_mask, val, ra)                      \
566     {                                                            \
567         sp -= 2;                                                 \
568         cpu_stw_kernel_ra(env, (ssp) + (sp & (sp_mask)), (val), ra); \
569     }
570 
571 #define PUSHL_RA(ssp, sp, sp_mask, val, ra)                             \
572     {                                                                   \
573         sp -= 4;                                                        \
574         cpu_stl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val), ra); \
575     }
576 
577 #define POPW_RA(ssp, sp, sp_mask, val, ra)                       \
578     {                                                            \
579         val = cpu_lduw_kernel_ra(env, (ssp) + (sp & (sp_mask)), ra); \
580         sp += 2;                                                 \
581     }
582 
583 #define POPL_RA(ssp, sp, sp_mask, val, ra)                              \
584     {                                                                   \
585         val = (uint32_t)cpu_ldl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), ra); \
586         sp += 4;                                                        \
587     }
588 
589 #define PUSHW(ssp, sp, sp_mask, val) PUSHW_RA(ssp, sp, sp_mask, val, 0)
590 #define PUSHL(ssp, sp, sp_mask, val) PUSHL_RA(ssp, sp, sp_mask, val, 0)
591 #define POPW(ssp, sp, sp_mask, val) POPW_RA(ssp, sp, sp_mask, val, 0)
592 #define POPL(ssp, sp, sp_mask, val) POPL_RA(ssp, sp, sp_mask, val, 0)
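
/*
 * These macros update a local stack pointer copy and rely on the caller to
 * commit it with SET_ESP() afterwards, so a fault in the middle of building
 * a frame leaves the architectural ESP unchanged.  A minimal, hypothetical
 * usage sketch (not part of the original helpers):
 */
#if 0
static void pop_cs_ip_sketch(CPUX86State *env)
{
    uint32_t new_eip, new_cs;
    unsigned int sp_mask = get_sp_mask(env->segs[R_SS].flags);
    target_ulong ssp = env->segs[R_SS].base;
    uint32_t sp = env->regs[R_ESP];

    POPW(ssp, sp, sp_mask, new_eip);   /* 16-bit return offset */
    POPW(ssp, sp, sp_mask, new_cs);    /* 16-bit return selector */
    SET_ESP(sp, sp_mask);              /* commit the new stack pointer */
    (void)new_eip;
    (void)new_cs;
}
#endif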
593 
594 /* protected mode interrupt */
595 static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
596                                    int error_code, unsigned int next_eip,
597                                    int is_hw)
598 {
599     SegmentCache *dt;
600     target_ulong ptr, ssp;
601     int type, dpl, selector, ss_dpl, cpl;
602     int has_error_code, new_stack, shift;
603     uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
604     uint32_t old_eip, sp_mask;
605     int vm86 = env->eflags & VM_MASK;
606 
607     has_error_code = 0;
608     if (!is_int && !is_hw) {
609         has_error_code = exception_has_error_code(intno);
610     }
611     if (is_int) {
612         old_eip = next_eip;
613     } else {
614         old_eip = env->eip;
615     }
616 
617     dt = &env->idt;
618     if (intno * 8 + 7 > dt->limit) {
619         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
620     }
621     ptr = dt->base + intno * 8;
622     e1 = cpu_ldl_kernel(env, ptr);
623     e2 = cpu_ldl_kernel(env, ptr + 4);
624     /* check gate type */
625     type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
626     switch (type) {
627     case 5: /* task gate */
628     case 6: /* 286 interrupt gate */
629     case 7: /* 286 trap gate */
630     case 14: /* 386 interrupt gate */
631     case 15: /* 386 trap gate */
632         break;
633     default:
634         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
635         break;
636     }
637     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
638     cpl = env->hflags & HF_CPL_MASK;
639     /* check privilege if software int */
640     if (is_int && dpl < cpl) {
641         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
642     }
643 
644     if (type == 5) {
645         /* task gate */
646         /* this check must be done here so that the correct error code is reported */
647         if (!(e2 & DESC_P_MASK)) {
648             raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
649         }
650         switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
651         if (has_error_code) {
652             int type;
653             uint32_t mask;
654 
655             /* push the error code */
656             type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
657             shift = type >> 3;
658             if (env->segs[R_SS].flags & DESC_B_MASK) {
659                 mask = 0xffffffff;
660             } else {
661                 mask = 0xffff;
662             }
663             esp = (env->regs[R_ESP] - (2 << shift)) & mask;
664             ssp = env->segs[R_SS].base + esp;
665             if (shift) {
666                 cpu_stl_kernel(env, ssp, error_code);
667             } else {
668                 cpu_stw_kernel(env, ssp, error_code);
669             }
670             SET_ESP(esp, mask);
671         }
672         return;
673     }
674 
675     /* Otherwise, trap or interrupt gate */
676 
677     /* check valid bit */
678     if (!(e2 & DESC_P_MASK)) {
679         raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
680     }
681     selector = e1 >> 16;
682     offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
683     if ((selector & 0xfffc) == 0) {
684         raise_exception_err(env, EXCP0D_GPF, 0);
685     }
686     if (load_segment(env, &e1, &e2, selector) != 0) {
687         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
688     }
689     if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
690         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
691     }
692     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
693     if (dpl > cpl) {
694         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
695     }
696     if (!(e2 & DESC_P_MASK)) {
697         raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
698     }
699     if (e2 & DESC_C_MASK) {
700         dpl = cpl;
701     }
702     if (dpl < cpl) {
703         /* to inner privilege */
704         get_ss_esp_from_tss(env, &ss, &esp, dpl, 0);
705         if ((ss & 0xfffc) == 0) {
706             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
707         }
708         if ((ss & 3) != dpl) {
709             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
710         }
711         if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
712             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
713         }
714         ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
715         if (ss_dpl != dpl) {
716             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
717         }
718         if (!(ss_e2 & DESC_S_MASK) ||
719             (ss_e2 & DESC_CS_MASK) ||
720             !(ss_e2 & DESC_W_MASK)) {
721             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
722         }
723         if (!(ss_e2 & DESC_P_MASK)) {
724             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
725         }
726         new_stack = 1;
727         sp_mask = get_sp_mask(ss_e2);
728         ssp = get_seg_base(ss_e1, ss_e2);
729     } else  {
730         /* to same privilege */
731         if (vm86) {
732             raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
733         }
734         new_stack = 0;
735         sp_mask = get_sp_mask(env->segs[R_SS].flags);
736         ssp = env->segs[R_SS].base;
737         esp = env->regs[R_ESP];
738     }
739 
740     shift = type >> 3;
741 
742 #if 0
743     /* XXX: check that enough room is available */
744     push_size = 6 + (new_stack << 2) + (has_error_code << 1);
745     if (vm86) {
746         push_size += 8;
747     }
748     push_size <<= shift;
749 #endif
750     if (shift == 1) {
751         if (new_stack) {
752             if (vm86) {
753                 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
754                 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
755                 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
756                 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
757             }
758             PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
759             PUSHL(ssp, esp, sp_mask, env->regs[R_ESP]);
760         }
761         PUSHL(ssp, esp, sp_mask, cpu_compute_eflags(env));
762         PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
763         PUSHL(ssp, esp, sp_mask, old_eip);
764         if (has_error_code) {
765             PUSHL(ssp, esp, sp_mask, error_code);
766         }
767     } else {
768         if (new_stack) {
769             if (vm86) {
770                 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
771                 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
772                 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
773                 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
774             }
775             PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
776             PUSHW(ssp, esp, sp_mask, env->regs[R_ESP]);
777         }
778         PUSHW(ssp, esp, sp_mask, cpu_compute_eflags(env));
779         PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
780         PUSHW(ssp, esp, sp_mask, old_eip);
781         if (has_error_code) {
782             PUSHW(ssp, esp, sp_mask, error_code);
783         }
784     }
785 
786     /* interrupt gates clear the IF flag */
787     if ((type & 1) == 0) {
788         env->eflags &= ~IF_MASK;
789     }
790     env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
791 
792     if (new_stack) {
793         if (vm86) {
794             cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
795             cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
796             cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
797             cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
798         }
799         ss = (ss & ~3) | dpl;
800         cpu_x86_load_seg_cache(env, R_SS, ss,
801                                ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
802     }
803     SET_ESP(esp, sp_mask);
804 
805     selector = (selector & ~3) | dpl;
806     cpu_x86_load_seg_cache(env, R_CS, selector,
807                    get_seg_base(e1, e2),
808                    get_seg_limit(e1, e2),
809                    e2);
810     env->eip = offset;
811 }
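
/*
 * Stack frame built by do_interrupt_protected() for a 32-bit gate, highest
 * address first (a 16-bit gate pushes the same entries as words).  The
 * segment rows appear only when interrupting vm86 code, and old SS/ESP only
 * when the privilege level changes (new_stack):
 *
 *        old GS, old FS, old DS, old ES
 *        old SS
 *        old ESP
 *        EFLAGS
 *        old CS
 *        old EIP
 *        error code          (only for exceptions that define one)
 */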
812 
813 #ifdef TARGET_X86_64
814 
815 #define PUSHQ_RA(sp, val, ra)                   \
816     {                                           \
817         sp -= 8;                                \
818         cpu_stq_kernel_ra(env, sp, (val), ra);  \
819     }
820 
821 #define POPQ_RA(sp, val, ra)                    \
822     {                                           \
823         val = cpu_ldq_kernel_ra(env, sp, ra);   \
824         sp += 8;                                \
825     }
826 
827 #define PUSHQ(sp, val) PUSHQ_RA(sp, val, 0)
828 #define POPQ(sp, val) POPQ_RA(sp, val, 0)
829 
830 static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
831 {
832     X86CPU *cpu = env_archcpu(env);
833     int index, pg_mode;
834     target_ulong rsp;
835     int32_t sext;
836 
837 #if 0
838     printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
839            env->tr.base, env->tr.limit);
840 #endif
841 
842     if (!(env->tr.flags & DESC_P_MASK)) {
843         cpu_abort(CPU(cpu), "invalid tss");
844     }
845     index = 8 * level + 4;
846     if ((index + 7) > env->tr.limit) {
847         raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
848     }
849 
850     rsp = cpu_ldq_kernel(env, env->tr.base + index);
851 
852     /* test virtual address sign extension */
853     pg_mode = get_pg_mode(env);
854     sext = (int64_t)rsp >> (pg_mode & PG_MODE_LA57 ? 56 : 47);
855     if (sext != 0 && sext != -1) {
856         raise_exception_err(env, EXCP0C_STACK, 0);
857     }
858 
859     return rsp;
860 }
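
/*
 * Illustrative sketch (hypothetical helper, not part of the original code):
 * in the 64-bit TSS, RSP0/RSP1/RSP2 live at offsets 4/12/20 and IST1..IST7
 * at offsets 36..84, so "8 * level + 4" covers both the plain dpl case and
 * the "ist + 3" case used by do_interrupt64() below:
 */
#if 0
static void tss64_offset_example(void)
{
    assert(8 * 0 + 4 == 4);         /* RSP0 */
    assert(8 * 2 + 4 == 20);        /* RSP2 */
    assert(8 * (1 + 3) + 4 == 36);  /* IST1 */
    assert(8 * (7 + 3) + 4 == 84);  /* IST7 */
}
#endif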
861 
862 /* 64 bit interrupt */
863 static void do_interrupt64(CPUX86State *env, int intno, int is_int,
864                            int error_code, target_ulong next_eip, int is_hw)
865 {
866     SegmentCache *dt;
867     target_ulong ptr;
868     int type, dpl, selector, cpl, ist;
869     int has_error_code, new_stack;
870     uint32_t e1, e2, e3, ss;
871     target_ulong old_eip, esp, offset;
872 
873     has_error_code = 0;
874     if (!is_int && !is_hw) {
875         has_error_code = exception_has_error_code(intno);
876     }
877     if (is_int) {
878         old_eip = next_eip;
879     } else {
880         old_eip = env->eip;
881     }
882 
883     dt = &env->idt;
884     if (intno * 16 + 15 > dt->limit) {
885         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
886     }
887     ptr = dt->base + intno * 16;
888     e1 = cpu_ldl_kernel(env, ptr);
889     e2 = cpu_ldl_kernel(env, ptr + 4);
890     e3 = cpu_ldl_kernel(env, ptr + 8);
891     /* check gate type */
892     type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
893     switch (type) {
894     case 14: /* 386 interrupt gate */
895     case 15: /* 386 trap gate */
896         break;
897     default:
898         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
899         break;
900     }
901     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
902     cpl = env->hflags & HF_CPL_MASK;
903     /* check privilege if software int */
904     if (is_int && dpl < cpl) {
905         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
906     }
907     /* check valid bit */
908     if (!(e2 & DESC_P_MASK)) {
909         raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
910     }
911     selector = e1 >> 16;
912     offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
913     ist = e2 & 7;
914     if ((selector & 0xfffc) == 0) {
915         raise_exception_err(env, EXCP0D_GPF, 0);
916     }
917 
918     if (load_segment(env, &e1, &e2, selector) != 0) {
919         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
920     }
921     if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
922         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
923     }
924     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
925     if (dpl > cpl) {
926         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
927     }
928     if (!(e2 & DESC_P_MASK)) {
929         raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
930     }
931     if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
932         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
933     }
934     if (e2 & DESC_C_MASK) {
935         dpl = cpl;
936     }
937     if (dpl < cpl || ist != 0) {
938         /* to inner privilege */
939         new_stack = 1;
940         esp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl);
941         ss = 0;
942     } else {
943         /* to same privilege */
944         if (env->eflags & VM_MASK) {
945             raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
946         }
947         new_stack = 0;
948         esp = env->regs[R_ESP];
949     }
950     esp &= ~0xfLL; /* align stack */
951 
952     PUSHQ(esp, env->segs[R_SS].selector);
953     PUSHQ(esp, env->regs[R_ESP]);
954     PUSHQ(esp, cpu_compute_eflags(env));
955     PUSHQ(esp, env->segs[R_CS].selector);
956     PUSHQ(esp, old_eip);
957     if (has_error_code) {
958         PUSHQ(esp, error_code);
959     }
960 
961     /* interrupt gates clear the IF flag */
962     if ((type & 1) == 0) {
963         env->eflags &= ~IF_MASK;
964     }
965     env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
966 
967     if (new_stack) {
968         ss = 0 | dpl;
969         cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, dpl << DESC_DPL_SHIFT);
970     }
971     env->regs[R_ESP] = esp;
972 
973     selector = (selector & ~3) | dpl;
974     cpu_x86_load_seg_cache(env, R_CS, selector,
975                    get_seg_base(e1, e2),
976                    get_seg_limit(e1, e2),
977                    e2);
978     env->eip = offset;
979 }
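
/*
 * Unlike the 32-bit case, the 64-bit frame built above is fixed: SS, RSP,
 * RFLAGS, CS and RIP (plus an optional error code) are always pushed onto a
 * stack that has first been aligned down to 16 bytes, even when no privilege
 * change takes place.
 */
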
980 #endif /* TARGET_X86_64 */
981 
982 void helper_sysret(CPUX86State *env, int dflag)
983 {
984     int cpl, selector;
985 
986     if (!(env->efer & MSR_EFER_SCE)) {
987         raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
988     }
989     cpl = env->hflags & HF_CPL_MASK;
990     if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
991         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
992     }
993     selector = (env->star >> 48) & 0xffff;
994 #ifdef TARGET_X86_64
995     if (env->hflags & HF_LMA_MASK) {
996         cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
997                         | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
998                         NT_MASK);
999         if (dflag == 2) {
1000             cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1001                                    0, 0xffffffff,
1002                                    DESC_G_MASK | DESC_P_MASK |
1003                                    DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1004                                    DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1005                                    DESC_L_MASK);
1006             env->eip = env->regs[R_ECX];
1007         } else {
1008             cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1009                                    0, 0xffffffff,
1010                                    DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1011                                    DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1012                                    DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1013             env->eip = (uint32_t)env->regs[R_ECX];
1014         }
1015         cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
1016                                0, 0xffffffff,
1017                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1018                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1019                                DESC_W_MASK | DESC_A_MASK);
1020     } else
1021 #endif
1022     {
1023         env->eflags |= IF_MASK;
1024         cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1025                                0, 0xffffffff,
1026                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1027                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1028                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1029         env->eip = (uint32_t)env->regs[R_ECX];
1030         cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
1031                                0, 0xffffffff,
1032                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1033                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1034                                DESC_W_MASK | DESC_A_MASK);
1035     }
1036 }
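
/*
 * The return selectors above come purely from MSR_STAR[63:48].  For example,
 * with STAR[63:48] == 0x23 (the value a Linux guest typically programs), a
 * 64-bit SYSRET loads CS = (0x23 + 16) | 3 = 0x33 and
 * SS = (0x23 + 8) | 3 = 0x2b, while a legacy/compatibility return loads
 * CS = 0x23 | 3.
 */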
1037 
1038 /* real mode interrupt */
1039 static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
1040                               int error_code, unsigned int next_eip)
1041 {
1042     SegmentCache *dt;
1043     target_ulong ptr, ssp;
1044     int selector;
1045     uint32_t offset, esp;
1046     uint32_t old_cs, old_eip;
1047 
1048     /* real mode (simpler!) */
1049     dt = &env->idt;
1050     if (intno * 4 + 3 > dt->limit) {
1051         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
1052     }
1053     ptr = dt->base + intno * 4;
1054     offset = cpu_lduw_kernel(env, ptr);
1055     selector = cpu_lduw_kernel(env, ptr + 2);
1056     esp = env->regs[R_ESP];
1057     ssp = env->segs[R_SS].base;
1058     if (is_int) {
1059         old_eip = next_eip;
1060     } else {
1061         old_eip = env->eip;
1062     }
1063     old_cs = env->segs[R_CS].selector;
1064     /* XXX: use SS segment size? */
1065     PUSHW(ssp, esp, 0xffff, cpu_compute_eflags(env));
1066     PUSHW(ssp, esp, 0xffff, old_cs);
1067     PUSHW(ssp, esp, 0xffff, old_eip);
1068 
1069     /* update processor state */
1070     env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
1071     env->eip = offset;
1072     env->segs[R_CS].selector = selector;
1073     env->segs[R_CS].base = (selector << 4);
1074     env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1075 }
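
/*
 * In real mode each IVT entry is 4 bytes, IP first and then CS.  With the
 * usual IDT base of 0, INT 0x10 therefore fetches its target IP from linear
 * address 0x40 and CS from 0x42, and pushes FLAGS, the old CS and the old IP
 * (in that order) on the 16-bit stack.
 */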
1076 
1077 /*
1078  * Begin execution of an interrupt. is_int is TRUE if coming from the int
1079  * instruction. next_eip is the env->eip value AFTER the interrupt
1080  * instruction. It is only relevant if is_int is TRUE.
1081  */
1082 void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
1083                       int error_code, target_ulong next_eip, int is_hw)
1084 {
1085     CPUX86State *env = &cpu->env;
1086 
1087     if (qemu_loglevel_mask(CPU_LOG_INT)) {
1088         if ((env->cr[0] & CR0_PE_MASK)) {
1089             static int count;
1090 
1091             qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
1092                      " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1093                      count, intno, error_code, is_int,
1094                      env->hflags & HF_CPL_MASK,
1095                      env->segs[R_CS].selector, env->eip,
1096                      (int)env->segs[R_CS].base + env->eip,
1097                      env->segs[R_SS].selector, env->regs[R_ESP]);
1098             if (intno == 0x0e) {
1099                 qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
1100             } else {
1101                 qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]);
1102             }
1103             qemu_log("\n");
1104             log_cpu_state(CPU(cpu), CPU_DUMP_CCOP);
1105 #if 0
1106             {
1107                 int i;
1108                 target_ulong ptr;
1109 
1110                 qemu_log("       code=");
1111                 ptr = env->segs[R_CS].base + env->eip;
1112                 for (i = 0; i < 16; i++) {
1113                     qemu_log(" %02x", ldub(ptr + i));
1114                 }
1115                 qemu_log("\n");
1116             }
1117 #endif
1118             count++;
1119         }
1120     }
1121     if (env->cr[0] & CR0_PE_MASK) {
1122 #if !defined(CONFIG_USER_ONLY)
1123         if (env->hflags & HF_GUEST_MASK) {
1124             handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
1125         }
1126 #endif
1127 #ifdef TARGET_X86_64
1128         if (env->hflags & HF_LMA_MASK) {
1129             do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
1130         } else
1131 #endif
1132         {
1133             do_interrupt_protected(env, intno, is_int, error_code, next_eip,
1134                                    is_hw);
1135         }
1136     } else {
1137 #if !defined(CONFIG_USER_ONLY)
1138         if (env->hflags & HF_GUEST_MASK) {
1139             handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
1140         }
1141 #endif
1142         do_interrupt_real(env, intno, is_int, error_code, next_eip);
1143     }
1144 
1145 #if !defined(CONFIG_USER_ONLY)
1146     if (env->hflags & HF_GUEST_MASK) {
1147         CPUState *cs = CPU(cpu);
1148         uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb +
1149                                       offsetof(struct vmcb,
1150                                                control.event_inj));
1151 
1152         x86_stl_phys(cs,
1153                  env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
1154                  event_inj & ~SVM_EVTINJ_VALID);
1155     }
1156 #endif
1157 }
1158 
1159 void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
1160 {
1161     do_interrupt_all(env_archcpu(env), intno, 0, 0, 0, is_hw);
1162 }
1163 
1164 void helper_lldt(CPUX86State *env, int selector)
1165 {
1166     SegmentCache *dt;
1167     uint32_t e1, e2;
1168     int index, entry_limit;
1169     target_ulong ptr;
1170 
1171     selector &= 0xffff;
1172     if ((selector & 0xfffc) == 0) {
1173         /* XXX: NULL selector case: invalid LDT */
1174         env->ldt.base = 0;
1175         env->ldt.limit = 0;
1176     } else {
1177         if (selector & 0x4) {
1178             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1179         }
1180         dt = &env->gdt;
1181         index = selector & ~7;
1182 #ifdef TARGET_X86_64
1183         if (env->hflags & HF_LMA_MASK) {
1184             entry_limit = 15;
1185         } else
1186 #endif
1187         {
1188             entry_limit = 7;
1189         }
1190         if ((index + entry_limit) > dt->limit) {
1191             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1192         }
1193         ptr = dt->base + index;
1194         e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1195         e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1196         if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
1197             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1198         }
1199         if (!(e2 & DESC_P_MASK)) {
1200             raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1201         }
1202 #ifdef TARGET_X86_64
1203         if (env->hflags & HF_LMA_MASK) {
1204             uint32_t e3;
1205 
1206             e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
1207             load_seg_cache_raw_dt(&env->ldt, e1, e2);
1208             env->ldt.base |= (target_ulong)e3 << 32;
1209         } else
1210 #endif
1211         {
1212             load_seg_cache_raw_dt(&env->ldt, e1, e2);
1213         }
1214     }
1215     env->ldt.selector = selector;
1216 }
1217 
1218 void helper_ltr(CPUX86State *env, int selector)
1219 {
1220     SegmentCache *dt;
1221     uint32_t e1, e2;
1222     int index, type, entry_limit;
1223     target_ulong ptr;
1224 
1225     selector &= 0xffff;
1226     if ((selector & 0xfffc) == 0) {
1227         /* NULL selector case: invalid TR */
1228         env->tr.base = 0;
1229         env->tr.limit = 0;
1230         env->tr.flags = 0;
1231     } else {
1232         if (selector & 0x4) {
1233             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1234         }
1235         dt = &env->gdt;
1236         index = selector & ~7;
1237 #ifdef TARGET_X86_64
1238         if (env->hflags & HF_LMA_MASK) {
1239             entry_limit = 15;
1240         } else
1241 #endif
1242         {
1243             entry_limit = 7;
1244         }
1245         if ((index + entry_limit) > dt->limit) {
1246             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1247         }
1248         ptr = dt->base + index;
1249         e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1250         e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1251         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1252         if ((e2 & DESC_S_MASK) ||
1253             (type != 1 && type != 9)) {
1254             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1255         }
1256         if (!(e2 & DESC_P_MASK)) {
1257             raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1258         }
1259 #ifdef TARGET_X86_64
1260         if (env->hflags & HF_LMA_MASK) {
1261             uint32_t e3, e4;
1262 
1263             e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
1264             e4 = cpu_ldl_kernel_ra(env, ptr + 12, GETPC());
1265             if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
1266                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1267             }
1268             load_seg_cache_raw_dt(&env->tr, e1, e2);
1269             env->tr.base |= (target_ulong)e3 << 32;
1270         } else
1271 #endif
1272         {
1273             load_seg_cache_raw_dt(&env->tr, e1, e2);
1274         }
1275         e2 |= DESC_TSS_BUSY_MASK;
1276         cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
1277     }
1278     env->tr.selector = selector;
1279 }
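
/*
 * In long mode LDT and TSS descriptors are 16 bytes, which is why the two
 * helpers above use an entry_limit of 15 instead of 7 and read a third word
 * (e3) to get bits 63:32 of the base.  For example, a descriptor whose
 * legacy part encodes base = 0x00200000 and whose e3 word is 0x000000ff
 * yields a cached base of 0x000000ff00200000.
 */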
1280 
1281 /* only works in protected mode, outside VM86 mode. seg_reg must be != R_CS */
1282 void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
1283 {
1284     uint32_t e1, e2;
1285     int cpl, dpl, rpl;
1286     SegmentCache *dt;
1287     int index;
1288     target_ulong ptr;
1289 
1290     selector &= 0xffff;
1291     cpl = env->hflags & HF_CPL_MASK;
1292     if ((selector & 0xfffc) == 0) {
1293         /* null selector case */
1294         if (seg_reg == R_SS
1295 #ifdef TARGET_X86_64
1296             && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
1297 #endif
1298             ) {
1299             raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1300         }
1301         cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
1302     } else {
1303 
1304         if (selector & 0x4) {
1305             dt = &env->ldt;
1306         } else {
1307             dt = &env->gdt;
1308         }
1309         index = selector & ~7;
1310         if ((index + 7) > dt->limit) {
1311             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1312         }
1313         ptr = dt->base + index;
1314         e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1315         e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1316 
1317         if (!(e2 & DESC_S_MASK)) {
1318             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1319         }
1320         rpl = selector & 3;
1321         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1322         if (seg_reg == R_SS) {
1323             /* must be writable segment */
1324             if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
1325                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1326             }
1327             if (rpl != cpl || dpl != cpl) {
1328                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1329             }
1330         } else {
1331             /* must be readable segment */
1332             if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
1333                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1334             }
1335 
1336             if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
1337                 /* if not a conforming code segment, check the privilege rights */
1338                 if (dpl < cpl || dpl < rpl) {
1339                     raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1340                 }
1341             }
1342         }
1343 
1344         if (!(e2 & DESC_P_MASK)) {
1345             if (seg_reg == R_SS) {
1346                 raise_exception_err_ra(env, EXCP0C_STACK, selector & 0xfffc, GETPC());
1347             } else {
1348                 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1349             }
1350         }
1351 
1352         /* set the access bit if not already set */
1353         if (!(e2 & DESC_A_MASK)) {
1354             e2 |= DESC_A_MASK;
1355             cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
1356         }
1357 
1358         cpu_x86_load_seg_cache(env, seg_reg, selector,
1359                        get_seg_base(e1, e2),
1360                        get_seg_limit(e1, e2),
1361                        e2);
1362 #if 0
1363         qemu_log("load_seg: sel=0x%04x base=0x" TARGET_FMT_lx " limit=0x%08x flags=%08x\n",
1364                  selector, env->segs[seg_reg].base, env->segs[seg_reg].limit, env->segs[seg_reg].flags);
1365 #endif
1366     }
1367 }
1368 
1369 /* protected mode jump */
1370 void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
1371                            target_ulong next_eip)
1372 {
1373     int gate_cs, type;
1374     uint32_t e1, e2, cpl, dpl, rpl, limit;
1375 
1376     if ((new_cs & 0xfffc) == 0) {
1377         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1378     }
1379     if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
1380         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1381     }
1382     cpl = env->hflags & HF_CPL_MASK;
1383     if (e2 & DESC_S_MASK) {
1384         if (!(e2 & DESC_CS_MASK)) {
1385             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1386         }
1387         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1388         if (e2 & DESC_C_MASK) {
1389             /* conforming code segment */
1390             if (dpl > cpl) {
1391                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1392             }
1393         } else {
1394             /* non-conforming code segment */
1395             rpl = new_cs & 3;
1396             if (rpl > cpl) {
1397                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1398             }
1399             if (dpl != cpl) {
1400                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1401             }
1402         }
1403         if (!(e2 & DESC_P_MASK)) {
1404             raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1405         }
1406         limit = get_seg_limit(e1, e2);
1407         if (new_eip > limit &&
1408             (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) {
1409             raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1410         }
1411         cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1412                        get_seg_base(e1, e2), limit, e2);
1413         env->eip = new_eip;
1414     } else {
1415         /* jump to call or task gate */
1416         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1417         rpl = new_cs & 3;
1418         cpl = env->hflags & HF_CPL_MASK;
1419         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1420 
1421 #ifdef TARGET_X86_64
1422         if (env->efer & MSR_EFER_LMA) {
1423             if (type != 12) {
1424                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1425             }
1426         }
1427 #endif
1428         switch (type) {
1429         case 1: /* 286 TSS */
1430         case 9: /* 386 TSS */
1431         case 5: /* task gate */
1432             if (dpl < cpl || dpl < rpl) {
1433                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1434             }
1435             switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip, GETPC());
1436             break;
1437         case 4: /* 286 call gate */
1438         case 12: /* 386 call gate */
1439             if ((dpl < cpl) || (dpl < rpl)) {
1440                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1441             }
1442             if (!(e2 & DESC_P_MASK)) {
1443                 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1444             }
1445             gate_cs = e1 >> 16;
1446             new_eip = (e1 & 0xffff);
1447             if (type == 12) {
1448                 new_eip |= (e2 & 0xffff0000);
1449             }
1450 
1451 #ifdef TARGET_X86_64
1452             if (env->efer & MSR_EFER_LMA) {
1453                 /* load the upper 8 bytes of the 64-bit call gate */
1454                 if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) {
1455                     raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1456                                            GETPC());
1457                 }
1458                 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1459                 if (type != 0) {
1460                     raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1461                                            GETPC());
1462                 }
1463                 new_eip |= ((target_ulong)e1) << 32;
1464             }
1465 #endif
1466 
1467             if (load_segment_ra(env, &e1, &e2, gate_cs, GETPC()) != 0) {
1468                 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1469             }
1470             dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1471             /* must be code segment */
1472             if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
1473                  (DESC_S_MASK | DESC_CS_MASK))) {
1474                 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1475             }
1476             if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
1477                 (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
1478                 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1479             }
1480 #ifdef TARGET_X86_64
1481             if (env->efer & MSR_EFER_LMA) {
1482                 if (!(e2 & DESC_L_MASK)) {
1483                     raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1484                 }
1485                 if (e2 & DESC_B_MASK) {
1486                     raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1487                 }
1488             }
1489 #endif
1490             if (!(e2 & DESC_P_MASK)) {
1491                 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1492             }
1493             limit = get_seg_limit(e1, e2);
1494             if (new_eip > limit &&
1495                 (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) {
1496                 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1497             }
1498             cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
1499                                    get_seg_base(e1, e2), limit, e2);
1500             env->eip = new_eip;
1501             break;
1502         default:
1503             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1504             break;
1505         }
1506     }
1507 }
1508 
1509 /* real mode call */
1510 void helper_lcall_real(CPUX86State *env, uint32_t new_cs, uint32_t new_eip,
1511                        int shift, uint32_t next_eip)
1512 {
1513     uint32_t esp, esp_mask;
1514     target_ulong ssp;
1515 
1516     esp = env->regs[R_ESP];
1517     esp_mask = get_sp_mask(env->segs[R_SS].flags);
1518     ssp = env->segs[R_SS].base;
1519     if (shift) {
1520         PUSHL_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
1521         PUSHL_RA(ssp, esp, esp_mask, next_eip, GETPC());
1522     } else {
1523         PUSHW_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
1524         PUSHW_RA(ssp, esp, esp_mask, next_eip, GETPC());
1525     }
1526 
1527     SET_ESP(esp, esp_mask);
1528     env->eip = new_eip;
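         /*
          * Real-mode CS load: no descriptor checks; the base is simply
          * selector << 4 and the cached limit/flags are left unchanged.
          */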
1529     env->segs[R_CS].selector = new_cs;
1530     env->segs[R_CS].base = (new_cs << 4);
1531 }
1532 
1533 /* protected mode call */
1534 void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
1535                             int shift, target_ulong next_eip)
1536 {
1537     int new_stack, i;
1538     uint32_t e1, e2, cpl, dpl, rpl, selector, param_count;
1539     uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, type, ss_dpl, sp_mask;
1540     uint32_t val, limit, old_sp_mask;
1541     target_ulong ssp, old_ssp, offset, sp;
1542 
1543     LOG_PCALL("lcall %04x:" TARGET_FMT_lx " s=%d\n", new_cs, new_eip, shift);
1544     LOG_PCALL_STATE(env_cpu(env));
1545     if ((new_cs & 0xfffc) == 0) {
1546         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1547     }
1548     if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
1549         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1550     }
1551     cpl = env->hflags & HF_CPL_MASK;
1552     LOG_PCALL("desc=%08x:%08x\n", e1, e2);
1553     if (e2 & DESC_S_MASK) {
1554         if (!(e2 & DESC_CS_MASK)) {
1555             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1556         }
1557         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1558         if (e2 & DESC_C_MASK) {
1559             /* conforming code segment */
1560             if (dpl > cpl) {
1561                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1562             }
1563         } else {
1564             /* non-conforming code segment */
1565             rpl = new_cs & 3;
1566             if (rpl > cpl) {
1567                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1568             }
1569             if (dpl != cpl) {
1570                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1571             }
1572         }
1573         if (!(e2 & DESC_P_MASK)) {
1574             raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1575         }
1576 
1577 #ifdef TARGET_X86_64
1578         /* XXX: check 16/32 bit cases in long mode */
1579         if (shift == 2) {
1580             target_ulong rsp;
1581 
1582             /* 64 bit case */
1583             rsp = env->regs[R_ESP];
1584             PUSHQ_RA(rsp, env->segs[R_CS].selector, GETPC());
1585             PUSHQ_RA(rsp, next_eip, GETPC());
1586             /* from this point, not restartable */
1587             env->regs[R_ESP] = rsp;
1588             cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1589                                    get_seg_base(e1, e2),
1590                                    get_seg_limit(e1, e2), e2);
1591             env->eip = new_eip;
1592         } else
1593 #endif
1594         {
1595             sp = env->regs[R_ESP];
1596             sp_mask = get_sp_mask(env->segs[R_SS].flags);
1597             ssp = env->segs[R_SS].base;
1598             if (shift) {
1599                 PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
1600                 PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
1601             } else {
1602                 PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
1603                 PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
1604             }
1605 
1606             limit = get_seg_limit(e1, e2);
1607             if (new_eip > limit) {
1608                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1609             }
1610             /* from this point, not restartable */
1611             SET_ESP(sp, sp_mask);
1612             cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1613                                    get_seg_base(e1, e2), limit, e2);
1614             env->eip = new_eip;
1615         }
1616     } else {
1617         /* check gate type */
1618         type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1619         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1620         rpl = new_cs & 3;
1621 
1622 #ifdef TARGET_X86_64
1623         if (env->efer & MSR_EFER_LMA) {
1624             if (type != 12) {
1625                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1626             }
1627         }
1628 #endif
1629 
1630         switch (type) {
1631         case 1: /* available 286 TSS */
1632         case 9: /* available 386 TSS */
1633         case 5: /* task gate */
1634             if (dpl < cpl || dpl < rpl) {
1635                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1636             }
1637             switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip, GETPC());
1638             return;
1639         case 4: /* 286 call gate */
1640         case 12: /* 386 call gate */
1641             break;
1642         default:
1643             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1644             break;
1645         }
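             /*
              * Gate operand size from the descriptor type: 4 >> 3 == 0 for a
              * 16-bit 286 gate, 12 >> 3 == 1 for a 32-bit 386 gate; bumped to
              * 2 below for 64-bit call gates in long mode.
              */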
1646         shift = type >> 3;
1647 
1648         if (dpl < cpl || dpl < rpl) {
1649             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1650         }
1651         /* check valid bit */
1652         if (!(e2 & DESC_P_MASK)) {
1653             raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1654         }
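             /*
              * Call gate layout: bits 31:16 of e1 hold the target CS selector,
              * the entry offset is split between e1 and e2, and the low 5 bits
              * of e2 give the number of parameters to copy on a stack switch.
              */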
1655         selector = e1 >> 16;
1656         param_count = e2 & 0x1f;
1657         offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1658 #ifdef TARGET_X86_64
1659         if (env->efer & MSR_EFER_LMA) {
1660             /* load the upper 8 bytes of the 64-bit call gate */
1661             if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) {
1662                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1663                                        GETPC());
1664             }
1665             type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1666             if (type != 0) {
1667                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1668                                        GETPC());
1669             }
1670             offset |= ((target_ulong)e1) << 32;
1671         }
1672 #endif
1673         if ((selector & 0xfffc) == 0) {
1674             raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1675         }
1676 
1677         if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
1678             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1679         }
1680         if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
1681             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1682         }
1683         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1684         if (dpl > cpl) {
1685             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1686         }
1687 #ifdef TARGET_X86_64
1688         if (env->efer & MSR_EFER_LMA) {
1689             if (!(e2 & DESC_L_MASK)) {
1690                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1691             }
1692             if (e2 & DESC_B_MASK) {
1693                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1694             }
1695             shift++;
1696         }
1697 #endif
1698         if (!(e2 & DESC_P_MASK)) {
1699             raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1700         }
1701 
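             /*
              * A call through the gate to a more privileged, non-conforming
              * code segment switches stacks: the new SS:ESP is fetched from
              * the TSS for the target DPL and param_count entries are copied
              * from the old stack to the new one.
              */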
1702         if (!(e2 & DESC_C_MASK) && dpl < cpl) {
1703             /* to inner privilege */
1704 #ifdef TARGET_X86_64
1705             if (shift == 2) {
1706                 sp = get_rsp_from_tss(env, dpl);
1707                 ss = dpl;  /* SS = NULL selector with RPL = new CPL */
1708                 new_stack = 1;
1709                 sp_mask = 0;
1710                 ssp = 0;  /* SS base is always zero in IA-32e mode */
1711                 LOG_PCALL("new ss:rsp=%04x:" TARGET_FMT_lx " env->regs[R_ESP]="
1712                           TARGET_FMT_lx "\n", ss, sp, env->regs[R_ESP]);
1713             } else
1714 #endif
1715             {
1716                 uint32_t sp32;
1717                 get_ss_esp_from_tss(env, &ss, &sp32, dpl, GETPC());
1718                 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
1719                           TARGET_FMT_lx "\n", ss, sp32, param_count,
1720                           env->regs[R_ESP]);
1721                 sp = sp32;
1722                 if ((ss & 0xfffc) == 0) {
1723                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1724                 }
1725                 if ((ss & 3) != dpl) {
1726                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1727                 }
1728                 if (load_segment_ra(env, &ss_e1, &ss_e2, ss, GETPC()) != 0) {
1729                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1730                 }
1731                 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
1732                 if (ss_dpl != dpl) {
1733                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1734                 }
1735                 if (!(ss_e2 & DESC_S_MASK) ||
1736                     (ss_e2 & DESC_CS_MASK) ||
1737                     !(ss_e2 & DESC_W_MASK)) {
1738                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1739                 }
1740                 if (!(ss_e2 & DESC_P_MASK)) {
1741                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1742                 }
1743 
1744                 sp_mask = get_sp_mask(ss_e2);
1745                 ssp = get_seg_base(ss_e1, ss_e2);
1746             }
1747 
1748             /* push_size = ((param_count * 2) + 8) << shift; */
1749 
1750             old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
1751             old_ssp = env->segs[R_SS].base;
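                 /*
                  * Resulting stack, from higher to lower addresses: old SS,
                  * old ESP, param[count-1] .. param[0] (64-bit gates copy no
                  * parameters), followed further below by CS and EIP.
                  */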
1752 #ifdef TARGET_X86_64
1753             if (shift == 2) {
1754                 /* XXX: verify if new stack address is canonical */
1755                 PUSHQ_RA(sp, env->segs[R_SS].selector, GETPC());
1756                 PUSHQ_RA(sp, env->regs[R_ESP], GETPC());
1757                 /* parameters aren't supported for 64-bit call gates */
1758             } else
1759 #endif
1760             if (shift == 1) {
1761                 PUSHL_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
1762                 PUSHL_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
1763                 for (i = param_count - 1; i >= 0; i--) {
1764                     val = cpu_ldl_kernel_ra(env, old_ssp +
1765                                             ((env->regs[R_ESP] + i * 4) &
1766                                              old_sp_mask), GETPC());
1767                     PUSHL_RA(ssp, sp, sp_mask, val, GETPC());
1768                 }
1769             } else {
1770                 PUSHW_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
1771                 PUSHW_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
1772                 for (i = param_count - 1; i >= 0; i--) {
1773                     val = cpu_lduw_kernel_ra(env, old_ssp +
1774                                              ((env->regs[R_ESP] + i * 2) &
1775                                               old_sp_mask), GETPC());
1776                     PUSHW_RA(ssp, sp, sp_mask, val, GETPC());
1777                 }
1778             }
1779             new_stack = 1;
1780         } else {
1781             /* to same privilege */
1782             sp = env->regs[R_ESP];
1783             sp_mask = get_sp_mask(env->segs[R_SS].flags);
1784             ssp = env->segs[R_SS].base;
1785             /* push_size = (4 << shift); */
1786             new_stack = 0;
1787         }
1788 
1789 #ifdef TARGET_X86_64
1790         if (shift == 2) {
1791             PUSHQ_RA(sp, env->segs[R_CS].selector, GETPC());
1792             PUSHQ_RA(sp, next_eip, GETPC());
1793         } else
1794 #endif
1795         if (shift == 1) {
1796             PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
1797             PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
1798         } else {
1799             PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
1800             PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
1801         }
1802 
1803         /* from this point, not restartable */
1804 
1805         if (new_stack) {
1806 #ifdef TARGET_X86_64
1807             if (shift == 2) {
1808                 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
1809             } else
1810 #endif
1811             {
1812                 ss = (ss & ~3) | dpl;
1813                 cpu_x86_load_seg_cache(env, R_SS, ss,
1814                                        ssp,
1815                                        get_seg_limit(ss_e1, ss_e2),
1816                                        ss_e2);
1817             }
1818         }
1819 
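             /* load the target CS, forcing its RPL to the code segment's DPL */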
1820         selector = (selector & ~3) | dpl;
1821         cpu_x86_load_seg_cache(env, R_CS, selector,
1822                        get_seg_base(e1, e2),
1823                        get_seg_limit(e1, e2),
1824                        e2);
1825         SET_ESP(sp, sp_mask);
1826         env->eip = offset;
1827     }
1828 }
1829 
1830 /* real and vm86 mode iret */
1831 void helper_iret_real(CPUX86State *env, int shift)
1832 {
1833     uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
1834     target_ulong ssp;
1835     int eflags_mask;
1836 
1837     sp_mask = 0xffff; /* XXX: use SS segment size? */
1838     sp = env->regs[R_ESP];
1839     ssp = env->segs[R_SS].base;
1840     if (shift == 1) {
1841         /* 32 bits */
1842         POPL_RA(ssp, sp, sp_mask, new_eip, GETPC());
1843         POPL_RA(ssp, sp, sp_mask, new_cs, GETPC());
1844         new_cs &= 0xffff;
1845         POPL_RA(ssp, sp, sp_mask, new_eflags, GETPC());
1846     } else {
1847         /* 16 bits */
1848         POPW_RA(ssp, sp, sp_mask, new_eip, GETPC());
1849         POPW_RA(ssp, sp, sp_mask, new_cs, GETPC());
1850         POPW_RA(ssp, sp, sp_mask, new_eflags, GETPC());
1851     }
1852     env->regs[R_ESP] = (env->regs[R_ESP] & ~sp_mask) | (sp & sp_mask);
1853     env->segs[R_CS].selector = new_cs;
1854     env->segs[R_CS].base = (new_cs << 4);
1855     env->eip = new_eip;
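         /* IRET in VM86 mode may not change IOPL, so it is masked out. */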
1856     if (env->eflags & VM_MASK) {
1857         eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
1858             NT_MASK;
1859     } else {
1860         eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
1861             RF_MASK | NT_MASK;
1862     }
1863     if (shift == 0) {
1864         eflags_mask &= 0xffff;
1865     }
1866     cpu_load_eflags(env, new_eflags, eflags_mask);
1867     env->hflags2 &= ~HF2_NMI_MASK;
1868 }
1869 
1870 static inline void validate_seg(CPUX86State *env, X86Seg seg_reg, int cpl)
1871 {
1872     int dpl;
1873     uint32_t e2;
1874 
1875     /* XXX: on x86_64, FS and GS are not nullified because they may still
1876        contain a valid base; it is unclear how a real x86_64 CPU behaves
1877        here */
1878     if ((seg_reg == R_FS || seg_reg == R_GS) &&
1879         (env->segs[seg_reg].selector & 0xfffc) == 0) {
1880         return;
1881     }
1882 
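         /*
          * On a return to an outer privilege level, data and non-conforming
          * code segments the new CPL cannot use are invalidated by clearing
          * the selector and the P bit in the cached descriptor.
          */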
1883     e2 = env->segs[seg_reg].flags;
1884     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1885     if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
1886         /* data or non-conforming code segment */
1887         if (dpl < cpl) {
1888             cpu_x86_load_seg_cache(env, seg_reg, 0,
1889                                    env->segs[seg_reg].base,
1890                                    env->segs[seg_reg].limit,
1891                                    env->segs[seg_reg].flags & ~DESC_P_MASK);
1892         }
1893     }
1894 }
1895 
1896 /* protected mode iret */
1897 static inline void helper_ret_protected(CPUX86State *env, int shift,
1898                                         int is_iret, int addend,
1899                                         uintptr_t retaddr)
1900 {
1901     uint32_t new_cs, new_eflags, new_ss;
1902     uint32_t new_es, new_ds, new_fs, new_gs;
1903     uint32_t e1, e2, ss_e1, ss_e2;
1904     int cpl, dpl, rpl, eflags_mask, iopl;
1905     target_ulong ssp, sp, new_eip, new_esp, sp_mask;
1906 
1907 #ifdef TARGET_X86_64
1908     if (shift == 2) {
1909         sp_mask = -1;
1910     } else
1911 #endif
1912     {
1913         sp_mask = get_sp_mask(env->segs[R_SS].flags);
1914     }
1915     sp = env->regs[R_ESP];
1916     ssp = env->segs[R_SS].base;
1917     new_eflags = 0; /* avoid warning */
1918 #ifdef TARGET_X86_64
1919     if (shift == 2) {
1920         POPQ_RA(sp, new_eip, retaddr);
1921         POPQ_RA(sp, new_cs, retaddr);
1922         new_cs &= 0xffff;
1923         if (is_iret) {
1924             POPQ_RA(sp, new_eflags, retaddr);
1925         }
1926     } else
1927 #endif
1928     {
1929         if (shift == 1) {
1930             /* 32 bits */
1931             POPL_RA(ssp, sp, sp_mask, new_eip, retaddr);
1932             POPL_RA(ssp, sp, sp_mask, new_cs, retaddr);
1933             new_cs &= 0xffff;
1934             if (is_iret) {
1935                 POPL_RA(ssp, sp, sp_mask, new_eflags, retaddr);
1936                 if (new_eflags & VM_MASK) {
1937                     goto return_to_vm86;
1938                 }
1939             }
1940         } else {
1941             /* 16 bits */
1942             POPW_RA(ssp, sp, sp_mask, new_eip, retaddr);
1943             POPW_RA(ssp, sp, sp_mask, new_cs, retaddr);
1944             if (is_iret) {
1945                 POPW_RA(ssp, sp, sp_mask, new_eflags, retaddr);
1946             }
1947         }
1948     }
1949     LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
1950               new_cs, new_eip, shift, addend);
1951     LOG_PCALL_STATE(env_cpu(env));
1952     if ((new_cs & 0xfffc) == 0) {
1953         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
1954     }
1955     if (load_segment_ra(env, &e1, &e2, new_cs, retaddr) != 0) {
1956         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
1957     }
1958     if (!(e2 & DESC_S_MASK) ||
1959         !(e2 & DESC_CS_MASK)) {
1960         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
1961     }
1962     cpl = env->hflags & HF_CPL_MASK;
1963     rpl = new_cs & 3;
1964     if (rpl < cpl) {
1965         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
1966     }
1967     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1968     if (e2 & DESC_C_MASK) {
1969         if (dpl > rpl) {
1970             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
1971         }
1972     } else {
1973         if (dpl != rpl) {
1974             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
1975         }
1976     }
1977     if (!(e2 & DESC_P_MASK)) {
1978         raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, retaddr);
1979     }
1980 
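         /*
          * For RET n, discard the immediate count from the stack just popped;
          * after a stack switch the same count is discarded again from the
          * outer stack below.
          */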
1981     sp += addend;
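         /* in 64-bit mode IRET always pops SS:RSP, even without a CPL change */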
1982     if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
1983                        ((env->hflags & HF_CS64_MASK) && !is_iret))) {
1984         /* return to same privilege level */
1985         cpu_x86_load_seg_cache(env, R_CS, new_cs,
1986                        get_seg_base(e1, e2),
1987                        get_seg_limit(e1, e2),
1988                        e2);
1989     } else {
1990         /* return to different privilege level */
1991 #ifdef TARGET_X86_64
1992         if (shift == 2) {
1993             POPQ_RA(sp, new_esp, retaddr);
1994             POPQ_RA(sp, new_ss, retaddr);
1995             new_ss &= 0xffff;
1996         } else
1997 #endif
1998         {
1999             if (shift == 1) {
2000                 /* 32 bits */
2001                 POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
2002                 POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
2003                 new_ss &= 0xffff;
2004             } else {
2005                 /* 16 bits */
2006                 POPW_RA(ssp, sp, sp_mask, new_esp, retaddr);
2007                 POPW_RA(ssp, sp, sp_mask, new_ss, retaddr);
2008             }
2009         }
2010         LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
2011                   new_ss, new_esp);
2012         if ((new_ss & 0xfffc) == 0) {
2013 #ifdef TARGET_X86_64
2014             /* a NULL SS is allowed in long mode if the new CPL != 3 */
2015             /* XXX: test CS64? */
2016             if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2017                 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2018                                        0, 0xffffffff,
2019                                        DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2020                                        DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2021                                        DESC_W_MASK | DESC_A_MASK);
2022                 ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
2023             } else
2024 #endif
2025             {
2026                 raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
2027             }
2028         } else {
2029             if ((new_ss & 3) != rpl) {
2030                 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2031             }
2032             if (load_segment_ra(env, &ss_e1, &ss_e2, new_ss, retaddr) != 0) {
2033                 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2034             }
2035             if (!(ss_e2 & DESC_S_MASK) ||
2036                 (ss_e2 & DESC_CS_MASK) ||
2037                 !(ss_e2 & DESC_W_MASK)) {
2038                 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2039             }
2040             dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2041             if (dpl != rpl) {
2042                 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2043             }
2044             if (!(ss_e2 & DESC_P_MASK)) {
2045                 raise_exception_err_ra(env, EXCP0B_NOSEG, new_ss & 0xfffc, retaddr);
2046             }
2047             cpu_x86_load_seg_cache(env, R_SS, new_ss,
2048                                    get_seg_base(ss_e1, ss_e2),
2049                                    get_seg_limit(ss_e1, ss_e2),
2050                                    ss_e2);
2051         }
2052 
2053         cpu_x86_load_seg_cache(env, R_CS, new_cs,
2054                        get_seg_base(e1, e2),
2055                        get_seg_limit(e1, e2),
2056                        e2);
2057         sp = new_esp;
2058 #ifdef TARGET_X86_64
2059         if (env->hflags & HF_CS64_MASK) {
2060             sp_mask = -1;
2061         } else
2062 #endif
2063         {
2064             sp_mask = get_sp_mask(ss_e2);
2065         }
2066 
2067         /* validate data segments */
2068         validate_seg(env, R_ES, rpl);
2069         validate_seg(env, R_DS, rpl);
2070         validate_seg(env, R_FS, rpl);
2071         validate_seg(env, R_GS, rpl);
2072 
2073         sp += addend;
2074     }
2075     SET_ESP(sp, sp_mask);
2076     env->eip = new_eip;
2077     if (is_iret) {
2078         /* NOTE: 'cpl' is the _old_ CPL */
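             /* IOPL can only be modified at CPL 0; IF only when CPL <= IOPL */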
2079         eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2080         if (cpl == 0) {
2081             eflags_mask |= IOPL_MASK;
2082         }
2083         iopl = (env->eflags >> IOPL_SHIFT) & 3;
2084         if (cpl <= iopl) {
2085             eflags_mask |= IF_MASK;
2086         }
2087         if (shift == 0) {
2088             eflags_mask &= 0xffff;
2089         }
2090         cpu_load_eflags(env, new_eflags, eflags_mask);
2091     }
2092     return;
2093 
2094  return_to_vm86:
2095     POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
2096     POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
2097     POPL_RA(ssp, sp, sp_mask, new_es, retaddr);
2098     POPL_RA(ssp, sp, sp_mask, new_ds, retaddr);
2099     POPL_RA(ssp, sp, sp_mask, new_fs, retaddr);
2100     POPL_RA(ssp, sp, sp_mask, new_gs, retaddr);
2101 
2102     /* modify processor state */
2103     cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
2104                     IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
2105                     VIP_MASK);
2106     load_seg_vm(env, R_CS, new_cs & 0xffff);
2107     load_seg_vm(env, R_SS, new_ss & 0xffff);
2108     load_seg_vm(env, R_ES, new_es & 0xffff);
2109     load_seg_vm(env, R_DS, new_ds & 0xffff);
2110     load_seg_vm(env, R_FS, new_fs & 0xffff);
2111     load_seg_vm(env, R_GS, new_gs & 0xffff);
2112 
2113     env->eip = new_eip & 0xffff;
2114     env->regs[R_ESP] = new_esp;
2115 }
2116 
2117 void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
2118 {
2119     int tss_selector, type;
2120     uint32_t e1, e2;
2121 
2122     /* NT set: nested task return, switch back through the TSS back link */
2123     if (env->eflags & NT_MASK) {
2124 #ifdef TARGET_X86_64
2125         if (env->hflags & HF_LMA_MASK) {
2126             raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2127         }
2128 #endif
2129         tss_selector = cpu_lduw_kernel_ra(env, env->tr.base + 0, GETPC());
2130         if (tss_selector & 4) {
2131             raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2132         }
2133         if (load_segment_ra(env, &e1, &e2, tss_selector, GETPC()) != 0) {
2134             raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2135         }
2136         type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2137         /* NOTE: 0x17 masks a busy 386 TSS (0xb) down to 3, accepting both busy TSS types */
2138         if (type != 3) {
2139             raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2140         }
2141         switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip, GETPC());
2142     } else {
2143         helper_ret_protected(env, shift, 1, 0, GETPC());
2144     }
2145     env->hflags2 &= ~HF2_NMI_MASK;
2146 }
2147 
2148 void helper_lret_protected(CPUX86State *env, int shift, int addend)
2149 {
2150     helper_ret_protected(env, shift, 0, addend, GETPC());
2151 }
2152 
2153 void helper_sysenter(CPUX86State *env)
2154 {
2155     if (env->sysenter_cs == 0) {
2156         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2157     }
2158     env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2159 
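         /*
          * SYSENTER enters ring 0 with flat segments derived from the
          * IA32_SYSENTER_CS MSR: CS from the MSR value and SS from the MSR
          * value + 8, both with base 0 and a 4 GiB limit (64-bit CS when in
          * long mode).
          */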
2160 #ifdef TARGET_X86_64
2161     if (env->hflags & HF_LMA_MASK) {
2162         cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2163                                0, 0xffffffff,
2164                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2165                                DESC_S_MASK |
2166                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2167                                DESC_L_MASK);
2168     } else
2169 #endif
2170     {
2171         cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2172                                0, 0xffffffff,
2173                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2174                                DESC_S_MASK |
2175                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2176     }
2177     cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2178                            0, 0xffffffff,
2179                            DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2180                            DESC_S_MASK |
2181                            DESC_W_MASK | DESC_A_MASK);
2182     env->regs[R_ESP] = env->sysenter_esp;
2183     env->eip = env->sysenter_eip;
2184 }
2185 
2186 void helper_sysexit(CPUX86State *env, int dflag)
2187 {
2188     int cpl;
2189 
2190     cpl = env->hflags & HF_CPL_MASK;
2191     if (env->sysenter_cs == 0 || cpl != 0) {
2192         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2193     }
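         /*
          * SYSEXIT returns to CPL 3 with selectors derived from
          * IA32_SYSENTER_CS: +16/+24 for a 32-bit exit, +32/+40 for a 64-bit
          * exit; the new EIP and ESP are taken from EDX and ECX.
          */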
2194 #ifdef TARGET_X86_64
2195     if (dflag == 2) {
2196         cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
2197                                3, 0, 0xffffffff,
2198                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2199                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2200                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2201                                DESC_L_MASK);
2202         cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
2203                                3, 0, 0xffffffff,
2204                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2205                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2206                                DESC_W_MASK | DESC_A_MASK);
2207     } else
2208 #endif
2209     {
2210         cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
2211                                3, 0, 0xffffffff,
2212                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2213                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2214                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2215         cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
2216                                3, 0, 0xffffffff,
2217                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2218                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2219                                DESC_W_MASK | DESC_A_MASK);
2220     }
2221     env->regs[R_ESP] = env->regs[R_ECX];
2222     env->eip = env->regs[R_EDX];
2223 }
2224 
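     /*
      * LSL: return the segment limit and set ZF when the descriptor's type
      * and privilege allow it to be examined at the current CPL/RPL;
      * otherwise clear ZF and return 0 without faulting.
      */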
2225 target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
2226 {
2227     unsigned int limit;
2228     uint32_t e1, e2, eflags, selector;
2229     int rpl, dpl, cpl, type;
2230 
2231     selector = selector1 & 0xffff;
2232     eflags = cpu_cc_compute_all(env, CC_OP);
2233     if ((selector & 0xfffc) == 0) {
2234         goto fail;
2235     }
2236     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2237         goto fail;
2238     }
2239     rpl = selector & 3;
2240     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2241     cpl = env->hflags & HF_CPL_MASK;
2242     if (e2 & DESC_S_MASK) {
2243         if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2244             /* conforming */
2245         } else {
2246             if (dpl < cpl || dpl < rpl) {
2247                 goto fail;
2248             }
2249         }
2250     } else {
2251         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2252         switch (type) {
2253         case 1:
2254         case 2:
2255         case 3:
2256         case 9:
2257         case 11:
2258             break;
2259         default:
2260             goto fail;
2261         }
2262         if (dpl < cpl || dpl < rpl) {
2263         fail:
2264             CC_SRC = eflags & ~CC_Z;
2265             return 0;
2266         }
2267     }
2268     limit = get_seg_limit(e1, e2);
2269     CC_SRC = eflags | CC_Z;
2270     return limit;
2271 }
2272 
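     /*
      * LAR: return the access rights (second descriptor dword masked with
      * 0x00f0ff00) and set ZF when the descriptor may be examined at the
      * current CPL/RPL; otherwise clear ZF and return 0.
      */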
2273 target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
2274 {
2275     uint32_t e1, e2, eflags, selector;
2276     int rpl, dpl, cpl, type;
2277 
2278     selector = selector1 & 0xffff;
2279     eflags = cpu_cc_compute_all(env, CC_OP);
2280     if ((selector & 0xfffc) == 0) {
2281         goto fail;
2282     }
2283     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2284         goto fail;
2285     }
2286     rpl = selector & 3;
2287     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2288     cpl = env->hflags & HF_CPL_MASK;
2289     if (e2 & DESC_S_MASK) {
2290         if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2291             /* conforming */
2292         } else {
2293             if (dpl < cpl || dpl < rpl) {
2294                 goto fail;
2295             }
2296         }
2297     } else {
2298         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2299         switch (type) {
2300         case 1:
2301         case 2:
2302         case 3:
2303         case 4:
2304         case 5:
2305         case 9:
2306         case 11:
2307         case 12:
2308             break;
2309         default:
2310             goto fail;
2311         }
2312         if (dpl < cpl || dpl < rpl) {
2313         fail:
2314             CC_SRC = eflags & ~CC_Z;
2315             return 0;
2316         }
2317     }
2318     CC_SRC = eflags | CC_Z;
2319     return e2 & 0x00f0ff00;
2320 }
2321 
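     /*
      * VERR: set ZF if the segment is readable at the current CPL/RPL,
      * otherwise clear it.
      */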
2322 void helper_verr(CPUX86State *env, target_ulong selector1)
2323 {
2324     uint32_t e1, e2, eflags, selector;
2325     int rpl, dpl, cpl;
2326 
2327     selector = selector1 & 0xffff;
2328     eflags = cpu_cc_compute_all(env, CC_OP);
2329     if ((selector & 0xfffc) == 0) {
2330         goto fail;
2331     }
2332     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2333         goto fail;
2334     }
2335     if (!(e2 & DESC_S_MASK)) {
2336         goto fail;
2337     }
2338     rpl = selector & 3;
2339     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2340     cpl = env->hflags & HF_CPL_MASK;
2341     if (e2 & DESC_CS_MASK) {
2342         if (!(e2 & DESC_R_MASK)) {
2343             goto fail;
2344         }
2345         if (!(e2 & DESC_C_MASK)) {
2346             if (dpl < cpl || dpl < rpl) {
2347                 goto fail;
2348             }
2349         }
2350     } else {
2351         if (dpl < cpl || dpl < rpl) {
2352         fail:
2353             CC_SRC = eflags & ~CC_Z;
2354             return;
2355         }
2356     }
2357     CC_SRC = eflags | CC_Z;
2358 }
2359 
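     /*
      * VERW: set ZF if the segment is a data segment writable at the
      * current CPL/RPL, otherwise clear it.
      */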
2360 void helper_verw(CPUX86State *env, target_ulong selector1)
2361 {
2362     uint32_t e1, e2, eflags, selector;
2363     int rpl, dpl, cpl;
2364 
2365     selector = selector1 & 0xffff;
2366     eflags = cpu_cc_compute_all(env, CC_OP);
2367     if ((selector & 0xfffc) == 0) {
2368         goto fail;
2369     }
2370     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2371         goto fail;
2372     }
2373     if (!(e2 & DESC_S_MASK)) {
2374         goto fail;
2375     }
2376     rpl = selector & 3;
2377     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2378     cpl = env->hflags & HF_CPL_MASK;
2379     if (e2 & DESC_CS_MASK) {
2380         goto fail;
2381     } else {
2382         if (dpl < cpl || dpl < rpl) {
2383             goto fail;
2384         }
2385         if (!(e2 & DESC_W_MASK)) {
2386         fail:
2387             CC_SRC = eflags & ~CC_Z;
2388             return;
2389         }
2390     }
2391     CC_SRC = eflags | CC_Z;
2392 }
2393