xref: /openbmc/qemu/target/i386/tcg/seg_helper.c (revision fe1a3ace13a8b53fc20c74fb7e3337f754396e6b)
1 /*
2  *  x86 segmentation related helpers:
3  *  TSS, interrupts, system calls, jumps and call/task gates, descriptors
4  *
5  *  Copyright (c) 2003 Fabrice Bellard
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "qemu/log.h"
24 #include "exec/helper-proto.h"
25 #include "exec/exec-all.h"
26 #include "accel/tcg/cpu-ldst.h"
27 #include "accel/tcg/probe.h"
28 #include "exec/log.h"
29 #include "helper-tcg.h"
30 #include "seg_helper.h"
31 #include "access.h"
32 #include "tcg-cpu.h"
33 
34 #ifdef TARGET_X86_64
35 #define SET_ESP(val, sp_mask)                                   \
36     do {                                                        \
37         if ((sp_mask) == 0xffff) {                              \
38             env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) |   \
39                 ((val) & 0xffff);                               \
40         } else if ((sp_mask) == 0xffffffffLL) {                 \
41             env->regs[R_ESP] = (uint32_t)(val);                 \
42         } else {                                                \
43             env->regs[R_ESP] = (val);                           \
44         }                                                       \
45     } while (0)
46 #else
47 #define SET_ESP(val, sp_mask)                                   \
48     do {                                                        \
49         env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) |    \
50             ((val) & (sp_mask));                                \
51     } while (0)
52 #endif
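/*
 * SET_ESP stores a new stack pointer while honouring the stack size.  On
 * 64-bit builds the two common masks are special-cased: a 0xffff mask
 * replaces only SP, a 0xffffffff mask zero-extends the value into RSP,
 * and any other mask (get_sp_mask() returns 0 for a 64-bit stack) stores
 * the full 64-bit value.  The usual pattern below is to do all pushes
 * and pops through a StackAccess and then commit with:
 *
 *     SET_ESP(sa.sp, sa.sp_mask);
 */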
53 
54 /* XXX: use mmu_index to have proper DPL support */
55 typedef struct StackAccess
56 {
57     CPUX86State *env;
58     uintptr_t ra;
59     target_ulong ss_base;
60     target_ulong sp;
61     target_ulong sp_mask;
62     int mmu_index;
63 } StackAccess;
64 
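/*
 * StackAccess gathers everything needed to access the target's stack:
 * the stack-segment base, the working stack pointer, the pointer mask
 * (see get_sp_mask()), the MMU index for the accesses and the return
 * address used for exception unwinding.  Callers fill the structure,
 * perform the pushes/pops, and only write the final stack pointer back
 * to env->regs[R_ESP] once every access has succeeded.
 */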
65 static void pushw(StackAccess *sa, uint16_t val)
66 {
67     sa->sp -= 2;
68     cpu_stw_mmuidx_ra(sa->env, sa->ss_base + (sa->sp & sa->sp_mask),
69                       val, sa->mmu_index, sa->ra);
70 }
71 
72 static void pushl(StackAccess *sa, uint32_t val)
73 {
74     sa->sp -= 4;
75     cpu_stl_mmuidx_ra(sa->env, sa->ss_base + (sa->sp & sa->sp_mask),
76                       val, sa->mmu_index, sa->ra);
77 }
78 
79 static uint16_t popw(StackAccess *sa)
80 {
81     uint16_t ret = cpu_lduw_mmuidx_ra(sa->env,
82                                       sa->ss_base + (sa->sp & sa->sp_mask),
83                                       sa->mmu_index, sa->ra);
84     sa->sp += 2;
85     return ret;
86 }
87 
88 static uint32_t popl(StackAccess *sa)
89 {
90     uint32_t ret = cpu_ldl_mmuidx_ra(sa->env,
91                                      sa->ss_base + (sa->sp & sa->sp_mask),
92                                      sa->mmu_index, sa->ra);
93     sa->sp += 4;
94     return ret;
95 }
96 
97 int get_pg_mode(CPUX86State *env)
98 {
99     int pg_mode = PG_MODE_PG;
100     if (!(env->cr[0] & CR0_PG_MASK)) {
101         return 0;
102     }
103     if (env->cr[0] & CR0_WP_MASK) {
104         pg_mode |= PG_MODE_WP;
105     }
106     if (env->cr[4] & CR4_PAE_MASK) {
107         pg_mode |= PG_MODE_PAE;
108         if (env->efer & MSR_EFER_NXE) {
109             pg_mode |= PG_MODE_NXE;
110         }
111     }
112     if (env->cr[4] & CR4_PSE_MASK) {
113         pg_mode |= PG_MODE_PSE;
114     }
115     if (env->cr[4] & CR4_SMEP_MASK) {
116         pg_mode |= PG_MODE_SMEP;
117     }
118     if (env->hflags & HF_LMA_MASK) {
119         pg_mode |= PG_MODE_LMA;
120         if (env->cr[4] & CR4_PKE_MASK) {
121             pg_mode |= PG_MODE_PKE;
122         }
123         if (env->cr[4] & CR4_PKS_MASK) {
124             pg_mode |= PG_MODE_PKS;
125         }
126         if (env->cr[4] & CR4_LA57_MASK) {
127             pg_mode |= PG_MODE_LA57;
128         }
129     }
130     return pg_mode;
131 }
132 
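/*
 * Select the MMU index used for implicit supervisor accesses such as
 * descriptor tables and the TSS.  SMAP checks are skipped when SMAP is
 * disabled or when a privileged caller runs with EFLAGS.AC set; the +1
 * offset picks the 32-bit (non-long-mode) variant of the chosen index.
 */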
133 static int x86_mmu_index_kernel_pl(CPUX86State *env, unsigned pl)
134 {
135     int mmu_index_32 = (env->hflags & HF_LMA_MASK) ? 0 : 1;
136     int mmu_index_base =
137         !(env->hflags & HF_SMAP_MASK) ? MMU_KNOSMAP64_IDX :
138         (pl < 3 && (env->eflags & AC_MASK)
139          ? MMU_KNOSMAP64_IDX : MMU_KSMAP64_IDX);
140 
141     return mmu_index_base + mmu_index_32;
142 }
143 
144 int cpu_mmu_index_kernel(CPUX86State *env)
145 {
146     return x86_mmu_index_kernel_pl(env, env->hflags & HF_CPL_MASK);
147 }
148 
149 /* return non zero if error */
150 static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr,
151                                uint32_t *e2_ptr, int selector,
152                                uintptr_t retaddr)
153 {
154     SegmentCache *dt;
155     int index;
156     target_ulong ptr;
157 
158     if (selector & 0x4) {
159         dt = &env->ldt;
160     } else {
161         dt = &env->gdt;
162     }
163     index = selector & ~7;
164     if ((index + 7) > dt->limit) {
165         return -1;
166     }
167     ptr = dt->base + index;
168     *e1_ptr = cpu_ldl_kernel_ra(env, ptr, retaddr);
169     *e2_ptr = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
170     return 0;
171 }
172 
173 static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
174                                uint32_t *e2_ptr, int selector)
175 {
176     return load_segment_ra(env, e1_ptr, e2_ptr, selector, 0);
177 }
178 
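/*
 * Descriptor decoding helpers.  e1 is the low word of a descriptor
 * (limit 15:0, base 15:0) and e2 the high word (base 23:16, type and
 * access bits, limit 19:16, flags, base 31:24).  When the granularity
 * bit is set, the 20-bit limit counts 4 KiB pages, so it is shifted up
 * and the low 12 bits are filled with ones.
 */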
179 static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
180 {
181     unsigned int limit;
182 
183     limit = (e1 & 0xffff) | (e2 & 0x000f0000);
184     if (e2 & DESC_G_MASK) {
185         limit = (limit << 12) | 0xfff;
186     }
187     return limit;
188 }
189 
190 static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
191 {
192     return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
193 }
194 
195 static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
196                                          uint32_t e2)
197 {
198     sc->base = get_seg_base(e1, e2);
199     sc->limit = get_seg_limit(e1, e2);
200     sc->flags = e2;
201 }
202 
203 /* init the segment cache in vm86 mode. */
204 static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
205 {
206     selector &= 0xffff;
207 
208     cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff,
209                            DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
210                            DESC_A_MASK | (3 << DESC_DPL_SHIFT));
211 }
212 
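/*
 * Fetch the inner-level SS:ESP pair from the current TSS.  In a 32-bit
 * TSS the pairs sit at offsets 4/8 (level 0), 12/16 (level 1) and 20/24
 * (level 2); in a 16-bit TSS they are 16-bit fields at offsets 2/4, 6/8
 * and 10/12.  Both layouts are covered by index = (dpl * 4 + 2) << shift,
 * where shift is 1 for a 32-bit TSS.
 */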
213 static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
214                                        uint32_t *esp_ptr, int dpl,
215                                        uintptr_t retaddr)
216 {
217     X86CPU *cpu = env_archcpu(env);
218     int type, index, shift;
219 
220 #if 0
221     {
222         int i;
223         printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
224         for (i = 0; i < env->tr.limit; i++) {
225             printf("%02x ", env->tr.base[i]);
226             if ((i & 7) == 7) {
227                 printf("\n");
228             }
229         }
230         printf("\n");
231     }
232 #endif
233 
234     if (!(env->tr.flags & DESC_P_MASK)) {
235         cpu_abort(CPU(cpu), "invalid tss");
236     }
237     type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
238     if ((type & 7) != 1) {
239         cpu_abort(CPU(cpu), "invalid tss type");
240     }
241     shift = type >> 3;
242     index = (dpl * 4 + 2) << shift;
243     if (index + (4 << shift) - 1 > env->tr.limit) {
244         raise_exception_err_ra(env, EXCP0A_TSS, env->tr.selector & 0xfffc, retaddr);
245     }
246     if (shift == 0) {
247         *esp_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index, retaddr);
248         *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 2, retaddr);
249     } else {
250         *esp_ptr = cpu_ldl_kernel_ra(env, env->tr.base + index, retaddr);
251         *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 4, retaddr);
252     }
253 }
254 
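/*
 * Load a segment register as part of a task switch.  The checks mirror
 * helper_load_seg(), but failures raise #TS with the offending selector
 * (or #NP for a not-present segment) because the fault is delivered in
 * the context of the incoming task.
 */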
255 static void tss_load_seg(CPUX86State *env, X86Seg seg_reg, int selector,
256                          int cpl, uintptr_t retaddr)
257 {
258     uint32_t e1, e2;
259     int rpl, dpl;
260 
261     if ((selector & 0xfffc) != 0) {
262         if (load_segment_ra(env, &e1, &e2, selector, retaddr) != 0) {
263             raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
264         }
265         if (!(e2 & DESC_S_MASK)) {
266             raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
267         }
268         rpl = selector & 3;
269         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
270         if (seg_reg == R_CS) {
271             if (!(e2 & DESC_CS_MASK)) {
272                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
273             }
274             if (dpl != rpl) {
275                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
276             }
277         } else if (seg_reg == R_SS) {
278             /* SS must be writable data */
279             if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
280                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
281             }
282             if (dpl != cpl || dpl != rpl) {
283                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
284             }
285         } else {
286             /* not readable code */
287             if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
288                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
289             }
290             /* if data or non-conforming code, check the rights */
291             if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
292                 if (dpl < cpl || dpl < rpl) {
293                     raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
294                 }
295             }
296         }
297         if (!(e2 & DESC_P_MASK)) {
298             raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, retaddr);
299         }
300         cpu_x86_load_seg_cache(env, seg_reg, selector,
301                                get_seg_base(e1, e2),
302                                get_seg_limit(e1, e2),
303                                e2);
304     } else {
305         if (seg_reg == R_SS || seg_reg == R_CS) {
306             raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
307         }
308     }
309 }
310 
311 static void tss_set_busy(CPUX86State *env, int tss_selector, bool value,
312                          uintptr_t retaddr)
313 {
314     target_ulong ptr = env->gdt.base + (tss_selector & ~7);
315     uint32_t e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
316 
317     if (value) {
318         e2 |= DESC_TSS_BUSY_MASK;
319     } else {
320         e2 &= ~DESC_TSS_BUSY_MASK;
321     }
322 
323     cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
324 }
325 
326 #define SWITCH_TSS_JMP  0
327 #define SWITCH_TSS_IRET 1
328 #define SWITCH_TSS_CALL 2
329 
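/*
 * Hardware task switch.  The sequence below mirrors what the CPU does:
 * validate the target TSS descriptor, save the outgoing context into the
 * current TSS, read the whole incoming TSS image, update the busy bits
 * and (for CALL-style switches) the back link, then install CR3, EFLAGS,
 * the general registers, the LDT and finally the segment registers,
 * which may fault in the context of the new task.  The 32-bit TSS
 * offsets used here are the architectural ones: 0x1c CR3, 0x20 EIP,
 * 0x24 EFLAGS, 0x28 general registers, 0x48 segment selectors, 0x60 LDT
 * selector, 0x64 T bit and I/O bitmap base.
 */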
330 /* return 1 if switching to a 32-bit TSS, 0 if to a 16-bit TSS */
331 static int switch_tss_ra(CPUX86State *env, int tss_selector,
332                          uint32_t e1, uint32_t e2, int source,
333                          uint32_t next_eip, uintptr_t retaddr)
334 {
335     int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, i;
336     target_ulong tss_base;
337     uint32_t new_regs[8], new_segs[6];
338     uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
339     uint32_t old_eflags, eflags_mask;
340     SegmentCache *dt;
341     int mmu_index, index;
342     target_ulong ptr;
343     X86Access old, new;
344 
345     type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
346     LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
347               source);
348 
349     /* if it is a task gate, read and load the TSS descriptor it points to */
350     if (type == 5) {
351         if (!(e2 & DESC_P_MASK)) {
352             raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
353         }
354         tss_selector = e1 >> 16;
355         if (tss_selector & 4) {
356             raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
357         }
358         if (load_segment_ra(env, &e1, &e2, tss_selector, retaddr) != 0) {
359             raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
360         }
361         if (e2 & DESC_S_MASK) {
362             raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
363         }
364         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
365         if ((type & 7) != 1) {
366             raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
367         }
368     }
369 
370     if (!(e2 & DESC_P_MASK)) {
371         raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
372     }
373 
374     if (type & 8) {
375         tss_limit_max = 103;
376     } else {
377         tss_limit_max = 43;
378     }
379     tss_limit = get_seg_limit(e1, e2);
380     tss_base = get_seg_base(e1, e2);
381     if ((tss_selector & 4) != 0 ||
382         tss_limit < tss_limit_max) {
383         raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
384     }
385     old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
386     if (old_type & 8) {
387         old_tss_limit_max = 103;
388     } else {
389         old_tss_limit_max = 43;
390     }
391 
392     /* new TSS must be busy iff the source is an IRET instruction  */
393     if (!!(e2 & DESC_TSS_BUSY_MASK) != (source == SWITCH_TSS_IRET)) {
394         raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
395     }
396 
397     /* X86Access avoids memory exceptions during the task switch */
398     mmu_index = cpu_mmu_index_kernel(env);
399     access_prepare_mmu(&old, env, env->tr.base, old_tss_limit_max + 1,
400                        MMU_DATA_STORE, mmu_index, retaddr);
401 
402     if (source == SWITCH_TSS_CALL) {
403         /* Probe for future write of parent task */
404         probe_access(env, tss_base, 2, MMU_DATA_STORE,
405                      mmu_index, retaddr);
406     }
407     /* The true tss_limit may be larger, but we don't access the iopb here. */
408     access_prepare_mmu(&new, env, tss_base, tss_limit_max + 1,
409                        MMU_DATA_LOAD, mmu_index, retaddr);
410 
411     /* save the current state in the old TSS */
412     old_eflags = cpu_compute_eflags(env);
413     if (old_type & 8) {
414         /* 32 bit */
415         access_stl(&old, env->tr.base + 0x20, next_eip);
416         access_stl(&old, env->tr.base + 0x24, old_eflags);
417         access_stl(&old, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX]);
418         access_stl(&old, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX]);
419         access_stl(&old, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX]);
420         access_stl(&old, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX]);
421         access_stl(&old, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP]);
422         access_stl(&old, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP]);
423         access_stl(&old, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI]);
424         access_stl(&old, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI]);
425         for (i = 0; i < 6; i++) {
426             access_stw(&old, env->tr.base + (0x48 + i * 4),
427                        env->segs[i].selector);
428         }
429     } else {
430         /* 16 bit */
431         access_stw(&old, env->tr.base + 0x0e, next_eip);
432         access_stw(&old, env->tr.base + 0x10, old_eflags);
433         access_stw(&old, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX]);
434         access_stw(&old, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX]);
435         access_stw(&old, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX]);
436         access_stw(&old, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX]);
437         access_stw(&old, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP]);
438         access_stw(&old, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP]);
439         access_stw(&old, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI]);
440         access_stw(&old, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI]);
441         for (i = 0; i < 4; i++) {
442             access_stw(&old, env->tr.base + (0x22 + i * 2),
443                        env->segs[i].selector);
444         }
445     }
446 
447     /* read all the registers from the new TSS */
448     if (type & 8) {
449         /* 32 bit */
450         new_cr3 = access_ldl(&new, tss_base + 0x1c);
451         new_eip = access_ldl(&new, tss_base + 0x20);
452         new_eflags = access_ldl(&new, tss_base + 0x24);
453         for (i = 0; i < 8; i++) {
454             new_regs[i] = access_ldl(&new, tss_base + (0x28 + i * 4));
455         }
456         for (i = 0; i < 6; i++) {
457             new_segs[i] = access_ldw(&new, tss_base + (0x48 + i * 4));
458         }
459         new_ldt = access_ldw(&new, tss_base + 0x60);
460         new_trap = access_ldl(&new, tss_base + 0x64);
461     } else {
462         /* 16 bit */
463         new_cr3 = 0;
464         new_eip = access_ldw(&new, tss_base + 0x0e);
465         new_eflags = access_ldw(&new, tss_base + 0x10);
466         for (i = 0; i < 8; i++) {
467             new_regs[i] = access_ldw(&new, tss_base + (0x12 + i * 2));
468         }
469         for (i = 0; i < 4; i++) {
470             new_segs[i] = access_ldw(&new, tss_base + (0x22 + i * 2));
471         }
472         new_ldt = access_ldw(&new, tss_base + 0x2a);
473         new_segs[R_FS] = 0;
474         new_segs[R_GS] = 0;
475         new_trap = 0;
476     }
477     /* XXX: avoid a compiler warning; see
478        http://support.amd.com/us/Processor_TechDocs/24593.pdf
479        chapters 12.2.5 and 13.2.4 on how to implement the TSS trap bit */
480     (void)new_trap;
481 
482     /* clear the busy bit of the old TSS (the outgoing task is restartable) */
483     if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
484         tss_set_busy(env, env->tr.selector, 0, retaddr);
485     }
486 
487     if (source == SWITCH_TSS_IRET) {
488         old_eflags &= ~NT_MASK;
489         if (old_type & 8) {
490             access_stl(&old, env->tr.base + 0x24, old_eflags);
491         } else {
492             access_stw(&old, env->tr.base + 0x10, old_eflags);
493         }
494     }
495 
496     if (source == SWITCH_TSS_CALL) {
497         /*
498          * Thanks to the probe_access above, we know the first two
499          * bytes addressed by &new are writable too.
500          */
501         access_stw(&new, tss_base, env->tr.selector);
502         new_eflags |= NT_MASK;
503     }
504 
505     /* set busy bit */
506     if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
507         tss_set_busy(env, tss_selector, 1, retaddr);
508     }
509 
510     /* set the new CPU state */
511 
512     /* now if an exception occurs, it will occur in the next task context */
513 
514     env->cr[0] |= CR0_TS_MASK;
515     env->hflags |= HF_TS_MASK;
516     env->tr.selector = tss_selector;
517     env->tr.base = tss_base;
518     env->tr.limit = tss_limit;
519     env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
520 
521     if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
522         cpu_x86_update_cr3(env, new_cr3);
523     }
524 
525     /* load all registers without raising an exception, then reload the
526        segment registers below, which may raise exceptions */
527     env->eip = new_eip;
528     eflags_mask = TF_MASK | AC_MASK | ID_MASK |
529         IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
530     if (type & 8) {
531         cpu_load_eflags(env, new_eflags, eflags_mask);
532         for (i = 0; i < 8; i++) {
533             env->regs[i] = new_regs[i];
534         }
535     } else {
536         cpu_load_eflags(env, new_eflags, eflags_mask & 0xffff);
537         for (i = 0; i < 8; i++) {
538             env->regs[i] = (env->regs[i] & 0xffff0000) | new_regs[i];
539         }
540     }
541     if (new_eflags & VM_MASK) {
542         for (i = 0; i < 6; i++) {
543             load_seg_vm(env, i, new_segs[i]);
544         }
545     } else {
546         /* first load just the selectors; the full reload below may trigger exceptions */
547         for (i = 0; i < 6; i++) {
548             cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
549         }
550     }
551 
552     env->ldt.selector = new_ldt & ~4;
553     env->ldt.base = 0;
554     env->ldt.limit = 0;
555     env->ldt.flags = 0;
556 
557     /* load the LDT */
558     if (new_ldt & 4) {
559         raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
560     }
561 
562     if ((new_ldt & 0xfffc) != 0) {
563         dt = &env->gdt;
564         index = new_ldt & ~7;
565         if ((index + 7) > dt->limit) {
566             raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
567         }
568         ptr = dt->base + index;
569         e1 = cpu_ldl_kernel_ra(env, ptr, retaddr);
570         e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
571         if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
572             raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
573         }
574         if (!(e2 & DESC_P_MASK)) {
575             raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
576         }
577         load_seg_cache_raw_dt(&env->ldt, e1, e2);
578     }
579 
580     /* load the segments */
581     if (!(new_eflags & VM_MASK)) {
582         int cpl = new_segs[R_CS] & 3;
583         tss_load_seg(env, R_CS, new_segs[R_CS], cpl, retaddr);
584         tss_load_seg(env, R_SS, new_segs[R_SS], cpl, retaddr);
585         tss_load_seg(env, R_ES, new_segs[R_ES], cpl, retaddr);
586         tss_load_seg(env, R_DS, new_segs[R_DS], cpl, retaddr);
587         tss_load_seg(env, R_FS, new_segs[R_FS], cpl, retaddr);
588         tss_load_seg(env, R_GS, new_segs[R_GS], cpl, retaddr);
589     }
590 
591     /* check that env->eip is within the CS segment limit */
592     if (new_eip > env->segs[R_CS].limit) {
593         /* XXX: different exception if CALL? */
594         raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
595     }
596 
597 #ifndef CONFIG_USER_ONLY
598     /* reset local breakpoints */
599     if (env->dr[7] & DR7_LOCAL_BP_MASK) {
600         cpu_x86_update_dr7(env, env->dr[7] & ~DR7_LOCAL_BP_MASK);
601     }
602 #endif
603     return type >> 3;
604 }
605 
606 static int switch_tss(CPUX86State *env, int tss_selector,
607                       uint32_t e1, uint32_t e2, int source,
608                       uint32_t next_eip)
609 {
610     return switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0);
611 }
612 
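/*
 * Derive the stack-pointer mask from the SS descriptor flags: a 64-bit
 * stack (L bit set) yields 0, which the 64-bit SET_ESP treats as "store
 * the full value"; a 32-bit stack (B bit set) masks to 32 bits and a
 * 16-bit stack to 16 bits.
 */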
613 static inline unsigned int get_sp_mask(unsigned int e2)
614 {
615 #ifdef TARGET_X86_64
616     if (e2 & DESC_L_MASK) {
617         return 0;
618     } else
619 #endif
620     if (e2 & DESC_B_MASK) {
621         return 0xffffffff;
622     } else {
623         return 0xffff;
624     }
625 }
626 
627 static int exception_is_fault(int intno)
628 {
629     switch (intno) {
630         /*
631          * #DB can be both fault- and trap-like, but it never sets RF=1
632          * in the RFLAGS value pushed on the stack.
633          */
634     case EXCP01_DB:
635     case EXCP03_INT3:
636     case EXCP04_INTO:
637     case EXCP08_DBLE:
638     case EXCP12_MCHK:
639         return 0;
640     }
641     /* Everything else, including reserved exceptions, is a fault.  */
642     return 1;
643 }
644 
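/*
 * Vectors that push an error code: 8 (#DF), 10 (#TS), 11 (#NP), 12 (#SS),
 * 13 (#GP), 14 (#PF) and 17 (#AC).
 */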
645 int exception_has_error_code(int intno)
646 {
647     switch (intno) {
648     case 8:
649     case 10:
650     case 11:
651     case 12:
652     case 13:
653     case 14:
654     case 17:
655         return 1;
656     }
657     return 0;
658 }
659 
660 /* protected mode interrupt */
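/*
 * The frame pushed on the target stack depends on the situation: with a
 * privilege change the CPU pushes SS:ESP, EFLAGS, CS:EIP and, for some
 * exceptions, an error code; when interrupting VM86 code it additionally
 * pushes GS, FS, DS and ES first.  Without a privilege change only
 * EFLAGS, CS:EIP and the optional error code go on the current stack.
 * The width of each slot follows the gate size (16- or 32-bit).
 */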
661 static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
662                                    int error_code, unsigned int next_eip,
663                                    int is_hw)
664 {
665     SegmentCache *dt;
666     target_ulong ptr;
667     int type, dpl, selector, ss_dpl, cpl;
668     int has_error_code, new_stack, shift;
669     uint32_t e1, e2, offset, ss = 0, ss_e1 = 0, ss_e2 = 0;
670     uint32_t old_eip, eflags;
671     int vm86 = env->eflags & VM_MASK;
672     StackAccess sa;
673     bool set_rf;
674 
675     has_error_code = 0;
676     if (!is_int && !is_hw) {
677         has_error_code = exception_has_error_code(intno);
678     }
679     if (is_int) {
680         old_eip = next_eip;
681         set_rf = false;
682     } else {
683         old_eip = env->eip;
684         set_rf = exception_is_fault(intno);
685     }
686 
687     dt = &env->idt;
688     if (intno * 8 + 7 > dt->limit) {
689         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
690     }
691     ptr = dt->base + intno * 8;
692     e1 = cpu_ldl_kernel(env, ptr);
693     e2 = cpu_ldl_kernel(env, ptr + 4);
694     /* check gate type */
695     type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
696     switch (type) {
697     case 5: /* task gate */
698     case 6: /* 286 interrupt gate */
699     case 7: /* 286 trap gate */
700     case 14: /* 386 interrupt gate */
701     case 15: /* 386 trap gate */
702         break;
703     default:
704         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
705         break;
706     }
707     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
708     cpl = env->hflags & HF_CPL_MASK;
709     /* check privilege if software int */
710     if (is_int && dpl < cpl) {
711         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
712     }
713 
714     sa.env = env;
715     sa.ra = 0;
716 
717     if (type == 5) {
718         /* task gate */
719         /* must do that check here to return the correct error code */
720         if (!(e2 & DESC_P_MASK)) {
721             raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
722         }
723         shift = switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
724         if (has_error_code) {
725             /* push the error code on the destination stack */
726             cpl = env->hflags & HF_CPL_MASK;
727             sa.mmu_index = x86_mmu_index_pl(env, cpl);
728             if (env->segs[R_SS].flags & DESC_B_MASK) {
729                 sa.sp_mask = 0xffffffff;
730             } else {
731                 sa.sp_mask = 0xffff;
732             }
733             sa.sp = env->regs[R_ESP];
734             sa.ss_base = env->segs[R_SS].base;
735             if (shift) {
736                 pushl(&sa, error_code);
737             } else {
738                 pushw(&sa, error_code);
739             }
740             SET_ESP(sa.sp, sa.sp_mask);
741         }
742         return;
743     }
744 
745     /* Otherwise, trap or interrupt gate */
746 
747     /* check valid bit */
748     if (!(e2 & DESC_P_MASK)) {
749         raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
750     }
751     selector = e1 >> 16;
752     offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
753     if ((selector & 0xfffc) == 0) {
754         raise_exception_err(env, EXCP0D_GPF, 0);
755     }
756     if (load_segment(env, &e1, &e2, selector) != 0) {
757         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
758     }
759     if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
760         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
761     }
762     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
763     if (dpl > cpl) {
764         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
765     }
766     if (!(e2 & DESC_P_MASK)) {
767         raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
768     }
769     if (e2 & DESC_C_MASK) {
770         dpl = cpl;
771     }
772     sa.mmu_index = x86_mmu_index_pl(env, dpl);
773     if (dpl < cpl) {
774         /* to inner privilege */
775         uint32_t esp;
776         get_ss_esp_from_tss(env, &ss, &esp, dpl, 0);
777         if ((ss & 0xfffc) == 0) {
778             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
779         }
780         if ((ss & 3) != dpl) {
781             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
782         }
783         if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
784             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
785         }
786         ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
787         if (ss_dpl != dpl) {
788             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
789         }
790         if (!(ss_e2 & DESC_S_MASK) ||
791             (ss_e2 & DESC_CS_MASK) ||
792             !(ss_e2 & DESC_W_MASK)) {
793             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
794         }
795         if (!(ss_e2 & DESC_P_MASK)) {
796             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
797         }
798         new_stack = 1;
799         sa.sp = esp;
800         sa.sp_mask = get_sp_mask(ss_e2);
801         sa.ss_base = get_seg_base(ss_e1, ss_e2);
802     } else  {
803         /* to same privilege */
804         if (vm86) {
805             raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
806         }
807         new_stack = 0;
808         sa.sp = env->regs[R_ESP];
809         sa.sp_mask = get_sp_mask(env->segs[R_SS].flags);
810         sa.ss_base = env->segs[R_SS].base;
811     }
812 
813     shift = type >> 3;
814 
815 #if 0
816     /* XXX: check that enough room is available */
817     push_size = 6 + (new_stack << 2) + (has_error_code << 1);
818     if (vm86) {
819         push_size += 8;
820     }
821     push_size <<= shift;
822 #endif
823     eflags = cpu_compute_eflags(env);
824     /*
825      * AMD states that code breakpoint #DBs clear RF to 0; Intel leaves it
826      * as is.  AMD behavior could be implemented in check_hw_breakpoints().
827      */
828     if (set_rf) {
829         eflags |= RF_MASK;
830     }
831 
832     if (shift == 1) {
833         if (new_stack) {
834             if (vm86) {
835                 pushl(&sa, env->segs[R_GS].selector);
836                 pushl(&sa, env->segs[R_FS].selector);
837                 pushl(&sa, env->segs[R_DS].selector);
838                 pushl(&sa, env->segs[R_ES].selector);
839             }
840             pushl(&sa, env->segs[R_SS].selector);
841             pushl(&sa, env->regs[R_ESP]);
842         }
843         pushl(&sa, eflags);
844         pushl(&sa, env->segs[R_CS].selector);
845         pushl(&sa, old_eip);
846         if (has_error_code) {
847             pushl(&sa, error_code);
848         }
849     } else {
850         if (new_stack) {
851             if (vm86) {
852                 pushw(&sa, env->segs[R_GS].selector);
853                 pushw(&sa, env->segs[R_FS].selector);
854                 pushw(&sa, env->segs[R_DS].selector);
855                 pushw(&sa, env->segs[R_ES].selector);
856             }
857             pushw(&sa, env->segs[R_SS].selector);
858             pushw(&sa, env->regs[R_ESP]);
859         }
860         pushw(&sa, eflags);
861         pushw(&sa, env->segs[R_CS].selector);
862         pushw(&sa, old_eip);
863         if (has_error_code) {
864             pushw(&sa, error_code);
865         }
866     }
867 
868     /* an interrupt gate clears the IF flag */
869     if ((type & 1) == 0) {
870         env->eflags &= ~IF_MASK;
871     }
872     env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
873 
874     if (new_stack) {
875         if (vm86) {
876             cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
877             cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
878             cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
879             cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
880         }
881         ss = (ss & ~3) | dpl;
882         cpu_x86_load_seg_cache(env, R_SS, ss, sa.ss_base,
883                                get_seg_limit(ss_e1, ss_e2), ss_e2);
884     }
885     SET_ESP(sa.sp, sa.sp_mask);
886 
887     selector = (selector & ~3) | dpl;
888     cpu_x86_load_seg_cache(env, R_CS, selector,
889                    get_seg_base(e1, e2),
890                    get_seg_limit(e1, e2),
891                    e2);
892     env->eip = offset;
893 }
894 
895 #ifdef TARGET_X86_64
896 
897 static void pushq(StackAccess *sa, uint64_t val)
898 {
899     sa->sp -= 8;
900     cpu_stq_mmuidx_ra(sa->env, sa->sp, val, sa->mmu_index, sa->ra);
901 }
902 
903 static uint64_t popq(StackAccess *sa)
904 {
905     uint64_t ret = cpu_ldq_mmuidx_ra(sa->env, sa->sp, sa->mmu_index, sa->ra);
906     sa->sp += 8;
907     return ret;
908 }
909 
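/*
 * Read a stack pointer from the 64-bit TSS.  RSP0..RSP2 sit at offsets
 * 4, 12 and 20 and IST1..IST7 at offsets 36..84, so index = 8 * level + 4
 * covers both cases: callers pass the target DPL directly, or ist + 3
 * for an IST entry.  A non-canonical result raises #SS(0).
 */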
910 static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
911 {
912     X86CPU *cpu = env_archcpu(env);
913     int index, pg_mode;
914     target_ulong rsp;
915     int32_t sext;
916 
917 #if 0
918     printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
919            env->tr.base, env->tr.limit);
920 #endif
921 
922     if (!(env->tr.flags & DESC_P_MASK)) {
923         cpu_abort(CPU(cpu), "invalid tss");
924     }
925     index = 8 * level + 4;
926     if ((index + 7) > env->tr.limit) {
927         raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
928     }
929 
930     rsp = cpu_ldq_kernel(env, env->tr.base + index);
931 
932     /* check that the target stack pointer is a canonical address */
933     pg_mode = get_pg_mode(env);
934     sext = (int64_t)rsp >> (pg_mode & PG_MODE_LA57 ? 56 : 47);
935     if (sext != 0 && sext != -1) {
936         raise_exception_err(env, EXCP0C_STACK, 0);
937     }
938 
939     return rsp;
940 }
941 
942 /* 64 bit interrupt */
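/*
 * In long mode the IDT entries are 16 bytes wide and the CPU always
 * pushes SS:RSP, RFLAGS, CS:RIP and the optional error code as 64-bit
 * values on a 16-byte-aligned stack, switching to an IST stack or to
 * the target privilege level's RSP when required.
 */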
943 static void do_interrupt64(CPUX86State *env, int intno, int is_int,
944                            int error_code, target_ulong next_eip, int is_hw)
945 {
946     SegmentCache *dt;
947     target_ulong ptr;
948     int type, dpl, selector, cpl, ist;
949     int has_error_code, new_stack;
950     uint32_t e1, e2, e3, eflags;
951     target_ulong old_eip, offset;
952     bool set_rf;
953     StackAccess sa;
954 
955     has_error_code = 0;
956     if (!is_int && !is_hw) {
957         has_error_code = exception_has_error_code(intno);
958     }
959     if (is_int) {
960         old_eip = next_eip;
961         set_rf = false;
962     } else {
963         old_eip = env->eip;
964         set_rf = exception_is_fault(intno);
965     }
966 
967     dt = &env->idt;
968     if (intno * 16 + 15 > dt->limit) {
969         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
970     }
971     ptr = dt->base + intno * 16;
972     e1 = cpu_ldl_kernel(env, ptr);
973     e2 = cpu_ldl_kernel(env, ptr + 4);
974     e3 = cpu_ldl_kernel(env, ptr + 8);
975     /* check gate type */
976     type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
977     switch (type) {
978     case 14: /* 386 interrupt gate */
979     case 15: /* 386 trap gate */
980         break;
981     default:
982         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
983         break;
984     }
985     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
986     cpl = env->hflags & HF_CPL_MASK;
987     /* check privilege if software int */
988     if (is_int && dpl < cpl) {
989         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
990     }
991     /* check valid bit */
992     if (!(e2 & DESC_P_MASK)) {
993         raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
994     }
995     selector = e1 >> 16;
996     offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
997     ist = e2 & 7;
998     if ((selector & 0xfffc) == 0) {
999         raise_exception_err(env, EXCP0D_GPF, 0);
1000     }
1001 
1002     if (load_segment(env, &e1, &e2, selector) != 0) {
1003         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1004     }
1005     if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
1006         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1007     }
1008     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1009     if (dpl > cpl) {
1010         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1011     }
1012     if (!(e2 & DESC_P_MASK)) {
1013         raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
1014     }
1015     if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
1016         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1017     }
1018     if (e2 & DESC_C_MASK) {
1019         dpl = cpl;
1020     }
1021 
1022     sa.env = env;
1023     sa.ra = 0;
1024     sa.mmu_index = x86_mmu_index_pl(env, dpl);
1025     sa.sp_mask = -1;
1026     sa.ss_base = 0;
1027     if (dpl < cpl || ist != 0) {
1028         /* to inner privilege */
1029         new_stack = 1;
1030         sa.sp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl);
1031     } else {
1032         /* to same privilege */
1033         if (env->eflags & VM_MASK) {
1034             raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1035         }
1036         new_stack = 0;
1037         sa.sp = env->regs[R_ESP];
1038     }
1039     sa.sp &= ~0xfLL; /* align stack */
1040 
1041     /* See do_interrupt_protected.  */
1042     eflags = cpu_compute_eflags(env);
1043     if (set_rf) {
1044         eflags |= RF_MASK;
1045     }
1046 
1047     pushq(&sa, env->segs[R_SS].selector);
1048     pushq(&sa, env->regs[R_ESP]);
1049     pushq(&sa, eflags);
1050     pushq(&sa, env->segs[R_CS].selector);
1051     pushq(&sa, old_eip);
1052     if (has_error_code) {
1053         pushq(&sa, error_code);
1054     }
1055 
1056     /* an interrupt gate clears the IF flag */
1057     if ((type & 1) == 0) {
1058         env->eflags &= ~IF_MASK;
1059     }
1060     env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1061 
1062     if (new_stack) {
1063         uint32_t ss = 0 | dpl; /* SS = NULL selector with RPL = new CPL */
1064         cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, dpl << DESC_DPL_SHIFT);
1065     }
1066     env->regs[R_ESP] = sa.sp;
1067 
1068     selector = (selector & ~3) | dpl;
1069     cpu_x86_load_seg_cache(env, R_CS, selector,
1070                    get_seg_base(e1, e2),
1071                    get_seg_limit(e1, e2),
1072                    e2);
1073     env->eip = offset;
1074 }
1075 #endif /* TARGET_X86_64 */
1076 
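/*
 * SYSRET returns to user mode using selectors derived from the field in
 * MSR_STAR[63:48].  In long mode RFLAGS is restored from R11; CS is the
 * base selector plus 16 for a 64-bit return or the base selector itself
 * for a compatibility-mode return, SS is the base selector plus 8, and
 * RIP comes from RCX.  Outside long mode only IF is set and flat 32-bit
 * CS/SS are reloaded, with EIP taken from ECX.
 */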
1077 void helper_sysret(CPUX86State *env, int dflag)
1078 {
1079     int cpl, selector;
1080 
1081     if (!(env->efer & MSR_EFER_SCE)) {
1082         raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
1083     }
1084     cpl = env->hflags & HF_CPL_MASK;
1085     if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1086         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1087     }
1088     selector = (env->star >> 48) & 0xffff;
1089 #ifdef TARGET_X86_64
1090     if (env->hflags & HF_LMA_MASK) {
1091         cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
1092                         | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
1093                         NT_MASK);
1094         if (dflag == 2) {
1095             cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1096                                    0, 0xffffffff,
1097                                    DESC_G_MASK | DESC_P_MASK |
1098                                    DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1099                                    DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1100                                    DESC_L_MASK);
1101             env->eip = env->regs[R_ECX];
1102         } else {
1103             cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1104                                    0, 0xffffffff,
1105                                    DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1106                                    DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1107                                    DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1108             env->eip = (uint32_t)env->regs[R_ECX];
1109         }
1110         cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
1111                                0, 0xffffffff,
1112                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1113                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1114                                DESC_W_MASK | DESC_A_MASK);
1115     } else
1116 #endif
1117     {
1118         env->eflags |= IF_MASK;
1119         cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1120                                0, 0xffffffff,
1121                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1122                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1123                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1124         env->eip = (uint32_t)env->regs[R_ECX];
1125         cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
1126                                0, 0xffffffff,
1127                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1128                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1129                                DESC_W_MASK | DESC_A_MASK);
1130     }
1131 }
1132 
1133 /* real mode interrupt */
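/*
 * Real-mode vectors are 4-byte IVT entries (16-bit offset followed by a
 * 16-bit segment) at IDT base + intno * 4; the handler is entered after
 * FLAGS, CS and IP have been pushed on the 16-bit stack.
 */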
1134 static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
1135                               int error_code, unsigned int next_eip)
1136 {
1137     SegmentCache *dt;
1138     target_ulong ptr;
1139     int selector;
1140     uint32_t offset;
1141     uint32_t old_cs, old_eip;
1142     StackAccess sa;
1143 
1144     /* real mode (simpler!) */
1145     dt = &env->idt;
1146     if (intno * 4 + 3 > dt->limit) {
1147         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
1148     }
1149     ptr = dt->base + intno * 4;
1150     offset = cpu_lduw_kernel(env, ptr);
1151     selector = cpu_lduw_kernel(env, ptr + 2);
1152 
1153     sa.env = env;
1154     sa.ra = 0;
1155     sa.sp = env->regs[R_ESP];
1156     sa.sp_mask = 0xffff;
1157     sa.ss_base = env->segs[R_SS].base;
1158     sa.mmu_index = x86_mmu_index_pl(env, 0);
1159 
1160     if (is_int) {
1161         old_eip = next_eip;
1162     } else {
1163         old_eip = env->eip;
1164     }
1165     old_cs = env->segs[R_CS].selector;
1166     /* XXX: use SS segment size? */
1167     pushw(&sa, cpu_compute_eflags(env));
1168     pushw(&sa, old_cs);
1169     pushw(&sa, old_eip);
1170 
1171     /* update processor state */
1172     SET_ESP(sa.sp, sa.sp_mask);
1173     env->eip = offset;
1174     env->segs[R_CS].selector = selector;
1175     env->segs[R_CS].base = (selector << 4);
1176     env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1177 }
1178 
1179 /*
1180  * Begin execution of an interrupt. is_int is TRUE if coming from
1181  * the int instruction. next_eip is the env->eip value AFTER the interrupt
1182  * instruction. It is only relevant if is_int is TRUE.
1183  */
1184 void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
1185                       int error_code, target_ulong next_eip, int is_hw)
1186 {
1187     CPUX86State *env = &cpu->env;
1188 
1189     if (qemu_loglevel_mask(CPU_LOG_INT)) {
1190         if ((env->cr[0] & CR0_PE_MASK)) {
1191             static int count;
1192 
1193             qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
1194                      " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1195                      count, intno, error_code, is_int,
1196                      env->hflags & HF_CPL_MASK,
1197                      env->segs[R_CS].selector, env->eip,
1198                      (int)env->segs[R_CS].base + env->eip,
1199                      env->segs[R_SS].selector, env->regs[R_ESP]);
1200             if (intno == 0x0e) {
1201                 qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
1202             } else {
1203                 qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]);
1204             }
1205             qemu_log("\n");
1206             log_cpu_state(CPU(cpu), CPU_DUMP_CCOP);
1207 #if 0
1208             {
1209                 int i;
1210                 target_ulong ptr;
1211 
1212                 qemu_log("       code=");
1213                 ptr = env->segs[R_CS].base + env->eip;
1214                 for (i = 0; i < 16; i++) {
1215                     qemu_log(" %02x", ldub(ptr + i));
1216                 }
1217                 qemu_log("\n");
1218             }
1219 #endif
1220             count++;
1221         }
1222     }
1223     if (env->cr[0] & CR0_PE_MASK) {
1224 #if !defined(CONFIG_USER_ONLY)
1225         if (env->hflags & HF_GUEST_MASK) {
1226             handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
1227         }
1228 #endif
1229 #ifdef TARGET_X86_64
1230         if (env->hflags & HF_LMA_MASK) {
1231             do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
1232         } else
1233 #endif
1234         {
1235             do_interrupt_protected(env, intno, is_int, error_code, next_eip,
1236                                    is_hw);
1237         }
1238     } else {
1239 #if !defined(CONFIG_USER_ONLY)
1240         if (env->hflags & HF_GUEST_MASK) {
1241             handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
1242         }
1243 #endif
1244         do_interrupt_real(env, intno, is_int, error_code, next_eip);
1245     }
1246 
1247 #if !defined(CONFIG_USER_ONLY)
1248     if (env->hflags & HF_GUEST_MASK) {
1249         CPUState *cs = CPU(cpu);
1250         uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb +
1251                                       offsetof(struct vmcb,
1252                                                control.event_inj));
1253 
1254         x86_stl_phys(cs,
1255                  env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
1256                  event_inj & ~SVM_EVTINJ_VALID);
1257     }
1258 #endif
1259 }
1260 
1261 void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
1262 {
1263     do_interrupt_all(env_archcpu(env), intno, 0, 0, 0, is_hw);
1264 }
1265 
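/*
 * LLDT and LTR load a system descriptor from the GDT.  In long mode
 * these descriptors are 16 bytes, hence the entry_limit of 15 and the
 * extra descriptor word that supplies bits 63:32 of the base address.
 */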
1266 void helper_lldt(CPUX86State *env, int selector)
1267 {
1268     SegmentCache *dt;
1269     uint32_t e1, e2;
1270     int index, entry_limit;
1271     target_ulong ptr;
1272 
1273     selector &= 0xffff;
1274     if ((selector & 0xfffc) == 0) {
1275         /* XXX: NULL selector case: invalid LDT */
1276         env->ldt.base = 0;
1277         env->ldt.limit = 0;
1278     } else {
1279         if (selector & 0x4) {
1280             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1281         }
1282         dt = &env->gdt;
1283         index = selector & ~7;
1284 #ifdef TARGET_X86_64
1285         if (env->hflags & HF_LMA_MASK) {
1286             entry_limit = 15;
1287         } else
1288 #endif
1289         {
1290             entry_limit = 7;
1291         }
1292         if ((index + entry_limit) > dt->limit) {
1293             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1294         }
1295         ptr = dt->base + index;
1296         e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1297         e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1298         if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
1299             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1300         }
1301         if (!(e2 & DESC_P_MASK)) {
1302             raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1303         }
1304 #ifdef TARGET_X86_64
1305         if (env->hflags & HF_LMA_MASK) {
1306             uint32_t e3;
1307 
1308             e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
1309             load_seg_cache_raw_dt(&env->ldt, e1, e2);
1310             env->ldt.base |= (target_ulong)e3 << 32;
1311         } else
1312 #endif
1313         {
1314             load_seg_cache_raw_dt(&env->ldt, e1, e2);
1315         }
1316     }
1317     env->ldt.selector = selector;
1318 }
1319 
1320 void helper_ltr(CPUX86State *env, int selector)
1321 {
1322     SegmentCache *dt;
1323     uint32_t e1, e2;
1324     int index, type, entry_limit;
1325     target_ulong ptr;
1326 
1327     selector &= 0xffff;
1328     if ((selector & 0xfffc) == 0) {
1329         /* NULL selector case: invalid TR */
1330         env->tr.base = 0;
1331         env->tr.limit = 0;
1332         env->tr.flags = 0;
1333     } else {
1334         if (selector & 0x4) {
1335             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1336         }
1337         dt = &env->gdt;
1338         index = selector & ~7;
1339 #ifdef TARGET_X86_64
1340         if (env->hflags & HF_LMA_MASK) {
1341             entry_limit = 15;
1342         } else
1343 #endif
1344         {
1345             entry_limit = 7;
1346         }
1347         if ((index + entry_limit) > dt->limit) {
1348             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1349         }
1350         ptr = dt->base + index;
1351         e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1352         e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1353         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1354         if ((e2 & DESC_S_MASK) ||
1355             (type != 1 && type != 9)) {
1356             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1357         }
1358         if (!(e2 & DESC_P_MASK)) {
1359             raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1360         }
1361 #ifdef TARGET_X86_64
1362         if (env->hflags & HF_LMA_MASK) {
1363             uint32_t e3, e4;
1364 
1365             e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
1366             e4 = cpu_ldl_kernel_ra(env, ptr + 12, GETPC());
1367             if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
1368                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1369             }
1370             load_seg_cache_raw_dt(&env->tr, e1, e2);
1371             env->tr.base |= (target_ulong)e3 << 32;
1372         } else
1373 #endif
1374         {
1375             load_seg_cache_raw_dt(&env->tr, e1, e2);
1376         }
1377         e2 |= DESC_TSS_BUSY_MASK;
1378         cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
1379     }
1380     env->tr.selector = selector;
1381 }
1382 
1383 /* only works in protected mode and outside VM86 mode. seg_reg must be != R_CS */
1384 void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
1385 {
1386     uint32_t e1, e2;
1387     int cpl, dpl, rpl;
1388     SegmentCache *dt;
1389     int index;
1390     target_ulong ptr;
1391 
1392     selector &= 0xffff;
1393     cpl = env->hflags & HF_CPL_MASK;
1394     if ((selector & 0xfffc) == 0) {
1395         /* null selector case */
1396         if (seg_reg == R_SS
1397 #ifdef TARGET_X86_64
1398             && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
1399 #endif
1400             ) {
1401             raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1402         }
1403         cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
1404     } else {
1405 
1406         if (selector & 0x4) {
1407             dt = &env->ldt;
1408         } else {
1409             dt = &env->gdt;
1410         }
1411         index = selector & ~7;
1412         if ((index + 7) > dt->limit) {
1413             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1414         }
1415         ptr = dt->base + index;
1416         e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1417         e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1418 
1419         if (!(e2 & DESC_S_MASK)) {
1420             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1421         }
1422         rpl = selector & 3;
1423         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1424         if (seg_reg == R_SS) {
1425             /* must be a writable data segment */
1426             if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
1427                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1428             }
1429             if (rpl != cpl || dpl != cpl) {
1430                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1431             }
1432         } else {
1433             /* must be readable segment */
1434             if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
1435                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1436             }
1437 
1438             if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
1439                 /* if not conforming code, test rights */
1440                 /* not conforming code: check the privilege levels */
1441                     raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1442                 }
1443             }
1444         }
1445 
1446         if (!(e2 & DESC_P_MASK)) {
1447             if (seg_reg == R_SS) {
1448                 raise_exception_err_ra(env, EXCP0C_STACK, selector & 0xfffc, GETPC());
1449             } else {
1450                 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1451             }
1452         }
1453 
1454         /* set the accessed bit if not already set */
1455         if (!(e2 & DESC_A_MASK)) {
1456             e2 |= DESC_A_MASK;
1457             cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
1458         }
1459 
1460         cpu_x86_load_seg_cache(env, seg_reg, selector,
1461                        get_seg_base(e1, e2),
1462                        get_seg_limit(e1, e2),
1463                        e2);
1464 #if 0
1465         qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
1466                 selector, (unsigned long)sc->base, sc->limit, sc->flags);
1467 #endif
1468     }
1469 }
1470 
1471 /* protected mode jump */
1472 void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
1473                            target_ulong next_eip)
1474 {
1475     int gate_cs, type;
1476     uint32_t e1, e2, cpl, dpl, rpl, limit;
1477 
1478     if ((new_cs & 0xfffc) == 0) {
1479         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1480     }
1481     if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
1482         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1483     }
1484     cpl = env->hflags & HF_CPL_MASK;
1485     if (e2 & DESC_S_MASK) {
1486         if (!(e2 & DESC_CS_MASK)) {
1487             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1488         }
1489         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1490         if (e2 & DESC_C_MASK) {
1491             /* conforming code segment */
1492             if (dpl > cpl) {
1493                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1494             }
1495         } else {
1496             /* non-conforming code segment */
1497             rpl = new_cs & 3;
1498             if (rpl > cpl) {
1499                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1500             }
1501             if (dpl != cpl) {
1502                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1503             }
1504         }
1505         if (!(e2 & DESC_P_MASK)) {
1506             raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1507         }
1508         limit = get_seg_limit(e1, e2);
1509         if (new_eip > limit &&
1510             (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) {
1511             raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1512         }
1513         cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1514                        get_seg_base(e1, e2), limit, e2);
1515         env->eip = new_eip;
1516     } else {
1517         /* jump to call or task gate */
1518         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1519         rpl = new_cs & 3;
1520         cpl = env->hflags & HF_CPL_MASK;
1521         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1522 
1523 #ifdef TARGET_X86_64
1524         if (env->efer & MSR_EFER_LMA) {
1525             if (type != 12) {
1526                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1527             }
1528         }
1529 #endif
1530         switch (type) {
1531         case 1: /* 286 TSS */
1532         case 9: /* 386 TSS */
1533         case 5: /* task gate */
1534             if (dpl < cpl || dpl < rpl) {
1535                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1536             }
1537             switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip, GETPC());
1538             break;
1539         case 4: /* 286 call gate */
1540         case 12: /* 386 call gate */
1541             if ((dpl < cpl) || (dpl < rpl)) {
1542                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1543             }
1544             if (!(e2 & DESC_P_MASK)) {
1545                 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1546             }
1547             gate_cs = e1 >> 16;
1548             new_eip = (e1 & 0xffff);
1549             if (type == 12) {
1550                 new_eip |= (e2 & 0xffff0000);
1551             }
1552 
1553 #ifdef TARGET_X86_64
1554             if (env->efer & MSR_EFER_LMA) {
1555                 /* load the upper 8 bytes of the 64-bit call gate */
1556                 if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) {
1557                     raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1558                                            GETPC());
1559                 }
1560                 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1561                 if (type != 0) {
1562                     raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1563                                            GETPC());
1564                 }
1565                 new_eip |= ((target_ulong)e1) << 32;
1566             }
1567 #endif
1568 
1569             if (load_segment_ra(env, &e1, &e2, gate_cs, GETPC()) != 0) {
1570                 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1571             }
1572             dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1573             /* must be code segment */
1574             if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
1575                  (DESC_S_MASK | DESC_CS_MASK))) {
1576                 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1577             }
1578             if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
1579                 (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
1580                 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1581             }
1582 #ifdef TARGET_X86_64
1583             if (env->efer & MSR_EFER_LMA) {
1584                 if (!(e2 & DESC_L_MASK)) {
1585                     raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1586                 }
1587                 if (e2 & DESC_B_MASK) {
1588                     raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1589                 }
1590             }
1591 #endif
1592             if (!(e2 & DESC_P_MASK)) {
1593                 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1594             }
1595             limit = get_seg_limit(e1, e2);
1596             if (new_eip > limit &&
1597                 (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) {
1598                 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1599             }
1600             cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
1601                                    get_seg_base(e1, e2), limit, e2);
1602             env->eip = new_eip;
1603             break;
1604         default:
1605             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1606             break;
1607         }
1608     }
1609 }
1610 
1611 /* real mode call */
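/*
 * In real mode a far CALL only pushes the return CS:IP (32- or 16-bit
 * wide depending on 'shift') and reloads CS with base = selector << 4;
 * no descriptor tables are consulted.
 */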
1612 void helper_lcall_real(CPUX86State *env, uint32_t new_cs, uint32_t new_eip,
1613                        int shift, uint32_t next_eip)
1614 {
1615     StackAccess sa;
1616 
1617     sa.env = env;
1618     sa.ra = GETPC();
1619     sa.sp = env->regs[R_ESP];
1620     sa.sp_mask = get_sp_mask(env->segs[R_SS].flags);
1621     sa.ss_base = env->segs[R_SS].base;
1622     sa.mmu_index = x86_mmu_index_pl(env, 0);
1623 
1624     if (shift) {
1625         pushl(&sa, env->segs[R_CS].selector);
1626         pushl(&sa, next_eip);
1627     } else {
1628         pushw(&sa, env->segs[R_CS].selector);
1629         pushw(&sa, next_eip);
1630     }
1631 
1632     SET_ESP(sa.sp, sa.sp_mask);
1633     env->eip = new_eip;
1634     env->segs[R_CS].selector = new_cs;
1635     env->segs[R_CS].base = (new_cs << 4);
1636 }
1637 
1638 /* protected mode call */
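/*
 * A far CALL may target a code segment directly, a call gate, or a
 * TSS/task gate.  Code-segment targets behave like the protected mode
 * JMP above but additionally push the return CS:EIP.  A call gate may
 * raise the privilege level, in which case SS:ESP is switched to the
 * inner stack taken from the TSS and, for 16/32-bit gates,
 * 'param_count' parameters are copied from the old stack.
 */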
1639 void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
1640                             int shift, target_ulong next_eip)
1641 {
1642     int new_stack, i;
1643     uint32_t e1, e2, cpl, dpl, rpl, selector, param_count;
1644     uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, type, ss_dpl;
1645     uint32_t val, limit, old_sp_mask;
1646     target_ulong old_ssp, offset;
1647     StackAccess sa;
1648 
1649     LOG_PCALL("lcall %04x:" TARGET_FMT_lx " s=%d\n", new_cs, new_eip, shift);
1650     LOG_PCALL_STATE(env_cpu(env));
1651     if ((new_cs & 0xfffc) == 0) {
1652         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1653     }
1654     if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
1655         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1656     }
1657     cpl = env->hflags & HF_CPL_MASK;
1658     LOG_PCALL("desc=%08x:%08x\n", e1, e2);
1659 
1660     sa.env = env;
1661     sa.ra = GETPC();
1662 
1663     if (e2 & DESC_S_MASK) {
1664         /* "normal" far call, no stack switch possible */
1665         if (!(e2 & DESC_CS_MASK)) {
1666             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1667         }
1668         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1669         if (e2 & DESC_C_MASK) {
1670             /* conforming code segment */
1671             if (dpl > cpl) {
1672                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1673             }
1674         } else {
1675             /* non conforming code segment */
1676             rpl = new_cs & 3;
1677             if (rpl > cpl) {
1678                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1679             }
1680             if (dpl != cpl) {
1681                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1682             }
1683         }
1684         if (!(e2 & DESC_P_MASK)) {
1685             raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1686         }
1687 
1688         sa.mmu_index = x86_mmu_index_pl(env, cpl);
1689 #ifdef TARGET_X86_64
1690         /* XXX: check 16/32 bit cases in long mode */
1691         if (shift == 2) {
1692             /* 64 bit case */
1693             sa.sp = env->regs[R_ESP];
1694             sa.sp_mask = -1;
1695             sa.ss_base = 0;
1696             pushq(&sa, env->segs[R_CS].selector);
1697             pushq(&sa, next_eip);
1698             /* from this point, not restartable */
1699             env->regs[R_ESP] = sa.sp;
1700             cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1701                                    get_seg_base(e1, e2),
1702                                    get_seg_limit(e1, e2), e2);
1703             env->eip = new_eip;
1704         } else
1705 #endif
1706         {
1707             sa.sp = env->regs[R_ESP];
1708             sa.sp_mask = get_sp_mask(env->segs[R_SS].flags);
1709             sa.ss_base = env->segs[R_SS].base;
1710             if (shift) {
1711                 pushl(&sa, env->segs[R_CS].selector);
1712                 pushl(&sa, next_eip);
1713             } else {
1714                 pushw(&sa, env->segs[R_CS].selector);
1715                 pushw(&sa, next_eip);
1716             }
1717 
1718             limit = get_seg_limit(e1, e2);
1719             if (new_eip > limit) {
1720                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1721             }
1722             /* from this point, not restartable */
1723             SET_ESP(sa.sp, sa.sp_mask);
1724             cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1725                                    get_seg_base(e1, e2), limit, e2);
1726             env->eip = new_eip;
1727         }
1728     } else {
1729         /* check gate type */
1730         type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1731         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1732         rpl = new_cs & 3;
1733 
1734 #ifdef TARGET_X86_64
1735         if (env->efer & MSR_EFER_LMA) {
1736             if (type != 12) {
1737                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1738             }
1739         }
1740 #endif
1741 
1742         switch (type) {
1743         case 1: /* available 286 TSS */
1744         case 9: /* available 386 TSS */
1745         case 5: /* task gate */
1746             if (dpl < cpl || dpl < rpl) {
1747                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1748             }
1749             switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip, GETPC());
1750             return;
1751         case 4: /* 286 call gate */
1752         case 12: /* 386 call gate */
1753             break;
1754         default:
1755             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1756             break;
1757         }
1758         shift = type >> 3;
1759 
1760         if (dpl < cpl || dpl < rpl) {
1761             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1762         }
1763         /* check valid bit */
1764         if (!(e2 & DESC_P_MASK)) {
1765             raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1766         }
1767         selector = e1 >> 16;
1768         param_count = e2 & 0x1f;
1769         offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1770 #ifdef TARGET_X86_64
1771         if (env->efer & MSR_EFER_LMA) {
1772             /* load the upper 8 bytes of the 64-bit call gate */
1773             if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) {
1774                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1775                                        GETPC());
1776             }
1777             type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1778             if (type != 0) {
1779                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1780                                        GETPC());
1781             }
1782             offset |= ((target_ulong)e1) << 32;
1783         }
1784 #endif
1785         if ((selector & 0xfffc) == 0) {
1786             raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1787         }
1788 
1789         if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
1790             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1791         }
1792         if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
1793             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1794         }
1795         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1796         if (dpl > cpl) {
1797             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1798         }
1799 #ifdef TARGET_X86_64
1800         if (env->efer & MSR_EFER_LMA) {
1801             if (!(e2 & DESC_L_MASK)) {
1802                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1803             }
1804             if (e2 & DESC_B_MASK) {
1805                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1806             }
1807             shift++;
1808         }
1809 #endif
1810         if (!(e2 & DESC_P_MASK)) {
1811             raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1812         }
1813 
1814         if (!(e2 & DESC_C_MASK) && dpl < cpl) {
1815             /* to inner privilege */
1816             sa.mmu_index = x86_mmu_index_pl(env, dpl);
1817 #ifdef TARGET_X86_64
1818             if (shift == 2) {
1819                 ss = dpl;  /* SS = NULL selector with RPL = new CPL */
1820                 new_stack = 1;
1821                 sa.sp = get_rsp_from_tss(env, dpl);
1822                 sa.sp_mask = -1;
1823                 sa.ss_base = 0;  /* SS base is always zero in IA-32e mode */
1824                 LOG_PCALL("new ss:rsp=%04x:%016llx env->regs[R_ESP]="
1825                           TARGET_FMT_lx "\n", ss, sa.sp, env->regs[R_ESP]);
1826             } else
1827 #endif
1828             {
1829                 uint32_t sp32;
1830                 get_ss_esp_from_tss(env, &ss, &sp32, dpl, GETPC());
1831                 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
1832                           TARGET_FMT_lx "\n", ss, sp32, param_count,
1833                           env->regs[R_ESP]);
1834                 if ((ss & 0xfffc) == 0) {
1835                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1836                 }
1837                 if ((ss & 3) != dpl) {
1838                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1839                 }
1840                 if (load_segment_ra(env, &ss_e1, &ss_e2, ss, GETPC()) != 0) {
1841                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1842                 }
1843                 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
1844                 if (ss_dpl != dpl) {
1845                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1846                 }
1847                 if (!(ss_e2 & DESC_S_MASK) ||
1848                     (ss_e2 & DESC_CS_MASK) ||
1849                     !(ss_e2 & DESC_W_MASK)) {
1850                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1851                 }
1852                 if (!(ss_e2 & DESC_P_MASK)) {
1853                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1854                 }
1855 
1856                 sa.sp = sp32;
1857                 sa.sp_mask = get_sp_mask(ss_e2);
1858                 sa.ss_base = get_seg_base(ss_e1, ss_e2);
1859             }
1860 
1861             /* push_size = ((param_count * 2) + 8) << shift; */
1862             old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
1863             old_ssp = env->segs[R_SS].base;
1864 
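            /*
             * Build the inner-stack frame: push the outer SS:ESP first,
             * then copy 'param_count' parameters from the outer stack
             * (dwords for a 386 gate, words for a 286 gate).  64-bit
             * call gates carry no parameter count, so only SS:RSP is
             * pushed there.
             */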
1865 #ifdef TARGET_X86_64
1866             if (shift == 2) {
1867                 /* XXX: verify if new stack address is canonical */
1868                 pushq(&sa, env->segs[R_SS].selector);
1869                 pushq(&sa, env->regs[R_ESP]);
1870                 /* parameters aren't supported for 64-bit call gates */
1871             } else
1872 #endif
1873             if (shift == 1) {
1874                 pushl(&sa, env->segs[R_SS].selector);
1875                 pushl(&sa, env->regs[R_ESP]);
1876                 for (i = param_count - 1; i >= 0; i--) {
1877                     val = cpu_ldl_data_ra(env,
1878                                           old_ssp + ((env->regs[R_ESP] + i * 4) & old_sp_mask),
1879                                           GETPC());
1880                     pushl(&sa, val);
1881                 }
1882             } else {
1883                 pushw(&sa, env->segs[R_SS].selector);
1884                 pushw(&sa, env->regs[R_ESP]);
1885                 for (i = param_count - 1; i >= 0; i--) {
1886                     val = cpu_lduw_data_ra(env,
1887                                            old_ssp + ((env->regs[R_ESP] + i * 2) & old_sp_mask),
1888                                            GETPC());
1889                     pushw(&sa, val);
1890                 }
1891             }
1892             new_stack = 1;
1893         } else {
1894             /* to same privilege */
1895             sa.mmu_index = x86_mmu_index_pl(env, cpl);
1896             sa.sp = env->regs[R_ESP];
1897             sa.sp_mask = get_sp_mask(env->segs[R_SS].flags);
1898             sa.ss_base = env->segs[R_SS].base;
1899             /* push_size = (4 << shift); */
1900             new_stack = 0;
1901         }
1902 
1903 #ifdef TARGET_X86_64
1904         if (shift == 2) {
1905             pushq(&sa, env->segs[R_CS].selector);
1906             pushq(&sa, next_eip);
1907         } else
1908 #endif
1909         if (shift == 1) {
1910             pushl(&sa, env->segs[R_CS].selector);
1911             pushl(&sa, next_eip);
1912         } else {
1913             pushw(&sa, env->segs[R_CS].selector);
1914             pushw(&sa, next_eip);
1915         }
1916 
1917         /* from this point, not restartable */
1918 
1919         if (new_stack) {
1920 #ifdef TARGET_X86_64
1921             if (shift == 2) {
1922                 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
1923             } else
1924 #endif
1925             {
1926                 ss = (ss & ~3) | dpl;
1927                 cpu_x86_load_seg_cache(env, R_SS, ss,
1928                                        sa.ss_base,
1929                                        get_seg_limit(ss_e1, ss_e2),
1930                                        ss_e2);
1931             }
1932         }
1933 
1934         selector = (selector & ~3) | dpl;
1935         cpu_x86_load_seg_cache(env, R_CS, selector,
1936                        get_seg_base(e1, e2),
1937                        get_seg_limit(e1, e2),
1938                        e2);
1939         SET_ESP(sa.sp, sa.sp_mask);
1940         env->eip = offset;
1941     }
1942 }
1943 
1944 /* real and vm86 mode iret */
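/*
 * Pop EIP, CS and EFLAGS (16- or 32-bit wide depending on 'shift') and
 * reload CS as a real mode selector.  In vm86 mode IOPL is not writable
 * from here, so it is left out of the EFLAGS update mask below.
 */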
1945 void helper_iret_real(CPUX86State *env, int shift)
1946 {
1947     uint32_t new_cs, new_eip, new_eflags;
1948     int eflags_mask;
1949     StackAccess sa;
1950 
1951     sa.env = env;
1952     sa.ra = GETPC();
1953     sa.mmu_index = x86_mmu_index_pl(env, 0);
1954     sa.sp_mask = 0xffff; /* XXX: use SS segment size? */
1955     sa.sp = env->regs[R_ESP];
1956     sa.ss_base = env->segs[R_SS].base;
1957 
1958     if (shift == 1) {
1959         /* 32 bits */
1960         new_eip = popl(&sa);
1961         new_cs = popl(&sa) & 0xffff;
1962         new_eflags = popl(&sa);
1963     } else {
1964         /* 16 bits */
1965         new_eip = popw(&sa);
1966         new_cs = popw(&sa);
1967         new_eflags = popw(&sa);
1968     }
1969     SET_ESP(sa.sp, sa.sp_mask);
1970     env->segs[R_CS].selector = new_cs;
1971     env->segs[R_CS].base = (new_cs << 4);
1972     env->eip = new_eip;
1973     if (env->eflags & VM_MASK) {
1974         eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
1975             NT_MASK;
1976     } else {
1977         eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
1978             RF_MASK | NT_MASK;
1979     }
1980     if (shift == 0) {
1981         eflags_mask &= 0xffff;
1982     }
1983     cpu_load_eflags(env, new_eflags, eflags_mask);
1984     env->hflags2 &= ~HF2_NMI_MASK;
1985 }
1986 
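/*
 * On return to an outer privilege level, data segment registers still
 * holding a descriptor that the new CPL may not access are nulled: the
 * selector is cleared and the cached P bit dropped so a later use
 * faults.
 */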
1987 static inline void validate_seg(CPUX86State *env, X86Seg seg_reg, int cpl)
1988 {
1989     int dpl;
1990     uint32_t e2;
1991 
1992     /* XXX: on x86_64, FS and GS are not nullified because they may
1993        still hold a valid base; how a real x86_64 CPU behaves here
1994        has not been verified */
1995     if ((seg_reg == R_FS || seg_reg == R_GS) &&
1996         (env->segs[seg_reg].selector & 0xfffc) == 0) {
1997         return;
1998     }
1999 
2000     e2 = env->segs[seg_reg].flags;
2001     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2002     if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2003         /* data or non conforming code segment */
2004         if (dpl < cpl) {
2005             cpu_x86_load_seg_cache(env, seg_reg, 0,
2006                                    env->segs[seg_reg].base,
2007                                    env->segs[seg_reg].limit,
2008                                    env->segs[seg_reg].flags & ~DESC_P_MASK);
2009         }
2010     }
2011 }
2012 
2013 /* protected mode iret */
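/*
 * Common tail for far RET and IRET.  'shift' is the operand size
 * (0 = 16, 1 = 32, 2 = 64 bit), 'addend' is the immediate of RET imm16,
 * and 'is_iret' selects whether EFLAGS (and, on a return to vm86, the
 * extra segment registers) are popped as well.
 */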
2014 static inline void helper_ret_protected(CPUX86State *env, int shift,
2015                                         int is_iret, int addend,
2016                                         uintptr_t retaddr)
2017 {
2018     uint32_t new_cs, new_eflags, new_ss;
2019     uint32_t new_es, new_ds, new_fs, new_gs;
2020     uint32_t e1, e2, ss_e1, ss_e2;
2021     int cpl, dpl, rpl, eflags_mask, iopl;
2022     target_ulong new_eip, new_esp;
2023     StackAccess sa;
2024 
2025     cpl = env->hflags & HF_CPL_MASK;
2026 
2027     sa.env = env;
2028     sa.ra = retaddr;
2029     sa.mmu_index = x86_mmu_index_pl(env, cpl);
2030 
2031 #ifdef TARGET_X86_64
2032     if (shift == 2) {
2033         sa.sp_mask = -1;
2034     } else
2035 #endif
2036     {
2037         sa.sp_mask = get_sp_mask(env->segs[R_SS].flags);
2038     }
2039     sa.sp = env->regs[R_ESP];
2040     sa.ss_base = env->segs[R_SS].base;
2041     new_eflags = 0; /* avoid warning */
2042 #ifdef TARGET_X86_64
2043     if (shift == 2) {
2044         new_eip = popq(&sa);
2045         new_cs = popq(&sa) & 0xffff;
2046         if (is_iret) {
2047             new_eflags = popq(&sa);
2048         }
2049     } else
2050 #endif
2051     {
2052         if (shift == 1) {
2053             /* 32 bits */
2054             new_eip = popl(&sa);
2055             new_cs = popl(&sa) & 0xffff;
2056             if (is_iret) {
2057                 new_eflags = popl(&sa);
2058                 if (new_eflags & VM_MASK) {
2059                     goto return_to_vm86;
2060                 }
2061             }
2062         } else {
2063             /* 16 bits */
2064             new_eip = popw(&sa);
2065             new_cs = popw(&sa);
2066             if (is_iret) {
2067                 new_eflags = popw(&sa);
2068             }
2069         }
2070     }
2071     LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2072               new_cs, new_eip, shift, addend);
2073     LOG_PCALL_STATE(env_cpu(env));
2074     if ((new_cs & 0xfffc) == 0) {
2075         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2076     }
2077     if (load_segment_ra(env, &e1, &e2, new_cs, retaddr) != 0) {
2078         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2079     }
2080     if (!(e2 & DESC_S_MASK) ||
2081         !(e2 & DESC_CS_MASK)) {
2082         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2083     }
2084     rpl = new_cs & 3;
2085     if (rpl < cpl) {
2086         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2087     }
2088     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2089     if (e2 & DESC_C_MASK) {
2090         if (dpl > rpl) {
2091             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2092         }
2093     } else {
2094         if (dpl != rpl) {
2095             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2096         }
2097     }
2098     if (!(e2 & DESC_P_MASK)) {
2099         raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, retaddr);
2100     }
2101 
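    /*
     * Release the RET imm16 bytes from the called procedure's stack,
     * then decide between a same-privilege and an outer-privilege
     * return.  In 64-bit mode IRET always pops SS:RSP, so it always
     * takes the second path even when RPL == CPL.
     */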
2102     sa.sp += addend;
2103     if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2104                        ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2105         /* return to same privilege level */
2106         cpu_x86_load_seg_cache(env, R_CS, new_cs,
2107                        get_seg_base(e1, e2),
2108                        get_seg_limit(e1, e2),
2109                        e2);
2110     } else {
2111         /* return to different privilege level */
2112 #ifdef TARGET_X86_64
2113         if (shift == 2) {
2114             new_esp = popq(&sa);
2115             new_ss = popq(&sa) & 0xffff;
2116         } else
2117 #endif
2118         {
2119             if (shift == 1) {
2120                 /* 32 bits */
2121                 new_esp = popl(&sa);
2122                 new_ss = popl(&sa) & 0xffff;
2123             } else {
2124                 /* 16 bits */
2125                 new_esp = popw(&sa);
2126                 new_ss = popw(&sa);
2127             }
2128         }
2129         LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
2130                   new_ss, new_esp);
2131         if ((new_ss & 0xfffc) == 0) {
2132 #ifdef TARGET_X86_64
2133             /* NULL ss is allowed in long mode if cpl != 3 */
2134             /* XXX: test CS64? */
2135             if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2136                 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2137                                        0, 0xffffffff,
2138                                        DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2139                                        DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2140                                        DESC_W_MASK | DESC_A_MASK);
2141                 ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
2142             } else
2143 #endif
2144             {
2145                 raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
2146             }
2147         } else {
2148             if ((new_ss & 3) != rpl) {
2149                 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2150             }
2151             if (load_segment_ra(env, &ss_e1, &ss_e2, new_ss, retaddr) != 0) {
2152                 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2153             }
2154             if (!(ss_e2 & DESC_S_MASK) ||
2155                 (ss_e2 & DESC_CS_MASK) ||
2156                 !(ss_e2 & DESC_W_MASK)) {
2157                 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2158             }
2159             dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2160             if (dpl != rpl) {
2161                 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2162             }
2163             if (!(ss_e2 & DESC_P_MASK)) {
2164                 raise_exception_err_ra(env, EXCP0B_NOSEG, new_ss & 0xfffc, retaddr);
2165             }
2166             cpu_x86_load_seg_cache(env, R_SS, new_ss,
2167                                    get_seg_base(ss_e1, ss_e2),
2168                                    get_seg_limit(ss_e1, ss_e2),
2169                                    ss_e2);
2170         }
2171 
2172         cpu_x86_load_seg_cache(env, R_CS, new_cs,
2173                        get_seg_base(e1, e2),
2174                        get_seg_limit(e1, e2),
2175                        e2);
2176         sa.sp = new_esp;
2177 #ifdef TARGET_X86_64
2178         if (env->hflags & HF_CS64_MASK) {
2179             sa.sp_mask = -1;
2180         } else
2181 #endif
2182         {
2183             sa.sp_mask = get_sp_mask(ss_e2);
2184         }
2185 
2186         /* validate data segments */
2187         validate_seg(env, R_ES, rpl);
2188         validate_seg(env, R_DS, rpl);
2189         validate_seg(env, R_FS, rpl);
2190         validate_seg(env, R_GS, rpl);
2191 
2192         sa.sp += addend;
2193     }
2194     SET_ESP(sa.sp, sa.sp_mask);
2195     env->eip = new_eip;
2196     if (is_iret) {
2197         /* NOTE: 'cpl' is the _old_ CPL */
2198         eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2199         if (cpl == 0) {
2200             eflags_mask |= IOPL_MASK;
2201         }
2202         iopl = (env->eflags >> IOPL_SHIFT) & 3;
2203         if (cpl <= iopl) {
2204             eflags_mask |= IF_MASK;
2205         }
2206         if (shift == 0) {
2207             eflags_mask &= 0xffff;
2208         }
2209         cpu_load_eflags(env, new_eflags, eflags_mask);
2210     }
2211     return;
2212 
2213  return_to_vm86:
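    /*
     * IRET back to vm86: the 32-bit frame additionally holds ESP, SS,
     * ES, DS, FS and GS, all reloaded as vm86 (real mode style)
     * selectors once EFLAGS, including VM, has been restored.
     */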
2214     new_esp = popl(&sa);
2215     new_ss = popl(&sa);
2216     new_es = popl(&sa);
2217     new_ds = popl(&sa);
2218     new_fs = popl(&sa);
2219     new_gs = popl(&sa);
2220 
2221     /* modify processor state */
2222     cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
2223                     IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
2224                     VIP_MASK);
2225     load_seg_vm(env, R_CS, new_cs & 0xffff);
2226     load_seg_vm(env, R_SS, new_ss & 0xffff);
2227     load_seg_vm(env, R_ES, new_es & 0xffff);
2228     load_seg_vm(env, R_DS, new_ds & 0xffff);
2229     load_seg_vm(env, R_FS, new_fs & 0xffff);
2230     load_seg_vm(env, R_GS, new_gs & 0xffff);
2231 
2232     env->eip = new_eip & 0xffff;
2233     env->regs[R_ESP] = new_esp;
2234 }
2235 
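/*
 * Protected mode IRET: if NT is set, return via a task switch through
 * the back link stored in the current TSS (not allowed in long mode);
 * otherwise perform a normal privilege-checked return with EFLAGS
 * restore.
 */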
2236 void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
2237 {
2238     int tss_selector, type;
2239     uint32_t e1, e2;
2240 
2241     /* specific case for TSS */
2242     if (env->eflags & NT_MASK) {
2243 #ifdef TARGET_X86_64
2244         if (env->hflags & HF_LMA_MASK) {
2245             raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2246         }
2247 #endif
2248         tss_selector = cpu_lduw_kernel_ra(env, env->tr.base + 0, GETPC());
2249         if (tss_selector & 4) {
2250             raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2251         }
2252         if (load_segment_ra(env, &e1, &e2, tss_selector, GETPC()) != 0) {
2253             raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2254         }
2255         type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2256         /* NOTE: checks at once that this is a system segment and a busy TSS */
2257         if (type != 3) {
2258             raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2259         }
2260         switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip, GETPC());
2261     } else {
2262         helper_ret_protected(env, shift, 1, 0, GETPC());
2263     }
2264     env->hflags2 &= ~HF2_NMI_MASK;
2265 }
2266 
2267 void helper_lret_protected(CPUX86State *env, int shift, int addend)
2268 {
2269     helper_ret_protected(env, shift, 0, addend, GETPC());
2270 }
2271 
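/*
 * SYSENTER: load flat CS/SS derived from the SYSENTER_CS MSR
 * (CS = cs, SS = cs + 8) at CPL 0, take EIP/ESP from the
 * SYSENTER_EIP/ESP MSRs, and clear VM, IF and RF on entry.
 */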
2272 void helper_sysenter(CPUX86State *env)
2273 {
2274     if (env->sysenter_cs == 0) {
2275         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2276     }
2277     env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2278 
2279 #ifdef TARGET_X86_64
2280     if (env->hflags & HF_LMA_MASK) {
2281         cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2282                                0, 0xffffffff,
2283                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2284                                DESC_S_MASK |
2285                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2286                                DESC_L_MASK);
2287     } else
2288 #endif
2289     {
2290         cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2291                                0, 0xffffffff,
2292                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2293                                DESC_S_MASK |
2294                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2295     }
2296     cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2297                            0, 0xffffffff,
2298                            DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2299                            DESC_S_MASK |
2300                            DESC_W_MASK | DESC_A_MASK);
2301     env->regs[R_ESP] = env->sysenter_esp;
2302     env->eip = env->sysenter_eip;
2303 }
2304 
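/*
 * SYSEXIT: only legal at CPL 0; return to CPL 3 with flat CS/SS derived
 * from SYSENTER_CS (+16/+24 for a 32-bit exit, +32/+40 for a 64-bit
 * exit) and EIP/ESP taken from EDX/ECX.  EFLAGS is left unchanged.
 */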
2305 void helper_sysexit(CPUX86State *env, int dflag)
2306 {
2307     int cpl;
2308 
2309     cpl = env->hflags & HF_CPL_MASK;
2310     if (env->sysenter_cs == 0 || cpl != 0) {
2311         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2312     }
2313 #ifdef TARGET_X86_64
2314     if (dflag == 2) {
2315         cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
2316                                3, 0, 0xffffffff,
2317                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2318                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2319                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2320                                DESC_L_MASK);
2321         cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
2322                                3, 0, 0xffffffff,
2323                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2324                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2325                                DESC_W_MASK | DESC_A_MASK);
2326     } else
2327 #endif
2328     {
2329         cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
2330                                3, 0, 0xffffffff,
2331                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2332                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2333                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2334         cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
2335                                3, 0, 0xffffffff,
2336                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2337                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2338                                DESC_W_MASK | DESC_A_MASK);
2339     }
2340     env->regs[R_ESP] = env->regs[R_ECX];
2341     env->eip = env->regs[R_EDX];
2342 }
2343 
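/*
 * LSL: return the expanded segment limit if the selector passes the
 * type and privilege checks, reporting success or failure through ZF.
 */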
2344 target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
2345 {
2346     unsigned int limit;
2347     uint32_t e1, e2, selector;
2348     int rpl, dpl, cpl, type;
2349 
2350     selector = selector1 & 0xffff;
2351     assert(CC_OP == CC_OP_EFLAGS);
2352     if ((selector & 0xfffc) == 0) {
2353         goto fail;
2354     }
2355     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2356         goto fail;
2357     }
2358     rpl = selector & 3;
2359     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2360     cpl = env->hflags & HF_CPL_MASK;
2361     if (e2 & DESC_S_MASK) {
2362         if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2363             /* conforming */
2364         } else {
2365             if (dpl < cpl || dpl < rpl) {
2366                 goto fail;
2367             }
2368         }
2369     } else {
2370         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2371         switch (type) {
2372         case 1:
2373         case 2:
2374         case 3:
2375         case 9:
2376         case 11:
2377             break;
2378         default:
2379             goto fail;
2380         }
2381         if (dpl < cpl || dpl < rpl) {
2382         fail:
2383             CC_SRC &= ~CC_Z;
2384             return 0;
2385         }
2386     }
2387     limit = get_seg_limit(e1, e2);
2388     CC_SRC |= CC_Z;
2389     return limit;
2390 }
2391 
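/*
 * LAR: like LSL above, but on success return the access-rights bytes
 * of the descriptor (masked to 0x00f0ff00) instead of the limit, and
 * accept a few additional system descriptor types.
 */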
2392 target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
2393 {
2394     uint32_t e1, e2, selector;
2395     int rpl, dpl, cpl, type;
2396 
2397     selector = selector1 & 0xffff;
2398     assert(CC_OP == CC_OP_EFLAGS);
2399     if ((selector & 0xfffc) == 0) {
2400         goto fail;
2401     }
2402     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2403         goto fail;
2404     }
2405     rpl = selector & 3;
2406     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2407     cpl = env->hflags & HF_CPL_MASK;
2408     if (e2 & DESC_S_MASK) {
2409         if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2410             /* conforming */
2411         } else {
2412             if (dpl < cpl || dpl < rpl) {
2413                 goto fail;
2414             }
2415         }
2416     } else {
2417         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2418         switch (type) {
2419         case 1:
2420         case 2:
2421         case 3:
2422         case 4:
2423         case 5:
2424         case 9:
2425         case 11:
2426         case 12:
2427             break;
2428         default:
2429             goto fail;
2430         }
2431         if (dpl < cpl || dpl < rpl) {
2432         fail:
2433             CC_SRC &= ~CC_Z;
2434             return 0;
2435         }
2436     }
2437     CC_SRC |= CC_Z;
2438     return e2 & 0x00f0ff00;
2439 }
2440 
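/*
 * VERR: set ZF if the segment named by the selector would be readable
 * at the current CPL/RPL, clear it otherwise.
 */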
2441 void helper_verr(CPUX86State *env, target_ulong selector1)
2442 {
2443     uint32_t e1, e2, eflags, selector;
2444     int rpl, dpl, cpl;
2445 
2446     selector = selector1 & 0xffff;
2447     eflags = cpu_cc_compute_all(env) | CC_Z;
2448     if ((selector & 0xfffc) == 0) {
2449         goto fail;
2450     }
2451     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2452         goto fail;
2453     }
2454     if (!(e2 & DESC_S_MASK)) {
2455         goto fail;
2456     }
2457     rpl = selector & 3;
2458     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2459     cpl = env->hflags & HF_CPL_MASK;
2460     if (e2 & DESC_CS_MASK) {
2461         if (!(e2 & DESC_R_MASK)) {
2462             goto fail;
2463         }
2464         if (!(e2 & DESC_C_MASK)) {
2465             if (dpl < cpl || dpl < rpl) {
2466                 goto fail;
2467             }
2468         }
2469     } else {
2470         if (dpl < cpl || dpl < rpl) {
2471         fail:
2472             eflags &= ~CC_Z;
2473         }
2474     }
2475     CC_SRC = eflags;
2476     CC_OP = CC_OP_EFLAGS;
2477 }
2478 
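/*
 * VERW: like VERR above but test for writability, which also rejects
 * any code segment.
 */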
2479 void helper_verw(CPUX86State *env, target_ulong selector1)
2480 {
2481     uint32_t e1, e2, eflags, selector;
2482     int rpl, dpl, cpl;
2483 
2484     selector = selector1 & 0xffff;
2485     eflags = cpu_cc_compute_all(env) | CC_Z;
2486     if ((selector & 0xfffc) == 0) {
2487         goto fail;
2488     }
2489     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2490         goto fail;
2491     }
2492     if (!(e2 & DESC_S_MASK)) {
2493         goto fail;
2494     }
2495     rpl = selector & 3;
2496     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2497     cpl = env->hflags & HF_CPL_MASK;
2498     if (e2 & DESC_CS_MASK) {
2499         goto fail;
2500     } else {
2501         if (dpl < cpl || dpl < rpl) {
2502             goto fail;
2503         }
2504         if (!(e2 & DESC_W_MASK)) {
2505         fail:
2506             eflags &= ~CC_Z;
2507         }
2508     }
2509     CC_SRC = eflags;
2510     CC_OP = CC_OP_EFLAGS;
2511 }
2512