xref: /openbmc/qemu/target/i386/tcg/seg_helper.c (revision f7ff24a6)
1 /*
2  *  x86 segmentation related helpers:
3  *  TSS, interrupts, system calls, jumps and call/task gates, descriptors
4  *
5  *  Copyright (c) 2003 Fabrice Bellard
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "qemu/log.h"
24 #include "exec/helper-proto.h"
25 #include "exec/exec-all.h"
26 #include "exec/cpu_ldst.h"
27 #include "exec/log.h"
28 #include "helper-tcg.h"
29 #include "seg_helper.h"
30 #include "access.h"
31 
32 #ifdef TARGET_X86_64
33 #define SET_ESP(val, sp_mask)                                   \
34     do {                                                        \
35         if ((sp_mask) == 0xffff) {                              \
36             env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) |   \
37                 ((val) & 0xffff);                               \
38         } else if ((sp_mask) == 0xffffffffLL) {                 \
39             env->regs[R_ESP] = (uint32_t)(val);                 \
40         } else {                                                \
41             env->regs[R_ESP] = (val);                           \
42         }                                                       \
43     } while (0)
44 #else
45 #define SET_ESP(val, sp_mask)                                   \
46     do {                                                        \
47         env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) |    \
48             ((val) & (sp_mask));                                \
49     } while (0)
50 #endif
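/*
 * SET_ESP updates only the bits of ESP/RSP selected by sp_mask, matching
 * the behaviour of a 16-bit or 32-bit stack segment that leaves the upper
 * bits of the stack pointer untouched.  On 64-bit targets the common masks
 * are special-cased: 0xffffffff zero-extends into the full RSP, and any
 * other mask (e.g. 0 or -1 as used in long mode) stores the whole value.
 */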
51 
52 /* XXX: use mmu_index to have proper DPL support */
53 typedef struct StackAccess
54 {
55     CPUX86State *env;
56     uintptr_t ra;
57     target_ulong ss_base;
58     target_ulong sp;
59     target_ulong sp_mask;
60     int mmu_index;
61 } StackAccess;
62 
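/*
 * Stack push/pop helpers built on StackAccess.  The stack pointer is
 * masked with sp_mask before being added to ss_base, so 16-bit stacks
 * wrap within 64 KiB; sa->sp itself is updated without masking and is
 * written back by the caller with SET_ESP.
 */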
63 static void pushw(StackAccess *sa, uint16_t val)
64 {
65     sa->sp -= 2;
66     cpu_stw_mmuidx_ra(sa->env, sa->ss_base + (sa->sp & sa->sp_mask),
67                       val, sa->mmu_index, sa->ra);
68 }
69 
70 static void pushl(StackAccess *sa, uint32_t val)
71 {
72     sa->sp -= 4;
73     cpu_stl_mmuidx_ra(sa->env, sa->ss_base + (sa->sp & sa->sp_mask),
74                       val, sa->mmu_index, sa->ra);
75 }
76 
77 static uint16_t popw(StackAccess *sa)
78 {
79     uint16_t ret = cpu_lduw_mmuidx_ra(sa->env,
80                                       sa->ss_base + (sa->sp & sa->sp_mask),
81                                       sa->mmu_index, sa->ra);
82     sa->sp += 2;
83     return ret;
84 }
85 
86 static uint32_t popl(StackAccess *sa)
87 {
88     uint32_t ret = cpu_ldl_mmuidx_ra(sa->env,
89                                      sa->ss_base + (sa->sp & sa->sp_mask),
90                                      sa->mmu_index, sa->ra);
91     sa->sp += 4;
92     return ret;
93 }
94 
95 int get_pg_mode(CPUX86State *env)
96 {
97     int pg_mode = PG_MODE_PG;
98     if (!(env->cr[0] & CR0_PG_MASK)) {
99         return 0;
100     }
101     if (env->cr[0] & CR0_WP_MASK) {
102         pg_mode |= PG_MODE_WP;
103     }
104     if (env->cr[4] & CR4_PAE_MASK) {
105         pg_mode |= PG_MODE_PAE;
106         if (env->efer & MSR_EFER_NXE) {
107             pg_mode |= PG_MODE_NXE;
108         }
109     }
110     if (env->cr[4] & CR4_PSE_MASK) {
111         pg_mode |= PG_MODE_PSE;
112     }
113     if (env->cr[4] & CR4_SMEP_MASK) {
114         pg_mode |= PG_MODE_SMEP;
115     }
116     if (env->hflags & HF_LMA_MASK) {
117         pg_mode |= PG_MODE_LMA;
118         if (env->cr[4] & CR4_PKE_MASK) {
119             pg_mode |= PG_MODE_PKE;
120         }
121         if (env->cr[4] & CR4_PKS_MASK) {
122             pg_mode |= PG_MODE_PKS;
123         }
124         if (env->cr[4] & CR4_LA57_MASK) {
125             pg_mode |= PG_MODE_LA57;
126         }
127     }
128     return pg_mode;
129 }
130 
131 /* return non-zero on error */
132 static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr,
133                                uint32_t *e2_ptr, int selector,
134                                uintptr_t retaddr)
135 {
136     SegmentCache *dt;
137     int index;
138     target_ulong ptr;
139 
140     if (selector & 0x4) {
141         dt = &env->ldt;
142     } else {
143         dt = &env->gdt;
144     }
145     index = selector & ~7;
146     if ((index + 7) > dt->limit) {
147         return -1;
148     }
149     ptr = dt->base + index;
150     *e1_ptr = cpu_ldl_kernel_ra(env, ptr, retaddr);
151     *e2_ptr = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
152     return 0;
153 }
154 
155 static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
156                                uint32_t *e2_ptr, int selector)
157 {
158     return load_segment_ra(env, e1_ptr, e2_ptr, selector, 0);
159 }
160 
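/*
 * Helpers to decode a descriptor from its two 32-bit words: e1 holds
 * limit bits 15..0 and base bits 15..0, while e2 holds base bits 23..16
 * and 31..24, the flags, and limit bits 19..16.  When the granularity
 * bit is set the limit is scaled to 4 KiB units.
 */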
161 static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
162 {
163     unsigned int limit;
164 
165     limit = (e1 & 0xffff) | (e2 & 0x000f0000);
166     if (e2 & DESC_G_MASK) {
167         limit = (limit << 12) | 0xfff;
168     }
169     return limit;
170 }
171 
172 static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
173 {
174     return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
175 }
176 
177 static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
178                                          uint32_t e2)
179 {
180     sc->base = get_seg_base(e1, e2);
181     sc->limit = get_seg_limit(e1, e2);
182     sc->flags = e2;
183 }
184 
185 /* init the segment cache in vm86 mode. */
186 static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
187 {
188     selector &= 0xffff;
189 
190     cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff,
191                            DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
192                            DESC_A_MASK | (3 << DESC_DPL_SHIFT));
193 }
194 
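/*
 * Fetch the inner-level SS:ESP pair for privilege level 'dpl' from the
 * current TSS.  For a 32-bit TSS (shift == 1) the pair lives at offset
 * 4 + dpl * 8, for a 16-bit TSS (shift == 0) at offset 2 + dpl * 4,
 * which is what the (dpl * 4 + 2) << shift computation below produces.
 */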
195 static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
196                                        uint32_t *esp_ptr, int dpl,
197                                        uintptr_t retaddr)
198 {
199     X86CPU *cpu = env_archcpu(env);
200     int type, index, shift;
201 
202 #if 0
203     {
204         int i;
205         printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
206         for (i = 0; i < env->tr.limit; i++) {
207             printf("%02x ", env->tr.base[i]);
208             if ((i & 7) == 7) {
209                 printf("\n");
210             }
211         }
212         printf("\n");
213     }
214 #endif
215 
216     if (!(env->tr.flags & DESC_P_MASK)) {
217         cpu_abort(CPU(cpu), "invalid tss");
218     }
219     type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
220     if ((type & 7) != 1) {
221         cpu_abort(CPU(cpu), "invalid tss type");
222     }
223     shift = type >> 3;
224     index = (dpl * 4 + 2) << shift;
225     if (index + (4 << shift) - 1 > env->tr.limit) {
226         raise_exception_err_ra(env, EXCP0A_TSS, env->tr.selector & 0xfffc, retaddr);
227     }
228     if (shift == 0) {
229         *esp_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index, retaddr);
230         *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 2, retaddr);
231     } else {
232         *esp_ptr = cpu_ldl_kernel_ra(env, env->tr.base + index, retaddr);
233         *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 4, retaddr);
234     }
235 }
236 
237 static void tss_load_seg(CPUX86State *env, X86Seg seg_reg, int selector,
238                          int cpl, uintptr_t retaddr)
239 {
240     uint32_t e1, e2;
241     int rpl, dpl;
242 
243     if ((selector & 0xfffc) != 0) {
244         if (load_segment_ra(env, &e1, &e2, selector, retaddr) != 0) {
245             raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
246         }
247         if (!(e2 & DESC_S_MASK)) {
248             raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
249         }
250         rpl = selector & 3;
251         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
252         if (seg_reg == R_CS) {
253             if (!(e2 & DESC_CS_MASK)) {
254                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
255             }
256             if (dpl != rpl) {
257                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
258             }
259         } else if (seg_reg == R_SS) {
260             /* SS must be writable data */
261             if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
262                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
263             }
264             if (dpl != cpl || dpl != rpl) {
265                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
266             }
267         } else {
268             /* not readable code */
269             if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
270                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
271             }
272             /* if data or non-conforming code, check the rights */
273             if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
274                 if (dpl < cpl || dpl < rpl) {
275                     raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
276                 }
277             }
278         }
279         if (!(e2 & DESC_P_MASK)) {
280             raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, retaddr);
281         }
282         cpu_x86_load_seg_cache(env, seg_reg, selector,
283                                get_seg_base(e1, e2),
284                                get_seg_limit(e1, e2),
285                                e2);
286     } else {
287         if (seg_reg == R_SS || seg_reg == R_CS) {
288             raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
289         }
290     }
291 }
292 
293 static void tss_set_busy(CPUX86State *env, int tss_selector, bool value,
294                          uintptr_t retaddr)
295 {
296     target_ulong ptr = env->gdt.base + (tss_selector & ~7);
297     uint32_t e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
298 
299     if (value) {
300         e2 |= DESC_TSS_BUSY_MASK;
301     } else {
302         e2 &= ~DESC_TSS_BUSY_MASK;
303     }
304 
305     cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
306 }
307 
308 #define SWITCH_TSS_JMP  0
309 #define SWITCH_TSS_IRET 1
310 #define SWITCH_TSS_CALL 2
311 
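/*
 * Hardware task switch.  The overall sequence implemented below is:
 * check the new TSS descriptor and its limit, save the outgoing state
 * into the current TSS, update the busy bits and the back link / NT flag
 * depending on whether the switch comes from JMP, CALL or IRET, then
 * load CR3, EIP, EFLAGS, the general registers, the LDT and finally the
 * segment registers from the new TSS.
 */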
312 /* return 0 if switching to a 16-bit TSS */
313 static int switch_tss_ra(CPUX86State *env, int tss_selector,
314                          uint32_t e1, uint32_t e2, int source,
315                          uint32_t next_eip, uintptr_t retaddr)
316 {
317     int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, i;
318     target_ulong tss_base;
319     uint32_t new_regs[8], new_segs[6];
320     uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
321     uint32_t old_eflags, eflags_mask;
322     SegmentCache *dt;
323     int mmu_index, index;
324     target_ulong ptr;
325     X86Access old, new;
326 
327     type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
328     LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
329               source);
330 
331     /* if it is a task gate, read the TSS descriptor it points to and load it */
332     if (type == 5) {
333         if (!(e2 & DESC_P_MASK)) {
334             raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
335         }
336         tss_selector = e1 >> 16;
337         if (tss_selector & 4) {
338             raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
339         }
340         if (load_segment_ra(env, &e1, &e2, tss_selector, retaddr) != 0) {
341             raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
342         }
343         if (e2 & DESC_S_MASK) {
344             raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
345         }
346         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
347         if ((type & 7) != 1) {
348             raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
349         }
350     }
351 
352     if (!(e2 & DESC_P_MASK)) {
353         raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
354     }
355 
356     if (type & 8) {
357         tss_limit_max = 103;
358     } else {
359         tss_limit_max = 43;
360     }
361     tss_limit = get_seg_limit(e1, e2);
362     tss_base = get_seg_base(e1, e2);
363     if ((tss_selector & 4) != 0 ||
364         tss_limit < tss_limit_max) {
365         raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
366     }
367     old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
368     if (old_type & 8) {
369         old_tss_limit_max = 103;
370     } else {
371         old_tss_limit_max = 43;
372     }
373 
374     /* new TSS must be busy iff the source is an IRET instruction  */
375     if (!!(e2 & DESC_TSS_BUSY_MASK) != (source == SWITCH_TSS_IRET)) {
376         raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
377     }
378 
379     /* X86Access avoids memory exceptions during the task switch */
380     mmu_index = cpu_mmu_index_kernel(env);
381     access_prepare_mmu(&old, env, env->tr.base, old_tss_limit_max + 1,
382                        MMU_DATA_STORE, mmu_index, retaddr);
383 
384     if (source == SWITCH_TSS_CALL) {
385         /* Probe for future write of parent task */
386         probe_access(env, tss_base, 2, MMU_DATA_STORE,
387                      mmu_index, retaddr);
388     }
389     /* While the true tss_limit may be larger, we don't access the iopb here. */
390     access_prepare_mmu(&new, env, tss_base, tss_limit_max + 1,
391                        MMU_DATA_LOAD, mmu_index, retaddr);
392 
393     /* save the current state in the old TSS */
394     old_eflags = cpu_compute_eflags(env);
395     if (old_type & 8) {
396         /* 32 bit */
397         access_stl(&old, env->tr.base + 0x20, next_eip);
398         access_stl(&old, env->tr.base + 0x24, old_eflags);
399         access_stl(&old, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX]);
400         access_stl(&old, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX]);
401         access_stl(&old, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX]);
402         access_stl(&old, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX]);
403         access_stl(&old, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP]);
404         access_stl(&old, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP]);
405         access_stl(&old, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI]);
406         access_stl(&old, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI]);
407         for (i = 0; i < 6; i++) {
408             access_stw(&old, env->tr.base + (0x48 + i * 4),
409                        env->segs[i].selector);
410         }
411     } else {
412         /* 16 bit */
413         access_stw(&old, env->tr.base + 0x0e, next_eip);
414         access_stw(&old, env->tr.base + 0x10, old_eflags);
415         access_stw(&old, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX]);
416         access_stw(&old, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX]);
417         access_stw(&old, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX]);
418         access_stw(&old, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX]);
419         access_stw(&old, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP]);
420         access_stw(&old, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP]);
421         access_stw(&old, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI]);
422         access_stw(&old, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI]);
423         for (i = 0; i < 4; i++) {
424             access_stw(&old, env->tr.base + (0x22 + i * 2),
425                        env->segs[i].selector);
426         }
427     }
428 
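    /*
     * The offsets used above and below follow the architectural TSS
     * layouts: in a 32-bit TSS, CR3 is at 0x1c, EIP at 0x20, EFLAGS at
     * 0x24, the general registers at 0x28..0x44, the segment selectors
     * at 0x48..0x5c, the LDT selector at 0x60 and the T flag / I/O map
     * base at 0x64; the 16-bit TSS uses the corresponding 2-byte fields
     * (IP at 0x0e, FLAGS at 0x10, registers at 0x12, selectors at 0x22,
     * LDT at 0x2a).
     */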
429     /* read all the registers from the new TSS */
430     if (type & 8) {
431         /* 32 bit */
432         new_cr3 = access_ldl(&new, tss_base + 0x1c);
433         new_eip = access_ldl(&new, tss_base + 0x20);
434         new_eflags = access_ldl(&new, tss_base + 0x24);
435         for (i = 0; i < 8; i++) {
436             new_regs[i] = access_ldl(&new, tss_base + (0x28 + i * 4));
437         }
438         for (i = 0; i < 6; i++) {
439             new_segs[i] = access_ldw(&new, tss_base + (0x48 + i * 4));
440         }
441         new_ldt = access_ldw(&new, tss_base + 0x60);
442         new_trap = access_ldl(&new, tss_base + 0x64);
443     } else {
444         /* 16 bit */
445         new_cr3 = 0;
446         new_eip = access_ldw(&new, tss_base + 0x0e);
447         new_eflags = access_ldw(&new, tss_base + 0x10);
448         for (i = 0; i < 8; i++) {
449             new_regs[i] = access_ldw(&new, tss_base + (0x12 + i * 2));
450         }
451         for (i = 0; i < 4; i++) {
452             new_segs[i] = access_ldw(&new, tss_base + (0x22 + i * 2));
453         }
454         new_ldt = access_ldw(&new, tss_base + 0x2a);
455         new_segs[R_FS] = 0;
456         new_segs[R_GS] = 0;
457         new_trap = 0;
458     }
459     /* XXX: avoid a compiler warning, see
460        http://support.amd.com/us/Processor_TechDocs/24593.pdf
461        chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
462     (void)new_trap;
463 
464     /* clear the old task's busy bit (the task is restartable) */
465     if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
466         tss_set_busy(env, env->tr.selector, 0, retaddr);
467     }
468 
469     if (source == SWITCH_TSS_IRET) {
470         old_eflags &= ~NT_MASK;
471         if (old_type & 8) {
472             access_stl(&old, env->tr.base + 0x24, old_eflags);
473         } else {
474             access_stw(&old, env->tr.base + 0x10, old_eflags);
475         }
476     }
477 
478     if (source == SWITCH_TSS_CALL) {
479         /*
480          * Thanks to the probe_access above, we know the first two
481          * bytes addressed by &new are writable too.
482          */
483         access_stw(&new, tss_base, env->tr.selector);
484         new_eflags |= NT_MASK;
485     }
486 
487     /* set busy bit */
488     if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
489         tss_set_busy(env, tss_selector, 1, retaddr);
490     }
491 
492     /* set the new CPU state */
493 
494     /* now if an exception occurs, it will occur in the next task context */
495 
496     env->cr[0] |= CR0_TS_MASK;
497     env->hflags |= HF_TS_MASK;
498     env->tr.selector = tss_selector;
499     env->tr.base = tss_base;
500     env->tr.limit = tss_limit;
501     env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
502 
503     if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
504         cpu_x86_update_cr3(env, new_cr3);
505     }
506 
507     /* first load all registers in a way that cannot fault, then reload
508        the segment registers, which may raise exceptions */
509     env->eip = new_eip;
510     eflags_mask = TF_MASK | AC_MASK | ID_MASK |
511         IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
512     if (type & 8) {
513         cpu_load_eflags(env, new_eflags, eflags_mask);
514         for (i = 0; i < 8; i++) {
515             env->regs[i] = new_regs[i];
516         }
517     } else {
518         cpu_load_eflags(env, new_eflags, eflags_mask & 0xffff);
519         for (i = 0; i < 8; i++) {
520             env->regs[i] = (env->regs[i] & 0xffff0000) | new_regs[i];
521         }
522     }
523     if (new_eflags & VM_MASK) {
524         for (i = 0; i < 6; i++) {
525             load_seg_vm(env, i, new_segs[i]);
526         }
527     } else {
528         /* first load just the selectors; the full reload below may trigger exceptions */
529         for (i = 0; i < 6; i++) {
530             cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
531         }
532     }
533 
534     env->ldt.selector = new_ldt & ~4;
535     env->ldt.base = 0;
536     env->ldt.limit = 0;
537     env->ldt.flags = 0;
538 
539     /* load the LDT */
540     if (new_ldt & 4) {
541         raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
542     }
543 
544     if ((new_ldt & 0xfffc) != 0) {
545         dt = &env->gdt;
546         index = new_ldt & ~7;
547         if ((index + 7) > dt->limit) {
548             raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
549         }
550         ptr = dt->base + index;
551         e1 = cpu_ldl_kernel_ra(env, ptr, retaddr);
552         e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
553         if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
554             raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
555         }
556         if (!(e2 & DESC_P_MASK)) {
557             raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
558         }
559         load_seg_cache_raw_dt(&env->ldt, e1, e2);
560     }
561 
562     /* load the segments */
563     if (!(new_eflags & VM_MASK)) {
564         int cpl = new_segs[R_CS] & 3;
565         tss_load_seg(env, R_CS, new_segs[R_CS], cpl, retaddr);
566         tss_load_seg(env, R_SS, new_segs[R_SS], cpl, retaddr);
567         tss_load_seg(env, R_ES, new_segs[R_ES], cpl, retaddr);
568         tss_load_seg(env, R_DS, new_segs[R_DS], cpl, retaddr);
569         tss_load_seg(env, R_FS, new_segs[R_FS], cpl, retaddr);
570         tss_load_seg(env, R_GS, new_segs[R_GS], cpl, retaddr);
571     }
572 
573     /* check that env->eip is in the CS segment limits */
574     if (new_eip > env->segs[R_CS].limit) {
575         /* XXX: different exception if CALL? */
576         raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
577     }
578 
579 #ifndef CONFIG_USER_ONLY
580     /* reset local breakpoints */
581     if (env->dr[7] & DR7_LOCAL_BP_MASK) {
582         cpu_x86_update_dr7(env, env->dr[7] & ~DR7_LOCAL_BP_MASK);
583     }
584 #endif
585     return type >> 3;
586 }
587 
588 static int switch_tss(CPUX86State *env, int tss_selector,
589                       uint32_t e1, uint32_t e2, int source,
590                       uint32_t next_eip)
591 {
592     return switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0);
593 }
594 
595 static inline unsigned int get_sp_mask(unsigned int e2)
596 {
597 #ifdef TARGET_X86_64
598     if (e2 & DESC_L_MASK) {
599         return 0;
600     } else
601 #endif
602     if (e2 & DESC_B_MASK) {
603         return 0xffffffff;
604     } else {
605         return 0xffff;
606     }
607 }
608 
609 static int exception_is_fault(int intno)
610 {
611     switch (intno) {
612         /*
613          * #DB can be both fault- and trap-like, but it never sets RF=1
614          * in the RFLAGS value pushed on the stack.
615          */
616     case EXCP01_DB:
617     case EXCP03_INT3:
618     case EXCP04_INTO:
619     case EXCP08_DBLE:
620     case EXCP12_MCHK:
621         return 0;
622     }
623     /* Everything else including reserved exception is a fault.  */
624     return 1;
625 }
626 
627 int exception_has_error_code(int intno)
628 {
629     switch (intno) {
630     case 8:
631     case 10:
632     case 11:
633     case 12:
634     case 13:
635     case 14:
636     case 17:
637         return 1;
638     }
639     return 0;
640 }
641 
642 /* protected mode interrupt */
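/*
 * Deliver an interrupt or exception while in protected mode.  The gate
 * descriptor is read from the IDT; a task gate triggers a full task
 * switch, while interrupt and trap gates push (optionally on an inner
 * stack taken from the TSS) the old SS:ESP, EFLAGS, CS:EIP and error
 * code before loading CS:EIP from the gate.  Interrupt gates additionally
 * clear IF.
 */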
643 static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
644                                    int error_code, unsigned int next_eip,
645                                    int is_hw)
646 {
647     SegmentCache *dt;
648     target_ulong ptr;
649     int type, dpl, selector, ss_dpl, cpl;
650     int has_error_code, new_stack, shift;
651     uint32_t e1, e2, offset, ss = 0, ss_e1 = 0, ss_e2 = 0;
652     uint32_t old_eip, eflags;
653     int vm86 = env->eflags & VM_MASK;
654     StackAccess sa;
655     bool set_rf;
656 
657     has_error_code = 0;
658     if (!is_int && !is_hw) {
659         has_error_code = exception_has_error_code(intno);
660     }
661     if (is_int) {
662         old_eip = next_eip;
663         set_rf = false;
664     } else {
665         old_eip = env->eip;
666         set_rf = exception_is_fault(intno);
667     }
668 
669     dt = &env->idt;
670     if (intno * 8 + 7 > dt->limit) {
671         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
672     }
673     ptr = dt->base + intno * 8;
674     e1 = cpu_ldl_kernel(env, ptr);
675     e2 = cpu_ldl_kernel(env, ptr + 4);
676     /* check gate type */
677     type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
678     switch (type) {
679     case 5: /* task gate */
680     case 6: /* 286 interrupt gate */
681     case 7: /* 286 trap gate */
682     case 14: /* 386 interrupt gate */
683     case 15: /* 386 trap gate */
684         break;
685     default:
686         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
687         break;
688     }
689     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
690     cpl = env->hflags & HF_CPL_MASK;
691     /* check privilege if software int */
692     if (is_int && dpl < cpl) {
693         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
694     }
695 
696     sa.env = env;
697     sa.ra = 0;
698 
699     if (type == 5) {
700         /* task gate */
701         /* must do that check here to return the correct error code */
702         if (!(e2 & DESC_P_MASK)) {
703             raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
704         }
705         shift = switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
706         if (has_error_code) {
707             /* push the error code on the destination stack */
708             cpl = env->hflags & HF_CPL_MASK;
709             sa.mmu_index = x86_mmu_index_pl(env, cpl);
710             if (env->segs[R_SS].flags & DESC_B_MASK) {
711                 sa.sp_mask = 0xffffffff;
712             } else {
713                 sa.sp_mask = 0xffff;
714             }
715             sa.sp = env->regs[R_ESP];
716             sa.ss_base = env->segs[R_SS].base;
717             if (shift) {
718                 pushl(&sa, error_code);
719             } else {
720                 pushw(&sa, error_code);
721             }
722             SET_ESP(sa.sp, sa.sp_mask);
723         }
724         return;
725     }
726 
727     /* Otherwise, trap or interrupt gate */
728 
729     /* check valid bit */
730     if (!(e2 & DESC_P_MASK)) {
731         raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
732     }
733     selector = e1 >> 16;
734     offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
735     if ((selector & 0xfffc) == 0) {
736         raise_exception_err(env, EXCP0D_GPF, 0);
737     }
738     if (load_segment(env, &e1, &e2, selector) != 0) {
739         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
740     }
741     if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
742         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
743     }
744     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
745     if (dpl > cpl) {
746         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
747     }
748     if (!(e2 & DESC_P_MASK)) {
749         raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
750     }
751     if (e2 & DESC_C_MASK) {
752         dpl = cpl;
753     }
754     sa.mmu_index = x86_mmu_index_pl(env, dpl);
755     if (dpl < cpl) {
756         /* to inner privilege */
757         uint32_t esp;
758         get_ss_esp_from_tss(env, &ss, &esp, dpl, 0);
759         if ((ss & 0xfffc) == 0) {
760             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
761         }
762         if ((ss & 3) != dpl) {
763             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
764         }
765         if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
766             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
767         }
768         ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
769         if (ss_dpl != dpl) {
770             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
771         }
772         if (!(ss_e2 & DESC_S_MASK) ||
773             (ss_e2 & DESC_CS_MASK) ||
774             !(ss_e2 & DESC_W_MASK)) {
775             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
776         }
777         if (!(ss_e2 & DESC_P_MASK)) {
778             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
779         }
780         new_stack = 1;
781         sa.sp = esp;
782         sa.sp_mask = get_sp_mask(ss_e2);
783         sa.ss_base = get_seg_base(ss_e1, ss_e2);
784     } else  {
785         /* to same privilege */
786         if (vm86) {
787             raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
788         }
789         new_stack = 0;
790         sa.sp = env->regs[R_ESP];
791         sa.sp_mask = get_sp_mask(env->segs[R_SS].flags);
792         sa.ss_base = env->segs[R_SS].base;
793     }
794 
795     shift = type >> 3;
796 
797 #if 0
798     /* XXX: check that enough room is available */
799     push_size = 6 + (new_stack << 2) + (has_error_code << 1);
800     if (vm86) {
801         push_size += 8;
802     }
803     push_size <<= shift;
804 #endif
805     eflags = cpu_compute_eflags(env);
806     /*
807      * AMD states that code breakpoint #DBs clear RF to 0, while Intel leaves it
808      * as is.  The AMD behavior could be implemented in check_hw_breakpoints().
809      */
810     if (set_rf) {
811         eflags |= RF_MASK;
812     }
813 
814     if (shift == 1) {
815         if (new_stack) {
816             if (vm86) {
817                 pushl(&sa, env->segs[R_GS].selector);
818                 pushl(&sa, env->segs[R_FS].selector);
819                 pushl(&sa, env->segs[R_DS].selector);
820                 pushl(&sa, env->segs[R_ES].selector);
821             }
822             pushl(&sa, env->segs[R_SS].selector);
823             pushl(&sa, env->regs[R_ESP]);
824         }
825         pushl(&sa, eflags);
826         pushl(&sa, env->segs[R_CS].selector);
827         pushl(&sa, old_eip);
828         if (has_error_code) {
829             pushl(&sa, error_code);
830         }
831     } else {
832         if (new_stack) {
833             if (vm86) {
834                 pushw(&sa, env->segs[R_GS].selector);
835                 pushw(&sa, env->segs[R_FS].selector);
836                 pushw(&sa, env->segs[R_DS].selector);
837                 pushw(&sa, env->segs[R_ES].selector);
838             }
839             pushw(&sa, env->segs[R_SS].selector);
840             pushw(&sa, env->regs[R_ESP]);
841         }
842         pushw(&sa, eflags);
843         pushw(&sa, env->segs[R_CS].selector);
844         pushw(&sa, old_eip);
845         if (has_error_code) {
846             pushw(&sa, error_code);
847         }
848     }
849 
850     /* interrupt gates clear the IF flag */
851     if ((type & 1) == 0) {
852         env->eflags &= ~IF_MASK;
853     }
854     env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
855 
856     if (new_stack) {
857         if (vm86) {
858             cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
859             cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
860             cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
861             cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
862         }
863         ss = (ss & ~3) | dpl;
864         cpu_x86_load_seg_cache(env, R_SS, ss, sa.ss_base,
865                                get_seg_limit(ss_e1, ss_e2), ss_e2);
866     }
867     SET_ESP(sa.sp, sa.sp_mask);
868 
869     selector = (selector & ~3) | dpl;
870     cpu_x86_load_seg_cache(env, R_CS, selector,
871                    get_seg_base(e1, e2),
872                    get_seg_limit(e1, e2),
873                    e2);
874     env->eip = offset;
875 }
876 
877 #ifdef TARGET_X86_64
878 
879 static void pushq(StackAccess *sa, uint64_t val)
880 {
881     sa->sp -= 8;
882     cpu_stq_mmuidx_ra(sa->env, sa->sp, val, sa->mmu_index, sa->ra);
883 }
884 
885 static uint64_t popq(StackAccess *sa)
886 {
887     uint64_t ret = cpu_ldq_mmuidx_ra(sa->env, sa->sp, sa->mmu_index, sa->ra);
888     sa->sp += 8;
889     return ret;
890 }
891 
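/*
 * Read a stack pointer from the 64-bit TSS: level 0..2 selects RSP0..2
 * (at offsets 4, 12 and 20), level 4..10 selects IST1..7 (at offsets
 * 36..84), which matches the index = 8 * level + 4 computation below.
 * The value must be a canonical address or #SS(0) is raised.
 */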
892 static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
893 {
894     X86CPU *cpu = env_archcpu(env);
895     int index, pg_mode;
896     target_ulong rsp;
897     int32_t sext;
898 
899 #if 0
900     printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
901            env->tr.base, env->tr.limit);
902 #endif
903 
904     if (!(env->tr.flags & DESC_P_MASK)) {
905         cpu_abort(CPU(cpu), "invalid tss");
906     }
907     index = 8 * level + 4;
908     if ((index + 7) > env->tr.limit) {
909         raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
910     }
911 
912     rsp = cpu_ldq_kernel(env, env->tr.base + index);
913 
914     /* test virtual address sign extension */
915     pg_mode = get_pg_mode(env);
916     sext = (int64_t)rsp >> (pg_mode & PG_MODE_LA57 ? 56 : 47);
917     if (sext != 0 && sext != -1) {
918         raise_exception_err(env, EXCP0C_STACK, 0);
919     }
920 
921     return rsp;
922 }
923 
924 /* 64 bit interrupt */
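/*
 * Deliver an interrupt or exception in long mode.  IDT entries are 16
 * bytes wide and only 64-bit interrupt and trap gates are valid.  The
 * handler may run on an IST stack; the stack pointer is aligned to 16
 * bytes and SS:RSP, RFLAGS, CS:RIP and the error code are always pushed
 * as 64-bit values.
 */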
925 static void do_interrupt64(CPUX86State *env, int intno, int is_int,
926                            int error_code, target_ulong next_eip, int is_hw)
927 {
928     SegmentCache *dt;
929     target_ulong ptr;
930     int type, dpl, selector, cpl, ist;
931     int has_error_code, new_stack;
932     uint32_t e1, e2, e3, eflags;
933     target_ulong old_eip, offset;
934     bool set_rf;
935     StackAccess sa;
936 
937     has_error_code = 0;
938     if (!is_int && !is_hw) {
939         has_error_code = exception_has_error_code(intno);
940     }
941     if (is_int) {
942         old_eip = next_eip;
943         set_rf = false;
944     } else {
945         old_eip = env->eip;
946         set_rf = exception_is_fault(intno);
947     }
948 
949     dt = &env->idt;
950     if (intno * 16 + 15 > dt->limit) {
951         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
952     }
953     ptr = dt->base + intno * 16;
954     e1 = cpu_ldl_kernel(env, ptr);
955     e2 = cpu_ldl_kernel(env, ptr + 4);
956     e3 = cpu_ldl_kernel(env, ptr + 8);
957     /* check gate type */
958     type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
959     switch (type) {
960     case 14: /* 386 interrupt gate */
961     case 15: /* 386 trap gate */
962         break;
963     default:
964         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
965         break;
966     }
967     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
968     cpl = env->hflags & HF_CPL_MASK;
969     /* check privilege if software int */
970     if (is_int && dpl < cpl) {
971         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
972     }
973     /* check valid bit */
974     if (!(e2 & DESC_P_MASK)) {
975         raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
976     }
977     selector = e1 >> 16;
978     offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
979     ist = e2 & 7;
980     if ((selector & 0xfffc) == 0) {
981         raise_exception_err(env, EXCP0D_GPF, 0);
982     }
983 
984     if (load_segment(env, &e1, &e2, selector) != 0) {
985         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
986     }
987     if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
988         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
989     }
990     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
991     if (dpl > cpl) {
992         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
993     }
994     if (!(e2 & DESC_P_MASK)) {
995         raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
996     }
997     if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
998         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
999     }
1000     if (e2 & DESC_C_MASK) {
1001         dpl = cpl;
1002     }
1003 
1004     sa.env = env;
1005     sa.ra = 0;
1006     sa.mmu_index = x86_mmu_index_pl(env, dpl);
1007     sa.sp_mask = -1;
1008     sa.ss_base = 0;
1009     if (dpl < cpl || ist != 0) {
1010         /* to inner privilege */
1011         new_stack = 1;
1012         sa.sp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl);
1013     } else {
1014         /* to same privilege */
1015         if (env->eflags & VM_MASK) {
1016             raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1017         }
1018         new_stack = 0;
1019         sa.sp = env->regs[R_ESP];
1020     }
1021     sa.sp &= ~0xfLL; /* align stack */
1022 
1023     /* See do_interrupt_protected.  */
1024     eflags = cpu_compute_eflags(env);
1025     if (set_rf) {
1026         eflags |= RF_MASK;
1027     }
1028 
1029     pushq(&sa, env->segs[R_SS].selector);
1030     pushq(&sa, env->regs[R_ESP]);
1031     pushq(&sa, eflags);
1032     pushq(&sa, env->segs[R_CS].selector);
1033     pushq(&sa, old_eip);
1034     if (has_error_code) {
1035         pushq(&sa, error_code);
1036     }
1037 
1038     /* interrupt gates clear the IF flag */
1039     if ((type & 1) == 0) {
1040         env->eflags &= ~IF_MASK;
1041     }
1042     env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1043 
1044     if (new_stack) {
1045         uint32_t ss = 0 | dpl; /* SS = NULL selector with RPL = new CPL */
1046         cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, dpl << DESC_DPL_SHIFT);
1047     }
1048     env->regs[R_ESP] = sa.sp;
1049 
1050     selector = (selector & ~3) | dpl;
1051     cpu_x86_load_seg_cache(env, R_CS, selector,
1052                    get_seg_base(e1, e2),
1053                    get_seg_limit(e1, e2),
1054                    e2);
1055     env->eip = offset;
1056 }
1057 #endif /* TARGET_X86_64 */
1058 
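/*
 * SYSRET: return to user mode from a SYSCALL.  The new CS and SS
 * selectors are derived from bits 63..48 of MSR_STAR (CS is the base
 * selector, or base + 16 for a 64-bit return; SS is base + 8), both
 * with RPL forced to 3.  In long mode RFLAGS is restored from R11 and
 * RIP from RCX; outside long mode only IF is set and EIP comes from ECX.
 */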
1059 void helper_sysret(CPUX86State *env, int dflag)
1060 {
1061     int cpl, selector;
1062 
1063     if (!(env->efer & MSR_EFER_SCE)) {
1064         raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
1065     }
1066     cpl = env->hflags & HF_CPL_MASK;
1067     if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1068         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1069     }
1070     selector = (env->star >> 48) & 0xffff;
1071 #ifdef TARGET_X86_64
1072     if (env->hflags & HF_LMA_MASK) {
1073         cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
1074                         | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
1075                         NT_MASK);
1076         if (dflag == 2) {
1077             cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1078                                    0, 0xffffffff,
1079                                    DESC_G_MASK | DESC_P_MASK |
1080                                    DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1081                                    DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1082                                    DESC_L_MASK);
1083             env->eip = env->regs[R_ECX];
1084         } else {
1085             cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1086                                    0, 0xffffffff,
1087                                    DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1088                                    DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1089                                    DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1090             env->eip = (uint32_t)env->regs[R_ECX];
1091         }
1092         cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
1093                                0, 0xffffffff,
1094                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1095                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1096                                DESC_W_MASK | DESC_A_MASK);
1097     } else
1098 #endif
1099     {
1100         env->eflags |= IF_MASK;
1101         cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1102                                0, 0xffffffff,
1103                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1104                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1105                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1106         env->eip = (uint32_t)env->regs[R_ECX];
1107         cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
1108                                0, 0xffffffff,
1109                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1110                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1111                                DESC_W_MASK | DESC_A_MASK);
1112     }
1113 }
1114 
1115 /* real mode interrupt */
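/*
 * Deliver an interrupt in real mode: the vector is looked up in the
 * 4-byte-per-entry IVT at the IDT base, FLAGS, CS and IP are pushed as
 * 16-bit values on the current stack, and IF, TF, AC and RF are cleared.
 */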
1116 static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
1117                               int error_code, unsigned int next_eip)
1118 {
1119     SegmentCache *dt;
1120     target_ulong ptr;
1121     int selector;
1122     uint32_t offset;
1123     uint32_t old_cs, old_eip;
1124     StackAccess sa;
1125 
1126     /* real mode (simpler!) */
1127     dt = &env->idt;
1128     if (intno * 4 + 3 > dt->limit) {
1129         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
1130     }
1131     ptr = dt->base + intno * 4;
1132     offset = cpu_lduw_kernel(env, ptr);
1133     selector = cpu_lduw_kernel(env, ptr + 2);
1134 
1135     sa.env = env;
1136     sa.ra = 0;
1137     sa.sp = env->regs[R_ESP];
1138     sa.sp_mask = 0xffff;
1139     sa.ss_base = env->segs[R_SS].base;
1140     sa.mmu_index = x86_mmu_index_pl(env, 0);
1141 
1142     if (is_int) {
1143         old_eip = next_eip;
1144     } else {
1145         old_eip = env->eip;
1146     }
1147     old_cs = env->segs[R_CS].selector;
1148     /* XXX: use SS segment size? */
1149     pushw(&sa, cpu_compute_eflags(env));
1150     pushw(&sa, old_cs);
1151     pushw(&sa, old_eip);
1152 
1153     /* update processor state */
1154     SET_ESP(sa.sp, sa.sp_mask);
1155     env->eip = offset;
1156     env->segs[R_CS].selector = selector;
1157     env->segs[R_CS].base = (selector << 4);
1158     env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1159 }
1160 
1161 /*
1162  * Begin execution of an interrupt. is_int is TRUE if coming from
1163  * the int instruction. next_eip is the env->eip value AFTER the interrupt
1164  * instruction. It is only relevant if is_int is TRUE.
1165  */
1166 void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
1167                       int error_code, target_ulong next_eip, int is_hw)
1168 {
1169     CPUX86State *env = &cpu->env;
1170 
1171     if (qemu_loglevel_mask(CPU_LOG_INT)) {
1172         if ((env->cr[0] & CR0_PE_MASK)) {
1173             static int count;
1174 
1175             qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
1176                      " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1177                      count, intno, error_code, is_int,
1178                      env->hflags & HF_CPL_MASK,
1179                      env->segs[R_CS].selector, env->eip,
1180                      (int)env->segs[R_CS].base + env->eip,
1181                      env->segs[R_SS].selector, env->regs[R_ESP]);
1182             if (intno == 0x0e) {
1183                 qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
1184             } else {
1185                 qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]);
1186             }
1187             qemu_log("\n");
1188             log_cpu_state(CPU(cpu), CPU_DUMP_CCOP);
1189 #if 0
1190             {
1191                 int i;
1192                 target_ulong ptr;
1193 
1194                 qemu_log("       code=");
1195                 ptr = env->segs[R_CS].base + env->eip;
1196                 for (i = 0; i < 16; i++) {
1197                     qemu_log(" %02x", ldub(ptr + i));
1198                 }
1199                 qemu_log("\n");
1200             }
1201 #endif
1202             count++;
1203         }
1204     }
1205     if (env->cr[0] & CR0_PE_MASK) {
1206 #if !defined(CONFIG_USER_ONLY)
1207         if (env->hflags & HF_GUEST_MASK) {
1208             handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
1209         }
1210 #endif
1211 #ifdef TARGET_X86_64
1212         if (env->hflags & HF_LMA_MASK) {
1213             do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
1214         } else
1215 #endif
1216         {
1217             do_interrupt_protected(env, intno, is_int, error_code, next_eip,
1218                                    is_hw);
1219         }
1220     } else {
1221 #if !defined(CONFIG_USER_ONLY)
1222         if (env->hflags & HF_GUEST_MASK) {
1223             handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
1224         }
1225 #endif
1226         do_interrupt_real(env, intno, is_int, error_code, next_eip);
1227     }
1228 
1229 #if !defined(CONFIG_USER_ONLY)
1230     if (env->hflags & HF_GUEST_MASK) {
1231         CPUState *cs = CPU(cpu);
1232         uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb +
1233                                       offsetof(struct vmcb,
1234                                                control.event_inj));
1235 
1236         x86_stl_phys(cs,
1237                  env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
1238                  event_inj & ~SVM_EVTINJ_VALID);
1239     }
1240 #endif
1241 }
1242 
1243 void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
1244 {
1245     do_interrupt_all(env_archcpu(env), intno, 0, 0, 0, is_hw);
1246 }
1247 
1248 void helper_lldt(CPUX86State *env, int selector)
1249 {
1250     SegmentCache *dt;
1251     uint32_t e1, e2;
1252     int index, entry_limit;
1253     target_ulong ptr;
1254 
1255     selector &= 0xffff;
1256     if ((selector & 0xfffc) == 0) {
1257         /* XXX: NULL selector case: invalid LDT */
1258         env->ldt.base = 0;
1259         env->ldt.limit = 0;
1260     } else {
1261         if (selector & 0x4) {
1262             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1263         }
1264         dt = &env->gdt;
1265         index = selector & ~7;
1266 #ifdef TARGET_X86_64
1267         if (env->hflags & HF_LMA_MASK) {
1268             entry_limit = 15;
1269         } else
1270 #endif
1271         {
1272             entry_limit = 7;
1273         }
1274         if ((index + entry_limit) > dt->limit) {
1275             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1276         }
1277         ptr = dt->base + index;
1278         e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1279         e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1280         if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
1281             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1282         }
1283         if (!(e2 & DESC_P_MASK)) {
1284             raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1285         }
1286 #ifdef TARGET_X86_64
1287         if (env->hflags & HF_LMA_MASK) {
1288             uint32_t e3;
1289 
1290             e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
1291             load_seg_cache_raw_dt(&env->ldt, e1, e2);
1292             env->ldt.base |= (target_ulong)e3 << 32;
1293         } else
1294 #endif
1295         {
1296             load_seg_cache_raw_dt(&env->ldt, e1, e2);
1297         }
1298     }
1299     env->ldt.selector = selector;
1300 }
1301 
1302 void helper_ltr(CPUX86State *env, int selector)
1303 {
1304     SegmentCache *dt;
1305     uint32_t e1, e2;
1306     int index, type, entry_limit;
1307     target_ulong ptr;
1308 
1309     selector &= 0xffff;
1310     if ((selector & 0xfffc) == 0) {
1311         /* NULL selector case: invalid TR */
1312         env->tr.base = 0;
1313         env->tr.limit = 0;
1314         env->tr.flags = 0;
1315     } else {
1316         if (selector & 0x4) {
1317             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1318         }
1319         dt = &env->gdt;
1320         index = selector & ~7;
1321 #ifdef TARGET_X86_64
1322         if (env->hflags & HF_LMA_MASK) {
1323             entry_limit = 15;
1324         } else
1325 #endif
1326         {
1327             entry_limit = 7;
1328         }
1329         if ((index + entry_limit) > dt->limit) {
1330             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1331         }
1332         ptr = dt->base + index;
1333         e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1334         e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1335         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1336         if ((e2 & DESC_S_MASK) ||
1337             (type != 1 && type != 9)) {
1338             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1339         }
1340         if (!(e2 & DESC_P_MASK)) {
1341             raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1342         }
1343 #ifdef TARGET_X86_64
1344         if (env->hflags & HF_LMA_MASK) {
1345             uint32_t e3, e4;
1346 
1347             e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
1348             e4 = cpu_ldl_kernel_ra(env, ptr + 12, GETPC());
1349             if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
1350                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1351             }
1352             load_seg_cache_raw_dt(&env->tr, e1, e2);
1353             env->tr.base |= (target_ulong)e3 << 32;
1354         } else
1355 #endif
1356         {
1357             load_seg_cache_raw_dt(&env->tr, e1, e2);
1358         }
1359         e2 |= DESC_TSS_BUSY_MASK;
1360         cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
1361     }
1362     env->tr.selector = selector;
1363 }
1364 
1365 /* only works in protected mode, outside VM86 mode. seg_reg must be != R_CS */
1366 void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
1367 {
1368     uint32_t e1, e2;
1369     int cpl, dpl, rpl;
1370     SegmentCache *dt;
1371     int index;
1372     target_ulong ptr;
1373 
1374     selector &= 0xffff;
1375     cpl = env->hflags & HF_CPL_MASK;
1376     if ((selector & 0xfffc) == 0) {
1377         /* null selector case */
1378         if (seg_reg == R_SS
1379 #ifdef TARGET_X86_64
1380             && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
1381 #endif
1382             ) {
1383             raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1384         }
1385         cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
1386     } else {
1387 
1388         if (selector & 0x4) {
1389             dt = &env->ldt;
1390         } else {
1391             dt = &env->gdt;
1392         }
1393         index = selector & ~7;
1394         if ((index + 7) > dt->limit) {
1395             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1396         }
1397         ptr = dt->base + index;
1398         e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1399         e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1400 
1401         if (!(e2 & DESC_S_MASK)) {
1402             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1403         }
1404         rpl = selector & 3;
1405         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1406         if (seg_reg == R_SS) {
1407             /* must be writable segment */
1408             if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
1409                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1410             }
1411             if (rpl != cpl || dpl != cpl) {
1412                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1413             }
1414         } else {
1415             /* must be readable segment */
1416             if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
1417                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1418             }
1419 
1420             if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
1421                 /* if not a conforming code segment, check the access rights */
1422                 if (dpl < cpl || dpl < rpl) {
1423                     raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1424                 }
1425             }
1426         }
1427 
1428         if (!(e2 & DESC_P_MASK)) {
1429             if (seg_reg == R_SS) {
1430                 raise_exception_err_ra(env, EXCP0C_STACK, selector & 0xfffc, GETPC());
1431             } else {
1432                 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1433             }
1434         }
1435 
1436         /* set the access bit if not already set */
1437         if (!(e2 & DESC_A_MASK)) {
1438             e2 |= DESC_A_MASK;
1439             cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
1440         }
1441 
1442         cpu_x86_load_seg_cache(env, seg_reg, selector,
1443                        get_seg_base(e1, e2),
1444                        get_seg_limit(e1, e2),
1445                        e2);
1446 #if 0
1447         qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
1448                 selector, (unsigned long)sc->base, sc->limit, sc->flags);
1449 #endif
1450     }
1451 }
1452 
1453 /* protected mode jump */
1454 void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
1455                            target_ulong next_eip)
1456 {
1457     int gate_cs, type;
1458     uint32_t e1, e2, cpl, dpl, rpl, limit;
1459 
1460     if ((new_cs & 0xfffc) == 0) {
1461         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1462     }
1463     if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
1464         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1465     }
1466     cpl = env->hflags & HF_CPL_MASK;
1467     if (e2 & DESC_S_MASK) {
1468         if (!(e2 & DESC_CS_MASK)) {
1469             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1470         }
1471         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1472         if (e2 & DESC_C_MASK) {
1473             /* conforming code segment */
1474             if (dpl > cpl) {
1475                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1476             }
1477         } else {
1478             /* non-conforming code segment */
1479             rpl = new_cs & 3;
1480             if (rpl > cpl) {
1481                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1482             }
1483             if (dpl != cpl) {
1484                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1485             }
1486         }
1487         if (!(e2 & DESC_P_MASK)) {
1488             raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1489         }
1490         limit = get_seg_limit(e1, e2);
1491         if (new_eip > limit &&
1492             (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) {
1493             raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1494         }
1495         cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1496                        get_seg_base(e1, e2), limit, e2);
1497         env->eip = new_eip;
1498     } else {
1499         /* jump to call or task gate */
1500         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1501         rpl = new_cs & 3;
1502         cpl = env->hflags & HF_CPL_MASK;
1503         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1504 
1505 #ifdef TARGET_X86_64
1506         if (env->efer & MSR_EFER_LMA) {
1507             if (type != 12) {
1508                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1509             }
1510         }
1511 #endif
1512         switch (type) {
1513         case 1: /* 286 TSS */
1514         case 9: /* 386 TSS */
1515         case 5: /* task gate */
1516             if (dpl < cpl || dpl < rpl) {
1517                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1518             }
1519             switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip, GETPC());
1520             break;
1521         case 4: /* 286 call gate */
1522         case 12: /* 386 call gate */
1523             if ((dpl < cpl) || (dpl < rpl)) {
1524                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1525             }
1526             if (!(e2 & DESC_P_MASK)) {
1527                 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1528             }
1529             gate_cs = e1 >> 16;
1530             new_eip = (e1 & 0xffff);
1531             if (type == 12) {
1532                 new_eip |= (e2 & 0xffff0000);
1533             }
1534 
1535 #ifdef TARGET_X86_64
1536             if (env->efer & MSR_EFER_LMA) {
1537                 /* load the upper 8 bytes of the 64-bit call gate */
1538                 if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) {
1539                     raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1540                                            GETPC());
1541                 }
1542                 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1543                 if (type != 0) {
1544                     raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1545                                            GETPC());
1546                 }
1547                 new_eip |= ((target_ulong)e1) << 32;
1548             }
1549 #endif
1550 
1551             if (load_segment_ra(env, &e1, &e2, gate_cs, GETPC()) != 0) {
1552                 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1553             }
1554             dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1555             /* must be code segment */
1556             if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
1557                  (DESC_S_MASK | DESC_CS_MASK))) {
1558                 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1559             }
1560             if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
1561                 (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
1562                 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1563             }
1564 #ifdef TARGET_X86_64
1565             if (env->efer & MSR_EFER_LMA) {
1566                 if (!(e2 & DESC_L_MASK)) {
1567                     raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1568                 }
1569                 if (e2 & DESC_B_MASK) {
1570                     raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1571                 }
1572             }
1573 #endif
1574             if (!(e2 & DESC_P_MASK)) {
1575                 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1576             }
1577             limit = get_seg_limit(e1, e2);
1578             if (new_eip > limit &&
1579                 (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) {
1580                 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1581             }
1582             cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
1583                                    get_seg_base(e1, e2), limit, e2);
1584             env->eip = new_eip;
1585             break;
1586         default:
1587             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1588             break;
1589         }
1590     }
1591 }
1592 
1593 /* real mode call */
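/*
 * No descriptor checks in real mode: the current CS selector and the
 * return IP are pushed (32- or 16-bit wide depending on 'shift') and
 * CS is simply reloaded with base = selector << 4.
 */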
1594 void helper_lcall_real(CPUX86State *env, uint32_t new_cs, uint32_t new_eip,
1595                        int shift, uint32_t next_eip)
1596 {
1597     StackAccess sa;
1598 
1599     sa.env = env;
1600     sa.ra = GETPC();
1601     sa.sp = env->regs[R_ESP];
1602     sa.sp_mask = get_sp_mask(env->segs[R_SS].flags);
1603     sa.ss_base = env->segs[R_SS].base;
1604     sa.mmu_index = x86_mmu_index_pl(env, 0);
1605 
1606     if (shift) {
1607         pushl(&sa, env->segs[R_CS].selector);
1608         pushl(&sa, next_eip);
1609     } else {
1610         pushw(&sa, env->segs[R_CS].selector);
1611         pushw(&sa, next_eip);
1612     }
1613 
1614     SET_ESP(sa.sp, sa.sp_mask);
1615     env->eip = new_eip;
1616     env->segs[R_CS].selector = new_cs;
1617     env->segs[R_CS].base = (new_cs << 4);
1618 }
1619 
1620 /* protected mode call */
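/*
 * A far CALL in protected mode either targets a code segment directly
 * (no stack switch possible) or goes through a call/task gate.  A call
 * gate leading to a more privileged non-conforming segment switches to
 * the inner stack taken from the TSS and, for 16/32-bit gates, copies
 * 'param_count' parameters from the caller's stack to the new one.
 */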
1621 void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
1622                             int shift, target_ulong next_eip)
1623 {
1624     int new_stack, i;
1625     uint32_t e1, e2, cpl, dpl, rpl, selector, param_count;
1626     uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, type, ss_dpl;
1627     uint32_t val, limit, old_sp_mask;
1628     target_ulong old_ssp, offset;
1629     StackAccess sa;
1630 
1631     LOG_PCALL("lcall %04x:" TARGET_FMT_lx " s=%d\n", new_cs, new_eip, shift);
1632     LOG_PCALL_STATE(env_cpu(env));
1633     if ((new_cs & 0xfffc) == 0) {
1634         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1635     }
1636     if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
1637         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1638     }
1639     cpl = env->hflags & HF_CPL_MASK;
1640     LOG_PCALL("desc=%08x:%08x\n", e1, e2);
1641 
1642     sa.env = env;
1643     sa.ra = GETPC();
1644 
1645     if (e2 & DESC_S_MASK) {
1646         /* "normal" far call, no stack switch possible */
1647         if (!(e2 & DESC_CS_MASK)) {
1648             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1649         }
1650         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1651         if (e2 & DESC_C_MASK) {
1652             /* conforming code segment */
1653             if (dpl > cpl) {
1654                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1655             }
1656         } else {
1657             /* non conforming code segment */
1658             rpl = new_cs & 3;
1659             if (rpl > cpl) {
1660                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1661             }
1662             if (dpl != cpl) {
1663                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1664             }
1665         }
1666         if (!(e2 & DESC_P_MASK)) {
1667             raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1668         }
1669 
1670         sa.mmu_index = x86_mmu_index_pl(env, cpl);
1671 #ifdef TARGET_X86_64
1672         /* XXX: check 16/32 bit cases in long mode */
1673         if (shift == 2) {
1674             /* 64 bit case */
1675             sa.sp = env->regs[R_ESP];
1676             sa.sp_mask = -1;
1677             sa.ss_base = 0;
1678             pushq(&sa, env->segs[R_CS].selector);
1679             pushq(&sa, next_eip);
1680             /* from this point, not restartable */
1681             env->regs[R_ESP] = sa.sp;
1682             cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1683                                    get_seg_base(e1, e2),
1684                                    get_seg_limit(e1, e2), e2);
1685             env->eip = new_eip;
1686         } else
1687 #endif
1688         {
1689             sa.sp = env->regs[R_ESP];
1690             sa.sp_mask = get_sp_mask(env->segs[R_SS].flags);
1691             sa.ss_base = env->segs[R_SS].base;
1692             if (shift) {
1693                 pushl(&sa, env->segs[R_CS].selector);
1694                 pushl(&sa, next_eip);
1695             } else {
1696                 pushw(&sa, env->segs[R_CS].selector);
1697                 pushw(&sa, next_eip);
1698             }
1699 
1700             limit = get_seg_limit(e1, e2);
1701             if (new_eip > limit) {
1702                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1703             }
1704             /* from this point, not restartable */
1705             SET_ESP(sa.sp, sa.sp_mask);
1706             cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1707                                    get_seg_base(e1, e2), limit, e2);
1708             env->eip = new_eip;
1709         }
1710     } else {
1711         /* check gate type */
1712         type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1713         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1714         rpl = new_cs & 3;
1715 
1716 #ifdef TARGET_X86_64
1717         if (env->efer & MSR_EFER_LMA) {
1718             if (type != 12) {
1719                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1720             }
1721         }
1722 #endif
1723 
1724         switch (type) {
1725         case 1: /* available 286 TSS */
1726         case 9: /* available 386 TSS */
1727         case 5: /* task gate */
1728             if (dpl < cpl || dpl < rpl) {
1729                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1730             }
1731             switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip, GETPC());
1732             return;
1733         case 4: /* 286 call gate */
1734         case 12: /* 386 call gate */
1735             break;
1736         default:
1737             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1738             break;
1739         }
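        /*
         * Derive the operand size from the gate type: type 4 (286 call
         * gate) yields shift 0 (16-bit), type 12 (386 call gate) yields
         * shift 1 (32-bit); in long mode it is bumped to 2 (64-bit)
         * further below.
         */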
1740         shift = type >> 3;
1741 
1742         if (dpl < cpl || dpl < rpl) {
1743             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1744         }
1745         /* check valid bit */
1746         if (!(e2 & DESC_P_MASK)) {
1747             raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1748         }
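        /*
         * Call gate layout: e1[15:0] = offset 15..0, e1[31:16] = target
         * code selector, e2[4:0] = parameter count, e2[31:16] = offset
         * 31..16.  In long mode the next 8 descriptor bytes supply
         * offset 63..32 (read below from the e1 of the second half).
         */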
1749         selector = e1 >> 16;
1750         param_count = e2 & 0x1f;
1751         offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1752 #ifdef TARGET_X86_64
1753         if (env->efer & MSR_EFER_LMA) {
1754             /* load the upper 8 bytes of the 64-bit call gate */
1755             if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) {
1756                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1757                                        GETPC());
1758             }
1759             type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1760             if (type != 0) {
1761                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1762                                        GETPC());
1763             }
1764             offset |= ((target_ulong)e1) << 32;
1765         }
1766 #endif
1767         if ((selector & 0xfffc) == 0) {
1768             raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1769         }
1770 
1771         if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
1772             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1773         }
1774         if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
1775             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1776         }
1777         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1778         if (dpl > cpl) {
1779             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1780         }
1781 #ifdef TARGET_X86_64
1782         if (env->efer & MSR_EFER_LMA) {
1783             if (!(e2 & DESC_L_MASK)) {
1784                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1785             }
1786             if (e2 & DESC_B_MASK) {
1787                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1788             }
1789             shift++;
1790         }
1791 #endif
1792         if (!(e2 & DESC_P_MASK)) {
1793             raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1794         }
1795 
1796         if (!(e2 & DESC_C_MASK) && dpl < cpl) {
1797             /* to inner privilege */
1798             sa.mmu_index = x86_mmu_index_pl(env, dpl);
1799 #ifdef TARGET_X86_64
1800             if (shift == 2) {
1801                 ss = dpl;  /* SS = NULL selector with RPL = new CPL */
1802                 new_stack = 1;
1803                 sa.sp = get_rsp_from_tss(env, dpl);
1804                 sa.sp_mask = -1;
1805                 sa.ss_base = 0;  /* SS base is always zero in IA-32e mode */
1806                 LOG_PCALL("new ss:rsp=%04x:%016llx env->regs[R_ESP]="
1807                           TARGET_FMT_lx "\n", ss, sa.sp, env->regs[R_ESP]);
1808             } else
1809 #endif
1810             {
1811                 uint32_t sp32;
1812                 get_ss_esp_from_tss(env, &ss, &sp32, dpl, GETPC());
1813                 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
1814                           TARGET_FMT_lx "\n", ss, sp32, param_count,
1815                           env->regs[R_ESP]);
1816                 if ((ss & 0xfffc) == 0) {
1817                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1818                 }
1819                 if ((ss & 3) != dpl) {
1820                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1821                 }
1822                 if (load_segment_ra(env, &ss_e1, &ss_e2, ss, GETPC()) != 0) {
1823                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1824                 }
1825                 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
1826                 if (ss_dpl != dpl) {
1827                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1828                 }
1829                 if (!(ss_e2 & DESC_S_MASK) ||
1830                     (ss_e2 & DESC_CS_MASK) ||
1831                     !(ss_e2 & DESC_W_MASK)) {
1832                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1833                 }
1834                 if (!(ss_e2 & DESC_P_MASK)) {
1835                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1836                 }
1837 
1838                 sa.sp = sp32;
1839                 sa.sp_mask = get_sp_mask(ss_e2);
1840                 sa.ss_base = get_seg_base(ss_e1, ss_e2);
1841             }
1842 
1843             /* push_size = ((param_count * 2) + 8) << shift; */
1844             old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
1845             old_ssp = env->segs[R_SS].base;
1846 
1847 #ifdef TARGET_X86_64
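            /*
             * Build the frame on the inner stack: the old SS:ESP first,
             * then (for 16/32-bit gates only) the parameters copied
             * from the caller's stack; CS and the return address are
             * pushed after this block.  64-bit call gates never copy
             * parameters.
             */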
1848             if (shift == 2) {
1849                 /* XXX: verify if new stack address is canonical */
1850                 pushq(&sa, env->segs[R_SS].selector);
1851                 pushq(&sa, env->regs[R_ESP]);
1852                 /* parameters aren't supported for 64-bit call gates */
1853             } else
1854 #endif
1855             if (shift == 1) {
1856                 pushl(&sa, env->segs[R_SS].selector);
1857                 pushl(&sa, env->regs[R_ESP]);
1858                 for (i = param_count - 1; i >= 0; i--) {
1859                     val = cpu_ldl_data_ra(env,
1860                                           old_ssp + ((env->regs[R_ESP] + i * 4) & old_sp_mask),
1861                                           GETPC());
1862                     pushl(&sa, val);
1863                 }
1864             } else {
1865                 pushw(&sa, env->segs[R_SS].selector);
1866                 pushw(&sa, env->regs[R_ESP]);
1867                 for (i = param_count - 1; i >= 0; i--) {
1868                     val = cpu_lduw_data_ra(env,
1869                                            old_ssp + ((env->regs[R_ESP] + i * 2) & old_sp_mask),
1870                                            GETPC());
1871                     pushw(&sa, val);
1872                 }
1873             }
1874             new_stack = 1;
1875         } else {
1876             /* to same privilege */
1877             sa.mmu_index = x86_mmu_index_pl(env, cpl);
1878             sa.sp = env->regs[R_ESP];
1879             sa.sp_mask = get_sp_mask(env->segs[R_SS].flags);
1880             sa.ss_base = env->segs[R_SS].base;
1881             /* push_size = (4 << shift); */
1882             new_stack = 0;
1883         }
1884 
1885 #ifdef TARGET_X86_64
1886         if (shift == 2) {
1887             pushq(&sa, env->segs[R_CS].selector);
1888             pushq(&sa, next_eip);
1889         } else
1890 #endif
1891         if (shift == 1) {
1892             pushl(&sa, env->segs[R_CS].selector);
1893             pushl(&sa, next_eip);
1894         } else {
1895             pushw(&sa, env->segs[R_CS].selector);
1896             pushw(&sa, next_eip);
1897         }
1898 
1899         /* from this point, not restartable */
1900 
1901         if (new_stack) {
1902 #ifdef TARGET_X86_64
1903             if (shift == 2) {
1904                 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
1905             } else
1906 #endif
1907             {
1908                 ss = (ss & ~3) | dpl;
1909                 cpu_x86_load_seg_cache(env, R_SS, ss,
1910                                        sa.ss_base,
1911                                        get_seg_limit(ss_e1, ss_e2),
1912                                        ss_e2);
1913             }
1914         }
1915 
1916         selector = (selector & ~3) | dpl;
1917         cpu_x86_load_seg_cache(env, R_CS, selector,
1918                        get_seg_base(e1, e2),
1919                        get_seg_limit(e1, e2),
1920                        e2);
1921         SET_ESP(sa.sp, sa.sp_mask);
1922         env->eip = offset;
1923     }
1924 }
1925 
1926 /* real and vm86 mode iret */
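/*
 * Pops IP, CS and FLAGS (16- or 32-bit wide depending on 'shift').
 * In vm86 mode IOPL is excluded from the EFLAGS update mask; in real
 * mode it is writable.  NMI blocking is cleared in both cases.
 */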
1927 void helper_iret_real(CPUX86State *env, int shift)
1928 {
1929     uint32_t new_cs, new_eip, new_eflags;
1930     int eflags_mask;
1931     StackAccess sa;
1932 
1933     sa.env = env;
1934     sa.ra = GETPC();
1935     sa.mmu_index = x86_mmu_index_pl(env, 0);
1936     sa.sp_mask = 0xffff; /* XXXX: use SS segment size? */
1937     sa.sp = env->regs[R_ESP];
1938     sa.ss_base = env->segs[R_SS].base;
1939 
1940     if (shift == 1) {
1941         /* 32 bits */
1942         new_eip = popl(&sa);
1943         new_cs = popl(&sa) & 0xffff;
1944         new_eflags = popl(&sa);
1945     } else {
1946         /* 16 bits */
1947         new_eip = popw(&sa);
1948         new_cs = popw(&sa);
1949         new_eflags = popw(&sa);
1950     }
1951     SET_ESP(sa.sp, sa.sp_mask);
1952     env->segs[R_CS].selector = new_cs;
1953     env->segs[R_CS].base = (new_cs << 4);
1954     env->eip = new_eip;
1955     if (env->eflags & VM_MASK) {
1956         eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
1957             NT_MASK;
1958     } else {
1959         eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
1960             RF_MASK | NT_MASK;
1961     }
1962     if (shift == 0) {
1963         eflags_mask &= 0xffff;
1964     }
1965     cpu_load_eflags(env, new_eflags, eflags_mask);
1966     env->hflags2 &= ~HF2_NMI_MASK;
1967 }
1968 
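/*
 * Used when returning to an outer privilege level: data segments (and
 * non-conforming code segments) whose DPL is below the new CPL are no
 * longer accessible, so they are reloaded with a NULL selector and the
 * cached present bit cleared.
 */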
1969 static inline void validate_seg(CPUX86State *env, X86Seg seg_reg, int cpl)
1970 {
1971     int dpl;
1972     uint32_t e2;
1973 
1974     /* XXX: on x86_64, we do not want to nullify FS and GS because
1975        they may still contain a valid base. I would be interested to
1976        know how a real x86_64 CPU behaves */
1977     if ((seg_reg == R_FS || seg_reg == R_GS) &&
1978         (env->segs[seg_reg].selector & 0xfffc) == 0) {
1979         return;
1980     }
1981 
1982     e2 = env->segs[seg_reg].flags;
1983     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1984     if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
1985         /* data or non conforming code segment */
1986         if (dpl < cpl) {
1987             cpu_x86_load_seg_cache(env, seg_reg, 0,
1988                                    env->segs[seg_reg].base,
1989                                    env->segs[seg_reg].limit,
1990                                    env->segs[seg_reg].flags & ~DESC_P_MASK);
1991         }
1992     }
1993 }
1994 
1995 /* protected mode iret */
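/*
 * Common tail of far RET and IRET in protected mode: 'is_iret' selects
 * whether EFLAGS is popped, 'addend' is the extra byte count released
 * from the stack (the immediate of RET imm16), and 'shift' selects a
 * 16/32/64-bit operand size.
 */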
1996 static inline void helper_ret_protected(CPUX86State *env, int shift,
1997                                         int is_iret, int addend,
1998                                         uintptr_t retaddr)
1999 {
2000     uint32_t new_cs, new_eflags, new_ss;
2001     uint32_t new_es, new_ds, new_fs, new_gs;
2002     uint32_t e1, e2, ss_e1, ss_e2;
2003     int cpl, dpl, rpl, eflags_mask, iopl;
2004     target_ulong new_eip, new_esp;
2005     StackAccess sa;
2006 
2007     cpl = env->hflags & HF_CPL_MASK;
2008 
2009     sa.env = env;
2010     sa.ra = retaddr;
2011     sa.mmu_index = x86_mmu_index_pl(env, cpl);
2012 
2013 #ifdef TARGET_X86_64
2014     if (shift == 2) {
2015         sa.sp_mask = -1;
2016     } else
2017 #endif
2018     {
2019         sa.sp_mask = get_sp_mask(env->segs[R_SS].flags);
2020     }
2021     sa.sp = env->regs[R_ESP];
2022     sa.ss_base = env->segs[R_SS].base;
2023     new_eflags = 0; /* avoid warning */
2024 #ifdef TARGET_X86_64
2025     if (shift == 2) {
2026         new_eip = popq(&sa);
2027         new_cs = popq(&sa) & 0xffff;
2028         if (is_iret) {
2029             new_eflags = popq(&sa);
2030         }
2031     } else
2032 #endif
2033     {
2034         if (shift == 1) {
2035             /* 32 bits */
2036             new_eip = popl(&sa);
2037             new_cs = popl(&sa) & 0xffff;
2038             if (is_iret) {
2039                 new_eflags = popl(&sa);
2040                 if (new_eflags & VM_MASK) {
2041                     goto return_to_vm86;
2042                 }
2043             }
2044         } else {
2045             /* 16 bits */
2046             new_eip = popw(&sa);
2047             new_cs = popw(&sa);
2048             if (is_iret) {
2049                 new_eflags = popw(&sa);
2050             }
2051         }
2052     }
2053     LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2054               new_cs, new_eip, shift, addend);
2055     LOG_PCALL_STATE(env_cpu(env));
2056     if ((new_cs & 0xfffc) == 0) {
2057         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2058     }
2059     if (load_segment_ra(env, &e1, &e2, new_cs, retaddr) != 0) {
2060         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2061     }
2062     if (!(e2 & DESC_S_MASK) ||
2063         !(e2 & DESC_CS_MASK)) {
2064         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2065     }
2066     rpl = new_cs & 3;
2067     if (rpl < cpl) {
2068         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2069     }
2070     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2071     if (e2 & DESC_C_MASK) {
2072         if (dpl > rpl) {
2073             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2074         }
2075     } else {
2076         if (dpl != rpl) {
2077             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2078         }
2079     }
2080     if (!(e2 & DESC_P_MASK)) {
2081         raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, retaddr);
2082     }
2083 
2084     sa.sp += addend;
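    /*
     * In 64-bit mode IRET always pops SS:RSP, even when returning to
     * the same privilege level, so only a same-level far RET (or a
     * non-64-bit same-level return) takes the short path below.
     */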
2085     if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2086                        ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2087         /* return to same privilege level */
2088         cpu_x86_load_seg_cache(env, R_CS, new_cs,
2089                        get_seg_base(e1, e2),
2090                        get_seg_limit(e1, e2),
2091                        e2);
2092     } else {
2093         /* return to different privilege level */
2094 #ifdef TARGET_X86_64
2095         if (shift == 2) {
2096             new_esp = popq(&sa);
2097             new_ss = popq(&sa) & 0xffff;
2098         } else
2099 #endif
2100         {
2101             if (shift == 1) {
2102                 /* 32 bits */
2103                 new_esp = popl(&sa);
2104                 new_ss = popl(&sa) & 0xffff;
2105             } else {
2106                 /* 16 bits */
2107                 new_esp = popw(&sa);
2108                 new_ss = popw(&sa);
2109             }
2110         }
2111         LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
2112                   new_ss, new_esp);
2113         if ((new_ss & 0xfffc) == 0) {
2114 #ifdef TARGET_X86_64
2115             /* NULL ss is allowed in long mode if cpl != 3 */
2116             /* XXX: test CS64? */
2117             if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2118                 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2119                                        0, 0xffffffff,
2120                                        DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2121                                        DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2122                                        DESC_W_MASK | DESC_A_MASK);
2123                 ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
2124             } else
2125 #endif
2126             {
2127                 raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
2128             }
2129         } else {
2130             if ((new_ss & 3) != rpl) {
2131                 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2132             }
2133             if (load_segment_ra(env, &ss_e1, &ss_e2, new_ss, retaddr) != 0) {
2134                 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2135             }
2136             if (!(ss_e2 & DESC_S_MASK) ||
2137                 (ss_e2 & DESC_CS_MASK) ||
2138                 !(ss_e2 & DESC_W_MASK)) {
2139                 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2140             }
2141             dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2142             if (dpl != rpl) {
2143                 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2144             }
2145             if (!(ss_e2 & DESC_P_MASK)) {
2146                 raise_exception_err_ra(env, EXCP0B_NOSEG, new_ss & 0xfffc, retaddr);
2147             }
2148             cpu_x86_load_seg_cache(env, R_SS, new_ss,
2149                                    get_seg_base(ss_e1, ss_e2),
2150                                    get_seg_limit(ss_e1, ss_e2),
2151                                    ss_e2);
2152         }
2153 
2154         cpu_x86_load_seg_cache(env, R_CS, new_cs,
2155                        get_seg_base(e1, e2),
2156                        get_seg_limit(e1, e2),
2157                        e2);
2158         sa.sp = new_esp;
2159 #ifdef TARGET_X86_64
2160         if (env->hflags & HF_CS64_MASK) {
2161             sa.sp_mask = -1;
2162         } else
2163 #endif
2164         {
2165             sa.sp_mask = get_sp_mask(ss_e2);
2166         }
2167 
2168         /* validate data segments */
2169         validate_seg(env, R_ES, rpl);
2170         validate_seg(env, R_DS, rpl);
2171         validate_seg(env, R_FS, rpl);
2172         validate_seg(env, R_GS, rpl);
2173 
2174         sa.sp += addend;
2175     }
2176     SET_ESP(sa.sp, sa.sp_mask);
2177     env->eip = new_eip;
2178     if (is_iret) {
2179         /* NOTE: 'cpl' is the _old_ CPL */
2180         eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2181         if (cpl == 0) {
2182             eflags_mask |= IOPL_MASK;
2183         }
2184         iopl = (env->eflags >> IOPL_SHIFT) & 3;
2185         if (cpl <= iopl) {
2186             eflags_mask |= IF_MASK;
2187         }
2188         if (shift == 0) {
2189             eflags_mask &= 0xffff;
2190         }
2191         cpu_load_eflags(env, new_eflags, eflags_mask);
2192     }
2193     return;
2194 
2195  return_to_vm86:
2196     new_esp = popl(&sa);
2197     new_ss = popl(&sa);
2198     new_es = popl(&sa);
2199     new_ds = popl(&sa);
2200     new_fs = popl(&sa);
2201     new_gs = popl(&sa);
2202 
2203     /* modify processor state */
2204     cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
2205                     IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
2206                     VIP_MASK);
2207     load_seg_vm(env, R_CS, new_cs & 0xffff);
2208     load_seg_vm(env, R_SS, new_ss & 0xffff);
2209     load_seg_vm(env, R_ES, new_es & 0xffff);
2210     load_seg_vm(env, R_DS, new_ds & 0xffff);
2211     load_seg_vm(env, R_FS, new_fs & 0xffff);
2212     load_seg_vm(env, R_GS, new_gs & 0xffff);
2213 
2214     env->eip = new_eip & 0xffff;
2215     env->regs[R_ESP] = new_esp;
2216 }
2217 
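/*
 * IRET with EFLAGS.NT set returns from a nested task: the previous
 * task's selector is read from the back link field at offset 0 of the
 * current TSS and a task switch is performed instead of a stack-based
 * return.
 */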
2218 void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
2219 {
2220     int tss_selector, type;
2221     uint32_t e1, e2;
2222 
2223     /* specific case for TSS */
2224     if (env->eflags & NT_MASK) {
2225 #ifdef TARGET_X86_64
2226         if (env->hflags & HF_LMA_MASK) {
2227             raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2228         }
2229 #endif
2230         tss_selector = cpu_lduw_kernel_ra(env, env->tr.base + 0, GETPC());
2231         if (tss_selector & 4) {
2232             raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2233         }
2234         if (load_segment_ra(env, &e1, &e2, tss_selector, GETPC()) != 0) {
2235             raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2236         }
2237         type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2238         /* NOTE: we check both segment and busy TSS */
2239         if (type != 3) {
2240             raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2241         }
2242         switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip, GETPC());
2243     } else {
2244         helper_ret_protected(env, shift, 1, 0, GETPC());
2245     }
2246     env->hflags2 &= ~HF2_NMI_MASK;
2247 }
2248 
2249 void helper_lret_protected(CPUX86State *env, int shift, int addend)
2250 {
2251     helper_ret_protected(env, shift, 0, addend, GETPC());
2252 }
2253 
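/*
 * SYSENTER loads flat CPL-0 segments derived from IA32_SYSENTER_CS:
 * CS = SYSENTER_CS, SS = SYSENTER_CS + 8, with ESP/EIP taken from
 * IA32_SYSENTER_ESP and IA32_SYSENTER_EIP.  VM, IF and RF are cleared.
 */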
2254 void helper_sysenter(CPUX86State *env)
2255 {
2256     if (env->sysenter_cs == 0) {
2257         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2258     }
2259     env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2260 
2261 #ifdef TARGET_X86_64
2262     if (env->hflags & HF_LMA_MASK) {
2263         cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2264                                0, 0xffffffff,
2265                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2266                                DESC_S_MASK |
2267                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2268                                DESC_L_MASK);
2269     } else
2270 #endif
2271     {
2272         cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2273                                0, 0xffffffff,
2274                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2275                                DESC_S_MASK |
2276                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2277     }
2278     cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2279                            0, 0xffffffff,
2280                            DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2281                            DESC_S_MASK |
2282                            DESC_W_MASK | DESC_A_MASK);
2283     env->regs[R_ESP] = env->sysenter_esp;
2284     env->eip = env->sysenter_eip;
2285 }
2286 
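/*
 * SYSEXIT returns to CPL 3 with EIP = EDX and ESP = ECX (RDX/RCX when
 * dflag == 2).  The user segments are again derived from
 * IA32_SYSENTER_CS: CS = base + 16 and SS = base + 24 for the 32-bit
 * variant, CS = base + 32 and SS = base + 40 for the 64-bit one.
 */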
2287 void helper_sysexit(CPUX86State *env, int dflag)
2288 {
2289     int cpl;
2290 
2291     cpl = env->hflags & HF_CPL_MASK;
2292     if (env->sysenter_cs == 0 || cpl != 0) {
2293         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2294     }
2295 #ifdef TARGET_X86_64
2296     if (dflag == 2) {
2297         cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
2298                                3, 0, 0xffffffff,
2299                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2300                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2301                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2302                                DESC_L_MASK);
2303         cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
2304                                3, 0, 0xffffffff,
2305                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2306                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2307                                DESC_W_MASK | DESC_A_MASK);
2308     } else
2309 #endif
2310     {
2311         cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
2312                                3, 0, 0xffffffff,
2313                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2314                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2315                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2316         cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
2317                                3, 0, 0xffffffff,
2318                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2319                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2320                                DESC_W_MASK | DESC_A_MASK);
2321     }
2322     env->regs[R_ESP] = env->regs[R_ECX];
2323     env->eip = env->regs[R_EDX];
2324 }
2325 
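/*
 * LSL: if the descriptor named by the selector is visible at the
 * current CPL and the selector's RPL, return its expanded limit and
 * set ZF; otherwise clear ZF and return 0.  Of the system descriptors
 * only TSS and LDT types are accepted.
 */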
2326 target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
2327 {
2328     unsigned int limit;
2329     uint32_t e1, e2, selector;
2330     int rpl, dpl, cpl, type;
2331 
2332     selector = selector1 & 0xffff;
2333     assert(CC_OP == CC_OP_EFLAGS);
2334     if ((selector & 0xfffc) == 0) {
2335         goto fail;
2336     }
2337     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2338         goto fail;
2339     }
2340     rpl = selector & 3;
2341     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2342     cpl = env->hflags & HF_CPL_MASK;
2343     if (e2 & DESC_S_MASK) {
2344         if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2345             /* conforming */
2346         } else {
2347             if (dpl < cpl || dpl < rpl) {
2348                 goto fail;
2349             }
2350         }
2351     } else {
2352         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2353         switch (type) {
2354         case 1:
2355         case 2:
2356         case 3:
2357         case 9:
2358         case 11:
2359             break;
2360         default:
2361             goto fail;
2362         }
2363         if (dpl < cpl || dpl < rpl) {
2364         fail:
2365             CC_SRC &= ~CC_Z;
2366             return 0;
2367         }
2368     }
2369     limit = get_seg_limit(e1, e2);
2370     CC_SRC |= CC_Z;
2371     return limit;
2372 }
2373 
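/*
 * LAR performs the same visibility check as LSL but returns the access
 * rights bytes (masked with 0x00f0ff00) instead of the limit, and also
 * accepts call gate and task gate system types.
 */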
2374 target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
2375 {
2376     uint32_t e1, e2, selector;
2377     int rpl, dpl, cpl, type;
2378 
2379     selector = selector1 & 0xffff;
2380     assert(CC_OP == CC_OP_EFLAGS);
2381     if ((selector & 0xfffc) == 0) {
2382         goto fail;
2383     }
2384     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2385         goto fail;
2386     }
2387     rpl = selector & 3;
2388     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2389     cpl = env->hflags & HF_CPL_MASK;
2390     if (e2 & DESC_S_MASK) {
2391         if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2392             /* conforming */
2393         } else {
2394             if (dpl < cpl || dpl < rpl) {
2395                 goto fail;
2396             }
2397         }
2398     } else {
2399         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2400         switch (type) {
2401         case 1:
2402         case 2:
2403         case 3:
2404         case 4:
2405         case 5:
2406         case 9:
2407         case 11:
2408         case 12:
2409             break;
2410         default:
2411             goto fail;
2412         }
2413         if (dpl < cpl || dpl < rpl) {
2414         fail:
2415             CC_SRC &= ~CC_Z;
2416             return 0;
2417         }
2418     }
2419     CC_SRC |= CC_Z;
2420     return e2 & 0x00f0ff00;
2421 }
2422 
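/*
 * VERR/VERW set ZF if the segment would be readable/writable at the
 * current CPL and the selector's RPL; conforming code segments are
 * readable regardless of privilege level.
 */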
2423 void helper_verr(CPUX86State *env, target_ulong selector1)
2424 {
2425     uint32_t e1, e2, eflags, selector;
2426     int rpl, dpl, cpl;
2427 
2428     selector = selector1 & 0xffff;
2429     eflags = cpu_cc_compute_all(env) | CC_Z;
2430     if ((selector & 0xfffc) == 0) {
2431         goto fail;
2432     }
2433     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2434         goto fail;
2435     }
2436     if (!(e2 & DESC_S_MASK)) {
2437         goto fail;
2438     }
2439     rpl = selector & 3;
2440     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2441     cpl = env->hflags & HF_CPL_MASK;
2442     if (e2 & DESC_CS_MASK) {
2443         if (!(e2 & DESC_R_MASK)) {
2444             goto fail;
2445         }
2446         if (!(e2 & DESC_C_MASK)) {
2447             if (dpl < cpl || dpl < rpl) {
2448                 goto fail;
2449             }
2450         }
2451     } else {
2452         if (dpl < cpl || dpl < rpl) {
2453         fail:
2454             eflags &= ~CC_Z;
2455         }
2456     }
2457     CC_SRC = eflags;
2458     CC_OP = CC_OP_EFLAGS;
2459 }
2460 
2461 void helper_verw(CPUX86State *env, target_ulong selector1)
2462 {
2463     uint32_t e1, e2, eflags, selector;
2464     int rpl, dpl, cpl;
2465 
2466     selector = selector1 & 0xffff;
2467     eflags = cpu_cc_compute_all(env) | CC_Z;
2468     if ((selector & 0xfffc) == 0) {
2469         goto fail;
2470     }
2471     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2472         goto fail;
2473     }
2474     if (!(e2 & DESC_S_MASK)) {
2475         goto fail;
2476     }
2477     rpl = selector & 3;
2478     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2479     cpl = env->hflags & HF_CPL_MASK;
2480     if (e2 & DESC_CS_MASK) {
2481         goto fail;
2482     } else {
2483         if (dpl < cpl || dpl < rpl) {
2484             goto fail;
2485         }
2486         if (!(e2 & DESC_W_MASK)) {
2487         fail:
2488             eflags &= ~CC_Z;
2489         }
2490     }
2491     CC_SRC = eflags;
2492     CC_OP = CC_OP_EFLAGS;
2493 }
2494