/*
 * Copyright (C) 2016 Veertu Inc,
 * Copyright (C) 2017 Google Inc,
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"

#include "cpu.h"
#include "x86_decode.h"
#include "x86_emu.h"
#include "vmcs.h"
#include "vmx.h"
#include "x86_mmu.h"
#include "x86_descr.h"

/* static uint32_t x86_segment_access_rights(struct x86_segment_descriptor *var)
{
   uint32_t ar;

   if (!var->p) {
       ar = 1 << 16;
       return ar;
   }

   ar = var->type & 15;
   ar |= (var->s & 1) << 4;
   ar |= (var->dpl & 3) << 5;
   ar |= (var->p & 1) << 7;
   ar |= (var->avl & 1) << 12;
   ar |= (var->l & 1) << 13;
   ar |= (var->db & 1) << 14;
   ar |= (var->g & 1) << 15;
   return ar;
}*/

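/*
 * Read the 8-byte segment descriptor selected by @sel from the guest's
 * GDT or LDT (chosen by the selector's TI bit) into @desc. Fails for
 * the null GDT selector or an index beyond the table limit.
 */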
bool x86_read_segment_descriptor(CPUState *cpu,
                                 struct x86_segment_descriptor *desc,
                                 x68_segment_selector sel)
{
    target_ulong base;
    uint32_t limit;

    memset(desc, 0, sizeof(*desc));

    /* Valid GDT descriptors start from index 1; entry 0 is the null descriptor */
    if (!sel.index && GDT_SEL == sel.ti) {
        return false;
    }

    if (GDT_SEL == sel.ti) {
        base  = rvmcs(cpu->accel->fd, VMCS_GUEST_GDTR_BASE);
        limit = rvmcs(cpu->accel->fd, VMCS_GUEST_GDTR_LIMIT);
    } else {
        base  = rvmcs(cpu->accel->fd, VMCS_GUEST_LDTR_BASE);
        limit = rvmcs(cpu->accel->fd, VMCS_GUEST_LDTR_LIMIT);
    }

    if (sel.index * 8 >= limit) {
        return false;
    }

    vmx_read_mem(cpu, desc, base + sel.index * 8, sizeof(*desc));
    return true;
}

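/*
 * Write @desc back into the guest GDT or LDT slot named by @sel.
 * Fails if the selector index lies beyond the table limit.
 */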
bool x86_write_segment_descriptor(CPUState *cpu,
                                  struct x86_segment_descriptor *desc,
                                  x68_segment_selector sel)
{
    target_ulong base;
    uint32_t limit;

    if (GDT_SEL == sel.ti) {
        base  = rvmcs(cpu->accel->fd, VMCS_GUEST_GDTR_BASE);
        limit = rvmcs(cpu->accel->fd, VMCS_GUEST_GDTR_LIMIT);
    } else {
        base  = rvmcs(cpu->accel->fd, VMCS_GUEST_LDTR_BASE);
        limit = rvmcs(cpu->accel->fd, VMCS_GUEST_LDTR_LIMIT);
    }

    if (sel.index * 8 >= limit) {
        printf("%s: descriptor table limit exceeded\n", __func__);
        return false;
    }
    vmx_write_mem(cpu, base + sel.index * 8, desc, sizeof(*desc));
    return true;
}

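/*
 * Read the guest IDT entry for vector @gate into @idt_desc.
 * Fails if the vector lies beyond the IDT limit.
 */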
bool x86_read_call_gate(CPUState *cpu, struct x86_call_gate *idt_desc,
                        int gate)
{
    target_ulong base  = rvmcs(cpu->accel->fd, VMCS_GUEST_IDTR_BASE);
    uint32_t limit = rvmcs(cpu->accel->fd, VMCS_GUEST_IDTR_LIMIT);

    memset(idt_desc, 0, sizeof(*idt_desc));
    if (gate * 8 >= limit) {
        printf("%s: idt limit\n", __func__);
        return false;
    }

    vmx_read_mem(cpu, idt_desc, base + gate * 8, sizeof(*idt_desc));
    return true;
}

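/* CR0.PE set: the guest runs in protected (or long) mode. */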
bool x86_is_protected(CPUState *cpu)
{
    uint64_t cr0 = rvmcs(cpu->accel->fd, VMCS_GUEST_CR0);
    return cr0 & CR0_PE_MASK;
}

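/* Real mode is simply protected mode not yet enabled (CR0.PE clear). */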
bool x86_is_real(CPUState *cpu)
{
    return !x86_is_protected(cpu);
}

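/* Virtual-8086 mode: protected mode with EFLAGS.VM set. */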
bool x86_is_v8086(CPUState *cpu)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;
    return x86_is_protected(cpu) && (env->eflags & VM_MASK);
}

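/* Long mode (IA-32e) is active when EFER.LMA is set. */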
bool x86_is_long_mode(CPUState *cpu)
{
    return rvmcs(cpu->accel->fd, VMCS_GUEST_IA32_EFER) & MSR_EFER_LMA;
}

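/*
 * 64-bit submode of long mode: EFER.LMA set and the CS descriptor's
 * L flag (bit 13 of the VMX access rights) set. With L clear the
 * guest runs in compatibility mode instead.
 */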
bool x86_is_long64_mode(CPUState *cpu)
{
    struct vmx_segment desc;
    vmx_read_segment_descriptor(cpu, &desc, R_CS);

    return x86_is_long_mode(cpu) && ((desc.ar >> 13) & 1);
}

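/* CR0.PG set: paging is enabled. */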
bool x86_is_paging_mode(CPUState *cpu)
{
    uint64_t cr0 = rvmcs(cpu->accel->fd, VMCS_GUEST_CR0);
    return cr0 & CR0_PG_MASK;
}

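/* CR4.PAE set: physical address extension is enabled. */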
bool x86_is_pae_enabled(CPUState *cpu)
{
    uint64_t cr4 = rvmcs(cpu->accel->fd, VMCS_GUEST_CR4);
    return cr4 & CR4_PAE_MASK;
}

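/*
 * Translate a segment-relative address into a linear address by adding
 * the segment base taken from the VMCS.
 */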
target_ulong linear_addr(CPUState *cpu, target_ulong addr, X86Seg seg)
{
    return vmx_read_segment_base(cpu, seg) + addr;
}

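/*
 * Like linear_addr(), but first truncate @addr to the given address
 * size (2 or 4 bytes) so that 16- and 32-bit offsets wrap correctly.
 */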
target_ulong linear_addr_size(CPUState *cpu, target_ulong addr, int size,
                              X86Seg seg)
{
    switch (size) {
    case 2:
        addr = (uint16_t)addr;
        break;
    case 4:
        addr = (uint32_t)addr;
        break;
    default:
        break;
    }
    return linear_addr(cpu, addr, seg);
}

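/* Linear address of an instruction pointer, relative to CS. */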
target_ulong linear_rip(CPUState *cpu, target_ulong rip)
{
    return linear_addr(cpu, rip, R_CS);
}