xref: /openbmc/qemu/hw/i386/intel_iommu.c (revision 1f7685fa)
1 /*
2  * QEMU emulation of an Intel IOMMU (VT-d)
3  *   (DMA Remapping device)
4  *
5  * Copyright (C) 2013 Knut Omang, Oracle <knut.omang@oracle.com>
6  * Copyright (C) 2014 Le Tan, <tamlokveer@gmail.com>
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12 
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  * GNU General Public License for more details.
17 
18  * You should have received a copy of the GNU General Public License along
19  * with this program; if not, see <http://www.gnu.org/licenses/>.
20  */
21 
22 #include "qemu/osdep.h"
23 #include "hw/sysbus.h"
24 #include "exec/address-spaces.h"
25 #include "intel_iommu_internal.h"
26 #include "hw/pci/pci.h"
27 
28 /*#define DEBUG_INTEL_IOMMU*/
29 #ifdef DEBUG_INTEL_IOMMU
30 enum {
31     DEBUG_GENERAL, DEBUG_CSR, DEBUG_INV, DEBUG_MMU, DEBUG_FLOG,
32     DEBUG_CACHE,
33 };
34 #define VTD_DBGBIT(x)   (1 << DEBUG_##x)
35 static int vtd_dbgflags = VTD_DBGBIT(GENERAL) | VTD_DBGBIT(CSR);
36 
37 #define VTD_DPRINTF(what, fmt, ...) do { \
38     if (vtd_dbgflags & VTD_DBGBIT(what)) { \
39         fprintf(stderr, "(vtd)%s: " fmt "\n", __func__, \
40                 ## __VA_ARGS__); } \
41     } while (0)
42 #else
43 #define VTD_DPRINTF(what, fmt, ...) do {} while (0)
44 #endif
45 
46 static void vtd_define_quad(IntelIOMMUState *s, hwaddr addr, uint64_t val,
47                             uint64_t wmask, uint64_t w1cmask)
48 {
49     stq_le_p(&s->csr[addr], val);
50     stq_le_p(&s->wmask[addr], wmask);
51     stq_le_p(&s->w1cmask[addr], w1cmask);
52 }
53 
54 static void vtd_define_quad_wo(IntelIOMMUState *s, hwaddr addr, uint64_t mask)
55 {
56     stq_le_p(&s->womask[addr], mask);
57 }
58 
59 static void vtd_define_long(IntelIOMMUState *s, hwaddr addr, uint32_t val,
60                             uint32_t wmask, uint32_t w1cmask)
61 {
62     stl_le_p(&s->csr[addr], val);
63     stl_le_p(&s->wmask[addr], wmask);
64     stl_le_p(&s->w1cmask[addr], w1cmask);
65 }
66 
67 static void vtd_define_long_wo(IntelIOMMUState *s, hwaddr addr, uint32_t mask)
68 {
69     stl_le_p(&s->womask[addr], mask);
70 }
71 
72 /* "External" get/set operations */
73 static void vtd_set_quad(IntelIOMMUState *s, hwaddr addr, uint64_t val)
74 {
75     uint64_t oldval = ldq_le_p(&s->csr[addr]);
76     uint64_t wmask = ldq_le_p(&s->wmask[addr]);
77     uint64_t w1cmask = ldq_le_p(&s->w1cmask[addr]);
78     stq_le_p(&s->csr[addr],
79              ((oldval & ~wmask) | (val & wmask)) & ~(w1cmask & val));
80 }
81 
82 static void vtd_set_long(IntelIOMMUState *s, hwaddr addr, uint32_t val)
83 {
84     uint32_t oldval = ldl_le_p(&s->csr[addr]);
85     uint32_t wmask = ldl_le_p(&s->wmask[addr]);
86     uint32_t w1cmask = ldl_le_p(&s->w1cmask[addr]);
87     stl_le_p(&s->csr[addr],
88              ((oldval & ~wmask) | (val & wmask)) & ~(w1cmask & val));
89 }
90 
91 static uint64_t vtd_get_quad(IntelIOMMUState *s, hwaddr addr)
92 {
93     uint64_t val = ldq_le_p(&s->csr[addr]);
94     uint64_t womask = ldq_le_p(&s->womask[addr]);
95     return val & ~womask;
96 }
97 
98 static uint32_t vtd_get_long(IntelIOMMUState *s, hwaddr addr)
99 {
100     uint32_t val = ldl_le_p(&s->csr[addr]);
101     uint32_t womask = ldl_le_p(&s->womask[addr]);
102     return val & ~womask;
103 }
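
/* Worked example of the masking scheme above (illustrative values only):
 * consider a 32-bit register holding 0x00001201 whose wmask is
 * 0x0000ff00 and whose w1cmask is 0x00000001.  A guest write of
 * 0x0000aa01 through vtd_set_long() resolves as
 *
 *   (0x00001201 & ~0x0000ff00) | (0x0000aa01 & 0x0000ff00) = 0x0000aa01
 *   0x0000aa01 & ~(0x00000001 & 0x0000aa01)                = 0x0000aa00
 *
 * i.e. read-only bits keep their old value, writable bits take the new
 * value, and write-1-to-clear bits written as 1 are cleared.  On reads
 * through vtd_get_long()/vtd_get_quad(), bits set in womask read as 0.
 */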
104 
105 /* "Internal" get/set operations */
106 static uint64_t vtd_get_quad_raw(IntelIOMMUState *s, hwaddr addr)
107 {
108     return ldq_le_p(&s->csr[addr]);
109 }
110 
111 static uint32_t vtd_get_long_raw(IntelIOMMUState *s, hwaddr addr)
112 {
113     return ldl_le_p(&s->csr[addr]);
114 }
115 
116 static void vtd_set_quad_raw(IntelIOMMUState *s, hwaddr addr, uint64_t val)
117 {
118     stq_le_p(&s->csr[addr], val);
119 }
120 
121 static uint32_t vtd_set_clear_mask_long(IntelIOMMUState *s, hwaddr addr,
122                                         uint32_t clear, uint32_t mask)
123 {
124     uint32_t new_val = (ldl_le_p(&s->csr[addr]) & ~clear) | mask;
125     stl_le_p(&s->csr[addr], new_val);
126     return new_val;
127 }
128 
129 static uint64_t vtd_set_clear_mask_quad(IntelIOMMUState *s, hwaddr addr,
130                                         uint64_t clear, uint64_t mask)
131 {
132     uint64_t new_val = (ldq_le_p(&s->csr[addr]) & ~clear) | mask;
133     stq_le_p(&s->csr[addr], new_val);
134     return new_val;
135 }
136 
137 /* GHashTable functions */
138 static gboolean vtd_uint64_equal(gconstpointer v1, gconstpointer v2)
139 {
140     return *((const uint64_t *)v1) == *((const uint64_t *)v2);
141 }
142 
143 static guint vtd_uint64_hash(gconstpointer v)
144 {
145     return (guint)*(const uint64_t *)v;
146 }
147 
148 static gboolean vtd_hash_remove_by_domain(gpointer key, gpointer value,
149                                           gpointer user_data)
150 {
151     VTDIOTLBEntry *entry = (VTDIOTLBEntry *)value;
152     uint16_t domain_id = *(uint16_t *)user_data;
153     return entry->domain_id == domain_id;
154 }
155 
156 /* The shift of an addr for a certain level of paging structure */
157 static inline uint32_t vtd_slpt_level_shift(uint32_t level)
158 {
159     return VTD_PAGE_SHIFT_4K + (level - 1) * VTD_SL_LEVEL_BITS;
160 }
161 
162 static inline uint64_t vtd_slpt_level_page_mask(uint32_t level)
163 {
164     return ~((1ULL << vtd_slpt_level_shift(level)) - 1);
165 }
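
/* Illustration: with VTD_PAGE_SHIFT_4K == 12 and VTD_SL_LEVEL_BITS == 9
 * (as defined in intel_iommu_internal.h), vtd_slpt_level_shift() yields
 * 12/21/30/39 for levels 1-4, so vtd_slpt_level_page_mask() describes a
 * 4KiB frame at level 1, a 2MiB frame at level 2 and a 1GiB frame at
 * level 3.
 */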
166 
167 static gboolean vtd_hash_remove_by_page(gpointer key, gpointer value,
168                                         gpointer user_data)
169 {
170     VTDIOTLBEntry *entry = (VTDIOTLBEntry *)value;
171     VTDIOTLBPageInvInfo *info = (VTDIOTLBPageInvInfo *)user_data;
172     uint64_t gfn = (info->addr >> VTD_PAGE_SHIFT_4K) & info->mask;
173     uint64_t gfn_tlb = (info->addr & entry->mask) >> VTD_PAGE_SHIFT_4K;
174     return (entry->domain_id == info->domain_id) &&
175             (((entry->gfn & info->mask) == gfn) ||
176              (entry->gfn == gfn_tlb));
177 }
178 
179 /* Reset the context-cache generation (gen) of every VTDAddressSpace to
180  * zero and set the global gen of the IntelIOMMUState to 1.
181  */
182 static void vtd_reset_context_cache(IntelIOMMUState *s)
183 {
184     VTDAddressSpace *vtd_as;
185     VTDBus *vtd_bus;
186     GHashTableIter bus_it;
187     uint32_t devfn_it;
188 
189     g_hash_table_iter_init(&bus_it, s->vtd_as_by_busptr);
190 
191     VTD_DPRINTF(CACHE, "global context_cache_gen=1");
192     while (g_hash_table_iter_next(&bus_it, NULL, (void **)&vtd_bus)) {
193         for (devfn_it = 0; devfn_it < VTD_PCI_DEVFN_MAX; ++devfn_it) {
194             vtd_as = vtd_bus->dev_as[devfn_it];
195             if (!vtd_as) {
196                 continue;
197             }
198             vtd_as->context_cache_entry.context_cache_gen = 0;
199         }
200     }
201     s->context_cache_gen = 1;
202 }
203 
204 static void vtd_reset_iotlb(IntelIOMMUState *s)
205 {
206     assert(s->iotlb);
207     g_hash_table_remove_all(s->iotlb);
208 }
209 
210 static uint64_t vtd_get_iotlb_key(uint64_t gfn, uint8_t source_id,
211                                   uint32_t level)
212 {
213     return gfn | ((uint64_t)(source_id) << VTD_IOTLB_SID_SHIFT) |
214            ((uint64_t)(level) << VTD_IOTLB_LVL_SHIFT);
215 }
216 
217 static uint64_t vtd_get_iotlb_gfn(hwaddr addr, uint32_t level)
218 {
219     return (addr & vtd_slpt_level_page_mask(level)) >> VTD_PAGE_SHIFT_4K;
220 }
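
/* The IOTLB key packs all tag fields of one translation into a single
 * uint64_t (sketch; the exact shift values are defined in
 * intel_iommu_internal.h):
 *
 *   key = gfn | (uint64_t)source_id << VTD_IOTLB_SID_SHIFT
 *             | (uint64_t)level << VTD_IOTLB_LVL_SHIFT;
 *
 * so one g_hash_table_lookup() per level is enough to probe for cached
 * 4KiB, 2MiB and 1GiB mappings of the same address.
 */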
221 
222 static VTDIOTLBEntry *vtd_lookup_iotlb(IntelIOMMUState *s, uint16_t source_id,
223                                        hwaddr addr)
224 {
225     VTDIOTLBEntry *entry = NULL; /* stays NULL when no level hits */
226     uint64_t key;
227     int level;
228 
229     for (level = VTD_SL_PT_LEVEL; level < VTD_SL_PML4_LEVEL; level++) {
230         key = vtd_get_iotlb_key(vtd_get_iotlb_gfn(addr, level),
231                                 source_id, level);
232         entry = g_hash_table_lookup(s->iotlb, &key);
233         if (entry) {
234             goto out;
235         }
236     }
237 
238 out:
239     return entry;
240 }
241 
242 static void vtd_update_iotlb(IntelIOMMUState *s, uint16_t source_id,
243                              uint16_t domain_id, hwaddr addr, uint64_t slpte,
244                              bool read_flags, bool write_flags,
245                              uint32_t level)
246 {
247     VTDIOTLBEntry *entry = g_malloc(sizeof(*entry));
248     uint64_t *key = g_malloc(sizeof(*key));
249     uint64_t gfn = vtd_get_iotlb_gfn(addr, level);
250 
251     VTD_DPRINTF(CACHE, "update iotlb sid 0x%"PRIx16 " gpa 0x%"PRIx64
252                 " slpte 0x%"PRIx64 " did 0x%"PRIx16, source_id, addr, slpte,
253                 domain_id);
254     if (g_hash_table_size(s->iotlb) >= VTD_IOTLB_MAX_SIZE) {
255         VTD_DPRINTF(CACHE, "iotlb exceeds size limit, forced to reset");
256         vtd_reset_iotlb(s);
257     }
258 
259     entry->gfn = gfn;
260     entry->domain_id = domain_id;
261     entry->slpte = slpte;
262     entry->read_flags = read_flags;
263     entry->write_flags = write_flags;
264     entry->mask = vtd_slpt_level_page_mask(level);
265     *key = vtd_get_iotlb_key(gfn, source_id, level);
266     g_hash_table_replace(s->iotlb, key, entry);
267 }
268 
269 /* Given the register addresses of the MSI address and the MSI data,
270  * generate an interrupt via MSI.
271  */
272 static void vtd_generate_interrupt(IntelIOMMUState *s, hwaddr mesg_addr_reg,
273                                    hwaddr mesg_data_reg)
274 {
275     hwaddr addr;
276     uint32_t data;
277 
278     assert(mesg_data_reg < DMAR_REG_SIZE);
279     assert(mesg_addr_reg < DMAR_REG_SIZE);
280 
281     addr = vtd_get_long_raw(s, mesg_addr_reg);
282     data = vtd_get_long_raw(s, mesg_data_reg);
283 
284     VTD_DPRINTF(FLOG, "msi: addr 0x%"PRIx64 " data 0x%"PRIx32, addr, data);
285     address_space_stl_le(&address_space_memory, addr, data,
286                          MEMTXATTRS_UNSPECIFIED, NULL);
287 }
288 
289 /* Generate a fault event to software via MSI if conditions are met.
290  * Notice that the value of FSTS_REG passed in should be the one before
291  * any update.
292  */
293 static void vtd_generate_fault_event(IntelIOMMUState *s, uint32_t pre_fsts)
294 {
295     if (pre_fsts & VTD_FSTS_PPF || pre_fsts & VTD_FSTS_PFO ||
296         pre_fsts & VTD_FSTS_IQE) {
297         VTD_DPRINTF(FLOG, "there are previous interrupt conditions "
298                     "to be serviced by software, fault event is not generated "
299                     "(FSTS_REG 0x%"PRIx32 ")", pre_fsts);
300         return;
301     }
302     vtd_set_clear_mask_long(s, DMAR_FECTL_REG, 0, VTD_FECTL_IP);
303     if (vtd_get_long_raw(s, DMAR_FECTL_REG) & VTD_FECTL_IM) {
304         VTD_DPRINTF(FLOG, "Interrupt Mask set, fault event is not generated");
305     } else {
306         vtd_generate_interrupt(s, DMAR_FEADDR_REG, DMAR_FEDATA_REG);
307         vtd_set_clear_mask_long(s, DMAR_FECTL_REG, VTD_FECTL_IP, 0);
308     }
309 }
310 
311 /* Check if the Fault (F) field of the Fault Recording Register referenced by
312  * @index is Set.
313  */
314 static bool vtd_is_frcd_set(IntelIOMMUState *s, uint16_t index)
315 {
316     /* Each reg is 128-bit */
317     hwaddr addr = DMAR_FRCD_REG_OFFSET + (((uint64_t)index) << 4);
318     addr += 8; /* Access the high 64-bit half */
319 
320     assert(index < DMAR_FRCD_REG_NR);
321 
322     return vtd_get_quad_raw(s, addr) & VTD_FRCD_F;
323 }
324 
325 /* Update the PPF field of the Fault Status Register.
326  * Should be called whenever the F field of any fault recording register
327  * changes.
328  */
329 static void vtd_update_fsts_ppf(IntelIOMMUState *s)
330 {
331     uint32_t i;
332     uint32_t ppf_mask = 0;
333 
334     for (i = 0; i < DMAR_FRCD_REG_NR; i++) {
335         if (vtd_is_frcd_set(s, i)) {
336             ppf_mask = VTD_FSTS_PPF;
337             break;
338         }
339     }
340     vtd_set_clear_mask_long(s, DMAR_FSTS_REG, VTD_FSTS_PPF, ppf_mask);
341     VTD_DPRINTF(FLOG, "set PPF of FSTS_REG to %d", ppf_mask ? 1 : 0);
342 }
343 
344 static void vtd_set_frcd_and_update_ppf(IntelIOMMUState *s, uint16_t index)
345 {
346     /* Each reg is 128-bit */
347     hwaddr addr = DMAR_FRCD_REG_OFFSET + (((uint64_t)index) << 4);
348     addr += 8; /* Access the high 64-bit half */
349 
350     assert(index < DMAR_FRCD_REG_NR);
351 
352     vtd_set_clear_mask_quad(s, addr, 0, VTD_FRCD_F);
353     vtd_update_fsts_ppf(s);
354 }
355 
356 /* Must not update the F field now; that is done later */
357 static void vtd_record_frcd(IntelIOMMUState *s, uint16_t index,
358                             uint16_t source_id, hwaddr addr,
359                             VTDFaultReason fault, bool is_write)
360 {
361     uint64_t hi = 0, lo;
362     hwaddr frcd_reg_addr = DMAR_FRCD_REG_OFFSET + (((uint64_t)index) << 4);
363 
364     assert(index < DMAR_FRCD_REG_NR);
365 
366     lo = VTD_FRCD_FI(addr);
367     hi = VTD_FRCD_SID(source_id) | VTD_FRCD_FR(fault);
368     if (!is_write) {
369         hi |= VTD_FRCD_T;
370     }
371     vtd_set_quad_raw(s, frcd_reg_addr, lo);
372     vtd_set_quad_raw(s, frcd_reg_addr + 8, hi);
373     VTD_DPRINTF(FLOG, "record to FRCD_REG #%"PRIu16 ": hi 0x%"PRIx64
374                 ", lo 0x%"PRIx64, index, hi, lo);
375 }
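
/* Resulting layout of one 128-bit Fault Recording Register (sketch; the
 * field macros are defined in intel_iommu_internal.h): the low 64-bit
 * half carries the Fault Info (faulting page address) via VTD_FRCD_FI();
 * the high half carries the Source ID, the Fault Reason and the T bit
 * for read requests.  Bit F is set only afterwards, by
 * vtd_set_frcd_and_update_ppf(), once both halves are in place.
 */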
376 
377 /* Try to collapse multiple pending faults from the same requester */
378 static bool vtd_try_collapse_fault(IntelIOMMUState *s, uint16_t source_id)
379 {
380     uint32_t i;
381     uint64_t frcd_reg;
382     hwaddr addr = DMAR_FRCD_REG_OFFSET + 8; /* The high 64-bit half */
383 
384     for (i = 0; i < DMAR_FRCD_REG_NR; i++) {
385         frcd_reg = vtd_get_quad_raw(s, addr);
386         VTD_DPRINTF(FLOG, "frcd_reg #%d 0x%"PRIx64, i, frcd_reg);
387         if ((frcd_reg & VTD_FRCD_F) &&
388             ((frcd_reg & VTD_FRCD_SID_MASK) == source_id)) {
389             return true;
390         }
391         addr += 16; /* 128-bit for each */
392     }
393     return false;
394 }
395 
396 /* Log and report a DMAR (address translation) fault to software */
397 static void vtd_report_dmar_fault(IntelIOMMUState *s, uint16_t source_id,
398                                   hwaddr addr, VTDFaultReason fault,
399                                   bool is_write)
400 {
401     uint32_t fsts_reg = vtd_get_long_raw(s, DMAR_FSTS_REG);
402 
403     assert(fault < VTD_FR_MAX);
404 
405     if (fault == VTD_FR_RESERVED_ERR) {
406         /* This is not a normal fault reason case. Drop it. */
407         return;
408     }
409     VTD_DPRINTF(FLOG, "sid 0x%"PRIx16 ", fault %d, addr 0x%"PRIx64
410                 ", is_write %d", source_id, fault, addr, is_write);
411     if (fsts_reg & VTD_FSTS_PFO) {
412         VTD_DPRINTF(FLOG, "new fault is not recorded due to "
413                     "Primary Fault Overflow");
414         return;
415     }
416     if (vtd_try_collapse_fault(s, source_id)) {
417         VTD_DPRINTF(FLOG, "new fault is not recorded due to "
418                     "compression of faults");
419         return;
420     }
421     if (vtd_is_frcd_set(s, s->next_frcd_reg)) {
422         VTD_DPRINTF(FLOG, "Primary Fault Overflow and "
423                     "new fault is not recorded, set PFO field");
424         vtd_set_clear_mask_long(s, DMAR_FSTS_REG, 0, VTD_FSTS_PFO);
425         return;
426     }
427 
428     vtd_record_frcd(s, s->next_frcd_reg, source_id, addr, fault, is_write);
429 
430     if (fsts_reg & VTD_FSTS_PPF) {
431         VTD_DPRINTF(FLOG, "there are pending faults already, "
432                     "fault event is not generated");
433         vtd_set_frcd_and_update_ppf(s, s->next_frcd_reg);
434         s->next_frcd_reg++;
435         if (s->next_frcd_reg == DMAR_FRCD_REG_NR) {
436             s->next_frcd_reg = 0;
437         }
438     } else {
439         vtd_set_clear_mask_long(s, DMAR_FSTS_REG, VTD_FSTS_FRI_MASK,
440                                 VTD_FSTS_FRI(s->next_frcd_reg));
441         vtd_set_frcd_and_update_ppf(s, s->next_frcd_reg); /* Will set PPF */
442         s->next_frcd_reg++;
443         if (s->next_frcd_reg == DMAR_FRCD_REG_NR) {
444             s->next_frcd_reg = 0;
445         }
446         /* This case actually causes the PPF to be Set.
447          * So generate a fault event (interrupt).
448          */
449          vtd_generate_fault_event(s, fsts_reg);
450     }
451 }
452 
453 /* Handle the Invalidation Queue Error (IQE) condition of the queued
454  * invalidation interface.
455  */
456 static void vtd_handle_inv_queue_error(IntelIOMMUState *s)
457 {
458     uint32_t fsts_reg = vtd_get_long_raw(s, DMAR_FSTS_REG);
459 
460     vtd_set_clear_mask_long(s, DMAR_FSTS_REG, 0, VTD_FSTS_IQE);
461     vtd_generate_fault_event(s, fsts_reg);
462 }
463 
464 /* Set the IWC field and try to generate an invalidation completion interrupt */
465 static void vtd_generate_completion_event(IntelIOMMUState *s)
466 {
467     VTD_DPRINTF(INV, "completes an invalidation wait command with "
468                 "Interrupt Flag");
469     if (vtd_get_long_raw(s, DMAR_ICS_REG) & VTD_ICS_IWC) {
470         VTD_DPRINTF(INV, "there is a previous interrupt condition to be "
471                     "serviced by software, "
472                     "new invalidation event is not generated");
473         return;
474     }
475     vtd_set_clear_mask_long(s, DMAR_ICS_REG, 0, VTD_ICS_IWC);
476     vtd_set_clear_mask_long(s, DMAR_IECTL_REG, 0, VTD_IECTL_IP);
477     if (vtd_get_long_raw(s, DMAR_IECTL_REG) & VTD_IECTL_IM) {
478         VTD_DPRINTF(INV, "IM filed in IECTL_REG is set, new invalidation "
479                     "event is not generated");
480         return;
481     } else {
482         /* Generate the interrupt event */
483         vtd_generate_interrupt(s, DMAR_IEADDR_REG, DMAR_IEDATA_REG);
484         vtd_set_clear_mask_long(s, DMAR_IECTL_REG, VTD_IECTL_IP, 0);
485     }
486 }
487 
488 static inline bool vtd_root_entry_present(VTDRootEntry *root)
489 {
490     return root->val & VTD_ROOT_ENTRY_P;
491 }
492 
493 static int vtd_get_root_entry(IntelIOMMUState *s, uint8_t index,
494                               VTDRootEntry *re)
495 {
496     dma_addr_t addr;
497 
498     addr = s->root + index * sizeof(*re);
499     if (dma_memory_read(&address_space_memory, addr, re, sizeof(*re))) {
500         VTD_DPRINTF(GENERAL, "error: fail to access root-entry at 0x%"PRIx64
501                     " + %"PRIu8, s->root, index);
502         re->val = 0;
503         return -VTD_FR_ROOT_TABLE_INV;
504     }
505     re->val = le64_to_cpu(re->val);
506     return 0;
507 }
508 
509 static inline bool vtd_context_entry_present(VTDContextEntry *context)
510 {
511     return context->lo & VTD_CONTEXT_ENTRY_P;
512 }
513 
514 static int vtd_get_context_entry_from_root(VTDRootEntry *root, uint8_t index,
515                                            VTDContextEntry *ce)
516 {
517     dma_addr_t addr;
518 
519     if (!vtd_root_entry_present(root)) {
520         VTD_DPRINTF(GENERAL, "error: root-entry is not present");
521         return -VTD_FR_ROOT_ENTRY_P;
522     }
523     addr = (root->val & VTD_ROOT_ENTRY_CTP) + index * sizeof(*ce);
524     if (dma_memory_read(&address_space_memory, addr, ce, sizeof(*ce))) {
525         VTD_DPRINTF(GENERAL, "error: fail to access context-entry at 0x%"PRIx64
526                     " + %"PRIu8,
527                     (uint64_t)(root->val & VTD_ROOT_ENTRY_CTP), index);
528         return -VTD_FR_CONTEXT_TABLE_INV;
529     }
530     ce->lo = le64_to_cpu(ce->lo);
531     ce->hi = le64_to_cpu(ce->hi);
532     return 0;
533 }
534 
535 static inline dma_addr_t vtd_get_slpt_base_from_context(VTDContextEntry *ce)
536 {
537     return ce->lo & VTD_CONTEXT_ENTRY_SLPTPTR;
538 }
539 
540 static inline uint64_t vtd_get_slpte_addr(uint64_t slpte)
541 {
542     return slpte & VTD_SL_PT_BASE_ADDR_MASK;
543 }
544 
545 /* Whether the slpte indicates the address of the page frame */
546 static inline bool vtd_is_last_slpte(uint64_t slpte, uint32_t level)
547 {
548     return level == VTD_SL_PT_LEVEL || (slpte & VTD_SL_PT_PAGE_SIZE_MASK);
549 }
550 
551 /* Get the content of an slpte located at @base_addr[@index] */
552 static uint64_t vtd_get_slpte(dma_addr_t base_addr, uint32_t index)
553 {
554     uint64_t slpte;
555 
556     assert(index < VTD_SL_PT_ENTRY_NR);
557 
558     if (dma_memory_read(&address_space_memory,
559                         base_addr + index * sizeof(slpte), &slpte,
560                         sizeof(slpte))) {
561         slpte = (uint64_t)-1;
562         return slpte;
563     }
564     slpte = le64_to_cpu(slpte);
565     return slpte;
566 }
567 
568 /* Given a gpa and the level of the paging structure, return the offset
569  * of the current level.
570  */
571 static inline uint32_t vtd_gpa_level_offset(uint64_t gpa, uint32_t level)
572 {
573     return (gpa >> vtd_slpt_level_shift(level)) &
574             ((1ULL << VTD_SL_LEVEL_BITS) - 1);
575 }
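
/* For example, with a 4-level table a 48-bit gpa is sliced into four
 * 9-bit indexes at bit offsets 39/30/21/12 for levels 4/3/2/1, each
 * selecting one of the 512 entries of the paging structure at that
 * level.
 */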
576 
577 /* Check Capability Register to see if the @level of page-table is supported */
578 static inline bool vtd_is_level_supported(IntelIOMMUState *s, uint32_t level)
579 {
580     return VTD_CAP_SAGAW_MASK & s->cap &
581            (1ULL << (level - 2 + VTD_CAP_SAGAW_SHIFT));
582 }
583 
584 /* Get the page-table level that hardware should use for the second-level
585  * page-table walk from the Address Width field of context-entry.
586  */
587 static inline uint32_t vtd_get_level_from_context_entry(VTDContextEntry *ce)
588 {
589     return 2 + (ce->hi & VTD_CONTEXT_ENTRY_AW);
590 }
591 
592 static inline uint32_t vtd_get_agaw_from_context_entry(VTDContextEntry *ce)
593 {
594     return 30 + (ce->hi & VTD_CONTEXT_ENTRY_AW) * 9;
595 }
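
/* Example: an AW field of 1 selects a 39-bit AGAW (30 + 1 * 9) walked
 * with a 3-level table (2 + 1), while AW == 2 selects 48-bit/4-level.
 * vtd_is_level_supported() correspondingly tests SAGAW bit (level - 2)
 * of the capability register, assuming VTD_CAP_SAGAW_SHIFT locates that
 * field.
 */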
596 
597 static const uint64_t vtd_paging_entry_rsvd_field[] = {
598     [0] = ~0ULL,
599     /* For non-large pages */
600     [1] = 0x800ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
601     [2] = 0x800ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
602     [3] = 0x800ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
603     [4] = 0x880ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
604     /* For large pages */
605     [5] = 0x800ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
606     [6] = 0x1ff800ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
607     [7] = 0x3ffff800ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
608     [8] = 0x880ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
609 };
610 
611 static bool vtd_slpte_nonzero_rsvd(uint64_t slpte, uint32_t level)
612 {
613     if (slpte & VTD_SL_PT_PAGE_SIZE_MASK) {
614         /* Maybe large page */
615         return slpte & vtd_paging_entry_rsvd_field[level + 4];
616     } else {
617         return slpte & vtd_paging_entry_rsvd_field[level];
618     }
619 }
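
/* The "level + 4" indexing selects the large-page variant of the
 * reserved-bit mask: e.g. a 2MiB page (level 2) uses entry [6], whose
 * low bits 0x1ff800 cover address bits 20:11, which must be zero in a
 * 2MiB page-table entry.
 */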
620 
621 /* Given the @gpa, get the relevant @slptep. @slpte_level will be the last
622  * level of the translation, usable to decide the size of a large page.
623  */
624 static int vtd_gpa_to_slpte(VTDContextEntry *ce, uint64_t gpa, bool is_write,
625                             uint64_t *slptep, uint32_t *slpte_level,
626                             bool *reads, bool *writes)
627 {
628     dma_addr_t addr = vtd_get_slpt_base_from_context(ce);
629     uint32_t level = vtd_get_level_from_context_entry(ce);
630     uint32_t offset;
631     uint64_t slpte;
632     uint32_t ce_agaw = vtd_get_agaw_from_context_entry(ce);
633     uint64_t access_right_check;
634 
635     /* Check if @gpa is above 2^X-1, where X is the minimum of MGAW in CAP_REG
636      * and AW in context-entry.
637      */
638     if (gpa & ~((1ULL << MIN(ce_agaw, VTD_MGAW)) - 1)) {
639         VTD_DPRINTF(GENERAL, "error: gpa 0x%"PRIx64 " exceeds limits", gpa);
640         return -VTD_FR_ADDR_BEYOND_MGAW;
641     }
642 
643     /* FIXME: what is the Atomics request here? */
644     access_right_check = is_write ? VTD_SL_W : VTD_SL_R;
645 
646     while (true) {
647         offset = vtd_gpa_level_offset(gpa, level);
648         slpte = vtd_get_slpte(addr, offset);
649 
650         if (slpte == (uint64_t)-1) {
651             VTD_DPRINTF(GENERAL, "error: fail to access second-level paging "
652                         "entry at level %"PRIu32 " for gpa 0x%"PRIx64,
653                         level, gpa);
654             if (level == vtd_get_level_from_context_entry(ce)) {
655                 /* Invalid programming of context-entry */
656                 return -VTD_FR_CONTEXT_ENTRY_INV;
657             } else {
658                 return -VTD_FR_PAGING_ENTRY_INV;
659             }
660         }
661         *reads = (*reads) && (slpte & VTD_SL_R);
662         *writes = (*writes) && (slpte & VTD_SL_W);
663         if (!(slpte & access_right_check)) {
664             VTD_DPRINTF(GENERAL, "error: lack of %s permission for "
665                         "gpa 0x%"PRIx64 " slpte 0x%"PRIx64,
666                         (is_write ? "write" : "read"), gpa, slpte);
667             return is_write ? -VTD_FR_WRITE : -VTD_FR_READ;
668         }
669         if (vtd_slpte_nonzero_rsvd(slpte, level)) {
670             VTD_DPRINTF(GENERAL, "error: non-zero reserved field in second "
671                         "level paging entry level %"PRIu32 " slpte 0x%"PRIx64,
672                         level, slpte);
673             return -VTD_FR_PAGING_ENTRY_RSVD;
674         }
675 
676         if (vtd_is_last_slpte(slpte, level)) {
677             *slptep = slpte;
678             *slpte_level = level;
679             return 0;
680         }
681         addr = vtd_get_slpte_addr(slpte);
682         level--;
683     }
684 }
685 
686 /* Map a device to its corresponding domain (context-entry) */
687 static int vtd_dev_to_context_entry(IntelIOMMUState *s, uint8_t bus_num,
688                                     uint8_t devfn, VTDContextEntry *ce)
689 {
690     VTDRootEntry re;
691     int ret_fr;
692 
693     ret_fr = vtd_get_root_entry(s, bus_num, &re);
694     if (ret_fr) {
695         return ret_fr;
696     }
697 
698     if (!vtd_root_entry_present(&re)) {
699         VTD_DPRINTF(GENERAL, "error: root-entry #%"PRIu8 " is not present",
700                     bus_num);
701         return -VTD_FR_ROOT_ENTRY_P;
702     } else if (re.rsvd || (re.val & VTD_ROOT_ENTRY_RSVD)) {
703         VTD_DPRINTF(GENERAL, "error: non-zero reserved field in root-entry "
704                     "hi 0x%"PRIx64 " lo 0x%"PRIx64, re.rsvd, re.val);
705         return -VTD_FR_ROOT_ENTRY_RSVD;
706     }
707 
708     ret_fr = vtd_get_context_entry_from_root(&re, devfn, ce);
709     if (ret_fr) {
710         return ret_fr;
711     }
712 
713     if (!vtd_context_entry_present(ce)) {
714         VTD_DPRINTF(GENERAL,
715                     "error: context-entry #%"PRIu8 "(bus #%"PRIu8 ") "
716                     "is not present", devfn, bus_num);
717         return -VTD_FR_CONTEXT_ENTRY_P;
718     } else if ((ce->hi & VTD_CONTEXT_ENTRY_RSVD_HI) ||
719                (ce->lo & VTD_CONTEXT_ENTRY_RSVD_LO)) {
720         VTD_DPRINTF(GENERAL,
721                     "error: non-zero reserved field in context-entry "
722                     "hi 0x%"PRIx64 " lo 0x%"PRIx64, ce->hi, ce->lo);
723         return -VTD_FR_CONTEXT_ENTRY_RSVD;
724     }
725     /* Check if the programming of context-entry is valid */
726     if (!vtd_is_level_supported(s, vtd_get_level_from_context_entry(ce))) {
727         VTD_DPRINTF(GENERAL, "error: unsupported Address Width value in "
728                     "context-entry hi 0x%"PRIx64 " lo 0x%"PRIx64,
729                     ce->hi, ce->lo);
730         return -VTD_FR_CONTEXT_ENTRY_INV;
731     } else if (ce->lo & VTD_CONTEXT_ENTRY_TT) {
732         VTD_DPRINTF(GENERAL, "error: unsupported Translation Type in "
733                     "context-entry hi 0x%"PRIx64 " lo 0x%"PRIx64,
734                     ce->hi, ce->lo);
735         return -VTD_FR_CONTEXT_ENTRY_INV;
736     }
737     return 0;
738 }
739 
740 static inline uint16_t vtd_make_source_id(uint8_t bus_num, uint8_t devfn)
741 {
742     return ((bus_num & 0xffUL) << 8) | (devfn & 0xffUL);
743 }
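
/* E.g. bus 0x12 and devfn 0x34 (device 6, function 4) yield the
 * source-id 0x1234, matching the PCIe requester-id layout of bus[15:8],
 * device[7:3] and function[2:0].
 */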
744 
745 static const bool vtd_qualified_faults[] = {
746     [VTD_FR_RESERVED] = false,
747     [VTD_FR_ROOT_ENTRY_P] = false,
748     [VTD_FR_CONTEXT_ENTRY_P] = true,
749     [VTD_FR_CONTEXT_ENTRY_INV] = true,
750     [VTD_FR_ADDR_BEYOND_MGAW] = true,
751     [VTD_FR_WRITE] = true,
752     [VTD_FR_READ] = true,
753     [VTD_FR_PAGING_ENTRY_INV] = true,
754     [VTD_FR_ROOT_TABLE_INV] = false,
755     [VTD_FR_CONTEXT_TABLE_INV] = false,
756     [VTD_FR_ROOT_ENTRY_RSVD] = false,
757     [VTD_FR_PAGING_ENTRY_RSVD] = true,
758     [VTD_FR_CONTEXT_ENTRY_TT] = true,
759     [VTD_FR_RESERVED_ERR] = false,
760     [VTD_FR_MAX] = false,
761 };
762 
763 /* Check whether a fault condition is "qualified": one that is reported to
764  * software only if the FPD field in the context-entry used to process the
765  * faulting request is 0.
766  */
767 static inline bool vtd_is_qualified_fault(VTDFaultReason fault)
768 {
769     return vtd_qualified_faults[fault];
770 }
771 
772 static inline bool vtd_is_interrupt_addr(hwaddr addr)
773 {
774     return VTD_INTERRUPT_ADDR_FIRST <= addr && addr <= VTD_INTERRUPT_ADDR_LAST;
775 }
776 
777 /* Map dev to context-entry then do a paging-structures walk to do an
778  * IOMMU translation.
779  *
780  * Called from RCU critical section.
781  *
782  * @bus_num: The bus number
783  * @devfn: The devfn, which is the combined device and function number
784  * @is_write: The access is a write operation
785  * @entry: IOMMUTLBEntry that contains the addr to be translated and the result
786  */
787 static void vtd_do_iommu_translate(VTDAddressSpace *vtd_as, PCIBus *bus,
788                                    uint8_t devfn, hwaddr addr, bool is_write,
789                                    IOMMUTLBEntry *entry)
790 {
791     IntelIOMMUState *s = vtd_as->iommu_state;
792     VTDContextEntry ce;
793     uint8_t bus_num = pci_bus_num(bus);
794     VTDContextCacheEntry *cc_entry = &vtd_as->context_cache_entry;
795     uint64_t slpte, page_mask;
796     uint32_t level;
797     uint16_t source_id = vtd_make_source_id(bus_num, devfn);
798     int ret_fr;
799     bool is_fpd_set = false;
800     bool reads = true;
801     bool writes = true;
802     VTDIOTLBEntry *iotlb_entry;
803 
804     /* Check if the request is in the interrupt address range */
805     if (vtd_is_interrupt_addr(addr)) {
806         if (is_write) {
807             /* FIXME: since we don't know the length of the access here, we
808              * treat non-DWORD length write requests without PASID as
809              * interrupt requests, too. Without interrupt remapping support,
810              * we just use 1:1 mapping.
811              */
812             VTD_DPRINTF(MMU, "write request to interrupt address "
813                         "gpa 0x%"PRIx64, addr);
814             entry->iova = addr & VTD_PAGE_MASK_4K;
815             entry->translated_addr = addr & VTD_PAGE_MASK_4K;
816             entry->addr_mask = ~VTD_PAGE_MASK_4K;
817             entry->perm = IOMMU_WO;
818             return;
819         } else {
820             VTD_DPRINTF(GENERAL, "error: read request from interrupt address "
821                         "gpa 0x%"PRIx64, addr);
822             vtd_report_dmar_fault(s, source_id, addr, VTD_FR_READ, is_write);
823             return;
824         }
825     }
826     /* Try to fetch the slpte from the IOTLB */
827     iotlb_entry = vtd_lookup_iotlb(s, source_id, addr);
828     if (iotlb_entry) {
829         VTD_DPRINTF(CACHE, "hit iotlb sid 0x%"PRIx16 " gpa 0x%"PRIx64
830                     " slpte 0x%"PRIx64 " did 0x%"PRIx16, source_id, addr,
831                     iotlb_entry->slpte, iotlb_entry->domain_id);
832         slpte = iotlb_entry->slpte;
833         reads = iotlb_entry->read_flags;
834         writes = iotlb_entry->write_flags;
835         page_mask = iotlb_entry->mask;
836         goto out;
837     }
838     /* Try to fetch context-entry from cache first */
839     if (cc_entry->context_cache_gen == s->context_cache_gen) {
840         VTD_DPRINTF(CACHE, "hit context-cache bus %d devfn %d "
841                     "(hi %"PRIx64 " lo %"PRIx64 " gen %"PRIu32 ")",
842                     bus_num, devfn, cc_entry->context_entry.hi,
843                     cc_entry->context_entry.lo, cc_entry->context_cache_gen);
844         ce = cc_entry->context_entry;
845         is_fpd_set = ce.lo & VTD_CONTEXT_ENTRY_FPD;
846     } else {
847         ret_fr = vtd_dev_to_context_entry(s, bus_num, devfn, &ce);
848         is_fpd_set = ce.lo & VTD_CONTEXT_ENTRY_FPD;
849         if (ret_fr) {
850             ret_fr = -ret_fr;
851             if (is_fpd_set && vtd_is_qualified_fault(ret_fr)) {
852                 VTD_DPRINTF(FLOG, "fault processing is disabled for DMA "
853                             "requests through this context-entry "
854                             "(with FPD Set)");
855             } else {
856                 vtd_report_dmar_fault(s, source_id, addr, ret_fr, is_write);
857             }
858             return;
859         }
860         /* Update context-cache */
861         VTD_DPRINTF(CACHE, "update context-cache bus %d devfn %d "
862                     "(hi %"PRIx64 " lo %"PRIx64 " gen %"PRIu32 "->%"PRIu32 ")",
863                     bus_num, devfn, ce.hi, ce.lo,
864                     cc_entry->context_cache_gen, s->context_cache_gen);
865         cc_entry->context_entry = ce;
866         cc_entry->context_cache_gen = s->context_cache_gen;
867     }
868 
869     ret_fr = vtd_gpa_to_slpte(&ce, addr, is_write, &slpte, &level,
870                               &reads, &writes);
871     if (ret_fr) {
872         ret_fr = -ret_fr;
873         if (is_fpd_set && vtd_is_qualified_fault(ret_fr)) {
874             VTD_DPRINTF(FLOG, "fault processing is disabled for DMA requests "
875                         "through this context-entry (with FPD Set)");
876         } else {
877             vtd_report_dmar_fault(s, source_id, addr, ret_fr, is_write);
878         }
879         return;
880     }
881 
882     page_mask = vtd_slpt_level_page_mask(level);
883     vtd_update_iotlb(s, source_id, VTD_CONTEXT_ENTRY_DID(ce.hi), addr, slpte,
884                      reads, writes, level);
885 out:
886     entry->iova = addr & page_mask;
887     entry->translated_addr = vtd_get_slpte_addr(slpte) & page_mask;
888     entry->addr_mask = ~page_mask;
889     entry->perm = (writes ? 2 : 0) + (reads ? 1 : 0);
890 }
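
/* The perm encoding above follows QEMU's IOMMUAccessFlags: bit 0 means
 * reads allowed (IOMMU_RO) and bit 1 means writes allowed (IOMMU_WO),
 * so reads && writes yields IOMMU_RW (3) and neither yields IOMMU_NONE.
 */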
891 
892 static void vtd_root_table_setup(IntelIOMMUState *s)
893 {
894     s->root = vtd_get_quad_raw(s, DMAR_RTADDR_REG);
895     s->root_extended = s->root & VTD_RTADDR_RTT;
896     s->root &= VTD_RTADDR_ADDR_MASK;
897 
898     VTD_DPRINTF(CSR, "root_table addr 0x%"PRIx64 " %s", s->root,
899                 (s->root_extended ? "(extended)" : ""));
900 }
901 
902 static void vtd_context_global_invalidate(IntelIOMMUState *s)
903 {
904     s->context_cache_gen++;
905     if (s->context_cache_gen == VTD_CONTEXT_CACHE_GEN_MAX) {
906         vtd_reset_context_cache(s);
907     }
908 }
909 
910 
911 /* Find the VTD address space currently associated with a given bus number.
912  */
913 static VTDBus *vtd_find_as_from_bus_num(IntelIOMMUState *s, uint8_t bus_num)
914 {
915     VTDBus *vtd_bus = s->vtd_as_by_bus_num[bus_num];
916     if (!vtd_bus) {
917         /* Iterate over the registered buses to find the one which currently
918          * holds this bus number, and update the bus_num lookup table.
919          */
920         GHashTableIter iter;
921 
922         g_hash_table_iter_init(&iter, s->vtd_as_by_busptr);
923         while (g_hash_table_iter_next(&iter, NULL, (void **)&vtd_bus)) {
924             if (pci_bus_num(vtd_bus->bus) == bus_num) {
925                 s->vtd_as_by_bus_num[bus_num] = vtd_bus;
926                 return vtd_bus;
927             }
928         }
929     }
930     return vtd_bus;
931 }
932 
933 /* Do a context-cache device-selective invalidation.
934  * @func_mask: FM field after shifting
935  */
936 static void vtd_context_device_invalidate(IntelIOMMUState *s,
937                                           uint16_t source_id,
938                                           uint16_t func_mask)
939 {
940     uint16_t mask;
941     VTDBus *vtd_bus;
942     VTDAddressSpace *vtd_as;
943     uint16_t devfn;
944     uint16_t devfn_it;
945 
946     switch (func_mask & 3) {
947     case 0:
948         mask = 0;   /* No bits in the SID field masked */
949         break;
950     case 1:
951         mask = 4;   /* Mask bit 2 in the SID field */
952         break;
953     case 2:
954         mask = 6;   /* Mask bits 2:1 in the SID field */
955         break;
956     case 3:
957         mask = 7;   /* Mask bits 2:0 in the SID field */
958         break;
959     }
960     VTD_DPRINTF(INV, "device-selective invalidation source 0x%"PRIx16
961                     " mask %"PRIu16, source_id, mask);
962     vtd_bus = vtd_find_as_from_bus_num(s, VTD_SID_TO_BUS(source_id));
963     if (vtd_bus) {
964         devfn = VTD_SID_TO_DEVFN(source_id);
965         for (devfn_it = 0; devfn_it < VTD_PCI_DEVFN_MAX; ++devfn_it) {
966             vtd_as = vtd_bus->dev_as[devfn_it];
967             if (vtd_as && ((devfn_it & ~mask) == (devfn & ~mask))) {
968                 VTD_DPRINTF(INV, "invalidate context-cache of devfn 0x%"PRIx16,
969                             devfn_it);
970                 vtd_as->context_cache_entry.context_cache_gen = 0;
971             }
972         }
973     }
974 }
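
/* Example of the masked match above: @func_mask == 1 gives mask == 4, so
 * function bit 2 is ignored in the comparison and a source-id naming
 * device 1 function 0 (devfn 0x08) also invalidates the cached entry of
 * device 1 function 4 (devfn 0x0c).  Note the match must compare the
 * bits *outside* the mask, hence the ~mask above.
 */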
975 
976 /* Context-cache invalidation
977  * Returns the Context Actual Invalidation Granularity.
978  * @val: the content of the CCMD_REG
979  */
980 static uint64_t vtd_context_cache_invalidate(IntelIOMMUState *s, uint64_t val)
981 {
982     uint64_t caig;
983     uint64_t type = val & VTD_CCMD_CIRG_MASK;
984 
985     switch (type) {
986     case VTD_CCMD_DOMAIN_INVL:
987         VTD_DPRINTF(INV, "domain-selective invalidation domain 0x%"PRIx16,
988                     (uint16_t)VTD_CCMD_DID(val));
989         /* Fall through */
990     case VTD_CCMD_GLOBAL_INVL:
991         VTD_DPRINTF(INV, "global invalidation");
992         caig = VTD_CCMD_GLOBAL_INVL_A;
993         vtd_context_global_invalidate(s);
994         break;
995 
996     case VTD_CCMD_DEVICE_INVL:
997         caig = VTD_CCMD_DEVICE_INVL_A;
998         vtd_context_device_invalidate(s, VTD_CCMD_SID(val), VTD_CCMD_FM(val));
999         break;
1000 
1001     default:
1002         VTD_DPRINTF(GENERAL, "error: invalid granularity");
1003         caig = 0;
1004     }
1005     return caig;
1006 }
1007 
1008 static void vtd_iotlb_global_invalidate(IntelIOMMUState *s)
1009 {
1010     vtd_reset_iotlb(s);
1011 }
1012 
1013 static void vtd_iotlb_domain_invalidate(IntelIOMMUState *s, uint16_t domain_id)
1014 {
1015     g_hash_table_foreach_remove(s->iotlb, vtd_hash_remove_by_domain,
1016                                 &domain_id);
1017 }
1018 
1019 static void vtd_iotlb_page_invalidate(IntelIOMMUState *s, uint16_t domain_id,
1020                                       hwaddr addr, uint8_t am)
1021 {
1022     VTDIOTLBPageInvInfo info;
1023 
1024     assert(am <= VTD_MAMV);
1025     info.domain_id = domain_id;
1026     info.addr = addr;
1027     info.mask = ~((1 << am) - 1);
1028     g_hash_table_foreach_remove(s->iotlb, vtd_hash_remove_by_page, &info);
1029 }
1030 
1031 /* Flush IOTLB
1032  * Returns the IOTLB Actual Invalidation Granularity.
1033  * @val: the content of the IOTLB_REG
1034  */
1035 static uint64_t vtd_iotlb_flush(IntelIOMMUState *s, uint64_t val)
1036 {
1037     uint64_t iaig;
1038     uint64_t type = val & VTD_TLB_FLUSH_GRANU_MASK;
1039     uint16_t domain_id;
1040     hwaddr addr;
1041     uint8_t am;
1042 
1043     switch (type) {
1044     case VTD_TLB_GLOBAL_FLUSH:
1045         VTD_DPRINTF(INV, "global invalidation");
1046         iaig = VTD_TLB_GLOBAL_FLUSH_A;
1047         vtd_iotlb_global_invalidate(s);
1048         break;
1049 
1050     case VTD_TLB_DSI_FLUSH:
1051         domain_id = VTD_TLB_DID(val);
1052         VTD_DPRINTF(INV, "domain-selective invalidation domain 0x%"PRIx16,
1053                     domain_id);
1054         iaig = VTD_TLB_DSI_FLUSH_A;
1055         vtd_iotlb_domain_invalidate(s, domain_id);
1056         break;
1057 
1058     case VTD_TLB_PSI_FLUSH:
1059         domain_id = VTD_TLB_DID(val);
1060         addr = vtd_get_quad_raw(s, DMAR_IVA_REG);
1061         am = VTD_IVA_AM(addr);
1062         addr = VTD_IVA_ADDR(addr);
1063         VTD_DPRINTF(INV, "page-selective invalidation domain 0x%"PRIx16
1064                     " addr 0x%"PRIx64 " mask %"PRIu8, domain_id, addr, am);
1065         if (am > VTD_MAMV) {
1066             VTD_DPRINTF(GENERAL, "error: supported max address mask value is "
1067                         "%"PRIu8, (uint8_t)VTD_MAMV);
1068             iaig = 0;
1069             break;
1070         }
1071         iaig = VTD_TLB_PSI_FLUSH_A;
1072         vtd_iotlb_page_invalidate(s, domain_id, addr, am);
1073         break;
1074 
1075     default:
1076         VTD_DPRINTF(GENERAL, "error: invalid granularity");
1077         iaig = 0;
1078     }
1079     return iaig;
1080 }
1081 
1082 static inline bool vtd_queued_inv_enable_check(IntelIOMMUState *s)
1083 {
1084     return s->iq_tail == 0;
1085 }
1086 
1087 static inline bool vtd_queued_inv_disable_check(IntelIOMMUState *s)
1088 {
1089     return s->qi_enabled && (s->iq_tail == s->iq_head) &&
1090            (s->iq_last_desc_type == VTD_INV_DESC_WAIT);
1091 }
1092 
1093 static void vtd_handle_gcmd_qie(IntelIOMMUState *s, bool en)
1094 {
1095     uint64_t iqa_val = vtd_get_quad_raw(s, DMAR_IQA_REG);
1096 
1097     VTD_DPRINTF(INV, "Queued Invalidation Enable %s", (en ? "on" : "off"));
1098     if (en) {
1099         if (vtd_queued_inv_enable_check(s)) {
1100             s->iq = iqa_val & VTD_IQA_IQA_MASK;
1101             /* 2^(QS + 8) entries */
1102             s->iq_size = 1UL << ((iqa_val & VTD_IQA_QS) + 8);
1103             s->qi_enabled = true;
1104             VTD_DPRINTF(INV, "DMAR_IQA_REG 0x%"PRIx64, iqa_val);
1105             VTD_DPRINTF(INV, "Invalidation Queue addr 0x%"PRIx64 " size %d",
1106                         s->iq, s->iq_size);
1107             /* Ok - report back to driver */
1108             vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_QIES);
1109         } else {
1110             VTD_DPRINTF(GENERAL, "error: can't enable Queued Invalidation: "
1111                         "tail %"PRIu16, s->iq_tail);
1112         }
1113     } else {
1114         if (vtd_queued_inv_disable_check(s)) {
1115             /* disable Queued Invalidation */
1116             vtd_set_quad_raw(s, DMAR_IQH_REG, 0);
1117             s->iq_head = 0;
1118             s->qi_enabled = false;
1119             /* Ok - report back to driver */
1120             vtd_set_clear_mask_long(s, DMAR_GSTS_REG, VTD_GSTS_QIES, 0);
1121         } else {
1122             VTD_DPRINTF(GENERAL, "error: can't disable Queued Invalidation: "
1123                         "head %"PRIu16 ", tail %"PRIu16
1124                         ", last_descriptor %"PRIu8,
1125                         s->iq_head, s->iq_tail, s->iq_last_desc_type);
1126         }
1127     }
1128 }
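
/* Sketch of the DMAR_IQA_REG decoding above: the invalidation queue base
 * is the 4KiB-aligned address kept in the register, and the QS field
 * encodes the queue size as 2^(QS + 8) descriptors.  QS == 0 thus gives
 * 256 descriptors of 16 bytes each, i.e. exactly one 4KiB page.
 */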
1129 
1130 /* Set Root Table Pointer */
1131 static void vtd_handle_gcmd_srtp(IntelIOMMUState *s)
1132 {
1133     VTD_DPRINTF(CSR, "set Root Table Pointer");
1134 
1135     vtd_root_table_setup(s);
1136     /* Ok - report back to driver */
1137     vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_RTPS);
1138 }
1139 
1140 /* Handle Translation Enable/Disable */
1141 static void vtd_handle_gcmd_te(IntelIOMMUState *s, bool en)
1142 {
1143     VTD_DPRINTF(CSR, "Translation Enable %s", (en ? "on" : "off"));
1144 
1145     if (en) {
1146         s->dmar_enabled = true;
1147         /* Ok - report back to driver */
1148         vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_TES);
1149     } else {
1150         s->dmar_enabled = false;
1151 
1152         /* Clear the index of the Fault Recording Register */
1153         s->next_frcd_reg = 0;
1154         /* Ok - report back to driver */
1155         vtd_set_clear_mask_long(s, DMAR_GSTS_REG, VTD_GSTS_TES, 0);
1156     }
1157 }
1158 
1159 /* Handle write to Global Command Register */
1160 static void vtd_handle_gcmd_write(IntelIOMMUState *s)
1161 {
1162     uint32_t status = vtd_get_long_raw(s, DMAR_GSTS_REG);
1163     uint32_t val = vtd_get_long_raw(s, DMAR_GCMD_REG);
1164     uint32_t changed = status ^ val;
1165 
1166     VTD_DPRINTF(CSR, "value 0x%"PRIx32 " status 0x%"PRIx32, val, status);
1167     if (changed & VTD_GCMD_TE) {
1168         /* Translation enable/disable */
1169         vtd_handle_gcmd_te(s, val & VTD_GCMD_TE);
1170     }
1171     if (val & VTD_GCMD_SRTP) {
1172         /* Set/update the root-table pointer */
1173         vtd_handle_gcmd_srtp(s);
1174     }
1175     if (changed & VTD_GCMD_QIE) {
1176         /* Queued Invalidation Enable */
1177         vtd_handle_gcmd_qie(s, val & VTD_GCMD_QIE);
1178     }
1179 }
1180 
1181 /* Handle write to Context Command Register */
1182 static void vtd_handle_ccmd_write(IntelIOMMUState *s)
1183 {
1184     uint64_t ret;
1185     uint64_t val = vtd_get_quad_raw(s, DMAR_CCMD_REG);
1186 
1187     /* Context-cache invalidation request */
1188     if (val & VTD_CCMD_ICC) {
1189         if (s->qi_enabled) {
1190             VTD_DPRINTF(GENERAL, "error: Queued Invalidation enabled, "
1191                         "should not use register-based invalidation");
1192             return;
1193         }
1194         ret = vtd_context_cache_invalidate(s, val);
1195         /* Invalidation completed: clear ICC, report the granularity */
1196         vtd_set_clear_mask_quad(s, DMAR_CCMD_REG, VTD_CCMD_ICC, 0ULL);
1197         ret = vtd_set_clear_mask_quad(s, DMAR_CCMD_REG, VTD_CCMD_CAIG_MASK,
1198                                       ret);
1199         VTD_DPRINTF(INV, "CCMD_REG write-back val: 0x%"PRIx64, ret);
1200     }
1201 }
1202 
1203 /* Handle write to IOTLB Invalidation Register */
1204 static void vtd_handle_iotlb_write(IntelIOMMUState *s)
1205 {
1206     uint64_t ret;
1207     uint64_t val = vtd_get_quad_raw(s, DMAR_IOTLB_REG);
1208 
1209     /* IOTLB invalidation request */
1210     if (val & VTD_TLB_IVT) {
1211         if (s->qi_enabled) {
1212             VTD_DPRINTF(GENERAL, "error: Queued Invalidation enabled, "
1213                         "should not use register-based invalidation");
1214             return;
1215         }
1216         ret = vtd_iotlb_flush(s, val);
1217         /* Invalidation completed: clear IVT, report the granularity */
1218         vtd_set_clear_mask_quad(s, DMAR_IOTLB_REG, VTD_TLB_IVT, 0ULL);
1219         ret = vtd_set_clear_mask_quad(s, DMAR_IOTLB_REG,
1220                                       VTD_TLB_FLUSH_GRANU_MASK_A, ret);
1221         VTD_DPRINTF(INV, "IOTLB_REG write-back val: 0x%"PRIx64, ret);
1222     }
1223 }
1224 
1225 /* Fetch an Invalidation Descriptor from the Invalidation Queue */
1226 static bool vtd_get_inv_desc(dma_addr_t base_addr, uint32_t offset,
1227                              VTDInvDesc *inv_desc)
1228 {
1229     dma_addr_t addr = base_addr + offset * sizeof(*inv_desc);
1230     if (dma_memory_read(&address_space_memory, addr, inv_desc,
1231         sizeof(*inv_desc))) {
1232         VTD_DPRINTF(GENERAL, "error: fail to fetch Invalidation Descriptor "
1233                     "base_addr 0x%"PRIx64 " offset %"PRIu32, base_addr, offset);
1234         inv_desc->lo = 0;
1235         inv_desc->hi = 0;
1236 
1237         return false;
1238     }
1239     inv_desc->lo = le64_to_cpu(inv_desc->lo);
1240     inv_desc->hi = le64_to_cpu(inv_desc->hi);
1241     return true;
1242 }
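
/* Each descriptor occupies 16 bytes (two little-endian qwords); the
 * descriptor type sits in the low bits of @lo (VTD_INV_DESC_TYPE), which
 * is what vtd_process_inv_desc() dispatches on below.
 */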
1243 
1244 static bool vtd_process_wait_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc)
1245 {
1246     if ((inv_desc->hi & VTD_INV_DESC_WAIT_RSVD_HI) ||
1247         (inv_desc->lo & VTD_INV_DESC_WAIT_RSVD_LO)) {
1248         VTD_DPRINTF(GENERAL, "error: non-zero reserved field in Invalidation "
1249                     "Wait Descriptor hi 0x%"PRIx64 " lo 0x%"PRIx64,
1250                     inv_desc->hi, inv_desc->lo);
1251         return false;
1252     }
1253     if (inv_desc->lo & VTD_INV_DESC_WAIT_SW) {
1254         /* Status Write */
1255         uint32_t status_data = (uint32_t)(inv_desc->lo >>
1256                                VTD_INV_DESC_WAIT_DATA_SHIFT);
1257 
1258         assert(!(inv_desc->lo & VTD_INV_DESC_WAIT_IF));
1259 
1260         /* FIXME: need to be masked with HAW? */
1261         dma_addr_t status_addr = inv_desc->hi;
1262         VTD_DPRINTF(INV, "status data 0x%x, status addr 0x%"PRIx64,
1263                     status_data, status_addr);
1264         status_data = cpu_to_le32(status_data);
1265         if (dma_memory_write(&address_space_memory, status_addr, &status_data,
1266                              sizeof(status_data))) {
1267             VTD_DPRINTF(GENERAL, "error: fail to perform a coherent write");
1268             return false;
1269         }
1270     } else if (inv_desc->lo & VTD_INV_DESC_WAIT_IF) {
1271         /* Interrupt flag */
1272         VTD_DPRINTF(INV, "Invalidation Wait Descriptor interrupt completion");
1273         vtd_generate_completion_event(s);
1274     } else {
1275         VTD_DPRINTF(GENERAL, "error: invalid Invalidation Wait Descriptor: "
1276                     "hi 0x%"PRIx64 " lo 0x%"PRIx64, inv_desc->hi, inv_desc->lo);
1277         return false;
1278     }
1279     return true;
1280 }
1281 
1282 static bool vtd_process_context_cache_desc(IntelIOMMUState *s,
1283                                            VTDInvDesc *inv_desc)
1284 {
1285     if ((inv_desc->lo & VTD_INV_DESC_CC_RSVD) || inv_desc->hi) {
1286         VTD_DPRINTF(GENERAL, "error: non-zero reserved field in Context-cache "
1287                     "Invalidate Descriptor");
1288         return false;
1289     }
1290     switch (inv_desc->lo & VTD_INV_DESC_CC_G) {
1291     case VTD_INV_DESC_CC_DOMAIN:
1292         VTD_DPRINTF(INV, "domain-selective invalidation domain 0x%"PRIx16,
1293                     (uint16_t)VTD_INV_DESC_CC_DID(inv_desc->lo));
1294         /* Fall through */
1295     case VTD_INV_DESC_CC_GLOBAL:
1296         VTD_DPRINTF(INV, "global invalidation");
1297         vtd_context_global_invalidate(s);
1298         break;
1299 
1300     case VTD_INV_DESC_CC_DEVICE:
1301         vtd_context_device_invalidate(s, VTD_INV_DESC_CC_SID(inv_desc->lo),
1302                                       VTD_INV_DESC_CC_FM(inv_desc->lo));
1303         break;
1304 
1305     default:
1306         VTD_DPRINTF(GENERAL, "error: invalid granularity in Context-cache "
1307                     "Invalidate Descriptor hi 0x%"PRIx64  " lo 0x%"PRIx64,
1308                     inv_desc->hi, inv_desc->lo);
1309         return false;
1310     }
1311     return true;
1312 }
1313 
1314 static bool vtd_process_iotlb_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc)
1315 {
1316     uint16_t domain_id;
1317     uint8_t am;
1318     hwaddr addr;
1319 
1320     if ((inv_desc->lo & VTD_INV_DESC_IOTLB_RSVD_LO) ||
1321         (inv_desc->hi & VTD_INV_DESC_IOTLB_RSVD_HI)) {
1322         VTD_DPRINTF(GENERAL, "error: non-zero reserved field in IOTLB "
1323                     "Invalidate Descriptor hi 0x%"PRIx64 " lo 0x%"PRIx64,
1324                     inv_desc->hi, inv_desc->lo);
1325         return false;
1326     }
1327 
1328     switch (inv_desc->lo & VTD_INV_DESC_IOTLB_G) {
1329     case VTD_INV_DESC_IOTLB_GLOBAL:
1330         VTD_DPRINTF(INV, "global invalidation");
1331         vtd_iotlb_global_invalidate(s);
1332         break;
1333 
1334     case VTD_INV_DESC_IOTLB_DOMAIN:
1335         domain_id = VTD_INV_DESC_IOTLB_DID(inv_desc->lo);
1336         VTD_DPRINTF(INV, "domain-selective invalidation domain 0x%"PRIx16,
1337                     domain_id);
1338         vtd_iotlb_domain_invalidate(s, domain_id);
1339         break;
1340 
1341     case VTD_INV_DESC_IOTLB_PAGE:
1342         domain_id = VTD_INV_DESC_IOTLB_DID(inv_desc->lo);
1343         addr = VTD_INV_DESC_IOTLB_ADDR(inv_desc->hi);
1344         am = VTD_INV_DESC_IOTLB_AM(inv_desc->hi);
1345         VTD_DPRINTF(INV, "page-selective invalidation domain 0x%"PRIx16
1346                     " addr 0x%"PRIx64 " mask %"PRIu8, domain_id, addr, am);
1347         if (am > VTD_MAMV) {
1348             VTD_DPRINTF(GENERAL, "error: supported max address mask value is "
1349                         "%"PRIu8, (uint8_t)VTD_MAMV);
1350             return false;
1351         }
1352         vtd_iotlb_page_invalidate(s, domain_id, addr, am);
1353         break;
1354 
1355     default:
1356         VTD_DPRINTF(GENERAL, "error: invalid granularity in IOTLB Invalidate "
1357                     "Descriptor hi 0x%"PRIx64 " lo 0x%"PRIx64,
1358                     inv_desc->hi, inv_desc->lo);
1359         return false;
1360     }
1361     return true;
1362 }
1363 
1364 static bool vtd_process_inv_desc(IntelIOMMUState *s)
1365 {
1366     VTDInvDesc inv_desc;
1367     uint8_t desc_type;
1368 
1369     VTD_DPRINTF(INV, "iq head %"PRIu16, s->iq_head);
1370     if (!vtd_get_inv_desc(s->iq, s->iq_head, &inv_desc)) {
1371         s->iq_last_desc_type = VTD_INV_DESC_NONE;
1372         return false;
1373     }
1374     desc_type = inv_desc.lo & VTD_INV_DESC_TYPE;
1375     /* FIXME: should update at first or at last? */
1376     s->iq_last_desc_type = desc_type;
1377 
1378     switch (desc_type) {
1379     case VTD_INV_DESC_CC:
1380         VTD_DPRINTF(INV, "Context-cache Invalidate Descriptor hi 0x%"PRIx64
1381                     " lo 0x%"PRIx64, inv_desc.hi, inv_desc.lo);
1382         if (!vtd_process_context_cache_desc(s, &inv_desc)) {
1383             return false;
1384         }
1385         break;
1386 
1387     case VTD_INV_DESC_IOTLB:
1388         VTD_DPRINTF(INV, "IOTLB Invalidate Descriptor hi 0x%"PRIx64
1389                     " lo 0x%"PRIx64, inv_desc.hi, inv_desc.lo);
1390         if (!vtd_process_iotlb_desc(s, &inv_desc)) {
1391             return false;
1392         }
1393         break;
1394 
1395     case VTD_INV_DESC_WAIT:
1396         VTD_DPRINTF(INV, "Invalidation Wait Descriptor hi 0x%"PRIx64
1397                     " lo 0x%"PRIx64, inv_desc.hi, inv_desc.lo);
1398         if (!vtd_process_wait_desc(s, &inv_desc)) {
1399             return false;
1400         }
1401         break;
1402 
1403     default:
1404         VTD_DPRINTF(GENERAL, "error: unkonw Invalidation Descriptor type "
1405                     "hi 0x%"PRIx64 " lo 0x%"PRIx64 " type %"PRIu8,
1406                     inv_desc.hi, inv_desc.lo, desc_type);
1407         return false;
1408     }
1409     s->iq_head++;
1410     if (s->iq_head == s->iq_size) {
1411         s->iq_head = 0;
1412     }
1413     return true;
1414 }
1415 
1416 /* Try to fetch and process more Invalidation Descriptors */
1417 static void vtd_fetch_inv_desc(IntelIOMMUState *s)
1418 {
1419     VTD_DPRINTF(INV, "fetch Invalidation Descriptors");
1420     if (s->iq_tail >= s->iq_size) {
1421         /* Detect an invalid Tail pointer */
1422         VTD_DPRINTF(GENERAL, "error: iq_tail is %"PRIu16
1423                     " while iq_size is %"PRIu16, s->iq_tail, s->iq_size);
1424         vtd_handle_inv_queue_error(s);
1425         return;
1426     }
1427     while (s->iq_head != s->iq_tail) {
1428         if (!vtd_process_inv_desc(s)) {
1429             /* Invalidation Queue Errors */
1430             vtd_handle_inv_queue_error(s);
1431             break;
1432         }
1433         /* Must update the IQH_REG in time */
1434         vtd_set_quad_raw(s, DMAR_IQH_REG,
1435                          (((uint64_t)(s->iq_head)) << VTD_IQH_QH_SHIFT) &
1436                          VTD_IQH_QH_MASK);
1437     }
1438 }
1439 
1440 /* Handle write to Invalidation Queue Tail Register */
1441 static void vtd_handle_iqt_write(IntelIOMMUState *s)
1442 {
1443     uint64_t val = vtd_get_quad_raw(s, DMAR_IQT_REG);
1444 
1445     s->iq_tail = VTD_IQT_QT(val);
1446     VTD_DPRINTF(INV, "set iq tail %"PRIu16, s->iq_tail);
1447     if (s->qi_enabled && !(vtd_get_long_raw(s, DMAR_FSTS_REG) & VTD_FSTS_IQE)) {
1448         /* Process Invalidation Queue here */
1449         vtd_fetch_inv_desc(s);
1450     }
1451 }
1452 
1453 static void vtd_handle_fsts_write(IntelIOMMUState *s)
1454 {
1455     uint32_t fsts_reg = vtd_get_long_raw(s, DMAR_FSTS_REG);
1456     uint32_t fectl_reg = vtd_get_long_raw(s, DMAR_FECTL_REG);
1457     uint32_t status_fields = VTD_FSTS_PFO | VTD_FSTS_PPF | VTD_FSTS_IQE;
1458 
1459     if ((fectl_reg & VTD_FECTL_IP) && !(fsts_reg & status_fields)) {
1460         vtd_set_clear_mask_long(s, DMAR_FECTL_REG, VTD_FECTL_IP, 0);
1461         VTD_DPRINTF(FLOG, "all pending interrupt conditions serviced, clear "
1462                     "IP field of FECTL_REG");
1463     }
    /* FIXME: when IQE is cleared and Queued Invalidation is enabled, should
     * we try to fetch any pending Invalidation Descriptors?
     */
1467 }
1468 
1469 static void vtd_handle_fectl_write(IntelIOMMUState *s)
1470 {
1471     uint32_t fectl_reg;
    /* FIXME: when software clears the IM field we should check the IP field.
     * Do we need to compare the old and new values to conclude that software
     * cleared IM, or is it enough to check that IM is now zero?
     */
1476     fectl_reg = vtd_get_long_raw(s, DMAR_FECTL_REG);
1477     if ((fectl_reg & VTD_FECTL_IP) && !(fectl_reg & VTD_FECTL_IM)) {
1478         vtd_generate_interrupt(s, DMAR_FEADDR_REG, DMAR_FEDATA_REG);
1479         vtd_set_clear_mask_long(s, DMAR_FECTL_REG, VTD_FECTL_IP, 0);
1480         VTD_DPRINTF(FLOG, "IM field is cleared, generate "
1481                     "fault event interrupt");
1482     }
1483 }
1484 
1485 static void vtd_handle_ics_write(IntelIOMMUState *s)
1486 {
1487     uint32_t ics_reg = vtd_get_long_raw(s, DMAR_ICS_REG);
1488     uint32_t iectl_reg = vtd_get_long_raw(s, DMAR_IECTL_REG);
1489 
1490     if ((iectl_reg & VTD_IECTL_IP) && !(ics_reg & VTD_ICS_IWC)) {
1491         vtd_set_clear_mask_long(s, DMAR_IECTL_REG, VTD_IECTL_IP, 0);
1492         VTD_DPRINTF(INV, "pending completion interrupt condition serviced, "
1493                     "clear IP field of IECTL_REG");
1494     }
1495 }
1496 
1497 static void vtd_handle_iectl_write(IntelIOMMUState *s)
1498 {
1499     uint32_t iectl_reg;
    /* FIXME: when software clears the IM field we should check the IP field.
     * Do we need to compare the old and new values to conclude that software
     * cleared IM, or is it enough to check that IM is now zero?
     */
1504     iectl_reg = vtd_get_long_raw(s, DMAR_IECTL_REG);
1505     if ((iectl_reg & VTD_IECTL_IP) && !(iectl_reg & VTD_IECTL_IM)) {
1506         vtd_generate_interrupt(s, DMAR_IEADDR_REG, DMAR_IEDATA_REG);
1507         vtd_set_clear_mask_long(s, DMAR_IECTL_REG, VTD_IECTL_IP, 0);
1508         VTD_DPRINTF(INV, "IM field is cleared, generate "
1509                     "invalidation event interrupt");
1510     }
1511 }
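
/* Note that FECTL_REG/FSTS_REG and IECTL_REG/ICS_REG follow the same
 * protocol: a pending condition latches the IP field while the interrupt
 * is masked (IM set), and the interrupt is generated (and IP cleared) as
 * soon as software unmasks it.
 */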
1512 
1513 static uint64_t vtd_mem_read(void *opaque, hwaddr addr, unsigned size)
1514 {
1515     IntelIOMMUState *s = opaque;
1516     uint64_t val;
1517 
1518     if (addr + size > DMAR_REG_SIZE) {
1519         VTD_DPRINTF(GENERAL, "error: addr outside region: max 0x%"PRIx64
1520                     ", got 0x%"PRIx64 " %d",
1521                     (uint64_t)DMAR_REG_SIZE, addr, size);
1522         return (uint64_t)-1;
1523     }
1524 
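    /* Most registers are served directly from the csr array; the ones whose
     * authoritative value is cached in IntelIOMMUState (the root table and
     * invalidation queue addresses) are special-cased below so that 4-byte
     * reads of either half of the 64-bit value stay consistent.
     */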
1525     switch (addr) {
1526     /* Root Table Address Register, 64-bit */
1527     case DMAR_RTADDR_REG:
1528         if (size == 4) {
1529             val = s->root & ((1ULL << 32) - 1);
1530         } else {
1531             val = s->root;
1532         }
1533         break;
1534 
1535     case DMAR_RTADDR_REG_HI:
1536         assert(size == 4);
1537         val = s->root >> 32;
1538         break;
1539 
1540     /* Invalidation Queue Address Register, 64-bit */
1541     case DMAR_IQA_REG:
1542         val = s->iq | (vtd_get_quad(s, DMAR_IQA_REG) & VTD_IQA_QS);
1543         if (size == 4) {
1544             val = val & ((1ULL << 32) - 1);
1545         }
1546         break;
1547 
1548     case DMAR_IQA_REG_HI:
1549         assert(size == 4);
1550         val = s->iq >> 32;
1551         break;
1552 
1553     default:
1554         if (size == 4) {
1555             val = vtd_get_long(s, addr);
1556         } else {
1557             val = vtd_get_quad(s, addr);
1558         }
1559     }
1560     VTD_DPRINTF(CSR, "addr 0x%"PRIx64 " size %d val 0x%"PRIx64,
1561                 addr, size, val);
1562     return val;
1563 }
1564 
1565 static void vtd_mem_write(void *opaque, hwaddr addr,
1566                           uint64_t val, unsigned size)
1567 {
1568     IntelIOMMUState *s = opaque;
1569 
1570     if (addr + size > DMAR_REG_SIZE) {
1571         VTD_DPRINTF(GENERAL, "error: addr outside region: max 0x%"PRIx64
1572                     ", got 0x%"PRIx64 " %d",
1573                     (uint64_t)DMAR_REG_SIZE, addr, size);
1574         return;
1575     }
1576 
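    /* A 64-bit register may be written as a single 8-byte access to its base
     * offset or as two 4-byte accesses (low half at the base offset, high
     * half at the _HI offset). Side effects run only once the full value has
     * been written, i.e. on the 8-byte access or on the high-dword write.
     */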
1577     switch (addr) {
1578     /* Global Command Register, 32-bit */
1579     case DMAR_GCMD_REG:
1580         VTD_DPRINTF(CSR, "DMAR_GCMD_REG write addr 0x%"PRIx64
1581                     ", size %d, val 0x%"PRIx64, addr, size, val);
1582         vtd_set_long(s, addr, val);
1583         vtd_handle_gcmd_write(s);
1584         break;
1585 
1586     /* Context Command Register, 64-bit */
1587     case DMAR_CCMD_REG:
1588         VTD_DPRINTF(CSR, "DMAR_CCMD_REG write addr 0x%"PRIx64
1589                     ", size %d, val 0x%"PRIx64, addr, size, val);
1590         if (size == 4) {
1591             vtd_set_long(s, addr, val);
1592         } else {
1593             vtd_set_quad(s, addr, val);
1594             vtd_handle_ccmd_write(s);
1595         }
1596         break;
1597 
1598     case DMAR_CCMD_REG_HI:
1599         VTD_DPRINTF(CSR, "DMAR_CCMD_REG_HI write addr 0x%"PRIx64
1600                     ", size %d, val 0x%"PRIx64, addr, size, val);
1601         assert(size == 4);
1602         vtd_set_long(s, addr, val);
1603         vtd_handle_ccmd_write(s);
1604         break;
1605 
1606     /* IOTLB Invalidation Register, 64-bit */
1607     case DMAR_IOTLB_REG:
1608         VTD_DPRINTF(INV, "DMAR_IOTLB_REG write addr 0x%"PRIx64
1609                     ", size %d, val 0x%"PRIx64, addr, size, val);
1610         if (size == 4) {
1611             vtd_set_long(s, addr, val);
1612         } else {
1613             vtd_set_quad(s, addr, val);
1614             vtd_handle_iotlb_write(s);
1615         }
1616         break;
1617 
1618     case DMAR_IOTLB_REG_HI:
1619         VTD_DPRINTF(INV, "DMAR_IOTLB_REG_HI write addr 0x%"PRIx64
1620                     ", size %d, val 0x%"PRIx64, addr, size, val);
1621         assert(size == 4);
1622         vtd_set_long(s, addr, val);
1623         vtd_handle_iotlb_write(s);
1624         break;
1625 
1626     /* Invalidate Address Register, 64-bit */
1627     case DMAR_IVA_REG:
1628         VTD_DPRINTF(INV, "DMAR_IVA_REG write addr 0x%"PRIx64
1629                     ", size %d, val 0x%"PRIx64, addr, size, val);
1630         if (size == 4) {
1631             vtd_set_long(s, addr, val);
1632         } else {
1633             vtd_set_quad(s, addr, val);
1634         }
1635         break;
1636 
1637     case DMAR_IVA_REG_HI:
1638         VTD_DPRINTF(INV, "DMAR_IVA_REG_HI write addr 0x%"PRIx64
1639                     ", size %d, val 0x%"PRIx64, addr, size, val);
1640         assert(size == 4);
1641         vtd_set_long(s, addr, val);
1642         break;
1643 
1644     /* Fault Status Register, 32-bit */
1645     case DMAR_FSTS_REG:
1646         VTD_DPRINTF(FLOG, "DMAR_FSTS_REG write addr 0x%"PRIx64
1647                     ", size %d, val 0x%"PRIx64, addr, size, val);
1648         assert(size == 4);
1649         vtd_set_long(s, addr, val);
1650         vtd_handle_fsts_write(s);
1651         break;
1652 
1653     /* Fault Event Control Register, 32-bit */
1654     case DMAR_FECTL_REG:
1655         VTD_DPRINTF(FLOG, "DMAR_FECTL_REG write addr 0x%"PRIx64
1656                     ", size %d, val 0x%"PRIx64, addr, size, val);
1657         assert(size == 4);
1658         vtd_set_long(s, addr, val);
1659         vtd_handle_fectl_write(s);
1660         break;
1661 
1662     /* Fault Event Data Register, 32-bit */
1663     case DMAR_FEDATA_REG:
1664         VTD_DPRINTF(FLOG, "DMAR_FEDATA_REG write addr 0x%"PRIx64
1665                     ", size %d, val 0x%"PRIx64, addr, size, val);
1666         assert(size == 4);
1667         vtd_set_long(s, addr, val);
1668         break;
1669 
1670     /* Fault Event Address Register, 32-bit */
1671     case DMAR_FEADDR_REG:
1672         VTD_DPRINTF(FLOG, "DMAR_FEADDR_REG write addr 0x%"PRIx64
1673                     ", size %d, val 0x%"PRIx64, addr, size, val);
1674         assert(size == 4);
1675         vtd_set_long(s, addr, val);
1676         break;
1677 
1678     /* Fault Event Upper Address Register, 32-bit */
1679     case DMAR_FEUADDR_REG:
1680         VTD_DPRINTF(FLOG, "DMAR_FEUADDR_REG write addr 0x%"PRIx64
1681                     ", size %d, val 0x%"PRIx64, addr, size, val);
1682         assert(size == 4);
1683         vtd_set_long(s, addr, val);
1684         break;
1685 
1686     /* Protected Memory Enable Register, 32-bit */
1687     case DMAR_PMEN_REG:
1688         VTD_DPRINTF(CSR, "DMAR_PMEN_REG write addr 0x%"PRIx64
1689                     ", size %d, val 0x%"PRIx64, addr, size, val);
1690         assert(size == 4);
1691         vtd_set_long(s, addr, val);
1692         break;
1693 
1694     /* Root Table Address Register, 64-bit */
1695     case DMAR_RTADDR_REG:
1696         VTD_DPRINTF(CSR, "DMAR_RTADDR_REG write addr 0x%"PRIx64
1697                     ", size %d, val 0x%"PRIx64, addr, size, val);
1698         if (size == 4) {
1699             vtd_set_long(s, addr, val);
1700         } else {
1701             vtd_set_quad(s, addr, val);
1702         }
1703         break;
1704 
1705     case DMAR_RTADDR_REG_HI:
1706         VTD_DPRINTF(CSR, "DMAR_RTADDR_REG_HI write addr 0x%"PRIx64
1707                     ", size %d, val 0x%"PRIx64, addr, size, val);
1708         assert(size == 4);
1709         vtd_set_long(s, addr, val);
1710         break;
1711 
1712     /* Invalidation Queue Tail Register, 64-bit */
1713     case DMAR_IQT_REG:
1714         VTD_DPRINTF(INV, "DMAR_IQT_REG write addr 0x%"PRIx64
1715                     ", size %d, val 0x%"PRIx64, addr, size, val);
1716         if (size == 4) {
1717             vtd_set_long(s, addr, val);
1718         } else {
1719             vtd_set_quad(s, addr, val);
1720         }
1721         vtd_handle_iqt_write(s);
1722         break;
1723 
1724     case DMAR_IQT_REG_HI:
1725         VTD_DPRINTF(INV, "DMAR_IQT_REG_HI write addr 0x%"PRIx64
1726                     ", size %d, val 0x%"PRIx64, addr, size, val);
1727         assert(size == 4);
1728         vtd_set_long(s, addr, val);
        /* Bits 63:19 of IQT_REG are RsvdZ, nothing more to do here */
1730         break;
1731 
1732     /* Invalidation Queue Address Register, 64-bit */
1733     case DMAR_IQA_REG:
1734         VTD_DPRINTF(INV, "DMAR_IQA_REG write addr 0x%"PRIx64
1735                     ", size %d, val 0x%"PRIx64, addr, size, val);
1736         if (size == 4) {
1737             vtd_set_long(s, addr, val);
1738         } else {
1739             vtd_set_quad(s, addr, val);
1740         }
1741         break;
1742 
1743     case DMAR_IQA_REG_HI:
1744         VTD_DPRINTF(INV, "DMAR_IQA_REG_HI write addr 0x%"PRIx64
1745                     ", size %d, val 0x%"PRIx64, addr, size, val);
1746         assert(size == 4);
1747         vtd_set_long(s, addr, val);
1748         break;
1749 
1750     /* Invalidation Completion Status Register, 32-bit */
1751     case DMAR_ICS_REG:
1752         VTD_DPRINTF(INV, "DMAR_ICS_REG write addr 0x%"PRIx64
1753                     ", size %d, val 0x%"PRIx64, addr, size, val);
1754         assert(size == 4);
1755         vtd_set_long(s, addr, val);
1756         vtd_handle_ics_write(s);
1757         break;
1758 
1759     /* Invalidation Event Control Register, 32-bit */
1760     case DMAR_IECTL_REG:
1761         VTD_DPRINTF(INV, "DMAR_IECTL_REG write addr 0x%"PRIx64
1762                     ", size %d, val 0x%"PRIx64, addr, size, val);
1763         assert(size == 4);
1764         vtd_set_long(s, addr, val);
1765         vtd_handle_iectl_write(s);
1766         break;
1767 
1768     /* Invalidation Event Data Register, 32-bit */
1769     case DMAR_IEDATA_REG:
1770         VTD_DPRINTF(INV, "DMAR_IEDATA_REG write addr 0x%"PRIx64
1771                     ", size %d, val 0x%"PRIx64, addr, size, val);
1772         assert(size == 4);
1773         vtd_set_long(s, addr, val);
1774         break;
1775 
1776     /* Invalidation Event Address Register, 32-bit */
1777     case DMAR_IEADDR_REG:
1778         VTD_DPRINTF(INV, "DMAR_IEADDR_REG write addr 0x%"PRIx64
1779                     ", size %d, val 0x%"PRIx64, addr, size, val);
1780         assert(size == 4);
1781         vtd_set_long(s, addr, val);
1782         break;
1783 
1784     /* Invalidation Event Upper Address Register, 32-bit */
1785     case DMAR_IEUADDR_REG:
1786         VTD_DPRINTF(INV, "DMAR_IEUADDR_REG write addr 0x%"PRIx64
1787                     ", size %d, val 0x%"PRIx64, addr, size, val);
1788         assert(size == 4);
1789         vtd_set_long(s, addr, val);
1790         break;
1791 
1792     /* Fault Recording Registers, 128-bit */
1793     case DMAR_FRCD_REG_0_0:
1794         VTD_DPRINTF(FLOG, "DMAR_FRCD_REG_0_0 write addr 0x%"PRIx64
1795                     ", size %d, val 0x%"PRIx64, addr, size, val);
1796         if (size == 4) {
1797             vtd_set_long(s, addr, val);
1798         } else {
1799             vtd_set_quad(s, addr, val);
1800         }
1801         break;
1802 
1803     case DMAR_FRCD_REG_0_1:
1804         VTD_DPRINTF(FLOG, "DMAR_FRCD_REG_0_1 write addr 0x%"PRIx64
1805                     ", size %d, val 0x%"PRIx64, addr, size, val);
1806         assert(size == 4);
1807         vtd_set_long(s, addr, val);
1808         break;
1809 
1810     case DMAR_FRCD_REG_0_2:
1811         VTD_DPRINTF(FLOG, "DMAR_FRCD_REG_0_2 write addr 0x%"PRIx64
1812                     ", size %d, val 0x%"PRIx64, addr, size, val);
1813         if (size == 4) {
1814             vtd_set_long(s, addr, val);
1815         } else {
1816             vtd_set_quad(s, addr, val);
1817             /* May clear bit 127 (Fault), update PPF */
1818             vtd_update_fsts_ppf(s);
1819         }
1820         break;
1821 
1822     case DMAR_FRCD_REG_0_3:
1823         VTD_DPRINTF(FLOG, "DMAR_FRCD_REG_0_3 write addr 0x%"PRIx64
1824                     ", size %d, val 0x%"PRIx64, addr, size, val);
1825         assert(size == 4);
1826         vtd_set_long(s, addr, val);
1827         /* May clear bit 127 (Fault), update PPF */
1828         vtd_update_fsts_ppf(s);
1829         break;
1830 
1831     default:
1832         VTD_DPRINTF(GENERAL, "error: unhandled reg write addr 0x%"PRIx64
1833                     ", size %d, val 0x%"PRIx64, addr, size, val);
1834         if (size == 4) {
1835             vtd_set_long(s, addr, val);
1836         } else {
1837             vtd_set_quad(s, addr, val);
1838         }
1839     }
1840 }
1841 
1842 static IOMMUTLBEntry vtd_iommu_translate(MemoryRegion *iommu, hwaddr addr,
1843                                          bool is_write)
1844 {
1845     VTDAddressSpace *vtd_as = container_of(iommu, VTDAddressSpace, iommu);
1846     IntelIOMMUState *s = vtd_as->iommu_state;
1847     IOMMUTLBEntry ret = {
1848         .target_as = &address_space_memory,
1849         .iova = addr,
1850         .translated_addr = 0,
1851         .addr_mask = ~(hwaddr)0,
1852         .perm = IOMMU_NONE,
1853     };
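
    /* ret starts out as a full-range entry with no permissions; if the
     * translation below fails it is returned unchanged and the access is
     * rejected.
     */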
1854 
1855     if (!s->dmar_enabled) {
        /* DMAR disabled, pass the access through using 4K pages */
1857         ret.iova = addr & VTD_PAGE_MASK_4K;
1858         ret.translated_addr = addr & VTD_PAGE_MASK_4K;
1859         ret.addr_mask = ~VTD_PAGE_MASK_4K;
1860         ret.perm = IOMMU_RW;
1861         return ret;
1862     }
1863 
1864     vtd_do_iommu_translate(vtd_as, vtd_as->bus, vtd_as->devfn, addr,
1865                            is_write, &ret);
1866     VTD_DPRINTF(MMU,
1867                 "bus %"PRIu8 " slot %"PRIu8 " func %"PRIu8 " devfn %"PRIu8
1868                 " gpa 0x%"PRIx64 " hpa 0x%"PRIx64, pci_bus_num(vtd_as->bus),
1869                 VTD_PCI_SLOT(vtd_as->devfn), VTD_PCI_FUNC(vtd_as->devfn),
1870                 vtd_as->devfn, addr, ret.translated_addr);
1871     return ret;
1872 }
1873 
1874 static const VMStateDescription vtd_vmstate = {
1875     .name = "iommu-intel",
1876     .unmigratable = 1,
1877 };
1878 
1879 static const MemoryRegionOps vtd_mem_ops = {
1880     .read = vtd_mem_read,
1881     .write = vtd_mem_write,
1882     .endianness = DEVICE_LITTLE_ENDIAN,
1883     .impl = {
1884         .min_access_size = 4,
1885         .max_access_size = 8,
1886     },
1887     .valid = {
1888         .min_access_size = 4,
1889         .max_access_size = 8,
1890     },
1891 };
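
/* Both .impl and .valid constrain accesses to 4 or 8 bytes, so the callbacks
 * above only ever see size == 4 or size == 8.
 */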
1892 
1893 static Property vtd_properties[] = {
1894     DEFINE_PROP_UINT32("version", IntelIOMMUState, version, 0),
1895     DEFINE_PROP_END_OF_LIST(),
1896 };
1897 
1898 
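/* Look up, and lazily create, the VTDAddressSpace for the given device.
 * A per-bus VTDBus structure is kept in a hash table keyed by the PCIBus
 * pointer; each devfn on that bus gets its own IOMMU memory region and
 * AddressSpace.
 */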
1899 VTDAddressSpace *vtd_find_add_as(IntelIOMMUState *s, PCIBus *bus, int devfn)
1900 {
1901     uintptr_t key = (uintptr_t)bus;
1902     VTDBus *vtd_bus = g_hash_table_lookup(s->vtd_as_by_busptr, &key);
1903     VTDAddressSpace *vtd_dev_as;
1904 
    if (!vtd_bus) {
        uintptr_t *new_key = g_malloc(sizeof(*new_key));

        *new_key = (uintptr_t)bus;
        /* No corresponding free(): the hash table takes ownership of both
         * the key and the VTDBus, and they must outlive this function, so
         * inserting a pointer to the stack-local 'key' would be a bug.
         */
        vtd_bus = g_malloc0(sizeof(VTDBus) +
                            sizeof(VTDAddressSpace *) * VTD_PCI_DEVFN_MAX);
        vtd_bus->bus = bus;
        g_hash_table_insert(s->vtd_as_by_busptr, new_key, vtd_bus);
1911     }
1912 
1913     vtd_dev_as = vtd_bus->dev_as[devfn];
1914 
1915     if (!vtd_dev_as) {
1916         vtd_bus->dev_as[devfn] = vtd_dev_as = g_malloc0(sizeof(VTDAddressSpace));
1917 
1918         vtd_dev_as->bus = bus;
1919         vtd_dev_as->devfn = (uint8_t)devfn;
1920         vtd_dev_as->iommu_state = s;
1921         vtd_dev_as->context_cache_entry.context_cache_gen = 0;
1922         memory_region_init_iommu(&vtd_dev_as->iommu, OBJECT(s),
1923                                  &s->iommu_ops, "intel_iommu", UINT64_MAX);
1924         address_space_init(&vtd_dev_as->as,
1925                            &vtd_dev_as->iommu, "intel_iommu");
1926     }
1927     return vtd_dev_as;
1928 }
1929 
/* Perform the initialization. This is also called on device reset, so take
 * care when adding new initialization code.
 */
1933 static void vtd_init(IntelIOMMUState *s)
1934 {
1935     memset(s->csr, 0, DMAR_REG_SIZE);
1936     memset(s->wmask, 0, DMAR_REG_SIZE);
1937     memset(s->w1cmask, 0, DMAR_REG_SIZE);
1938     memset(s->womask, 0, DMAR_REG_SIZE);
1939 
1940     s->iommu_ops.translate = vtd_iommu_translate;
1941     s->root = 0;
1942     s->root_extended = false;
1943     s->dmar_enabled = false;
1944     s->iq_head = 0;
1945     s->iq_tail = 0;
1946     s->iq = 0;
1947     s->iq_size = 0;
1948     s->qi_enabled = false;
1949     s->iq_last_desc_type = VTD_INV_DESC_NONE;
1950     s->next_frcd_reg = 0;
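    /* Advertised capabilities: fault recording (FRO/NFR), number of domains
     * (ND), guest address widths (MGAW/SAGAW), invalidation mask (MAMV),
     * page-selective invalidation (PSI) and second-level large pages (SLLPS)
     * in CAP_REG; Queued Invalidation (QI) and the IOTLB register offset
     * (IRO) in ECAP_REG.
     */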
1951     s->cap = VTD_CAP_FRO | VTD_CAP_NFR | VTD_CAP_ND | VTD_CAP_MGAW |
1952              VTD_CAP_SAGAW | VTD_CAP_MAMV | VTD_CAP_PSI | VTD_CAP_SLLPS;
1953     s->ecap = VTD_ECAP_QI | VTD_ECAP_IRO;
1954 
1955     vtd_reset_context_cache(s);
1956     vtd_reset_iotlb(s);
1957 
1958     /* Define registers with default values and bit semantics */
1959     vtd_define_long(s, DMAR_VER_REG, 0x10UL, 0, 0);
1960     vtd_define_quad(s, DMAR_CAP_REG, s->cap, 0, 0);
1961     vtd_define_quad(s, DMAR_ECAP_REG, s->ecap, 0, 0);
1962     vtd_define_long(s, DMAR_GCMD_REG, 0, 0xff800000UL, 0);
1963     vtd_define_long_wo(s, DMAR_GCMD_REG, 0xff800000UL);
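    /* All writable bits of GCMD_REG read back as zero, so it behaves as
     * write-only; command status is reported through GSTS_REG instead.
     */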
1964     vtd_define_long(s, DMAR_GSTS_REG, 0, 0, 0);
1965     vtd_define_quad(s, DMAR_RTADDR_REG, 0, 0xfffffffffffff000ULL, 0);
1966     vtd_define_quad(s, DMAR_CCMD_REG, 0, 0xe0000003ffffffffULL, 0);
1967     vtd_define_quad_wo(s, DMAR_CCMD_REG, 0x3ffff0000ULL);
1968 
1969     /* Advanced Fault Logging not supported */
1970     vtd_define_long(s, DMAR_FSTS_REG, 0, 0, 0x11UL);
1971     vtd_define_long(s, DMAR_FECTL_REG, 0x80000000UL, 0x80000000UL, 0);
1972     vtd_define_long(s, DMAR_FEDATA_REG, 0, 0x0000ffffUL, 0);
1973     vtd_define_long(s, DMAR_FEADDR_REG, 0, 0xfffffffcUL, 0);
1974 
1975     /* Treated as RsvdZ when EIM in ECAP_REG is not supported
1976      * vtd_define_long(s, DMAR_FEUADDR_REG, 0, 0xffffffffUL, 0);
1977      */
1978     vtd_define_long(s, DMAR_FEUADDR_REG, 0, 0, 0);
1979 
    /* Treated as RO by implementations that report the PLMR and PHMR fields
     * as Clear in CAP_REG.
1982      * vtd_define_long(s, DMAR_PMEN_REG, 0, 0x80000000UL, 0);
1983      */
1984     vtd_define_long(s, DMAR_PMEN_REG, 0, 0, 0);
1985 
1986     vtd_define_quad(s, DMAR_IQH_REG, 0, 0, 0);
1987     vtd_define_quad(s, DMAR_IQT_REG, 0, 0x7fff0ULL, 0);
1988     vtd_define_quad(s, DMAR_IQA_REG, 0, 0xfffffffffffff007ULL, 0);
1989     vtd_define_long(s, DMAR_ICS_REG, 0, 0, 0x1UL);
1990     vtd_define_long(s, DMAR_IECTL_REG, 0x80000000UL, 0x80000000UL, 0);
1991     vtd_define_long(s, DMAR_IEDATA_REG, 0, 0xffffffffUL, 0);
1992     vtd_define_long(s, DMAR_IEADDR_REG, 0, 0xfffffffcUL, 0);
    /* Treated as RsvdZ when EIM in ECAP_REG is not supported */
1994     vtd_define_long(s, DMAR_IEUADDR_REG, 0, 0, 0);
1995 
1996     /* IOTLB registers */
    vtd_define_quad(s, DMAR_IOTLB_REG, 0, 0xb003ffff00000000ULL, 0);
1998     vtd_define_quad(s, DMAR_IVA_REG, 0, 0xfffffffffffff07fULL, 0);
1999     vtd_define_quad_wo(s, DMAR_IVA_REG, 0xfffffffffffff07fULL);
2000 
2001     /* Fault Recording Registers, 128-bit */
2002     vtd_define_quad(s, DMAR_FRCD_REG_0_0, 0, 0, 0);
2003     vtd_define_quad(s, DMAR_FRCD_REG_0_2, 0, 0, 0x8000000000000000ULL);
2004 }
2005 
/* Do not reset the address spaces on device reset: devices keep using the
 * address space they obtained initially (they will not ask the bus again).
 */
2009 static void vtd_reset(DeviceState *dev)
2010 {
2011     IntelIOMMUState *s = INTEL_IOMMU_DEVICE(dev);
2012 
2013     VTD_DPRINTF(GENERAL, "");
2014     vtd_init(s);
2015 }
2016 
2017 static void vtd_realize(DeviceState *dev, Error **errp)
2018 {
2019     IntelIOMMUState *s = INTEL_IOMMU_DEVICE(dev);
2020 
2021     VTD_DPRINTF(GENERAL, "");
2022     memset(s->vtd_as_by_bus_num, 0, sizeof(s->vtd_as_by_bus_num));
2023     memory_region_init_io(&s->csrmem, OBJECT(s), &vtd_mem_ops, s,
2024                           "intel_iommu", DMAR_REG_SIZE);
2025     sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->csrmem);
2026     /* No corresponding destroy */
2027     s->iotlb = g_hash_table_new_full(vtd_uint64_hash, vtd_uint64_equal,
2028                                      g_free, g_free);
    s->vtd_as_by_busptr = g_hash_table_new_full(vtd_uint64_hash,
                                                vtd_uint64_equal,
                                                g_free, g_free);
2031     vtd_init(s);
2032 }
2033 
2034 static void vtd_class_init(ObjectClass *klass, void *data)
2035 {
2036     DeviceClass *dc = DEVICE_CLASS(klass);
2037 
2038     dc->reset = vtd_reset;
2039     dc->realize = vtd_realize;
2040     dc->vmsd = &vtd_vmstate;
2041     dc->props = vtd_properties;
2042 }
2043 
2044 static const TypeInfo vtd_info = {
2045     .name          = TYPE_INTEL_IOMMU_DEVICE,
2046     .parent        = TYPE_SYS_BUS_DEVICE,
2047     .instance_size = sizeof(IntelIOMMUState),
2048     .class_init    = vtd_class_init,
2049 };
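
/* Illustrative only: a board would typically instantiate the device along
 * the lines of
 *
 *     DeviceState *dev = qdev_create(NULL, TYPE_INTEL_IOMMU_DEVICE);
 *     qdev_init_nofail(dev);
 *     sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, iommu_base);
 *
 * where iommu_base stands in for whatever MMIO address the board reserves
 * for the DMAR unit.
 */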
2050 
2051 static void vtd_register_types(void)
2052 {
2053     VTD_DPRINTF(GENERAL, "");
2054     type_register_static(&vtd_info);
2055 }
2056 
2057 type_init(vtd_register_types)
2058