Lines Matching +full:high +full:- +full:vt
2 * QEMU emulation of an Intel IOMMU (VT-d)
23 #include "qemu/error-report.h"
24 #include "qemu/main-loop.h"
30 #include "hw/qdev-properties.h"
32 #include "hw/i386/apic-msidef.h"
33 #include "hw/i386/x86-iommu.h"
34 #include "hw/pci-host/q35.h"
45 ((ce)->val[1] & VTD_SM_CONTEXT_ENTRY_RID2PASID_MASK)
47 ((ce)->val[0] & VTD_PASID_DIR_BASE_ADDR_MASK)
50 #define VTD_PE_GET_TYPE(pe) ((pe)->val[0] & VTD_SM_PASID_ENTRY_PGTT)
52 (4 + (((pe)->val[2] >> 2) & VTD_SM_PASID_ENTRY_FLPM))
54 (2 + (((pe)->val[0] >> 2) & VTD_SM_PASID_ENTRY_AW))
90 error_report("We need to set caching-mode=on for intel-iommu to enable " in vtd_panic_require_caching_mode()
98 stq_le_p(&s->csr[addr], val); in vtd_define_quad()
99 stq_le_p(&s->wmask[addr], wmask); in vtd_define_quad()
100 stq_le_p(&s->w1cmask[addr], w1cmask); in vtd_define_quad()
105 stq_le_p(&s->womask[addr], mask); in vtd_define_quad_wo()
111 stl_le_p(&s->csr[addr], val); in vtd_define_long()
112 stl_le_p(&s->wmask[addr], wmask); in vtd_define_long()
113 stl_le_p(&s->w1cmask[addr], w1cmask); in vtd_define_long()
118 stl_le_p(&s->womask[addr], mask); in vtd_define_long_wo()
124 uint64_t oldval = ldq_le_p(&s->csr[addr]); in vtd_set_quad()
125 uint64_t wmask = ldq_le_p(&s->wmask[addr]); in vtd_set_quad()
126 uint64_t w1cmask = ldq_le_p(&s->w1cmask[addr]); in vtd_set_quad()
127 stq_le_p(&s->csr[addr], in vtd_set_quad()
133 uint32_t oldval = ldl_le_p(&s->csr[addr]); in vtd_set_long()
134 uint32_t wmask = ldl_le_p(&s->wmask[addr]); in vtd_set_long()
135 uint32_t w1cmask = ldl_le_p(&s->w1cmask[addr]); in vtd_set_long()
136 stl_le_p(&s->csr[addr], in vtd_set_long()
142 uint64_t val = ldq_le_p(&s->csr[addr]); in vtd_get_quad()
143 uint64_t womask = ldq_le_p(&s->womask[addr]); in vtd_get_quad()
149 uint32_t val = ldl_le_p(&s->csr[addr]); in vtd_get_long()
150 uint32_t womask = ldl_le_p(&s->womask[addr]); in vtd_get_long()
157 return ldq_le_p(&s->csr[addr]); in vtd_get_quad_raw()
162 return ldl_le_p(&s->csr[addr]); in vtd_get_long_raw()
167 stq_le_p(&s->csr[addr], val); in vtd_set_quad_raw()
173 uint32_t new_val = (ldl_le_p(&s->csr[addr]) & ~clear) | mask; in vtd_set_clear_mask_long()
174 stl_le_p(&s->csr[addr], new_val); in vtd_set_clear_mask_long()
181 uint64_t new_val = (ldq_le_p(&s->csr[addr]) & ~clear) | mask; in vtd_set_clear_mask_quad()
182 stq_le_p(&s->csr[addr], new_val); in vtd_set_clear_mask_quad()
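The truncated stq_le_p()/stl_le_p() expressions above apply each register's writable-bit mask and write-1-to-clear mask when the guest writes a CSR, and mask out write-only bits on reads. A minimal standalone sketch of that masking rule, with assumed semantics and illustrative bit assignments rather than QEMU's actual register layout:

#include <stdint.h>
#include <assert.h>

/* New register value after a guest write, given the current value, the
 * read-write bit mask and the write-1-to-clear bit mask. */
static uint64_t csr_write(uint64_t oldval, uint64_t val,
                          uint64_t wmask, uint64_t w1cmask)
{
    /* Read-only bits keep oldval, writable bits take val, and W1C bits
     * are cleared wherever the guest wrote a 1. */
    return ((oldval & ~wmask) | (val & wmask)) & ~(w1cmask & val);
}

/* What the guest reads back: write-only bits always read as zero. */
static uint64_t csr_read(uint64_t val, uint64_t womask)
{
    return val & ~womask;
}

int main(void)
{
    /* bit0 read-write, bit1 write-1-to-clear, bit2 read-only */
    assert(csr_write(0x6, 0x3, 0x1, 0x2) == 0x5);
    assert(csr_read(0x5, 0x0) == 0x5);
    return 0;
}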
188 qemu_mutex_lock(&s->iommu_lock); in vtd_iommu_lock()
193 qemu_mutex_unlock(&s->iommu_lock); in vtd_iommu_unlock()
200 if (s->scalable_mode) { in vtd_update_scalable_state()
201 s->root_scalable = val & VTD_RTADDR_SMT; in vtd_update_scalable_state()
209 if (s->ecap & VTD_ECAP_SMTS && in vtd_update_iq_dw()
211 s->iq_dw = true; in vtd_update_iq_dw()
213 s->iq_dw = false; in vtd_update_iq_dw()
220 return as->notifier_flags & IOMMU_NOTIFIER_MAP; in vtd_as_has_map_notifier()
229 return key1->sid == key2->sid && in vtd_iotlb_equal()
230 key1->pasid == key2->pasid && in vtd_iotlb_equal()
231 key1->level == key2->level && in vtd_iotlb_equal()
232 key1->gfn == key2->gfn; in vtd_iotlb_equal()
238 uint64_t hash64 = key->gfn | ((uint64_t)(key->sid) << VTD_IOTLB_SID_SHIFT) | in vtd_iotlb_hash()
239 (uint64_t)(key->level - 1) << VTD_IOTLB_LVL_SHIFT | in vtd_iotlb_hash()
240 (uint64_t)(key->pasid) << VTD_IOTLB_PASID_SHIFT; in vtd_iotlb_hash()
250 return (key1->bus == key2->bus) && (key1->devfn == key2->devfn) && in vtd_as_equal()
251 (key1->pasid == key2->pasid); in vtd_as_equal()
262 guint value = (guint)(uintptr_t)key->bus; in vtd_as_hash()
264 return (guint)(value << 8 | key->devfn); in vtd_as_hash()
278 return (key1->bus == key2->bus) && (key1->devfn == key2->devfn); in vtd_hiod_equal()
291 return entry->domain_id == domain_id; in vtd_hash_remove_by_domain()
298 return VTD_PAGE_SHIFT_4K + (level - 1) * VTD_LEVEL_BITS; in vtd_pt_level_shift()
303 return ~((1ULL << vtd_pt_level_shift(level)) - 1); in vtd_pt_level_page_mask()
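vtd_pt_level_shift() and vtd_pt_level_page_mask() above capture the 9-bits-per-level layout of the paging structures: level 1 maps 4 KiB pages, level 2 maps 2 MiB, level 3 maps 1 GiB. A self-contained sketch with the two constants inlined:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT_4K 12
#define LEVEL_BITS    9       /* 512 entries per table */

static uint32_t pt_level_shift(uint32_t level)
{
    return PAGE_SHIFT_4K + (level - 1) * LEVEL_BITS;
}

static uint64_t pt_level_page_mask(uint32_t level)
{
    return ~((1ULL << pt_level_shift(level)) - 1);
}

int main(void)
{
    for (uint32_t level = 1; level <= 3; level++) {
        printf("level %u: shift %u, page size 0x%llx, mask 0x%016llx\n",
               level, pt_level_shift(level),
               (unsigned long long)(1ULL << pt_level_shift(level)),
               (unsigned long long)pt_level_page_mask(level));
    }
    return 0;
}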
311 uint64_t gfn = (info->addr >> VTD_PAGE_SHIFT_4K) & info->mask; in vtd_hash_remove_by_page()
312 uint64_t gfn_tlb = (info->addr & entry->mask) >> VTD_PAGE_SHIFT_4K; in vtd_hash_remove_by_page()
314 if (entry->domain_id != info->domain_id) { in vtd_hash_remove_by_page()
319 * According to spec, IOTLB entries caching first-stage (PGTT=001b) or in vtd_hash_remove_by_page()
320 * nested (PGTT=011b) mapping associated with specified domain-id are in vtd_hash_remove_by_page()
323 if (entry->pgtt == VTD_SM_PASID_ENTRY_FLT) { in vtd_hash_remove_by_page()
327 return (entry->gfn & info->mask) == gfn || entry->gfn == gfn_tlb; in vtd_hash_remove_by_page()
335 uint64_t gfn = (info->addr >> VTD_PAGE_SHIFT_4K) & info->mask; in vtd_hash_remove_by_page_piotlb()
336 uint64_t gfn_tlb = (info->addr & entry->mask) >> VTD_PAGE_SHIFT_4K; in vtd_hash_remove_by_page_piotlb()
339 * According to spec, PASID-based-IOTLB Invalidation in page granularity in vtd_hash_remove_by_page_piotlb()
340 * doesn't invalidate IOTLB entries caching second-stage (PGTT=010b) in vtd_hash_remove_by_page_piotlb()
341 * or pass-through (PGTT=100b) mappings. Nested isn't supported yet, in vtd_hash_remove_by_page_piotlb()
342 * so only need to check first-stage (PGTT=001b) mappings. in vtd_hash_remove_by_page_piotlb()
344 if (entry->pgtt != VTD_SM_PASID_ENTRY_FLT) { in vtd_hash_remove_by_page_piotlb()
348 return entry->domain_id == info->domain_id && entry->pasid == info->pasid && in vtd_hash_remove_by_page_piotlb()
349 ((entry->gfn & info->mask) == gfn || entry->gfn == gfn_tlb); in vtd_hash_remove_by_page_piotlb()
362 g_hash_table_iter_init(&as_it, s->vtd_address_spaces); in vtd_reset_context_cache_locked()
365 vtd_as->context_cache_entry.context_cache_gen = 0; in vtd_reset_context_cache_locked()
367 s->context_cache_gen = 1; in vtd_reset_context_cache_locked()
373 assert(s->iotlb); in vtd_reset_iotlb_locked()
374 g_hash_table_remove_all(s->iotlb); in vtd_reset_iotlb_locked()
410 entry = g_hash_table_lookup(s->iotlb, &key); in vtd_lookup_iotlb()
431 if (g_hash_table_size(s->iotlb) >= VTD_IOTLB_MAX_SIZE) { in vtd_update_iotlb()
436 entry->gfn = gfn; in vtd_update_iotlb()
437 entry->domain_id = domain_id; in vtd_update_iotlb()
438 entry->pte = pte; in vtd_update_iotlb()
439 entry->access_flags = access_flags; in vtd_update_iotlb()
440 entry->mask = vtd_pt_level_page_mask(level); in vtd_update_iotlb()
441 entry->pasid = pasid; in vtd_update_iotlb()
442 entry->pgtt = pgtt; in vtd_update_iotlb()
444 key->gfn = gfn; in vtd_update_iotlb()
445 key->sid = source_id; in vtd_update_iotlb()
446 key->level = level; in vtd_update_iotlb()
447 key->pasid = pasid; in vtd_update_iotlb()
449 g_hash_table_replace(s->iotlb, key, entry); in vtd_update_iotlb()
468 apic_get_class(NULL)->send_msi(&msi); in vtd_generate_interrupt()
498 /* Each reg is 128-bit */ in vtd_is_frcd_set()
500 addr += 8; /* Access the high 64-bit half */ in vtd_is_frcd_set()
528 /* Each reg is 128-bit */ in vtd_set_frcd_and_update_ppf()
530 addr += 8; /* Access the high 64-bit half */ in vtd_set_frcd_and_update_ppf()
557 hwaddr addr = DMAR_FRCD_REG_OFFSET + 8; /* The high 64-bit half */ in vtd_try_collapse_fault()
565 addr += 16; /* 128-bit for each */ in vtd_try_collapse_fault()
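The fault recording registers accessed above are 128 bits wide, so consecutive entries sit 16 bytes apart and the high half (which carries the F bit and fault reason) is at offset +8. A tiny addressing sketch; the base offset here is illustrative, a real unit advertises it through the FRO field of CAP_REG:

#include <stdint.h>
#include <assert.h>

#define FRCD_REG_BASE  0x220   /* assumed base offset, for illustration */
#define FRCD_REG_SIZE  16      /* each fault recording register is 128-bit */

static uint64_t frcd_lo_addr(unsigned index) { return FRCD_REG_BASE + FRCD_REG_SIZE * index; }
static uint64_t frcd_hi_addr(unsigned index) { return frcd_lo_addr(index) + 8; }

int main(void)
{
    assert(frcd_lo_addr(0) == 0x220 && frcd_hi_addr(0) == 0x228);
    assert(frcd_lo_addr(1) == 0x230 && frcd_hi_addr(1) == 0x238);
    return 0;
}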
588 if (vtd_is_frcd_set(s, s->next_frcd_reg)) { in vtd_report_frcd_fault()
595 vtd_record_frcd(s, s->next_frcd_reg, hi, lo); in vtd_report_frcd_fault()
600 vtd_set_frcd_and_update_ppf(s, s->next_frcd_reg); in vtd_report_frcd_fault()
601 s->next_frcd_reg++; in vtd_report_frcd_fault()
602 if (s->next_frcd_reg == DMAR_FRCD_REG_NR) { in vtd_report_frcd_fault()
603 s->next_frcd_reg = 0; in vtd_report_frcd_fault()
607 VTD_FSTS_FRI(s->next_frcd_reg)); in vtd_report_frcd_fault()
608 vtd_set_frcd_and_update_ppf(s, s->next_frcd_reg); /* Will set PPF */ in vtd_report_frcd_fault()
609 s->next_frcd_reg++; in vtd_report_frcd_fault()
610 if (s->next_frcd_reg == DMAR_FRCD_REG_NR) { in vtd_report_frcd_fault()
611 s->next_frcd_reg = 0; in vtd_report_frcd_fault()
690 if (s->root_scalable && devfn > UINT8_MAX / 2) { in vtd_root_entry_present()
691 return re->hi & VTD_ROOT_ENTRY_P; in vtd_root_entry_present()
694 return re->lo & VTD_ROOT_ENTRY_P; in vtd_root_entry_present()
702 addr = s->root + index * sizeof(*re); in vtd_get_root_entry()
705 re->lo = 0; in vtd_get_root_entry()
706 return -VTD_FR_ROOT_TABLE_INV; in vtd_get_root_entry()
708 re->lo = le64_to_cpu(re->lo); in vtd_get_root_entry()
709 re->hi = le64_to_cpu(re->hi); in vtd_get_root_entry()
715 return context->lo & VTD_CONTEXT_ENTRY_P; in vtd_ce_present()
726 ce_size = s->root_scalable ? VTD_CTX_ENTRY_SCALABLE_SIZE : in vtd_get_context_entry_from_root()
729 if (s->root_scalable && index > UINT8_MAX / 2) { in vtd_get_context_entry_from_root()
731 addr = re->hi & VTD_ROOT_ENTRY_CTP; in vtd_get_context_entry_from_root()
733 addr = re->lo & VTD_ROOT_ENTRY_CTP; in vtd_get_context_entry_from_root()
739 return -VTD_FR_CONTEXT_TABLE_INV; in vtd_get_context_entry_from_root()
742 ce->lo = le64_to_cpu(ce->lo); in vtd_get_context_entry_from_root()
743 ce->hi = le64_to_cpu(ce->hi); in vtd_get_context_entry_from_root()
745 ce->val[2] = le64_to_cpu(ce->val[2]); in vtd_get_context_entry_from_root()
746 ce->val[3] = le64_to_cpu(ce->val[3]); in vtd_get_context_entry_from_root()
753 return ce->lo & VTD_CONTEXT_ENTRY_SLPTPTR; in vtd_ce_get_slpt_base()
777 pte = (uint64_t)-1; in vtd_get_pte()
790 ((1ULL << VTD_LEVEL_BITS) - 1); in vtd_iova_level_offset()
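vtd_iova_level_offset() extracts the 9-bit table index for one level of the walk; together with the level shift this is how an IOVA decomposes into per-level indices. A hedged sketch:

#include <stdint.h>
#include <assert.h>

#define PAGE_SHIFT_4K 12
#define LEVEL_BITS    9

static uint32_t iova_level_offset(uint64_t iova, uint32_t level)
{
    return (iova >> (PAGE_SHIFT_4K + (level - 1) * LEVEL_BITS)) &
           ((1U << LEVEL_BITS) - 1);
}

int main(void)
{
    uint64_t iova = 0x0000001234567000ULL;

    /* A 4-level walk consumes indices from level 4 down to level 1. */
    assert(iova_level_offset(iova, 4) == ((iova >> 39) & 0x1ff));
    assert(iova_level_offset(iova, 3) == ((iova >> 30) & 0x1ff));
    assert(iova_level_offset(iova, 1) == ((iova >> 12) & 0x1ff));
    return 0;
}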
793 /* Check Capability Register to see if the @level of page-table is supported */
796 return VTD_CAP_SAGAW_MASK & s->cap & in vtd_is_sl_level_supported()
797 (1ULL << (level - 2 + VTD_CAP_SAGAW_SHIFT)); in vtd_is_sl_level_supported()
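The SAGAW test above checks bit (level - 2) within the CAP_REG SAGAW field, so a 3-level (39-bit) table needs the second bit of the field and a 4-level (48-bit) table the third. A minimal sketch; the field position matches the VT-d CAP_REG layout but the macro names are mine:

#include <stdint.h>
#include <stdbool.h>
#include <assert.h>

#define CAP_SAGAW_SHIFT   8                              /* SAGAW field, CAP_REG[12:8] */
#define CAP_SAGAW_MASK    (0x1fULL << CAP_SAGAW_SHIFT)
#define CAP_SAGAW_39BIT   (0x2ULL << CAP_SAGAW_SHIFT)    /* 3-level tables */
#define CAP_SAGAW_48BIT   (0x4ULL << CAP_SAGAW_SHIFT)    /* 4-level tables */

static bool sl_level_supported(uint64_t cap, uint32_t level)
{
    return CAP_SAGAW_MASK & cap & (1ULL << (level - 2 + CAP_SAGAW_SHIFT));
}

int main(void)
{
    uint64_t cap = CAP_SAGAW_39BIT | CAP_SAGAW_48BIT;

    assert(sl_level_supported(cap, 3));    /* 39-bit AGAW */
    assert(sl_level_supported(cap, 4));    /* 48-bit AGAW */
    assert(!sl_level_supported(cap, 5));   /* 57-bit AGAW not advertised */
    return 0;
}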
810 return !!(s->ecap & VTD_ECAP_FLTS); in vtd_pe_type_check()
812 return !!(s->ecap & VTD_ECAP_SLTS); in vtd_pe_type_check()
817 return !!(s->ecap & VTD_ECAP_PT); in vtd_pe_type_check()
826 return pdire->val & 1; in vtd_pdire_present()
845 return -VTD_FR_PASID_DIR_ACCESS_ERR; in vtd_get_pdire_from_pdir_table()
848 pdire->val = le64_to_cpu(pdire->val); in vtd_get_pdire_from_pdir_table()
855 return pe->val[0] & VTD_PASID_ENTRY_P; in vtd_pe_present()
872 return -VTD_FR_PASID_TABLE_ACCESS_ERR; in vtd_get_pe_in_pasid_leaf_table()
874 for (size_t i = 0; i < ARRAY_SIZE(pe->val); i++) { in vtd_get_pe_in_pasid_leaf_table()
875 pe->val[i] = le64_to_cpu(pe->val[i]); in vtd_get_pe_in_pasid_leaf_table()
880 return -VTD_FR_PASID_TABLE_ENTRY_INV; in vtd_get_pe_in_pasid_leaf_table()
886 return -VTD_FR_PASID_TABLE_ENTRY_INV; in vtd_get_pe_in_pasid_leaf_table()
891 return -VTD_FR_PASID_TABLE_ENTRY_INV; in vtd_get_pe_in_pasid_leaf_table()
906 dma_addr_t addr = pdire->val & VTD_PASID_TABLE_BASE_ADDR_MASK; in vtd_get_pe_from_pdire()
932 return -VTD_FR_PASID_DIR_ENTRY_P; in vtd_get_pe_from_pasid_table()
941 return -VTD_FR_PASID_ENTRY_P; in vtd_get_pe_from_pasid_table()
994 return -VTD_FR_PASID_DIR_ENTRY_P; in vtd_ce_get_pasid_fpd()
1013 /* Get the page-table level that hardware should use for the second-level
1014 * page-table walk from the Address Width field of context-entry.
1018 return 2 + (ce->hi & VTD_CONTEXT_ENTRY_AW); in vtd_ce_get_level()
1027 if (s->root_scalable) { in vtd_get_iova_level()
1029 if (s->flts) { in vtd_get_iova_level()
1041 return 30 + (ce->hi & VTD_CONTEXT_ENTRY_AW) * 9; in vtd_ce_get_agaw()
1050 if (s->root_scalable) { in vtd_get_iova_agaw()
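vtd_ce_get_level() and vtd_ce_get_agaw() decode the 3-bit Address Width field of a legacy context entry: level = 2 + AW and AGAW = 30 + 9 * AW, so AW=1 selects a 3-level/39-bit table and AW=2 a 4-level/48-bit table. A tiny check of that arithmetic:

#include <stdint.h>
#include <assert.h>

static uint32_t ce_aw_to_level(uint32_t aw) { return 2 + aw; }
static uint32_t ce_aw_to_agaw(uint32_t aw)  { return 30 + aw * 9; }

int main(void)
{
    assert(ce_aw_to_level(1) == 3 && ce_aw_to_agaw(1) == 39);
    assert(ce_aw_to_level(2) == 4 && ce_aw_to_agaw(2) == 48);
    return 0;
}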
1060 return ce->lo & VTD_CONTEXT_ENTRY_TT; in vtd_ce_get_type()
1072 if (!x86_iommu->dt_supported) { in vtd_ce_type_check()
1078 if (!x86_iommu->pt_supported) { in vtd_ce_type_check()
1106      * Check that @iova does not exceed 2^X-1, where X is the minimum of MGAW in vtd_iova_sl_range_check()
1107 * in CAP_REG and AW in context-entry. in vtd_iova_sl_range_check()
1109 return !(iova & ~(vtd_iova_limit(s, ce, aw, pasid) - 1)); in vtd_iova_sl_range_check()
1118 if (s->root_scalable) { in vtd_get_iova_pgtbl_base()
1120 if (s->flts) { in vtd_get_iova_pgtbl_base()
1135 * We support only 3-level and 4-level page tables (see vtd_init() which
1136 * sets only VTD_CAP_SAGAW_39bit and maybe VTD_CAP_SAGAW_48bit bits in s->cap).
1147 * We should have caught a guest-mis-programmed level earlier, in vtd_slpte_nonzero_rsvd()
1186 return -VTD_FR_ADDR_BEYOND_MGAW; in vtd_iova_to_slpte()
1196 if (slpte == (uint64_t)-1) { in vtd_iova_to_slpte()
1201 /* Invalid programming of context-entry */ in vtd_iova_to_slpte()
1202 return -VTD_FR_CONTEXT_ENTRY_INV; in vtd_iova_to_slpte()
1204 return -VTD_FR_PAGING_ENTRY_INV; in vtd_iova_to_slpte()
1215 return is_write ? -VTD_FR_WRITE : -VTD_FR_READ; in vtd_iova_to_slpte()
1218 error_report_once("%s: detected slpte reserved non-zero " in vtd_iova_to_slpte()
1222 return -VTD_FR_PAGING_ENTRY_RSVD; in vtd_iova_to_slpte()
1231 level--; in vtd_iova_to_slpte()
1245 * @as: VT-d address space of the device
1260 VTDAddressSpace *as = info->as; in vtd_page_walk_one()
1261 vtd_page_walk_hook hook_fn = info->hook_fn; in vtd_page_walk_one()
1262 void *private = info->private; in vtd_page_walk_one()
1263 IOMMUTLBEntry *entry = &event->entry; in vtd_page_walk_one()
1265 .iova = entry->iova, in vtd_page_walk_one()
1266 .size = entry->addr_mask, in vtd_page_walk_one()
1267 .translated_addr = entry->translated_addr, in vtd_page_walk_one()
1268 .perm = entry->perm, in vtd_page_walk_one()
1270 const DMAMap *mapped = iova_tree_find(as->iova_tree, &target); in vtd_page_walk_one()
1272 if (event->type == IOMMU_NOTIFIER_UNMAP && !info->notify_unmap) { in vtd_page_walk_one()
1273 trace_vtd_page_walk_one_skip_unmap(entry->iova, entry->addr_mask); in vtd_page_walk_one()
1280 if (event->type == IOMMU_NOTIFIER_MAP) { in vtd_page_walk_one()
1284 trace_vtd_page_walk_one_skip_map(entry->iova, entry->addr_mask, in vtd_page_walk_one()
1285 entry->translated_addr); in vtd_page_walk_one()
1302 IOMMUAccessFlags cache_perm = entry->perm; in vtd_page_walk_one()
1306 event->type = IOMMU_NOTIFIER_UNMAP; in vtd_page_walk_one()
1307 entry->perm = IOMMU_NONE; in vtd_page_walk_one()
1308 trace_vtd_page_walk_one(info->domain_id, in vtd_page_walk_one()
1309 entry->iova, in vtd_page_walk_one()
1310 entry->translated_addr, in vtd_page_walk_one()
1311 entry->addr_mask, in vtd_page_walk_one()
1312 entry->perm); in vtd_page_walk_one()
1318 iova_tree_remove(as->iova_tree, target); in vtd_page_walk_one()
1320 event->type = IOMMU_NOTIFIER_MAP; in vtd_page_walk_one()
1321 entry->perm = cache_perm; in vtd_page_walk_one()
1324 iova_tree_insert(as->iova_tree, &target); in vtd_page_walk_one()
1328 trace_vtd_page_walk_one_skip_unmap(entry->iova, entry->addr_mask); in vtd_page_walk_one()
1331 iova_tree_remove(as->iova_tree, target); in vtd_page_walk_one()
1334 trace_vtd_page_walk_one(info->domain_id, entry->iova, in vtd_page_walk_one()
1335 entry->translated_addr, entry->addr_mask, in vtd_page_walk_one()
1336 entry->perm); in vtd_page_walk_one()
1341 * vtd_page_walk_level - walk over specific level for IOVA range
1374 if (slpte == (uint64_t)-1) { in vtd_page_walk_level()
1400 ret = vtd_page_walk_level(vtd_get_pte_addr(slpte, info->aw), in vtd_page_walk_level()
1401 iova, MIN(iova_next, end), level - 1, in vtd_page_walk_level()
1417 event.entry.translated_addr = vtd_get_pte_addr(slpte, info->aw); in vtd_page_walk_level()
1435 * vtd_page_walk - walk specific IOVA range, and call the hook
1451 if (!vtd_iova_sl_range_check(s, start, ce, info->aw, pasid)) { in vtd_page_walk()
1452 return -VTD_FR_ADDR_BEYOND_MGAW; in vtd_page_walk()
1455 if (!vtd_iova_sl_range_check(s, end, ce, info->aw, pasid)) { in vtd_page_walk()
1457 end = vtd_iova_limit(s, ce, info->aw, pasid); in vtd_page_walk()
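vtd_page_walk() first rejects a start address beyond the guest address width and then clamps the end of the requested range to the IOVA limit before walking. A hedged sketch of just that bounds handling (the error constant is illustrative):

#include <stdint.h>
#include <stdio.h>

#define ERR_ADDR_BEYOND_MGAW  (-1)

/* The walkable limit is 2^agaw for the address width in use. */
static int clamp_walk_range(uint64_t *start, uint64_t *end, uint32_t agaw)
{
    uint64_t limit = 1ULL << agaw;

    if (*start >= limit) {
        return ERR_ADDR_BEYOND_MGAW;   /* nothing translatable to walk */
    }
    if (*end > limit) {
        *end = limit;                  /* walk only the in-range part */
    }
    return 0;
}

int main(void)
{
    uint64_t start = 0, end = UINT64_MAX;

    if (!clamp_walk_range(&start, &end, 39)) {
        printf("walking [0x%llx, 0x%llx)\n",
               (unsigned long long)start, (unsigned long long)end);
    }
    return 0;
}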
1467 if (!s->root_scalable && in vtd_root_entry_rsvd_bits_check()
1468 (re->hi || (re->lo & VTD_ROOT_ENTRY_RSVD(s->aw_bits)))) in vtd_root_entry_rsvd_bits_check()
1472 if (s->root_scalable && in vtd_root_entry_rsvd_bits_check()
1473 ((re->lo & VTD_ROOT_ENTRY_RSVD(s->aw_bits)) || in vtd_root_entry_rsvd_bits_check()
1474 (re->hi & VTD_ROOT_ENTRY_RSVD(s->aw_bits)))) in vtd_root_entry_rsvd_bits_check()
1482 __func__, re->hi, re->lo); in vtd_root_entry_rsvd_bits_check()
1483 return -VTD_FR_ROOT_ENTRY_RSVD; in vtd_root_entry_rsvd_bits_check()
1489 if (!s->root_scalable && in vtd_context_entry_rsvd_bits_check()
1490 (ce->hi & VTD_CONTEXT_ENTRY_RSVD_HI || in vtd_context_entry_rsvd_bits_check()
1491 ce->lo & VTD_CONTEXT_ENTRY_RSVD_LO(s->aw_bits))) { in vtd_context_entry_rsvd_bits_check()
1494 __func__, ce->hi, ce->lo); in vtd_context_entry_rsvd_bits_check()
1495 return -VTD_FR_CONTEXT_ENTRY_RSVD; in vtd_context_entry_rsvd_bits_check()
1498 if (s->root_scalable && in vtd_context_entry_rsvd_bits_check()
1499 (ce->val[0] & VTD_SM_CONTEXT_ENTRY_RSVD_VAL0(s->aw_bits) || in vtd_context_entry_rsvd_bits_check()
1500 ce->val[1] & VTD_SM_CONTEXT_ENTRY_RSVD_VAL1 || in vtd_context_entry_rsvd_bits_check()
1501 ce->val[2] || in vtd_context_entry_rsvd_bits_check()
1502 ce->val[3])) { in vtd_context_entry_rsvd_bits_check()
1507 __func__, ce->val[3], ce->val[2], in vtd_context_entry_rsvd_bits_check()
1508 ce->val[1], ce->val[0]); in vtd_context_entry_rsvd_bits_check()
1509 return -VTD_FR_CONTEXT_ENTRY_RSVD; in vtd_context_entry_rsvd_bits_check()
1528 /* Map a device to its corresponding domain (context-entry) */
1542 /* Not an error - it's okay we don't have a root entry. */ in vtd_dev_to_context_entry()
1544 return -VTD_FR_ROOT_ENTRY_P; in vtd_dev_to_context_entry()
1558 /* Not an error - it's okay we don't have a context entry. */ in vtd_dev_to_context_entry()
1560 return -VTD_FR_CONTEXT_ENTRY_P; in vtd_dev_to_context_entry()
1568 /* Check if the programming of context-entry is valid */ in vtd_dev_to_context_entry()
1569 if (!s->root_scalable && in vtd_dev_to_context_entry()
1573 __func__, ce->hi, ce->lo, in vtd_dev_to_context_entry()
1575 return -VTD_FR_CONTEXT_ENTRY_INV; in vtd_dev_to_context_entry()
1578 if (!s->root_scalable) { in vtd_dev_to_context_entry()
1582 return -VTD_FR_CONTEXT_ENTRY_INV; in vtd_dev_to_context_entry()
1586 * Check if the programming of context-entry.rid2pasid in vtd_dev_to_context_entry()
1613 if (s->root_scalable) { in vtd_get_domain_id()
1618 return VTD_CONTEXT_ENTRY_DID(ce->hi); in vtd_get_domain_id()
1625 IntelIOMMUState *s = vtd_as->iommu_state; in vtd_sync_shadow_page_table_range()
1628 .private = (void *)&vtd_as->iommu, in vtd_sync_shadow_page_table_range()
1630 .aw = s->aw_bits, in vtd_sync_shadow_page_table_range()
1632 .domain_id = vtd_get_domain_id(s, ce, vtd_as->pasid), in vtd_sync_shadow_page_table_range()
1635 return vtd_page_walk(s, ce, addr, addr + size, &info, vtd_as->pasid); in vtd_sync_shadow_page_table_range()
1646 IOMMU_NOTIFIER_FOREACH(n, &vtd_as->iommu) { in vtd_address_space_sync()
1652 ret = vtd_dev_to_context_entry(vtd_as->iommu_state, in vtd_address_space_sync()
1653 pci_bus_num(vtd_as->bus), in vtd_address_space_sync()
1654 vtd_as->devfn, &ce); in vtd_address_space_sync()
1656 if (ret == -VTD_FR_CONTEXT_ENTRY_P) { in vtd_address_space_sync()
1666 IOMMU_NOTIFIER_FOREACH(n, &vtd_as->iommu) { in vtd_address_space_sync()
1680 * 1st-level translation or 2nd-level translation, it depends
1689 if (s->root_scalable) { in vtd_dev_pt_enabled()
1712 s = as->iommu_state; in vtd_as_pt_enabled()
1713 if (vtd_dev_to_context_entry(s, pci_bus_num(as->bus), as->devfn, in vtd_as_pt_enabled()
1724 return vtd_dev_pt_enabled(s, &ce, as->pasid); in vtd_as_pt_enabled()
1736 use_iommu = as->iommu_state->dmar_enabled && !vtd_as_pt_enabled(as); in vtd_switch_address_space()
1737 pt = as->iommu_state->dmar_enabled && vtd_as_pt_enabled(as); in vtd_switch_address_space()
1739 trace_vtd_switch_address_space(pci_bus_num(as->bus), in vtd_switch_address_space()
1740 VTD_PCI_SLOT(as->devfn), in vtd_switch_address_space()
1741 VTD_PCI_FUNC(as->devfn), in vtd_switch_address_space()
1755 memory_region_set_enabled(&as->nodmar, false); in vtd_switch_address_space()
1756 memory_region_set_enabled(MEMORY_REGION(&as->iommu), true); in vtd_switch_address_space()
1758 * vt-d spec v3.4 3.14: in vtd_switch_address_space()
1761 * Requests-with-PASID with input address in range 0xFEEx_xxxx in vtd_switch_address_space()
1762 * are translated normally like any other request-with-PASID in vtd_switch_address_space()
1763 * through DMA-remapping hardware. in vtd_switch_address_space()
1768 if (as->pasid != PCI_NO_PASID) { in vtd_switch_address_space()
1769 memory_region_set_enabled(&as->iommu_ir, false); in vtd_switch_address_space()
1771 memory_region_set_enabled(&as->iommu_ir, true); in vtd_switch_address_space()
1774 memory_region_set_enabled(MEMORY_REGION(&as->iommu), false); in vtd_switch_address_space()
1775 memory_region_set_enabled(&as->nodmar, true); in vtd_switch_address_space()
1779 * vtd-spec v3.4 3.14: in vtd_switch_address_space()
1782 * Requests-with-PASID with input address in range 0xFEEx_xxxx are in vtd_switch_address_space()
1783 * translated normally like any other request-with-PASID through in vtd_switch_address_space()
1784 * DMA-remapping hardware. However, if such a request is processed in vtd_switch_address_space()
1785 * using pass-through translation, it will be blocked as described in vtd_switch_address_space()
1788 * Software must not program paging-structure entries to remap any in vtd_switch_address_space()
1798 if (pt && as->pasid != PCI_NO_PASID) { in vtd_switch_address_space()
1799 memory_region_set_enabled(&as->iommu_ir_fault, true); in vtd_switch_address_space()
1801 memory_region_set_enabled(&as->iommu_ir_fault, false); in vtd_switch_address_space()
1816 g_hash_table_iter_init(&iter, s->vtd_address_spaces); in vtd_switch_address_space_all()
1855 * only if the FPD field in the context-entry used to process the faulting
1873 uint16_t sid = PCI_BUILD_BDF(pci_bus_num(as_key->bus), as_key->devfn); in vtd_find_as_by_sid_and_pasid()
1875 return (as_key->pasid == target->pasid) && (sid == target->sid); in vtd_find_as_by_sid_and_pasid()
1887 return g_hash_table_find(s->vtd_address_spaces, in vtd_get_as_by_sid_and_pasid()
1920 * We support only 4-level page tables.
1931 * We should have caught a guest-mis-programmed level earlier, in vtd_flpte_nonzero_rsvd()
1961 uint64_t iova_limit = vtd_iova_limit(s, ce, s->aw_bits, pasid); in vtd_iova_fl_check_canonical()
1962 uint64_t upper_bits_mask = ~(iova_limit - 1); in vtd_iova_fl_check_canonical()
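First-stage translation additionally requires the IOVA to be canonical: every bit above the address width must be a copy of the highest in-range bit, which is what the upper_bits_mask above is used for. A self-contained sketch of that check:

#include <stdint.h>
#include <stdbool.h>
#include <assert.h>

static bool iova_is_canonical(uint64_t iova, uint64_t iova_limit)
{
    uint64_t upper_bits_mask = ~(iova_limit - 1);
    uint64_t upper_bits = iova & upper_bits_mask;
    bool msb = (iova & (iova_limit >> 1)) != 0;   /* highest in-range bit */

    /* Bits above the address width must all equal the highest in-range bit. */
    return msb ? (upper_bits == upper_bits_mask) : (upper_bits == 0);
}

int main(void)
{
    uint64_t limit = 1ULL << 48;                  /* 48-bit first-stage tables */

    assert(iova_is_canonical(0x00007fffffffffffULL, limit));
    assert(iova_is_canonical(0xffff800000000000ULL, limit));
    assert(!iova_is_canonical(0x0000800000000000ULL, limit));
    return 0;
}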
2005 return -VTD_FR_FS_NON_CANONICAL; in vtd_iova_to_flpte()
2012 if (flpte == (uint64_t)-1) { in vtd_iova_to_flpte()
2014 /* Invalid programming of pasid-entry */ in vtd_iova_to_flpte()
2015 return -VTD_FR_PASID_ENTRY_FSPTPTR_INV; in vtd_iova_to_flpte()
2017 return -VTD_FR_FS_PAGING_ENTRY_INV; in vtd_iova_to_flpte()
2024 return -VTD_FR_FS_PAGING_ENTRY_P; in vtd_iova_to_flpte()
2031 return -VTD_FR_FS_PAGING_ENTRY_US; in vtd_iova_to_flpte()
2037 return -VTD_FR_SM_WRITE; in vtd_iova_to_flpte()
2040 error_report_once("%s: detected flpte reserved non-zero " in vtd_iova_to_flpte()
2044 return -VTD_FR_FS_PAGING_ENTRY_RSVD; in vtd_iova_to_flpte()
2052 return -VTD_FR_FS_BIT_UPDATE_FAILED; in vtd_iova_to_flpte()
2062 level--; in vtd_iova_to_flpte()
2082 /* Map dev to context-entry then do a paging-structures walk to do an iommu
2098 IntelIOMMUState *s = vtd_as->iommu_state; in vtd_do_iommu_translate()
2103 uint32_t level, pasid = vtd_as->pasid; in vtd_do_iommu_translate()
2110 bool rid2pasid = (pasid == PCI_NO_PASID) && s->root_scalable; in vtd_do_iommu_translate()
2122 cc_entry = &vtd_as->context_cache_entry; in vtd_do_iommu_translate()
2128 trace_vtd_iotlb_page_hit(source_id, addr, iotlb_entry->pte, in vtd_do_iommu_translate()
2129 iotlb_entry->domain_id); in vtd_do_iommu_translate()
2130 pte = iotlb_entry->pte; in vtd_do_iommu_translate()
2131 access_flags = iotlb_entry->access_flags; in vtd_do_iommu_translate()
2132 page_mask = iotlb_entry->mask; in vtd_do_iommu_translate()
2137 /* Try to fetch context-entry from cache first */ in vtd_do_iommu_translate()
2138 if (cc_entry->context_cache_gen == s->context_cache_gen) { in vtd_do_iommu_translate()
2139 trace_vtd_iotlb_cc_hit(bus_num, devfn, cc_entry->context_entry.hi, in vtd_do_iommu_translate()
2140 cc_entry->context_entry.lo, in vtd_do_iommu_translate()
2141 cc_entry->context_cache_gen); in vtd_do_iommu_translate()
2142 ce = cc_entry->context_entry; in vtd_do_iommu_translate()
2144 if (!is_fpd_set && s->root_scalable) { in vtd_do_iommu_translate()
2147 vtd_report_fault(s, -ret_fr, is_fpd_set, in vtd_do_iommu_translate()
2156 if (!ret_fr && !is_fpd_set && s->root_scalable) { in vtd_do_iommu_translate()
2160 vtd_report_fault(s, -ret_fr, is_fpd_set, in vtd_do_iommu_translate()
2165 /* Update context-cache */ in vtd_do_iommu_translate()
2167 cc_entry->context_cache_gen, in vtd_do_iommu_translate()
2168 s->context_cache_gen); in vtd_do_iommu_translate()
2169 cc_entry->context_entry = ce; in vtd_do_iommu_translate()
2170 cc_entry->context_cache_gen = s->context_cache_gen; in vtd_do_iommu_translate()
2178 * We don't need to translate for pass-through context entries. in vtd_do_iommu_translate()
2182 entry->iova = addr & VTD_PAGE_MASK_4K; in vtd_do_iommu_translate()
2183 entry->translated_addr = entry->iova; in vtd_do_iommu_translate()
2184 entry->addr_mask = ~VTD_PAGE_MASK_4K; in vtd_do_iommu_translate()
2185 entry->perm = IOMMU_RW; in vtd_do_iommu_translate()
2186 trace_vtd_translate_pt(source_id, entry->iova); in vtd_do_iommu_translate()
2189 * When this happens, it means firstly caching-mode is not in vtd_do_iommu_translate()
2206 trace_vtd_iotlb_page_hit(source_id, addr, iotlb_entry->pte, in vtd_do_iommu_translate()
2207 iotlb_entry->domain_id); in vtd_do_iommu_translate()
2208 pte = iotlb_entry->pte; in vtd_do_iommu_translate()
2209 access_flags = iotlb_entry->access_flags; in vtd_do_iommu_translate()
2210 page_mask = iotlb_entry->mask; in vtd_do_iommu_translate()
2215 if (s->flts && s->root_scalable) { in vtd_do_iommu_translate()
2217 &reads, &writes, s->aw_bits, pasid); in vtd_do_iommu_translate()
2221 &reads, &writes, s->aw_bits, pasid); in vtd_do_iommu_translate()
2225 xlat = vtd_get_pte_addr(pte, s->aw_bits); in vtd_do_iommu_translate()
2229 * Per VT-d spec 4.1 section 3.15: Untranslated requests and translation in vtd_do_iommu_translate()
2234 xlat + size - 1 >= VTD_INTERRUPT_ADDR_FIRST)) { in vtd_do_iommu_translate()
2242 ret_fr = s->scalable_mode ? -VTD_FR_SM_INTERRUPT_ADDR : in vtd_do_iommu_translate()
2243 -VTD_FR_INTERRUPT_ADDR; in vtd_do_iommu_translate()
2248 vtd_report_fault(s, -ret_fr, is_fpd_set, source_id, in vtd_do_iommu_translate()
2259 entry->iova = addr & page_mask; in vtd_do_iommu_translate()
2260 entry->translated_addr = vtd_get_pte_addr(pte, s->aw_bits) & page_mask; in vtd_do_iommu_translate()
2261 entry->addr_mask = ~page_mask; in vtd_do_iommu_translate()
2262 entry->perm = access_flags; in vtd_do_iommu_translate()
2267 entry->iova = 0; in vtd_do_iommu_translate()
2268 entry->translated_addr = 0; in vtd_do_iommu_translate()
2269 entry->addr_mask = 0; in vtd_do_iommu_translate()
2270 entry->perm = IOMMU_NONE; in vtd_do_iommu_translate()
2276 s->root = vtd_get_quad_raw(s, DMAR_RTADDR_REG); in vtd_root_table_setup()
2277 s->root &= VTD_RTADDR_ADDR_MASK(s->aw_bits); in vtd_root_table_setup()
2281 trace_vtd_reg_dmar_root(s->root, s->root_scalable); in vtd_root_table_setup()
2294 s->intr_size = 1UL << ((value & VTD_IRTA_SIZE_MASK) + 1); in vtd_interrupt_remap_table_setup()
2295 s->intr_root = value & VTD_IRTA_ADDR_MASK(s->aw_bits); in vtd_interrupt_remap_table_setup()
2296 s->intr_eime = value & VTD_IRTA_EIME; in vtd_interrupt_remap_table_setup()
2301 trace_vtd_reg_ir_root(s->intr_root, s->intr_size); in vtd_interrupt_remap_table_setup()
2308 QLIST_FOREACH(vtd_as, &s->vtd_as_with_notifiers, next) { in vtd_iommu_replay_all()
2318 s->context_cache_gen++; in vtd_context_global_invalidate()
2319 if (s->context_cache_gen == VTD_CONTEXT_CACHE_GEN_MAX) { in vtd_context_global_invalidate()
2325 * From VT-d spec 6.5.2.1, a global context entry invalidation in vtd_context_global_invalidate()
2329 * VT-d emulation codes. in vtd_context_global_invalidate()
2334 /* Do a context-cache device-selective invalidation.
2369 g_hash_table_iter_init(&as_it, s->vtd_address_spaces); in vtd_context_device_invalidate()
2371 if ((pci_bus_num(vtd_as->bus) == bus_n) && in vtd_context_device_invalidate()
2372 (vtd_as->devfn & mask) == (devfn & mask)) { in vtd_context_device_invalidate()
2373 trace_vtd_inv_desc_cc_device(bus_n, VTD_PCI_SLOT(vtd_as->devfn), in vtd_context_device_invalidate()
2374 VTD_PCI_FUNC(vtd_as->devfn)); in vtd_context_device_invalidate()
2376 vtd_as->context_cache_entry.context_cache_gen = 0; in vtd_context_device_invalidate()
2387 * notifier registered - the IOMMU notification in vtd_context_device_invalidate()
2396 /* Context-cache invalidation
2441 g_hash_table_foreach_remove(s->iotlb, vtd_hash_remove_by_domain, in vtd_iotlb_domain_invalidate()
2445 QLIST_FOREACH(vtd_as, &s->vtd_as_with_notifiers, next) { in vtd_iotlb_domain_invalidate()
2446 if (!vtd_dev_to_context_entry(s, pci_bus_num(vtd_as->bus), in vtd_iotlb_domain_invalidate()
2447 vtd_as->devfn, &ce) && in vtd_iotlb_domain_invalidate()
2448 domain_id == vtd_get_domain_id(s, &ce, vtd_as->pasid)) { in vtd_iotlb_domain_invalidate()
2468 QLIST_FOREACH(vtd_as, &(s->vtd_as_with_notifiers), next) { in vtd_iotlb_page_invalidate_notify()
2469 ret = vtd_dev_to_context_entry(s, pci_bus_num(vtd_as->bus), in vtd_iotlb_page_invalidate_notify()
2470 vtd_as->devfn, &ce); in vtd_iotlb_page_invalidate_notify()
2471 if (!ret && domain_id == vtd_get_domain_id(s, &ce, vtd_as->pasid)) { in vtd_iotlb_page_invalidate_notify()
2474 if (s->root_scalable) { in vtd_iotlb_page_invalidate_notify()
2479 * In legacy mode, vtd_as->pasid == pasid is always true. in vtd_iotlb_page_invalidate_notify()
2484 if (!(vtd_as->pasid == pasid || in vtd_iotlb_page_invalidate_notify()
2485 (vtd_as->pasid == PCI_NO_PASID && pasid == rid2pasid))) { in vtd_iotlb_page_invalidate_notify()
2491 * When stage-1 translation is off, as long as we have MAP in vtd_iotlb_page_invalidate_notify()
2497 if (!s->flts || !s->root_scalable) { in vtd_iotlb_page_invalidate_notify()
2502 * For UNMAP-only notifiers, we don't need to walk the in vtd_iotlb_page_invalidate_notify()
2512 .addr_mask = size - 1, in vtd_iotlb_page_invalidate_notify()
2516 memory_region_notify_iommu(&vtd_as->iommu, 0, event); in vtd_iotlb_page_invalidate_notify()
2532 info.mask = ~((1 << am) - 1); in vtd_iotlb_page_invalidate()
2534 g_hash_table_foreach_remove(s->iotlb, vtd_hash_remove_by_page, &info); in vtd_iotlb_page_invalidate()
2590 return s->qi_enabled && (s->iq_tail == s->iq_head) && in vtd_queued_inv_disable_check()
2591 (s->iq_last_desc_type == VTD_INV_DESC_WAIT); in vtd_queued_inv_disable_check()
2601 s->iq = iqa_val & VTD_IQA_IQA_MASK(s->aw_bits); in vtd_handle_gcmd_qie()
2603 s->iq_size = 1UL << ((iqa_val & VTD_IQA_QS) + 8 - (s->iq_dw ? 1 : 0)); in vtd_handle_gcmd_qie()
2604 s->qi_enabled = true; in vtd_handle_gcmd_qie()
2605 trace_vtd_inv_qi_setup(s->iq, s->iq_size); in vtd_handle_gcmd_qie()
2606 /* Ok - report back to driver */ in vtd_handle_gcmd_qie()
2609 if (s->iq_tail != 0) { in vtd_handle_gcmd_qie()
2615 trace_vtd_warn_invalid_qi_tail(s->iq_tail); in vtd_handle_gcmd_qie()
2624 s->iq_head = 0; in vtd_handle_gcmd_qie()
2625 s->qi_enabled = false; in vtd_handle_gcmd_qie()
2626 /* Ok - report back to driver */ in vtd_handle_gcmd_qie()
2632 s->iq_head, s->iq_tail, s->iq_last_desc_type); in vtd_handle_gcmd_qie()
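vtd_handle_gcmd_qie() derives the queue size from the QS field of the IQA register: QS selects 2^QS pages of descriptors, and each 4 KiB page holds 256 128-bit descriptors or 128 of the wider 256-bit descriptors (iq_dw) used in scalable mode. A small sketch of that arithmetic:

#include <stdint.h>
#include <stdbool.h>
#include <assert.h>

/* Number of descriptors the invalidation queue can hold, given the QS
 * field of the IQA register and the descriptor width in use. */
static uint32_t inv_queue_entries(uint32_t qs, bool wide_256bit)
{
    return 1U << (qs + 8 - (wide_256bit ? 1 : 0));
}

int main(void)
{
    assert(inv_queue_entries(0, false) == 256);   /* one page of 16-byte descriptors */
    assert(inv_queue_entries(0, true) == 128);    /* one page of 32-byte descriptors */
    assert(inv_queue_entries(3, false) == 2048);  /* eight pages */
    return 0;
}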
2641 /* Ok - report back to driver */ in vtd_handle_gcmd_srtp()
2651 /* Ok - report back to driver */ in vtd_handle_gcmd_sirtp()
2658 if (s->dmar_enabled == en) { in vtd_handle_gcmd_te()
2665 s->dmar_enabled = true; in vtd_handle_gcmd_te()
2666 /* Ok - report back to driver */ in vtd_handle_gcmd_te()
2669 s->dmar_enabled = false; in vtd_handle_gcmd_te()
2672 s->next_frcd_reg = 0; in vtd_handle_gcmd_te()
2673 /* Ok - report back to driver */ in vtd_handle_gcmd_te()
2687 s->intr_enabled = true; in vtd_handle_gcmd_ire()
2688 /* Ok - report back to driver */ in vtd_handle_gcmd_ire()
2691 s->intr_enabled = false; in vtd_handle_gcmd_ire()
2692 /* Ok - report back to driver */ in vtd_handle_gcmd_ire()
2706 if ((changed & VTD_GCMD_TE) && s->dma_translation) { in vtd_handle_gcmd_write()
2711 /* Set/update the root-table pointer */ in vtd_handle_gcmd_write()
2719 /* Set/update the interrupt remapping root-table pointer */ in vtd_handle_gcmd_write()
2735 /* Context-cache invalidation request */ in vtd_handle_ccmd_write()
2737 if (s->qi_enabled) { in vtd_handle_ccmd_write()
2739 "should not use register-based invalidation"); in vtd_handle_ccmd_write()
2758 if (s->qi_enabled) { in vtd_handle_iotlb_write()
2760 "should not use register-based invalidation"); in vtd_handle_iotlb_write()
2775 dma_addr_t base_addr = s->iq; in vtd_get_inv_desc()
2776 uint32_t offset = s->iq_head; in vtd_get_inv_desc()
2777 uint32_t dw = s->iq_dw ? 32 : 16; in vtd_get_inv_desc()
2785 inv_desc->lo = le64_to_cpu(inv_desc->lo); in vtd_get_inv_desc()
2786 inv_desc->hi = le64_to_cpu(inv_desc->hi); in vtd_get_inv_desc()
2788 inv_desc->val[2] = le64_to_cpu(inv_desc->val[2]); in vtd_get_inv_desc()
2789 inv_desc->val[3] = le64_to_cpu(inv_desc->val[3]); in vtd_get_inv_desc()
2800 if (s->iq_dw) { in vtd_inv_desc_reserved_check()
2801 if (inv_desc->val[0] & mask[0] || inv_desc->val[1] & mask[1] || in vtd_inv_desc_reserved_check()
2802 inv_desc->val[2] & mask[2] || inv_desc->val[3] & mask[3]) { in vtd_inv_desc_reserved_check()
2806 func_name, desc_type, inv_desc->val[3], in vtd_inv_desc_reserved_check()
2807 inv_desc->val[2], inv_desc->val[1], in vtd_inv_desc_reserved_check()
2808 inv_desc->val[0]); in vtd_inv_desc_reserved_check()
2813 error_report("%s: 256-bit %s desc in 128-bit invalidation queue", in vtd_inv_desc_reserved_check()
2818 if (inv_desc->lo & mask[0] || inv_desc->hi & mask[1]) { in vtd_inv_desc_reserved_check()
2821 inv_desc->hi, inv_desc->lo); in vtd_inv_desc_reserved_check()
2839 if (inv_desc->lo & VTD_INV_DESC_WAIT_SW) { in vtd_process_wait_desc()
2841 uint32_t status_data = (uint32_t)(inv_desc->lo >> in vtd_process_wait_desc()
2844 assert(!(inv_desc->lo & VTD_INV_DESC_WAIT_IF)); in vtd_process_wait_desc()
2847 dma_addr_t status_addr = inv_desc->hi; in vtd_process_wait_desc()
2853 trace_vtd_inv_desc_wait_write_fail(inv_desc->hi, inv_desc->lo); in vtd_process_wait_desc()
2856 } else if (inv_desc->lo & VTD_INV_DESC_WAIT_IF) { in vtd_process_wait_desc()
2861 " (unknown type)", __func__, inv_desc->hi, in vtd_process_wait_desc()
2862 inv_desc->lo); in vtd_process_wait_desc()
2880 switch (inv_desc->lo & VTD_INV_DESC_CC_G) { in vtd_process_context_cache_desc()
2883 (uint16_t)VTD_INV_DESC_CC_DID(inv_desc->lo)); in vtd_process_context_cache_desc()
2890 sid = VTD_INV_DESC_CC_SID(inv_desc->lo); in vtd_process_context_cache_desc()
2891 fmask = VTD_INV_DESC_CC_FM(inv_desc->lo); in vtd_process_context_cache_desc()
2897 " (invalid type)", __func__, inv_desc->hi, in vtd_process_context_cache_desc()
2898 inv_desc->lo); in vtd_process_context_cache_desc()
2917 switch (inv_desc->lo & VTD_INV_DESC_IOTLB_G) { in vtd_process_iotlb_desc()
2923 domain_id = VTD_INV_DESC_IOTLB_DID(inv_desc->lo); in vtd_process_iotlb_desc()
2928 domain_id = VTD_INV_DESC_IOTLB_DID(inv_desc->lo); in vtd_process_iotlb_desc()
2929 addr = VTD_INV_DESC_IOTLB_ADDR(inv_desc->hi); in vtd_process_iotlb_desc()
2930 am = VTD_INV_DESC_IOTLB_AM(inv_desc->hi); in vtd_process_iotlb_desc()
2934 __func__, inv_desc->hi, inv_desc->lo, in vtd_process_iotlb_desc()
2944 __func__, inv_desc->hi, inv_desc->lo, in vtd_process_iotlb_desc()
2945 inv_desc->lo & VTD_INV_DESC_IOTLB_G); in vtd_process_iotlb_desc()
2957 return ((entry->domain_id == info->domain_id) && in vtd_hash_remove_by_pasid()
2958 (entry->pasid == info->pasid)); in vtd_hash_remove_by_pasid()
2972 g_hash_table_foreach_remove(s->iotlb, vtd_hash_remove_by_pasid, in vtd_piotlb_pasid_invalidate()
2976 QLIST_FOREACH(vtd_as, &s->vtd_as_with_notifiers, next) { in vtd_piotlb_pasid_invalidate()
2977 if (!vtd_dev_to_context_entry(s, pci_bus_num(vtd_as->bus), in vtd_piotlb_pasid_invalidate()
2978 vtd_as->devfn, &ce) && in vtd_piotlb_pasid_invalidate()
2979 domain_id == vtd_get_domain_id(s, &ce, vtd_as->pasid)) { in vtd_piotlb_pasid_invalidate()
2982 if ((vtd_as->pasid != PCI_NO_PASID || pasid != rid2pasid) && in vtd_piotlb_pasid_invalidate()
2983 vtd_as->pasid != pasid) { in vtd_piotlb_pasid_invalidate()
2987 if (!s->flts || !vtd_as_has_map_notifier(vtd_as)) { in vtd_piotlb_pasid_invalidate()
3002 info.mask = ~((1 << am) - 1); in vtd_piotlb_page_invalidate()
3005 g_hash_table_foreach_remove(s->iotlb, in vtd_piotlb_page_invalidate()
3028 domain_id = VTD_INV_DESC_PIOTLB_DID(inv_desc->val[0]); in vtd_process_piotlb_desc()
3029 pasid = VTD_INV_DESC_PIOTLB_PASID(inv_desc->val[0]); in vtd_process_piotlb_desc()
3030 switch (inv_desc->val[0] & VTD_INV_DESC_PIOTLB_G) { in vtd_process_piotlb_desc()
3036 am = VTD_INV_DESC_PIOTLB_AM(inv_desc->val[1]); in vtd_process_piotlb_desc()
3037 addr = (hwaddr) VTD_INV_DESC_PIOTLB_ADDR(inv_desc->val[1]); in vtd_process_piotlb_desc()
3044 __func__, inv_desc->val[1], inv_desc->val[0], in vtd_process_piotlb_desc()
3045 inv_desc->val[0] & VTD_INV_DESC_IOTLB_G); in vtd_process_piotlb_desc()
3062 trace_vtd_inv_desc_iec(inv_desc->iec.granularity, in vtd_process_inv_iec_desc()
3063 inv_desc->iec.index, in vtd_process_inv_iec_desc()
3064 inv_desc->iec.index_mask); in vtd_process_inv_iec_desc()
3066 vtd_iec_notify_all(s, !inv_desc->iec.granularity, in vtd_process_inv_iec_desc()
3067 inv_desc->iec.index, in vtd_process_inv_iec_desc()
3068 inv_desc->iec.index_mask); in vtd_process_inv_iec_desc()
3090 addr &= ~(sz - 1); in do_invalidate_device_tlb()
3096 event.entry.target_as = &vtd_dev_as->as; in do_invalidate_device_tlb()
3097 event.entry.addr_mask = sz - 1; in do_invalidate_device_tlb()
3101 memory_region_notify_iommu(&vtd_dev_as->iommu, 0, event); in do_invalidate_device_tlb()
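The addr &= ~(sz - 1) above follows the Device-TLB invalidate encoding: when the S bit is set, the run of 1s starting at address bit 12 encodes the invalidation size, and the address is then aligned down to that size. A hedged decoding sketch (helper name and constants are mine):

#include <stdint.h>
#include <assert.h>

#define PAGE_SIZE_4K   4096ULL
#define PAGE_SHIFT_4K  12

static void decode_devtlb_inv(uint64_t addr_field, uint64_t *base, uint64_t *size)
{
    uint64_t pfn = addr_field >> PAGE_SHIFT_4K;
    unsigned ones = 0;

    while (pfn & 1) {                     /* count trailing ones above bit 11 */
        ones++;
        pfn >>= 1;
    }
    *size = (PAGE_SIZE_4K * 2) << ones;   /* bit 12 clear -> 8 KiB, and so on */
    *base = addr_field & ~(*size - 1);
}

int main(void)
{
    uint64_t base, size;

    decode_devtlb_inv(0x200000, &base, &size);    /* bit 12 clear: 8 KiB */
    assert(size == 8192 && base == 0x200000);
    decode_devtlb_inv(0x201000, &base, &size);    /* bit 12 set, bit 13 clear: 16 KiB */
    assert(size == 16384 && base == 0x200000);
    return 0;
}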
3122 global = VTD_INV_DESC_PASID_DEVICE_IOTLB_GLOBAL(inv_desc->hi); in vtd_process_device_piotlb_desc()
3123 size = VTD_INV_DESC_PASID_DEVICE_IOTLB_SIZE(inv_desc->hi); in vtd_process_device_piotlb_desc()
3124 addr = VTD_INV_DESC_PASID_DEVICE_IOTLB_ADDR(inv_desc->hi); in vtd_process_device_piotlb_desc()
3125 sid = VTD_INV_DESC_PASID_DEVICE_IOTLB_SID(inv_desc->lo); in vtd_process_device_piotlb_desc()
3127 QLIST_FOREACH(vtd_dev_as, &s->vtd_as_with_notifiers, next) { in vtd_process_device_piotlb_desc()
3128 if ((vtd_dev_as->pasid != PCI_NO_PASID) && in vtd_process_device_piotlb_desc()
3129 (PCI_BUILD_BDF(pci_bus_num(vtd_dev_as->bus), in vtd_process_device_piotlb_desc()
3130 vtd_dev_as->devfn) == sid)) { in vtd_process_device_piotlb_desc()
3135 pasid = VTD_INV_DESC_PASID_DEVICE_IOTLB_PASID(inv_desc->lo); in vtd_process_device_piotlb_desc()
3159 __func__, "dev-iotlb inv")) { in vtd_process_device_iotlb_desc()
3163 addr = VTD_INV_DESC_DEVICE_IOTLB_ADDR(inv_desc->hi); in vtd_process_device_iotlb_desc()
3164 sid = VTD_INV_DESC_DEVICE_IOTLB_SID(inv_desc->lo); in vtd_process_device_iotlb_desc()
3165 size = VTD_INV_DESC_DEVICE_IOTLB_SIZE(inv_desc->hi); in vtd_process_device_iotlb_desc()
3187 trace_vtd_inv_qi_head(s->iq_head); in vtd_process_inv_desc()
3189 s->iq_last_desc_type = VTD_INV_DESC_NONE; in vtd_process_inv_desc()
3195 s->iq_last_desc_type = desc_type; in vtd_process_inv_desc()
3199 trace_vtd_inv_desc("context-cache", inv_desc.hi, inv_desc.lo); in vtd_process_inv_desc()
3213 trace_vtd_inv_desc("p-iotlb", inv_desc.val[1], inv_desc.val[0]); in vtd_process_inv_desc()
3234 trace_vtd_inv_desc("device-piotlb", inv_desc.hi, inv_desc.lo); in vtd_process_inv_desc()
3253 if (s->scalable_mode) { in vtd_process_inv_desc()
3263 s->iq_head++; in vtd_process_inv_desc()
3264 if (s->iq_head == s->iq_size) { in vtd_process_inv_desc()
3265 s->iq_head = 0; in vtd_process_inv_desc()
3275 /* Refer to 10.4.23 of VT-d spec 3.0 */ in vtd_fetch_inv_desc()
3276 qi_shift = s->iq_dw ? VTD_IQH_QH_SHIFT_5 : VTD_IQH_QH_SHIFT_4; in vtd_fetch_inv_desc()
3280 if (s->iq_tail >= s->iq_size) { in vtd_fetch_inv_desc()
3284 __func__, s->iq_tail, s->iq_size); in vtd_fetch_inv_desc()
3288 while (s->iq_head != s->iq_tail) { in vtd_fetch_inv_desc()
3296 (((uint64_t)(s->iq_head)) << qi_shift) & in vtd_fetch_inv_desc()
3306 if (s->iq_dw && (val & VTD_IQT_QT_256_RSV_BIT)) { in vtd_handle_iqt_write()
3312 s->iq_tail = VTD_IQT_QT(s->iq_dw, val); in vtd_handle_iqt_write()
3313 trace_vtd_inv_qi_tail(s->iq_tail); in vtd_handle_iqt_write()
3315 if (s->qi_enabled && !(vtd_get_long_raw(s, DMAR_FSTS_REG) & VTD_FSTS_IQE)) { in vtd_handle_iqt_write()
3391 return (uint64_t)-1; in vtd_mem_read()
3395 /* Root Table Address Register, 64-bit */ in vtd_mem_read()
3399 val = val & ((1ULL << 32) - 1); in vtd_mem_read()
3408 /* Invalidation Queue Address Register, 64-bit */ in vtd_mem_read()
3410 val = s->iq | in vtd_mem_read()
3414 val = val & ((1ULL << 32) - 1); in vtd_mem_read()
3420 val = s->iq >> 32; in vtd_mem_read()
3448 /* Global Command Register, 32-bit */ in vtd_mem_write()
3454 /* Context Command Register, 64-bit */ in vtd_mem_write()
3470 /* IOTLB Invalidation Register, 64-bit */ in vtd_mem_write()
3486 /* Invalidate Address Register, 64-bit */ in vtd_mem_write()
3500 /* Fault Status Register, 32-bit */ in vtd_mem_write()
3507 /* Fault Event Control Register, 32-bit */ in vtd_mem_write()
3514 /* Fault Event Data Register, 32-bit */ in vtd_mem_write()
3520 /* Fault Event Address Register, 32-bit */ in vtd_mem_write()
3526 * While the register is 32-bit only, some guests (Xen...) write to in vtd_mem_write()
3527 * it with 64-bit. in vtd_mem_write()
3533 /* Fault Event Upper Address Register, 32-bit */ in vtd_mem_write()
3539 /* Protected Memory Enable Register, 32-bit */ in vtd_mem_write()
3545 /* Root Table Address Register, 64-bit */ in vtd_mem_write()
3559 /* Invalidation Queue Tail Register, 64-bit */ in vtd_mem_write()
3575 /* Invalidation Queue Address Register, 64-bit */ in vtd_mem_write()
3590 /* Invalidation Completion Status Register, 32-bit */ in vtd_mem_write()
3597 /* Invalidation Event Control Register, 32-bit */ in vtd_mem_write()
3604 /* Invalidation Event Data Register, 32-bit */ in vtd_mem_write()
3610 /* Invalidation Event Address Register, 32-bit */ in vtd_mem_write()
3616 /* Invalidation Event Upper Address Register, 32-bit */ in vtd_mem_write()
3622 /* Fault Recording Registers, 128-bit */ in vtd_mem_write()
3679 IntelIOMMUState *s = vtd_as->iommu_state; in vtd_iommu_translate()
3686 if (likely(s->dmar_enabled)) { in vtd_iommu_translate()
3687 success = vtd_do_iommu_translate(vtd_as, vtd_as->bus, vtd_as->devfn, in vtd_iommu_translate()
3690 /* DMAR disabled, passthrough, use 4k-page */ in vtd_iommu_translate()
3699 trace_vtd_dmar_translate(pci_bus_num(vtd_as->bus), in vtd_iommu_translate()
3700 VTD_PCI_SLOT(vtd_as->devfn), in vtd_iommu_translate()
3701 VTD_PCI_FUNC(vtd_as->devfn), in vtd_iommu_translate()
3707 __func__, pci_bus_num(vtd_as->bus), in vtd_iommu_translate()
3708 VTD_PCI_SLOT(vtd_as->devfn), in vtd_iommu_translate()
3709 VTD_PCI_FUNC(vtd_as->devfn), in vtd_iommu_translate()
3722 IntelIOMMUState *s = vtd_as->iommu_state; in vtd_iommu_notify_flag_changed()
3726 if (s->snoop_control) { in vtd_iommu_notify_flag_changed()
3729 return -ENOTSUP; in vtd_iommu_notify_flag_changed()
3731 if (!s->caching_mode && (new & IOMMU_NOTIFIER_MAP)) { in vtd_iommu_notify_flag_changed()
3734 pci_bus_num(vtd_as->bus), PCI_SLOT(vtd_as->devfn), in vtd_iommu_notify_flag_changed()
3735 PCI_FUNC(vtd_as->devfn)); in vtd_iommu_notify_flag_changed()
3736 return -ENOTSUP; in vtd_iommu_notify_flag_changed()
3738 if (!x86_iommu->dt_supported && (new & IOMMU_NOTIFIER_DEVIOTLB_UNMAP)) { in vtd_iommu_notify_flag_changed()
3741 pci_bus_num(vtd_as->bus), PCI_SLOT(vtd_as->devfn), in vtd_iommu_notify_flag_changed()
3742 PCI_FUNC(vtd_as->devfn)); in vtd_iommu_notify_flag_changed()
3743 return -ENOTSUP; in vtd_iommu_notify_flag_changed()
3746 /* Update per-address-space notifier flags */ in vtd_iommu_notify_flag_changed()
3747 vtd_as->notifier_flags = new; in vtd_iommu_notify_flag_changed()
3750 QLIST_INSERT_HEAD(&s->vtd_as_with_notifiers, vtd_as, next); in vtd_iommu_notify_flag_changed()
3783 .name = "iommu-intel",
3799 VMSTATE_UNUSED(1), /* bool root_extended is obsolete by VT-d */
3826 DEFINE_PROP_BOOL("x-buggy-eim", IntelIOMMUState, buggy_eim, false),
3827 DEFINE_PROP_UINT8("aw-bits", IntelIOMMUState, aw_bits,
3829 DEFINE_PROP_BOOL("caching-mode", IntelIOMMUState, caching_mode, FALSE),
3830 DEFINE_PROP_BOOL("x-scalable-mode", IntelIOMMUState, scalable_mode, FALSE),
3831 DEFINE_PROP_BOOL("x-flts", IntelIOMMUState, flts, FALSE),
3832 DEFINE_PROP_BOOL("snoop-control", IntelIOMMUState, snoop_control, false),
3833 DEFINE_PROP_BOOL("x-pasid-mode", IntelIOMMUState, pasid, false),
3834 DEFINE_PROP_BOOL("dma-drain", IntelIOMMUState, dma_drain, true),
3835 DEFINE_PROP_BOOL("dma-translation", IntelIOMMUState, dma_translation, true),
3836 DEFINE_PROP_BOOL("stale-tm", IntelIOMMUState, stale_tm, false),
3851 if (index >= iommu->intr_size) { in vtd_irte_get()
3860 addr = iommu->intr_root + index * sizeof(*entry); in vtd_irte_get()
3871 entry->data[0] = le64_to_cpu(entry->data[0]); in vtd_irte_get()
3872 entry->data[1] = le64_to_cpu(entry->data[1]); in vtd_irte_get()
3874 trace_vtd_ir_irte_get(index, entry->data[1], entry->data[0]); in vtd_irte_get()
3882 if (entry->irte.fault_disable) { in vtd_irte_get()
3886 if (!entry->irte.present) { in vtd_irte_get()
3887 error_report_once("%s: detected non-present IRTE " in vtd_irte_get()
3888 "(index=%u, high=0x%" PRIx64 ", low=0x%" PRIx64 ")", in vtd_irte_get()
3889 __func__, index, entry->data[1], entry->data[0]); in vtd_irte_get()
3896 if (entry->irte.__reserved_0 || entry->irte.__reserved_1 || in vtd_irte_get()
3897 entry->irte.__reserved_2) { in vtd_irte_get()
3898 error_report_once("%s: detected non-zero reserved IRTE " in vtd_irte_get()
3899 "(index=%u, high=0x%" PRIx64 ", low=0x%" PRIx64 ")", in vtd_irte_get()
3900 __func__, index, entry->data[1], entry->data[0]); in vtd_irte_get()
3909 source_id = entry->irte.source_id; in vtd_irte_get()
3910 switch (entry->irte.sid_vtype) { in vtd_irte_get()
3915 mask = vtd_svt_mask[entry->irte.sid_q]; in vtd_irte_get()
3945 index, entry->irte.sid_vtype); in vtd_irte_get()
3967 irq->trigger_mode = irte.irte.trigger_mode; in vtd_remap_irq_get()
3968 irq->vector = irte.irte.vector; in vtd_remap_irq_get()
3969 irq->delivery_mode = irte.irte.delivery_mode; in vtd_remap_irq_get()
3970 irq->dest = irte.irte.dest_id; in vtd_remap_irq_get()
3971 if (!iommu->intr_eime) { in vtd_remap_irq_get()
3974 irq->dest = (irq->dest & VTD_IR_APIC_DEST_MASK) >> in vtd_remap_irq_get()
3977 irq->dest_mode = irte.irte.dest_mode; in vtd_remap_irq_get()
3978 irq->redir_hint = irte.irte.redir_hint; in vtd_remap_irq_get()
3980 trace_vtd_ir_remap(index, irq->trigger_mode, irq->vector, in vtd_remap_irq_get()
3981 irq->delivery_mode, irq->dest, irq->dest_mode); in vtd_remap_irq_get()
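After vtd_remap_irq_get() has pulled vector, delivery mode, destination and trigger mode out of the IRTE, they are packed back into an MSI address/data pair before delivery. A hedged sketch of the xAPIC-style (non-EIME, 8-bit destination) composition; the field positions follow the usual apic-msidef.h layout, but this is not QEMU's code:

#include <stdint.h>
#include <stdio.h>

#define MSI_ADDR_BASE                 0xfee00000u
#define MSI_ADDR_DEST_ID_SHIFT        12
#define MSI_ADDR_DEST_MODE_SHIFT      2
#define MSI_ADDR_REDIRECTION_SHIFT    3
#define MSI_DATA_DELIVERY_MODE_SHIFT  8
#define MSI_DATA_TRIGGER_SHIFT        15

static void compose_msi(uint8_t dest, uint8_t vector, uint8_t delivery_mode,
                        int dest_mode, int redir_hint, int trigger_mode,
                        uint32_t *address, uint32_t *data)
{
    *address = MSI_ADDR_BASE |
               ((uint32_t)dest << MSI_ADDR_DEST_ID_SHIFT) |
               ((uint32_t)dest_mode << MSI_ADDR_DEST_MODE_SHIFT) |
               ((uint32_t)redir_hint << MSI_ADDR_REDIRECTION_SHIFT);
    *data = vector |
            ((uint32_t)delivery_mode << MSI_DATA_DELIVERY_MODE_SHIFT) |
            ((uint32_t)trigger_mode << MSI_DATA_TRIGGER_SHIFT);
}

int main(void)
{
    uint32_t address, data;

    compose_msi(0x01, 0x41, 0 /* fixed */, 0 /* physical */,
                0 /* no redirection hint */, 0 /* edge */, &address, &data);
    printf("addr=0x%08x data=0x%08x\n", address, data);
    return 0;
}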
3986 /* Interrupt remapping for MSI/MSI-X entry */
3998 trace_vtd_ir_remap_msi_req(origin->address, origin->data); in vtd_interrupt_remap_msi()
4000 if (!iommu || !iommu->intr_enabled) { in vtd_interrupt_remap_msi()
4005 if (origin->address & VTD_MSI_ADDR_HI_MASK) { in vtd_interrupt_remap_msi()
4006 error_report_once("%s: MSI address high 32 bits non-zero detected: " in vtd_interrupt_remap_msi()
4007 "address=0x%" PRIx64, __func__, origin->address); in vtd_interrupt_remap_msi()
4011 return -EINVAL; in vtd_interrupt_remap_msi()
4014 addr.data = origin->address & VTD_MSI_ADDR_LO_MASK; in vtd_interrupt_remap_msi()
4021 return -EINVAL; in vtd_interrupt_remap_msi()
4036 /* See VT-d spec 5.1.2.2 and 5.1.3 on subhandle */ in vtd_interrupt_remap_msi()
4037 index += origin->data & VTD_IR_MSI_DATA_SUBHANDLE; in vtd_interrupt_remap_msi()
4041 return -EINVAL; in vtd_interrupt_remap_msi()
4046 if (origin->data & VTD_IR_MSI_DATA_RESERVED) { in vtd_interrupt_remap_msi()
4050 __func__, sid, origin->address, origin->data); in vtd_interrupt_remap_msi()
4054 return -EINVAL; in vtd_interrupt_remap_msi()
4057 uint8_t vector = origin->data & 0xff; in vtd_interrupt_remap_msi()
4058 uint8_t trigger_mode = (origin->data >> MSI_DATA_TRIGGER_SHIFT) & 0x1; in vtd_interrupt_remap_msi()
4062 * (see vt-d spec 5.1.5.1). */ in vtd_interrupt_remap_msi()
4068 * (see vt-d spec 5.1.5.1). */ in vtd_interrupt_remap_msi()
4085 trace_vtd_ir_remap_msi(origin->address, origin->data, in vtd_interrupt_remap_msi()
4086 translated->address, translated->data); in vtd_interrupt_remap_msi()
4126 apic_get_class(NULL)->send_msi(&to); in vtd_mem_ir_write()
4148 IntelIOMMUState *s = vtd_as->iommu_state; in vtd_report_ir_illegal_access()
4149 uint8_t bus_n = pci_bus_num(vtd_as->bus); in vtd_report_ir_illegal_access()
4150 uint16_t sid = PCI_BUILD_BDF(bus_n, vtd_as->devfn); in vtd_report_ir_illegal_access()
4154 assert(vtd_as->pasid != PCI_NO_PASID); in vtd_report_ir_illegal_access()
4157 if (vtd_dev_to_context_entry(s, bus_n, vtd_as->devfn, &ce) == 0) { in vtd_report_ir_illegal_access()
4159 if (!is_fpd_set && s->root_scalable) { in vtd_report_ir_illegal_access()
4160 vtd_ce_get_pasid_fpd(s, &ce, &is_fpd_set, vtd_as->pasid); in vtd_report_ir_illegal_access()
4166 true, vtd_as->pasid); in vtd_report_ir_illegal_access()
4216 vtd_dev_as = g_hash_table_lookup(s->vtd_address_spaces, &key); in vtd_find_add_as()
4220 new_key->bus = bus; in vtd_find_add_as()
4221 new_key->devfn = devfn; in vtd_find_add_as()
4222 new_key->pasid = pasid; in vtd_find_add_as()
4225 snprintf(name, sizeof(name), "vtd-%02x.%x", PCI_SLOT(devfn), in vtd_find_add_as()
4228 snprintf(name, sizeof(name), "vtd-%02x.%x-pasid-%x", PCI_SLOT(devfn), in vtd_find_add_as()
4234 vtd_dev_as->bus = bus; in vtd_find_add_as()
4235 vtd_dev_as->devfn = (uint8_t)devfn; in vtd_find_add_as()
4236 vtd_dev_as->pasid = pasid; in vtd_find_add_as()
4237 vtd_dev_as->iommu_state = s; in vtd_find_add_as()
4238 vtd_dev_as->context_cache_entry.context_cache_gen = 0; in vtd_find_add_as()
4239 vtd_dev_as->iova_tree = iova_tree_new(); in vtd_find_add_as()
4241 memory_region_init(&vtd_dev_as->root, OBJECT(s), name, UINT64_MAX); in vtd_find_add_as()
4242 address_space_init(&vtd_dev_as->as, &vtd_dev_as->root, "vtd-root"); in vtd_find_add_as()
4245 * Build the DMAR-disabled container with aliases to the in vtd_find_add_as()
4253 memory_region_init_alias(&vtd_dev_as->nodmar, OBJECT(s), in vtd_find_add_as()
4254 "vtd-nodmar", &s->mr_nodmar, 0, in vtd_find_add_as()
4255 memory_region_size(&s->mr_nodmar)); in vtd_find_add_as()
4258 * Build the per-device DMAR-enabled container. in vtd_find_add_as()
4260 * TODO: currently we have per-device IOMMU memory region only in vtd_find_add_as()
4261 * because we have per-device IOMMU notifiers for devices. If in vtd_find_add_as()
4267 strcat(name, "-dmar"); in vtd_find_add_as()
4268 memory_region_init_iommu(&vtd_dev_as->iommu, sizeof(vtd_dev_as->iommu), in vtd_find_add_as()
4271 memory_region_init_alias(&vtd_dev_as->iommu_ir, OBJECT(s), "vtd-ir", in vtd_find_add_as()
4272 &s->mr_ir, 0, memory_region_size(&s->mr_ir)); in vtd_find_add_as()
4273 memory_region_add_subregion_overlap(MEMORY_REGION(&vtd_dev_as->iommu), in vtd_find_add_as()
4275 &vtd_dev_as->iommu_ir, 1); in vtd_find_add_as()
4284 memory_region_init_io(&vtd_dev_as->iommu_ir_fault, OBJECT(s), in vtd_find_add_as()
4285 &vtd_mem_ir_fault_ops, vtd_dev_as, "vtd-no-ir", in vtd_find_add_as()
4288 * Hook to root since when PT is enabled vtd_dev_as->iommu in vtd_find_add_as()
4291 memory_region_add_subregion_overlap(MEMORY_REGION(&vtd_dev_as->root), in vtd_find_add_as()
4293 &vtd_dev_as->iommu_ir_fault, 2); in vtd_find_add_as()
4298 * corresponding sub-containers in vtd_find_add_as()
4300 memory_region_add_subregion_overlap(&vtd_dev_as->root, 0, in vtd_find_add_as()
4301 MEMORY_REGION(&vtd_dev_as->iommu), in vtd_find_add_as()
4303 memory_region_add_subregion_overlap(&vtd_dev_as->root, 0, in vtd_find_add_as()
4304 &vtd_dev_as->nodmar, 0); in vtd_find_add_as()
4308 g_hash_table_insert(s->vtd_address_spaces, new_key, vtd_dev_as); in vtd_find_add_as()
4319 if (!hiodc->get_cap) { in vtd_check_hiod()
4325 ret = hiodc->get_cap(hiod, HOST_IOMMU_DEVICE_CAP_AW_BITS, errp); in vtd_check_hiod()
4329 if (s->aw_bits > ret) { in vtd_check_hiod()
4330 error_setg(errp, "aw-bits %d > host aw-bits %d", s->aw_bits, ret); in vtd_check_hiod()
4334 if (!s->flts) { in vtd_check_hiod()
4335 /* All checks requested by VTD stage-2 translation pass */ in vtd_check_hiod()
4339 error_setg(errp, "host device is incompatible with stage-1 translation"); in vtd_check_hiod()
4357 if (g_hash_table_lookup(s->vtd_host_iommu_dev, &key)) { in vtd_dev_set_iommu_device()
4369 new_key->bus = bus; in vtd_dev_set_iommu_device()
4370 new_key->devfn = devfn; in vtd_dev_set_iommu_device()
4373 g_hash_table_insert(s->vtd_host_iommu_dev, new_key, hiod); in vtd_dev_set_iommu_device()
4390 if (!g_hash_table_lookup(s->vtd_host_iommu_dev, &key)) { in vtd_dev_unset_iommu_device()
4395 g_hash_table_remove(s->vtd_host_iommu_dev, &key); in vtd_dev_unset_iommu_device()
4404 hwaddr start = n->start; in vtd_address_space_unmap()
4405 hwaddr end = n->end; in vtd_address_space_unmap()
4406 IntelIOMMUState *s = as->iommu_state; in vtd_address_space_unmap()
4412 * VT-d spec), otherwise we need to consider overflow of 64 bits. in vtd_address_space_unmap()
4415 if (end > VTD_ADDRESS_SIZE(s->aw_bits) - 1) { in vtd_address_space_unmap()
4418 * VT-d supported address space size in vtd_address_space_unmap()
4420 end = VTD_ADDRESS_SIZE(s->aw_bits) - 1; in vtd_address_space_unmap()
4424 total = remain = end - start + 1; in vtd_address_space_unmap()
4428 uint64_t mask = dma_aligned_pow2_mask(start, end, s->aw_bits); in vtd_address_space_unmap()
4444 remain -= size; in vtd_address_space_unmap()
4449 trace_vtd_as_unmap_whole(pci_bus_num(as->bus), in vtd_address_space_unmap()
4450 VTD_PCI_SLOT(as->devfn), in vtd_address_space_unmap()
4451 VTD_PCI_FUNC(as->devfn), in vtd_address_space_unmap()
4452 n->start, total); in vtd_address_space_unmap()
4454 map.iova = n->start; in vtd_address_space_unmap()
4455 map.size = total - 1; /* Inclusive */ in vtd_address_space_unmap()
4456 iova_tree_remove(as->iova_tree, map); in vtd_address_space_unmap()
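vtd_address_space_unmap() cannot always report [start, end] with a single notification, because an IOMMU notifier event must cover a naturally aligned power-of-two region; the loop above therefore cuts the range into such chunks (dma_aligned_pow2_mask() computes each chunk's mask). A stand-in sketch of the chunking:

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

/* Largest naturally aligned power-of-two chunk that starts at @start and
 * stays within @end (inclusive). */
static uint64_t aligned_chunk_size(uint64_t start, uint64_t end)
{
    uint64_t len = end - start + 1;
    uint64_t size = 1;

    while ((size << 1) != 0 &&
           (start & ((size << 1) - 1)) == 0 &&
           (size << 1) <= len) {
        size <<= 1;
    }
    return size;
}

int main(void)
{
    uint64_t start = 0x1000, end = 0xadfff;   /* arbitrary inclusive range */

    while (start <= end) {
        uint64_t size = aligned_chunk_size(start, end);

        printf("unmap [0x%" PRIx64 ", 0x%" PRIx64 "]\n", start, start + size - 1);
        start += size;
    }
    return 0;
}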
4464 QLIST_FOREACH(vtd_as, &s->vtd_as_with_notifiers, next) { in vtd_address_space_unmap_all()
4465 IOMMU_NOTIFIER_FOREACH(n, &vtd_as->iommu) { in vtd_address_space_unmap_all()
4486 IntelIOMMUState *s = vtd_as->iommu_state; in vtd_iommu_replay()
4487 uint8_t bus_n = pci_bus_num(vtd_as->bus); in vtd_iommu_replay()
4491 /* replay is protected by BQL, page walk will re-setup it safely */ in vtd_iommu_replay()
4492 iova_tree_remove(vtd_as->iova_tree, map); in vtd_iommu_replay()
4494 if (vtd_dev_to_context_entry(s, bus_n, vtd_as->devfn, &ce) == 0) { in vtd_iommu_replay()
4495 trace_vtd_replay_ce_valid(s->root_scalable ? "scalable mode" : in vtd_iommu_replay()
4497 bus_n, PCI_SLOT(vtd_as->devfn), in vtd_iommu_replay()
4498 PCI_FUNC(vtd_as->devfn), in vtd_iommu_replay()
4499 vtd_get_domain_id(s, &ce, vtd_as->pasid), in vtd_iommu_replay()
4501 if (n->notifier_flags & IOMMU_NOTIFIER_MAP) { in vtd_iommu_replay()
4507 .aw = s->aw_bits, in vtd_iommu_replay()
4509 .domain_id = vtd_get_domain_id(s, &ce, vtd_as->pasid), in vtd_iommu_replay()
4512 vtd_page_walk(s, &ce, 0, ~0ULL, &info, vtd_as->pasid); in vtd_iommu_replay()
4515 trace_vtd_replay_ce_invalid(bus_n, PCI_SLOT(vtd_as->devfn), in vtd_iommu_replay()
4516 PCI_FUNC(vtd_as->devfn)); in vtd_iommu_replay()
4526 s->cap = VTD_CAP_FRO | VTD_CAP_NFR | VTD_CAP_ND | in vtd_cap_init()
4528 VTD_CAP_MGAW(s->aw_bits); in vtd_cap_init()
4529 if (s->dma_drain) { in vtd_cap_init()
4530 s->cap |= VTD_CAP_DRAIN; in vtd_cap_init()
4532 if (s->dma_translation) { in vtd_cap_init()
4533 if (s->aw_bits >= VTD_HOST_AW_39BIT) { in vtd_cap_init()
4534 s->cap |= VTD_CAP_SAGAW_39bit; in vtd_cap_init()
4536 if (s->aw_bits >= VTD_HOST_AW_48BIT) { in vtd_cap_init()
4537 s->cap |= VTD_CAP_SAGAW_48bit; in vtd_cap_init()
4540 s->ecap = VTD_ECAP_QI | VTD_ECAP_IRO; in vtd_cap_init()
4543 s->ecap |= VTD_ECAP_IR | VTD_ECAP_MHMV; in vtd_cap_init()
4544 if (s->intr_eim == ON_OFF_AUTO_ON) { in vtd_cap_init()
4545 s->ecap |= VTD_ECAP_EIM; in vtd_cap_init()
4547 assert(s->intr_eim != ON_OFF_AUTO_AUTO); in vtd_cap_init()
4550 if (x86_iommu->dt_supported) { in vtd_cap_init()
4551 s->ecap |= VTD_ECAP_DT; in vtd_cap_init()
4554 if (x86_iommu->pt_supported) { in vtd_cap_init()
4555 s->ecap |= VTD_ECAP_PT; in vtd_cap_init()
4558 if (s->caching_mode) { in vtd_cap_init()
4559 s->cap |= VTD_CAP_CM; in vtd_cap_init()
4563 if (s->flts) { in vtd_cap_init()
4564 s->ecap |= VTD_ECAP_SMTS | VTD_ECAP_FLTS; in vtd_cap_init()
4565 if (s->fs1gp) { in vtd_cap_init()
4566 s->cap |= VTD_CAP_FS1GP; in vtd_cap_init()
4568 } else if (s->scalable_mode) { in vtd_cap_init()
4569 s->ecap |= VTD_ECAP_SMTS | VTD_ECAP_SRS | VTD_ECAP_SLTS; in vtd_cap_init()
4572 if (s->snoop_control) { in vtd_cap_init()
4573 s->ecap |= VTD_ECAP_SC; in vtd_cap_init()
4576 if (s->pasid) { in vtd_cap_init()
4577 s->ecap |= VTD_ECAP_PASID; in vtd_cap_init()
4589 memset(s->csr, 0, DMAR_REG_SIZE); in vtd_init()
4590 memset(s->wmask, 0, DMAR_REG_SIZE); in vtd_init()
4591 memset(s->w1cmask, 0, DMAR_REG_SIZE); in vtd_init()
4592 memset(s->womask, 0, DMAR_REG_SIZE); in vtd_init()
4594 s->root = 0; in vtd_init()
4595 s->root_scalable = false; in vtd_init()
4596 s->dmar_enabled = false; in vtd_init()
4597 s->intr_enabled = false; in vtd_init()
4598 s->iq_head = 0; in vtd_init()
4599 s->iq_tail = 0; in vtd_init()
4600 s->iq = 0; in vtd_init()
4601 s->iq_size = 0; in vtd_init()
4602 s->qi_enabled = false; in vtd_init()
4603 s->iq_last_desc_type = VTD_INV_DESC_NONE; in vtd_init()
4604 s->iq_dw = false; in vtd_init()
4605 s->next_frcd_reg = 0; in vtd_init()
4613 vtd_spte_rsvd[1] = VTD_SPTE_PAGE_L1_RSVD_MASK(s->aw_bits, in vtd_init()
4614 x86_iommu->dt_supported && s->stale_tm); in vtd_init()
4615 vtd_spte_rsvd[2] = VTD_SPTE_PAGE_L2_RSVD_MASK(s->aw_bits); in vtd_init()
4616 vtd_spte_rsvd[3] = VTD_SPTE_PAGE_L3_RSVD_MASK(s->aw_bits); in vtd_init()
4617 vtd_spte_rsvd[4] = VTD_SPTE_PAGE_L4_RSVD_MASK(s->aw_bits); in vtd_init()
4619 vtd_spte_rsvd_large[2] = VTD_SPTE_LPAGE_L2_RSVD_MASK(s->aw_bits, in vtd_init()
4620 x86_iommu->dt_supported && s->stale_tm); in vtd_init()
4621 vtd_spte_rsvd_large[3] = VTD_SPTE_LPAGE_L3_RSVD_MASK(s->aw_bits, in vtd_init()
4622 x86_iommu->dt_supported && s->stale_tm); in vtd_init()
4628 vtd_fpte_rsvd[1] = VTD_FPTE_PAGE_L1_RSVD_MASK(s->aw_bits); in vtd_init()
4629 vtd_fpte_rsvd[2] = VTD_FPTE_PAGE_L2_RSVD_MASK(s->aw_bits); in vtd_init()
4630 vtd_fpte_rsvd[3] = VTD_FPTE_PAGE_L3_RSVD_MASK(s->aw_bits); in vtd_init()
4631 vtd_fpte_rsvd[4] = VTD_FPTE_PAGE_L4_RSVD_MASK(s->aw_bits); in vtd_init()
4633 vtd_fpte_rsvd_large[2] = VTD_FPTE_LPAGE_L2_RSVD_MASK(s->aw_bits); in vtd_init()
4634 vtd_fpte_rsvd_large[3] = VTD_FPTE_LPAGE_L3_RSVD_MASK(s->aw_bits); in vtd_init()
4636 if (s->scalable_mode || s->snoop_control) { in vtd_init()
4646 vtd_define_quad(s, DMAR_CAP_REG, s->cap, 0, 0); in vtd_init()
4647 vtd_define_quad(s, DMAR_ECAP_REG, s->ecap, 0, 0); in vtd_init()
4687 /* Fault Recording Registers, 128-bit */ in vtd_init()
4717 return &vtd_as->as; in vtd_host_dma_iommu()
4730 if (s->intr_eim == ON_OFF_AUTO_ON && !x86_iommu_ir_supported(x86_iommu)) { in vtd_decide_config()
4735 if (s->intr_eim == ON_OFF_AUTO_AUTO) { in vtd_decide_config()
4736 s->intr_eim = (kvm_irqchip_in_kernel() || s->buggy_eim) in vtd_decide_config()
4740 if (s->intr_eim == ON_OFF_AUTO_ON && !s->buggy_eim) { in vtd_decide_config()
4748 if (!s->scalable_mode && s->flts) { in vtd_decide_config()
4749 error_setg(errp, "x-flts is only available in scalable mode"); in vtd_decide_config()
4753 if (!s->flts && s->aw_bits != VTD_HOST_AW_39BIT && in vtd_decide_config()
4754 s->aw_bits != VTD_HOST_AW_48BIT) { in vtd_decide_config()
4755 error_setg(errp, "%s: supported values for aw-bits are: %d, %d", in vtd_decide_config()
4756 s->scalable_mode ? "Scalable mode(flts=off)" : "Legacy mode", in vtd_decide_config()
4761 if (s->flts && s->aw_bits != VTD_HOST_AW_48BIT) { in vtd_decide_config()
4763 "Scalable mode(flts=on): supported value for aw-bits is: %d", in vtd_decide_config()
4768 if (s->scalable_mode && !s->dma_drain) { in vtd_decide_config()
4773 if (s->pasid && !s->scalable_mode) { in vtd_decide_config()
4786 * We hard-code this here because vfio-pci is the only special case in vtd_machine_done_notify_one()
4790 if (object_dynamic_cast(child, "vfio-pci") && !iommu->caching_mode) { in vtd_machine_done_notify_one()
4812 PCIBus *bus = pcms->pcibus; in vtd_realize()
4816 if (s->pasid && x86_iommu->dt_supported) { in vtd_realize()
4818 * PASID-based-Device-TLB Invalidate Descriptor is not in vtd_realize()
4830 QLIST_INIT(&s->vtd_as_with_notifiers); in vtd_realize()
4831 qemu_mutex_init(&s->iommu_lock); in vtd_realize()
4832 memory_region_init_io(&s->csrmem, OBJECT(s), &vtd_mem_ops, s, in vtd_realize()
4835 Q35_HOST_BRIDGE_IOMMU_ADDR, &s->csrmem); in vtd_realize()
4838 memory_region_init(&s->mr_nodmar, OBJECT(s), "vtd-nodmar", in vtd_realize()
4840 memory_region_init_io(&s->mr_ir, OBJECT(s), &vtd_mem_ir_ops, in vtd_realize()
4841 s, "vtd-ir", VTD_INTERRUPT_ADDR_SIZE); in vtd_realize()
4842 memory_region_init_alias(&s->mr_sys_alias, OBJECT(s), in vtd_realize()
4843 "vtd-sys-alias", get_system_memory(), 0, in vtd_realize()
4845 memory_region_add_subregion_overlap(&s->mr_nodmar, 0, in vtd_realize()
4846 &s->mr_sys_alias, 0); in vtd_realize()
4847 memory_region_add_subregion_overlap(&s->mr_nodmar, in vtd_realize()
4849 &s->mr_ir, 1); in vtd_realize()
4851 s->iotlb = g_hash_table_new_full(vtd_iotlb_hash, vtd_iotlb_equal, in vtd_realize()
4853 s->vtd_address_spaces = g_hash_table_new_full(vtd_as_hash, vtd_as_equal, in vtd_realize()
4855 s->vtd_host_iommu_dev = g_hash_table_new_full(vtd_hiod_hash, vtd_hiod_equal, in vtd_realize()
4860 x86ms->ioapic_as = vtd_host_dma_iommu(bus, s, Q35_PSEUDO_DEVFN_IOAPIC); in vtd_realize()
4874 rc->phases.exit = vtd_reset_exit; in vtd_class_init()
4875 dc->vmsd = &vtd_vmstate; in vtd_class_init()
4877 dc->hotpluggable = false; in vtd_class_init()
4878 x86_class->realize = vtd_realize; in vtd_class_init()
4879 x86_class->int_remap = vtd_int_remap; in vtd_class_init()
4880 set_bit(DEVICE_CATEGORY_MISC, dc->categories); in vtd_class_init()
4881 dc->desc = "Intel IOMMU (VT-d) DMA Remapping device"; in vtd_class_init()
4896 imrc->translate = vtd_iommu_translate; in vtd_iommu_memory_region_class_init()
4897 imrc->notify_flag_changed = vtd_iommu_notify_flag_changed; in vtd_iommu_memory_region_class_init()
4898 imrc->replay = vtd_iommu_replay; in vtd_iommu_memory_region_class_init()