xref: /openbmc/qemu/hw/i386/intel_iommu.c (revision e4f4fb1eca795e36f363b4647724221e774523c1)
11da12ec4SLe Tan /*
21da12ec4SLe Tan  * QEMU emulation of an Intel IOMMU (VT-d)
31da12ec4SLe Tan  *   (DMA Remapping device)
41da12ec4SLe Tan  *
51da12ec4SLe Tan  * Copyright (C) 2013 Knut Omang, Oracle <knut.omang@oracle.com>
61da12ec4SLe Tan  * Copyright (C) 2014 Le Tan, <tamlokveer@gmail.com>
71da12ec4SLe Tan  *
81da12ec4SLe Tan  * This program is free software; you can redistribute it and/or modify
91da12ec4SLe Tan  * it under the terms of the GNU General Public License as published by
101da12ec4SLe Tan  * the Free Software Foundation; either version 2 of the License, or
111da12ec4SLe Tan  * (at your option) any later version.
121da12ec4SLe Tan 
131da12ec4SLe Tan  * This program is distributed in the hope that it will be useful,
141da12ec4SLe Tan  * but WITHOUT ANY WARRANTY; without even the implied warranty of
151da12ec4SLe Tan  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
161da12ec4SLe Tan  * GNU General Public License for more details.
171da12ec4SLe Tan 
181da12ec4SLe Tan  * You should have received a copy of the GNU General Public License along
191da12ec4SLe Tan  * with this program; if not, see <http://www.gnu.org/licenses/>.
201da12ec4SLe Tan  */
211da12ec4SLe Tan 
22b6a0aa05SPeter Maydell #include "qemu/osdep.h"
234684a204SPeter Xu #include "qemu/error-report.h"
246333e93cSRadim Krčmář #include "qapi/error.h"
251da12ec4SLe Tan #include "hw/sysbus.h"
261da12ec4SLe Tan #include "exec/address-spaces.h"
271da12ec4SLe Tan #include "intel_iommu_internal.h"
287df953bdSKnut Omang #include "hw/pci/pci.h"
293cb3b154SAlex Williamson #include "hw/pci/pci_bus.h"
30621d983aSMarcel Apfelbaum #include "hw/i386/pc.h"
31dea651a9SFeng Wu #include "hw/i386/apic-msidef.h"
3204af0e18SPeter Xu #include "hw/boards.h"
3304af0e18SPeter Xu #include "hw/i386/x86-iommu.h"
34cb135f59SPeter Xu #include "hw/pci-host/q35.h"
354684a204SPeter Xu #include "sysemu/kvm.h"
3632946019SRadim Krčmář #include "hw/i386/apic_internal.h"
37fb506e70SRadim Krčmář #include "kvm_i386.h"
38bc535e59SPeter Xu #include "trace.h"
391da12ec4SLe Tan 
401da12ec4SLe Tan /*#define DEBUG_INTEL_IOMMU*/
411da12ec4SLe Tan #ifdef DEBUG_INTEL_IOMMU
421da12ec4SLe Tan enum {
431da12ec4SLe Tan     DEBUG_GENERAL, DEBUG_CSR, DEBUG_INV, DEBUG_MMU, DEBUG_FLOG,
44a5861439SPeter Xu     DEBUG_CACHE, DEBUG_IR,
451da12ec4SLe Tan };
461da12ec4SLe Tan #define VTD_DBGBIT(x)   (1 << DEBUG_##x)
471da12ec4SLe Tan static int vtd_dbgflags = VTD_DBGBIT(GENERAL) | VTD_DBGBIT(CSR);
481da12ec4SLe Tan 
491da12ec4SLe Tan #define VTD_DPRINTF(what, fmt, ...) do { \
501da12ec4SLe Tan     if (vtd_dbgflags & VTD_DBGBIT(what)) { \
511da12ec4SLe Tan         fprintf(stderr, "(vtd)%s: " fmt "\n", __func__, \
521da12ec4SLe Tan                 ## __VA_ARGS__); } \
531da12ec4SLe Tan     } while (0)
541da12ec4SLe Tan #else
551da12ec4SLe Tan #define VTD_DPRINTF(what, fmt, ...) do {} while (0)
561da12ec4SLe Tan #endif
571da12ec4SLe Tan 
581da12ec4SLe Tan static void vtd_define_quad(IntelIOMMUState *s, hwaddr addr, uint64_t val,
591da12ec4SLe Tan                             uint64_t wmask, uint64_t w1cmask)
601da12ec4SLe Tan {
611da12ec4SLe Tan     stq_le_p(&s->csr[addr], val);
621da12ec4SLe Tan     stq_le_p(&s->wmask[addr], wmask);
631da12ec4SLe Tan     stq_le_p(&s->w1cmask[addr], w1cmask);
641da12ec4SLe Tan }
651da12ec4SLe Tan 
661da12ec4SLe Tan static void vtd_define_quad_wo(IntelIOMMUState *s, hwaddr addr, uint64_t mask)
671da12ec4SLe Tan {
681da12ec4SLe Tan     stq_le_p(&s->womask[addr], mask);
691da12ec4SLe Tan }
701da12ec4SLe Tan 
711da12ec4SLe Tan static void vtd_define_long(IntelIOMMUState *s, hwaddr addr, uint32_t val,
721da12ec4SLe Tan                             uint32_t wmask, uint32_t w1cmask)
731da12ec4SLe Tan {
741da12ec4SLe Tan     stl_le_p(&s->csr[addr], val);
751da12ec4SLe Tan     stl_le_p(&s->wmask[addr], wmask);
761da12ec4SLe Tan     stl_le_p(&s->w1cmask[addr], w1cmask);
771da12ec4SLe Tan }
781da12ec4SLe Tan 
791da12ec4SLe Tan static void vtd_define_long_wo(IntelIOMMUState *s, hwaddr addr, uint32_t mask)
801da12ec4SLe Tan {
811da12ec4SLe Tan     stl_le_p(&s->womask[addr], mask);
821da12ec4SLe Tan }
831da12ec4SLe Tan 
841da12ec4SLe Tan /* "External" get/set operations */
851da12ec4SLe Tan static void vtd_set_quad(IntelIOMMUState *s, hwaddr addr, uint64_t val)
861da12ec4SLe Tan {
871da12ec4SLe Tan     uint64_t oldval = ldq_le_p(&s->csr[addr]);
881da12ec4SLe Tan     uint64_t wmask = ldq_le_p(&s->wmask[addr]);
891da12ec4SLe Tan     uint64_t w1cmask = ldq_le_p(&s->w1cmask[addr]);
901da12ec4SLe Tan     stq_le_p(&s->csr[addr],
911da12ec4SLe Tan              ((oldval & ~wmask) | (val & wmask)) & ~(w1cmask & val));
921da12ec4SLe Tan }
931da12ec4SLe Tan 
941da12ec4SLe Tan static void vtd_set_long(IntelIOMMUState *s, hwaddr addr, uint32_t val)
951da12ec4SLe Tan {
961da12ec4SLe Tan     uint32_t oldval = ldl_le_p(&s->csr[addr]);
971da12ec4SLe Tan     uint32_t wmask = ldl_le_p(&s->wmask[addr]);
981da12ec4SLe Tan     uint32_t w1cmask = ldl_le_p(&s->w1cmask[addr]);
991da12ec4SLe Tan     stl_le_p(&s->csr[addr],
1001da12ec4SLe Tan              ((oldval & ~wmask) | (val & wmask)) & ~(w1cmask & val));
1011da12ec4SLe Tan }
1021da12ec4SLe Tan 
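/*
 * Illustrative example (hypothetical values, not part of the device model):
 * the update rule used by vtd_set_quad()/vtd_set_long() above is
 *     new = ((old & ~wmask) | (val & wmask)) & ~(w1cmask & val)
 * i.e. only bits set in wmask take the written value, and bits set in
 * w1cmask are cleared when software writes 1 to them.  With
 * wmask = 0x00f0, w1cmask = 0x000f, old = 0x0f0f and a guest write of
 * val = 0x00ff:
 *     old & ~wmask        = 0x0f0f   (non-writable bits keep their value)
 *     val & wmask         = 0x00f0   (writable bits take the new value)
 *     OR of the two       = 0x0fff
 *     & ~(w1cmask & val)  = 0x0ff0   (W1C bits written as 1 are cleared)
 */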
1031da12ec4SLe Tan static uint64_t vtd_get_quad(IntelIOMMUState *s, hwaddr addr)
1041da12ec4SLe Tan {
1051da12ec4SLe Tan     uint64_t val = ldq_le_p(&s->csr[addr]);
1061da12ec4SLe Tan     uint64_t womask = ldq_le_p(&s->womask[addr]);
1071da12ec4SLe Tan     return val & ~womask;
1081da12ec4SLe Tan }
1091da12ec4SLe Tan 
1101da12ec4SLe Tan static uint32_t vtd_get_long(IntelIOMMUState *s, hwaddr addr)
1111da12ec4SLe Tan {
1121da12ec4SLe Tan     uint32_t val = ldl_le_p(&s->csr[addr]);
1131da12ec4SLe Tan     uint32_t womask = ldl_le_p(&s->womask[addr]);
1141da12ec4SLe Tan     return val & ~womask;
1151da12ec4SLe Tan }
1161da12ec4SLe Tan 
1171da12ec4SLe Tan /* "Internal" get/set operations */
1181da12ec4SLe Tan static uint64_t vtd_get_quad_raw(IntelIOMMUState *s, hwaddr addr)
1191da12ec4SLe Tan {
1201da12ec4SLe Tan     return ldq_le_p(&s->csr[addr]);
1211da12ec4SLe Tan }
1221da12ec4SLe Tan 
1231da12ec4SLe Tan static uint32_t vtd_get_long_raw(IntelIOMMUState *s, hwaddr addr)
1241da12ec4SLe Tan {
1251da12ec4SLe Tan     return ldl_le_p(&s->csr[addr]);
1261da12ec4SLe Tan }
1271da12ec4SLe Tan 
1281da12ec4SLe Tan static void vtd_set_quad_raw(IntelIOMMUState *s, hwaddr addr, uint64_t val)
1291da12ec4SLe Tan {
1301da12ec4SLe Tan     stq_le_p(&s->csr[addr], val);
1311da12ec4SLe Tan }
1321da12ec4SLe Tan 
1331da12ec4SLe Tan static uint32_t vtd_set_clear_mask_long(IntelIOMMUState *s, hwaddr addr,
1341da12ec4SLe Tan                                         uint32_t clear, uint32_t mask)
1351da12ec4SLe Tan {
1361da12ec4SLe Tan     uint32_t new_val = (ldl_le_p(&s->csr[addr]) & ~clear) | mask;
1371da12ec4SLe Tan     stl_le_p(&s->csr[addr], new_val);
1381da12ec4SLe Tan     return new_val;
1391da12ec4SLe Tan }
1401da12ec4SLe Tan 
1411da12ec4SLe Tan static uint64_t vtd_set_clear_mask_quad(IntelIOMMUState *s, hwaddr addr,
1421da12ec4SLe Tan                                         uint64_t clear, uint64_t mask)
1431da12ec4SLe Tan {
1441da12ec4SLe Tan     uint64_t new_val = (ldq_le_p(&s->csr[addr]) & ~clear) | mask;
1451da12ec4SLe Tan     stq_le_p(&s->csr[addr], new_val);
1461da12ec4SLe Tan     return new_val;
1471da12ec4SLe Tan }
1481da12ec4SLe Tan 
149b5a280c0SLe Tan /* GHashTable functions */
150b5a280c0SLe Tan static gboolean vtd_uint64_equal(gconstpointer v1, gconstpointer v2)
151b5a280c0SLe Tan {
152b5a280c0SLe Tan     return *((const uint64_t *)v1) == *((const uint64_t *)v2);
153b5a280c0SLe Tan }
154b5a280c0SLe Tan 
155b5a280c0SLe Tan static guint vtd_uint64_hash(gconstpointer v)
156b5a280c0SLe Tan {
157b5a280c0SLe Tan     return (guint)*(const uint64_t *)v;
158b5a280c0SLe Tan }
159b5a280c0SLe Tan 
160b5a280c0SLe Tan static gboolean vtd_hash_remove_by_domain(gpointer key, gpointer value,
161b5a280c0SLe Tan                                           gpointer user_data)
162b5a280c0SLe Tan {
163b5a280c0SLe Tan     VTDIOTLBEntry *entry = (VTDIOTLBEntry *)value;
164b5a280c0SLe Tan     uint16_t domain_id = *(uint16_t *)user_data;
165b5a280c0SLe Tan     return entry->domain_id == domain_id;
166b5a280c0SLe Tan }
167b5a280c0SLe Tan 
168d66b969bSJason Wang /* The shift of an addr for a certain level of paging structure */
169d66b969bSJason Wang static inline uint32_t vtd_slpt_level_shift(uint32_t level)
170d66b969bSJason Wang {
1717e58326aSPeter Xu     assert(level != 0);
172d66b969bSJason Wang     return VTD_PAGE_SHIFT_4K + (level - 1) * VTD_SL_LEVEL_BITS;
173d66b969bSJason Wang }
174d66b969bSJason Wang 
175d66b969bSJason Wang static inline uint64_t vtd_slpt_level_page_mask(uint32_t level)
176d66b969bSJason Wang {
177d66b969bSJason Wang     return ~((1ULL << vtd_slpt_level_shift(level)) - 1);
178d66b969bSJason Wang }
179d66b969bSJason Wang 
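/*
 * Illustrative values, assuming the usual 9-bit-per-level layout
 * (VTD_SL_LEVEL_BITS == 9, VTD_PAGE_SHIFT_4K == 12):
 *     level 1 (4KiB pages): shift 12, page mask ~0xfffULL
 *     level 2 (2MiB pages): shift 21, page mask ~0x1fffffULL
 *     level 3 (1GiB pages): shift 30, page mask ~0x3fffffffULL
 */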
180b5a280c0SLe Tan static gboolean vtd_hash_remove_by_page(gpointer key, gpointer value,
181b5a280c0SLe Tan                                         gpointer user_data)
182b5a280c0SLe Tan {
183b5a280c0SLe Tan     VTDIOTLBEntry *entry = (VTDIOTLBEntry *)value;
184b5a280c0SLe Tan     VTDIOTLBPageInvInfo *info = (VTDIOTLBPageInvInfo *)user_data;
185d66b969bSJason Wang     uint64_t gfn = (info->addr >> VTD_PAGE_SHIFT_4K) & info->mask;
186d66b969bSJason Wang     uint64_t gfn_tlb = (info->addr & entry->mask) >> VTD_PAGE_SHIFT_4K;
187b5a280c0SLe Tan     return (entry->domain_id == info->domain_id) &&
188d66b969bSJason Wang             (((entry->gfn & info->mask) == gfn) ||
189d66b969bSJason Wang              (entry->gfn == gfn_tlb));
190b5a280c0SLe Tan }
191b5a280c0SLe Tan 
192d92fa2dcSLe Tan /* Reset the context-cache generation of every VTDAddressSpace to zero and
193d92fa2dcSLe Tan  * set the generation of the IntelIOMMUState to 1.
194d92fa2dcSLe Tan  */
195d92fa2dcSLe Tan static void vtd_reset_context_cache(IntelIOMMUState *s)
196d92fa2dcSLe Tan {
197d92fa2dcSLe Tan     VTDAddressSpace *vtd_as;
1987df953bdSKnut Omang     VTDBus *vtd_bus;
1997df953bdSKnut Omang     GHashTableIter bus_it;
200d92fa2dcSLe Tan     uint32_t devfn_it;
201d92fa2dcSLe Tan 
2027df953bdSKnut Omang     g_hash_table_iter_init(&bus_it, s->vtd_as_by_busptr);
2037df953bdSKnut Omang 
204d92fa2dcSLe Tan     VTD_DPRINTF(CACHE, "global context_cache_gen=1");
2057df953bdSKnut Omang     while (g_hash_table_iter_next (&bus_it, NULL, (void**)&vtd_bus)) {
20604af0e18SPeter Xu         for (devfn_it = 0; devfn_it < X86_IOMMU_PCI_DEVFN_MAX; ++devfn_it) {
2077df953bdSKnut Omang             vtd_as = vtd_bus->dev_as[devfn_it];
208d92fa2dcSLe Tan             if (!vtd_as) {
209d92fa2dcSLe Tan                 continue;
210d92fa2dcSLe Tan             }
211d92fa2dcSLe Tan             vtd_as->context_cache_entry.context_cache_gen = 0;
212d92fa2dcSLe Tan         }
213d92fa2dcSLe Tan     }
214d92fa2dcSLe Tan     s->context_cache_gen = 1;
215d92fa2dcSLe Tan }
216d92fa2dcSLe Tan 
217b5a280c0SLe Tan static void vtd_reset_iotlb(IntelIOMMUState *s)
218b5a280c0SLe Tan {
219b5a280c0SLe Tan     assert(s->iotlb);
220b5a280c0SLe Tan     g_hash_table_remove_all(s->iotlb);
221b5a280c0SLe Tan }
222b5a280c0SLe Tan 
223bacabb0aSJason Wang static uint64_t vtd_get_iotlb_key(uint64_t gfn, uint16_t source_id,
224d66b969bSJason Wang                                   uint32_t level)
225d66b969bSJason Wang {
226d66b969bSJason Wang     return gfn | ((uint64_t)(source_id) << VTD_IOTLB_SID_SHIFT) |
227d66b969bSJason Wang            ((uint64_t)(level) << VTD_IOTLB_LVL_SHIFT);
228d66b969bSJason Wang }
229d66b969bSJason Wang 
230d66b969bSJason Wang static uint64_t vtd_get_iotlb_gfn(hwaddr addr, uint32_t level)
231d66b969bSJason Wang {
232d66b969bSJason Wang     return (addr & vtd_slpt_level_page_mask(level)) >> VTD_PAGE_SHIFT_4K;
233d66b969bSJason Wang }
234d66b969bSJason Wang 
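/*
 * The IOTLB key packs (gfn, source_id, level) into a single 64-bit value:
 * the guest frame number sits in the low bits, with the requester
 * source-id and the paging level OR'ed in at VTD_IOTLB_SID_SHIFT and
 * VTD_IOTLB_LVL_SHIFT respectively.  Keying on the level as well lets the
 * cache hold 4KiB, 2MiB and 1GiB translations of the same address
 * independently; vtd_lookup_iotlb() below probes one level at a time.
 */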
235b5a280c0SLe Tan static VTDIOTLBEntry *vtd_lookup_iotlb(IntelIOMMUState *s, uint16_t source_id,
236b5a280c0SLe Tan                                        hwaddr addr)
237b5a280c0SLe Tan {
238d66b969bSJason Wang     VTDIOTLBEntry *entry;
239b5a280c0SLe Tan     uint64_t key;
240d66b969bSJason Wang     int level;
241b5a280c0SLe Tan 
242d66b969bSJason Wang     for (level = VTD_SL_PT_LEVEL; level < VTD_SL_PML4_LEVEL; level++) {
243d66b969bSJason Wang         key = vtd_get_iotlb_key(vtd_get_iotlb_gfn(addr, level),
244d66b969bSJason Wang                                 source_id, level);
245d66b969bSJason Wang         entry = g_hash_table_lookup(s->iotlb, &key);
246d66b969bSJason Wang         if (entry) {
247d66b969bSJason Wang             goto out;
248d66b969bSJason Wang         }
249d66b969bSJason Wang     }
250b5a280c0SLe Tan 
251d66b969bSJason Wang out:
252d66b969bSJason Wang     return entry;
253b5a280c0SLe Tan }
254b5a280c0SLe Tan 
255b5a280c0SLe Tan static void vtd_update_iotlb(IntelIOMMUState *s, uint16_t source_id,
256b5a280c0SLe Tan                              uint16_t domain_id, hwaddr addr, uint64_t slpte,
257d66b969bSJason Wang                              bool read_flags, bool write_flags,
258d66b969bSJason Wang                              uint32_t level)
259b5a280c0SLe Tan {
260b5a280c0SLe Tan     VTDIOTLBEntry *entry = g_malloc(sizeof(*entry));
261b5a280c0SLe Tan     uint64_t *key = g_malloc(sizeof(*key));
262d66b969bSJason Wang     uint64_t gfn = vtd_get_iotlb_gfn(addr, level);
263b5a280c0SLe Tan 
2646c441e1dSPeter Xu     trace_vtd_iotlb_page_update(source_id, addr, slpte, domain_id);
265b5a280c0SLe Tan     if (g_hash_table_size(s->iotlb) >= VTD_IOTLB_MAX_SIZE) {
2666c441e1dSPeter Xu         trace_vtd_iotlb_reset("iotlb exceeds size limit");
267b5a280c0SLe Tan         vtd_reset_iotlb(s);
268b5a280c0SLe Tan     }
269b5a280c0SLe Tan 
270b5a280c0SLe Tan     entry->gfn = gfn;
271b5a280c0SLe Tan     entry->domain_id = domain_id;
272b5a280c0SLe Tan     entry->slpte = slpte;
273b5a280c0SLe Tan     entry->read_flags = read_flags;
274b5a280c0SLe Tan     entry->write_flags = write_flags;
275d66b969bSJason Wang     entry->mask = vtd_slpt_level_page_mask(level);
276d66b969bSJason Wang     *key = vtd_get_iotlb_key(gfn, source_id, level);
277b5a280c0SLe Tan     g_hash_table_replace(s->iotlb, key, entry);
278b5a280c0SLe Tan }
279b5a280c0SLe Tan 
2801da12ec4SLe Tan /* Given the register addresses of the MSI message data and address, generate
2811da12ec4SLe Tan  * an interrupt via MSI.
2821da12ec4SLe Tan  */
2831da12ec4SLe Tan static void vtd_generate_interrupt(IntelIOMMUState *s, hwaddr mesg_addr_reg,
2841da12ec4SLe Tan                                    hwaddr mesg_data_reg)
2851da12ec4SLe Tan {
28632946019SRadim Krčmář     MSIMessage msi;
2871da12ec4SLe Tan 
2881da12ec4SLe Tan     assert(mesg_data_reg < DMAR_REG_SIZE);
2891da12ec4SLe Tan     assert(mesg_addr_reg < DMAR_REG_SIZE);
2901da12ec4SLe Tan 
29132946019SRadim Krčmář     msi.address = vtd_get_long_raw(s, mesg_addr_reg);
29232946019SRadim Krčmář     msi.data = vtd_get_long_raw(s, mesg_data_reg);
2931da12ec4SLe Tan 
29432946019SRadim Krčmář     VTD_DPRINTF(FLOG, "msi: addr 0x%"PRIx64 " data 0x%"PRIx32,
29532946019SRadim Krčmář                 msi.address, msi.data);
29632946019SRadim Krčmář     apic_get_class()->send_msi(&msi);
2971da12ec4SLe Tan }
2981da12ec4SLe Tan 
2991da12ec4SLe Tan /* Generate a fault event to software via MSI if conditions are met.
3001da12ec4SLe Tan  * Note that the value of FSTS_REG passed in should be the one before any
3011da12ec4SLe Tan  * update.
3021da12ec4SLe Tan  */
3031da12ec4SLe Tan static void vtd_generate_fault_event(IntelIOMMUState *s, uint32_t pre_fsts)
3041da12ec4SLe Tan {
3051da12ec4SLe Tan     if (pre_fsts & VTD_FSTS_PPF || pre_fsts & VTD_FSTS_PFO ||
3061da12ec4SLe Tan         pre_fsts & VTD_FSTS_IQE) {
3071da12ec4SLe Tan         VTD_DPRINTF(FLOG, "there are previous interrupt conditions "
3081da12ec4SLe Tan                     "to be serviced by software, fault event is not generated "
3091da12ec4SLe Tan                     "(FSTS_REG 0x%"PRIx32 ")", pre_fsts);
3101da12ec4SLe Tan         return;
3111da12ec4SLe Tan     }
3121da12ec4SLe Tan     vtd_set_clear_mask_long(s, DMAR_FECTL_REG, 0, VTD_FECTL_IP);
3131da12ec4SLe Tan     if (vtd_get_long_raw(s, DMAR_FECTL_REG) & VTD_FECTL_IM) {
3141da12ec4SLe Tan         VTD_DPRINTF(FLOG, "Interrupt Mask set, fault event is not generated");
3151da12ec4SLe Tan     } else {
3161da12ec4SLe Tan         vtd_generate_interrupt(s, DMAR_FEADDR_REG, DMAR_FEDATA_REG);
3171da12ec4SLe Tan         vtd_set_clear_mask_long(s, DMAR_FECTL_REG, VTD_FECTL_IP, 0);
3181da12ec4SLe Tan     }
3191da12ec4SLe Tan }
3201da12ec4SLe Tan 
3211da12ec4SLe Tan /* Check if the Fault (F) field of the Fault Recording Register referenced by
3221da12ec4SLe Tan  * @index is Set.
3231da12ec4SLe Tan  */
3241da12ec4SLe Tan static bool vtd_is_frcd_set(IntelIOMMUState *s, uint16_t index)
3251da12ec4SLe Tan {
3261da12ec4SLe Tan     /* Each reg is 128-bit */
3271da12ec4SLe Tan     hwaddr addr = DMAR_FRCD_REG_OFFSET + (((uint64_t)index) << 4);
3281da12ec4SLe Tan     addr += 8; /* Access the high 64-bit half */
3291da12ec4SLe Tan 
3301da12ec4SLe Tan     assert(index < DMAR_FRCD_REG_NR);
3311da12ec4SLe Tan 
3321da12ec4SLe Tan     return vtd_get_quad_raw(s, addr) & VTD_FRCD_F;
3331da12ec4SLe Tan }
3341da12ec4SLe Tan 
3351da12ec4SLe Tan /* Update the PPF field of the Fault Status Register.
3361da12ec4SLe Tan  * Should be called whenever the F field of any fault recording register
3371da12ec4SLe Tan  * is changed.
3381da12ec4SLe Tan  */
3391da12ec4SLe Tan static void vtd_update_fsts_ppf(IntelIOMMUState *s)
3401da12ec4SLe Tan {
3411da12ec4SLe Tan     uint32_t i;
3421da12ec4SLe Tan     uint32_t ppf_mask = 0;
3431da12ec4SLe Tan 
3441da12ec4SLe Tan     for (i = 0; i < DMAR_FRCD_REG_NR; i++) {
3451da12ec4SLe Tan         if (vtd_is_frcd_set(s, i)) {
3461da12ec4SLe Tan             ppf_mask = VTD_FSTS_PPF;
3471da12ec4SLe Tan             break;
3481da12ec4SLe Tan         }
3491da12ec4SLe Tan     }
3501da12ec4SLe Tan     vtd_set_clear_mask_long(s, DMAR_FSTS_REG, VTD_FSTS_PPF, ppf_mask);
3511da12ec4SLe Tan     VTD_DPRINTF(FLOG, "set PPF of FSTS_REG to %d", ppf_mask ? 1 : 0);
3521da12ec4SLe Tan }
3531da12ec4SLe Tan 
3541da12ec4SLe Tan static void vtd_set_frcd_and_update_ppf(IntelIOMMUState *s, uint16_t index)
3551da12ec4SLe Tan {
3561da12ec4SLe Tan     /* Each reg is 128-bit */
3571da12ec4SLe Tan     hwaddr addr = DMAR_FRCD_REG_OFFSET + (((uint64_t)index) << 4);
3581da12ec4SLe Tan     addr += 8; /* Access the high 64-bit half */
3591da12ec4SLe Tan 
3601da12ec4SLe Tan     assert(index < DMAR_FRCD_REG_NR);
3611da12ec4SLe Tan 
3621da12ec4SLe Tan     vtd_set_clear_mask_quad(s, addr, 0, VTD_FRCD_F);
3631da12ec4SLe Tan     vtd_update_fsts_ppf(s);
3641da12ec4SLe Tan }
3651da12ec4SLe Tan 
3661da12ec4SLe Tan /* Must not update the F field now; it should be done later */
3671da12ec4SLe Tan static void vtd_record_frcd(IntelIOMMUState *s, uint16_t index,
3681da12ec4SLe Tan                             uint16_t source_id, hwaddr addr,
3691da12ec4SLe Tan                             VTDFaultReason fault, bool is_write)
3701da12ec4SLe Tan {
3711da12ec4SLe Tan     uint64_t hi = 0, lo;
3721da12ec4SLe Tan     hwaddr frcd_reg_addr = DMAR_FRCD_REG_OFFSET + (((uint64_t)index) << 4);
3731da12ec4SLe Tan 
3741da12ec4SLe Tan     assert(index < DMAR_FRCD_REG_NR);
3751da12ec4SLe Tan 
3761da12ec4SLe Tan     lo = VTD_FRCD_FI(addr);
3771da12ec4SLe Tan     hi = VTD_FRCD_SID(source_id) | VTD_FRCD_FR(fault);
3781da12ec4SLe Tan     if (!is_write) {
3791da12ec4SLe Tan         hi |= VTD_FRCD_T;
3801da12ec4SLe Tan     }
3811da12ec4SLe Tan     vtd_set_quad_raw(s, frcd_reg_addr, lo);
3821da12ec4SLe Tan     vtd_set_quad_raw(s, frcd_reg_addr + 8, hi);
3831da12ec4SLe Tan     VTD_DPRINTF(FLOG, "record to FRCD_REG #%"PRIu16 ": hi 0x%"PRIx64
3841da12ec4SLe Tan                 ", lo 0x%"PRIx64, index, hi, lo);
3851da12ec4SLe Tan }
3861da12ec4SLe Tan 
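/*
 * Layout sketch of one fault recording register as written above: each
 * FRCD is 128 bits wide, at DMAR_FRCD_REG_OFFSET + index * 16.  The low
 * 64 bits carry the faulting address (VTD_FRCD_FI); the high 64 bits
 * carry the source-id (VTD_FRCD_SID), the fault reason (VTD_FRCD_FR), the
 * T bit for read requests, and the F bit that is set separately by
 * vtd_set_frcd_and_update_ppf().
 */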
3871da12ec4SLe Tan /* Try to collapse multiple pending faults from the same requester */
3881da12ec4SLe Tan static bool vtd_try_collapse_fault(IntelIOMMUState *s, uint16_t source_id)
3891da12ec4SLe Tan {
3901da12ec4SLe Tan     uint32_t i;
3911da12ec4SLe Tan     uint64_t frcd_reg;
3921da12ec4SLe Tan     hwaddr addr = DMAR_FRCD_REG_OFFSET + 8; /* The high 64-bit half */
3931da12ec4SLe Tan 
3941da12ec4SLe Tan     for (i = 0; i < DMAR_FRCD_REG_NR; i++) {
3951da12ec4SLe Tan         frcd_reg = vtd_get_quad_raw(s, addr);
3961da12ec4SLe Tan         VTD_DPRINTF(FLOG, "frcd_reg #%d 0x%"PRIx64, i, frcd_reg);
3971da12ec4SLe Tan         if ((frcd_reg & VTD_FRCD_F) &&
3981da12ec4SLe Tan             ((frcd_reg & VTD_FRCD_SID_MASK) == source_id)) {
3991da12ec4SLe Tan             return true;
4001da12ec4SLe Tan         }
4011da12ec4SLe Tan         addr += 16; /* 128-bit for each */
4021da12ec4SLe Tan     }
4031da12ec4SLe Tan     return false;
4041da12ec4SLe Tan }
4051da12ec4SLe Tan 
4061da12ec4SLe Tan /* Log and report a DMAR (address translation) fault to software */
4071da12ec4SLe Tan static void vtd_report_dmar_fault(IntelIOMMUState *s, uint16_t source_id,
4081da12ec4SLe Tan                                   hwaddr addr, VTDFaultReason fault,
4091da12ec4SLe Tan                                   bool is_write)
4101da12ec4SLe Tan {
4111da12ec4SLe Tan     uint32_t fsts_reg = vtd_get_long_raw(s, DMAR_FSTS_REG);
4121da12ec4SLe Tan 
4131da12ec4SLe Tan     assert(fault < VTD_FR_MAX);
4141da12ec4SLe Tan 
4151da12ec4SLe Tan     if (fault == VTD_FR_RESERVED_ERR) {
4161da12ec4SLe Tan         /* This is not a normal fault reason case. Drop it. */
4171da12ec4SLe Tan         return;
4181da12ec4SLe Tan     }
4191da12ec4SLe Tan     VTD_DPRINTF(FLOG, "sid 0x%"PRIx16 ", fault %d, addr 0x%"PRIx64
4201da12ec4SLe Tan                 ", is_write %d", source_id, fault, addr, is_write);
4211da12ec4SLe Tan     if (fsts_reg & VTD_FSTS_PFO) {
4221da12ec4SLe Tan         VTD_DPRINTF(FLOG, "new fault is not recorded due to "
4231da12ec4SLe Tan                     "Primary Fault Overflow");
4241da12ec4SLe Tan         return;
4251da12ec4SLe Tan     }
4261da12ec4SLe Tan     if (vtd_try_collapse_fault(s, source_id)) {
4271da12ec4SLe Tan         VTD_DPRINTF(FLOG, "new fault is not recorded due to "
4281da12ec4SLe Tan                     "compression of faults");
4291da12ec4SLe Tan         return;
4301da12ec4SLe Tan     }
4311da12ec4SLe Tan     if (vtd_is_frcd_set(s, s->next_frcd_reg)) {
4321da12ec4SLe Tan         VTD_DPRINTF(FLOG, "Primary Fault Overflow and "
4331da12ec4SLe Tan                     "new fault is not recorded, set PFO field");
4341da12ec4SLe Tan         vtd_set_clear_mask_long(s, DMAR_FSTS_REG, 0, VTD_FSTS_PFO);
4351da12ec4SLe Tan         return;
4361da12ec4SLe Tan     }
4371da12ec4SLe Tan 
4381da12ec4SLe Tan     vtd_record_frcd(s, s->next_frcd_reg, source_id, addr, fault, is_write);
4391da12ec4SLe Tan 
4401da12ec4SLe Tan     if (fsts_reg & VTD_FSTS_PPF) {
4411da12ec4SLe Tan         VTD_DPRINTF(FLOG, "there are pending faults already, "
4421da12ec4SLe Tan                     "fault event is not generated");
4431da12ec4SLe Tan         vtd_set_frcd_and_update_ppf(s, s->next_frcd_reg);
4441da12ec4SLe Tan         s->next_frcd_reg++;
4451da12ec4SLe Tan         if (s->next_frcd_reg == DMAR_FRCD_REG_NR) {
4461da12ec4SLe Tan             s->next_frcd_reg = 0;
4471da12ec4SLe Tan         }
4481da12ec4SLe Tan     } else {
4491da12ec4SLe Tan         vtd_set_clear_mask_long(s, DMAR_FSTS_REG, VTD_FSTS_FRI_MASK,
4501da12ec4SLe Tan                                 VTD_FSTS_FRI(s->next_frcd_reg));
4511da12ec4SLe Tan         vtd_set_frcd_and_update_ppf(s, s->next_frcd_reg); /* Will set PPF */
4521da12ec4SLe Tan         s->next_frcd_reg++;
4531da12ec4SLe Tan         if (s->next_frcd_reg == DMAR_FRCD_REG_NR) {
4541da12ec4SLe Tan             s->next_frcd_reg = 0;
4551da12ec4SLe Tan         }
4561da12ec4SLe Tan         /* This case actually causes the PPF to be set,
4571da12ec4SLe Tan          * so generate a fault event (interrupt).
4581da12ec4SLe Tan          */
4591da12ec4SLe Tan          vtd_generate_fault_event(s, fsts_reg);
4601da12ec4SLe Tan     }
4611da12ec4SLe Tan }
4621da12ec4SLe Tan 
463ed7b8fbcSLe Tan /* Handle Invalidation Queue Error (IQE) conditions of the queued
464ed7b8fbcSLe Tan  * invalidation interface.
465ed7b8fbcSLe Tan  */
466ed7b8fbcSLe Tan static void vtd_handle_inv_queue_error(IntelIOMMUState *s)
467ed7b8fbcSLe Tan {
468ed7b8fbcSLe Tan     uint32_t fsts_reg = vtd_get_long_raw(s, DMAR_FSTS_REG);
469ed7b8fbcSLe Tan 
470ed7b8fbcSLe Tan     vtd_set_clear_mask_long(s, DMAR_FSTS_REG, 0, VTD_FSTS_IQE);
471ed7b8fbcSLe Tan     vtd_generate_fault_event(s, fsts_reg);
472ed7b8fbcSLe Tan }
473ed7b8fbcSLe Tan 
474ed7b8fbcSLe Tan /* Set the IWC field and try to generate an invalidation completion interrupt */
475ed7b8fbcSLe Tan static void vtd_generate_completion_event(IntelIOMMUState *s)
476ed7b8fbcSLe Tan {
477ed7b8fbcSLe Tan     if (vtd_get_long_raw(s, DMAR_ICS_REG) & VTD_ICS_IWC) {
478bc535e59SPeter Xu         trace_vtd_inv_desc_wait_irq("One pending, skip current");
479ed7b8fbcSLe Tan         return;
480ed7b8fbcSLe Tan     }
481ed7b8fbcSLe Tan     vtd_set_clear_mask_long(s, DMAR_ICS_REG, 0, VTD_ICS_IWC);
482ed7b8fbcSLe Tan     vtd_set_clear_mask_long(s, DMAR_IECTL_REG, 0, VTD_IECTL_IP);
483ed7b8fbcSLe Tan     if (vtd_get_long_raw(s, DMAR_IECTL_REG) & VTD_IECTL_IM) {
484bc535e59SPeter Xu         trace_vtd_inv_desc_wait_irq("IM in IECTL_REG is set, "
485bc535e59SPeter Xu                                     "new event not generated");
486ed7b8fbcSLe Tan         return;
487ed7b8fbcSLe Tan     } else {
488ed7b8fbcSLe Tan         /* Generate the interrupt event */
489bc535e59SPeter Xu         trace_vtd_inv_desc_wait_irq("Generating complete event");
490ed7b8fbcSLe Tan         vtd_generate_interrupt(s, DMAR_IEADDR_REG, DMAR_IEDATA_REG);
491ed7b8fbcSLe Tan         vtd_set_clear_mask_long(s, DMAR_IECTL_REG, VTD_IECTL_IP, 0);
492ed7b8fbcSLe Tan     }
493ed7b8fbcSLe Tan }
494ed7b8fbcSLe Tan 
4951da12ec4SLe Tan static inline bool vtd_root_entry_present(VTDRootEntry *root)
4961da12ec4SLe Tan {
4971da12ec4SLe Tan     return root->val & VTD_ROOT_ENTRY_P;
4981da12ec4SLe Tan }
4991da12ec4SLe Tan 
5001da12ec4SLe Tan static int vtd_get_root_entry(IntelIOMMUState *s, uint8_t index,
5011da12ec4SLe Tan                               VTDRootEntry *re)
5021da12ec4SLe Tan {
5031da12ec4SLe Tan     dma_addr_t addr;
5041da12ec4SLe Tan 
5051da12ec4SLe Tan     addr = s->root + index * sizeof(*re);
5061da12ec4SLe Tan     if (dma_memory_read(&address_space_memory, addr, re, sizeof(*re))) {
5076c441e1dSPeter Xu         trace_vtd_re_invalid(re->rsvd, re->val);
5081da12ec4SLe Tan         re->val = 0;
5091da12ec4SLe Tan         return -VTD_FR_ROOT_TABLE_INV;
5101da12ec4SLe Tan     }
5111da12ec4SLe Tan     re->val = le64_to_cpu(re->val);
5121da12ec4SLe Tan     return 0;
5131da12ec4SLe Tan }
5141da12ec4SLe Tan 
5151da12ec4SLe Tan static inline bool vtd_context_entry_present(VTDContextEntry *context)
5161da12ec4SLe Tan {
5171da12ec4SLe Tan     return context->lo & VTD_CONTEXT_ENTRY_P;
5181da12ec4SLe Tan }
5191da12ec4SLe Tan 
5201da12ec4SLe Tan static int vtd_get_context_entry_from_root(VTDRootEntry *root, uint8_t index,
5211da12ec4SLe Tan                                            VTDContextEntry *ce)
5221da12ec4SLe Tan {
5231da12ec4SLe Tan     dma_addr_t addr;
5241da12ec4SLe Tan 
5256c441e1dSPeter Xu     /* we have checked that root entry is present */
5261da12ec4SLe Tan     addr = (root->val & VTD_ROOT_ENTRY_CTP) + index * sizeof(*ce);
5271da12ec4SLe Tan     if (dma_memory_read(&address_space_memory, addr, ce, sizeof(*ce))) {
5286c441e1dSPeter Xu         trace_vtd_re_invalid(root->rsvd, root->val);
5291da12ec4SLe Tan         return -VTD_FR_CONTEXT_TABLE_INV;
5301da12ec4SLe Tan     }
5311da12ec4SLe Tan     ce->lo = le64_to_cpu(ce->lo);
5321da12ec4SLe Tan     ce->hi = le64_to_cpu(ce->hi);
5331da12ec4SLe Tan     return 0;
5341da12ec4SLe Tan }
5351da12ec4SLe Tan 
5361da12ec4SLe Tan static inline dma_addr_t vtd_get_slpt_base_from_context(VTDContextEntry *ce)
5371da12ec4SLe Tan {
5381da12ec4SLe Tan     return ce->lo & VTD_CONTEXT_ENTRY_SLPTPTR;
5391da12ec4SLe Tan }
5401da12ec4SLe Tan 
5411da12ec4SLe Tan static inline uint64_t vtd_get_slpte_addr(uint64_t slpte)
5421da12ec4SLe Tan {
5431da12ec4SLe Tan     return slpte & VTD_SL_PT_BASE_ADDR_MASK;
5441da12ec4SLe Tan }
5451da12ec4SLe Tan 
5461da12ec4SLe Tan /* Whether the pte indicates the address of the page frame */
5471da12ec4SLe Tan static inline bool vtd_is_last_slpte(uint64_t slpte, uint32_t level)
5481da12ec4SLe Tan {
5491da12ec4SLe Tan     return level == VTD_SL_PT_LEVEL || (slpte & VTD_SL_PT_PAGE_SIZE_MASK);
5501da12ec4SLe Tan }
5511da12ec4SLe Tan 
5521da12ec4SLe Tan /* Get the content of an slpte located at @base_addr[@index] */
5531da12ec4SLe Tan static uint64_t vtd_get_slpte(dma_addr_t base_addr, uint32_t index)
5541da12ec4SLe Tan {
5551da12ec4SLe Tan     uint64_t slpte;
5561da12ec4SLe Tan 
5571da12ec4SLe Tan     assert(index < VTD_SL_PT_ENTRY_NR);
5581da12ec4SLe Tan 
5591da12ec4SLe Tan     if (dma_memory_read(&address_space_memory,
5601da12ec4SLe Tan                         base_addr + index * sizeof(slpte), &slpte,
5611da12ec4SLe Tan                         sizeof(slpte))) {
5621da12ec4SLe Tan         slpte = (uint64_t)-1;
5631da12ec4SLe Tan         return slpte;
5641da12ec4SLe Tan     }
5651da12ec4SLe Tan     slpte = le64_to_cpu(slpte);
5661da12ec4SLe Tan     return slpte;
5671da12ec4SLe Tan }
5681da12ec4SLe Tan 
5696e905564SPeter Xu /* Given an iova and the level of the paging structure, return the table
5706e905564SPeter Xu  * index (offset) within the current level.
5711da12ec4SLe Tan  */
5726e905564SPeter Xu static inline uint32_t vtd_iova_level_offset(uint64_t iova, uint32_t level)
5731da12ec4SLe Tan {
5746e905564SPeter Xu     return (iova >> vtd_slpt_level_shift(level)) &
5751da12ec4SLe Tan             ((1ULL << VTD_SL_LEVEL_BITS) - 1);
5761da12ec4SLe Tan }
5771da12ec4SLe Tan 
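/*
 * Worked example (illustrative): for iova 0x12345678 the 9-bit table
 * indices returned for each level are
 *     level 3: (0x12345678 >> 30) & 0x1ff = 0x000
 *     level 2: (0x12345678 >> 21) & 0x1ff = 0x091
 *     level 1: (0x12345678 >> 12) & 0x1ff = 0x145
 * using the shifts computed by vtd_slpt_level_shift() above.
 */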
5781da12ec4SLe Tan /* Check Capability Register to see if the @level of page-table is supported */
5791da12ec4SLe Tan static inline bool vtd_is_level_supported(IntelIOMMUState *s, uint32_t level)
5801da12ec4SLe Tan {
5811da12ec4SLe Tan     return VTD_CAP_SAGAW_MASK & s->cap &
5821da12ec4SLe Tan            (1ULL << (level - 2 + VTD_CAP_SAGAW_SHIFT));
5831da12ec4SLe Tan }
5841da12ec4SLe Tan 
5851da12ec4SLe Tan /* Get the page-table level that hardware should use for the second-level
5861da12ec4SLe Tan  * page-table walk from the Address Width field of context-entry.
5871da12ec4SLe Tan  */
5881da12ec4SLe Tan static inline uint32_t vtd_get_level_from_context_entry(VTDContextEntry *ce)
5891da12ec4SLe Tan {
5901da12ec4SLe Tan     return 2 + (ce->hi & VTD_CONTEXT_ENTRY_AW);
5911da12ec4SLe Tan }
5921da12ec4SLe Tan 
5931da12ec4SLe Tan static inline uint32_t vtd_get_agaw_from_context_entry(VTDContextEntry *ce)
5941da12ec4SLe Tan {
5951da12ec4SLe Tan     return 30 + (ce->hi & VTD_CONTEXT_ENTRY_AW) * 9;
5961da12ec4SLe Tan }
5971da12ec4SLe Tan 
598f06a696dSPeter Xu static inline uint64_t vtd_iova_limit(VTDContextEntry *ce)
599f06a696dSPeter Xu {
600f06a696dSPeter Xu     uint32_t ce_agaw = vtd_get_agaw_from_context_entry(ce);
601f06a696dSPeter Xu     return 1ULL << MIN(ce_agaw, VTD_MGAW);
602f06a696dSPeter Xu }
603f06a696dSPeter Xu 
604f06a696dSPeter Xu /* Return true if IOVA passes range check, otherwise false. */
605f06a696dSPeter Xu static inline bool vtd_iova_range_check(uint64_t iova, VTDContextEntry *ce)
606f06a696dSPeter Xu {
607f06a696dSPeter Xu     /*
608f06a696dSPeter Xu      * Check if @iova is above 2^X-1, where X is the minimum of MGAW
609f06a696dSPeter Xu      * in CAP_REG and AW in context-entry.
610f06a696dSPeter Xu      */
611f06a696dSPeter Xu     return !(iova & ~(vtd_iova_limit(ce) - 1));
612f06a696dSPeter Xu }
613f06a696dSPeter Xu 
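/*
 * Example (illustrative): a context-entry with AW == 1 gives
 * vtd_get_agaw_from_context_entry() == 39, so, assuming VTD_MGAW >= 39,
 * vtd_iova_limit() is 2^39 and an iova passes the range check only if it
 * is below 2^39.
 */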
6141da12ec4SLe Tan static const uint64_t vtd_paging_entry_rsvd_field[] = {
6151da12ec4SLe Tan     [0] = ~0ULL,
6161da12ec4SLe Tan     /* For non-large pages */
6171da12ec4SLe Tan     [1] = 0x800ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
6181da12ec4SLe Tan     [2] = 0x800ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
6191da12ec4SLe Tan     [3] = 0x800ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
6201da12ec4SLe Tan     [4] = 0x880ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
6211da12ec4SLe Tan     /* For large page */
6221da12ec4SLe Tan     [5] = 0x800ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
6231da12ec4SLe Tan     [6] = 0x1ff800ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
6241da12ec4SLe Tan     [7] = 0x3ffff800ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
6251da12ec4SLe Tan     [8] = 0x880ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
6261da12ec4SLe Tan };
6271da12ec4SLe Tan 
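/*
 * Indexing convention for the table above, as consumed by
 * vtd_slpte_nonzero_rsvd() below: entries [1]..[4] are the reserved-bit
 * masks for ordinary paging entries at levels 1..4, while entries
 * [5]..[8] (i.e. level + 4) are used when the PS bit marks a large page
 * at that level.
 */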
6281da12ec4SLe Tan static bool vtd_slpte_nonzero_rsvd(uint64_t slpte, uint32_t level)
6291da12ec4SLe Tan {
6301da12ec4SLe Tan     if (slpte & VTD_SL_PT_PAGE_SIZE_MASK) {
6311da12ec4SLe Tan         /* Maybe large page */
6321da12ec4SLe Tan         return slpte & vtd_paging_entry_rsvd_field[level + 4];
6331da12ec4SLe Tan     } else {
6341da12ec4SLe Tan         return slpte & vtd_paging_entry_rsvd_field[level];
6351da12ec4SLe Tan     }
6361da12ec4SLe Tan }
6371da12ec4SLe Tan 
6386e905564SPeter Xu /* Given the @iova, get the relevant @slptep. @slpte_level will be the last
6391da12ec4SLe Tan  * level of the translation; it can be used to decide the size of a large page.
6401da12ec4SLe Tan  */
6416e905564SPeter Xu static int vtd_iova_to_slpte(VTDContextEntry *ce, uint64_t iova, bool is_write,
6421da12ec4SLe Tan                              uint64_t *slptep, uint32_t *slpte_level,
6431da12ec4SLe Tan                              bool *reads, bool *writes)
6441da12ec4SLe Tan {
6451da12ec4SLe Tan     dma_addr_t addr = vtd_get_slpt_base_from_context(ce);
6461da12ec4SLe Tan     uint32_t level = vtd_get_level_from_context_entry(ce);
6471da12ec4SLe Tan     uint32_t offset;
6481da12ec4SLe Tan     uint64_t slpte;
6491da12ec4SLe Tan     uint64_t access_right_check;
6501da12ec4SLe Tan 
651f06a696dSPeter Xu     if (!vtd_iova_range_check(iova, ce)) {
6526e905564SPeter Xu         VTD_DPRINTF(GENERAL, "error: iova 0x%"PRIx64 " exceeds limits", iova);
6531da12ec4SLe Tan         return -VTD_FR_ADDR_BEYOND_MGAW;
6541da12ec4SLe Tan     }
6551da12ec4SLe Tan 
6561da12ec4SLe Tan     /* FIXME: what is the Atomics request here? */
6571da12ec4SLe Tan     access_right_check = is_write ? VTD_SL_W : VTD_SL_R;
6581da12ec4SLe Tan 
6591da12ec4SLe Tan     while (true) {
6606e905564SPeter Xu         offset = vtd_iova_level_offset(iova, level);
6611da12ec4SLe Tan         slpte = vtd_get_slpte(addr, offset);
6621da12ec4SLe Tan 
6631da12ec4SLe Tan         if (slpte == (uint64_t)-1) {
6641da12ec4SLe Tan             VTD_DPRINTF(GENERAL, "error: fail to access second-level paging "
6656e905564SPeter Xu                         "entry at level %"PRIu32 " for iova 0x%"PRIx64,
6666e905564SPeter Xu                         level, iova);
6671da12ec4SLe Tan             if (level == vtd_get_level_from_context_entry(ce)) {
6681da12ec4SLe Tan                 /* Invalid programming of context-entry */
6691da12ec4SLe Tan                 return -VTD_FR_CONTEXT_ENTRY_INV;
6701da12ec4SLe Tan             } else {
6711da12ec4SLe Tan                 return -VTD_FR_PAGING_ENTRY_INV;
6721da12ec4SLe Tan             }
6731da12ec4SLe Tan         }
6741da12ec4SLe Tan         *reads = (*reads) && (slpte & VTD_SL_R);
6751da12ec4SLe Tan         *writes = (*writes) && (slpte & VTD_SL_W);
6761da12ec4SLe Tan         if (!(slpte & access_right_check)) {
6771da12ec4SLe Tan             VTD_DPRINTF(GENERAL, "error: lack of %s permission for "
6786e905564SPeter Xu                         "iova 0x%"PRIx64 " slpte 0x%"PRIx64,
6796e905564SPeter Xu                         (is_write ? "write" : "read"), iova, slpte);
6801da12ec4SLe Tan             return is_write ? -VTD_FR_WRITE : -VTD_FR_READ;
6811da12ec4SLe Tan         }
6821da12ec4SLe Tan         if (vtd_slpte_nonzero_rsvd(slpte, level)) {
6831da12ec4SLe Tan             VTD_DPRINTF(GENERAL, "error: non-zero reserved field in second "
6841da12ec4SLe Tan                         "level paging entry level %"PRIu32 " slpte 0x%"PRIx64,
6851da12ec4SLe Tan                         level, slpte);
6861da12ec4SLe Tan             return -VTD_FR_PAGING_ENTRY_RSVD;
6871da12ec4SLe Tan         }
6881da12ec4SLe Tan 
6891da12ec4SLe Tan         if (vtd_is_last_slpte(slpte, level)) {
6901da12ec4SLe Tan             *slptep = slpte;
6911da12ec4SLe Tan             *slpte_level = level;
6921da12ec4SLe Tan             return 0;
6931da12ec4SLe Tan         }
6941da12ec4SLe Tan         addr = vtd_get_slpte_addr(slpte);
6951da12ec4SLe Tan         level--;
6961da12ec4SLe Tan     }
6971da12ec4SLe Tan }
6981da12ec4SLe Tan 
699f06a696dSPeter Xu typedef int (*vtd_page_walk_hook)(IOMMUTLBEntry *entry, void *private);
700f06a696dSPeter Xu 
701f06a696dSPeter Xu /**
702f06a696dSPeter Xu  * vtd_page_walk_level - walk over specific level for IOVA range
703f06a696dSPeter Xu  *
704f06a696dSPeter Xu  * @addr: base GPA addr to start the walk
705f06a696dSPeter Xu  * @start: IOVA range start address
706f06a696dSPeter Xu  * @end: IOVA range end address (start <= addr < end)
707f06a696dSPeter Xu  * @hook_fn: hook func to be called on each detected page
708f06a696dSPeter Xu  * @private: private data to be passed into hook func
709f06a696dSPeter Xu  * @read: whether parent level has read permission
710f06a696dSPeter Xu  * @write: whether parent level has write permission
711f06a696dSPeter Xu  * @notify_unmap: whether we should notify on invalid (unmapped) entries
712f06a696dSPeter Xu  */
713f06a696dSPeter Xu static int vtd_page_walk_level(dma_addr_t addr, uint64_t start,
714f06a696dSPeter Xu                                uint64_t end, vtd_page_walk_hook hook_fn,
715f06a696dSPeter Xu                                void *private, uint32_t level,
716f06a696dSPeter Xu                                bool read, bool write, bool notify_unmap)
717f06a696dSPeter Xu {
718f06a696dSPeter Xu     bool read_cur, write_cur, entry_valid;
719f06a696dSPeter Xu     uint32_t offset;
720f06a696dSPeter Xu     uint64_t slpte;
721f06a696dSPeter Xu     uint64_t subpage_size, subpage_mask;
722f06a696dSPeter Xu     IOMMUTLBEntry entry;
723f06a696dSPeter Xu     uint64_t iova = start;
724f06a696dSPeter Xu     uint64_t iova_next;
725f06a696dSPeter Xu     int ret = 0;
726f06a696dSPeter Xu 
727f06a696dSPeter Xu     trace_vtd_page_walk_level(addr, level, start, end);
728f06a696dSPeter Xu 
729f06a696dSPeter Xu     subpage_size = 1ULL << vtd_slpt_level_shift(level);
730f06a696dSPeter Xu     subpage_mask = vtd_slpt_level_page_mask(level);
731f06a696dSPeter Xu 
732f06a696dSPeter Xu     while (iova < end) {
733f06a696dSPeter Xu         iova_next = (iova & subpage_mask) + subpage_size;
734f06a696dSPeter Xu 
735f06a696dSPeter Xu         offset = vtd_iova_level_offset(iova, level);
736f06a696dSPeter Xu         slpte = vtd_get_slpte(addr, offset);
737f06a696dSPeter Xu 
738f06a696dSPeter Xu         if (slpte == (uint64_t)-1) {
739f06a696dSPeter Xu             trace_vtd_page_walk_skip_read(iova, iova_next);
740f06a696dSPeter Xu             goto next;
741f06a696dSPeter Xu         }
742f06a696dSPeter Xu 
743f06a696dSPeter Xu         if (vtd_slpte_nonzero_rsvd(slpte, level)) {
744f06a696dSPeter Xu             trace_vtd_page_walk_skip_reserve(iova, iova_next);
745f06a696dSPeter Xu             goto next;
746f06a696dSPeter Xu         }
747f06a696dSPeter Xu 
748f06a696dSPeter Xu         /* Permissions are stacked with parents' */
749f06a696dSPeter Xu         read_cur = read && (slpte & VTD_SL_R);
750f06a696dSPeter Xu         write_cur = write && (slpte & VTD_SL_W);
751f06a696dSPeter Xu 
752f06a696dSPeter Xu         /*
753f06a696dSPeter Xu          * As long as we have either read or write permission, this is a
754f06a696dSPeter Xu          * valid entry. The rule works for both page entries and page
755f06a696dSPeter Xu          * table entries.
756f06a696dSPeter Xu          */
757f06a696dSPeter Xu         entry_valid = read_cur | write_cur;
758f06a696dSPeter Xu 
759f06a696dSPeter Xu         if (vtd_is_last_slpte(slpte, level)) {
760f06a696dSPeter Xu             entry.target_as = &address_space_memory;
761f06a696dSPeter Xu             entry.iova = iova & subpage_mask;
762f06a696dSPeter Xu             /* NOTE: this is only meaningful if entry_valid == true */
763f06a696dSPeter Xu             entry.translated_addr = vtd_get_slpte_addr(slpte);
764f06a696dSPeter Xu             entry.addr_mask = ~subpage_mask;
765f06a696dSPeter Xu             entry.perm = IOMMU_ACCESS_FLAG(read_cur, write_cur);
766f06a696dSPeter Xu             if (!entry_valid && !notify_unmap) {
767f06a696dSPeter Xu                 trace_vtd_page_walk_skip_perm(iova, iova_next);
768f06a696dSPeter Xu                 goto next;
769f06a696dSPeter Xu             }
770f06a696dSPeter Xu             trace_vtd_page_walk_one(level, entry.iova, entry.translated_addr,
771f06a696dSPeter Xu                                     entry.addr_mask, entry.perm);
772f06a696dSPeter Xu             if (hook_fn) {
773f06a696dSPeter Xu                 ret = hook_fn(&entry, private);
774f06a696dSPeter Xu                 if (ret < 0) {
775f06a696dSPeter Xu                     return ret;
776f06a696dSPeter Xu                 }
777f06a696dSPeter Xu             }
778f06a696dSPeter Xu         } else {
779f06a696dSPeter Xu             if (!entry_valid) {
780f06a696dSPeter Xu                 trace_vtd_page_walk_skip_perm(iova, iova_next);
781f06a696dSPeter Xu                 goto next;
782f06a696dSPeter Xu             }
783f06a696dSPeter Xu             ret = vtd_page_walk_level(vtd_get_slpte_addr(slpte), iova,
784f06a696dSPeter Xu                                       MIN(iova_next, end), hook_fn, private,
785f06a696dSPeter Xu                                       level - 1, read_cur, write_cur,
786f06a696dSPeter Xu                                       notify_unmap);
787f06a696dSPeter Xu             if (ret < 0) {
788f06a696dSPeter Xu                 return ret;
789f06a696dSPeter Xu             }
790f06a696dSPeter Xu         }
791f06a696dSPeter Xu 
792f06a696dSPeter Xu next:
793f06a696dSPeter Xu         iova = iova_next;
794f06a696dSPeter Xu     }
795f06a696dSPeter Xu 
796f06a696dSPeter Xu     return 0;
797f06a696dSPeter Xu }
798f06a696dSPeter Xu 
799f06a696dSPeter Xu /**
800f06a696dSPeter Xu  * vtd_page_walk - walk specific IOVA range, and call the hook
801f06a696dSPeter Xu  *
802f06a696dSPeter Xu  * @ce: context entry to walk upon
803f06a696dSPeter Xu  * @start: IOVA address to start the walk
804f06a696dSPeter Xu  * @end: IOVA range end address (start <= addr < end)
805f06a696dSPeter Xu  * @hook_fn: the hook to be called for each detected area
806f06a696dSPeter Xu  * @private: private data for the hook function
807f06a696dSPeter Xu  */
808f06a696dSPeter Xu static int vtd_page_walk(VTDContextEntry *ce, uint64_t start, uint64_t end,
809dd4d607eSPeter Xu                          vtd_page_walk_hook hook_fn, void *private,
810dd4d607eSPeter Xu                          bool notify_unmap)
811f06a696dSPeter Xu {
812f06a696dSPeter Xu     dma_addr_t addr = vtd_get_slpt_base_from_context(ce);
813f06a696dSPeter Xu     uint32_t level = vtd_get_level_from_context_entry(ce);
814f06a696dSPeter Xu 
815f06a696dSPeter Xu     if (!vtd_iova_range_check(start, ce)) {
816f06a696dSPeter Xu         return -VTD_FR_ADDR_BEYOND_MGAW;
817f06a696dSPeter Xu     }
818f06a696dSPeter Xu 
819f06a696dSPeter Xu     if (!vtd_iova_range_check(end, ce)) {
820f06a696dSPeter Xu         /* Fix end so that it reaches the maximum */
821f06a696dSPeter Xu         end = vtd_iova_limit(ce);
822f06a696dSPeter Xu     }
823f06a696dSPeter Xu 
824f06a696dSPeter Xu     return vtd_page_walk_level(addr, start, end, hook_fn, private,
825dd4d607eSPeter Xu                                level, true, true, notify_unmap);
826f06a696dSPeter Xu }
827f06a696dSPeter Xu 
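/*
 * Usage sketch (hypothetical caller, not part of this file): a hook that
 * merely counts the valid mappings in the whole address range of a
 * context entry could look like
 *
 *     static int vtd_count_one(IOMMUTLBEntry *entry, void *private)
 *     {
 *         (*(uint64_t *)private)++;
 *         return 0;
 *     }
 *
 *     uint64_t count = 0;
 *     vtd_page_walk(&ce, 0, vtd_iova_limit(&ce), vtd_count_one, &count, false);
 *
 * A negative return value from the hook aborts the walk and is propagated
 * back to the caller of vtd_page_walk().
 */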
8281da12ec4SLe Tan /* Map a device to its corresponding domain (context-entry) */
8291da12ec4SLe Tan static int vtd_dev_to_context_entry(IntelIOMMUState *s, uint8_t bus_num,
8301da12ec4SLe Tan                                     uint8_t devfn, VTDContextEntry *ce)
8311da12ec4SLe Tan {
8321da12ec4SLe Tan     VTDRootEntry re;
8331da12ec4SLe Tan     int ret_fr;
8341da12ec4SLe Tan 
8351da12ec4SLe Tan     ret_fr = vtd_get_root_entry(s, bus_num, &re);
8361da12ec4SLe Tan     if (ret_fr) {
8371da12ec4SLe Tan         return ret_fr;
8381da12ec4SLe Tan     }
8391da12ec4SLe Tan 
8401da12ec4SLe Tan     if (!vtd_root_entry_present(&re)) {
8416c441e1dSPeter Xu         /* Not an error - it's okay not to have a root entry. */
8426c441e1dSPeter Xu         trace_vtd_re_not_present(bus_num);
8431da12ec4SLe Tan         return -VTD_FR_ROOT_ENTRY_P;
8441da12ec4SLe Tan     } else if (re.rsvd || (re.val & VTD_ROOT_ENTRY_RSVD)) {
8456c441e1dSPeter Xu         trace_vtd_re_invalid(re.rsvd, re.val);
8461da12ec4SLe Tan         return -VTD_FR_ROOT_ENTRY_RSVD;
8471da12ec4SLe Tan     }
8481da12ec4SLe Tan 
8491da12ec4SLe Tan     ret_fr = vtd_get_context_entry_from_root(&re, devfn, ce);
8501da12ec4SLe Tan     if (ret_fr) {
8511da12ec4SLe Tan         return ret_fr;
8521da12ec4SLe Tan     }
8531da12ec4SLe Tan 
8541da12ec4SLe Tan     if (!vtd_context_entry_present(ce)) {
8556c441e1dSPeter Xu         /* Not an error - it's okay not to have a context entry. */
8566c441e1dSPeter Xu         trace_vtd_ce_not_present(bus_num, devfn);
8571da12ec4SLe Tan         return -VTD_FR_CONTEXT_ENTRY_P;
8581da12ec4SLe Tan     } else if ((ce->hi & VTD_CONTEXT_ENTRY_RSVD_HI) ||
8591da12ec4SLe Tan                (ce->lo & VTD_CONTEXT_ENTRY_RSVD_LO)) {
8606c441e1dSPeter Xu         trace_vtd_ce_invalid(ce->hi, ce->lo);
8611da12ec4SLe Tan         return -VTD_FR_CONTEXT_ENTRY_RSVD;
8621da12ec4SLe Tan     }
8631da12ec4SLe Tan     /* Check if the programming of context-entry is valid */
8641da12ec4SLe Tan     if (!vtd_is_level_supported(s, vtd_get_level_from_context_entry(ce))) {
8656c441e1dSPeter Xu         trace_vtd_ce_invalid(ce->hi, ce->lo);
8661da12ec4SLe Tan         return -VTD_FR_CONTEXT_ENTRY_INV;
867554f5e16SJason Wang     } else {
868554f5e16SJason Wang         switch (ce->lo & VTD_CONTEXT_ENTRY_TT) {
869554f5e16SJason Wang         case VTD_CONTEXT_TT_MULTI_LEVEL:
870554f5e16SJason Wang             /* fall through */
871554f5e16SJason Wang         case VTD_CONTEXT_TT_DEV_IOTLB:
872554f5e16SJason Wang             break;
873554f5e16SJason Wang         default:
8746c441e1dSPeter Xu             trace_vtd_ce_invalid(ce->hi, ce->lo);
8751da12ec4SLe Tan             return -VTD_FR_CONTEXT_ENTRY_INV;
8761da12ec4SLe Tan         }
877554f5e16SJason Wang     }
8781da12ec4SLe Tan     return 0;
8791da12ec4SLe Tan }
8801da12ec4SLe Tan 
8811da12ec4SLe Tan static inline uint16_t vtd_make_source_id(uint8_t bus_num, uint8_t devfn)
8821da12ec4SLe Tan {
8831da12ec4SLe Tan     return ((bus_num & 0xffUL) << 8) | (devfn & 0xffUL);
8841da12ec4SLe Tan }
8851da12ec4SLe Tan 
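/*
 * Example: bus 0x12 and devfn 0x34 (slot 6, function 4) yield source_id
 * 0x1234, matching the PCI requester-id layout of bus[15:8], device[7:3],
 * function[2:0].
 */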
8861da12ec4SLe Tan static const bool vtd_qualified_faults[] = {
8871da12ec4SLe Tan     [VTD_FR_RESERVED] = false,
8881da12ec4SLe Tan     [VTD_FR_ROOT_ENTRY_P] = false,
8891da12ec4SLe Tan     [VTD_FR_CONTEXT_ENTRY_P] = true,
8901da12ec4SLe Tan     [VTD_FR_CONTEXT_ENTRY_INV] = true,
8911da12ec4SLe Tan     [VTD_FR_ADDR_BEYOND_MGAW] = true,
8921da12ec4SLe Tan     [VTD_FR_WRITE] = true,
8931da12ec4SLe Tan     [VTD_FR_READ] = true,
8941da12ec4SLe Tan     [VTD_FR_PAGING_ENTRY_INV] = true,
8951da12ec4SLe Tan     [VTD_FR_ROOT_TABLE_INV] = false,
8961da12ec4SLe Tan     [VTD_FR_CONTEXT_TABLE_INV] = false,
8971da12ec4SLe Tan     [VTD_FR_ROOT_ENTRY_RSVD] = false,
8981da12ec4SLe Tan     [VTD_FR_PAGING_ENTRY_RSVD] = true,
8991da12ec4SLe Tan     [VTD_FR_CONTEXT_ENTRY_TT] = true,
9001da12ec4SLe Tan     [VTD_FR_RESERVED_ERR] = false,
9011da12ec4SLe Tan     [VTD_FR_MAX] = false,
9021da12ec4SLe Tan };
9031da12ec4SLe Tan 
9041da12ec4SLe Tan /* A fault condition is "qualified" if it is reported to software only when
9051da12ec4SLe Tan  * the FPD field in the context-entry used to process the faulting request
9061da12ec4SLe Tan  * is 0.
9071da12ec4SLe Tan  */
9081da12ec4SLe Tan static inline bool vtd_is_qualified_fault(VTDFaultReason fault)
9091da12ec4SLe Tan {
9101da12ec4SLe Tan     return vtd_qualified_faults[fault];
9111da12ec4SLe Tan }
9121da12ec4SLe Tan 
9131da12ec4SLe Tan static inline bool vtd_is_interrupt_addr(hwaddr addr)
9141da12ec4SLe Tan {
9151da12ec4SLe Tan     return VTD_INTERRUPT_ADDR_FIRST <= addr && addr <= VTD_INTERRUPT_ADDR_LAST;
9161da12ec4SLe Tan }
9171da12ec4SLe Tan 
9181da12ec4SLe Tan /* Map the device to its context-entry, then walk the paging structures to
9191da12ec4SLe Tan  * do an IOMMU translation.
92079e2b9aeSPaolo Bonzini  *
92179e2b9aeSPaolo Bonzini  * Called from RCU critical section.
92279e2b9aeSPaolo Bonzini  *
9231da12ec4SLe Tan  * @bus_num: The bus number
9241da12ec4SLe Tan  * @devfn: The devfn, which is the combined device and function number
9251da12ec4SLe Tan  * @is_write: The access is a write operation
9261da12ec4SLe Tan  * @entry: IOMMUTLBEntry that contain the addr to be translated and result
9271da12ec4SLe Tan  */
9287df953bdSKnut Omang static void vtd_do_iommu_translate(VTDAddressSpace *vtd_as, PCIBus *bus,
9291da12ec4SLe Tan                                    uint8_t devfn, hwaddr addr, bool is_write,
9301da12ec4SLe Tan                                    IOMMUTLBEntry *entry)
9311da12ec4SLe Tan {
932d92fa2dcSLe Tan     IntelIOMMUState *s = vtd_as->iommu_state;
9331da12ec4SLe Tan     VTDContextEntry ce;
9347df953bdSKnut Omang     uint8_t bus_num = pci_bus_num(bus);
935d92fa2dcSLe Tan     VTDContextCacheEntry *cc_entry = &vtd_as->context_cache_entry;
936d66b969bSJason Wang     uint64_t slpte, page_mask;
9371da12ec4SLe Tan     uint32_t level;
9381da12ec4SLe Tan     uint16_t source_id = vtd_make_source_id(bus_num, devfn);
9391da12ec4SLe Tan     int ret_fr;
9401da12ec4SLe Tan     bool is_fpd_set = false;
9411da12ec4SLe Tan     bool reads = true;
9421da12ec4SLe Tan     bool writes = true;
943b5a280c0SLe Tan     VTDIOTLBEntry *iotlb_entry;
9441da12ec4SLe Tan 
945046ab7e9SPeter Xu     /*
946046ab7e9SPeter Xu      * We have a standalone memory region for interrupt addresses; we
947046ab7e9SPeter Xu      * should never receive translation requests in this region.
9481da12ec4SLe Tan      */
949046ab7e9SPeter Xu     assert(!vtd_is_interrupt_addr(addr));
950046ab7e9SPeter Xu 
951b5a280c0SLe Tan     /* Try to fetch slpte from the IOTLB */
952b5a280c0SLe Tan     iotlb_entry = vtd_lookup_iotlb(s, source_id, addr);
953b5a280c0SLe Tan     if (iotlb_entry) {
9546c441e1dSPeter Xu         trace_vtd_iotlb_page_hit(source_id, addr, iotlb_entry->slpte,
9556c441e1dSPeter Xu                                  iotlb_entry->domain_id);
956b5a280c0SLe Tan         slpte = iotlb_entry->slpte;
957b5a280c0SLe Tan         reads = iotlb_entry->read_flags;
958b5a280c0SLe Tan         writes = iotlb_entry->write_flags;
959d66b969bSJason Wang         page_mask = iotlb_entry->mask;
960b5a280c0SLe Tan         goto out;
961b5a280c0SLe Tan     }
962d92fa2dcSLe Tan     /* Try to fetch context-entry from cache first */
963d92fa2dcSLe Tan     if (cc_entry->context_cache_gen == s->context_cache_gen) {
9646c441e1dSPeter Xu         trace_vtd_iotlb_cc_hit(bus_num, devfn, cc_entry->context_entry.hi,
9656c441e1dSPeter Xu                                cc_entry->context_entry.lo,
9666c441e1dSPeter Xu                                cc_entry->context_cache_gen);
967d92fa2dcSLe Tan         ce = cc_entry->context_entry;
968d92fa2dcSLe Tan         is_fpd_set = ce.lo & VTD_CONTEXT_ENTRY_FPD;
969d92fa2dcSLe Tan     } else {
9701da12ec4SLe Tan         ret_fr = vtd_dev_to_context_entry(s, bus_num, devfn, &ce);
9711da12ec4SLe Tan         is_fpd_set = ce.lo & VTD_CONTEXT_ENTRY_FPD;
9721da12ec4SLe Tan         if (ret_fr) {
9731da12ec4SLe Tan             ret_fr = -ret_fr;
9741da12ec4SLe Tan             if (is_fpd_set && vtd_is_qualified_fault(ret_fr)) {
9756c441e1dSPeter Xu                 trace_vtd_fault_disabled();
9761da12ec4SLe Tan             } else {
9771da12ec4SLe Tan                 vtd_report_dmar_fault(s, source_id, addr, ret_fr, is_write);
9781da12ec4SLe Tan             }
9791da12ec4SLe Tan             return;
9801da12ec4SLe Tan         }
981d92fa2dcSLe Tan         /* Update context-cache */
9826c441e1dSPeter Xu         trace_vtd_iotlb_cc_update(bus_num, devfn, ce.hi, ce.lo,
9836c441e1dSPeter Xu                                   cc_entry->context_cache_gen,
9846c441e1dSPeter Xu                                   s->context_cache_gen);
985d92fa2dcSLe Tan         cc_entry->context_entry = ce;
986d92fa2dcSLe Tan         cc_entry->context_cache_gen = s->context_cache_gen;
987d92fa2dcSLe Tan     }
9881da12ec4SLe Tan 
9896e905564SPeter Xu     ret_fr = vtd_iova_to_slpte(&ce, addr, is_write, &slpte, &level,
9901da12ec4SLe Tan                                &reads, &writes);
9911da12ec4SLe Tan     if (ret_fr) {
9921da12ec4SLe Tan         ret_fr = -ret_fr;
9931da12ec4SLe Tan         if (is_fpd_set && vtd_is_qualified_fault(ret_fr)) {
9946c441e1dSPeter Xu             trace_vtd_fault_disabled();
9951da12ec4SLe Tan         } else {
9961da12ec4SLe Tan             vtd_report_dmar_fault(s, source_id, addr, ret_fr, is_write);
9971da12ec4SLe Tan         }
9981da12ec4SLe Tan         return;
9991da12ec4SLe Tan     }
10001da12ec4SLe Tan 
1001d66b969bSJason Wang     page_mask = vtd_slpt_level_page_mask(level);
1002b5a280c0SLe Tan     vtd_update_iotlb(s, source_id, VTD_CONTEXT_ENTRY_DID(ce.hi), addr, slpte,
1003d66b969bSJason Wang                      reads, writes, level);
1004b5a280c0SLe Tan out:
1005d66b969bSJason Wang     entry->iova = addr & page_mask;
1006d66b969bSJason Wang     entry->translated_addr = vtd_get_slpte_addr(slpte) & page_mask;
1007d66b969bSJason Wang     entry->addr_mask = ~page_mask;
10081da12ec4SLe Tan     entry->perm = (writes ? 2 : 0) + (reads ? 1 : 0);
10091da12ec4SLe Tan }
10101da12ec4SLe Tan 
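/*
 * The translation above proceeds in three stages: an IOTLB lookup keyed
 * on (source_id, iova, level), then the per-device context-entry cache,
 * and only on a miss the full root/context/paging-structure walk via
 * vtd_iova_to_slpte().  The resulting perm value packs the permissions as
 * bit 0 = read allowed, bit 1 = write allowed, matching QEMU's
 * IOMMUAccessFlags encoding.
 */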
10111da12ec4SLe Tan static void vtd_root_table_setup(IntelIOMMUState *s)
10121da12ec4SLe Tan {
10131da12ec4SLe Tan     s->root = vtd_get_quad_raw(s, DMAR_RTADDR_REG);
10141da12ec4SLe Tan     s->root_extended = s->root & VTD_RTADDR_RTT;
10151da12ec4SLe Tan     s->root &= VTD_RTADDR_ADDR_MASK;
10161da12ec4SLe Tan 
10171da12ec4SLe Tan     VTD_DPRINTF(CSR, "root_table addr 0x%"PRIx64 " %s", s->root,
10181da12ec4SLe Tan                 (s->root_extended ? "(extended)" : ""));
10191da12ec4SLe Tan }
10201da12ec4SLe Tan 
102102a2cbc8SPeter Xu static void vtd_iec_notify_all(IntelIOMMUState *s, bool global,
102202a2cbc8SPeter Xu                                uint32_t index, uint32_t mask)
102302a2cbc8SPeter Xu {
102402a2cbc8SPeter Xu     x86_iommu_iec_notify_all(X86_IOMMU_DEVICE(s), global, index, mask);
102502a2cbc8SPeter Xu }
102602a2cbc8SPeter Xu 
1027a5861439SPeter Xu static void vtd_interrupt_remap_table_setup(IntelIOMMUState *s)
1028a5861439SPeter Xu {
1029a5861439SPeter Xu     uint64_t value = 0;
1030a5861439SPeter Xu     value = vtd_get_quad_raw(s, DMAR_IRTA_REG);
1031a5861439SPeter Xu     s->intr_size = 1UL << ((value & VTD_IRTA_SIZE_MASK) + 1);
1032a5861439SPeter Xu     s->intr_root = value & VTD_IRTA_ADDR_MASK;
103328589311SJan Kiszka     s->intr_eime = value & VTD_IRTA_EIME;
1034a5861439SPeter Xu 
103502a2cbc8SPeter Xu     /* Notify global invalidation */
103602a2cbc8SPeter Xu     vtd_iec_notify_all(s, true, 0, 0);
1037a5861439SPeter Xu 
1038a5861439SPeter Xu     VTD_DPRINTF(CSR, "int remap table addr 0x%"PRIx64 " size %"PRIu32,
1039a5861439SPeter Xu                 s->intr_root, s->intr_size);
1040a5861439SPeter Xu }
1041a5861439SPeter Xu 
1042dd4d607eSPeter Xu static void vtd_iommu_replay_all(IntelIOMMUState *s)
1043dd4d607eSPeter Xu {
1044dd4d607eSPeter Xu     IntelIOMMUNotifierNode *node;
1045dd4d607eSPeter Xu 
1046dd4d607eSPeter Xu     QLIST_FOREACH(node, &s->notifiers_list, next) {
1047dd4d607eSPeter Xu         memory_region_iommu_replay_all(&node->vtd_as->iommu);
1048dd4d607eSPeter Xu     }
1049dd4d607eSPeter Xu }
1050dd4d607eSPeter Xu 
1051d92fa2dcSLe Tan static void vtd_context_global_invalidate(IntelIOMMUState *s)
1052d92fa2dcSLe Tan {
1053bc535e59SPeter Xu     trace_vtd_inv_desc_cc_global();
1054d92fa2dcSLe Tan     s->context_cache_gen++;
1055d92fa2dcSLe Tan     if (s->context_cache_gen == VTD_CONTEXT_CACHE_GEN_MAX) {
1056d92fa2dcSLe Tan         vtd_reset_context_cache(s);
1057d92fa2dcSLe Tan     }
1058dd4d607eSPeter Xu     /*
1059dd4d607eSPeter Xu      * From VT-d spec 6.5.2.1, a global context entry invalidation
1060dd4d607eSPeter Xu      * should be followed by an IOTLB global invalidation, so we should
1061dd4d607eSPeter Xu      * be safe even without this. However, let's replay the region as
1062dd4d607eSPeter Xu      * well to be safer, and come back here when we need finer tuning
1063dd4d607eSPeter Xu      * of the VT-d emulation code.
1064dd4d607eSPeter Xu      */
1065dd4d607eSPeter Xu     vtd_iommu_replay_all(s);
1066d92fa2dcSLe Tan }
1067d92fa2dcSLe Tan 
10687df953bdSKnut Omang 
10697df953bdSKnut Omang /* Find the VTDBus structure currently associated with a given bus number.
10707df953bdSKnut Omang  */
10717df953bdSKnut Omang static VTDBus *vtd_find_as_from_bus_num(IntelIOMMUState *s, uint8_t bus_num)
10727df953bdSKnut Omang {
10737df953bdSKnut Omang     VTDBus *vtd_bus = s->vtd_as_by_bus_num[bus_num];
10747df953bdSKnut Omang     if (!vtd_bus) {
10757df953bdSKnut Omang         /* Iterate over the registered buses to find the one which
10767df953bdSKnut Omang          * currently holds this bus number, and update the bus_num lookup table.
10777df953bdSKnut Omang          */
10787df953bdSKnut Omang         GHashTableIter iter;
10797df953bdSKnut Omang 
10807df953bdSKnut Omang         g_hash_table_iter_init(&iter, s->vtd_as_by_busptr);
10817df953bdSKnut Omang         while (g_hash_table_iter_next(&iter, NULL, (void **)&vtd_bus)) {
10827df953bdSKnut Omang             if (pci_bus_num(vtd_bus->bus) == bus_num) {
10837df953bdSKnut Omang                 s->vtd_as_by_bus_num[bus_num] = vtd_bus;
10847df953bdSKnut Omang                 return vtd_bus;
10857df953bdSKnut Omang             }
10867df953bdSKnut Omang         }
10877df953bdSKnut Omang     }
10887df953bdSKnut Omang     return vtd_bus;
10897df953bdSKnut Omang }
10907df953bdSKnut Omang 
1091d92fa2dcSLe Tan /* Do a context-cache device-selective invalidation.
1092d92fa2dcSLe Tan  * @func_mask: FM field after shifting
1093d92fa2dcSLe Tan  */
1094d92fa2dcSLe Tan static void vtd_context_device_invalidate(IntelIOMMUState *s,
1095d92fa2dcSLe Tan                                           uint16_t source_id,
1096d92fa2dcSLe Tan                                           uint16_t func_mask)
1097d92fa2dcSLe Tan {
1098d92fa2dcSLe Tan     uint16_t mask;
10997df953bdSKnut Omang     VTDBus *vtd_bus;
1100d92fa2dcSLe Tan     VTDAddressSpace *vtd_as;
1101bc535e59SPeter Xu     uint8_t bus_n, devfn;
1102d92fa2dcSLe Tan     uint16_t devfn_it;
1103d92fa2dcSLe Tan 
1104bc535e59SPeter Xu     trace_vtd_inv_desc_cc_devices(source_id, func_mask);
1105bc535e59SPeter Xu 
1106d92fa2dcSLe Tan     switch (func_mask & 3) {
1107d92fa2dcSLe Tan     case 0:
1108d92fa2dcSLe Tan         mask = 0;   /* No bits in the SID field masked */
1109d92fa2dcSLe Tan         break;
1110d92fa2dcSLe Tan     case 1:
1111d92fa2dcSLe Tan         mask = 4;   /* Mask bit 2 in the SID field */
1112d92fa2dcSLe Tan         break;
1113d92fa2dcSLe Tan     case 2:
1114d92fa2dcSLe Tan         mask = 6;   /* Mask bit 2:1 in the SID field */
1115d92fa2dcSLe Tan         break;
1116d92fa2dcSLe Tan     case 3:
1117d92fa2dcSLe Tan         mask = 7;   /* Mask bit 2:0 in the SID field */
1118d92fa2dcSLe Tan         break;
1119d92fa2dcSLe Tan     }
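    /* Invert: masked-out function bits become 0 so they are ignored in the devfn comparison below */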
11206cb99accSPeter Xu     mask = ~mask;
1121bc535e59SPeter Xu 
1122bc535e59SPeter Xu     bus_n = VTD_SID_TO_BUS(source_id);
1123bc535e59SPeter Xu     vtd_bus = vtd_find_as_from_bus_num(s, bus_n);
11247df953bdSKnut Omang     if (vtd_bus) {
1125d92fa2dcSLe Tan         devfn = VTD_SID_TO_DEVFN(source_id);
112604af0e18SPeter Xu         for (devfn_it = 0; devfn_it < X86_IOMMU_PCI_DEVFN_MAX; ++devfn_it) {
11277df953bdSKnut Omang             vtd_as = vtd_bus->dev_as[devfn_it];
1128d92fa2dcSLe Tan             if (vtd_as && ((devfn_it & mask) == (devfn & mask))) {
1129bc535e59SPeter Xu                 trace_vtd_inv_desc_cc_device(bus_n, VTD_PCI_SLOT(devfn_it),
1130bc535e59SPeter Xu                                              VTD_PCI_FUNC(devfn_it));
1131d92fa2dcSLe Tan                 vtd_as->context_cache_entry.context_cache_gen = 0;
1132dd4d607eSPeter Xu                 /*
1133dd4d607eSPeter Xu                  * So a device is moving out of (or moving into) a
1134dd4d607eSPeter Xu                  * domain; a replay() suits here to notify all the
1135dd4d607eSPeter Xu                  * registered IOMMU_NOTIFIER_MAP notifiers about this
1136dd4d607eSPeter Xu                  * change. This does no harm even if we have no such
1137dd4d607eSPeter Xu                  * notifier registered - the IOMMU notification
1138dd4d607eSPeter Xu                  * framework will skip MAP notifications in that
1139dd4d607eSPeter Xu                  * case.
1140dd4d607eSPeter Xu                  */
1141dd4d607eSPeter Xu                 memory_region_iommu_replay_all(&vtd_as->iommu);
1142d92fa2dcSLe Tan             }
1143d92fa2dcSLe Tan         }
1144d92fa2dcSLe Tan     }
1145d92fa2dcSLe Tan }
1146d92fa2dcSLe Tan 
11471da12ec4SLe Tan /* Context-cache invalidation
11481da12ec4SLe Tan  * Returns the Context Actual Invalidation Granularity.
11491da12ec4SLe Tan  * @val: the content of the CCMD_REG
11501da12ec4SLe Tan  */
11511da12ec4SLe Tan static uint64_t vtd_context_cache_invalidate(IntelIOMMUState *s, uint64_t val)
11521da12ec4SLe Tan {
11531da12ec4SLe Tan     uint64_t caig;
11541da12ec4SLe Tan     uint64_t type = val & VTD_CCMD_CIRG_MASK;
11551da12ec4SLe Tan 
11561da12ec4SLe Tan     switch (type) {
11571da12ec4SLe Tan     case VTD_CCMD_DOMAIN_INVL:
1158d92fa2dcSLe Tan         VTD_DPRINTF(INV, "domain-selective invalidation domain 0x%"PRIx16,
1159d92fa2dcSLe Tan                     (uint16_t)VTD_CCMD_DID(val));
1160d92fa2dcSLe Tan         /* Fall through */
1161d92fa2dcSLe Tan     case VTD_CCMD_GLOBAL_INVL:
1162d92fa2dcSLe Tan         VTD_DPRINTF(INV, "global invalidation");
1163d92fa2dcSLe Tan         caig = VTD_CCMD_GLOBAL_INVL_A;
1164d92fa2dcSLe Tan         vtd_context_global_invalidate(s);
11651da12ec4SLe Tan         break;
11661da12ec4SLe Tan 
11671da12ec4SLe Tan     case VTD_CCMD_DEVICE_INVL:
11681da12ec4SLe Tan         caig = VTD_CCMD_DEVICE_INVL_A;
1169d92fa2dcSLe Tan         vtd_context_device_invalidate(s, VTD_CCMD_SID(val), VTD_CCMD_FM(val));
11701da12ec4SLe Tan         break;
11711da12ec4SLe Tan 
11721da12ec4SLe Tan     default:
1173d92fa2dcSLe Tan         VTD_DPRINTF(GENERAL, "error: invalid granularity");
11741da12ec4SLe Tan         caig = 0;
11751da12ec4SLe Tan     }
11761da12ec4SLe Tan     return caig;
11771da12ec4SLe Tan }
11781da12ec4SLe Tan 
1179b5a280c0SLe Tan static void vtd_iotlb_global_invalidate(IntelIOMMUState *s)
1180b5a280c0SLe Tan {
11816c441e1dSPeter Xu     trace_vtd_iotlb_reset("global invalidation received");
1182b5a280c0SLe Tan     vtd_reset_iotlb(s);
1183dd4d607eSPeter Xu     vtd_iommu_replay_all(s);
1184b5a280c0SLe Tan }
1185b5a280c0SLe Tan 
1186b5a280c0SLe Tan static void vtd_iotlb_domain_invalidate(IntelIOMMUState *s, uint16_t domain_id)
1187b5a280c0SLe Tan {
1188dd4d607eSPeter Xu     IntelIOMMUNotifierNode *node;
1189dd4d607eSPeter Xu     VTDContextEntry ce;
1190dd4d607eSPeter Xu     VTDAddressSpace *vtd_as;
1191dd4d607eSPeter Xu 
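    /* Drop all cached IOTLB entries that belong to this domain */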
1192b5a280c0SLe Tan     g_hash_table_foreach_remove(s->iotlb, vtd_hash_remove_by_domain,
1193b5a280c0SLe Tan                                 &domain_id);
1194dd4d607eSPeter Xu 
1195dd4d607eSPeter Xu     QLIST_FOREACH(node, &s->notifiers_list, next) {
1196dd4d607eSPeter Xu         vtd_as = node->vtd_as;
1197dd4d607eSPeter Xu         if (!vtd_dev_to_context_entry(s, pci_bus_num(vtd_as->bus),
1198dd4d607eSPeter Xu                                       vtd_as->devfn, &ce) &&
1199dd4d607eSPeter Xu             domain_id == VTD_CONTEXT_ENTRY_DID(ce.hi)) {
1200dd4d607eSPeter Xu             memory_region_iommu_replay_all(&vtd_as->iommu);
1201dd4d607eSPeter Xu         }
1202dd4d607eSPeter Xu     }
1203dd4d607eSPeter Xu }
1204dd4d607eSPeter Xu 
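/* Page-walk hook: forward each mapping found to the IOMMU memory region passed in @private */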
1205dd4d607eSPeter Xu static int vtd_page_invalidate_notify_hook(IOMMUTLBEntry *entry,
1206dd4d607eSPeter Xu                                            void *private)
1207dd4d607eSPeter Xu {
1208dd4d607eSPeter Xu     memory_region_notify_iommu((MemoryRegion *)private, *entry);
1209dd4d607eSPeter Xu     return 0;
1210dd4d607eSPeter Xu }
1211dd4d607eSPeter Xu 
1212dd4d607eSPeter Xu static void vtd_iotlb_page_invalidate_notify(IntelIOMMUState *s,
1213dd4d607eSPeter Xu                                            uint16_t domain_id, hwaddr addr,
1214dd4d607eSPeter Xu                                            uint8_t am)
1215dd4d607eSPeter Xu {
1216dd4d607eSPeter Xu     IntelIOMMUNotifierNode *node;
1217dd4d607eSPeter Xu     VTDContextEntry ce;
1218dd4d607eSPeter Xu     int ret;
1219dd4d607eSPeter Xu 
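    /* Walk the invalidated range (2^am pages) for every address space attached
     * to the affected domain and notify its registered IOMMU notifiers.
     */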
1220dd4d607eSPeter Xu     QLIST_FOREACH(node, &(s->notifiers_list), next) {
1221dd4d607eSPeter Xu         VTDAddressSpace *vtd_as = node->vtd_as;
1222dd4d607eSPeter Xu         ret = vtd_dev_to_context_entry(s, pci_bus_num(vtd_as->bus),
1223dd4d607eSPeter Xu                                        vtd_as->devfn, &ce);
1224dd4d607eSPeter Xu         if (!ret && domain_id == VTD_CONTEXT_ENTRY_DID(ce.hi)) {
1225dd4d607eSPeter Xu             vtd_page_walk(&ce, addr, addr + (1 << am) * VTD_PAGE_SIZE,
1226dd4d607eSPeter Xu                           vtd_page_invalidate_notify_hook,
1227dd4d607eSPeter Xu                           (void *)&vtd_as->iommu, true);
1228dd4d607eSPeter Xu         }
1229dd4d607eSPeter Xu     }
1230b5a280c0SLe Tan }
1231b5a280c0SLe Tan 
1232b5a280c0SLe Tan static void vtd_iotlb_page_invalidate(IntelIOMMUState *s, uint16_t domain_id,
1233b5a280c0SLe Tan                                       hwaddr addr, uint8_t am)
1234b5a280c0SLe Tan {
1235b5a280c0SLe Tan     VTDIOTLBPageInvInfo info;
1236b5a280c0SLe Tan 
1237b5a280c0SLe Tan     assert(am <= VTD_MAMV);
1238b5a280c0SLe Tan     info.domain_id = domain_id;
1239d66b969bSJason Wang     info.addr = addr;
1240b5a280c0SLe Tan     info.mask = ~((1 << am) - 1);
1241b5a280c0SLe Tan     g_hash_table_foreach_remove(s->iotlb, vtd_hash_remove_by_page, &info);
1242dd4d607eSPeter Xu     vtd_iotlb_page_invalidate_notify(s, domain_id, addr, am);
1243b5a280c0SLe Tan }
1244b5a280c0SLe Tan 
12451da12ec4SLe Tan /* Flush IOTLB
12461da12ec4SLe Tan  * Returns the IOTLB Actual Invalidation Granularity.
12471da12ec4SLe Tan  * @val: the content of the IOTLB_REG
12481da12ec4SLe Tan  */
12491da12ec4SLe Tan static uint64_t vtd_iotlb_flush(IntelIOMMUState *s, uint64_t val)
12501da12ec4SLe Tan {
12511da12ec4SLe Tan     uint64_t iaig;
12521da12ec4SLe Tan     uint64_t type = val & VTD_TLB_FLUSH_GRANU_MASK;
1253b5a280c0SLe Tan     uint16_t domain_id;
1254b5a280c0SLe Tan     hwaddr addr;
1255b5a280c0SLe Tan     uint8_t am;
12561da12ec4SLe Tan 
12571da12ec4SLe Tan     switch (type) {
12581da12ec4SLe Tan     case VTD_TLB_GLOBAL_FLUSH:
1259b5a280c0SLe Tan         VTD_DPRINTF(INV, "global invalidation");
12601da12ec4SLe Tan         iaig = VTD_TLB_GLOBAL_FLUSH_A;
1261b5a280c0SLe Tan         vtd_iotlb_global_invalidate(s);
12621da12ec4SLe Tan         break;
12631da12ec4SLe Tan 
12641da12ec4SLe Tan     case VTD_TLB_DSI_FLUSH:
1265b5a280c0SLe Tan         domain_id = VTD_TLB_DID(val);
1266b5a280c0SLe Tan         VTD_DPRINTF(INV, "domain-selective invalidation domain 0x%"PRIx16,
1267b5a280c0SLe Tan                     domain_id);
12681da12ec4SLe Tan         iaig = VTD_TLB_DSI_FLUSH_A;
1269b5a280c0SLe Tan         vtd_iotlb_domain_invalidate(s, domain_id);
12701da12ec4SLe Tan         break;
12711da12ec4SLe Tan 
12721da12ec4SLe Tan     case VTD_TLB_PSI_FLUSH:
1273b5a280c0SLe Tan         domain_id = VTD_TLB_DID(val);
1274b5a280c0SLe Tan         addr = vtd_get_quad_raw(s, DMAR_IVA_REG);
1275b5a280c0SLe Tan         am = VTD_IVA_AM(addr);
1276b5a280c0SLe Tan         addr = VTD_IVA_ADDR(addr);
1277b5a280c0SLe Tan         VTD_DPRINTF(INV, "page-selective invalidation domain 0x%"PRIx16
1278b5a280c0SLe Tan                     " addr 0x%"PRIx64 " mask %"PRIu8, domain_id, addr, am);
1279b5a280c0SLe Tan         if (am > VTD_MAMV) {
1280b5a280c0SLe Tan             VTD_DPRINTF(GENERAL, "error: supported max address mask value is "
1281b5a280c0SLe Tan                         "%"PRIu8, (uint8_t)VTD_MAMV);
1282b5a280c0SLe Tan             iaig = 0;
1283b5a280c0SLe Tan             break;
1284b5a280c0SLe Tan         }
12851da12ec4SLe Tan         iaig = VTD_TLB_PSI_FLUSH_A;
1286b5a280c0SLe Tan         vtd_iotlb_page_invalidate(s, domain_id, addr, am);
12871da12ec4SLe Tan         break;
12881da12ec4SLe Tan 
12891da12ec4SLe Tan     default:
1290b5a280c0SLe Tan         VTD_DPRINTF(GENERAL, "error: invalid granularity");
12911da12ec4SLe Tan         iaig = 0;
12921da12ec4SLe Tan     }
12931da12ec4SLe Tan     return iaig;
12941da12ec4SLe Tan }
12951da12ec4SLe Tan 
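/* Queued Invalidation may only be enabled while the Invalidation Queue Tail is zero */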
1296ed7b8fbcSLe Tan static inline bool vtd_queued_inv_enable_check(IntelIOMMUState *s)
1297ed7b8fbcSLe Tan {
1298ed7b8fbcSLe Tan     return s->iq_tail == 0;
1299ed7b8fbcSLe Tan }
1300ed7b8fbcSLe Tan 
1301ed7b8fbcSLe Tan static inline bool vtd_queued_inv_disable_check(IntelIOMMUState *s)
1302ed7b8fbcSLe Tan {
1303ed7b8fbcSLe Tan     return s->qi_enabled && (s->iq_tail == s->iq_head) &&
1304ed7b8fbcSLe Tan            (s->iq_last_desc_type == VTD_INV_DESC_WAIT);
1305ed7b8fbcSLe Tan }
1306ed7b8fbcSLe Tan 
1307ed7b8fbcSLe Tan static void vtd_handle_gcmd_qie(IntelIOMMUState *s, bool en)
1308ed7b8fbcSLe Tan {
1309ed7b8fbcSLe Tan     uint64_t iqa_val = vtd_get_quad_raw(s, DMAR_IQA_REG);
1310ed7b8fbcSLe Tan 
1311ed7b8fbcSLe Tan     VTD_DPRINTF(INV, "Queued Invalidation Enable %s", (en ? "on" : "off"));
1312ed7b8fbcSLe Tan     if (en) {
1313ed7b8fbcSLe Tan         if (vtd_queued_inv_enable_check(s)) {
1314ed7b8fbcSLe Tan             s->iq = iqa_val & VTD_IQA_IQA_MASK;
1315ed7b8fbcSLe Tan             /* 2^(x+8) entries */
1316ed7b8fbcSLe Tan             s->iq_size = 1UL << ((iqa_val & VTD_IQA_QS) + 8);
1317ed7b8fbcSLe Tan             s->qi_enabled = true;
1318ed7b8fbcSLe Tan             VTD_DPRINTF(INV, "DMAR_IQA_REG 0x%"PRIx64, iqa_val);
1319ed7b8fbcSLe Tan             VTD_DPRINTF(INV, "Invalidation Queue addr 0x%"PRIx64 " size %d",
1320ed7b8fbcSLe Tan                         s->iq, s->iq_size);
1321ed7b8fbcSLe Tan             /* Ok - report back to driver */
1322ed7b8fbcSLe Tan             vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_QIES);
1323ed7b8fbcSLe Tan         } else {
1324ed7b8fbcSLe Tan             VTD_DPRINTF(GENERAL, "error: can't enable Queued Invalidation: "
1325ed7b8fbcSLe Tan                         "tail %"PRIu16, s->iq_tail);
1326ed7b8fbcSLe Tan         }
1327ed7b8fbcSLe Tan     } else {
1328ed7b8fbcSLe Tan         if (vtd_queued_inv_disable_check(s)) {
1329ed7b8fbcSLe Tan             /* disable Queued Invalidation */
1330ed7b8fbcSLe Tan             vtd_set_quad_raw(s, DMAR_IQH_REG, 0);
1331ed7b8fbcSLe Tan             s->iq_head = 0;
1332ed7b8fbcSLe Tan             s->qi_enabled = false;
1333ed7b8fbcSLe Tan             /* Ok - report back to driver */
1334ed7b8fbcSLe Tan             vtd_set_clear_mask_long(s, DMAR_GSTS_REG, VTD_GSTS_QIES, 0);
1335ed7b8fbcSLe Tan         } else {
1336ed7b8fbcSLe Tan             VTD_DPRINTF(GENERAL, "error: can't disable Queued Invalidation: "
1337ed7b8fbcSLe Tan                         "head %"PRIu16 ", tail %"PRIu16
1338ed7b8fbcSLe Tan                         ", last_descriptor %"PRIu8,
1339ed7b8fbcSLe Tan                         s->iq_head, s->iq_tail, s->iq_last_desc_type);
1340ed7b8fbcSLe Tan         }
1341ed7b8fbcSLe Tan     }
1342ed7b8fbcSLe Tan }
1343ed7b8fbcSLe Tan 
13441da12ec4SLe Tan /* Set Root Table Pointer */
13451da12ec4SLe Tan static void vtd_handle_gcmd_srtp(IntelIOMMUState *s)
13461da12ec4SLe Tan {
13471da12ec4SLe Tan     VTD_DPRINTF(CSR, "set Root Table Pointer");
13481da12ec4SLe Tan 
13491da12ec4SLe Tan     vtd_root_table_setup(s);
13501da12ec4SLe Tan     /* Ok - report back to driver */
13511da12ec4SLe Tan     vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_RTPS);
13521da12ec4SLe Tan }
13531da12ec4SLe Tan 
1354a5861439SPeter Xu /* Set Interrupt Remap Table Pointer */
1355a5861439SPeter Xu static void vtd_handle_gcmd_sirtp(IntelIOMMUState *s)
1356a5861439SPeter Xu {
1357a5861439SPeter Xu     VTD_DPRINTF(CSR, "set Interrupt Remap Table Pointer");
1358a5861439SPeter Xu 
1359a5861439SPeter Xu     vtd_interrupt_remap_table_setup(s);
1360a5861439SPeter Xu     /* Ok - report back to driver */
1361a5861439SPeter Xu     vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_IRTPS);
1362a5861439SPeter Xu }
1363a5861439SPeter Xu 
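/* Switch a device between DMA remapping (the iommu region) and pass-through
 * (the system-memory alias), depending on whether DMAR is currently enabled.
 */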
1364558e0024SPeter Xu static void vtd_switch_address_space(VTDAddressSpace *as)
1365558e0024SPeter Xu {
1366558e0024SPeter Xu     assert(as);
1367558e0024SPeter Xu 
1368558e0024SPeter Xu     trace_vtd_switch_address_space(pci_bus_num(as->bus),
1369558e0024SPeter Xu                                    VTD_PCI_SLOT(as->devfn),
1370558e0024SPeter Xu                                    VTD_PCI_FUNC(as->devfn),
1371558e0024SPeter Xu                                    as->iommu_state->dmar_enabled);
1372558e0024SPeter Xu 
1373558e0024SPeter Xu     /* Turn off first then on the other */
1374558e0024SPeter Xu     if (as->iommu_state->dmar_enabled) {
1375558e0024SPeter Xu         memory_region_set_enabled(&as->sys_alias, false);
1376558e0024SPeter Xu         memory_region_set_enabled(&as->iommu, true);
1377558e0024SPeter Xu     } else {
1378558e0024SPeter Xu         memory_region_set_enabled(&as->iommu, false);
1379558e0024SPeter Xu         memory_region_set_enabled(&as->sys_alias, true);
1380558e0024SPeter Xu     }
1381558e0024SPeter Xu }
1382558e0024SPeter Xu 
1383558e0024SPeter Xu static void vtd_switch_address_space_all(IntelIOMMUState *s)
1384558e0024SPeter Xu {
1385558e0024SPeter Xu     GHashTableIter iter;
1386558e0024SPeter Xu     VTDBus *vtd_bus;
1387558e0024SPeter Xu     int i;
1388558e0024SPeter Xu 
1389558e0024SPeter Xu     g_hash_table_iter_init(&iter, s->vtd_as_by_busptr);
1390558e0024SPeter Xu     while (g_hash_table_iter_next(&iter, NULL, (void **)&vtd_bus)) {
1391558e0024SPeter Xu         for (i = 0; i < X86_IOMMU_PCI_DEVFN_MAX; i++) {
1392558e0024SPeter Xu             if (!vtd_bus->dev_as[i]) {
1393558e0024SPeter Xu                 continue;
1394558e0024SPeter Xu             }
1395558e0024SPeter Xu             vtd_switch_address_space(vtd_bus->dev_as[i]);
1396558e0024SPeter Xu         }
1397558e0024SPeter Xu     }
1398558e0024SPeter Xu }
1399558e0024SPeter Xu 
14001da12ec4SLe Tan /* Handle Translation Enable/Disable */
14011da12ec4SLe Tan static void vtd_handle_gcmd_te(IntelIOMMUState *s, bool en)
14021da12ec4SLe Tan {
1403558e0024SPeter Xu     if (s->dmar_enabled == en) {
1404558e0024SPeter Xu         return;
1405558e0024SPeter Xu     }
1406558e0024SPeter Xu 
14071da12ec4SLe Tan     VTD_DPRINTF(CSR, "Translation Enable %s", (en ? "on" : "off"));
14081da12ec4SLe Tan 
14091da12ec4SLe Tan     if (en) {
14101da12ec4SLe Tan         s->dmar_enabled = true;
14111da12ec4SLe Tan         /* Ok - report back to driver */
14121da12ec4SLe Tan         vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_TES);
14131da12ec4SLe Tan     } else {
14141da12ec4SLe Tan         s->dmar_enabled = false;
14151da12ec4SLe Tan 
14161da12ec4SLe Tan         /* Clear the index of Fault Recording Register */
14171da12ec4SLe Tan         s->next_frcd_reg = 0;
14181da12ec4SLe Tan         /* Ok - report back to driver */
14191da12ec4SLe Tan         vtd_set_clear_mask_long(s, DMAR_GSTS_REG, VTD_GSTS_TES, 0);
14201da12ec4SLe Tan     }
1421558e0024SPeter Xu 
1422558e0024SPeter Xu     vtd_switch_address_space_all(s);
14231da12ec4SLe Tan }
14241da12ec4SLe Tan 
142580de52baSPeter Xu /* Handle Interrupt Remap Enable/Disable */
142680de52baSPeter Xu static void vtd_handle_gcmd_ire(IntelIOMMUState *s, bool en)
142780de52baSPeter Xu {
142880de52baSPeter Xu     VTD_DPRINTF(CSR, "Interrupt Remap Enable %s", (en ? "on" : "off"));
142980de52baSPeter Xu 
143080de52baSPeter Xu     if (en) {
143180de52baSPeter Xu         s->intr_enabled = true;
143280de52baSPeter Xu         /* Ok - report back to driver */
143380de52baSPeter Xu         vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_IRES);
143480de52baSPeter Xu     } else {
143580de52baSPeter Xu         s->intr_enabled = false;
143680de52baSPeter Xu         /* Ok - report back to driver */
143780de52baSPeter Xu         vtd_set_clear_mask_long(s, DMAR_GSTS_REG, VTD_GSTS_IRES, 0);
143880de52baSPeter Xu     }
143980de52baSPeter Xu }
144080de52baSPeter Xu 
14411da12ec4SLe Tan /* Handle write to Global Command Register */
14421da12ec4SLe Tan static void vtd_handle_gcmd_write(IntelIOMMUState *s)
14431da12ec4SLe Tan {
14441da12ec4SLe Tan     uint32_t status = vtd_get_long_raw(s, DMAR_GSTS_REG);
14451da12ec4SLe Tan     uint32_t val = vtd_get_long_raw(s, DMAR_GCMD_REG);
14461da12ec4SLe Tan     uint32_t changed = status ^ val;
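    /* 'changed' holds the command bits whose requested value differs from the
     * current status, i.e. the features being toggled by this write.
     */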
14471da12ec4SLe Tan 
14481da12ec4SLe Tan     VTD_DPRINTF(CSR, "value 0x%"PRIx32 " status 0x%"PRIx32, val, status);
14491da12ec4SLe Tan     if (changed & VTD_GCMD_TE) {
14501da12ec4SLe Tan         /* Translation enable/disable */
14511da12ec4SLe Tan         vtd_handle_gcmd_te(s, val & VTD_GCMD_TE);
14521da12ec4SLe Tan     }
14531da12ec4SLe Tan     if (val & VTD_GCMD_SRTP) {
14541da12ec4SLe Tan         /* Set/update the root-table pointer */
14551da12ec4SLe Tan         vtd_handle_gcmd_srtp(s);
14561da12ec4SLe Tan     }
1457ed7b8fbcSLe Tan     if (changed & VTD_GCMD_QIE) {
1458ed7b8fbcSLe Tan         /* Queued Invalidation Enable */
1459ed7b8fbcSLe Tan         vtd_handle_gcmd_qie(s, val & VTD_GCMD_QIE);
1460ed7b8fbcSLe Tan     }
1461a5861439SPeter Xu     if (val & VTD_GCMD_SIRTP) {
1462a5861439SPeter Xu         /* Set/update the interrupt remapping root-table pointer */
1463a5861439SPeter Xu         vtd_handle_gcmd_sirtp(s);
1464a5861439SPeter Xu     }
146580de52baSPeter Xu     if (changed & VTD_GCMD_IRE) {
146680de52baSPeter Xu         /* Interrupt remap enable/disable */
146780de52baSPeter Xu         vtd_handle_gcmd_ire(s, val & VTD_GCMD_IRE);
146880de52baSPeter Xu     }
14691da12ec4SLe Tan }
14701da12ec4SLe Tan 
14711da12ec4SLe Tan /* Handle write to Context Command Register */
14721da12ec4SLe Tan static void vtd_handle_ccmd_write(IntelIOMMUState *s)
14731da12ec4SLe Tan {
14741da12ec4SLe Tan     uint64_t ret;
14751da12ec4SLe Tan     uint64_t val = vtd_get_quad_raw(s, DMAR_CCMD_REG);
14761da12ec4SLe Tan 
14771da12ec4SLe Tan     /* Context-cache invalidation request */
14781da12ec4SLe Tan     if (val & VTD_CCMD_ICC) {
1479ed7b8fbcSLe Tan         if (s->qi_enabled) {
1480ed7b8fbcSLe Tan             VTD_DPRINTF(GENERAL, "error: Queued Invalidation enabled, "
1481ed7b8fbcSLe Tan                         "should not use register-based invalidation");
1482ed7b8fbcSLe Tan             return;
1483ed7b8fbcSLe Tan         }
14841da12ec4SLe Tan         ret = vtd_context_cache_invalidate(s, val);
14851da12ec4SLe Tan         /* Invalidation completed. Change something to show */
14861da12ec4SLe Tan         vtd_set_clear_mask_quad(s, DMAR_CCMD_REG, VTD_CCMD_ICC, 0ULL);
14871da12ec4SLe Tan         ret = vtd_set_clear_mask_quad(s, DMAR_CCMD_REG, VTD_CCMD_CAIG_MASK,
14881da12ec4SLe Tan                                       ret);
14891da12ec4SLe Tan         VTD_DPRINTF(INV, "CCMD_REG write-back val: 0x%"PRIx64, ret);
14901da12ec4SLe Tan     }
14911da12ec4SLe Tan }
14921da12ec4SLe Tan 
14931da12ec4SLe Tan /* Handle write to IOTLB Invalidation Register */
14941da12ec4SLe Tan static void vtd_handle_iotlb_write(IntelIOMMUState *s)
14951da12ec4SLe Tan {
14961da12ec4SLe Tan     uint64_t ret;
14971da12ec4SLe Tan     uint64_t val = vtd_get_quad_raw(s, DMAR_IOTLB_REG);
14981da12ec4SLe Tan 
14991da12ec4SLe Tan     /* IOTLB invalidation request */
15001da12ec4SLe Tan     if (val & VTD_TLB_IVT) {
1501ed7b8fbcSLe Tan         if (s->qi_enabled) {
1502ed7b8fbcSLe Tan             VTD_DPRINTF(GENERAL, "error: Queued Invalidation enabled, "
1503ed7b8fbcSLe Tan                         "should not use register-based invalidation");
1504ed7b8fbcSLe Tan             return;
1505ed7b8fbcSLe Tan         }
15061da12ec4SLe Tan         ret = vtd_iotlb_flush(s, val);
15071da12ec4SLe Tan         /* Invalidation completed. Change something to show */
15081da12ec4SLe Tan         vtd_set_clear_mask_quad(s, DMAR_IOTLB_REG, VTD_TLB_IVT, 0ULL);
15091da12ec4SLe Tan         ret = vtd_set_clear_mask_quad(s, DMAR_IOTLB_REG,
15101da12ec4SLe Tan                                       VTD_TLB_FLUSH_GRANU_MASK_A, ret);
15111da12ec4SLe Tan         VTD_DPRINTF(INV, "IOTLB_REG write-back val: 0x%"PRIx64, ret);
15121da12ec4SLe Tan     }
15131da12ec4SLe Tan }
15141da12ec4SLe Tan 
1515ed7b8fbcSLe Tan /* Fetch an Invalidation Descriptor from the Invalidation Queue */
1516ed7b8fbcSLe Tan static bool vtd_get_inv_desc(dma_addr_t base_addr, uint32_t offset,
1517ed7b8fbcSLe Tan                              VTDInvDesc *inv_desc)
1518ed7b8fbcSLe Tan {
1519ed7b8fbcSLe Tan     dma_addr_t addr = base_addr + offset * sizeof(*inv_desc);
1520ed7b8fbcSLe Tan     if (dma_memory_read(&address_space_memory, addr, inv_desc,
1521ed7b8fbcSLe Tan         sizeof(*inv_desc))) {
1522ed7b8fbcSLe Tan         VTD_DPRINTF(GENERAL, "error: fail to fetch Invalidation Descriptor "
1523ed7b8fbcSLe Tan                     "base_addr 0x%"PRIx64 " offset %"PRIu32, base_addr, offset);
1524ed7b8fbcSLe Tan         inv_desc->lo = 0;
1525ed7b8fbcSLe Tan         inv_desc->hi = 0;
1526ed7b8fbcSLe Tan 
1527ed7b8fbcSLe Tan         return false;
1528ed7b8fbcSLe Tan     }
1529ed7b8fbcSLe Tan     inv_desc->lo = le64_to_cpu(inv_desc->lo);
1530ed7b8fbcSLe Tan     inv_desc->hi = le64_to_cpu(inv_desc->hi);
1531ed7b8fbcSLe Tan     return true;
1532ed7b8fbcSLe Tan }
1533ed7b8fbcSLe Tan 
1534ed7b8fbcSLe Tan static bool vtd_process_wait_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc)
1535ed7b8fbcSLe Tan {
1536ed7b8fbcSLe Tan     if ((inv_desc->hi & VTD_INV_DESC_WAIT_RSVD_HI) ||
1537ed7b8fbcSLe Tan         (inv_desc->lo & VTD_INV_DESC_WAIT_RSVD_LO)) {
1538bc535e59SPeter Xu         trace_vtd_inv_desc_wait_invalid(inv_desc->hi, inv_desc->lo);
1539ed7b8fbcSLe Tan         return false;
1540ed7b8fbcSLe Tan     }
1541ed7b8fbcSLe Tan     if (inv_desc->lo & VTD_INV_DESC_WAIT_SW) {
1542ed7b8fbcSLe Tan         /* Status Write */
1543ed7b8fbcSLe Tan         uint32_t status_data = (uint32_t)(inv_desc->lo >>
1544ed7b8fbcSLe Tan                                VTD_INV_DESC_WAIT_DATA_SHIFT);
1545ed7b8fbcSLe Tan 
1546ed7b8fbcSLe Tan         assert(!(inv_desc->lo & VTD_INV_DESC_WAIT_IF));
1547ed7b8fbcSLe Tan 
1548ed7b8fbcSLe Tan         /* FIXME: need to be masked with HAW? */
1549ed7b8fbcSLe Tan         dma_addr_t status_addr = inv_desc->hi;
1550bc535e59SPeter Xu         trace_vtd_inv_desc_wait_sw(status_addr, status_data);
1551ed7b8fbcSLe Tan         status_data = cpu_to_le32(status_data);
1552ed7b8fbcSLe Tan         if (dma_memory_write(&address_space_memory, status_addr, &status_data,
1553ed7b8fbcSLe Tan                              sizeof(status_data))) {
1554bc535e59SPeter Xu             trace_vtd_inv_desc_wait_write_fail(inv_desc->hi, inv_desc->lo);
1555ed7b8fbcSLe Tan             return false;
1556ed7b8fbcSLe Tan         }
1557ed7b8fbcSLe Tan     } else if (inv_desc->lo & VTD_INV_DESC_WAIT_IF) {
1558ed7b8fbcSLe Tan         /* Interrupt flag */
1559ed7b8fbcSLe Tan         vtd_generate_completion_event(s);
1560ed7b8fbcSLe Tan     } else {
1561bc535e59SPeter Xu         trace_vtd_inv_desc_wait_invalid(inv_desc->hi, inv_desc->lo);
1562ed7b8fbcSLe Tan         return false;
1563ed7b8fbcSLe Tan     }
1564ed7b8fbcSLe Tan     return true;
1565ed7b8fbcSLe Tan }
1566ed7b8fbcSLe Tan 
1567d92fa2dcSLe Tan static bool vtd_process_context_cache_desc(IntelIOMMUState *s,
1568d92fa2dcSLe Tan                                            VTDInvDesc *inv_desc)
1569d92fa2dcSLe Tan {
1570bc535e59SPeter Xu     uint16_t sid, fmask;
1571bc535e59SPeter Xu 
1572d92fa2dcSLe Tan     if ((inv_desc->lo & VTD_INV_DESC_CC_RSVD) || inv_desc->hi) {
1573bc535e59SPeter Xu         trace_vtd_inv_desc_cc_invalid(inv_desc->hi, inv_desc->lo);
1574d92fa2dcSLe Tan         return false;
1575d92fa2dcSLe Tan     }
1576d92fa2dcSLe Tan     switch (inv_desc->lo & VTD_INV_DESC_CC_G) {
1577d92fa2dcSLe Tan     case VTD_INV_DESC_CC_DOMAIN:
1578bc535e59SPeter Xu         trace_vtd_inv_desc_cc_domain(
1579d92fa2dcSLe Tan             (uint16_t)VTD_INV_DESC_CC_DID(inv_desc->lo));
1580d92fa2dcSLe Tan         /* Fall through */
1581d92fa2dcSLe Tan     case VTD_INV_DESC_CC_GLOBAL:
1582d92fa2dcSLe Tan         vtd_context_global_invalidate(s);
1583d92fa2dcSLe Tan         break;
1584d92fa2dcSLe Tan 
1585d92fa2dcSLe Tan     case VTD_INV_DESC_CC_DEVICE:
1586bc535e59SPeter Xu         sid = VTD_INV_DESC_CC_SID(inv_desc->lo);
1587bc535e59SPeter Xu         fmask = VTD_INV_DESC_CC_FM(inv_desc->lo);
1588bc535e59SPeter Xu         vtd_context_device_invalidate(s, sid, fmask);
1589d92fa2dcSLe Tan         break;
1590d92fa2dcSLe Tan 
1591d92fa2dcSLe Tan     default:
1592bc535e59SPeter Xu         trace_vtd_inv_desc_cc_invalid(inv_desc->hi, inv_desc->lo);
1593d92fa2dcSLe Tan         return false;
1594d92fa2dcSLe Tan     }
1595d92fa2dcSLe Tan     return true;
1596d92fa2dcSLe Tan }
1597d92fa2dcSLe Tan 
1598b5a280c0SLe Tan static bool vtd_process_iotlb_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc)
1599b5a280c0SLe Tan {
1600b5a280c0SLe Tan     uint16_t domain_id;
1601b5a280c0SLe Tan     uint8_t am;
1602b5a280c0SLe Tan     hwaddr addr;
1603b5a280c0SLe Tan 
1604b5a280c0SLe Tan     if ((inv_desc->lo & VTD_INV_DESC_IOTLB_RSVD_LO) ||
1605b5a280c0SLe Tan         (inv_desc->hi & VTD_INV_DESC_IOTLB_RSVD_HI)) {
1606bc535e59SPeter Xu         trace_vtd_inv_desc_iotlb_invalid(inv_desc->hi, inv_desc->lo);
1607b5a280c0SLe Tan         return false;
1608b5a280c0SLe Tan     }
1609b5a280c0SLe Tan 
1610b5a280c0SLe Tan     switch (inv_desc->lo & VTD_INV_DESC_IOTLB_G) {
1611b5a280c0SLe Tan     case VTD_INV_DESC_IOTLB_GLOBAL:
1612bc535e59SPeter Xu         trace_vtd_inv_desc_iotlb_global();
1613b5a280c0SLe Tan         vtd_iotlb_global_invalidate(s);
1614b5a280c0SLe Tan         break;
1615b5a280c0SLe Tan 
1616b5a280c0SLe Tan     case VTD_INV_DESC_IOTLB_DOMAIN:
1617b5a280c0SLe Tan         domain_id = VTD_INV_DESC_IOTLB_DID(inv_desc->lo);
1618bc535e59SPeter Xu         trace_vtd_inv_desc_iotlb_domain(domain_id);
1619b5a280c0SLe Tan         vtd_iotlb_domain_invalidate(s, domain_id);
1620b5a280c0SLe Tan         break;
1621b5a280c0SLe Tan 
1622b5a280c0SLe Tan     case VTD_INV_DESC_IOTLB_PAGE:
1623b5a280c0SLe Tan         domain_id = VTD_INV_DESC_IOTLB_DID(inv_desc->lo);
1624b5a280c0SLe Tan         addr = VTD_INV_DESC_IOTLB_ADDR(inv_desc->hi);
1625b5a280c0SLe Tan         am = VTD_INV_DESC_IOTLB_AM(inv_desc->hi);
1626bc535e59SPeter Xu         trace_vtd_inv_desc_iotlb_pages(domain_id, addr, am);
1627b5a280c0SLe Tan         if (am > VTD_MAMV) {
1628bc535e59SPeter Xu             trace_vtd_inv_desc_iotlb_invalid(inv_desc->hi, inv_desc->lo);
1629b5a280c0SLe Tan             return false;
1630b5a280c0SLe Tan         }
1631b5a280c0SLe Tan         vtd_iotlb_page_invalidate(s, domain_id, addr, am);
1632b5a280c0SLe Tan         break;
1633b5a280c0SLe Tan 
1634b5a280c0SLe Tan     default:
1635bc535e59SPeter Xu         trace_vtd_inv_desc_iotlb_invalid(inv_desc->hi, inv_desc->lo);
1636b5a280c0SLe Tan         return false;
1637b5a280c0SLe Tan     }
1638b5a280c0SLe Tan     return true;
1639b5a280c0SLe Tan }
1640b5a280c0SLe Tan 
164102a2cbc8SPeter Xu static bool vtd_process_inv_iec_desc(IntelIOMMUState *s,
164202a2cbc8SPeter Xu                                      VTDInvDesc *inv_desc)
164302a2cbc8SPeter Xu {
164402a2cbc8SPeter Xu     VTD_DPRINTF(INV, "inv ir glob %d index %d mask %d",
164502a2cbc8SPeter Xu                 inv_desc->iec.granularity,
164602a2cbc8SPeter Xu                 inv_desc->iec.index,
164702a2cbc8SPeter Xu                 inv_desc->iec.index_mask);
164802a2cbc8SPeter Xu 
164902a2cbc8SPeter Xu     vtd_iec_notify_all(s, !inv_desc->iec.granularity,
165002a2cbc8SPeter Xu                        inv_desc->iec.index,
165102a2cbc8SPeter Xu                        inv_desc->iec.index_mask);
1652554f5e16SJason Wang     return true;
1653554f5e16SJason Wang }
165402a2cbc8SPeter Xu 
1655554f5e16SJason Wang static bool vtd_process_device_iotlb_desc(IntelIOMMUState *s,
1656554f5e16SJason Wang                                           VTDInvDesc *inv_desc)
1657554f5e16SJason Wang {
1658554f5e16SJason Wang     VTDAddressSpace *vtd_dev_as;
1659554f5e16SJason Wang     IOMMUTLBEntry entry;
1660554f5e16SJason Wang     struct VTDBus *vtd_bus;
1661554f5e16SJason Wang     hwaddr addr;
1662554f5e16SJason Wang     uint64_t sz;
1663554f5e16SJason Wang     uint16_t sid;
1664554f5e16SJason Wang     uint8_t devfn;
1665554f5e16SJason Wang     bool size;
1666554f5e16SJason Wang     uint8_t bus_num;
1667554f5e16SJason Wang 
1668554f5e16SJason Wang     addr = VTD_INV_DESC_DEVICE_IOTLB_ADDR(inv_desc->hi);
1669554f5e16SJason Wang     sid = VTD_INV_DESC_DEVICE_IOTLB_SID(inv_desc->lo);
1670554f5e16SJason Wang     devfn = sid & 0xff;
1671554f5e16SJason Wang     bus_num = sid >> 8;
1672554f5e16SJason Wang     size = VTD_INV_DESC_DEVICE_IOTLB_SIZE(inv_desc->hi);
1673554f5e16SJason Wang 
1674554f5e16SJason Wang     if ((inv_desc->lo & VTD_INV_DESC_DEVICE_IOTLB_RSVD_LO) ||
1675554f5e16SJason Wang         (inv_desc->hi & VTD_INV_DESC_DEVICE_IOTLB_RSVD_HI)) {
1676554f5e16SJason Wang         VTD_DPRINTF(GENERAL, "error: non-zero reserved field in Device "
1677554f5e16SJason Wang                     "IOTLB Invalidate Descriptor hi 0x%"PRIx64 " lo 0x%"PRIx64,
1678554f5e16SJason Wang                     inv_desc->hi, inv_desc->lo);
1679554f5e16SJason Wang         return false;
1680554f5e16SJason Wang     }
1681554f5e16SJason Wang 
1682554f5e16SJason Wang     vtd_bus = vtd_find_as_from_bus_num(s, bus_num);
1683554f5e16SJason Wang     if (!vtd_bus) {
1684554f5e16SJason Wang         goto done;
1685554f5e16SJason Wang     }
1686554f5e16SJason Wang 
1687554f5e16SJason Wang     vtd_dev_as = vtd_bus->dev_as[devfn];
1688554f5e16SJason Wang     if (!vtd_dev_as) {
1689554f5e16SJason Wang         goto done;
1690554f5e16SJason Wang     }
1691554f5e16SJason Wang 
169204eb6247SJason Wang     /* According to ATS spec table 2.4:
169304eb6247SJason Wang      * S = 0, bits 15:12 = xxxx     range size: 4K
169404eb6247SJason Wang      * S = 1, bits 15:12 = xxx0     range size: 8K
169504eb6247SJason Wang      * S = 1, bits 15:12 = xx01     range size: 16K
169604eb6247SJason Wang      * S = 1, bits 15:12 = x011     range size: 32K
169704eb6247SJason Wang      * S = 1, bits 15:12 = 0111     range size: 64K
169804eb6247SJason Wang      * ...
169904eb6247SJason Wang      */
1700554f5e16SJason Wang     if (size) {
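        /* The count of trailing 1 bits in (addr >> VTD_PAGE_SHIFT) selects the
         * power-of-two range size, per the ATS encoding above.
         */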
170104eb6247SJason Wang         sz = (VTD_PAGE_SIZE * 2) << cto64(addr >> VTD_PAGE_SHIFT);
1702554f5e16SJason Wang         addr &= ~(sz - 1);
1703554f5e16SJason Wang     } else {
1704554f5e16SJason Wang         sz = VTD_PAGE_SIZE;
1705554f5e16SJason Wang     }
1706554f5e16SJason Wang 
1707554f5e16SJason Wang     entry.target_as = &vtd_dev_as->as;
1708554f5e16SJason Wang     entry.addr_mask = sz - 1;
1709554f5e16SJason Wang     entry.iova = addr;
1710554f5e16SJason Wang     entry.perm = IOMMU_NONE;
1711554f5e16SJason Wang     entry.translated_addr = 0;
171210315b9bSJason Wang     memory_region_notify_iommu(&vtd_dev_as->iommu, entry);
1713554f5e16SJason Wang 
1714554f5e16SJason Wang done:
171502a2cbc8SPeter Xu     return true;
171602a2cbc8SPeter Xu }
171702a2cbc8SPeter Xu 
1718ed7b8fbcSLe Tan static bool vtd_process_inv_desc(IntelIOMMUState *s)
1719ed7b8fbcSLe Tan {
1720ed7b8fbcSLe Tan     VTDInvDesc inv_desc;
1721ed7b8fbcSLe Tan     uint8_t desc_type;
1722ed7b8fbcSLe Tan 
1723ed7b8fbcSLe Tan     VTD_DPRINTF(INV, "iq head %"PRIu16, s->iq_head);
1724ed7b8fbcSLe Tan     if (!vtd_get_inv_desc(s->iq, s->iq_head, &inv_desc)) {
1725ed7b8fbcSLe Tan         s->iq_last_desc_type = VTD_INV_DESC_NONE;
1726ed7b8fbcSLe Tan         return false;
1727ed7b8fbcSLe Tan     }
1728ed7b8fbcSLe Tan     desc_type = inv_desc.lo & VTD_INV_DESC_TYPE;
1729ed7b8fbcSLe Tan     /* FIXME: should we update this before or after processing the descriptor? */
1730ed7b8fbcSLe Tan     s->iq_last_desc_type = desc_type;
1731ed7b8fbcSLe Tan 
1732ed7b8fbcSLe Tan     switch (desc_type) {
1733ed7b8fbcSLe Tan     case VTD_INV_DESC_CC:
1734bc535e59SPeter Xu         trace_vtd_inv_desc("context-cache", inv_desc.hi, inv_desc.lo);
1735d92fa2dcSLe Tan         if (!vtd_process_context_cache_desc(s, &inv_desc)) {
1736d92fa2dcSLe Tan             return false;
1737d92fa2dcSLe Tan         }
1738ed7b8fbcSLe Tan         break;
1739ed7b8fbcSLe Tan 
1740ed7b8fbcSLe Tan     case VTD_INV_DESC_IOTLB:
1741bc535e59SPeter Xu         trace_vtd_inv_desc("iotlb", inv_desc.hi, inv_desc.lo);
1742b5a280c0SLe Tan         if (!vtd_process_iotlb_desc(s, &inv_desc)) {
1743b5a280c0SLe Tan             return false;
1744b5a280c0SLe Tan         }
1745ed7b8fbcSLe Tan         break;
1746ed7b8fbcSLe Tan 
1747ed7b8fbcSLe Tan     case VTD_INV_DESC_WAIT:
1748bc535e59SPeter Xu         trace_vtd_inv_desc("wait", inv_desc.hi, inv_desc.lo);
1749ed7b8fbcSLe Tan         if (!vtd_process_wait_desc(s, &inv_desc)) {
1750ed7b8fbcSLe Tan             return false;
1751ed7b8fbcSLe Tan         }
1752ed7b8fbcSLe Tan         break;
1753ed7b8fbcSLe Tan 
1754b7910472SPeter Xu     case VTD_INV_DESC_IEC:
1755bc535e59SPeter Xu         trace_vtd_inv_desc("iec", inv_desc.hi, inv_desc.lo);
175602a2cbc8SPeter Xu         if (!vtd_process_inv_iec_desc(s, &inv_desc)) {
175702a2cbc8SPeter Xu             return false;
175802a2cbc8SPeter Xu         }
1759b7910472SPeter Xu         break;
1760b7910472SPeter Xu 
1761554f5e16SJason Wang     case VTD_INV_DESC_DEVICE:
1762554f5e16SJason Wang         VTD_DPRINTF(INV, "Device IOTLB Invalidation Descriptor hi 0x%"PRIx64
1763554f5e16SJason Wang                     " lo 0x%"PRIx64, inv_desc.hi, inv_desc.lo);
1764554f5e16SJason Wang         if (!vtd_process_device_iotlb_desc(s, &inv_desc)) {
1765554f5e16SJason Wang             return false;
1766554f5e16SJason Wang         }
1767554f5e16SJason Wang         break;
1768554f5e16SJason Wang 
1769ed7b8fbcSLe Tan     default:
1770bc535e59SPeter Xu         trace_vtd_inv_desc_invalid(inv_desc.hi, inv_desc.lo);
1771ed7b8fbcSLe Tan         return false;
1772ed7b8fbcSLe Tan     }
1773ed7b8fbcSLe Tan     s->iq_head++;
1774ed7b8fbcSLe Tan     if (s->iq_head == s->iq_size) {
1775ed7b8fbcSLe Tan         s->iq_head = 0;
1776ed7b8fbcSLe Tan     }
1777ed7b8fbcSLe Tan     return true;
1778ed7b8fbcSLe Tan }
1779ed7b8fbcSLe Tan 
1780ed7b8fbcSLe Tan /* Try to fetch and process more Invalidation Descriptors */
1781ed7b8fbcSLe Tan static void vtd_fetch_inv_desc(IntelIOMMUState *s)
1782ed7b8fbcSLe Tan {
1783ed7b8fbcSLe Tan     VTD_DPRINTF(INV, "fetch Invalidation Descriptors");
1784ed7b8fbcSLe Tan     if (s->iq_tail >= s->iq_size) {
1785ed7b8fbcSLe Tan         /* Detects an invalid Tail pointer */
1786ed7b8fbcSLe Tan         VTD_DPRINTF(GENERAL, "error: iq_tail is %"PRIu16
1787ed7b8fbcSLe Tan                     " while iq_size is %"PRIu16, s->iq_tail, s->iq_size);
1788ed7b8fbcSLe Tan         vtd_handle_inv_queue_error(s);
1789ed7b8fbcSLe Tan         return;
1790ed7b8fbcSLe Tan     }
1791ed7b8fbcSLe Tan     while (s->iq_head != s->iq_tail) {
1792ed7b8fbcSLe Tan         if (!vtd_process_inv_desc(s)) {
1793ed7b8fbcSLe Tan             /* Invalidation Queue Errors */
1794ed7b8fbcSLe Tan             vtd_handle_inv_queue_error(s);
1795ed7b8fbcSLe Tan             break;
1796ed7b8fbcSLe Tan         }
1797ed7b8fbcSLe Tan         /* Must update the IQH_REG in time */
1798ed7b8fbcSLe Tan         vtd_set_quad_raw(s, DMAR_IQH_REG,
1799ed7b8fbcSLe Tan                          (((uint64_t)(s->iq_head)) << VTD_IQH_QH_SHIFT) &
1800ed7b8fbcSLe Tan                          VTD_IQH_QH_MASK);
1801ed7b8fbcSLe Tan     }
1802ed7b8fbcSLe Tan }
1803ed7b8fbcSLe Tan 
1804ed7b8fbcSLe Tan /* Handle write to Invalidation Queue Tail Register */
1805ed7b8fbcSLe Tan static void vtd_handle_iqt_write(IntelIOMMUState *s)
1806ed7b8fbcSLe Tan {
1807ed7b8fbcSLe Tan     uint64_t val = vtd_get_quad_raw(s, DMAR_IQT_REG);
1808ed7b8fbcSLe Tan 
1809ed7b8fbcSLe Tan     s->iq_tail = VTD_IQT_QT(val);
1810ed7b8fbcSLe Tan     VTD_DPRINTF(INV, "set iq tail %"PRIu16, s->iq_tail);
1811ed7b8fbcSLe Tan     if (s->qi_enabled && !(vtd_get_long_raw(s, DMAR_FSTS_REG) & VTD_FSTS_IQE)) {
1812ed7b8fbcSLe Tan         /* Process Invalidation Queue here */
1813ed7b8fbcSLe Tan         vtd_fetch_inv_desc(s);
1814ed7b8fbcSLe Tan     }
1815ed7b8fbcSLe Tan }
1816ed7b8fbcSLe Tan 
18171da12ec4SLe Tan static void vtd_handle_fsts_write(IntelIOMMUState *s)
18181da12ec4SLe Tan {
18191da12ec4SLe Tan     uint32_t fsts_reg = vtd_get_long_raw(s, DMAR_FSTS_REG);
18201da12ec4SLe Tan     uint32_t fectl_reg = vtd_get_long_raw(s, DMAR_FECTL_REG);
18211da12ec4SLe Tan     uint32_t status_fields = VTD_FSTS_PFO | VTD_FSTS_PPF | VTD_FSTS_IQE;
18221da12ec4SLe Tan 
18231da12ec4SLe Tan     if ((fectl_reg & VTD_FECTL_IP) && !(fsts_reg & status_fields)) {
18241da12ec4SLe Tan         vtd_set_clear_mask_long(s, DMAR_FECTL_REG, VTD_FECTL_IP, 0);
18251da12ec4SLe Tan         VTD_DPRINTF(FLOG, "all pending interrupt conditions serviced, clear "
18261da12ec4SLe Tan                     "IP field of FECTL_REG");
18271da12ec4SLe Tan     }
1828ed7b8fbcSLe Tan     /* FIXME: when IQE is Clear, should we try to fetch some Invalidation
1829ed7b8fbcSLe Tan      * Descriptors if there are any when Queued Invalidation is enabled?
1830ed7b8fbcSLe Tan      */
18311da12ec4SLe Tan }
18321da12ec4SLe Tan 
18331da12ec4SLe Tan static void vtd_handle_fectl_write(IntelIOMMUState *s)
18341da12ec4SLe Tan {
18351da12ec4SLe Tan     uint32_t fectl_reg;
18361da12ec4SLe Tan     /* FIXME: when software clears the IM field, check the IP field. But do we
18371da12ec4SLe Tan      * need to compare the old value and the new value to conclude that
18381da12ec4SLe Tan      * software clears the IM field? Or just check if the IM field is zero?
18391da12ec4SLe Tan      */
18401da12ec4SLe Tan     fectl_reg = vtd_get_long_raw(s, DMAR_FECTL_REG);
18411da12ec4SLe Tan     if ((fectl_reg & VTD_FECTL_IP) && !(fectl_reg & VTD_FECTL_IM)) {
18421da12ec4SLe Tan         vtd_generate_interrupt(s, DMAR_FEADDR_REG, DMAR_FEDATA_REG);
18431da12ec4SLe Tan         vtd_set_clear_mask_long(s, DMAR_FECTL_REG, VTD_FECTL_IP, 0);
18441da12ec4SLe Tan         VTD_DPRINTF(FLOG, "IM field is cleared, generate "
18451da12ec4SLe Tan                     "fault event interrupt");
18461da12ec4SLe Tan     }
18471da12ec4SLe Tan }
18481da12ec4SLe Tan 
1849ed7b8fbcSLe Tan static void vtd_handle_ics_write(IntelIOMMUState *s)
1850ed7b8fbcSLe Tan {
1851ed7b8fbcSLe Tan     uint32_t ics_reg = vtd_get_long_raw(s, DMAR_ICS_REG);
1852ed7b8fbcSLe Tan     uint32_t iectl_reg = vtd_get_long_raw(s, DMAR_IECTL_REG);
1853ed7b8fbcSLe Tan 
1854ed7b8fbcSLe Tan     if ((iectl_reg & VTD_IECTL_IP) && !(ics_reg & VTD_ICS_IWC)) {
1855ed7b8fbcSLe Tan         vtd_set_clear_mask_long(s, DMAR_IECTL_REG, VTD_IECTL_IP, 0);
1856ed7b8fbcSLe Tan         VTD_DPRINTF(INV, "pending completion interrupt condition serviced, "
1857ed7b8fbcSLe Tan                     "clear IP field of IECTL_REG");
1858ed7b8fbcSLe Tan     }
1859ed7b8fbcSLe Tan }
1860ed7b8fbcSLe Tan 
1861ed7b8fbcSLe Tan static void vtd_handle_iectl_write(IntelIOMMUState *s)
1862ed7b8fbcSLe Tan {
1863ed7b8fbcSLe Tan     uint32_t iectl_reg;
1864ed7b8fbcSLe Tan     /* FIXME: when software clears the IM field, check the IP field. But do we
1865ed7b8fbcSLe Tan      * need to compare the old value and the new value to conclude that
1866ed7b8fbcSLe Tan      * software clears the IM field? Or just check if the IM field is zero?
1867ed7b8fbcSLe Tan      */
1868ed7b8fbcSLe Tan     iectl_reg = vtd_get_long_raw(s, DMAR_IECTL_REG);
1869ed7b8fbcSLe Tan     if ((iectl_reg & VTD_IECTL_IP) && !(iectl_reg & VTD_IECTL_IM)) {
1870ed7b8fbcSLe Tan         vtd_generate_interrupt(s, DMAR_IEADDR_REG, DMAR_IEDATA_REG);
1871ed7b8fbcSLe Tan         vtd_set_clear_mask_long(s, DMAR_IECTL_REG, VTD_IECTL_IP, 0);
1872ed7b8fbcSLe Tan         VTD_DPRINTF(INV, "IM field is cleared, generate "
1873ed7b8fbcSLe Tan                     "invalidation event interrupt");
1874ed7b8fbcSLe Tan     }
1875ed7b8fbcSLe Tan }
1876ed7b8fbcSLe Tan 
18771da12ec4SLe Tan static uint64_t vtd_mem_read(void *opaque, hwaddr addr, unsigned size)
18781da12ec4SLe Tan {
18791da12ec4SLe Tan     IntelIOMMUState *s = opaque;
18801da12ec4SLe Tan     uint64_t val;
18811da12ec4SLe Tan 
18821da12ec4SLe Tan     if (addr + size > DMAR_REG_SIZE) {
18831da12ec4SLe Tan         VTD_DPRINTF(GENERAL, "error: addr outside region: max 0x%"PRIx64
18841da12ec4SLe Tan                     ", got 0x%"PRIx64 " %d",
18851da12ec4SLe Tan                     (uint64_t)DMAR_REG_SIZE, addr, size);
18861da12ec4SLe Tan         return (uint64_t)-1;
18871da12ec4SLe Tan     }
18881da12ec4SLe Tan 
18891da12ec4SLe Tan     switch (addr) {
18901da12ec4SLe Tan     /* Root Table Address Register, 64-bit */
18911da12ec4SLe Tan     case DMAR_RTADDR_REG:
18921da12ec4SLe Tan         if (size == 4) {
18931da12ec4SLe Tan             val = s->root & ((1ULL << 32) - 1);
18941da12ec4SLe Tan         } else {
18951da12ec4SLe Tan             val = s->root;
18961da12ec4SLe Tan         }
18971da12ec4SLe Tan         break;
18981da12ec4SLe Tan 
18991da12ec4SLe Tan     case DMAR_RTADDR_REG_HI:
19001da12ec4SLe Tan         assert(size == 4);
19011da12ec4SLe Tan         val = s->root >> 32;
19021da12ec4SLe Tan         break;
19031da12ec4SLe Tan 
1904ed7b8fbcSLe Tan     /* Invalidation Queue Address Register, 64-bit */
1905ed7b8fbcSLe Tan     case DMAR_IQA_REG:
1906ed7b8fbcSLe Tan         val = s->iq | (vtd_get_quad(s, DMAR_IQA_REG) & VTD_IQA_QS);
1907ed7b8fbcSLe Tan         if (size == 4) {
1908ed7b8fbcSLe Tan             val = val & ((1ULL << 32) - 1);
1909ed7b8fbcSLe Tan         }
1910ed7b8fbcSLe Tan         break;
1911ed7b8fbcSLe Tan 
1912ed7b8fbcSLe Tan     case DMAR_IQA_REG_HI:
1913ed7b8fbcSLe Tan         assert(size == 4);
1914ed7b8fbcSLe Tan         val = s->iq >> 32;
1915ed7b8fbcSLe Tan         break;
1916ed7b8fbcSLe Tan 
19171da12ec4SLe Tan     default:
19181da12ec4SLe Tan         if (size == 4) {
19191da12ec4SLe Tan             val = vtd_get_long(s, addr);
19201da12ec4SLe Tan         } else {
19211da12ec4SLe Tan             val = vtd_get_quad(s, addr);
19221da12ec4SLe Tan         }
19231da12ec4SLe Tan     }
19241da12ec4SLe Tan     VTD_DPRINTF(CSR, "addr 0x%"PRIx64 " size %d val 0x%"PRIx64,
19251da12ec4SLe Tan                 addr, size, val);
19261da12ec4SLe Tan     return val;
19271da12ec4SLe Tan }
19281da12ec4SLe Tan 
19291da12ec4SLe Tan static void vtd_mem_write(void *opaque, hwaddr addr,
19301da12ec4SLe Tan                           uint64_t val, unsigned size)
19311da12ec4SLe Tan {
19321da12ec4SLe Tan     IntelIOMMUState *s = opaque;
19331da12ec4SLe Tan 
19341da12ec4SLe Tan     if (addr + size > DMAR_REG_SIZE) {
19351da12ec4SLe Tan         VTD_DPRINTF(GENERAL, "error: addr outside region: max 0x%"PRIx64
19361da12ec4SLe Tan                     ", got 0x%"PRIx64 " %d",
19371da12ec4SLe Tan                     (uint64_t)DMAR_REG_SIZE, addr, size);
19381da12ec4SLe Tan         return;
19391da12ec4SLe Tan     }
19401da12ec4SLe Tan 
19411da12ec4SLe Tan     switch (addr) {
19421da12ec4SLe Tan     /* Global Command Register, 32-bit */
19431da12ec4SLe Tan     case DMAR_GCMD_REG:
19441da12ec4SLe Tan         VTD_DPRINTF(CSR, "DMAR_GCMD_REG write addr 0x%"PRIx64
19451da12ec4SLe Tan                     ", size %d, val 0x%"PRIx64, addr, size, val);
19461da12ec4SLe Tan         vtd_set_long(s, addr, val);
19471da12ec4SLe Tan         vtd_handle_gcmd_write(s);
19481da12ec4SLe Tan         break;
19491da12ec4SLe Tan 
19501da12ec4SLe Tan     /* Context Command Register, 64-bit */
19511da12ec4SLe Tan     case DMAR_CCMD_REG:
19521da12ec4SLe Tan         VTD_DPRINTF(CSR, "DMAR_CCMD_REG write addr 0x%"PRIx64
19531da12ec4SLe Tan                     ", size %d, val 0x%"PRIx64, addr, size, val);
19541da12ec4SLe Tan         if (size == 4) {
19551da12ec4SLe Tan             vtd_set_long(s, addr, val);
19561da12ec4SLe Tan         } else {
19571da12ec4SLe Tan             vtd_set_quad(s, addr, val);
19581da12ec4SLe Tan             vtd_handle_ccmd_write(s);
19591da12ec4SLe Tan         }
19601da12ec4SLe Tan         break;
19611da12ec4SLe Tan 
19621da12ec4SLe Tan     case DMAR_CCMD_REG_HI:
19631da12ec4SLe Tan         VTD_DPRINTF(CSR, "DMAR_CCMD_REG_HI write addr 0x%"PRIx64
19641da12ec4SLe Tan                     ", size %d, val 0x%"PRIx64, addr, size, val);
19651da12ec4SLe Tan         assert(size == 4);
19661da12ec4SLe Tan         vtd_set_long(s, addr, val);
19671da12ec4SLe Tan         vtd_handle_ccmd_write(s);
19681da12ec4SLe Tan         break;
19691da12ec4SLe Tan 
19701da12ec4SLe Tan     /* IOTLB Invalidation Register, 64-bit */
19711da12ec4SLe Tan     case DMAR_IOTLB_REG:
19721da12ec4SLe Tan         VTD_DPRINTF(INV, "DMAR_IOTLB_REG write addr 0x%"PRIx64
19731da12ec4SLe Tan                     ", size %d, val 0x%"PRIx64, addr, size, val);
19741da12ec4SLe Tan         if (size == 4) {
19751da12ec4SLe Tan             vtd_set_long(s, addr, val);
19761da12ec4SLe Tan         } else {
19771da12ec4SLe Tan             vtd_set_quad(s, addr, val);
19781da12ec4SLe Tan             vtd_handle_iotlb_write(s);
19791da12ec4SLe Tan         }
19801da12ec4SLe Tan         break;
19811da12ec4SLe Tan 
19821da12ec4SLe Tan     case DMAR_IOTLB_REG_HI:
19831da12ec4SLe Tan         VTD_DPRINTF(INV, "DMAR_IOTLB_REG_HI write addr 0x%"PRIx64
19841da12ec4SLe Tan                     ", size %d, val 0x%"PRIx64, addr, size, val);
19851da12ec4SLe Tan         assert(size == 4);
19861da12ec4SLe Tan         vtd_set_long(s, addr, val);
19871da12ec4SLe Tan         vtd_handle_iotlb_write(s);
19881da12ec4SLe Tan         break;
19891da12ec4SLe Tan 
1990b5a280c0SLe Tan     /* Invalidate Address Register, 64-bit */
1991b5a280c0SLe Tan     case DMAR_IVA_REG:
1992b5a280c0SLe Tan         VTD_DPRINTF(INV, "DMAR_IVA_REG write addr 0x%"PRIx64
1993b5a280c0SLe Tan                     ", size %d, val 0x%"PRIx64, addr, size, val);
1994b5a280c0SLe Tan         if (size == 4) {
1995b5a280c0SLe Tan             vtd_set_long(s, addr, val);
1996b5a280c0SLe Tan         } else {
1997b5a280c0SLe Tan             vtd_set_quad(s, addr, val);
1998b5a280c0SLe Tan         }
1999b5a280c0SLe Tan         break;
2000b5a280c0SLe Tan 
2001b5a280c0SLe Tan     case DMAR_IVA_REG_HI:
2002b5a280c0SLe Tan         VTD_DPRINTF(INV, "DMAR_IVA_REG_HI write addr 0x%"PRIx64
2003b5a280c0SLe Tan                     ", size %d, val 0x%"PRIx64, addr, size, val);
2004b5a280c0SLe Tan         assert(size == 4);
2005b5a280c0SLe Tan         vtd_set_long(s, addr, val);
2006b5a280c0SLe Tan         break;
2007b5a280c0SLe Tan 
20081da12ec4SLe Tan     /* Fault Status Register, 32-bit */
20091da12ec4SLe Tan     case DMAR_FSTS_REG:
20101da12ec4SLe Tan         VTD_DPRINTF(FLOG, "DMAR_FSTS_REG write addr 0x%"PRIx64
20111da12ec4SLe Tan                     ", size %d, val 0x%"PRIx64, addr, size, val);
20121da12ec4SLe Tan         assert(size == 4);
20131da12ec4SLe Tan         vtd_set_long(s, addr, val);
20141da12ec4SLe Tan         vtd_handle_fsts_write(s);
20151da12ec4SLe Tan         break;
20161da12ec4SLe Tan 
20171da12ec4SLe Tan     /* Fault Event Control Register, 32-bit */
20181da12ec4SLe Tan     case DMAR_FECTL_REG:
20191da12ec4SLe Tan         VTD_DPRINTF(FLOG, "DMAR_FECTL_REG write addr 0x%"PRIx64
20201da12ec4SLe Tan                     ", size %d, val 0x%"PRIx64, addr, size, val);
20211da12ec4SLe Tan         assert(size == 4);
20221da12ec4SLe Tan         vtd_set_long(s, addr, val);
20231da12ec4SLe Tan         vtd_handle_fectl_write(s);
20241da12ec4SLe Tan         break;
20251da12ec4SLe Tan 
20261da12ec4SLe Tan     /* Fault Event Data Register, 32-bit */
20271da12ec4SLe Tan     case DMAR_FEDATA_REG:
20281da12ec4SLe Tan         VTD_DPRINTF(FLOG, "DMAR_FEDATA_REG write addr 0x%"PRIx64
20291da12ec4SLe Tan                     ", size %d, val 0x%"PRIx64, addr, size, val);
20301da12ec4SLe Tan         assert(size == 4);
20311da12ec4SLe Tan         vtd_set_long(s, addr, val);
20321da12ec4SLe Tan         break;
20331da12ec4SLe Tan 
20341da12ec4SLe Tan     /* Fault Event Address Register, 32-bit */
20351da12ec4SLe Tan     case DMAR_FEADDR_REG:
20361da12ec4SLe Tan         VTD_DPRINTF(FLOG, "DMAR_FEADDR_REG write addr 0x%"PRIx64
20371da12ec4SLe Tan                     ", size %d, val 0x%"PRIx64, addr, size, val);
20381da12ec4SLe Tan         assert(size == 4);
20391da12ec4SLe Tan         vtd_set_long(s, addr, val);
20401da12ec4SLe Tan         break;
20411da12ec4SLe Tan 
20421da12ec4SLe Tan     /* Fault Event Upper Address Register, 32-bit */
20431da12ec4SLe Tan     case DMAR_FEUADDR_REG:
20441da12ec4SLe Tan         VTD_DPRINTF(FLOG, "DMAR_FEUADDR_REG write addr 0x%"PRIx64
20451da12ec4SLe Tan                     ", size %d, val 0x%"PRIx64, addr, size, val);
20461da12ec4SLe Tan         assert(size == 4);
20471da12ec4SLe Tan         vtd_set_long(s, addr, val);
20481da12ec4SLe Tan         break;
20491da12ec4SLe Tan 
20501da12ec4SLe Tan     /* Protected Memory Enable Register, 32-bit */
20511da12ec4SLe Tan     case DMAR_PMEN_REG:
20521da12ec4SLe Tan         VTD_DPRINTF(CSR, "DMAR_PMEN_REG write addr 0x%"PRIx64
20531da12ec4SLe Tan                     ", size %d, val 0x%"PRIx64, addr, size, val);
20541da12ec4SLe Tan         assert(size == 4);
20551da12ec4SLe Tan         vtd_set_long(s, addr, val);
20561da12ec4SLe Tan         break;
20571da12ec4SLe Tan 
20581da12ec4SLe Tan     /* Root Table Address Register, 64-bit */
20591da12ec4SLe Tan     case DMAR_RTADDR_REG:
20601da12ec4SLe Tan         VTD_DPRINTF(CSR, "DMAR_RTADDR_REG write addr 0x%"PRIx64
20611da12ec4SLe Tan                     ", size %d, val 0x%"PRIx64, addr, size, val);
20621da12ec4SLe Tan         if (size == 4) {
20631da12ec4SLe Tan             vtd_set_long(s, addr, val);
20641da12ec4SLe Tan         } else {
20651da12ec4SLe Tan             vtd_set_quad(s, addr, val);
20661da12ec4SLe Tan         }
20671da12ec4SLe Tan         break;
20681da12ec4SLe Tan 
20691da12ec4SLe Tan     case DMAR_RTADDR_REG_HI:
20701da12ec4SLe Tan         VTD_DPRINTF(CSR, "DMAR_RTADDR_REG_HI write addr 0x%"PRIx64
20711da12ec4SLe Tan                     ", size %d, val 0x%"PRIx64, addr, size, val);
20721da12ec4SLe Tan         assert(size == 4);
20731da12ec4SLe Tan         vtd_set_long(s, addr, val);
20741da12ec4SLe Tan         break;
20751da12ec4SLe Tan 
2076ed7b8fbcSLe Tan     /* Invalidation Queue Tail Register, 64-bit */
2077ed7b8fbcSLe Tan     case DMAR_IQT_REG:
2078ed7b8fbcSLe Tan         VTD_DPRINTF(INV, "DMAR_IQT_REG write addr 0x%"PRIx64
2079ed7b8fbcSLe Tan                     ", size %d, val 0x%"PRIx64, addr, size, val);
2080ed7b8fbcSLe Tan         if (size == 4) {
2081ed7b8fbcSLe Tan             vtd_set_long(s, addr, val);
2082ed7b8fbcSLe Tan         } else {
2083ed7b8fbcSLe Tan             vtd_set_quad(s, addr, val);
2084ed7b8fbcSLe Tan         }
2085ed7b8fbcSLe Tan         vtd_handle_iqt_write(s);
2086ed7b8fbcSLe Tan         break;
2087ed7b8fbcSLe Tan 
2088ed7b8fbcSLe Tan     case DMAR_IQT_REG_HI:
2089ed7b8fbcSLe Tan         VTD_DPRINTF(INV, "DMAR_IQT_REG_HI write addr 0x%"PRIx64
2090ed7b8fbcSLe Tan                     ", size %d, val 0x%"PRIx64, addr, size, val);
2091ed7b8fbcSLe Tan         assert(size == 4);
2092ed7b8fbcSLe Tan         vtd_set_long(s, addr, val);
2093ed7b8fbcSLe Tan         /* 19:63 of IQT_REG is RsvdZ, do nothing here */
2094ed7b8fbcSLe Tan         break;
2095ed7b8fbcSLe Tan 
2096ed7b8fbcSLe Tan     /* Invalidation Queue Address Register, 64-bit */
2097ed7b8fbcSLe Tan     case DMAR_IQA_REG:
2098ed7b8fbcSLe Tan         VTD_DPRINTF(INV, "DMAR_IQA_REG write addr 0x%"PRIx64
2099ed7b8fbcSLe Tan                     ", size %d, val 0x%"PRIx64, addr, size, val);
2100ed7b8fbcSLe Tan         if (size == 4) {
2101ed7b8fbcSLe Tan             vtd_set_long(s, addr, val);
2102ed7b8fbcSLe Tan         } else {
2103ed7b8fbcSLe Tan             vtd_set_quad(s, addr, val);
2104ed7b8fbcSLe Tan         }
2105ed7b8fbcSLe Tan         break;
2106ed7b8fbcSLe Tan 
2107ed7b8fbcSLe Tan     case DMAR_IQA_REG_HI:
2108ed7b8fbcSLe Tan         VTD_DPRINTF(INV, "DMAR_IQA_REG_HI write addr 0x%"PRIx64
2109ed7b8fbcSLe Tan                     ", size %d, val 0x%"PRIx64, addr, size, val);
2110ed7b8fbcSLe Tan         assert(size == 4);
2111ed7b8fbcSLe Tan         vtd_set_long(s, addr, val);
2112ed7b8fbcSLe Tan         break;
2113ed7b8fbcSLe Tan 
2114ed7b8fbcSLe Tan     /* Invalidation Completion Status Register, 32-bit */
2115ed7b8fbcSLe Tan     case DMAR_ICS_REG:
2116ed7b8fbcSLe Tan         VTD_DPRINTF(INV, "DMAR_ICS_REG write addr 0x%"PRIx64
2117ed7b8fbcSLe Tan                     ", size %d, val 0x%"PRIx64, addr, size, val);
2118ed7b8fbcSLe Tan         assert(size == 4);
2119ed7b8fbcSLe Tan         vtd_set_long(s, addr, val);
2120ed7b8fbcSLe Tan         vtd_handle_ics_write(s);
2121ed7b8fbcSLe Tan         break;
2122ed7b8fbcSLe Tan 
2123ed7b8fbcSLe Tan     /* Invalidation Event Control Register, 32-bit */
2124ed7b8fbcSLe Tan     case DMAR_IECTL_REG:
2125ed7b8fbcSLe Tan         VTD_DPRINTF(INV, "DMAR_IECTL_REG write addr 0x%"PRIx64
2126ed7b8fbcSLe Tan                     ", size %d, val 0x%"PRIx64, addr, size, val);
2127ed7b8fbcSLe Tan         assert(size == 4);
2128ed7b8fbcSLe Tan         vtd_set_long(s, addr, val);
2129ed7b8fbcSLe Tan         vtd_handle_iectl_write(s);
2130ed7b8fbcSLe Tan         break;
2131ed7b8fbcSLe Tan 
2132ed7b8fbcSLe Tan     /* Invalidation Event Data Register, 32-bit */
2133ed7b8fbcSLe Tan     case DMAR_IEDATA_REG:
2134ed7b8fbcSLe Tan         VTD_DPRINTF(INV, "DMAR_IEDATA_REG write addr 0x%"PRIx64
2135ed7b8fbcSLe Tan                     ", size %d, val 0x%"PRIx64, addr, size, val);
2136ed7b8fbcSLe Tan         assert(size == 4);
2137ed7b8fbcSLe Tan         vtd_set_long(s, addr, val);
2138ed7b8fbcSLe Tan         break;
2139ed7b8fbcSLe Tan 
2140ed7b8fbcSLe Tan     /* Invalidation Event Address Register, 32-bit */
2141ed7b8fbcSLe Tan     case DMAR_IEADDR_REG:
2142ed7b8fbcSLe Tan         VTD_DPRINTF(INV, "DMAR_IEADDR_REG write addr 0x%"PRIx64
2143ed7b8fbcSLe Tan                     ", size %d, val 0x%"PRIx64, addr, size, val);
2144ed7b8fbcSLe Tan         assert(size == 4);
2145ed7b8fbcSLe Tan         vtd_set_long(s, addr, val);
2146ed7b8fbcSLe Tan         break;
2147ed7b8fbcSLe Tan 
2148ed7b8fbcSLe Tan     /* Invalidation Event Upper Address Register, 32-bit */
2149ed7b8fbcSLe Tan     case DMAR_IEUADDR_REG:
2150ed7b8fbcSLe Tan         VTD_DPRINTF(INV, "DMAR_IEUADDR_REG write addr 0x%"PRIx64
2151ed7b8fbcSLe Tan                     ", size %d, val 0x%"PRIx64, addr, size, val);
2152ed7b8fbcSLe Tan         assert(size == 4);
2153ed7b8fbcSLe Tan         vtd_set_long(s, addr, val);
2154ed7b8fbcSLe Tan         break;
2155ed7b8fbcSLe Tan 
21561da12ec4SLe Tan     /* Fault Recording Registers, 128-bit */
21571da12ec4SLe Tan     case DMAR_FRCD_REG_0_0:
21581da12ec4SLe Tan         VTD_DPRINTF(FLOG, "DMAR_FRCD_REG_0_0 write addr 0x%"PRIx64
21591da12ec4SLe Tan                     ", size %d, val 0x%"PRIx64, addr, size, val);
21601da12ec4SLe Tan         if (size == 4) {
21611da12ec4SLe Tan             vtd_set_long(s, addr, val);
21621da12ec4SLe Tan         } else {
21631da12ec4SLe Tan             vtd_set_quad(s, addr, val);
21641da12ec4SLe Tan         }
21651da12ec4SLe Tan         break;
21661da12ec4SLe Tan 
21671da12ec4SLe Tan     case DMAR_FRCD_REG_0_1:
21681da12ec4SLe Tan         VTD_DPRINTF(FLOG, "DMAR_FRCD_REG_0_1 write addr 0x%"PRIx64
21691da12ec4SLe Tan                     ", size %d, val 0x%"PRIx64, addr, size, val);
21701da12ec4SLe Tan         assert(size == 4);
21711da12ec4SLe Tan         vtd_set_long(s, addr, val);
21721da12ec4SLe Tan         break;
21731da12ec4SLe Tan 
21741da12ec4SLe Tan     case DMAR_FRCD_REG_0_2:
21751da12ec4SLe Tan         VTD_DPRINTF(FLOG, "DMAR_FRCD_REG_0_2 write addr 0x%"PRIx64
21761da12ec4SLe Tan                     ", size %d, val 0x%"PRIx64, addr, size, val);
21771da12ec4SLe Tan         if (size == 4) {
21781da12ec4SLe Tan             vtd_set_long(s, addr, val);
21791da12ec4SLe Tan         } else {
21801da12ec4SLe Tan             vtd_set_quad(s, addr, val);
21811da12ec4SLe Tan             /* May clear bit 127 (Fault), update PPF */
21821da12ec4SLe Tan             vtd_update_fsts_ppf(s);
21831da12ec4SLe Tan         }
21841da12ec4SLe Tan         break;
21851da12ec4SLe Tan 
21861da12ec4SLe Tan     case DMAR_FRCD_REG_0_3:
21871da12ec4SLe Tan         VTD_DPRINTF(FLOG, "DMAR_FRCD_REG_0_3 write addr 0x%"PRIx64
21881da12ec4SLe Tan                     ", size %d, val 0x%"PRIx64, addr, size, val);
21891da12ec4SLe Tan         assert(size == 4);
21901da12ec4SLe Tan         vtd_set_long(s, addr, val);
21911da12ec4SLe Tan         /* May clear bit 127 (Fault), update PPF */
21921da12ec4SLe Tan         vtd_update_fsts_ppf(s);
21931da12ec4SLe Tan         break;
21941da12ec4SLe Tan 
2195a5861439SPeter Xu     case DMAR_IRTA_REG:
2196a5861439SPeter Xu         VTD_DPRINTF(IR, "DMAR_IRTA_REG write addr 0x%"PRIx64
2197a5861439SPeter Xu                     ", size %d, val 0x%"PRIx64, addr, size, val);
2198a5861439SPeter Xu         if (size == 4) {
2199a5861439SPeter Xu             vtd_set_long(s, addr, val);
2200a5861439SPeter Xu         } else {
2201a5861439SPeter Xu             vtd_set_quad(s, addr, val);
2202a5861439SPeter Xu         }
2203a5861439SPeter Xu         break;
2204a5861439SPeter Xu 
2205a5861439SPeter Xu     case DMAR_IRTA_REG_HI:
2206a5861439SPeter Xu         VTD_DPRINTF(IR, "DMAR_IRTA_REG_HI write addr 0x%"PRIx64
2207a5861439SPeter Xu                     ", size %d, val 0x%"PRIx64, addr, size, val);
2208a5861439SPeter Xu         assert(size == 4);
2209a5861439SPeter Xu         vtd_set_long(s, addr, val);
2210a5861439SPeter Xu         break;
2211a5861439SPeter Xu 
22121da12ec4SLe Tan     default:
22131da12ec4SLe Tan         VTD_DPRINTF(GENERAL, "error: unhandled reg write addr 0x%"PRIx64
22141da12ec4SLe Tan                     ", size %d, val 0x%"PRIx64, addr, size, val);
22151da12ec4SLe Tan         if (size == 4) {
22161da12ec4SLe Tan             vtd_set_long(s, addr, val);
22171da12ec4SLe Tan         } else {
22181da12ec4SLe Tan             vtd_set_quad(s, addr, val);
22191da12ec4SLe Tan         }
22201da12ec4SLe Tan     }
22211da12ec4SLe Tan }
22221da12ec4SLe Tan 
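/*
 * Illustrative note (not from the original source): 64-bit registers
 * such as DMAR_IQT_REG can be programmed either with one 8-byte access
 * (stored via vtd_set_quad() above) or with two 4-byte accesses, in
 * which case the upper half arrives at the corresponding _HI offset,
 * roughly like the following guest-side pseudocode:
 *
 *     writel(lower_32_bits(val), mmio + DMAR_IQT_REG);
 *     writel(upper_32_bits(val), mmio + DMAR_IQT_REG_HI);
 *
 * The queue tail lives in the low dword, which is why only the
 * DMAR_IQT_REG case calls vtd_handle_iqt_write().
 */
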
22231da12ec4SLe Tan static IOMMUTLBEntry vtd_iommu_translate(MemoryRegion *iommu, hwaddr addr,
22241da12ec4SLe Tan                                          bool is_write)
22251da12ec4SLe Tan {
22261da12ec4SLe Tan     VTDAddressSpace *vtd_as = container_of(iommu, VTDAddressSpace, iommu);
22271da12ec4SLe Tan     IntelIOMMUState *s = vtd_as->iommu_state;
22281da12ec4SLe Tan     IOMMUTLBEntry ret = {
22291da12ec4SLe Tan         .target_as = &address_space_memory,
22301da12ec4SLe Tan         .iova = addr,
22311da12ec4SLe Tan         .translated_addr = 0,
22321da12ec4SLe Tan         .addr_mask = ~(hwaddr)0,
22331da12ec4SLe Tan         .perm = IOMMU_NONE,
22341da12ec4SLe Tan     };
22351da12ec4SLe Tan 
22361da12ec4SLe Tan     if (!s->dmar_enabled) {
22371da12ec4SLe Tan         /* DMAR disabled, passthrough, use 4k pages */
22381da12ec4SLe Tan         ret.iova = addr & VTD_PAGE_MASK_4K;
22391da12ec4SLe Tan         ret.translated_addr = addr & VTD_PAGE_MASK_4K;
22401da12ec4SLe Tan         ret.addr_mask = ~VTD_PAGE_MASK_4K;
22411da12ec4SLe Tan         ret.perm = IOMMU_RW;
22421da12ec4SLe Tan         return ret;
22431da12ec4SLe Tan     }
22441da12ec4SLe Tan 
22457df953bdSKnut Omang     vtd_do_iommu_translate(vtd_as, vtd_as->bus, vtd_as->devfn, addr,
2246d92fa2dcSLe Tan                            is_write, &ret);
22471da12ec4SLe Tan     VTD_DPRINTF(MMU,
22481da12ec4SLe Tan                 "bus %"PRIu8 " slot %"PRIu8 " func %"PRIu8 " devfn %"PRIu8
22496e905564SPeter Xu                 " iova 0x%"PRIx64 " hpa 0x%"PRIx64, pci_bus_num(vtd_as->bus),
2250d92fa2dcSLe Tan                 VTD_PCI_SLOT(vtd_as->devfn), VTD_PCI_FUNC(vtd_as->devfn),
2251d92fa2dcSLe Tan                 vtd_as->devfn, addr, ret.translated_addr);
22521da12ec4SLe Tan     return ret;
22531da12ec4SLe Tan }
22541da12ec4SLe Tan 
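/*
 * Illustrative note (an assumption about the memory-core caller, not
 * part of this file): the returned IOMMUTLBEntry describes a whole
 * aligned block, and the caller reconstructs the target address
 * roughly as
 *
 *     hpa = (ret.translated_addr & ~ret.addr_mask) | (addr & ret.addr_mask);
 *
 * so with DMAR disabled above, addr_mask = ~VTD_PAGE_MASK_4K simply
 * yields an identity mapping at 4 KiB granularity.
 */
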
22555bf3d319SPeter Xu static void vtd_iommu_notify_flag_changed(MemoryRegion *iommu,
22565bf3d319SPeter Xu                                           IOMMUNotifierFlag old,
22575bf3d319SPeter Xu                                           IOMMUNotifierFlag new)
22583cb3b154SAlex Williamson {
22593cb3b154SAlex Williamson     VTDAddressSpace *vtd_as = container_of(iommu, VTDAddressSpace, iommu);
2260dd4d607eSPeter Xu     IntelIOMMUState *s = vtd_as->iommu_state;
2261dd4d607eSPeter Xu     IntelIOMMUNotifierNode *node = NULL;
2262dd4d607eSPeter Xu     IntelIOMMUNotifierNode *next_node = NULL;
22633cb3b154SAlex Williamson 
2264dd4d607eSPeter Xu     if (!s->caching_mode && new & IOMMU_NOTIFIER_MAP) {
2265dd4d607eSPeter Xu         error_report("We need to set caching-mode=on for intel-iommu to enable "
2266dd4d607eSPeter Xu                      "device assignment with IOMMU protection.");
2267a3276f78SPeter Xu         exit(1);
2268a3276f78SPeter Xu     }
2269dd4d607eSPeter Xu 
2270dd4d607eSPeter Xu     if (old == IOMMU_NOTIFIER_NONE) {
2271dd4d607eSPeter Xu         node = g_malloc0(sizeof(*node));
2272dd4d607eSPeter Xu         node->vtd_as = vtd_as;
2273dd4d607eSPeter Xu         QLIST_INSERT_HEAD(&s->notifiers_list, node, next);
2274dd4d607eSPeter Xu         return;
2275dd4d607eSPeter Xu     }
2276dd4d607eSPeter Xu 
2277dd4d607eSPeter Xu     /* update notifier node with new flags */
2278dd4d607eSPeter Xu     QLIST_FOREACH_SAFE(node, &s->notifiers_list, next, next_node) {
2279dd4d607eSPeter Xu         if (node->vtd_as == vtd_as) {
2280dd4d607eSPeter Xu             if (new == IOMMU_NOTIFIER_NONE) {
2281dd4d607eSPeter Xu                 QLIST_REMOVE(node, next);
2282dd4d607eSPeter Xu                 g_free(node);
2283dd4d607eSPeter Xu             }
2284dd4d607eSPeter Xu             return;
2285dd4d607eSPeter Xu         }
2286dd4d607eSPeter Xu     }
22873cb3b154SAlex Williamson }
22883cb3b154SAlex Williamson 
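/*
 * Illustrative sketch (not part of the original source) of how a
 * consumer such as vfio ends up in the callback above: registering an
 * IOMMU notifier on the per-device IOMMU region makes the memory core
 * report the flag transition to this device model:
 *
 *     IOMMUNotifier n;
 *
 *     iommu_notifier_init(&n, my_iommu_notify, IOMMU_NOTIFIER_ALL,
 *                         0, UINT64_MAX);
 *     memory_region_register_iommu_notifier(&vtd_as->iommu, &n);
 *
 * my_iommu_notify() is a hypothetical callback; once MAP notifications
 * are requested, the check above enforces caching-mode=on.
 */
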
22891da12ec4SLe Tan static const VMStateDescription vtd_vmstate = {
22901da12ec4SLe Tan     .name = "iommu-intel",
22918cdcf3c1SPeter Xu     .version_id = 1,
22928cdcf3c1SPeter Xu     .minimum_version_id = 1,
22938cdcf3c1SPeter Xu     .priority = MIG_PRI_IOMMU,
22948cdcf3c1SPeter Xu     .fields = (VMStateField[]) {
22958cdcf3c1SPeter Xu         VMSTATE_UINT64(root, IntelIOMMUState),
22968cdcf3c1SPeter Xu         VMSTATE_UINT64(intr_root, IntelIOMMUState),
22978cdcf3c1SPeter Xu         VMSTATE_UINT64(iq, IntelIOMMUState),
22988cdcf3c1SPeter Xu         VMSTATE_UINT32(intr_size, IntelIOMMUState),
22998cdcf3c1SPeter Xu         VMSTATE_UINT16(iq_head, IntelIOMMUState),
23008cdcf3c1SPeter Xu         VMSTATE_UINT16(iq_tail, IntelIOMMUState),
23018cdcf3c1SPeter Xu         VMSTATE_UINT16(iq_size, IntelIOMMUState),
23028cdcf3c1SPeter Xu         VMSTATE_UINT16(next_frcd_reg, IntelIOMMUState),
23038cdcf3c1SPeter Xu         VMSTATE_UINT8_ARRAY(csr, IntelIOMMUState, DMAR_REG_SIZE),
23048cdcf3c1SPeter Xu         VMSTATE_UINT8(iq_last_desc_type, IntelIOMMUState),
23058cdcf3c1SPeter Xu         VMSTATE_BOOL(root_extended, IntelIOMMUState),
23068cdcf3c1SPeter Xu         VMSTATE_BOOL(dmar_enabled, IntelIOMMUState),
23078cdcf3c1SPeter Xu         VMSTATE_BOOL(qi_enabled, IntelIOMMUState),
23088cdcf3c1SPeter Xu         VMSTATE_BOOL(intr_enabled, IntelIOMMUState),
23098cdcf3c1SPeter Xu         VMSTATE_BOOL(intr_eime, IntelIOMMUState),
23108cdcf3c1SPeter Xu         VMSTATE_END_OF_LIST()
23118cdcf3c1SPeter Xu     }
23121da12ec4SLe Tan };
23131da12ec4SLe Tan 
23141da12ec4SLe Tan static const MemoryRegionOps vtd_mem_ops = {
23151da12ec4SLe Tan     .read = vtd_mem_read,
23161da12ec4SLe Tan     .write = vtd_mem_write,
23171da12ec4SLe Tan     .endianness = DEVICE_LITTLE_ENDIAN,
23181da12ec4SLe Tan     .impl = {
23191da12ec4SLe Tan         .min_access_size = 4,
23201da12ec4SLe Tan         .max_access_size = 8,
23211da12ec4SLe Tan     },
23221da12ec4SLe Tan     .valid = {
23231da12ec4SLe Tan         .min_access_size = 4,
23241da12ec4SLe Tan         .max_access_size = 8,
23251da12ec4SLe Tan     },
23261da12ec4SLe Tan };
23271da12ec4SLe Tan 
23281da12ec4SLe Tan static Property vtd_properties[] = {
23291da12ec4SLe Tan     DEFINE_PROP_UINT32("version", IntelIOMMUState, version, 0),
2330e6b6af05SRadim Krčmář     DEFINE_PROP_ON_OFF_AUTO("eim", IntelIOMMUState, intr_eim,
2331e6b6af05SRadim Krčmář                             ON_OFF_AUTO_AUTO),
2332fb506e70SRadim Krčmář     DEFINE_PROP_BOOL("x-buggy-eim", IntelIOMMUState, buggy_eim, false),
23333b40f0e5SAviv Ben-David     DEFINE_PROP_BOOL("caching-mode", IntelIOMMUState, caching_mode, FALSE),
23341da12ec4SLe Tan     DEFINE_PROP_END_OF_LIST(),
23351da12ec4SLe Tan };
23361da12ec4SLe Tan 
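/*
 * For illustration (not from the original source): on a q35 machine
 * these properties are typically given on the command line, e.g.
 *
 *     -machine q35,accel=kvm,kernel-irqchip=split \
 *     -device intel-iommu,intremap=on,eim=on,caching-mode=on
 *
 * "intremap" comes from the x86-iommu base class; "eim", "caching-mode"
 * and the rest are defined in vtd_properties above. Exact option
 * spellings depend on the QEMU version in use.
 */
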
2337651e4cefSPeter Xu /* Read IRTE entry with specific index */
2338651e4cefSPeter Xu static int vtd_irte_get(IntelIOMMUState *iommu, uint16_t index,
2339bc38ee10SMichael S. Tsirkin                         VTD_IR_TableEntry *entry, uint16_t sid)
2340651e4cefSPeter Xu {
2341ede9c94aSPeter Xu     static const uint16_t vtd_svt_mask[VTD_SQ_MAX] = \
2342ede9c94aSPeter Xu         {0xffff, 0xfffb, 0xfff9, 0xfff8};
2343651e4cefSPeter Xu     dma_addr_t addr = 0x00;
2344ede9c94aSPeter Xu     uint16_t mask, source_id;
2345ede9c94aSPeter Xu     uint8_t bus, bus_max, bus_min;
2346651e4cefSPeter Xu 
2347651e4cefSPeter Xu     addr = iommu->intr_root + index * sizeof(*entry);
2348651e4cefSPeter Xu     if (dma_memory_read(&address_space_memory, addr, entry,
2349651e4cefSPeter Xu                         sizeof(*entry))) {
2350651e4cefSPeter Xu         VTD_DPRINTF(GENERAL, "error: fail to access IR root at 0x%"PRIx64
2351651e4cefSPeter Xu                     " + %"PRIu16, iommu->intr_root, index);
2352651e4cefSPeter Xu         return -VTD_FR_IR_ROOT_INVAL;
2353651e4cefSPeter Xu     }
2354651e4cefSPeter Xu 
2355bc38ee10SMichael S. Tsirkin     if (!entry->irte.present) {
2356651e4cefSPeter Xu         VTD_DPRINTF(GENERAL, "error: present flag not set in IRTE"
2357651e4cefSPeter Xu                     " entry index %u value 0x%"PRIx64 " 0x%"PRIx64,
2358651e4cefSPeter Xu                     index, le64_to_cpu(entry->data[1]),
2359651e4cefSPeter Xu                     le64_to_cpu(entry->data[0]));
2360651e4cefSPeter Xu         return -VTD_FR_IR_ENTRY_P;
2361651e4cefSPeter Xu     }
2362651e4cefSPeter Xu 
2363bc38ee10SMichael S. Tsirkin     if (entry->irte.__reserved_0 || entry->irte.__reserved_1 ||
2364bc38ee10SMichael S. Tsirkin         entry->irte.__reserved_2) {
2365651e4cefSPeter Xu         VTD_DPRINTF(GENERAL, "error: IRTE entry index %"PRIu16
2366651e4cefSPeter Xu                     " reserved fields non-zero: 0x%"PRIx64 " 0x%"PRIx64,
2367651e4cefSPeter Xu                     index, le64_to_cpu(entry->data[1]),
2368651e4cefSPeter Xu                     le64_to_cpu(entry->data[0]));
2369651e4cefSPeter Xu         return -VTD_FR_IR_IRTE_RSVD;
2370651e4cefSPeter Xu     }
2371651e4cefSPeter Xu 
2372ede9c94aSPeter Xu     if (sid != X86_IOMMU_SID_INVALID) {
2373ede9c94aSPeter Xu         /* Validate IRTE SID */
2374bc38ee10SMichael S. Tsirkin         source_id = le32_to_cpu(entry->irte.source_id);
2375bc38ee10SMichael S. Tsirkin         switch (entry->irte.sid_vtype) {
2376ede9c94aSPeter Xu         case VTD_SVT_NONE:
2377ede9c94aSPeter Xu             VTD_DPRINTF(IR, "No SID validation for IRTE index %d", index);
2378ede9c94aSPeter Xu             break;
2379ede9c94aSPeter Xu 
2380ede9c94aSPeter Xu         case VTD_SVT_ALL:
2381bc38ee10SMichael S. Tsirkin             mask = vtd_svt_mask[entry->irte.sid_q];
2382ede9c94aSPeter Xu             if ((source_id & mask) != (sid & mask)) {
2383ede9c94aSPeter Xu                 VTD_DPRINTF(GENERAL, "SID validation for IRTE index "
2384ede9c94aSPeter Xu                             "%d failed (reqid 0x%04x sid 0x%04x)", index,
2385ede9c94aSPeter Xu                             sid, source_id);
2386ede9c94aSPeter Xu                 return -VTD_FR_IR_SID_ERR;
2387ede9c94aSPeter Xu             }
2388ede9c94aSPeter Xu             break;
2389ede9c94aSPeter Xu 
2390ede9c94aSPeter Xu         case VTD_SVT_BUS:
2391ede9c94aSPeter Xu             bus_max = source_id >> 8;
2392ede9c94aSPeter Xu             bus_min = source_id & 0xff;
2393ede9c94aSPeter Xu             bus = sid >> 8;
2394ede9c94aSPeter Xu             if (bus > bus_max || bus < bus_min) {
2395ede9c94aSPeter Xu                 VTD_DPRINTF(GENERAL, "SID validation for IRTE index %d "
2396ede9c94aSPeter Xu                             "failed (bus %d outside %d-%d)", index, bus,
2397ede9c94aSPeter Xu                             bus_min, bus_max);
2398ede9c94aSPeter Xu                 return -VTD_FR_IR_SID_ERR;
2399ede9c94aSPeter Xu             }
2400ede9c94aSPeter Xu             break;
2401ede9c94aSPeter Xu 
2402ede9c94aSPeter Xu         default:
2403ede9c94aSPeter Xu             VTD_DPRINTF(GENERAL, "Invalid SVT bits (0x%x) in IRTE index "
2404bc38ee10SMichael S. Tsirkin                         "%d", entry->irte.sid_vtype, index);
2405ede9c94aSPeter Xu             /* Take this as verification failure. */
2406ede9c94aSPeter Xu             return -VTD_FR_IR_SID_ERR;
2407ede9c94aSPeter Xu             break;
2408ede9c94aSPeter Xu         }
2409ede9c94aSPeter Xu     }
2410651e4cefSPeter Xu 
2411651e4cefSPeter Xu     return 0;
2412651e4cefSPeter Xu }
2413651e4cefSPeter Xu 
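/*
 * Worked example (illustrative): with SVT=ALL and SQ=3, vtd_svt_mask[3]
 * is 0xfff8, so the low three bits (the PCI function number) are
 * ignored in the comparison. A request from 02:01.3 (sid 0x020b) then
 * matches an IRTE source_id programmed as 02:01.0 (0x0208), because
 * (0x020b & 0xfff8) == (0x0208 & 0xfff8).
 */
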
2414651e4cefSPeter Xu /* Fetch IRQ information of specific IR index */
2415ede9c94aSPeter Xu static int vtd_remap_irq_get(IntelIOMMUState *iommu, uint16_t index,
2416ede9c94aSPeter Xu                              VTDIrq *irq, uint16_t sid)
2417651e4cefSPeter Xu {
2418bc38ee10SMichael S. Tsirkin     VTD_IR_TableEntry irte = {};
2419651e4cefSPeter Xu     int ret = 0;
2420651e4cefSPeter Xu 
2421ede9c94aSPeter Xu     ret = vtd_irte_get(iommu, index, &irte, sid);
2422651e4cefSPeter Xu     if (ret) {
2423651e4cefSPeter Xu         return ret;
2424651e4cefSPeter Xu     }
2425651e4cefSPeter Xu 
2426bc38ee10SMichael S. Tsirkin     irq->trigger_mode = irte.irte.trigger_mode;
2427bc38ee10SMichael S. Tsirkin     irq->vector = irte.irte.vector;
2428bc38ee10SMichael S. Tsirkin     irq->delivery_mode = irte.irte.delivery_mode;
2429bc38ee10SMichael S. Tsirkin     irq->dest = le32_to_cpu(irte.irte.dest_id);
243028589311SJan Kiszka     if (!iommu->intr_eime) {
2431651e4cefSPeter Xu #define  VTD_IR_APIC_DEST_MASK         (0xff00ULL)
2432651e4cefSPeter Xu #define  VTD_IR_APIC_DEST_SHIFT        (8)
243328589311SJan Kiszka         irq->dest = (irq->dest & VTD_IR_APIC_DEST_MASK) >>
2434651e4cefSPeter Xu             VTD_IR_APIC_DEST_SHIFT;
243528589311SJan Kiszka     }
2436bc38ee10SMichael S. Tsirkin     irq->dest_mode = irte.irte.dest_mode;
2437bc38ee10SMichael S. Tsirkin     irq->redir_hint = irte.irte.redir_hint;
2438651e4cefSPeter Xu 
2439651e4cefSPeter Xu     VTD_DPRINTF(IR, "remapping interrupt index %d: trig:%u,vec:%u,"
2440651e4cefSPeter Xu                 "deliver:%u,dest:%u,dest_mode:%u", index,
2441651e4cefSPeter Xu                 irq->trigger_mode, irq->vector, irq->delivery_mode,
2442651e4cefSPeter Xu                 irq->dest, irq->dest_mode);
2443651e4cefSPeter Xu 
2444651e4cefSPeter Xu     return 0;
2445651e4cefSPeter Xu }
2446651e4cefSPeter Xu 
2447651e4cefSPeter Xu /* Generate one MSI message from VTDIrq info */
2448651e4cefSPeter Xu static void vtd_generate_msi_message(VTDIrq *irq, MSIMessage *msg_out)
2449651e4cefSPeter Xu {
2450651e4cefSPeter Xu     VTD_MSIMessage msg = {};
2451651e4cefSPeter Xu 
2452651e4cefSPeter Xu     /* Generate address bits */
2453651e4cefSPeter Xu     msg.dest_mode = irq->dest_mode;
2454651e4cefSPeter Xu     msg.redir_hint = irq->redir_hint;
2455651e4cefSPeter Xu     msg.dest = irq->dest;
245632946019SRadim Krčmář     msg.__addr_hi = irq->dest & 0xffffff00;
2457651e4cefSPeter Xu     msg.__addr_head = cpu_to_le32(0xfee);
2458651e4cefSPeter Xu     /* Keep this from original MSI address bits */
2459651e4cefSPeter Xu     msg.__not_used = irq->msi_addr_last_bits;
2460651e4cefSPeter Xu 
2461651e4cefSPeter Xu     /* Generate data bits */
2462651e4cefSPeter Xu     msg.vector = irq->vector;
2463651e4cefSPeter Xu     msg.delivery_mode = irq->delivery_mode;
2464651e4cefSPeter Xu     msg.level = 1;
2465651e4cefSPeter Xu     msg.trigger_mode = irq->trigger_mode;
2466651e4cefSPeter Xu 
2467651e4cefSPeter Xu     msg_out->address = msg.msi_addr;
2468651e4cefSPeter Xu     msg_out->data = msg.msi_data;
2469651e4cefSPeter Xu }
2470651e4cefSPeter Xu 
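/*
 * For reference (assuming VTD_MSIMessage mirrors the standard x86 MSI
 * layout, which is not spelled out in this file): the fields above end
 * up encoded roughly as
 *
 *     address = 0xFEE << 20 | dest << 12 | redir_hint << 3 | dest_mode << 2
 *     data    = trigger_mode << 15 | level << 14 | delivery_mode << 8 | vector
 *
 * e.g. vector 0x31 delivered fixed/physical to APIC ID 1 would yield
 * address 0xfee01000 and data 0x4031 (level is always set above).
 */
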
2471651e4cefSPeter Xu /* Interrupt remapping for MSI/MSI-X entry */
2472651e4cefSPeter Xu static int vtd_interrupt_remap_msi(IntelIOMMUState *iommu,
2473651e4cefSPeter Xu                                    MSIMessage *origin,
2474ede9c94aSPeter Xu                                    MSIMessage *translated,
2475ede9c94aSPeter Xu                                    uint16_t sid)
2476651e4cefSPeter Xu {
2477651e4cefSPeter Xu     int ret = 0;
2478651e4cefSPeter Xu     VTD_IR_MSIAddress addr;
2479651e4cefSPeter Xu     uint16_t index;
248009cd058aSMichael S. Tsirkin     VTDIrq irq = {};
2481651e4cefSPeter Xu 
2482651e4cefSPeter Xu     assert(origin && translated);
2483651e4cefSPeter Xu 
2484651e4cefSPeter Xu     if (!iommu || !iommu->intr_enabled) {
2485651e4cefSPeter Xu         goto do_not_translate;
2486651e4cefSPeter Xu     }
2487651e4cefSPeter Xu 
2488651e4cefSPeter Xu     if (origin->address & VTD_MSI_ADDR_HI_MASK) {
2489651e4cefSPeter Xu         VTD_DPRINTF(GENERAL, "error: MSI addr high 32 bits nonzero"
2490651e4cefSPeter Xu                     " during interrupt remapping: 0x%"PRIx32,
2491651e4cefSPeter Xu                     (uint32_t)((origin->address & VTD_MSI_ADDR_HI_MASK) >> \
2492651e4cefSPeter Xu                     VTD_MSI_ADDR_HI_SHIFT));
2493651e4cefSPeter Xu         return -VTD_FR_IR_REQ_RSVD;
2494651e4cefSPeter Xu     }
2495651e4cefSPeter Xu 
2496651e4cefSPeter Xu     addr.data = origin->address & VTD_MSI_ADDR_LO_MASK;
24971a43713bSPeter Xu     if (addr.addr.__head != 0xfee) {
2498651e4cefSPeter Xu         VTD_DPRINTF(GENERAL, "error: MSI addr low 32 bits invalid: "
2499651e4cefSPeter Xu                     "0x%"PRIx32, addr.data);
2500651e4cefSPeter Xu         return -VTD_FR_IR_REQ_RSVD;
2501651e4cefSPeter Xu     }
2502651e4cefSPeter Xu 
2503651e4cefSPeter Xu     /* This is compatible mode. */
2504bc38ee10SMichael S. Tsirkin     if (addr.addr.int_mode != VTD_IR_INT_FORMAT_REMAP) {
2505651e4cefSPeter Xu         goto do_not_translate;
2506651e4cefSPeter Xu     }
2507651e4cefSPeter Xu 
2508bc38ee10SMichael S. Tsirkin     index = addr.addr.index_h << 15 | le16_to_cpu(addr.addr.index_l);
2509651e4cefSPeter Xu 
2510651e4cefSPeter Xu #define  VTD_IR_MSI_DATA_SUBHANDLE       (0x0000ffff)
2511651e4cefSPeter Xu #define  VTD_IR_MSI_DATA_RESERVED        (0xffff0000)
2512651e4cefSPeter Xu 
2513bc38ee10SMichael S. Tsirkin     if (addr.addr.sub_valid) {
2514651e4cefSPeter Xu         /* See VT-d spec 5.1.2.2 and 5.1.3 on subhandle */
2515651e4cefSPeter Xu         index += origin->data & VTD_IR_MSI_DATA_SUBHANDLE;
2516651e4cefSPeter Xu     }
2517651e4cefSPeter Xu 
2518ede9c94aSPeter Xu     ret = vtd_remap_irq_get(iommu, index, &irq, sid);
2519651e4cefSPeter Xu     if (ret) {
2520651e4cefSPeter Xu         return ret;
2521651e4cefSPeter Xu     }
2522651e4cefSPeter Xu 
2523bc38ee10SMichael S. Tsirkin     if (addr.addr.sub_valid) {
2524651e4cefSPeter Xu         VTD_DPRINTF(IR, "received MSI interrupt");
2525651e4cefSPeter Xu         if (origin->data & VTD_IR_MSI_DATA_RESERVED) {
2526651e4cefSPeter Xu             VTD_DPRINTF(GENERAL, "error: MSI data bits non-zero for "
2527651e4cefSPeter Xu                         "interrupt remappable entry: 0x%"PRIx32,
2528651e4cefSPeter Xu                         origin->data);
2529651e4cefSPeter Xu             return -VTD_FR_IR_REQ_RSVD;
2530651e4cefSPeter Xu         }
2531651e4cefSPeter Xu     } else {
2532651e4cefSPeter Xu         uint8_t vector = origin->data & 0xff;
2533dea651a9SFeng Wu         uint8_t trigger_mode = (origin->data >> MSI_DATA_TRIGGER_SHIFT) & 0x1;
2534dea651a9SFeng Wu 
2535651e4cefSPeter Xu         VTD_DPRINTF(IR, "received IOAPIC interrupt");
2536651e4cefSPeter Xu         /* IOAPIC entry vector should be aligned with IRTE vector
2537651e4cefSPeter Xu          * (see vt-d spec 5.1.5.1). */
2538651e4cefSPeter Xu         if (vector != irq.vector) {
2539651e4cefSPeter Xu             VTD_DPRINTF(GENERAL, "IOAPIC vector inconsistent: "
2540651e4cefSPeter Xu                         "entry: %d, IRTE: %d, index: %d",
2541651e4cefSPeter Xu                         vector, irq.vector, index);
2542651e4cefSPeter Xu         }
2543dea651a9SFeng Wu 
2544dea651a9SFeng Wu         /* The Trigger Mode field must match the Trigger Mode in the IRTE.
2545dea651a9SFeng Wu          * (see vt-d spec 5.1.5.1). */
2546dea651a9SFeng Wu         if (trigger_mode != irq.trigger_mode) {
2547dea651a9SFeng Wu             VTD_DPRINTF(GENERAL, "IOAPIC trigger mode inconsistent: "
2548dea651a9SFeng Wu                         "entry: %u, IRTE: %u, index: %d",
2549dea651a9SFeng Wu                         trigger_mode, irq.trigger_mode, index);
2550dea651a9SFeng Wu         }
2551dea651a9SFeng Wu 
2552651e4cefSPeter Xu     }
2553651e4cefSPeter Xu 
2554651e4cefSPeter Xu     /*
2555651e4cefSPeter Xu      * We'd better keep the last two bits, assuming that the guest OS
2556651e4cefSPeter Xu      * might modify them. Keeping them does no harm after all.
2557651e4cefSPeter Xu      */
2558bc38ee10SMichael S. Tsirkin     irq.msi_addr_last_bits = addr.addr.__not_care;
2559651e4cefSPeter Xu 
2560651e4cefSPeter Xu     /* Translate VTDIrq to MSI message */
2561651e4cefSPeter Xu     vtd_generate_msi_message(&irq, translated);
2562651e4cefSPeter Xu 
2563651e4cefSPeter Xu     VTD_DPRINTF(IR, "mapping MSI 0x%"PRIx64":0x%"PRIx32 " -> "
2564651e4cefSPeter Xu                 "0x%"PRIx64":0x%"PRIx32, origin->address, origin->data,
2565651e4cefSPeter Xu                 translated->address, translated->data);
2566651e4cefSPeter Xu     return 0;
2567651e4cefSPeter Xu 
2568651e4cefSPeter Xu do_not_translate:
2569651e4cefSPeter Xu     memcpy(translated, origin, sizeof(*origin));
2570651e4cefSPeter Xu     return 0;
2571651e4cefSPeter Xu }
2572651e4cefSPeter Xu 
25738b5ed7dfSPeter Xu static int vtd_int_remap(X86IOMMUState *iommu, MSIMessage *src,
25748b5ed7dfSPeter Xu                          MSIMessage *dst, uint16_t sid)
25758b5ed7dfSPeter Xu {
2576ede9c94aSPeter Xu     return vtd_interrupt_remap_msi(INTEL_IOMMU_DEVICE(iommu),
2577ede9c94aSPeter Xu                                    src, dst, sid);
25788b5ed7dfSPeter Xu }
25798b5ed7dfSPeter Xu 
2580651e4cefSPeter Xu static MemTxResult vtd_mem_ir_read(void *opaque, hwaddr addr,
2581651e4cefSPeter Xu                                    uint64_t *data, unsigned size,
2582651e4cefSPeter Xu                                    MemTxAttrs attrs)
2583651e4cefSPeter Xu {
2584651e4cefSPeter Xu     return MEMTX_OK;
2585651e4cefSPeter Xu }
2586651e4cefSPeter Xu 
2587651e4cefSPeter Xu static MemTxResult vtd_mem_ir_write(void *opaque, hwaddr addr,
2588651e4cefSPeter Xu                                     uint64_t value, unsigned size,
2589651e4cefSPeter Xu                                     MemTxAttrs attrs)
2590651e4cefSPeter Xu {
2591651e4cefSPeter Xu     int ret = 0;
259209cd058aSMichael S. Tsirkin     MSIMessage from = {}, to = {};
2593ede9c94aSPeter Xu     uint16_t sid = X86_IOMMU_SID_INVALID;
2594651e4cefSPeter Xu 
2595651e4cefSPeter Xu     from.address = (uint64_t) addr + VTD_INTERRUPT_ADDR_FIRST;
2596651e4cefSPeter Xu     from.data = (uint32_t) value;
2597651e4cefSPeter Xu 
2598ede9c94aSPeter Xu     if (!attrs.unspecified) {
2599ede9c94aSPeter Xu         /* We have explicit Source ID */
2600ede9c94aSPeter Xu         sid = attrs.requester_id;
2601ede9c94aSPeter Xu     }
2602ede9c94aSPeter Xu 
2603ede9c94aSPeter Xu     ret = vtd_interrupt_remap_msi(opaque, &from, &to, sid);
2604651e4cefSPeter Xu     if (ret) {
2605651e4cefSPeter Xu         /* TODO: report error */
2606651e4cefSPeter Xu         VTD_DPRINTF(GENERAL, "int remap fail for addr 0x%"PRIx64
2607651e4cefSPeter Xu                     " data 0x%"PRIx32, from.address, from.data);
2608651e4cefSPeter Xu         /* Drop this interrupt */
2609651e4cefSPeter Xu         return MEMTX_ERROR;
2610651e4cefSPeter Xu     }
2611651e4cefSPeter Xu 
2612651e4cefSPeter Xu     VTD_DPRINTF(IR, "delivering MSI 0x%"PRIx64":0x%"PRIx32
2613651e4cefSPeter Xu                 " for device sid 0x%04x",
2614651e4cefSPeter Xu                 to.address, to.data, sid);
2615651e4cefSPeter Xu 
261632946019SRadim Krčmář     apic_get_class()->send_msi(&to);
2617651e4cefSPeter Xu 
2618651e4cefSPeter Xu     return MEMTX_OK;
2619651e4cefSPeter Xu }
2620651e4cefSPeter Xu 
2621651e4cefSPeter Xu static const MemoryRegionOps vtd_mem_ir_ops = {
2622651e4cefSPeter Xu     .read_with_attrs = vtd_mem_ir_read,
2623651e4cefSPeter Xu     .write_with_attrs = vtd_mem_ir_write,
2624651e4cefSPeter Xu     .endianness = DEVICE_LITTLE_ENDIAN,
2625651e4cefSPeter Xu     .impl = {
2626651e4cefSPeter Xu         .min_access_size = 4,
2627651e4cefSPeter Xu         .max_access_size = 4,
2628651e4cefSPeter Xu     },
2629651e4cefSPeter Xu     .valid = {
2630651e4cefSPeter Xu         .min_access_size = 4,
2631651e4cefSPeter Xu         .max_access_size = 4,
2632651e4cefSPeter Xu     },
2633651e4cefSPeter Xu };
26347df953bdSKnut Omang 
26357df953bdSKnut Omang VTDAddressSpace *vtd_find_add_as(IntelIOMMUState *s, PCIBus *bus, int devfn)
26367df953bdSKnut Omang {
26377df953bdSKnut Omang     uintptr_t key = (uintptr_t)bus;
26387df953bdSKnut Omang     VTDBus *vtd_bus = g_hash_table_lookup(s->vtd_as_by_busptr, &key);
26397df953bdSKnut Omang     VTDAddressSpace *vtd_dev_as;
2640e0a3c8ccSJason Wang     char name[128];
26417df953bdSKnut Omang 
26427df953bdSKnut Omang     if (!vtd_bus) {
26432d3fc581SJason Wang         uintptr_t *new_key = g_malloc(sizeof(*new_key));
26442d3fc581SJason Wang         *new_key = (uintptr_t)bus;
26457df953bdSKnut Omang         /* No corresponding free() */
264604af0e18SPeter Xu         vtd_bus = g_malloc0(sizeof(VTDBus) + sizeof(VTDAddressSpace *) * \
264704af0e18SPeter Xu                             X86_IOMMU_PCI_DEVFN_MAX);
26487df953bdSKnut Omang         vtd_bus->bus = bus;
26492d3fc581SJason Wang         g_hash_table_insert(s->vtd_as_by_busptr, new_key, vtd_bus);
26507df953bdSKnut Omang     }
26517df953bdSKnut Omang 
26527df953bdSKnut Omang     vtd_dev_as = vtd_bus->dev_as[devfn];
26537df953bdSKnut Omang 
26547df953bdSKnut Omang     if (!vtd_dev_as) {
2655e0a3c8ccSJason Wang         snprintf(name, sizeof(name), "intel_iommu_devfn_%d", devfn);
26567df953bdSKnut Omang         vtd_bus->dev_as[devfn] = vtd_dev_as = g_malloc0(sizeof(VTDAddressSpace));
26577df953bdSKnut Omang 
26587df953bdSKnut Omang         vtd_dev_as->bus = bus;
26597df953bdSKnut Omang         vtd_dev_as->devfn = (uint8_t)devfn;
26607df953bdSKnut Omang         vtd_dev_as->iommu_state = s;
26617df953bdSKnut Omang         vtd_dev_as->context_cache_entry.context_cache_gen = 0;
2662558e0024SPeter Xu 
2663558e0024SPeter Xu         /*
2664558e0024SPeter Xu          * The memory region relationships look like this (address ranges
2665558e0024SPeter Xu          * show only the lower 32 bits for brevity):
2666558e0024SPeter Xu          *
2667558e0024SPeter Xu          * |-----------------+-------------------+----------|
2668558e0024SPeter Xu          * | Name            | Address range     | Priority |
2669558e0024SPeter Xu          * |-----------------+-------------------+----------+
2670558e0024SPeter Xu          * | vtd_root        | 00000000-ffffffff |        0 |
2671558e0024SPeter Xu          * |  intel_iommu    | 00000000-ffffffff |        1 |
2672558e0024SPeter Xu          * |  vtd_sys_alias  | 00000000-ffffffff |        1 |
2673558e0024SPeter Xu          * |  intel_iommu_ir | fee00000-feefffff |       64 |
2674558e0024SPeter Xu          * |-----------------+-------------------+----------|
2675558e0024SPeter Xu          *
2676558e0024SPeter Xu          * We enable/disable DMAR by switching enablement for
2677558e0024SPeter Xu          * vtd_sys_alias and intel_iommu regions. IR region is always
2678558e0024SPeter Xu          * enabled.
2679558e0024SPeter Xu          */
26807df953bdSKnut Omang         memory_region_init_iommu(&vtd_dev_as->iommu, OBJECT(s),
2681558e0024SPeter Xu                                  &s->iommu_ops, "intel_iommu_dmar",
2682558e0024SPeter Xu                                  UINT64_MAX);
2683558e0024SPeter Xu         memory_region_init_alias(&vtd_dev_as->sys_alias, OBJECT(s),
2684558e0024SPeter Xu                                  "vtd_sys_alias", get_system_memory(),
2685558e0024SPeter Xu                                  0, memory_region_size(get_system_memory()));
2686651e4cefSPeter Xu         memory_region_init_io(&vtd_dev_as->iommu_ir, OBJECT(s),
2687651e4cefSPeter Xu                               &vtd_mem_ir_ops, s, "intel_iommu_ir",
2688651e4cefSPeter Xu                               VTD_INTERRUPT_ADDR_SIZE);
2689558e0024SPeter Xu         memory_region_init(&vtd_dev_as->root, OBJECT(s),
2690558e0024SPeter Xu                            "vtd_root", UINT64_MAX);
2691558e0024SPeter Xu         memory_region_add_subregion_overlap(&vtd_dev_as->root,
2692558e0024SPeter Xu                                             VTD_INTERRUPT_ADDR_FIRST,
2693558e0024SPeter Xu                                             &vtd_dev_as->iommu_ir, 64);
2694558e0024SPeter Xu         address_space_init(&vtd_dev_as->as, &vtd_dev_as->root, name);
2695558e0024SPeter Xu         memory_region_add_subregion_overlap(&vtd_dev_as->root, 0,
2696558e0024SPeter Xu                                             &vtd_dev_as->sys_alias, 1);
2697558e0024SPeter Xu         memory_region_add_subregion_overlap(&vtd_dev_as->root, 0,
2698558e0024SPeter Xu                                             &vtd_dev_as->iommu, 1);
2699558e0024SPeter Xu         vtd_switch_address_space(vtd_dev_as);
27007df953bdSKnut Omang     }
27017df953bdSKnut Omang     return vtd_dev_as;
27027df953bdSKnut Omang }
27037df953bdSKnut Omang 
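/*
 * A minimal usage sketch (illustrative): the PCI core obtains each
 * device's DMA address space through the hook installed in
 * vtd_realize(), which essentially does
 *
 *     VTDAddressSpace *vtd_as = vtd_find_add_as(s, bus, devfn);
 *     AddressSpace *as = &vtd_as->as;
 *
 * DMA from the device then goes through vtd_dev_as->root and hits
 * either the "intel_iommu_dmar" region or the system-memory alias,
 * depending on whether DMAR is enabled (see the table above).
 */
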
2704dd4d607eSPeter Xu /* Unmap the whole range in the notifier's scope. */
2705dd4d607eSPeter Xu static void vtd_address_space_unmap(VTDAddressSpace *as, IOMMUNotifier *n)
2706dd4d607eSPeter Xu {
2707dd4d607eSPeter Xu     IOMMUTLBEntry entry;
2708dd4d607eSPeter Xu     hwaddr size;
2709dd4d607eSPeter Xu     hwaddr start = n->start;
2710dd4d607eSPeter Xu     hwaddr end = n->end;
2711dd4d607eSPeter Xu 
2712dd4d607eSPeter Xu     /*
2713dd4d607eSPeter Xu      * Note: all the code in this function assumes that the IOVA is
2714dd4d607eSPeter Xu      * no wider than VTD_MGAW bits (which is restricted by the VT-d
2715dd4d607eSPeter Xu      * spec); otherwise we would need to consider 64-bit overflow.
2716dd4d607eSPeter Xu      */
2717dd4d607eSPeter Xu 
2718dd4d607eSPeter Xu     if (end > VTD_ADDRESS_SIZE) {
2719dd4d607eSPeter Xu         /*
2720dd4d607eSPeter Xu          * No need to unmap regions that are bigger than the whole
2721dd4d607eSPeter Xu          * address space size supported by VT-d
2722dd4d607eSPeter Xu          */
2723dd4d607eSPeter Xu         end = VTD_ADDRESS_SIZE;
2724dd4d607eSPeter Xu     }
2725dd4d607eSPeter Xu 
2726dd4d607eSPeter Xu     assert(start <= end);
2727dd4d607eSPeter Xu     size = end - start;
2728dd4d607eSPeter Xu 
2729dd4d607eSPeter Xu     if (ctpop64(size) != 1) {
2730dd4d607eSPeter Xu         /*
2731dd4d607eSPeter Xu          * This size cannot form a correct mask. Enlarge it to the
2732dd4d607eSPeter Xu          * smallest power of two that covers the range.
2733dd4d607eSPeter Xu          */
2734dd4d607eSPeter Xu         int n = 64 - clz64(size);
2735dd4d607eSPeter Xu         if (n > VTD_MGAW) {
2736dd4d607eSPeter Xu             /* should not happen, but in case it happens, limit it */
2737dd4d607eSPeter Xu             n = VTD_MGAW;
2738dd4d607eSPeter Xu         }
2739dd4d607eSPeter Xu         size = 1ULL << n;
2740dd4d607eSPeter Xu     }
2741dd4d607eSPeter Xu 
2742dd4d607eSPeter Xu     entry.target_as = &address_space_memory;
2743dd4d607eSPeter Xu     /* Adjust iova for the size */
2744dd4d607eSPeter Xu     entry.iova = n->start & ~(size - 1);
2745dd4d607eSPeter Xu     /* This field is meaningless for unmap */
2746dd4d607eSPeter Xu     entry.translated_addr = 0;
2747dd4d607eSPeter Xu     entry.perm = IOMMU_NONE;
2748dd4d607eSPeter Xu     entry.addr_mask = size - 1;
2749dd4d607eSPeter Xu 
2750dd4d607eSPeter Xu     trace_vtd_as_unmap_whole(pci_bus_num(as->bus),
2751dd4d607eSPeter Xu                              VTD_PCI_SLOT(as->devfn),
2752dd4d607eSPeter Xu                              VTD_PCI_FUNC(as->devfn),
2753dd4d607eSPeter Xu                              entry.iova, size);
2754dd4d607eSPeter Xu 
2755dd4d607eSPeter Xu     memory_region_notify_one(n, &entry);
2756dd4d607eSPeter Xu }
2757dd4d607eSPeter Xu 
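/*
 * Worked example (illustrative): a notifier scope with start 0x1000
 * and end 0x6000 gives size 0x5000, which is not a power of two;
 * n = 64 - clz64(size) = 15, so size becomes 0x8000, iova is rounded
 * down to 0 and addr_mask becomes 0x7fff. The resulting UNMAP
 * notification covers [0x0, 0x7fff], a superset of the original scope.
 */
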
2758dd4d607eSPeter Xu static void vtd_address_space_unmap_all(IntelIOMMUState *s)
2759dd4d607eSPeter Xu {
2760dd4d607eSPeter Xu     IntelIOMMUNotifierNode *node;
2761dd4d607eSPeter Xu     VTDAddressSpace *vtd_as;
2762dd4d607eSPeter Xu     IOMMUNotifier *n;
2763dd4d607eSPeter Xu 
2764dd4d607eSPeter Xu     QLIST_FOREACH(node, &s->notifiers_list, next) {
2765dd4d607eSPeter Xu         vtd_as = node->vtd_as;
2766dd4d607eSPeter Xu         IOMMU_NOTIFIER_FOREACH(n, &vtd_as->iommu) {
2767dd4d607eSPeter Xu             vtd_address_space_unmap(vtd_as, n);
2768dd4d607eSPeter Xu         }
2769dd4d607eSPeter Xu     }
2770dd4d607eSPeter Xu }
2771dd4d607eSPeter Xu 
2772f06a696dSPeter Xu static int vtd_replay_hook(IOMMUTLBEntry *entry, void *private)
2773f06a696dSPeter Xu {
2774f06a696dSPeter Xu     memory_region_notify_one((IOMMUNotifier *)private, entry);
2775f06a696dSPeter Xu     return 0;
2776f06a696dSPeter Xu }
2777f06a696dSPeter Xu 
2778f06a696dSPeter Xu static void vtd_iommu_replay(MemoryRegion *mr, IOMMUNotifier *n)
2779f06a696dSPeter Xu {
2780f06a696dSPeter Xu     VTDAddressSpace *vtd_as = container_of(mr, VTDAddressSpace, iommu);
2781f06a696dSPeter Xu     IntelIOMMUState *s = vtd_as->iommu_state;
2782f06a696dSPeter Xu     uint8_t bus_n = pci_bus_num(vtd_as->bus);
2783f06a696dSPeter Xu     VTDContextEntry ce;
2784f06a696dSPeter Xu 
2785f06a696dSPeter Xu     /*
2786dd4d607eSPeter Xu      * The replay can be triggered by either an invalidation or a newly
2787dd4d607eSPeter Xu      * created entry. No matter what, we release the existing mappings
2788dd4d607eSPeter Xu      * first (that means flushing the caches for UNMAP-only notifiers).
2789f06a696dSPeter Xu      */
2790dd4d607eSPeter Xu     vtd_address_space_unmap(vtd_as, n);
2791dd4d607eSPeter Xu 
2792dd4d607eSPeter Xu     if (vtd_dev_to_context_entry(s, bus_n, vtd_as->devfn, &ce) == 0) {
2793f06a696dSPeter Xu         trace_vtd_replay_ce_valid(bus_n, PCI_SLOT(vtd_as->devfn),
2794f06a696dSPeter Xu                                   PCI_FUNC(vtd_as->devfn),
2795f06a696dSPeter Xu                                   VTD_CONTEXT_ENTRY_DID(ce.hi),
2796f06a696dSPeter Xu                                   ce.hi, ce.lo);
2797dd4d607eSPeter Xu         vtd_page_walk(&ce, 0, ~0ULL, vtd_replay_hook, (void *)n, false);
2798f06a696dSPeter Xu     } else {
2799f06a696dSPeter Xu         trace_vtd_replay_ce_invalid(bus_n, PCI_SLOT(vtd_as->devfn),
2800f06a696dSPeter Xu                                     PCI_FUNC(vtd_as->devfn));
2801f06a696dSPeter Xu     }
2802f06a696dSPeter Xu 
2803f06a696dSPeter Xu     return;
2804f06a696dSPeter Xu }
2805f06a696dSPeter Xu 
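/*
 * Illustrative note (an assumption about callers, not part of this
 * file): the replay callback above is typically run right after a new
 * notifier is registered (e.g. by a vfio-like consumer), so that the
 * guest's existing second-level mappings are pushed to the notifier
 * via vtd_page_walk() instead of waiting for the next invalidation.
 */
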
28061da12ec4SLe Tan /* Do the initialization. It is also called on reset, so pay attention
28071da12ec4SLe Tan  * when adding new initialization code.
28081da12ec4SLe Tan  */
28091da12ec4SLe Tan static void vtd_init(IntelIOMMUState *s)
28101da12ec4SLe Tan {
2811d54bd7f8SPeter Xu     X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s);
2812d54bd7f8SPeter Xu 
28131da12ec4SLe Tan     memset(s->csr, 0, DMAR_REG_SIZE);
28141da12ec4SLe Tan     memset(s->wmask, 0, DMAR_REG_SIZE);
28151da12ec4SLe Tan     memset(s->w1cmask, 0, DMAR_REG_SIZE);
28161da12ec4SLe Tan     memset(s->womask, 0, DMAR_REG_SIZE);
28171da12ec4SLe Tan 
28181da12ec4SLe Tan     s->iommu_ops.translate = vtd_iommu_translate;
28195bf3d319SPeter Xu     s->iommu_ops.notify_flag_changed = vtd_iommu_notify_flag_changed;
2820f06a696dSPeter Xu     s->iommu_ops.replay = vtd_iommu_replay;
28211da12ec4SLe Tan     s->root = 0;
28221da12ec4SLe Tan     s->root_extended = false;
28231da12ec4SLe Tan     s->dmar_enabled = false;
28241da12ec4SLe Tan     s->iq_head = 0;
28251da12ec4SLe Tan     s->iq_tail = 0;
28261da12ec4SLe Tan     s->iq = 0;
28271da12ec4SLe Tan     s->iq_size = 0;
28281da12ec4SLe Tan     s->qi_enabled = false;
28291da12ec4SLe Tan     s->iq_last_desc_type = VTD_INV_DESC_NONE;
28301da12ec4SLe Tan     s->next_frcd_reg = 0;
28311da12ec4SLe Tan     s->cap = VTD_CAP_FRO | VTD_CAP_NFR | VTD_CAP_ND | VTD_CAP_MGAW |
2832d66b969bSJason Wang              VTD_CAP_SAGAW | VTD_CAP_MAMV | VTD_CAP_PSI | VTD_CAP_SLLPS;
2833ed7b8fbcSLe Tan     s->ecap = VTD_ECAP_QI | VTD_ECAP_IRO;
28341da12ec4SLe Tan 
2835d54bd7f8SPeter Xu     if (x86_iommu->intr_supported) {
2836e6b6af05SRadim Krčmář         s->ecap |= VTD_ECAP_IR | VTD_ECAP_MHMV;
2837e6b6af05SRadim Krčmář         if (s->intr_eim == ON_OFF_AUTO_ON) {
2838e6b6af05SRadim Krčmář             s->ecap |= VTD_ECAP_EIM;
2839e6b6af05SRadim Krčmář         }
2840e6b6af05SRadim Krčmář         assert(s->intr_eim != ON_OFF_AUTO_AUTO);
2841d54bd7f8SPeter Xu     }
2842d54bd7f8SPeter Xu 
2843554f5e16SJason Wang     if (x86_iommu->dt_supported) {
2844554f5e16SJason Wang         s->ecap |= VTD_ECAP_DT;
2845554f5e16SJason Wang     }
2846554f5e16SJason Wang 
28473b40f0e5SAviv Ben-David     if (s->caching_mode) {
28483b40f0e5SAviv Ben-David         s->cap |= VTD_CAP_CM;
28493b40f0e5SAviv Ben-David     }
28503b40f0e5SAviv Ben-David 
2851d92fa2dcSLe Tan     vtd_reset_context_cache(s);
2852b5a280c0SLe Tan     vtd_reset_iotlb(s);
2853d92fa2dcSLe Tan 
28541da12ec4SLe Tan     /* Define registers with default values and bit semantics */
28551da12ec4SLe Tan     vtd_define_long(s, DMAR_VER_REG, 0x10UL, 0, 0);
28561da12ec4SLe Tan     vtd_define_quad(s, DMAR_CAP_REG, s->cap, 0, 0);
28571da12ec4SLe Tan     vtd_define_quad(s, DMAR_ECAP_REG, s->ecap, 0, 0);
28581da12ec4SLe Tan     vtd_define_long(s, DMAR_GCMD_REG, 0, 0xff800000UL, 0);
28591da12ec4SLe Tan     vtd_define_long_wo(s, DMAR_GCMD_REG, 0xff800000UL);
28601da12ec4SLe Tan     vtd_define_long(s, DMAR_GSTS_REG, 0, 0, 0);
28611da12ec4SLe Tan     vtd_define_quad(s, DMAR_RTADDR_REG, 0, 0xfffffffffffff000ULL, 0);
28621da12ec4SLe Tan     vtd_define_quad(s, DMAR_CCMD_REG, 0, 0xe0000003ffffffffULL, 0);
28631da12ec4SLe Tan     vtd_define_quad_wo(s, DMAR_CCMD_REG, 0x3ffff0000ULL);
28641da12ec4SLe Tan 
28651da12ec4SLe Tan     /* Advanced Fault Logging not supported */
28661da12ec4SLe Tan     vtd_define_long(s, DMAR_FSTS_REG, 0, 0, 0x11UL);
28671da12ec4SLe Tan     vtd_define_long(s, DMAR_FECTL_REG, 0x80000000UL, 0x80000000UL, 0);
28681da12ec4SLe Tan     vtd_define_long(s, DMAR_FEDATA_REG, 0, 0x0000ffffUL, 0);
28691da12ec4SLe Tan     vtd_define_long(s, DMAR_FEADDR_REG, 0, 0xfffffffcUL, 0);
28701da12ec4SLe Tan 
28711da12ec4SLe Tan     /* Treated as RsvdZ when EIM in ECAP_REG is not supported
28721da12ec4SLe Tan      * vtd_define_long(s, DMAR_FEUADDR_REG, 0, 0xffffffffUL, 0);
28731da12ec4SLe Tan      */
28741da12ec4SLe Tan     vtd_define_long(s, DMAR_FEUADDR_REG, 0, 0, 0);
28751da12ec4SLe Tan 
28761da12ec4SLe Tan     /* Treated as RO for implementations that report the PLMR and PHMR
28771da12ec4SLe Tan      * fields as Clear in CAP_REG.
28781da12ec4SLe Tan      * vtd_define_long(s, DMAR_PMEN_REG, 0, 0x80000000UL, 0);
28791da12ec4SLe Tan      */
28801da12ec4SLe Tan     vtd_define_long(s, DMAR_PMEN_REG, 0, 0, 0);
28811da12ec4SLe Tan 
2882ed7b8fbcSLe Tan     vtd_define_quad(s, DMAR_IQH_REG, 0, 0, 0);
2883ed7b8fbcSLe Tan     vtd_define_quad(s, DMAR_IQT_REG, 0, 0x7fff0ULL, 0);
2884ed7b8fbcSLe Tan     vtd_define_quad(s, DMAR_IQA_REG, 0, 0xfffffffffffff007ULL, 0);
2885ed7b8fbcSLe Tan     vtd_define_long(s, DMAR_ICS_REG, 0, 0, 0x1UL);
2886ed7b8fbcSLe Tan     vtd_define_long(s, DMAR_IECTL_REG, 0x80000000UL, 0x80000000UL, 0);
2887ed7b8fbcSLe Tan     vtd_define_long(s, DMAR_IEDATA_REG, 0, 0xffffffffUL, 0);
2888ed7b8fbcSLe Tan     vtd_define_long(s, DMAR_IEADDR_REG, 0, 0xfffffffcUL, 0);
2889ed7b8fbcSLe Tan     /* Treated as RsvdZ when EIM in ECAP_REG is not supported */
2890ed7b8fbcSLe Tan     vtd_define_long(s, DMAR_IEUADDR_REG, 0, 0, 0);
2891ed7b8fbcSLe Tan 
28921da12ec4SLe Tan     /* IOTLB registers */
28931da12ec4SLe Tan     vtd_define_quad(s, DMAR_IOTLB_REG, 0, 0xb003ffff00000000ULL, 0);
28941da12ec4SLe Tan     vtd_define_quad(s, DMAR_IVA_REG, 0, 0xfffffffffffff07fULL, 0);
28951da12ec4SLe Tan     vtd_define_quad_wo(s, DMAR_IVA_REG, 0xfffffffffffff07fULL);
28961da12ec4SLe Tan 
28971da12ec4SLe Tan     /* Fault Recording Registers, 128-bit */
28981da12ec4SLe Tan     vtd_define_quad(s, DMAR_FRCD_REG_0_0, 0, 0, 0);
28991da12ec4SLe Tan     vtd_define_quad(s, DMAR_FRCD_REG_0_2, 0, 0, 0x8000000000000000ULL);
2900a5861439SPeter Xu 
2901a5861439SPeter Xu     /*
290228589311SJan Kiszka      * Interrupt remapping registers.
2903a5861439SPeter Xu      */
290428589311SJan Kiszka     vtd_define_quad(s, DMAR_IRTA_REG, 0, 0xfffffffffffff80fULL, 0);
29051da12ec4SLe Tan }
29061da12ec4SLe Tan 
29071da12ec4SLe Tan /* Should not reset address_spaces on reset because devices will still use
29081da12ec4SLe Tan  * the address space they got at first (they won't ask the bus again).
29091da12ec4SLe Tan  */
29101da12ec4SLe Tan static void vtd_reset(DeviceState *dev)
29111da12ec4SLe Tan {
29121da12ec4SLe Tan     IntelIOMMUState *s = INTEL_IOMMU_DEVICE(dev);
29131da12ec4SLe Tan 
29141da12ec4SLe Tan     VTD_DPRINTF(GENERAL, "");
29151da12ec4SLe Tan     vtd_init(s);
2916dd4d607eSPeter Xu 
2917dd4d607eSPeter Xu     /*
2918dd4d607eSPeter Xu      * When device reset, throw away all mappings and external caches
2919dd4d607eSPeter Xu      */
2920dd4d607eSPeter Xu     vtd_address_space_unmap_all(s);
29211da12ec4SLe Tan }
29221da12ec4SLe Tan 
2923621d983aSMarcel Apfelbaum static AddressSpace *vtd_host_dma_iommu(PCIBus *bus, void *opaque, int devfn)
2924621d983aSMarcel Apfelbaum {
2925621d983aSMarcel Apfelbaum     IntelIOMMUState *s = opaque;
2926621d983aSMarcel Apfelbaum     VTDAddressSpace *vtd_as;
2927621d983aSMarcel Apfelbaum 
29288e7a0a16SPeter Xu     assert(0 <= devfn && devfn < X86_IOMMU_PCI_DEVFN_MAX);
2929621d983aSMarcel Apfelbaum 
2930621d983aSMarcel Apfelbaum     vtd_as = vtd_find_add_as(s, bus, devfn);
2931621d983aSMarcel Apfelbaum     return &vtd_as->as;
2932621d983aSMarcel Apfelbaum }
2933621d983aSMarcel Apfelbaum 
2934e6b6af05SRadim Krčmář static bool vtd_decide_config(IntelIOMMUState *s, Error **errp)
29356333e93cSRadim Krčmář {
2936e6b6af05SRadim Krčmář     X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s);
2937e6b6af05SRadim Krčmář 
29386333e93cSRadim Krčmář     /* Currently Intel IOMMU IR only support "kernel-irqchip={off|split}" */
29396333e93cSRadim Krčmář     if (x86_iommu->intr_supported && kvm_irqchip_in_kernel() &&
29406333e93cSRadim Krčmář         !kvm_irqchip_is_split()) {
29416333e93cSRadim Krčmář         error_setg(errp, "Intel Interrupt Remapping cannot work with "
29426333e93cSRadim Krčmář                          "kernel-irqchip=on, please use 'split|off'.");
29436333e93cSRadim Krčmář         return false;
29446333e93cSRadim Krčmář     }
2945e6b6af05SRadim Krčmář     if (s->intr_eim == ON_OFF_AUTO_ON && !x86_iommu->intr_supported) {
2946e6b6af05SRadim Krčmář         error_setg(errp, "eim=on cannot be selected without intremap=on");
2947e6b6af05SRadim Krčmář         return false;
2948e6b6af05SRadim Krčmář     }
2949e6b6af05SRadim Krčmář 
2950e6b6af05SRadim Krčmář     if (s->intr_eim == ON_OFF_AUTO_AUTO) {
2951fb506e70SRadim Krčmář         s->intr_eim = (kvm_irqchip_in_kernel() || s->buggy_eim)
2952fb506e70SRadim Krčmář                       && x86_iommu->intr_supported ?
2953e6b6af05SRadim Krčmář                                               ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
2954e6b6af05SRadim Krčmář     }
2955fb506e70SRadim Krčmář     if (s->intr_eim == ON_OFF_AUTO_ON && !s->buggy_eim) {
2956fb506e70SRadim Krčmář         if (!kvm_irqchip_in_kernel()) {
2957fb506e70SRadim Krčmář             error_setg(errp, "eim=on requires accel=kvm,kernel-irqchip=split");
2958fb506e70SRadim Krčmář             return false;
2959fb506e70SRadim Krčmář         }
2960fb506e70SRadim Krčmář         if (!kvm_enable_x2apic()) {
2961fb506e70SRadim Krčmář             error_setg(errp, "eim=on requires support on the KVM side"
2962fb506e70SRadim Krčmář                              " (X2APIC_API, first shipped in v4.7)");
2963fb506e70SRadim Krčmář             return false;
2964fb506e70SRadim Krčmář         }
2965fb506e70SRadim Krčmář     }
2966e6b6af05SRadim Krčmář 
29676333e93cSRadim Krčmář     return true;
29686333e93cSRadim Krčmář }
29696333e93cSRadim Krčmář 
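/*
 * Summary of the checks above (illustrative restatement): eim=auto
 * resolves to "on" only when interrupt remapping is enabled and the
 * irqchip is in-kernel (or x-buggy-eim is set); with eim=on and no
 * x-buggy-eim, a split in-kernel irqchip and KVM X2APIC_API support
 * (kvm_enable_x2apic()) are additionally required.
 */
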
29701da12ec4SLe Tan static void vtd_realize(DeviceState *dev, Error **errp)
29711da12ec4SLe Tan {
2972cb135f59SPeter Xu     PCMachineState *pcms = PC_MACHINE(qdev_get_machine());
2973cb135f59SPeter Xu     PCIBus *bus = pcms->bus;
29741da12ec4SLe Tan     IntelIOMMUState *s = INTEL_IOMMU_DEVICE(dev);
29754684a204SPeter Xu     X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(dev);
29761da12ec4SLe Tan 
29771da12ec4SLe Tan     VTD_DPRINTF(GENERAL, "");
2978fb9f5926SDavid Kiarie     x86_iommu->type = TYPE_INTEL;
29796333e93cSRadim Krčmář 
2980e6b6af05SRadim Krčmář     if (!vtd_decide_config(s, errp)) {
29816333e93cSRadim Krčmář         return;
29826333e93cSRadim Krčmář     }
29836333e93cSRadim Krčmář 
2984dd4d607eSPeter Xu     QLIST_INIT(&s->notifiers_list);
29857df953bdSKnut Omang     memset(s->vtd_as_by_bus_num, 0, sizeof(s->vtd_as_by_bus_num));
29861da12ec4SLe Tan     memory_region_init_io(&s->csrmem, OBJECT(s), &vtd_mem_ops, s,
29871da12ec4SLe Tan                           "intel_iommu", DMAR_REG_SIZE);
29881da12ec4SLe Tan     sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->csrmem);
2989b5a280c0SLe Tan     /* No corresponding destroy */
2990b5a280c0SLe Tan     s->iotlb = g_hash_table_new_full(vtd_uint64_hash, vtd_uint64_equal,
2991b5a280c0SLe Tan                                      g_free, g_free);
29927df953bdSKnut Omang     s->vtd_as_by_busptr = g_hash_table_new_full(vtd_uint64_hash, vtd_uint64_equal,
29937df953bdSKnut Omang                                               g_free, g_free);
29941da12ec4SLe Tan     vtd_init(s);
2995621d983aSMarcel Apfelbaum     sysbus_mmio_map(SYS_BUS_DEVICE(s), 0, Q35_HOST_BRIDGE_IOMMU_ADDR);
2996621d983aSMarcel Apfelbaum     pci_setup_iommu(bus, vtd_host_dma_iommu, dev);
2997cb135f59SPeter Xu     /* Pseudo address space under root PCI bus. */
2998cb135f59SPeter Xu     pcms->ioapic_as = vtd_host_dma_iommu(bus, s, Q35_PSEUDO_DEVFN_IOAPIC);
29991da12ec4SLe Tan }
30001da12ec4SLe Tan 
30011da12ec4SLe Tan static void vtd_class_init(ObjectClass *klass, void *data)
30021da12ec4SLe Tan {
30031da12ec4SLe Tan     DeviceClass *dc = DEVICE_CLASS(klass);
30041c7955c4SPeter Xu     X86IOMMUClass *x86_class = X86_IOMMU_CLASS(klass);
30051da12ec4SLe Tan 
30061da12ec4SLe Tan     dc->reset = vtd_reset;
30071da12ec4SLe Tan     dc->vmsd = &vtd_vmstate;
30081da12ec4SLe Tan     dc->props = vtd_properties;
3009621d983aSMarcel Apfelbaum     dc->hotpluggable = false;
30101c7955c4SPeter Xu     x86_class->realize = vtd_realize;
30118b5ed7dfSPeter Xu     x86_class->int_remap = vtd_int_remap;
3012*e4f4fb1eSEduardo Habkost     /*
3013*e4f4fb1eSEduardo Habkost      * FIXME: Set only because we are not sure yet if this device
3014*e4f4fb1eSEduardo Habkost      * will be outside the q35 sysbus whitelist.
3015*e4f4fb1eSEduardo Habkost      */
3016*e4f4fb1eSEduardo Habkost     dc->user_creatable = true;
30171da12ec4SLe Tan }
30181da12ec4SLe Tan 
30191da12ec4SLe Tan static const TypeInfo vtd_info = {
30201da12ec4SLe Tan     .name          = TYPE_INTEL_IOMMU_DEVICE,
30211c7955c4SPeter Xu     .parent        = TYPE_X86_IOMMU_DEVICE,
30221da12ec4SLe Tan     .instance_size = sizeof(IntelIOMMUState),
30231da12ec4SLe Tan     .class_init    = vtd_class_init,
30241da12ec4SLe Tan };
30251da12ec4SLe Tan 
30261da12ec4SLe Tan static void vtd_register_types(void)
30271da12ec4SLe Tan {
30281da12ec4SLe Tan     VTD_DPRINTF(GENERAL, "");
30291da12ec4SLe Tan     type_register_static(&vtd_info);
30301da12ec4SLe Tan }
30311da12ec4SLe Tan 
30321da12ec4SLe Tan type_init(vtd_register_types)
3033