Lines Matching full:iommu

21 #include <linux/iommu-helper.h>
23 #include <linux/amd-iommu.h>
37 #include <asm/iommu.h>
42 #include "../dma-iommu.h"
73 * general struct to manage commands sent to an IOMMU
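
    For orientation, the command container this comment introduces can be modelled
    as a fixed array of four 32-bit words, since AMD IOMMU commands are 128 bits
    wide; a minimal sketch (the real per-command layout is whatever the build_*
    helpers further down pack into these words):

        #include <stdint.h>

        /* one 128-bit command slot; opcode and operands are packed in by
         * per-command build helpers */
        struct iommu_cmd_sketch {
                uint32_t data[4];
        };
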
122 struct dev_table_entry *get_dev_table(struct amd_iommu *iommu) in get_dev_table() argument
125 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; in get_dev_table()
151 /* Writes the specific IOMMU for a device into the PCI segment rlookup table */
152 void amd_iommu_set_rlookup_table(struct amd_iommu *iommu, u16 devid) in amd_iommu_set_rlookup_table() argument
154 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; in amd_iommu_set_rlookup_table()
156 pci_seg->rlookup_table[devid] = iommu; in amd_iommu_set_rlookup_table()
185 static struct iommu_dev_data *alloc_dev_data(struct amd_iommu *iommu, u16 devid) in alloc_dev_data() argument
188 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; in alloc_dev_data()
202 static struct iommu_dev_data *search_dev_data(struct amd_iommu *iommu, u16 devid) in search_dev_data() argument
206 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; in search_dev_data()
222 struct amd_iommu *iommu; in clone_alias() local
229 iommu = rlookup_amd_iommu(&pdev->dev); in clone_alias()
230 if (!iommu) in clone_alias()
233 amd_iommu_set_rlookup_table(iommu, alias); in clone_alias()
234 dev_table = get_dev_table(iommu); in clone_alias()
242 static void clone_aliases(struct amd_iommu *iommu, struct device *dev) in clone_aliases() argument
255 clone_alias(pdev, iommu->pci_seg->alias_table[pci_dev_id(pdev)], NULL); in clone_aliases()
260 static void setup_aliases(struct amd_iommu *iommu, struct device *dev) in setup_aliases() argument
263 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; in setup_aliases()
279 clone_aliases(iommu, dev); in setup_aliases()
282 static struct iommu_dev_data *find_dev_data(struct amd_iommu *iommu, u16 devid) in find_dev_data() argument
286 dev_data = search_dev_data(iommu, devid); in find_dev_data()
289 dev_data = alloc_dev_data(iommu, devid); in find_dev_data()
293 if (translation_pre_enabled(iommu)) in find_dev_data()
301 * Find or create an IOMMU group for an acpihid device.
352 struct amd_iommu *iommu; in check_device() local
363 iommu = rlookup_amd_iommu(dev); in check_device()
364 if (!iommu) in check_device()
368 pci_seg = iommu->pci_seg; in check_device()
375 static int iommu_init_device(struct amd_iommu *iommu, struct device *dev) in iommu_init_device() argument
388 dev_data = find_dev_data(iommu, devid); in iommu_init_device()
393 setup_aliases(iommu, dev); in iommu_init_device()
403 dev_data->iommu_v2 = iommu->is_iommu_v2; in iommu_init_device()
411 static void iommu_ignore_device(struct amd_iommu *iommu, struct device *dev) in iommu_ignore_device() argument
413 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; in iommu_ignore_device()
414 struct dev_table_entry *dev_table = get_dev_table(iommu); in iommu_ignore_device()
425 setup_aliases(iommu, dev); in iommu_ignore_device()
453 static void dump_dte_entry(struct amd_iommu *iommu, u16 devid) in dump_dte_entry() argument
456 struct dev_table_entry *dev_table = get_dev_table(iommu); in dump_dte_entry()
471 static void amd_iommu_report_rmp_hw_error(struct amd_iommu *iommu, volatile u32 *event) in amd_iommu_report_rmp_hw_error() argument
483 pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, PCI_BUS_NUM(devid), in amd_iommu_report_rmp_hw_error()
495 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in amd_iommu_report_rmp_hw_error()
503 static void amd_iommu_report_rmp_fault(struct amd_iommu *iommu, volatile u32 *event) in amd_iommu_report_rmp_fault() argument
516 pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, PCI_BUS_NUM(devid), in amd_iommu_report_rmp_fault()
528 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in amd_iommu_report_rmp_fault()
542 static void amd_iommu_report_page_fault(struct amd_iommu *iommu, in amd_iommu_report_page_fault() argument
549 pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, PCI_BUS_NUM(devid), in amd_iommu_report_page_fault()
565 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), in amd_iommu_report_page_fault()
584 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in amd_iommu_report_page_fault()
593 static void iommu_print_event(struct amd_iommu *iommu, void *__evt) in iommu_print_event() argument
595 struct device *dev = iommu->iommu.dev; in iommu_print_event()
621 amd_iommu_report_page_fault(iommu, devid, pasid, address, flags); in iommu_print_event()
628 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in iommu_print_event()
630 dump_dte_entry(iommu, devid); in iommu_print_event()
635 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in iommu_print_event()
640 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in iommu_print_event()
653 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in iommu_print_event()
658 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in iommu_print_event()
662 amd_iommu_report_rmp_fault(iommu, event); in iommu_print_event()
665 amd_iommu_report_rmp_hw_error(iommu, event); in iommu_print_event()
671 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in iommu_print_event()
689 static void iommu_poll_events(struct amd_iommu *iommu) in iommu_poll_events() argument
693 head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET); in iommu_poll_events()
694 tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET); in iommu_poll_events()
697 iommu_print_event(iommu, iommu->evt_buf + head); in iommu_poll_events()
701 writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET); in iommu_poll_events()
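
    iommu_poll_events() above is a plain head/tail ring consumer: read HEAD and
    TAIL from MMIO, handle every entry in between, then publish the new HEAD so
    the hardware can reuse those slots. A stand-alone model of that loop, where
    the entry size, buffer size, and bare volatile pointers standing in for the
    real MMIO accessors are all assumptions for illustration:

        #include <stdint.h>
        #include <stdio.h>

        #define EVT_ENTRY_SIZE  16u                      /* assumed entry size */
        #define EVT_BUF_SIZE    (256u * EVT_ENTRY_SIZE)  /* assumed log size */

        static void poll_event_log(volatile uint32_t *head_reg,
                                   volatile uint32_t *tail_reg,
                                   const uint8_t *evt_buf)
        {
                uint32_t head = *head_reg;
                uint32_t tail = *tail_reg;

                while (head != tail) {
                        /* a real handler would decode the entry at evt_buf + head */
                        printf("event at offset %u, first byte 0x%02x\n",
                               head, evt_buf[head]);
                        head = (head + EVT_ENTRY_SIZE) % EVT_BUF_SIZE;
                }

                *head_reg = head;  /* hand consumed slots back to the hardware */
        }
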
704 static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw) in iommu_handle_ppr_entry() argument
715 fault.sbdf = PCI_SEG_DEVID_TO_SBDF(iommu->pci_seg->id, PPR_DEVID(raw[0])); in iommu_handle_ppr_entry()
722 static void iommu_poll_ppr_log(struct amd_iommu *iommu) in iommu_poll_ppr_log() argument
726 if (iommu->ppr_log == NULL) in iommu_poll_ppr_log()
729 head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); in iommu_poll_ppr_log()
730 tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); in iommu_poll_ppr_log()
737 raw = (u64 *)(iommu->ppr_log + head); in iommu_poll_ppr_log()
765 writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); in iommu_poll_ppr_log()
768 iommu_handle_ppr_entry(iommu, entry); in iommu_poll_ppr_log()
771 head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); in iommu_poll_ppr_log()
772 tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); in iommu_poll_ppr_log()
787 static void iommu_poll_ga_log(struct amd_iommu *iommu) in iommu_poll_ga_log() argument
791 if (iommu->ga_log == NULL) in iommu_poll_ga_log()
794 head = readl(iommu->mmio_base + MMIO_GA_HEAD_OFFSET); in iommu_poll_ga_log()
795 tail = readl(iommu->mmio_base + MMIO_GA_TAIL_OFFSET); in iommu_poll_ga_log()
801 raw = (u64 *)(iommu->ga_log + head); in iommu_poll_ga_log()
808 writel(head, iommu->mmio_base + MMIO_GA_HEAD_OFFSET); in iommu_poll_ga_log()
830 amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu) in amd_iommu_set_pci_msi_domain() argument
836 dev_set_msi_domain(dev, iommu->ir_domain); in amd_iommu_set_pci_msi_domain()
841 amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu) { } in amd_iommu_set_pci_msi_domain() argument
849 struct amd_iommu *iommu = (struct amd_iommu *) data; in amd_iommu_handle_irq() local
850 u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); in amd_iommu_handle_irq()
855 writel(mask, iommu->mmio_base + MMIO_STATUS_OFFSET); in amd_iommu_handle_irq()
858 pr_devel("Processing IOMMU (ivhd%d) %s Log\n", in amd_iommu_handle_irq()
859 iommu->index, evt_type); in amd_iommu_handle_irq()
860 int_handler(iommu); in amd_iommu_handle_irq()
864 overflow_handler(iommu); in amd_iommu_handle_irq()
874 * Workaround: The IOMMU driver should read back the in amd_iommu_handle_irq()
879 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); in amd_iommu_handle_irq()
928 * IOMMU command queuing functions
932 static int wait_on_sem(struct amd_iommu *iommu, u64 data) in wait_on_sem() argument
936 while (*iommu->cmd_sem != data && i < LOOP_TIMEOUT) { in wait_on_sem()
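
    wait_on_sem() above is a bounded busy-wait on a memory semaphore that the
    IOMMU updates when it executes a COMPLETION_WAIT command. A compilable
    approximation; the timeout bound and the (omitted) per-iteration delay are
    assumptions, not the driver's real values:

        #include <errno.h>
        #include <stdint.h>

        #define LOOP_TIMEOUT 2000000    /* assumed iteration bound */

        static int wait_on_sem_sketch(volatile uint64_t *sem, uint64_t data)
        {
                int i = 0;

                /* spin until the hardware stores 'data' into the semaphore */
                while (*sem != data && i < LOOP_TIMEOUT)
                        i += 1;         /* the driver adds a small delay here */

                if (i == LOOP_TIMEOUT)
                        return -EIO;    /* completion wait timed out */

                return 0;
        }
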
949 static void copy_cmd_to_buffer(struct amd_iommu *iommu, in copy_cmd_to_buffer() argument
956 tail = iommu->cmd_buf_tail; in copy_cmd_to_buffer()
957 target = iommu->cmd_buf + tail; in copy_cmd_to_buffer()
961 iommu->cmd_buf_tail = tail; in copy_cmd_to_buffer()
963 /* Tell the IOMMU about it */ in copy_cmd_to_buffer()
964 writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); in copy_cmd_to_buffer()
968 struct amd_iommu *iommu, in build_completion_wait() argument
971 u64 paddr = iommu_virt_to_phys((void *)iommu->cmd_sem); in build_completion_wait()
1129 static int __iommu_queue_command_sync(struct amd_iommu *iommu, in __iommu_queue_command_sync() argument
1136 next_tail = (iommu->cmd_buf_tail + sizeof(*cmd)) % CMD_BUFFER_SIZE; in __iommu_queue_command_sync()
1138 left = (iommu->cmd_buf_head - next_tail) % CMD_BUFFER_SIZE; in __iommu_queue_command_sync()
1152 iommu->cmd_buf_head = readl(iommu->mmio_base + in __iommu_queue_command_sync()
1158 copy_cmd_to_buffer(iommu, cmd); in __iommu_queue_command_sync()
1161 iommu->need_sync = sync; in __iommu_queue_command_sync()
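
    __iommu_queue_command_sync() above sizes the free space in the circular
    command buffer with unsigned modular arithmetic, re-reads the hardware HEAD
    register only when the ring looks full, and then copies the command at TAIL
    (copy_cmd_to_buffer()) before publishing the new TAIL via MMIO. A
    self-contained sketch of that arithmetic; buffer and command sizes are
    assumptions, and the wrap-around subtraction is only valid because the
    buffer size is a power of two:

        #include <stdbool.h>
        #include <stdint.h>
        #include <string.h>

        #define CMD_BUFFER_SIZE 4096u   /* assumed; must be a power of two */
        #define CMD_SIZE        16u     /* one 128-bit command */

        struct cmd_ring {
                uint8_t  buf[CMD_BUFFER_SIZE];
                uint32_t head;          /* last value read from the MMIO HEAD register */
                uint32_t tail;          /* next free slot */
        };

        static bool cmd_ring_post(struct cmd_ring *ring, const void *cmd)
        {
                uint32_t next_tail = (ring->tail + CMD_SIZE) % CMD_BUFFER_SIZE;
                /* free bytes between the new tail and the head, modulo the ring size */
                uint32_t left = (ring->head - next_tail) % CMD_BUFFER_SIZE;

                if (left <= CMD_SIZE)   /* keep slack so "full" != "empty" */
                        return false;   /* caller refreshes head from MMIO and retries */

                memcpy(ring->buf + ring->tail, cmd, CMD_SIZE);
                ring->tail = next_tail;
                /* here the driver writes ring->tail to the MMIO TAIL register */
                return true;
        }
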
1166 static int iommu_queue_command_sync(struct amd_iommu *iommu, in iommu_queue_command_sync() argument
1173 raw_spin_lock_irqsave(&iommu->lock, flags); in iommu_queue_command_sync()
1174 ret = __iommu_queue_command_sync(iommu, cmd, sync); in iommu_queue_command_sync()
1175 raw_spin_unlock_irqrestore(&iommu->lock, flags); in iommu_queue_command_sync()
1180 static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd) in iommu_queue_command() argument
1182 return iommu_queue_command_sync(iommu, cmd, true); in iommu_queue_command()
1187 * buffer of an IOMMU
1189 static int iommu_completion_wait(struct amd_iommu *iommu) in iommu_completion_wait() argument
1196 if (!iommu->need_sync) in iommu_completion_wait()
1199 data = atomic64_add_return(1, &iommu->cmd_sem_val); in iommu_completion_wait()
1200 build_completion_wait(&cmd, iommu, data); in iommu_completion_wait()
1202 raw_spin_lock_irqsave(&iommu->lock, flags); in iommu_completion_wait()
1204 ret = __iommu_queue_command_sync(iommu, &cmd, false); in iommu_completion_wait()
1208 ret = wait_on_sem(iommu, data); in iommu_completion_wait()
1211 raw_spin_unlock_irqrestore(&iommu->lock, flags); in iommu_completion_wait()
1216 static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid) in iommu_flush_dte() argument
1222 return iommu_queue_command(iommu, &cmd); in iommu_flush_dte()
1225 static void amd_iommu_flush_dte_all(struct amd_iommu *iommu) in amd_iommu_flush_dte_all() argument
1228 u16 last_bdf = iommu->pci_seg->last_bdf; in amd_iommu_flush_dte_all()
1231 iommu_flush_dte(iommu, devid); in amd_iommu_flush_dte_all()
1233 iommu_completion_wait(iommu); in amd_iommu_flush_dte_all()
1240 static void amd_iommu_flush_tlb_all(struct amd_iommu *iommu) in amd_iommu_flush_tlb_all() argument
1243 u16 last_bdf = iommu->pci_seg->last_bdf; in amd_iommu_flush_tlb_all()
1249 iommu_queue_command(iommu, &cmd); in amd_iommu_flush_tlb_all()
1252 iommu_completion_wait(iommu); in amd_iommu_flush_tlb_all()
1255 static void amd_iommu_flush_tlb_domid(struct amd_iommu *iommu, u32 dom_id) in amd_iommu_flush_tlb_domid() argument
1261 iommu_queue_command(iommu, &cmd); in amd_iommu_flush_tlb_domid()
1263 iommu_completion_wait(iommu); in amd_iommu_flush_tlb_domid()
1266 static void amd_iommu_flush_all(struct amd_iommu *iommu) in amd_iommu_flush_all() argument
1272 iommu_queue_command(iommu, &cmd); in amd_iommu_flush_all()
1273 iommu_completion_wait(iommu); in amd_iommu_flush_all()
1276 static void iommu_flush_irt(struct amd_iommu *iommu, u16 devid) in iommu_flush_irt() argument
1282 iommu_queue_command(iommu, &cmd); in iommu_flush_irt()
1285 static void amd_iommu_flush_irt_all(struct amd_iommu *iommu) in amd_iommu_flush_irt_all() argument
1288 u16 last_bdf = iommu->pci_seg->last_bdf; in amd_iommu_flush_irt_all()
1290 if (iommu->irtcachedis_enabled) in amd_iommu_flush_irt_all()
1294 iommu_flush_irt(iommu, devid); in amd_iommu_flush_irt_all()
1296 iommu_completion_wait(iommu); in amd_iommu_flush_irt_all()
1299 void iommu_flush_all_caches(struct amd_iommu *iommu) in iommu_flush_all_caches() argument
1301 if (iommu_feature(iommu, FEATURE_IA)) { in iommu_flush_all_caches()
1302 amd_iommu_flush_all(iommu); in iommu_flush_all_caches()
1304 amd_iommu_flush_dte_all(iommu); in iommu_flush_all_caches()
1305 amd_iommu_flush_irt_all(iommu); in iommu_flush_all_caches()
1306 amd_iommu_flush_tlb_all(iommu); in iommu_flush_all_caches()
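
    iommu_flush_all_caches() above chooses between a single invalidate-all
    command (when the hardware advertises FEATURE_IA) and separate flushes of
    the device-table, interrupt-remapping and translation caches. A tiny
    decision sketch with the concrete flush routines passed in as callbacks
    (names illustrative):

        #include <stdbool.h>

        static void flush_all_caches_sketch(bool has_invalidate_all,
                                            void (*invalidate_all)(void),
                                            void (*flush_dte_all)(void),
                                            void (*flush_irt_all)(void),
                                            void (*flush_tlb_all)(void))
        {
                if (has_invalidate_all) {
                        invalidate_all();       /* one INVALIDATE_ALL command */
                } else {
                        flush_dte_all();        /* device table entries */
                        flush_irt_all();        /* interrupt remapping tables */
                        flush_tlb_all();        /* per-domain translations */
                }
        }
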
1316 struct amd_iommu *iommu; in device_flush_iotlb() local
1321 iommu = rlookup_amd_iommu(dev_data->dev); in device_flush_iotlb()
1322 if (!iommu) in device_flush_iotlb()
1327 return iommu_queue_command(iommu, &cmd); in device_flush_iotlb()
1332 struct amd_iommu *iommu = data; in device_flush_dte_alias() local
1334 return iommu_flush_dte(iommu, alias); in device_flush_dte_alias()
1342 struct amd_iommu *iommu; in device_flush_dte() local
1348 iommu = rlookup_amd_iommu(dev_data->dev); in device_flush_dte()
1349 if (!iommu) in device_flush_dte()
1357 device_flush_dte_alias, iommu); in device_flush_dte()
1359 ret = iommu_flush_dte(iommu, dev_data->devid); in device_flush_dte()
1363 pci_seg = iommu->pci_seg; in device_flush_dte()
1366 ret = iommu_flush_dte(iommu, alias); in device_flush_dte()
1380 * page. Otherwise it flushes the whole TLB of the IOMMU.
1396 * Devices of this domain are behind this IOMMU in __domain_flush_pages()
1471 * Devices of this domain are behind this IOMMU in amd_iommu_domain_flush_complete()
1507 * allocated for every IOMMU as the default domain. If device isolation
1580 static void set_dte_entry(struct amd_iommu *iommu, u16 devid, in set_dte_entry() argument
1586 struct dev_table_entry *dev_table = get_dev_table(iommu); in set_dte_entry()
1597 * When SNP is enabled, only set TV bit when IOMMU in set_dte_entry()
1609 if (iommu_feature(iommu, FEATURE_EPHSUP)) in set_dte_entry()
1660 amd_iommu_flush_tlb_domid(iommu, old_domid); in set_dte_entry()
1664 static void clear_dte_entry(struct amd_iommu *iommu, u16 devid) in clear_dte_entry() argument
1666 struct dev_table_entry *dev_table = get_dev_table(iommu); in clear_dte_entry()
1676 amd_iommu_apply_erratum_63(iommu, devid); in clear_dte_entry()
1682 struct amd_iommu *iommu; in do_attach() local
1685 iommu = rlookup_amd_iommu(dev_data->dev); in do_attach()
1686 if (!iommu) in do_attach()
1699 domain->dev_iommu[iommu->index] += 1; in do_attach()
1703 set_dte_entry(iommu, dev_data->devid, domain, in do_attach()
1705 clone_aliases(iommu, dev_data->dev); in do_attach()
1713 struct amd_iommu *iommu; in do_detach() local
1715 iommu = rlookup_amd_iommu(dev_data->dev); in do_detach()
1716 if (!iommu) in do_detach()
1722 clear_dte_entry(iommu, dev_data->devid); in do_detach()
1723 clone_aliases(iommu, dev_data->dev); in do_detach()
1735 domain->dev_iommu[iommu->index] -= 1; in do_detach()
1843 * left the caches in the IOMMU dirty. So we have to flush in attach_device()
1904 struct amd_iommu *iommu; in amd_iommu_probe_device() local
1910 iommu = rlookup_amd_iommu(dev); in amd_iommu_probe_device()
1911 if (!iommu) in amd_iommu_probe_device()
1915 if (!iommu->iommu.ops) in amd_iommu_probe_device()
1919 return &iommu->iommu; in amd_iommu_probe_device()
1921 ret = iommu_init_device(iommu, dev); in amd_iommu_probe_device()
1926 iommu_ignore_device(iommu, dev); in amd_iommu_probe_device()
1928 amd_iommu_set_pci_msi_domain(dev, iommu); in amd_iommu_probe_device()
1929 iommu_dev = &iommu->iommu; in amd_iommu_probe_device()
1932 iommu_completion_wait(iommu); in amd_iommu_probe_device()
1946 struct amd_iommu *iommu; in amd_iommu_release_device() local
1951 iommu = rlookup_amd_iommu(dev); in amd_iommu_release_device()
1952 if (!iommu) in amd_iommu_release_device()
1956 iommu_completion_wait(iommu); in amd_iommu_release_device()
1978 struct amd_iommu *iommu = rlookup_amd_iommu(dev_data->dev); in update_device_table() local
1980 if (!iommu) in update_device_table()
1982 set_dte_entry(iommu, dev_data->devid, domain, in update_device_table()
1984 clone_aliases(iommu, dev_data->dev); in update_device_table()
2006 * The following functions belong to the exported interface of AMD IOMMU
2008 * This interface allows access to lower level functions of the IOMMU
2099 * Force IOMMU v1 page table when iommu=pt and in protection_domain_alloc()
2205 struct amd_iommu *iommu = rlookup_amd_iommu(dev); in amd_iommu_attach_device() local
2231 iommu_completion_wait(iommu); in amd_iommu_attach_device()
2277 * AMD's IOMMU can flush as many pages as necessary in a single flush. in amd_iommu_iotlb_gather_add_page()
2282 * hypervisor needs to synchronize the host IOMMU PTEs with those of in amd_iommu_iotlb_gather_add_page()
2347 struct amd_iommu *iommu; in amd_iommu_get_resv_regions() local
2356 iommu = rlookup_amd_iommu(dev); in amd_iommu_get_resv_regions()
2357 if (!iommu) in amd_iommu_get_resv_regions()
2359 pci_seg = iommu->pci_seg; in amd_iommu_get_resv_regions()
2490 * The next functions do a basic initialization of IOMMU for pass through
2493 * In passthrough mode the IOMMU is initialized and enabled but not used for
2588 * IOMMU TLB needs to be flushed before Device TLB to in __flush_pasid()
2589 * prevent device TLB refill from IOMMU TLB in __flush_pasid()
2600 /* Wait until IOMMU TLB flushes are complete */ in __flush_pasid()
2605 struct amd_iommu *iommu; in __flush_pasid() local
2616 iommu = rlookup_amd_iommu(dev_data->dev); in __flush_pasid()
2617 if (!iommu) in __flush_pasid()
2622 ret = iommu_queue_command(iommu, &cmd); in __flush_pasid()
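
    The comments inside __flush_pasid() above encode an ordering rule: the
    IOMMU's own TLB must be invalidated, and that flush completed, before any
    device-side (ATS) IOTLB is invalidated, otherwise a device could refill its
    IOTLB from stale IOMMU TLB entries. A control-flow sketch of that ordering;
    every helper below is a hypothetical stub, not the driver's API:

        #include <stdint.h>

        static int queue_iommu_tlb_flush(uint32_t pasid)     { (void)pasid; return 0; }
        static int wait_for_iommu_completion(void)           { return 0; }
        static int queue_device_iotlb_flush(uint32_t pasid)  { (void)pasid; return 0; }

        static int flush_pasid_sketch(uint32_t pasid)
        {
                int ret;

                ret = queue_iommu_tlb_flush(pasid);     /* IOMMU TLB first */
                if (ret)
                        return ret;

                ret = wait_for_iommu_completion();      /* drain before touching ATS */
                if (ret)
                        return ret;

                /* only now is it safe to invalidate the device IOTLBs */
                return queue_device_iotlb_flush(pasid);
        }
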
2776 struct amd_iommu *iommu; in amd_iommu_complete_ppr() local
2780 iommu = rlookup_amd_iommu(&pdev->dev); in amd_iommu_complete_ppr()
2781 if (!iommu) in amd_iommu_complete_ppr()
2787 return iommu_queue_command(iommu, &cmd); in amd_iommu_complete_ppr()
2844 static void iommu_flush_irt_and_complete(struct amd_iommu *iommu, u16 devid) in iommu_flush_irt_and_complete() argument
2851 if (iommu->irtcachedis_enabled) in iommu_flush_irt_and_complete()
2855 data = atomic64_add_return(1, &iommu->cmd_sem_val); in iommu_flush_irt_and_complete()
2856 build_completion_wait(&cmd2, iommu, data); in iommu_flush_irt_and_complete()
2858 raw_spin_lock_irqsave(&iommu->lock, flags); in iommu_flush_irt_and_complete()
2859 ret = __iommu_queue_command_sync(iommu, &cmd, true); in iommu_flush_irt_and_complete()
2862 ret = __iommu_queue_command_sync(iommu, &cmd2, false); in iommu_flush_irt_and_complete()
2865 wait_on_sem(iommu, data); in iommu_flush_irt_and_complete()
2867 raw_spin_unlock_irqrestore(&iommu->lock, flags); in iommu_flush_irt_and_complete()
2870 static void set_dte_irq_entry(struct amd_iommu *iommu, u16 devid, in set_dte_irq_entry() argument
2874 struct dev_table_entry *dev_table = get_dev_table(iommu); in set_dte_irq_entry()
2886 static struct irq_remap_table *get_irq_table(struct amd_iommu *iommu, u16 devid) in get_irq_table() argument
2889 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; in get_irq_table()
2892 "%s: no iommu for devid %x:%x\n", in get_irq_table()
2928 static void set_remap_table_entry(struct amd_iommu *iommu, u16 devid, in set_remap_table_entry() argument
2931 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; in set_remap_table_entry()
2934 set_dte_irq_entry(iommu, devid, table); in set_remap_table_entry()
2935 iommu_flush_dte(iommu, devid); in set_remap_table_entry()
2943 struct amd_iommu *iommu = rlookup_amd_iommu(&pdev->dev); in set_remap_table_entry_alias() local
2945 if (!iommu) in set_remap_table_entry_alias()
2948 pci_seg = iommu->pci_seg; in set_remap_table_entry_alias()
2950 set_dte_irq_entry(iommu, alias, table); in set_remap_table_entry_alias()
2956 static struct irq_remap_table *alloc_irq_table(struct amd_iommu *iommu, in alloc_irq_table() argument
2967 pci_seg = iommu->pci_seg; in alloc_irq_table()
2975 set_remap_table_entry(iommu, devid, table); in alloc_irq_table()
2993 set_remap_table_entry(iommu, devid, table); in alloc_irq_table()
3004 set_remap_table_entry(iommu, devid, table); in alloc_irq_table()
3007 set_remap_table_entry(iommu, alias, table); in alloc_irq_table()
3010 iommu_completion_wait(iommu); in alloc_irq_table()
3022 static int alloc_irq_index(struct amd_iommu *iommu, u16 devid, int count, in alloc_irq_index() argument
3029 table = alloc_irq_table(iommu, devid, pdev); in alloc_irq_index()
3041 if (!iommu->irte_ops->is_allocated(table, index)) { in alloc_irq_index()
3051 iommu->irte_ops->set_allocated(table, index - c + 1); in alloc_irq_index()
3068 static int modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index, in modify_irte_ga() argument
3076 table = get_irq_table(iommu, devid); in modify_irte_ga()
3096 iommu_flush_irt_and_complete(iommu, devid); in modify_irte_ga()
3101 static int modify_irte(struct amd_iommu *iommu, in modify_irte() argument
3107 table = get_irq_table(iommu, devid); in modify_irte()
3115 iommu_flush_irt_and_complete(iommu, devid); in modify_irte()
3120 static void free_irte(struct amd_iommu *iommu, u16 devid, int index) in free_irte() argument
3125 table = get_irq_table(iommu, devid); in free_irte()
3130 iommu->irte_ops->clear_allocated(table, index); in free_irte()
3133 iommu_flush_irt_and_complete(iommu, devid); in free_irte()
3166 static void irte_activate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index) in irte_activate() argument
3171 modify_irte(iommu, devid, index, irte); in irte_activate()
3174 static void irte_ga_activate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index) in irte_ga_activate() argument
3179 modify_irte_ga(iommu, devid, index, irte); in irte_ga_activate()
3182 static void irte_deactivate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index) in irte_deactivate() argument
3187 modify_irte(iommu, devid, index, irte); in irte_deactivate()
3190 static void irte_ga_deactivate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index) in irte_ga_deactivate() argument
3195 modify_irte_ga(iommu, devid, index, irte); in irte_ga_deactivate()
3198 static void irte_set_affinity(struct amd_iommu *iommu, void *entry, u16 devid, u16 index, in irte_set_affinity() argument
3205 modify_irte(iommu, devid, index, irte); in irte_set_affinity()
3208 static void irte_ga_set_affinity(struct amd_iommu *iommu, void *entry, u16 devid, u16 index, in irte_ga_set_affinity() argument
3219 modify_irte_ga(iommu, devid, index, irte); in irte_ga_set_affinity()
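
    The paired irte_*() and irte_ga_*() helpers above exist because the driver
    supports two IRTE formats (legacy and the 128-bit guest-APIC format); the
    rest of the code reaches them through iommu->irte_ops and never needs to
    know which format is active. A reduced model of that dispatch pattern
    (struct and names illustrative, with empty stand-in implementations):

        #include <stdint.h>

        struct irte_ops_sketch {
                void (*activate)(void *entry, uint16_t devid, uint16_t index);
                void (*deactivate)(void *entry, uint16_t devid, uint16_t index);
        };

        static void legacy_activate(void *e, uint16_t d, uint16_t i)   { (void)e; (void)d; (void)i; }
        static void legacy_deactivate(void *e, uint16_t d, uint16_t i) { (void)e; (void)d; (void)i; }
        static void ga_activate(void *e, uint16_t d, uint16_t i)       { (void)e; (void)d; (void)i; }
        static void ga_deactivate(void *e, uint16_t d, uint16_t i)     { (void)e; (void)d; (void)i; }

        static const struct irte_ops_sketch legacy_ops = { legacy_activate, legacy_deactivate };
        static const struct irte_ops_sketch ga_ops     = { ga_activate, ga_deactivate };

        /* picked once at init time, then used uniformly via iommu->irte_ops */
        static const struct irte_ops_sketch *pick_irte_ops(int ga_mode)
        {
                return ga_mode ? &ga_ops : &legacy_ops;
        }
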
3307 struct amd_iommu *iommu = data->iommu; in irq_remapping_prepare_irte() local
3309 if (!iommu) in irq_remapping_prepare_irte()
3314 iommu->irte_ops->prepare(data->entry, apic->delivery_mode, in irq_remapping_prepare_irte()
3358 struct amd_iommu *iommu; in irq_remapping_alloc() local
3374 iommu = __rlookup_amd_iommu(seg, devid); in irq_remapping_alloc()
3375 if (!iommu) in irq_remapping_alloc()
3385 table = alloc_irq_table(iommu, devid, NULL); in irq_remapping_alloc()
3394 iommu->irte_ops->set_allocated(table, i); in irq_remapping_alloc()
3405 index = alloc_irq_index(iommu, devid, nr_irqs, align, in irq_remapping_alloc()
3408 index = alloc_irq_index(iommu, devid, nr_irqs, false, NULL); in irq_remapping_alloc()
3440 data->iommu = iommu; in irq_remapping_alloc()
3457 free_irte(iommu, devid, index + i); in irq_remapping_alloc()
3476 free_irte(data->iommu, irte_info->devid, irte_info->index); in irq_remapping_free()
3484 static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu,
3494 struct amd_iommu *iommu = data->iommu; in irq_remapping_activate() local
3497 if (!iommu) in irq_remapping_activate()
3500 iommu->irte_ops->activate(iommu, data->entry, irte_info->devid, in irq_remapping_activate()
3502 amd_ir_update_irte(irq_data, iommu, data, irte_info, cfg); in irq_remapping_activate()
3511 struct amd_iommu *iommu = data->iommu; in irq_remapping_deactivate() local
3513 if (iommu) in irq_remapping_deactivate()
3514 iommu->irte_ops->deactivate(iommu, data->entry, irte_info->devid, in irq_remapping_deactivate()
3521 struct amd_iommu *iommu; in irq_remapping_select() local
3534 iommu = __rlookup_amd_iommu((devid >> 16), (devid & 0xffff)); in irq_remapping_select()
3536 return iommu && iommu->ir_domain == d; in irq_remapping_select()
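
    irq_remapping_select() above recovers the PCI segment and device ID from a
    single 32-bit value with a shift and a mask; that is the inverse of the
    PCI_SEG_DEVID_TO_SBDF() packing used in the PPR path, and the bus/slot/
    function numbers printed throughout the event handlers come out of the low
    16 bits the standard PCI way. A small runnable example of the packing as
    assumed from these call sites:

        #include <assert.h>
        #include <stdint.h>

        /* standard PCI devfn decomposition, as done by PCI_BUS_NUM()/PCI_SLOT()/PCI_FUNC() */
        #define BUS_NUM(devid)  (((devid) >> 8) & 0xff)
        #define SLOT(devid)     (((devid) >> 3) & 0x1f)
        #define FUNC(devid)     ((devid) & 0x07)

        static uint32_t pack_sbdf(uint16_t seg, uint16_t devid)
        {
                return ((uint32_t)seg << 16) | devid;
        }

        int main(void)
        {
                uint32_t sbdf = pack_sbdf(0x0001, 0x3a10);  /* 0001:3a:02.0 */

                assert((sbdf >> 16) == 0x0001);             /* segment */
                assert((sbdf & 0xffff) == 0x3a10);          /* bus/dev/fn */
                assert(BUS_NUM(sbdf & 0xffff) == 0x3a);
                assert(SLOT(sbdf & 0xffff) == 0x02);
                assert(FUNC(sbdf & 0xffff) == 0x0);
                return 0;
        }
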
3568 return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid, in amd_iommu_activate_guest_mode()
3598 return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid, in amd_iommu_deactivate_guest_mode()
3612 if (ir_data->iommu == NULL) in amd_ir_set_vcpu_affinity()
3615 dev_data = search_dev_data(ir_data->iommu, irte_info->devid); in amd_ir_set_vcpu_affinity()
3660 static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu, in amd_ir_update_irte() argument
3670 iommu->irte_ops->set_affinity(iommu, ir_data->entry, irte_info->devid, in amd_ir_update_irte()
3682 struct amd_iommu *iommu = ir_data->iommu; in amd_ir_set_affinity() local
3685 if (!iommu) in amd_ir_set_affinity()
3692 amd_ir_update_irte(data, iommu, ir_data, irte_info, cfg); in amd_ir_set_affinity()
3733 int amd_iommu_create_irq_domain(struct amd_iommu *iommu) in amd_iommu_create_irq_domain() argument
3737 fn = irq_domain_alloc_named_id_fwnode("AMD-IR", iommu->index); in amd_iommu_create_irq_domain()
3740 iommu->ir_domain = irq_domain_create_hierarchy(arch_get_ir_parent_domain(), 0, 0, in amd_iommu_create_irq_domain()
3741 fn, &amd_ir_domain_ops, iommu); in amd_iommu_create_irq_domain()
3742 if (!iommu->ir_domain) { in amd_iommu_create_irq_domain()
3747 irq_domain_update_bus_token(iommu->ir_domain, DOMAIN_BUS_AMDVI); in amd_iommu_create_irq_domain()
3748 iommu->ir_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT | in amd_iommu_create_irq_domain()
3752 iommu->ir_domain->msi_parent_ops = &virt_amdvi_msi_parent_ops; in amd_iommu_create_irq_domain()
3754 iommu->ir_domain->msi_parent_ops = &amdvi_msi_parent_ops; in amd_iommu_create_irq_domain()
3768 if (!ir_data->iommu) in amd_iommu_update_ga()
3779 return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid, in amd_iommu_update_ga()