iommu.c: diff 16e7483e6f02973972f832b18042fd6c45fe26c0 → 721612994f53ed600b39a80d912b10f51960e2e3
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 *         Leo Duran <leo.duran@amd.com>
 */

#define pr_fmt(fmt)	"AMD-Vi: " fmt

--- 4 unchanged lines hidden ---

#include <linux/acpi.h>
#include <linux/amba/bus.h>
#include <linux/platform_device.h>
#include <linux/pci-ats.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/scatterlist.h>
-#include <linux/dma-mapping.h>
+#include <linux/dma-map-ops.h>
#include <linux/dma-direct.h>
#include <linux/dma-iommu.h>
#include <linux/iommu-helper.h>
#include <linux/delay.h>
#include <linux/amd-iommu.h>
#include <linux/notifier.h>
#include <linux/export.h>
#include <linux/irq.h>
#include <linux/msi.h>
-#include <linux/dma-contiguous.h>
#include <linux/irqdomain.h>
#include <linux/percpu.h>
#include <linux/iova.h>
#include <asm/irq_remapping.h>
#include <asm/io_apic.h>
#include <asm/apic.h>
#include <asm/hw_irq.h>
#include <asm/msidef.h>

--- 441 unchanged lines hidden ---

{
	struct iommu_cmd *cmd = iommu_phys_to_virt(phys_addr);
	int i;

	for (i = 0; i < 4; ++i)
		pr_err("CMD[%d]: %08x\n", i, cmd->data[i]);
}

+static void amd_iommu_report_rmp_hw_error(volatile u32 *event)
+{
+	struct iommu_dev_data *dev_data = NULL;
+	int devid, vmg_tag, flags;
+	struct pci_dev *pdev;
+	u64 spa;
+
+	devid   = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
+	vmg_tag = (event[1]) & 0xFFFF;
+	flags   = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
+	spa     = ((u64)event[3] << 32) | (event[2] & 0xFFFFFFF8);
+
+	pdev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(devid),
+					   devid & 0xff);
+	if (pdev)
+		dev_data = dev_iommu_priv_get(&pdev->dev);
+
+	if (dev_data && __ratelimit(&dev_data->rs)) {
+		pci_err(pdev, "Event logged [RMP_HW_ERROR vmg_tag=0x%04x, spa=0x%llx, flags=0x%04x]\n",
+			vmg_tag, spa, flags);
+	} else {
+		pr_err_ratelimited("Event logged [RMP_HW_ERROR device=%02x:%02x.%x, vmg_tag=0x%04x, spa=0x%llx, flags=0x%04x]\n",
+				   PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+				   vmg_tag, spa, flags);
+	}
+
+	if (pdev)
+		pci_dev_put(pdev);
+}
+
+static void amd_iommu_report_rmp_fault(volatile u32 *event)
+{
+	struct iommu_dev_data *dev_data = NULL;
+	int devid, flags_rmp, vmg_tag, flags;
+	struct pci_dev *pdev;
+	u64 gpa;
+
+	devid     = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
+	flags_rmp = (event[0] >> EVENT_FLAGS_SHIFT) & 0xFF;
+	vmg_tag   = (event[1]) & 0xFFFF;
+	flags     = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
+	gpa       = ((u64)event[3] << 32) | event[2];
+
+	pdev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(devid),
+					   devid & 0xff);
+	if (pdev)
+		dev_data = dev_iommu_priv_get(&pdev->dev);
+
+	if (dev_data && __ratelimit(&dev_data->rs)) {
+		pci_err(pdev, "Event logged [RMP_PAGE_FAULT vmg_tag=0x%04x, gpa=0x%llx, flags_rmp=0x%04x, flags=0x%04x]\n",
+			vmg_tag, gpa, flags_rmp, flags);
+	} else {
+		pr_err_ratelimited("Event logged [RMP_PAGE_FAULT device=%02x:%02x.%x, vmg_tag=0x%04x, gpa=0x%llx, flags_rmp=0x%04x, flags=0x%04x]\n",
+				   PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+				   vmg_tag, gpa, flags_rmp, flags);
+	}
+
+	if (pdev)
+		pci_dev_put(pdev);
+}
+
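As an aside, the bit arithmetic these two added handlers perform on a 4-dword event log entry can be exercised standalone. A minimal sketch follows; the EVENT_* shift/mask constants and the sample entry are assumptions modeled on amd_iommu_types.h, not taken from this diff, and only the decoding mirrors the code above.

/* Standalone sketch of the RMP event decoding above; EVENT_* values
 * are assumptions, not quoted from the kernel headers in this diff. */
#include <stdint.h>
#include <stdio.h>

#define EVENT_DEVID_SHIFT	0	/* assumed */
#define EVENT_DEVID_MASK	0xffff	/* assumed */
#define EVENT_FLAGS_SHIFT	0x10	/* assumed */
#define EVENT_FLAGS_MASK	0xfff	/* assumed */

int main(void)
{
	/* Hypothetical event log entry (event[0..3]). */
	uint32_t event[4] = { 0x00250010, 0x06001234, 0xdeadb000, 0x00000001 };
	int devid      = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
	int flags_rmp  = (event[0] >> EVENT_FLAGS_SHIFT) & 0xff;
	int vmg_tag    = event[1] & 0xffff;
	int flags      = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
	uint64_t gpa   = ((uint64_t)event[3] << 32) | event[2];

	/* Open-coded PCI_BUS_NUM/PCI_SLOT/PCI_FUNC for a 16-bit devid. */
	printf("device=%02x:%02x.%x vmg_tag=0x%04x gpa=0x%llx flags_rmp=0x%02x flags=0x%03x\n",
	       (devid >> 8) & 0xff, (devid >> 3) & 0x1f, devid & 0x7,
	       vmg_tag, (unsigned long long)gpa, flags_rmp, flags);
	return 0;
}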
static void amd_iommu_report_page_fault(u16 devid, u16 domain_id,
					u64 address, int flags)
{
	struct iommu_dev_data *dev_data = NULL;
	struct pci_dev *pdev;

	pdev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(devid),
					   devid & 0xff);

--- 11 unchanged lines hidden ---

	if (pdev)
		pci_dev_put(pdev);
}

static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
{
	struct device *dev = iommu->iommu.dev;
-	int type, devid, pasid, flags, tag;
+	int type, devid, flags, tag;
	volatile u32 *event = __evt;
	int count = 0;
	u64 address;
+	u32 pasid;

retry:
	type    = (event[1] >> EVENT_TYPE_SHIFT)  & EVENT_TYPE_MASK;
	devid   = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
	pasid   = (event[0] & EVENT_DOMID_MASK_HI) |
		  (event[1] & EVENT_DOMID_MASK_LO);
	flags   = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
	address = (u64)(((u64)event[3]) << 32) | event[2];

--- 44 unchanged lines hidden ---

			PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
			address);
		break;
	case EVENT_TYPE_INV_DEV_REQ:
		dev_err(dev, "Event logged [INVALID_DEVICE_REQUEST device=%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x]\n",
			PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
			pasid, address, flags);
		break;
+	case EVENT_TYPE_RMP_FAULT:
+		amd_iommu_report_rmp_fault(event);
+		break;
+	case EVENT_TYPE_RMP_HW_ERR:
+		amd_iommu_report_rmp_hw_error(event);
+		break;
	case EVENT_TYPE_INV_PPR_REQ:
		pasid = PPR_PASID(*((u64 *)__evt));
		tag = event[1] & 0x03FF;
		dev_err(dev, "Event logged [INVALID_PPR_REQUEST device=%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x tag=0x%03x]\n",
			PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
			pasid, address, flags, tag);
		break;
	default:

--- 136 unchanged lines hidden ---

		if (iommu_ga_log_notifier(GA_TAG(log_entry)) != 0)
			pr_err("GA log notifier failed.\n");
		break;
	default:
		break;
	}
	}
}
-#endif /* CONFIG_IRQ_REMAP */
+
+static void
+amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu)
+{
+	if (!irq_remapping_enabled || !dev_is_pci(dev) ||
+	    pci_dev_has_special_msi_domain(to_pci_dev(dev)))
+		return;
+
+	dev_set_msi_domain(dev, iommu->msi_domain);
+}
+
+#else /* CONFIG_IRQ_REMAP */
+static inline void
+amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu) { }
+#endif /* !CONFIG_IRQ_REMAP */

#define AMD_IOMMU_INT_MASK	\
	(MMIO_STATUS_EVT_INT_MASK | \
	 MMIO_STATUS_PPR_INT_MASK | \
	 MMIO_STATUS_GALOG_INT_MASK)

irqreturn_t amd_iommu_int_thread(int irq, void *data)
{
	struct amd_iommu *iommu = (struct amd_iommu *) data;

--- 45 unchanged lines hidden ---

}

/****************************************************************************
 *
 * IOMMU command queuing functions
 *
 ****************************************************************************/

-static int wait_on_sem(volatile u64 *sem)
+static int wait_on_sem(struct amd_iommu *iommu, u64 data)
{
	int i = 0;

-	while (*sem == 0 && i < LOOP_TIMEOUT) {
+	while (*iommu->cmd_sem != data && i < LOOP_TIMEOUT) {
		udelay(1);
		i += 1;
	}

	if (i == LOOP_TIMEOUT) {
		pr_alert("Completion-Wait loop timed out\n");
		return -EIO;
	}

--- 14 unchanged lines hidden ---

	tail = (tail + sizeof(*cmd)) % CMD_BUFFER_SIZE;
	iommu->cmd_buf_tail = tail;

	/* Tell the IOMMU about it */
	writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
}

-static void build_completion_wait(struct iommu_cmd *cmd, u64 address)
+static void build_completion_wait(struct iommu_cmd *cmd,
+				  struct amd_iommu *iommu,
+				  u64 data)
{
-	u64 paddr = iommu_virt_to_phys((void *)address);
-
-	WARN_ON(address & 0x7ULL);
+	u64 paddr = iommu_virt_to_phys((void *)iommu->cmd_sem);

	memset(cmd, 0, sizeof(*cmd));
	cmd->data[0] = lower_32_bits(paddr) | CMD_COMPL_WAIT_STORE_MASK;
	cmd->data[1] = upper_32_bits(paddr);
-	cmd->data[2] = 1;
+	cmd->data[2] = data;
	CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
}

static void build_inv_dte(struct iommu_cmd *cmd, u16 devid)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->data[0] = devid;
	CMD_SET_TYPE(cmd, CMD_INV_DEV_ENTRY);

--- 56 unchanged lines hidden ---

	cmd->data[1] = devid;
	cmd->data[2] = lower_32_bits(address);
	cmd->data[3] = upper_32_bits(address);
	CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
	if (s)
		cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
}

-static void build_inv_iommu_pasid(struct iommu_cmd *cmd, u16 domid, int pasid,
+static void build_inv_iommu_pasid(struct iommu_cmd *cmd, u16 domid, u32 pasid,
				  u64 address, bool size)
{
	memset(cmd, 0, sizeof(*cmd));

	address &= ~(0xfffULL);

	cmd->data[0]  = pasid;
	cmd->data[1]  = domid;
	cmd->data[2]  = lower_32_bits(address);
	cmd->data[3]  = upper_32_bits(address);
	cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
	cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
	if (size)
		cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
	CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
}

-static void build_inv_iotlb_pasid(struct iommu_cmd *cmd, u16 devid, int pasid,
+static void build_inv_iotlb_pasid(struct iommu_cmd *cmd, u16 devid, u32 pasid,
				  int qdep, u64 address, bool size)
{
	memset(cmd, 0, sizeof(*cmd));

	address &= ~(0xfffULL);

	cmd->data[0]  = devid;
	cmd->data[0] |= ((pasid >> 8) & 0xff) << 16;
	cmd->data[0] |= (qdep & 0xff) << 24;
	cmd->data[1]  = devid;
	cmd->data[1] |= (pasid & 0xff) << 16;
	cmd->data[2]  = lower_32_bits(address);
	cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
	cmd->data[3]  = upper_32_bits(address);
	if (size)
		cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
	CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
}

-static void build_complete_ppr(struct iommu_cmd *cmd, u16 devid, int pasid,
+static void build_complete_ppr(struct iommu_cmd *cmd, u16 devid, u32 pasid,
			       int status, int tag, bool gn)
{
	memset(cmd, 0, sizeof(*cmd));

	cmd->data[0] = devid;
	if (gn) {
		cmd->data[1] = pasid;
		cmd->data[2] = CMD_INV_IOMMU_PAGES_GN_MASK;

--- 81 unchanged lines hidden ---

 * This function queues a completion wait command into the command
 * buffer of an IOMMU
 */
static int iommu_completion_wait(struct amd_iommu *iommu)
{
	struct iommu_cmd cmd;
	unsigned long flags;
	int ret;
+	u64 data;

	if (!iommu->need_sync)
		return 0;

-	build_completion_wait(&cmd, (u64)&iommu->cmd_sem);
-
	raw_spin_lock_irqsave(&iommu->lock, flags);

-	iommu->cmd_sem = 0;
+	data = ++iommu->cmd_sem_val;
+	build_completion_wait(&cmd, iommu, data);

	ret = __iommu_queue_command_sync(iommu, &cmd, false);
	if (ret)
		goto out_unlock;

-	ret = wait_on_sem(&iommu->cmd_sem);
+	ret = wait_on_sem(iommu, data);

out_unlock:
	raw_spin_unlock_irqrestore(&iommu->lock, flags);

	return ret;
}

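The hunk above replaces the old zero-then-set completion semaphore with a monotonically increasing sequence number: each caller takes a fresh value from cmd_sem_val under the lock, embeds it in the COMPLETION_WAIT command, and polls until the hardware has stored exactly that value to cmd_sem. A self-contained userspace sketch of the idea follows; fake_iommu and hw_complete() are hypothetical stand-ins for the hardware, and only the wait logic mirrors the patch.

#include <stdint.h>
#include <stdio.h>

struct fake_iommu {
	volatile uint64_t cmd_sem;	/* written by the "hardware" */
	uint64_t cmd_sem_val;		/* next sequence number */
};

/* Hypothetical hardware side: COMPLETION_WAIT stores its payload. */
static void hw_complete(struct fake_iommu *iommu, uint64_t data)
{
	iommu->cmd_sem = data;
}

/* Mirrors the reworked wait_on_sem(): released only by our own value. */
static int wait_on_sem(struct fake_iommu *iommu, uint64_t data)
{
	int i = 0;

	while (iommu->cmd_sem != data && i < 100000)
		i += 1;			/* the kernel udelay()s here */

	return (iommu->cmd_sem == data) ? 0 : -1;
}

int main(void)
{
	struct fake_iommu iommu = { 0, 0 };
	uint64_t first  = ++iommu.cmd_sem_val;
	uint64_t second = ++iommu.cmd_sem_val;

	hw_complete(&iommu, second);	/* only the second wait completed */
	/* Unlike a 0/1 flag, a waiter for "first" is not fooled by a
	 * later completion having written the semaphore: */
	printf("first:  %d\n", wait_on_sem(&iommu, first));	/* -1 */
	printf("second: %d\n", wait_on_sem(&iommu, second));	/*  0 */
	return 0;
}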
static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid)

--- 1080 unchanged lines hidden ---

	ret = iommu_init_device(dev);
	if (ret) {
		if (ret != -ENOTSUPP)
			dev_err(dev, "Failed to initialize - trying to proceed anyway\n");
		iommu_dev = ERR_PTR(ret);
		iommu_ignore_device(dev);
	} else {
+		amd_iommu_set_pci_msi_domain(dev, iommu);
		iommu_dev = &iommu->iommu;
	}

	iommu_completion_wait(iommu);

	return iommu_dev;
}

--- 613 unchanged lines hidden ---

out:
	spin_unlock_irqrestore(&domain->lock, flags);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_domain_enable_v2);

-static int __flush_pasid(struct protection_domain *domain, int pasid,
+static int __flush_pasid(struct protection_domain *domain, u32 pasid,
			 u64 address, bool size)
{
	struct iommu_dev_data *dev_data;
	struct iommu_cmd cmd;
	int i, ret;

	if (!(domain->flags & PD_IOMMUV2_MASK))
		return -EINVAL;

--- 44 unchanged lines hidden ---

	ret = 0;

out:

	return ret;
}

-static int __amd_iommu_flush_page(struct protection_domain *domain, int pasid,
+static int __amd_iommu_flush_page(struct protection_domain *domain, u32 pasid,
				  u64 address)
{
	return __flush_pasid(domain, pasid, address, false);
}

-int amd_iommu_flush_page(struct iommu_domain *dom, int pasid,
+int amd_iommu_flush_page(struct iommu_domain *dom, u32 pasid,
			 u64 address)
{
	struct protection_domain *domain = to_pdomain(dom);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&domain->lock, flags);
	ret = __amd_iommu_flush_page(domain, pasid, address);
	spin_unlock_irqrestore(&domain->lock, flags);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_flush_page);

-static int __amd_iommu_flush_tlb(struct protection_domain *domain, int pasid)
+static int __amd_iommu_flush_tlb(struct protection_domain *domain, u32 pasid)
{
	return __flush_pasid(domain, pasid, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
			     true);
}

-int amd_iommu_flush_tlb(struct iommu_domain *dom, int pasid)
+int amd_iommu_flush_tlb(struct iommu_domain *dom, u32 pasid)
{
	struct protection_domain *domain = to_pdomain(dom);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&domain->lock, flags);
	ret = __amd_iommu_flush_tlb(domain, pasid);
	spin_unlock_irqrestore(&domain->lock, flags);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_flush_tlb);

-static u64 *__get_gcr3_pte(u64 *root, int level, int pasid, bool alloc)
+static u64 *__get_gcr3_pte(u64 *root, int level, u32 pasid, bool alloc)
{
	int index;
	u64 *pte;

	while (true) {

		index = (pasid >> (9 * level)) & 0x1ff;
		pte   = &root[index];

--- 15 unchanged lines hidden ---

		root = iommu_phys_to_virt(*pte & PAGE_MASK);

		level -= 1;
	}

	return pte;
}

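The GCR3 table walked by __get_gcr3_pte() above consumes the (now u32) PASID nine bits per level, exactly like a page-table walk. A minimal userspace sketch of the same indexing follows; the two-level table is pre-built instead of allocated on demand, physical and virtual addresses are treated as identical, and GCR3_VALID as bit 0 is an assumption.

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

#define GCR3_VALID	0x01ULL		/* assumed */

static uint64_t *walk_gcr3(uint64_t *root, int level, uint32_t pasid)
{
	for (;;) {
		int index = (pasid >> (9 * level)) & 0x1ff;
		uint64_t *pte = &root[index];

		if (level == 0)
			return pte;	/* leaf entry: cr3 | GCR3_VALID */
		if (!*pte)
			return NULL;	/* the real code can allocate here */
		root = (uint64_t *)(uintptr_t)*pte;	/* phys == virt */
		level -= 1;
	}
}

int main(void)
{
	uint64_t *l1 = calloc(512, sizeof(uint64_t));
	uint64_t *l0 = calloc(512, sizeof(uint64_t));
	uint32_t pasid = 0x12345;	/* example; two 9-bit levels cover 18 bits */

	l1[(pasid >> 9) & 0x1ff] = (uintptr_t)l0;
	l0[pasid & 0x1ff] = 0xdeadb000 | GCR3_VALID;

	printf("pte = 0x%llx\n", (unsigned long long)*walk_gcr3(l1, 1, pasid));
	free(l0);
	free(l1);
	return 0;
}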
-static int __set_gcr3(struct protection_domain *domain, int pasid,
+static int __set_gcr3(struct protection_domain *domain, u32 pasid,
		      unsigned long cr3)
{
	struct domain_pgtable pgtable;
	u64 *pte;

	amd_iommu_domain_get_pgtable(domain, &pgtable);
	if (pgtable.mode != PAGE_MODE_NONE)
		return -EINVAL;

	pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, true);
	if (pte == NULL)
		return -ENOMEM;

	*pte = (cr3 & PAGE_MASK) | GCR3_VALID;

	return __amd_iommu_flush_tlb(domain, pasid);
}

-static int __clear_gcr3(struct protection_domain *domain, int pasid)
+static int __clear_gcr3(struct protection_domain *domain, u32 pasid)
{
	struct domain_pgtable pgtable;
	u64 *pte;

	amd_iommu_domain_get_pgtable(domain, &pgtable);
	if (pgtable.mode != PAGE_MODE_NONE)
		return -EINVAL;

	pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, false);
	if (pte == NULL)
		return 0;

	*pte = 0;

	return __amd_iommu_flush_tlb(domain, pasid);
}

-int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, int pasid,
+int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, u32 pasid,
			      unsigned long cr3)
{
	struct protection_domain *domain = to_pdomain(dom);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&domain->lock, flags);
	ret = __set_gcr3(domain, pasid, cr3);
	spin_unlock_irqrestore(&domain->lock, flags);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_domain_set_gcr3);

-int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, int pasid)
+int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, u32 pasid)
{
	struct protection_domain *domain = to_pdomain(dom);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&domain->lock, flags);
	ret = __clear_gcr3(domain, pasid);
	spin_unlock_irqrestore(&domain->lock, flags);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_domain_clear_gcr3);

-int amd_iommu_complete_ppr(struct pci_dev *pdev, int pasid,
+int amd_iommu_complete_ppr(struct pci_dev *pdev, u32 pasid,
			   int status, int tag)
{
	struct iommu_dev_data *dev_data;
	struct amd_iommu *iommu;
	struct iommu_cmd cmd;

	dev_data = dev_iommu_priv_get(&pdev->dev);
	iommu    = amd_iommu_rlookup_table[dev_data->devid];

--- 516 unchanged lines hidden ---

	struct irte_ga *irte = &ptr[index];

	memset(&irte->lo.val, 0, sizeof(u64));
	memset(&irte->hi.val, 0, sizeof(u64));
}

static int get_devid(struct irq_alloc_info *info)
{
-	int devid = -1;
-
	switch (info->type) {
	case X86_IRQ_ALLOC_TYPE_IOAPIC:
-		devid     = get_ioapic_devid(info->ioapic_id);
-		break;
+	case X86_IRQ_ALLOC_TYPE_IOAPIC_GET_PARENT:
+		return get_ioapic_devid(info->devid);
	case X86_IRQ_ALLOC_TYPE_HPET:
-		devid     = get_hpet_devid(info->hpet_id);
-		break;
-	case X86_IRQ_ALLOC_TYPE_MSI:
-	case X86_IRQ_ALLOC_TYPE_MSIX:
-		devid = get_device_id(&info->msi_dev->dev);
-		break;
+	case X86_IRQ_ALLOC_TYPE_HPET_GET_PARENT:
+		return get_hpet_devid(info->devid);
+	case X86_IRQ_ALLOC_TYPE_PCI_MSI:
+	case X86_IRQ_ALLOC_TYPE_PCI_MSIX:
+		return get_device_id(msi_desc_to_dev(info->desc));
	default:
-		BUG_ON(1);
-		break;
+		WARN_ON_ONCE(1);
+		return -1;
	}
-
-	return devid;
}

-static struct irq_domain *get_ir_irq_domain(struct irq_alloc_info *info)
+static struct irq_domain *get_irq_domain_for_devid(struct irq_alloc_info *info,
+						   int devid)
{
-	struct amd_iommu *iommu;
-	int devid;
+	struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];

-	if (!info)
+	if (!iommu)
		return NULL;

-	devid = get_devid(info);
-	if (devid >= 0) {
-		iommu = amd_iommu_rlookup_table[devid];
-		if (iommu)
-			return iommu->ir_domain;
+	switch (info->type) {
+	case X86_IRQ_ALLOC_TYPE_IOAPIC_GET_PARENT:
+	case X86_IRQ_ALLOC_TYPE_HPET_GET_PARENT:
+		return iommu->ir_domain;
+	default:
+		WARN_ON_ONCE(1);
+		return NULL;
	}
-
-	return NULL;
}

static struct irq_domain *get_irq_domain(struct irq_alloc_info *info)
{
-	struct amd_iommu *iommu;
	int devid;

	if (!info)
		return NULL;

-	switch (info->type) {
-	case X86_IRQ_ALLOC_TYPE_MSI:
-	case X86_IRQ_ALLOC_TYPE_MSIX:
-		devid = get_device_id(&info->msi_dev->dev);
-		if (devid < 0)
-			return NULL;
-
-		iommu = amd_iommu_rlookup_table[devid];
-		if (iommu)
-			return iommu->msi_domain;
-		break;
-	default:
-		break;
-	}
-
-	return NULL;
+	devid = get_devid(info);
+	if (devid < 0)
+		return NULL;
+	return get_irq_domain_for_devid(info, devid);
}

struct irq_remap_ops amd_iommu_irq_ops = {
	.prepare		= amd_iommu_prepare,
	.enable			= amd_iommu_enable,
	.disable		= amd_iommu_disable,
	.reenable		= amd_iommu_reenable,
	.enable_faulting	= amd_iommu_enable_faulting,
-	.get_ir_irq_domain	= get_ir_irq_domain,
	.get_irq_domain		= get_irq_domain,
};

static void irq_remapping_prepare_irte(struct amd_ir_data *data,
				       struct irq_cfg *irq_cfg,
				       struct irq_alloc_info *info,
				       int devid, int index, int sub_handle)
{
	struct irq_2_irte *irte_info = &data->irq_2_irte;
	struct msi_msg *msg = &data->msi_entry;
	struct IO_APIC_route_entry *entry;
	struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];

	if (!iommu)
		return;

	data->irq_2_irte.devid = devid;
	data->irq_2_irte.index = index + sub_handle;
-	iommu->irte_ops->prepare(data->entry, apic->irq_delivery_mode,
+	iommu->irte_ops->prepare(data->entry, apic->delivery_mode,
				 apic->irq_dest_mode, irq_cfg->vector,
				 irq_cfg->dest_apicid, devid);

	switch (info->type) {
	case X86_IRQ_ALLOC_TYPE_IOAPIC:
		/* Setup IOAPIC entry */
-		entry = info->ioapic_entry;
-		info->ioapic_entry = NULL;
+		entry = info->ioapic.entry;
+		info->ioapic.entry = NULL;
		memset(entry, 0, sizeof(*entry));
		entry->vector        = index;
		entry->mask          = 0;
-		entry->trigger = info->ioapic_trigger;
-		entry->polarity = info->ioapic_polarity;
+		entry->trigger = info->ioapic.trigger;
+		entry->polarity = info->ioapic.polarity;
		/* Mask level triggered irqs. */
-		if (info->ioapic_trigger)
+		if (info->ioapic.trigger)
			entry->mask = 1;
		break;

	case X86_IRQ_ALLOC_TYPE_HPET:
-	case X86_IRQ_ALLOC_TYPE_MSI:
-	case X86_IRQ_ALLOC_TYPE_MSIX:
+	case X86_IRQ_ALLOC_TYPE_PCI_MSI:
+	case X86_IRQ_ALLOC_TYPE_PCI_MSIX:
		msg->address_hi = MSI_ADDR_BASE_HI;
		msg->address_lo = MSI_ADDR_BASE_LO;
		msg->data = irte_info->index;
		break;

	default:
		BUG_ON(1);
		break;

--- 27 unchanged lines hidden ---

	struct irq_data *irq_data;
	struct amd_ir_data *data = NULL;
	struct irq_cfg *cfg;
	int i, ret, devid;
	int index;

	if (!info)
		return -EINVAL;
-	if (nr_irqs > 1 && info->type != X86_IRQ_ALLOC_TYPE_MSI &&
-	    info->type != X86_IRQ_ALLOC_TYPE_MSIX)
+	if (nr_irqs > 1 && info->type != X86_IRQ_ALLOC_TYPE_PCI_MSI &&
+	    info->type != X86_IRQ_ALLOC_TYPE_PCI_MSIX)
		return -EINVAL;

	/*
	 * With IRQ remapping enabled, don't need contiguous CPU vectors
	 * to support multiple MSI interrupts.
	 */
-	if (info->type == X86_IRQ_ALLOC_TYPE_MSI)
+	if (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI)
		info->flags &= ~X86_IRQ_ALLOC_CONTIGUOUS_VECTORS;

	devid = get_devid(info);
	if (devid < 0)
		return -EINVAL;

	ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
	if (ret < 0)

--- 11 unchanged lines hidden ---

			 * interrupts.
			 */
			table->min_index = 32;
			iommu = amd_iommu_rlookup_table[devid];
			for (i = 0; i < 32; ++i)
				iommu->irte_ops->set_allocated(table, i);
		}
		WARN_ON(table->min_index != 32);
-		index = info->ioapic_pin;
+		index = info->ioapic.pin;
	} else {
		index = -ENOMEM;
	}
-	} else if (info->type == X86_IRQ_ALLOC_TYPE_MSI ||
-		   info->type == X86_IRQ_ALLOC_TYPE_MSIX) {
-		bool align = (info->type == X86_IRQ_ALLOC_TYPE_MSI);
+	} else if (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI ||
+		   info->type == X86_IRQ_ALLOC_TYPE_PCI_MSIX) {
+		bool align = (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI);

-		index = alloc_irq_index(devid, nr_irqs, align, info->msi_dev);
+		index = alloc_irq_index(devid, nr_irqs, align,
+					msi_desc_to_pci_dev(info->desc));
	} else {
		index = alloc_irq_index(devid, nr_irqs, false, NULL);
	}

	if (index < 0) {
		pr_warn("Failed to allocate IRTE\n");
		ret = index;
		goto out_free_parent;
	}

	for (i = 0; i < nr_irqs; i++) {
		irq_data = irq_domain_get_irq_data(domain, virq + i);
-		cfg = irqd_cfg(irq_data);
-		if (!irq_data || !cfg) {
+		cfg = irq_data ? irqd_cfg(irq_data) : NULL;
+		if (!cfg) {
			ret = -EINVAL;
			goto out_free_data;
		}

		ret = -ENOMEM;
		data = kzalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			goto out_free_data;

--- 132 unchanged lines hidden ---

	valid = entry->lo.fields_remap.valid;

	entry->lo.val = 0;
	entry->hi.val = 0;

	entry->lo.fields_remap.valid = valid;
	entry->lo.fields_remap.dm    = apic->irq_dest_mode;
-	entry->lo.fields_remap.int_type = apic->irq_delivery_mode;
+	entry->lo.fields_remap.int_type = apic->delivery_mode;
	entry->hi.fields.vector = cfg->vector;
	entry->lo.fields_remap.destination =
			APICID_TO_IRTE_DEST_LO(cfg->dest_apicid);
	entry->hi.fields.destination =
			APICID_TO_IRTE_DEST_HI(cfg->dest_apicid);

	return modify_irte_ga(ir_data->irq_2_irte.devid,
			      ir_data->irq_2_irte.index, entry, ir_data);

--- 181 unchanged lines hidden ---