// SPDX-License-Identifier: GPL-2.0
/*
 * Virtual I/O topology
 *
 * The Virtual I/O Translation Table (VIOT) describes the topology of
 * para-virtual IOMMUs and the endpoints they manage. The OS uses it to
 * initialize devices in the right order, preventing endpoints from issuing DMA
 * before their IOMMU is ready.
 *
 * When binding a driver to a device, before calling the device driver's probe()
 * method, the driver infrastructure calls dma_configure(). At that point the
 * VIOT driver looks for an IOMMU associated to the device in the VIOT table.
 * If an IOMMU exists and has been initialized, the VIOT driver initializes the
 * device's IOMMU fwspec, allowing the DMA infrastructure to invoke the IOMMU
 * ops when the device driver configures DMA mappings. If an IOMMU exists and
 * hasn't yet been initialized, VIOT returns -EPROBE_DEFER to postpone probing
 * the device until the IOMMU is available.
 */
#define pr_fmt(fmt) "ACPI: VIOT: " fmt

#include <linux/acpi_viot.h>
#include <linux/dma-iommu.h>
#include <linux/fwnode.h>
#include <linux/iommu.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/platform_device.h>

struct viot_iommu {
	/* Node offset within the table */
	unsigned int offset;
	struct fwnode_handle *fwnode;
	struct list_head list;
};

struct viot_endpoint {
	union {
		/* PCI range */
		struct {
			u16 segment_start;
			u16 segment_end;
			u16 bdf_start;
			u16 bdf_end;
		};
		/* MMIO */
		u64 address;
	};
	u32 endpoint_id;
	struct viot_iommu *viommu;
	struct list_head list;
};

static struct acpi_table_viot *viot;
static LIST_HEAD(viot_iommus);
static LIST_HEAD(viot_pci_ranges);
static LIST_HEAD(viot_mmio_endpoints);

static int __init viot_check_bounds(const struct acpi_viot_header *hdr)
{
	struct acpi_viot_header *start, *end, *hdr_end;

	start = ACPI_ADD_PTR(struct acpi_viot_header, viot,
			     max_t(size_t, sizeof(*viot), viot->node_offset));
	end = ACPI_ADD_PTR(struct acpi_viot_header, viot, viot->header.length);
	hdr_end = ACPI_ADD_PTR(struct acpi_viot_header, hdr, sizeof(*hdr));

	if (hdr < start || hdr_end > end) {
		pr_err(FW_BUG "Node pointer overflows\n");
		return -EOVERFLOW;
	}
	if (hdr->length < sizeof(*hdr)) {
		pr_err(FW_BUG "Empty node\n");
		return -EINVAL;
	}
	return 0;
}

static int __init viot_get_pci_iommu_fwnode(struct viot_iommu *viommu,
					    u16 segment, u16 bdf)
{
	struct pci_dev *pdev;
	struct fwnode_handle *fwnode;

	pdev = pci_get_domain_bus_and_slot(segment, PCI_BUS_NUM(bdf),
					   bdf & 0xff);
	if (!pdev) {
		pr_err("Could not find PCI IOMMU\n");
		return -ENODEV;
	}

	fwnode = dev_fwnode(&pdev->dev);
	if (!fwnode) {
		/*
		 * PCI devices aren't necessarily described by ACPI. Create a
		 * fwnode so the IOMMU subsystem can identify this device.
		 */
		fwnode = acpi_alloc_fwnode_static();
		if (!fwnode) {
			pci_dev_put(pdev);
			return -ENOMEM;
		}
		set_primary_fwnode(&pdev->dev, fwnode);
	}
	viommu->fwnode = dev_fwnode(&pdev->dev);
	pci_dev_put(pdev);
	return 0;
}

static int __init viot_get_mmio_iommu_fwnode(struct viot_iommu *viommu,
					     u64 address)
{
	struct acpi_device *adev;
	struct resource res = {
		.start = address,
		.end = address,
		.flags = IORESOURCE_MEM,
	};

	adev = acpi_resource_consumer(&res);
	if (!adev) {
		pr_err("Could not find MMIO IOMMU\n");
		return -EINVAL;
	}
	viommu->fwnode = &adev->fwnode;
	return 0;
}
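
/*
 * viot_get_iommu - Find or create the vIOMMU descriptor for a table offset
 *
 * Endpoint nodes reference their IOMMU by byte offset into the VIOT. Return
 * the viot_iommu already parsed at @offset, or parse the node, resolve its
 * fwnode and add it to viot_iommus. Returns NULL on failure.
 */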
static struct viot_iommu * __init viot_get_iommu(unsigned int offset)
{
	int ret;
	struct viot_iommu *viommu;
	struct acpi_viot_header *hdr = ACPI_ADD_PTR(struct acpi_viot_header,
						    viot, offset);
	union {
		struct acpi_viot_virtio_iommu_pci pci;
		struct acpi_viot_virtio_iommu_mmio mmio;
	} *node = (void *)hdr;

	list_for_each_entry(viommu, &viot_iommus, list)
		if (viommu->offset == offset)
			return viommu;

	if (viot_check_bounds(hdr))
		return NULL;

	viommu = kzalloc(sizeof(*viommu), GFP_KERNEL);
	if (!viommu)
		return NULL;

	viommu->offset = offset;
	switch (hdr->type) {
	case ACPI_VIOT_NODE_VIRTIO_IOMMU_PCI:
		if (hdr->length < sizeof(node->pci))
			goto err_free;

		ret = viot_get_pci_iommu_fwnode(viommu, node->pci.segment,
						node->pci.bdf);
		break;
	case ACPI_VIOT_NODE_VIRTIO_IOMMU_MMIO:
		if (hdr->length < sizeof(node->mmio))
			goto err_free;

		ret = viot_get_mmio_iommu_fwnode(viommu,
						 node->mmio.base_address);
		break;
	default:
		ret = -EINVAL;
	}
	if (ret)
		goto err_free;

	list_add(&viommu->list, &viot_iommus);
	return viommu;

err_free:
	kfree(viommu);
	return NULL;
}
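
/*
 * viot_parse_node - Parse one VIOT node and record the endpoints it describes
 *
 * PCI range and MMIO endpoint nodes are added to viot_pci_ranges or
 * viot_mmio_endpoints along with their output IOMMU, which is resolved through
 * viot_get_iommu(). vIOMMU nodes are handled lazily there and skipped here.
 * Returns a negative errno only for fatal errors; unknown node types are
 * ignored so that parsing can continue.
 */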
static int __init viot_parse_node(const struct acpi_viot_header *hdr)
{
	int ret = -EINVAL;
	struct list_head *list;
	struct viot_endpoint *ep;
	union {
		struct acpi_viot_mmio mmio;
		struct acpi_viot_pci_range pci;
	} *node = (void *)hdr;

	if (viot_check_bounds(hdr))
		return -EINVAL;

	if (hdr->type == ACPI_VIOT_NODE_VIRTIO_IOMMU_PCI ||
	    hdr->type == ACPI_VIOT_NODE_VIRTIO_IOMMU_MMIO)
		return 0;

	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
	if (!ep)
		return -ENOMEM;

	switch (hdr->type) {
	case ACPI_VIOT_NODE_PCI_RANGE:
		if (hdr->length < sizeof(node->pci)) {
			pr_err(FW_BUG "Invalid PCI node size\n");
			goto err_free;
		}

		ep->segment_start = node->pci.segment_start;
		ep->segment_end = node->pci.segment_end;
		ep->bdf_start = node->pci.bdf_start;
		ep->bdf_end = node->pci.bdf_end;
		ep->endpoint_id = node->pci.endpoint_start;
		ep->viommu = viot_get_iommu(node->pci.output_node);
		list = &viot_pci_ranges;
		break;
	case ACPI_VIOT_NODE_MMIO:
		if (hdr->length < sizeof(node->mmio)) {
			pr_err(FW_BUG "Invalid MMIO node size\n");
			goto err_free;
		}

		ep->address = node->mmio.base_address;
		ep->endpoint_id = node->mmio.endpoint;
		ep->viommu = viot_get_iommu(node->mmio.output_node);
		list = &viot_mmio_endpoints;
		break;
	default:
		pr_warn("Unsupported node %x\n", hdr->type);
		ret = 0;
		goto err_free;
	}

	if (!ep->viommu) {
		pr_warn("No IOMMU node found\n");
		/*
		 * A future version of the table may use the node for other
		 * purposes. Keep parsing.
		 */
		ret = 0;
		goto err_free;
	}

	list_add(&ep->list, list);
	return 0;

err_free:
	kfree(ep);
	return ret;
}

/**
 * acpi_viot_early_init - Test the presence of VIOT and enable ACS
 *
 * If the VIOT does exist, ACS must be enabled. This cannot be done in
 * acpi_viot_init(), which is called after the bus scan.
 */
void __init acpi_viot_early_init(void)
{
#ifdef CONFIG_PCI
	acpi_status status;
	struct acpi_table_header *hdr;

	status = acpi_get_table(ACPI_SIG_VIOT, 0, &hdr);
	if (ACPI_FAILURE(status))
		return;
	pci_request_acs();
	acpi_put_table(hdr);
#endif
}

/**
 * acpi_viot_init - Parse the VIOT table
 *
 * Parse the VIOT table, prepare the list of endpoints to be used during DMA
 * setup of devices.
 */
void __init acpi_viot_init(void)
{
	int i;
	acpi_status status;
	struct acpi_table_header *hdr;
	struct acpi_viot_header *node;

	status = acpi_get_table(ACPI_SIG_VIOT, 0, &hdr);
	if (ACPI_FAILURE(status)) {
		if (status != AE_NOT_FOUND) {
			const char *msg = acpi_format_exception(status);

			pr_err("Failed to get table, %s\n", msg);
		}
		return;
	}

	viot = (void *)hdr;

	node = ACPI_ADD_PTR(struct acpi_viot_header, viot, viot->node_offset);
	for (i = 0; i < viot->node_count; i++) {
		if (viot_parse_node(node))
			return;

		node = ACPI_ADD_PTR(struct acpi_viot_header, node,
				    node->length);
	}

	acpi_put_table(hdr);
}
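
/*
 * viot_dev_iommu_init - Attach @dev to @viommu under endpoint ID @epid
 *
 * Returns -EPROBE_DEFER when the virtio-iommu driver is enabled but has not
 * registered its ops for this fwnode yet, so that device probe is retried
 * once the IOMMU is ready (see the top-of-file comment).
 */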
static int viot_dev_iommu_init(struct device *dev, struct viot_iommu *viommu,
			       u32 epid)
{
	const struct iommu_ops *ops;

	if (!viommu)
		return -ENODEV;

	/* We're not translating ourselves */
	if (device_match_fwnode(dev, viommu->fwnode))
		return -EINVAL;

	ops = iommu_ops_from_fwnode(viommu->fwnode);
	if (!ops)
		return IS_ENABLED(CONFIG_VIRTIO_IOMMU) ?
			-EPROBE_DEFER : -ENODEV;

	return acpi_iommu_fwspec_init(dev, epid, viommu->fwnode, ops);
}

static int viot_pci_dev_iommu_init(struct pci_dev *pdev, u16 dev_id, void *data)
{
	u32 epid;
	struct viot_endpoint *ep;
	u32 domain_nr = pci_domain_nr(pdev->bus);

	list_for_each_entry(ep, &viot_pci_ranges, list) {
		if (domain_nr >= ep->segment_start &&
		    domain_nr <= ep->segment_end &&
		    dev_id >= ep->bdf_start &&
		    dev_id <= ep->bdf_end) {
			/*
			 * Endpoint IDs grow linearly with the BDF, and each
			 * PCI segment in the range spans 0x10000 IDs. For
			 * example, a range with bdf_start 0x8 and
			 * endpoint_start 8 gives devid 0x10 the endpoint ID
			 * 8 + (0x10 - 0x8) = 16.
			 */
			epid = ((domain_nr - ep->segment_start) << 16) +
				dev_id - ep->bdf_start + ep->endpoint_id;

			return viot_dev_iommu_init(&pdev->dev, ep->viommu,
						   epid);
		}
	}
	return -ENODEV;
}

static int viot_mmio_dev_iommu_init(struct platform_device *pdev)
{
	struct resource *mem;
	struct viot_endpoint *ep;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem)
		return -ENODEV;

	list_for_each_entry(ep, &viot_mmio_endpoints, list) {
		if (ep->address == mem->start)
			return viot_dev_iommu_init(&pdev->dev, ep->viommu,
						   ep->endpoint_id);
	}
	return -ENODEV;
}

/**
 * viot_iommu_configure - Setup IOMMU ops for an endpoint described by VIOT
 * @dev: the endpoint
 *
 * Return: 0 on success, <0 on failure
 */
int viot_iommu_configure(struct device *dev)
{
	if (dev_is_pci(dev))
		return pci_for_each_dma_alias(to_pci_dev(dev),
					      viot_pci_dev_iommu_init, NULL);
	else if (dev_is_platform(dev))
		return viot_mmio_dev_iommu_init(to_platform_device(dev));
	return -ENODEV;
}