// SPDX-License-Identifier: GPL-2.0
/*
 * IOMMU API for s390 PCI devices
 *
 * Copyright IBM Corp. 2015
 * Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
 */

#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/iommu-helper.h>
#include <linux/sizes.h>
#include <asm/pci_dma.h>

/*
 * Physically contiguous memory regions can be mapped with 4 KiB alignment,
 * we allow all page sizes that are an order of 4KiB (no special large page
 * support so far).
 */
#define S390_IOMMU_PGSIZES	(~0xFFFUL)

static const struct iommu_ops s390_iommu_ops;

struct s390_domain {
	struct iommu_domain	domain;
	struct list_head	devices;
	unsigned long		*dma_table;
	spinlock_t		dma_table_lock;
	spinlock_t		list_lock;
};

struct s390_domain_device {
	struct list_head	list;
	struct zpci_dev		*zdev;
};

static struct s390_domain *to_s390_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct s390_domain, domain);
}

static bool s390_iommu_capable(enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		return true;
	case IOMMU_CAP_INTR_REMAP:
		return true;
	default:
		return false;
	}
}

static struct iommu_domain *s390_domain_alloc(unsigned domain_type)
{
	struct s390_domain *s390_domain;

	if (domain_type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	s390_domain = kzalloc(sizeof(*s390_domain), GFP_KERNEL);
	if (!s390_domain)
		return NULL;

	s390_domain->dma_table = dma_alloc_cpu_table();
	if (!s390_domain->dma_table) {
		kfree(s390_domain);
		return NULL;
	}

	spin_lock_init(&s390_domain->dma_table_lock);
	spin_lock_init(&s390_domain->list_lock);
	INIT_LIST_HEAD(&s390_domain->devices);

	return &s390_domain->domain;
}

static void s390_domain_free(struct iommu_domain *domain)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);

	dma_cleanup_tables(s390_domain->dma_table);
	kfree(s390_domain);
}

static int s390_iommu_attach_device(struct iommu_domain *domain,
				    struct device *dev)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);
	struct zpci_dev *zdev = to_pci_dev(dev)->sysdata;
	struct s390_domain_device *domain_device;
	unsigned long flags;
	int rc;

	if (!zdev)
		return -ENODEV;

	domain_device = kzalloc(sizeof(*domain_device), GFP_KERNEL);
	if (!domain_device)
		return -ENOMEM;

	if (zdev->dma_table)
		zpci_dma_exit_device(zdev);

	zdev->dma_table = s390_domain->dma_table;
	rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
				(u64) zdev->dma_table);
	if (rc)
		goto out_restore;

	spin_lock_irqsave(&s390_domain->list_lock, flags);
	/* First device defines the DMA range limits */
	if (list_empty(&s390_domain->devices)) {
		domain->geometry.aperture_start = zdev->start_dma;
		domain->geometry.aperture_end = zdev->end_dma;
		domain->geometry.force_aperture = true;
	/* Allow only devices with identical DMA range limits */
	} else if (domain->geometry.aperture_start != zdev->start_dma ||
		   domain->geometry.aperture_end != zdev->end_dma) {
		rc = -EINVAL;
		spin_unlock_irqrestore(&s390_domain->list_lock, flags);
		goto out_restore;
	}
	domain_device->zdev = zdev;
	zdev->s390_domain = s390_domain;
	list_add(&domain_device->list, &s390_domain->devices);
	spin_unlock_irqrestore(&s390_domain->list_lock, flags);

	return 0;

out_restore:
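	/* Attach failed: re-establish the device's own DMA translation setup */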
	zpci_dma_init_device(zdev);
	kfree(domain_device);

	return rc;
}

static void s390_iommu_detach_device(struct iommu_domain *domain,
				     struct device *dev)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);
	struct zpci_dev *zdev = to_pci_dev(dev)->sysdata;
	struct s390_domain_device *domain_device, *tmp;
	unsigned long flags;
	int found = 0;

	if (!zdev)
		return;

	spin_lock_irqsave(&s390_domain->list_lock, flags);
	list_for_each_entry_safe(domain_device, tmp, &s390_domain->devices,
				 list) {
		if (domain_device->zdev == zdev) {
			list_del(&domain_device->list);
			kfree(domain_device);
			found = 1;
			break;
		}
	}
	spin_unlock_irqrestore(&s390_domain->list_lock, flags);

	if (found) {
		zdev->s390_domain = NULL;
		zpci_unregister_ioat(zdev, 0);
		zpci_dma_init_device(zdev);
	}
}

static int s390_iommu_add_device(struct device *dev)
{
	struct iommu_group *group = iommu_group_get_for_dev(dev);
	struct zpci_dev *zdev = to_pci_dev(dev)->sysdata;

	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);
	iommu_device_link(&zdev->iommu_dev, dev);

	return 0;
}

static void s390_iommu_remove_device(struct device *dev)
{
	struct zpci_dev *zdev = to_pci_dev(dev)->sysdata;
	struct iommu_domain *domain;

	/*
	 * This is a workaround for a scenario where the IOMMU API common code
	 * "forgets" to call the detach_dev callback: After binding a device
	 * to vfio-pci and completing the VFIO_SET_IOMMU ioctl (which triggers
	 * the attach_dev), removing the device via
	 * "echo 1 > /sys/bus/pci/devices/.../remove" won't trigger detach_dev,
	 * only remove_device will be called via the BUS_NOTIFY_REMOVED_DEVICE
	 * notifier.
	 *
	 * So let's call detach_dev from here if it hasn't been called before.
	 */
	if (zdev && zdev->s390_domain) {
		domain = iommu_get_domain_for_dev(dev);
		if (domain)
			s390_iommu_detach_device(domain, dev);
	}

	iommu_device_unlink(&zdev->iommu_dev, dev);
	iommu_group_remove_device(dev);
}

static int s390_iommu_update_trans(struct s390_domain *s390_domain,
				   unsigned long pa, dma_addr_t dma_addr,
				   size_t size, int flags)
{
	struct s390_domain_device *domain_device;
	u8 *page_addr = (u8 *) (pa & PAGE_MASK);
	dma_addr_t start_dma_addr = dma_addr;
	unsigned long irq_flags, nr_pages, i;
	unsigned long *entry;
	int rc = 0;

	if (dma_addr < s390_domain->domain.geometry.aperture_start ||
	    dma_addr + size > s390_domain->domain.geometry.aperture_end)
		return -EINVAL;

	nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	if (!nr_pages)
		return 0;

	spin_lock_irqsave(&s390_domain->dma_table_lock, irq_flags);
	for (i = 0; i < nr_pages; i++) {
		entry = dma_walk_cpu_trans(s390_domain->dma_table, dma_addr);
		if (!entry) {
			rc = -ENOMEM;
			goto undo_cpu_trans;
		}
		dma_update_cpu_trans(entry, page_addr, flags);
		page_addr += PAGE_SIZE;
		dma_addr += PAGE_SIZE;
	}

	spin_lock(&s390_domain->list_lock);
	list_for_each_entry(domain_device, &s390_domain->devices, list) {
		rc = zpci_refresh_trans((u64) domain_device->zdev->fh << 32,
					start_dma_addr, nr_pages * PAGE_SIZE);
		if (rc)
			break;
	}
	spin_unlock(&s390_domain->list_lock);

undo_cpu_trans:
	if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)) {
		flags = ZPCI_PTE_INVALID;
		while (i-- > 0) {
			page_addr -= PAGE_SIZE;
			dma_addr -= PAGE_SIZE;
			entry = dma_walk_cpu_trans(s390_domain->dma_table,
						   dma_addr);
			if (!entry)
				break;
			dma_update_cpu_trans(entry, page_addr, flags);
		}
	}
	spin_unlock_irqrestore(&s390_domain->dma_table_lock, irq_flags);

	return rc;
}

static int s390_iommu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);
	int flags = ZPCI_PTE_VALID, rc = 0;

	if (!(prot & IOMMU_READ))
		return -EINVAL;

	if (!(prot & IOMMU_WRITE))
		flags |= ZPCI_TABLE_PROTECTED;

	rc = s390_iommu_update_trans(s390_domain, (unsigned long) paddr, iova,
				     size, flags);

	return rc;
}

static phys_addr_t s390_iommu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t iova)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);
	unsigned long *sto, *pto, *rto, flags;
	unsigned int rtx, sx, px;
	phys_addr_t phys = 0;

	if (iova < domain->geometry.aperture_start ||
	    iova > domain->geometry.aperture_end)
		return 0;

	rtx = calc_rtx(iova);
	sx = calc_sx(iova);
	px = calc_px(iova);
	rto = s390_domain->dma_table;

	spin_lock_irqsave(&s390_domain->dma_table_lock, flags);
	if (rto && reg_entry_isvalid(rto[rtx])) {
		sto = get_rt_sto(rto[rtx]);
		if (sto && reg_entry_isvalid(sto[sx])) {
			pto = get_st_pto(sto[sx]);
			if (pto && pt_entry_isvalid(pto[px]))
				phys = pto[px] & ZPCI_PTE_ADDR_MASK;
		}
	}
	spin_unlock_irqrestore(&s390_domain->dma_table_lock, flags);

	return phys;
}

static size_t s390_iommu_unmap(struct iommu_domain *domain,
			       unsigned long iova, size_t size,
			       struct iommu_iotlb_gather *gather)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);
	int flags = ZPCI_PTE_INVALID;
	phys_addr_t paddr;
	int rc;

	paddr = s390_iommu_iova_to_phys(domain, iova);
	if (!paddr)
		return 0;

	rc = s390_iommu_update_trans(s390_domain, (unsigned long) paddr, iova,
				     size, flags);
	if (rc)
		return 0;

	return size;
}

int zpci_init_iommu(struct zpci_dev *zdev)
{
	int rc = 0;

	rc = iommu_device_sysfs_add(&zdev->iommu_dev, NULL, NULL,
				    "s390-iommu.%08x", zdev->fid);
	if (rc)
		goto out_err;

	iommu_device_set_ops(&zdev->iommu_dev, &s390_iommu_ops);

	rc = iommu_device_register(&zdev->iommu_dev);
	if (rc)
		goto out_sysfs;

	return 0;

out_sysfs:
	iommu_device_sysfs_remove(&zdev->iommu_dev);

out_err:
	return rc;
}

void zpci_destroy_iommu(struct zpci_dev *zdev)
{
	iommu_device_unregister(&zdev->iommu_dev);
	iommu_device_sysfs_remove(&zdev->iommu_dev);
}

static const struct iommu_ops s390_iommu_ops = {
	.capable = s390_iommu_capable,
	.domain_alloc = s390_domain_alloc,
	.domain_free = s390_domain_free,
	.attach_dev = s390_iommu_attach_device,
	.detach_dev = s390_iommu_detach_device,
	.map = s390_iommu_map,
	.unmap = s390_iommu_unmap,
	.iova_to_phys = s390_iommu_iova_to_phys,
	.add_device = s390_iommu_add_device,
	.remove_device = s390_iommu_remove_device,
	.device_group = generic_device_group,
	.pgsize_bitmap = S390_IOMMU_PGSIZES,
};

static int __init s390_iommu_init(void)
{
	return bus_set_iommu(&pci_bus_type, &s390_iommu_ops);
}
subsys_initcall(s390_iommu_init);