// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Freescale Semiconductor, Inc.
 * Author: Varun Sethi <varun.sethi@freescale.com>
 */

#define pr_fmt(fmt) "fsl-pamu-domain: %s: " fmt, __func__

#include "fsl_pamu_domain.h"

#include <sysdev/fsl_pci.h>

/*
 * Global spinlock that needs to be held while
 * configuring PAMU.
 */
static DEFINE_SPINLOCK(iommu_lock);

static struct kmem_cache *fsl_pamu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static DEFINE_SPINLOCK(device_domain_lock);

struct iommu_device pamu_iommu;	/* IOMMU core code handle */

static struct fsl_dma_domain *to_fsl_dma_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct fsl_dma_domain, iommu_domain);
}

static int __init iommu_init_mempool(void)
{
	fsl_pamu_domain_cache = kmem_cache_create("fsl_pamu_domain",
						  sizeof(struct fsl_dma_domain),
						  0,
						  SLAB_HWCACHE_ALIGN,
						  NULL);
	if (!fsl_pamu_domain_cache) {
		pr_debug("Couldn't create fsl iommu_domain cache\n");
		return -ENOMEM;
	}

	iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
						sizeof(struct device_domain_info),
						0,
						SLAB_HWCACHE_ALIGN,
						NULL);
	if (!iommu_devinfo_cache) {
		pr_debug("Couldn't create devinfo cache\n");
		kmem_cache_destroy(fsl_pamu_domain_cache);
		return -ENOMEM;
	}

	return 0;
}
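
/*
 * Editorial sketch (not part of the original driver): PAMU is built in
 * and never unloads, so there is no teardown path for these caches. If
 * one were ever needed, it would mirror the setup above:
 *
 *	kmem_cache_destroy(iommu_devinfo_cache);
 *	kmem_cache_destroy(fsl_pamu_domain_cache);
 */
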
static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain,
			      u32 val)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&iommu_lock, flags);
	ret = pamu_update_paace_stash(liodn, val);
	if (ret)
		pr_debug("Failed to update SPAACE for liodn %d\n", liodn);
	spin_unlock_irqrestore(&iommu_lock, flags);

	return ret;
}

/* Set the geometry parameters for a LIODN */
static int pamu_set_liodn(struct fsl_dma_domain *dma_domain, struct device *dev,
			  int liodn)
{
	u32 omi_index = ~(u32)0;
	unsigned long flags;
	int ret;

	/*
	 * Configure the omi_index at geometry setup time. This is a
	 * static value which depends on the type of device and does
	 * not change thereafter.
	 */
	get_ome_index(&omi_index, dev);

	spin_lock_irqsave(&iommu_lock, flags);
	ret = pamu_disable_liodn(liodn);
	if (ret)
		goto out_unlock;
	ret = pamu_config_ppaace(liodn, omi_index, dma_domain->stash_id, 0);
	if (ret)
		goto out_unlock;
	ret = pamu_config_ppaace(liodn, ~(u32)0, dma_domain->stash_id,
				 PAACE_AP_PERMS_QUERY | PAACE_AP_PERMS_UPDATE);
out_unlock:
	spin_unlock_irqrestore(&iommu_lock, flags);
	if (ret)
		pr_debug("PAACE configuration failed for liodn %d\n", liodn);
	return ret;
}

static void remove_device_ref(struct device_domain_info *info)
{
	unsigned long flags;

	list_del(&info->link);
	spin_lock_irqsave(&iommu_lock, flags);
	pamu_disable_liodn(info->liodn);
	spin_unlock_irqrestore(&iommu_lock, flags);
	spin_lock_irqsave(&device_domain_lock, flags);
	dev_iommu_priv_set(info->dev, NULL);
	kmem_cache_free(iommu_devinfo_cache, info);
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

static void detach_device(struct device *dev, struct fsl_dma_domain *dma_domain)
{
	struct device_domain_info *info, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	/* Remove the device from the domain device list */
	list_for_each_entry_safe(info, tmp, &dma_domain->devices, link) {
		if (!dev || (info->dev == dev))
			remove_device_ref(info);
	}
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
}
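
/*
 * Editorial note: passing a NULL dev to detach_device() above removes
 * every device attached to the domain; that is how the domain free
 * path below tears the whole domain down:
 *
 *	detach_device(NULL, dma_domain);
 */
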
static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct device *dev)
{
	struct device_domain_info *info, *old_domain_info;
	unsigned long flags;

	spin_lock_irqsave(&device_domain_lock, flags);
	/*
	 * If the device is already attached to a different domain,
	 * detach it from that domain first.
	 */
	old_domain_info = dev_iommu_priv_get(dev);
	if (old_domain_info && old_domain_info->domain != dma_domain) {
		spin_unlock_irqrestore(&device_domain_lock, flags);
		detach_device(dev, old_domain_info->domain);
		spin_lock_irqsave(&device_domain_lock, flags);
	}

	info = kmem_cache_zalloc(iommu_devinfo_cache, GFP_ATOMIC);
	if (!info) {
		spin_unlock_irqrestore(&device_domain_lock, flags);
		return;
	}

	info->dev = dev;
	info->liodn = liodn;
	info->domain = dma_domain;

	list_add(&info->link, &dma_domain->devices);
	/*
	 * For devices with multiple LIODNs, just store the info for the
	 * first LIODN, as all LIODNs share the same domain.
	 */
	if (!dev_iommu_priv_get(dev))
		dev_iommu_priv_set(dev, info);
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

static phys_addr_t fsl_pamu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	if (iova < domain->geometry.aperture_start ||
	    iova > domain->geometry.aperture_end)
		return 0;
	return iova;
}

static bool fsl_pamu_capable(enum iommu_cap cap)
{
	return cap == IOMMU_CAP_CACHE_COHERENCY;
}

static void fsl_pamu_domain_free(struct iommu_domain *domain)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);

	/* remove all the devices from the device list */
	detach_device(NULL, dma_domain);
	kmem_cache_free(fsl_pamu_domain_cache, dma_domain);
}

static struct iommu_domain *fsl_pamu_domain_alloc(unsigned type)
{
	struct fsl_dma_domain *dma_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	dma_domain = kmem_cache_zalloc(fsl_pamu_domain_cache, GFP_KERNEL);
	if (!dma_domain)
		return NULL;

	dma_domain->stash_id = ~(u32)0;
	INIT_LIST_HEAD(&dma_domain->devices);
	spin_lock_init(&dma_domain->domain_lock);

	/* default geometry 64 GB i.e. maximum system address */
	dma_domain->iommu_domain.geometry.aperture_start = 0;
	dma_domain->iommu_domain.geometry.aperture_end = (1ULL << 36) - 1;
	dma_domain->iommu_domain.geometry.force_aperture = true;

	return &dma_domain->iommu_domain;
}

/* Update stash destination for all LIODNs associated with the domain */
static int update_domain_stash(struct fsl_dma_domain *dma_domain, u32 val)
{
	struct device_domain_info *info;
	int ret = 0;

	list_for_each_entry(info, &dma_domain->devices, link) {
		ret = update_liodn_stash(info->liodn, dma_domain, val);
		if (ret)
			break;
	}

	return ret;
}
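
/*
 * Illustrative device-tree fragment (node name and LIODN values are
 * made up): the attach path below reads the "fsl,liodn" property and
 * programs one PAACE entry per listed LIODN.
 *
 *	dma0: dma@100300 {
 *		...
 *		fsl,liodn = <0x10 0x11>;
 *	};
 */
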
static int fsl_pamu_attach_device(struct iommu_domain *domain,
				  struct device *dev)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
	unsigned long flags;
	int len, ret = 0, i;
	const u32 *liodn;
	struct pci_dev *pdev = NULL;
	struct pci_controller *pci_ctl;

	/*
	 * Use the LIODN of the PCI controller while attaching a
	 * PCI device.
	 */
	if (dev_is_pci(dev)) {
		pdev = to_pci_dev(dev);
		pci_ctl = pci_bus_to_host(pdev->bus);
		/*
		 * Make dev point to the PCI controller device
		 * so we can get the LIODN programmed by U-Boot.
		 */
		dev = pci_ctl->parent;
	}

	liodn = of_get_property(dev->of_node, "fsl,liodn", &len);
	if (!liodn) {
		pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node);
		return -EINVAL;
	}

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	for (i = 0; i < len / sizeof(u32); i++) {
		/* Ensure that the LIODN value is valid */
		if (liodn[i] >= PAACE_NUMBER_ENTRIES) {
			pr_debug("Invalid liodn %d, attach device failed for %pOF\n",
				 liodn[i], dev->of_node);
			ret = -EINVAL;
			break;
		}

		attach_device(dma_domain, liodn[i], dev);
		ret = pamu_set_liodn(dma_domain, dev, liodn[i]);
		if (ret)
			break;
		ret = pamu_enable_liodn(liodn[i]);
		if (ret)
			break;
	}
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
	return ret;
}

static void fsl_pamu_detach_device(struct iommu_domain *domain,
				   struct device *dev)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
	const u32 *prop;
	int len;
	struct pci_dev *pdev = NULL;
	struct pci_controller *pci_ctl;

	/*
	 * Use the LIODN of the PCI controller while detaching a
	 * PCI device.
	 */
	if (dev_is_pci(dev)) {
		pdev = to_pci_dev(dev);
		pci_ctl = pci_bus_to_host(pdev->bus);
		/*
		 * Make dev point to the PCI controller device
		 * so we can get the LIODN programmed by U-Boot.
		 */
		dev = pci_ctl->parent;
	}

	prop = of_get_property(dev->of_node, "fsl,liodn", &len);
	if (prop)
		detach_device(dev, dma_domain);
	else
		pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node);
}

/* Set the domain stash attribute */
int fsl_pamu_configure_l1_stash(struct iommu_domain *domain, u32 cpu)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	dma_domain->stash_id = get_stash_id(PAMU_ATTR_CACHE_L1, cpu);
	if (dma_domain->stash_id == ~(u32)0) {
		pr_debug("Invalid stash attributes\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -EINVAL;
	}
	ret = update_domain_stash(dma_domain, dma_domain->stash_id);
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

	return ret;
}
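
/*
 * Illustrative caller (a sketch loosely modelled on the DPAA portal
 * setup; the variable names are made up): a consumer allocates an
 * unmanaged domain, points L1 stashing at the CPU that owns the
 * portal, and then attaches the portal device:
 *
 *	domain = iommu_domain_alloc(&platform_bus_type);
 *	if (domain) {
 *		fsl_pamu_configure_l1_stash(domain, cpu);
 *		iommu_attach_device(domain, portal_dev);
 *	}
 */
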
static struct iommu_group *get_device_iommu_group(struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get(dev);
	if (!group)
		group = iommu_group_alloc();

	return group;
}

static bool check_pci_ctl_endpt_part(struct pci_controller *pci_ctl)
{
	u32 version;

	/* Check the PCI controller version number by reading the BRR1 register */
	version = in_be32(pci_ctl->cfg_addr + (PCI_FSL_BRR1 >> 2));
	version &= PCI_FSL_BRR1_VER;
	/* If the PCI controller version is >= 0x204 we can partition endpoints */
	return version >= 0x204;
}

/* Get iommu group information from peer devices or devices on the parent bus */
static struct iommu_group *get_shared_pci_device_group(struct pci_dev *pdev)
{
	struct pci_dev *tmp;
	struct iommu_group *group;
	struct pci_bus *bus = pdev->bus;

	/*
	 * Traverse the pci bus device list to get
	 * the shared iommu group.
	 */
	while (bus) {
		list_for_each_entry(tmp, &bus->devices, bus_list) {
			if (tmp == pdev)
				continue;
			group = iommu_group_get(&tmp->dev);
			if (group)
				return group;
		}

		bus = bus->parent;
	}

	return NULL;
}

static struct iommu_group *get_pci_device_group(struct pci_dev *pdev)
{
	struct pci_controller *pci_ctl;
	bool pci_endpt_partitioning;
	struct iommu_group *group = NULL;

	pci_ctl = pci_bus_to_host(pdev->bus);
	pci_endpt_partitioning = check_pci_ctl_endpt_part(pci_ctl);
	/* We can partition PCIe devices so assign device group to the device */
	if (pci_endpt_partitioning) {
		group = pci_device_group(&pdev->dev);

		/*
		 * The PCIe controller is not a partitionable entity;
		 * free the controller device's iommu_group.
		 */
		if (pci_ctl->parent->iommu_group)
			iommu_group_remove_device(pci_ctl->parent);
	} else {
		/*
		 * All devices connected to the controller will share the
		 * PCI controller's device group. If this is the first
		 * device to be probed for the pci controller, copy the
		 * device group information from the PCI controller device
		 * node and remove the PCI controller iommu group.
		 * For subsequent devices, the iommu group information can
		 * be obtained from sibling devices (i.e. from the bus_devices
		 * linked list).
		 */
		if (pci_ctl->parent->iommu_group) {
			group = get_device_iommu_group(pci_ctl->parent);
			iommu_group_remove_device(pci_ctl->parent);
		} else {
			group = get_shared_pci_device_group(pdev);
		}
	}

	if (!group)
		group = ERR_PTR(-ENODEV);

	return group;
}

static struct iommu_group *fsl_pamu_device_group(struct device *dev)
{
	struct iommu_group *group = ERR_PTR(-ENODEV);
	int len;

	/*
	 * For platform devices we allocate a separate group for
	 * each of the devices.
	 */
	if (dev_is_pci(dev))
		group = get_pci_device_group(to_pci_dev(dev));
	else if (of_get_property(dev->of_node, "fsl,liodn", &len))
		group = get_device_iommu_group(dev);

	return group;
}

static struct iommu_device *fsl_pamu_probe_device(struct device *dev)
{
	return &pamu_iommu;
}

static void fsl_pamu_release_device(struct device *dev)
{
}

static const struct iommu_ops fsl_pamu_ops = {
	.capable = fsl_pamu_capable,
	.domain_alloc = fsl_pamu_domain_alloc,
	.probe_device = fsl_pamu_probe_device,
	.release_device = fsl_pamu_release_device,
	.device_group = fsl_pamu_device_group,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev = fsl_pamu_attach_device,
		.detach_dev = fsl_pamu_detach_device,
		.iova_to_phys = fsl_pamu_iova_to_phys,
		.free = fsl_pamu_domain_free,
	}
};

int __init pamu_domain_init(void)
{
	int ret = 0;

	ret = iommu_init_mempool();
	if (ret)
		return ret;

	ret = iommu_device_sysfs_add(&pamu_iommu, NULL, NULL, "iommu0");
	if (ret)
		return ret;

	ret = iommu_device_register(&pamu_iommu, &fsl_pamu_ops, NULL);
	if (ret) {
		iommu_device_sysfs_remove(&pamu_iommu);
		pr_err("Can't register iommu device\n");
		return ret;
	}

	bus_set_iommu(&platform_bus_type, &fsl_pamu_ops);
	bus_set_iommu(&pci_bus_type, &fsl_pamu_ops);

	return ret;
}
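
/*
 * Editorial note: there is intentionally no pamu_domain_exit(); PAMU is
 * built in and initialized once during boot. A hypothetical unwind
 * would simply mirror the registration above:
 *
 *	iommu_device_unregister(&pamu_iommu);
 *	iommu_device_sysfs_remove(&pamu_iommu);
 */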