1d94d71cbSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only 2695093e3SVarun Sethi /* 3695093e3SVarun Sethi * 4695093e3SVarun Sethi * Copyright (C) 2013 Freescale Semiconductor, Inc. 5695093e3SVarun Sethi * Author: Varun Sethi <varun.sethi@freescale.com> 6695093e3SVarun Sethi */ 7695093e3SVarun Sethi 8695093e3SVarun Sethi #define pr_fmt(fmt) "fsl-pamu-domain: %s: " fmt, __func__ 9695093e3SVarun Sethi 10695093e3SVarun Sethi #include "fsl_pamu_domain.h" 11695093e3SVarun Sethi 12cd70d465SEmil Medve #include <sysdev/fsl_pci.h> 13cd70d465SEmil Medve 14695093e3SVarun Sethi /* 15695093e3SVarun Sethi * Global spinlock that needs to be held while 16695093e3SVarun Sethi * configuring PAMU. 17695093e3SVarun Sethi */ 18695093e3SVarun Sethi static DEFINE_SPINLOCK(iommu_lock); 19695093e3SVarun Sethi 20695093e3SVarun Sethi static struct kmem_cache *fsl_pamu_domain_cache; 21695093e3SVarun Sethi static struct kmem_cache *iommu_devinfo_cache; 22695093e3SVarun Sethi static DEFINE_SPINLOCK(device_domain_lock); 23695093e3SVarun Sethi 243ff2dcc0SJoerg Roedel struct iommu_device pamu_iommu; /* IOMMU core code handle */ 253ff2dcc0SJoerg Roedel 268d4bfe40SJoerg Roedel static struct fsl_dma_domain *to_fsl_dma_domain(struct iommu_domain *dom) 278d4bfe40SJoerg Roedel { 288d4bfe40SJoerg Roedel return container_of(dom, struct fsl_dma_domain, iommu_domain); 298d4bfe40SJoerg Roedel } 308d4bfe40SJoerg Roedel 31695093e3SVarun Sethi static int __init iommu_init_mempool(void) 32695093e3SVarun Sethi { 33695093e3SVarun Sethi fsl_pamu_domain_cache = kmem_cache_create("fsl_pamu_domain", 34695093e3SVarun Sethi sizeof(struct fsl_dma_domain), 35695093e3SVarun Sethi 0, 36695093e3SVarun Sethi SLAB_HWCACHE_ALIGN, 37695093e3SVarun Sethi NULL); 38695093e3SVarun Sethi if (!fsl_pamu_domain_cache) { 39695093e3SVarun Sethi pr_debug("Couldn't create fsl iommu_domain cache\n"); 40695093e3SVarun Sethi return -ENOMEM; 41695093e3SVarun Sethi } 42695093e3SVarun Sethi 43695093e3SVarun Sethi iommu_devinfo_cache = 
kmem_cache_create("iommu_devinfo", 44695093e3SVarun Sethi sizeof(struct device_domain_info), 45695093e3SVarun Sethi 0, 46695093e3SVarun Sethi SLAB_HWCACHE_ALIGN, 47695093e3SVarun Sethi NULL); 48695093e3SVarun Sethi if (!iommu_devinfo_cache) { 49695093e3SVarun Sethi pr_debug("Couldn't create devinfo cache\n"); 50695093e3SVarun Sethi kmem_cache_destroy(fsl_pamu_domain_cache); 51695093e3SVarun Sethi return -ENOMEM; 52695093e3SVarun Sethi } 53695093e3SVarun Sethi 54695093e3SVarun Sethi return 0; 55695093e3SVarun Sethi } 56695093e3SVarun Sethi 57695093e3SVarun Sethi static phys_addr_t get_phys_addr(struct fsl_dma_domain *dma_domain, dma_addr_t iova) 58695093e3SVarun Sethi { 59cd70d465SEmil Medve struct dma_window *win_ptr = &dma_domain->win_arr[0]; 60695093e3SVarun Sethi struct iommu_domain_geometry *geom; 61695093e3SVarun Sethi 628d4bfe40SJoerg Roedel geom = &dma_domain->iommu_domain.geometry; 63695093e3SVarun Sethi 64695093e3SVarun Sethi if (win_ptr->valid) 65cd70d465SEmil Medve return win_ptr->paddr + (iova & (win_ptr->size - 1)); 66695093e3SVarun Sethi 67695093e3SVarun Sethi return 0; 68695093e3SVarun Sethi } 69695093e3SVarun Sethi 70*ba58d121SChristoph Hellwig /* Map the DMA window corresponding to the LIODN */ 71*ba58d121SChristoph Hellwig static int map_liodn(int liodn, struct fsl_dma_domain *dma_domain) 72695093e3SVarun Sethi { 73695093e3SVarun Sethi int ret; 74695093e3SVarun Sethi struct dma_window *wnd = &dma_domain->win_arr[0]; 758d4bfe40SJoerg Roedel phys_addr_t wnd_addr = dma_domain->iommu_domain.geometry.aperture_start; 76695093e3SVarun Sethi unsigned long flags; 77695093e3SVarun Sethi 78695093e3SVarun Sethi spin_lock_irqsave(&iommu_lock, flags); 79695093e3SVarun Sethi ret = pamu_config_ppaace(liodn, wnd_addr, 80695093e3SVarun Sethi wnd->size, 81695093e3SVarun Sethi ~(u32)0, 82695093e3SVarun Sethi wnd->paddr >> PAMU_PAGE_SHIFT, 83695093e3SVarun Sethi dma_domain->snoop_id, dma_domain->stash_id, 84*ba58d121SChristoph Hellwig wnd->prot); 85695093e3SVarun 
Sethi spin_unlock_irqrestore(&iommu_lock, flags); 86695093e3SVarun Sethi if (ret) 87cd70d465SEmil Medve pr_debug("PAACE configuration failed for liodn %d\n", liodn); 88695093e3SVarun Sethi 89695093e3SVarun Sethi return ret; 90695093e3SVarun Sethi } 91695093e3SVarun Sethi 92695093e3SVarun Sethi /* Update window/subwindow mapping for the LIODN */ 93695093e3SVarun Sethi static int update_liodn(int liodn, struct fsl_dma_domain *dma_domain, u32 wnd_nr) 94695093e3SVarun Sethi { 95695093e3SVarun Sethi int ret; 96695093e3SVarun Sethi struct dma_window *wnd = &dma_domain->win_arr[wnd_nr]; 97*ba58d121SChristoph Hellwig phys_addr_t wnd_addr; 98695093e3SVarun Sethi unsigned long flags; 99695093e3SVarun Sethi 100695093e3SVarun Sethi spin_lock_irqsave(&iommu_lock, flags); 101695093e3SVarun Sethi 1028d4bfe40SJoerg Roedel wnd_addr = dma_domain->iommu_domain.geometry.aperture_start; 103695093e3SVarun Sethi 104695093e3SVarun Sethi ret = pamu_config_ppaace(liodn, wnd_addr, 105695093e3SVarun Sethi wnd->size, 106695093e3SVarun Sethi ~(u32)0, 107695093e3SVarun Sethi wnd->paddr >> PAMU_PAGE_SHIFT, 108695093e3SVarun Sethi dma_domain->snoop_id, dma_domain->stash_id, 109*ba58d121SChristoph Hellwig wnd->prot); 110695093e3SVarun Sethi if (ret) 111cd70d465SEmil Medve pr_debug("Window reconfiguration failed for liodn %d\n", 112cd70d465SEmil Medve liodn); 113695093e3SVarun Sethi 114695093e3SVarun Sethi spin_unlock_irqrestore(&iommu_lock, flags); 115695093e3SVarun Sethi 116695093e3SVarun Sethi return ret; 117695093e3SVarun Sethi } 118695093e3SVarun Sethi 119695093e3SVarun Sethi static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain, 120695093e3SVarun Sethi u32 val) 121695093e3SVarun Sethi { 122695093e3SVarun Sethi int ret = 0, i; 123695093e3SVarun Sethi unsigned long flags; 124695093e3SVarun Sethi 125695093e3SVarun Sethi spin_lock_irqsave(&iommu_lock, flags); 126*ba58d121SChristoph Hellwig ret = pamu_update_paace_stash(liodn, val); 127695093e3SVarun Sethi if (ret) { 
128cd70d465SEmil Medve pr_debug("Failed to update SPAACE %d field for liodn %d\n ", 129cd70d465SEmil Medve i, liodn); 130695093e3SVarun Sethi spin_unlock_irqrestore(&iommu_lock, flags); 131695093e3SVarun Sethi return ret; 132695093e3SVarun Sethi } 133695093e3SVarun Sethi 134695093e3SVarun Sethi spin_unlock_irqrestore(&iommu_lock, flags); 135695093e3SVarun Sethi 136695093e3SVarun Sethi return ret; 137695093e3SVarun Sethi } 138695093e3SVarun Sethi 139695093e3SVarun Sethi /* Set the geometry parameters for a LIODN */ 140695093e3SVarun Sethi static int pamu_set_liodn(int liodn, struct device *dev, 141695093e3SVarun Sethi struct fsl_dma_domain *dma_domain, 142*ba58d121SChristoph Hellwig struct iommu_domain_geometry *geom_attr) 143695093e3SVarun Sethi { 144695093e3SVarun Sethi phys_addr_t window_addr, window_size; 145695093e3SVarun Sethi u32 omi_index = ~(u32)0; 146695093e3SVarun Sethi unsigned long flags; 147*ba58d121SChristoph Hellwig int ret; 148695093e3SVarun Sethi 149695093e3SVarun Sethi /* 150695093e3SVarun Sethi * Configure the omi_index at the geometry setup time. 151695093e3SVarun Sethi * This is a static value which depends on the type of 152695093e3SVarun Sethi * device and would not change thereafter. 
153695093e3SVarun Sethi */ 154695093e3SVarun Sethi get_ome_index(&omi_index, dev); 155695093e3SVarun Sethi 156695093e3SVarun Sethi window_addr = geom_attr->aperture_start; 157f7641bb7SChristoph Hellwig window_size = geom_attr->aperture_end + 1; 158695093e3SVarun Sethi 159695093e3SVarun Sethi spin_lock_irqsave(&iommu_lock, flags); 160695093e3SVarun Sethi ret = pamu_disable_liodn(liodn); 161695093e3SVarun Sethi if (!ret) 162695093e3SVarun Sethi ret = pamu_config_ppaace(liodn, window_addr, window_size, omi_index, 163695093e3SVarun Sethi 0, dma_domain->snoop_id, 164*ba58d121SChristoph Hellwig dma_domain->stash_id, 0); 165695093e3SVarun Sethi spin_unlock_irqrestore(&iommu_lock, flags); 166695093e3SVarun Sethi if (ret) { 167*ba58d121SChristoph Hellwig pr_debug("PAACE configuration failed for liodn %d\n", 168cd70d465SEmil Medve liodn); 169695093e3SVarun Sethi return ret; 170695093e3SVarun Sethi } 171695093e3SVarun Sethi 172695093e3SVarun Sethi return ret; 173695093e3SVarun Sethi } 174695093e3SVarun Sethi 175695093e3SVarun Sethi static int check_size(u64 size, dma_addr_t iova) 176695093e3SVarun Sethi { 177695093e3SVarun Sethi /* 178695093e3SVarun Sethi * Size must be a power of two and at least be equal 179695093e3SVarun Sethi * to PAMU page size. 
180695093e3SVarun Sethi */ 181d033f48fSVarun Sethi if ((size & (size - 1)) || size < PAMU_PAGE_SIZE) { 182cd70d465SEmil Medve pr_debug("Size too small or not a power of two\n"); 183695093e3SVarun Sethi return -EINVAL; 184695093e3SVarun Sethi } 185695093e3SVarun Sethi 186695093e3SVarun Sethi /* iova must be page size aligned */ 187695093e3SVarun Sethi if (iova & (size - 1)) { 188cd70d465SEmil Medve pr_debug("Address is not aligned with window size\n"); 189695093e3SVarun Sethi return -EINVAL; 190695093e3SVarun Sethi } 191695093e3SVarun Sethi 192695093e3SVarun Sethi return 0; 193695093e3SVarun Sethi } 194695093e3SVarun Sethi 195*ba58d121SChristoph Hellwig static void remove_device_ref(struct device_domain_info *info) 196695093e3SVarun Sethi { 197695093e3SVarun Sethi unsigned long flags; 198695093e3SVarun Sethi 199695093e3SVarun Sethi list_del(&info->link); 200695093e3SVarun Sethi spin_lock_irqsave(&iommu_lock, flags); 201695093e3SVarun Sethi pamu_disable_liodn(info->liodn); 202695093e3SVarun Sethi spin_unlock_irqrestore(&iommu_lock, flags); 203695093e3SVarun Sethi spin_lock_irqsave(&device_domain_lock, flags); 2042263d818SJoerg Roedel dev_iommu_priv_set(info->dev, NULL); 205695093e3SVarun Sethi kmem_cache_free(iommu_devinfo_cache, info); 206695093e3SVarun Sethi spin_unlock_irqrestore(&device_domain_lock, flags); 207695093e3SVarun Sethi } 208695093e3SVarun Sethi 209695093e3SVarun Sethi static void detach_device(struct device *dev, struct fsl_dma_domain *dma_domain) 210695093e3SVarun Sethi { 211695093e3SVarun Sethi struct device_domain_info *info, *tmp; 212695093e3SVarun Sethi unsigned long flags; 213695093e3SVarun Sethi 214695093e3SVarun Sethi spin_lock_irqsave(&dma_domain->domain_lock, flags); 215695093e3SVarun Sethi /* Remove the device from the domain device list */ 216695093e3SVarun Sethi list_for_each_entry_safe(info, tmp, &dma_domain->devices, link) { 217695093e3SVarun Sethi if (!dev || (info->dev == dev)) 218*ba58d121SChristoph Hellwig remove_device_ref(info); 
219695093e3SVarun Sethi } 220695093e3SVarun Sethi spin_unlock_irqrestore(&dma_domain->domain_lock, flags); 221695093e3SVarun Sethi } 222695093e3SVarun Sethi 223695093e3SVarun Sethi static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct device *dev) 224695093e3SVarun Sethi { 225695093e3SVarun Sethi struct device_domain_info *info, *old_domain_info; 226695093e3SVarun Sethi unsigned long flags; 227695093e3SVarun Sethi 228695093e3SVarun Sethi spin_lock_irqsave(&device_domain_lock, flags); 229695093e3SVarun Sethi /* 230695093e3SVarun Sethi * Check here if the device is already attached to domain or not. 231695093e3SVarun Sethi * If the device is already attached to a domain detach it. 232695093e3SVarun Sethi */ 2332263d818SJoerg Roedel old_domain_info = dev_iommu_priv_get(dev); 234695093e3SVarun Sethi if (old_domain_info && old_domain_info->domain != dma_domain) { 235695093e3SVarun Sethi spin_unlock_irqrestore(&device_domain_lock, flags); 236695093e3SVarun Sethi detach_device(dev, old_domain_info->domain); 237695093e3SVarun Sethi spin_lock_irqsave(&device_domain_lock, flags); 238695093e3SVarun Sethi } 239695093e3SVarun Sethi 240695093e3SVarun Sethi info = kmem_cache_zalloc(iommu_devinfo_cache, GFP_ATOMIC); 241695093e3SVarun Sethi 242695093e3SVarun Sethi info->dev = dev; 243695093e3SVarun Sethi info->liodn = liodn; 244695093e3SVarun Sethi info->domain = dma_domain; 245695093e3SVarun Sethi 246695093e3SVarun Sethi list_add(&info->link, &dma_domain->devices); 247695093e3SVarun Sethi /* 248695093e3SVarun Sethi * In case of devices with multiple LIODNs just store 249695093e3SVarun Sethi * the info for the first LIODN as all 250695093e3SVarun Sethi * LIODNs share the same domain 251695093e3SVarun Sethi */ 2522263d818SJoerg Roedel if (!dev_iommu_priv_get(dev)) 2532263d818SJoerg Roedel dev_iommu_priv_set(dev, info); 254695093e3SVarun Sethi spin_unlock_irqrestore(&device_domain_lock, flags); 255695093e3SVarun Sethi } 256695093e3SVarun Sethi 
257695093e3SVarun Sethi static phys_addr_t fsl_pamu_iova_to_phys(struct iommu_domain *domain, 258695093e3SVarun Sethi dma_addr_t iova) 259695093e3SVarun Sethi { 2608d4bfe40SJoerg Roedel struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); 261695093e3SVarun Sethi 262cd70d465SEmil Medve if (iova < domain->geometry.aperture_start || 263cd70d465SEmil Medve iova > domain->geometry.aperture_end) 264695093e3SVarun Sethi return 0; 265695093e3SVarun Sethi 266695093e3SVarun Sethi return get_phys_addr(dma_domain, iova); 267695093e3SVarun Sethi } 268695093e3SVarun Sethi 269b7eb6785SJoerg Roedel static bool fsl_pamu_capable(enum iommu_cap cap) 270695093e3SVarun Sethi { 271695093e3SVarun Sethi return cap == IOMMU_CAP_CACHE_COHERENCY; 272695093e3SVarun Sethi } 273695093e3SVarun Sethi 2748d4bfe40SJoerg Roedel static void fsl_pamu_domain_free(struct iommu_domain *domain) 275695093e3SVarun Sethi { 2768d4bfe40SJoerg Roedel struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); 277695093e3SVarun Sethi 278695093e3SVarun Sethi /* remove all the devices from the device list */ 279695093e3SVarun Sethi detach_device(NULL, dma_domain); 280695093e3SVarun Sethi 281695093e3SVarun Sethi dma_domain->enabled = 0; 282695093e3SVarun Sethi dma_domain->mapped = 0; 283695093e3SVarun Sethi 284695093e3SVarun Sethi kmem_cache_free(fsl_pamu_domain_cache, dma_domain); 285695093e3SVarun Sethi } 286695093e3SVarun Sethi 2878d4bfe40SJoerg Roedel static struct iommu_domain *fsl_pamu_domain_alloc(unsigned type) 288695093e3SVarun Sethi { 289695093e3SVarun Sethi struct fsl_dma_domain *dma_domain; 290695093e3SVarun Sethi 2918d4bfe40SJoerg Roedel if (type != IOMMU_DOMAIN_UNMANAGED) 2928d4bfe40SJoerg Roedel return NULL; 2938d4bfe40SJoerg Roedel 294c8224508SChristoph Hellwig dma_domain = kmem_cache_zalloc(fsl_pamu_domain_cache, GFP_KERNEL); 295c8224508SChristoph Hellwig if (!dma_domain) 2968d4bfe40SJoerg Roedel return NULL; 297c8224508SChristoph Hellwig 298c8224508SChristoph Hellwig 
dma_domain->stash_id = ~(u32)0; 299c8224508SChristoph Hellwig dma_domain->snoop_id = ~(u32)0; 300c8224508SChristoph Hellwig INIT_LIST_HEAD(&dma_domain->devices); 301c8224508SChristoph Hellwig spin_lock_init(&dma_domain->domain_lock); 302c8224508SChristoph Hellwig 303c8224508SChristoph Hellwig /* default geometry 64 GB i.e. maximum system address */ 3048d4bfe40SJoerg Roedel dma_domain->iommu_domain. geometry.aperture_start = 0; 3058d4bfe40SJoerg Roedel dma_domain->iommu_domain.geometry.aperture_end = (1ULL << 36) - 1; 3068d4bfe40SJoerg Roedel dma_domain->iommu_domain.geometry.force_aperture = true; 307695093e3SVarun Sethi 3088d4bfe40SJoerg Roedel return &dma_domain->iommu_domain; 309695093e3SVarun Sethi } 310695093e3SVarun Sethi 311695093e3SVarun Sethi /* Update stash destination for all LIODNs associated with the domain */ 312695093e3SVarun Sethi static int update_domain_stash(struct fsl_dma_domain *dma_domain, u32 val) 313695093e3SVarun Sethi { 314695093e3SVarun Sethi struct device_domain_info *info; 315695093e3SVarun Sethi int ret = 0; 316695093e3SVarun Sethi 317695093e3SVarun Sethi list_for_each_entry(info, &dma_domain->devices, link) { 318695093e3SVarun Sethi ret = update_liodn_stash(info->liodn, dma_domain, val); 319695093e3SVarun Sethi if (ret) 320695093e3SVarun Sethi break; 321695093e3SVarun Sethi } 322695093e3SVarun Sethi 323695093e3SVarun Sethi return ret; 324695093e3SVarun Sethi } 325695093e3SVarun Sethi 326695093e3SVarun Sethi /* Update domain mappings for all LIODNs associated with the domain */ 327695093e3SVarun Sethi static int update_domain_mapping(struct fsl_dma_domain *dma_domain, u32 wnd_nr) 328695093e3SVarun Sethi { 329695093e3SVarun Sethi struct device_domain_info *info; 330695093e3SVarun Sethi int ret = 0; 331695093e3SVarun Sethi 332695093e3SVarun Sethi list_for_each_entry(info, &dma_domain->devices, link) { 333695093e3SVarun Sethi ret = update_liodn(info->liodn, dma_domain, wnd_nr); 334695093e3SVarun Sethi if (ret) 335695093e3SVarun Sethi 
break; 336695093e3SVarun Sethi } 337695093e3SVarun Sethi return ret; 338695093e3SVarun Sethi } 339695093e3SVarun Sethi 340695093e3SVarun Sethi 341695093e3SVarun Sethi static int fsl_pamu_window_enable(struct iommu_domain *domain, u32 wnd_nr, 342695093e3SVarun Sethi phys_addr_t paddr, u64 size, int prot) 343695093e3SVarun Sethi { 3448d4bfe40SJoerg Roedel struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); 345695093e3SVarun Sethi struct dma_window *wnd; 346695093e3SVarun Sethi int pamu_prot = 0; 347695093e3SVarun Sethi int ret; 348695093e3SVarun Sethi unsigned long flags; 349695093e3SVarun Sethi u64 win_size; 350695093e3SVarun Sethi 351695093e3SVarun Sethi if (prot & IOMMU_READ) 352695093e3SVarun Sethi pamu_prot |= PAACE_AP_PERMS_QUERY; 353695093e3SVarun Sethi if (prot & IOMMU_WRITE) 354695093e3SVarun Sethi pamu_prot |= PAACE_AP_PERMS_UPDATE; 355695093e3SVarun Sethi 356695093e3SVarun Sethi spin_lock_irqsave(&dma_domain->domain_lock, flags); 357*ba58d121SChristoph Hellwig if (wnd_nr > 0) { 358695093e3SVarun Sethi pr_debug("Invalid window index\n"); 359695093e3SVarun Sethi spin_unlock_irqrestore(&dma_domain->domain_lock, flags); 360695093e3SVarun Sethi return -EINVAL; 361695093e3SVarun Sethi } 362695093e3SVarun Sethi 363*ba58d121SChristoph Hellwig win_size = (domain->geometry.aperture_end + 1) >> ilog2(1); 364695093e3SVarun Sethi if (size > win_size) { 365695093e3SVarun Sethi pr_debug("Invalid window size\n"); 366695093e3SVarun Sethi spin_unlock_irqrestore(&dma_domain->domain_lock, flags); 367695093e3SVarun Sethi return -EINVAL; 368695093e3SVarun Sethi } 369695093e3SVarun Sethi 370695093e3SVarun Sethi if (dma_domain->enabled) { 371695093e3SVarun Sethi pr_debug("Disable the window before updating the mapping\n"); 372695093e3SVarun Sethi spin_unlock_irqrestore(&dma_domain->domain_lock, flags); 373695093e3SVarun Sethi return -EBUSY; 374695093e3SVarun Sethi } 375695093e3SVarun Sethi 376695093e3SVarun Sethi ret = check_size(size, domain->geometry.aperture_start); 
377695093e3SVarun Sethi if (ret) { 378695093e3SVarun Sethi pr_debug("Aperture start not aligned to the size\n"); 379695093e3SVarun Sethi spin_unlock_irqrestore(&dma_domain->domain_lock, flags); 380695093e3SVarun Sethi return -EINVAL; 381695093e3SVarun Sethi } 382695093e3SVarun Sethi 383695093e3SVarun Sethi wnd = &dma_domain->win_arr[wnd_nr]; 384695093e3SVarun Sethi if (!wnd->valid) { 385695093e3SVarun Sethi wnd->paddr = paddr; 386695093e3SVarun Sethi wnd->size = size; 387695093e3SVarun Sethi wnd->prot = pamu_prot; 388695093e3SVarun Sethi 389695093e3SVarun Sethi ret = update_domain_mapping(dma_domain, wnd_nr); 390695093e3SVarun Sethi if (!ret) { 391695093e3SVarun Sethi wnd->valid = 1; 392695093e3SVarun Sethi dma_domain->mapped++; 393695093e3SVarun Sethi } 394695093e3SVarun Sethi } else { 395695093e3SVarun Sethi pr_debug("Disable the window before updating the mapping\n"); 396695093e3SVarun Sethi ret = -EBUSY; 397695093e3SVarun Sethi } 398695093e3SVarun Sethi 399695093e3SVarun Sethi spin_unlock_irqrestore(&dma_domain->domain_lock, flags); 400695093e3SVarun Sethi 401695093e3SVarun Sethi return ret; 402695093e3SVarun Sethi } 403695093e3SVarun Sethi 404695093e3SVarun Sethi /* 405695093e3SVarun Sethi * Attach the LIODN to the DMA domain and configure the geometry 406695093e3SVarun Sethi * and window mappings. 
407695093e3SVarun Sethi */ 408695093e3SVarun Sethi static int handle_attach_device(struct fsl_dma_domain *dma_domain, 409695093e3SVarun Sethi struct device *dev, const u32 *liodn, 410695093e3SVarun Sethi int num) 411695093e3SVarun Sethi { 412695093e3SVarun Sethi unsigned long flags; 4138d4bfe40SJoerg Roedel struct iommu_domain *domain = &dma_domain->iommu_domain; 414695093e3SVarun Sethi int ret = 0; 415695093e3SVarun Sethi int i; 416695093e3SVarun Sethi 417695093e3SVarun Sethi spin_lock_irqsave(&dma_domain->domain_lock, flags); 418695093e3SVarun Sethi for (i = 0; i < num; i++) { 419695093e3SVarun Sethi /* Ensure that LIODN value is valid */ 420695093e3SVarun Sethi if (liodn[i] >= PAACE_NUMBER_ENTRIES) { 4216bd4f1c7SRob Herring pr_debug("Invalid liodn %d, attach device failed for %pOF\n", 4226bd4f1c7SRob Herring liodn[i], dev->of_node); 423695093e3SVarun Sethi ret = -EINVAL; 424695093e3SVarun Sethi break; 425695093e3SVarun Sethi } 426695093e3SVarun Sethi 427695093e3SVarun Sethi attach_device(dma_domain, liodn[i], dev); 428695093e3SVarun Sethi /* 429695093e3SVarun Sethi * Check if geometry has already been configured 430695093e3SVarun Sethi * for the domain. If yes, set the geometry for 431695093e3SVarun Sethi * the LIODN. 432695093e3SVarun Sethi */ 433695093e3SVarun Sethi ret = pamu_set_liodn(liodn[i], dev, dma_domain, 434*ba58d121SChristoph Hellwig &domain->geometry); 435695093e3SVarun Sethi if (ret) 436695093e3SVarun Sethi break; 437695093e3SVarun Sethi if (dma_domain->mapped) { 438695093e3SVarun Sethi /* 439695093e3SVarun Sethi * Create window/subwindow mapping for 440695093e3SVarun Sethi * the LIODN. 
441695093e3SVarun Sethi */ 442695093e3SVarun Sethi ret = map_liodn(liodn[i], dma_domain); 443695093e3SVarun Sethi if (ret) 444695093e3SVarun Sethi break; 445695093e3SVarun Sethi } 446695093e3SVarun Sethi } 447695093e3SVarun Sethi spin_unlock_irqrestore(&dma_domain->domain_lock, flags); 448695093e3SVarun Sethi 449695093e3SVarun Sethi return ret; 450695093e3SVarun Sethi } 451695093e3SVarun Sethi 452695093e3SVarun Sethi static int fsl_pamu_attach_device(struct iommu_domain *domain, 453695093e3SVarun Sethi struct device *dev) 454695093e3SVarun Sethi { 4558d4bfe40SJoerg Roedel struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); 456695093e3SVarun Sethi const u32 *liodn; 457695093e3SVarun Sethi u32 liodn_cnt; 458695093e3SVarun Sethi int len, ret = 0; 459695093e3SVarun Sethi struct pci_dev *pdev = NULL; 460695093e3SVarun Sethi struct pci_controller *pci_ctl; 461695093e3SVarun Sethi 462695093e3SVarun Sethi /* 463695093e3SVarun Sethi * Use LIODN of the PCI controller while attaching a 464695093e3SVarun Sethi * PCI device. 465695093e3SVarun Sethi */ 466b3eb76d1SYijing Wang if (dev_is_pci(dev)) { 467695093e3SVarun Sethi pdev = to_pci_dev(dev); 468695093e3SVarun Sethi pci_ctl = pci_bus_to_host(pdev->bus); 469695093e3SVarun Sethi /* 470695093e3SVarun Sethi * make dev point to pci controller device 471695093e3SVarun Sethi * so we can get the LIODN programmed by 472695093e3SVarun Sethi * u-boot. 
473695093e3SVarun Sethi */ 474695093e3SVarun Sethi dev = pci_ctl->parent; 475695093e3SVarun Sethi } 476695093e3SVarun Sethi 477695093e3SVarun Sethi liodn = of_get_property(dev->of_node, "fsl,liodn", &len); 478695093e3SVarun Sethi if (liodn) { 479695093e3SVarun Sethi liodn_cnt = len / sizeof(u32); 480cd70d465SEmil Medve ret = handle_attach_device(dma_domain, dev, liodn, liodn_cnt); 481695093e3SVarun Sethi } else { 4826bd4f1c7SRob Herring pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node); 483695093e3SVarun Sethi ret = -EINVAL; 484695093e3SVarun Sethi } 485695093e3SVarun Sethi 486695093e3SVarun Sethi return ret; 487695093e3SVarun Sethi } 488695093e3SVarun Sethi 489695093e3SVarun Sethi static void fsl_pamu_detach_device(struct iommu_domain *domain, 490695093e3SVarun Sethi struct device *dev) 491695093e3SVarun Sethi { 4928d4bfe40SJoerg Roedel struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); 493695093e3SVarun Sethi const u32 *prop; 494695093e3SVarun Sethi int len; 495695093e3SVarun Sethi struct pci_dev *pdev = NULL; 496695093e3SVarun Sethi struct pci_controller *pci_ctl; 497695093e3SVarun Sethi 498695093e3SVarun Sethi /* 499695093e3SVarun Sethi * Use LIODN of the PCI controller while detaching a 500695093e3SVarun Sethi * PCI device. 501695093e3SVarun Sethi */ 502b3eb76d1SYijing Wang if (dev_is_pci(dev)) { 503695093e3SVarun Sethi pdev = to_pci_dev(dev); 504695093e3SVarun Sethi pci_ctl = pci_bus_to_host(pdev->bus); 505695093e3SVarun Sethi /* 506695093e3SVarun Sethi * make dev point to pci controller device 507695093e3SVarun Sethi * so we can get the LIODN programmed by 508695093e3SVarun Sethi * u-boot. 
509695093e3SVarun Sethi */ 510695093e3SVarun Sethi dev = pci_ctl->parent; 511695093e3SVarun Sethi } 512695093e3SVarun Sethi 513695093e3SVarun Sethi prop = of_get_property(dev->of_node, "fsl,liodn", &len); 514695093e3SVarun Sethi if (prop) 515695093e3SVarun Sethi detach_device(dev, dma_domain); 516695093e3SVarun Sethi else 5176bd4f1c7SRob Herring pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node); 518695093e3SVarun Sethi } 519695093e3SVarun Sethi 520695093e3SVarun Sethi /* Set the domain stash attribute */ 521695093e3SVarun Sethi static int configure_domain_stash(struct fsl_dma_domain *dma_domain, void *data) 522695093e3SVarun Sethi { 523695093e3SVarun Sethi struct pamu_stash_attribute *stash_attr = data; 524695093e3SVarun Sethi unsigned long flags; 525695093e3SVarun Sethi int ret; 526695093e3SVarun Sethi 527695093e3SVarun Sethi spin_lock_irqsave(&dma_domain->domain_lock, flags); 528695093e3SVarun Sethi 529695093e3SVarun Sethi memcpy(&dma_domain->dma_stash, stash_attr, 530695093e3SVarun Sethi sizeof(struct pamu_stash_attribute)); 531695093e3SVarun Sethi 532695093e3SVarun Sethi dma_domain->stash_id = get_stash_id(stash_attr->cache, 533695093e3SVarun Sethi stash_attr->cpu); 534695093e3SVarun Sethi if (dma_domain->stash_id == ~(u32)0) { 535695093e3SVarun Sethi pr_debug("Invalid stash attributes\n"); 536695093e3SVarun Sethi spin_unlock_irqrestore(&dma_domain->domain_lock, flags); 537695093e3SVarun Sethi return -EINVAL; 538695093e3SVarun Sethi } 539695093e3SVarun Sethi 540695093e3SVarun Sethi ret = update_domain_stash(dma_domain, dma_domain->stash_id); 541695093e3SVarun Sethi 542695093e3SVarun Sethi spin_unlock_irqrestore(&dma_domain->domain_lock, flags); 543695093e3SVarun Sethi 544695093e3SVarun Sethi return ret; 545695093e3SVarun Sethi } 546695093e3SVarun Sethi 547695093e3SVarun Sethi /* Configure domain dma state i.e. 
enable/disable DMA */ 548695093e3SVarun Sethi static int configure_domain_dma_state(struct fsl_dma_domain *dma_domain, bool enable) 549695093e3SVarun Sethi { 550695093e3SVarun Sethi struct device_domain_info *info; 551695093e3SVarun Sethi unsigned long flags; 552695093e3SVarun Sethi int ret; 553695093e3SVarun Sethi 554695093e3SVarun Sethi spin_lock_irqsave(&dma_domain->domain_lock, flags); 555695093e3SVarun Sethi 556695093e3SVarun Sethi if (enable && !dma_domain->mapped) { 557695093e3SVarun Sethi pr_debug("Can't enable DMA domain without valid mapping\n"); 558695093e3SVarun Sethi spin_unlock_irqrestore(&dma_domain->domain_lock, flags); 559695093e3SVarun Sethi return -ENODEV; 560695093e3SVarun Sethi } 561695093e3SVarun Sethi 562695093e3SVarun Sethi dma_domain->enabled = enable; 563cd70d465SEmil Medve list_for_each_entry(info, &dma_domain->devices, link) { 564695093e3SVarun Sethi ret = (enable) ? pamu_enable_liodn(info->liodn) : 565695093e3SVarun Sethi pamu_disable_liodn(info->liodn); 566695093e3SVarun Sethi if (ret) 567695093e3SVarun Sethi pr_debug("Unable to set dma state for liodn %d", 568695093e3SVarun Sethi info->liodn); 569695093e3SVarun Sethi } 570695093e3SVarun Sethi spin_unlock_irqrestore(&dma_domain->domain_lock, flags); 571695093e3SVarun Sethi 572695093e3SVarun Sethi return 0; 573695093e3SVarun Sethi } 574695093e3SVarun Sethi 575695093e3SVarun Sethi static int fsl_pamu_set_domain_attr(struct iommu_domain *domain, 576695093e3SVarun Sethi enum iommu_attr attr_type, void *data) 577695093e3SVarun Sethi { 5788d4bfe40SJoerg Roedel struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); 579695093e3SVarun Sethi int ret = 0; 580695093e3SVarun Sethi 581695093e3SVarun Sethi switch (attr_type) { 582695093e3SVarun Sethi case DOMAIN_ATTR_FSL_PAMU_STASH: 583695093e3SVarun Sethi ret = configure_domain_stash(dma_domain, data); 584695093e3SVarun Sethi break; 585695093e3SVarun Sethi case DOMAIN_ATTR_FSL_PAMU_ENABLE: 586695093e3SVarun Sethi ret = 
configure_domain_dma_state(dma_domain, *(int *)data);
		break;
	default:
		/* Reject attribute types this driver does not implement. */
		pr_debug("Unsupported attribute type\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}

/*
 * Return the iommu_group for @dev.  If the device is not yet a member of
 * a group, allocate a fresh one.  The caller owns the returned reference.
 */
static struct iommu_group *get_device_iommu_group(struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get(dev);
	if (!group)
		group = iommu_group_alloc();

	return group;
}

/*
 * Return true if the PCI controller supports partitioning of individual
 * endpoints (i.e. each endpoint may sit in its own iommu_group).
 */
static bool check_pci_ctl_endpt_part(struct pci_controller *pci_ctl)
{
	u32 version;

	/* Check the PCI controller version number by reading the BRR1 register */
	version = in_be32(pci_ctl->cfg_addr + (PCI_FSL_BRR1 >> 2));
	version &= PCI_FSL_BRR1_VER;
	/* If PCI controller version is >= 0x204 we can partition endpoints */
	return version >= 0x204;
}

/* Get iommu group information from peer devices or devices on the parent bus */
static struct iommu_group *get_shared_pci_device_group(struct pci_dev *pdev)
{
	struct pci_dev *tmp;
	struct iommu_group *group;
	struct pci_bus *bus = pdev->bus;

	/*
	 * Traverse the pci bus device list to get
	 * the shared iommu group.  Walk up through parent buses until a
	 * sibling with a group is found, or NULL if none exists.
	 */
	while (bus) {
		list_for_each_entry(tmp, &bus->devices, bus_list) {
			if (tmp == pdev)
				continue;
			group = iommu_group_get(&tmp->dev);
			if (group)
				return group;
		}

		bus = bus->parent;
	}

	return NULL;
}

/*
 * Resolve the iommu_group for a PCI device.  Returns a valid group or
 * ERR_PTR(-ENODEV) when no group can be determined.
 */
static struct iommu_group *get_pci_device_group(struct pci_dev *pdev)
{
	struct pci_controller *pci_ctl;
	bool pci_endpt_partitioning;
	struct iommu_group *group = NULL;

	pci_ctl = pci_bus_to_host(pdev->bus);
	pci_endpt_partitioning = check_pci_ctl_endpt_part(pci_ctl);
	/* We can partition PCIe devices so assign device group to the device */
	if (pci_endpt_partitioning) {
		group = pci_device_group(&pdev->dev);

		/*
		 * PCIe controller is not a partitionable entity
		 * free the controller device iommu_group.
		 */
		if (pci_ctl->parent->iommu_group)
			iommu_group_remove_device(pci_ctl->parent);
	} else {
		/*
		 * All devices connected to the controller will share the
		 * PCI controllers device group. If this is the first
		 * device to be probed for the pci controller, copy the
		 * device group information from the PCI controller device
		 * node and remove the PCI controller iommu group.
		 * For subsequent devices, the iommu group information can
		 * be obtained from sibling devices (i.e. from the bus_devices
		 * link list).
		 */
		if (pci_ctl->parent->iommu_group) {
			group = get_device_iommu_group(pci_ctl->parent);
			iommu_group_remove_device(pci_ctl->parent);
		} else {
			group = get_shared_pci_device_group(pdev);
		}
	}

	if (!group)
		group = ERR_PTR(-ENODEV);

	return group;
}

/*
 * iommu_ops->device_group callback: pick the iommu_group for @dev.
 * PCI devices go through the controller-aware PCI path; platform devices
 * only get a group when they carry an "fsl,liodn" DT property (i.e. they
 * are actually behind the PAMU).
 */
static struct iommu_group *fsl_pamu_device_group(struct device *dev)
{
	struct iommu_group *group = ERR_PTR(-ENODEV);
	int len;

	/*
	 * For platform devices we allocate a separate group for
	 * each of the devices.
	 */
	if (dev_is_pci(dev))
		group = get_pci_device_group(to_pci_dev(dev));
	else if (of_get_property(dev->of_node, "fsl,liodn", &len))
		group = get_device_iommu_group(dev);

	return group;
}

/* All devices are handled by the single global PAMU iommu instance. */
static struct iommu_device *fsl_pamu_probe_device(struct device *dev)
{
	return &pamu_iommu;
}

/* Nothing to tear down per device; probe_device allocates nothing. */
static void fsl_pamu_release_device(struct device *dev)
{
}

static const struct iommu_ops fsl_pamu_ops = {
	.capable = fsl_pamu_capable,
	.domain_alloc = fsl_pamu_domain_alloc,
	.domain_free = fsl_pamu_domain_free,
	.attach_dev = fsl_pamu_attach_device,
	.detach_dev = fsl_pamu_detach_device,
	.domain_window_enable = fsl_pamu_window_enable,
	.iova_to_phys = fsl_pamu_iova_to_phys,
	.domain_set_attr = fsl_pamu_set_domain_attr,
	.probe_device = fsl_pamu_probe_device,
	.release_device = fsl_pamu_release_device,
	.device_group = fsl_pamu_device_group,
};

/*
 * One-time driver setup: create the domain/devinfo kmem caches, register
 * the PAMU with the IOMMU core (sysfs entry + iommu_device), and install
 * fsl_pamu_ops on the platform and PCI buses.  Returns 0 on success or a
 * negative errno; sysfs registration is rolled back on a later failure.
 */
int __init pamu_domain_init(void)
{
	int ret = 0;

	ret = iommu_init_mempool();
	if (ret)
		return ret;

	ret = iommu_device_sysfs_add(&pamu_iommu, NULL, NULL, "iommu0");
	if (ret)
		return ret;

	iommu_device_set_ops(&pamu_iommu, &fsl_pamu_ops);

	ret = iommu_device_register(&pamu_iommu);
	if (ret) {
		iommu_device_sysfs_remove(&pamu_iommu);
		pr_err("Can't register iommu device\n");
		return ret;
	}

	bus_set_iommu(&platform_bus_type, &fsl_pamu_ops);
	bus_set_iommu(&pci_bus_type, &fsl_pamu_ops);

	return ret;
}