1d94d71cbSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
2695093e3SVarun Sethi /*
3695093e3SVarun Sethi  *
4695093e3SVarun Sethi  * Copyright (C) 2013 Freescale Semiconductor, Inc.
5695093e3SVarun Sethi  * Author: Varun Sethi <varun.sethi@freescale.com>
6695093e3SVarun Sethi  */
7695093e3SVarun Sethi 
8695093e3SVarun Sethi #define pr_fmt(fmt)    "fsl-pamu-domain: %s: " fmt, __func__
9695093e3SVarun Sethi 
10695093e3SVarun Sethi #include "fsl_pamu_domain.h"
11695093e3SVarun Sethi 
12cd70d465SEmil Medve #include <sysdev/fsl_pci.h>
13cd70d465SEmil Medve 
14695093e3SVarun Sethi /*
15695093e3SVarun Sethi  * Global spinlock that needs to be held while
16695093e3SVarun Sethi  * configuring PAMU.
17695093e3SVarun Sethi  */
18695093e3SVarun Sethi static DEFINE_SPINLOCK(iommu_lock);
19695093e3SVarun Sethi 
20695093e3SVarun Sethi static struct kmem_cache *fsl_pamu_domain_cache;
21695093e3SVarun Sethi static struct kmem_cache *iommu_devinfo_cache;
22695093e3SVarun Sethi static DEFINE_SPINLOCK(device_domain_lock);
23695093e3SVarun Sethi 
243ff2dcc0SJoerg Roedel struct iommu_device pamu_iommu;	/* IOMMU core code handle */
253ff2dcc0SJoerg Roedel 
/* Convert a generic iommu_domain into its containing fsl_dma_domain */
static struct fsl_dma_domain *to_fsl_dma_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct fsl_dma_domain, iommu_domain);
}
308d4bfe40SJoerg Roedel 
31695093e3SVarun Sethi static int __init iommu_init_mempool(void)
32695093e3SVarun Sethi {
33695093e3SVarun Sethi 	fsl_pamu_domain_cache = kmem_cache_create("fsl_pamu_domain",
34695093e3SVarun Sethi 						  sizeof(struct fsl_dma_domain),
35695093e3SVarun Sethi 						  0,
36695093e3SVarun Sethi 						  SLAB_HWCACHE_ALIGN,
37695093e3SVarun Sethi 						  NULL);
38695093e3SVarun Sethi 	if (!fsl_pamu_domain_cache) {
39695093e3SVarun Sethi 		pr_debug("Couldn't create fsl iommu_domain cache\n");
40695093e3SVarun Sethi 		return -ENOMEM;
41695093e3SVarun Sethi 	}
42695093e3SVarun Sethi 
43695093e3SVarun Sethi 	iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
44695093e3SVarun Sethi 						sizeof(struct device_domain_info),
45695093e3SVarun Sethi 						0,
46695093e3SVarun Sethi 						SLAB_HWCACHE_ALIGN,
47695093e3SVarun Sethi 						NULL);
48695093e3SVarun Sethi 	if (!iommu_devinfo_cache) {
49695093e3SVarun Sethi 		pr_debug("Couldn't create devinfo cache\n");
50695093e3SVarun Sethi 		kmem_cache_destroy(fsl_pamu_domain_cache);
51695093e3SVarun Sethi 		return -ENOMEM;
52695093e3SVarun Sethi 	}
53695093e3SVarun Sethi 
54695093e3SVarun Sethi 	return 0;
55695093e3SVarun Sethi }
56695093e3SVarun Sethi 
57695093e3SVarun Sethi static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain,
58695093e3SVarun Sethi 			      u32 val)
59695093e3SVarun Sethi {
60695093e3SVarun Sethi 	int ret = 0, i;
61695093e3SVarun Sethi 	unsigned long flags;
62695093e3SVarun Sethi 
63695093e3SVarun Sethi 	spin_lock_irqsave(&iommu_lock, flags);
64ba58d121SChristoph Hellwig 	ret = pamu_update_paace_stash(liodn, val);
65695093e3SVarun Sethi 	if (ret) {
66cd70d465SEmil Medve 		pr_debug("Failed to update SPAACE %d field for liodn %d\n ",
67cd70d465SEmil Medve 			 i, liodn);
68695093e3SVarun Sethi 		spin_unlock_irqrestore(&iommu_lock, flags);
69695093e3SVarun Sethi 		return ret;
70695093e3SVarun Sethi 	}
71695093e3SVarun Sethi 
72695093e3SVarun Sethi 	spin_unlock_irqrestore(&iommu_lock, flags);
73695093e3SVarun Sethi 
74695093e3SVarun Sethi 	return ret;
75695093e3SVarun Sethi }
76695093e3SVarun Sethi 
/* Set the geometry parameters for a LIODN */
static int pamu_set_liodn(struct fsl_dma_domain *dma_domain, struct device *dev,
			  int liodn)
{
	struct iommu_domain *domain = &dma_domain->iommu_domain;
	struct iommu_domain_geometry *geom = &domain->geometry;
	u32 omi_index = ~(u32)0;
	unsigned long flags;
	int ret;

	/*
	 * Configure the omi_index at the geometry setup time.
	 * This is a static value which depends on the type of
	 * device and would not change thereafter.
	 */
	get_ome_index(&omi_index, dev);

	/* Hardware table updates are serialized by the global iommu_lock */
	spin_lock_irqsave(&iommu_lock, flags);
	ret = pamu_disable_liodn(liodn);
	if (ret)
		goto out_unlock;
	/* Program the PPAACE to span the full domain aperture */
	ret = pamu_config_ppaace(liodn, geom->aperture_start,
				 geom->aperture_end + 1, omi_index, 0,
				 dma_domain->snoop_id, dma_domain->stash_id, 0);
	if (ret)
		goto out_unlock;
	/*
	 * NOTE(review): this second call reprograms the same entry with an
	 * invalid OMI (~0) and QUERY|UPDATE permissions, overwriting the
	 * omi_index written just above — confirm the two-step sequence is
	 * intentional.
	 */
	ret = pamu_config_ppaace(liodn, geom->aperture_start,
				 geom->aperture_end + 1, ~(u32)0,
				 0, dma_domain->snoop_id, dma_domain->stash_id,
				 PAACE_AP_PERMS_QUERY | PAACE_AP_PERMS_UPDATE);
out_unlock:
	spin_unlock_irqrestore(&iommu_lock, flags);
	if (ret) {
		pr_debug("PAACE configuration failed for liodn %d\n",
			 liodn);
	}
	return ret;
}
115695093e3SVarun Sethi 
/*
 * Tear down one device<->domain link: unlink it, disable its LIODN in
 * hardware, clear the device's iommu private pointer and free the entry.
 * Called with the owning domain's domain_lock held (see detach_device()).
 */
static void remove_device_ref(struct device_domain_info *info)
{
	unsigned long flags;

	list_del(&info->link);
	/* Disable translation for this LIODN under the PAMU hardware lock */
	spin_lock_irqsave(&iommu_lock, flags);
	pamu_disable_liodn(info->liodn);
	spin_unlock_irqrestore(&iommu_lock, flags);
	/* Clear the back-pointer and free the bookkeeping entry */
	spin_lock_irqsave(&device_domain_lock, flags);
	dev_iommu_priv_set(info->dev, NULL);
	kmem_cache_free(iommu_devinfo_cache, info);
	spin_unlock_irqrestore(&device_domain_lock, flags);
}
129695093e3SVarun Sethi 
/*
 * Detach @dev from @dma_domain.  A NULL @dev detaches every device
 * currently attached to the domain.
 */
static void detach_device(struct device *dev, struct fsl_dma_domain *dma_domain)
{
	struct device_domain_info *info, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	/* Remove the device from the domain device list */
	list_for_each_entry_safe(info, tmp, &dma_domain->devices, link) {
		if (!dev || (info->dev == dev))
			remove_device_ref(info);
	}
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
}
143695093e3SVarun Sethi 
144695093e3SVarun Sethi static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct device *dev)
145695093e3SVarun Sethi {
146695093e3SVarun Sethi 	struct device_domain_info *info, *old_domain_info;
147695093e3SVarun Sethi 	unsigned long flags;
148695093e3SVarun Sethi 
149695093e3SVarun Sethi 	spin_lock_irqsave(&device_domain_lock, flags);
150695093e3SVarun Sethi 	/*
151695093e3SVarun Sethi 	 * Check here if the device is already attached to domain or not.
152695093e3SVarun Sethi 	 * If the device is already attached to a domain detach it.
153695093e3SVarun Sethi 	 */
1542263d818SJoerg Roedel 	old_domain_info = dev_iommu_priv_get(dev);
155695093e3SVarun Sethi 	if (old_domain_info && old_domain_info->domain != dma_domain) {
156695093e3SVarun Sethi 		spin_unlock_irqrestore(&device_domain_lock, flags);
157695093e3SVarun Sethi 		detach_device(dev, old_domain_info->domain);
158695093e3SVarun Sethi 		spin_lock_irqsave(&device_domain_lock, flags);
159695093e3SVarun Sethi 	}
160695093e3SVarun Sethi 
161695093e3SVarun Sethi 	info = kmem_cache_zalloc(iommu_devinfo_cache, GFP_ATOMIC);
162695093e3SVarun Sethi 
163695093e3SVarun Sethi 	info->dev = dev;
164695093e3SVarun Sethi 	info->liodn = liodn;
165695093e3SVarun Sethi 	info->domain = dma_domain;
166695093e3SVarun Sethi 
167695093e3SVarun Sethi 	list_add(&info->link, &dma_domain->devices);
168695093e3SVarun Sethi 	/*
169695093e3SVarun Sethi 	 * In case of devices with multiple LIODNs just store
170695093e3SVarun Sethi 	 * the info for the first LIODN as all
171695093e3SVarun Sethi 	 * LIODNs share the same domain
172695093e3SVarun Sethi 	 */
1732263d818SJoerg Roedel 	if (!dev_iommu_priv_get(dev))
1742263d818SJoerg Roedel 		dev_iommu_priv_set(dev, info);
175695093e3SVarun Sethi 	spin_unlock_irqrestore(&device_domain_lock, flags);
176695093e3SVarun Sethi }
177695093e3SVarun Sethi 
178695093e3SVarun Sethi static phys_addr_t fsl_pamu_iova_to_phys(struct iommu_domain *domain,
179695093e3SVarun Sethi 					 dma_addr_t iova)
180695093e3SVarun Sethi {
181cd70d465SEmil Medve 	if (iova < domain->geometry.aperture_start ||
182cd70d465SEmil Medve 	    iova > domain->geometry.aperture_end)
183695093e3SVarun Sethi 		return 0;
184376dfd2aSChristoph Hellwig 	return iova;
185695093e3SVarun Sethi }
186695093e3SVarun Sethi 
187b7eb6785SJoerg Roedel static bool fsl_pamu_capable(enum iommu_cap cap)
188695093e3SVarun Sethi {
189695093e3SVarun Sethi 	return cap == IOMMU_CAP_CACHE_COHERENCY;
190695093e3SVarun Sethi }
191695093e3SVarun Sethi 
1928d4bfe40SJoerg Roedel static void fsl_pamu_domain_free(struct iommu_domain *domain)
193695093e3SVarun Sethi {
1948d4bfe40SJoerg Roedel 	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
195695093e3SVarun Sethi 
196695093e3SVarun Sethi 	/* remove all the devices from the device list */
197695093e3SVarun Sethi 	detach_device(NULL, dma_domain);
198695093e3SVarun Sethi 
199695093e3SVarun Sethi 	dma_domain->enabled = 0;
200695093e3SVarun Sethi 
201695093e3SVarun Sethi 	kmem_cache_free(fsl_pamu_domain_cache, dma_domain);
202695093e3SVarun Sethi }
203695093e3SVarun Sethi 
2048d4bfe40SJoerg Roedel static struct iommu_domain *fsl_pamu_domain_alloc(unsigned type)
205695093e3SVarun Sethi {
206695093e3SVarun Sethi 	struct fsl_dma_domain *dma_domain;
207695093e3SVarun Sethi 
2088d4bfe40SJoerg Roedel 	if (type != IOMMU_DOMAIN_UNMANAGED)
2098d4bfe40SJoerg Roedel 		return NULL;
2108d4bfe40SJoerg Roedel 
211c8224508SChristoph Hellwig 	dma_domain = kmem_cache_zalloc(fsl_pamu_domain_cache, GFP_KERNEL);
212c8224508SChristoph Hellwig 	if (!dma_domain)
2138d4bfe40SJoerg Roedel 		return NULL;
214c8224508SChristoph Hellwig 
215c8224508SChristoph Hellwig 	dma_domain->stash_id = ~(u32)0;
216c8224508SChristoph Hellwig 	dma_domain->snoop_id = ~(u32)0;
217c8224508SChristoph Hellwig 	INIT_LIST_HEAD(&dma_domain->devices);
218c8224508SChristoph Hellwig 	spin_lock_init(&dma_domain->domain_lock);
219c8224508SChristoph Hellwig 
220c8224508SChristoph Hellwig 	/* default geometry 64 GB i.e. maximum system address */
2218d4bfe40SJoerg Roedel 	dma_domain->iommu_domain. geometry.aperture_start = 0;
2228d4bfe40SJoerg Roedel 	dma_domain->iommu_domain.geometry.aperture_end = (1ULL << 36) - 1;
2238d4bfe40SJoerg Roedel 	dma_domain->iommu_domain.geometry.force_aperture = true;
224695093e3SVarun Sethi 
2258d4bfe40SJoerg Roedel 	return &dma_domain->iommu_domain;
226695093e3SVarun Sethi }
227695093e3SVarun Sethi 
228695093e3SVarun Sethi /* Update stash destination for all LIODNs associated with the domain */
229695093e3SVarun Sethi static int update_domain_stash(struct fsl_dma_domain *dma_domain, u32 val)
230695093e3SVarun Sethi {
231695093e3SVarun Sethi 	struct device_domain_info *info;
232695093e3SVarun Sethi 	int ret = 0;
233695093e3SVarun Sethi 
234695093e3SVarun Sethi 	list_for_each_entry(info, &dma_domain->devices, link) {
235695093e3SVarun Sethi 		ret = update_liodn_stash(info->liodn, dma_domain, val);
236695093e3SVarun Sethi 		if (ret)
237695093e3SVarun Sethi 			break;
238695093e3SVarun Sethi 	}
239695093e3SVarun Sethi 
240695093e3SVarun Sethi 	return ret;
241695093e3SVarun Sethi }
242695093e3SVarun Sethi 
/*
 * Attach @dev to @domain: validate each LIODN from the device tree,
 * record the device on the domain's list and program its PAACE entry.
 * Returns 0 on success or -EINVAL on a missing/invalid "fsl,liodn".
 */
static int fsl_pamu_attach_device(struct iommu_domain *domain,
				  struct device *dev)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
	unsigned long flags;
	int len, ret = 0, i;
	const u32 *liodn;
	struct pci_dev *pdev = NULL;
	struct pci_controller *pci_ctl;

	/*
	 * Use LIODN of the PCI controller while attaching a
	 * PCI device.
	 */
	if (dev_is_pci(dev)) {
		pdev = to_pci_dev(dev);
		pci_ctl = pci_bus_to_host(pdev->bus);
		/*
		 * make dev point to pci controller device
		 * so we can get the LIODN programmed by
		 * u-boot.
		 */
		dev = pci_ctl->parent;
	}

	/* "fsl,liodn" may carry several LIODNs; len is its byte length */
	liodn = of_get_property(dev->of_node, "fsl,liodn", &len);
	if (!liodn) {
		pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node);
		return -EINVAL;
	}

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	for (i = 0; i < len / sizeof(u32); i++) {
		/* Ensure that LIODN value is valid */
		if (liodn[i] >= PAACE_NUMBER_ENTRIES) {
			pr_debug("Invalid liodn %d, attach device failed for %pOF\n",
				 liodn[i], dev->of_node);
			ret = -EINVAL;
			break;
		}

		/* Record the device first, then program its PAACE entry */
		attach_device(dma_domain, liodn[i], dev);
		ret = pamu_set_liodn(dma_domain, dev, liodn[i]);
		if (ret)
			break;
	}
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
	return ret;
}
292695093e3SVarun Sethi 
293695093e3SVarun Sethi static void fsl_pamu_detach_device(struct iommu_domain *domain,
294695093e3SVarun Sethi 				   struct device *dev)
295695093e3SVarun Sethi {
2968d4bfe40SJoerg Roedel 	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
297695093e3SVarun Sethi 	const u32 *prop;
298695093e3SVarun Sethi 	int len;
299695093e3SVarun Sethi 	struct pci_dev *pdev = NULL;
300695093e3SVarun Sethi 	struct pci_controller *pci_ctl;
301695093e3SVarun Sethi 
302695093e3SVarun Sethi 	/*
303695093e3SVarun Sethi 	 * Use LIODN of the PCI controller while detaching a
304695093e3SVarun Sethi 	 * PCI device.
305695093e3SVarun Sethi 	 */
306b3eb76d1SYijing Wang 	if (dev_is_pci(dev)) {
307695093e3SVarun Sethi 		pdev = to_pci_dev(dev);
308695093e3SVarun Sethi 		pci_ctl = pci_bus_to_host(pdev->bus);
309695093e3SVarun Sethi 		/*
310695093e3SVarun Sethi 		 * make dev point to pci controller device
311695093e3SVarun Sethi 		 * so we can get the LIODN programmed by
312695093e3SVarun Sethi 		 * u-boot.
313695093e3SVarun Sethi 		 */
314695093e3SVarun Sethi 		dev = pci_ctl->parent;
315695093e3SVarun Sethi 	}
316695093e3SVarun Sethi 
317695093e3SVarun Sethi 	prop = of_get_property(dev->of_node, "fsl,liodn", &len);
318695093e3SVarun Sethi 	if (prop)
319695093e3SVarun Sethi 		detach_device(dev, dma_domain);
320695093e3SVarun Sethi 	else
3216bd4f1c7SRob Herring 		pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node);
322695093e3SVarun Sethi }
323695093e3SVarun Sethi 
324695093e3SVarun Sethi /* Set the domain stash attribute */
3254eeb96f6SChristoph Hellwig int fsl_pamu_configure_l1_stash(struct iommu_domain *domain, u32 cpu)
326695093e3SVarun Sethi {
3274eeb96f6SChristoph Hellwig 	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
328695093e3SVarun Sethi 	unsigned long flags;
329695093e3SVarun Sethi 	int ret;
330695093e3SVarun Sethi 
331695093e3SVarun Sethi 	spin_lock_irqsave(&dma_domain->domain_lock, flags);
3324eeb96f6SChristoph Hellwig 	dma_domain->stash_id = get_stash_id(PAMU_ATTR_CACHE_L1, cpu);
333695093e3SVarun Sethi 	if (dma_domain->stash_id == ~(u32)0) {
334695093e3SVarun Sethi 		pr_debug("Invalid stash attributes\n");
335695093e3SVarun Sethi 		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
336695093e3SVarun Sethi 		return -EINVAL;
337695093e3SVarun Sethi 	}
338695093e3SVarun Sethi 	ret = update_domain_stash(dma_domain, dma_domain->stash_id);
339695093e3SVarun Sethi 	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
340695093e3SVarun Sethi 
341695093e3SVarun Sethi 	return ret;
342695093e3SVarun Sethi }
343695093e3SVarun Sethi 
344695093e3SVarun Sethi /* Configure domain dma state i.e. enable/disable DMA */
345695093e3SVarun Sethi static int configure_domain_dma_state(struct fsl_dma_domain *dma_domain, bool enable)
346695093e3SVarun Sethi {
347695093e3SVarun Sethi 	struct device_domain_info *info;
348695093e3SVarun Sethi 	unsigned long flags;
349695093e3SVarun Sethi 	int ret;
350695093e3SVarun Sethi 
351695093e3SVarun Sethi 	spin_lock_irqsave(&dma_domain->domain_lock, flags);
352695093e3SVarun Sethi 	dma_domain->enabled = enable;
353cd70d465SEmil Medve 	list_for_each_entry(info, &dma_domain->devices, link) {
354695093e3SVarun Sethi 		ret = (enable) ? pamu_enable_liodn(info->liodn) :
355695093e3SVarun Sethi 			pamu_disable_liodn(info->liodn);
356695093e3SVarun Sethi 		if (ret)
357695093e3SVarun Sethi 			pr_debug("Unable to set dma state for liodn %d",
358695093e3SVarun Sethi 				 info->liodn);
359695093e3SVarun Sethi 	}
360695093e3SVarun Sethi 	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
361695093e3SVarun Sethi 
362695093e3SVarun Sethi 	return 0;
363695093e3SVarun Sethi }
364695093e3SVarun Sethi 
365695093e3SVarun Sethi static int fsl_pamu_set_domain_attr(struct iommu_domain *domain,
366695093e3SVarun Sethi 				    enum iommu_attr attr_type, void *data)
367695093e3SVarun Sethi {
3688d4bfe40SJoerg Roedel 	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
369695093e3SVarun Sethi 	int ret = 0;
370695093e3SVarun Sethi 
371695093e3SVarun Sethi 	switch (attr_type) {
372695093e3SVarun Sethi 	case DOMAIN_ATTR_FSL_PAMU_ENABLE:
373695093e3SVarun Sethi 		ret = configure_domain_dma_state(dma_domain, *(int *)data);
374695093e3SVarun Sethi 		break;
375695093e3SVarun Sethi 	default:
376695093e3SVarun Sethi 		pr_debug("Unsupported attribute type\n");
377695093e3SVarun Sethi 		ret = -EINVAL;
378695093e3SVarun Sethi 		break;
379cd70d465SEmil Medve 	}
380695093e3SVarun Sethi 
381695093e3SVarun Sethi 	return ret;
382695093e3SVarun Sethi }
383695093e3SVarun Sethi 
/* Return the device's existing iommu group, or allocate a fresh one */
static struct iommu_group *get_device_iommu_group(struct device *dev)
{
	struct iommu_group *grp = iommu_group_get(dev);

	return grp ? grp : iommu_group_alloc();
}
394695093e3SVarun Sethi 
395695093e3SVarun Sethi static  bool check_pci_ctl_endpt_part(struct pci_controller *pci_ctl)
396695093e3SVarun Sethi {
397695093e3SVarun Sethi 	u32 version;
398695093e3SVarun Sethi 
399695093e3SVarun Sethi 	/* Check the PCI controller version number by readding BRR1 register */
400695093e3SVarun Sethi 	version = in_be32(pci_ctl->cfg_addr + (PCI_FSL_BRR1 >> 2));
401695093e3SVarun Sethi 	version &= PCI_FSL_BRR1_VER;
402695093e3SVarun Sethi 	/* If PCI controller version is >= 0x204 we can partition endpoints */
403cd70d465SEmil Medve 	return version >= 0x204;
404695093e3SVarun Sethi }
405695093e3SVarun Sethi 
406695093e3SVarun Sethi /* Get iommu group information from peer devices or devices on the parent bus */
407695093e3SVarun Sethi static struct iommu_group *get_shared_pci_device_group(struct pci_dev *pdev)
408695093e3SVarun Sethi {
409695093e3SVarun Sethi 	struct pci_dev *tmp;
410695093e3SVarun Sethi 	struct iommu_group *group;
411695093e3SVarun Sethi 	struct pci_bus *bus = pdev->bus;
412695093e3SVarun Sethi 
413695093e3SVarun Sethi 	/*
414695093e3SVarun Sethi 	 * Traverese the pci bus device list to get
415695093e3SVarun Sethi 	 * the shared iommu group.
416695093e3SVarun Sethi 	 */
417695093e3SVarun Sethi 	while (bus) {
418695093e3SVarun Sethi 		list_for_each_entry(tmp, &bus->devices, bus_list) {
419695093e3SVarun Sethi 			if (tmp == pdev)
420695093e3SVarun Sethi 				continue;
421695093e3SVarun Sethi 			group = iommu_group_get(&tmp->dev);
422695093e3SVarun Sethi 			if (group)
423695093e3SVarun Sethi 				return group;
424695093e3SVarun Sethi 		}
425695093e3SVarun Sethi 
426695093e3SVarun Sethi 		bus = bus->parent;
427695093e3SVarun Sethi 	}
428695093e3SVarun Sethi 
429695093e3SVarun Sethi 	return NULL;
430695093e3SVarun Sethi }
431695093e3SVarun Sethi 
/*
 * Work out which iommu group a PCI device belongs to.  With endpoint
 * partitioning each device gets its own group; otherwise all devices
 * share (and inherit) the PCI controller's group.
 */
static struct iommu_group *get_pci_device_group(struct pci_dev *pdev)
{
	struct pci_controller *pci_ctl;
	bool pci_endpt_partitioning;
	struct iommu_group *group = NULL;

	pci_ctl = pci_bus_to_host(pdev->bus);
	pci_endpt_partitioning = check_pci_ctl_endpt_part(pci_ctl);
	/* We can partition PCIe devices so assign device group to the device */
	if (pci_endpt_partitioning) {
		group = pci_device_group(&pdev->dev);

		/*
		 * PCIe controller is not a paritionable entity
		 * free the controller device iommu_group.
		 */
		if (pci_ctl->parent->iommu_group)
			iommu_group_remove_device(pci_ctl->parent);
	} else {
		/*
		 * All devices connected to the controller will share the
		 * PCI controllers device group. If this is the first
		 * device to be probed for the pci controller, copy the
		 * device group information from the PCI controller device
		 * node and remove the PCI controller iommu group.
		 * For subsequent devices, the iommu group information can
		 * be obtained from sibling devices (i.e. from the bus_devices
		 * link list).
		 */
		if (pci_ctl->parent->iommu_group) {
			group = get_device_iommu_group(pci_ctl->parent);
			iommu_group_remove_device(pci_ctl->parent);
		} else {
			group = get_shared_pci_device_group(pdev);
		}
	}

	/* No group found or created anywhere: report -ENODEV to the caller */
	if (!group)
		group = ERR_PTR(-ENODEV);

	return group;
}
474695093e3SVarun Sethi 
475d5e58297SJoerg Roedel static struct iommu_group *fsl_pamu_device_group(struct device *dev)
476695093e3SVarun Sethi {
4773170447cSVarun Sethi 	struct iommu_group *group = ERR_PTR(-ENODEV);
478d5e58297SJoerg Roedel 	int len;
479695093e3SVarun Sethi 
480695093e3SVarun Sethi 	/*
481695093e3SVarun Sethi 	 * For platform devices we allocate a separate group for
482695093e3SVarun Sethi 	 * each of the devices.
483695093e3SVarun Sethi 	 */
484d5e58297SJoerg Roedel 	if (dev_is_pci(dev))
485d5e58297SJoerg Roedel 		group = get_pci_device_group(to_pci_dev(dev));
486d5e58297SJoerg Roedel 	else if (of_get_property(dev->of_node, "fsl,liodn", &len))
487695093e3SVarun Sethi 		group = get_device_iommu_group(dev);
488d5e58297SJoerg Roedel 
489d5e58297SJoerg Roedel 	return group;
490695093e3SVarun Sethi }
491695093e3SVarun Sethi 
static struct iommu_device *fsl_pamu_probe_device(struct device *dev)
{
	/* There is a single PAMU instance; every device maps to it */
	return &pamu_iommu;
}
496695093e3SVarun Sethi 
static void fsl_pamu_release_device(struct device *dev)
{
	/* Nothing to do: probe_device() allocates no per-device state */
}
500695093e3SVarun Sethi 
/* IOMMU callbacks implemented by the PAMU driver */
static const struct iommu_ops fsl_pamu_ops = {
	.capable	= fsl_pamu_capable,
	.domain_alloc	= fsl_pamu_domain_alloc,
	.domain_free    = fsl_pamu_domain_free,
	.attach_dev	= fsl_pamu_attach_device,
	.detach_dev	= fsl_pamu_detach_device,
	.iova_to_phys	= fsl_pamu_iova_to_phys,
	.domain_set_attr = fsl_pamu_set_domain_attr,
	.probe_device	= fsl_pamu_probe_device,
	.release_device	= fsl_pamu_release_device,
	.device_group   = fsl_pamu_device_group,
};
513695093e3SVarun Sethi 
/*
 * Driver init: create slab caches, register the PAMU iommu device with
 * the core (sysfs name "iommu0"), and claim the platform and PCI buses.
 */
int __init pamu_domain_init(void)
{
	int ret = 0;

	ret = iommu_init_mempool();
	if (ret)
		return ret;

	ret = iommu_device_sysfs_add(&pamu_iommu, NULL, NULL, "iommu0");
	if (ret)
		return ret;

	iommu_device_set_ops(&pamu_iommu, &fsl_pamu_ops);

	ret = iommu_device_register(&pamu_iommu);
	if (ret) {
		/* Roll back the sysfs registration on failure */
		iommu_device_sysfs_remove(&pamu_iommu);
		pr_err("Can't register iommu device\n");
		return ret;
	}

	/* PAMU handles DMA for both platform and PCI devices */
	bus_set_iommu(&platform_bus_type, &fsl_pamu_ops);
	bus_set_iommu(&pci_bus_type, &fsl_pamu_ops);

	return ret;
}
540