// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (C) 2013 Freescale Semiconductor, Inc.
 * Author: Varun Sethi <varun.sethi@freescale.com>
 */

#define pr_fmt(fmt)    "fsl-pamu-domain: %s: " fmt, __func__

#include "fsl_pamu_domain.h"

#include <sysdev/fsl_pci.h>

/*
 * Global spinlock that needs to be held while
 * configuring PAMU.
 */
static DEFINE_SPINLOCK(iommu_lock);

static struct kmem_cache *fsl_pamu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static DEFINE_SPINLOCK(device_domain_lock);

struct iommu_device pamu_iommu;	/* IOMMU core code handle */

static struct fsl_dma_domain *to_fsl_dma_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct fsl_dma_domain, iommu_domain);
}

static int __init iommu_init_mempool(void)
{
	fsl_pamu_domain_cache = kmem_cache_create("fsl_pamu_domain",
						  sizeof(struct fsl_dma_domain),
						  0,
						  SLAB_HWCACHE_ALIGN,
						  NULL);
	if (!fsl_pamu_domain_cache) {
		pr_debug("Couldn't create fsl iommu_domain cache\n");
		return -ENOMEM;
	}

	iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
						sizeof(struct device_domain_info),
						0,
						SLAB_HWCACHE_ALIGN,
						NULL);
	if (!iommu_devinfo_cache) {
		pr_debug("Couldn't create devinfo cache\n");
		kmem_cache_destroy(fsl_pamu_domain_cache);
		return -ENOMEM;
	}

	return 0;
}

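/*
 * Translate an I/O virtual address into the physical address it maps to,
 * based on the domain's DMA window (or subwindow) configuration. Returns
 * 0 if no window covering the iova has been set up yet.
 */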
static phys_addr_t get_phys_addr(struct fsl_dma_domain *dma_domain, dma_addr_t iova)
{
	u32 win_cnt = dma_domain->win_cnt;
	struct dma_window *win_ptr = &dma_domain->win_arr[0];
	struct iommu_domain_geometry *geom;

	geom = &dma_domain->iommu_domain.geometry;

	if (!win_cnt) {
		pr_debug("Number of windows/geometry not configured for the domain\n");
		return 0;
	}

	if (win_cnt > 1) {
		u64 subwin_size;
		dma_addr_t subwin_iova;
		u32 wnd;

		subwin_size = (geom->aperture_end + 1) >> ilog2(win_cnt);
		subwin_iova = iova & ~(subwin_size - 1);
		wnd = (subwin_iova - geom->aperture_start) >> ilog2(subwin_size);
		win_ptr = &dma_domain->win_arr[wnd];
	}

	if (win_ptr->valid)
		return win_ptr->paddr + (iova & (win_ptr->size - 1));

	return 0;
}

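/*
 * Program a subwindow PAACE entry (SPAACE) for every valid subwindow of
 * the domain, mirroring the domain's window array into the PAMU tables
 * for the given LIODN.
 */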
static int map_subwins(int liodn, struct fsl_dma_domain *dma_domain)
{
	struct dma_window *sub_win_ptr = &dma_domain->win_arr[0];
	int i, ret = 0;
	unsigned long rpn, flags;

	for (i = 0; i < dma_domain->win_cnt; i++) {
		if (sub_win_ptr[i].valid) {
			rpn = sub_win_ptr[i].paddr >> PAMU_PAGE_SHIFT;
			spin_lock_irqsave(&iommu_lock, flags);
			ret = pamu_config_spaace(liodn, dma_domain->win_cnt, i,
						 sub_win_ptr[i].size,
						 ~(u32)0,
						 rpn,
						 dma_domain->snoop_id,
						 dma_domain->stash_id,
						 (i > 0) ? 1 : 0,
						 sub_win_ptr[i].prot);
			spin_unlock_irqrestore(&iommu_lock, flags);
			if (ret) {
				pr_debug("SPAACE configuration failed for liodn %d\n",
					 liodn);
				return ret;
			}
		}
	}

	return ret;
}

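/*
 * Program the primary PAACE entry for the LIODN from the domain's single
 * DMA window.
 */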
static int map_win(int liodn, struct fsl_dma_domain *dma_domain)
{
	int ret;
	struct dma_window *wnd = &dma_domain->win_arr[0];
	phys_addr_t wnd_addr = dma_domain->iommu_domain.geometry.aperture_start;
	unsigned long flags;

	spin_lock_irqsave(&iommu_lock, flags);
	ret = pamu_config_ppaace(liodn, wnd_addr,
				 wnd->size,
				 ~(u32)0,
				 wnd->paddr >> PAMU_PAGE_SHIFT,
				 dma_domain->snoop_id, dma_domain->stash_id,
				 0, wnd->prot);
	spin_unlock_irqrestore(&iommu_lock, flags);
	if (ret)
		pr_debug("PAACE configuration failed for liodn %d\n", liodn);

	return ret;
}

/* Map the DMA window corresponding to the LIODN */
static int map_liodn(int liodn, struct fsl_dma_domain *dma_domain)
{
	if (dma_domain->win_cnt > 1)
		return map_subwins(liodn, dma_domain);
	else
		return map_win(liodn, dma_domain);
}

/* Update window/subwindow mapping for the LIODN */
static int update_liodn(int liodn, struct fsl_dma_domain *dma_domain, u32 wnd_nr)
{
	int ret;
	struct dma_window *wnd = &dma_domain->win_arr[wnd_nr];
	unsigned long flags;

	spin_lock_irqsave(&iommu_lock, flags);
	if (dma_domain->win_cnt > 1) {
		ret = pamu_config_spaace(liodn, dma_domain->win_cnt, wnd_nr,
					 wnd->size,
					 ~(u32)0,
					 wnd->paddr >> PAMU_PAGE_SHIFT,
					 dma_domain->snoop_id,
					 dma_domain->stash_id,
					 (wnd_nr > 0) ? 1 : 0,
					 wnd->prot);
		if (ret)
			pr_debug("Subwindow reconfiguration failed for liodn %d\n",
				 liodn);
	} else {
		phys_addr_t wnd_addr;

		wnd_addr = dma_domain->iommu_domain.geometry.aperture_start;

		ret = pamu_config_ppaace(liodn, wnd_addr,
					 wnd->size,
					 ~(u32)0,
					 wnd->paddr >> PAMU_PAGE_SHIFT,
					 dma_domain->snoop_id, dma_domain->stash_id,
					 0, wnd->prot);
		if (ret)
			pr_debug("Window reconfiguration failed for liodn %d\n",
				 liodn);
	}

	spin_unlock_irqrestore(&iommu_lock, flags);

	return ret;
}

static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain,
			      u32 val)
{
	int ret = 0, i;
	unsigned long flags;

	spin_lock_irqsave(&iommu_lock, flags);
	if (!dma_domain->win_arr) {
		pr_debug("Windows not configured, stash destination update failed for liodn %d\n",
			 liodn);
		spin_unlock_irqrestore(&iommu_lock, flags);
		return -EINVAL;
	}

	for (i = 0; i < dma_domain->win_cnt; i++) {
		ret = pamu_update_paace_stash(liodn, i, val);
		if (ret) {
			pr_debug("Failed to update SPAACE %d field for liodn %d\n",
				 i, liodn);
			spin_unlock_irqrestore(&iommu_lock, flags);
			return ret;
		}
	}

	spin_unlock_irqrestore(&iommu_lock, flags);

	return ret;
}

/* Set the geometry parameters for a LIODN */
static int pamu_set_liodn(int liodn, struct device *dev,
			  struct fsl_dma_domain *dma_domain,
			  struct iommu_domain_geometry *geom_attr,
			  u32 win_cnt)
{
	phys_addr_t window_addr, window_size;
	phys_addr_t subwin_size;
	int ret = 0, i;
	u32 omi_index = ~(u32)0;
	unsigned long flags;

	/*
	 * Configure the omi_index at the geometry setup time.
	 * This is a static value which depends on the type of
	 * device and would not change thereafter.
	 */
	get_ome_index(&omi_index, dev);

	window_addr = geom_attr->aperture_start;
	window_size = geom_attr->aperture_end + 1;

	spin_lock_irqsave(&iommu_lock, flags);
	ret = pamu_disable_liodn(liodn);
	if (!ret)
		ret = pamu_config_ppaace(liodn, window_addr, window_size, omi_index,
					 0, dma_domain->snoop_id,
					 dma_domain->stash_id, win_cnt, 0);
	spin_unlock_irqrestore(&iommu_lock, flags);
	if (ret) {
		pr_debug("PAACE configuration failed for liodn %d, win_cnt =%d\n",
			 liodn, win_cnt);
		return ret;
	}

	if (win_cnt > 1) {
		subwin_size = window_size >> ilog2(win_cnt);
		for (i = 0; i < win_cnt; i++) {
			spin_lock_irqsave(&iommu_lock, flags);
			ret = pamu_disable_spaace(liodn, i);
			if (!ret)
				ret = pamu_config_spaace(liodn, win_cnt, i,
							 subwin_size, omi_index,
							 0, dma_domain->snoop_id,
							 dma_domain->stash_id,
							 0, 0);
			spin_unlock_irqrestore(&iommu_lock, flags);
			if (ret) {
				pr_debug("SPAACE configuration failed for liodn %d\n",
					 liodn);
				return ret;
			}
		}
	}

	return ret;
}

static int check_size(u64 size, dma_addr_t iova)
{
	/*
	 * Size must be a power of two and at least be equal
	 * to PAMU page size.
	 */
	if ((size & (size - 1)) || size < PAMU_PAGE_SIZE) {
		pr_debug("Size too small or not a power of two\n");
		return -EINVAL;
	}

	/* iova must be page size aligned */
	if (iova & (size - 1)) {
		pr_debug("Address is not aligned with window size\n");
		return -EINVAL;
	}

	return 0;
}

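/*
 * Allocate and initialize a PAMU DMA domain with invalid stash/snoop ids
 * and the maximum supported subwindow count.
 */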
static struct fsl_dma_domain *iommu_alloc_dma_domain(void)
{
	struct fsl_dma_domain *domain;

	domain = kmem_cache_zalloc(fsl_pamu_domain_cache, GFP_KERNEL);
	if (!domain)
		return NULL;

	domain->stash_id = ~(u32)0;
	domain->snoop_id = ~(u32)0;
	domain->win_cnt = pamu_get_max_subwin_cnt();

	INIT_LIST_HEAD(&domain->devices);

	spin_lock_init(&domain->domain_lock);

	return domain;
}

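/*
 * Drop a device's reference from its domain: free any subwindows, disable
 * the LIODN and release the per-device info structure.
 */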
static void remove_device_ref(struct device_domain_info *info, u32 win_cnt)
{
	unsigned long flags;

	list_del(&info->link);
	spin_lock_irqsave(&iommu_lock, flags);
	if (win_cnt > 1)
		pamu_free_subwins(info->liodn);
	pamu_disable_liodn(info->liodn);
	spin_unlock_irqrestore(&iommu_lock, flags);
	spin_lock_irqsave(&device_domain_lock, flags);
	dev_iommu_priv_set(info->dev, NULL);
	kmem_cache_free(iommu_devinfo_cache, info);
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

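/*
 * Detach a device (or, if dev is NULL, every device) from the DMA domain
 * and release the associated LIODN state.
 */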
static void detach_device(struct device *dev, struct fsl_dma_domain *dma_domain)
{
	struct device_domain_info *info, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	/* Remove the device from the domain device list */
	list_for_each_entry_safe(info, tmp, &dma_domain->devices, link) {
		if (!dev || (info->dev == dev))
			remove_device_ref(info, dma_domain->win_cnt);
	}
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
}

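/*
 * Track a LIODN/device pairing in the domain's device list. A device that
 * is already attached to a different domain is detached first.
 */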
static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct device *dev)
{
	struct device_domain_info *info, *old_domain_info;
	unsigned long flags;

	spin_lock_irqsave(&device_domain_lock, flags);
	/*
	 * Check if the device is already attached to a domain. If it is
	 * attached to a different domain, detach it from that domain first.
	 */
	old_domain_info = dev_iommu_priv_get(dev);
	if (old_domain_info && old_domain_info->domain != dma_domain) {
		spin_unlock_irqrestore(&device_domain_lock, flags);
		detach_device(dev, old_domain_info->domain);
		spin_lock_irqsave(&device_domain_lock, flags);
	}

	info = kmem_cache_zalloc(iommu_devinfo_cache, GFP_ATOMIC);
	if (!info) {
		spin_unlock_irqrestore(&device_domain_lock, flags);
		return;
	}

	info->dev = dev;
	info->liodn = liodn;
	info->domain = dma_domain;

	list_add(&info->link, &dma_domain->devices);
	/*
	 * In case of devices with multiple LIODNs just store
	 * the info for the first LIODN as all
	 * LIODNs share the same domain
	 */
	if (!dev_iommu_priv_get(dev))
		dev_iommu_priv_set(dev, info);
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

static phys_addr_t fsl_pamu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);

	if (iova < domain->geometry.aperture_start ||
	    iova > domain->geometry.aperture_end)
		return 0;

	return get_phys_addr(dma_domain, iova);
}

static bool fsl_pamu_capable(enum iommu_cap cap)
{
	return cap == IOMMU_CAP_CACHE_COHERENCY;
}

static void fsl_pamu_domain_free(struct iommu_domain *domain)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);

	/* remove all the devices from the device list */
	detach_device(NULL, dma_domain);

	dma_domain->enabled = 0;
	dma_domain->mapped = 0;

	kmem_cache_free(fsl_pamu_domain_cache, dma_domain);
}

static struct iommu_domain *fsl_pamu_domain_alloc(unsigned type)
{
	struct fsl_dma_domain *dma_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	dma_domain = iommu_alloc_dma_domain();
	if (!dma_domain) {
		pr_debug("dma_domain allocation failed\n");
		return NULL;
	}
	/* default geometry: 64 GB, i.e. the maximum system address */
	dma_domain->iommu_domain.geometry.aperture_start = 0;
	dma_domain->iommu_domain.geometry.aperture_end = (1ULL << 36) - 1;
	dma_domain->iommu_domain.geometry.force_aperture = true;

	return &dma_domain->iommu_domain;
}

/* Configure geometry settings for all LIODNs associated with domain */
static int pamu_set_domain_geometry(struct fsl_dma_domain *dma_domain,
				    struct iommu_domain_geometry *geom_attr,
				    u32 win_cnt)
{
	struct device_domain_info *info;
	int ret = 0;

	list_for_each_entry(info, &dma_domain->devices, link) {
		ret = pamu_set_liodn(info->liodn, info->dev, dma_domain,
				     geom_attr, win_cnt);
		if (ret)
			break;
	}

	return ret;
}

/* Update stash destination for all LIODNs associated with the domain */
static int update_domain_stash(struct fsl_dma_domain *dma_domain, u32 val)
{
	struct device_domain_info *info;
	int ret = 0;

	list_for_each_entry(info, &dma_domain->devices, link) {
		ret = update_liodn_stash(info->liodn, dma_domain, val);
		if (ret)
			break;
	}

	return ret;
}

/* Update domain mappings for all LIODNs associated with the domain */
static int update_domain_mapping(struct fsl_dma_domain *dma_domain, u32 wnd_nr)
{
	struct device_domain_info *info;
	int ret = 0;

	list_for_each_entry(info, &dma_domain->devices, link) {
		ret = update_liodn(info->liodn, dma_domain, wnd_nr);
		if (ret)
			break;
	}
	return ret;
}

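/*
 * Map DMA window wnd_nr of the domain to the physical range starting at
 * paddr. The window size must not exceed the per-window share of the
 * aperture, and a window that is already valid must be disabled before it
 * can be remapped. Callers typically set DOMAIN_ATTR_WINDOWS first and
 * then enable each window through the core iommu_domain_window_enable()
 * helper.
 */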
static int fsl_pamu_window_enable(struct iommu_domain *domain, u32 wnd_nr,
				  phys_addr_t paddr, u64 size, int prot)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
	struct dma_window *wnd;
	int pamu_prot = 0;
	int ret;
	unsigned long flags;
	u64 win_size;

	if (prot & IOMMU_READ)
		pamu_prot |= PAACE_AP_PERMS_QUERY;
	if (prot & IOMMU_WRITE)
		pamu_prot |= PAACE_AP_PERMS_UPDATE;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	if (!dma_domain->win_arr) {
		pr_debug("Number of windows not configured\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -ENODEV;
	}

	if (wnd_nr >= dma_domain->win_cnt) {
		pr_debug("Invalid window index\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -EINVAL;
	}

	win_size = (domain->geometry.aperture_end + 1) >>
			ilog2(dma_domain->win_cnt);
	if (size > win_size) {
		pr_debug("Invalid window size\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -EINVAL;
	}

	if (dma_domain->win_cnt == 1) {
		if (dma_domain->enabled) {
			pr_debug("Disable the window before updating the mapping\n");
			spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
			return -EBUSY;
		}

		ret = check_size(size, domain->geometry.aperture_start);
		if (ret) {
			pr_debug("Aperture start not aligned to the size\n");
			spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
			return -EINVAL;
		}
	}

	wnd = &dma_domain->win_arr[wnd_nr];
	if (!wnd->valid) {
		wnd->paddr = paddr;
		wnd->size = size;
		wnd->prot = pamu_prot;

		ret = update_domain_mapping(dma_domain, wnd_nr);
		if (!ret) {
			wnd->valid = 1;
			dma_domain->mapped++;
		}
	} else {
		pr_debug("Disable the window before updating the mapping\n");
		ret = -EBUSY;
	}

	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

	return ret;
}

/*
 * Attach the LIODN to the DMA domain and configure the geometry
 * and window mappings.
 */
static int handle_attach_device(struct fsl_dma_domain *dma_domain,
				struct device *dev, const u32 *liodn,
				int num)
{
	unsigned long flags;
	struct iommu_domain *domain = &dma_domain->iommu_domain;
	int ret = 0;
	int i;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	for (i = 0; i < num; i++) {
		/* Ensure that LIODN value is valid */
		if (liodn[i] >= PAACE_NUMBER_ENTRIES) {
			pr_debug("Invalid liodn %d, attach device failed for %pOF\n",
				 liodn[i], dev->of_node);
			ret = -EINVAL;
			break;
		}

		attach_device(dma_domain, liodn[i], dev);
		/*
		 * Check if geometry has already been configured
		 * for the domain. If yes, set the geometry for
		 * the LIODN.
		 */
		if (dma_domain->win_arr) {
			u32 win_cnt = dma_domain->win_cnt > 1 ? dma_domain->win_cnt : 0;

			ret = pamu_set_liodn(liodn[i], dev, dma_domain,
					     &domain->geometry, win_cnt);
			if (ret)
				break;
			if (dma_domain->mapped) {
				/*
				 * Create window/subwindow mapping for
				 * the LIODN.
				 */
				ret = map_liodn(liodn[i], dma_domain);
				if (ret)
					break;
			}
		}
	}
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

	return ret;
}

static int fsl_pamu_attach_device(struct iommu_domain *domain,
				  struct device *dev)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
	const u32 *liodn;
	u32 liodn_cnt;
	int len, ret = 0;
	struct pci_dev *pdev = NULL;
	struct pci_controller *pci_ctl;

	/*
	 * Use LIODN of the PCI controller while attaching a
	 * PCI device.
	 */
	if (dev_is_pci(dev)) {
		pdev = to_pci_dev(dev);
		pci_ctl = pci_bus_to_host(pdev->bus);
		/*
		 * make dev point to pci controller device
		 * so we can get the LIODN programmed by
		 * u-boot.
		 */
		dev = pci_ctl->parent;
	}

	liodn = of_get_property(dev->of_node, "fsl,liodn", &len);
	if (liodn) {
		liodn_cnt = len / sizeof(u32);
		ret = handle_attach_device(dma_domain, dev, liodn, liodn_cnt);
	} else {
		pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node);
		ret = -EINVAL;
	}

	return ret;
}

static void fsl_pamu_detach_device(struct iommu_domain *domain,
				   struct device *dev)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
	const u32 *prop;
	int len;
	struct pci_dev *pdev = NULL;
	struct pci_controller *pci_ctl;

	/*
	 * Use LIODN of the PCI controller while detaching a
	 * PCI device.
	 */
	if (dev_is_pci(dev)) {
		pdev = to_pci_dev(dev);
		pci_ctl = pci_bus_to_host(pdev->bus);
		/*
		 * make dev point to pci controller device
		 * so we can get the LIODN programmed by
		 * u-boot.
		 */
		dev = pci_ctl->parent;
	}

	prop = of_get_property(dev->of_node, "fsl,liodn", &len);
	if (prop)
		detach_device(dev, dma_domain);
	else
		pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node);
}

/* Set the domain stash attribute */
static int configure_domain_stash(struct fsl_dma_domain *dma_domain, void *data)
{
	struct pamu_stash_attribute *stash_attr = data;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);

	memcpy(&dma_domain->dma_stash, stash_attr,
	       sizeof(struct pamu_stash_attribute));

	dma_domain->stash_id = get_stash_id(stash_attr->cache,
					    stash_attr->cpu);
	if (dma_domain->stash_id == ~(u32)0) {
		pr_debug("Invalid stash attributes\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -EINVAL;
	}

	ret = update_domain_stash(dma_domain, dma_domain->stash_id);

	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

	return ret;
}

/* Configure domain dma state i.e. enable/disable DMA */
static int configure_domain_dma_state(struct fsl_dma_domain *dma_domain, bool enable)
{
	struct device_domain_info *info;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);

	if (enable && !dma_domain->mapped) {
		pr_debug("Can't enable DMA domain without valid mapping\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -ENODEV;
	}

	dma_domain->enabled = enable;
	list_for_each_entry(info, &dma_domain->devices, link) {
		ret = (enable) ? pamu_enable_liodn(info->liodn) :
			pamu_disable_liodn(info->liodn);
		if (ret)
			pr_debug("Unable to set dma state for liodn %d\n",
				 info->liodn);
	}
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

	return 0;
}

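/*
 * Set the number of DMA windows for the domain (DOMAIN_ATTR_WINDOWS).
 * The domain must be inactive, and the count must be a power of two no
 * larger than the maximum subwindow count; the window array is then
 * (re)allocated and the geometry is programmed for all attached LIODNs.
 */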
static int fsl_pamu_set_windows(struct iommu_domain *domain, u32 w_count)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	/* Ensure domain is inactive i.e. DMA should be disabled for the domain */
	if (dma_domain->enabled) {
		pr_debug("Can't set geometry attributes as domain is active\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -EBUSY;
	}

	/*
	 * Ensure we have valid window count i.e. it should be less than
	 * maximum permissible limit and should be a power of two.
	 */
	if (w_count > pamu_get_max_subwin_cnt() || !is_power_of_2(w_count)) {
		pr_debug("Invalid window count\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -EINVAL;
	}

	ret = pamu_set_domain_geometry(dma_domain, &domain->geometry,
				       w_count > 1 ? w_count : 0);
	if (!ret) {
		kfree(dma_domain->win_arr);
		dma_domain->win_arr = kcalloc(w_count,
					      sizeof(*dma_domain->win_arr),
					      GFP_ATOMIC);
		if (!dma_domain->win_arr) {
			spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
			return -ENOMEM;
		}
		dma_domain->win_cnt = w_count;
	}
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

	return ret;
}

static int fsl_pamu_set_domain_attr(struct iommu_domain *domain,
				    enum iommu_attr attr_type, void *data)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
	int ret = 0;

	switch (attr_type) {
	case DOMAIN_ATTR_FSL_PAMU_STASH:
		ret = configure_domain_stash(dma_domain, data);
		break;
	case DOMAIN_ATTR_FSL_PAMU_ENABLE:
		ret = configure_domain_dma_state(dma_domain, *(int *)data);
		break;
	case DOMAIN_ATTR_WINDOWS:
		ret = fsl_pamu_set_windows(domain, *(u32 *)data);
		break;
	default:
		pr_debug("Unsupported attribute type\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}

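/* Get the existing iommu_group of a device, or allocate a new one. */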
static struct iommu_group *get_device_iommu_group(struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get(dev);
	if (!group)
		group = iommu_group_alloc();

	return group;
}

static bool check_pci_ctl_endpt_part(struct pci_controller *pci_ctl)
{
	u32 version;

	/* Check the PCI controller version number by reading the BRR1 register */
	version = in_be32(pci_ctl->cfg_addr + (PCI_FSL_BRR1 >> 2));
	version &= PCI_FSL_BRR1_VER;
	/* If PCI controller version is >= 0x204 we can partition endpoints */
	return version >= 0x204;
}

/* Get iommu group information from peer devices or devices on the parent bus */
static struct iommu_group *get_shared_pci_device_group(struct pci_dev *pdev)
{
	struct pci_dev *tmp;
	struct iommu_group *group;
	struct pci_bus *bus = pdev->bus;

	/*
	 * Traverse the PCI bus device list to get
	 * the shared iommu group.
	 */
	while (bus) {
		list_for_each_entry(tmp, &bus->devices, bus_list) {
			if (tmp == pdev)
				continue;
			group = iommu_group_get(&tmp->dev);
			if (group)
				return group;
		}

		bus = bus->parent;
	}

	return NULL;
}

static struct iommu_group *get_pci_device_group(struct pci_dev *pdev)
{
	struct pci_controller *pci_ctl;
	bool pci_endpt_partitioning;
	struct iommu_group *group = NULL;

	pci_ctl = pci_bus_to_host(pdev->bus);
	pci_endpt_partitioning = check_pci_ctl_endpt_part(pci_ctl);
	/* We can partition PCIe devices so assign device group to the device */
	if (pci_endpt_partitioning) {
		group = pci_device_group(&pdev->dev);

		/*
		 * PCIe controller is not a partitionable entity
		 * free the controller device iommu_group.
		 */
		if (pci_ctl->parent->iommu_group)
			iommu_group_remove_device(pci_ctl->parent);
	} else {
		/*
		 * All devices connected to the controller will share the
		 * PCI controller's device group. If this is the first
		 * device to be probed for the PCI controller, copy the
		 * device group information from the PCI controller device
		 * node and remove the PCI controller iommu group.
		 * For subsequent devices, the iommu group information can
		 * be obtained from sibling devices (i.e. from the bus_devices
		 * link list).
		 */
		if (pci_ctl->parent->iommu_group) {
			group = get_device_iommu_group(pci_ctl->parent);
			iommu_group_remove_device(pci_ctl->parent);
		} else {
			group = get_shared_pci_device_group(pdev);
		}
	}

	if (!group)
		group = ERR_PTR(-ENODEV);

	return group;
}

static struct iommu_group *fsl_pamu_device_group(struct device *dev)
{
	struct iommu_group *group = ERR_PTR(-ENODEV);
	int len;

	/*
	 * For platform devices we allocate a separate group for
	 * each of the devices.
	 */
	if (dev_is_pci(dev))
		group = get_pci_device_group(to_pci_dev(dev));
	else if (of_get_property(dev->of_node, "fsl,liodn", &len))
		group = get_device_iommu_group(dev);

	return group;
}

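/* All devices are handled by the single PAMU IOMMU instance. */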
static struct iommu_device *fsl_pamu_probe_device(struct device *dev)
{
	return &pamu_iommu;
}

static void fsl_pamu_release_device(struct device *dev)
{
}

static const struct iommu_ops fsl_pamu_ops = {
	.capable	= fsl_pamu_capable,
	.domain_alloc	= fsl_pamu_domain_alloc,
	.domain_free    = fsl_pamu_domain_free,
	.attach_dev	= fsl_pamu_attach_device,
	.detach_dev	= fsl_pamu_detach_device,
	.domain_window_enable = fsl_pamu_window_enable,
	.iova_to_phys	= fsl_pamu_iova_to_phys,
	.domain_set_attr = fsl_pamu_set_domain_attr,
	.probe_device	= fsl_pamu_probe_device,
	.release_device	= fsl_pamu_release_device,
	.device_group   = fsl_pamu_device_group,
};

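/*
 * Set up the PAMU domain layer: create the kmem caches, register the
 * IOMMU device with sysfs and the IOMMU core, and hook fsl_pamu_ops up
 * to the platform and PCI buses.
 */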
int __init pamu_domain_init(void)
{
	int ret = 0;

	ret = iommu_init_mempool();
	if (ret)
		return ret;

	ret = iommu_device_sysfs_add(&pamu_iommu, NULL, NULL, "iommu0");
	if (ret)
		return ret;

	iommu_device_set_ops(&pamu_iommu, &fsl_pamu_ops);

	ret = iommu_device_register(&pamu_iommu);
	if (ret) {
		iommu_device_sysfs_remove(&pamu_iommu);
		pr_err("Can't register iommu device\n");
		return ret;
	}

	bus_set_iommu(&platform_bus_type, &fsl_pamu_ops);
	bus_set_iommu(&pci_bus_type, &fsl_pamu_ops);

	return ret;
}