/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright (C) 2013 Freescale Semiconductor, Inc.
 * Author: Varun Sethi <varun.sethi@freescale.com>
 *
 */

#define pr_fmt(fmt)    "fsl-pamu-domain: %s: " fmt, __func__

#include <linux/init.h>
#include <linux/iommu.h>
#include <linux/notifier.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/of_platform.h>
#include <linux/bootmem.h>
#include <linux/err.h>
#include <asm/io.h>
#include <asm/bitops.h>

#include <asm/pci-bridge.h>
#include <sysdev/fsl_pci.h>

#include "fsl_pamu_domain.h"
#include "pci.h"

/*
 * Global spinlock that needs to be held while
 * configuring PAMU.
 */
static DEFINE_SPINLOCK(iommu_lock);

static struct kmem_cache *fsl_pamu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static DEFINE_SPINLOCK(device_domain_lock);
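
/*
 * Locking overview (as used in this file): iommu_lock serialises updates to
 * the PAMU PAACE/SPAACE tables, device_domain_lock protects the per-device
 * archdata.iommu_domain pointer and devinfo allocations, and each
 * fsl_dma_domain carries its own domain_lock for its device list and window
 * state. A domain's domain_lock may be held while iommu_lock or
 * device_domain_lock is taken, never the other way around.
 */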

static int __init iommu_init_mempool(void)
{
	fsl_pamu_domain_cache = kmem_cache_create("fsl_pamu_domain",
						  sizeof(struct fsl_dma_domain),
						  0,
						  SLAB_HWCACHE_ALIGN,
						  NULL);
	if (!fsl_pamu_domain_cache) {
		pr_debug("Couldn't create fsl iommu_domain cache\n");
		return -ENOMEM;
	}

	iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
						sizeof(struct device_domain_info),
						0,
						SLAB_HWCACHE_ALIGN,
						NULL);
	if (!iommu_devinfo_cache) {
		pr_debug("Couldn't create devinfo cache\n");
		kmem_cache_destroy(fsl_pamu_domain_cache);
		return -ENOMEM;
	}

	return 0;
}

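/*
 * Translate an iova inside the domain geometry to a physical address.
 *
 * Worked example (illustrative numbers): with a 1 GiB geometry starting at
 * iova 0 and win_cnt = 256, each subwindow spans 4 MiB (1 GiB >> ilog2(256)).
 * An iova of 0x00C12345 is rounded down to 0x00C00000, which selects
 * subwindow 3; the result is that window's paddr plus the offset of the
 * iova within the window.
 */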
static phys_addr_t get_phys_addr(struct fsl_dma_domain *dma_domain, dma_addr_t iova)
{
	u32 win_cnt = dma_domain->win_cnt;
	struct dma_window *win_ptr = &dma_domain->win_arr[0];
	struct iommu_domain_geometry *geom;

	geom = &dma_domain->iommu_domain->geometry;

	if (!win_cnt || !dma_domain->geom_size) {
		pr_debug("Number of windows/geometry not configured for the domain\n");
		return 0;
	}

	if (win_cnt > 1) {
		u64 subwin_size;
		dma_addr_t subwin_iova;
		u32 wnd;

		subwin_size = dma_domain->geom_size >> ilog2(win_cnt);
		subwin_iova = iova & ~(subwin_size - 1);
		wnd = (subwin_iova - geom->aperture_start) >> ilog2(subwin_size);
		win_ptr = &dma_domain->win_arr[wnd];
	}

	if (win_ptr->valid)
		return win_ptr->paddr + (iova & (win_ptr->size - 1));

	return 0;
}

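/*
 * Program the per-subwindow (SPAACE) entries for a LIODN. Every valid DMA
 * window in the domain gets an entry; the enable argument passed to
 * pamu_config_spaace() is only set for i > 0, since subwindow 0 appears to
 * be represented by the primary PAACE entry for the LIODN.
 */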
static int map_subwins(int liodn, struct fsl_dma_domain *dma_domain)
{
	struct dma_window *sub_win_ptr = &dma_domain->win_arr[0];
	int i, ret = 0;
	unsigned long rpn, flags;

	for (i = 0; i < dma_domain->win_cnt; i++) {
		if (sub_win_ptr[i].valid) {
			rpn = sub_win_ptr[i].paddr >> PAMU_PAGE_SHIFT;
			spin_lock_irqsave(&iommu_lock, flags);
			ret = pamu_config_spaace(liodn, dma_domain->win_cnt, i,
						 sub_win_ptr[i].size,
						 ~(u32)0,
						 rpn,
						 dma_domain->snoop_id,
						 dma_domain->stash_id,
						 (i > 0) ? 1 : 0,
						 sub_win_ptr[i].prot);
			spin_unlock_irqrestore(&iommu_lock, flags);
			if (ret) {
				pr_debug("PAMU SPAACE configuration failed for liodn %d\n",
					 liodn);
				return ret;
			}
		}
	}

	return ret;
}

static int map_win(int liodn, struct fsl_dma_domain *dma_domain)
{
	int ret;
	struct dma_window *wnd = &dma_domain->win_arr[0];
	phys_addr_t wnd_addr = dma_domain->iommu_domain->geometry.aperture_start;
	unsigned long flags;

	spin_lock_irqsave(&iommu_lock, flags);
	ret = pamu_config_ppaace(liodn, wnd_addr,
				 wnd->size,
				 ~(u32)0,
				 wnd->paddr >> PAMU_PAGE_SHIFT,
				 dma_domain->snoop_id, dma_domain->stash_id,
				 0, wnd->prot);
	spin_unlock_irqrestore(&iommu_lock, flags);
	if (ret)
		pr_debug("PAMU PAACE configuration failed for liodn %d\n",
			 liodn);

	return ret;
}

/* Map the DMA window corresponding to the LIODN */
static int map_liodn(int liodn, struct fsl_dma_domain *dma_domain)
{
	if (dma_domain->win_cnt > 1)
		return map_subwins(liodn, dma_domain);
	else
		return map_win(liodn, dma_domain);
}

/* Update window/subwindow mapping for the LIODN */
static int update_liodn(int liodn, struct fsl_dma_domain *dma_domain, u32 wnd_nr)
{
	int ret;
	struct dma_window *wnd = &dma_domain->win_arr[wnd_nr];
	unsigned long flags;

	spin_lock_irqsave(&iommu_lock, flags);
	if (dma_domain->win_cnt > 1) {
		ret = pamu_config_spaace(liodn, dma_domain->win_cnt, wnd_nr,
					 wnd->size,
					 ~(u32)0,
					 wnd->paddr >> PAMU_PAGE_SHIFT,
					 dma_domain->snoop_id,
					 dma_domain->stash_id,
					 (wnd_nr > 0) ? 1 : 0,
					 wnd->prot);
		if (ret)
			pr_debug("Subwindow reconfiguration failed for liodn %d\n", liodn);
	} else {
		phys_addr_t wnd_addr;

		wnd_addr = dma_domain->iommu_domain->geometry.aperture_start;

		ret = pamu_config_ppaace(liodn, wnd_addr,
					 wnd->size,
					 ~(u32)0,
					 wnd->paddr >> PAMU_PAGE_SHIFT,
					 dma_domain->snoop_id, dma_domain->stash_id,
					 0, wnd->prot);
		if (ret)
			pr_debug("Window reconfiguration failed for liodn %d\n", liodn);
	}

	spin_unlock_irqrestore(&iommu_lock, flags);

	return ret;
}

static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain,
			      u32 val)
{
	int ret = 0, i;
	unsigned long flags;

	spin_lock_irqsave(&iommu_lock, flags);
	if (!dma_domain->win_arr) {
		pr_debug("Windows not configured, stash destination update failed for liodn %d\n", liodn);
		spin_unlock_irqrestore(&iommu_lock, flags);
		return -EINVAL;
	}

	for (i = 0; i < dma_domain->win_cnt; i++) {
		ret = pamu_update_paace_stash(liodn, i, val);
		if (ret) {
			pr_debug("Failed to update SPAACE %d stash field for liodn %d\n", i, liodn);
			spin_unlock_irqrestore(&iommu_lock, flags);
			return ret;
		}
	}

	spin_unlock_irqrestore(&iommu_lock, flags);

	return ret;
}

/* Set the geometry parameters for a LIODN */
static int pamu_set_liodn(int liodn, struct device *dev,
			  struct fsl_dma_domain *dma_domain,
			  struct iommu_domain_geometry *geom_attr,
			  u32 win_cnt)
{
	phys_addr_t window_addr, window_size;
	phys_addr_t subwin_size;
	int ret = 0, i;
	u32 omi_index = ~(u32)0;
	unsigned long flags;

	/*
	 * Configure the omi_index at geometry setup time.
	 * This is a static value that depends on the type of
	 * device and does not change thereafter.
	 */
	get_ome_index(&omi_index, dev);

	window_addr = geom_attr->aperture_start;
	window_size = dma_domain->geom_size;

	spin_lock_irqsave(&iommu_lock, flags);
	ret = pamu_disable_liodn(liodn);
	if (!ret)
		ret = pamu_config_ppaace(liodn, window_addr, window_size, omi_index,
					 0, dma_domain->snoop_id,
					 dma_domain->stash_id, win_cnt, 0);
	spin_unlock_irqrestore(&iommu_lock, flags);
	if (ret) {
		pr_debug("PAMU PAACE configuration failed for liodn %d, win_cnt = %d\n", liodn, win_cnt);
		return ret;
	}

	if (win_cnt > 1) {
		subwin_size = window_size >> ilog2(win_cnt);
		for (i = 0; i < win_cnt; i++) {
			spin_lock_irqsave(&iommu_lock, flags);
			ret = pamu_disable_spaace(liodn, i);
			if (!ret)
				ret = pamu_config_spaace(liodn, win_cnt, i,
							 subwin_size, omi_index,
							 0, dma_domain->snoop_id,
							 dma_domain->stash_id,
							 0, 0);
			spin_unlock_irqrestore(&iommu_lock, flags);
			if (ret) {
				pr_debug("PAMU SPAACE configuration failed for liodn %d\n", liodn);
				return ret;
			}
		}
	}

	return ret;
}

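/*
 * Validate a window size/address pair. For example (illustrative values),
 * size = 0x100000 (1 MiB) with iova = 0x40000000 passes: 1 MiB is a power
 * of two, is at least PAMU_PAGE_SIZE, and the iova is 1 MiB aligned.
 * size = 0xC0000 would be rejected because it is not a power of two.
 */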
static int check_size(u64 size, dma_addr_t iova)
{
	/*
	 * Size must be a power of two and at least be equal
	 * to PAMU page size.
	 */
	if ((size & (size - 1)) || size < PAMU_PAGE_SIZE) {
		pr_debug("size too small or not a power of two\n");
		return -EINVAL;
	}

	/* iova must be aligned to the window size */
	if (iova & (size - 1)) {
		pr_debug("address is not aligned with window size\n");
		return -EINVAL;
	}

	return 0;
}

static struct fsl_dma_domain *iommu_alloc_dma_domain(void)
{
	struct fsl_dma_domain *domain;

	domain = kmem_cache_zalloc(fsl_pamu_domain_cache, GFP_KERNEL);
	if (!domain)
		return NULL;

	domain->stash_id = ~(u32)0;
	domain->snoop_id = ~(u32)0;
	domain->win_cnt = pamu_get_max_subwin_cnt();
	domain->geom_size = 0;

	INIT_LIST_HEAD(&domain->devices);

	spin_lock_init(&domain->domain_lock);

	return domain;
}

static void remove_device_ref(struct device_domain_info *info, u32 win_cnt)
{
	unsigned long flags;

	list_del(&info->link);
	spin_lock_irqsave(&iommu_lock, flags);
	if (win_cnt > 1)
		pamu_free_subwins(info->liodn);
	pamu_disable_liodn(info->liodn);
	spin_unlock_irqrestore(&iommu_lock, flags);
	spin_lock_irqsave(&device_domain_lock, flags);
	info->dev->archdata.iommu_domain = NULL;
	kmem_cache_free(iommu_devinfo_cache, info);
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

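/*
 * Remove devices from the domain's device list: all of them when @dev is
 * NULL (used on domain destruction), or only the entries matching @dev.
 */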
static void detach_device(struct device *dev, struct fsl_dma_domain *dma_domain)
{
	struct device_domain_info *info, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	/* Remove the device from the domain device list */
	list_for_each_entry_safe(info, tmp, &dma_domain->devices, link) {
		if (!dev || (info->dev == dev))
			remove_device_ref(info, dma_domain->win_cnt);
	}
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
}

static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct device *dev)
{
	struct device_domain_info *info, *old_domain_info;
	unsigned long flags;

	spin_lock_irqsave(&device_domain_lock, flags);
	/*
	 * Check if the device is already attached to a domain.
	 * If it is attached to a different domain, detach it first.
	 */
	old_domain_info = dev->archdata.iommu_domain;
	if (old_domain_info && old_domain_info->domain != dma_domain) {
		spin_unlock_irqrestore(&device_domain_lock, flags);
		detach_device(dev, old_domain_info->domain);
		spin_lock_irqsave(&device_domain_lock, flags);
	}

	info = kmem_cache_zalloc(iommu_devinfo_cache, GFP_ATOMIC);
	if (!info) {
		pr_debug("devinfo allocation failed for liodn %d\n", liodn);
		spin_unlock_irqrestore(&device_domain_lock, flags);
		return;
	}

	info->dev = dev;
	info->liodn = liodn;
	info->domain = dma_domain;

	list_add(&info->link, &dma_domain->devices);
	/*
	 * In case of devices with multiple LIODNs just store
	 * the info for the first LIODN as all
	 * LIODNs share the same domain
	 */
	if (!dev->archdata.iommu_domain)
		dev->archdata.iommu_domain = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

static phys_addr_t fsl_pamu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	struct fsl_dma_domain *dma_domain = domain->priv;

	if (iova < domain->geometry.aperture_start ||
	    iova > domain->geometry.aperture_end)
		return 0;

	return get_phys_addr(dma_domain, iova);
}

static int fsl_pamu_domain_has_cap(struct iommu_domain *domain,
				   unsigned long cap)
{
	return cap == IOMMU_CAP_CACHE_COHERENCY;
}

static void fsl_pamu_domain_destroy(struct iommu_domain *domain)
{
	struct fsl_dma_domain *dma_domain = domain->priv;

	domain->priv = NULL;

	/* remove all the devices from the device list */
	detach_device(NULL, dma_domain);

	dma_domain->enabled = 0;
	dma_domain->mapped = 0;

	kmem_cache_free(fsl_pamu_domain_cache, dma_domain);
}

static int fsl_pamu_domain_init(struct iommu_domain *domain)
{
	struct fsl_dma_domain *dma_domain;

	dma_domain = iommu_alloc_dma_domain();
	if (!dma_domain) {
		pr_debug("dma_domain allocation failed\n");
		return -ENOMEM;
	}
	domain->priv = dma_domain;
	dma_domain->iommu_domain = domain;
	/* default geometry: 64 GB, i.e. the maximum system address range */
	domain->geometry.aperture_start = 0;
	domain->geometry.aperture_end = (1ULL << 36) - 1;
	domain->geometry.force_aperture = true;

	return 0;
}

/* Configure geometry settings for all LIODNs associated with domain */
static int pamu_set_domain_geometry(struct fsl_dma_domain *dma_domain,
				    struct iommu_domain_geometry *geom_attr,
				    u32 win_cnt)
{
	struct device_domain_info *info;
	int ret = 0;

	list_for_each_entry(info, &dma_domain->devices, link) {
		ret = pamu_set_liodn(info->liodn, info->dev, dma_domain,
				     geom_attr, win_cnt);
		if (ret)
			break;
	}

	return ret;
}

/* Update stash destination for all LIODNs associated with the domain */
static int update_domain_stash(struct fsl_dma_domain *dma_domain, u32 val)
{
	struct device_domain_info *info;
	int ret = 0;

	list_for_each_entry(info, &dma_domain->devices, link) {
		ret = update_liodn_stash(info->liodn, dma_domain, val);
		if (ret)
			break;
	}

	return ret;
}

/* Update domain mappings for all LIODNs associated with the domain */
static int update_domain_mapping(struct fsl_dma_domain *dma_domain, u32 wnd_nr)
{
	struct device_domain_info *info;
	int ret = 0;

	list_for_each_entry(info, &dma_domain->devices, link) {
		ret = update_liodn(info->liodn, dma_domain, wnd_nr);
		if (ret)
			break;
	}

	return ret;
}

static int disable_domain_win(struct fsl_dma_domain *dma_domain, u32 wnd_nr)
{
	struct device_domain_info *info;
	int ret = 0;

	list_for_each_entry(info, &dma_domain->devices, link) {
		if (dma_domain->win_cnt == 1 && dma_domain->enabled) {
			ret = pamu_disable_liodn(info->liodn);
			if (!ret)
				dma_domain->enabled = 0;
		} else {
			ret = pamu_disable_spaace(info->liodn, wnd_nr);
		}
	}

	return ret;
}

static void fsl_pamu_window_disable(struct iommu_domain *domain, u32 wnd_nr)
{
	struct fsl_dma_domain *dma_domain = domain->priv;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	if (!dma_domain->win_arr) {
		pr_debug("Number of windows not configured\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return;
	}

	if (wnd_nr >= dma_domain->win_cnt) {
		pr_debug("Invalid window index\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return;
	}

	if (dma_domain->win_arr[wnd_nr].valid) {
		ret = disable_domain_win(dma_domain, wnd_nr);
		if (!ret) {
			dma_domain->win_arr[wnd_nr].valid = 0;
			dma_domain->mapped--;
		}
	}

	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
}

static int fsl_pamu_window_enable(struct iommu_domain *domain, u32 wnd_nr,
				  phys_addr_t paddr, u64 size, int prot)
{
	struct fsl_dma_domain *dma_domain = domain->priv;
	struct dma_window *wnd;
	int pamu_prot = 0;
	int ret;
	unsigned long flags;
	u64 win_size;

	if (prot & IOMMU_READ)
		pamu_prot |= PAACE_AP_PERMS_QUERY;
	if (prot & IOMMU_WRITE)
		pamu_prot |= PAACE_AP_PERMS_UPDATE;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	if (!dma_domain->win_arr) {
		pr_debug("Number of windows not configured\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -ENODEV;
	}

	if (wnd_nr >= dma_domain->win_cnt) {
		pr_debug("Invalid window index\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -EINVAL;
	}

	win_size = dma_domain->geom_size >> ilog2(dma_domain->win_cnt);
	if (size > win_size) {
		pr_debug("Invalid window size\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -EINVAL;
	}

	if (dma_domain->win_cnt == 1) {
		if (dma_domain->enabled) {
			pr_debug("Disable the window before updating the mapping\n");
			spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
			return -EBUSY;
		}

		ret = check_size(size, domain->geometry.aperture_start);
		if (ret) {
			pr_debug("Aperture start not aligned to the size\n");
			spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
			return -EINVAL;
		}
	}

	wnd = &dma_domain->win_arr[wnd_nr];
	if (!wnd->valid) {
		wnd->paddr = paddr;
		wnd->size = size;
		wnd->prot = pamu_prot;

		ret = update_domain_mapping(dma_domain, wnd_nr);
		if (!ret) {
			wnd->valid = 1;
			dma_domain->mapped++;
		}
	} else {
		pr_debug("Disable the window before updating the mapping\n");
		ret = -EBUSY;
	}

	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

	return ret;
}

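/*
 * Sketch of how a caller is expected to drive this window API (illustrative
 * only; the variable names and values below are examples, not part of this
 * driver). The ordering follows the checks above: geometry first, then the
 * window count, then the windows themselves, and finally DMA enable:
 *
 *	struct iommu_domain_geometry geom = {
 *		.aperture_start = 0,
 *		.aperture_end   = SZ_1G - 1,
 *		.force_aperture = true,
 *	};
 *	u32 win_cnt = 1;
 *	int enable = 1;
 *
 *	iommu_domain_set_attr(domain, DOMAIN_ATTR_GEOMETRY, &geom);
 *	iommu_domain_set_attr(domain, DOMAIN_ATTR_WINDOWS, &win_cnt);
 *	iommu_attach_device(domain, dev);
 *	iommu_domain_window_enable(domain, 0, paddr, size,
 *				   IOMMU_READ | IOMMU_WRITE);
 *	iommu_domain_set_attr(domain, DOMAIN_ATTR_FSL_PAMU_ENABLE, &enable);
 */
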
/*
 * Attach the LIODN to the DMA domain and configure the geometry
 * and window mappings.
 */
static int handle_attach_device(struct fsl_dma_domain *dma_domain,
				struct device *dev, const u32 *liodn,
				int num)
{
	unsigned long flags;
	struct iommu_domain *domain = dma_domain->iommu_domain;
	int ret = 0;
	int i;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	for (i = 0; i < num; i++) {
		/* Ensure that LIODN value is valid */
		if (liodn[i] >= PAACE_NUMBER_ENTRIES) {
			pr_debug("Invalid liodn %d, attach device failed for %s\n",
				 liodn[i], dev->of_node->full_name);
			ret = -EINVAL;
			break;
		}

		attach_device(dma_domain, liodn[i], dev);
		/*
		 * Check if geometry has already been configured
		 * for the domain. If yes, set the geometry for
		 * the LIODN.
		 */
		if (dma_domain->win_arr) {
			u32 win_cnt = dma_domain->win_cnt > 1 ? dma_domain->win_cnt : 0;

			ret = pamu_set_liodn(liodn[i], dev, dma_domain,
					     &domain->geometry,
					     win_cnt);
			if (ret)
				break;
			if (dma_domain->mapped) {
				/*
				 * Create window/subwindow mapping for
				 * the LIODN.
				 */
				ret = map_liodn(liodn[i], dma_domain);
				if (ret)
					break;
			}
		}
	}
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

	return ret;
}

static int fsl_pamu_attach_device(struct iommu_domain *domain,
				  struct device *dev)
{
	struct fsl_dma_domain *dma_domain = domain->priv;
	const u32 *liodn;
	u32 liodn_cnt;
	int len, ret = 0;
	struct pci_dev *pdev = NULL;
	struct pci_controller *pci_ctl;

	/*
	 * Use LIODN of the PCI controller while attaching a
	 * PCI device.
	 */
	if (dev_is_pci(dev)) {
		pdev = to_pci_dev(dev);
		pci_ctl = pci_bus_to_host(pdev->bus);
		/*
		 * Make dev point to the PCI controller device
		 * so we can get the LIODN programmed by
		 * U-Boot.
		 */
		dev = pci_ctl->parent;
	}

	liodn = of_get_property(dev->of_node, "fsl,liodn", &len);
	if (liodn) {
		liodn_cnt = len / sizeof(u32);
		ret = handle_attach_device(dma_domain, dev, liodn, liodn_cnt);
	} else {
		pr_debug("missing fsl,liodn property at %s\n",
			 dev->of_node->full_name);
		ret = -EINVAL;
	}

	return ret;
}

static void fsl_pamu_detach_device(struct iommu_domain *domain,
				   struct device *dev)
{
	struct fsl_dma_domain *dma_domain = domain->priv;
	const u32 *prop;
	int len;
	struct pci_dev *pdev = NULL;
	struct pci_controller *pci_ctl;

	/*
	 * Use LIODN of the PCI controller while detaching a
	 * PCI device.
	 */
	if (dev_is_pci(dev)) {
		pdev = to_pci_dev(dev);
		pci_ctl = pci_bus_to_host(pdev->bus);
		/*
		 * Make dev point to the PCI controller device
		 * so we can get the LIODN programmed by
		 * U-Boot.
		 */
		dev = pci_ctl->parent;
	}

	prop = of_get_property(dev->of_node, "fsl,liodn", &len);
	if (prop)
		detach_device(dev, dma_domain);
	else
		pr_debug("missing fsl,liodn property at %s\n",
			 dev->of_node->full_name);
}

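/*
 * Set up the domain geometry from a DOMAIN_ATTR_GEOMETRY attribute. As an
 * example (values illustrative), aperture_start = 0 and
 * aperture_end = 0x3fffffff with force_aperture set describes a 1 GiB
 * geometry, which check_size() accepts; a 0x30000000-byte geometry would be
 * rejected since it is not a power of two.
 */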
static int configure_domain_geometry(struct iommu_domain *domain, void *data)
{
	struct iommu_domain_geometry *geom_attr = data;
	struct fsl_dma_domain *dma_domain = domain->priv;
	dma_addr_t geom_size;
	unsigned long flags;

	geom_size = geom_attr->aperture_end - geom_attr->aperture_start + 1;
	/*
	 * Sanity check the geometry size. Also, we do not support
	 * DMA outside of the geometry.
	 */
	if (check_size(geom_size, geom_attr->aperture_start) ||
	    !geom_attr->force_aperture) {
		pr_debug("Invalid PAMU geometry attributes\n");
		return -EINVAL;
	}

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	if (dma_domain->enabled) {
		pr_debug("Can't set geometry attributes as domain is active\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -EBUSY;
	}

	/* Copy the domain geometry information */
	memcpy(&domain->geometry, geom_attr,
	       sizeof(struct iommu_domain_geometry));
	dma_domain->geom_size = geom_size;

	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

	return 0;
}

/* Set the domain stash attribute */
static int configure_domain_stash(struct fsl_dma_domain *dma_domain, void *data)
{
	struct pamu_stash_attribute *stash_attr = data;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);

	memcpy(&dma_domain->dma_stash, stash_attr,
	       sizeof(struct pamu_stash_attribute));

	dma_domain->stash_id = get_stash_id(stash_attr->cache,
					    stash_attr->cpu);
	if (dma_domain->stash_id == ~(u32)0) {
		pr_debug("Invalid stash attributes\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -EINVAL;
	}

	ret = update_domain_stash(dma_domain, dma_domain->stash_id);

	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

	return ret;
}

/* Configure domain dma state i.e. enable/disable DMA */
static int configure_domain_dma_state(struct fsl_dma_domain *dma_domain, bool enable)
{
	struct device_domain_info *info;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);

	if (enable && !dma_domain->mapped) {
		pr_debug("Can't enable DMA domain without valid mapping\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -ENODEV;
	}

	dma_domain->enabled = enable;
	list_for_each_entry(info, &dma_domain->devices, link) {
		ret = enable ? pamu_enable_liodn(info->liodn) :
			       pamu_disable_liodn(info->liodn);
		if (ret)
			pr_debug("Unable to set dma state for liodn %d\n",
				 info->liodn);
	}
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

	return 0;
}

static int fsl_pamu_set_domain_attr(struct iommu_domain *domain,
				    enum iommu_attr attr_type, void *data)
{
	struct fsl_dma_domain *dma_domain = domain->priv;
	int ret = 0;

	switch (attr_type) {
	case DOMAIN_ATTR_GEOMETRY:
		ret = configure_domain_geometry(domain, data);
		break;
	case DOMAIN_ATTR_FSL_PAMU_STASH:
		ret = configure_domain_stash(dma_domain, data);
		break;
	case DOMAIN_ATTR_FSL_PAMU_ENABLE:
		ret = configure_domain_dma_state(dma_domain, *(int *)data);
		break;
	default:
		pr_debug("Unsupported attribute type\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int fsl_pamu_get_domain_attr(struct iommu_domain *domain,
				    enum iommu_attr attr_type, void *data)
{
	struct fsl_dma_domain *dma_domain = domain->priv;
	int ret = 0;

	switch (attr_type) {
	case DOMAIN_ATTR_FSL_PAMU_STASH:
		memcpy(data, &dma_domain->dma_stash,
		       sizeof(struct pamu_stash_attribute));
		break;
	case DOMAIN_ATTR_FSL_PAMU_ENABLE:
		*(int *)data = dma_domain->enabled;
		break;
	case DOMAIN_ATTR_FSL_PAMUV1:
		*(int *)data = DOMAIN_ATTR_FSL_PAMUV1;
		break;
	default:
		pr_debug("Unsupported attribute type\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}

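/*
 * ACS capabilities we require before treating a PCIe function as isolated:
 * Source Validation, Request Redirect, Completion Redirect and Upstream
 * Forwarding.
 */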
#define REQ_ACS_FLAGS	(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)

static struct iommu_group *get_device_iommu_group(struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get(dev);
	if (!group)
		group = iommu_group_alloc();

	return group;
}

static bool check_pci_ctl_endpt_part(struct pci_controller *pci_ctl)
{
	u32 version;

	/* Check the PCI controller version number by reading the BRR1 register */
	version = in_be32(pci_ctl->cfg_addr + (PCI_FSL_BRR1 >> 2));
	version &= PCI_FSL_BRR1_VER;
	/* If the PCI controller version is >= 0x204 we can partition endpoints */
	if (version >= 0x204)
		return true;

	return false;
}

/* Get iommu group information from peer devices or devices on the parent bus */
static struct iommu_group *get_shared_pci_device_group(struct pci_dev *pdev)
{
	struct pci_dev *tmp;
	struct iommu_group *group;
	struct pci_bus *bus = pdev->bus;

	/*
	 * Traverse the PCI bus device list to get
	 * the shared iommu group.
	 */
	while (bus) {
		list_for_each_entry(tmp, &bus->devices, bus_list) {
			if (tmp == pdev)
				continue;
			group = iommu_group_get(&tmp->dev);
			if (group)
				return group;
		}

		bus = bus->parent;
	}

	return NULL;
}

static struct iommu_group *get_pci_device_group(struct pci_dev *pdev)
{
	struct pci_controller *pci_ctl;
	bool pci_endpt_partioning;
	struct iommu_group *group = NULL;
	struct pci_dev *bridge, *dma_pdev = NULL;

	pci_ctl = pci_bus_to_host(pdev->bus);
	pci_endpt_partioning = check_pci_ctl_endpt_part(pci_ctl);
	/* We can partition PCIe devices so assign device group to the device */
	if (pci_endpt_partioning) {
		bridge = pci_find_upstream_pcie_bridge(pdev);
		if (bridge) {
			if (pci_is_pcie(bridge))
				dma_pdev = pci_get_domain_bus_and_slot(
						pci_domain_nr(pdev->bus),
						bridge->subordinate->number, 0);
			if (!dma_pdev)
				dma_pdev = pci_dev_get(bridge);
		} else {
			dma_pdev = pci_dev_get(pdev);
		}

		/* Account for quirked devices */
		swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev));

		/*
		 * If it's a multifunction device that does not support our
		 * required ACS flags, add to the same group as the lowest
		 * numbered function that also does not support the required
		 * ACS flags.
		 */
		if (dma_pdev->multifunction &&
		    !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)) {
			u8 i, slot = PCI_SLOT(dma_pdev->devfn);

			for (i = 0; i < 8; i++) {
				struct pci_dev *tmp;

				tmp = pci_get_slot(dma_pdev->bus, PCI_DEVFN(slot, i));
				if (!tmp)
					continue;

				if (!pci_acs_enabled(tmp, REQ_ACS_FLAGS)) {
					swap_pci_ref(&dma_pdev, tmp);
					break;
				}
				pci_dev_put(tmp);
			}
		}

		/*
		 * Devices on the root bus go through the iommu.  If that's not us,
		 * find the next upstream device and test ACS up to the root bus.
		 * Finding the next device may require skipping virtual buses.
		 */
		while (!pci_is_root_bus(dma_pdev->bus)) {
			struct pci_bus *bus = dma_pdev->bus;

			while (!bus->self) {
				if (!pci_is_root_bus(bus))
					bus = bus->parent;
				else
					goto root_bus;
			}

			if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
				break;

			swap_pci_ref(&dma_pdev, pci_dev_get(bus->self));
		}

root_bus:
		group = get_device_iommu_group(&dma_pdev->dev);
		pci_dev_put(dma_pdev);
		/*
		 * The PCI controller is not a partitionable entity;
		 * free the controller device's iommu_group.
		 */
		if (pci_ctl->parent->iommu_group)
			iommu_group_remove_device(pci_ctl->parent);
	} else {
		/*
		 * All devices connected to the controller will share the
		 * PCI controller's device group. If this is the first
		 * device to be probed for the PCI controller, copy the
		 * device group information from the PCI controller device
		 * node and remove the PCI controller iommu group.
		 * For subsequent devices, the iommu group information can
		 * be obtained from sibling devices (i.e. from the bus_devices
		 * linked list).
		 */
		if (pci_ctl->parent->iommu_group) {
			group = get_device_iommu_group(pci_ctl->parent);
			iommu_group_remove_device(pci_ctl->parent);
		} else {
			group = get_shared_pci_device_group(pdev);
		}
	}

	if (!group)
		group = ERR_PTR(-ENODEV);

	return group;
}

static int fsl_pamu_add_device(struct device *dev)
{
	struct iommu_group *group = ERR_PTR(-ENODEV);
	struct pci_dev *pdev;
	const u32 *prop;
	int ret, len;

	/*
	 * For platform devices we allocate a separate group for
	 * each of the devices.
	 */
	if (dev_is_pci(dev)) {
		pdev = to_pci_dev(dev);
		/* Don't create device groups for virtual PCI bridges */
		if (pdev->subordinate)
			return 0;

		group = get_pci_device_group(pdev);
	} else {
		prop = of_get_property(dev->of_node, "fsl,liodn", &len);
		if (prop)
			group = get_device_iommu_group(dev);
	}

	if (IS_ERR(group))
		return PTR_ERR(group);

	ret = iommu_group_add_device(group, dev);

	iommu_group_put(group);
	return ret;
}

static void fsl_pamu_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
}

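/*
 * Split the domain geometry into w_count equally sized subwindows. For
 * example (numbers illustrative), a 1 GiB geometry with w_count = 16 gives
 * sixteen 64 MiB subwindows; w_count must be a power of two and must not
 * exceed pamu_get_max_subwin_cnt().
 */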
static int fsl_pamu_set_windows(struct iommu_domain *domain, u32 w_count)
{
	struct fsl_dma_domain *dma_domain = domain->priv;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	/* Ensure domain is inactive i.e. DMA should be disabled for the domain */
	if (dma_domain->enabled) {
		pr_debug("Can't set the number of windows while the domain is active\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -EBUSY;
	}

	/* Ensure that the geometry has been set for the domain */
	if (!dma_domain->geom_size) {
		pr_debug("Please configure geometry before setting the number of windows\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -EINVAL;
	}

	/*
	 * Ensure we have a valid window count i.e. it must not exceed the
	 * maximum permissible limit and must be a power of two.
	 */
	if (w_count > pamu_get_max_subwin_cnt() || !is_power_of_2(w_count)) {
		pr_debug("Invalid window count\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -EINVAL;
	}

	ret = pamu_set_domain_geometry(dma_domain, &domain->geometry,
				       (w_count > 1) ? w_count : 0);
	if (!ret) {
		kfree(dma_domain->win_arr);
		dma_domain->win_arr = kcalloc(w_count,
					      sizeof(struct dma_window),
					      GFP_ATOMIC);
		if (!dma_domain->win_arr) {
			spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
			return -ENOMEM;
		}
		dma_domain->win_cnt = w_count;
	}
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

	return ret;
}

static u32 fsl_pamu_get_windows(struct iommu_domain *domain)
{
	struct fsl_dma_domain *dma_domain = domain->priv;

	return dma_domain->win_cnt;
}

static struct iommu_ops fsl_pamu_ops = {
	.domain_init	= fsl_pamu_domain_init,
	.domain_destroy = fsl_pamu_domain_destroy,
	.attach_dev	= fsl_pamu_attach_device,
	.detach_dev	= fsl_pamu_detach_device,
	.domain_window_enable = fsl_pamu_window_enable,
	.domain_window_disable = fsl_pamu_window_disable,
	.domain_get_windows = fsl_pamu_get_windows,
	.domain_set_windows = fsl_pamu_set_windows,
	.iova_to_phys	= fsl_pamu_iova_to_phys,
	.domain_has_cap = fsl_pamu_domain_has_cap,
	.domain_set_attr = fsl_pamu_set_domain_attr,
	.domain_get_attr = fsl_pamu_get_domain_attr,
	.add_device	= fsl_pamu_add_device,
	.remove_device	= fsl_pamu_remove_device,
};

int __init pamu_domain_init(void)
{
	int ret = 0;

	ret = iommu_init_mempool();
	if (ret)
		return ret;

	bus_set_iommu(&platform_bus_type, &fsl_pamu_ops);
	bus_set_iommu(&pci_bus_type, &fsl_pamu_ops);

	return ret;
}