xref: /openbmc/linux/drivers/iommu/iommu.c (revision cf9441ad)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
4  * Author: Joerg Roedel <jroedel@suse.de>
5  */
6 
7 #define pr_fmt(fmt)    "iommu: " fmt
8 
9 #include <linux/device.h>
10 #include <linux/kernel.h>
11 #include <linux/bug.h>
12 #include <linux/types.h>
13 #include <linux/init.h>
14 #include <linux/export.h>
15 #include <linux/slab.h>
16 #include <linux/errno.h>
17 #include <linux/iommu.h>
18 #include <linux/idr.h>
19 #include <linux/notifier.h>
20 #include <linux/err.h>
21 #include <linux/pci.h>
22 #include <linux/bitops.h>
23 #include <linux/property.h>
24 #include <linux/fsl/mc.h>
25 #include <trace/events/iommu.h>
26 
27 static struct kset *iommu_group_kset;
28 static DEFINE_IDA(iommu_group_ida);
29 #ifdef CONFIG_IOMMU_DEFAULT_PASSTHROUGH
30 static unsigned int iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY;
31 #else
32 static unsigned int iommu_def_domain_type = IOMMU_DOMAIN_DMA;
33 #endif
34 static bool iommu_dma_strict __read_mostly = true;
35 
36 struct iommu_group {
37 	struct kobject kobj;
38 	struct kobject *devices_kobj;
39 	struct list_head devices;
40 	struct mutex mutex;
41 	struct blocking_notifier_head notifier;
42 	void *iommu_data;
43 	void (*iommu_data_release)(void *iommu_data);
44 	char *name;
45 	int id;
46 	struct iommu_domain *default_domain;
47 	struct iommu_domain *domain;
48 };
49 
50 struct group_device {
51 	struct list_head list;
52 	struct device *dev;
53 	char *name;
54 };
55 
56 struct iommu_group_attribute {
57 	struct attribute attr;
58 	ssize_t (*show)(struct iommu_group *group, char *buf);
59 	ssize_t (*store)(struct iommu_group *group,
60 			 const char *buf, size_t count);
61 };
62 
63 static const char * const iommu_group_resv_type_string[] = {
64 	[IOMMU_RESV_DIRECT]			= "direct",
65 	[IOMMU_RESV_DIRECT_RELAXABLE]		= "direct-relaxable",
66 	[IOMMU_RESV_RESERVED]			= "reserved",
67 	[IOMMU_RESV_MSI]			= "msi",
68 	[IOMMU_RESV_SW_MSI]			= "msi",
69 };
70 
71 #define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)		\
72 struct iommu_group_attribute iommu_group_attr_##_name =		\
73 	__ATTR(_name, _mode, _show, _store)
74 
75 #define to_iommu_group_attr(_attr)	\
76 	container_of(_attr, struct iommu_group_attribute, attr)
77 #define to_iommu_group(_kobj)		\
78 	container_of(_kobj, struct iommu_group, kobj)
79 
80 static LIST_HEAD(iommu_device_list);
81 static DEFINE_SPINLOCK(iommu_device_lock);
82 
83 int iommu_device_register(struct iommu_device *iommu)
84 {
85 	spin_lock(&iommu_device_lock);
86 	list_add_tail(&iommu->list, &iommu_device_list);
87 	spin_unlock(&iommu_device_lock);
88 
89 	return 0;
90 }
91 
92 void iommu_device_unregister(struct iommu_device *iommu)
93 {
94 	spin_lock(&iommu_device_lock);
95 	list_del(&iommu->list);
96 	spin_unlock(&iommu_device_lock);
97 }
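
/*
 * Example (minimal sketch): an IOMMU driver registers one struct iommu_device
 * per hardware instance from its probe path and unregisters it on removal.
 * "struct my_iommu" and my_iommu_probe() are hypothetical names used for
 * illustration only.
 *
 *	struct my_iommu {
 *		struct iommu_device	iommu;
 *		void __iomem		*base;
 *	};
 *
 *	static int my_iommu_probe(struct platform_device *pdev)
 *	{
 *		struct my_iommu *m = devm_kzalloc(&pdev->dev, sizeof(*m), GFP_KERNEL);
 *
 *		if (!m)
 *			return -ENOMEM;
 *		// ... map registers, set up ops ...
 *		return iommu_device_register(&m->iommu);
 *	}
 */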
98 
99 static struct iommu_param *iommu_get_dev_param(struct device *dev)
100 {
101 	struct iommu_param *param = dev->iommu_param;
102 
103 	if (param)
104 		return param;
105 
106 	param = kzalloc(sizeof(*param), GFP_KERNEL);
107 	if (!param)
108 		return NULL;
109 
110 	mutex_init(&param->lock);
111 	dev->iommu_param = param;
112 	return param;
113 }
114 
115 static void iommu_free_dev_param(struct device *dev)
116 {
117 	kfree(dev->iommu_param);
118 	dev->iommu_param = NULL;
119 }
120 
121 int iommu_probe_device(struct device *dev)
122 {
123 	const struct iommu_ops *ops = dev->bus->iommu_ops;
124 	int ret;
125 
126 	WARN_ON(dev->iommu_group);
127 	if (!ops)
128 		return -EINVAL;
129 
130 	if (!iommu_get_dev_param(dev))
131 		return -ENOMEM;
132 
133 	ret = ops->add_device(dev);
134 	if (ret)
135 		iommu_free_dev_param(dev);
136 
137 	return ret;
138 }
139 
140 void iommu_release_device(struct device *dev)
141 {
142 	const struct iommu_ops *ops = dev->bus->iommu_ops;
143 
144 	if (dev->iommu_group)
145 		ops->remove_device(dev);
146 
147 	iommu_free_dev_param(dev);
148 }
149 
150 static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
151 						 unsigned type);
152 static int __iommu_attach_device(struct iommu_domain *domain,
153 				 struct device *dev);
154 static int __iommu_attach_group(struct iommu_domain *domain,
155 				struct iommu_group *group);
156 static void __iommu_detach_group(struct iommu_domain *domain,
157 				 struct iommu_group *group);
158 
159 static int __init iommu_set_def_domain_type(char *str)
160 {
161 	bool pt;
162 	int ret;
163 
164 	ret = kstrtobool(str, &pt);
165 	if (ret)
166 		return ret;
167 
168 	iommu_def_domain_type = pt ? IOMMU_DOMAIN_IDENTITY : IOMMU_DOMAIN_DMA;
169 	return 0;
170 }
171 early_param("iommu.passthrough", iommu_set_def_domain_type);
172 
173 static int __init iommu_dma_setup(char *str)
174 {
175 	return kstrtobool(str, &iommu_dma_strict);
176 }
177 early_param("iommu.strict", iommu_dma_setup);
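
/*
 * Example: both knobs are early parameters, so they are given on the kernel
 * command line rather than as module options, e.g.:
 *
 *	iommu.passthrough=1	-> default domains use IOMMU_DOMAIN_IDENTITY
 *	iommu.strict=0		-> lazy (batched) IOTLB invalidation on unmap
 */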
178 
179 static ssize_t iommu_group_attr_show(struct kobject *kobj,
180 				     struct attribute *__attr, char *buf)
181 {
182 	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
183 	struct iommu_group *group = to_iommu_group(kobj);
184 	ssize_t ret = -EIO;
185 
186 	if (attr->show)
187 		ret = attr->show(group, buf);
188 	return ret;
189 }
190 
191 static ssize_t iommu_group_attr_store(struct kobject *kobj,
192 				      struct attribute *__attr,
193 				      const char *buf, size_t count)
194 {
195 	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
196 	struct iommu_group *group = to_iommu_group(kobj);
197 	ssize_t ret = -EIO;
198 
199 	if (attr->store)
200 		ret = attr->store(group, buf, count);
201 	return ret;
202 }
203 
204 static const struct sysfs_ops iommu_group_sysfs_ops = {
205 	.show = iommu_group_attr_show,
206 	.store = iommu_group_attr_store,
207 };
208 
209 static int iommu_group_create_file(struct iommu_group *group,
210 				   struct iommu_group_attribute *attr)
211 {
212 	return sysfs_create_file(&group->kobj, &attr->attr);
213 }
214 
215 static void iommu_group_remove_file(struct iommu_group *group,
216 				    struct iommu_group_attribute *attr)
217 {
218 	sysfs_remove_file(&group->kobj, &attr->attr);
219 }
220 
221 static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
222 {
223 	return sprintf(buf, "%s\n", group->name);
224 }
225 
226 /**
227  * iommu_insert_resv_region - Insert a new region in the
228  * list of reserved regions.
229  * @new: new region to insert
230  * @regions: list of regions
231  *
232  * The new element is sorted by address with respect to the other
233  * regions of the same type. In case it overlaps with another
234  * region of the same type, regions are merged. In case it
235  * overlaps with another region of a different type, regions are
236  * not merged.
237  */
238 static int iommu_insert_resv_region(struct iommu_resv_region *new,
239 				    struct list_head *regions)
240 {
241 	struct iommu_resv_region *region;
242 	phys_addr_t start = new->start;
243 	phys_addr_t end = new->start + new->length - 1;
244 	struct list_head *pos = regions->next;
245 
246 	while (pos != regions) {
247 		struct iommu_resv_region *entry =
248 			list_entry(pos, struct iommu_resv_region, list);
249 		phys_addr_t a = entry->start;
250 		phys_addr_t b = entry->start + entry->length - 1;
251 		int type = entry->type;
252 
253 		if (end < a) {
254 			goto insert;
255 		} else if (start > b) {
256 			pos = pos->next;
257 		} else if ((start >= a) && (end <= b)) {
258 			if (new->type == type)
259 				return 0;
260 			else
261 				pos = pos->next;
262 		} else {
263 			if (new->type == type) {
264 				phys_addr_t new_start = min(a, start);
265 				phys_addr_t new_end = max(b, end);
266 				int ret;
267 
268 				list_del(&entry->list);
269 				entry->start = new_start;
270 				entry->length = new_end - new_start + 1;
271 				ret = iommu_insert_resv_region(entry, regions);
272 				kfree(entry);
273 				return ret;
274 			} else {
275 				pos = pos->next;
276 			}
277 		}
278 	}
279 insert:
280 	region = iommu_alloc_resv_region(new->start, new->length,
281 					 new->prot, new->type);
282 	if (!region)
283 		return -ENOMEM;
284 
285 	list_add_tail(&region->list, pos);
286 	return 0;
287 }
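
/*
 * Worked example of the merge rules above (illustrative addresses): with an
 * existing IOMMU_RESV_DIRECT region [0x1000, 0x1fff] in the list, inserting
 * another IOMMU_RESV_DIRECT region [0x1800, 0x2fff] removes the old entry
 * and re-inserts a single merged region [0x1000, 0x2fff].  Inserting an
 * IOMMU_RESV_MSI region over the same range is added as a separate entry
 * instead, since regions of different types are never merged.
 */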
288 
289 static int
290 iommu_insert_device_resv_regions(struct list_head *dev_resv_regions,
291 				 struct list_head *group_resv_regions)
292 {
293 	struct iommu_resv_region *entry;
294 	int ret = 0;
295 
296 	list_for_each_entry(entry, dev_resv_regions, list) {
297 		ret = iommu_insert_resv_region(entry, group_resv_regions);
298 		if (ret)
299 			break;
300 	}
301 	return ret;
302 }
303 
304 int iommu_get_group_resv_regions(struct iommu_group *group,
305 				 struct list_head *head)
306 {
307 	struct group_device *device;
308 	int ret = 0;
309 
310 	mutex_lock(&group->mutex);
311 	list_for_each_entry(device, &group->devices, list) {
312 		struct list_head dev_resv_regions;
313 
314 		INIT_LIST_HEAD(&dev_resv_regions);
315 		iommu_get_resv_regions(device->dev, &dev_resv_regions);
316 		ret = iommu_insert_device_resv_regions(&dev_resv_regions, head);
317 		iommu_put_resv_regions(device->dev, &dev_resv_regions);
318 		if (ret)
319 			break;
320 	}
321 	mutex_unlock(&group->mutex);
322 	return ret;
323 }
324 EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions);
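
/*
 * Example caller (sketch): the returned entries are allocated copies, so the
 * caller must free them, as the sysfs handler below does.
 *
 *	LIST_HEAD(resv);
 *	struct iommu_resv_region *r, *next;
 *
 *	iommu_get_group_resv_regions(group, &resv);
 *	list_for_each_entry_safe(r, next, &resv, list) {
 *		pr_info("resv [%pa + 0x%zx] type %d\n",
 *			&r->start, r->length, r->type);
 *		kfree(r);
 *	}
 */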
325 
326 static ssize_t iommu_group_show_resv_regions(struct iommu_group *group,
327 					     char *buf)
328 {
329 	struct iommu_resv_region *region, *next;
330 	struct list_head group_resv_regions;
331 	char *str = buf;
332 
333 	INIT_LIST_HEAD(&group_resv_regions);
334 	iommu_get_group_resv_regions(group, &group_resv_regions);
335 
336 	list_for_each_entry_safe(region, next, &group_resv_regions, list) {
337 		str += sprintf(str, "0x%016llx 0x%016llx %s\n",
338 			       (long long int)region->start,
339 			       (long long int)(region->start +
340 						region->length - 1),
341 			       iommu_group_resv_type_string[region->type]);
342 		kfree(region);
343 	}
344 
345 	return (str - buf);
346 }
347 
348 static ssize_t iommu_group_show_type(struct iommu_group *group,
349 				     char *buf)
350 {
351 	char *type = "unknown\n";
352 
353 	if (group->default_domain) {
354 		switch (group->default_domain->type) {
355 		case IOMMU_DOMAIN_BLOCKED:
356 			type = "blocked\n";
357 			break;
358 		case IOMMU_DOMAIN_IDENTITY:
359 			type = "identity\n";
360 			break;
361 		case IOMMU_DOMAIN_UNMANAGED:
362 			type = "unmanaged\n";
363 			break;
364 		case IOMMU_DOMAIN_DMA:
365 			type = "DMA\n";
366 			break;
367 		}
368 	}
369 	strcpy(buf, type);
370 
371 	return strlen(type);
372 }
373 
374 static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);
375 
376 static IOMMU_GROUP_ATTR(reserved_regions, 0444,
377 			iommu_group_show_resv_regions, NULL);
378 
379 static IOMMU_GROUP_ATTR(type, 0444, iommu_group_show_type, NULL);
380 
381 static void iommu_group_release(struct kobject *kobj)
382 {
383 	struct iommu_group *group = to_iommu_group(kobj);
384 
385 	pr_debug("Releasing group %d\n", group->id);
386 
387 	if (group->iommu_data_release)
388 		group->iommu_data_release(group->iommu_data);
389 
390 	ida_simple_remove(&iommu_group_ida, group->id);
391 
392 	if (group->default_domain)
393 		iommu_domain_free(group->default_domain);
394 
395 	kfree(group->name);
396 	kfree(group);
397 }
398 
399 static struct kobj_type iommu_group_ktype = {
400 	.sysfs_ops = &iommu_group_sysfs_ops,
401 	.release = iommu_group_release,
402 };
403 
404 /**
405  * iommu_group_alloc - Allocate a new group
406  *
407  * This function is called by an iommu driver to allocate a new iommu
408  * group.  The iommu group represents the minimum granularity of the iommu.
409  * Upon successful return, the caller holds a reference to the returned
410  * group in order to hold the group until devices are added.  Use
411  * iommu_group_put() to release this extra reference count, allowing the
412  * group to be automatically reclaimed once it has no devices or external
413  * references.
414  */
415 struct iommu_group *iommu_group_alloc(void)
416 {
417 	struct iommu_group *group;
418 	int ret;
419 
420 	group = kzalloc(sizeof(*group), GFP_KERNEL);
421 	if (!group)
422 		return ERR_PTR(-ENOMEM);
423 
424 	group->kobj.kset = iommu_group_kset;
425 	mutex_init(&group->mutex);
426 	INIT_LIST_HEAD(&group->devices);
427 	BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);
428 
429 	ret = ida_simple_get(&iommu_group_ida, 0, 0, GFP_KERNEL);
430 	if (ret < 0) {
431 		kfree(group);
432 		return ERR_PTR(ret);
433 	}
434 	group->id = ret;
435 
436 	ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
437 				   NULL, "%d", group->id);
438 	if (ret) {
439 		ida_simple_remove(&iommu_group_ida, group->id);
440 		kfree(group);
441 		return ERR_PTR(ret);
442 	}
443 
444 	group->devices_kobj = kobject_create_and_add("devices", &group->kobj);
445 	if (!group->devices_kobj) {
446 		kobject_put(&group->kobj); /* triggers .release & free */
447 		return ERR_PTR(-ENOMEM);
448 	}
449 
450 	/*
451 	 * The devices_kobj holds a reference on the group kobject, so
452 	 * as long as that exists so will the group.  We can therefore
453 	 * use the devices_kobj for reference counting.
454 	 */
455 	kobject_put(&group->kobj);
456 
457 	ret = iommu_group_create_file(group,
458 				      &iommu_group_attr_reserved_regions);
459 	if (ret)
460 		return ERR_PTR(ret);
461 
462 	ret = iommu_group_create_file(group, &iommu_group_attr_type);
463 	if (ret)
464 		return ERR_PTR(ret);
465 
466 	pr_debug("Allocated group %d\n", group->id);
467 
468 	return group;
469 }
470 EXPORT_SYMBOL_GPL(iommu_group_alloc);
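
/*
 * Example (sketch): a driver's add_device path that manages groups by hand
 * might allocate one, name it and add the device; error handling is
 * abbreviated and the group name is illustrative.
 *
 *	struct iommu_group *group;
 *	int ret;
 *
 *	group = iommu_group_alloc();
 *	if (IS_ERR(group))
 *		return PTR_ERR(group);
 *	iommu_group_set_name(group, "my-group");
 *	ret = iommu_group_add_device(group, dev);
 *	iommu_group_put(group);		// the device now pins the group
 *	return ret;
 */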
471 
472 struct iommu_group *iommu_group_get_by_id(int id)
473 {
474 	struct kobject *group_kobj;
475 	struct iommu_group *group;
476 	const char *name;
477 
478 	if (!iommu_group_kset)
479 		return NULL;
480 
481 	name = kasprintf(GFP_KERNEL, "%d", id);
482 	if (!name)
483 		return NULL;
484 
485 	group_kobj = kset_find_obj(iommu_group_kset, name);
486 	kfree(name);
487 
488 	if (!group_kobj)
489 		return NULL;
490 
491 	group = container_of(group_kobj, struct iommu_group, kobj);
492 	BUG_ON(group->id != id);
493 
494 	kobject_get(group->devices_kobj);
495 	kobject_put(&group->kobj);
496 
497 	return group;
498 }
499 EXPORT_SYMBOL_GPL(iommu_group_get_by_id);
500 
501 /**
502  * iommu_group_get_iommudata - retrieve iommu_data registered for a group
503  * @group: the group
504  *
505  * iommu drivers can store data in the group for use when doing iommu
506  * operations.  This function provides a way to retrieve it.  Caller
507  * should hold a group reference.
508  */
509 void *iommu_group_get_iommudata(struct iommu_group *group)
510 {
511 	return group->iommu_data;
512 }
513 EXPORT_SYMBOL_GPL(iommu_group_get_iommudata);
514 
515 /**
516  * iommu_group_set_iommudata - set iommu_data for a group
517  * @group: the group
518  * @iommu_data: new data
519  * @release: release function for iommu_data
520  *
521  * iommu drivers can store data in the group for use when doing iommu
522  * operations.  This function provides a way to set the data after
523  * the group has been allocated.  Caller should hold a group reference.
524  */
525 void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
526 			       void (*release)(void *iommu_data))
527 {
528 	group->iommu_data = iommu_data;
529 	group->iommu_data_release = release;
530 }
531 EXPORT_SYMBOL_GPL(iommu_group_set_iommudata);
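
/*
 * Example (sketch): per-group driver state hung off the group, freed
 * automatically when the group is released.  "struct my_group_data" and
 * my_group_data_release() are hypothetical.
 *
 *	struct my_group_data *gd = kzalloc(sizeof(*gd), GFP_KERNEL);
 *
 *	iommu_group_set_iommudata(group, gd, my_group_data_release);
 *	// ... later, from another iommu op on the same group ...
 *	gd = iommu_group_get_iommudata(group);
 */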
532 
533 /**
534  * iommu_group_set_name - set name for a group
535  * @group: the group
536  * @name: name
537  *
538  * Allow iommu driver to set a name for a group.  When set it will
539  * appear in a name attribute file under the group in sysfs.
540  */
541 int iommu_group_set_name(struct iommu_group *group, const char *name)
542 {
543 	int ret;
544 
545 	if (group->name) {
546 		iommu_group_remove_file(group, &iommu_group_attr_name);
547 		kfree(group->name);
548 		group->name = NULL;
549 		if (!name)
550 			return 0;
551 	}
552 
553 	group->name = kstrdup(name, GFP_KERNEL);
554 	if (!group->name)
555 		return -ENOMEM;
556 
557 	ret = iommu_group_create_file(group, &iommu_group_attr_name);
558 	if (ret) {
559 		kfree(group->name);
560 		group->name = NULL;
561 		return ret;
562 	}
563 
564 	return 0;
565 }
566 EXPORT_SYMBOL_GPL(iommu_group_set_name);
567 
568 static int iommu_group_create_direct_mappings(struct iommu_group *group,
569 					      struct device *dev)
570 {
571 	struct iommu_domain *domain = group->default_domain;
572 	struct iommu_resv_region *entry;
573 	struct list_head mappings;
574 	unsigned long pg_size;
575 	int ret = 0;
576 
577 	if (!domain || domain->type != IOMMU_DOMAIN_DMA)
578 		return 0;
579 
580 	BUG_ON(!domain->pgsize_bitmap);
581 
582 	pg_size = 1UL << __ffs(domain->pgsize_bitmap);
583 	INIT_LIST_HEAD(&mappings);
584 
585 	iommu_get_resv_regions(dev, &mappings);
586 
587 	/* We need to consider overlapping regions for different devices */
588 	list_for_each_entry(entry, &mappings, list) {
589 		dma_addr_t start, end, addr;
590 
591 		if (domain->ops->apply_resv_region)
592 			domain->ops->apply_resv_region(dev, domain, entry);
593 
594 		start = ALIGN(entry->start, pg_size);
595 		end   = ALIGN(entry->start + entry->length, pg_size);
596 
597 		if (entry->type != IOMMU_RESV_DIRECT &&
598 		    entry->type != IOMMU_RESV_DIRECT_RELAXABLE)
599 			continue;
600 
601 		for (addr = start; addr < end; addr += pg_size) {
602 			phys_addr_t phys_addr;
603 
604 			phys_addr = iommu_iova_to_phys(domain, addr);
605 			if (phys_addr)
606 				continue;
607 
608 			ret = iommu_map(domain, addr, addr, pg_size, entry->prot);
609 			if (ret)
610 				goto out;
611 		}
612 
613 	}
614 
615 	iommu_flush_tlb_all(domain);
616 
617 out:
618 	iommu_put_resv_regions(dev, &mappings);
619 
620 	return ret;
621 }
622 
623 /**
624  * iommu_group_add_device - add a device to an iommu group
625  * @group: the group into which to add the device (reference should be held)
626  * @dev: the device
627  *
628  * This function is called by an iommu driver to add a device into a
629  * group.  Adding a device increments the group reference count.
630  */
631 int iommu_group_add_device(struct iommu_group *group, struct device *dev)
632 {
633 	int ret, i = 0;
634 	struct group_device *device;
635 
636 	device = kzalloc(sizeof(*device), GFP_KERNEL);
637 	if (!device)
638 		return -ENOMEM;
639 
640 	device->dev = dev;
641 
642 	ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
643 	if (ret)
644 		goto err_free_device;
645 
646 	device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
647 rename:
648 	if (!device->name) {
649 		ret = -ENOMEM;
650 		goto err_remove_link;
651 	}
652 
653 	ret = sysfs_create_link_nowarn(group->devices_kobj,
654 				       &dev->kobj, device->name);
655 	if (ret) {
656 		if (ret == -EEXIST && i >= 0) {
657 			/*
658 			 * Account for the slim chance of collision
659 			 * and append an instance to the name.
660 			 */
661 			kfree(device->name);
662 			device->name = kasprintf(GFP_KERNEL, "%s.%d",
663 						 kobject_name(&dev->kobj), i++);
664 			goto rename;
665 		}
666 		goto err_free_name;
667 	}
668 
669 	kobject_get(group->devices_kobj);
670 
671 	dev->iommu_group = group;
672 
673 	iommu_group_create_direct_mappings(group, dev);
674 
675 	mutex_lock(&group->mutex);
676 	list_add_tail(&device->list, &group->devices);
677 	if (group->domain)
678 		ret = __iommu_attach_device(group->domain, dev);
679 	mutex_unlock(&group->mutex);
680 	if (ret)
681 		goto err_put_group;
682 
683 	/* Notify any listeners about change to group. */
684 	blocking_notifier_call_chain(&group->notifier,
685 				     IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);
686 
687 	trace_add_device_to_group(group->id, dev);
688 
689 	dev_info(dev, "Adding to iommu group %d\n", group->id);
690 
691 	return 0;
692 
693 err_put_group:
694 	mutex_lock(&group->mutex);
695 	list_del(&device->list);
696 	mutex_unlock(&group->mutex);
697 	dev->iommu_group = NULL;
698 	kobject_put(group->devices_kobj);
699 err_free_name:
700 	kfree(device->name);
701 err_remove_link:
702 	sysfs_remove_link(&dev->kobj, "iommu_group");
703 err_free_device:
704 	kfree(device);
705 	dev_err(dev, "Failed to add to iommu group %d: %d\n", group->id, ret);
706 	return ret;
707 }
708 EXPORT_SYMBOL_GPL(iommu_group_add_device);
709 
710 /**
711  * iommu_group_remove_device - remove a device from its current group
712  * @dev: device to be removed
713  *
714  * This function is called by an iommu driver to remove the device from
715  * its current group.  This decrements the iommu group reference count.
716  */
717 void iommu_group_remove_device(struct device *dev)
718 {
719 	struct iommu_group *group = dev->iommu_group;
720 	struct group_device *tmp_device, *device = NULL;
721 
722 	dev_info(dev, "Removing from iommu group %d\n", group->id);
723 
724 	/* Pre-notify listeners that a device is being removed. */
725 	blocking_notifier_call_chain(&group->notifier,
726 				     IOMMU_GROUP_NOTIFY_DEL_DEVICE, dev);
727 
728 	mutex_lock(&group->mutex);
729 	list_for_each_entry(tmp_device, &group->devices, list) {
730 		if (tmp_device->dev == dev) {
731 			device = tmp_device;
732 			list_del(&device->list);
733 			break;
734 		}
735 	}
736 	mutex_unlock(&group->mutex);
737 
738 	if (!device)
739 		return;
740 
741 	sysfs_remove_link(group->devices_kobj, device->name);
742 	sysfs_remove_link(&dev->kobj, "iommu_group");
743 
744 	trace_remove_device_from_group(group->id, dev);
745 
746 	kfree(device->name);
747 	kfree(device);
748 	dev->iommu_group = NULL;
749 	kobject_put(group->devices_kobj);
750 }
751 EXPORT_SYMBOL_GPL(iommu_group_remove_device);
752 
753 static int iommu_group_device_count(struct iommu_group *group)
754 {
755 	struct group_device *entry;
756 	int ret = 0;
757 
758 	list_for_each_entry(entry, &group->devices, list)
759 		ret++;
760 
761 	return ret;
762 }
763 
764 /**
765  * iommu_group_for_each_dev - iterate over each device in the group
766  * @group: the group
767  * @data: caller opaque data to be passed to callback function
768  * @fn: caller supplied callback function
769  *
770  * This function is called by group users to iterate over group devices.
771  * Callers should hold a reference count to the group during callback.
772  * The group->mutex is held across callbacks, which will block calls to
773  * iommu_group_add/remove_device.
774  */
775 static int __iommu_group_for_each_dev(struct iommu_group *group, void *data,
776 				      int (*fn)(struct device *, void *))
777 {
778 	struct group_device *device;
779 	int ret = 0;
780 
781 	list_for_each_entry(device, &group->devices, list) {
782 		ret = fn(device->dev, data);
783 		if (ret)
784 			break;
785 	}
786 	return ret;
787 }
788 
789 
790 int iommu_group_for_each_dev(struct iommu_group *group, void *data,
791 			     int (*fn)(struct device *, void *))
792 {
793 	int ret;
794 
795 	mutex_lock(&group->mutex);
796 	ret = __iommu_group_for_each_dev(group, data, fn);
797 	mutex_unlock(&group->mutex);
798 
799 	return ret;
800 }
801 EXPORT_SYMBOL_GPL(iommu_group_for_each_dev);
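
/*
 * Example (sketch): counting the devices in a group while holding a group
 * reference.  The callback runs under group->mutex, so it must not call
 * iommu_group_add_device() or iommu_group_remove_device().
 *
 *	static int count_dev(struct device *dev, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;		// non-zero stops the iteration
 *	}
 *
 *	int count = 0;
 *	iommu_group_for_each_dev(group, &count, count_dev);
 */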
802 
803 /**
804  * iommu_group_get - Return the group for a device and increment reference
805  * @dev: get the group that this device belongs to
806  *
807  * This function is called by iommu drivers and users to get the group
808  * for the specified device.  If found, the group is returned and the group
809  * reference is incremented; otherwise NULL is returned.
810  */
811 struct iommu_group *iommu_group_get(struct device *dev)
812 {
813 	struct iommu_group *group = dev->iommu_group;
814 
815 	if (group)
816 		kobject_get(group->devices_kobj);
817 
818 	return group;
819 }
820 EXPORT_SYMBOL_GPL(iommu_group_get);
821 
822 /**
823  * iommu_group_ref_get - Increment reference on a group
824  * @group: the group to use, must not be NULL
825  *
826  * This function is called by iommu drivers to take additional references on an
827  * existing group.  Returns the given group for convenience.
828  */
829 struct iommu_group *iommu_group_ref_get(struct iommu_group *group)
830 {
831 	kobject_get(group->devices_kobj);
832 	return group;
833 }
834 
835 /**
836  * iommu_group_put - Decrement group reference
837  * @group: the group to use
838  *
839  * This function is called by iommu drivers and users to release the
840  * iommu group.  Once the reference count is zero, the group is released.
841  */
842 void iommu_group_put(struct iommu_group *group)
843 {
844 	if (group)
845 		kobject_put(group->devices_kobj);
846 }
847 EXPORT_SYMBOL_GPL(iommu_group_put);
848 
849 /**
850  * iommu_group_register_notifier - Register a notifier for group changes
851  * @group: the group to watch
852  * @nb: notifier block to signal
853  *
854  * This function allows iommu group users to track changes in a group.
855  * See include/linux/iommu.h for actions sent via this notifier.  Caller
856  * should hold a reference to the group throughout notifier registration.
857  */
858 int iommu_group_register_notifier(struct iommu_group *group,
859 				  struct notifier_block *nb)
860 {
861 	return blocking_notifier_chain_register(&group->notifier, nb);
862 }
863 EXPORT_SYMBOL_GPL(iommu_group_register_notifier);
864 
865 /**
866  * iommu_group_unregister_notifier - Unregister a notifier
867  * @group: the group to watch
868  * @nb: notifier block to signal
869  *
870  * Unregister a previously registered group notifier block.
871  */
872 int iommu_group_unregister_notifier(struct iommu_group *group,
873 				    struct notifier_block *nb)
874 {
875 	return blocking_notifier_chain_unregister(&group->notifier, nb);
876 }
877 EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier);
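
/*
 * Example (sketch): a group user such as VFIO tracks device add/remove with
 * a notifier block; the action values are defined in include/linux/iommu.h.
 *
 *	static int my_group_notify(struct notifier_block *nb,
 *				   unsigned long action, void *data)
 *	{
 *		struct device *dev = data;
 *
 *		if (action == IOMMU_GROUP_NOTIFY_ADD_DEVICE)
 *			dev_info(dev, "added to iommu group\n");
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = { .notifier_call = my_group_notify };
 *
 *	iommu_group_register_notifier(group, &my_nb);
 *	// ...
 *	iommu_group_unregister_notifier(group, &my_nb);
 */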
878 
879 /**
880  * iommu_register_device_fault_handler() - Register a device fault handler
881  * @dev: the device
882  * @handler: the fault handler
883  * @data: private data passed as argument to the handler
884  *
885  * When an IOMMU fault event is received, this handler gets called with the
886  * fault event and data as argument. The handler should return 0 on success. If
887  * the fault is recoverable (IOMMU_FAULT_PAGE_REQ), the consumer should also
888  * complete the fault by calling iommu_page_response() with one of the following
889  * response codes:
890  * - IOMMU_PAGE_RESP_SUCCESS: retry the translation
891  * - IOMMU_PAGE_RESP_INVALID: terminate the fault
892  * - IOMMU_PAGE_RESP_FAILURE: terminate the fault and stop reporting
893  *   page faults if possible.
894  *
895  * Return 0 if the fault handler was installed successfully, or an error.
896  */
897 int iommu_register_device_fault_handler(struct device *dev,
898 					iommu_dev_fault_handler_t handler,
899 					void *data)
900 {
901 	struct iommu_param *param = dev->iommu_param;
902 	int ret = 0;
903 
904 	if (!param)
905 		return -EINVAL;
906 
907 	mutex_lock(&param->lock);
908 	/* Only allow one fault handler registered for each device */
909 	if (param->fault_param) {
910 		ret = -EBUSY;
911 		goto done_unlock;
912 	}
913 
914 	get_device(dev);
915 	param->fault_param = kzalloc(sizeof(*param->fault_param), GFP_KERNEL);
916 	if (!param->fault_param) {
917 		put_device(dev);
918 		ret = -ENOMEM;
919 		goto done_unlock;
920 	}
921 	param->fault_param->handler = handler;
922 	param->fault_param->data = data;
923 	mutex_init(&param->fault_param->lock);
924 	INIT_LIST_HEAD(&param->fault_param->faults);
925 
926 done_unlock:
927 	mutex_unlock(&param->lock);
928 
929 	return ret;
930 }
931 EXPORT_SYMBOL_GPL(iommu_register_device_fault_handler);
932 
933 /**
934  * iommu_unregister_device_fault_handler() - Unregister the device fault handler
935  * @dev: the device
936  *
937  * Remove the device fault handler installed with
938  * iommu_register_device_fault_handler().
939  *
940  * Return 0 on success, or an error.
941  */
942 int iommu_unregister_device_fault_handler(struct device *dev)
943 {
944 	struct iommu_param *param = dev->iommu_param;
945 	int ret = 0;
946 
947 	if (!param)
948 		return -EINVAL;
949 
950 	mutex_lock(&param->lock);
951 
952 	if (!param->fault_param)
953 		goto unlock;
954 
955 	/* we cannot unregister handler if there are pending faults */
956 	if (!list_empty(&param->fault_param->faults)) {
957 		ret = -EBUSY;
958 		goto unlock;
959 	}
960 
961 	kfree(param->fault_param);
962 	param->fault_param = NULL;
963 	put_device(dev);
964 unlock:
965 	mutex_unlock(&param->lock);
966 
967 	return ret;
968 }
969 EXPORT_SYMBOL_GPL(iommu_unregister_device_fault_handler);
970 
971 /**
972  * iommu_report_device_fault() - Report fault event to device driver
973  * @dev: the device
974  * @evt: fault event data
975  *
976  * Called by IOMMU drivers when a fault is detected, typically in a threaded IRQ
977  * handler. When this function fails and the fault is recoverable, it is the
978  * caller's responsibility to complete the fault.
979  *
980  * Return 0 on success, or an error.
981  */
982 int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
983 {
984 	struct iommu_param *param = dev->iommu_param;
985 	struct iommu_fault_event *evt_pending = NULL;
986 	struct iommu_fault_param *fparam;
987 	int ret = 0;
988 
989 	if (!param || !evt)
990 		return -EINVAL;
991 
992 	/* we only report device fault if there is a handler registered */
993 	mutex_lock(&param->lock);
994 	fparam = param->fault_param;
995 	if (!fparam || !fparam->handler) {
996 		ret = -EINVAL;
997 		goto done_unlock;
998 	}
999 
1000 	if (evt->fault.type == IOMMU_FAULT_PAGE_REQ &&
1001 	    (evt->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
1002 		evt_pending = kmemdup(evt, sizeof(struct iommu_fault_event),
1003 				      GFP_KERNEL);
1004 		if (!evt_pending) {
1005 			ret = -ENOMEM;
1006 			goto done_unlock;
1007 		}
1008 		mutex_lock(&fparam->lock);
1009 		list_add_tail(&evt_pending->list, &fparam->faults);
1010 		mutex_unlock(&fparam->lock);
1011 	}
1012 
1013 	ret = fparam->handler(&evt->fault, fparam->data);
1014 	if (ret && evt_pending) {
1015 		mutex_lock(&fparam->lock);
1016 		list_del(&evt_pending->list);
1017 		mutex_unlock(&fparam->lock);
1018 		kfree(evt_pending);
1019 	}
1020 done_unlock:
1021 	mutex_unlock(&param->lock);
1022 	return ret;
1023 }
1024 EXPORT_SYMBOL_GPL(iommu_report_device_fault);
1025 
1026 int iommu_page_response(struct device *dev,
1027 			struct iommu_page_response *msg)
1028 {
1029 	bool pasid_valid;
1030 	int ret = -EINVAL;
1031 	struct iommu_fault_event *evt;
1032 	struct iommu_fault_page_request *prm;
1033 	struct iommu_param *param = dev->iommu_param;
1034 	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
1035 
1036 	if (!domain || !domain->ops->page_response)
1037 		return -ENODEV;
1038 
1039 	if (!param || !param->fault_param)
1040 		return -EINVAL;
1041 
1042 	if (msg->version != IOMMU_PAGE_RESP_VERSION_1 ||
1043 	    msg->flags & ~IOMMU_PAGE_RESP_PASID_VALID)
1044 		return -EINVAL;
1045 
1046 	/* Only send response if there is a fault report pending */
1047 	mutex_lock(&param->fault_param->lock);
1048 	if (list_empty(&param->fault_param->faults)) {
1049 		dev_warn_ratelimited(dev, "no pending PRQ, drop response\n");
1050 		goto done_unlock;
1051 	}
1052 	/*
1053 	 * Check if we have a matching page request pending to respond,
1054 	 * otherwise return -EINVAL
1055 	 */
1056 	list_for_each_entry(evt, &param->fault_param->faults, list) {
1057 		prm = &evt->fault.prm;
1058 		pasid_valid = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
1059 
1060 		if ((pasid_valid && prm->pasid != msg->pasid) ||
1061 		    prm->grpid != msg->grpid)
1062 			continue;
1063 
1064 		/* Sanitize the reply */
1065 		msg->flags = pasid_valid ? IOMMU_PAGE_RESP_PASID_VALID : 0;
1066 
1067 		ret = domain->ops->page_response(dev, evt, msg);
1068 		list_del(&evt->list);
1069 		kfree(evt);
1070 		break;
1071 	}
1072 
1073 done_unlock:
1074 	mutex_unlock(&param->fault_param->lock);
1075 	return ret;
1076 }
1077 EXPORT_SYMBOL_GPL(iommu_page_response);
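
/*
 * Example (sketch): a consumer completing recoverable page requests.  A real
 * handler would normally defer the work to process context; the field names
 * follow the uAPI in include/uapi/linux/iommu.h, and "data" is the device
 * pointer passed at registration time.
 *
 *	static int my_dev_fault(struct iommu_fault *fault, void *data)
 *	{
 *		struct iommu_page_response resp = {
 *			.version = IOMMU_PAGE_RESP_VERSION_1,
 *			.pasid	 = fault->prm.pasid,
 *			.grpid	 = fault->prm.grpid,
 *			.code	 = IOMMU_PAGE_RESP_SUCCESS,
 *		};
 *
 *		if (fault->type != IOMMU_FAULT_PAGE_REQ)
 *			return -EOPNOTSUPP;
 *		// ... make the faulting mapping valid here ...
 *		return iommu_page_response(data, &resp);
 *	}
 *
 *	iommu_register_device_fault_handler(dev, my_dev_fault, dev);
 */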
1078 
1079 /**
1080  * iommu_group_id - Return ID for a group
1081  * @group: the group to ID
1082  *
1083  * Return the unique ID for the group matching the sysfs group number.
1084  */
1085 int iommu_group_id(struct iommu_group *group)
1086 {
1087 	return group->id;
1088 }
1089 EXPORT_SYMBOL_GPL(iommu_group_id);
1090 
1091 static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
1092 					       unsigned long *devfns);
1093 
1094 /*
1095  * To consider a PCI device isolated, we require ACS to support Source
1096  * Validation, Request Redirection, Completer Redirection, and Upstream
1097  * Forwarding.  This effectively means that devices cannot spoof their
1098  * requester ID, requests and completions cannot be redirected, and all
1099  * transactions are forwarded upstream, even when they pass through a
1100  * bridge where the target device is downstream.
1101  */
1102 #define REQ_ACS_FLAGS   (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)
1103 
1104 /*
1105  * For multifunction devices which are not isolated from each other, find
1106  * all the other non-isolated functions and look for existing groups.  For
1107  * each function, we also need to look for aliases to or from other devices
1108  * that may already have a group.
1109  */
1110 static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev,
1111 							unsigned long *devfns)
1112 {
1113 	struct pci_dev *tmp = NULL;
1114 	struct iommu_group *group;
1115 
1116 	if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS))
1117 		return NULL;
1118 
1119 	for_each_pci_dev(tmp) {
1120 		if (tmp == pdev || tmp->bus != pdev->bus ||
1121 		    PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) ||
1122 		    pci_acs_enabled(tmp, REQ_ACS_FLAGS))
1123 			continue;
1124 
1125 		group = get_pci_alias_group(tmp, devfns);
1126 		if (group) {
1127 			pci_dev_put(tmp);
1128 			return group;
1129 		}
1130 	}
1131 
1132 	return NULL;
1133 }
1134 
1135 /*
1136  * Look for aliases to or from the given device for existing groups. DMA
1137  * aliases are only supported on the same bus, therefore the search
1138  * space is quite small (especially since we're really only looking at PCIe
1139  * devices, and therefore only expect multiple slots on the root complex or
1140  * downstream switch ports).  It's conceivable though that a pair of
1141  * multifunction devices could have aliases between them that would cause a
1142  * loop.  To prevent this, we use a bitmap to track where we've been.
1143  */
1144 static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
1145 					       unsigned long *devfns)
1146 {
1147 	struct pci_dev *tmp = NULL;
1148 	struct iommu_group *group;
1149 
1150 	if (test_and_set_bit(pdev->devfn & 0xff, devfns))
1151 		return NULL;
1152 
1153 	group = iommu_group_get(&pdev->dev);
1154 	if (group)
1155 		return group;
1156 
1157 	for_each_pci_dev(tmp) {
1158 		if (tmp == pdev || tmp->bus != pdev->bus)
1159 			continue;
1160 
1161 		/* We alias them or they alias us */
1162 		if (pci_devs_are_dma_aliases(pdev, tmp)) {
1163 			group = get_pci_alias_group(tmp, devfns);
1164 			if (group) {
1165 				pci_dev_put(tmp);
1166 				return group;
1167 			}
1168 
1169 			group = get_pci_function_alias_group(tmp, devfns);
1170 			if (group) {
1171 				pci_dev_put(tmp);
1172 				return group;
1173 			}
1174 		}
1175 	}
1176 
1177 	return NULL;
1178 }
1179 
1180 struct group_for_pci_data {
1181 	struct pci_dev *pdev;
1182 	struct iommu_group *group;
1183 };
1184 
1185 /*
1186  * DMA alias iterator callback, return the last seen device.  Stop and return
1187  * the IOMMU group if we find one along the way.
1188  */
1189 static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque)
1190 {
1191 	struct group_for_pci_data *data = opaque;
1192 
1193 	data->pdev = pdev;
1194 	data->group = iommu_group_get(&pdev->dev);
1195 
1196 	return data->group != NULL;
1197 }
1198 
1199 /*
1200  * Generic device_group call-back function. It just allocates one
1201  * iommu-group per device.
1202  */
1203 struct iommu_group *generic_device_group(struct device *dev)
1204 {
1205 	return iommu_group_alloc();
1206 }
1207 
1208 /*
1209  * Use standard PCI bus topology, isolation features, and DMA alias quirks
1210  * to find or create an IOMMU group for a device.
1211  */
1212 struct iommu_group *pci_device_group(struct device *dev)
1213 {
1214 	struct pci_dev *pdev = to_pci_dev(dev);
1215 	struct group_for_pci_data data;
1216 	struct pci_bus *bus;
1217 	struct iommu_group *group = NULL;
1218 	u64 devfns[4] = { 0 };
1219 
1220 	if (WARN_ON(!dev_is_pci(dev)))
1221 		return ERR_PTR(-EINVAL);
1222 
1223 	/*
1224 	 * Find the upstream DMA alias for the device.  A device must not
1225 	 * be aliased due to topology in order to have its own IOMMU group.
1226 	 * If we find an alias along the way that already belongs to a
1227 	 * group, use it.
1228 	 */
1229 	if (pci_for_each_dma_alias(pdev, get_pci_alias_or_group, &data))
1230 		return data.group;
1231 
1232 	pdev = data.pdev;
1233 
1234 	/*
1235 	 * Continue upstream from the point of minimum IOMMU granularity
1236 	 * due to aliases to the point where devices are protected from
1237 	 * peer-to-peer DMA by PCI ACS.  Again, if we find an existing
1238 	 * group, use it.
1239 	 */
1240 	for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) {
1241 		if (!bus->self)
1242 			continue;
1243 
1244 		if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
1245 			break;
1246 
1247 		pdev = bus->self;
1248 
1249 		group = iommu_group_get(&pdev->dev);
1250 		if (group)
1251 			return group;
1252 	}
1253 
1254 	/*
1255 	 * Look for existing groups on device aliases.  If we alias another
1256 	 * device or another device aliases us, use the same group.
1257 	 */
1258 	group = get_pci_alias_group(pdev, (unsigned long *)devfns);
1259 	if (group)
1260 		return group;
1261 
1262 	/*
1263 	 * Look for existing groups on non-isolated functions on the same
1264  * slot and aliases of those functions, if any.  No need to clear
1265 	 * the search bitmap, the tested devfns are still valid.
1266 	 */
1267 	group = get_pci_function_alias_group(pdev, (unsigned long *)devfns);
1268 	if (group)
1269 		return group;
1270 
1271 	/* No shared group found, allocate new */
1272 	return iommu_group_alloc();
1273 }
1274 
1275 /* Get the IOMMU group for device on fsl-mc bus */
1276 struct iommu_group *fsl_mc_device_group(struct device *dev)
1277 {
1278 	struct device *cont_dev = fsl_mc_cont_dev(dev);
1279 	struct iommu_group *group;
1280 
1281 	group = iommu_group_get(cont_dev);
1282 	if (!group)
1283 		group = iommu_group_alloc();
1284 	return group;
1285 }
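
/*
 * Example (sketch): drivers plug the helpers above into their iommu_ops
 * rather than open-coding group creation; the core then calls back through
 * iommu_group_get_for_dev().  "my_iommu_ops" is illustrative.
 *
 *	static const struct iommu_ops my_iommu_ops = {
 *		// ...
 *		.device_group	= pci_device_group,	// or generic_device_group
 *		// ...
 *	};
 */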
1286 
1287 /**
1288  * iommu_group_get_for_dev - Find or create the IOMMU group for a device
1289  * @dev: target device
1290  *
1291  * This function is intended to be called by IOMMU drivers and extended to
1292  * support common, bus-defined algorithms when determining or creating the
1293  * IOMMU group for a device.  On success, the caller will hold a reference
1294  * to the returned IOMMU group, which will already include the provided
1295  * device.  The reference should be released with iommu_group_put().
1296  */
1297 struct iommu_group *iommu_group_get_for_dev(struct device *dev)
1298 {
1299 	const struct iommu_ops *ops = dev->bus->iommu_ops;
1300 	struct iommu_group *group;
1301 	int ret;
1302 
1303 	group = iommu_group_get(dev);
1304 	if (group)
1305 		return group;
1306 
1307 	if (!ops)
1308 		return ERR_PTR(-EINVAL);
1309 
1310 	group = ops->device_group(dev);
1311 	if (WARN_ON_ONCE(group == NULL))
1312 		return ERR_PTR(-EINVAL);
1313 
1314 	if (IS_ERR(group))
1315 		return group;
1316 
1317 	/*
1318 	 * Try to allocate a default domain - needs support from the
1319 	 * IOMMU driver.
1320 	 */
1321 	if (!group->default_domain) {
1322 		struct iommu_domain *dom;
1323 
1324 		dom = __iommu_domain_alloc(dev->bus, iommu_def_domain_type);
1325 		if (!dom && iommu_def_domain_type != IOMMU_DOMAIN_DMA) {
1326 			dom = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_DMA);
1327 			if (dom) {
1328 				dev_warn(dev,
1329 					 "failed to allocate default IOMMU domain of type %u; falling back to IOMMU_DOMAIN_DMA",
1330 					 iommu_def_domain_type);
1331 			}
1332 		}
1333 
1334 		group->default_domain = dom;
1335 		if (!group->domain)
1336 			group->domain = dom;
1337 
1338 		if (dom && !iommu_dma_strict) {
1339 			int attr = 1;
1340 			iommu_domain_set_attr(dom,
1341 					      DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE,
1342 					      &attr);
1343 		}
1344 	}
1345 
1346 	ret = iommu_group_add_device(group, dev);
1347 	if (ret) {
1348 		iommu_group_put(group);
1349 		return ERR_PTR(ret);
1350 	}
1351 
1352 	return group;
1353 }
1354 
1355 struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
1356 {
1357 	return group->default_domain;
1358 }
1359 
1360 static int add_iommu_group(struct device *dev, void *data)
1361 {
1362 	int ret = iommu_probe_device(dev);
1363 
1364 	/*
1365 	 * We ignore -ENODEV errors for now, as they just mean that the
1366 	 * device is not translated by an IOMMU. We still care about
1367 	 * other errors and fail to initialize when they happen.
1368 	 */
1369 	if (ret == -ENODEV)
1370 		ret = 0;
1371 
1372 	return ret;
1373 }
1374 
1375 static int remove_iommu_group(struct device *dev, void *data)
1376 {
1377 	iommu_release_device(dev);
1378 
1379 	return 0;
1380 }
1381 
1382 static int iommu_bus_notifier(struct notifier_block *nb,
1383 			      unsigned long action, void *data)
1384 {
1385 	unsigned long group_action = 0;
1386 	struct device *dev = data;
1387 	struct iommu_group *group;
1388 
1389 	/*
1390 	 * ADD/DEL call into iommu driver ops if provided, which may
1391 	 * result in ADD/DEL notifiers to group->notifier
1392 	 */
1393 	if (action == BUS_NOTIFY_ADD_DEVICE) {
1394 		int ret;
1395 
1396 		ret = iommu_probe_device(dev);
1397 		return (ret) ? NOTIFY_DONE : NOTIFY_OK;
1398 	} else if (action == BUS_NOTIFY_REMOVED_DEVICE) {
1399 		iommu_release_device(dev);
1400 		return NOTIFY_OK;
1401 	}
1402 
1403 	/*
1404 	 * Remaining BUS_NOTIFYs get filtered and republished to the
1405 	 * group, if anyone is listening
1406 	 */
1407 	group = iommu_group_get(dev);
1408 	if (!group)
1409 		return 0;
1410 
1411 	switch (action) {
1412 	case BUS_NOTIFY_BIND_DRIVER:
1413 		group_action = IOMMU_GROUP_NOTIFY_BIND_DRIVER;
1414 		break;
1415 	case BUS_NOTIFY_BOUND_DRIVER:
1416 		group_action = IOMMU_GROUP_NOTIFY_BOUND_DRIVER;
1417 		break;
1418 	case BUS_NOTIFY_UNBIND_DRIVER:
1419 		group_action = IOMMU_GROUP_NOTIFY_UNBIND_DRIVER;
1420 		break;
1421 	case BUS_NOTIFY_UNBOUND_DRIVER:
1422 		group_action = IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER;
1423 		break;
1424 	}
1425 
1426 	if (group_action)
1427 		blocking_notifier_call_chain(&group->notifier,
1428 					     group_action, dev);
1429 
1430 	iommu_group_put(group);
1431 	return 0;
1432 }
1433 
1434 static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
1435 {
1436 	int err;
1437 	struct notifier_block *nb;
1438 
1439 	nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
1440 	if (!nb)
1441 		return -ENOMEM;
1442 
1443 	nb->notifier_call = iommu_bus_notifier;
1444 
1445 	err = bus_register_notifier(bus, nb);
1446 	if (err)
1447 		goto out_free;
1448 
1449 	err = bus_for_each_dev(bus, NULL, NULL, add_iommu_group);
1450 	if (err)
1451 		goto out_err;
1452 
1453 
1454 	return 0;
1455 
1456 out_err:
1457 	/* Clean up */
1458 	bus_for_each_dev(bus, NULL, NULL, remove_iommu_group);
1459 	bus_unregister_notifier(bus, nb);
1460 
1461 out_free:
1462 	kfree(nb);
1463 
1464 	return err;
1465 }
1466 
1467 /**
1468  * bus_set_iommu - set iommu-callbacks for the bus
1469  * @bus: bus.
1470  * @ops: the callbacks provided by the iommu-driver
1471  *
1472  * This function is called by an iommu driver to set the iommu methods
1473  * used for a particular bus. Drivers for devices on that bus can use
1474  * the iommu-api after these ops are registered.
1475  * This special function is needed because IOMMUs are usually devices on
1476  * the bus itself, so the iommu drivers are not initialized when the bus
1477  * is set up. With this function the iommu-driver can set the iommu-ops
1478  * afterwards.
1479  */
1480 int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops)
1481 {
1482 	int err;
1483 
1484 	if (bus->iommu_ops != NULL)
1485 		return -EBUSY;
1486 
1487 	bus->iommu_ops = ops;
1488 
1489 	/* Do IOMMU specific setup for this bus-type */
1490 	err = iommu_bus_init(bus, ops);
1491 	if (err)
1492 		bus->iommu_ops = NULL;
1493 
1494 	return err;
1495 }
1496 EXPORT_SYMBOL_GPL(bus_set_iommu);
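
/*
 * Example (sketch): an IOMMU driver registers its ops for each bus type it
 * translates once the hardware has been probed:
 *
 *	ret = bus_set_iommu(&pci_bus_type, &my_iommu_ops);
 *	if (ret)
 *		return ret;
 *	bus_set_iommu(&platform_bus_type, &my_iommu_ops);
 */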
1497 
1498 bool iommu_present(struct bus_type *bus)
1499 {
1500 	return bus->iommu_ops != NULL;
1501 }
1502 EXPORT_SYMBOL_GPL(iommu_present);
1503 
1504 bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
1505 {
1506 	if (!bus->iommu_ops || !bus->iommu_ops->capable)
1507 		return false;
1508 
1509 	return bus->iommu_ops->capable(cap);
1510 }
1511 EXPORT_SYMBOL_GPL(iommu_capable);
1512 
1513 /**
1514  * iommu_set_fault_handler() - set a fault handler for an iommu domain
1515  * @domain: iommu domain
1516  * @handler: fault handler
1517  * @token: user data, will be passed back to the fault handler
1518  *
1519  * This function should be used by IOMMU users which want to be notified
1520  * whenever an IOMMU fault happens.
1521  *
1522  * The fault handler itself should return 0 on success, and an appropriate
1523  * error code otherwise.
1524  */
1525 void iommu_set_fault_handler(struct iommu_domain *domain,
1526 					iommu_fault_handler_t handler,
1527 					void *token)
1528 {
1529 	BUG_ON(!domain);
1530 
1531 	domain->handler = handler;
1532 	domain->handler_token = token;
1533 }
1534 EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
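
/*
 * Example (sketch): installing a handler on an unmanaged domain; the
 * signature is iommu_fault_handler_t from include/linux/iommu.h.  Returning
 * -ENOSYS keeps the driver's default reporting (see report_iommu_fault()).
 *
 *	static int my_domain_fault(struct iommu_domain *domain,
 *				   struct device *dev, unsigned long iova,
 *				   int flags, void *token)
 *	{
 *		dev_err(dev, "iommu fault at iova 0x%lx, flags 0x%x\n",
 *			iova, flags);
 *		return -ENOSYS;
 *	}
 *
 *	iommu_set_fault_handler(domain, my_domain_fault, NULL);
 */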
1535 
1536 static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
1537 						 unsigned type)
1538 {
1539 	struct iommu_domain *domain;
1540 
1541 	if (bus == NULL || bus->iommu_ops == NULL)
1542 		return NULL;
1543 
1544 	domain = bus->iommu_ops->domain_alloc(type);
1545 	if (!domain)
1546 		return NULL;
1547 
1548 	domain->ops  = bus->iommu_ops;
1549 	domain->type = type;
1550 	/* Assume all sizes by default; the driver may override this later */
1551 	domain->pgsize_bitmap  = bus->iommu_ops->pgsize_bitmap;
1552 
1553 	return domain;
1554 }
1555 
1556 struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
1557 {
1558 	return __iommu_domain_alloc(bus, IOMMU_DOMAIN_UNMANAGED);
1559 }
1560 EXPORT_SYMBOL_GPL(iommu_domain_alloc);
1561 
1562 void iommu_domain_free(struct iommu_domain *domain)
1563 {
1564 	domain->ops->domain_free(domain);
1565 }
1566 EXPORT_SYMBOL_GPL(iommu_domain_free);
1567 
1568 static int __iommu_attach_device(struct iommu_domain *domain,
1569 				 struct device *dev)
1570 {
1571 	int ret;
1572 	if ((domain->ops->is_attach_deferred != NULL) &&
1573 	    domain->ops->is_attach_deferred(domain, dev))
1574 		return 0;
1575 
1576 	if (unlikely(domain->ops->attach_dev == NULL))
1577 		return -ENODEV;
1578 
1579 	ret = domain->ops->attach_dev(domain, dev);
1580 	if (!ret)
1581 		trace_attach_device_to_domain(dev);
1582 	return ret;
1583 }
1584 
1585 int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
1586 {
1587 	struct iommu_group *group;
1588 	int ret;
1589 
1590 	group = iommu_group_get(dev);
1591 	if (!group)
1592 		return -ENODEV;
1593 
1594 	/*
1595 	 * Lock the group to make sure the device-count doesn't
1596 	 * change while we are attaching
1597 	 */
1598 	mutex_lock(&group->mutex);
1599 	ret = -EINVAL;
1600 	if (iommu_group_device_count(group) != 1)
1601 		goto out_unlock;
1602 
1603 	ret = __iommu_attach_group(domain, group);
1604 
1605 out_unlock:
1606 	mutex_unlock(&group->mutex);
1607 	iommu_group_put(group);
1608 
1609 	return ret;
1610 }
1611 EXPORT_SYMBOL_GPL(iommu_attach_device);
1612 
1613 static void __iommu_detach_device(struct iommu_domain *domain,
1614 				  struct device *dev)
1615 {
1616 	if ((domain->ops->is_attach_deferred != NULL) &&
1617 	    domain->ops->is_attach_deferred(domain, dev))
1618 		return;
1619 
1620 	if (unlikely(domain->ops->detach_dev == NULL))
1621 		return;
1622 
1623 	domain->ops->detach_dev(domain, dev);
1624 	trace_detach_device_from_domain(dev);
1625 }
1626 
1627 void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
1628 {
1629 	struct iommu_group *group;
1630 
1631 	group = iommu_group_get(dev);
1632 	if (!group)
1633 		return;
1634 
1635 	mutex_lock(&group->mutex);
1636 	if (iommu_group_device_count(group) != 1) {
1637 		WARN_ON(1);
1638 		goto out_unlock;
1639 	}
1640 
1641 	__iommu_detach_group(domain, group);
1642 
1643 out_unlock:
1644 	mutex_unlock(&group->mutex);
1645 	iommu_group_put(group);
1646 }
1647 EXPORT_SYMBOL_GPL(iommu_detach_device);
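
/*
 * Example (sketch): the classic unmanaged-domain flow used by callers such
 * as VFIO, abbreviated and with error handling omitted:
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(dev->bus);
 *
 *	iommu_attach_device(domain, dev);
 *	iommu_map(domain, iova, paddr, SZ_4K, IOMMU_READ | IOMMU_WRITE);
 *	// ... DMA happens through the new mapping ...
 *	iommu_unmap(domain, iova, SZ_4K);
 *	iommu_detach_device(domain, dev);
 *	iommu_domain_free(domain);
 */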
1648 
1649 struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
1650 {
1651 	struct iommu_domain *domain;
1652 	struct iommu_group *group;
1653 
1654 	group = iommu_group_get(dev);
1655 	if (!group)
1656 		return NULL;
1657 
1658 	domain = group->domain;
1659 
1660 	iommu_group_put(group);
1661 
1662 	return domain;
1663 }
1664 EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev);
1665 
1666 /*
1667  * For IOMMU_DOMAIN_DMA implementations only; callers are expected to
1668  * guarantee that the group and its default domain are valid and correct.
1669  */
1670 struct iommu_domain *iommu_get_dma_domain(struct device *dev)
1671 {
1672 	return dev->iommu_group->default_domain;
1673 }
1674 
1675 /*
1676  * IOMMU groups are really the natural working unit of the IOMMU, but
1677  * the IOMMU API works on domains and devices.  Bridge that gap by
1678  * iterating over the devices in a group.  Ideally we'd have a single
1679  * device which represents the requestor ID of the group, but we also
1680  * allow IOMMU drivers to create policy defined minimum sets, where
1681  * the physical hardware may be able to distinguish members, but we
1682  * wish to group them at a higher level (ex. untrusted multi-function
1683  * PCI devices).  Thus we attach each device.
1684  */
1685 static int iommu_group_do_attach_device(struct device *dev, void *data)
1686 {
1687 	struct iommu_domain *domain = data;
1688 
1689 	return __iommu_attach_device(domain, dev);
1690 }
1691 
1692 static int __iommu_attach_group(struct iommu_domain *domain,
1693 				struct iommu_group *group)
1694 {
1695 	int ret;
1696 
1697 	if (group->default_domain && group->domain != group->default_domain)
1698 		return -EBUSY;
1699 
1700 	ret = __iommu_group_for_each_dev(group, domain,
1701 					 iommu_group_do_attach_device);
1702 	if (ret == 0)
1703 		group->domain = domain;
1704 
1705 	return ret;
1706 }
1707 
1708 int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
1709 {
1710 	int ret;
1711 
1712 	mutex_lock(&group->mutex);
1713 	ret = __iommu_attach_group(domain, group);
1714 	mutex_unlock(&group->mutex);
1715 
1716 	return ret;
1717 }
1718 EXPORT_SYMBOL_GPL(iommu_attach_group);
1719 
1720 static int iommu_group_do_detach_device(struct device *dev, void *data)
1721 {
1722 	struct iommu_domain *domain = data;
1723 
1724 	__iommu_detach_device(domain, dev);
1725 
1726 	return 0;
1727 }
1728 
1729 static void __iommu_detach_group(struct iommu_domain *domain,
1730 				 struct iommu_group *group)
1731 {
1732 	int ret;
1733 
1734 	if (!group->default_domain) {
1735 		__iommu_group_for_each_dev(group, domain,
1736 					   iommu_group_do_detach_device);
1737 		group->domain = NULL;
1738 		return;
1739 	}
1740 
1741 	if (group->domain == group->default_domain)
1742 		return;
1743 
1744 	/* Detach by re-attaching to the default domain */
1745 	ret = __iommu_group_for_each_dev(group, group->default_domain,
1746 					 iommu_group_do_attach_device);
1747 	if (ret != 0)
1748 		WARN_ON(1);
1749 	else
1750 		group->domain = group->default_domain;
1751 }
1752 
1753 void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
1754 {
1755 	mutex_lock(&group->mutex);
1756 	__iommu_detach_group(domain, group);
1757 	mutex_unlock(&group->mutex);
1758 }
1759 EXPORT_SYMBOL_GPL(iommu_detach_group);
1760 
1761 phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
1762 {
1763 	if (unlikely(domain->ops->iova_to_phys == NULL))
1764 		return 0;
1765 
1766 	return domain->ops->iova_to_phys(domain, iova);
1767 }
1768 EXPORT_SYMBOL_GPL(iommu_iova_to_phys);
1769 
1770 static size_t iommu_pgsize(struct iommu_domain *domain,
1771 			   unsigned long addr_merge, size_t size)
1772 {
1773 	unsigned int pgsize_idx;
1774 	size_t pgsize;
1775 
1776 	/* Max page size that still fits into 'size' */
1777 	pgsize_idx = __fls(size);
1778 
1779 	/* need to consider alignment requirements ? */
1780 	if (likely(addr_merge)) {
1781 		/* Max page size allowed by address */
1782 		unsigned int align_pgsize_idx = __ffs(addr_merge);
1783 		pgsize_idx = min(pgsize_idx, align_pgsize_idx);
1784 	}
1785 
1786 	/* build a mask of acceptable page sizes */
1787 	pgsize = (1UL << (pgsize_idx + 1)) - 1;
1788 
1789 	/* throw away page sizes not supported by the hardware */
1790 	pgsize &= domain->pgsize_bitmap;
1791 
1792 	/* make sure we're still sane */
1793 	BUG_ON(!pgsize);
1794 
1795 	/* pick the biggest page */
1796 	pgsize_idx = __fls(pgsize);
1797 	pgsize = 1UL << pgsize_idx;
1798 
1799 	return pgsize;
1800 }
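
/*
 * Worked example (illustrative numbers): with pgsize_bitmap = SZ_4K | SZ_2M
 * | SZ_1G, addr_merge = iova | paddr = 0x200000 and size = 0x400000,
 * __fls(size) is 22 and __ffs(addr_merge) is 21, so pgsize_idx becomes 21.
 * The candidate mask (1UL << 22) - 1 = 0x3fffff ANDed with the bitmap
 * leaves 0x201000, and __fls() of that picks the 2 MiB page size.
 */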
1801 
1802 int iommu_map(struct iommu_domain *domain, unsigned long iova,
1803 	      phys_addr_t paddr, size_t size, int prot)
1804 {
1805 	const struct iommu_ops *ops = domain->ops;
1806 	unsigned long orig_iova = iova;
1807 	unsigned int min_pagesz;
1808 	size_t orig_size = size;
1809 	phys_addr_t orig_paddr = paddr;
1810 	int ret = 0;
1811 
1812 	if (unlikely(ops->map == NULL ||
1813 		     domain->pgsize_bitmap == 0UL))
1814 		return -ENODEV;
1815 
1816 	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
1817 		return -EINVAL;
1818 
1819 	/* find out the minimum page size supported */
1820 	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
1821 
1822 	/*
1823 	 * both the virtual address and the physical one, as well as
1824 	 * the size of the mapping, must be aligned (at least) to the
1825 	 * size of the smallest page supported by the hardware
1826 	 */
1827 	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
1828 		pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
1829 		       iova, &paddr, size, min_pagesz);
1830 		return -EINVAL;
1831 	}
1832 
1833 	pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);
1834 
1835 	while (size) {
1836 		size_t pgsize = iommu_pgsize(domain, iova | paddr, size);
1837 
1838 		pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
1839 			 iova, &paddr, pgsize);
1840 
1841 		ret = ops->map(domain, iova, paddr, pgsize, prot);
1842 		if (ret)
1843 			break;
1844 
1845 		iova += pgsize;
1846 		paddr += pgsize;
1847 		size -= pgsize;
1848 	}
1849 
1850 	if (ops->iotlb_sync_map)
1851 		ops->iotlb_sync_map(domain);
1852 
1853 	/* unroll mapping in case something went wrong */
1854 	if (ret)
1855 		iommu_unmap(domain, orig_iova, orig_size - size);
1856 	else
1857 		trace_map(orig_iova, orig_paddr, orig_size);
1858 
1859 	return ret;
1860 }
1861 EXPORT_SYMBOL_GPL(iommu_map);
1862 
1863 static size_t __iommu_unmap(struct iommu_domain *domain,
1864 			    unsigned long iova, size_t size,
1865 			    bool sync)
1866 {
1867 	const struct iommu_ops *ops = domain->ops;
1868 	size_t unmapped_page, unmapped = 0;
1869 	unsigned long orig_iova = iova;
1870 	unsigned int min_pagesz;
1871 
1872 	if (unlikely(ops->unmap == NULL ||
1873 		     domain->pgsize_bitmap == 0UL))
1874 		return 0;
1875 
1876 	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
1877 		return 0;
1878 
1879 	/* find out the minimum page size supported */
1880 	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
1881 
1882 	/*
1883 	 * The virtual address, as well as the size of the mapping, must be
1884 	 * aligned (at least) to the size of the smallest page supported
1885 	 * by the hardware
1886 	 */
1887 	if (!IS_ALIGNED(iova | size, min_pagesz)) {
1888 		pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
1889 		       iova, size, min_pagesz);
1890 		return 0;
1891 	}
1892 
1893 	pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size);
1894 
1895 	/*
1896 	 * Keep iterating until we either unmap 'size' bytes (or more)
1897 	 * or we hit an area that isn't mapped.
1898 	 */
1899 	while (unmapped < size) {
1900 		size_t pgsize = iommu_pgsize(domain, iova, size - unmapped);
1901 
1902 		unmapped_page = ops->unmap(domain, iova, pgsize);
1903 		if (!unmapped_page)
1904 			break;
1905 
1906 		if (sync && ops->iotlb_range_add)
1907 			ops->iotlb_range_add(domain, iova, pgsize);
1908 
1909 		pr_debug("unmapped: iova 0x%lx size 0x%zx\n",
1910 			 iova, unmapped_page);
1911 
1912 		iova += unmapped_page;
1913 		unmapped += unmapped_page;
1914 	}
1915 
1916 	if (sync && ops->iotlb_sync)
1917 		ops->iotlb_sync(domain);
1918 
1919 	trace_unmap(orig_iova, size, unmapped);
1920 	return unmapped;
1921 }
1922 
1923 size_t iommu_unmap(struct iommu_domain *domain,
1924 		   unsigned long iova, size_t size)
1925 {
1926 	return __iommu_unmap(domain, iova, size, true);
1927 }
1928 EXPORT_SYMBOL_GPL(iommu_unmap);
1929 
1930 size_t iommu_unmap_fast(struct iommu_domain *domain,
1931 			unsigned long iova, size_t size)
1932 {
1933 	return __iommu_unmap(domain, iova, size, false);
1934 }
1935 EXPORT_SYMBOL_GPL(iommu_unmap_fast);
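
/*
 * Illustrative sketch: the only difference between the two unmap variants
 * above is whether the IOTLB is synchronized before returning. A caller
 * tearing down many mappings can use iommu_unmap_fast() and batch the
 * invalidation, assuming the iommu_flush_tlb_all() helper from
 * include/linux/iommu.h is available; the function name and parameters here
 * are hypothetical.
 */
static void __maybe_unused example_unmap_batch(struct iommu_domain *domain,
					       unsigned long *iovas,
					       size_t *sizes, int count)
{
	int i;

	for (i = 0; i < count; i++)
		iommu_unmap_fast(domain, iovas[i], sizes[i]);

	/* One global invalidation instead of one sync per unmap. */
	iommu_flush_tlb_all(domain);
}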
1936 
1937 size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
1938 		    struct scatterlist *sg, unsigned int nents, int prot)
1939 {
1940 	size_t len = 0, mapped = 0;
1941 	phys_addr_t start;
1942 	unsigned int i = 0;
1943 	int ret;
1944 
1945 	while (i <= nents) {
1946 		phys_addr_t s_phys = sg_phys(sg);
1947 
1948 		if (len && s_phys != start + len) {
1949 			ret = iommu_map(domain, iova + mapped, start, len, prot);
1950 			if (ret)
1951 				goto out_err;
1952 
1953 			mapped += len;
1954 			len = 0;
1955 		}
1956 
1957 		if (len) {
1958 			len += sg->length;
1959 		} else {
1960 			len = sg->length;
1961 			start = s_phys;
1962 		}
1963 
1964 		if (++i < nents)
1965 			sg = sg_next(sg);
1966 	}
1967 
1968 	return mapped;
1969 
1970 out_err:
1971 	/* undo mappings already done */
1972 	iommu_unmap(domain, iova, mapped);
1973 
1974 	return 0;
1976 }
1977 EXPORT_SYMBOL_GPL(iommu_map_sg);
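
/*
 * Illustrative sketch: mapping a scatterlist that was populated elsewhere
 * (e.g. via sg_alloc_table_from_pages()) into a domain. iommu_map_sg()
 * merges physically contiguous entries before calling iommu_map() and
 * returns the number of bytes mapped, or 0 on failure. The function name
 * here is hypothetical.
 */
static int __maybe_unused example_map_sgt(struct iommu_domain *domain,
					  unsigned long iova,
					  struct sg_table *sgt)
{
	size_t mapped;

	mapped = iommu_map_sg(domain, iova, sgt->sgl, sgt->nents,
			      IOMMU_READ | IOMMU_WRITE);
	if (!mapped)
		return -ENOMEM;	/* failed; partial mappings were already undone */

	return 0;
}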
1978 
1979 int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
1980 			       phys_addr_t paddr, u64 size, int prot)
1981 {
1982 	if (unlikely(domain->ops->domain_window_enable == NULL))
1983 		return -ENODEV;
1984 
1985 	return domain->ops->domain_window_enable(domain, wnd_nr, paddr, size,
1986 						 prot);
1987 }
1988 EXPORT_SYMBOL_GPL(iommu_domain_window_enable);
1989 
1990 void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr)
1991 {
1992 	if (unlikely(domain->ops->domain_window_disable == NULL))
1993 		return;
1994 
1995 	domain->ops->domain_window_disable(domain, wnd_nr);
1996 }
1997 EXPORT_SYMBOL_GPL(iommu_domain_window_disable);
1998 
1999 /**
2000  * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
2001  * @domain: the iommu domain where the fault has happened
2002  * @dev: the device where the fault has happened
2003  * @iova: the faulting address
2004  * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...)
2005  *
2006  * This function should be called by the low-level IOMMU implementations
2007  * whenever IOMMU faults happen, to allow high-level users that are
2008  * interested in such events to know about them.
2009  *
2010  * This event may be useful for several possible use cases:
2011  * - mere logging of the event
2012  * - dynamic TLB/PTE loading
2013  * - restarting the faulting device, if required
2014  *
2015  * Returns 0 on success and an appropriate error code otherwise (if dynamic
2016  * PTE/TLB loading will one day be supported, implementations will be able
2017  * to tell whether it succeeded or not according to this return value).
2018  *
2019  * Specifically, -ENOSYS is returned if a fault handler isn't installed
2020  * (though fault handlers can also return -ENOSYS, in case they want to
2021  * elicit the default behavior of the IOMMU drivers).
2022  */
2023 int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
2024 		       unsigned long iova, int flags)
2025 {
2026 	int ret = -ENOSYS;
2027 
2028 	/*
2029 	 * if upper layers showed interest and installed a fault handler,
2030 	 * invoke it.
2031 	 */
2032 	if (domain->handler)
2033 		ret = domain->handler(domain, dev, iova, flags,
2034 						domain->handler_token);
2035 
2036 	trace_io_page_fault(dev, iova, flags);
2037 	return ret;
2038 }
2039 EXPORT_SYMBOL_GPL(report_iommu_fault);
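
/*
 * Illustrative sketch: the handler invoked above is installed by the domain
 * owner, assuming the iommu_set_fault_handler() helper declared in
 * include/linux/iommu.h. Returning -ENOSYS from the handler keeps the IOMMU
 * driver's default behaviour, as described above. Function names are
 * hypothetical.
 */
static int __maybe_unused example_fault_handler(struct iommu_domain *domain,
						struct device *dev,
						unsigned long iova, int flags,
						void *token)
{
	dev_err(dev, "iommu fault at iova 0x%lx, flags 0x%x\n", iova, flags);

	/* Only log; let the IOMMU driver apply its default fault handling. */
	return -ENOSYS;
}

static void __maybe_unused example_install_handler(struct iommu_domain *domain)
{
	iommu_set_fault_handler(domain, example_fault_handler, NULL);
}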
2040 
2041 static int __init iommu_init(void)
2042 {
2043 	iommu_group_kset = kset_create_and_add("iommu_groups",
2044 					       NULL, kernel_kobj);
2045 	BUG_ON(!iommu_group_kset);
2046 
2047 	iommu_debugfs_setup();
2048 
2049 	return 0;
2050 }
2051 core_initcall(iommu_init);
2052 
2053 int iommu_domain_get_attr(struct iommu_domain *domain,
2054 			  enum iommu_attr attr, void *data)
2055 {
2056 	struct iommu_domain_geometry *geometry;
2057 	bool *paging;
2058 	int ret = 0;
2059 
2060 	switch (attr) {
2061 	case DOMAIN_ATTR_GEOMETRY:
2062 		geometry  = data;
2063 		*geometry = domain->geometry;
2064 
2065 		break;
2066 	case DOMAIN_ATTR_PAGING:
2067 		paging  = data;
2068 		*paging = (domain->pgsize_bitmap != 0UL);
2069 		break;
2070 	default:
2071 		if (!domain->ops->domain_get_attr)
2072 			return -EINVAL;
2073 
2074 		ret = domain->ops->domain_get_attr(domain, attr, data);
2075 	}
2076 
2077 	return ret;
2078 }
2079 EXPORT_SYMBOL_GPL(iommu_domain_get_attr);
2080 
2081 int iommu_domain_set_attr(struct iommu_domain *domain,
2082 			  enum iommu_attr attr, void *data)
2083 {
2084 	int ret = 0;
2085 
2086 	switch (attr) {
2087 	default:
2088 		if (domain->ops->domain_set_attr == NULL)
2089 			return -EINVAL;
2090 
2091 		ret = domain->ops->domain_set_attr(domain, attr, data);
2092 	}
2093 
2094 	return ret;
2095 }
2096 EXPORT_SYMBOL_GPL(iommu_domain_set_attr);
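
/*
 * Illustrative sketch: querying the two attributes handled generically above.
 * DOMAIN_ATTR_GEOMETRY reports the usable IOVA aperture, DOMAIN_ATTR_PAGING
 * whether the domain supports page mappings. The function name is
 * hypothetical.
 */
static void __maybe_unused example_query_domain(struct iommu_domain *domain)
{
	struct iommu_domain_geometry geo;
	bool paging;

	if (!iommu_domain_get_attr(domain, DOMAIN_ATTR_GEOMETRY, &geo))
		pr_info("aperture: %pad..%pad\n",
			&geo.aperture_start, &geo.aperture_end);

	if (!iommu_domain_get_attr(domain, DOMAIN_ATTR_PAGING, &paging))
		pr_info("paging: %s\n", paging ? "supported" : "unsupported");
}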
2097 
2098 void iommu_get_resv_regions(struct device *dev, struct list_head *list)
2099 {
2100 	const struct iommu_ops *ops = dev->bus->iommu_ops;
2101 
2102 	if (ops && ops->get_resv_regions)
2103 		ops->get_resv_regions(dev, list);
2104 }
2105 
2106 void iommu_put_resv_regions(struct device *dev, struct list_head *list)
2107 {
2108 	const struct iommu_ops *ops = dev->bus->iommu_ops;
2109 
2110 	if (ops && ops->put_resv_regions)
2111 		ops->put_resv_regions(dev, list);
2112 }
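
/*
 * Illustrative sketch: a consumer (e.g. VFIO-like code) enumerating the
 * regions a device must keep reserved in its IOVA space, using the two
 * helpers above. The function name is hypothetical.
 */
static void __maybe_unused example_dump_resv_regions(struct device *dev)
{
	struct iommu_resv_region *region;
	LIST_HEAD(resv_regions);

	iommu_get_resv_regions(dev, &resv_regions);

	list_for_each_entry(region, &resv_regions, list)
		dev_info(dev, "reserved: %pa + 0x%zx, type %d\n",
			 &region->start, region->length, region->type);

	iommu_put_resv_regions(dev, &resv_regions);
}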
2113 
2114 struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
2115 						  size_t length, int prot,
2116 						  enum iommu_resv_type type)
2117 {
2118 	struct iommu_resv_region *region;
2119 
2120 	region = kzalloc(sizeof(*region), GFP_KERNEL);
2121 	if (!region)
2122 		return NULL;
2123 
2124 	INIT_LIST_HEAD(&region->list);
2125 	region->start = start;
2126 	region->length = length;
2127 	region->prot = prot;
2128 	region->type = type;
2129 	return region;
2130 }
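
/*
 * Illustrative sketch: how an IOMMU driver's get_resv_regions() callback
 * would typically use the allocator above. The function name, address and
 * size are purely hypothetical.
 */
static void __maybe_unused example_get_resv_regions(struct device *dev,
						    struct list_head *head)
{
	struct iommu_resv_region *region;

	/* Hypothetical hardware MSI doorbell window. */
	region = iommu_alloc_resv_region(0x08000000, 0x100000, IOMMU_WRITE,
					 IOMMU_RESV_MSI);
	if (!region)
		return;

	list_add_tail(&region->list, head);
}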
2131 
2132 static int
2133 request_default_domain_for_dev(struct device *dev, unsigned long type)
2134 {
2135 	struct iommu_domain *domain;
2136 	struct iommu_group *group;
2137 	int ret;
2138 
2139 	/* Device must already be in a group before calling this function */
2140 	group = iommu_group_get(dev);
2141 	if (!group)
2142 		return -EINVAL;
2143 
2144 	mutex_lock(&group->mutex);
2145 
2146 	/* Check if the default domain already has the requested type */
2147 	ret = 0;
2148 	if (group->default_domain && group->default_domain->type == type)
2149 		goto out;
2150 
2151 	/* Don't change mappings of existing devices */
2152 	ret = -EBUSY;
2153 	if (iommu_group_device_count(group) != 1)
2154 		goto out;
2155 
2156 	/* Allocate a domain of the requested type */
2157 	ret = -ENOMEM;
2158 	domain = __iommu_domain_alloc(dev->bus, type);
2159 	if (!domain)
2160 		goto out;
2161 
2162 	/* Attach the device to the domain */
2163 	ret = __iommu_attach_group(domain, group);
2164 	if (ret) {
2165 		iommu_domain_free(domain);
2166 		goto out;
2167 	}
2168 
2169 	iommu_group_create_direct_mappings(group, dev);
2170 
2171 	/* Make the new domain the default for this group */
2172 	if (group->default_domain)
2173 		iommu_domain_free(group->default_domain);
2174 	group->default_domain = domain;
2175 
2176 	dev_info(dev, "Using iommu %s mapping\n",
2177 		 type == IOMMU_DOMAIN_DMA ? "dma" : "direct");
2178 
2179 	ret = 0;
2180 out:
2181 	mutex_unlock(&group->mutex);
2182 	iommu_group_put(group);
2183 
2184 	return ret;
2185 }
2186 
2187 /* Request that a device is direct mapped by the IOMMU */
2188 int iommu_request_dm_for_dev(struct device *dev)
2189 {
2190 	return request_default_domain_for_dev(dev, IOMMU_DOMAIN_IDENTITY);
2191 }
2192 
2193 /* Request that a device is translated by the IOMMU (not direct mapped) */
2194 int iommu_request_dma_domain_for_dev(struct device *dev)
2195 {
2196 	return request_default_domain_for_dev(dev, IOMMU_DOMAIN_DMA);
2197 }
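
/*
 * Illustrative sketch: a device driver that cannot tolerate address
 * translation (e.g. it hands physical addresses to firmware-owned structures)
 * may ask for an identity default domain early in its probe path. The
 * function name is hypothetical.
 */
static int __maybe_unused example_request_passthrough(struct device *dev)
{
	int ret;

	ret = iommu_request_dm_for_dev(dev);
	if (ret)
		dev_warn(dev, "falling back to translated DMA (%d)\n", ret);

	return ret;
}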
2198 
2199 const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
2200 {
2201 	const struct iommu_ops *ops = NULL;
2202 	struct iommu_device *iommu;
2203 
2204 	spin_lock(&iommu_device_lock);
2205 	list_for_each_entry(iommu, &iommu_device_list, list)
2206 		if (iommu->fwnode == fwnode) {
2207 			ops = iommu->ops;
2208 			break;
2209 		}
2210 	spin_unlock(&iommu_device_lock);
2211 	return ops;
2212 }
2213 
2214 int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
2215 		      const struct iommu_ops *ops)
2216 {
2217 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
2218 
2219 	if (fwspec)
2220 		return ops == fwspec->ops ? 0 : -EINVAL;
2221 
2222 	fwspec = kzalloc(sizeof(*fwspec), GFP_KERNEL);
2223 	if (!fwspec)
2224 		return -ENOMEM;
2225 
2226 	of_node_get(to_of_node(iommu_fwnode));
2227 	fwspec->iommu_fwnode = iommu_fwnode;
2228 	fwspec->ops = ops;
2229 	dev_iommu_fwspec_set(dev, fwspec);
2230 	return 0;
2231 }
2232 EXPORT_SYMBOL_GPL(iommu_fwspec_init);
2233 
2234 void iommu_fwspec_free(struct device *dev)
2235 {
2236 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
2237 
2238 	if (fwspec) {
2239 		fwnode_handle_put(fwspec->iommu_fwnode);
2240 		kfree(fwspec);
2241 		dev_iommu_fwspec_set(dev, NULL);
2242 	}
2243 }
2244 EXPORT_SYMBOL_GPL(iommu_fwspec_free);
2245 
2246 int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids)
2247 {
2248 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
2249 	size_t size;
2250 	int i;
2251 
2252 	if (!fwspec)
2253 		return -EINVAL;
2254 
2255 	size = offsetof(struct iommu_fwspec, ids[fwspec->num_ids + num_ids]);
2256 	if (size > sizeof(*fwspec)) {
2257 		fwspec = krealloc(fwspec, size, GFP_KERNEL);
2258 		if (!fwspec)
2259 			return -ENOMEM;
2260 
2261 		dev_iommu_fwspec_set(dev, fwspec);
2262 	}
2263 
2264 	for (i = 0; i < num_ids; i++)
2265 		fwspec->ids[fwspec->num_ids + i] = ids[i];
2266 
2267 	fwspec->num_ids += num_ids;
2268 	return 0;
2269 }
2270 EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
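
/*
 * Illustrative sketch: the firmware-parsing flow that the fwspec helpers
 * above support, as done by bus code translating e.g. an OF "iommus"
 * phandle. @np is assumed to be the IOMMU's device_node and @sid a stream ID
 * parsed from the phandle arguments; of_fwnode_handle() comes from
 * include/linux/of.h, and the function name is hypothetical.
 */
static int __maybe_unused example_parse_one_id(struct device *dev,
					       struct device_node *np, u32 sid)
{
	const struct iommu_ops *ops = iommu_ops_from_fwnode(of_fwnode_handle(np));
	int ret;

	if (!ops)
		return -EPROBE_DEFER;	/* IOMMU driver not registered yet */

	ret = iommu_fwspec_init(dev, of_fwnode_handle(np), ops);
	if (ret)
		return ret;

	return iommu_fwspec_add_ids(dev, &sid, 1);
}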
2271 
2272 /*
2273  * Per device IOMMU features.
2274  */
2275 bool iommu_dev_has_feature(struct device *dev, enum iommu_dev_features feat)
2276 {
2277 	const struct iommu_ops *ops = dev->bus->iommu_ops;
2278 
2279 	if (ops && ops->dev_has_feat)
2280 		return ops->dev_has_feat(dev, feat);
2281 
2282 	return false;
2283 }
2284 EXPORT_SYMBOL_GPL(iommu_dev_has_feature);
2285 
2286 int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
2287 {
2288 	const struct iommu_ops *ops = dev->bus->iommu_ops;
2289 
2290 	if (ops && ops->dev_enable_feat)
2291 		return ops->dev_enable_feat(dev, feat);
2292 
2293 	return -ENODEV;
2294 }
2295 EXPORT_SYMBOL_GPL(iommu_dev_enable_feature);
2296 
2297 /*
2298  * Device drivers should do the necessary cleanups before calling this.
2299  * For example, before disabling the aux-domain feature, the device driver
2300  * should detach all aux-domains. Otherwise, this will return -EBUSY.
2301  */
2302 int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
2303 {
2304 	const struct iommu_ops *ops = dev->bus->iommu_ops;
2305 
2306 	if (ops && ops->dev_disable_feat)
2307 		return ops->dev_disable_feat(dev, feat);
2308 
2309 	return -EBUSY;
2310 }
2311 EXPORT_SYMBOL_GPL(iommu_dev_disable_feature);
2312 
2313 bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat)
2314 {
2315 	const struct iommu_ops *ops = dev->bus->iommu_ops;
2316 
2317 	if (ops && ops->dev_feat_enabled)
2318 		return ops->dev_feat_enabled(dev, feat);
2319 
2320 	return false;
2321 }
2322 EXPORT_SYMBOL_GPL(iommu_dev_feature_enabled);
2323 
2324 /*
2325  * Aux-domain specific attach/detach.
2326  *
2327  * Only works if iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX) returns
2328  * true. Also, as long as domains are attached to a device through this
2329  * interface, any tries to call iommu_attach_device() should fail
2330  * interface, any attempt to call iommu_attach_device() should fail
2331  * This should make us safe against a device being attached to a guest as a
2332  * whole while there are still pasid users on it (aux and sva).
2333  */
2334 int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev)
2335 {
2336 	int ret = -ENODEV;
2337 
2338 	if (domain->ops->aux_attach_dev)
2339 		ret = domain->ops->aux_attach_dev(domain, dev);
2340 
2341 	if (!ret)
2342 		trace_attach_device_to_domain(dev);
2343 
2344 	return ret;
2345 }
2346 EXPORT_SYMBOL_GPL(iommu_aux_attach_device);
2347 
2348 void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev)
2349 {
2350 	if (domain->ops->aux_detach_dev) {
2351 		domain->ops->aux_detach_dev(domain, dev);
2352 		trace_detach_device_from_domain(dev);
2353 	}
2354 }
2355 EXPORT_SYMBOL_GPL(iommu_aux_detach_device);
2356 
2357 int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
2358 {
2359 	int ret = -ENODEV;
2360 
2361 	if (domain->ops->aux_get_pasid)
2362 		ret = domain->ops->aux_get_pasid(domain, dev);
2363 
2364 	return ret;
2365 }
2366 EXPORT_SYMBOL_GPL(iommu_aux_get_pasid);
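
/*
 * Illustrative sketch: the expected calling sequence for the aux-domain
 * interface above, for a driver that wants a private PASID-tagged address
 * space on one device. The function name is hypothetical, and error paths
 * are simplified (the feature is left enabled on failure).
 */
static int __maybe_unused example_aux_attach(struct iommu_domain *domain,
					     struct device *dev)
{
	int ret, pasid;

	ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_AUX);
	if (ret)
		return ret;

	ret = iommu_aux_attach_device(domain, dev);
	if (ret)
		return ret;

	pasid = iommu_aux_get_pasid(domain, dev);
	if (pasid < 0) {
		iommu_aux_detach_device(domain, dev);
		return pasid;
	}

	/* Program @pasid into the device, map IOVAs into @domain, ... */
	return 0;
}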
2367 
2368 /**
2369  * iommu_sva_bind_device() - Bind a process address space to a device
2370  * @dev: the device
2371  * @mm: the mm to bind, caller must hold a reference to it
 * @drvdata: opaque driver data, passed unmodified to the IOMMU driver's
 *           sva_bind() callback
2372  *
2373  * Create a bond between device and address space, allowing the device to access
2374  * the mm using the returned PASID. If a bond already exists between @device and
2375  * the mm using the returned PASID. If a bond already exists between @dev and
2376  * iommu_sva_unbind_device() to release each reference.
2377  *
2378  * iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA) must be called first, to
2379  * initialize the required SVA features.
2380  *
2381  * On error, returns an ERR_PTR value.
2382  */
2383 struct iommu_sva *
2384 iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata)
2385 {
2386 	struct iommu_group *group;
2387 	struct iommu_sva *handle = ERR_PTR(-EINVAL);
2388 	const struct iommu_ops *ops = dev->bus->iommu_ops;
2389 
2390 	if (!ops || !ops->sva_bind)
2391 		return ERR_PTR(-ENODEV);
2392 
2393 	group = iommu_group_get(dev);
2394 	if (!group)
2395 		return ERR_PTR(-ENODEV);
2396 
2397 	/* Ensure device count and domain don't change while we're binding */
2398 	mutex_lock(&group->mutex);
2399 
2400 	/*
2401 	 * To keep things simple, SVA currently doesn't support IOMMU groups
2402 	 * with more than one device. Existing SVA-capable systems are not
2403 	 * affected by the problems that required IOMMU groups (lack of ACS
2404 	 * isolation, device ID aliasing and other hardware issues).
2405 	 */
2406 	if (iommu_group_device_count(group) != 1)
2407 		goto out_unlock;
2408 
2409 	handle = ops->sva_bind(dev, mm, drvdata);
2410 
2411 out_unlock:
2412 	mutex_unlock(&group->mutex);
2413 	iommu_group_put(group);
2414 
2415 	return handle;
2416 }
2417 EXPORT_SYMBOL_GPL(iommu_sva_bind_device);
2418 
2419 /**
2420  * iommu_sva_unbind_device() - Remove a bond created with iommu_sva_bind_device
2421  * @handle: the handle returned by iommu_sva_bind_device()
2422  *
2423  * Put reference to a bond between device and address space. The device should
2424  * Put a reference to a bond between device and address space. The device should
2425  * not be issuing any more transactions for this PASID. All outstanding page
2426  *
2429 void iommu_sva_unbind_device(struct iommu_sva *handle)
2430 {
2431 	struct iommu_group *group;
2432 	struct device *dev = handle->dev;
2433 	const struct iommu_ops *ops = dev->bus->iommu_ops;
2434 
2435 	if (!ops || !ops->sva_unbind)
2436 		return;
2437 
2438 	group = iommu_group_get(dev);
2439 	if (!group)
2440 		return;
2441 
2442 	mutex_lock(&group->mutex);
2443 	ops->sva_unbind(handle);
2444 	mutex_unlock(&group->mutex);
2445 
2446 	iommu_group_put(group);
2447 }
2448 EXPORT_SYMBOL_GPL(iommu_sva_unbind_device);
2449 
2450 int iommu_sva_set_ops(struct iommu_sva *handle,
2451 		      const struct iommu_sva_ops *sva_ops)
2452 {
2453 	if (handle->ops && handle->ops != sva_ops)
2454 		return -EEXIST;
2455 
2456 	handle->ops = sva_ops;
2457 	return 0;
2458 }
2459 EXPORT_SYMBOL_GPL(iommu_sva_set_ops);
2460 
2461 int iommu_sva_get_pasid(struct iommu_sva *handle)
2462 {
2463 	const struct iommu_ops *ops = handle->dev->bus->iommu_ops;
2464 
2465 	if (!ops || !ops->sva_get_pasid)
2466 		return IOMMU_PASID_INVALID;
2467 
2468 	return ops->sva_get_pasid(handle);
2469 }
2470 EXPORT_SYMBOL_GPL(iommu_sva_get_pasid);
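
/*
 * Illustrative sketch: a device driver binding the current process's address
 * space so the device can issue PASID-tagged DMA on user virtual addresses.
 * The function name is hypothetical, current->mm is used purely for
 * illustration, and error paths are simplified (SVA is left enabled).
 */
static int __maybe_unused example_sva_bind_current(struct device *dev)
{
	struct iommu_sva *handle;
	int pasid, ret;

	ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA);
	if (ret)
		return ret;

	handle = iommu_sva_bind_device(dev, current->mm, NULL);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	pasid = iommu_sva_get_pasid(handle);
	if (pasid == IOMMU_PASID_INVALID) {
		iommu_sva_unbind_device(handle);
		return -ENODEV;
	}

	/* Tell the device to use @pasid for this context's transactions. */
	return 0;
}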
2471