xref: /openbmc/linux/drivers/iommu/iommu.c (revision 1d27a0be)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
4  * Author: Joerg Roedel <jroedel@suse.de>
5  */
6 
7 #define pr_fmt(fmt)    "iommu: " fmt
8 
9 #include <linux/device.h>
10 #include <linux/kernel.h>
11 #include <linux/bug.h>
12 #include <linux/types.h>
13 #include <linux/init.h>
14 #include <linux/export.h>
15 #include <linux/slab.h>
16 #include <linux/errno.h>
17 #include <linux/iommu.h>
18 #include <linux/idr.h>
19 #include <linux/notifier.h>
20 #include <linux/err.h>
21 #include <linux/pci.h>
22 #include <linux/bitops.h>
23 #include <linux/property.h>
24 #include <linux/fsl/mc.h>
25 #include <linux/module.h>
26 #include <trace/events/iommu.h>
27 
28 static struct kset *iommu_group_kset;
29 static DEFINE_IDA(iommu_group_ida);
30 
31 static unsigned int iommu_def_domain_type __read_mostly;
32 static bool iommu_dma_strict __read_mostly = true;
33 static u32 iommu_cmd_line __read_mostly;
34 
35 struct iommu_group {
36 	struct kobject kobj;
37 	struct kobject *devices_kobj;
38 	struct list_head devices;
39 	struct mutex mutex;
40 	struct blocking_notifier_head notifier;
41 	void *iommu_data;
42 	void (*iommu_data_release)(void *iommu_data);
43 	char *name;
44 	int id;
45 	struct iommu_domain *default_domain;
46 	struct iommu_domain *domain;
47 	struct list_head entry;
48 };
49 
50 struct group_device {
51 	struct list_head list;
52 	struct device *dev;
53 	char *name;
54 };
55 
56 struct iommu_group_attribute {
57 	struct attribute attr;
58 	ssize_t (*show)(struct iommu_group *group, char *buf);
59 	ssize_t (*store)(struct iommu_group *group,
60 			 const char *buf, size_t count);
61 };
62 
63 static const char * const iommu_group_resv_type_string[] = {
64 	[IOMMU_RESV_DIRECT]			= "direct",
65 	[IOMMU_RESV_DIRECT_RELAXABLE]		= "direct-relaxable",
66 	[IOMMU_RESV_RESERVED]			= "reserved",
67 	[IOMMU_RESV_MSI]			= "msi",
68 	[IOMMU_RESV_SW_MSI]			= "msi",
69 };
70 
71 #define IOMMU_CMD_LINE_DMA_API		BIT(0)
72 
73 static void iommu_set_cmd_line_dma_api(void)
74 {
75 	iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API;
76 }
77 
78 static bool iommu_cmd_line_dma_api(void)
79 {
80 	return !!(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API);
81 }
82 
83 static int iommu_alloc_default_domain(struct iommu_group *group,
84 				      struct device *dev);
85 static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
86 						 unsigned type);
87 static int __iommu_attach_device(struct iommu_domain *domain,
88 				 struct device *dev);
89 static int __iommu_attach_group(struct iommu_domain *domain,
90 				struct iommu_group *group);
91 static void __iommu_detach_group(struct iommu_domain *domain,
92 				 struct iommu_group *group);
93 static int iommu_create_device_direct_mappings(struct iommu_group *group,
94 					       struct device *dev);
95 static struct iommu_group *iommu_group_get_for_dev(struct device *dev);
96 
97 #define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)		\
98 struct iommu_group_attribute iommu_group_attr_##_name =		\
99 	__ATTR(_name, _mode, _show, _store)
100 
101 #define to_iommu_group_attr(_attr)	\
102 	container_of(_attr, struct iommu_group_attribute, attr)
103 #define to_iommu_group(_kobj)		\
104 	container_of(_kobj, struct iommu_group, kobj)
105 
106 static LIST_HEAD(iommu_device_list);
107 static DEFINE_SPINLOCK(iommu_device_lock);
108 
109 /*
110  * Use a function instead of an array here because the domain-type is a
111  * bit-field, so an array would waste memory.
112  */
113 static const char *iommu_domain_type_str(unsigned int t)
114 {
115 	switch (t) {
116 	case IOMMU_DOMAIN_BLOCKED:
117 		return "Blocked";
118 	case IOMMU_DOMAIN_IDENTITY:
119 		return "Passthrough";
120 	case IOMMU_DOMAIN_UNMANAGED:
121 		return "Unmanaged";
122 	case IOMMU_DOMAIN_DMA:
123 		return "Translated";
124 	default:
125 		return "Unknown";
126 	}
127 }
128 
129 static int __init iommu_subsys_init(void)
130 {
131 	bool cmd_line = iommu_cmd_line_dma_api();
132 
133 	if (!cmd_line) {
134 		if (IS_ENABLED(CONFIG_IOMMU_DEFAULT_PASSTHROUGH))
135 			iommu_set_default_passthrough(false);
136 		else
137 			iommu_set_default_translated(false);
138 
139 		if (iommu_default_passthrough() && mem_encrypt_active()) {
140 			pr_info("Memory encryption detected - Disabling default IOMMU Passthrough\n");
141 			iommu_set_default_translated(false);
142 		}
143 	}
144 
145 	pr_info("Default domain type: %s %s\n",
146 		iommu_domain_type_str(iommu_def_domain_type),
147 		cmd_line ? "(set via kernel command line)" : "");
148 
149 	return 0;
150 }
151 subsys_initcall(iommu_subsys_init);
152 
153 int iommu_device_register(struct iommu_device *iommu)
154 {
155 	spin_lock(&iommu_device_lock);
156 	list_add_tail(&iommu->list, &iommu_device_list);
157 	spin_unlock(&iommu_device_lock);
158 	return 0;
159 }
160 EXPORT_SYMBOL_GPL(iommu_device_register);
161 
162 void iommu_device_unregister(struct iommu_device *iommu)
163 {
164 	spin_lock(&iommu_device_lock);
165 	list_del(&iommu->list);
166 	spin_unlock(&iommu_device_lock);
167 }
168 EXPORT_SYMBOL_GPL(iommu_device_unregister);
169 
170 static struct dev_iommu *dev_iommu_get(struct device *dev)
171 {
172 	struct dev_iommu *param = dev->iommu;
173 
174 	if (param)
175 		return param;
176 
177 	param = kzalloc(sizeof(*param), GFP_KERNEL);
178 	if (!param)
179 		return NULL;
180 
181 	mutex_init(&param->lock);
182 	dev->iommu = param;
183 	return param;
184 }
185 
186 static void dev_iommu_free(struct device *dev)
187 {
188 	iommu_fwspec_free(dev);
189 	kfree(dev->iommu);
190 	dev->iommu = NULL;
191 }
192 
193 static int __iommu_probe_device(struct device *dev, struct list_head *group_list)
194 {
195 	const struct iommu_ops *ops = dev->bus->iommu_ops;
196 	struct iommu_device *iommu_dev;
197 	struct iommu_group *group;
198 	int ret;
199 
200 	if (!ops)
201 		return -ENODEV;
202 
203 	if (!dev_iommu_get(dev))
204 		return -ENOMEM;
205 
206 	if (!try_module_get(ops->owner)) {
207 		ret = -EINVAL;
208 		goto err_free;
209 	}
210 
211 	iommu_dev = ops->probe_device(dev);
212 	if (IS_ERR(iommu_dev)) {
213 		ret = PTR_ERR(iommu_dev);
214 		goto out_module_put;
215 	}
216 
217 	dev->iommu->iommu_dev = iommu_dev;
218 
219 	group = iommu_group_get_for_dev(dev);
220 	if (IS_ERR(group)) {
221 		ret = PTR_ERR(group);
222 		goto out_release;
223 	}
224 	iommu_group_put(group);
225 
226 	if (group_list && !group->default_domain && list_empty(&group->entry))
227 		list_add_tail(&group->entry, group_list);
228 
229 	iommu_device_link(iommu_dev, dev);
230 
231 	return 0;
232 
233 out_release:
234 	ops->release_device(dev);
235 
236 out_module_put:
237 	module_put(ops->owner);
238 
239 err_free:
240 	dev_iommu_free(dev);
241 
242 	return ret;
243 }
244 
245 int iommu_probe_device(struct device *dev)
246 {
247 	const struct iommu_ops *ops = dev->bus->iommu_ops;
248 	struct iommu_group *group;
249 	int ret;
250 
251 	ret = __iommu_probe_device(dev, NULL);
252 	if (ret)
253 		goto err_out;
254 
255 	group = iommu_group_get(dev);
256 	if (!group)
257 		goto err_release;
258 
259 	/*
260 	 * Try to allocate a default domain - needs support from the
261 	 * IOMMU driver. There are still some drivers which don't
262 	 * support default domains, so the return value is not yet
263 	 * checked.
264 	 */
265 	iommu_alloc_default_domain(group, dev);
266 
267 	if (group->default_domain)
268 		ret = __iommu_attach_device(group->default_domain, dev);
269 
270 	iommu_create_device_direct_mappings(group, dev);
271 
272 	iommu_group_put(group);
273 
274 	if (ret)
275 		goto err_release;
276 
277 	if (ops->probe_finalize)
278 		ops->probe_finalize(dev);
279 
280 	return 0;
281 
282 err_release:
283 	iommu_release_device(dev);
284 
285 err_out:
286 	return ret;
287 
288 }
289 
290 void iommu_release_device(struct device *dev)
291 {
292 	const struct iommu_ops *ops = dev->bus->iommu_ops;
293 
294 	if (!dev->iommu)
295 		return;
296 
297 	iommu_device_unlink(dev->iommu->iommu_dev, dev);
298 	iommu_group_remove_device(dev);
299 
300 	ops->release_device(dev);
301 
302 	module_put(ops->owner);
303 	dev_iommu_free(dev);
304 }
305 
306 static int __init iommu_set_def_domain_type(char *str)
307 {
308 	bool pt;
309 	int ret;
310 
311 	ret = kstrtobool(str, &pt);
312 	if (ret)
313 		return ret;
314 
315 	if (pt)
316 		iommu_set_default_passthrough(true);
317 	else
318 		iommu_set_default_translated(true);
319 
320 	return 0;
321 }
322 early_param("iommu.passthrough", iommu_set_def_domain_type);
323 
324 static int __init iommu_dma_setup(char *str)
325 {
326 	return kstrtobool(str, &iommu_dma_strict);
327 }
328 early_param("iommu.strict", iommu_dma_setup);
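
/*
 * Illustrative usage of the two options above (kernel command line); the
 * example values are arbitrary:
 *
 *     iommu.passthrough=1    boot with an identity (passthrough) default domain
 *     iommu.strict=0         allow lazy, flush-queue style TLB invalidation
 *
 * Both options are parsed with kstrtobool(), so "y"/"n", "1"/"0" and
 * "on"/"off" are all accepted.
 */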
329 
330 static ssize_t iommu_group_attr_show(struct kobject *kobj,
331 				     struct attribute *__attr, char *buf)
332 {
333 	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
334 	struct iommu_group *group = to_iommu_group(kobj);
335 	ssize_t ret = -EIO;
336 
337 	if (attr->show)
338 		ret = attr->show(group, buf);
339 	return ret;
340 }
341 
342 static ssize_t iommu_group_attr_store(struct kobject *kobj,
343 				      struct attribute *__attr,
344 				      const char *buf, size_t count)
345 {
346 	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
347 	struct iommu_group *group = to_iommu_group(kobj);
348 	ssize_t ret = -EIO;
349 
350 	if (attr->store)
351 		ret = attr->store(group, buf, count);
352 	return ret;
353 }
354 
355 static const struct sysfs_ops iommu_group_sysfs_ops = {
356 	.show = iommu_group_attr_show,
357 	.store = iommu_group_attr_store,
358 };
359 
360 static int iommu_group_create_file(struct iommu_group *group,
361 				   struct iommu_group_attribute *attr)
362 {
363 	return sysfs_create_file(&group->kobj, &attr->attr);
364 }
365 
366 static void iommu_group_remove_file(struct iommu_group *group,
367 				    struct iommu_group_attribute *attr)
368 {
369 	sysfs_remove_file(&group->kobj, &attr->attr);
370 }
371 
372 static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
373 {
374 	return sprintf(buf, "%s\n", group->name);
375 }
376 
377 /**
378  * iommu_insert_resv_region - Insert a new region in the
379  * list of reserved regions.
380  * @new: new region to insert
381  * @regions: list of regions
382  *
383  * Elements are sorted by start address and overlapping segments
384  * of the same type are merged.
385  */
386 int iommu_insert_resv_region(struct iommu_resv_region *new,
387 			     struct list_head *regions)
388 {
389 	struct iommu_resv_region *iter, *tmp, *nr, *top;
390 	LIST_HEAD(stack);
391 
392 	nr = iommu_alloc_resv_region(new->start, new->length,
393 				     new->prot, new->type);
394 	if (!nr)
395 		return -ENOMEM;
396 
397 	/* First add the new element based on start address sorting */
398 	list_for_each_entry(iter, regions, list) {
399 		if (nr->start < iter->start ||
400 		    (nr->start == iter->start && nr->type <= iter->type))
401 			break;
402 	}
403 	list_add_tail(&nr->list, &iter->list);
404 
405 	/* Merge overlapping segments of type nr->type in @regions, if any */
406 	list_for_each_entry_safe(iter, tmp, regions, list) {
407 		phys_addr_t top_end, iter_end = iter->start + iter->length - 1;
408 
409 		/* no merge needed for elements of a different type than @new */
410 		if (iter->type != new->type) {
411 			list_move_tail(&iter->list, &stack);
412 			continue;
413 		}
414 
415 		/* look for the last stack element of same type as @iter */
416 		list_for_each_entry_reverse(top, &stack, list)
417 			if (top->type == iter->type)
418 				goto check_overlap;
419 
420 		list_move_tail(&iter->list, &stack);
421 		continue;
422 
423 check_overlap:
424 		top_end = top->start + top->length - 1;
425 
426 		if (iter->start > top_end + 1) {
427 			list_move_tail(&iter->list, &stack);
428 		} else {
429 			top->length = max(top_end, iter_end) - top->start + 1;
430 			list_del(&iter->list);
431 			kfree(iter);
432 		}
433 	}
434 	list_splice(&stack, regions);
435 	return 0;
436 }
437 
438 static int
439 iommu_insert_device_resv_regions(struct list_head *dev_resv_regions,
440 				 struct list_head *group_resv_regions)
441 {
442 	struct iommu_resv_region *entry;
443 	int ret = 0;
444 
445 	list_for_each_entry(entry, dev_resv_regions, list) {
446 		ret = iommu_insert_resv_region(entry, group_resv_regions);
447 		if (ret)
448 			break;
449 	}
450 	return ret;
451 }
452 
453 int iommu_get_group_resv_regions(struct iommu_group *group,
454 				 struct list_head *head)
455 {
456 	struct group_device *device;
457 	int ret = 0;
458 
459 	mutex_lock(&group->mutex);
460 	list_for_each_entry(device, &group->devices, list) {
461 		struct list_head dev_resv_regions;
462 
463 		INIT_LIST_HEAD(&dev_resv_regions);
464 		iommu_get_resv_regions(device->dev, &dev_resv_regions);
465 		ret = iommu_insert_device_resv_regions(&dev_resv_regions, head);
466 		iommu_put_resv_regions(device->dev, &dev_resv_regions);
467 		if (ret)
468 			break;
469 	}
470 	mutex_unlock(&group->mutex);
471 	return ret;
472 }
473 EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions);
474 
475 static ssize_t iommu_group_show_resv_regions(struct iommu_group *group,
476 					     char *buf)
477 {
478 	struct iommu_resv_region *region, *next;
479 	struct list_head group_resv_regions;
480 	char *str = buf;
481 
482 	INIT_LIST_HEAD(&group_resv_regions);
483 	iommu_get_group_resv_regions(group, &group_resv_regions);
484 
485 	list_for_each_entry_safe(region, next, &group_resv_regions, list) {
486 		str += sprintf(str, "0x%016llx 0x%016llx %s\n",
487 			       (long long int)region->start,
488 			       (long long int)(region->start +
489 						region->length - 1),
490 			       iommu_group_resv_type_string[region->type]);
491 		kfree(region);
492 	}
493 
494 	return (str - buf);
495 }
496 
497 static ssize_t iommu_group_show_type(struct iommu_group *group,
498 				     char *buf)
499 {
500 	char *type = "unknown\n";
501 
502 	if (group->default_domain) {
503 		switch (group->default_domain->type) {
504 		case IOMMU_DOMAIN_BLOCKED:
505 			type = "blocked\n";
506 			break;
507 		case IOMMU_DOMAIN_IDENTITY:
508 			type = "identity\n";
509 			break;
510 		case IOMMU_DOMAIN_UNMANAGED:
511 			type = "unmanaged\n";
512 			break;
513 		case IOMMU_DOMAIN_DMA:
514 			type = "DMA\n";
515 			break;
516 		}
517 	}
518 	strcpy(buf, type);
519 
520 	return strlen(type);
521 }
522 
523 static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);
524 
525 static IOMMU_GROUP_ATTR(reserved_regions, 0444,
526 			iommu_group_show_resv_regions, NULL);
527 
528 static IOMMU_GROUP_ATTR(type, 0444, iommu_group_show_type, NULL);
529 
530 static void iommu_group_release(struct kobject *kobj)
531 {
532 	struct iommu_group *group = to_iommu_group(kobj);
533 
534 	pr_debug("Releasing group %d\n", group->id);
535 
536 	if (group->iommu_data_release)
537 		group->iommu_data_release(group->iommu_data);
538 
539 	ida_simple_remove(&iommu_group_ida, group->id);
540 
541 	if (group->default_domain)
542 		iommu_domain_free(group->default_domain);
543 
544 	kfree(group->name);
545 	kfree(group);
546 }
547 
548 static struct kobj_type iommu_group_ktype = {
549 	.sysfs_ops = &iommu_group_sysfs_ops,
550 	.release = iommu_group_release,
551 };
552 
553 /**
554  * iommu_group_alloc - Allocate a new group
555  *
556  * This function is called by an iommu driver to allocate a new iommu
557  * group.  The iommu group represents the minimum granularity of the iommu.
558  * Upon successful return, the caller holds a reference to the supplied
559  * group in order to hold the group until devices are added.  Use
560  * iommu_group_put() to release this extra reference count, allowing the
561  * group to be automatically reclaimed once it has no devices or external
562  * references.
563  */
564 struct iommu_group *iommu_group_alloc(void)
565 {
566 	struct iommu_group *group;
567 	int ret;
568 
569 	group = kzalloc(sizeof(*group), GFP_KERNEL);
570 	if (!group)
571 		return ERR_PTR(-ENOMEM);
572 
573 	group->kobj.kset = iommu_group_kset;
574 	mutex_init(&group->mutex);
575 	INIT_LIST_HEAD(&group->devices);
576 	INIT_LIST_HEAD(&group->entry);
577 	BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);
578 
579 	ret = ida_simple_get(&iommu_group_ida, 0, 0, GFP_KERNEL);
580 	if (ret < 0) {
581 		kfree(group);
582 		return ERR_PTR(ret);
583 	}
584 	group->id = ret;
585 
586 	ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
587 				   NULL, "%d", group->id);
588 	if (ret) {
589 		ida_simple_remove(&iommu_group_ida, group->id);
590 		kobject_put(&group->kobj);
591 		return ERR_PTR(ret);
592 	}
593 
594 	group->devices_kobj = kobject_create_and_add("devices", &group->kobj);
595 	if (!group->devices_kobj) {
596 		kobject_put(&group->kobj); /* triggers .release & free */
597 		return ERR_PTR(-ENOMEM);
598 	}
599 
600 	/*
601 	 * The devices_kobj holds a reference on the group kobject, so
602 	 * as long as that exists so will the group.  We can therefore
603 	 * use the devices_kobj for reference counting.
604 	 */
605 	kobject_put(&group->kobj);
606 
607 	ret = iommu_group_create_file(group,
608 				      &iommu_group_attr_reserved_regions);
609 	if (ret)
610 		return ERR_PTR(ret);
611 
612 	ret = iommu_group_create_file(group, &iommu_group_attr_type);
613 	if (ret)
614 		return ERR_PTR(ret);
615 
616 	pr_debug("Allocated group %d\n", group->id);
617 
618 	return group;
619 }
620 EXPORT_SYMBOL_GPL(iommu_group_alloc);
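
/*
 * Illustrative sketch, not part of the original file: a minimal
 * ->device_group() callback in the spirit of generic_device_group() further
 * below, allocating one group per device and giving it a readable name.
 * The function name "example_device_group" and the use of dev_name() for
 * the group name are assumptions, not an existing driver's code.
 */
static struct iommu_group *example_device_group(struct device *dev)
{
	struct iommu_group *group = iommu_group_alloc();
	int ret;

	if (IS_ERR(group))
		return group;

	/* Optional: expose a human-readable name attribute in sysfs */
	ret = iommu_group_set_name(group, dev_name(dev));
	if (ret) {
		iommu_group_put(group);
		return ERR_PTR(ret);
	}

	/* The IOMMU core adds the device and drops the reference later */
	return group;
}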
621 
622 struct iommu_group *iommu_group_get_by_id(int id)
623 {
624 	struct kobject *group_kobj;
625 	struct iommu_group *group;
626 	const char *name;
627 
628 	if (!iommu_group_kset)
629 		return NULL;
630 
631 	name = kasprintf(GFP_KERNEL, "%d", id);
632 	if (!name)
633 		return NULL;
634 
635 	group_kobj = kset_find_obj(iommu_group_kset, name);
636 	kfree(name);
637 
638 	if (!group_kobj)
639 		return NULL;
640 
641 	group = container_of(group_kobj, struct iommu_group, kobj);
642 	BUG_ON(group->id != id);
643 
644 	kobject_get(group->devices_kobj);
645 	kobject_put(&group->kobj);
646 
647 	return group;
648 }
649 EXPORT_SYMBOL_GPL(iommu_group_get_by_id);
650 
651 /**
652  * iommu_group_get_iommudata - retrieve iommu_data registered for a group
653  * @group: the group
654  *
655  * iommu drivers can store data in the group for use when doing iommu
656  * operations.  This function provides a way to retrieve it.  Caller
657  * should hold a group reference.
658  */
659 void *iommu_group_get_iommudata(struct iommu_group *group)
660 {
661 	return group->iommu_data;
662 }
663 EXPORT_SYMBOL_GPL(iommu_group_get_iommudata);
664 
665 /**
666  * iommu_group_set_iommudata - set iommu_data for a group
667  * @group: the group
668  * @iommu_data: new data
669  * @release: release function for iommu_data
670  *
671  * iommu drivers can store data in the group for use when doing iommu
672  * operations.  This function provides a way to set the data after
673  * the group has been allocated.  Caller should hold a group reference.
674  */
675 void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
676 			       void (*release)(void *iommu_data))
677 {
678 	group->iommu_data = iommu_data;
679 	group->iommu_data_release = release;
680 }
681 EXPORT_SYMBOL_GPL(iommu_group_set_iommudata);
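
/*
 * Illustrative sketch, not part of the original file: storing per-group
 * driver data with a release callback, as described above.  The structure
 * "example_group_data" and its stream_id field are hypothetical.
 */
struct example_group_data {
	u32 stream_id;
};

static void example_group_data_release(void *iommu_data)
{
	kfree(iommu_data);
}

static int example_set_group_data(struct iommu_group *group, u32 sid)
{
	struct example_group_data *data = kzalloc(sizeof(*data), GFP_KERNEL);

	if (!data)
		return -ENOMEM;

	data->stream_id = sid;
	/* Freed via example_group_data_release() when the group goes away */
	iommu_group_set_iommudata(group, data, example_group_data_release);
	return 0;
}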
682 
683 /**
684  * iommu_group_set_name - set name for a group
685  * @group: the group
686  * @name: name
687  *
688  * Allow iommu driver to set a name for a group.  When set it will
689  * appear in a name attribute file under the group in sysfs.
690  */
691 int iommu_group_set_name(struct iommu_group *group, const char *name)
692 {
693 	int ret;
694 
695 	if (group->name) {
696 		iommu_group_remove_file(group, &iommu_group_attr_name);
697 		kfree(group->name);
698 		group->name = NULL;
699 		if (!name)
700 			return 0;
701 	}
702 
703 	group->name = kstrdup(name, GFP_KERNEL);
704 	if (!group->name)
705 		return -ENOMEM;
706 
707 	ret = iommu_group_create_file(group, &iommu_group_attr_name);
708 	if (ret) {
709 		kfree(group->name);
710 		group->name = NULL;
711 		return ret;
712 	}
713 
714 	return 0;
715 }
716 EXPORT_SYMBOL_GPL(iommu_group_set_name);
717 
718 static int iommu_create_device_direct_mappings(struct iommu_group *group,
719 					       struct device *dev)
720 {
721 	struct iommu_domain *domain = group->default_domain;
722 	struct iommu_resv_region *entry;
723 	struct list_head mappings;
724 	unsigned long pg_size;
725 	int ret = 0;
726 
727 	if (!domain || domain->type != IOMMU_DOMAIN_DMA)
728 		return 0;
729 
730 	BUG_ON(!domain->pgsize_bitmap);
731 
732 	pg_size = 1UL << __ffs(domain->pgsize_bitmap);
733 	INIT_LIST_HEAD(&mappings);
734 
735 	iommu_get_resv_regions(dev, &mappings);
736 
737 	/* We need to consider overlapping regions for different devices */
738 	list_for_each_entry(entry, &mappings, list) {
739 		dma_addr_t start, end, addr;
740 
741 		if (domain->ops->apply_resv_region)
742 			domain->ops->apply_resv_region(dev, domain, entry);
743 
744 		start = ALIGN(entry->start, pg_size);
745 		end   = ALIGN(entry->start + entry->length, pg_size);
746 
747 		if (entry->type != IOMMU_RESV_DIRECT &&
748 		    entry->type != IOMMU_RESV_DIRECT_RELAXABLE)
749 			continue;
750 
751 		for (addr = start; addr < end; addr += pg_size) {
752 			phys_addr_t phys_addr;
753 
754 			phys_addr = iommu_iova_to_phys(domain, addr);
755 			if (phys_addr)
756 				continue;
757 
758 			ret = iommu_map(domain, addr, addr, pg_size, entry->prot);
759 			if (ret)
760 				goto out;
761 		}
762 
763 	}
764 
765 	iommu_flush_tlb_all(domain);
766 
767 out:
768 	iommu_put_resv_regions(dev, &mappings);
769 
770 	return ret;
771 }
772 
773 static bool iommu_is_attach_deferred(struct iommu_domain *domain,
774 				     struct device *dev)
775 {
776 	if (domain->ops->is_attach_deferred)
777 		return domain->ops->is_attach_deferred(domain, dev);
778 
779 	return false;
780 }
781 
782 /**
783  * iommu_group_add_device - add a device to an iommu group
784  * @group: the group into which to add the device (reference should be held)
785  * @dev: the device
786  *
787  * This function is called by an iommu driver to add a device into a
788  * group.  Adding a device increments the group reference count.
789  */
790 int iommu_group_add_device(struct iommu_group *group, struct device *dev)
791 {
792 	int ret, i = 0;
793 	struct group_device *device;
794 
795 	device = kzalloc(sizeof(*device), GFP_KERNEL);
796 	if (!device)
797 		return -ENOMEM;
798 
799 	device->dev = dev;
800 
801 	ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
802 	if (ret)
803 		goto err_free_device;
804 
805 	device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
806 rename:
807 	if (!device->name) {
808 		ret = -ENOMEM;
809 		goto err_remove_link;
810 	}
811 
812 	ret = sysfs_create_link_nowarn(group->devices_kobj,
813 				       &dev->kobj, device->name);
814 	if (ret) {
815 		if (ret == -EEXIST && i >= 0) {
816 			/*
817 			 * Account for the slim chance of collision
818 			 * and append an instance to the name.
819 			 */
820 			kfree(device->name);
821 			device->name = kasprintf(GFP_KERNEL, "%s.%d",
822 						 kobject_name(&dev->kobj), i++);
823 			goto rename;
824 		}
825 		goto err_free_name;
826 	}
827 
828 	kobject_get(group->devices_kobj);
829 
830 	dev->iommu_group = group;
831 
832 	mutex_lock(&group->mutex);
833 	list_add_tail(&device->list, &group->devices);
834 	if (group->domain  && !iommu_is_attach_deferred(group->domain, dev))
835 		ret = __iommu_attach_device(group->domain, dev);
836 	mutex_unlock(&group->mutex);
837 	if (ret)
838 		goto err_put_group;
839 
840 	/* Notify any listeners about change to group. */
841 	blocking_notifier_call_chain(&group->notifier,
842 				     IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);
843 
844 	trace_add_device_to_group(group->id, dev);
845 
846 	dev_info(dev, "Adding to iommu group %d\n", group->id);
847 
848 	return 0;
849 
850 err_put_group:
851 	mutex_lock(&group->mutex);
852 	list_del(&device->list);
853 	mutex_unlock(&group->mutex);
854 	dev->iommu_group = NULL;
855 	kobject_put(group->devices_kobj);
856 	sysfs_remove_link(group->devices_kobj, device->name);
857 err_free_name:
858 	kfree(device->name);
859 err_remove_link:
860 	sysfs_remove_link(&dev->kobj, "iommu_group");
861 err_free_device:
862 	kfree(device);
863 	dev_err(dev, "Failed to add to iommu group %d: %d\n", group->id, ret);
864 	return ret;
865 }
866 EXPORT_SYMBOL_GPL(iommu_group_add_device);
867 
868 /**
869  * iommu_group_remove_device - remove a device from its current group
870  * @dev: device to be removed
871  *
872  * This function is called by an iommu driver to remove the device from
873  * its current group.  This decrements the iommu group reference count.
874  */
875 void iommu_group_remove_device(struct device *dev)
876 {
877 	struct iommu_group *group = dev->iommu_group;
878 	struct group_device *tmp_device, *device = NULL;
879 
880 	dev_info(dev, "Removing from iommu group %d\n", group->id);
881 
882 	/* Pre-notify listeners that a device is being removed. */
883 	blocking_notifier_call_chain(&group->notifier,
884 				     IOMMU_GROUP_NOTIFY_DEL_DEVICE, dev);
885 
886 	mutex_lock(&group->mutex);
887 	list_for_each_entry(tmp_device, &group->devices, list) {
888 		if (tmp_device->dev == dev) {
889 			device = tmp_device;
890 			list_del(&device->list);
891 			break;
892 		}
893 	}
894 	mutex_unlock(&group->mutex);
895 
896 	if (!device)
897 		return;
898 
899 	sysfs_remove_link(group->devices_kobj, device->name);
900 	sysfs_remove_link(&dev->kobj, "iommu_group");
901 
902 	trace_remove_device_from_group(group->id, dev);
903 
904 	kfree(device->name);
905 	kfree(device);
906 	dev->iommu_group = NULL;
907 	kobject_put(group->devices_kobj);
908 }
909 EXPORT_SYMBOL_GPL(iommu_group_remove_device);
910 
911 static int iommu_group_device_count(struct iommu_group *group)
912 {
913 	struct group_device *entry;
914 	int ret = 0;
915 
916 	list_for_each_entry(entry, &group->devices, list)
917 		ret++;
918 
919 	return ret;
920 }
921 
922 /**
923  * iommu_group_for_each_dev - iterate over each device in the group
924  * @group: the group
925  * @data: caller opaque data to be passed to callback function
926  * @fn: caller supplied callback function
927  *
928  * This function is called by group users to iterate over group devices.
929  * Callers should hold a reference count to the group during callback.
930  * The group->mutex is held across callbacks, which will block calls to
931  * iommu_group_add/remove_device.
932  */
933 static int __iommu_group_for_each_dev(struct iommu_group *group, void *data,
934 				      int (*fn)(struct device *, void *))
935 {
936 	struct group_device *device;
937 	int ret = 0;
938 
939 	list_for_each_entry(device, &group->devices, list) {
940 		ret = fn(device->dev, data);
941 		if (ret)
942 			break;
943 	}
944 	return ret;
945 }
946 
947 
948 int iommu_group_for_each_dev(struct iommu_group *group, void *data,
949 			     int (*fn)(struct device *, void *))
950 {
951 	int ret;
952 
953 	mutex_lock(&group->mutex);
954 	ret = __iommu_group_for_each_dev(group, data, fn);
955 	mutex_unlock(&group->mutex);
956 
957 	return ret;
958 }
959 EXPORT_SYMBOL_GPL(iommu_group_for_each_dev);
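
/*
 * Illustrative sketch, not part of the original file: a caller-supplied
 * callback for iommu_group_for_each_dev().  Counting devices this way is
 * hypothetical; internally the core uses iommu_group_device_count() above.
 *
 * Usage:
 *	int n = 0;
 *	iommu_group_for_each_dev(group, &n, example_count_device);
 */
static int example_count_device(struct device *dev, void *data)
{
	int *count = data;

	(*count)++;
	return 0;	/* a non-zero return would stop the iteration */
}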
960 
961 /**
962  * iommu_group_get - Return the group for a device and increment reference
963  * @dev: get the group that this device belongs to
964  *
965  * This function is called by iommu drivers and users to get the group
966  * for the specified device.  If found, the group is returned and the group
967  * reference is incremented, else NULL is returned.
968  */
969 struct iommu_group *iommu_group_get(struct device *dev)
970 {
971 	struct iommu_group *group = dev->iommu_group;
972 
973 	if (group)
974 		kobject_get(group->devices_kobj);
975 
976 	return group;
977 }
978 EXPORT_SYMBOL_GPL(iommu_group_get);
979 
980 /**
981  * iommu_group_ref_get - Increment reference on a group
982  * @group: the group to use, must not be NULL
983  *
984  * This function is called by iommu drivers to take additional references on an
985  * existing group.  Returns the given group for convenience.
986  */
987 struct iommu_group *iommu_group_ref_get(struct iommu_group *group)
988 {
989 	kobject_get(group->devices_kobj);
990 	return group;
991 }
992 EXPORT_SYMBOL_GPL(iommu_group_ref_get);
993 
994 /**
995  * iommu_group_put - Decrement group reference
996  * @group: the group to use
997  *
998  * This function is called by iommu drivers and users to release the
999  * iommu group.  Once the reference count is zero, the group is released.
1000  */
1001 void iommu_group_put(struct iommu_group *group)
1002 {
1003 	if (group)
1004 		kobject_put(group->devices_kobj);
1005 }
1006 EXPORT_SYMBOL_GPL(iommu_group_put);
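
/*
 * Illustrative sketch, not part of the original file: the usual reference
 * pattern around iommu_group_get()/iommu_group_put() - look the group up,
 * use it, and always drop the reference.  "example_show_group_id" is a
 * hypothetical helper.
 */
static int example_show_group_id(struct device *dev)
{
	struct iommu_group *group = iommu_group_get(dev);
	int id;

	if (!group)
		return -ENODEV;	/* device is not in an iommu group */

	id = iommu_group_id(group);
	iommu_group_put(group);
	return id;
}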
1007 
1008 /**
1009  * iommu_group_register_notifier - Register a notifier for group changes
1010  * @group: the group to watch
1011  * @nb: notifier block to signal
1012  *
1013  * This function allows iommu group users to track changes in a group.
1014  * See include/linux/iommu.h for actions sent via this notifier.  Caller
1015  * should hold a reference to the group throughout notifier registration.
1016  */
1017 int iommu_group_register_notifier(struct iommu_group *group,
1018 				  struct notifier_block *nb)
1019 {
1020 	return blocking_notifier_chain_register(&group->notifier, nb);
1021 }
1022 EXPORT_SYMBOL_GPL(iommu_group_register_notifier);
1023 
1024 /**
1025  * iommu_group_unregister_notifier - Unregister a notifier
1026  * @group: the group to watch
1027  * @nb: notifier block to signal
1028  *
1029  * Unregister a previously registered group notifier block.
1030  */
1031 int iommu_group_unregister_notifier(struct iommu_group *group,
1032 				    struct notifier_block *nb)
1033 {
1034 	return blocking_notifier_chain_unregister(&group->notifier, nb);
1035 }
1036 EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier);
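
/*
 * Illustrative sketch, not part of the original file: a notifier block of
 * the shape expected by iommu_group_register_notifier(), logging device
 * additions.  The callback name is hypothetical; the full list of actions
 * is in include/linux/iommu.h.
 */
static int example_group_notifier_call(struct notifier_block *nb,
				       unsigned long action, void *data)
{
	struct device *dev = data;

	if (action == IOMMU_GROUP_NOTIFY_ADD_DEVICE)
		dev_info(dev, "added to an iommu group\n");

	return NOTIFY_OK;
}

static struct notifier_block example_group_nb = {
	.notifier_call = example_group_notifier_call,
};

/* Usage: iommu_group_register_notifier(group, &example_group_nb); */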
1037 
1038 /**
1039  * iommu_register_device_fault_handler() - Register a device fault handler
1040  * @dev: the device
1041  * @handler: the fault handler
1042  * @data: private data passed as argument to the handler
1043  *
1044  * When an IOMMU fault event is received, this handler gets called with the
1045  * fault event and data as arguments. The handler should return 0 on success. If
1046  * the fault is recoverable (IOMMU_FAULT_PAGE_REQ), the consumer should also
1047  * complete the fault by calling iommu_page_response() with one of the following
1048  * response codes:
1049  * - IOMMU_PAGE_RESP_SUCCESS: retry the translation
1050  * - IOMMU_PAGE_RESP_INVALID: terminate the fault
1051  * - IOMMU_PAGE_RESP_FAILURE: terminate the fault and stop reporting
1052  *   page faults if possible.
1053  *
1054  * Return 0 if the fault handler was installed successfully, or an error.
1055  */
1056 int iommu_register_device_fault_handler(struct device *dev,
1057 					iommu_dev_fault_handler_t handler,
1058 					void *data)
1059 {
1060 	struct dev_iommu *param = dev->iommu;
1061 	int ret = 0;
1062 
1063 	if (!param)
1064 		return -EINVAL;
1065 
1066 	mutex_lock(&param->lock);
1067 	/* Only allow one fault handler registered for each device */
1068 	if (param->fault_param) {
1069 		ret = -EBUSY;
1070 		goto done_unlock;
1071 	}
1072 
1073 	get_device(dev);
1074 	param->fault_param = kzalloc(sizeof(*param->fault_param), GFP_KERNEL);
1075 	if (!param->fault_param) {
1076 		put_device(dev);
1077 		ret = -ENOMEM;
1078 		goto done_unlock;
1079 	}
1080 	param->fault_param->handler = handler;
1081 	param->fault_param->data = data;
1082 	mutex_init(&param->fault_param->lock);
1083 	INIT_LIST_HEAD(&param->fault_param->faults);
1084 
1085 done_unlock:
1086 	mutex_unlock(&param->lock);
1087 
1088 	return ret;
1089 }
1090 EXPORT_SYMBOL_GPL(iommu_register_device_fault_handler);
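
/*
 * Illustrative sketch, not part of the original file: a consumer-side
 * handler of the iommu_dev_fault_handler_t shape described above.  It only
 * logs the fault; a real consumer would track recoverable page requests and
 * later reply via iommu_page_response().  The handler name is hypothetical.
 */
static int example_dev_fault_handler(struct iommu_fault *fault, void *data)
{
	struct device *dev = data;

	if (fault->type == IOMMU_FAULT_PAGE_REQ)
		dev_dbg(dev, "page request: pasid %u grpid %u\n",
			fault->prm.pasid, fault->prm.grpid);
	else
		dev_dbg(dev, "unrecoverable fault: reason %u\n",
			fault->event.reason);

	return 0;
}

/* Usage: iommu_register_device_fault_handler(dev, example_dev_fault_handler, dev); */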
1091 
1092 /**
1093  * iommu_unregister_device_fault_handler() - Unregister the device fault handler
1094  * @dev: the device
1095  *
1096  * Remove the device fault handler installed with
1097  * iommu_register_device_fault_handler().
1098  *
1099  * Return 0 on success, or an error.
1100  */
1101 int iommu_unregister_device_fault_handler(struct device *dev)
1102 {
1103 	struct dev_iommu *param = dev->iommu;
1104 	int ret = 0;
1105 
1106 	if (!param)
1107 		return -EINVAL;
1108 
1109 	mutex_lock(&param->lock);
1110 
1111 	if (!param->fault_param)
1112 		goto unlock;
1113 
1114 	/* we cannot unregister handler if there are pending faults */
1115 	if (!list_empty(&param->fault_param->faults)) {
1116 		ret = -EBUSY;
1117 		goto unlock;
1118 	}
1119 
1120 	kfree(param->fault_param);
1121 	param->fault_param = NULL;
1122 	put_device(dev);
1123 unlock:
1124 	mutex_unlock(&param->lock);
1125 
1126 	return ret;
1127 }
1128 EXPORT_SYMBOL_GPL(iommu_unregister_device_fault_handler);
1129 
1130 /**
1131  * iommu_report_device_fault() - Report fault event to device driver
1132  * @dev: the device
1133  * @evt: fault event data
1134  *
1135  * Called by IOMMU drivers when a fault is detected, typically in a threaded IRQ
1136  * handler. When this function fails and the fault is recoverable, it is the
1137  * caller's responsibility to complete the fault.
1138  *
1139  * Return 0 on success, or an error.
1140  */
1141 int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
1142 {
1143 	struct dev_iommu *param = dev->iommu;
1144 	struct iommu_fault_event *evt_pending = NULL;
1145 	struct iommu_fault_param *fparam;
1146 	int ret = 0;
1147 
1148 	if (!param || !evt)
1149 		return -EINVAL;
1150 
1151 	/* we only report device fault if there is a handler registered */
1152 	mutex_lock(&param->lock);
1153 	fparam = param->fault_param;
1154 	if (!fparam || !fparam->handler) {
1155 		ret = -EINVAL;
1156 		goto done_unlock;
1157 	}
1158 
1159 	if (evt->fault.type == IOMMU_FAULT_PAGE_REQ &&
1160 	    (evt->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
1161 		evt_pending = kmemdup(evt, sizeof(struct iommu_fault_event),
1162 				      GFP_KERNEL);
1163 		if (!evt_pending) {
1164 			ret = -ENOMEM;
1165 			goto done_unlock;
1166 		}
1167 		mutex_lock(&fparam->lock);
1168 		list_add_tail(&evt_pending->list, &fparam->faults);
1169 		mutex_unlock(&fparam->lock);
1170 	}
1171 
1172 	ret = fparam->handler(&evt->fault, fparam->data);
1173 	if (ret && evt_pending) {
1174 		mutex_lock(&fparam->lock);
1175 		list_del(&evt_pending->list);
1176 		mutex_unlock(&fparam->lock);
1177 		kfree(evt_pending);
1178 	}
1179 done_unlock:
1180 	mutex_unlock(&param->lock);
1181 	return ret;
1182 }
1183 EXPORT_SYMBOL_GPL(iommu_report_device_fault);
1184 
1185 int iommu_page_response(struct device *dev,
1186 			struct iommu_page_response *msg)
1187 {
1188 	bool pasid_valid;
1189 	int ret = -EINVAL;
1190 	struct iommu_fault_event *evt;
1191 	struct iommu_fault_page_request *prm;
1192 	struct dev_iommu *param = dev->iommu;
1193 	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
1194 
1195 	if (!domain || !domain->ops->page_response)
1196 		return -ENODEV;
1197 
1198 	if (!param || !param->fault_param)
1199 		return -EINVAL;
1200 
1201 	if (msg->version != IOMMU_PAGE_RESP_VERSION_1 ||
1202 	    msg->flags & ~IOMMU_PAGE_RESP_PASID_VALID)
1203 		return -EINVAL;
1204 
1205 	/* Only send response if there is a fault report pending */
1206 	mutex_lock(&param->fault_param->lock);
1207 	if (list_empty(&param->fault_param->faults)) {
1208 		dev_warn_ratelimited(dev, "no pending PRQ, drop response\n");
1209 		goto done_unlock;
1210 	}
1211 	/*
1212 	 * Check if we have a matching page request pending to respond,
1213 	 * otherwise return -EINVAL
1214 	 */
1215 	list_for_each_entry(evt, &param->fault_param->faults, list) {
1216 		prm = &evt->fault.prm;
1217 		pasid_valid = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
1218 
1219 		if ((pasid_valid && prm->pasid != msg->pasid) ||
1220 		    prm->grpid != msg->grpid)
1221 			continue;
1222 
1223 		/* Sanitize the reply */
1224 		msg->flags = pasid_valid ? IOMMU_PAGE_RESP_PASID_VALID : 0;
1225 
1226 		ret = domain->ops->page_response(dev, evt, msg);
1227 		list_del(&evt->list);
1228 		kfree(evt);
1229 		break;
1230 	}
1231 
1232 done_unlock:
1233 	mutex_unlock(&param->fault_param->lock);
1234 	return ret;
1235 }
1236 EXPORT_SYMBOL_GPL(iommu_page_response);
1237 
1238 /**
1239  * iommu_group_id - Return ID for a group
1240  * @group: the group to ID
1241  *
1242  * Return the unique ID for the group matching the sysfs group number.
1243  */
1244 int iommu_group_id(struct iommu_group *group)
1245 {
1246 	return group->id;
1247 }
1248 EXPORT_SYMBOL_GPL(iommu_group_id);
1249 
1250 static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
1251 					       unsigned long *devfns);
1252 
1253 /*
1254  * To consider a PCI device isolated, we require ACS to support Source
1255  * Validation, Request Redirection, Completer Redirection, and Upstream
1256  * Forwarding.  This effectively means that devices cannot spoof their
1257  * requester ID, requests and completions cannot be redirected, and all
1258  * transactions are forwarded upstream, even when they pass through a
1259  * bridge where the target device is downstream.
1260  */
1261 #define REQ_ACS_FLAGS   (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)
1262 
1263 /*
1264  * For multifunction devices which are not isolated from each other, find
1265  * all the other non-isolated functions and look for existing groups.  For
1266  * each function, we also need to look for aliases to or from other devices
1267  * that may already have a group.
1268  */
1269 static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev,
1270 							unsigned long *devfns)
1271 {
1272 	struct pci_dev *tmp = NULL;
1273 	struct iommu_group *group;
1274 
1275 	if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS))
1276 		return NULL;
1277 
1278 	for_each_pci_dev(tmp) {
1279 		if (tmp == pdev || tmp->bus != pdev->bus ||
1280 		    PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) ||
1281 		    pci_acs_enabled(tmp, REQ_ACS_FLAGS))
1282 			continue;
1283 
1284 		group = get_pci_alias_group(tmp, devfns);
1285 		if (group) {
1286 			pci_dev_put(tmp);
1287 			return group;
1288 		}
1289 	}
1290 
1291 	return NULL;
1292 }
1293 
1294 /*
1295  * Look for aliases to or from the given device for existing groups. DMA
1296  * aliases are only supported on the same bus, so the search
1297  * space is quite small (especially since we're really only looking at PCIe
1298  * devices, and therefore only expect multiple slots on the root complex or
1299  * downstream switch ports).  It's conceivable though that a pair of
1300  * multifunction devices could have aliases between them that would cause a
1301  * loop.  To prevent this, we use a bitmap to track where we've been.
1302  */
1303 static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
1304 					       unsigned long *devfns)
1305 {
1306 	struct pci_dev *tmp = NULL;
1307 	struct iommu_group *group;
1308 
1309 	if (test_and_set_bit(pdev->devfn & 0xff, devfns))
1310 		return NULL;
1311 
1312 	group = iommu_group_get(&pdev->dev);
1313 	if (group)
1314 		return group;
1315 
1316 	for_each_pci_dev(tmp) {
1317 		if (tmp == pdev || tmp->bus != pdev->bus)
1318 			continue;
1319 
1320 		/* We alias them or they alias us */
1321 		if (pci_devs_are_dma_aliases(pdev, tmp)) {
1322 			group = get_pci_alias_group(tmp, devfns);
1323 			if (group) {
1324 				pci_dev_put(tmp);
1325 				return group;
1326 			}
1327 
1328 			group = get_pci_function_alias_group(tmp, devfns);
1329 			if (group) {
1330 				pci_dev_put(tmp);
1331 				return group;
1332 			}
1333 		}
1334 	}
1335 
1336 	return NULL;
1337 }
1338 
1339 struct group_for_pci_data {
1340 	struct pci_dev *pdev;
1341 	struct iommu_group *group;
1342 };
1343 
1344 /*
1345  * DMA alias iterator callback, return the last seen device.  Stop and return
1346  * the IOMMU group if we find one along the way.
1347  */
1348 static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque)
1349 {
1350 	struct group_for_pci_data *data = opaque;
1351 
1352 	data->pdev = pdev;
1353 	data->group = iommu_group_get(&pdev->dev);
1354 
1355 	return data->group != NULL;
1356 }
1357 
1358 /*
1359  * Generic device_group call-back function. It just allocates one
1360  * iommu-group per device.
1361  */
1362 struct iommu_group *generic_device_group(struct device *dev)
1363 {
1364 	return iommu_group_alloc();
1365 }
1366 EXPORT_SYMBOL_GPL(generic_device_group);
1367 
1368 /*
1369  * Use standard PCI bus topology, isolation features, and DMA alias quirks
1370  * to find or create an IOMMU group for a device.
1371  */
1372 struct iommu_group *pci_device_group(struct device *dev)
1373 {
1374 	struct pci_dev *pdev = to_pci_dev(dev);
1375 	struct group_for_pci_data data;
1376 	struct pci_bus *bus;
1377 	struct iommu_group *group = NULL;
1378 	u64 devfns[4] = { 0 };
1379 
1380 	if (WARN_ON(!dev_is_pci(dev)))
1381 		return ERR_PTR(-EINVAL);
1382 
1383 	/*
1384 	 * Find the upstream DMA alias for the device.  A device must not
1385 	 * be aliased due to topology in order to have its own IOMMU group.
1386 	 * If we find an alias along the way that already belongs to a
1387 	 * group, use it.
1388 	 */
1389 	if (pci_for_each_dma_alias(pdev, get_pci_alias_or_group, &data))
1390 		return data.group;
1391 
1392 	pdev = data.pdev;
1393 
1394 	/*
1395 	 * Continue upstream from the point of minimum IOMMU granularity
1396 	 * due to aliases to the point where devices are protected from
1397 	 * peer-to-peer DMA by PCI ACS.  Again, if we find an existing
1398 	 * group, use it.
1399 	 */
1400 	for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) {
1401 		if (!bus->self)
1402 			continue;
1403 
1404 		if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
1405 			break;
1406 
1407 		pdev = bus->self;
1408 
1409 		group = iommu_group_get(&pdev->dev);
1410 		if (group)
1411 			return group;
1412 	}
1413 
1414 	/*
1415 	 * Look for existing groups on device aliases.  If we alias another
1416 	 * device or another device aliases us, use the same group.
1417 	 */
1418 	group = get_pci_alias_group(pdev, (unsigned long *)devfns);
1419 	if (group)
1420 		return group;
1421 
1422 	/*
1423 	 * Look for existing groups on non-isolated functions on the same
1424  * slot and aliases of those functions, if any.  No need to clear
1425 	 * the search bitmap, the tested devfns are still valid.
1426 	 */
1427 	group = get_pci_function_alias_group(pdev, (unsigned long *)devfns);
1428 	if (group)
1429 		return group;
1430 
1431 	/* No shared group found, allocate new */
1432 	return iommu_group_alloc();
1433 }
1434 EXPORT_SYMBOL_GPL(pci_device_group);
1435 
1436 /* Get the IOMMU group for device on fsl-mc bus */
1437 struct iommu_group *fsl_mc_device_group(struct device *dev)
1438 {
1439 	struct device *cont_dev = fsl_mc_cont_dev(dev);
1440 	struct iommu_group *group;
1441 
1442 	group = iommu_group_get(cont_dev);
1443 	if (!group)
1444 		group = iommu_group_alloc();
1445 	return group;
1446 }
1447 EXPORT_SYMBOL_GPL(fsl_mc_device_group);
1448 
1449 static int iommu_get_def_domain_type(struct device *dev)
1450 {
1451 	const struct iommu_ops *ops = dev->bus->iommu_ops;
1452 	unsigned int type = 0;
1453 
1454 	if (ops->def_domain_type)
1455 		type = ops->def_domain_type(dev);
1456 
1457 	return (type == 0) ? iommu_def_domain_type : type;
1458 }
1459 
1460 static int iommu_group_alloc_default_domain(struct bus_type *bus,
1461 					    struct iommu_group *group,
1462 					    unsigned int type)
1463 {
1464 	struct iommu_domain *dom;
1465 
1466 	dom = __iommu_domain_alloc(bus, type);
1467 	if (!dom && type != IOMMU_DOMAIN_DMA) {
1468 		dom = __iommu_domain_alloc(bus, IOMMU_DOMAIN_DMA);
1469 		if (dom)
1470 			pr_warn("Failed to allocate default IOMMU domain of type %u for group %s - Falling back to IOMMU_DOMAIN_DMA",
1471 				type, group->name);
1472 	}
1473 
1474 	if (!dom)
1475 		return -ENOMEM;
1476 
1477 	group->default_domain = dom;
1478 	if (!group->domain)
1479 		group->domain = dom;
1480 
1481 	if (!iommu_dma_strict) {
1482 		int attr = 1;
1483 		iommu_domain_set_attr(dom,
1484 				      DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE,
1485 				      &attr);
1486 	}
1487 
1488 	return 0;
1489 }
1490 
1491 static int iommu_alloc_default_domain(struct iommu_group *group,
1492 				      struct device *dev)
1493 {
1494 	unsigned int type;
1495 
1496 	if (group->default_domain)
1497 		return 0;
1498 
1499 	type = iommu_get_def_domain_type(dev);
1500 
1501 	return iommu_group_alloc_default_domain(dev->bus, group, type);
1502 }
1503 
1504 /**
1505  * iommu_group_get_for_dev - Find or create the IOMMU group for a device
1506  * @dev: target device
1507  *
1508  * This function is intended to be called by IOMMU drivers and extended to
1509  * support common, bus-defined algorithms when determining or creating the
1510  * IOMMU group for a device.  On success, the caller will hold a reference
1511  * to the returned IOMMU group, which will already include the provided
1512  * device.  The reference should be released with iommu_group_put().
1513  */
1514 static struct iommu_group *iommu_group_get_for_dev(struct device *dev)
1515 {
1516 	const struct iommu_ops *ops = dev->bus->iommu_ops;
1517 	struct iommu_group *group;
1518 	int ret;
1519 
1520 	group = iommu_group_get(dev);
1521 	if (group)
1522 		return group;
1523 
1524 	if (!ops)
1525 		return ERR_PTR(-EINVAL);
1526 
1527 	group = ops->device_group(dev);
1528 	if (WARN_ON_ONCE(group == NULL))
1529 		return ERR_PTR(-EINVAL);
1530 
1531 	if (IS_ERR(group))
1532 		return group;
1533 
1534 	ret = iommu_group_add_device(group, dev);
1535 	if (ret)
1536 		goto out_put_group;
1537 
1538 	return group;
1539 
1540 out_put_group:
1541 	iommu_group_put(group);
1542 
1543 	return ERR_PTR(ret);
1544 }
1545 
1546 struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
1547 {
1548 	return group->default_domain;
1549 }
1550 
1551 static int probe_iommu_group(struct device *dev, void *data)
1552 {
1553 	struct list_head *group_list = data;
1554 	struct iommu_group *group;
1555 	int ret;
1556 
1557 	/* Device is probed already if in a group */
1558 	group = iommu_group_get(dev);
1559 	if (group) {
1560 		iommu_group_put(group);
1561 		return 0;
1562 	}
1563 
1564 	ret = __iommu_probe_device(dev, group_list);
1565 	if (ret == -ENODEV)
1566 		ret = 0;
1567 
1568 	return ret;
1569 }
1570 
1571 static int remove_iommu_group(struct device *dev, void *data)
1572 {
1573 	iommu_release_device(dev);
1574 
1575 	return 0;
1576 }
1577 
1578 static int iommu_bus_notifier(struct notifier_block *nb,
1579 			      unsigned long action, void *data)
1580 {
1581 	unsigned long group_action = 0;
1582 	struct device *dev = data;
1583 	struct iommu_group *group;
1584 
1585 	/*
1586 	 * ADD/DEL call into iommu driver ops if provided, which may
1587 	 * result in ADD/DEL notifiers to group->notifier
1588 	 */
1589 	if (action == BUS_NOTIFY_ADD_DEVICE) {
1590 		int ret;
1591 
1592 		ret = iommu_probe_device(dev);
1593 		return (ret) ? NOTIFY_DONE : NOTIFY_OK;
1594 	} else if (action == BUS_NOTIFY_REMOVED_DEVICE) {
1595 		iommu_release_device(dev);
1596 		return NOTIFY_OK;
1597 	}
1598 
1599 	/*
1600 	 * Remaining BUS_NOTIFYs get filtered and republished to the
1601 	 * group, if anyone is listening
1602 	 */
1603 	group = iommu_group_get(dev);
1604 	if (!group)
1605 		return 0;
1606 
1607 	switch (action) {
1608 	case BUS_NOTIFY_BIND_DRIVER:
1609 		group_action = IOMMU_GROUP_NOTIFY_BIND_DRIVER;
1610 		break;
1611 	case BUS_NOTIFY_BOUND_DRIVER:
1612 		group_action = IOMMU_GROUP_NOTIFY_BOUND_DRIVER;
1613 		break;
1614 	case BUS_NOTIFY_UNBIND_DRIVER:
1615 		group_action = IOMMU_GROUP_NOTIFY_UNBIND_DRIVER;
1616 		break;
1617 	case BUS_NOTIFY_UNBOUND_DRIVER:
1618 		group_action = IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER;
1619 		break;
1620 	}
1621 
1622 	if (group_action)
1623 		blocking_notifier_call_chain(&group->notifier,
1624 					     group_action, dev);
1625 
1626 	iommu_group_put(group);
1627 	return 0;
1628 }
1629 
1630 struct __group_domain_type {
1631 	struct device *dev;
1632 	unsigned int type;
1633 };
1634 
1635 static int probe_get_default_domain_type(struct device *dev, void *data)
1636 {
1637 	const struct iommu_ops *ops = dev->bus->iommu_ops;
1638 	struct __group_domain_type *gtype = data;
1639 	unsigned int type = 0;
1640 
1641 	if (ops->def_domain_type)
1642 		type = ops->def_domain_type(dev);
1643 
1644 	if (type) {
1645 		if (gtype->type && gtype->type != type) {
1646 			dev_warn(dev, "Device needs domain type %s, but device %s in the same iommu group requires type %s - using default\n",
1647 				 iommu_domain_type_str(type),
1648 				 dev_name(gtype->dev),
1649 				 iommu_domain_type_str(gtype->type));
1650 			gtype->type = 0;
1651 		}
1652 
1653 		if (!gtype->dev) {
1654 			gtype->dev  = dev;
1655 			gtype->type = type;
1656 		}
1657 	}
1658 
1659 	return 0;
1660 }
1661 
1662 static void probe_alloc_default_domain(struct bus_type *bus,
1663 				       struct iommu_group *group)
1664 {
1665 	struct __group_domain_type gtype;
1666 
1667 	memset(&gtype, 0, sizeof(gtype));
1668 
1669 	/* Ask for default domain requirements of all devices in the group */
1670 	__iommu_group_for_each_dev(group, &gtype,
1671 				   probe_get_default_domain_type);
1672 
1673 	if (!gtype.type)
1674 		gtype.type = iommu_def_domain_type;
1675 
1676 	iommu_group_alloc_default_domain(bus, group, gtype.type);
1677 
1678 }
1679 
1680 static int iommu_group_do_dma_attach(struct device *dev, void *data)
1681 {
1682 	struct iommu_domain *domain = data;
1683 	int ret = 0;
1684 
1685 	if (!iommu_is_attach_deferred(domain, dev))
1686 		ret = __iommu_attach_device(domain, dev);
1687 
1688 	return ret;
1689 }
1690 
1691 static int __iommu_group_dma_attach(struct iommu_group *group)
1692 {
1693 	return __iommu_group_for_each_dev(group, group->default_domain,
1694 					  iommu_group_do_dma_attach);
1695 }
1696 
1697 static int iommu_group_do_probe_finalize(struct device *dev, void *data)
1698 {
1699 	struct iommu_domain *domain = data;
1700 
1701 	if (domain->ops->probe_finalize)
1702 		domain->ops->probe_finalize(dev);
1703 
1704 	return 0;
1705 }
1706 
1707 static void __iommu_group_dma_finalize(struct iommu_group *group)
1708 {
1709 	__iommu_group_for_each_dev(group, group->default_domain,
1710 				   iommu_group_do_probe_finalize);
1711 }
1712 
1713 static int iommu_do_create_direct_mappings(struct device *dev, void *data)
1714 {
1715 	struct iommu_group *group = data;
1716 
1717 	iommu_create_device_direct_mappings(group, dev);
1718 
1719 	return 0;
1720 }
1721 
1722 static int iommu_group_create_direct_mappings(struct iommu_group *group)
1723 {
1724 	return __iommu_group_for_each_dev(group, group,
1725 					  iommu_do_create_direct_mappings);
1726 }
1727 
1728 int bus_iommu_probe(struct bus_type *bus)
1729 {
1730 	struct iommu_group *group, *next;
1731 	LIST_HEAD(group_list);
1732 	int ret;
1733 
1734 	/*
1735 	 * This code-path does not allocate the default domain when
1736 	 * creating the iommu group, so do it after the groups are
1737 	 * created.
1738 	 */
1739 	ret = bus_for_each_dev(bus, NULL, &group_list, probe_iommu_group);
1740 	if (ret)
1741 		return ret;
1742 
1743 	list_for_each_entry_safe(group, next, &group_list, entry) {
1744 		/* Remove item from the list */
1745 		list_del_init(&group->entry);
1746 
1747 		mutex_lock(&group->mutex);
1748 
1749 		/* Try to allocate default domain */
1750 		probe_alloc_default_domain(bus, group);
1751 
1752 		if (!group->default_domain) {
1753 			mutex_unlock(&group->mutex);
1754 			continue;
1755 		}
1756 
1757 		iommu_group_create_direct_mappings(group);
1758 
1759 		ret = __iommu_group_dma_attach(group);
1760 
1761 		mutex_unlock(&group->mutex);
1762 
1763 		if (ret)
1764 			break;
1765 
1766 		__iommu_group_dma_finalize(group);
1767 	}
1768 
1769 	return ret;
1770 }
1771 
1772 static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
1773 {
1774 	struct notifier_block *nb;
1775 	int err;
1776 
1777 	nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
1778 	if (!nb)
1779 		return -ENOMEM;
1780 
1781 	nb->notifier_call = iommu_bus_notifier;
1782 
1783 	err = bus_register_notifier(bus, nb);
1784 	if (err)
1785 		goto out_free;
1786 
1787 	err = bus_iommu_probe(bus);
1788 	if (err)
1789 		goto out_err;
1790 
1791 
1792 	return 0;
1793 
1794 out_err:
1795 	/* Clean up */
1796 	bus_for_each_dev(bus, NULL, NULL, remove_iommu_group);
1797 	bus_unregister_notifier(bus, nb);
1798 
1799 out_free:
1800 	kfree(nb);
1801 
1802 	return err;
1803 }
1804 
1805 /**
1806  * bus_set_iommu - set iommu-callbacks for the bus
1807  * @bus: bus.
1808  * @ops: the callbacks provided by the iommu-driver
1809  *
1810  * This function is called by an iommu driver to set the iommu methods
1811  * used for a particular bus. Drivers for devices on that bus can use
1812  * the iommu-api after these ops are registered.
1813  * This special function is needed because IOMMUs are usually devices on
1814  * the bus itself, so the iommu drivers are not initialized when the bus
1815  * is set up. With this function the iommu-driver can set the iommu-ops
1816  * afterwards.
1817  */
1818 int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops)
1819 {
1820 	int err;
1821 
1822 	if (ops == NULL) {
1823 		bus->iommu_ops = NULL;
1824 		return 0;
1825 	}
1826 
1827 	if (bus->iommu_ops != NULL)
1828 		return -EBUSY;
1829 
1830 	bus->iommu_ops = ops;
1831 
1832 	/* Do IOMMU specific setup for this bus-type */
1833 	err = iommu_bus_init(bus, ops);
1834 	if (err)
1835 		bus->iommu_ops = NULL;
1836 
1837 	return err;
1838 }
1839 EXPORT_SYMBOL_GPL(bus_set_iommu);
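
/*
 * Illustrative sketch, not part of the original file: how an IOMMU driver
 * typically registers its ops at init time.  "example_iommu_ops" stands in
 * for the driver's real (and here empty) ops table, and registering only
 * the PCI bus is an assumption; many drivers cover several bus types.
 */
static const struct iommu_ops example_iommu_ops;

static int __init example_iommu_init(void)
{
	int ret = bus_set_iommu(&pci_bus_type, &example_iommu_ops);

	if (ret)
		pr_err("failed to set PCI bus iommu ops: %d\n", ret);

	return ret;
}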
1840 
1841 bool iommu_present(struct bus_type *bus)
1842 {
1843 	return bus->iommu_ops != NULL;
1844 }
1845 EXPORT_SYMBOL_GPL(iommu_present);
1846 
1847 bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
1848 {
1849 	if (!bus->iommu_ops || !bus->iommu_ops->capable)
1850 		return false;
1851 
1852 	return bus->iommu_ops->capable(cap);
1853 }
1854 EXPORT_SYMBOL_GPL(iommu_capable);
1855 
1856 /**
1857  * iommu_set_fault_handler() - set a fault handler for an iommu domain
1858  * @domain: iommu domain
1859  * @handler: fault handler
1860  * @token: user data, will be passed back to the fault handler
1861  *
1862  * This function should be used by IOMMU users that want to be notified
1863  * whenever an IOMMU fault happens.
1864  *
1865  * The fault handler itself should return 0 on success, and an appropriate
1866  * error code otherwise.
1867  */
1868 void iommu_set_fault_handler(struct iommu_domain *domain,
1869 					iommu_fault_handler_t handler,
1870 					void *token)
1871 {
1872 	BUG_ON(!domain);
1873 
1874 	domain->handler = handler;
1875 	domain->handler_token = token;
1876 }
1877 EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
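
/*
 * Illustrative sketch, not part of the original file: a report-only fault
 * handler of the iommu_fault_handler_t shape expected above.  Returning a
 * non-zero value indicates the fault was not actually handled; the name and
 * message are hypothetical.
 */
static int example_domain_fault(struct iommu_domain *domain,
				struct device *dev, unsigned long iova,
				int flags, void *token)
{
	dev_err(dev, "unhandled %s fault at iova %#lx\n",
		(flags & IOMMU_FAULT_WRITE) ? "write" : "read", iova);

	return -ENOSYS;
}

/* Usage: iommu_set_fault_handler(domain, example_domain_fault, NULL); */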
1878 
1879 static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
1880 						 unsigned type)
1881 {
1882 	struct iommu_domain *domain;
1883 
1884 	if (bus == NULL || bus->iommu_ops == NULL)
1885 		return NULL;
1886 
1887 	domain = bus->iommu_ops->domain_alloc(type);
1888 	if (!domain)
1889 		return NULL;
1890 
1891 	domain->ops  = bus->iommu_ops;
1892 	domain->type = type;
1893 	/* Assume all sizes by default; the driver may override this later */
1894 	domain->pgsize_bitmap  = bus->iommu_ops->pgsize_bitmap;
1895 
1896 	return domain;
1897 }
1898 
1899 struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
1900 {
1901 	return __iommu_domain_alloc(bus, IOMMU_DOMAIN_UNMANAGED);
1902 }
1903 EXPORT_SYMBOL_GPL(iommu_domain_alloc);
1904 
1905 void iommu_domain_free(struct iommu_domain *domain)
1906 {
1907 	domain->ops->domain_free(domain);
1908 }
1909 EXPORT_SYMBOL_GPL(iommu_domain_free);
1910 
1911 static int __iommu_attach_device(struct iommu_domain *domain,
1912 				 struct device *dev)
1913 {
1914 	int ret;
1915 
1916 	if (unlikely(domain->ops->attach_dev == NULL))
1917 		return -ENODEV;
1918 
1919 	ret = domain->ops->attach_dev(domain, dev);
1920 	if (!ret)
1921 		trace_attach_device_to_domain(dev);
1922 	return ret;
1923 }
1924 
1925 int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
1926 {
1927 	struct iommu_group *group;
1928 	int ret;
1929 
1930 	group = iommu_group_get(dev);
1931 	if (!group)
1932 		return -ENODEV;
1933 
1934 	/*
1935 	 * Lock the group to make sure the device-count doesn't
1936 	 * change while we are attaching
1937 	 */
1938 	mutex_lock(&group->mutex);
1939 	ret = -EINVAL;
1940 	if (iommu_group_device_count(group) != 1)
1941 		goto out_unlock;
1942 
1943 	ret = __iommu_attach_group(domain, group);
1944 
1945 out_unlock:
1946 	mutex_unlock(&group->mutex);
1947 	iommu_group_put(group);
1948 
1949 	return ret;
1950 }
1951 EXPORT_SYMBOL_GPL(iommu_attach_device);
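
/*
 * Illustrative sketch, not part of the original file: the classic unmanaged
 * domain flow built from the APIs above and the map/unmap helpers further
 * below - allocate a domain on the device's bus, attach the (single-device
 * group's) device and map one page.  The fixed IOVA 0x100000 and the helper
 * name are hypothetical.
 */
static struct iommu_domain *example_map_one_page(struct device *dev,
						 phys_addr_t paddr)
{
	struct iommu_domain *domain = iommu_domain_alloc(dev->bus);
	int ret;

	if (!domain)
		return ERR_PTR(-ENOMEM);

	ret = iommu_attach_device(domain, dev);
	if (ret)
		goto err_free;

	ret = iommu_map(domain, 0x100000, paddr, PAGE_SIZE,
			IOMMU_READ | IOMMU_WRITE);
	if (ret)
		goto err_detach;

	return domain;	/* caller detaches and frees when done */

err_detach:
	iommu_detach_device(domain, dev);
err_free:
	iommu_domain_free(domain);
	return ERR_PTR(ret);
}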
1952 
1953 int iommu_cache_invalidate(struct iommu_domain *domain, struct device *dev,
1954 			   struct iommu_cache_invalidate_info *inv_info)
1955 {
1956 	if (unlikely(!domain->ops->cache_invalidate))
1957 		return -ENODEV;
1958 
1959 	return domain->ops->cache_invalidate(domain, dev, inv_info);
1960 }
1961 EXPORT_SYMBOL_GPL(iommu_cache_invalidate);
1962 
1963 int iommu_sva_bind_gpasid(struct iommu_domain *domain,
1964 			   struct device *dev, struct iommu_gpasid_bind_data *data)
1965 {
1966 	if (unlikely(!domain->ops->sva_bind_gpasid))
1967 		return -ENODEV;
1968 
1969 	return domain->ops->sva_bind_gpasid(domain, dev, data);
1970 }
1971 EXPORT_SYMBOL_GPL(iommu_sva_bind_gpasid);
1972 
1973 int iommu_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev,
1974 			     ioasid_t pasid)
1975 {
1976 	if (unlikely(!domain->ops->sva_unbind_gpasid))
1977 		return -ENODEV;
1978 
1979 	return domain->ops->sva_unbind_gpasid(dev, pasid);
1980 }
1981 EXPORT_SYMBOL_GPL(iommu_sva_unbind_gpasid);
1982 
1983 static void __iommu_detach_device(struct iommu_domain *domain,
1984 				  struct device *dev)
1985 {
1986 	if (iommu_is_attach_deferred(domain, dev))
1987 		return;
1988 
1989 	if (unlikely(domain->ops->detach_dev == NULL))
1990 		return;
1991 
1992 	domain->ops->detach_dev(domain, dev);
1993 	trace_detach_device_from_domain(dev);
1994 }
1995 
1996 void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
1997 {
1998 	struct iommu_group *group;
1999 
2000 	group = iommu_group_get(dev);
2001 	if (!group)
2002 		return;
2003 
2004 	mutex_lock(&group->mutex);
2005 	if (iommu_group_device_count(group) != 1) {
2006 		WARN_ON(1);
2007 		goto out_unlock;
2008 	}
2009 
2010 	__iommu_detach_group(domain, group);
2011 
2012 out_unlock:
2013 	mutex_unlock(&group->mutex);
2014 	iommu_group_put(group);
2015 }
2016 EXPORT_SYMBOL_GPL(iommu_detach_device);
2017 
2018 struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
2019 {
2020 	struct iommu_domain *domain;
2021 	struct iommu_group *group;
2022 
2023 	group = iommu_group_get(dev);
2024 	if (!group)
2025 		return NULL;
2026 
2027 	domain = group->domain;
2028 
2029 	iommu_group_put(group);
2030 
2031 	return domain;
2032 }
2033 EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev);
2034 
2035 /*
2036  * For IOMMU_DOMAIN_DMA implementations, which already guarantee that the
2037  * group and its default domain are valid and correct.
2038  */
2039 struct iommu_domain *iommu_get_dma_domain(struct device *dev)
2040 {
2041 	return dev->iommu_group->default_domain;
2042 }
2043 
2044 /*
2045  * IOMMU groups are really the natural working unit of the IOMMU, but
2046  * the IOMMU API works on domains and devices.  Bridge that gap by
2047  * iterating over the devices in a group.  Ideally we'd have a single
2048  * device which represents the requestor ID of the group, but we also
2049  * allow IOMMU drivers to create policy-defined minimum sets, where
2050 	 * the physical hardware may be able to distinguish members, but we
2051  * wish to group them at a higher level (ex. untrusted multi-function
2052  * PCI devices).  Thus we attach each device.
2053  */
2054 static int iommu_group_do_attach_device(struct device *dev, void *data)
2055 {
2056 	struct iommu_domain *domain = data;
2057 
2058 	return __iommu_attach_device(domain, dev);
2059 }
2060 
2061 static int __iommu_attach_group(struct iommu_domain *domain,
2062 				struct iommu_group *group)
2063 {
2064 	int ret;
2065 
2066 	if (group->default_domain && group->domain != group->default_domain)
2067 		return -EBUSY;
2068 
2069 	ret = __iommu_group_for_each_dev(group, domain,
2070 					 iommu_group_do_attach_device);
2071 	if (ret == 0)
2072 		group->domain = domain;
2073 
2074 	return ret;
2075 }
2076 
2077 int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
2078 {
2079 	int ret;
2080 
2081 	mutex_lock(&group->mutex);
2082 	ret = __iommu_attach_group(domain, group);
2083 	mutex_unlock(&group->mutex);
2084 
2085 	return ret;
2086 }
2087 EXPORT_SYMBOL_GPL(iommu_attach_group);
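
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * attaching a whole group, as a VFIO-like user would, after taking a
 * reference on the device's group. The helper name is an assumption.
 */
static int example_attach_group_of(struct iommu_domain *domain,
				   struct device *dev)
{
	struct iommu_group *group = iommu_group_get(dev);
	int ret;

	if (!group)
		return -ENODEV;

	/* Returns -EBUSY if the group is already attached to another domain */
	ret = iommu_attach_group(domain, group);
	iommu_group_put(group);
	return ret;
}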
2088 
2089 static int iommu_group_do_detach_device(struct device *dev, void *data)
2090 {
2091 	struct iommu_domain *domain = data;
2092 
2093 	__iommu_detach_device(domain, dev);
2094 
2095 	return 0;
2096 }
2097 
2098 static void __iommu_detach_group(struct iommu_domain *domain,
2099 				 struct iommu_group *group)
2100 {
2101 	int ret;
2102 
2103 	if (!group->default_domain) {
2104 		__iommu_group_for_each_dev(group, domain,
2105 					   iommu_group_do_detach_device);
2106 		group->domain = NULL;
2107 		return;
2108 	}
2109 
2110 	if (group->domain == group->default_domain)
2111 		return;
2112 
2113 	/* Detach by re-attaching to the default domain */
2114 	ret = __iommu_group_for_each_dev(group, group->default_domain,
2115 					 iommu_group_do_attach_device);
2116 	if (ret != 0)
2117 		WARN_ON(1);
2118 	else
2119 		group->domain = group->default_domain;
2120 }
2121 
2122 void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
2123 {
2124 	mutex_lock(&group->mutex);
2125 	__iommu_detach_group(domain, group);
2126 	mutex_unlock(&group->mutex);
2127 }
2128 EXPORT_SYMBOL_GPL(iommu_detach_group);
2129 
2130 phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
2131 {
2132 	if (unlikely(domain->ops->iova_to_phys == NULL))
2133 		return 0;
2134 
2135 	return domain->ops->iova_to_phys(domain, iova);
2136 }
2137 EXPORT_SYMBOL_GPL(iommu_iova_to_phys);
2138 
2139 static size_t iommu_pgsize(struct iommu_domain *domain,
2140 			   unsigned long addr_merge, size_t size)
2141 {
2142 	unsigned int pgsize_idx;
2143 	size_t pgsize;
2144 
2145 	/* Max page size that still fits into 'size' */
2146 	pgsize_idx = __fls(size);
2147 
2148 	/* need to consider alignment requirements? */
2149 	if (likely(addr_merge)) {
2150 		/* Max page size allowed by address */
2151 		unsigned int align_pgsize_idx = __ffs(addr_merge);
2152 		pgsize_idx = min(pgsize_idx, align_pgsize_idx);
2153 	}
2154 
2155 	/* build a mask of acceptable page sizes */
2156 	pgsize = (1UL << (pgsize_idx + 1)) - 1;
2157 
2158 	/* throw away page sizes not supported by the hardware */
2159 	pgsize &= domain->pgsize_bitmap;
2160 
2161 	/* make sure we're still sane */
2162 	BUG_ON(!pgsize);
2163 
2164 	/* pick the biggest page */
2165 	pgsize_idx = __fls(pgsize);
2166 	pgsize = 1UL << pgsize_idx;
2167 
2168 	return pgsize;
2169 }
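
/*
 * Worked example (editorial note, not part of the original file): with
 * pgsize_bitmap = SZ_4K | SZ_2M | SZ_1G, iova = paddr = 0x1ff000 and
 * size = 0x202000, __fls(size) would allow a 2M page but __ffs(addr_merge)
 * limits the alignment to 4K, so iommu_pgsize() returns SZ_4K first; once
 * iova and paddr reach 0x200000 (with 0x201000 bytes left), a 2M page is
 * selected.
 */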
2170 
2171 int __iommu_map(struct iommu_domain *domain, unsigned long iova,
2172 	      phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
2173 {
2174 	const struct iommu_ops *ops = domain->ops;
2175 	unsigned long orig_iova = iova;
2176 	unsigned int min_pagesz;
2177 	size_t orig_size = size;
2178 	phys_addr_t orig_paddr = paddr;
2179 	int ret = 0;
2180 
2181 	if (unlikely(ops->map == NULL ||
2182 		     domain->pgsize_bitmap == 0UL))
2183 		return -ENODEV;
2184 
2185 	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
2186 		return -EINVAL;
2187 
2188 	/* find out the minimum page size supported */
2189 	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
2190 
2191 	/*
2192 	 * both the virtual address and the physical one, as well as
2193 	 * the size of the mapping, must be aligned (at least) to the
2194 	 * size of the smallest page supported by the hardware
2195 	 */
2196 	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
2197 		pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
2198 		       iova, &paddr, size, min_pagesz);
2199 		return -EINVAL;
2200 	}
2201 
2202 	pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);
2203 
2204 	while (size) {
2205 		size_t pgsize = iommu_pgsize(domain, iova | paddr, size);
2206 
2207 		pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
2208 			 iova, &paddr, pgsize);
2209 		ret = ops->map(domain, iova, paddr, pgsize, prot, gfp);
2210 
2211 		if (ret)
2212 			break;
2213 
2214 		iova += pgsize;
2215 		paddr += pgsize;
2216 		size -= pgsize;
2217 	}
2218 
2219 	if (ops->iotlb_sync_map)
2220 		ops->iotlb_sync_map(domain);
2221 
2222 	/* unroll mapping in case something went wrong */
2223 	if (ret)
2224 		iommu_unmap(domain, orig_iova, orig_size - size);
2225 	else
2226 		trace_map(orig_iova, orig_paddr, orig_size);
2227 
2228 	return ret;
2229 }
2230 
2231 int iommu_map(struct iommu_domain *domain, unsigned long iova,
2232 	      phys_addr_t paddr, size_t size, int prot)
2233 {
2234 	might_sleep();
2235 	return __iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL);
2236 }
2237 EXPORT_SYMBOL_GPL(iommu_map);
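
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * mapping one physically contiguous buffer into a domain and tearing it down
 * again. The IOVA, size and prot values are arbitrary assumptions.
 */
static int example_map_buffer(struct iommu_domain *domain, phys_addr_t paddr)
{
	const unsigned long iova = 0x10000000;
	const size_t size = SZ_2M;
	int ret;

	/* iova, paddr and size must all be aligned to the minimum page size */
	ret = iommu_map(domain, iova, paddr, size, IOMMU_READ | IOMMU_WRITE);
	if (ret)
		return ret;

	/* ... DMA happens here ... */

	/* iommu_unmap() returns the number of bytes actually unmapped */
	if (iommu_unmap(domain, iova, size) != size)
		return -EIO;

	return 0;
}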
2238 
2239 int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
2240 	      phys_addr_t paddr, size_t size, int prot)
2241 {
2242 	return __iommu_map(domain, iova, paddr, size, prot, GFP_ATOMIC);
2243 }
2244 EXPORT_SYMBOL_GPL(iommu_map_atomic);
2245 
2246 static size_t __iommu_unmap(struct iommu_domain *domain,
2247 			    unsigned long iova, size_t size,
2248 			    struct iommu_iotlb_gather *iotlb_gather)
2249 {
2250 	const struct iommu_ops *ops = domain->ops;
2251 	size_t unmapped_page, unmapped = 0;
2252 	unsigned long orig_iova = iova;
2253 	unsigned int min_pagesz;
2254 
2255 	if (unlikely(ops->unmap == NULL ||
2256 		     domain->pgsize_bitmap == 0UL))
2257 		return 0;
2258 
2259 	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
2260 		return 0;
2261 
2262 	/* find out the minimum page size supported */
2263 	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
2264 
2265 	/*
2266 	 * The virtual address, as well as the size of the mapping, must be
2267 	 * aligned (at least) to the size of the smallest page supported
2268 	 * by the hardware
2269 	 */
2270 	if (!IS_ALIGNED(iova | size, min_pagesz)) {
2271 		pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
2272 		       iova, size, min_pagesz);
2273 		return 0;
2274 	}
2275 
2276 	pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size);
2277 
2278 	/*
2279 	 * Keep iterating until we either unmap 'size' bytes (or more)
2280 	 * or we hit an area that isn't mapped.
2281 	 */
2282 	while (unmapped < size) {
2283 		size_t pgsize = iommu_pgsize(domain, iova, size - unmapped);
2284 
2285 		unmapped_page = ops->unmap(domain, iova, pgsize, iotlb_gather);
2286 		if (!unmapped_page)
2287 			break;
2288 
2289 		pr_debug("unmapped: iova 0x%lx size 0x%zx\n",
2290 			 iova, unmapped_page);
2291 
2292 		iova += unmapped_page;
2293 		unmapped += unmapped_page;
2294 	}
2295 
2296 	trace_unmap(orig_iova, size, unmapped);
2297 	return unmapped;
2298 }
2299 
2300 size_t iommu_unmap(struct iommu_domain *domain,
2301 		   unsigned long iova, size_t size)
2302 {
2303 	struct iommu_iotlb_gather iotlb_gather;
2304 	size_t ret;
2305 
2306 	iommu_iotlb_gather_init(&iotlb_gather);
2307 	ret = __iommu_unmap(domain, iova, size, &iotlb_gather);
2308 	iommu_tlb_sync(domain, &iotlb_gather);
2309 
2310 	return ret;
2311 }
2312 EXPORT_SYMBOL_GPL(iommu_unmap);
2313 
2314 size_t iommu_unmap_fast(struct iommu_domain *domain,
2315 			unsigned long iova, size_t size,
2316 			struct iommu_iotlb_gather *iotlb_gather)
2317 {
2318 	return __iommu_unmap(domain, iova, size, iotlb_gather);
2319 }
2320 EXPORT_SYMBOL_GPL(iommu_unmap_fast);
2321 
2322 size_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
2323 		    struct scatterlist *sg, unsigned int nents, int prot,
2324 		    gfp_t gfp)
2325 {
2326 	size_t len = 0, mapped = 0;
2327 	phys_addr_t start;
2328 	unsigned int i = 0;
2329 	int ret;
2330 
2331 	while (i <= nents) {
2332 		phys_addr_t s_phys = sg_phys(sg);
2333 
2334 		if (len && s_phys != start + len) {
2335 			ret = __iommu_map(domain, iova + mapped, start,
2336 					len, prot, gfp);
2337 
2338 			if (ret)
2339 				goto out_err;
2340 
2341 			mapped += len;
2342 			len = 0;
2343 		}
2344 
2345 		if (len) {
2346 			len += sg->length;
2347 		} else {
2348 			len = sg->length;
2349 			start = s_phys;
2350 		}
2351 
2352 		if (++i < nents)
2353 			sg = sg_next(sg);
2354 	}
2355 
2356 	return mapped;
2357 
2358 out_err:
2359 	/* undo mappings already done */
2360 	iommu_unmap(domain, iova, mapped);
2361 
2362 	return 0;
2363 
2364 }
2365 
2366 size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
2367 		    struct scatterlist *sg, unsigned int nents, int prot)
2368 {
2369 	might_sleep();
2370 	return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_KERNEL);
2371 }
2372 EXPORT_SYMBOL_GPL(iommu_map_sg);
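
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * mapping a scatterlist. iommu_map_sg() returns the number of bytes mapped,
 * or 0 on failure (partial mappings are already undone), so the caller
 * compares against the expected total rather than checking for a negative
 * value. The helper name is an assumption.
 */
static int example_map_sg(struct iommu_domain *domain, unsigned long iova,
			  struct scatterlist *sgl, unsigned int nents,
			  size_t total_len)
{
	size_t mapped;

	mapped = iommu_map_sg(domain, iova, sgl, nents,
			      IOMMU_READ | IOMMU_WRITE);

	return mapped == total_len ? 0 : -ENOMEM;
}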
2373 
2374 size_t iommu_map_sg_atomic(struct iommu_domain *domain, unsigned long iova,
2375 		    struct scatterlist *sg, unsigned int nents, int prot)
2376 {
2377 	return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_ATOMIC);
2378 }
2379 EXPORT_SYMBOL_GPL(iommu_map_sg_atomic);
2380 
2381 int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
2382 			       phys_addr_t paddr, u64 size, int prot)
2383 {
2384 	if (unlikely(domain->ops->domain_window_enable == NULL))
2385 		return -ENODEV;
2386 
2387 	return domain->ops->domain_window_enable(domain, wnd_nr, paddr, size,
2388 						 prot);
2389 }
2390 EXPORT_SYMBOL_GPL(iommu_domain_window_enable);
2391 
2392 void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr)
2393 {
2394 	if (unlikely(domain->ops->domain_window_disable == NULL))
2395 		return;
2396 
2397 	return domain->ops->domain_window_disable(domain, wnd_nr);
2398 }
2399 EXPORT_SYMBOL_GPL(iommu_domain_window_disable);
2400 
2401 /**
2402  * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
2403  * @domain: the iommu domain where the fault has happened
2404  * @dev: the device where the fault has happened
2405  * @iova: the faulting address
2406  * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...)
2407  *
2408  * This function should be called by the low-level IOMMU implementations
2409  * whenever IOMMU faults happen, to allow high-level users, that are
2410  * interested in such events, to know about them.
2411  *
2412  * This event may be useful for several possible use cases:
2413  * - mere logging of the event
2414  * - dynamic TLB/PTE loading
2415  * - restarting the faulting device, if required
2416  *
2417  * Returns 0 on success and an appropriate error code otherwise (if dynamic
2418  * PTE/TLB loading will one day be supported, implementations will be able
2419  * to tell whether it succeeded or not according to this return value).
2420  *
2421  * Specifically, -ENOSYS is returned if a fault handler isn't installed
2422  * (though fault handlers can also return -ENOSYS, in case they want to
2423  * elicit the default behavior of the IOMMU drivers).
2424  */
2425 int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
2426 		       unsigned long iova, int flags)
2427 {
2428 	int ret = -ENOSYS;
2429 
2430 	/*
2431 	 * if upper layers showed interest and installed a fault handler,
2432 	 * invoke it.
2433 	 */
2434 	if (domain->handler)
2435 		ret = domain->handler(domain, dev, iova, flags,
2436 						domain->handler_token);
2437 
2438 	trace_io_page_fault(dev, iova, flags);
2439 	return ret;
2440 }
2441 EXPORT_SYMBOL_GPL(report_iommu_fault);
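
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * how a low-level IOMMU driver might forward a translation fault, decoded
 * from its fault registers, to the core. The helper name and fallback
 * logging are assumptions.
 */
static void example_report_fault(struct iommu_domain *domain,
				 struct device *dev, unsigned long iova)
{
	/* Nonzero (e.g. -ENOSYS when no handler is installed) means unhandled */
	if (report_iommu_fault(domain, dev, iova, IOMMU_FAULT_READ))
		dev_err(dev, "example: unhandled read fault at %#lx\n", iova);
}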
2442 
2443 static int __init iommu_init(void)
2444 {
2445 	iommu_group_kset = kset_create_and_add("iommu_groups",
2446 					       NULL, kernel_kobj);
2447 	BUG_ON(!iommu_group_kset);
2448 
2449 	iommu_debugfs_setup();
2450 
2451 	return 0;
2452 }
2453 core_initcall(iommu_init);
2454 
2455 int iommu_domain_get_attr(struct iommu_domain *domain,
2456 			  enum iommu_attr attr, void *data)
2457 {
2458 	struct iommu_domain_geometry *geometry;
2459 	bool *paging;
2460 	int ret = 0;
2461 
2462 	switch (attr) {
2463 	case DOMAIN_ATTR_GEOMETRY:
2464 		geometry  = data;
2465 		*geometry = domain->geometry;
2466 
2467 		break;
2468 	case DOMAIN_ATTR_PAGING:
2469 		paging  = data;
2470 		*paging = (domain->pgsize_bitmap != 0UL);
2471 		break;
2472 	default:
2473 		if (!domain->ops->domain_get_attr)
2474 			return -EINVAL;
2475 
2476 		ret = domain->ops->domain_get_attr(domain, attr, data);
2477 	}
2478 
2479 	return ret;
2480 }
2481 EXPORT_SYMBOL_GPL(iommu_domain_get_attr);
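
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * querying a domain's aperture through DOMAIN_ATTR_GEOMETRY before choosing
 * IOVAs. The helper name is an assumption.
 */
static int example_query_aperture(struct iommu_domain *domain,
				  dma_addr_t *start, dma_addr_t *end)
{
	struct iommu_domain_geometry geo;
	int ret;

	ret = iommu_domain_get_attr(domain, DOMAIN_ATTR_GEOMETRY, &geo);
	if (ret)
		return ret;

	/* force_aperture means IOVAs outside this window will fault */
	*start = geo.aperture_start;
	*end = geo.aperture_end;
	return 0;
}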
2482 
2483 int iommu_domain_set_attr(struct iommu_domain *domain,
2484 			  enum iommu_attr attr, void *data)
2485 {
2486 	int ret = 0;
2487 
2488 	switch (attr) {
2489 	default:
2490 		if (domain->ops->domain_set_attr == NULL)
2491 			return -EINVAL;
2492 
2493 		ret = domain->ops->domain_set_attr(domain, attr, data);
2494 	}
2495 
2496 	return ret;
2497 }
2498 EXPORT_SYMBOL_GPL(iommu_domain_set_attr);
2499 
2500 void iommu_get_resv_regions(struct device *dev, struct list_head *list)
2501 {
2502 	const struct iommu_ops *ops = dev->bus->iommu_ops;
2503 
2504 	if (ops && ops->get_resv_regions)
2505 		ops->get_resv_regions(dev, list);
2506 }
2507 
2508 void iommu_put_resv_regions(struct device *dev, struct list_head *list)
2509 {
2510 	const struct iommu_ops *ops = dev->bus->iommu_ops;
2511 
2512 	if (ops && ops->put_resv_regions)
2513 		ops->put_resv_regions(dev, list);
2514 }
2515 
2516 /**
2517  * generic_iommu_put_resv_regions - Reserved region driver helper
2518  * @dev: device for which to free reserved regions
2519  * @list: reserved region list for device
2520  *
2521  * IOMMU drivers can use this to implement their .put_resv_regions() callback
2522  * for simple reservations. Memory allocated for each reserved region will be
2523  * freed. If an IOMMU driver allocates additional resources per region, it is
2524  * going to have to implement a custom callback.
2525  */
2526 void generic_iommu_put_resv_regions(struct device *dev, struct list_head *list)
2527 {
2528 	struct iommu_resv_region *entry, *next;
2529 
2530 	list_for_each_entry_safe(entry, next, list, list)
2531 		kfree(entry);
2532 }
2533 EXPORT_SYMBOL(generic_iommu_put_resv_regions);
2534 
2535 struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
2536 						  size_t length, int prot,
2537 						  enum iommu_resv_type type)
2538 {
2539 	struct iommu_resv_region *region;
2540 
2541 	region = kzalloc(sizeof(*region), GFP_KERNEL);
2542 	if (!region)
2543 		return NULL;
2544 
2545 	INIT_LIST_HEAD(&region->list);
2546 	region->start = start;
2547 	region->length = length;
2548 	region->prot = prot;
2549 	region->type = type;
2550 	return region;
2551 }
2552 EXPORT_SYMBOL_GPL(iommu_alloc_resv_region);
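
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * how an IOMMU driver's ->get_resv_regions() callback might reserve a
 * software-managed MSI window. The base address and size are assumptions.
 */
static void example_get_resv_regions(struct device *dev,
				     struct list_head *head)
{
	struct iommu_resv_region *region;

	region = iommu_alloc_resv_region(0x08000000, SZ_1M,
					 IOMMU_WRITE | IOMMU_MMIO,
					 IOMMU_RESV_SW_MSI);
	if (region)
		list_add_tail(&region->list, head);
}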
2553 
2554 void iommu_set_default_passthrough(bool cmd_line)
2555 {
2556 	if (cmd_line)
2557 		iommu_set_cmd_line_dma_api();
2558 
2559 	iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY;
2560 }
2561 
2562 void iommu_set_default_translated(bool cmd_line)
2563 {
2564 	if (cmd_line)
2565 		iommu_set_cmd_line_dma_api();
2566 
2567 	iommu_def_domain_type = IOMMU_DOMAIN_DMA;
2568 }
2569 
2570 bool iommu_default_passthrough(void)
2571 {
2572 	return iommu_def_domain_type == IOMMU_DOMAIN_IDENTITY;
2573 }
2574 EXPORT_SYMBOL_GPL(iommu_default_passthrough);
2575 
2576 const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
2577 {
2578 	const struct iommu_ops *ops = NULL;
2579 	struct iommu_device *iommu;
2580 
2581 	spin_lock(&iommu_device_lock);
2582 	list_for_each_entry(iommu, &iommu_device_list, list)
2583 		if (iommu->fwnode == fwnode) {
2584 			ops = iommu->ops;
2585 			break;
2586 		}
2587 	spin_unlock(&iommu_device_lock);
2588 	return ops;
2589 }
2590 
2591 int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
2592 		      const struct iommu_ops *ops)
2593 {
2594 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
2595 
2596 	if (fwspec)
2597 		return ops == fwspec->ops ? 0 : -EINVAL;
2598 
2599 	if (!dev_iommu_get(dev))
2600 		return -ENOMEM;
2601 
2602 	/* Preallocate for the overwhelmingly common case of 1 ID */
2603 	fwspec = kzalloc(struct_size(fwspec, ids, 1), GFP_KERNEL);
2604 	if (!fwspec)
2605 		return -ENOMEM;
2606 
2607 	of_node_get(to_of_node(iommu_fwnode));
2608 	fwspec->iommu_fwnode = iommu_fwnode;
2609 	fwspec->ops = ops;
2610 	dev_iommu_fwspec_set(dev, fwspec);
2611 	return 0;
2612 }
2613 EXPORT_SYMBOL_GPL(iommu_fwspec_init);
2614 
2615 void iommu_fwspec_free(struct device *dev)
2616 {
2617 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
2618 
2619 	if (fwspec) {
2620 		fwnode_handle_put(fwspec->iommu_fwnode);
2621 		kfree(fwspec);
2622 		dev_iommu_fwspec_set(dev, NULL);
2623 	}
2624 }
2625 EXPORT_SYMBOL_GPL(iommu_fwspec_free);
2626 
2627 int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids)
2628 {
2629 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
2630 	int i, new_num;
2631 
2632 	if (!fwspec)
2633 		return -EINVAL;
2634 
2635 	new_num = fwspec->num_ids + num_ids;
2636 	if (new_num > 1) {
2637 		fwspec = krealloc(fwspec, struct_size(fwspec, ids, new_num),
2638 				  GFP_KERNEL);
2639 		if (!fwspec)
2640 			return -ENOMEM;
2641 
2642 		dev_iommu_fwspec_set(dev, fwspec);
2643 	}
2644 
2645 	for (i = 0; i < num_ids; i++)
2646 		fwspec->ids[fwspec->num_ids + i] = ids[i];
2647 
2648 	fwspec->num_ids = new_num;
2649 	return 0;
2650 }
2651 EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
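
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * how firmware parsing code (e.g. an of_xlate-style path) might build a
 * device's fwspec. A single stream ID is assumed for the example.
 */
static int example_fwspec_setup(struct device *dev,
				struct fwnode_handle *iommu_fwnode,
				const struct iommu_ops *ops, u32 sid)
{
	int ret;

	ret = iommu_fwspec_init(dev, iommu_fwnode, ops);
	if (ret)
		return ret;

	/* Append the ID this device uses on the IOMMU's input side */
	return iommu_fwspec_add_ids(dev, &sid, 1);
}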
2652 
2653 /*
2654  * Per device IOMMU features.
2655  */
2656 bool iommu_dev_has_feature(struct device *dev, enum iommu_dev_features feat)
2657 {
2658 	const struct iommu_ops *ops = dev->bus->iommu_ops;
2659 
2660 	if (ops && ops->dev_has_feat)
2661 		return ops->dev_has_feat(dev, feat);
2662 
2663 	return false;
2664 }
2665 EXPORT_SYMBOL_GPL(iommu_dev_has_feature);
2666 
2667 int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
2668 {
2669 	const struct iommu_ops *ops = dev->bus->iommu_ops;
2670 
2671 	if (ops && ops->dev_enable_feat)
2672 		return ops->dev_enable_feat(dev, feat);
2673 
2674 	return -ENODEV;
2675 }
2676 EXPORT_SYMBOL_GPL(iommu_dev_enable_feature);
2677 
2678 /*
2679  * The device drivers should do the necessary cleanups before calling this.
2680  * For example, before disabling the aux-domain feature, the device driver
2681  * should detach all aux-domains. Otherwise, this will return -EBUSY.
2682  */
2683 int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
2684 {
2685 	const struct iommu_ops *ops = dev->bus->iommu_ops;
2686 
2687 	if (ops && ops->dev_disable_feat)
2688 		return ops->dev_disable_feat(dev, feat);
2689 
2690 	return -EBUSY;
2691 }
2692 EXPORT_SYMBOL_GPL(iommu_dev_disable_feature);
2693 
2694 bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat)
2695 {
2696 	const struct iommu_ops *ops = dev->bus->iommu_ops;
2697 
2698 	if (ops && ops->dev_feat_enabled)
2699 		return ops->dev_feat_enabled(dev, feat);
2700 
2701 	return false;
2702 }
2703 EXPORT_SYMBOL_GPL(iommu_dev_feature_enabled);
2704 
2705 /*
2706  * Aux-domain specific attach/detach.
2707  *
2708  * Only works if iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX) returns
2709  * true. Also, as long as domains are attached to a device through this
2710  * interface, any attempt to call iommu_attach_device() should fail
2711  * (iommu_detach_device() can't fail, so we fail when trying to re-attach).
2712  * This should make us safe against a device being attached to a guest as a
2713  * whole while there are still pasid users on it (aux and sva).
2714  */
2715 int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev)
2716 {
2717 	int ret = -ENODEV;
2718 
2719 	if (domain->ops->aux_attach_dev)
2720 		ret = domain->ops->aux_attach_dev(domain, dev);
2721 
2722 	if (!ret)
2723 		trace_attach_device_to_domain(dev);
2724 
2725 	return ret;
2726 }
2727 EXPORT_SYMBOL_GPL(iommu_aux_attach_device);
2728 
2729 void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev)
2730 {
2731 	if (domain->ops->aux_detach_dev) {
2732 		domain->ops->aux_detach_dev(domain, dev);
2733 		trace_detach_device_from_domain(dev);
2734 	}
2735 }
2736 EXPORT_SYMBOL_GPL(iommu_aux_detach_device);
2737 
2738 int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
2739 {
2740 	int ret = -ENODEV;
2741 
2742 	if (domain->ops->aux_get_pasid)
2743 		ret = domain->ops->aux_get_pasid(domain, dev);
2744 
2745 	return ret;
2746 }
2747 EXPORT_SYMBOL_GPL(iommu_aux_get_pasid);
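
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * attaching an aux domain and retrieving the PASID it was assigned, assuming
 * the driver has already enabled IOMMU_DEV_FEAT_AUX on the device. The
 * helper name is an assumption.
 */
static int example_aux_attach(struct iommu_domain *domain, struct device *dev)
{
	int pasid;
	int ret;

	ret = iommu_aux_attach_device(domain, dev);
	if (ret)
		return ret;

	pasid = iommu_aux_get_pasid(domain, dev);
	if (pasid < 0) {
		iommu_aux_detach_device(domain, dev);
		return pasid;
	}

	/* Program 'pasid' into the device so its transactions carry it */
	return 0;
}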
2748 
2749 /**
2750  * iommu_sva_bind_device() - Bind a process address space to a device
2751  * @dev: the device
2752  * @mm: the mm to bind, caller must hold a reference to it
 * @drvdata: opaque driver data, passed through to the IOMMU driver's bind callback
2753  *
2754  * Create a bond between device and address space, allowing the device to access
2755  * the mm using the returned PASID. If a bond already exists between @dev and
2756  * @mm, it is returned and an additional reference is taken. Caller must call
2757  * iommu_sva_unbind_device() to release each reference.
2758  *
2759  * iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA) must be called first, to
2760  * initialize the required SVA features.
2761  *
2762  * On error, returns an ERR_PTR value.
2763  */
2764 struct iommu_sva *
2765 iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata)
2766 {
2767 	struct iommu_group *group;
2768 	struct iommu_sva *handle = ERR_PTR(-EINVAL);
2769 	const struct iommu_ops *ops = dev->bus->iommu_ops;
2770 
2771 	if (!ops || !ops->sva_bind)
2772 		return ERR_PTR(-ENODEV);
2773 
2774 	group = iommu_group_get(dev);
2775 	if (!group)
2776 		return ERR_PTR(-ENODEV);
2777 
2778 	/* Ensure device count and domain don't change while we're binding */
2779 	mutex_lock(&group->mutex);
2780 
2781 	/*
2782 	 * To keep things simple, SVA currently doesn't support IOMMU groups
2783 	 * with more than one device. Existing SVA-capable systems are not
2784 	 * affected by the problems that required IOMMU groups (lack of ACS
2785 	 * isolation, device ID aliasing and other hardware issues).
2786 	 */
2787 	if (iommu_group_device_count(group) != 1)
2788 		goto out_unlock;
2789 
2790 	handle = ops->sva_bind(dev, mm, drvdata);
2791 
2792 out_unlock:
2793 	mutex_unlock(&group->mutex);
2794 	iommu_group_put(group);
2795 
2796 	return handle;
2797 }
2798 EXPORT_SYMBOL_GPL(iommu_sva_bind_device);
2799 
2800 /**
2801  * iommu_sva_unbind_device() - Remove a bond created with iommu_sva_bind_device
2802  * @handle: the handle returned by iommu_sva_bind_device()
2803  *
2804  * Put reference to a bond between device and address space. The device should
2805  * not be issuing any more transactions for this PASID. All outstanding page
2806  * requests for this PASID must have been flushed to the IOMMU.
2809  */
2810 void iommu_sva_unbind_device(struct iommu_sva *handle)
2811 {
2812 	struct iommu_group *group;
2813 	struct device *dev = handle->dev;
2814 	const struct iommu_ops *ops = dev->bus->iommu_ops;
2815 
2816 	if (!ops || !ops->sva_unbind)
2817 		return;
2818 
2819 	group = iommu_group_get(dev);
2820 	if (!group)
2821 		return;
2822 
2823 	mutex_lock(&group->mutex);
2824 	ops->sva_unbind(handle);
2825 	mutex_unlock(&group->mutex);
2826 
2827 	iommu_group_put(group);
2828 }
2829 EXPORT_SYMBOL_GPL(iommu_sva_unbind_device);
2830 
2831 int iommu_sva_get_pasid(struct iommu_sva *handle)
2832 {
2833 	const struct iommu_ops *ops = handle->dev->bus->iommu_ops;
2834 
2835 	if (!ops || !ops->sva_get_pasid)
2836 		return IOMMU_PASID_INVALID;
2837 
2838 	return ops->sva_get_pasid(handle);
2839 }
2840 EXPORT_SYMBOL_GPL(iommu_sva_get_pasid);
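
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * the usual SVA flow for a driver, after iommu_dev_enable_feature(dev,
 * IOMMU_DEV_FEAT_SVA) has succeeded. How the PASID is programmed into the
 * device is driver specific; the helper name is an assumption.
 */
static int example_sva_bind_current(struct device *dev)
{
	struct iommu_sva *handle;
	int pasid;

	handle = iommu_sva_bind_device(dev, current->mm, NULL);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	pasid = iommu_sva_get_pasid(handle);
	if (pasid == IOMMU_PASID_INVALID) {
		iommu_sva_unbind_device(handle);
		return -ENODEV;
	}

	/* Tell the device to tag its DMA with 'pasid'; unbind when done */
	return 0;
}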
2841