xref: /openbmc/linux/drivers/iommu/iommu.c (revision ce574c27)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
4  * Author: Joerg Roedel <jroedel@suse.de>
5  */
6 
7 #define pr_fmt(fmt)    "iommu: " fmt
8 
9 #include <linux/device.h>
10 #include <linux/kernel.h>
11 #include <linux/bug.h>
12 #include <linux/types.h>
13 #include <linux/init.h>
14 #include <linux/export.h>
15 #include <linux/slab.h>
16 #include <linux/errno.h>
17 #include <linux/iommu.h>
18 #include <linux/idr.h>
19 #include <linux/notifier.h>
20 #include <linux/err.h>
21 #include <linux/pci.h>
22 #include <linux/bitops.h>
23 #include <linux/property.h>
24 #include <linux/fsl/mc.h>
25 #include <linux/module.h>
26 #include <trace/events/iommu.h>
27 
28 static struct kset *iommu_group_kset;
29 static DEFINE_IDA(iommu_group_ida);
30 
31 static unsigned int iommu_def_domain_type __read_mostly;
32 static bool iommu_dma_strict __read_mostly = true;
33 static u32 iommu_cmd_line __read_mostly;
34 
35 struct iommu_group {
36 	struct kobject kobj;
37 	struct kobject *devices_kobj;
38 	struct list_head devices;
39 	struct mutex mutex;
40 	struct blocking_notifier_head notifier;
41 	void *iommu_data;
42 	void (*iommu_data_release)(void *iommu_data);
43 	char *name;
44 	int id;
45 	struct iommu_domain *default_domain;
46 	struct iommu_domain *domain;
47 	struct list_head entry;
48 };
49 
50 struct group_device {
51 	struct list_head list;
52 	struct device *dev;
53 	char *name;
54 };
55 
56 struct iommu_group_attribute {
57 	struct attribute attr;
58 	ssize_t (*show)(struct iommu_group *group, char *buf);
59 	ssize_t (*store)(struct iommu_group *group,
60 			 const char *buf, size_t count);
61 };
62 
63 static const char * const iommu_group_resv_type_string[] = {
64 	[IOMMU_RESV_DIRECT]			= "direct",
65 	[IOMMU_RESV_DIRECT_RELAXABLE]		= "direct-relaxable",
66 	[IOMMU_RESV_RESERVED]			= "reserved",
67 	[IOMMU_RESV_MSI]			= "msi",
68 	[IOMMU_RESV_SW_MSI]			= "msi",
69 };
70 
71 #define IOMMU_CMD_LINE_DMA_API		BIT(0)
72 
73 static void iommu_set_cmd_line_dma_api(void)
74 {
75 	iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API;
76 }
77 
78 static bool iommu_cmd_line_dma_api(void)
79 {
80 	return !!(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API);
81 }
82 
83 static int iommu_alloc_default_domain(struct device *dev);
84 static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
85 						 unsigned type);
86 static int __iommu_attach_device(struct iommu_domain *domain,
87 				 struct device *dev);
88 static int __iommu_attach_group(struct iommu_domain *domain,
89 				struct iommu_group *group);
90 static void __iommu_detach_group(struct iommu_domain *domain,
91 				 struct iommu_group *group);
92 static int iommu_create_device_direct_mappings(struct iommu_group *group,
93 					       struct device *dev);
94 
95 #define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)		\
96 struct iommu_group_attribute iommu_group_attr_##_name =		\
97 	__ATTR(_name, _mode, _show, _store)
98 
99 #define to_iommu_group_attr(_attr)	\
100 	container_of(_attr, struct iommu_group_attribute, attr)
101 #define to_iommu_group(_kobj)		\
102 	container_of(_kobj, struct iommu_group, kobj)
103 
104 static LIST_HEAD(iommu_device_list);
105 static DEFINE_SPINLOCK(iommu_device_lock);
106 
107 /*
108  * Use a function instead of an array here because the domain-type is a
109  * bit-field, so an array would waste memory.
110  */
111 static const char *iommu_domain_type_str(unsigned int t)
112 {
113 	switch (t) {
114 	case IOMMU_DOMAIN_BLOCKED:
115 		return "Blocked";
116 	case IOMMU_DOMAIN_IDENTITY:
117 		return "Passthrough";
118 	case IOMMU_DOMAIN_UNMANAGED:
119 		return "Unmanaged";
120 	case IOMMU_DOMAIN_DMA:
121 		return "Translated";
122 	default:
123 		return "Unknown";
124 	}
125 }
126 
127 static int __init iommu_subsys_init(void)
128 {
129 	bool cmd_line = iommu_cmd_line_dma_api();
130 
131 	if (!cmd_line) {
132 		if (IS_ENABLED(CONFIG_IOMMU_DEFAULT_PASSTHROUGH))
133 			iommu_set_default_passthrough(false);
134 		else
135 			iommu_set_default_translated(false);
136 
137 		if (iommu_default_passthrough() && mem_encrypt_active()) {
138 			pr_info("Memory encryption detected - Disabling default IOMMU Passthrough\n");
139 			iommu_set_default_translated(false);
140 		}
141 	}
142 
143 	pr_info("Default domain type: %s %s\n",
144 		iommu_domain_type_str(iommu_def_domain_type),
145 		cmd_line ? "(set via kernel command line)" : "");
146 
147 	return 0;
148 }
149 subsys_initcall(iommu_subsys_init);
150 
151 int iommu_device_register(struct iommu_device *iommu)
152 {
153 	spin_lock(&iommu_device_lock);
154 	list_add_tail(&iommu->list, &iommu_device_list);
155 	spin_unlock(&iommu_device_lock);
156 	return 0;
157 }
158 EXPORT_SYMBOL_GPL(iommu_device_register);
159 
160 void iommu_device_unregister(struct iommu_device *iommu)
161 {
162 	spin_lock(&iommu_device_lock);
163 	list_del(&iommu->list);
164 	spin_unlock(&iommu_device_lock);
165 }
166 EXPORT_SYMBOL_GPL(iommu_device_unregister);
167 
168 static struct dev_iommu *dev_iommu_get(struct device *dev)
169 {
170 	struct dev_iommu *param = dev->iommu;
171 
172 	if (param)
173 		return param;
174 
175 	param = kzalloc(sizeof(*param), GFP_KERNEL);
176 	if (!param)
177 		return NULL;
178 
179 	mutex_init(&param->lock);
180 	dev->iommu = param;
181 	return param;
182 }
183 
184 static void dev_iommu_free(struct device *dev)
185 {
186 	kfree(dev->iommu);
187 	dev->iommu = NULL;
188 }
189 
190 static int __iommu_probe_device(struct device *dev, struct list_head *group_list)
191 {
192 	const struct iommu_ops *ops = dev->bus->iommu_ops;
193 	struct iommu_device *iommu_dev;
194 	struct iommu_group *group;
195 	int ret;
196 
197 	iommu_dev = ops->probe_device(dev);
198 	if (IS_ERR(iommu_dev))
199 		return PTR_ERR(iommu_dev);
200 
201 	dev->iommu->iommu_dev = iommu_dev;
202 
203 	group = iommu_group_get_for_dev(dev);
204 	if (IS_ERR(group)) {
205 		ret = PTR_ERR(group);
206 		goto out_release;
207 	}
208 	iommu_group_put(group);
209 
210 	if (group_list && !group->default_domain && list_empty(&group->entry))
211 		list_add_tail(&group->entry, group_list);
212 
213 	iommu_device_link(iommu_dev, dev);
214 
215 	return 0;
216 
217 out_release:
218 	ops->release_device(dev);
219 
220 	return ret;
221 }
222 
223 static int __iommu_probe_device_helper(struct device *dev)
224 {
225 	const struct iommu_ops *ops = dev->bus->iommu_ops;
226 	struct iommu_group *group;
227 	int ret;
228 
229 	ret = __iommu_probe_device(dev, NULL);
230 	if (ret)
231 		goto err_out;
232 
233 	/*
234 	 * Try to allocate a default domain - needs support from the
235 	 * IOMMU driver. There are still some drivers which don't
236 	 * support default domains, so the return value is not yet
237 	 * checked.
238 	 */
239 	iommu_alloc_default_domain(dev);
240 
241 	group = iommu_group_get(dev);
242 	if (!group)
243 		goto err_release;
244 
245 	if (group->default_domain)
246 		ret = __iommu_attach_device(group->default_domain, dev);
247 
248 	iommu_create_device_direct_mappings(group, dev);
249 
250 	iommu_group_put(group);
251 
252 	if (ret)
253 		goto err_release;
254 
255 	if (ops->probe_finalize)
256 		ops->probe_finalize(dev);
257 
258 	return 0;
259 
260 err_release:
261 	iommu_release_device(dev);
262 err_out:
263 	return ret;
264 
265 }
266 
267 int iommu_probe_device(struct device *dev)
268 {
269 	const struct iommu_ops *ops = dev->bus->iommu_ops;
270 	struct iommu_group *group;
271 	int ret;
272 
273 	WARN_ON(dev->iommu_group);
274 
275 	if (!ops)
276 		return -EINVAL;
277 
278 	if (!dev_iommu_get(dev))
279 		return -ENOMEM;
280 
281 	if (!try_module_get(ops->owner)) {
282 		ret = -EINVAL;
283 		goto err_free_dev_param;
284 	}
285 
286 	if (ops->probe_device)
287 		return __iommu_probe_device_helper(dev);
288 
289 	ret = ops->add_device(dev);
290 	if (ret)
291 		goto err_module_put;
292 
293 	group = iommu_group_get(dev);
294 	iommu_create_device_direct_mappings(group, dev);
295 	iommu_group_put(group);
296 
297 	if (ops->probe_finalize)
298 		ops->probe_finalize(dev);
299 
300 	return 0;
301 
302 err_module_put:
303 	module_put(ops->owner);
304 err_free_dev_param:
305 	dev_iommu_free(dev);
306 	return ret;
307 }
308 
309 static void __iommu_release_device(struct device *dev)
310 {
311 	const struct iommu_ops *ops = dev->bus->iommu_ops;
312 
313 	iommu_device_unlink(dev->iommu->iommu_dev, dev);
314 
315 	iommu_group_remove_device(dev);
316 
317 	ops->release_device(dev);
318 }
319 
320 void iommu_release_device(struct device *dev)
321 {
322 	const struct iommu_ops *ops = dev->bus->iommu_ops;
323 
324 	if (!dev->iommu)
325 		return;
326 
327 	if (ops->release_device)
328 		__iommu_release_device(dev);
329 	else if (dev->iommu_group)
330 		ops->remove_device(dev);
331 
332 	module_put(ops->owner);
333 	dev_iommu_free(dev);
334 }
335 
336 static int __init iommu_set_def_domain_type(char *str)
337 {
338 	bool pt;
339 	int ret;
340 
341 	ret = kstrtobool(str, &pt);
342 	if (ret)
343 		return ret;
344 
345 	if (pt)
346 		iommu_set_default_passthrough(true);
347 	else
348 		iommu_set_default_translated(true);
349 
350 	return 0;
351 }
352 early_param("iommu.passthrough", iommu_set_def_domain_type);
353 
354 static int __init iommu_dma_setup(char *str)
355 {
356 	return kstrtobool(str, &iommu_dma_strict);
357 }
358 early_param("iommu.strict", iommu_dma_setup);
359 
360 static ssize_t iommu_group_attr_show(struct kobject *kobj,
361 				     struct attribute *__attr, char *buf)
362 {
363 	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
364 	struct iommu_group *group = to_iommu_group(kobj);
365 	ssize_t ret = -EIO;
366 
367 	if (attr->show)
368 		ret = attr->show(group, buf);
369 	return ret;
370 }
371 
372 static ssize_t iommu_group_attr_store(struct kobject *kobj,
373 				      struct attribute *__attr,
374 				      const char *buf, size_t count)
375 {
376 	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
377 	struct iommu_group *group = to_iommu_group(kobj);
378 	ssize_t ret = -EIO;
379 
380 	if (attr->store)
381 		ret = attr->store(group, buf, count);
382 	return ret;
383 }
384 
385 static const struct sysfs_ops iommu_group_sysfs_ops = {
386 	.show = iommu_group_attr_show,
387 	.store = iommu_group_attr_store,
388 };
389 
390 static int iommu_group_create_file(struct iommu_group *group,
391 				   struct iommu_group_attribute *attr)
392 {
393 	return sysfs_create_file(&group->kobj, &attr->attr);
394 }
395 
396 static void iommu_group_remove_file(struct iommu_group *group,
397 				    struct iommu_group_attribute *attr)
398 {
399 	sysfs_remove_file(&group->kobj, &attr->attr);
400 }
401 
402 static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
403 {
404 	return sprintf(buf, "%s\n", group->name);
405 }
406 
407 /**
408  * iommu_insert_resv_region - Insert a new region in the
409  * list of reserved regions.
410  * @new: new region to insert
411  * @regions: list of regions
412  *
413  * Elements are sorted by start address and overlapping segments
414  * of the same type are merged.
415  */
416 int iommu_insert_resv_region(struct iommu_resv_region *new,
417 			     struct list_head *regions)
418 {
419 	struct iommu_resv_region *iter, *tmp, *nr, *top;
420 	LIST_HEAD(stack);
421 
422 	nr = iommu_alloc_resv_region(new->start, new->length,
423 				     new->prot, new->type);
424 	if (!nr)
425 		return -ENOMEM;
426 
427 	/* First add the new element based on start address sorting */
428 	list_for_each_entry(iter, regions, list) {
429 		if (nr->start < iter->start ||
430 		    (nr->start == iter->start && nr->type <= iter->type))
431 			break;
432 	}
433 	list_add_tail(&nr->list, &iter->list);
434 
435 	/* Merge overlapping segments of type nr->type in @regions, if any */
436 	list_for_each_entry_safe(iter, tmp, regions, list) {
437 		phys_addr_t top_end, iter_end = iter->start + iter->length - 1;
438 
439 		/* no merge needed on elements of different types than @new */
440 		if (iter->type != new->type) {
441 			list_move_tail(&iter->list, &stack);
442 			continue;
443 		}
444 
445 		/* look for the last stack element of same type as @iter */
446 		list_for_each_entry_reverse(top, &stack, list)
447 			if (top->type == iter->type)
448 				goto check_overlap;
449 
450 		list_move_tail(&iter->list, &stack);
451 		continue;
452 
453 check_overlap:
454 		top_end = top->start + top->length - 1;
455 
456 		if (iter->start > top_end + 1) {
457 			list_move_tail(&iter->list, &stack);
458 		} else {
459 			top->length = max(top_end, iter_end) - top->start + 1;
460 			list_del(&iter->list);
461 			kfree(iter);
462 		}
463 	}
464 	list_splice(&stack, regions);
465 	return 0;
466 }
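
/*
 * Illustrative sketch (editorial example, not used by the driver): how a
 * caller could feed a region into the sorting/merging helper above.  The
 * helper inserts a *copy*, so the caller keeps ownership of the template it
 * passes in, and whoever consumes @regions frees the merged entries (as the
 * sysfs code further below does).  The addresses are arbitrary placeholders.
 */
static int __maybe_unused example_add_direct_window(struct list_head *regions)
{
	struct iommu_resv_region *tmpl;
	int ret;

	tmpl = iommu_alloc_resv_region(0x8000000, 0x100000,
				       IOMMU_READ | IOMMU_WRITE,
				       IOMMU_RESV_DIRECT);
	if (!tmpl)
		return -ENOMEM;

	ret = iommu_insert_resv_region(tmpl, regions);

	/* The template is no longer needed once the copy is inserted */
	kfree(tmpl);
	return ret;
}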
467 
468 static int
469 iommu_insert_device_resv_regions(struct list_head *dev_resv_regions,
470 				 struct list_head *group_resv_regions)
471 {
472 	struct iommu_resv_region *entry;
473 	int ret = 0;
474 
475 	list_for_each_entry(entry, dev_resv_regions, list) {
476 		ret = iommu_insert_resv_region(entry, group_resv_regions);
477 		if (ret)
478 			break;
479 	}
480 	return ret;
481 }
482 
483 int iommu_get_group_resv_regions(struct iommu_group *group,
484 				 struct list_head *head)
485 {
486 	struct group_device *device;
487 	int ret = 0;
488 
489 	mutex_lock(&group->mutex);
490 	list_for_each_entry(device, &group->devices, list) {
491 		struct list_head dev_resv_regions;
492 
493 		INIT_LIST_HEAD(&dev_resv_regions);
494 		iommu_get_resv_regions(device->dev, &dev_resv_regions);
495 		ret = iommu_insert_device_resv_regions(&dev_resv_regions, head);
496 		iommu_put_resv_regions(device->dev, &dev_resv_regions);
497 		if (ret)
498 			break;
499 	}
500 	mutex_unlock(&group->mutex);
501 	return ret;
502 }
503 EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions);
504 
505 static ssize_t iommu_group_show_resv_regions(struct iommu_group *group,
506 					     char *buf)
507 {
508 	struct iommu_resv_region *region, *next;
509 	struct list_head group_resv_regions;
510 	char *str = buf;
511 
512 	INIT_LIST_HEAD(&group_resv_regions);
513 	iommu_get_group_resv_regions(group, &group_resv_regions);
514 
515 	list_for_each_entry_safe(region, next, &group_resv_regions, list) {
516 		str += sprintf(str, "0x%016llx 0x%016llx %s\n",
517 			       (long long int)region->start,
518 			       (long long int)(region->start +
519 						region->length - 1),
520 			       iommu_group_resv_type_string[region->type]);
521 		kfree(region);
522 	}
523 
524 	return (str - buf);
525 }
526 
527 static ssize_t iommu_group_show_type(struct iommu_group *group,
528 				     char *buf)
529 {
530 	char *type = "unknown\n";
531 
532 	if (group->default_domain) {
533 		switch (group->default_domain->type) {
534 		case IOMMU_DOMAIN_BLOCKED:
535 			type = "blocked\n";
536 			break;
537 		case IOMMU_DOMAIN_IDENTITY:
538 			type = "identity\n";
539 			break;
540 		case IOMMU_DOMAIN_UNMANAGED:
541 			type = "unmanaged\n";
542 			break;
543 		case IOMMU_DOMAIN_DMA:
544 			type = "DMA\n";
545 			break;
546 		}
547 	}
548 	strcpy(buf, type);
549 
550 	return strlen(type);
551 }
552 
553 static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);
554 
555 static IOMMU_GROUP_ATTR(reserved_regions, 0444,
556 			iommu_group_show_resv_regions, NULL);
557 
558 static IOMMU_GROUP_ATTR(type, 0444, iommu_group_show_type, NULL);
559 
560 static void iommu_group_release(struct kobject *kobj)
561 {
562 	struct iommu_group *group = to_iommu_group(kobj);
563 
564 	pr_debug("Releasing group %d\n", group->id);
565 
566 	if (group->iommu_data_release)
567 		group->iommu_data_release(group->iommu_data);
568 
569 	ida_simple_remove(&iommu_group_ida, group->id);
570 
571 	if (group->default_domain)
572 		iommu_domain_free(group->default_domain);
573 
574 	kfree(group->name);
575 	kfree(group);
576 }
577 
578 static struct kobj_type iommu_group_ktype = {
579 	.sysfs_ops = &iommu_group_sysfs_ops,
580 	.release = iommu_group_release,
581 };
582 
583 /**
584  * iommu_group_alloc - Allocate a new group
585  *
586  * This function is called by an iommu driver to allocate a new iommu
587  * group.  The iommu group represents the minimum granularity of the iommu.
588  * Upon successful return, the caller holds a reference to the supplied
589  * group in order to hold the group until devices are added.  Use
590  * iommu_group_put() to release this extra reference count, allowing the
591  * group to be automatically reclaimed once it has no devices or external
592  * references.
593  */
594 struct iommu_group *iommu_group_alloc(void)
595 {
596 	struct iommu_group *group;
597 	int ret;
598 
599 	group = kzalloc(sizeof(*group), GFP_KERNEL);
600 	if (!group)
601 		return ERR_PTR(-ENOMEM);
602 
603 	group->kobj.kset = iommu_group_kset;
604 	mutex_init(&group->mutex);
605 	INIT_LIST_HEAD(&group->devices);
606 	INIT_LIST_HEAD(&group->entry);
607 	BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);
608 
609 	ret = ida_simple_get(&iommu_group_ida, 0, 0, GFP_KERNEL);
610 	if (ret < 0) {
611 		kfree(group);
612 		return ERR_PTR(ret);
613 	}
614 	group->id = ret;
615 
616 	ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
617 				   NULL, "%d", group->id);
618 	if (ret) {
619 		ida_simple_remove(&iommu_group_ida, group->id);
620 		kfree(group);
621 		return ERR_PTR(ret);
622 	}
623 
624 	group->devices_kobj = kobject_create_and_add("devices", &group->kobj);
625 	if (!group->devices_kobj) {
626 		kobject_put(&group->kobj); /* triggers .release & free */
627 		return ERR_PTR(-ENOMEM);
628 	}
629 
630 	/*
631 	 * The devices_kobj holds a reference on the group kobject, so
632 	 * as long as that exists so will the group.  We can therefore
633 	 * use the devices_kobj for reference counting.
634 	 */
635 	kobject_put(&group->kobj);
636 
637 	ret = iommu_group_create_file(group,
638 				      &iommu_group_attr_reserved_regions);
639 	if (ret)
640 		return ERR_PTR(ret);
641 
642 	ret = iommu_group_create_file(group, &iommu_group_attr_type);
643 	if (ret)
644 		return ERR_PTR(ret);
645 
646 	pr_debug("Allocated group %d\n", group->id);
647 
648 	return group;
649 }
650 EXPORT_SYMBOL_GPL(iommu_group_alloc);
651 
652 struct iommu_group *iommu_group_get_by_id(int id)
653 {
654 	struct kobject *group_kobj;
655 	struct iommu_group *group;
656 	const char *name;
657 
658 	if (!iommu_group_kset)
659 		return NULL;
660 
661 	name = kasprintf(GFP_KERNEL, "%d", id);
662 	if (!name)
663 		return NULL;
664 
665 	group_kobj = kset_find_obj(iommu_group_kset, name);
666 	kfree(name);
667 
668 	if (!group_kobj)
669 		return NULL;
670 
671 	group = container_of(group_kobj, struct iommu_group, kobj);
672 	BUG_ON(group->id != id);
673 
674 	kobject_get(group->devices_kobj);
675 	kobject_put(&group->kobj);
676 
677 	return group;
678 }
679 EXPORT_SYMBOL_GPL(iommu_group_get_by_id);
680 
681 /**
682  * iommu_group_get_iommudata - retrieve iommu_data registered for a group
683  * @group: the group
684  *
685  * iommu drivers can store data in the group for use when doing iommu
686  * operations.  This function provides a way to retrieve it.  Caller
687  * should hold a group reference.
688  */
689 void *iommu_group_get_iommudata(struct iommu_group *group)
690 {
691 	return group->iommu_data;
692 }
693 EXPORT_SYMBOL_GPL(iommu_group_get_iommudata);
694 
695 /**
696  * iommu_group_set_iommudata - set iommu_data for a group
697  * @group: the group
698  * @iommu_data: new data
699  * @release: release function for iommu_data
700  *
701  * iommu drivers can store data in the group for use when doing iommu
702  * operations.  This function provides a way to set the data after
703  * the group has been allocated.  Caller should hold a group reference.
704  */
705 void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
706 			       void (*release)(void *iommu_data))
707 {
708 	group->iommu_data = iommu_data;
709 	group->iommu_data_release = release;
710 }
711 EXPORT_SYMBOL_GPL(iommu_group_set_iommudata);
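
/*
 * Illustrative sketch (editorial example): how an IOMMU driver might hang
 * per-group private data off a group with the two helpers above.  The
 * "example_group_info" structure and its stream_id field are hypothetical.
 */
struct example_group_info {
	int stream_id;
};

static void example_group_info_release(void *iommu_data)
{
	kfree(iommu_data);
}

static int __maybe_unused example_attach_group_info(struct iommu_group *group,
						    int stream_id)
{
	struct example_group_info *info;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->stream_id = stream_id;
	/* Freed via example_group_info_release() when the group is released */
	iommu_group_set_iommudata(group, info, example_group_info_release);

	return 0;
}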
712 
713 /**
714  * iommu_group_set_name - set name for a group
715  * @group: the group
716  * @name: name
717  *
718  * Allow iommu driver to set a name for a group.  When set it will
719  * appear in a name attribute file under the group in sysfs.
720  */
721 int iommu_group_set_name(struct iommu_group *group, const char *name)
722 {
723 	int ret;
724 
725 	if (group->name) {
726 		iommu_group_remove_file(group, &iommu_group_attr_name);
727 		kfree(group->name);
728 		group->name = NULL;
729 		if (!name)
730 			return 0;
731 	}
732 
733 	group->name = kstrdup(name, GFP_KERNEL);
734 	if (!group->name)
735 		return -ENOMEM;
736 
737 	ret = iommu_group_create_file(group, &iommu_group_attr_name);
738 	if (ret) {
739 		kfree(group->name);
740 		group->name = NULL;
741 		return ret;
742 	}
743 
744 	return 0;
745 }
746 EXPORT_SYMBOL_GPL(iommu_group_set_name);
747 
748 static int iommu_create_device_direct_mappings(struct iommu_group *group,
749 					       struct device *dev)
750 {
751 	struct iommu_domain *domain = group->default_domain;
752 	struct iommu_resv_region *entry;
753 	struct list_head mappings;
754 	unsigned long pg_size;
755 	int ret = 0;
756 
757 	if (!domain || domain->type != IOMMU_DOMAIN_DMA)
758 		return 0;
759 
760 	BUG_ON(!domain->pgsize_bitmap);
761 
762 	pg_size = 1UL << __ffs(domain->pgsize_bitmap);
763 	INIT_LIST_HEAD(&mappings);
764 
765 	iommu_get_resv_regions(dev, &mappings);
766 
767 	/* We need to consider overlapping regions for different devices */
768 	list_for_each_entry(entry, &mappings, list) {
769 		dma_addr_t start, end, addr;
770 
771 		if (domain->ops->apply_resv_region)
772 			domain->ops->apply_resv_region(dev, domain, entry);
773 
774 		start = ALIGN(entry->start, pg_size);
775 		end   = ALIGN(entry->start + entry->length, pg_size);
776 
777 		if (entry->type != IOMMU_RESV_DIRECT &&
778 		    entry->type != IOMMU_RESV_DIRECT_RELAXABLE)
779 			continue;
780 
781 		for (addr = start; addr < end; addr += pg_size) {
782 			phys_addr_t phys_addr;
783 
784 			phys_addr = iommu_iova_to_phys(domain, addr);
785 			if (phys_addr)
786 				continue;
787 
788 			ret = iommu_map(domain, addr, addr, pg_size, entry->prot);
789 			if (ret)
790 				goto out;
791 		}
792 
793 	}
794 
795 	iommu_flush_tlb_all(domain);
796 
797 out:
798 	iommu_put_resv_regions(dev, &mappings);
799 
800 	return ret;
801 }
802 
803 /**
804  * iommu_group_add_device - add a device to an iommu group
805  * @group: the group into which to add the device (reference should be held)
806  * @dev: the device
807  *
808  * This function is called by an iommu driver to add a device into a
809  * group.  Adding a device increments the group reference count.
810  */
811 int iommu_group_add_device(struct iommu_group *group, struct device *dev)
812 {
813 	int ret, i = 0;
814 	struct group_device *device;
815 
816 	device = kzalloc(sizeof(*device), GFP_KERNEL);
817 	if (!device)
818 		return -ENOMEM;
819 
820 	device->dev = dev;
821 
822 	ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
823 	if (ret)
824 		goto err_free_device;
825 
826 	device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
827 rename:
828 	if (!device->name) {
829 		ret = -ENOMEM;
830 		goto err_remove_link;
831 	}
832 
833 	ret = sysfs_create_link_nowarn(group->devices_kobj,
834 				       &dev->kobj, device->name);
835 	if (ret) {
836 		if (ret == -EEXIST && i >= 0) {
837 			/*
838 			 * Account for the slim chance of collision
839 			 * and append an instance to the name.
840 			 */
841 			kfree(device->name);
842 			device->name = kasprintf(GFP_KERNEL, "%s.%d",
843 						 kobject_name(&dev->kobj), i++);
844 			goto rename;
845 		}
846 		goto err_free_name;
847 	}
848 
849 	kobject_get(group->devices_kobj);
850 
851 	dev->iommu_group = group;
852 
853 	mutex_lock(&group->mutex);
854 	list_add_tail(&device->list, &group->devices);
855 	if (group->domain)
856 		ret = __iommu_attach_device(group->domain, dev);
857 	mutex_unlock(&group->mutex);
858 	if (ret)
859 		goto err_put_group;
860 
861 	/* Notify any listeners about change to group. */
862 	blocking_notifier_call_chain(&group->notifier,
863 				     IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);
864 
865 	trace_add_device_to_group(group->id, dev);
866 
867 	dev_info(dev, "Adding to iommu group %d\n", group->id);
868 
869 	return 0;
870 
871 err_put_group:
872 	mutex_lock(&group->mutex);
873 	list_del(&device->list);
874 	mutex_unlock(&group->mutex);
875 	dev->iommu_group = NULL;
876 	kobject_put(group->devices_kobj);
877 	sysfs_remove_link(group->devices_kobj, device->name);
878 err_free_name:
879 	kfree(device->name);
880 err_remove_link:
881 	sysfs_remove_link(&dev->kobj, "iommu_group");
882 err_free_device:
883 	kfree(device);
884 	dev_err(dev, "Failed to add to iommu group %d: %d\n", group->id, ret);
885 	return ret;
886 }
887 EXPORT_SYMBOL_GPL(iommu_group_add_device);
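
/*
 * Illustrative sketch (editorial example): the by-hand group setup a driver
 * could do, following the reference-count protocol documented for
 * iommu_group_alloc().  Most drivers instead rely on iommu_group_get_for_dev()
 * or the generic *_device_group() helpers further below.
 */
static int __maybe_unused example_new_group_for_dev(struct device *dev)
{
	struct iommu_group *group;
	int ret;

	group = iommu_group_alloc();
	if (IS_ERR(group))
		return PTR_ERR(group);

	/* Optional; shows up as the "name" attribute of the group in sysfs */
	ret = iommu_group_set_name(group, "example");
	if (ret)
		goto out_put;

	ret = iommu_group_add_device(group, dev);

out_put:
	/* Drop the initial reference taken by iommu_group_alloc() */
	iommu_group_put(group);
	return ret;
}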
888 
889 /**
890  * iommu_group_remove_device - remove a device from its current group
891  * @dev: device to be removed
892  *
893  * This function is called by an iommu driver to remove the device from
894  * its current group.  This decrements the iommu group reference count.
895  */
896 void iommu_group_remove_device(struct device *dev)
897 {
898 	struct iommu_group *group = dev->iommu_group;
899 	struct group_device *tmp_device, *device = NULL;
900 
901 	dev_info(dev, "Removing from iommu group %d\n", group->id);
902 
903 	/* Pre-notify listeners that a device is being removed. */
904 	blocking_notifier_call_chain(&group->notifier,
905 				     IOMMU_GROUP_NOTIFY_DEL_DEVICE, dev);
906 
907 	mutex_lock(&group->mutex);
908 	list_for_each_entry(tmp_device, &group->devices, list) {
909 		if (tmp_device->dev == dev) {
910 			device = tmp_device;
911 			list_del(&device->list);
912 			break;
913 		}
914 	}
915 	mutex_unlock(&group->mutex);
916 
917 	if (!device)
918 		return;
919 
920 	sysfs_remove_link(group->devices_kobj, device->name);
921 	sysfs_remove_link(&dev->kobj, "iommu_group");
922 
923 	trace_remove_device_from_group(group->id, dev);
924 
925 	kfree(device->name);
926 	kfree(device);
927 	dev->iommu_group = NULL;
928 	kobject_put(group->devices_kobj);
929 }
930 EXPORT_SYMBOL_GPL(iommu_group_remove_device);
931 
932 static int iommu_group_device_count(struct iommu_group *group)
933 {
934 	struct group_device *entry;
935 	int ret = 0;
936 
937 	list_for_each_entry(entry, &group->devices, list)
938 		ret++;
939 
940 	return ret;
941 }
942 
943 /**
944  * iommu_group_for_each_dev - iterate over each device in the group
945  * @group: the group
946  * @data: caller opaque data to be passed to callback function
947  * @fn: caller supplied callback function
948  *
949  * This function is called by group users to iterate over group devices.
950  * Callers should hold a reference count to the group during callback.
951  * The group->mutex is held across callbacks, which will block calls to
952  * iommu_group_add/remove_device.
953  */
954 static int __iommu_group_for_each_dev(struct iommu_group *group, void *data,
955 				      int (*fn)(struct device *, void *))
956 {
957 	struct group_device *device;
958 	int ret = 0;
959 
960 	list_for_each_entry(device, &group->devices, list) {
961 		ret = fn(device->dev, data);
962 		if (ret)
963 			break;
964 	}
965 	return ret;
966 }
967 
968 
969 int iommu_group_for_each_dev(struct iommu_group *group, void *data,
970 			     int (*fn)(struct device *, void *))
971 {
972 	int ret;
973 
974 	mutex_lock(&group->mutex);
975 	ret = __iommu_group_for_each_dev(group, data, fn);
976 	mutex_unlock(&group->mutex);
977 
978 	return ret;
979 }
980 EXPORT_SYMBOL_GPL(iommu_group_for_each_dev);
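
/*
 * Illustrative sketch (editorial example): a caller-supplied callback for
 * iommu_group_for_each_dev().  Returning non-zero from the callback stops
 * the iteration; here it always returns 0 and simply counts devices.
 */
static int example_count_one(struct device *dev, void *data)
{
	int *count = data;

	(*count)++;
	return 0;
}

static int __maybe_unused example_count_group_devices(struct iommu_group *group)
{
	int count = 0;

	iommu_group_for_each_dev(group, &count, example_count_one);
	return count;
}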
981 
982 /**
983  * iommu_group_get - Return the group for a device and increment reference
984  * @dev: get the group that this device belongs to
985  *
986  * This function is called by iommu drivers and users to get the group
987  * for the specified device.  If found, the group is returned and the group
988  * reference is incremented, otherwise NULL is returned.
989  */
990 struct iommu_group *iommu_group_get(struct device *dev)
991 {
992 	struct iommu_group *group = dev->iommu_group;
993 
994 	if (group)
995 		kobject_get(group->devices_kobj);
996 
997 	return group;
998 }
999 EXPORT_SYMBOL_GPL(iommu_group_get);
1000 
1001 /**
1002  * iommu_group_ref_get - Increment reference on a group
1003  * @group: the group to use, must not be NULL
1004  *
1005  * This function is called by iommu drivers to take additional references on an
1006  * existing group.  Returns the given group for convenience.
1007  */
1008 struct iommu_group *iommu_group_ref_get(struct iommu_group *group)
1009 {
1010 	kobject_get(group->devices_kobj);
1011 	return group;
1012 }
1013 EXPORT_SYMBOL_GPL(iommu_group_ref_get);
1014 
1015 /**
1016  * iommu_group_put - Decrement group reference
1017  * @group: the group to use
1018  *
1019  * This function is called by iommu drivers and users to release the
1020  * iommu group.  Once the reference count is zero, the group is released.
1021  */
1022 void iommu_group_put(struct iommu_group *group)
1023 {
1024 	if (group)
1025 		kobject_put(group->devices_kobj);
1026 }
1027 EXPORT_SYMBOL_GPL(iommu_group_put);
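
/*
 * Illustrative sketch (editorial example): the get/put discipline for group
 * references - take a reference only for as long as the group is used.
 */
static int __maybe_unused example_report_group_id(struct device *dev)
{
	struct iommu_group *group = iommu_group_get(dev);
	int id;

	if (!group)
		return -ENODEV;	/* device is not behind an IOMMU */

	id = iommu_group_id(group);
	iommu_group_put(group);

	return id;
}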
1028 
1029 /**
1030  * iommu_group_register_notifier - Register a notifier for group changes
1031  * @group: the group to watch
1032  * @nb: notifier block to signal
1033  *
1034  * This function allows iommu group users to track changes in a group.
1035  * See include/linux/iommu.h for actions sent via this notifier.  Caller
1036  * should hold a reference to the group throughout notifier registration.
1037  */
1038 int iommu_group_register_notifier(struct iommu_group *group,
1039 				  struct notifier_block *nb)
1040 {
1041 	return blocking_notifier_chain_register(&group->notifier, nb);
1042 }
1043 EXPORT_SYMBOL_GPL(iommu_group_register_notifier);
1044 
1045 /**
1046  * iommu_group_unregister_notifier - Unregister a notifier
1047  * @group: the group to watch
1048  * @nb: notifier block to signal
1049  *
1050  * Unregister a previously registered group notifier block.
1051  */
1052 int iommu_group_unregister_notifier(struct iommu_group *group,
1053 				    struct notifier_block *nb)
1054 {
1055 	return blocking_notifier_chain_unregister(&group->notifier, nb);
1056 }
1057 EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier);
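
/*
 * Illustrative sketch (editorial example): a notifier block a group user
 * could register with iommu_group_register_notifier(group, &example_group_nb)
 * while holding a group reference, and later remove with
 * iommu_group_unregister_notifier().
 */
static int example_group_notify(struct notifier_block *nb,
				unsigned long action, void *data)
{
	struct device *dev = data;

	switch (action) {
	case IOMMU_GROUP_NOTIFY_ADD_DEVICE:
		dev_info(dev, "added to the watched iommu group\n");
		break;
	case IOMMU_GROUP_NOTIFY_DEL_DEVICE:
		dev_info(dev, "removed from the watched iommu group\n");
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block __maybe_unused example_group_nb = {
	.notifier_call = example_group_notify,
};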
1058 
1059 /**
1060  * iommu_register_device_fault_handler() - Register a device fault handler
1061  * @dev: the device
1062  * @handler: the fault handler
1063  * @data: private data passed as argument to the handler
1064  *
1065  * When an IOMMU fault event is received, this handler gets called with the
1066  * fault event and data as argument. The handler should return 0 on success. If
1067  * the fault is recoverable (IOMMU_FAULT_PAGE_REQ), the consumer should also
1068  * complete the fault by calling iommu_page_response() with one of the following
1069  * response codes:
1070  * - IOMMU_PAGE_RESP_SUCCESS: retry the translation
1071  * - IOMMU_PAGE_RESP_INVALID: terminate the fault
1072  * - IOMMU_PAGE_RESP_FAILURE: terminate the fault and stop reporting
1073  *   page faults if possible.
1074  *
1075  * Return 0 if the fault handler was installed successfully, or an error.
1076  */
1077 int iommu_register_device_fault_handler(struct device *dev,
1078 					iommu_dev_fault_handler_t handler,
1079 					void *data)
1080 {
1081 	struct dev_iommu *param = dev->iommu;
1082 	int ret = 0;
1083 
1084 	if (!param)
1085 		return -EINVAL;
1086 
1087 	mutex_lock(&param->lock);
1088 	/* Only allow one fault handler registered for each device */
1089 	if (param->fault_param) {
1090 		ret = -EBUSY;
1091 		goto done_unlock;
1092 	}
1093 
1094 	get_device(dev);
1095 	param->fault_param = kzalloc(sizeof(*param->fault_param), GFP_KERNEL);
1096 	if (!param->fault_param) {
1097 		put_device(dev);
1098 		ret = -ENOMEM;
1099 		goto done_unlock;
1100 	}
1101 	param->fault_param->handler = handler;
1102 	param->fault_param->data = data;
1103 	mutex_init(&param->fault_param->lock);
1104 	INIT_LIST_HEAD(&param->fault_param->faults);
1105 
1106 done_unlock:
1107 	mutex_unlock(&param->lock);
1108 
1109 	return ret;
1110 }
1111 EXPORT_SYMBOL_GPL(iommu_register_device_fault_handler);
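
/*
 * Illustrative sketch (editorial example): a device-driver fault handler
 * following the contract documented above.  It completes every last-page
 * request with a failure response; a real consumer would try to service the
 * fault first and report IOMMU_PAGE_RESP_SUCCESS.  It would be registered
 * with iommu_register_device_fault_handler(dev, example_dev_fault_handler, dev).
 */
static int __maybe_unused example_dev_fault_handler(struct iommu_fault *fault,
						    void *data)
{
	struct device *dev = data;
	struct iommu_page_response resp = {
		.version	= IOMMU_PAGE_RESP_VERSION_1,
		.code		= IOMMU_PAGE_RESP_INVALID,
	};

	/* Only queued (last-page) page requests can be completed */
	if (fault->type != IOMMU_FAULT_PAGE_REQ ||
	    !(fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE))
		return -EOPNOTSUPP;

	resp.grpid = fault->prm.grpid;
	if (fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID) {
		resp.flags = IOMMU_PAGE_RESP_PASID_VALID;
		resp.pasid = fault->prm.pasid;
	}

	return iommu_page_response(dev, &resp);
}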
1112 
1113 /**
1114  * iommu_unregister_device_fault_handler() - Unregister the device fault handler
1115  * @dev: the device
1116  *
1117  * Remove the device fault handler installed with
1118  * iommu_register_device_fault_handler().
1119  *
1120  * Return 0 on success, or an error.
1121  */
1122 int iommu_unregister_device_fault_handler(struct device *dev)
1123 {
1124 	struct dev_iommu *param = dev->iommu;
1125 	int ret = 0;
1126 
1127 	if (!param)
1128 		return -EINVAL;
1129 
1130 	mutex_lock(&param->lock);
1131 
1132 	if (!param->fault_param)
1133 		goto unlock;
1134 
1135 	/* we cannot unregister handler if there are pending faults */
1136 	if (!list_empty(&param->fault_param->faults)) {
1137 		ret = -EBUSY;
1138 		goto unlock;
1139 	}
1140 
1141 	kfree(param->fault_param);
1142 	param->fault_param = NULL;
1143 	put_device(dev);
1144 unlock:
1145 	mutex_unlock(&param->lock);
1146 
1147 	return ret;
1148 }
1149 EXPORT_SYMBOL_GPL(iommu_unregister_device_fault_handler);
1150 
1151 /**
1152  * iommu_report_device_fault() - Report fault event to device driver
1153  * @dev: the device
1154  * @evt: fault event data
1155  *
1156  * Called by IOMMU drivers when a fault is detected, typically in a threaded IRQ
1157  * handler. When this function fails and the fault is recoverable, it is the
1158  * caller's responsibility to complete the fault.
1159  *
1160  * Return 0 on success, or an error.
1161  */
1162 int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
1163 {
1164 	struct dev_iommu *param = dev->iommu;
1165 	struct iommu_fault_event *evt_pending = NULL;
1166 	struct iommu_fault_param *fparam;
1167 	int ret = 0;
1168 
1169 	if (!param || !evt)
1170 		return -EINVAL;
1171 
1172 	/* we only report device fault if there is a handler registered */
1173 	mutex_lock(&param->lock);
1174 	fparam = param->fault_param;
1175 	if (!fparam || !fparam->handler) {
1176 		ret = -EINVAL;
1177 		goto done_unlock;
1178 	}
1179 
1180 	if (evt->fault.type == IOMMU_FAULT_PAGE_REQ &&
1181 	    (evt->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
1182 		evt_pending = kmemdup(evt, sizeof(struct iommu_fault_event),
1183 				      GFP_KERNEL);
1184 		if (!evt_pending) {
1185 			ret = -ENOMEM;
1186 			goto done_unlock;
1187 		}
1188 		mutex_lock(&fparam->lock);
1189 		list_add_tail(&evt_pending->list, &fparam->faults);
1190 		mutex_unlock(&fparam->lock);
1191 	}
1192 
1193 	ret = fparam->handler(&evt->fault, fparam->data);
1194 	if (ret && evt_pending) {
1195 		mutex_lock(&fparam->lock);
1196 		list_del(&evt_pending->list);
1197 		mutex_unlock(&fparam->lock);
1198 		kfree(evt_pending);
1199 	}
1200 done_unlock:
1201 	mutex_unlock(&param->lock);
1202 	return ret;
1203 }
1204 EXPORT_SYMBOL_GPL(iommu_report_device_fault);
1205 
1206 int iommu_page_response(struct device *dev,
1207 			struct iommu_page_response *msg)
1208 {
1209 	bool pasid_valid;
1210 	int ret = -EINVAL;
1211 	struct iommu_fault_event *evt;
1212 	struct iommu_fault_page_request *prm;
1213 	struct dev_iommu *param = dev->iommu;
1214 	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
1215 
1216 	if (!domain || !domain->ops->page_response)
1217 		return -ENODEV;
1218 
1219 	if (!param || !param->fault_param)
1220 		return -EINVAL;
1221 
1222 	if (msg->version != IOMMU_PAGE_RESP_VERSION_1 ||
1223 	    msg->flags & ~IOMMU_PAGE_RESP_PASID_VALID)
1224 		return -EINVAL;
1225 
1226 	/* Only send response if there is a fault report pending */
1227 	mutex_lock(&param->fault_param->lock);
1228 	if (list_empty(&param->fault_param->faults)) {
1229 		dev_warn_ratelimited(dev, "no pending PRQ, drop response\n");
1230 		goto done_unlock;
1231 	}
1232 	/*
1233 	 * Check if we have a matching page request pending to respond,
1234 	 * otherwise return -EINVAL
1235 	 */
1236 	list_for_each_entry(evt, &param->fault_param->faults, list) {
1237 		prm = &evt->fault.prm;
1238 		pasid_valid = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
1239 
1240 		if ((pasid_valid && prm->pasid != msg->pasid) ||
1241 		    prm->grpid != msg->grpid)
1242 			continue;
1243 
1244 		/* Sanitize the reply */
1245 		msg->flags = pasid_valid ? IOMMU_PAGE_RESP_PASID_VALID : 0;
1246 
1247 		ret = domain->ops->page_response(dev, evt, msg);
1248 		list_del(&evt->list);
1249 		kfree(evt);
1250 		break;
1251 	}
1252 
1253 done_unlock:
1254 	mutex_unlock(&param->fault_param->lock);
1255 	return ret;
1256 }
1257 EXPORT_SYMBOL_GPL(iommu_page_response);
1258 
1259 /**
1260  * iommu_group_id - Return ID for a group
1261  * @group: the group to ID
1262  *
1263  * Return the unique ID for the group matching the sysfs group number.
1264  */
1265 int iommu_group_id(struct iommu_group *group)
1266 {
1267 	return group->id;
1268 }
1269 EXPORT_SYMBOL_GPL(iommu_group_id);
1270 
1271 static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
1272 					       unsigned long *devfns);
1273 
1274 /*
1275  * To consider a PCI device isolated, we require ACS to support Source
1276  * Validation, Request Redirection, Completer Redirection, and Upstream
1277  * Forwarding.  This effectively means that devices cannot spoof their
1278  * requester ID, requests and completions cannot be redirected, and all
1279  * transactions are forwarded upstream, even as they pass through a
1280  * bridge where the target device is downstream.
1281  */
1282 #define REQ_ACS_FLAGS   (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)
1283 
1284 /*
1285  * For multifunction devices which are not isolated from each other, find
1286  * all the other non-isolated functions and look for existing groups.  For
1287  * each function, we also need to look for aliases to or from other devices
1288  * that may already have a group.
1289  */
1290 static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev,
1291 							unsigned long *devfns)
1292 {
1293 	struct pci_dev *tmp = NULL;
1294 	struct iommu_group *group;
1295 
1296 	if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS))
1297 		return NULL;
1298 
1299 	for_each_pci_dev(tmp) {
1300 		if (tmp == pdev || tmp->bus != pdev->bus ||
1301 		    PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) ||
1302 		    pci_acs_enabled(tmp, REQ_ACS_FLAGS))
1303 			continue;
1304 
1305 		group = get_pci_alias_group(tmp, devfns);
1306 		if (group) {
1307 			pci_dev_put(tmp);
1308 			return group;
1309 		}
1310 	}
1311 
1312 	return NULL;
1313 }
1314 
1315 /*
1316  * Look for aliases to or from the given device for existing groups. DMA
1317  * aliases are only supported on the same bus, therefore the search
1318  * space is quite small (especially since we're really only looking at PCIe
1319  * devices, and therefore only expect multiple slots on the root complex or
1320  * downstream switch ports).  It's conceivable though that a pair of
1321  * multifunction devices could have aliases between them that would cause a
1322  * loop.  To prevent this, we use a bitmap to track where we've been.
1323  */
1324 static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
1325 					       unsigned long *devfns)
1326 {
1327 	struct pci_dev *tmp = NULL;
1328 	struct iommu_group *group;
1329 
1330 	if (test_and_set_bit(pdev->devfn & 0xff, devfns))
1331 		return NULL;
1332 
1333 	group = iommu_group_get(&pdev->dev);
1334 	if (group)
1335 		return group;
1336 
1337 	for_each_pci_dev(tmp) {
1338 		if (tmp == pdev || tmp->bus != pdev->bus)
1339 			continue;
1340 
1341 		/* We alias them or they alias us */
1342 		if (pci_devs_are_dma_aliases(pdev, tmp)) {
1343 			group = get_pci_alias_group(tmp, devfns);
1344 			if (group) {
1345 				pci_dev_put(tmp);
1346 				return group;
1347 			}
1348 
1349 			group = get_pci_function_alias_group(tmp, devfns);
1350 			if (group) {
1351 				pci_dev_put(tmp);
1352 				return group;
1353 			}
1354 		}
1355 	}
1356 
1357 	return NULL;
1358 }
1359 
1360 struct group_for_pci_data {
1361 	struct pci_dev *pdev;
1362 	struct iommu_group *group;
1363 };
1364 
1365 /*
1366  * DMA alias iterator callback, return the last seen device.  Stop and return
1367  * the IOMMU group if we find one along the way.
1368  */
1369 static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque)
1370 {
1371 	struct group_for_pci_data *data = opaque;
1372 
1373 	data->pdev = pdev;
1374 	data->group = iommu_group_get(&pdev->dev);
1375 
1376 	return data->group != NULL;
1377 }
1378 
1379 /*
1380  * Generic device_group call-back function. It just allocates one
1381  * iommu-group per device.
1382  */
1383 struct iommu_group *generic_device_group(struct device *dev)
1384 {
1385 	return iommu_group_alloc();
1386 }
1387 EXPORT_SYMBOL_GPL(generic_device_group);
1388 
1389 /*
1390  * Use standard PCI bus topology, isolation features, and DMA alias quirks
1391  * to find or create an IOMMU group for a device.
1392  */
1393 struct iommu_group *pci_device_group(struct device *dev)
1394 {
1395 	struct pci_dev *pdev = to_pci_dev(dev);
1396 	struct group_for_pci_data data;
1397 	struct pci_bus *bus;
1398 	struct iommu_group *group = NULL;
1399 	u64 devfns[4] = { 0 };
1400 
1401 	if (WARN_ON(!dev_is_pci(dev)))
1402 		return ERR_PTR(-EINVAL);
1403 
1404 	/*
1405 	 * Find the upstream DMA alias for the device.  A device must not
1406 	 * be aliased due to topology in order to have its own IOMMU group.
1407 	 * If we find an alias along the way that already belongs to a
1408 	 * group, use it.
1409 	 */
1410 	if (pci_for_each_dma_alias(pdev, get_pci_alias_or_group, &data))
1411 		return data.group;
1412 
1413 	pdev = data.pdev;
1414 
1415 	/*
1416 	 * Continue upstream from the point of minimum IOMMU granularity
1417 	 * due to aliases to the point where devices are protected from
1418 	 * peer-to-peer DMA by PCI ACS.  Again, if we find an existing
1419 	 * group, use it.
1420 	 */
1421 	for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) {
1422 		if (!bus->self)
1423 			continue;
1424 
1425 		if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
1426 			break;
1427 
1428 		pdev = bus->self;
1429 
1430 		group = iommu_group_get(&pdev->dev);
1431 		if (group)
1432 			return group;
1433 	}
1434 
1435 	/*
1436 	 * Look for existing groups on device aliases.  If we alias another
1437 	 * device or another device aliases us, use the same group.
1438 	 */
1439 	group = get_pci_alias_group(pdev, (unsigned long *)devfns);
1440 	if (group)
1441 		return group;
1442 
1443 	/*
1444 	 * Look for existing groups on non-isolated functions on the same
1445  * slot and aliases of those functions, if any.  No need to clear
1446 	 * the search bitmap, the tested devfns are still valid.
1447 	 */
1448 	group = get_pci_function_alias_group(pdev, (unsigned long *)devfns);
1449 	if (group)
1450 		return group;
1451 
1452 	/* No shared group found, allocate new */
1453 	return iommu_group_alloc();
1454 }
1455 EXPORT_SYMBOL_GPL(pci_device_group);
1456 
1457 /* Get the IOMMU group for device on fsl-mc bus */
1458 struct iommu_group *fsl_mc_device_group(struct device *dev)
1459 {
1460 	struct device *cont_dev = fsl_mc_cont_dev(dev);
1461 	struct iommu_group *group;
1462 
1463 	group = iommu_group_get(cont_dev);
1464 	if (!group)
1465 		group = iommu_group_alloc();
1466 	return group;
1467 }
1468 EXPORT_SYMBOL_GPL(fsl_mc_device_group);
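
/*
 * Illustrative sketch (editorial example): the ->device_group() callback of a
 * hypothetical IOMMU driver, dispatching to the generic helpers above based
 * on the bus the device sits on.
 */
static __maybe_unused struct iommu_group *example_device_group(struct device *dev)
{
	if (dev_is_pci(dev))
		return pci_device_group(dev);
	if (dev_is_fsl_mc(dev))
		return fsl_mc_device_group(dev);

	return generic_device_group(dev);
}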
1469 
1470 static int iommu_get_def_domain_type(struct device *dev)
1471 {
1472 	const struct iommu_ops *ops = dev->bus->iommu_ops;
1473 	unsigned int type = 0;
1474 
1475 	if (ops->def_domain_type)
1476 		type = ops->def_domain_type(dev);
1477 
1478 	return (type == 0) ? iommu_def_domain_type : type;
1479 }
1480 
1481 static int iommu_group_alloc_default_domain(struct bus_type *bus,
1482 					    struct iommu_group *group,
1483 					    unsigned int type)
1484 {
1485 	struct iommu_domain *dom;
1486 
1487 	dom = __iommu_domain_alloc(bus, type);
1488 	if (!dom && type != IOMMU_DOMAIN_DMA) {
1489 		dom = __iommu_domain_alloc(bus, IOMMU_DOMAIN_DMA);
1490 		if (dom)
1491 			pr_warn("Failed to allocate default IOMMU domain of type %u for group %s - Falling back to IOMMU_DOMAIN_DMA\n",
1492 				type, group->name);
1493 	}
1494 
1495 	if (!dom)
1496 		return -ENOMEM;
1497 
1498 	group->default_domain = dom;
1499 	if (!group->domain)
1500 		group->domain = dom;
1501 
1502 	if (!iommu_dma_strict) {
1503 		int attr = 1;
1504 		iommu_domain_set_attr(dom,
1505 				      DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE,
1506 				      &attr);
1507 	}
1508 
1509 	return 0;
1510 }
1511 
1512 static int iommu_alloc_default_domain(struct device *dev)
1513 {
1514 	struct iommu_group *group;
1515 	unsigned int type;
1516 
1517 	group = iommu_group_get(dev);
1518 	if (!group)
1519 		return -ENODEV;
1520 
1521 	if (group->default_domain)
1522 		return 0;
1523 
1524 	type = iommu_get_def_domain_type(dev);
1525 
1526 	return iommu_group_alloc_default_domain(dev->bus, group, type);
1527 }
1528 
1529 /**
1530  * iommu_group_get_for_dev - Find or create the IOMMU group for a device
1531  * @dev: target device
1532  *
1533  * This function is intended to be called by IOMMU drivers and extended to
1534  * support common, bus-defined algorithms when determining or creating the
1535  * IOMMU group for a device.  On success, the caller will hold a reference
1536  * to the returned IOMMU group, which will already include the provided
1537  * device.  The reference should be released with iommu_group_put().
1538  */
1539 struct iommu_group *iommu_group_get_for_dev(struct device *dev)
1540 {
1541 	const struct iommu_ops *ops = dev->bus->iommu_ops;
1542 	struct iommu_group *group;
1543 	int ret;
1544 
1545 	group = iommu_group_get(dev);
1546 	if (group)
1547 		return group;
1548 
1549 	if (!ops)
1550 		return ERR_PTR(-EINVAL);
1551 
1552 	group = ops->device_group(dev);
1553 	if (WARN_ON_ONCE(group == NULL))
1554 		return ERR_PTR(-EINVAL);
1555 
1556 	if (IS_ERR(group))
1557 		return group;
1558 
1559 	ret = iommu_group_add_device(group, dev);
1560 	if (ret)
1561 		goto out_put_group;
1562 
1563 	/*
1564 	 * Try to allocate a default domain - needs support from the
1565 	 * IOMMU driver. There are still some drivers which don't support
1566 	 * default domains, so the return value is not yet checked. Only
1567 	 * allocate the domain here when the driver still has the
1568 	 * add_device/remove_device call-backs implemented.
1569 	 */
1570 	if (!ops->probe_device) {
1571 		iommu_alloc_default_domain(dev);
1572 
1573 		if (group->default_domain)
1574 			ret = __iommu_attach_device(group->default_domain, dev);
1575 
1576 		if (ret)
1577 			goto out_put_group;
1578 	}
1579 
1580 	return group;
1581 
1582 out_put_group:
1583 	iommu_group_put(group);
1584 
1585 	return ERR_PTR(ret);
1586 }
1587 EXPORT_SYMBOL(iommu_group_get_for_dev);
1588 
1589 struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
1590 {
1591 	return group->default_domain;
1592 }
1593 
1594 static int add_iommu_group(struct device *dev, void *data)
1595 {
1596 	int ret = iommu_probe_device(dev);
1597 
1598 	/*
1599 	 * We ignore -ENODEV errors for now, as they just mean that the
1600 	 * device is not translated by an IOMMU. We still care about
1601 	 * other errors and fail to initialize when they happen.
1602 	 */
1603 	if (ret == -ENODEV)
1604 		ret = 0;
1605 
1606 	return ret;
1607 }
1608 
1609 static int probe_iommu_group(struct device *dev, void *data)
1610 {
1611 	const struct iommu_ops *ops = dev->bus->iommu_ops;
1612 	struct list_head *group_list = data;
1613 	int ret;
1614 
1615 	if (!dev_iommu_get(dev))
1616 		return -ENOMEM;
1617 
1618 	if (!try_module_get(ops->owner)) {
1619 		ret = -EINVAL;
1620 		goto err_free_dev_iommu;
1621 	}
1622 
1623 	ret = __iommu_probe_device(dev, group_list);
1624 	if (ret)
1625 		goto err_module_put;
1626 
1627 	return 0;
1628 
1629 err_module_put:
1630 	module_put(ops->owner);
1631 err_free_dev_iommu:
1632 	dev_iommu_free(dev);
1633 
1634 	if (ret == -ENODEV)
1635 		ret = 0;
1636 
1637 	return ret;
1638 }
1639 
1640 static int remove_iommu_group(struct device *dev, void *data)
1641 {
1642 	iommu_release_device(dev);
1643 
1644 	return 0;
1645 }
1646 
1647 static int iommu_bus_notifier(struct notifier_block *nb,
1648 			      unsigned long action, void *data)
1649 {
1650 	unsigned long group_action = 0;
1651 	struct device *dev = data;
1652 	struct iommu_group *group;
1653 
1654 	/*
1655 	 * ADD/DEL call into iommu driver ops if provided, which may
1656 	 * result in ADD/DEL notifiers to group->notifier
1657 	 */
1658 	if (action == BUS_NOTIFY_ADD_DEVICE) {
1659 		int ret;
1660 
1661 		ret = iommu_probe_device(dev);
1662 		return (ret) ? NOTIFY_DONE : NOTIFY_OK;
1663 	} else if (action == BUS_NOTIFY_REMOVED_DEVICE) {
1664 		iommu_release_device(dev);
1665 		return NOTIFY_OK;
1666 	}
1667 
1668 	/*
1669 	 * Remaining BUS_NOTIFYs get filtered and republished to the
1670 	 * group, if anyone is listening
1671 	 */
1672 	group = iommu_group_get(dev);
1673 	if (!group)
1674 		return 0;
1675 
1676 	switch (action) {
1677 	case BUS_NOTIFY_BIND_DRIVER:
1678 		group_action = IOMMU_GROUP_NOTIFY_BIND_DRIVER;
1679 		break;
1680 	case BUS_NOTIFY_BOUND_DRIVER:
1681 		group_action = IOMMU_GROUP_NOTIFY_BOUND_DRIVER;
1682 		break;
1683 	case BUS_NOTIFY_UNBIND_DRIVER:
1684 		group_action = IOMMU_GROUP_NOTIFY_UNBIND_DRIVER;
1685 		break;
1686 	case BUS_NOTIFY_UNBOUND_DRIVER:
1687 		group_action = IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER;
1688 		break;
1689 	}
1690 
1691 	if (group_action)
1692 		blocking_notifier_call_chain(&group->notifier,
1693 					     group_action, dev);
1694 
1695 	iommu_group_put(group);
1696 	return 0;
1697 }
1698 
1699 struct __group_domain_type {
1700 	struct device *dev;
1701 	unsigned int type;
1702 };
1703 
1704 static int probe_get_default_domain_type(struct device *dev, void *data)
1705 {
1706 	const struct iommu_ops *ops = dev->bus->iommu_ops;
1707 	struct __group_domain_type *gtype = data;
1708 	unsigned int type = 0;
1709 
1710 	if (ops->def_domain_type)
1711 		type = ops->def_domain_type(dev);
1712 
1713 	if (type) {
1714 		if (gtype->type && gtype->type != type) {
1715 			dev_warn(dev, "Device needs domain type %s, but device %s in the same iommu group requires type %s - using default\n",
1716 				 iommu_domain_type_str(type),
1717 				 dev_name(gtype->dev),
1718 				 iommu_domain_type_str(gtype->type));
1719 			gtype->type = 0;
1720 		}
1721 
1722 		if (!gtype->dev) {
1723 			gtype->dev  = dev;
1724 			gtype->type = type;
1725 		}
1726 	}
1727 
1728 	return 0;
1729 }
1730 
1731 static void probe_alloc_default_domain(struct bus_type *bus,
1732 				       struct iommu_group *group)
1733 {
1734 	struct __group_domain_type gtype;
1735 
1736 	memset(&gtype, 0, sizeof(gtype));
1737 
1738 	/* Ask for default domain requirements of all devices in the group */
1739 	__iommu_group_for_each_dev(group, &gtype,
1740 				   probe_get_default_domain_type);
1741 
1742 	if (!gtype.type)
1743 		gtype.type = iommu_def_domain_type;
1744 
1745 	iommu_group_alloc_default_domain(bus, group, gtype.type);
1746 
1747 }
1748 
1749 static int iommu_group_do_dma_attach(struct device *dev, void *data)
1750 {
1751 	struct iommu_domain *domain = data;
1752 	const struct iommu_ops *ops;
1753 	int ret;
1754 
1755 	ret = __iommu_attach_device(domain, dev);
1756 
1757 	ops = domain->ops;
1758 
1759 	if (ret == 0 && ops->probe_finalize)
1760 		ops->probe_finalize(dev);
1761 
1762 	return ret;
1763 }
1764 
1765 static int __iommu_group_dma_attach(struct iommu_group *group)
1766 {
1767 	return __iommu_group_for_each_dev(group, group->default_domain,
1768 					  iommu_group_do_dma_attach);
1769 }
1770 
1771 static int iommu_do_create_direct_mappings(struct device *dev, void *data)
1772 {
1773 	struct iommu_group *group = data;
1774 
1775 	iommu_create_device_direct_mappings(group, dev);
1776 
1777 	return 0;
1778 }
1779 
1780 static int iommu_group_create_direct_mappings(struct iommu_group *group)
1781 {
1782 	return __iommu_group_for_each_dev(group, group,
1783 					  iommu_do_create_direct_mappings);
1784 }
1785 
1786 static int bus_iommu_probe(struct bus_type *bus)
1787 {
1788 	const struct iommu_ops *ops = bus->iommu_ops;
1789 	int ret;
1790 
1791 	if (ops->probe_device) {
1792 		struct iommu_group *group, *next;
1793 		LIST_HEAD(group_list);
1794 
1795 		/*
1796 		 * This code-path does not allocate the default domain when
1797 		 * creating the iommu group, so do it after the groups are
1798 		 * created.
1799 		 */
1800 		ret = bus_for_each_dev(bus, NULL, &group_list, probe_iommu_group);
1801 		if (ret)
1802 			return ret;
1803 
1804 		list_for_each_entry_safe(group, next, &group_list, entry) {
1805 			/* Remove item from the list */
1806 			list_del_init(&group->entry);
1807 
1808 			mutex_lock(&group->mutex);
1809 
1810 			/* Try to allocate default domain */
1811 			probe_alloc_default_domain(bus, group);
1812 
1813 			if (!group->default_domain) {
1814 				mutex_unlock(&group->mutex);
1815 				continue;
1816 			}
1817 
1818 			iommu_group_create_direct_mappings(group);
1819 
1820 			ret = __iommu_group_dma_attach(group);
1821 
1822 			mutex_unlock(&group->mutex);
1823 
1824 			if (ret)
1825 				break;
1826 		}
1827 	} else {
1828 		ret = bus_for_each_dev(bus, NULL, NULL, add_iommu_group);
1829 	}
1830 
1831 	return ret;
1832 }
1833 
1834 static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
1835 {
1836 	struct notifier_block *nb;
1837 	int err;
1838 
1839 	nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
1840 	if (!nb)
1841 		return -ENOMEM;
1842 
1843 	nb->notifier_call = iommu_bus_notifier;
1844 
1845 	err = bus_register_notifier(bus, nb);
1846 	if (err)
1847 		goto out_free;
1848 
1849 	err = bus_iommu_probe(bus);
1850 	if (err)
1851 		goto out_err;
1852 
1853 
1854 	return 0;
1855 
1856 out_err:
1857 	/* Clean up */
1858 	bus_for_each_dev(bus, NULL, NULL, remove_iommu_group);
1859 	bus_unregister_notifier(bus, nb);
1860 
1861 out_free:
1862 	kfree(nb);
1863 
1864 	return err;
1865 }
1866 
1867 /**
1868  * bus_set_iommu - set iommu-callbacks for the bus
1869  * @bus: bus.
1870  * @ops: the callbacks provided by the iommu-driver
1871  *
1872  * This function is called by an iommu driver to set the iommu methods
1873  * used for a particular bus. Drivers for devices on that bus can use
1874  * the iommu-api after these ops are registered.
1875  * This special function is needed because IOMMUs are usually devices on
1876  * the bus itself, so the iommu drivers are not initialized when the bus
1877  * is set up. With this function the iommu-driver can set the iommu-ops
1878  * afterwards.
1879  */
1880 int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops)
1881 {
1882 	int err;
1883 
1884 	if (ops == NULL) {
1885 		bus->iommu_ops = NULL;
1886 		return 0;
1887 	}
1888 
1889 	if (bus->iommu_ops != NULL)
1890 		return -EBUSY;
1891 
1892 	bus->iommu_ops = ops;
1893 
1894 	/* Do IOMMU specific setup for this bus-type */
1895 	err = iommu_bus_init(bus, ops);
1896 	if (err)
1897 		bus->iommu_ops = NULL;
1898 
1899 	return err;
1900 }
1901 EXPORT_SYMBOL_GPL(bus_set_iommu);
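
/*
 * Illustrative sketch (editorial example): how an IOMMU driver's init/probe
 * path might publish its ops for a bus; @ops is assumed to be the driver's
 * own, fully populated struct iommu_ops.
 */
static int __maybe_unused example_publish_iommu_ops(struct bus_type *bus,
						    const struct iommu_ops *ops)
{
	/* Another IOMMU driver may already own this bus */
	if (iommu_present(bus))
		return -EBUSY;

	return bus_set_iommu(bus, ops);
}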
1902 
1903 bool iommu_present(struct bus_type *bus)
1904 {
1905 	return bus->iommu_ops != NULL;
1906 }
1907 EXPORT_SYMBOL_GPL(iommu_present);
1908 
1909 bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
1910 {
1911 	if (!bus->iommu_ops || !bus->iommu_ops->capable)
1912 		return false;
1913 
1914 	return bus->iommu_ops->capable(cap);
1915 }
1916 EXPORT_SYMBOL_GPL(iommu_capable);
1917 
1918 /**
1919  * iommu_set_fault_handler() - set a fault handler for an iommu domain
1920  * @domain: iommu domain
1921  * @handler: fault handler
1922  * @token: user data, will be passed back to the fault handler
1923  *
1924  * This function should be used by IOMMU users which want to be notified
1925  * whenever an IOMMU fault happens.
1926  *
1927  * The fault handler itself should return 0 on success, and an appropriate
1928  * error code otherwise.
1929  */
1930 void iommu_set_fault_handler(struct iommu_domain *domain,
1931 					iommu_fault_handler_t handler,
1932 					void *token)
1933 {
1934 	BUG_ON(!domain);
1935 
1936 	domain->handler = handler;
1937 	domain->handler_token = token;
1938 }
1939 EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
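
/*
 * Illustrative sketch (editorial example): a report-only handler matching the
 * iommu_fault_handler_t signature, installed with
 * iommu_set_fault_handler(domain, example_domain_fault, NULL).  A non-zero
 * return tells the reporting driver that the fault was not handled here.
 */
static int __maybe_unused example_domain_fault(struct iommu_domain *domain,
					       struct device *dev,
					       unsigned long iova, int flags,
					       void *token)
{
	dev_err(dev, "unexpected fault at iova %#lx (flags 0x%x)\n",
		iova, flags);

	return -ENOSYS;
}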
1940 
1941 static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
1942 						 unsigned type)
1943 {
1944 	struct iommu_domain *domain;
1945 
1946 	if (bus == NULL || bus->iommu_ops == NULL)
1947 		return NULL;
1948 
1949 	domain = bus->iommu_ops->domain_alloc(type);
1950 	if (!domain)
1951 		return NULL;
1952 
1953 	domain->ops  = bus->iommu_ops;
1954 	domain->type = type;
1955 	/* Assume all sizes by default; the driver may override this later */
1956 	domain->pgsize_bitmap  = bus->iommu_ops->pgsize_bitmap;
1957 
1958 	return domain;
1959 }
1960 
1961 struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
1962 {
1963 	return __iommu_domain_alloc(bus, IOMMU_DOMAIN_UNMANAGED);
1964 }
1965 EXPORT_SYMBOL_GPL(iommu_domain_alloc);
1966 
1967 void iommu_domain_free(struct iommu_domain *domain)
1968 {
1969 	domain->ops->domain_free(domain);
1970 }
1971 EXPORT_SYMBOL_GPL(iommu_domain_free);
1972 
1973 static int __iommu_attach_device(struct iommu_domain *domain,
1974 				 struct device *dev)
1975 {
1976 	int ret;
1977 	if ((domain->ops->is_attach_deferred != NULL) &&
1978 	    domain->ops->is_attach_deferred(domain, dev))
1979 		return 0;
1980 
1981 	if (unlikely(domain->ops->attach_dev == NULL))
1982 		return -ENODEV;
1983 
1984 	ret = domain->ops->attach_dev(domain, dev);
1985 	if (!ret)
1986 		trace_attach_device_to_domain(dev);
1987 	return ret;
1988 }
1989 
1990 int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
1991 {
1992 	struct iommu_group *group;
1993 	int ret;
1994 
1995 	group = iommu_group_get(dev);
1996 	if (!group)
1997 		return -ENODEV;
1998 
1999 	/*
2000 	 * Lock the group to make sure the device-count doesn't
2001 	 * change while we are attaching
2002 	 */
2003 	mutex_lock(&group->mutex);
2004 	ret = -EINVAL;
2005 	if (iommu_group_device_count(group) != 1)
2006 		goto out_unlock;
2007 
2008 	ret = __iommu_attach_group(domain, group);
2009 
2010 out_unlock:
2011 	mutex_unlock(&group->mutex);
2012 	iommu_group_put(group);
2013 
2014 	return ret;
2015 }
2016 EXPORT_SYMBOL_GPL(iommu_attach_device);
2017 
2018 int iommu_cache_invalidate(struct iommu_domain *domain, struct device *dev,
2019 			   struct iommu_cache_invalidate_info *inv_info)
2020 {
2021 	if (unlikely(!domain->ops->cache_invalidate))
2022 		return -ENODEV;
2023 
2024 	return domain->ops->cache_invalidate(domain, dev, inv_info);
2025 }
2026 EXPORT_SYMBOL_GPL(iommu_cache_invalidate);
2027 
2028 int iommu_sva_bind_gpasid(struct iommu_domain *domain,
2029 			   struct device *dev, struct iommu_gpasid_bind_data *data)
2030 {
2031 	if (unlikely(!domain->ops->sva_bind_gpasid))
2032 		return -ENODEV;
2033 
2034 	return domain->ops->sva_bind_gpasid(domain, dev, data);
2035 }
2036 EXPORT_SYMBOL_GPL(iommu_sva_bind_gpasid);
2037 
2038 int iommu_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev,
2039 			     ioasid_t pasid)
2040 {
2041 	if (unlikely(!domain->ops->sva_unbind_gpasid))
2042 		return -ENODEV;
2043 
2044 	return domain->ops->sva_unbind_gpasid(dev, pasid);
2045 }
2046 EXPORT_SYMBOL_GPL(iommu_sva_unbind_gpasid);
2047 
2048 static void __iommu_detach_device(struct iommu_domain *domain,
2049 				  struct device *dev)
2050 {
2051 	if ((domain->ops->is_attach_deferred != NULL) &&
2052 	    domain->ops->is_attach_deferred(domain, dev))
2053 		return;
2054 
2055 	if (unlikely(domain->ops->detach_dev == NULL))
2056 		return;
2057 
2058 	domain->ops->detach_dev(domain, dev);
2059 	trace_detach_device_from_domain(dev);
2060 }
2061 
2062 void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
2063 {
2064 	struct iommu_group *group;
2065 
2066 	group = iommu_group_get(dev);
2067 	if (!group)
2068 		return;
2069 
2070 	mutex_lock(&group->mutex);
2071 	if (iommu_group_device_count(group) != 1) {
2072 		WARN_ON(1);
2073 		goto out_unlock;
2074 	}
2075 
2076 	__iommu_detach_group(domain, group);
2077 
2078 out_unlock:
2079 	mutex_unlock(&group->mutex);
2080 	iommu_group_put(group);
2081 }
2082 EXPORT_SYMBOL_GPL(iommu_detach_device);
2083 
2084 struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
2085 {
2086 	struct iommu_domain *domain;
2087 	struct iommu_group *group;
2088 
2089 	group = iommu_group_get(dev);
2090 	if (!group)
2091 		return NULL;
2092 
2093 	domain = group->domain;
2094 
2095 	iommu_group_put(group);
2096 
2097 	return domain;
2098 }
2099 EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev);
2100 
2101 /*
2102  * Fast path for IOMMU_DOMAIN_DMA implementations, which already provide their
2103  * own guarantees that the group and its default domain are valid and correct.
2104  */
2105 struct iommu_domain *iommu_get_dma_domain(struct device *dev)
2106 {
2107 	return dev->iommu_group->default_domain;
2108 }
2109 
2110 /*
2111  * IOMMU groups are really the natural working unit of the IOMMU, but
2112  * the IOMMU API works on domains and devices.  Bridge that gap by
2113  * iterating over the devices in a group.  Ideally we'd have a single
2114  * device which represents the requestor ID of the group, but we also
2115  * allow IOMMU drivers to create policy-defined minimum sets, where
2116  * the physical hardware may be able to distinguish members, but we
2117  * wish to group them at a higher level (e.g. untrusted multi-function
2118  * PCI devices).  Thus we attach each device.
2119  */
2120 static int iommu_group_do_attach_device(struct device *dev, void *data)
2121 {
2122 	struct iommu_domain *domain = data;
2123 
2124 	return __iommu_attach_device(domain, dev);
2125 }
2126 
2127 static int __iommu_attach_group(struct iommu_domain *domain,
2128 				struct iommu_group *group)
2129 {
2130 	int ret;
2131 
2132 	if (group->default_domain && group->domain != group->default_domain)
2133 		return -EBUSY;
2134 
2135 	ret = __iommu_group_for_each_dev(group, domain,
2136 					 iommu_group_do_attach_device);
2137 	if (ret == 0)
2138 		group->domain = domain;
2139 
2140 	return ret;
2141 }
2142 
2143 int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
2144 {
2145 	int ret;
2146 
2147 	mutex_lock(&group->mutex);
2148 	ret = __iommu_attach_group(domain, group);
2149 	mutex_unlock(&group->mutex);
2150 
2151 	return ret;
2152 }
2153 EXPORT_SYMBOL_GPL(iommu_attach_group);
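
/*
 * Illustrative sketch only (not part of this file): a VFIO-style user taking
 * over a whole group with its own unmanaged domain.  Error handling is kept
 * minimal and the function name is hypothetical.
 */
static __maybe_unused struct iommu_domain *
example_take_over_group(struct bus_type *bus, struct iommu_group *group)
{
	struct iommu_domain *domain;

	domain = iommu_domain_alloc(bus);	/* IOMMU_DOMAIN_UNMANAGED */
	if (!domain)
		return NULL;

	if (iommu_attach_group(domain, group)) {
		iommu_domain_free(domain);
		return NULL;
	}

	/* Undo with iommu_detach_group() followed by iommu_domain_free() */
	return domain;
}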
2154 
2155 static int iommu_group_do_detach_device(struct device *dev, void *data)
2156 {
2157 	struct iommu_domain *domain = data;
2158 
2159 	__iommu_detach_device(domain, dev);
2160 
2161 	return 0;
2162 }
2163 
2164 static void __iommu_detach_group(struct iommu_domain *domain,
2165 				 struct iommu_group *group)
2166 {
2167 	int ret;
2168 
2169 	if (!group->default_domain) {
2170 		__iommu_group_for_each_dev(group, domain,
2171 					   iommu_group_do_detach_device);
2172 		group->domain = NULL;
2173 		return;
2174 	}
2175 
2176 	if (group->domain == group->default_domain)
2177 		return;
2178 
2179 	/* Detach by re-attaching to the default domain */
2180 	ret = __iommu_group_for_each_dev(group, group->default_domain,
2181 					 iommu_group_do_attach_device);
2182 	if (ret != 0)
2183 		WARN_ON(1);
2184 	else
2185 		group->domain = group->default_domain;
2186 }
2187 
2188 void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
2189 {
2190 	mutex_lock(&group->mutex);
2191 	__iommu_detach_group(domain, group);
2192 	mutex_unlock(&group->mutex);
2193 }
2194 EXPORT_SYMBOL_GPL(iommu_detach_group);
2195 
2196 phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
2197 {
2198 	if (unlikely(domain->ops->iova_to_phys == NULL))
2199 		return 0;
2200 
2201 	return domain->ops->iova_to_phys(domain, iova);
2202 }
2203 EXPORT_SYMBOL_GPL(iommu_iova_to_phys);
2204 
2205 static size_t iommu_pgsize(struct iommu_domain *domain,
2206 			   unsigned long addr_merge, size_t size)
2207 {
2208 	unsigned int pgsize_idx;
2209 	size_t pgsize;
2210 
2211 	/* Max page size that still fits into 'size' */
2212 	pgsize_idx = __fls(size);
2213 
2214 	/* need to consider alignment requirements? */
2215 	if (likely(addr_merge)) {
2216 		/* Max page size allowed by address */
2217 		unsigned int align_pgsize_idx = __ffs(addr_merge);
2218 		pgsize_idx = min(pgsize_idx, align_pgsize_idx);
2219 	}
2220 
2221 	/* build a mask of acceptable page sizes */
2222 	pgsize = (1UL << (pgsize_idx + 1)) - 1;
2223 
2224 	/* throw away page sizes not supported by the hardware */
2225 	pgsize &= domain->pgsize_bitmap;
2226 
2227 	/* make sure we're still sane */
2228 	BUG_ON(!pgsize);
2229 
2230 	/* pick the biggest page */
2231 	pgsize_idx = __fls(pgsize);
2232 	pgsize = 1UL << pgsize_idx;
2233 
2234 	return pgsize;
2235 }
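
/*
 * Worked example for iommu_pgsize(), with illustrative values: assume a
 * pgsize_bitmap of 4K | 2M | 1G, addr_merge = 0x40200000 and size = 0x400000.
 * Then __fls(size) = 22 and __ffs(addr_merge) = 21, so pgsize_idx = 21; the
 * mask (1UL << 22) - 1 intersected with the bitmap leaves only 4K and 2M, and
 * the largest of those, 2M, is returned.  The map/unmap loops then advance by
 * 2M and repeat the calculation for the remainder of the range.
 */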
2236 
2237 int __iommu_map(struct iommu_domain *domain, unsigned long iova,
2238 	      phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
2239 {
2240 	const struct iommu_ops *ops = domain->ops;
2241 	unsigned long orig_iova = iova;
2242 	unsigned int min_pagesz;
2243 	size_t orig_size = size;
2244 	phys_addr_t orig_paddr = paddr;
2245 	int ret = 0;
2246 
2247 	if (unlikely(ops->map == NULL ||
2248 		     domain->pgsize_bitmap == 0UL))
2249 		return -ENODEV;
2250 
2251 	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
2252 		return -EINVAL;
2253 
2254 	/* find out the minimum page size supported */
2255 	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
2256 
2257 	/*
2258 	 * both the virtual address and the physical one, as well as
2259 	 * the size of the mapping, must be aligned (at least) to the
2260 	 * size of the smallest page supported by the hardware
2261 	 */
2262 	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
2263 		pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
2264 		       iova, &paddr, size, min_pagesz);
2265 		return -EINVAL;
2266 	}
2267 
2268 	pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);
2269 
2270 	while (size) {
2271 		size_t pgsize = iommu_pgsize(domain, iova | paddr, size);
2272 
2273 		pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
2274 			 iova, &paddr, pgsize);
2275 		ret = ops->map(domain, iova, paddr, pgsize, prot, gfp);
2276 
2277 		if (ret)
2278 			break;
2279 
2280 		iova += pgsize;
2281 		paddr += pgsize;
2282 		size -= pgsize;
2283 	}
2284 
2285 	if (ops->iotlb_sync_map)
2286 		ops->iotlb_sync_map(domain);
2287 
2288 	/* unroll mapping in case something went wrong */
2289 	if (ret)
2290 		iommu_unmap(domain, orig_iova, orig_size - size);
2291 	else
2292 		trace_map(orig_iova, orig_paddr, orig_size);
2293 
2294 	return ret;
2295 }
2296 
2297 int iommu_map(struct iommu_domain *domain, unsigned long iova,
2298 	      phys_addr_t paddr, size_t size, int prot)
2299 {
2300 	might_sleep();
2301 	return __iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL);
2302 }
2303 EXPORT_SYMBOL_GPL(iommu_map);
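
/*
 * Illustrative sketch only (not part of this file): mapping a single minimum-
 * sized page into a domain the caller owns.  The IOVA/physical address are
 * caller-provided and must be aligned; the helper name is hypothetical.
 */
static int __maybe_unused example_map_one_page(struct iommu_domain *domain,
					       unsigned long iova,
					       phys_addr_t paddr)
{
	/* The smallest page size the hardware supports for this domain */
	size_t size = 1UL << __ffs(domain->pgsize_bitmap);
	int ret;

	ret = iommu_map(domain, iova, paddr, size, IOMMU_READ | IOMMU_WRITE);
	if (ret)
		return ret;

	/* The installed translation can be read back for sanity checking */
	WARN_ON(iommu_iova_to_phys(domain, iova) != paddr);
	return 0;
}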
2304 
2305 int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
2306 	      phys_addr_t paddr, size_t size, int prot)
2307 {
2308 	return __iommu_map(domain, iova, paddr, size, prot, GFP_ATOMIC);
2309 }
2310 EXPORT_SYMBOL_GPL(iommu_map_atomic);
2311 
2312 static size_t __iommu_unmap(struct iommu_domain *domain,
2313 			    unsigned long iova, size_t size,
2314 			    struct iommu_iotlb_gather *iotlb_gather)
2315 {
2316 	const struct iommu_ops *ops = domain->ops;
2317 	size_t unmapped_page, unmapped = 0;
2318 	unsigned long orig_iova = iova;
2319 	unsigned int min_pagesz;
2320 
2321 	if (unlikely(ops->unmap == NULL ||
2322 		     domain->pgsize_bitmap == 0UL))
2323 		return 0;
2324 
2325 	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
2326 		return 0;
2327 
2328 	/* find out the minimum page size supported */
2329 	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
2330 
2331 	/*
2332 	 * The virtual address, as well as the size of the mapping, must be
2333 	 * aligned (at least) to the size of the smallest page supported
2334 	 * by the hardware
2335 	 */
2336 	if (!IS_ALIGNED(iova | size, min_pagesz)) {
2337 		pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
2338 		       iova, size, min_pagesz);
2339 		return 0;
2340 	}
2341 
2342 	pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size);
2343 
2344 	/*
2345 	 * Keep iterating until we either unmap 'size' bytes (or more)
2346 	 * or we hit an area that isn't mapped.
2347 	 */
2348 	while (unmapped < size) {
2349 		size_t pgsize = iommu_pgsize(domain, iova, size - unmapped);
2350 
2351 		unmapped_page = ops->unmap(domain, iova, pgsize, iotlb_gather);
2352 		if (!unmapped_page)
2353 			break;
2354 
2355 		pr_debug("unmapped: iova 0x%lx size 0x%zx\n",
2356 			 iova, unmapped_page);
2357 
2358 		iova += unmapped_page;
2359 		unmapped += unmapped_page;
2360 	}
2361 
2362 	trace_unmap(orig_iova, size, unmapped);
2363 	return unmapped;
2364 }
2365 
2366 size_t iommu_unmap(struct iommu_domain *domain,
2367 		   unsigned long iova, size_t size)
2368 {
2369 	struct iommu_iotlb_gather iotlb_gather;
2370 	size_t ret;
2371 
2372 	iommu_iotlb_gather_init(&iotlb_gather);
2373 	ret = __iommu_unmap(domain, iova, size, &iotlb_gather);
2374 	iommu_tlb_sync(domain, &iotlb_gather);
2375 
2376 	return ret;
2377 }
2378 EXPORT_SYMBOL_GPL(iommu_unmap);
2379 
2380 size_t iommu_unmap_fast(struct iommu_domain *domain,
2381 			unsigned long iova, size_t size,
2382 			struct iommu_iotlb_gather *iotlb_gather)
2383 {
2384 	return __iommu_unmap(domain, iova, size, iotlb_gather);
2385 }
2386 EXPORT_SYMBOL_GPL(iommu_unmap_fast);
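
/*
 * Illustrative sketch only (not part of this file): unmapping several equally
 * sized ranges while deferring TLB invalidation to a single sync, which is
 * what iommu_unmap_fast() plus an iotlb_gather is for.  Names and the range
 * layout are hypothetical.
 */
static void __maybe_unused example_unmap_batched(struct iommu_domain *domain,
						 unsigned long iova,
						 size_t size, unsigned int count)
{
	struct iommu_iotlb_gather gather;
	unsigned int i;

	iommu_iotlb_gather_init(&gather);

	/* Queue the invalidations while unmapping each range */
	for (i = 0; i < count; i++)
		iommu_unmap_fast(domain, iova + i * size, size, &gather);

	/* One sync flushes everything gathered above */
	iommu_tlb_sync(domain, &gather);
}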
2387 
2388 size_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
2389 		    struct scatterlist *sg, unsigned int nents, int prot,
2390 		    gfp_t gfp)
2391 {
2392 	size_t len = 0, mapped = 0;
2393 	phys_addr_t start;
2394 	unsigned int i = 0;
2395 	int ret;
2396 
2397 	while (i <= nents) {
2398 		phys_addr_t s_phys = sg_phys(sg);
2399 
2400 		if (len && s_phys != start + len) {
2401 			ret = __iommu_map(domain, iova + mapped, start,
2402 					len, prot, gfp);
2403 
2404 			if (ret)
2405 				goto out_err;
2406 
2407 			mapped += len;
2408 			len = 0;
2409 		}
2410 
2411 		if (len) {
2412 			len += sg->length;
2413 		} else {
2414 			len = sg->length;
2415 			start = s_phys;
2416 		}
2417 
2418 		if (++i < nents)
2419 			sg = sg_next(sg);
2420 	}
2421 
2422 	return mapped;
2423 
2424 out_err:
2425 	/* undo mappings already done */
2426 	iommu_unmap(domain, iova, mapped);
2427 
2428 	return 0;
2430 }
2431 
2432 size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
2433 		    struct scatterlist *sg, unsigned int nents, int prot)
2434 {
2435 	might_sleep();
2436 	return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_KERNEL);
2437 }
2438 EXPORT_SYMBOL_GPL(iommu_map_sg);
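
/*
 * Illustrative sketch only (not part of this file): mapping a caller-built
 * sg_table contiguously in IOVA space.  __iommu_map_sg() above merges
 * physically contiguous entries before calling into the driver; the helper
 * name and the error policy are hypothetical.
 */
static int __maybe_unused example_map_sgtable(struct iommu_domain *domain,
					      unsigned long iova,
					      struct sg_table *sgt, int prot)
{
	size_t mapped;

	/* Returns the number of bytes mapped, or 0 on failure */
	mapped = iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, prot);

	return mapped ? 0 : -ENOMEM;
}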
2439 
2440 size_t iommu_map_sg_atomic(struct iommu_domain *domain, unsigned long iova,
2441 		    struct scatterlist *sg, unsigned int nents, int prot)
2442 {
2443 	return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_ATOMIC);
2444 }
2445 EXPORT_SYMBOL_GPL(iommu_map_sg_atomic);
2446 
2447 int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
2448 			       phys_addr_t paddr, u64 size, int prot)
2449 {
2450 	if (unlikely(domain->ops->domain_window_enable == NULL))
2451 		return -ENODEV;
2452 
2453 	return domain->ops->domain_window_enable(domain, wnd_nr, paddr, size,
2454 						 prot);
2455 }
2456 EXPORT_SYMBOL_GPL(iommu_domain_window_enable);
2457 
2458 void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr)
2459 {
2460 	if (unlikely(domain->ops->domain_window_disable == NULL))
2461 		return;
2462 
2463 	return domain->ops->domain_window_disable(domain, wnd_nr);
2464 }
2465 EXPORT_SYMBOL_GPL(iommu_domain_window_disable);
2466 
2467 /**
2468  * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
2469  * @domain: the iommu domain where the fault has happened
2470  * @dev: the device where the fault has happened
2471  * @iova: the faulting address
2472  * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...)
2473  *
2474  * This function should be called by the low-level IOMMU implementations
2475  * whenever IOMMU faults happen, to allow high-level users, that are
2476  * interested in such events, to know about them.
2477  *
2478  * This event may be useful for several possible use cases:
2479  * - mere logging of the event
2480  * - dynamic TLB/PTE loading
2481  * - restarting the faulting device, if required
2482  *
2483  * Returns 0 on success and an appropriate error code otherwise (if dynamic
2484  * PTE/TLB loading will one day be supported, implementations will be able
2485  * to tell whether it succeeded or not according to this return value).
2486  *
2487  * Specifically, -ENOSYS is returned if a fault handler isn't installed
2488  * (though fault handlers can also return -ENOSYS, in case they want to
2489  * elicit the default behavior of the IOMMU drivers).
2490  */
2491 int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
2492 		       unsigned long iova, int flags)
2493 {
2494 	int ret = -ENOSYS;
2495 
2496 	/*
2497 	 * if upper layers showed interest and installed a fault handler,
2498 	 * invoke it.
2499 	 */
2500 	if (domain->handler)
2501 		ret = domain->handler(domain, dev, iova, flags,
2502 						domain->handler_token);
2503 
2504 	trace_io_page_fault(dev, iova, flags);
2505 	return ret;
2506 }
2507 EXPORT_SYMBOL_GPL(report_iommu_fault);
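
/*
 * Illustrative sketch only (not part of this file): how a low-level IOMMU
 * driver's fault path might forward an event to the framework.  The fallback
 * policy and the function name are hypothetical.
 */
static void __maybe_unused example_driver_report_fault(struct iommu_domain *domain,
							struct device *dev,
							unsigned long iova,
							bool is_write)
{
	int flags = is_write ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;

	/*
	 * -ENOSYS means no handler was installed (or the handler asked for
	 * the default behaviour), so the driver applies its own policy.
	 */
	if (report_iommu_fault(domain, dev, iova, flags) == -ENOSYS)
		dev_err(dev, "example: unhandled fault at IOVA 0x%lx\n", iova);
}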
2508 
2509 static int __init iommu_init(void)
2510 {
2511 	iommu_group_kset = kset_create_and_add("iommu_groups",
2512 					       NULL, kernel_kobj);
2513 	BUG_ON(!iommu_group_kset);
2514 
2515 	iommu_debugfs_setup();
2516 
2517 	return 0;
2518 }
2519 core_initcall(iommu_init);
2520 
2521 int iommu_domain_get_attr(struct iommu_domain *domain,
2522 			  enum iommu_attr attr, void *data)
2523 {
2524 	struct iommu_domain_geometry *geometry;
2525 	bool *paging;
2526 	int ret = 0;
2527 
2528 	switch (attr) {
2529 	case DOMAIN_ATTR_GEOMETRY:
2530 		geometry  = data;
2531 		*geometry = domain->geometry;
2532 
2533 		break;
2534 	case DOMAIN_ATTR_PAGING:
2535 		paging  = data;
2536 		*paging = (domain->pgsize_bitmap != 0UL);
2537 		break;
2538 	default:
2539 		if (!domain->ops->domain_get_attr)
2540 			return -EINVAL;
2541 
2542 		ret = domain->ops->domain_get_attr(domain, attr, data);
2543 	}
2544 
2545 	return ret;
2546 }
2547 EXPORT_SYMBOL_GPL(iommu_domain_get_attr);
2548 
2549 int iommu_domain_set_attr(struct iommu_domain *domain,
2550 			  enum iommu_attr attr, void *data)
2551 {
2552 	int ret = 0;
2553 
2554 	switch (attr) {
2555 	default:
2556 		if (domain->ops->domain_set_attr == NULL)
2557 			return -EINVAL;
2558 
2559 		ret = domain->ops->domain_set_attr(domain, attr, data);
2560 	}
2561 
2562 	return ret;
2563 }
2564 EXPORT_SYMBOL_GPL(iommu_domain_set_attr);
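
/*
 * Illustrative sketch only (not part of this file): querying the IOVA window
 * a domain can translate before placing mappings in it.  The helper name is
 * hypothetical; DOMAIN_ATTR_GEOMETRY and the geometry struct are real.
 */
static int __maybe_unused example_query_geometry(struct iommu_domain *domain,
						 dma_addr_t *start,
						 dma_addr_t *end)
{
	struct iommu_domain_geometry geo;
	int ret;

	ret = iommu_domain_get_attr(domain, DOMAIN_ATTR_GEOMETRY, &geo);
	if (ret)
		return ret;

	/* With force_aperture set, IOVAs outside this window are rejected */
	*start = geo.aperture_start;
	*end = geo.aperture_end;
	return 0;
}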
2565 
2566 void iommu_get_resv_regions(struct device *dev, struct list_head *list)
2567 {
2568 	const struct iommu_ops *ops = dev->bus->iommu_ops;
2569 
2570 	if (ops && ops->get_resv_regions)
2571 		ops->get_resv_regions(dev, list);
2572 }
2573 
2574 void iommu_put_resv_regions(struct device *dev, struct list_head *list)
2575 {
2576 	const struct iommu_ops *ops = dev->bus->iommu_ops;
2577 
2578 	if (ops && ops->put_resv_regions)
2579 		ops->put_resv_regions(dev, list);
2580 }
2581 
2582 /**
2583  * generic_iommu_put_resv_regions - Reserved region driver helper
2584  * @dev: device for which to free reserved regions
2585  * @list: reserved region list for device
2586  *
2587  * IOMMU drivers can use this to implement their .put_resv_regions() callback
2588  * for simple reservations. Memory allocated for each reserved region will be
2589  * freed. If an IOMMU driver allocates additional resources per region, it is
2590  * going to have to implement a custom callback.
2591  */
2592 void generic_iommu_put_resv_regions(struct device *dev, struct list_head *list)
2593 {
2594 	struct iommu_resv_region *entry, *next;
2595 
2596 	list_for_each_entry_safe(entry, next, list, list)
2597 		kfree(entry);
2598 }
2599 EXPORT_SYMBOL(generic_iommu_put_resv_regions);
2600 
2601 struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
2602 						  size_t length, int prot,
2603 						  enum iommu_resv_type type)
2604 {
2605 	struct iommu_resv_region *region;
2606 
2607 	region = kzalloc(sizeof(*region), GFP_KERNEL);
2608 	if (!region)
2609 		return NULL;
2610 
2611 	INIT_LIST_HEAD(&region->list);
2612 	region->start = start;
2613 	region->length = length;
2614 	region->prot = prot;
2615 	region->type = type;
2616 	return region;
2617 }
2618 EXPORT_SYMBOL_GPL(iommu_alloc_resv_region);
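
/*
 * Illustrative sketch only (not part of this file): how an IOMMU driver might
 * implement its ->get_resv_regions() callback with the helper above, while
 * using generic_iommu_put_resv_regions() as its ->put_resv_regions().  The
 * doorbell address and size are made-up example values.
 */
static void __maybe_unused example_get_resv_regions(struct device *dev,
						    struct list_head *head)
{
	struct iommu_resv_region *region;

	/* Reserve a software-managed MSI window for this master */
	region = iommu_alloc_resv_region(0x08000000, 0x00100000,
					 IOMMU_WRITE, IOMMU_RESV_SW_MSI);
	if (region)
		list_add_tail(&region->list, head);
}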
2619 
2620 static int
2621 request_default_domain_for_dev(struct device *dev, unsigned long type)
2622 {
2623 	struct iommu_domain *domain;
2624 	struct iommu_group *group;
2625 	int ret;
2626 
2627 	/* Device must already be in a group before calling this function */
2628 	group = iommu_group_get(dev);
2629 	if (!group)
2630 		return -EINVAL;
2631 
2632 	mutex_lock(&group->mutex);
2633 
2634 	ret = 0;
2635 	if (group->default_domain && group->default_domain->type == type)
2636 		goto out;
2637 
2638 	/* Don't change mappings of existing devices */
2639 	ret = -EBUSY;
2640 	if (iommu_group_device_count(group) != 1)
2641 		goto out;
2642 
2643 	ret = -ENOMEM;
2644 	domain = __iommu_domain_alloc(dev->bus, type);
2645 	if (!domain)
2646 		goto out;
2647 
2648 	/* Attach the device to the domain */
2649 	ret = __iommu_attach_group(domain, group);
2650 	if (ret) {
2651 		iommu_domain_free(domain);
2652 		goto out;
2653 	}
2654 
2655 	/* Make the domain the default for this group */
2656 	if (group->default_domain)
2657 		iommu_domain_free(group->default_domain);
2658 	group->default_domain = domain;
2659 
2660 	iommu_create_device_direct_mappings(group, dev);
2661 
2662 	dev_info(dev, "Using iommu %s mapping\n",
2663 		 type == IOMMU_DOMAIN_DMA ? "dma" : "direct");
2664 
2665 	ret = 0;
2666 out:
2667 	mutex_unlock(&group->mutex);
2668 	iommu_group_put(group);
2669 
2670 	return ret;
2671 }
2672 
2673 /* Request that a device is direct mapped by the IOMMU */
2674 int iommu_request_dm_for_dev(struct device *dev)
2675 {
2676 	return request_default_domain_for_dev(dev, IOMMU_DOMAIN_IDENTITY);
2677 }
2678 
2679 /* Request that a device can't be direct mapped by the IOMMU */
2680 int iommu_request_dma_domain_for_dev(struct device *dev)
2681 {
2682 	return request_default_domain_for_dev(dev, IOMMU_DOMAIN_DMA);
2683 }
2684 
2685 void iommu_set_default_passthrough(bool cmd_line)
2686 {
2687 	if (cmd_line)
2688 		iommu_set_cmd_line_dma_api();
2689 
2690 	iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY;
2691 }
2692 
2693 void iommu_set_default_translated(bool cmd_line)
2694 {
2695 	if (cmd_line)
2696 		iommu_set_cmd_line_dma_api();
2697 
2698 	iommu_def_domain_type = IOMMU_DOMAIN_DMA;
2699 }
2700 
2701 bool iommu_default_passthrough(void)
2702 {
2703 	return iommu_def_domain_type == IOMMU_DOMAIN_IDENTITY;
2704 }
2705 EXPORT_SYMBOL_GPL(iommu_default_passthrough);
2706 
2707 const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
2708 {
2709 	const struct iommu_ops *ops = NULL;
2710 	struct iommu_device *iommu;
2711 
2712 	spin_lock(&iommu_device_lock);
2713 	list_for_each_entry(iommu, &iommu_device_list, list)
2714 		if (iommu->fwnode == fwnode) {
2715 			ops = iommu->ops;
2716 			break;
2717 		}
2718 	spin_unlock(&iommu_device_lock);
2719 	return ops;
2720 }
2721 
2722 int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
2723 		      const struct iommu_ops *ops)
2724 {
2725 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
2726 
2727 	if (fwspec)
2728 		return ops == fwspec->ops ? 0 : -EINVAL;
2729 
2730 	if (!dev_iommu_get(dev))
2731 		return -ENOMEM;
2732 
2733 	/* Preallocate for the overwhelmingly common case of 1 ID */
2734 	fwspec = kzalloc(struct_size(fwspec, ids, 1), GFP_KERNEL);
2735 	if (!fwspec)
2736 		return -ENOMEM;
2737 
2738 	of_node_get(to_of_node(iommu_fwnode));
2739 	fwspec->iommu_fwnode = iommu_fwnode;
2740 	fwspec->ops = ops;
2741 	dev_iommu_fwspec_set(dev, fwspec);
2742 	return 0;
2743 }
2744 EXPORT_SYMBOL_GPL(iommu_fwspec_init);
2745 
2746 void iommu_fwspec_free(struct device *dev)
2747 {
2748 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
2749 
2750 	if (fwspec) {
2751 		fwnode_handle_put(fwspec->iommu_fwnode);
2752 		kfree(fwspec);
2753 		dev_iommu_fwspec_set(dev, NULL);
2754 	}
2755 }
2756 EXPORT_SYMBOL_GPL(iommu_fwspec_free);
2757 
2758 int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids)
2759 {
2760 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
2761 	int i, new_num;
2762 
2763 	if (!fwspec)
2764 		return -EINVAL;
2765 
2766 	new_num = fwspec->num_ids + num_ids;
2767 	if (new_num > 1) {
2768 		fwspec = krealloc(fwspec, struct_size(fwspec, ids, new_num),
2769 				  GFP_KERNEL);
2770 		if (!fwspec)
2771 			return -ENOMEM;
2772 
2773 		dev_iommu_fwspec_set(dev, fwspec);
2774 	}
2775 
2776 	for (i = 0; i < num_ids; i++)
2777 		fwspec->ids[fwspec->num_ids + i] = ids[i];
2778 
2779 	fwspec->num_ids = new_num;
2780 	return 0;
2781 }
2782 EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
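
/*
 * Illustrative sketch only (not part of this file): how firmware glue code
 * (OF/ACPI) or a bus layer might describe a DMA master to its IOMMU.  The
 * stream ID value and the function name are hypothetical.
 */
static int __maybe_unused example_describe_master(struct device *dev,
						  struct fwnode_handle *iommu_fwnode,
						  const struct iommu_ops *ops)
{
	u32 sid = 0x42;	/* the ID this master uses on the IOMMU */
	int ret;

	ret = iommu_fwspec_init(dev, iommu_fwnode, ops);
	if (ret)
		return ret;

	/* Further IDs can be appended with additional calls */
	return iommu_fwspec_add_ids(dev, &sid, 1);
}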
2783 
2784 /*
2785  * Per device IOMMU features.
2786  */
2787 bool iommu_dev_has_feature(struct device *dev, enum iommu_dev_features feat)
2788 {
2789 	const struct iommu_ops *ops = dev->bus->iommu_ops;
2790 
2791 	if (ops && ops->dev_has_feat)
2792 		return ops->dev_has_feat(dev, feat);
2793 
2794 	return false;
2795 }
2796 EXPORT_SYMBOL_GPL(iommu_dev_has_feature);
2797 
2798 int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
2799 {
2800 	const struct iommu_ops *ops = dev->bus->iommu_ops;
2801 
2802 	if (ops && ops->dev_enable_feat)
2803 		return ops->dev_enable_feat(dev, feat);
2804 
2805 	return -ENODEV;
2806 }
2807 EXPORT_SYMBOL_GPL(iommu_dev_enable_feature);
2808 
2809 /*
2810  * Device drivers should do the necessary cleanup before calling this.
2811  * For example, before disabling the aux-domain feature, the device driver
2812  * should detach all aux-domains. Otherwise, this will return -EBUSY.
2813  */
2814 int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
2815 {
2816 	const struct iommu_ops *ops = dev->bus->iommu_ops;
2817 
2818 	if (ops && ops->dev_disable_feat)
2819 		return ops->dev_disable_feat(dev, feat);
2820 
2821 	return -EBUSY;
2822 }
2823 EXPORT_SYMBOL_GPL(iommu_dev_disable_feature);
2824 
2825 bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat)
2826 {
2827 	const struct iommu_ops *ops = dev->bus->iommu_ops;
2828 
2829 	if (ops && ops->dev_feat_enabled)
2830 		return ops->dev_feat_enabled(dev, feat);
2831 
2832 	return false;
2833 }
2834 EXPORT_SYMBOL_GPL(iommu_dev_feature_enabled);
2835 
2836 /*
2837  * Aux-domain specific attach/detach.
2838  *
2839  * Only works if iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX) returns
2840  * true. Also, as long as domains are attached to a device through this
2841  * interface, any attempt to call iommu_attach_device() should fail
2842  * (iommu_detach_device() can't fail, so we fail when trying to re-attach).
2843  * This should make us safe against a device being attached to a guest as a
2844  * whole while there are still pasid users on it (aux and sva).
2845  */
2846 int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev)
2847 {
2848 	int ret = -ENODEV;
2849 
2850 	if (domain->ops->aux_attach_dev)
2851 		ret = domain->ops->aux_attach_dev(domain, dev);
2852 
2853 	if (!ret)
2854 		trace_attach_device_to_domain(dev);
2855 
2856 	return ret;
2857 }
2858 EXPORT_SYMBOL_GPL(iommu_aux_attach_device);
2859 
2860 void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev)
2861 {
2862 	if (domain->ops->aux_detach_dev) {
2863 		domain->ops->aux_detach_dev(domain, dev);
2864 		trace_detach_device_from_domain(dev);
2865 	}
2866 }
2867 EXPORT_SYMBOL_GPL(iommu_aux_detach_device);
2868 
2869 int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
2870 {
2871 	int ret = -ENODEV;
2872 
2873 	if (domain->ops->aux_get_pasid)
2874 		ret = domain->ops->aux_get_pasid(domain, dev);
2875 
2876 	return ret;
2877 }
2878 EXPORT_SYMBOL_GPL(iommu_aux_get_pasid);
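
/*
 * Illustrative sketch only (not part of this file): attaching an aux domain
 * to a device that has IOMMU_DEV_FEAT_AUX enabled and retrieving the PASID
 * under which its mappings are installed.  The helper name is hypothetical.
 */
static int __maybe_unused example_aux_attach(struct iommu_domain *domain,
					     struct device *dev)
{
	int pasid, ret;

	if (!iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX))
		return -ENODEV;

	ret = iommu_aux_attach_device(domain, dev);
	if (ret)
		return ret;

	/* DMA tagged with this PASID will be translated by the aux domain */
	pasid = iommu_aux_get_pasid(domain, dev);
	if (pasid < 0)
		iommu_aux_detach_device(domain, dev);

	return pasid;
}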
2879 
2880 /**
2881  * iommu_sva_bind_device() - Bind a process address space to a device
2882  * @dev: the device
2883  * @mm: the mm to bind, caller must hold a reference to it
 * @drvdata: opaque data pointer passed to the IOMMU driver's sva_bind() callback
2884  *
2885  * Create a bond between device and address space, allowing the device to access
2886  * the mm using the returned PASID. If a bond already exists between @dev and
2887  * @mm, it is returned and an additional reference is taken. Caller must call
2888  * iommu_sva_unbind_device() to release each reference.
2889  *
2890  * iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA) must be called first, to
2891  * initialize the required SVA features.
2892  *
2893  * On error, returns an ERR_PTR value.
2894  */
2895 struct iommu_sva *
2896 iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata)
2897 {
2898 	struct iommu_group *group;
2899 	struct iommu_sva *handle = ERR_PTR(-EINVAL);
2900 	const struct iommu_ops *ops = dev->bus->iommu_ops;
2901 
2902 	if (!ops || !ops->sva_bind)
2903 		return ERR_PTR(-ENODEV);
2904 
2905 	group = iommu_group_get(dev);
2906 	if (!group)
2907 		return ERR_PTR(-ENODEV);
2908 
2909 	/* Ensure device count and domain don't change while we're binding */
2910 	mutex_lock(&group->mutex);
2911 
2912 	/*
2913 	 * To keep things simple, SVA currently doesn't support IOMMU groups
2914 	 * with more than one device. Existing SVA-capable systems are not
2915 	 * affected by the problems that required IOMMU groups (lack of ACS
2916 	 * isolation, device ID aliasing and other hardware issues).
2917 	 */
2918 	if (iommu_group_device_count(group) != 1)
2919 		goto out_unlock;
2920 
2921 	handle = ops->sva_bind(dev, mm, drvdata);
2922 
2923 out_unlock:
2924 	mutex_unlock(&group->mutex);
2925 	iommu_group_put(group);
2926 
2927 	return handle;
2928 }
2929 EXPORT_SYMBOL_GPL(iommu_sva_bind_device);
2930 
2931 /**
2932  * iommu_sva_unbind_device() - Remove a bond created with iommu_sva_bind_device
2933  * @handle: the handle returned by iommu_sva_bind_device()
2934  *
2935  * Put reference to a bond between device and address space. The device should
2936  * not be issuing any more transactions for this PASID. All outstanding page
2937  * requests for this PASID must have been flushed to the IOMMU.
2940  */
2941 void iommu_sva_unbind_device(struct iommu_sva *handle)
2942 {
2943 	struct iommu_group *group;
2944 	struct device *dev = handle->dev;
2945 	const struct iommu_ops *ops = dev->bus->iommu_ops;
2946 
2947 	if (!ops || !ops->sva_unbind)
2948 		return;
2949 
2950 	group = iommu_group_get(dev);
2951 	if (!group)
2952 		return;
2953 
2954 	mutex_lock(&group->mutex);
2955 	ops->sva_unbind(handle);
2956 	mutex_unlock(&group->mutex);
2957 
2958 	iommu_group_put(group);
2959 }
2960 EXPORT_SYMBOL_GPL(iommu_sva_unbind_device);
2961 
2962 int iommu_sva_set_ops(struct iommu_sva *handle,
2963 		      const struct iommu_sva_ops *sva_ops)
2964 {
2965 	if (handle->ops && handle->ops != sva_ops)
2966 		return -EEXIST;
2967 
2968 	handle->ops = sva_ops;
2969 	return 0;
2970 }
2971 EXPORT_SYMBOL_GPL(iommu_sva_set_ops);
2972 
2973 int iommu_sva_get_pasid(struct iommu_sva *handle)
2974 {
2975 	const struct iommu_ops *ops = handle->dev->bus->iommu_ops;
2976 
2977 	if (!ops || !ops->sva_get_pasid)
2978 		return IOMMU_PASID_INVALID;
2979 
2980 	return ops->sva_get_pasid(handle);
2981 }
2982 EXPORT_SYMBOL_GPL(iommu_sva_get_pasid);
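
/*
 * Illustrative sketch only (not part of this file): binding a process address
 * space to a device and retrieving the PASID to program into the hardware.
 * IOMMU_DEV_FEAT_SVA must have been enabled beforehand; the helper name and
 * the NULL drvdata are assumptions for the example.
 */
static int __maybe_unused example_sva_bind(struct device *dev,
					   struct mm_struct *mm,
					   struct iommu_sva **bond)
{
	struct iommu_sva *handle;
	int pasid;

	/* The caller must hold a reference on @mm across the bind */
	handle = iommu_sva_bind_device(dev, mm, NULL);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	pasid = iommu_sva_get_pasid(handle);
	if (pasid == IOMMU_PASID_INVALID) {
		iommu_sva_unbind_device(handle);
		return -ENODEV;
	}

	/* Drop the bond later with iommu_sva_unbind_device(*bond) */
	*bond = handle;
	return pasid;
}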
2983